topology-aware: golangci-lint fixes.
Signed-off-by: Krisztian Litkey <[email protected]>
klihub committed Dec 10, 2024
1 parent 331b1ec commit 8eed4bb
Showing 10 changed files with 58 additions and 161 deletions.
11 changes: 6 additions & 5 deletions cmd/plugins/topology-aware/policy/cache.go

@@ -31,7 +31,9 @@ const (
 
 func (p *policy) saveAllocations() {
 	p.cache.SetPolicyEntry(keyAllocations, cache.Cacheable(&p.allocations))
-	p.cache.Save()
+	if err := p.cache.Save(); err != nil {
+		log.Warnf("failed to save allocations to cache: %v", err)
+	}
 }
 
 func (p *policy) restoreAllocations(allocations *allocations) error {

@@ -231,12 +233,11 @@ func (a *allocations) Get() interface{} {
 func (a *allocations) Set(value interface{}) {
 	var from *allocations
 
-	switch value.(type) {
+	switch val := value.(type) {
 	case allocations:
-		v := value.(allocations)
-		from = &v
+		from = &val
 	case *allocations:
-		from = value.(*allocations)
+		from = val
 	}
 
 	a.grants = make(map[string]Grant, 32)
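
Both cache.go hunks are textbook golangci-lint remediations: errcheck flags the discarded return value of p.cache.Save(), and staticcheck S1034 flags re-asserting a value that the type switch has already asserted. Below is a minimal, self-contained sketch of the S1034 fix; the allocations type and set helper are illustrative stand-ins, not code from this repository:

    package main

    import "fmt"

    type allocations struct{ grants map[string]int }

    // set shows the S1034 fix: bind the switched value ("val := value.(type)")
    // so each case can use it directly, with no second type assertion.
    func set(value interface{}) *allocations {
    	var from *allocations
    	switch val := value.(type) {
    	case allocations:
    		from = &val // val already has type allocations here
    	case *allocations:
    		from = val // and type *allocations here
    	}
    	return from
    }

    func main() {
    	a := allocations{grants: map[string]int{"container0": 1}}
    	fmt.Println(set(a).grants, set(&a).grants)
    }
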
20 changes: 12 additions & 8 deletions cmd/plugins/topology-aware/policy/coldstart_test.go

@@ -38,7 +38,9 @@ func sendEvent(param interface{}) error {
 
 	fmt.Printf("Event received: %v", param)
 	event := param.(*events.Policy)
-	globalPolicy.HandleEvent(event)
+	if _, err := globalPolicy.HandleEvent(event); err != nil {
+		log.Warnf("failed to handle test event: %v", err)
+	}
 	return nil
 }
 
@@ -126,18 +128,20 @@ func TestColdStart(t *testing.T) {
 			t.Errorf("Expected one memory controller %v, got: %v", tc.expectedPMEMSystemNodeID, mems)
 		}
 
-		if grant.MemoryType()&memoryDRAM != 0 {
-			// FIXME: should we report only the limited memory types or the granted types
-			// while the cold start is going on?
-			// t.Errorf("No DRAM was expected before coldstart timer: %v", grant.MemoryType())
-		}
+		// FIXME: should we report only the limited memory types or the granted types
+		// while the cold start is going on?
+		//if grant.MemoryType()&memoryDRAM != 0 {
+		//	t.Errorf("No DRAM was expected before coldstart timer: %v", grant.MemoryType())
+		//}
 
 		globalPolicy = policy
 
-		policy.options.SendEvent(&events.Policy{
+		if err := policy.options.SendEvent(&events.Policy{
 			Type: events.ContainerStarted,
 			Data: tc.container,
-		})
+		}); err != nil {
+			log.Warnf("failed to send test event: %v", err)
+		}
 
 		time.Sleep(tc.expectedColdStartTimeout * 2)
 
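
Two findings drive the test changes: errcheck again (HandleEvent and SendEvent return errors that were silently dropped), and staticcheck SA9003, which flags an if branch whose body consists only of comments; that is why the whole dead DRAM check is now commented out while the FIXME note is kept. A hedged sketch of consuming a two-value return the way the test now does; handleEvent is an illustrative stand-in, not the plugin's API:

    package main

    import "log"

    // handleEvent mimics the two-value shape of HandleEvent in the hunk
    // above; its name and behavior are illustrative only.
    func handleEvent(ev string) (bool, error) {
    	return ev != "", nil
    }

    func main() {
    	// errcheck is satisfied once the error is consumed; the unneeded
    	// bool result is explicitly discarded with the blank identifier.
    	if _, err := handleEvent("ContainerStarted"); err != nil {
    		log.Printf("failed to handle event: %v", err)
    	}
    }
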
12 changes: 2 additions & 10 deletions cmd/plugins/topology-aware/policy/libmem.go

@@ -18,9 +18,9 @@ import libmem "github.com/containers/nri-plugins/pkg/resmgr/lib/memory"
 
 func (p *policy) getMemOffer(pool Node, req Request) (*libmem.Offer, error) {
 	var (
-		zone libmem.NodeMask
-		mtyp libmem.TypeMask
 		ctr  = req.GetContainer()
+		zone = libmem.NodeMask(0)
+		mtyp = libmem.TypeMask(0)
 	)
 
 	if memType := req.MemoryType(); memType == memoryPreserve {

@@ -80,18 +80,10 @@ func (p *policy) releaseMem(id string) error {
 	return p.memAllocator.Release(id)
 }
 
-func (p *policy) poolZoneType(pool Node, memType memoryType) libmem.TypeMask {
-	return p.memAllocator.ZoneType(libmem.NewNodeMask(pool.GetMemset(memType).Members()...))
-}
-
 func (p *policy) memZoneType(zone libmem.NodeMask) libmem.TypeMask {
 	return p.memAllocator.ZoneType(zone)
 }
 
-func (p *policy) poolZone(pool Node, memType memoryType) libmem.NodeMask {
-	return libmem.NewNodeMask(pool.GetMemset(memType).Members()...)
-}
-
 func (p *policy) poolZoneCapacity(pool Node, memType memoryType) int64 {
 	return p.memAllocator.ZoneCapacity(libmem.NewNodeMask(pool.GetMemset(memType).Members()...))
 }
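
The deleted poolZoneType and poolZone helpers are "unused" findings (U1000): unexported functions with no remaining callers, for which the only sensible fix is removal. The var block rewrite swaps bare zero-valued declarations for explicit initializers grouped after ctr, which gofmt then aligns. A small sketch under those assumptions; NodeMask and TypeMask here are simplified stand-ins for the libmem types:

    package main

    import "fmt"

    type NodeMask uint64 // illustrative stand-in for libmem.NodeMask
    type TypeMask int    // illustrative stand-in for libmem.TypeMask

    // An unexported helper with no callers would be flagged by the "unused"
    // linter (U1000), exactly like the deleted poolZoneType and poolZone:
    //
    //	func poolZone() NodeMask { return 0 } // U1000: poolZone is unused

    func main() {
    	// Grouped var declarations with explicit initializers, mirroring
    	// the rewritten block in getMemOffer; gofmt aligns the "=" signs.
    	var (
    		ctr  = "container0"
    		zone = NodeMask(0)
    		mtyp = TypeMask(0)
    	)
    	fmt.Println(ctr, zone, mtyp)
    }
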
10 changes: 3 additions & 7 deletions cmd/plugins/topology-aware/policy/mocks_test.go

@@ -128,11 +128,9 @@ func (p *mockCPUPackage) SstInfo() *sst.SstPackageInfo {
 }
 
 type mockCPU struct {
-	isolated cpuset.CPUSet
-	online   cpuset.CPUSet
-	id       idset.ID
-	node     mockSystemNode
-	pkg      mockCPUPackage
+	id   idset.ID
+	node mockSystemNode
+	pkg  mockCPUPackage
 }
 
 func (c *mockCPU) BaseFrequency() uint64 {

@@ -334,8 +332,6 @@ type mockContainer struct {
 	namespace                             string
 	returnValueForGetResourceRequirements v1.ResourceRequirements
 	returnValueForGetID                   string
-	memoryLimit                           int64
-	cpuset                                cpuset.CPUSet
 	returnValueForQOSClass                v1.PodQOSClass
 	pod                                   cache.Pod
 }
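
Both mock cleanups are unused-field findings: the "unused" linter (formerly structcheck) reports fields of unexported types that nothing reads or writes. After deleting them, gofmt realigns the surviving fields, which is why the kept lines also appear changed in the diff. A sketch of the shape, with simplified stand-in types:

    package main

    import "fmt"

    // mockCPU mirrors the cleaned-up test mock. Fields like the deleted
    // isolated, online, memoryLimit, and cpuset would be flagged because
    // no code references them.
    type mockCPU struct {
    	id   int
    	node string
    	pkg  string
    }

    func main() {
    	c := mockCPU{id: 0, node: "node0", pkg: "package0"}
    	fmt.Printf("cpu %d on %s (%s)\n", c.id, c.node, c.pkg)
    }
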
28 changes: 9 additions & 19 deletions cmd/plugins/topology-aware/policy/node.go

@@ -105,9 +105,9 @@ type Node interface {
 	// AssignNUMANodes assigns the given set of NUMA nodes to this one.
 	AssignNUMANodes(ids []idset.ID)
 	// DepthFirst traverse the tree@node calling the function at each node.
-	DepthFirst(func(Node) error) error
+	DepthFirst(func(Node))
 	// BreadthFirst traverse the tree@node calling the function at each node.
-	BreadthFirst(func(Node) error) error
+	BreadthFirst(func(Node))
 	// Dump state of the node.
 	Dump(string, ...int)
 	// Dump type-specific state of the node.

@@ -329,29 +329,19 @@ func (n *node) dump(prefix string, level ...int) {
 }
 
 // Do a depth-first traversal starting at node calling the given function at each node.
-func (n *node) DepthFirst(fn func(Node) error) error {
+func (n *node) DepthFirst(fn func(Node)) {
 	for _, c := range n.children {
-		if err := c.DepthFirst(fn); err != nil {
-			return err
-		}
+		c.DepthFirst(fn)
 	}
-
-	return fn(n)
+	fn(n)
 }
 
 // Do a breadth-first traversal starting at node calling the given function at each node.
-func (n *node) BreadthFirst(fn func(Node) error) error {
-	if err := fn(n); err != nil {
-		return err
-	}
-
+func (n *node) BreadthFirst(fn func(Node)) {
+	fn(n)
 	for _, c := range n.children {
-		if err := c.BreadthFirst(fn); err != nil {
-			return err
-		}
+		c.BreadthFirst(fn)
 	}
-
-	return nil
 }
 
 // System returns the policy System instance.

@@ -791,7 +781,7 @@ func (n *socketnode) HintScore(hint topology.Hint) float64 {
 func (p *policy) NewVirtualNode(name string, parent Node) Node {
 	n := &virtualnode{}
 	n.self.node = n
-	n.node.init(p, fmt.Sprintf("%s", name), VirtualNode, parent)
+	n.node.init(p, name, VirtualNode, parent)
 
 	return n
 }
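
The traversal rewrite removes error plumbing that no visitor ever used (every callback returned nil, an unparam-style finding), and the last hunk applies S1025: fmt.Sprintf("%s", name) on a value that is already a string. A runnable sketch of the simplified traversals; the node type here is a bare stand-in for the policy's pool tree:

    package main

    import "fmt"

    // node is a minimal stand-in for the policy's pool-tree node.
    type node struct {
    	name     string
    	children []*node
    }

    // DepthFirst now takes func(*node) with no error: the error return was
    // dropped because every visitor unconditionally returned nil.
    // Children are visited before the node itself (post-order).
    func (n *node) DepthFirst(fn func(*node)) {
    	for _, c := range n.children {
    		c.DepthFirst(fn)
    	}
    	fn(n)
    }

    // BreadthFirst visits the node before its children. (As implemented,
    // this recursion is a pre-order depth-first walk rather than a true
    // breadth-first one; the simplification preserves that pre-existing
    // behavior.)
    func (n *node) BreadthFirst(fn func(*node)) {
    	fn(n)
    	for _, c := range n.children {
    		c.BreadthFirst(fn)
    	}
    }

    func main() {
    	root := &node{name: "root", children: []*node{{name: "socket0"}, {name: "socket1"}}}
    	root.DepthFirst(func(n *node) { fmt.Println("post-order:", n.name) })
    	root.BreadthFirst(func(n *node) { fmt.Println("pre-order: ", n.name) })
    }
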
2 changes: 1 addition & 1 deletion cmd/plugins/topology-aware/policy/pod-preferences.go

@@ -492,7 +492,7 @@ func cpuAllocationPreferences(pod cache.Pod, container cache.Container) (int, in
 	switch {
 	case container.PreserveCpuResources():
 		return 0, fraction, false, cpuPreserve, prio
-	case preferReserved == true:
+	case preferReserved:
 		return 0, fraction, false, cpuReserved, prio
 	case checkReservedPoolNamespaces(namespace) && !explicitReservation:
 		return 0, fraction, false, cpuReserved, prio
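
This one-line change is gosimple S1002: comparing a boolean to true (or false) is redundant, so the value itself serves as the condition. A tiny sketch mirroring the switch shape; the names are stand-ins:

    package main

    import "fmt"

    func main() {
    	preferReserved := true

    	// gosimple S1002: use the boolean directly (or its negation)
    	// instead of comparing it against a constant.
    	switch {
    	case preferReserved: // was: case preferReserved == true:
    		fmt.Println("allocating from the reserved CPU pool")
    	default:
    		fmt.Println("allocating from the shared pool")
    	}
    }
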
68 changes: 4 additions & 64 deletions cmd/plugins/topology-aware/policy/pools.go

@@ -184,7 +184,7 @@ func (p *policy) buildPoolsByTopology() error {
 
 	// enumerate pools, calculate depth, discover resource capacity, assign NUMA nodes
 	p.pools = make([]Node, 0)
-	p.root.DepthFirst(func(n Node) error {
+	p.root.DepthFirst(func(n Node) {
 		p.pools = append(p.pools, n)
 		n.(*node).id = p.nodeCnt
 		p.nodeCnt++

@@ -195,8 +195,6 @@
 		n.DiscoverSupply(assigned[n.(*node).self.node])
 		delete(assigned, n.(*node).self.node)
-
-		return nil
 	})
 
 	// make sure all PMEM, HBM nodes got assigned

@@ -634,75 +632,18 @@ func (p *policy) updateSharedAllocations(grant *Grant) {
 	}
 }
 
-func (p *policy) filterInsufficientResources(req Request, pools []Node) []Node {
-	filtered := make([]Node, 0)
-
-	memNeed := req.MemAmountToAllocate()
-	isolate := req.Isolate()
-	full, fraction := req.FullCPUs(), req.CPUFraction()
-
-	for _, node := range pools {
-		// check pool memory availability
-		memType := req.MemoryType()
-		if memType == memoryUnspec || memType == memoryPreserve {
-			memType = memoryAll
-		}
-
-		memAvail := p.poolZoneFree(node, memType)
-		if memAvail < memNeed {
-			log.Debug("%s has insufficient available %s memory (%s < %s)", node.Name(),
-				memType, prettyMem(memAvail), prettyMem(memNeed))
-			continue
-		}
-
-		log.Debug("%s has enough available %s memory (%s >= %s)", node.Name(),
-			memType, prettyMem(memAvail), prettyMem(memNeed))
-
-		cs := node.FreeSupply()
-
-		// check pool cpu availability
-		isolated := cs.IsolatedCPUs().Size()
-		slicable := cs.AllocatableSharedCPU()
-
-		if isolate {
-			if isolated < full && slicable < 1000*full {
-				log.Debug("%s has insufficient slicable capacity (%dm) for %d isolated CPUs",
-					node.Name(), slicable, full)
-				continue
-			}
-
-			log.Debug("%s has enough slicable capacity (%dm) for %d isolated CPUs",
-				node.Name(), slicable, full)
-		}
-
-		if slicable < 1000*full+fraction {
-			log.Debug("%s has insufficient slicable capacity (%dm) for %d+%dm full+fractional CPU",
-				node.Name(), slicable, full, fraction)
-			continue
-		}
-
-		log.Debug("%s has enough slicable capacity (%dm) for %d+%dm full+fractional CPU",
-			node.Name(), slicable, full, fraction)
-
-		filtered = append(filtered, node)
-	}
-
-	return filtered
-}
-
 // Score pools against the request and sort them by score.
 func (p *policy) sortPoolsByScore(req Request, aff map[int]int32) (map[int]Score, []Node) {
 	scores := make(map[int]Score, p.nodeCnt)
 
-	p.root.DepthFirst(func(n Node) error {
+	p.root.DepthFirst(func(n Node) {
 		scores[n.NodeID()] = n.GetScore(req)
-		return nil
 	})
 
 	// Filter out pools which don't have enough uncompressible resources
 	// (memory) to satisfy the request.
-	//filteredPools := p.filterInsufficientResources(req, p.pools)
-	filteredPools := make([]Node, len(p.pools), len(p.pools))
+	filteredPools := make([]Node, len(p.pools))
 	copy(filteredPools, p.pools)
 
 	sort.Slice(filteredPools, func(i, j int) bool {

@@ -1048,12 +989,11 @@ func affinityScore(affinities map[int]int32, node Node) float64 {
 		a := affinities[n.NodeID()]
 		score += q * float64(a)
 	}
-	node.BreadthFirst(func(n Node) error {
+	node.BreadthFirst(func(n Node) {
 		diff := float64(n.RootDistance() - node.RootDistance())
 		q := math.Pow(Q, diff)
 		a := affinities[n.NodeID()]
 		score += q * float64(a)
-		return nil
 	})
 	return score
 }
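
The pools.go hunks bundle three findings: the DepthFirst/BreadthFirst callbacks lose their always-nil error returns to match the new traversal signatures, filterInsufficientResources is deleted outright because its only call site was already commented out (leaving the whole function as an "unused" U1000 finding), and gosimple S1019 flags make([]Node, len(p.pools), len(p.pools)) for stating the capacity twice. A sketch of the S1019 fix with a stand-in slice:

    package main

    import "fmt"

    func main() {
    	pools := []string{"root", "socket0", "socket1"}

    	// gosimple S1019: make([]T, n, n) repeats the length as the
    	// capacity; make([]T, n) allocates the identical slice.
    	filteredPools := make([]string, len(pools))
    	copy(filteredPools, pools)

    	fmt.Println(filteredPools)
    }
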
