diff --git a/cmd/plugins/topology-aware/policy/cache.go b/cmd/plugins/topology-aware/policy/cache.go
index 95a1596f9..daa886c56 100644
--- a/cmd/plugins/topology-aware/policy/cache.go
+++ b/cmd/plugins/topology-aware/policy/cache.go
@@ -29,7 +29,7 @@ const (
 )
 
 func (p *policy) saveAllocations() {
-	p.cache.SetPolicyEntry(keyAllocations, cache.Cachable(&p.allocations))
+	p.cache.SetPolicyEntry(keyAllocations, cache.Cacheable(&p.allocations))
 	p.cache.Save()
 }
 
diff --git a/cmd/plugins/topology-aware/policy/cache_test.go b/cmd/plugins/topology-aware/policy/cache_test.go
index effb9bca6..db9fcb4f7 100644
--- a/cmd/plugins/topology-aware/policy/cache_test.go
+++ b/cmd/plugins/topology-aware/policy/cache_test.go
@@ -73,7 +73,7 @@ func TestToGrant(t *testing.T) {
 				t.Errorf("Expected error, but got success")
 			}
 			if !tc.expectedError && err != nil {
-				t.Errorf("Unxpected error: %+v", err)
+				t.Errorf("Unexpected error: %+v", err)
 			}
 		})
 	}
@@ -122,7 +122,7 @@ func TestAllocationMarshalling(t *testing.T) {
 				t.Errorf("Expected unmarshalling error, but got success")
 			}
 			if !tc.expectedUnmarshallingError && unmarshallingErr != nil {
-				t.Errorf("Unxpected unmarshalling error: %+v", unmarshallingErr)
+				t.Errorf("Unexpected unmarshalling error: %+v", unmarshallingErr)
 			}
 
 			out, marshallingErr := alloc.MarshalJSON()
@@ -133,7 +133,7 @@ func TestAllocationMarshalling(t *testing.T) {
 				t.Errorf("Expected marshalling error, but got success")
 			}
 			if !tc.expectedMarshallingError && marshallingErr != nil {
-				t.Errorf("Unxpected marshalling error: %+v", marshallingErr)
+				t.Errorf("Unexpected marshalling error: %+v", marshallingErr)
 			}
 		})
 
diff --git a/cmd/plugins/topology-aware/policy/node.go b/cmd/plugins/topology-aware/policy/node.go
index 30dad107f..8391e2eec 100644
--- a/cmd/plugins/topology-aware/policy/node.go
+++ b/cmd/plugins/topology-aware/policy/node.go
@@ -242,7 +242,7 @@ func (n *node) RootDistance() int {
 	return n.depth
 }
 
-// NodeHeight returns the hight of this node (tree depth - node depth).
+// NodeHeight returns the height of this node (tree depth - node depth).
 func (n *node) NodeHeight() int {
 	if n.IsNil() {
 		return -1
@@ -278,7 +278,7 @@ func (n *node) LinkParent(parent Node) {
 	n.depth = parent.RootDistance() + 1
 }
 
-// AddChildren appends the nodes to the childres, *WITHOUT* setting their parent.
+// AddChildren appends the nodes to the children, *WITHOUT* setting their parent.
 func (n *node) AddChildren(nodes []Node) {
 	n.children = append(n.children, nodes...)
 }
diff --git a/cmd/plugins/topology-aware/policy/pools.go b/cmd/plugins/topology-aware/policy/pools.go
index 4fea186c5..7fd26b805 100644
--- a/cmd/plugins/topology-aware/policy/pools.go
+++ b/cmd/plugins/topology-aware/policy/pools.go
@@ -500,7 +500,7 @@ func (p *policy) allocatePool(container cache.Container, poolHint string) (Grant
 	// Workload 1 no longer fits to the leaf node, because the total
 	// reservation from the leaf node is over the memory maximum.
 	// Thus, it's moved upwards in the tree to the root node. Memory
-	// resevations are again updated accordingly:
+	// reservations are again updated accordingly:
 	//
 	// +----------------+
 	// |Total mem: 4G   |
@@ -850,7 +850,7 @@ func (p *policy) sortPoolsByScore(req Request, aff map[int]int32) (map[int]Score
 		return nil
 	})
 
-	// Filter out pools which don't have enough uncompressible resources
+	// Filter out pools which don't have enough incompressible resources
 	// (memory) to satisfy the request.
 	filteredPools := p.filterInsufficientResources(req, p.pools)
 
@@ -897,29 +897,29 @@ func (p *policy) compareScores(request Request, pools []Node, scores map[int]Sco
 	//     * more isolated capacity wins
 	//     * for a tie, prefer the smaller id
 	//   - for (non-reserved) exclusive allocations
-	//     * more slicable (shared) capacity wins
+	//     * more sliceable (shared) capacity wins
 	//     * for a tie, prefer the smaller id
 	//   - for (non-reserved) shared-only allocations
 	//     * fewer colocated containers win
 	//     * for a tie prefer more shared capacity
 	//   - lower id wins
 	//
-	// Before this comparison is reached, nodes with insufficient uncompressible resources
+	// Before this comparison is reached, nodes with insufficient incompressible resources
 	// (memory) have been filtered out.
 
 	// a node with insufficient isolated or shared capacity loses
 	switch {
 	case cpuType == cpuNormal && ((isolated2 < 0 && isolated1 >= 0) || (shared2 <= 0 && shared1 > 0)):
-		log.Debug("  => %s loses, insufficent isolated or shared", node2.Name())
+		log.Debug("  => %s loses, insufficient isolated or shared", node2.Name())
 		return true
 	case cpuType == cpuNormal && ((isolated1 < 0 && isolated2 >= 0) || (shared1 <= 0 && shared2 > 0)):
-		log.Debug("  => %s loses, insufficent isolated or shared", node1.Name())
+		log.Debug("  => %s loses, insufficient isolated or shared", node1.Name())
 		return false
 	case cpuType == cpuReserved && reserved2 < 0 && reserved1 >= 0:
-		log.Debug("  => %s loses, insufficent reserved", node2.Name())
+		log.Debug("  => %s loses, insufficient reserved", node2.Name())
 		return true
 	case cpuType == cpuReserved && reserved1 < 0 && reserved2 >= 0:
-		log.Debug("  => %s loses, insufficent reserved", node1.Name())
+		log.Debug("  => %s loses, insufficient reserved", node1.Name())
 		return false
 	}
@@ -993,7 +993,7 @@ func (p *policy) compareScores(request Request, pools []Node, scores map[int]Sco
 		return false
 	}
 
-	log.Debug("  => %s WINS based on equal hint socres, lower id",
+	log.Debug("  => %s WINS based on equal hint scores, lower id",
 		map[bool]string{true: node1.Name(), false: node2.Name()}[id1 < id2])
 
 	return id1 < id2
@@ -1088,18 +1088,18 @@ func (p *policy) compareScores(request Request, pools []Node, scores map[int]Sco
 		}
 	}
 
-	// more slicable shared capacity wins
+	// more sliceable shared capacity wins
 	if request.FullCPUs() > 0 && (shared1 > 0 || shared2 > 0) {
 		if shared1 > shared2 {
-			log.Debug("  => %s WINS on more slicable capacity", node1.Name())
+			log.Debug("  => %s WINS on more sliceable capacity", node1.Name())
 			return true
 		}
 		if shared2 > shared1 {
-			log.Debug("  => %s WINS on more slicable capacity", node2.Name())
+			log.Debug("  => %s WINS on more sliceable capacity", node2.Name())
 			return false
 		}
 
-		log.Debug("  => %s WINS based on equal slicable capacity, lower id",
+		log.Debug("  => %s WINS based on equal sliceable capacity, lower id",
 			map[bool]string{true: node1.Name(), false: node2.Name()}[id1 < id2])
 
 		return id1 < id2
@@ -1148,7 +1148,7 @@ func affinityScore(affinities map[int]int32, node Node) float64 {
 	// D_x is Q ** (number of links from node to x). IOW, the
 	// effective affinity is the sum of the affinity of n and
 	// the affinity of each node x of the above mentioned set
-	// diluted proprotionally to the distance of x to n, with
+	// diluted proportionally to the distance of x to n, with
 	// Q being 0.75.
 	var score float64
 
diff --git a/cmd/plugins/topology-aware/policy/resources.go b/cmd/plugins/topology-aware/policy/resources.go
index 0b20f4d7e..bb5ee4ee7 100644
--- a/cmd/plugins/topology-aware/policy/resources.go
+++ b/cmd/plugins/topology-aware/policy/resources.go
@@ -51,7 +51,7 @@ var (
 	}
 )
 
-// Supply represents avaialbe CPU and memory capacity of a node.
+// Supply represents available CPU and memory capacity of a node.
 type Supply interface {
 	// GetNode returns the node supplying this capacity.
 	GetNode() Node
@@ -130,7 +130,7 @@ type Request interface {
 	Isolate() bool
 	// MemoryType returns the type(s) of requested memory.
 	MemoryType() memoryType
-	// MemAmountToAllocate retuns how much memory we need to reserve for a request.
+	// MemAmountToAllocate returns how much memory we need to reserve for a request.
 	MemAmountToAllocate() uint64
 	// ColdStart returns the cold start timeout.
 	ColdStart() time.Duration
@@ -499,7 +499,7 @@ func (cs *supply) allocateMemory(r Request) (memoryMap, error) {
 	// insufficient memory have been filtered out before allocation.
 	//
 	// However, for cold started containers we do check if there is
-	// enough PMEM free to accomodate the full request and bail out
+	// enough PMEM free to accommodate the full request and bail out
 	// if that check fails.
 	//
 
@@ -1072,7 +1072,7 @@ func (cr *request) Isolate() bool {
 	return cr.isolate
 }
 
-// MemAmountToAllocate retuns how much memory we need to reserve for a request.
+// MemAmountToAllocate returns how much memory we need to reserve for a request.
 func (cr *request) MemAmountToAllocate() uint64 {
 	var amount uint64 = 0
 	switch cr.GetContainer().GetQOSClass() {
@@ -1132,7 +1132,7 @@ func (cs *supply) GetScore(req Request) Score {
 		score.isolated = cs.isolated.Size() - full
 	}
 
-	// if we don't want isolated or there is not enough, calculate slicable capacity
+	// if we don't want isolated or there is not enough, calculate sliceable capacity
 	if !cr.isolate || score.isolated < 0 {
 		score.shared -= 1000 * full
 	}
diff --git a/cmd/plugins/topology-aware/policy/topology-aware-policy.go b/cmd/plugins/topology-aware/policy/topology-aware-policy.go
index 99a5b00ad..42c5ea93c 100644
--- a/cmd/plugins/topology-aware/policy/topology-aware-policy.go
+++ b/cmd/plugins/topology-aware/policy/topology-aware-policy.go
@@ -43,7 +43,7 @@ const (
 	ColdStartDone = "cold-start-done"
 )
 
-// allocations is our cache.Cachable for saving resource allocations in the cache.
+// allocations is our cache.Cacheable for saving resource allocations in the cache.
 type allocations struct {
 	policy *policy
 	grants map[string]Grant
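
Reviewer note, not part of the patch: the affinityScore comment fixed above
describes effective affinity as the affinity of node n plus the affinity of
each other node x, diluted by Q ** (number of links between x and n), with
Q being 0.75. A minimal standalone Go sketch of that formula, using
hypothetical names (effectiveAffinity, dist) that do not appear in the
policy code:

	package main

	import (
		"fmt"
		"math"
	)

	const q = 0.75 // dilution factor Q from the affinityScore comment

	// effectiveAffinity sums the affinity of node n with the affinity of
	// every other node x, diluted by q raised to the link distance from x to n.
	func effectiveAffinity(affinities map[int]int32, dist func(x, n int) int, n int) float64 {
		score := float64(affinities[n])
		for x, a := range affinities {
			if x == n {
				continue
			}
			score += float64(a) * math.Pow(q, float64(dist(x, n)))
		}
		return score
	}

	func main() {
		// Nodes 1 and 2 both have affinity 4, one and two links away from node 0:
		// score = 0 + 4*0.75 + 4*0.5625 = 5.25
		affinities := map[int]int32{1: 4, 2: 4}
		dist := func(x, n int) int { return x - n } // toy tree distance for this example
		fmt.Println(effectiveAffinity(affinities, dist, 0))
	}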