WiP: topology-aware: fix codespell errors.
This will conflict if/when we merge containers#332/libmem. This will need to
get dropped, rebased on latest main, then checked and fixed again.
So I'll just keep it at the tip until then...

Signed-off-by: Krisztian Litkey <[email protected]>
klihub committed Oct 1, 2024
1 parent 9c0d551 commit 64adbb8
Showing 6 changed files with 26 additions and 26 deletions.
2 changes: 1 addition & 1 deletion cmd/plugins/topology-aware/policy/cache.go
@@ -29,7 +29,7 @@ const (
)

func (p *policy) saveAllocations() {
- p.cache.SetPolicyEntry(keyAllocations, cache.Cachable(&p.allocations))
+ p.cache.SetPolicyEntry(keyAllocations, cache.Cacheable(&p.allocations))
p.cache.Save()
}

6 changes: 3 additions & 3 deletions cmd/plugins/topology-aware/policy/cache_test.go
@@ -73,7 +73,7 @@ func TestToGrant(t *testing.T) {
t.Errorf("Expected error, but got success")
}
if !tc.expectedError && err != nil {
t.Errorf("Unxpected error: %+v", err)
t.Errorf("Unexpected error: %+v", err)
}
})
}
@@ -122,7 +122,7 @@ func TestAllocationMarshalling(t *testing.T) {
t.Errorf("Expected unmarshalling error, but got success")
}
if !tc.expectedUnmarshallingError && unmarshallingErr != nil {
t.Errorf("Unxpected unmarshalling error: %+v", unmarshallingErr)
t.Errorf("Unexpected unmarshalling error: %+v", unmarshallingErr)
}

out, marshallingErr := alloc.MarshalJSON()
@@ -133,7 +133,7 @@ func TestAllocationMarshalling(t *testing.T) {
t.Errorf("Expected marshalling error, but got success")
}
if !tc.expectedMarshallingError && marshallingErr != nil {
t.Errorf("Unxpected marshalling error: %+v", marshallingErr)
t.Errorf("Unexpected marshalling error: %+v", marshallingErr)
}

})
4 changes: 2 additions & 2 deletions cmd/plugins/topology-aware/policy/node.go
@@ -242,7 +242,7 @@ func (n *node) RootDistance() int {
return n.depth
}

- // NodeHeight returns the hight of this node (tree depth - node depth).
+ // NodeHeight returns the height of this node (tree depth - node depth).
func (n *node) NodeHeight() int {
if n.IsNil() {
return -1
@@ -278,7 +278,7 @@ func (n *node) LinkParent(parent Node) {
n.depth = parent.RootDistance() + 1
}

- // AddChildren appends the nodes to the childres, *WITHOUT* setting their parent.
+ // AddChildren appends the nodes to the children, *WITHOUT* setting their parent.
func (n *node) AddChildren(nodes []Node) {
n.children = append(n.children, nodes...)
}
28 changes: 14 additions & 14 deletions cmd/plugins/topology-aware/policy/pools.go
@@ -500,7 +500,7 @@ func (p *policy) allocatePool(container cache.Container, poolHint string) (Grant
// Workload 1 no longer fits to the leaf node, because the total
// reservation from the leaf node is over the memory maximum.
// Thus, it's moved upwards in the tree to the root node. Memory
- // resevations are again updated accordingly:
+ // reservations are again updated accordingly:
//
// +----------------+
// |Total mem: 4G |
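As a rough, self-contained sketch of the behaviour this comment describes (a grant whose memory reservation no longer fits a pool is moved upwards in the tree until it fits), assuming hypothetical `pool` and `placeGrant` names that are not the policy's actual data structures:

```go
package main

import "fmt"

// pool is a made-up stand-in for a node in the pool tree.
type pool struct {
	name     string
	memMax   uint64 // total memory capacity of the pool
	reserved uint64 // memory already reserved from the pool
	parent   *pool  // nil for the root
}

// placeGrant walks up from the preferred pool towards the root until the
// requested amount of memory fits, and reserves it there.
func placeGrant(preferred *pool, request uint64) *pool {
	for n := preferred; n != nil; n = n.parent {
		if n.reserved+request <= n.memMax {
			n.reserved += request
			return n
		}
	}
	return nil // does not fit anywhere, not even at the root
}

func main() {
	root := &pool{name: "root", memMax: 4 << 30}
	leaf := &pool{name: "leaf", memMax: 2 << 30, reserved: 1536 << 20, parent: root}

	// A 1G request no longer fits into the leaf (1.5G + 1G > 2G), so the
	// grant ends up in the root, much like the workload in the comment above.
	if p := placeGrant(leaf, 1<<30); p != nil {
		fmt.Println("grant placed in", p.name)
	}
}
```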
@@ -850,7 +850,7 @@ func (p *policy) sortPoolsByScore(req Request, aff map[int]int32) (map[int]Score
return nil
})

- // Filter out pools which don't have enough uncompressible resources
+ // Filter out pools which don't have enough incompressible resources
// (memory) to satisfy the request.
filteredPools := p.filterInsufficientResources(req, p.pools)

@@ -897,29 +897,29 @@ func (p *policy) compareScores(request Request, pools []Node, scores map[int]Sco
// * more isolated capacity wins
// * for a tie, prefer the smaller id
// - for (non-reserved) exclusive allocations
- // * more slicable (shared) capacity wins
+ // * more sliceable (shared) capacity wins
// * for a tie, prefer the smaller id
// - for (non-reserved) shared-only allocations
// * fewer colocated containers win
// * for a tie prefer more shared capacity
// - lower id wins
//
- // Before this comparison is reached, nodes with insufficient uncompressible resources
+ // Before this comparison is reached, nodes with insufficient incompressible resources
// (memory) have been filtered out.

// a node with insufficient isolated or shared capacity loses
switch {
case cpuType == cpuNormal && ((isolated2 < 0 && isolated1 >= 0) || (shared2 <= 0 && shared1 > 0)):
log.Debug(" => %s loses, insufficent isolated or shared", node2.Name())
log.Debug(" => %s loses, insufficient isolated or shared", node2.Name())
return true
case cpuType == cpuNormal && ((isolated1 < 0 && isolated2 >= 0) || (shared1 <= 0 && shared2 > 0)):
log.Debug(" => %s loses, insufficent isolated or shared", node1.Name())
log.Debug(" => %s loses, insufficient isolated or shared", node1.Name())
return false
case cpuType == cpuReserved && reserved2 < 0 && reserved1 >= 0:
log.Debug(" => %s loses, insufficent reserved", node2.Name())
log.Debug(" => %s loses, insufficient reserved", node2.Name())
return true
case cpuType == cpuReserved && reserved1 < 0 && reserved2 >= 0:
log.Debug(" => %s loses, insufficent reserved", node1.Name())
log.Debug(" => %s loses, insufficient reserved", node1.Name())
return false
}
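The ordering rules listed above boil down to a pairwise "is node1 preferred over node2" predicate that checks one criterion at a time and falls back to the lower id on a full tie. Below is a minimal, self-contained sketch of that pattern for the (non-reserved) exclusive case; the `poolScore` type and `lessThan` helper are invented for this illustration and are not the policy's own types:

```go
package main

import "fmt"

// poolScore is a made-up stand-in for the per-pool score computed above.
type poolScore struct {
	id       int
	isolated int // isolated CPUs left after satisfying the request
	shared   int // shared (sliceable) CPU capacity left, in milli-CPUs
}

// lessThan reports whether pool a should be preferred over pool b for a
// (non-reserved) exclusive allocation: sufficient capacity first, then
// more sliceable capacity, then the lower id as the final tie-breaker.
func lessThan(a, b poolScore) bool {
	// a pool with insufficient isolated or shared capacity loses
	if (b.isolated < 0 && a.isolated >= 0) || (b.shared <= 0 && a.shared > 0) {
		return true
	}
	if (a.isolated < 0 && b.isolated >= 0) || (a.shared <= 0 && b.shared > 0) {
		return false
	}
	// more sliceable (shared) capacity wins
	if a.shared != b.shared {
		return a.shared > b.shared
	}
	// for a tie, prefer the smaller id
	return a.id < b.id
}

func main() {
	a := poolScore{id: 2, isolated: 0, shared: 4000}
	b := poolScore{id: 1, isolated: 0, shared: 4000}
	fmt.Println(lessThan(a, b)) // false: equal capacity, b has the lower id
}
```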

@@ -993,7 +993,7 @@ func (p *policy) compareScores(request Request, pools []Node, scores map[int]Sco
return false
}

log.Debug(" => %s WINS based on equal hint socres, lower id",
log.Debug(" => %s WINS based on equal hint scores, lower id",
map[bool]string{true: node1.Name(), false: node2.Name()}[id1 < id2])

return id1 < id2
@@ -1088,18 +1088,18 @@ func (p *policy) compareScores(request Request, pools []Node, scores map[int]Sco
}
}

- // more slicable shared capacity wins
+ // more sliceable shared capacity wins
if request.FullCPUs() > 0 && (shared1 > 0 || shared2 > 0) {
if shared1 > shared2 {
log.Debug(" => %s WINS on more slicable capacity", node1.Name())
log.Debug(" => %s WINS on more sliceable capacity", node1.Name())
return true
}
if shared2 > shared1 {
log.Debug(" => %s WINS on more slicable capacity", node2.Name())
log.Debug(" => %s WINS on more sliceable capacity", node2.Name())
return false
}

log.Debug(" => %s WINS based on equal slicable capacity, lower id",
log.Debug(" => %s WINS based on equal sliceable capacity, lower id",
map[bool]string{true: node1.Name(), false: node2.Name()}[id1 < id2])

return id1 < id2
@@ -1148,7 +1148,7 @@ func affinityScore(affinities map[int]int32, node Node) float64 {
// D_x is Q ** (number of links from node to x). IOW, the
// effective affinity is the sum of the affinity of n and
// the affinity of each node x of the above mentioned set
- // diluted proprotionally to the distance of x to n, with
+ // diluted proportionally to the distance of x to n, with
// Q being 0.75.

var score float64
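A minimal sketch of the dilution the comment describes: the effective affinity of node n is its own affinity plus the affinity of each other relevant node x, scaled by Q ** dist(n, x) with Q being 0.75. The `dist` helper and the flat float64 affinity map below are assumptions made purely for this illustration, not the policy's actual representation:

```go
package main

import (
	"fmt"
	"math"
)

// Q is the per-link dilution factor, as stated in the comment above.
const Q = 0.75

// dist returns the number of tree links between two nodes; here it is just
// a fixed table so the example stays self-contained.
func dist(n, x int) int {
	table := map[[2]int]int{{0, 1}: 1, {0, 2}: 2, {1, 2}: 1}
	if d, ok := table[[2]int{n, x}]; ok {
		return d
	}
	return table[[2]int{x, n}]
}

// effectiveAffinity computes A_n plus the sum of A_x * Q^dist(n, x) over
// the other nodes that have an affinity towards the container.
func effectiveAffinity(n int, affinity map[int]float64) float64 {
	score := affinity[n]
	for x, a := range affinity {
		if x == n {
			continue
		}
		score += a * math.Pow(Q, float64(dist(n, x)))
	}
	return score
}

func main() {
	affinity := map[int]float64{0: 4, 1: 2, 2: 1}
	// node 0: 4 + 2*0.75 + 1*0.75^2 = 6.0625
	fmt.Println(effectiveAffinity(0, affinity))
}
```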
10 changes: 5 additions & 5 deletions cmd/plugins/topology-aware/policy/resources.go
@@ -51,7 +51,7 @@ var (
}
)

- // Supply represents avaialbe CPU and memory capacity of a node.
+ // Supply represents available CPU and memory capacity of a node.
type Supply interface {
// GetNode returns the node supplying this capacity.
GetNode() Node
@@ -130,7 +130,7 @@ type Request interface {
Isolate() bool
// MemoryType returns the type(s) of requested memory.
MemoryType() memoryType
- // MemAmountToAllocate retuns how much memory we need to reserve for a request.
+ // MemAmountToAllocate returns how much memory we need to reserve for a request.
MemAmountToAllocate() uint64
// ColdStart returns the cold start timeout.
ColdStart() time.Duration
@@ -499,7 +499,7 @@ func (cs *supply) allocateMemory(r Request) (memoryMap, error) {
// insufficient memory have been filtered out before allocation.
//
// However, for cold started containers we do check if there is
- // enough PMEM free to accomodate the full request and bail out
+ // enough PMEM free to accommodate the full request and bail out
// if that check fails.
//

@@ -1072,7 +1072,7 @@ func (cr *request) Isolate() bool {
return cr.isolate
}

- // MemAmountToAllocate retuns how much memory we need to reserve for a request.
+ // MemAmountToAllocate returns how much memory we need to reserve for a request.
func (cr *request) MemAmountToAllocate() uint64 {
var amount uint64 = 0
switch cr.GetContainer().GetQOSClass() {
@@ -1132,7 +1132,7 @@ func (cs *supply) GetScore(req Request) Score {
score.isolated = cs.isolated.Size() - full
}

- // if we don't want isolated or there is not enough, calculate slicable capacity
+ // if we don't want isolated or there is not enough, calculate sliceable capacity
if !cr.isolate || score.isolated < 0 {
score.shared -= 1000 * full
}
2 changes: 1 addition & 1 deletion cmd/plugins/topology-aware/policy/topology-aware-policy.go
@@ -43,7 +43,7 @@ const (
ColdStartDone = "cold-start-done"
)

- // allocations is our cache.Cachable for saving resource allocations in the cache.
+ // allocations is our cache.Cacheable for saving resource allocations in the cache.
type allocations struct {
policy *policy
grants map[string]Grant
