Skip to content

Commit

Permalink
Merge pull request #7376 from damikag/cleanup-remove-or-update-logs
Browse files Browse the repository at this point in the history
Remove/update spamming logs
  • Loading branch information
k8s-ci-robot authored Oct 16, 2024
2 parents c7944d7 + e20e5e6 commit 64a6432
Show file tree
Hide file tree
Showing 4 changed files with 3 additions and 12 deletions.
4 changes: 2 additions & 2 deletions cluster-autoscaler/clusterstate/clusterstate.go
Original file line number Diff line number Diff line change
Expand Up @@ -420,7 +420,7 @@ func (csr *ClusterStateRegistry) IsClusterHealthy() bool {
func (csr *ClusterStateRegistry) IsNodeGroupHealthy(nodeGroupName string) bool {
acceptable, found := csr.acceptableRanges[nodeGroupName]
if !found {
klog.Warningf("Failed to find acceptable ranges for %v", nodeGroupName)
klog.V(5).Infof("Failed to find acceptable ranges for %v", nodeGroupName)
return false
}

Expand All @@ -430,7 +430,7 @@ func (csr *ClusterStateRegistry) IsNodeGroupHealthy(nodeGroupName string) bool {
if acceptable.CurrentTarget == 0 || (acceptable.MinNodes == 0 && acceptable.CurrentTarget > 0) {
return true
}
klog.Warningf("Failed to find readiness information for %v", nodeGroupName)
klog.V(5).Infof("Failed to find readiness information for %v", nodeGroupName)
return false
}

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -37,8 +37,6 @@ func (p *filterOutDaemonSetPodListProcessor) Process(context *context.Autoscalin
// for scheduling. To improve that we are filtering them here, as the CA won't be
able to help them so there is no point in passing them to scale-up logic.

klog.V(4).Infof("Filtering out daemon set pods")

var nonDaemonSetPods []*apiv1.Pod
for _, pod := range unschedulablePods {
if !podutils.IsDaemonSetPod(pod) {
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -23,7 +23,6 @@ import (
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/autoscaler/cluster-autoscaler/config"
"k8s.io/autoscaler/cluster-autoscaler/utils/scheduler"
klog "k8s.io/klog/v2"
schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework"
)

Expand Down Expand Up @@ -123,38 +122,32 @@ func IsCloudProviderNodeInfoSimilar(

for kind, qtyList := range capacity {
if len(qtyList) != 2 {
klog.V(3).Infof("nodes %s and %s are not similar, missing capacity %s", n1.Node().Name, n2.Node().Name, kind)
return false
}
switch kind {
case apiv1.ResourceMemory:
if !resourceListWithinTolerance(qtyList, ratioOpts.MaxCapacityMemoryDifferenceRatio) {
klog.V(3).Infof("nodes %s and %s are not similar, memory not within tolerance", n1.Node().Name, n2.Node().Name)
return false
}
default:
// For other capacity types we require exact match.
// If this is ever changed, enforcing MaxCoresTotal limits
// as it is now may no longer work.
if qtyList[0].Cmp(qtyList[1]) != 0 {
klog.V(3).Infof("nodes %s and %s are not similar, %s does not match", n1.Node().Name, n2.Node().Name, kind)
return false
}
}
}

// For allocatable and free we allow resource quantities to be within a few % of each other
if !resourceMapsWithinTolerance(allocatable, ratioOpts.MaxAllocatableDifferenceRatio) {
klog.V(3).Infof("nodes %s and %s are not similar, allocatable resources not within tolerance", n1.Node().Name, n2.Node().Name)
return false
}
if !resourceMapsWithinTolerance(free, ratioOpts.MaxFreeDifferenceRatio) {
klog.V(3).Infof("nodes %s and %s are not similar, free resources not within tolerance", n1.Node().Name, n2.Node().Name)
return false
}

if !compareLabels(nodes, ignoredLabels) {
klog.V(3).Infof("nodes %s and %s are not similar, labels do not match", n1.Node().Name, n2.Node().Name)
return false
}

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -54,7 +54,7 @@ func (n *PreFilteringScaleDownNodeProcessor) GetScaleDownCandidates(ctx *context
continue
}
if nodeGroup == nil || reflect.ValueOf(nodeGroup).IsNil() {
klog.V(4).Infof("Node %s should not be processed by cluster autoscaler (no node group config)", node.Name)
klog.V(5).Infof("Node %s should not be processed by cluster autoscaler (no node group config)", node.Name)
continue
}
size, found := nodeGroupSize[nodeGroup.Id()]
Expand Down

0 comments on commit 64a6432

Please sign in to comment.