deploy our custom coredns addon #17008

Closed
wants to merge 11 commits
39 changes: 0 additions & 39 deletions pkg/kapi/kapi.go
@@ -210,42 +210,3 @@ func IsRetryableAPIError(err error) bool {
func KubectlBinaryPath(version string) string {
return path.Join(vmpath.GuestPersistentDir, "binaries", version, "kubectl")
}

// ScaleDeployment tries to set the number of deployment replicas in namespace and context.
// It will retry (usually needed due to "the object has been modified; please apply your changes to the latest version and try again" error) up to ReasonableMutateTime to ensure target scale is achieved.
func ScaleDeployment(kcontext, namespace, deploymentName string, replicas int) error {
client, err := Client(kcontext)
if err != nil {
return fmt.Errorf("client: %v", err)
}

err = wait.PollUntilContextTimeout(context.Background(), kconst.APICallRetryInterval, ReasonableMutateTime, true, func(ctx context.Context) (bool, error) {
scale, err := client.AppsV1().Deployments(namespace).GetScale(ctx, deploymentName, meta.GetOptions{})
if err != nil {
if !IsRetryableAPIError(err) {
return false, fmt.Errorf("non-retryable failure while getting %q deployment scale: %v", deploymentName, err)
}
klog.Warningf("failed getting %q deployment scale, will retry: %v", deploymentName, err)
return false, nil
}
if scale.Spec.Replicas != int32(replicas) {
scale.Spec.Replicas = int32(replicas)
if _, err = client.AppsV1().Deployments(namespace).UpdateScale(ctx, deploymentName, scale, meta.UpdateOptions{}); err != nil {
if !IsRetryableAPIError(err) {
return false, fmt.Errorf("non-retryable failure while rescaling %s deployment: %v", deploymentName, err)
}
klog.Warningf("failed rescaling %s deployment, will retry: %v", deploymentName, err)
}
// repeat (even if the update was successful) to re-check and confirm the requested scale
return false, nil
}
return true, nil
})
if err != nil {
klog.Warningf("failed rescaling %q deployment in %q namespace and %q context to %d replicas: %v", deploymentName, namespace, kcontext, replicas, err)
return err
}
klog.Infof("%q deployment in %q namespace and %q context rescaled to %d replicas", deploymentName, namespace, kcontext, replicas)

return nil
}
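For context, a hypothetical caller of the helper removed above might look like the sketch below; the context name, namespace, deployment name, and replica count are illustrative assumptions, not taken from this PR.

package sketch

import (
	"k8s.io/klog/v2"
	"k8s.io/minikube/pkg/kapi"
)

// scaleDownStockDNS shows how ScaleDeployment could be used to scale the stock
// CoreDNS deployment to zero before a custom replacement is applied (hypothetical sketch).
func scaleDownStockDNS() {
	if err := kapi.ScaleDeployment("minikube", "kube-system", "coredns", 0); err != nil {
		klog.Warningf("failed scaling down %q deployment: %v", "coredns", err)
	}
}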
61 changes: 61 additions & 0 deletions pkg/minikube/bootstrapper/bsutil/kverify/pod_ready.go
@@ -27,6 +27,7 @@ import (
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/kubernetes"
"k8s.io/klog/v2"
"k8s.io/minikube/pkg/kapi"
kconst "k8s.io/minikube/third_party/kubeadm/app/constants"
)

@@ -155,3 +156,63 @@ func IsPodReady(pod *core.Pod) (ready bool, reason string) {
}
return false, fmt.Sprintf("pod %q in %q namespace does not have %q status: %+v", pod.Name, pod.Namespace, core.PodReady, pod.Status)
}

// UnloathPods waits for pod(s) with label in namespace to become Ready; if they don't within timeout after they're Running, it deletes them once so they can be recreated.
func UnloathPods(ctx context.Context, kcontext, label, namespace string, timeout time.Duration) error {
client, err := kapi.Client(kcontext)
if err != nil {
return fmt.Errorf("kapi client: %v", err)
}

var pods *core.PodList
lap := time.Now()
// need at least one running pod
if err := wait.PollUntilContextCancel(ctx, kconst.APICallRetryInterval, true, func(_ context.Context) (done bool, err error) {
pods, err = client.CoreV1().Pods(namespace).List(ctx, meta.ListOptions{LabelSelector: label})
if err != nil || len(pods.Items) == 0 {
// reduce log spam
if time.Since(lap) > (2 * time.Second) {
klog.Infof("waiting for running pod(s) with %q label in %q namespace (error: %v)...", label, namespace, err)
lap = time.Now()
}
return false, nil
}

running := false
for _, pod := range pods.Items {
if running = (pod.Status.Phase == core.PodRunning); running {
break
}
}
if !running {
return false, nil
}
return true, nil
}); err != nil {
return fmt.Errorf("waiting for running pod(s) with %q label in %q namespace failed: %v", label, namespace, err)
}

// need at least one pod to become ready - within timeout
if err := wait.PollUntilContextTimeout(ctx, kconst.APICallRetryInterval, timeout, true, func(_ context.Context) (done bool, err error) {
// re-list on every iteration so pod status is current rather than the snapshot taken above
pods, err = client.CoreV1().Pods(namespace).List(ctx, meta.ListOptions{LabelSelector: label})
if err != nil {
klog.Warningf("failed listing pod(s) with %q label in %q namespace, will retry: %v", label, namespace, err)
return false, nil
}
ready := false
for _, pod := range pods.Items {
if ready, _ = IsPodReady(&pod); ready {
break
}
}
if !ready {
return false, nil
}
return true, nil
}); err != nil {
klog.Errorf("waiting for ready pod(s) with %q label in %q namespace failed (will try deleting them once): %v", label, namespace, err)
now := int64(0)
if err := client.CoreV1().Pods(namespace).DeleteCollection(ctx, meta.DeleteOptions{GracePeriodSeconds: &now}, meta.ListOptions{LabelSelector: label}); err != nil {
return fmt.Errorf("deleting pod(s) with %q label in %q namespace failed: %v", label, namespace, err)
}
klog.Infof("deleting pod(s) with %q label in %q namespace initiated", label, namespace)
return nil
}
klog.Infof("pod(s) with %q label in %q namespace reached %q condition within %v", label, namespace, core.PodReady, timeout)
return nil
}
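A hypothetical caller of UnloathPods might look like the sketch below; the context name, label selector, namespace, and timeout are illustrative assumptions, not taken from this PR.

package sketch

import (
	"context"
	"time"

	"k8s.io/minikube/pkg/minikube/bootstrapper/bsutil/kverify"
)

// waitForDNS gives freshly deployed DNS pods a bounded window to become Ready;
// if they stay unready, UnloathPods deletes them once so their controller can
// recreate them (hypothetical sketch).
func waitForDNS() error {
	ctx, cancel := context.WithTimeout(context.Background(), 3*time.Minute)
	defer cancel()
	return kverify.UnloathPods(ctx, "minikube", "k8s-app=kube-dns", "kube-system", 90*time.Second)
}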