From f6c7d646f75a6c5fbbffdeb41bf41d32b114a534 Mon Sep 17 00:00:00 2001 From: nithyatsu Date: Wed, 13 Sep 2023 13:29:49 -0700 Subject: [PATCH] Revert "Detect deployment failures with gateway (#6126)" This reverts commit 6a9173f962ac9d1fce1c7102ae64b072a517310b. --- go.mod | 2 +- pkg/corerp/handlers/kubernetes.go | 322 ++++- .../handlers/kubernetes_deployment_waiter.go | 323 ----- .../kubernetes_deployment_waiter_test.go | 1116 ----------------- .../handlers/kubernetes_http_proxy_waiter.go | 171 --- .../kubernetes_http_proxy_waiter_test.go | 326 ----- pkg/corerp/handlers/kubernetes_test.go | 1092 +++++++++++++++- pkg/corerp/model/application_model.go | 9 +- pkg/corerp/renderers/gateway/render.go | 15 +- pkg/corerp/renderers/gateway/render_test.go | 7 - .../shared/resources/gateway_test.go | 45 - .../corerp-resources-gateway-failure.bicep | 47 - test/functional/shared/rptest.go | 3 - 13 files changed, 1387 insertions(+), 2091 deletions(-) delete mode 100644 pkg/corerp/handlers/kubernetes_deployment_waiter.go delete mode 100644 pkg/corerp/handlers/kubernetes_deployment_waiter_test.go delete mode 100644 pkg/corerp/handlers/kubernetes_http_proxy_waiter.go delete mode 100644 pkg/corerp/handlers/kubernetes_http_proxy_waiter_test.go delete mode 100644 test/functional/shared/resources/testdata/corerp-resources-gateway-failure.bicep diff --git a/go.mod b/go.mod index 213c5e4731..4e1df70233 100644 --- a/go.mod +++ b/go.mod @@ -67,6 +67,7 @@ require ( github.com/wI2L/jsondiff v0.2.0 go.etcd.io/etcd/client/v3 v3.5.9 go.etcd.io/etcd/server/v3 v3.5.9 + go.mongodb.org/mongo-driver v1.12.0 go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.42.0 go.opentelemetry.io/contrib/instrumentation/runtime v0.42.0 go.opentelemetry.io/otel v1.16.0 @@ -97,7 +98,6 @@ require ( github.com/tidwall/gjson v1.14.0 // indirect github.com/tidwall/match v1.1.1 // indirect github.com/tidwall/pretty v1.2.0 // indirect - go.mongodb.org/mongo-driver v1.12.0 // indirect ) require ( diff --git a/pkg/corerp/handlers/kubernetes.go b/pkg/corerp/handlers/kubernetes.go index cbcfa770a3..11cfb8eeb8 100644 --- a/pkg/corerp/handlers/kubernetes.go +++ b/pkg/corerp/handlers/kubernetes.go @@ -31,13 +31,14 @@ import ( resources_kubernetes "github.com/radius-project/radius/pkg/ucp/resources/kubernetes" "github.com/radius-project/radius/pkg/ucp/ucplog" + v1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/client-go/discovery" - "k8s.io/client-go/dynamic" - "k8s.io/client-go/dynamic/dynamicinformer" "k8s.io/client-go/informers" k8s "k8s.io/client-go/kubernetes" "k8s.io/client-go/tools/cache" @@ -45,33 +46,32 @@ import ( ) const ( + // MaxDeploymentTimeout is the max timeout for waiting for a deployment to be ready. + // Deployment duration should not reach to this timeout since async operation worker will time out context before MaxDeploymentTimeout. + MaxDeploymentTimeout = time.Minute * time.Duration(10) // DefaultCacheResyncInterval is the interval for resyncing informer. 
DefaultCacheResyncInterval = time.Second * time.Duration(30) ) -// Create an interface for deployment waiter and http proxy waiter -type ResourceWaiter interface { - addDynamicEventHandler(ctx context.Context, informerFactory dynamicinformer.DynamicSharedInformerFactory, informer cache.SharedIndexInformer, item client.Object, doneCh chan<- error) - addEventHandler(ctx context.Context, informerFactory informers.SharedInformerFactory, informer cache.SharedIndexInformer, item client.Object, doneCh chan<- error) - waitUntilReady(ctx context.Context, item client.Object) error -} - // NewKubernetesHandler creates a new KubernetesHandler which is used to handle Kubernetes resources. -func NewKubernetesHandler(client client.Client, clientSet k8s.Interface, discoveryClient discovery.ServerResourcesInterface, dynamicClientSet dynamic.Interface) ResourceHandler { +func NewKubernetesHandler(client client.Client, clientSet k8s.Interface, discoveryClient discovery.ServerResourcesInterface) ResourceHandler { return &kubernetesHandler{ - client: client, - k8sDiscoveryClient: discoveryClient, - httpProxyWaiter: NewHTTPProxyWaiter(dynamicClientSet), - deploymentWaiter: NewDeploymentWaiter(clientSet), + client: client, + clientSet: clientSet, + k8sDiscoveryClient: discoveryClient, + deploymentTimeOut: MaxDeploymentTimeout, + cacheResyncInterval: DefaultCacheResyncInterval, } } type kubernetesHandler struct { - client client.Client + client client.Client + clientSet k8s.Interface // k8sDiscoveryClient is the Kubernetes client to used for API version lookups on Kubernetes resources. Override this for testing. k8sDiscoveryClient discovery.ServerResourcesInterface - httpProxyWaiter ResourceWaiter - deploymentWaiter ResourceWaiter + + deploymentTimeOut time.Duration + cacheResyncInterval time.Duration } // Put stores the Kubernetes resource in the cluster and returns the properties of the resource. If the resource is a @@ -118,25 +118,297 @@ func (handler *kubernetesHandler) Put(ctx context.Context, options *PutOptions) switch strings.ToLower(item.GetKind()) { case "deployment": // Monitor the deployment until it is ready. - err = handler.deploymentWaiter.waitUntilReady(ctx, &item) + err = handler.waitUntilDeploymentIsReady(ctx, &item) if err != nil { return nil, err } logger.Info(fmt.Sprintf("Deployment %s in namespace %s is ready", item.GetName(), item.GetNamespace())) return properties, nil - case "httpproxy": - err = handler.httpProxyWaiter.waitUntilReady(ctx, &item) - if err != nil { - return nil, err - } - logger.Info(fmt.Sprintf("HTTP Proxy %s in namespace %s is ready", item.GetName(), item.GetNamespace())) - return properties, nil default: // We do not monitor the other resource types. return properties, nil } } +func (handler *kubernetesHandler) waitUntilDeploymentIsReady(ctx context.Context, item client.Object) error { + logger := ucplog.FromContextOrDiscard(ctx) + + // When the deployment is done, an error nil will be sent + // In case of an error, the error will be sent + doneCh := make(chan error, 1) + + ctx, cancel := context.WithTimeout(ctx, handler.deploymentTimeOut) + // This ensures that the informer is stopped when this function is returned. 
+ defer cancel() + + err := handler.startInformers(ctx, item, doneCh) + if err != nil { + logger.Error(err, "failed to start deployment informer") + return err + } + + select { + case <-ctx.Done(): + // Get the final deployment status + dep, err := handler.clientSet.AppsV1().Deployments(item.GetNamespace()).Get(ctx, item.GetName(), metav1.GetOptions{}) + if err != nil { + return fmt.Errorf("deployment timed out, name: %s, namespace %s, error occured while fetching latest status: %w", item.GetName(), item.GetNamespace(), err) + } + + // Now get the latest available observation of deployment current state + // note that there can be a race condition here, by the time it fetches the latest status, deployment might be succeeded + status := v1.DeploymentCondition{} + if len(dep.Status.Conditions) > 0 { + status = dep.Status.Conditions[len(dep.Status.Conditions)-1] + } + return fmt.Errorf("deployment timed out, name: %s, namespace %s, status: %s, reason: %s", item.GetName(), item.GetNamespace(), status.Message, status.Reason) + + case err := <-doneCh: + if err == nil { + logger.Info(fmt.Sprintf("Marking deployment %s in namespace %s as complete", item.GetName(), item.GetNamespace())) + } + return err + } +} + +func (handler *kubernetesHandler) addEventHandler(ctx context.Context, informerFactory informers.SharedInformerFactory, informer cache.SharedIndexInformer, item client.Object, doneCh chan<- error) { + logger := ucplog.FromContextOrDiscard(ctx) + + _, err := informer.AddEventHandler(cache.ResourceEventHandlerFuncs{ + AddFunc: func(obj any) { + handler.checkDeploymentStatus(ctx, informerFactory, item, doneCh) + }, + UpdateFunc: func(_, newObj any) { + handler.checkDeploymentStatus(ctx, informerFactory, item, doneCh) + }, + }) + + if err != nil { + logger.Error(err, "failed to add event handler") + } +} + +func (handler *kubernetesHandler) startInformers(ctx context.Context, item client.Object, doneCh chan<- error) error { + logger := ucplog.FromContextOrDiscard(ctx) + informerFactory := informers.NewSharedInformerFactoryWithOptions(handler.clientSet, handler.cacheResyncInterval, informers.WithNamespace(item.GetNamespace())) + // Add event handlers to the pod informer + handler.addEventHandler(ctx, informerFactory, informerFactory.Core().V1().Pods().Informer(), item, doneCh) + + // Add event handlers to the deployment informer + handler.addEventHandler(ctx, informerFactory, informerFactory.Apps().V1().Deployments().Informer(), item, doneCh) + + // Add event handlers to the replicaset informer + handler.addEventHandler(ctx, informerFactory, informerFactory.Apps().V1().ReplicaSets().Informer(), item, doneCh) + + // Start the informers + informerFactory.Start(ctx.Done()) + + // Wait for the deployment and pod informer's cache to be synced. 
+ informerFactory.WaitForCacheSync(ctx.Done()) + + logger.Info(fmt.Sprintf("Informers started and caches synced for deployment: %s in namespace: %s", item.GetName(), item.GetNamespace())) + return nil +} + +// Check if all the pods in the deployment are ready +func (handler *kubernetesHandler) checkDeploymentStatus(ctx context.Context, informerFactory informers.SharedInformerFactory, item client.Object, doneCh chan<- error) bool { + logger := ucplog.FromContextOrDiscard(ctx).WithValues("deploymentName", item.GetName(), "namespace", item.GetNamespace()) + + // Get the deployment + deployment, err := informerFactory.Apps().V1().Deployments().Lister().Deployments(item.GetNamespace()).Get(item.GetName()) + if err != nil { + logger.Info("Unable to find deployment") + return false + } + + deploymentReplicaSet := handler.getCurrentReplicaSetForDeployment(ctx, informerFactory, deployment) + if deploymentReplicaSet == nil { + logger.Info("Unable to find replica set for deployment") + return false + } + + allReady := handler.checkAllPodsReady(ctx, informerFactory, deployment, deploymentReplicaSet, doneCh) + if !allReady { + logger.Info("All pods are not ready yet for deployment") + return false + } + + // Check if the deployment is ready + if deployment.Status.ObservedGeneration != deployment.Generation { + logger.Info(fmt.Sprintf("Deployment status is not ready: Observed generation: %d, Generation: %d, Deployment Replicaset: %s", deployment.Status.ObservedGeneration, deployment.Generation, deploymentReplicaSet.Name)) + return false + } + + // ObservedGeneration should be updated to latest generation to avoid stale replicas + for _, c := range deployment.Status.Conditions { + // check for complete deployment condition + // Reference https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#complete-deployment + if c.Type == v1.DeploymentProgressing && c.Status == corev1.ConditionTrue && strings.EqualFold(c.Reason, "NewReplicaSetAvailable") { + logger.Info(fmt.Sprintf("Deployment is ready. Observed generation: %d, Generation: %d, Deployment Replicaset: %s", deployment.Status.ObservedGeneration, deployment.Generation, deploymentReplicaSet.Name)) + doneCh <- nil + return true + } else { + logger.Info(fmt.Sprintf("Deployment status is: %s - %s, Reason: %s, Deployment replicaset: %s", c.Type, c.Status, c.Reason, deploymentReplicaSet.Name)) + } + } + return false +} + +// Gets the current replica set for the deployment +func (handler *kubernetesHandler) getCurrentReplicaSetForDeployment(ctx context.Context, informerFactory informers.SharedInformerFactory, deployment *v1.Deployment) *v1.ReplicaSet { + if deployment == nil { + return nil + } + + logger := ucplog.FromContextOrDiscard(ctx).WithValues("deploymentName", deployment.Name, "namespace", deployment.Namespace) + + // List all replicasets for this deployment + rl, err := informerFactory.Apps().V1().ReplicaSets().Lister().ReplicaSets(deployment.Namespace).List(labels.Everything()) + if err != nil { + // This is a valid state which will eventually be resolved. Therefore, only log the error here. + logger.Info(fmt.Sprintf("Unable to list replicasets for deployment: %s", err.Error())) + return nil + } + + if len(rl) == 0 { + // This is a valid state which will eventually be resolved. Therefore, only log the error here. 
+ return nil + } + + deploymentRevision := deployment.Annotations["deployment.kubernetes.io/revision"] + + // Find the latest ReplicaSet associated with the deployment + for _, rs := range rl { + if !metav1.IsControlledBy(rs, deployment) { + continue + } + if rs.Annotations == nil { + continue + } + revision, ok := rs.Annotations["deployment.kubernetes.io/revision"] + if !ok { + continue + } + + // The first answer here https://stackoverflow.com/questions/59848252/kubectl-retrieving-the-current-new-replicaset-for-a-deployment-in-json-forma + // looks like the best way to determine the current replicaset. + // Match the replica set revision with the deployment revision + if deploymentRevision == revision { + return rs + } + } + + return nil +} + +func (handler *kubernetesHandler) checkAllPodsReady(ctx context.Context, informerFactory informers.SharedInformerFactory, obj *v1.Deployment, deploymentReplicaSet *v1.ReplicaSet, doneCh chan<- error) bool { + logger := ucplog.FromContextOrDiscard(ctx).WithValues("deploymentName", obj.GetName(), "namespace", obj.GetNamespace()) + logger.Info("Checking if all pods in the deployment are ready") + + podsInDeployment, err := handler.getPodsInDeployment(ctx, informerFactory, obj, deploymentReplicaSet) + if err != nil { + logger.Info(fmt.Sprintf("Error getting pods for deployment: %s", err.Error())) + return false + } + + allReady := true + for _, pod := range podsInDeployment { + podReady, err := handler.checkPodStatus(ctx, &pod) + if err != nil { + // Terminate the deployment and return the error encountered + doneCh <- err + return false + } + if !podReady { + allReady = false + } + } + + if allReady { + logger.Info(fmt.Sprintf("All %d pods in the deployment are ready", len(podsInDeployment))) + } + return allReady +} + +func (handler *kubernetesHandler) getPodsInDeployment(ctx context.Context, informerFactory informers.SharedInformerFactory, deployment *v1.Deployment, deploymentReplicaSet *v1.ReplicaSet) ([]corev1.Pod, error) { + logger := ucplog.FromContextOrDiscard(ctx) + + pods := []corev1.Pod{} + + // List all pods that match the current replica set + pl, err := informerFactory.Core().V1().Pods().Lister().Pods(deployment.GetNamespace()).List(labels.Set(deployment.Spec.Selector.MatchLabels).AsSelector()) + if err != nil { + logger.Info(fmt.Sprintf("Unable to find pods for deployment %s in namespace %s", deployment.GetName(), deployment.GetNamespace())) + return []corev1.Pod{}, nil + } + + // Filter out the pods that are not in the Deployment's current ReplicaSet + for _, p := range pl { + if !metav1.IsControlledBy(p, deploymentReplicaSet) { + continue + } + pods = append(pods, *p) + } + + return pods, nil +} + +func (handler *kubernetesHandler) checkPodStatus(ctx context.Context, pod *corev1.Pod) (bool, error) { + logger := ucplog.FromContextOrDiscard(ctx).WithValues("podName", pod.Name, "namespace", pod.Namespace) + + conditionPodReady := true + for _, cc := range pod.Status.Conditions { + if cc.Type == corev1.PodReady && cc.Status != corev1.ConditionTrue { + // Do not return false here else if the pod transitions to a crash loop backoff state, + // we won't be able to detect that condition. + conditionPodReady = false + } + + if cc.Type == corev1.ContainersReady && cc.Status != corev1.ConditionTrue { + // Do not return false here else if the pod transitions to a crash loop backoff state, + // we won't be able to detect that condition. 
+ conditionPodReady = false + } + } + + // Sometimes container statuses are not yet available and we do not want to falsely return that the containers are ready + if len(pod.Status.ContainerStatuses) <= 0 { + return false, nil + } + + for _, cs := range pod.Status.ContainerStatuses { + // Check if the container state is terminated or unable to start due to crash loop, image pull back off or error + // Note that sometimes a pod can go into running state but can crash later and can go undetected by this condition + // We will rely on the user defining a readiness probe to ensure that the pod is ready to serve traffic for those cases + if cs.State.Terminated != nil { + logger.Info(fmt.Sprintf("Container state is terminated Reason: %s, Message: %s", cs.State.Terminated.Reason, cs.State.Terminated.Message)) + return false, fmt.Errorf("Container state is 'Terminated' Reason: %s, Message: %s", cs.State.Terminated.Reason, cs.State.Terminated.Message) + } else if cs.State.Waiting != nil { + if cs.State.Waiting.Reason == "ErrImagePull" || cs.State.Waiting.Reason == "CrashLoopBackOff" || cs.State.Waiting.Reason == "ImagePullBackOff" { + message := cs.State.Waiting.Message + if cs.LastTerminationState.Terminated != nil { + message += " LastTerminationState: " + cs.LastTerminationState.Terminated.Message + } + return false, fmt.Errorf("Container state is 'Waiting' Reason: %s, Message: %s", cs.State.Waiting.Reason, message) + } else { + return false, nil + } + } else if cs.State.Running == nil { + // The container is not yet running + return false, nil + } else if !cs.Ready { + // The container is running but has not passed its readiness probe yet + return false, nil + } + } + + if !conditionPodReady { + return false, nil + } + logger.Info("All containers for pod are ready") + return true, nil +} + // Delete decodes the identity data from the DeleteOptions, creates an unstructured object from the identity data, // and then attempts to delete the object from the Kubernetes cluster, returning an error if one occurs. func (handler *kubernetesHandler) Delete(ctx context.Context, options *DeleteOptions) error { diff --git a/pkg/corerp/handlers/kubernetes_deployment_waiter.go b/pkg/corerp/handlers/kubernetes_deployment_waiter.go deleted file mode 100644 index 71d61ebff0..0000000000 --- a/pkg/corerp/handlers/kubernetes_deployment_waiter.go +++ /dev/null @@ -1,323 +0,0 @@ -package handlers - -import ( - "context" - "fmt" - "strings" - "time" - - "github.com/radius-project/radius/pkg/ucp/ucplog" - v1 "k8s.io/api/apps/v1" - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/dynamic/dynamicinformer" - "k8s.io/client-go/informers" - k8s "k8s.io/client-go/kubernetes" - "k8s.io/client-go/tools/cache" - "sigs.k8s.io/controller-runtime/pkg/client" -) - -const ( - // MaxDeploymentTimeout is the max timeout for waiting for a deployment to be ready. - // Deployment duration should not reach to this timeout since async operation worker will time out context before MaxDeploymentTimeout. 
- MaxDeploymentTimeout = time.Minute * time.Duration(10) -) - -type deploymentWaiter struct { - clientSet k8s.Interface - deploymentTimeOut time.Duration - cacheResyncInterval time.Duration -} - -func NewDeploymentWaiter(clientSet k8s.Interface) *deploymentWaiter { - return &deploymentWaiter{ - clientSet: clientSet, - deploymentTimeOut: MaxDeploymentTimeout, - cacheResyncInterval: DefaultCacheResyncInterval, - } -} - -func (handler *deploymentWaiter) addEventHandler(ctx context.Context, informerFactory informers.SharedInformerFactory, informer cache.SharedIndexInformer, item client.Object, doneCh chan<- error) { - logger := ucplog.FromContextOrDiscard(ctx) - - _, err := informer.AddEventHandler(cache.ResourceEventHandlerFuncs{ - AddFunc: func(obj any) { - handler.checkDeploymentStatus(ctx, informerFactory, item, doneCh) - }, - UpdateFunc: func(_, newObj any) { - handler.checkDeploymentStatus(ctx, informerFactory, item, doneCh) - }, - }) - - if err != nil { - logger.Error(err, "failed to add event handler") - } -} - -// addDynamicEventHandler is not implemented for deploymentWaiter -func (handler *deploymentWaiter) addDynamicEventHandler(ctx context.Context, informerFactory dynamicinformer.DynamicSharedInformerFactory, informer cache.SharedIndexInformer, item client.Object, doneCh chan<- error) { -} - -func (handler *deploymentWaiter) waitUntilReady(ctx context.Context, item client.Object) error { - logger := ucplog.FromContextOrDiscard(ctx) - - // When the deployment is done, an error nil will be sent - // In case of an error, the error will be sent - doneCh := make(chan error, 1) - - ctx, cancel := context.WithTimeout(ctx, handler.deploymentTimeOut) - // This ensures that the informer is stopped when this function is returned. - defer cancel() - - err := handler.startInformers(ctx, item, doneCh) - if err != nil { - logger.Error(err, "failed to start deployment informer") - return err - } - - select { - case <-ctx.Done(): - // Get the final deployment status - dep, err := handler.clientSet.AppsV1().Deployments(item.GetNamespace()).Get(ctx, item.GetName(), metav1.GetOptions{}) - if err != nil { - return fmt.Errorf("deployment timed out, name: %s, namespace %s, error occured while fetching latest status: %w", item.GetName(), item.GetNamespace(), err) - } - - // Now get the latest available observation of deployment current state - // note that there can be a race condition here, by the time it fetches the latest status, deployment might be succeeded - status := v1.DeploymentCondition{} - if len(dep.Status.Conditions) > 0 { - status = dep.Status.Conditions[len(dep.Status.Conditions)-1] - } - return fmt.Errorf("deployment timed out, name: %s, namespace %s, status: %s, reason: %s", item.GetName(), item.GetNamespace(), status.Message, status.Reason) - - case err := <-doneCh: - if err == nil { - logger.Info(fmt.Sprintf("Marking deployment %s in namespace %s as complete", item.GetName(), item.GetNamespace())) - } - return err - } -} - -// Check if all the pods in the deployment are ready -func (handler *deploymentWaiter) checkDeploymentStatus(ctx context.Context, informerFactory informers.SharedInformerFactory, item client.Object, doneCh chan<- error) bool { - logger := ucplog.FromContextOrDiscard(ctx).WithValues("deploymentName", item.GetName(), "namespace", item.GetNamespace()) - - // Get the deployment - deployment, err := informerFactory.Apps().V1().Deployments().Lister().Deployments(item.GetNamespace()).Get(item.GetName()) - if err != nil { - logger.Info("Unable to find deployment") - return 
false - } - - deploymentReplicaSet := handler.getCurrentReplicaSetForDeployment(ctx, informerFactory, deployment) - if deploymentReplicaSet == nil { - logger.Info("Unable to find replica set for deployment") - return false - } - - allReady := handler.checkAllPodsReady(ctx, informerFactory, deployment, deploymentReplicaSet, doneCh) - if !allReady { - logger.Info("All pods are not ready yet for deployment") - return false - } - - // Check if the deployment is ready - if deployment.Status.ObservedGeneration != deployment.Generation { - logger.Info(fmt.Sprintf("Deployment status is not ready: Observed generation: %d, Generation: %d, Deployment Replicaset: %s", deployment.Status.ObservedGeneration, deployment.Generation, deploymentReplicaSet.Name)) - return false - } - - // ObservedGeneration should be updated to latest generation to avoid stale replicas - for _, c := range deployment.Status.Conditions { - // check for complete deployment condition - // Reference https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#complete-deployment - if c.Type == v1.DeploymentProgressing && c.Status == corev1.ConditionTrue && strings.EqualFold(c.Reason, "NewReplicaSetAvailable") { - logger.Info(fmt.Sprintf("Deployment is ready. Observed generation: %d, Generation: %d, Deployment Replicaset: %s", deployment.Status.ObservedGeneration, deployment.Generation, deploymentReplicaSet.Name)) - doneCh <- nil - return true - } else { - logger.Info(fmt.Sprintf("Deployment status is: %s - %s, Reason: %s, Deployment replicaset: %s", c.Type, c.Status, c.Reason, deploymentReplicaSet.Name)) - } - } - return false -} - -func (handler *deploymentWaiter) startInformers(ctx context.Context, item client.Object, doneCh chan<- error) error { - logger := ucplog.FromContextOrDiscard(ctx) - - informerFactory := informers.NewSharedInformerFactoryWithOptions(handler.clientSet, handler.cacheResyncInterval, informers.WithNamespace(item.GetNamespace())) - // Add event handlers to the pod informer - handler.addEventHandler(ctx, informerFactory, informerFactory.Core().V1().Pods().Informer(), item, doneCh) - - // Add event handlers to the deployment informer - handler.addEventHandler(ctx, informerFactory, informerFactory.Apps().V1().Deployments().Informer(), item, doneCh) - - // Add event handlers to the replicaset informer - handler.addEventHandler(ctx, informerFactory, informerFactory.Apps().V1().ReplicaSets().Informer(), item, doneCh) - - // Start the informers - informerFactory.Start(ctx.Done()) - - // Wait for the deployment and pod informer's cache to be synced. - informerFactory.WaitForCacheSync(ctx.Done()) - - logger.Info(fmt.Sprintf("Informers started and caches synced for deployment: %s in namespace: %s", item.GetName(), item.GetNamespace())) - return nil -} - -// Gets the current replica set for the deployment -func (handler *deploymentWaiter) getCurrentReplicaSetForDeployment(ctx context.Context, informerFactory informers.SharedInformerFactory, deployment *v1.Deployment) *v1.ReplicaSet { - if deployment == nil { - return nil - } - - logger := ucplog.FromContextOrDiscard(ctx).WithValues("deploymentName", deployment.Name, "namespace", deployment.Namespace) - - // List all replicasets for this deployment - rl, err := informerFactory.Apps().V1().ReplicaSets().Lister().ReplicaSets(deployment.Namespace).List(labels.Everything()) - if err != nil { - // This is a valid state which will eventually be resolved. Therefore, only log the error here. 
- logger.Info(fmt.Sprintf("Unable to list replicasets for deployment: %s", err.Error())) - return nil - } - - if len(rl) == 0 { - // This is a valid state which will eventually be resolved. Therefore, only log the error here. - return nil - } - - deploymentRevision := deployment.Annotations["deployment.kubernetes.io/revision"] - - // Find the latest ReplicaSet associated with the deployment - for _, rs := range rl { - if !metav1.IsControlledBy(rs, deployment) { - continue - } - if rs.Annotations == nil { - continue - } - revision, ok := rs.Annotations["deployment.kubernetes.io/revision"] - if !ok { - continue - } - - // The first answer here https://stackoverflow.com/questions/59848252/kubectl-retrieving-the-current-new-replicaset-for-a-deployment-in-json-forma - // looks like the best way to determine the current replicaset. - // Match the replica set revision with the deployment revision - if deploymentRevision == revision { - return rs - } - } - - return nil -} - -func (handler *deploymentWaiter) checkAllPodsReady(ctx context.Context, informerFactory informers.SharedInformerFactory, obj *v1.Deployment, deploymentReplicaSet *v1.ReplicaSet, doneCh chan<- error) bool { - logger := ucplog.FromContextOrDiscard(ctx).WithValues("deploymentName", obj.GetName(), "namespace", obj.GetNamespace()) - logger.Info("Checking if all pods in the deployment are ready") - - podsInDeployment, err := handler.getPodsInDeployment(ctx, informerFactory, obj, deploymentReplicaSet) - if err != nil { - logger.Info(fmt.Sprintf("Error getting pods for deployment: %s", err.Error())) - return false - } - - allReady := true - for _, pod := range podsInDeployment { - podReady, err := handler.checkPodStatus(ctx, &pod) - if err != nil { - // Terminate the deployment and return the error encountered - doneCh <- err - return false - } - if !podReady { - allReady = false - } - } - - if allReady { - logger.Info(fmt.Sprintf("All %d pods in the deployment are ready", len(podsInDeployment))) - } - return allReady -} - -func (handler *deploymentWaiter) getPodsInDeployment(ctx context.Context, informerFactory informers.SharedInformerFactory, deployment *v1.Deployment, deploymentReplicaSet *v1.ReplicaSet) ([]corev1.Pod, error) { - logger := ucplog.FromContextOrDiscard(ctx) - - pods := []corev1.Pod{} - - // List all pods that match the current replica set - pl, err := informerFactory.Core().V1().Pods().Lister().Pods(deployment.GetNamespace()).List(labels.Set(deployment.Spec.Selector.MatchLabels).AsSelector()) - if err != nil { - logger.Info(fmt.Sprintf("Unable to find pods for deployment %s in namespace %s", deployment.GetName(), deployment.GetNamespace())) - return []corev1.Pod{}, nil - } - - // Filter out the pods that are not in the Deployment's current ReplicaSet - for _, p := range pl { - if !metav1.IsControlledBy(p, deploymentReplicaSet) { - continue - } - pods = append(pods, *p) - } - - return pods, nil -} - -func (handler *deploymentWaiter) checkPodStatus(ctx context.Context, pod *corev1.Pod) (bool, error) { - logger := ucplog.FromContextOrDiscard(ctx).WithValues("podName", pod.Name, "namespace", pod.Namespace) - - conditionPodReady := true - for _, cc := range pod.Status.Conditions { - if cc.Type == corev1.PodReady && cc.Status != corev1.ConditionTrue { - // Do not return false here else if the pod transitions to a crash loop backoff state, - // we won't be able to detect that condition. 
- conditionPodReady = false - } - - if cc.Type == corev1.ContainersReady && cc.Status != corev1.ConditionTrue { - // Do not return false here else if the pod transitions to a crash loop backoff state, - // we won't be able to detect that condition. - conditionPodReady = false - } - } - - // Sometimes container statuses are not yet available and we do not want to falsely return that the containers are ready - if len(pod.Status.ContainerStatuses) <= 0 { - return false, nil - } - - for _, cs := range pod.Status.ContainerStatuses { - // Check if the container state is terminated or unable to start due to crash loop, image pull back off or error - // Note that sometimes a pod can go into running state but can crash later and can go undetected by this condition - // We will rely on the user defining a readiness probe to ensure that the pod is ready to serve traffic for those cases - if cs.State.Terminated != nil { - logger.Info(fmt.Sprintf("Container state is terminated Reason: %s, Message: %s", cs.State.Terminated.Reason, cs.State.Terminated.Message)) - return false, fmt.Errorf("Container state is 'Terminated' Reason: %s, Message: %s", cs.State.Terminated.Reason, cs.State.Terminated.Message) - } else if cs.State.Waiting != nil { - if cs.State.Waiting.Reason == "ErrImagePull" || cs.State.Waiting.Reason == "CrashLoopBackOff" || cs.State.Waiting.Reason == "ImagePullBackOff" { - message := cs.State.Waiting.Message - if cs.LastTerminationState.Terminated != nil { - message += " LastTerminationState: " + cs.LastTerminationState.Terminated.Message - } - return false, fmt.Errorf("Container state is 'Waiting' Reason: %s, Message: %s", cs.State.Waiting.Reason, message) - } else { - return false, nil - } - } else if cs.State.Running == nil { - // The container is not yet running - return false, nil - } else if !cs.Ready { - // The container is running but has not passed its readiness probe yet - return false, nil - } - } - - if !conditionPodReady { - return false, nil - } - logger.Info("All containers for pod are ready") - return true, nil -} diff --git a/pkg/corerp/handlers/kubernetes_deployment_waiter_test.go b/pkg/corerp/handlers/kubernetes_deployment_waiter_test.go deleted file mode 100644 index c7b48968a4..0000000000 --- a/pkg/corerp/handlers/kubernetes_deployment_waiter_test.go +++ /dev/null @@ -1,1116 +0,0 @@ -/* -Copyright 2023 The Radius Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package handlers - -import ( - "context" - "testing" - "time" - - "github.com/radius-project/radius/pkg/kubernetes" - "github.com/radius-project/radius/pkg/to" - "github.com/radius-project/radius/test/k8sutil" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - v1 "k8s.io/api/apps/v1" - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/client-go/informers" - "k8s.io/client-go/kubernetes/fake" -) - -var testDeployment = &v1.Deployment{ - TypeMeta: metav1.TypeMeta{ - Kind: "Deployment", - APIVersion: "apps/v1", - }, - ObjectMeta: metav1.ObjectMeta{ - Name: "test-deployment", - Namespace: "test-namespace", - Annotations: map[string]string{"deployment.kubernetes.io/revision": "1"}, - }, - Spec: v1.DeploymentSpec{ - Replicas: to.Ptr(int32(1)), - Selector: &metav1.LabelSelector{ - MatchLabels: map[string]string{ - "app": "test", - }, - }, - }, - Status: v1.DeploymentStatus{ - Conditions: []v1.DeploymentCondition{ - { - Type: v1.DeploymentProgressing, - Status: corev1.ConditionTrue, - Reason: "NewReplicaSetAvailable", - Message: "Deployment has minimum availability", - }, - }, - }, -} - -func addReplicaSetToDeployment(t *testing.T, ctx context.Context, clientset *fake.Clientset, deployment *v1.Deployment) *v1.ReplicaSet { - replicaSet := &v1.ReplicaSet{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-replicaset-1", - Namespace: deployment.Namespace, - Annotations: map[string]string{"deployment.kubernetes.io/revision": "1"}, - OwnerReferences: []metav1.OwnerReference{ - *metav1.NewControllerRef(deployment, schema.GroupVersionKind{ - Group: v1.SchemeGroupVersion.Group, - Version: v1.SchemeGroupVersion.Version, - Kind: "Deployment", - }), - }, - }, - } - - // Add the ReplicaSet objects to the fake Kubernetes clientset - _, err := clientset.AppsV1().ReplicaSets(replicaSet.Namespace).Create(ctx, replicaSet, metav1.CreateOptions{}) - require.NoError(t, err) - - _, err = clientset.AppsV1().Deployments(deployment.Namespace).Update(ctx, deployment, metav1.UpdateOptions{}) - require.NoError(t, err) - - return replicaSet -} - -func startInformers(ctx context.Context, clientSet *fake.Clientset, handler *kubernetesHandler) informers.SharedInformerFactory { - // Create a fake replicaset informer and start - informerFactory := informers.NewSharedInformerFactory(clientSet, 0) - - // Add informers - informerFactory.Apps().V1().Deployments().Informer() - informerFactory.Apps().V1().ReplicaSets().Informer() - informerFactory.Core().V1().Pods().Informer() - - informerFactory.Start(context.Background().Done()) - informerFactory.WaitForCacheSync(ctx.Done()) - return informerFactory -} - -func TestWaitUntilReady_NewResource(t *testing.T) { - ctx := context.Background() - - // Create first deployment that will be watched - deployment := &v1.Deployment{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-deployment", - Namespace: "test-namespace", - Labels: map[string]string{ - kubernetes.LabelManagedBy: kubernetes.LabelManagedByRadiusRP, - }, - Annotations: map[string]string{"deployment.kubernetes.io/revision": "1"}, - }, - Spec: v1.DeploymentSpec{ - Replicas: to.Ptr(int32(1)), - Selector: &metav1.LabelSelector{ - MatchLabels: map[string]string{ - "app": "test", - }, - }, - }, - Status: v1.DeploymentStatus{ - Conditions: []v1.DeploymentCondition{ - { - Type: v1.DeploymentProgressing, - Status: corev1.ConditionTrue, - Reason: "NewReplicaSetAvailable", - 
Message: "Deployment has minimum availability", - }, - }, - }, - } - - clientset := fake.NewSimpleClientset(deployment) - - // The deployment is not marked as ready till we find a replica set. Therefore, we need to create one. - addReplicaSetToDeployment(t, ctx, clientset, deployment) - - handler := kubernetesHandler{ - client: k8sutil.NewFakeKubeClient(nil), - deploymentWaiter: &deploymentWaiter{ - clientSet: clientset, - deploymentTimeOut: time.Duration(50) * time.Second, - cacheResyncInterval: time.Duration(10) * time.Second, - }, - } - - err := handler.deploymentWaiter.waitUntilReady(ctx, deployment) - require.NoError(t, err, "Failed to wait for deployment to be ready") -} - -func TestWaitUntilReady_Timeout(t *testing.T) { - ctx := context.Background() - // Create first deployment that will be watched - deployment := &v1.Deployment{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-deployment", - Namespace: "test-namespace", - Annotations: map[string]string{"deployment.kubernetes.io/revision": "1"}, - }, - Status: v1.DeploymentStatus{ - Conditions: []v1.DeploymentCondition{ - { - Type: v1.DeploymentProgressing, - Status: corev1.ConditionFalse, - Reason: "NewReplicaSetAvailable", - Message: "Deployment has minimum availability", - }, - }, - }, - } - - deploymentClient := fake.NewSimpleClientset(deployment) - - handler := kubernetesHandler{ - client: k8sutil.NewFakeKubeClient(nil), - deploymentWaiter: &deploymentWaiter{ - clientSet: deploymentClient, - deploymentTimeOut: time.Duration(1) * time.Second, - cacheResyncInterval: time.Duration(10) * time.Second, - }, - } - - err := handler.deploymentWaiter.waitUntilReady(ctx, deployment) - require.Error(t, err) - require.Equal(t, "deployment timed out, name: test-deployment, namespace test-namespace, status: Deployment has minimum availability, reason: NewReplicaSetAvailable", err.Error()) -} - -func TestWaitUntilReady_DifferentResourceName(t *testing.T) { - ctx := context.Background() - // Create first deployment that will be watched - deployment := &v1.Deployment{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-deployment", - Namespace: "test-namespace", - Annotations: map[string]string{"deployment.kubernetes.io/revision": "1"}, - }, - Status: v1.DeploymentStatus{ - Conditions: []v1.DeploymentCondition{ - { - Type: v1.DeploymentProgressing, - Status: corev1.ConditionTrue, - Reason: "NewReplicaSetAvailable", - Message: "Deployment has minimum availability", - }, - }, - }, - } - - clientset := fake.NewSimpleClientset(deployment) - - handler := kubernetesHandler{ - deploymentWaiter: &deploymentWaiter{ - clientSet: clientset, - deploymentTimeOut: time.Duration(1) * time.Second, - cacheResyncInterval: time.Duration(10) * time.Second, - }, - } - - err := handler.deploymentWaiter.waitUntilReady(ctx, &v1.Deployment{ - ObjectMeta: metav1.ObjectMeta{ - Name: "not-matched-deployment", - Namespace: "test-namespace", - }, - }) - - // It must be timed out because the name of the deployment does not match. 
- require.Error(t, err) - require.Equal(t, "deployment timed out, name: not-matched-deployment, namespace test-namespace, error occured while fetching latest status: deployments.apps \"not-matched-deployment\" not found", err.Error()) -} - -func TestGetPodsInDeployment(t *testing.T) { - // Create a fake Kubernetes clientset - fakeClient := fake.NewSimpleClientset() - - // Create a Deployment object - deployment := &v1.Deployment{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-deployment", - Namespace: "test-namespace", - Annotations: map[string]string{"deployment.kubernetes.io/revision": "1"}, - }, - Spec: v1.DeploymentSpec{ - Selector: &metav1.LabelSelector{ - MatchLabels: map[string]string{ - "app": "test-app", - }, - }, - }, - } - - // Create a ReplicaSet object - replicaset := &v1.ReplicaSet{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-replicaset", - Namespace: "test-namespace", - Labels: map[string]string{ - "app": "test-app", - }, - Annotations: map[string]string{"deployment.kubernetes.io/revision": "1"}, - UID: "1234", - }, - } - - // Create a Pod object - pod1 := &corev1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-pod1", - Namespace: "test-namespace", - Labels: map[string]string{ - "app": "test-app", - }, - OwnerReferences: []metav1.OwnerReference{ - { - Kind: "ReplicaSet", - Name: replicaset.Name, - Controller: to.Ptr(true), - UID: "1234", - }, - }, - }, - } - - // Create a Pod object - pod2 := &corev1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-pod2", - Namespace: "test-namespace", - Labels: map[string]string{ - "app": "doesnotmatch", - }, - OwnerReferences: []metav1.OwnerReference{ - { - Kind: "ReplicaSet", - Name: "xyz", - Controller: to.Ptr(true), - UID: "1234", - }, - }, - }, - } - - // Add the Pod object to the fake Kubernetes clientset - _, err := fakeClient.CoreV1().Pods(pod1.Namespace).Create(context.Background(), pod1, metav1.CreateOptions{}) - require.NoError(t, err, "Failed to create Pod: %v", err) - - _, err = fakeClient.CoreV1().Pods(pod2.Namespace).Create(context.Background(), pod2, metav1.CreateOptions{}) - require.NoError(t, err, "Failed to create Pod: %v", err) - - // Create a KubernetesHandler object with the fake clientset - deploymentWaiter := &deploymentWaiter{ - clientSet: fakeClient, - } - handler := &kubernetesHandler{ - deploymentWaiter: deploymentWaiter, - } - - ctx := context.Background() - informerFactory := startInformers(ctx, fakeClient, handler) - - // Call the getPodsInDeployment function - pods, err := deploymentWaiter.getPodsInDeployment(ctx, informerFactory, deployment, replicaset) - require.NoError(t, err) - require.Equal(t, 1, len(pods)) - require.Equal(t, pod1.Name, pods[0].Name) -} - -func TestGetCurrentReplicaSetForDeployment(t *testing.T) { - // Create a fake Kubernetes clientset - fakeClient := fake.NewSimpleClientset() - - // Create a Deployment object - deployment := &v1.Deployment{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-deployment", - Namespace: "test-namespace", - Annotations: map[string]string{"deployment.kubernetes.io/revision": "1"}, - }, - } - - // Create a ReplicaSet object with a higher revision than the other ReplicaSet - replicaSet1 := &v1.ReplicaSet{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-replicaset-1", - Namespace: "test-namespace", - Annotations: map[string]string{"deployment.kubernetes.io/revision": "1"}, - OwnerReferences: []metav1.OwnerReference{ - *metav1.NewControllerRef(deployment, schema.GroupVersionKind{ - Group: v1.SchemeGroupVersion.Group, - Version: 
v1.SchemeGroupVersion.Version, - Kind: "Deployment", - }), - }, - }, - } - // Create another ReplicaSet object with a lower revision than the other ReplicaSet - replicaSet2 := &v1.ReplicaSet{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-replicaset-2", - Namespace: "test-namespace", - Annotations: map[string]string{"deployment.kubernetes.io/revision": "0"}, - OwnerReferences: []metav1.OwnerReference{ - *metav1.NewControllerRef(deployment, schema.GroupVersionKind{ - Group: v1.SchemeGroupVersion.Group, - Version: v1.SchemeGroupVersion.Version, - Kind: "Deployment", - }), - }, - }, - } - - // Create another ReplicaSet object with a higher revision than the other ReplicaSet - replicaSet3 := &v1.ReplicaSet{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-replicaset-3", - Namespace: "test-namespace", - Annotations: map[string]string{"deployment.kubernetes.io/revision": "3"}, - OwnerReferences: []metav1.OwnerReference{ - *metav1.NewControllerRef(deployment, schema.GroupVersionKind{ - Group: v1.SchemeGroupVersion.Group, - Version: v1.SchemeGroupVersion.Version, - Kind: "Deployment", - }), - }, - }, - } - - // Add the ReplicaSet objects to the fake Kubernetes clientset - _, err := fakeClient.AppsV1().ReplicaSets(replicaSet1.Namespace).Create(context.Background(), replicaSet1, metav1.CreateOptions{}) - require.NoError(t, err) - _, err = fakeClient.AppsV1().ReplicaSets(replicaSet2.Namespace).Create(context.Background(), replicaSet2, metav1.CreateOptions{}) - require.NoError(t, err) - _, err = fakeClient.AppsV1().ReplicaSets(replicaSet2.Namespace).Create(context.Background(), replicaSet3, metav1.CreateOptions{}) - require.NoError(t, err) - - // Add the Deployment object to the fake Kubernetes clientset - _, err = fakeClient.AppsV1().Deployments(deployment.Namespace).Create(context.Background(), deployment, metav1.CreateOptions{}) - require.NoError(t, err) - - // Create a KubernetesHandler object with the fake clientset - deploymentWaiter := &deploymentWaiter{ - clientSet: fakeClient, - } - handler := &kubernetesHandler{ - deploymentWaiter: deploymentWaiter, - } - - ctx := context.Background() - informerFactory := startInformers(ctx, fakeClient, handler) - - // Call the getNewestReplicaSetForDeployment function - rs := deploymentWaiter.getCurrentReplicaSetForDeployment(ctx, informerFactory, deployment) - require.Equal(t, replicaSet1.Name, rs.Name) -} - -func TestCheckPodStatus(t *testing.T) { - pod := &corev1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-pod", - Namespace: "test-namespace", - }, - Status: corev1.PodStatus{}, - } - - podTests := []struct { - podCondition []corev1.PodCondition - containerStatus []corev1.ContainerStatus - isReady bool - expectedError string - }{ - { - // Container is in Terminated state - podCondition: nil, - containerStatus: []corev1.ContainerStatus{ - { - State: corev1.ContainerState{ - Terminated: &corev1.ContainerStateTerminated{ - Reason: "Error", - Message: "Container terminated due to an error", - }, - }, - }, - }, - isReady: false, - expectedError: "Container state is 'Terminated' Reason: Error, Message: Container terminated due to an error", - }, - { - // Container is in CrashLoopBackOff state - podCondition: nil, - containerStatus: []corev1.ContainerStatus{ - { - State: corev1.ContainerState{ - Waiting: &corev1.ContainerStateWaiting{ - Reason: "CrashLoopBackOff", - Message: "Back-off 5m0s restarting failed container=test-container pod=test-pod", - }, - }, - }, - }, - isReady: false, - expectedError: "Container state is 'Waiting' Reason: CrashLoopBackOff, 
Message: Back-off 5m0s restarting failed container=test-container pod=test-pod", - }, - { - // Container is in ErrImagePull state - podCondition: nil, - containerStatus: []corev1.ContainerStatus{ - { - State: corev1.ContainerState{ - Waiting: &corev1.ContainerStateWaiting{ - Reason: "ErrImagePull", - Message: "Cannot pull image", - }, - }, - }, - }, - isReady: false, - expectedError: "Container state is 'Waiting' Reason: ErrImagePull, Message: Cannot pull image", - }, - { - // Container is in ImagePullBackOff state - podCondition: nil, - containerStatus: []corev1.ContainerStatus{ - { - State: corev1.ContainerState{ - Waiting: &corev1.ContainerStateWaiting{ - Reason: "ImagePullBackOff", - Message: "ImagePullBackOff", - }, - }, - }, - }, - isReady: false, - expectedError: "Container state is 'Waiting' Reason: ImagePullBackOff, Message: ImagePullBackOff", - }, - { - // No container statuses available - isReady: false, - expectedError: "", - }, - { - // Container is in Waiting state but not a terminally failed state - podCondition: nil, - containerStatus: []corev1.ContainerStatus{ - { - State: corev1.ContainerState{ - Waiting: &corev1.ContainerStateWaiting{ - Reason: "ContainerCreating", - Message: "Container is being created", - }, - }, - Ready: false, - }, - }, - isReady: false, - expectedError: "", - }, - { - // Container's Running state is nil - podCondition: nil, - containerStatus: []corev1.ContainerStatus{ - { - State: corev1.ContainerState{ - Running: nil, - }, - Ready: false, - }, - }, - isReady: false, - expectedError: "", - }, - { - // Readiness check is not yet passed - podCondition: nil, - containerStatus: []corev1.ContainerStatus{ - { - State: corev1.ContainerState{ - Running: &corev1.ContainerStateRunning{}, - }, - Ready: false, - }, - }, - isReady: false, - expectedError: "", - }, - { - // Container is in Ready state - podCondition: nil, - containerStatus: []corev1.ContainerStatus{ - { - State: corev1.ContainerState{ - Running: &corev1.ContainerStateRunning{}, - }, - Ready: true, - }, - }, - isReady: true, - expectedError: "", - }, - } - - ctx := context.Background() - deploymentWaiter := NewDeploymentWaiter(fake.NewSimpleClientset()) - for _, tc := range podTests { - pod.Status.Conditions = tc.podCondition - pod.Status.ContainerStatuses = tc.containerStatus - isReady, err := deploymentWaiter.checkPodStatus(ctx, pod) - if tc.expectedError != "" { - require.Error(t, err) - require.Equal(t, tc.expectedError, err.Error()) - } else { - require.NoError(t, err) - } - require.Equal(t, tc.isReady, isReady) - } -} - -func TestCheckAllPodsReady_Success(t *testing.T) { - // Create a fake Kubernetes clientset - clientset := fake.NewSimpleClientset() - - ctx := context.Background() - - _, err := clientset.AppsV1().Deployments("test-namespace").Create(ctx, testDeployment, metav1.CreateOptions{}) - require.NoError(t, err) - - replicaSet := addReplicaSetToDeployment(t, ctx, clientset, testDeployment) - - // Create a pod - pod := &corev1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-pod", - Namespace: "test-namespace", - Labels: map[string]string{ - "app": "test", - }, - }, - Spec: corev1.PodSpec{ - Containers: []corev1.Container{ - { - Name: "test-container", - Image: "test-image", - }, - }, - }, - Status: corev1.PodStatus{ - Phase: corev1.PodRunning, - ContainerStatuses: []corev1.ContainerStatus{ - { - Name: "test-container", - Ready: true, - }, - }, - }, - } - _, err = clientset.CoreV1().Pods("test-namespace").Create(context.Background(), pod, metav1.CreateOptions{}) - assert.NoError(t, 
err) - - // Create an informer factory and add the deployment and replica set to the cache - informerFactory := informers.NewSharedInformerFactory(clientset, 0) - addTestObjects(t, clientset, informerFactory, testDeployment, replicaSet, pod) - - // Create a done channel - doneCh := make(chan error) - - // Create a handler with the fake clientset - deploymentWaiter := &deploymentWaiter{ - clientSet: clientset, - } - - // Call the checkAllPodsReady function - allReady := deploymentWaiter.checkAllPodsReady(ctx, informerFactory, testDeployment, replicaSet, doneCh) - - // Check that all pods are ready - require.True(t, allReady) -} - -func TestCheckAllPodsReady_Fail(t *testing.T) { - // Create a fake Kubernetes clientset - clientset := fake.NewSimpleClientset() - - ctx := context.Background() - - _, err := clientset.AppsV1().Deployments("test-namespace").Create(ctx, testDeployment, metav1.CreateOptions{}) - require.NoError(t, err) - - replicaSet := addReplicaSetToDeployment(t, ctx, clientset, testDeployment) - - // Create a pod - pod := &corev1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-pod1", - Namespace: "test-namespace", - Labels: map[string]string{ - "app": "test", - }, - OwnerReferences: []metav1.OwnerReference{ - { - Kind: "ReplicaSet", - Name: replicaSet.Name, - Controller: to.Ptr(true), - }, - }, - }, - Spec: corev1.PodSpec{ - Containers: []corev1.Container{ - { - Name: "test-container", - Image: "test-image", - }, - }, - }, - Status: corev1.PodStatus{ - Phase: corev1.PodRunning, - ContainerStatuses: []corev1.ContainerStatus{ - { - Name: "test-container", - Ready: false, - }, - }, - }, - } - _, err = clientset.CoreV1().Pods(pod.Namespace).Create(context.Background(), pod, metav1.CreateOptions{}) - require.NoError(t, err) - - // Create an informer factory and add the deployment and replica set to the cache - informerFactory := informers.NewSharedInformerFactory(clientset, 0) - addTestObjects(t, clientset, informerFactory, testDeployment, replicaSet, pod) - - // Create a done channel - doneCh := make(chan error) - - // Create a handler with the fake clientset - deploymentWaiter := &deploymentWaiter{ - clientSet: clientset, - } - - // Call the checkAllPodsReady function - allReady := deploymentWaiter.checkAllPodsReady(ctx, informerFactory, testDeployment, replicaSet, doneCh) - - // Check that all pods are ready - require.False(t, allReady) -} - -func TestCheckDeploymentStatus_AllReady(t *testing.T) { - // Create a fake Kubernetes fakeClient - fakeClient := fake.NewSimpleClientset() - - ctx := context.Background() - _, err := fakeClient.AppsV1().Deployments("test-namespace").Create(ctx, testDeployment, metav1.CreateOptions{}) - require.NoError(t, err) - replicaSet := addReplicaSetToDeployment(t, ctx, fakeClient, testDeployment) - - // Create a Pod object - pod := &corev1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-pod1", - Namespace: "test-namespace", - Labels: map[string]string{ - "app": "test", - }, - OwnerReferences: []metav1.OwnerReference{ - { - Kind: "ReplicaSet", - Name: replicaSet.Name, - Controller: to.Ptr(true), - }, - }, - }, - Status: corev1.PodStatus{ - Conditions: []corev1.PodCondition{ - { - Type: corev1.PodScheduled, - Status: corev1.ConditionTrue, - }, - }, - ContainerStatuses: []corev1.ContainerStatus{ - { - Name: "test-container", - Ready: true, - State: corev1.ContainerState{ - Running: &corev1.ContainerStateRunning{}, - }, - }, - }, - }, - } - - // Add the Pod object to the fake Kubernetes clientset - _, err = 
fakeClient.CoreV1().Pods(pod.Namespace).Create(ctx, pod, metav1.CreateOptions{}) - require.NoError(t, err, "Failed to create Pod: %v", err) - - // Create an informer factory and add the deployment to the cache - informerFactory := informers.NewSharedInformerFactory(fakeClient, 0) - addTestObjects(t, fakeClient, informerFactory, testDeployment, replicaSet, pod) - - // Create a fake item and object - item := &unstructured.Unstructured{ - Object: map[string]interface{}{ - "metadata": map[string]interface{}{ - "name": "test-deployment", - "namespace": "test-namespace", - }, - }, - } - - // Create a done channel - doneCh := make(chan error, 1) - - // Call the checkDeploymentStatus function - deploymentWaiter := &deploymentWaiter{ - clientSet: fakeClient, - } - - deploymentWaiter.checkDeploymentStatus(ctx, informerFactory, item, doneCh) - - err = <-doneCh - - // Check that the deployment readiness was checked - require.Nil(t, err) -} - -func TestCheckDeploymentStatus_NoReplicaSetsFound(t *testing.T) { - // Create a fake Kubernetes fakeClient - fakeClient := fake.NewSimpleClientset() - - ctx := context.Background() - _, err := fakeClient.AppsV1().Deployments("test-namespace").Create(ctx, testDeployment, metav1.CreateOptions{}) - require.NoError(t, err) - - // Create a Pod object - pod := &corev1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-pod1", - Namespace: "test-namespace", - Labels: map[string]string{ - "app": "test", - }, - }, - Status: corev1.PodStatus{ - Conditions: []corev1.PodCondition{ - { - Type: corev1.PodScheduled, - Status: corev1.ConditionTrue, - }, - }, - ContainerStatuses: []corev1.ContainerStatus{ - { - Name: "test-container", - Ready: true, - State: corev1.ContainerState{ - Running: &corev1.ContainerStateRunning{}, - }, - }, - }, - }, - } - - // Add the Pod object to the fake Kubernetes clientset - _, err = fakeClient.CoreV1().Pods(pod.Namespace).Create(ctx, pod, metav1.CreateOptions{}) - require.NoError(t, err, "Failed to create Pod: %v", err) - - // Create an informer factory and add the deployment to the cache - informerFactory := informers.NewSharedInformerFactory(fakeClient, 0) - err = informerFactory.Apps().V1().Deployments().Informer().GetIndexer().Add(testDeployment) - require.NoError(t, err, "Failed to add deployment to informer cache") - err = informerFactory.Core().V1().Pods().Informer().GetIndexer().Add(pod) - require.NoError(t, err, "Failed to add pod to informer cache") - // Note: No replica set added - - // Create a fake item and object - item := &unstructured.Unstructured{ - Object: map[string]interface{}{ - "metadata": map[string]interface{}{ - "name": "test-deployment", - "namespace": "test-namespace", - }, - }, - } - - // Create a done channel - doneCh := make(chan error, 1) - - // Call the checkDeploymentStatus function - deploymentWaiter := &deploymentWaiter{ - clientSet: fakeClient, - } - - allReady := deploymentWaiter.checkDeploymentStatus(ctx, informerFactory, item, doneCh) - - // Check that the deployment readiness was checked - require.False(t, allReady) -} - -func TestCheckDeploymentStatus_PodsNotReady(t *testing.T) { - // Create a fake Kubernetes fakeClient - fakeClient := fake.NewSimpleClientset() - - ctx := context.Background() - _, err := fakeClient.AppsV1().Deployments("test-namespace").Create(ctx, testDeployment, metav1.CreateOptions{}) - require.NoError(t, err) - replicaSet := addReplicaSetToDeployment(t, ctx, fakeClient, testDeployment) - - // Create a Pod object - pod := &corev1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: 
"test-pod1", - Namespace: "test-namespace", - Labels: map[string]string{ - "app": "test", - }, - OwnerReferences: []metav1.OwnerReference{ - { - Kind: "ReplicaSet", - Name: replicaSet.Name, - Controller: to.Ptr(true), - }, - }, - }, - Status: corev1.PodStatus{ - Conditions: []corev1.PodCondition{ - { - Type: corev1.PodScheduled, - Status: corev1.ConditionTrue, - }, - }, - ContainerStatuses: []corev1.ContainerStatus{ - { - Name: "test-container", - Ready: true, - State: corev1.ContainerState{ - Terminated: &corev1.ContainerStateTerminated{ - Reason: "Error", - Message: "Container terminated due to an error", - }, - }, - }, - }, - }, - } - - // Add the Pod object to the fake Kubernetes clientset - _, err = fakeClient.CoreV1().Pods(pod.Namespace).Create(ctx, pod, metav1.CreateOptions{}) - require.NoError(t, err, "Failed to create Pod: %v", err) - - // Create an informer factory and add the deployment to the cache - informerFactory := informers.NewSharedInformerFactory(fakeClient, 0) - addTestObjects(t, fakeClient, informerFactory, testDeployment, replicaSet, pod) - - // Create a fake item and object - item := &unstructured.Unstructured{ - Object: map[string]interface{}{ - "metadata": map[string]interface{}{ - "name": "test-deployment", - "namespace": "test-namespace", - }, - }, - } - - // Create a done channel - doneCh := make(chan error, 1) - - // Call the checkDeploymentStatus function - deploymentWaiter := &deploymentWaiter{ - clientSet: fakeClient, - } - - go deploymentWaiter.checkDeploymentStatus(ctx, informerFactory, item, doneCh) - err = <-doneCh - - // Check that the deployment readiness was checked - require.Error(t, err) - require.Equal(t, err.Error(), "Container state is 'Terminated' Reason: Error, Message: Container terminated due to an error") -} - -func TestCheckDeploymentStatus_ObservedGenerationMismatch(t *testing.T) { - // Modify testDeployment to have a different generation than the observed generation - generationMismatchDeployment := testDeployment.DeepCopy() - generationMismatchDeployment.Generation = 2 - - // Create a fake Kubernetes fakeClient - fakeClient := fake.NewSimpleClientset() - - ctx := context.Background() - _, err := fakeClient.AppsV1().Deployments("test-namespace").Create(ctx, generationMismatchDeployment, metav1.CreateOptions{}) - require.NoError(t, err) - replicaSet := addReplicaSetToDeployment(t, ctx, fakeClient, generationMismatchDeployment) - - // Create a Pod object - pod := &corev1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-pod1", - Namespace: "test-namespace", - Labels: map[string]string{ - "app": "test", - }, - OwnerReferences: []metav1.OwnerReference{ - { - Kind: "ReplicaSet", - Name: replicaSet.Name, - Controller: to.Ptr(true), - }, - }, - }, - Status: corev1.PodStatus{ - Conditions: []corev1.PodCondition{ - { - Type: corev1.PodScheduled, - Status: corev1.ConditionTrue, - }, - }, - ContainerStatuses: []corev1.ContainerStatus{ - { - Name: "test-container", - Ready: true, - State: corev1.ContainerState{ - Running: &corev1.ContainerStateRunning{}, - }, - }, - }, - }, - } - - // Add the Pod object to the fake Kubernetes clientset - _, err = fakeClient.CoreV1().Pods(pod.Namespace).Create(ctx, pod, metav1.CreateOptions{}) - require.NoError(t, err, "Failed to create Pod: %v", err) - - // Create an informer factory and add the deployment to the cache - informerFactory := informers.NewSharedInformerFactory(fakeClient, 0) - addTestObjects(t, fakeClient, informerFactory, generationMismatchDeployment, replicaSet, pod) - - // Create a fake item and 
object - item := &unstructured.Unstructured{ - Object: map[string]interface{}{ - "metadata": map[string]interface{}{ - "name": "test-deployment", - "namespace": "test-namespace", - }, - }, - } - - // Create a done channel - doneCh := make(chan error, 1) - - // Call the checkDeploymentStatus function - deploymentWaiter := &deploymentWaiter{ - clientSet: fakeClient, - } - - deploymentWaiter.checkDeploymentStatus(ctx, informerFactory, item, doneCh) - - // Check that the deployment readiness was checked - require.Zero(t, len(doneCh)) -} - -func TestCheckDeploymentStatus_DeploymentNotProgressing(t *testing.T) { - // Create a fake Kubernetes fakeClient - fakeClient := fake.NewSimpleClientset() - - deploymentNotProgressing := testDeployment.DeepCopy() - - ctx := context.Background() - _, err := fakeClient.AppsV1().Deployments("test-namespace").Create(ctx, deploymentNotProgressing, metav1.CreateOptions{}) - require.NoError(t, err) - replicaSet := addReplicaSetToDeployment(t, ctx, fakeClient, deploymentNotProgressing) - - // Create a Pod object - pod := &corev1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-pod1", - Namespace: "test-namespace", - Labels: map[string]string{ - "app": "test", - }, - OwnerReferences: []metav1.OwnerReference{ - { - Kind: "ReplicaSet", - Name: replicaSet.Name, - Controller: to.Ptr(true), - }, - }, - }, - Status: corev1.PodStatus{ - Conditions: []corev1.PodCondition{ - { - Type: corev1.PodScheduled, - Status: corev1.ConditionTrue, - }, - }, - ContainerStatuses: []corev1.ContainerStatus{ - { - Name: "test-container", - Ready: true, - State: corev1.ContainerState{ - Running: &corev1.ContainerStateRunning{}, - }, - }, - }, - }, - } - - // Add the Pod object to the fake Kubernetes clientset - _, err = fakeClient.CoreV1().Pods(pod.Namespace).Create(ctx, pod, metav1.CreateOptions{}) - require.NoError(t, err, "Failed to create Pod: %v", err) - - // Create an informer factory and add the deployment to the cache - informerFactory := informers.NewSharedInformerFactory(fakeClient, 0) - addTestObjects(t, fakeClient, informerFactory, deploymentNotProgressing, replicaSet, pod) - - deploymentNotProgressing.Status = v1.DeploymentStatus{ - Conditions: []v1.DeploymentCondition{ - { - Type: v1.DeploymentProgressing, - Status: corev1.ConditionFalse, - Reason: "NewReplicaSetAvailable", - Message: "Deployment has minimum availability", - }, - }, - } - - // Create a fake item and object - item := &unstructured.Unstructured{ - Object: map[string]interface{}{ - "metadata": map[string]interface{}{ - "name": "test-deployment", - "namespace": "test-namespace", - }, - }, - } - - // Create a done channel - doneCh := make(chan error, 1) - - // Call the checkDeploymentStatus function - deploymentWaiter := &deploymentWaiter{ - clientSet: fakeClient, - } - - ready := deploymentWaiter.checkDeploymentStatus(ctx, informerFactory, item, doneCh) - require.False(t, ready) -} - -func addTestObjects(t *testing.T, fakeClient *fake.Clientset, informerFactory informers.SharedInformerFactory, deployment *v1.Deployment, replicaSet *v1.ReplicaSet, pod *corev1.Pod) { - err := informerFactory.Apps().V1().Deployments().Informer().GetIndexer().Add(deployment) - require.NoError(t, err, "Failed to add deployment to informer cache") - err = informerFactory.Apps().V1().ReplicaSets().Informer().GetIndexer().Add(replicaSet) - require.NoError(t, err, "Failed to add replica set to informer cache") - err = informerFactory.Core().V1().Pods().Informer().GetIndexer().Add(pod) - require.NoError(t, err, "Failed to add pod to 
informer cache") -} diff --git a/pkg/corerp/handlers/kubernetes_http_proxy_waiter.go b/pkg/corerp/handlers/kubernetes_http_proxy_waiter.go deleted file mode 100644 index 3030c5bed9..0000000000 --- a/pkg/corerp/handlers/kubernetes_http_proxy_waiter.go +++ /dev/null @@ -1,171 +0,0 @@ -package handlers - -import ( - "context" - "errors" - "fmt" - "strings" - "time" - - contourv1 "github.com/projectcontour/contour/apis/projectcontour/v1" - "github.com/radius-project/radius/pkg/kubernetes" - "github.com/radius-project/radius/pkg/ucp/ucplog" - - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/client-go/dynamic" - "k8s.io/client-go/dynamic/dynamicinformer" - "k8s.io/client-go/informers" - "k8s.io/client-go/tools/cache" - "sigs.k8s.io/controller-runtime/pkg/client" -) - -const ( - MaxHTTPProxyDeploymentTimeout = time.Minute * time.Duration(10) - HTTPProxyConditionValid = "Valid" - HTTPProxyStatusInvalid = "invalid" - HTTPProxyStatusValid = "valid" -) - -type httpProxyWaiter struct { - dynamicClientSet dynamic.Interface - httpProxyDeploymentTimeout time.Duration - cacheResyncInterval time.Duration -} - -// NewHTTPProxyWaiter returns a new instance of HTTPProxyWaiter -func NewHTTPProxyWaiter(dynamicClientSet dynamic.Interface) *httpProxyWaiter { - return &httpProxyWaiter{ - dynamicClientSet: dynamicClientSet, - httpProxyDeploymentTimeout: MaxHTTPProxyDeploymentTimeout, - cacheResyncInterval: DefaultCacheResyncInterval, - } -} - -func (handler *httpProxyWaiter) addDynamicEventHandler(ctx context.Context, informerFactory dynamicinformer.DynamicSharedInformerFactory, informer cache.SharedIndexInformer, item client.Object, doneCh chan<- error) { - logger := ucplog.FromContextOrDiscard(ctx) - - _, err := informer.AddEventHandler(cache.ResourceEventHandlerFuncs{ - AddFunc: func(obj any) { - handler.checkHTTPProxyStatus(ctx, informerFactory, item, doneCh) - }, - UpdateFunc: func(_, newObj any) { - handler.checkHTTPProxyStatus(ctx, informerFactory, item, doneCh) - }, - }) - - if err != nil { - logger.Error(err, "failed to add event handler") - } -} - -// addEventHandler is not implemented for HTTPProxyWaiter -func (handler *httpProxyWaiter) addEventHandler(ctx context.Context, informerFactory informers.SharedInformerFactory, informer cache.SharedIndexInformer, item client.Object, doneCh chan<- error) { -} - -func (handler *httpProxyWaiter) waitUntilReady(ctx context.Context, obj client.Object) error { - logger := ucplog.FromContextOrDiscard(ctx).WithValues("httpProxyName", obj.GetName(), "namespace", obj.GetNamespace()) - - doneCh := make(chan error, 1) - - ctx, cancel := context.WithTimeout(ctx, handler.httpProxyDeploymentTimeout) - // This ensures that the informer is stopped when this function is returned. - defer cancel() - - // Create dynamic informer for HTTPProxy - dynamicInformerFactory := dynamicinformer.NewFilteredDynamicSharedInformerFactory(handler.dynamicClientSet, 0, obj.GetNamespace(), nil) - httpProxyInformer := dynamicInformerFactory.ForResource(contourv1.HTTPProxyGVR) - // Add event handlers to the http proxy informer - handler.addDynamicEventHandler(ctx, dynamicInformerFactory, httpProxyInformer.Informer(), obj, doneCh) - - // Start the informers - dynamicInformerFactory.Start(ctx.Done()) - - // Wait for the cache to be synced. 
- dynamicInformerFactory.WaitForCacheSync(ctx.Done()) - - select { - case <-ctx.Done(): - // Get the final status - proxy, err := httpProxyInformer.Lister().Get(obj.GetName()) - - if err != nil { - return fmt.Errorf("proxy deployment timed out, name: %s, namespace %s, error occured while fetching latest status: %w", obj.GetName(), obj.GetNamespace(), err) - } - - p := contourv1.HTTPProxy{} - err = runtime.DefaultUnstructuredConverter.FromUnstructured(proxy.(*unstructured.Unstructured).Object, &p) - if err != nil { - return fmt.Errorf("proxy deployment timed out, name: %s, namespace %s, error occured while fetching latest status: %w", obj.GetName(), obj.GetNamespace(), err) - } - - status := contourv1.DetailedCondition{} - if len(p.Status.Conditions) > 0 { - status = p.Status.Conditions[len(p.Status.Conditions)-1] - } - return fmt.Errorf("HTTP proxy deployment timed out, name: %s, namespace %s, status: %s, reason: %s", obj.GetName(), obj.GetNamespace(), status.Message, status.Reason) - case err := <-doneCh: - if err == nil { - logger.Info(fmt.Sprintf("Marking HTTP proxy deployment %s in namespace %s as complete", obj.GetName(), obj.GetNamespace())) - } - return err - } -} - -func (handler *httpProxyWaiter) checkHTTPProxyStatus(ctx context.Context, dynamicInformerFactory dynamicinformer.DynamicSharedInformerFactory, obj client.Object, doneCh chan<- error) bool { - logger := ucplog.FromContextOrDiscard(ctx).WithValues("httpProxyName", obj.GetName(), "namespace", obj.GetNamespace()) - selector := labels.SelectorFromSet( - map[string]string{ - kubernetes.LabelManagedBy: kubernetes.LabelManagedByRadiusRP, - kubernetes.LabelName: obj.GetName(), - }, - ) - proxies, err := dynamicInformerFactory.ForResource(contourv1.HTTPProxyGVR).Lister().List(selector) - if err != nil { - logger.Info(fmt.Sprintf("Unable to list http proxies: %s", err.Error())) - return false - } - - for _, proxy := range proxies { - p := contourv1.HTTPProxy{} - err = runtime.DefaultUnstructuredConverter.FromUnstructured(proxy.(*unstructured.Unstructured).Object, &p) - if err != nil { - logger.Info(fmt.Sprintf("Unable to convert http proxy: %s", err.Error())) - continue - } - - if len(p.Spec.Includes) == 0 && len(p.Spec.Routes) > 0 { - // This is a route HTTP proxy. We will not validate deployment completion for it and return success here - // logger.Info("Not validating the deployment of route HTTP proxy for ", p.Name) - doneCh <- nil - return true - } - - // We will check the status for the root HTTP proxy - if p.Status.CurrentStatus == HTTPProxyStatusInvalid { - if strings.Contains(p.Status.Description, "see Errors for details") { - var msg string - for _, c := range p.Status.Conditions { - if c.ObservedGeneration != p.Generation { - continue - } - if c.Type == HTTPProxyConditionValid && c.Status == "False" { - for _, e := range c.Errors { - msg += fmt.Sprintf("Error - Type: %s, Status: %s, Reason: %s, Message: %s\n", e.Type, e.Status, e.Reason, e.Message) - } - } - } - doneCh <- errors.New(msg) - } else { - doneCh <- fmt.Errorf("Failed to deploy HTTP proxy. 
Description: %s", p.Status.Description) - } - return false - } else if p.Status.CurrentStatus == HTTPProxyStatusValid { - // The HTTPProxy is ready - doneCh <- nil - return true - } - } - return false -} diff --git a/pkg/corerp/handlers/kubernetes_http_proxy_waiter_test.go b/pkg/corerp/handlers/kubernetes_http_proxy_waiter_test.go deleted file mode 100644 index 44837ef551..0000000000 --- a/pkg/corerp/handlers/kubernetes_http_proxy_waiter_test.go +++ /dev/null @@ -1,326 +0,0 @@ -/* -Copyright 2023 The Radius Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package handlers - -import ( - "context" - "testing" - - contourv1 "github.com/projectcontour/contour/apis/projectcontour/v1" - "github.com/radius-project/radius/pkg/kubernetes" - "github.com/stretchr/testify/require" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/client-go/dynamic/dynamicinformer" - fakedynamic "k8s.io/client-go/dynamic/fake" -) - -func TestCheckHTTPProxyStatus_ValidStatus(t *testing.T) { - - httpProxy := &contourv1.HTTPProxy{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: "default", - Name: "example.com", - Labels: map[string]string{ - kubernetes.LabelManagedBy: kubernetes.LabelManagedByRadiusRP, - kubernetes.LabelName: "example.com", - }, - }, - Status: contourv1.HTTPProxyStatus{ - CurrentStatus: HTTPProxyStatusValid, - }, - } - // create fake dynamic clientset - s := runtime.NewScheme() - err := contourv1.AddToScheme(s) - require.NoError(t, err) - fakeClient := fakedynamic.NewSimpleDynamicClient(s, httpProxy) - - // create a fake dynamic informer factory with a mock HTTPProxy informer - dynamicInformerFactory := dynamicinformer.NewFilteredDynamicSharedInformerFactory(fakeClient, 0, "default", nil) - err = dynamicInformerFactory.ForResource(contourv1.HTTPProxyGVR).Informer().GetIndexer().Add(httpProxy) - require.NoError(t, err, "Could not add test http proxy to informer cache") - - // create a mock object - obj := &contourv1.HTTPProxy{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: "default", - Name: "example.com", - }, - } - - // create a channel for the done signal - doneCh := make(chan error) - - httpProxyWaiter := &httpProxyWaiter{ - dynamicClientSet: fakeClient, - } - - ctx := context.Background() - dynamicInformerFactory.Start(ctx.Done()) - dynamicInformerFactory.WaitForCacheSync(ctx.Done()) - - // call the function with the fake clientset, informer factory, logger, object, and done channel - go httpProxyWaiter.checkHTTPProxyStatus(context.Background(), dynamicInformerFactory, obj, doneCh) - err = <-doneCh - require.NoError(t, err) -} - -func TestCheckHTTPProxyStatus_InvalidStatusForRootProxy(t *testing.T) { - - httpProxy := &contourv1.HTTPProxy{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: "default", - Name: "example.com", - Labels: map[string]string{ - kubernetes.LabelManagedBy: kubernetes.LabelManagedByRadiusRP, - kubernetes.LabelName: "example.com", - }, - }, - Spec: contourv1.HTTPProxySpec{ - VirtualHost: &contourv1.VirtualHost{ - Fqdn: "example.com", - }, - Includes: 
[]contourv1.Include{ - { - Name: "example.com", - Namespace: "default", - }, - }, - }, - Status: contourv1.HTTPProxyStatus{ - CurrentStatus: HTTPProxyStatusInvalid, - Description: "Failed to deploy HTTP proxy. see Errors for details", - Conditions: []contourv1.DetailedCondition{ - { - // specify Condition of type json - Condition: metav1.Condition{ - Type: HTTPProxyConditionValid, - Status: contourv1.ConditionFalse, - }, - Errors: []contourv1.SubCondition{ - { - Type: HTTPProxyConditionValid, - Status: contourv1.ConditionFalse, - Reason: "RouteNotDefined", - Message: "HTTPProxy is invalid", - }, - }, - }, - }, - }, - } - // create fake dynamic clientset - s := runtime.NewScheme() - err := contourv1.AddToScheme(s) - require.NoError(t, err) - fakeClient := fakedynamic.NewSimpleDynamicClient(s, httpProxy) - - // create a fake dynamic informer factory with a mock HTTPProxy informer - dynamicInformerFactory := dynamicinformer.NewFilteredDynamicSharedInformerFactory(fakeClient, 0, "default", nil) - err = dynamicInformerFactory.ForResource(contourv1.HTTPProxyGVR).Informer().GetIndexer().Add(httpProxy) - require.NoError(t, err, "Could not add test http proxy to informer cache") - - // create a mock object - obj := &contourv1.HTTPProxy{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: "default", - Name: "example.com", - }, - } - - // create a channel for the done signal - doneCh := make(chan error) - - httpProxyWaiter := &httpProxyWaiter{ - dynamicClientSet: fakeClient, - } - - ctx := context.Background() - dynamicInformerFactory.Start(ctx.Done()) - dynamicInformerFactory.WaitForCacheSync(ctx.Done()) - - // call the function with the fake clientset, informer factory, logger, object, and done channel - go httpProxyWaiter.checkHTTPProxyStatus(context.Background(), dynamicInformerFactory, obj, doneCh) - err = <-doneCh - require.EqualError(t, err, "Error - Type: Valid, Status: False, Reason: RouteNotDefined, Message: HTTPProxy is invalid\n") -} - -func TestCheckHTTPProxyStatus_InvalidStatusForRouteProxy(t *testing.T) { - httpProxyRoute := &contourv1.HTTPProxy{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: "default", - Name: "example.com", - Labels: map[string]string{ - kubernetes.LabelManagedBy: kubernetes.LabelManagedByRadiusRP, - kubernetes.LabelName: "example.com", - }, - }, - Spec: contourv1.HTTPProxySpec{ - Routes: []contourv1.Route{ - { - Conditions: []contourv1.MatchCondition{ - { - Prefix: "/", - }, - }, - Services: []contourv1.Service{ - { - Name: "test", - Port: 80, - }, - }, - }, - }, - }, - Status: contourv1.HTTPProxyStatus{ - CurrentStatus: HTTPProxyStatusInvalid, - Description: "Failed to deploy HTTP proxy. 
see Errors for details", - Conditions: []contourv1.DetailedCondition{ - { - // specify Condition of type json - Condition: metav1.Condition{ - Type: HTTPProxyConditionValid, - Status: contourv1.ConditionFalse, - }, - Errors: []contourv1.SubCondition{ - { - Type: HTTPProxyConditionValid, - Status: contourv1.ConditionFalse, - Reason: "orphaned", - Message: "HTTPProxy is invalid", - }, - }, - }, - }, - }, - } - // create fake dynamic clientset - s := runtime.NewScheme() - err := contourv1.AddToScheme(s) - require.NoError(t, err) - fakeClient := fakedynamic.NewSimpleDynamicClient(s, httpProxyRoute) - - // create a fake dynamic informer factory with a mock HTTPProxy informer - dynamicInformerFactory := dynamicinformer.NewFilteredDynamicSharedInformerFactory(fakeClient, 0, "default", nil) - err = dynamicInformerFactory.ForResource(contourv1.HTTPProxyGVR).Informer().GetIndexer().Add(httpProxyRoute) - require.NoError(t, err, "Could not add test http proxy to informer cache") - - // create a mock object - obj := &contourv1.HTTPProxy{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: "default", - Name: "example.com", - }, - } - - // create a channel for the done signal - doneCh := make(chan error) - - httpProxyWaiter := &httpProxyWaiter{ - dynamicClientSet: fakeClient, - } - - ctx := context.Background() - dynamicInformerFactory.Start(ctx.Done()) - dynamicInformerFactory.WaitForCacheSync(ctx.Done()) - - // call the function with the fake clientset, informer factory, logger, object, and done channel - go httpProxyWaiter.checkHTTPProxyStatus(context.Background(), dynamicInformerFactory, obj, doneCh) - err = <-doneCh - require.NoError(t, err) -} - -func TestCheckHTTPProxyStatus_WrongSelector(t *testing.T) { - - httpProxy := &contourv1.HTTPProxy{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: "default", - Name: "abcd.com", - Labels: map[string]string{ - kubernetes.LabelManagedBy: kubernetes.LabelManagedByRadiusRP, - kubernetes.LabelName: "abcd.com", - }, - }, - Spec: contourv1.HTTPProxySpec{ - VirtualHost: &contourv1.VirtualHost{ - Fqdn: "abcd.com", - }, - Includes: []contourv1.Include{ - { - Name: "abcd.com", - Namespace: "default", - }, - }, - }, - Status: contourv1.HTTPProxyStatus{ - CurrentStatus: HTTPProxyStatusInvalid, - Description: "Failed to deploy HTTP proxy. 
see Errors for details", - Conditions: []contourv1.DetailedCondition{ - { - // specify Condition of type json - Condition: metav1.Condition{ - Type: HTTPProxyConditionValid, - Status: contourv1.ConditionFalse, - }, - Errors: []contourv1.SubCondition{ - { - Type: HTTPProxyConditionValid, - Status: contourv1.ConditionFalse, - Reason: "RouteNotDefined", - Message: "HTTPProxy is invalid", - }, - }, - }, - }, - }, - } - - // create fake dynamic clientset - s := runtime.NewScheme() - err := contourv1.AddToScheme(s) - require.NoError(t, err) - fakeClient := fakedynamic.NewSimpleDynamicClient(s, httpProxy) - - // create a fake dynamic informer factory with a mock HTTPProxy informer - dynamicInformerFactory := dynamicinformer.NewFilteredDynamicSharedInformerFactory(fakeClient, 0, "default", nil) - err = dynamicInformerFactory.ForResource(contourv1.HTTPProxyGVR).Informer().GetIndexer().Add(httpProxy) - require.NoError(t, err, "Could not add test http proxy to informer cache") - - // create a mock object - obj := &contourv1.HTTPProxy{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: "default", - Name: "example.com", - }, - } - - // create a channel for the done signal - doneCh := make(chan error) - - httpProxyWaiter := &httpProxyWaiter{ - dynamicClientSet: fakeClient, - } - - ctx := context.Background() - dynamicInformerFactory.Start(ctx.Done()) - dynamicInformerFactory.WaitForCacheSync(ctx.Done()) - - // call the function with the fake clientset, informer factory, logger, object, and done channel - status := httpProxyWaiter.checkHTTPProxyStatus(context.Background(), dynamicInformerFactory, obj, doneCh) - require.False(t, status) -} diff --git a/pkg/corerp/handlers/kubernetes_test.go b/pkg/corerp/handlers/kubernetes_test.go index 0403b340f9..11d1c87e13 100644 --- a/pkg/corerp/handlers/kubernetes_test.go +++ b/pkg/corerp/handlers/kubernetes_test.go @@ -22,19 +22,94 @@ import ( "testing" "time" + "github.com/radius-project/radius/pkg/kubernetes" "github.com/radius-project/radius/pkg/resourcemodel" rpv1 "github.com/radius-project/radius/pkg/rp/v1" + "github.com/radius-project/radius/pkg/to" resources_kubernetes "github.com/radius-project/radius/pkg/ucp/resources/kubernetes" "github.com/radius-project/radius/test/k8sutil" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" v1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/informers" "k8s.io/client-go/kubernetes/fake" ) +var testDeployment = &v1.Deployment{ + TypeMeta: metav1.TypeMeta{ + Kind: "Deployment", + APIVersion: "apps/v1", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "test-deployment", + Namespace: "test-namespace", + Annotations: map[string]string{"deployment.kubernetes.io/revision": "1"}, + }, + Spec: v1.DeploymentSpec{ + Replicas: to.Ptr(int32(1)), + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "app": "test", + }, + }, + }, + Status: v1.DeploymentStatus{ + Conditions: []v1.DeploymentCondition{ + { + Type: v1.DeploymentProgressing, + Status: corev1.ConditionTrue, + Reason: "NewReplicaSetAvailable", + Message: "Deployment has minimum availability", + }, + }, + }, +} + +func addReplicaSetToDeployment(t *testing.T, ctx context.Context, clientset *fake.Clientset, deployment *v1.Deployment) *v1.ReplicaSet { + replicaSet := &v1.ReplicaSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-replicaset-1", + 
Namespace: deployment.Namespace, + Annotations: map[string]string{"deployment.kubernetes.io/revision": "1"}, + OwnerReferences: []metav1.OwnerReference{ + *metav1.NewControllerRef(deployment, schema.GroupVersionKind{ + Group: v1.SchemeGroupVersion.Group, + Version: v1.SchemeGroupVersion.Version, + Kind: "Deployment", + }), + }, + }, + } + + // Add the ReplicaSet objects to the fake Kubernetes clientset + _, err := clientset.AppsV1().ReplicaSets(replicaSet.Namespace).Create(ctx, replicaSet, metav1.CreateOptions{}) + require.NoError(t, err) + + _, err = clientset.AppsV1().Deployments(deployment.Namespace).Update(ctx, deployment, metav1.UpdateOptions{}) + require.NoError(t, err) + + return replicaSet +} + +func startInformers(ctx context.Context, clientSet *fake.Clientset, handler *kubernetesHandler) informers.SharedInformerFactory { + // Create a fake replicaset informer and start + informerFactory := informers.NewSharedInformerFactory(clientSet, 0) + + // Add informers + informerFactory.Apps().V1().Deployments().Informer() + informerFactory.Apps().V1().ReplicaSets().Informer() + informerFactory.Core().V1().Pods().Informer() + + informerFactory.Start(context.Background().Done()) + informerFactory.WaitForCacheSync(ctx.Done()) + return informerFactory +} + func TestPut(t *testing.T) { putTests := []struct { name string @@ -96,16 +171,16 @@ func TestPut(t *testing.T) { t.Run(tc.name, func(t *testing.T) { ctx := context.Background() - clientSet := fake.NewSimpleClientset(tc.in.Resource.CreateResource.Data.(runtime.Object)) handler := kubernetesHandler{ - client: k8sutil.NewFakeKubeClient(nil), - deploymentWaiter: &deploymentWaiter{ - clientSet: clientSet, - deploymentTimeOut: time.Duration(50) * time.Second, - cacheResyncInterval: time.Duration(1) * time.Second, - }, + client: k8sutil.NewFakeKubeClient(nil), + clientSet: nil, + deploymentTimeOut: time.Duration(50) * time.Second, + cacheResyncInterval: time.Duration(1) * time.Second, } + clientSet := fake.NewSimpleClientset(tc.in.Resource.CreateResource.Data.(runtime.Object)) + handler.clientSet = clientSet + // If the resource is a deployment, we need to add a replica set to it if tc.in.Resource.CreateResource.Data.(runtime.Object).GetObjectKind().GroupVersionKind().Kind == "Deployment" { // The deployment is not marked as ready till we find a replica set. Therefore, we need to create one. 
@@ -150,12 +225,10 @@ func TestDelete(t *testing.T) { } handler := kubernetesHandler{ - client: k8sutil.NewFakeKubeClient(nil), - k8sDiscoveryClient: dc, - deploymentWaiter: &deploymentWaiter{ - deploymentTimeOut: time.Duration(1) * time.Second, - cacheResyncInterval: time.Duration(10) * time.Second, - }, + client: k8sutil.NewFakeKubeClient(nil), + k8sDiscoveryClient: dc, + deploymentTimeOut: time.Duration(1) * time.Second, + cacheResyncInterval: time.Duration(10) * time.Second, } err := handler.client.Create(ctx, deployment) @@ -268,3 +341,996 @@ func TestConvertToUnstructured(t *testing.T) { }) } } + +func TestWaitUntilDeploymentIsReady_NewResource(t *testing.T) { + ctx := context.Background() + + // Create first deployment that will be watched + deployment := &v1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-deployment", + Namespace: "test-namespace", + Labels: map[string]string{ + kubernetes.LabelManagedBy: kubernetes.LabelManagedByRadiusRP, + }, + Annotations: map[string]string{"deployment.kubernetes.io/revision": "1"}, + }, + Spec: v1.DeploymentSpec{ + Replicas: to.Ptr(int32(1)), + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "app": "test", + }, + }, + }, + Status: v1.DeploymentStatus{ + Conditions: []v1.DeploymentCondition{ + { + Type: v1.DeploymentProgressing, + Status: corev1.ConditionTrue, + Reason: "NewReplicaSetAvailable", + Message: "Deployment has minimum availability", + }, + }, + }, + } + + clientset := fake.NewSimpleClientset(deployment) + + // The deployment is not marked as ready till we find a replica set. Therefore, we need to create one. + addReplicaSetToDeployment(t, ctx, clientset, deployment) + + handler := kubernetesHandler{ + clientSet: clientset, + deploymentTimeOut: time.Duration(50) * time.Second, + cacheResyncInterval: time.Duration(10) * time.Second, + } + + err := handler.waitUntilDeploymentIsReady(ctx, deployment) + require.NoError(t, err, "Failed to wait for deployment to be ready") +} + +func TestWaitUntilDeploymentIsReady_Timeout(t *testing.T) { + ctx := context.Background() + // Create first deployment that will be watched + deployment := &v1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-deployment", + Namespace: "test-namespace", + Annotations: map[string]string{"deployment.kubernetes.io/revision": "1"}, + }, + Status: v1.DeploymentStatus{ + Conditions: []v1.DeploymentCondition{ + { + Type: v1.DeploymentProgressing, + Status: corev1.ConditionFalse, + Reason: "NewReplicaSetAvailable", + Message: "Deployment has minimum availability", + }, + }, + }, + } + + deploymentClient := fake.NewSimpleClientset(deployment) + + handler := kubernetesHandler{ + clientSet: deploymentClient, + deploymentTimeOut: time.Duration(1) * time.Second, + cacheResyncInterval: time.Duration(10) * time.Second, + } + + err := handler.waitUntilDeploymentIsReady(ctx, deployment) + require.Error(t, err) + require.Equal(t, "deployment timed out, name: test-deployment, namespace test-namespace, status: Deployment has minimum availability, reason: NewReplicaSetAvailable", err.Error()) +} + +func TestWaitUntilDeploymentIsReady_DifferentResourceName(t *testing.T) { + ctx := context.Background() + // Create first deployment that will be watched + deployment := &v1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-deployment", + Namespace: "test-namespace", + Annotations: map[string]string{"deployment.kubernetes.io/revision": "1"}, + }, + Status: v1.DeploymentStatus{ + Conditions: []v1.DeploymentCondition{ + { + Type: 
v1.DeploymentProgressing, + Status: corev1.ConditionTrue, + Reason: "NewReplicaSetAvailable", + Message: "Deployment has minimum availability", + }, + }, + }, + } + + clientset := fake.NewSimpleClientset(deployment) + + handler := kubernetesHandler{ + clientSet: clientset, + deploymentTimeOut: time.Duration(1) * time.Second, + cacheResyncInterval: time.Duration(10) * time.Second, + } + + err := handler.waitUntilDeploymentIsReady(ctx, &v1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: "not-matched-deployment", + Namespace: "test-namespace", + }, + }) + + // It must be timed out because the name of the deployment does not match. + require.Error(t, err) + require.Equal(t, "deployment timed out, name: not-matched-deployment, namespace test-namespace, error occured while fetching latest status: deployments.apps \"not-matched-deployment\" not found", err.Error()) +} + +func TestGetPodsInDeployment(t *testing.T) { + // Create a fake Kubernetes clientset + fakeClient := fake.NewSimpleClientset() + + // Create a Deployment object + deployment := &v1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-deployment", + Namespace: "test-namespace", + Annotations: map[string]string{"deployment.kubernetes.io/revision": "1"}, + }, + Spec: v1.DeploymentSpec{ + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "app": "test-app", + }, + }, + }, + } + + // Create a ReplicaSet object + replicaset := &v1.ReplicaSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-replicaset", + Namespace: "test-namespace", + Labels: map[string]string{ + "app": "test-app", + }, + Annotations: map[string]string{"deployment.kubernetes.io/revision": "1"}, + UID: "1234", + }, + } + + // Create a Pod object + pod1 := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-pod1", + Namespace: "test-namespace", + Labels: map[string]string{ + "app": "test-app", + }, + OwnerReferences: []metav1.OwnerReference{ + { + Kind: "ReplicaSet", + Name: replicaset.Name, + Controller: to.Ptr(true), + UID: "1234", + }, + }, + }, + } + + // Create a Pod object + pod2 := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-pod2", + Namespace: "test-namespace", + Labels: map[string]string{ + "app": "doesnotmatch", + }, + OwnerReferences: []metav1.OwnerReference{ + { + Kind: "ReplicaSet", + Name: "xyz", + Controller: to.Ptr(true), + UID: "1234", + }, + }, + }, + } + + // Add the Pod object to the fake Kubernetes clientset + _, err := fakeClient.CoreV1().Pods(pod1.Namespace).Create(context.Background(), pod1, metav1.CreateOptions{}) + require.NoError(t, err, "Failed to create Pod: %v", err) + + _, err = fakeClient.CoreV1().Pods(pod2.Namespace).Create(context.Background(), pod2, metav1.CreateOptions{}) + require.NoError(t, err, "Failed to create Pod: %v", err) + + // Create a KubernetesHandler object with the fake clientset + handler := &kubernetesHandler{ + clientSet: fakeClient, + } + + ctx := context.Background() + informerFactory := startInformers(ctx, fakeClient, handler) + + // Call the getPodsInDeployment function + pods, err := handler.getPodsInDeployment(ctx, informerFactory, deployment, replicaset) + require.NoError(t, err) + require.Equal(t, 1, len(pods)) + require.Equal(t, pod1.Name, pods[0].Name) +} + +func TestGetCurrentReplicaSetForDeployment(t *testing.T) { + // Create a fake Kubernetes clientset + fakeClient := fake.NewSimpleClientset() + + // Create a Deployment object + deployment := &v1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-deployment", + Namespace: "test-namespace", + 
Annotations: map[string]string{"deployment.kubernetes.io/revision": "1"}, + }, + } + + // Create a ReplicaSet object whose revision matches the deployment's revision annotation + replicaSet1 := &v1.ReplicaSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-replicaset-1", + Namespace: "test-namespace", + Annotations: map[string]string{"deployment.kubernetes.io/revision": "1"}, + OwnerReferences: []metav1.OwnerReference{ + *metav1.NewControllerRef(deployment, schema.GroupVersionKind{ + Group: v1.SchemeGroupVersion.Group, + Version: v1.SchemeGroupVersion.Version, + Kind: "Deployment", + }), + }, + }, + } + // Create another ReplicaSet object with a lower revision than the other ReplicaSet + replicaSet2 := &v1.ReplicaSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-replicaset-2", + Namespace: "test-namespace", + Annotations: map[string]string{"deployment.kubernetes.io/revision": "0"}, + OwnerReferences: []metav1.OwnerReference{ + *metav1.NewControllerRef(deployment, schema.GroupVersionKind{ + Group: v1.SchemeGroupVersion.Group, + Version: v1.SchemeGroupVersion.Version, + Kind: "Deployment", + }), + }, + }, + } + + // Create another ReplicaSet object with a higher revision than the other ReplicaSet + replicaSet3 := &v1.ReplicaSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-replicaset-3", + Namespace: "test-namespace", + Annotations: map[string]string{"deployment.kubernetes.io/revision": "3"}, + OwnerReferences: []metav1.OwnerReference{ + *metav1.NewControllerRef(deployment, schema.GroupVersionKind{ + Group: v1.SchemeGroupVersion.Group, + Version: v1.SchemeGroupVersion.Version, + Kind: "Deployment", + }), + }, + }, + } + + // Add the ReplicaSet objects to the fake Kubernetes clientset + _, err := fakeClient.AppsV1().ReplicaSets(replicaSet1.Namespace).Create(context.Background(), replicaSet1, metav1.CreateOptions{}) + require.NoError(t, err) + _, err = fakeClient.AppsV1().ReplicaSets(replicaSet2.Namespace).Create(context.Background(), replicaSet2, metav1.CreateOptions{}) + require.NoError(t, err) + _, err = fakeClient.AppsV1().ReplicaSets(replicaSet2.Namespace).Create(context.Background(), replicaSet3, metav1.CreateOptions{}) + require.NoError(t, err) + + // Add the Deployment object to the fake Kubernetes clientset + _, err = fakeClient.AppsV1().Deployments(deployment.Namespace).Create(context.Background(), deployment, metav1.CreateOptions{}) + require.NoError(t, err) + + // Create a KubernetesHandler object with the fake clientset + handler := &kubernetesHandler{ + clientSet: fakeClient, + } + + ctx := context.Background() + informerFactory := startInformers(ctx, fakeClient, handler) + + // Call the getCurrentReplicaSetForDeployment function + rs := handler.getCurrentReplicaSetForDeployment(ctx, informerFactory, deployment) + require.Equal(t, replicaSet1.Name, rs.Name) +} + +func TestCheckPodStatus(t *testing.T) { + pod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-pod", + Namespace: "test-namespace", + }, + Status: corev1.PodStatus{}, + } + + podTests := []struct { + podCondition []corev1.PodCondition + containerStatus []corev1.ContainerStatus + isReady bool + expectedError string + }{ + { + // Container is in Terminated state + podCondition: nil, + containerStatus: []corev1.ContainerStatus{ + { + State: corev1.ContainerState{ + Terminated: &corev1.ContainerStateTerminated{ + Reason: "Error", + Message: "Container terminated due to an error", + }, + }, + }, + }, + isReady: false, + expectedError: "Container state is 'Terminated' Reason: Error, Message: Container terminated due to
an error", + }, + { + // Container is in CrashLoopBackOff state + podCondition: nil, + containerStatus: []corev1.ContainerStatus{ + { + State: corev1.ContainerState{ + Waiting: &corev1.ContainerStateWaiting{ + Reason: "CrashLoopBackOff", + Message: "Back-off 5m0s restarting failed container=test-container pod=test-pod", + }, + }, + }, + }, + isReady: false, + expectedError: "Container state is 'Waiting' Reason: CrashLoopBackOff, Message: Back-off 5m0s restarting failed container=test-container pod=test-pod", + }, + { + // Container is in ErrImagePull state + podCondition: nil, + containerStatus: []corev1.ContainerStatus{ + { + State: corev1.ContainerState{ + Waiting: &corev1.ContainerStateWaiting{ + Reason: "ErrImagePull", + Message: "Cannot pull image", + }, + }, + }, + }, + isReady: false, + expectedError: "Container state is 'Waiting' Reason: ErrImagePull, Message: Cannot pull image", + }, + { + // Container is in ImagePullBackOff state + podCondition: nil, + containerStatus: []corev1.ContainerStatus{ + { + State: corev1.ContainerState{ + Waiting: &corev1.ContainerStateWaiting{ + Reason: "ImagePullBackOff", + Message: "ImagePullBackOff", + }, + }, + }, + }, + isReady: false, + expectedError: "Container state is 'Waiting' Reason: ImagePullBackOff, Message: ImagePullBackOff", + }, + { + // No container statuses available + isReady: false, + expectedError: "", + }, + { + // Container is in Waiting state but not a terminally failed state + podCondition: nil, + containerStatus: []corev1.ContainerStatus{ + { + State: corev1.ContainerState{ + Waiting: &corev1.ContainerStateWaiting{ + Reason: "ContainerCreating", + Message: "Container is being created", + }, + }, + Ready: false, + }, + }, + isReady: false, + expectedError: "", + }, + { + // Container's Running state is nil + podCondition: nil, + containerStatus: []corev1.ContainerStatus{ + { + State: corev1.ContainerState{ + Running: nil, + }, + Ready: false, + }, + }, + isReady: false, + expectedError: "", + }, + { + // Readiness check is not yet passed + podCondition: nil, + containerStatus: []corev1.ContainerStatus{ + { + State: corev1.ContainerState{ + Running: &corev1.ContainerStateRunning{}, + }, + Ready: false, + }, + }, + isReady: false, + expectedError: "", + }, + { + // Container is in Ready state + podCondition: nil, + containerStatus: []corev1.ContainerStatus{ + { + State: corev1.ContainerState{ + Running: &corev1.ContainerStateRunning{}, + }, + Ready: true, + }, + }, + isReady: true, + expectedError: "", + }, + } + + ctx := context.Background() + handler := &kubernetesHandler{} + for _, tc := range podTests { + pod.Status.Conditions = tc.podCondition + pod.Status.ContainerStatuses = tc.containerStatus + isReady, err := handler.checkPodStatus(ctx, pod) + if tc.expectedError != "" { + require.Error(t, err) + require.Equal(t, tc.expectedError, err.Error()) + } else { + require.NoError(t, err) + } + require.Equal(t, tc.isReady, isReady) + } +} + +func TestCheckAllPodsReady_Success(t *testing.T) { + // Create a fake Kubernetes clientset + clientset := fake.NewSimpleClientset() + + ctx := context.Background() + + _, err := clientset.AppsV1().Deployments("test-namespace").Create(ctx, testDeployment, metav1.CreateOptions{}) + require.NoError(t, err) + + replicaSet := addReplicaSetToDeployment(t, ctx, clientset, testDeployment) + + // Create a pod + pod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-pod", + Namespace: "test-namespace", + Labels: map[string]string{ + "app": "test", + }, + }, + Spec: corev1.PodSpec{ + 
Containers: []corev1.Container{ + { + Name: "test-container", + Image: "test-image", + }, + }, + }, + Status: corev1.PodStatus{ + Phase: corev1.PodRunning, + ContainerStatuses: []corev1.ContainerStatus{ + { + Name: "test-container", + Ready: true, + }, + }, + }, + } + _, err = clientset.CoreV1().Pods("test-namespace").Create(context.Background(), pod, metav1.CreateOptions{}) + assert.NoError(t, err) + + // Create an informer factory and add the deployment and replica set to the cache + informerFactory := informers.NewSharedInformerFactory(clientset, 0) + addTestObjects(t, clientset, informerFactory, testDeployment, replicaSet, pod) + + // Create a done channel + doneCh := make(chan error) + + // Create a handler with the fake clientset + handler := &kubernetesHandler{ + clientSet: clientset, + } + + // Call the checkAllPodsReady function + allReady := handler.checkAllPodsReady(ctx, informerFactory, testDeployment, replicaSet, doneCh) + + // Check that all pods are ready + require.True(t, allReady) +} + +func TestCheckAllPodsReady_Fail(t *testing.T) { + // Create a fake Kubernetes clientset + clientset := fake.NewSimpleClientset() + + ctx := context.Background() + + _, err := clientset.AppsV1().Deployments("test-namespace").Create(ctx, testDeployment, metav1.CreateOptions{}) + require.NoError(t, err) + + replicaSet := addReplicaSetToDeployment(t, ctx, clientset, testDeployment) + + // Create a pod + pod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-pod1", + Namespace: "test-namespace", + Labels: map[string]string{ + "app": "test", + }, + OwnerReferences: []metav1.OwnerReference{ + { + Kind: "ReplicaSet", + Name: replicaSet.Name, + Controller: to.Ptr(true), + }, + }, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "test-container", + Image: "test-image", + }, + }, + }, + Status: corev1.PodStatus{ + Phase: corev1.PodRunning, + ContainerStatuses: []corev1.ContainerStatus{ + { + Name: "test-container", + Ready: false, + }, + }, + }, + } + _, err = clientset.CoreV1().Pods(pod.Namespace).Create(context.Background(), pod, metav1.CreateOptions{}) + require.NoError(t, err) + + // Create an informer factory and add the deployment and replica set to the cache + informerFactory := informers.NewSharedInformerFactory(clientset, 0) + addTestObjects(t, clientset, informerFactory, testDeployment, replicaSet, pod) + + // Create a done channel + doneCh := make(chan error) + + // Create a handler with the fake clientset + handler := &kubernetesHandler{ + clientSet: clientset, + } + + // Call the checkAllPodsReady function + allReady := handler.checkAllPodsReady(ctx, informerFactory, testDeployment, replicaSet, doneCh) + + // Check that all pods are ready + require.False(t, allReady) +} + +func TestCheckDeploymentStatus_AllReady(t *testing.T) { + // Create a fake Kubernetes fakeClient + fakeClient := fake.NewSimpleClientset() + + ctx := context.Background() + _, err := fakeClient.AppsV1().Deployments("test-namespace").Create(ctx, testDeployment, metav1.CreateOptions{}) + require.NoError(t, err) + replicaSet := addReplicaSetToDeployment(t, ctx, fakeClient, testDeployment) + + // Create a Pod object + pod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-pod1", + Namespace: "test-namespace", + Labels: map[string]string{ + "app": "test", + }, + OwnerReferences: []metav1.OwnerReference{ + { + Kind: "ReplicaSet", + Name: replicaSet.Name, + Controller: to.Ptr(true), + }, + }, + }, + Status: corev1.PodStatus{ + Conditions: []corev1.PodCondition{ + { + Type: 
corev1.PodScheduled, + Status: corev1.ConditionTrue, + }, + }, + ContainerStatuses: []corev1.ContainerStatus{ + { + Name: "test-container", + Ready: true, + State: corev1.ContainerState{ + Running: &corev1.ContainerStateRunning{}, + }, + }, + }, + }, + } + + // Add the Pod object to the fake Kubernetes clientset + _, err = fakeClient.CoreV1().Pods(pod.Namespace).Create(ctx, pod, metav1.CreateOptions{}) + require.NoError(t, err, "Failed to create Pod: %v", err) + + // Create an informer factory and add the deployment to the cache + informerFactory := informers.NewSharedInformerFactory(fakeClient, 0) + addTestObjects(t, fakeClient, informerFactory, testDeployment, replicaSet, pod) + + // Create a fake item and object + item := &unstructured.Unstructured{ + Object: map[string]interface{}{ + "metadata": map[string]interface{}{ + "name": "test-deployment", + "namespace": "test-namespace", + }, + }, + } + + // Create a done channel + doneCh := make(chan error, 1) + + // Call the checkDeploymentStatus function + handler := &kubernetesHandler{ + clientSet: fakeClient, + } + + go handler.checkDeploymentStatus(ctx, informerFactory, item, doneCh) + + err = <-doneCh + + // Check that the deployment readiness was checked + require.Nil(t, err) +} + +func TestCheckDeploymentStatus_NoReplicaSetsFound(t *testing.T) { + // Create a fake Kubernetes fakeClient + fakeClient := fake.NewSimpleClientset() + + ctx := context.Background() + _, err := fakeClient.AppsV1().Deployments("test-namespace").Create(ctx, testDeployment, metav1.CreateOptions{}) + require.NoError(t, err) + + // Create a Pod object + pod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-pod1", + Namespace: "test-namespace", + Labels: map[string]string{ + "app": "test", + }, + }, + Status: corev1.PodStatus{ + Conditions: []corev1.PodCondition{ + { + Type: corev1.PodScheduled, + Status: corev1.ConditionTrue, + }, + }, + ContainerStatuses: []corev1.ContainerStatus{ + { + Name: "test-container", + Ready: true, + State: corev1.ContainerState{ + Running: &corev1.ContainerStateRunning{}, + }, + }, + }, + }, + } + + // Add the Pod object to the fake Kubernetes clientset + _, err = fakeClient.CoreV1().Pods(pod.Namespace).Create(ctx, pod, metav1.CreateOptions{}) + require.NoError(t, err, "Failed to create Pod: %v", err) + + // Create an informer factory and add the deployment to the cache + informerFactory := informers.NewSharedInformerFactory(fakeClient, 0) + err = informerFactory.Apps().V1().Deployments().Informer().GetIndexer().Add(testDeployment) + require.NoError(t, err, "Failed to add deployment to informer cache") + err = informerFactory.Core().V1().Pods().Informer().GetIndexer().Add(pod) + require.NoError(t, err, "Failed to add pod to informer cache") + // Note: No replica set added + + // Create a fake item and object + item := &unstructured.Unstructured{ + Object: map[string]interface{}{ + "metadata": map[string]interface{}{ + "name": "test-deployment", + "namespace": "test-namespace", + }, + }, + } + + // Create a done channel + doneCh := make(chan error, 1) + + // Call the checkDeploymentStatus function + handler := &kubernetesHandler{ + clientSet: fakeClient, + } + + allReady := handler.checkDeploymentStatus(ctx, informerFactory, item, doneCh) + + // Check that the deployment readiness was checked + require.False(t, allReady) +} + +func TestCheckDeploymentStatus_PodsNotReady(t *testing.T) { + // Create a fake Kubernetes fakeClient + fakeClient := fake.NewSimpleClientset() + + ctx := context.Background() + _, err := 
fakeClient.AppsV1().Deployments("test-namespace").Create(ctx, testDeployment, metav1.CreateOptions{}) + require.NoError(t, err) + replicaSet := addReplicaSetToDeployment(t, ctx, fakeClient, testDeployment) + + // Create a Pod object + pod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-pod1", + Namespace: "test-namespace", + Labels: map[string]string{ + "app": "test", + }, + OwnerReferences: []metav1.OwnerReference{ + { + Kind: "ReplicaSet", + Name: replicaSet.Name, + Controller: to.Ptr(true), + }, + }, + }, + Status: corev1.PodStatus{ + Conditions: []corev1.PodCondition{ + { + Type: corev1.PodScheduled, + Status: corev1.ConditionTrue, + }, + }, + ContainerStatuses: []corev1.ContainerStatus{ + { + Name: "test-container", + Ready: true, + State: corev1.ContainerState{ + Terminated: &corev1.ContainerStateTerminated{ + Reason: "Error", + Message: "Container terminated due to an error", + }, + }, + }, + }, + }, + } + + // Add the Pod object to the fake Kubernetes clientset + _, err = fakeClient.CoreV1().Pods(pod.Namespace).Create(ctx, pod, metav1.CreateOptions{}) + require.NoError(t, err, "Failed to create Pod: %v", err) + + // Create an informer factory and add the deployment to the cache + informerFactory := informers.NewSharedInformerFactory(fakeClient, 0) + addTestObjects(t, fakeClient, informerFactory, testDeployment, replicaSet, pod) + + // Create a fake item and object + item := &unstructured.Unstructured{ + Object: map[string]interface{}{ + "metadata": map[string]interface{}{ + "name": "test-deployment", + "namespace": "test-namespace", + }, + }, + } + + // Create a done channel + doneCh := make(chan error, 1) + + // Call the checkDeploymentStatus function + handler := &kubernetesHandler{ + clientSet: fakeClient, + } + + go handler.checkDeploymentStatus(ctx, informerFactory, item, doneCh) + err = <-doneCh + + // Check that the deployment readiness was checked + require.Error(t, err) + require.Equal(t, err.Error(), "Container state is 'Terminated' Reason: Error, Message: Container terminated due to an error") +} + +func TestCheckDeploymentStatus_ObservedGenerationMismatch(t *testing.T) { + // Modify testDeployment to have a different generation than the observed generation + testDeployment.Generation = 2 + + // Create a fake Kubernetes fakeClient + fakeClient := fake.NewSimpleClientset() + + ctx := context.Background() + _, err := fakeClient.AppsV1().Deployments("test-namespace").Create(ctx, testDeployment, metav1.CreateOptions{}) + require.NoError(t, err) + replicaSet := addReplicaSetToDeployment(t, ctx, fakeClient, testDeployment) + + // Create a Pod object + pod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-pod1", + Namespace: "test-namespace", + Labels: map[string]string{ + "app": "test", + }, + OwnerReferences: []metav1.OwnerReference{ + { + Kind: "ReplicaSet", + Name: replicaSet.Name, + Controller: to.Ptr(true), + }, + }, + }, + Status: corev1.PodStatus{ + Conditions: []corev1.PodCondition{ + { + Type: corev1.PodScheduled, + Status: corev1.ConditionTrue, + }, + }, + ContainerStatuses: []corev1.ContainerStatus{ + { + Name: "test-container", + Ready: true, + State: corev1.ContainerState{ + Running: &corev1.ContainerStateRunning{}, + }, + }, + }, + }, + } + + // Add the Pod object to the fake Kubernetes clientset + _, err = fakeClient.CoreV1().Pods(pod.Namespace).Create(ctx, pod, metav1.CreateOptions{}) + require.NoError(t, err, "Failed to create Pod: %v", err) + + // Create an informer factory and add the deployment to the cache + informerFactory := 
informers.NewSharedInformerFactory(fakeClient, 0) + addTestObjects(t, fakeClient, informerFactory, testDeployment, replicaSet, pod) + + // Create a fake item and object + item := &unstructured.Unstructured{ + Object: map[string]interface{}{ + "metadata": map[string]interface{}{ + "name": "test-deployment", + "namespace": "test-namespace", + }, + }, + } + + // Create a done channel + doneCh := make(chan error, 1) + + // Call the checkDeploymentStatus function + handler := &kubernetesHandler{ + clientSet: fakeClient, + } + + handler.checkDeploymentStatus(ctx, informerFactory, item, doneCh) + + // Check that the deployment readiness was checked + require.Zero(t, len(doneCh)) +} + +func TestCheckDeploymentStatus_DeploymentNotProgressing(t *testing.T) { + // Create a fake Kubernetes fakeClient + fakeClient := fake.NewSimpleClientset() + + ctx := context.Background() + _, err := fakeClient.AppsV1().Deployments("test-namespace").Create(ctx, testDeployment, metav1.CreateOptions{}) + require.NoError(t, err) + replicaSet := addReplicaSetToDeployment(t, ctx, fakeClient, testDeployment) + + // Create a Pod object + pod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-pod1", + Namespace: "test-namespace", + Labels: map[string]string{ + "app": "test", + }, + OwnerReferences: []metav1.OwnerReference{ + { + Kind: "ReplicaSet", + Name: replicaSet.Name, + Controller: to.Ptr(true), + }, + }, + }, + Status: corev1.PodStatus{ + Conditions: []corev1.PodCondition{ + { + Type: corev1.PodScheduled, + Status: corev1.ConditionTrue, + }, + }, + ContainerStatuses: []corev1.ContainerStatus{ + { + Name: "test-container", + Ready: true, + State: corev1.ContainerState{ + Running: &corev1.ContainerStateRunning{}, + }, + }, + }, + }, + } + + // Add the Pod object to the fake Kubernetes clientset + _, err = fakeClient.CoreV1().Pods(pod.Namespace).Create(ctx, pod, metav1.CreateOptions{}) + require.NoError(t, err, "Failed to create Pod: %v", err) + + // Create an informer factory and add the deployment to the cache + informerFactory := informers.NewSharedInformerFactory(fakeClient, 0) + addTestObjects(t, fakeClient, informerFactory, testDeployment, replicaSet, pod) + + testDeployment.Status = v1.DeploymentStatus{ + Conditions: []v1.DeploymentCondition{ + { + Type: v1.DeploymentProgressing, + Status: corev1.ConditionFalse, + Reason: "NewReplicaSetAvailable", + Message: "Deployment has minimum availability", + }, + }, + } + + // Create a fake item and object + item := &unstructured.Unstructured{ + Object: map[string]interface{}{ + "metadata": map[string]interface{}{ + "name": "test-deployment", + "namespace": "test-namespace", + }, + }, + } + + // Create a done channel + doneCh := make(chan error, 1) + + // Call the checkDeploymentStatus function + handler := &kubernetesHandler{ + clientSet: fakeClient, + } + + ready := handler.checkDeploymentStatus(ctx, informerFactory, item, doneCh) + require.False(t, ready) +} + +func addTestObjects(t *testing.T, fakeClient *fake.Clientset, informerFactory informers.SharedInformerFactory, deployment *v1.Deployment, replicaSet *v1.ReplicaSet, pod *corev1.Pod) { + err := informerFactory.Apps().V1().Deployments().Informer().GetIndexer().Add(deployment) + require.NoError(t, err, "Failed to add deployment to informer cache") + err = informerFactory.Apps().V1().ReplicaSets().Informer().GetIndexer().Add(replicaSet) + require.NoError(t, err, "Failed to add replica set to informer cache") + err = informerFactory.Core().V1().Pods().Informer().GetIndexer().Add(pod) + require.NoError(t,
err, "Failed to add pod to informer cache") +} diff --git a/pkg/corerp/model/application_model.go b/pkg/corerp/model/application_model.go index bd86257eaf..3f74b837d3 100644 --- a/pkg/corerp/model/application_model.go +++ b/pkg/corerp/model/application_model.go @@ -36,7 +36,6 @@ import ( resources_kubernetes "github.com/radius-project/radius/pkg/ucp/resources/kubernetes" "k8s.io/client-go/discovery" - "k8s.io/client-go/dynamic" "k8s.io/client-go/kubernetes" "sigs.k8s.io/controller-runtime/pkg/client" ) @@ -49,7 +48,7 @@ const ( // NewApplicationModel configures RBAC support on connections based on connection kind, configures the providers supported by the appmodel, // registers the renderers and handlers for various resources, and checks for duplicate registrations. -func NewApplicationModel(arm *armauth.ArmConfig, k8sClient client.Client, k8sClientSet kubernetes.Interface, discoveryClient discovery.ServerResourcesInterface, k8sDynamicClientSet dynamic.Interface) (ApplicationModel, error) { +func NewApplicationModel(arm *armauth.ArmConfig, k8sClient client.Client, k8sClientSet kubernetes.Interface, discoveryClient discovery.ServerResourcesInterface) (ApplicationModel, error) { // Configure RBAC support on connections based connection kind. // Role names can be user input or default roles assigned by Radius. // Leave RoleNames field empty if no default roles are supported for a connection kind. @@ -117,7 +116,7 @@ func NewApplicationModel(arm *armauth.ArmConfig, k8sClient client.Client, k8sCli Type: AnyResourceType, Provider: resourcemodel.ProviderKubernetes, }, - ResourceHandler: handlers.NewKubernetesHandler(k8sClient, k8sClientSet, discoveryClient, k8sDynamicClientSet), + ResourceHandler: handlers.NewKubernetesHandler(k8sClient, k8sClientSet, discoveryClient), }, { ResourceType: resourcemodel.ResourceType{ @@ -125,7 +124,7 @@ func NewApplicationModel(arm *armauth.ArmConfig, k8sClient client.Client, k8sCli Provider: resourcemodel.ProviderKubernetes, }, ResourceTransformer: azcontainer.TransformSecretProviderClass, - ResourceHandler: handlers.NewKubernetesHandler(k8sClient, k8sClientSet, discoveryClient, k8sDynamicClientSet), + ResourceHandler: handlers.NewKubernetesHandler(k8sClient, k8sClientSet, discoveryClient), }, { ResourceType: resourcemodel.ResourceType{ @@ -133,7 +132,7 @@ func NewApplicationModel(arm *armauth.ArmConfig, k8sClient client.Client, k8sCli Provider: resourcemodel.ProviderKubernetes, }, ResourceTransformer: azcontainer.TransformFederatedIdentitySA, - ResourceHandler: handlers.NewKubernetesHandler(k8sClient, k8sClientSet, discoveryClient, k8sDynamicClientSet), + ResourceHandler: handlers.NewKubernetesHandler(k8sClient, k8sClientSet, discoveryClient), }, } diff --git a/pkg/corerp/renderers/gateway/render.go b/pkg/corerp/renderers/gateway/render.go index 2fe79fa094..8203328292 100644 --- a/pkg/corerp/renderers/gateway/render.go +++ b/pkg/corerp/renderers/gateway/render.go @@ -103,7 +103,7 @@ func (r Renderer) Render(ctx context.Context, dm v1.DataModelInterface, options publicEndpoint = getPublicEndpoint(hostname, options.Environment.Gateway.Port, isHttps) } - gatewayObject, err := MakeRootHTTPProxy(ctx, options, gateway, gateway.Name, applicationName, hostname) + gatewayObject, err := MakeGateway(ctx, options, gateway, gateway.Name, applicationName, hostname) if err != nil { return renderers.RendererOutput{}, err } @@ -116,7 +116,7 @@ func (r Renderer) Render(ctx context.Context, dm v1.DataModelInterface, options }, } - httpRouteObjects, err := MakeRoutesHTTPProxies(ctx, 
options, *gateway, &gateway.Properties, gatewayName, gatewayObject, applicationName) + httpRouteObjects, err := MakeHttpRoutes(ctx, options, *gateway, &gateway.Properties, gatewayName, applicationName) if err != nil { return renderers.RendererOutput{}, err } @@ -128,9 +128,9 @@ func (r Renderer) Render(ctx context.Context, dm v1.DataModelInterface, options }, nil } -// MakeRootHTTPProxy validates the Gateway resource and its dependencies, and creates a Contour HTTPProxy resource +// MakeGateway validates the Gateway resource and its dependencies, and creates a Contour HTTPProxy resource // to act as the Gateway. -func MakeRootHTTPProxy(ctx context.Context, options renderers.RenderOptions, gateway *datamodel.Gateway, resourceName string, applicationName string, hostname string) (rpv1.OutputResource, error) { +func MakeGateway(ctx context.Context, options renderers.RenderOptions, gateway *datamodel.Gateway, resourceName string, applicationName string, hostname string) (rpv1.OutputResource, error) { includes := []contourv1.Include{} dependencies := options.Dependencies @@ -292,9 +292,9 @@ func MakeRootHTTPProxy(ctx context.Context, options renderers.RenderOptions, gat return rpv1.NewKubernetesOutputResource(rpv1.LocalIDGateway, rootHTTPProxy, rootHTTPProxy.ObjectMeta), nil } -// MakeRoutesHTTPProxies creates HTTPProxy objects for each route in the gateway and returns them as OutputResources. It returns +// MakeHttpRoutes creates HTTPProxy objects for each route in the gateway and returns them as OutputResources. It returns // an error if it fails to get the route name. -func MakeRoutesHTTPProxies(ctx context.Context, options renderers.RenderOptions, resource datamodel.Gateway, gateway *datamodel.GatewayProperties, gatewayName string, gatewayOutPutResource rpv1.OutputResource, applicationName string) ([]rpv1.OutputResource, error) { +func MakeHttpRoutes(ctx context.Context, options renderers.RenderOptions, resource datamodel.Gateway, gateway *datamodel.GatewayProperties, gatewayName string, applicationName string) ([]rpv1.OutputResource, error) { dependencies := options.Dependencies objects := make(map[string]*contourv1.HTTPProxy) @@ -387,9 +387,6 @@ func MakeRoutesHTTPProxies(ctx context.Context, options renderers.RenderOptions, } objects[localID] = httpProxyObject - - // Add the route as a dependency of the root http proxy to ensure that the route is created before the root http proxy - gatewayOutPutResource.CreateResource.Dependencies = append(gatewayOutPutResource.CreateResource.Dependencies, localID) } var outputResources []rpv1.OutputResource diff --git a/pkg/corerp/renderers/gateway/render_test.go b/pkg/corerp/renderers/gateway/render_test.go index dab84c49f1..204f75b041 100644 --- a/pkg/corerp/renderers/gateway/render_test.go +++ b/pkg/corerp/renderers/gateway/render_test.go @@ -19,7 +19,6 @@ package gateway import ( "context" "fmt" - "strings" "testing" contourv1 "github.com/projectcontour/contour/apis/projectcontour/v1" @@ -1467,12 +1466,6 @@ func validateHTTPProxy(t *testing.T, outputResources []rpv1.OutputResource, expe httpProxy, httpProxyOutputResource := kubernetes.FindContourHTTPProxy(outputResources) expectedHTTPProxyOutputResource := rpv1.NewKubernetesOutputResource(rpv1.LocalIDGateway, httpProxy, httpProxy.ObjectMeta) - for _, r := range outputResources { - if strings.Contains(r.LocalID, rpv1.LocalIDHttpRoute) { - expectedHTTPProxyOutputResource.CreateResource.Dependencies = append(expectedHTTPProxyOutputResource.CreateResource.Dependencies, r.LocalID) - } - } - 
require.Equal(t, expectedHTTPProxyOutputResource, httpProxyOutputResource) require.Equal(t, kubernetes.NormalizeResourceName(resourceName), httpProxy.Name) require.Equal(t, applicationName, httpProxy.Namespace) diff --git a/test/functional/shared/resources/gateway_test.go b/test/functional/shared/resources/gateway_test.go index 87cca8e772..5a00d9d215 100644 --- a/test/functional/shared/resources/gateway_test.go +++ b/test/functional/shared/resources/gateway_test.go @@ -31,7 +31,6 @@ import ( "github.com/radius-project/radius/test/step" "github.com/radius-project/radius/test/validation" "github.com/stretchr/testify/require" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" ) const ( @@ -265,50 +264,6 @@ func Test_Gateway_TLSTermination(t *testing.T) { test.Test(t) } -func Test_Gateway_Failure(t *testing.T) { - template := "testdata/corerp-resources-gateway-failure.bicep" - name := "corerp-resources-gateway-failure" - secret := "secret" - - // We might see either of these states depending on the timing. - validateFn := step.ValidateAnyDetails("DeploymentFailed", []step.DeploymentErrorDetail{ - { - Code: "ResourceDeploymentFailure", - Details: []step.DeploymentErrorDetail{ - { - Code: "Internal", - MessageContains: "invalid TLS certificate", - }, - }, - }, - }) - - test := shared.NewRPTest(t, name, []shared.TestStep{ - { - Executor: step.NewDeployErrorExecutor(template, validateFn), - SkipObjectValidation: true, - SkipKubernetesOutputResourceValidation: true, - }, - }, - unstructured.Unstructured{ - Object: map[string]interface{}{ - "apiVersion": "v1", - "kind": "Secret", - "metadata": map[string]interface{}{ - "name": secret, - "namespace": "mynamespace", - }, - "type": "Opaque", - "data": map[string]interface{}{ - "tls.crt": "", - "tls.key": "", - }, - }, - }) - - test.Test(t) -} - func testGatewayWithPortForward(t *testing.T, ctx context.Context, at shared.RPTest, hostname string, remotePort int, isHttps bool, tests []GatewayTestConfig) error { // stopChan will close the port-forward connection on close stopChan := make(chan struct{}) diff --git a/test/functional/shared/resources/testdata/corerp-resources-gateway-failure.bicep b/test/functional/shared/resources/testdata/corerp-resources-gateway-failure.bicep deleted file mode 100644 index a2d04fe3b7..0000000000 --- a/test/functional/shared/resources/testdata/corerp-resources-gateway-failure.bicep +++ /dev/null @@ -1,47 +0,0 @@ -import radius as radius - -@description('ID of the Radius environment. Passed in automatically via the rad CLI') -param environment string - -resource demoApplication 'Applications.Core/applications@2022-03-15-privatepreview' = { - name: 'corerp-resources-gateway-failure-app' - properties: { - environment: environment - } -} - -resource demoSecretStore 'Applications.Core/secretStores@2022-03-15-privatepreview' = { - name: 'corerp-resources-gateway-failure-secretstore' - properties: { - application: demoApplication.id - type: 'certificate' - - // Reference the existing mynamespace/secret Kubernetes secret - resource: 'mynamespace/secret' - data: { - // Make the tls.crt and tls.key secrets available to the application - 'tls.crt': {} - 'tls.key': {} - } - } -} - -resource demoGateway 'Applications.Core/gateways@2022-03-15-privatepreview' = { - name: 'corerp-resources-gateway-failure-gateway' - properties: { - application: demoApplication.id - hostname: { - fullyQualifiedHostname: 'a.example.com' // Replace with your domain name. 
- } - routes: [ - { - path: '/' - destination: 'http://demo-container:3000' - } - ] - tls: { - certificateFrom: demoSecretStore.id - minimumProtocolVersion: '1.2' - } - } -} diff --git a/test/functional/shared/rptest.go b/test/functional/shared/rptest.go index 42a2865532..0f93500c4e 100644 --- a/test/functional/shared/rptest.go +++ b/test/functional/shared/rptest.go @@ -163,9 +163,6 @@ func (ct RPTest) CreateInitialResources(ctx context.Context) error { } for _, r := range ct.InitialResources { - if err := kubernetes.EnsureNamespace(ctx, ct.Options.K8sClient, r.GetNamespace()); err != nil { - return fmt.Errorf("failed to create namespace %s: %w", ct.Name, err) - } if err := ct.Options.Client.Create(ctx, &r); err != nil { return fmt.Errorf("failed to create resource %#v: %w", r, err) }