diff --git a/controllers/common_controller_utils.go b/controllers/common_controller_utils.go index dec50526..c208b4d6 100644 --- a/controllers/common_controller_utils.go +++ b/controllers/common_controller_utils.go @@ -5,6 +5,7 @@ import ( "encoding/json" "errors" "fmt" + "log/slog" "os" "reflect" @@ -17,7 +18,6 @@ import ( "k8s.io/apimachinery/pkg/types" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" - "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/yaml" ) @@ -26,61 +26,76 @@ import ( // If a list of MirrorPeer objects are provided, it will check // the mapping from the provided MirrorPeers. // If no MirrorPeers are provided, it will fetch all the MirrorPeers in the HUB. -func createOrUpdateDestinationSecretsFromSource(ctx context.Context, rc client.Client, sourceSecret *corev1.Secret, mirrorPeers ...multiclusterv1alpha1.MirrorPeer) error { - logger := log.FromContext(ctx) +func createOrUpdateDestinationSecretsFromSource(ctx context.Context, rc client.Client, sourceSecret *corev1.Secret, logger *slog.Logger, mirrorPeers ...multiclusterv1alpha1.MirrorPeer) error { + logger.Info("Validating source secret", "Secret", sourceSecret.Name, "Namespace", sourceSecret.Namespace) + err := utils.ValidateSourceSecret(sourceSecret) if err != nil { - logger.Error(err, "Updating secrets failed. Invalid secret type.", "secret", sourceSecret.Name, "namespace", sourceSecret.Namespace) + logger.Error("Updating secrets failed due to invalid source secret type", "error", err, "Secret", sourceSecret.Name, "Namespace", sourceSecret.Namespace) return err } if mirrorPeers == nil { + logger.Info("Fetching all MirrorPeer objects as none were provided") mirrorPeers, err = utils.FetchAllMirrorPeers(ctx, rc) if err != nil { - logger.Error(err, "Unable to get the list of MirrorPeer objects") + logger.Error("Unable to retrieve the list of MirrorPeer objects", "error", err) return err } - logger.V(2).Info("Successfully got the list of MirrorPeers", "MirrorPeerListObj", mirrorPeers) + logger.Info("Successfully retrieved the list of MirrorPeers", "Count", len(mirrorPeers)) } + logger.Info("Determining peers connected to the source secret", "Secret", sourceSecret.Name) uniqueConnectedPeers, err := PeersConnectedToSecret(sourceSecret, mirrorPeers) if err != nil { - logger.Error(err, "ConnectedPeers returned an error", "secret", sourceSecret.Name, "namespace", sourceSecret.Namespace, "mirrorpeers", mirrorPeers) + logger.Error("Error determining connected peers for the source secret", "error", err, "Secret", sourceSecret.Name, "Namespace", sourceSecret.Namespace) return err } - logger.V(2).Info("Listing all the Peers connected to the Source", "SourceSecret", sourceSecret.Name, "namespace", sourceSecret.Namespace, "connected-peers-length", len(uniqueConnectedPeers)) + logger.Info("Peers connected to the source secret identified", "ConnectedPeersCount", len(uniqueConnectedPeers)) - // anyErr will have the last found error var anyErr error for _, eachConnectedPeer := range uniqueConnectedPeers { + logger.Info("Creating or updating destination secret for peer", "PeerRef", eachConnectedPeer) namedPeerRef := NewNamedPeerRefWithSecretData(sourceSecret, eachConnectedPeer) - err := namedPeerRef.CreateOrUpdateDestinationSecret(ctx, rc) + err := namedPeerRef.CreateOrUpdateDestinationSecret(ctx, rc, logger) if err != nil { - logger.Error(err, "Unable to update the destination secret", "secret", sourceSecret.Name, "namespace", sourceSecret.Namespace, "PeerRef", 
eachConnectedPeer) + logger.Error("Failed to create or update destination secret", "error", err, "SourceSecret", sourceSecret.Name, "DestinationPeerRef", eachConnectedPeer) anyErr = err } } + if anyErr != nil { + logger.Error("One or more errors occurred while updating destination secrets", "lastError", anyErr) + } else { + logger.Info("All destination secrets updated successfully") + } + return anyErr } -func processDestinationSecretUpdation(ctx context.Context, rc client.Client, destSecret *corev1.Secret) error { - logger := log.FromContext(ctx) +func processDestinationSecretUpdation(ctx context.Context, rc client.Client, destSecret *corev1.Secret, logger *slog.Logger) error { + logger.Info("Validating destination secret", "Secret", destSecret.Name, "Namespace", destSecret.Namespace) + err := utils.ValidateDestinationSecret(destSecret) if err != nil { - logger.Error(err, "Destination secret validation failed", "secret", destSecret.Name, "namespace", destSecret.Namespace) + logger.Error("Destination secret validation failed", "error", err, "Secret", destSecret.Name, "Namespace", destSecret.Namespace) return err } + + logger.Info("Fetching all MirrorPeers for secret update", "Secret", destSecret.Name) mirrorPeers, err := utils.FetchAllMirrorPeers(ctx, rc) if err != nil { - logger.Error(err, "Failed to get the list of MirrorPeer objects") + logger.Error("Failed to get the list of MirrorPeer objects", "error", err) return err } + + logger.Info("Determining peers connected to the destination secret", "Secret", destSecret.Name) uniqueConnectedPeers, err := PeersConnectedToSecret(destSecret, mirrorPeers) if err != nil { - logger.Error(err, "Failed to get the peers connected to the secret", "secret", destSecret.Name, "namespace", destSecret.Namespace) + logger.Error("Failed to get the peers connected to the secret", "error", err, "Secret", destSecret.Name, "Namespace", destSecret.Namespace) return err } + var connectedSource *corev1.Secret for _, eachConnectedPeer := range uniqueConnectedPeers { var connectedSecret corev1.Secret @@ -88,48 +103,66 @@ func processDestinationSecretUpdation(ctx context.Context, rc client.Client, des err := nPeerRef.GetAssociatedSecret(ctx, rc, &connectedSecret) if err != nil { if k8serrors.IsNotFound(err) { + logger.Info("Associated source secret not found, skipping", "PeerRef", eachConnectedPeer, "Secret", destSecret.Name) continue } - logger.Error(err, "Unexpected error while finding the source secret", "peer-ref", eachConnectedPeer, "secret", destSecret.Name, "namespace", destSecret.Namespace) + logger.Error("Unexpected error while finding the source secret", "error", err, "PeerRef", eachConnectedPeer, "Secret", destSecret.Name, "Namespace", destSecret.Namespace) return err } if utils.IsSecretSource(&connectedSecret) { - connectedSource = connectedSecret.DeepCopy() + connectedSource = &connectedSecret break } } if connectedSource == nil { - logger.Error(nil, "No connected source found. Removing the dangling destination secret", "secret", destSecret.Name, "namespace", destSecret.Namespace) + logger.Info("No connected source found, potentially removing the dangling destination secret", "Secret", destSecret.Name, "Namespace", destSecret.Namespace) err = rc.Delete(ctx, destSecret) + if err != nil { + logger.Error("Failed to delete the dangling destination secret", "error", err, "Secret", destSecret.Name, "Namespace", destSecret.Namespace) + } return err } - err = createOrUpdateDestinationSecretsFromSource(ctx, rc, connectedSource, mirrorPeers...) 
+ + logger.Info("Updating destination secret from the connected source", "SourceSecret", connectedSource.Name, "DestinationSecret", destSecret.Name) + err = createOrUpdateDestinationSecretsFromSource(ctx, rc, connectedSource, logger, mirrorPeers...) + if err != nil { + logger.Error("Failed to update destination secret from the source", "error", err, "SourceSecret", connectedSource.Name, "DestinationSecret", destSecret.Name) + } + return err } -func processDestinationSecretCleanup(ctx context.Context, rc client.Client) error { - logger := log.FromContext(ctx) +func processDestinationSecretCleanup(ctx context.Context, rc client.Client, logger *slog.Logger) error { allDestinationSecrets, err := fetchAllDestinationSecrets(ctx, rc, "") if err != nil { - logger.Error(err, "Unable to get all the destination secrets") + logger.Error("Unable to fetch all destination secrets", "error", err) return err } + var anyError error for _, eachDSecret := range allDestinationSecrets { - err = processDestinationSecretUpdation(ctx, rc, &eachDSecret) + logger.Info("Processing update for destination secret", "SecretName", eachDSecret.Name, "Namespace", eachDSecret.Namespace) + err = processDestinationSecretUpdation(ctx, rc, &eachDSecret, logger) if err != nil { - anyError = err - logger.Error(err, "Failed to update destination secret", "secret", eachDSecret.Name, "namespace", eachDSecret.Namespace) + logger.Error("Failed to update destination secret", "error", err, "SecretName", eachDSecret.Name, "Namespace", eachDSecret.Namespace) + if anyError == nil { + anyError = err + } } } + + if anyError != nil { + logger.Error("One or more errors occurred during the cleanup of destination secrets", "error", anyError) + } else { + logger.Info("All destination secrets processed successfully") + } + return anyError } -func createOrUpdateRamenS3Secret(ctx context.Context, rc client.Client, secret *corev1.Secret, data map[string][]byte, ramenHubNamespace string) error { - logger := log.FromContext(ctx) +func createOrUpdateRamenS3Secret(ctx context.Context, rc client.Client, secret *corev1.Secret, data map[string][]byte, ramenHubNamespace string, logger *slog.Logger) error { - // convert aws s3 secret from s3 origin secret into ramen secret expectedSecret := corev1.Secret{ ObjectMeta: metav1.ObjectMeta{ Name: secret.Name, @@ -150,33 +183,40 @@ func createOrUpdateRamenS3Secret(ctx context.Context, rc client.Client, secret * Name: secret.Name, Namespace: ramenHubNamespace, } + err := rc.Get(ctx, namespacedName, &localSecret) if err != nil { if k8serrors.IsNotFound(err) { - // creating new s3 secret on ramen openshift-dr-system namespace - logger.Info("Creating a s3 secret", "secret", expectedSecret.Name, "namespace", expectedSecret.Namespace) - return rc.Create(ctx, &expectedSecret) + logger.Info("Creating a new S3 secret", "SecretName", expectedSecret.Name, "Namespace", expectedSecret.Namespace) + if createErr := rc.Create(ctx, &expectedSecret); createErr != nil { + logger.Error("Failed to create the S3 secret", "error", createErr, "SecretName", expectedSecret.Name, "Namespace", ramenHubNamespace) + return createErr + } + return nil } - logger.Error(err, "unable to fetch the s3 secret", "secret", secret.Name, "namespace", ramenHubNamespace) + logger.Error("Failed to fetch the S3 secret", "error", err, "SecretName", secret.Name, "Namespace", ramenHubNamespace) return err } if !reflect.DeepEqual(expectedSecret.Data, localSecret.Data) { - // updating existing s3 secret on ramen openshift-dr-system namespace - logger.Info("Updating 
the s3 secret", "secret", expectedSecret.Name, "namespace", ramenHubNamespace) - _, err := controllerutil.CreateOrUpdate(ctx, rc, &localSecret, func() error { + logger.Info("Updating the existing S3 secret", "SecretName", expectedSecret.Name, "Namespace", ramenHubNamespace) + _, updateErr := controllerutil.CreateOrUpdate(ctx, rc, &localSecret, func() error { localSecret.Data = expectedSecret.Data return nil }) - return err + if updateErr != nil { + logger.Error("Failed to update the S3 secret", "error", updateErr, "SecretName", expectedSecret.Name, "Namespace", ramenHubNamespace) + return updateErr + } + } else { + logger.Info("No updates required for the S3 secret", "SecretName", expectedSecret.Name, "Namespace", ramenHubNamespace) } - // no changes return nil } -func createOrUpdateExternalSecret(ctx context.Context, rc client.Client, secret *corev1.Secret, data map[string][]byte, namespace string) error { - logger := log.FromContext(ctx) +func createOrUpdateExternalSecret(ctx context.Context, rc client.Client, secret *corev1.Secret, data map[string][]byte, namespace string, logger *slog.Logger) error { + logger.Info("Processing external secret", "SecretName", secret.Name, "Namespace", namespace) expectedSecret := corev1.Secret{ ObjectMeta: metav1.ObjectMeta{ @@ -188,7 +228,6 @@ func createOrUpdateExternalSecret(ctx context.Context, rc client.Client, secret }, Type: corev1.SecretTypeOpaque, Data: map[string][]byte{ - // May add more parameters for external mode "fsid": data["fsid"], }, } @@ -198,25 +237,34 @@ func createOrUpdateExternalSecret(ctx context.Context, rc client.Client, secret Name: secret.Name, Namespace: namespace, } + err := rc.Get(ctx, namespacedName, &localSecret) if err != nil { if k8serrors.IsNotFound(err) { - logger.Info("Creating a external; secret", "secret", expectedSecret.Name, "namespace", expectedSecret.Namespace) - return rc.Create(ctx, &expectedSecret) + logger.Info("External secret not found, creating new one", "SecretName", expectedSecret.Name, "Namespace", namespace) + if createErr := rc.Create(ctx, &expectedSecret); createErr != nil { + logger.Error("Failed to create the external secret", "error", createErr, "SecretName", expectedSecret.Name, "Namespace", namespace) + return createErr + } + return nil } - logger.Error(err, "unable to fetch the external secret", "secret", secret.Name, "namespace", namespace) + logger.Error("Failed to fetch the external secret", "error", err, "SecretName", secret.Name, "Namespace", namespace) return err } if !reflect.DeepEqual(expectedSecret.Data, localSecret.Data) { - logger.Info("Updating the external secret", "secret", expectedSecret.Name, "namespace", namespace) - _, err := controllerutil.CreateOrUpdate(ctx, rc, &localSecret, func() error { + logger.Info("Data mismatch found, updating external secret", "SecretName", expectedSecret.Name, "Namespace", namespace) + _, updateErr := controllerutil.CreateOrUpdate(ctx, rc, &localSecret, func() error { localSecret.Data = expectedSecret.Data return nil }) - return err + if updateErr != nil { + logger.Error("Failed to update the external secret", "error", updateErr, "SecretName", expectedSecret.Name, "Namespace", namespace) + } + return updateErr } + logger.Info("No updates required for the external secret", "SecretName", expectedSecret.Name, "Namespace", namespace) return nil } @@ -264,41 +312,38 @@ func areS3ProfileFieldsEqual(expected rmn.S3StoreProfile, found rmn.S3StoreProfi return true } -func updateRamenHubOperatorConfig(ctx context.Context, rc client.Client, secret 
*corev1.Secret, data map[string][]byte, mirrorPeers []multiclusterv1alpha1.MirrorPeer, ramenHubNamespace string) error { - logger := log.FromContext(ctx) +func updateRamenHubOperatorConfig(ctx context.Context, rc client.Client, secret *corev1.Secret, data map[string][]byte, mirrorPeers []multiclusterv1alpha1.MirrorPeer, ramenHubNamespace string, logger *slog.Logger) error { + logger.Info("Starting to update Ramen Hub Operator config", "SecretName", secret.Name, "Namespace", secret.Namespace) clusterPeerRef, err := utils.CreatePeerRefFromSecret(secret) if err != nil { - logger.Error(err, "unable to create peerref", "secret", secret.Name, "namespace", secret.Namespace) + logger.Error("Failed to create peer reference from secret", "error", err, "SecretName", secret.Name, "Namespace", secret.Namespace) return err } + if mirrorPeers == nil { mirrorPeers, err = utils.FetchAllMirrorPeers(ctx, rc) - } - if err != nil { - logger.Error(err, "unable to get the list of MirrorPeer objects") - return err + if err != nil { + logger.Error("Failed to fetch all MirrorPeers", "error", err) + return err + } } - // filter mirror peer using clusterPeerRef if !isS3ProfileManagedPeerRef(clusterPeerRef, mirrorPeers) { - // ManageS3 is disabled on MirrorPeer spec, skip ramen hub operator config update + logger.Info("Manage S3 is disabled on MirrorPeer spec, skipping update", "PeerRef", clusterPeerRef) return nil } - // converting s3 bucket config into ramen s3 profile expectedS3Profile := rmn.S3StoreProfile{ S3ProfileName: string(data[utils.S3ProfileName]), S3Bucket: string(data[utils.S3BucketName]), S3Region: string(data[utils.S3Region]), S3CompatibleEndpoint: string(data[utils.S3Endpoint]), - // referenceing ramen secret S3SecretRef: corev1.SecretReference{ Name: secret.Name, }, } - // fetch ramen hub operator configmap currentRamenConfigMap := corev1.ConfigMap{} namespacedName := types.NamespacedName{ Name: utils.RamenHubOperatorConfigName, @@ -306,99 +351,99 @@ func updateRamenHubOperatorConfig(ctx context.Context, rc client.Client, secret } err = rc.Get(ctx, namespacedName, ¤tRamenConfigMap) if err != nil { - logger.Error(err, "unable to fetch DR hub operator config", "config", utils.RamenHubOperatorConfigName, "namespace", ramenHubNamespace) + logger.Error("Failed to fetch Ramen Hub Operator config map", "error", err, "ConfigMapName", namespacedName) return err } - // extract ramen manager config str from configmap ramenConfigData, ok := currentRamenConfigMap.Data["ramen_manager_config.yaml"] if !ok { - return fmt.Errorf("DR hub operator config data is empty for the config %q in namespace %q", utils.RamenHubOperatorConfigName, ramenHubNamespace) + err = fmt.Errorf("DR hub operator config data is empty for the config %q in namespace %q", utils.RamenHubOperatorConfigName, ramenHubNamespace) + logger.Error("DR hub operator config data is missing", "error", err) + return err } - // converting ramen manager config str into RamenConfig ramenConfig := rmn.RamenConfig{} err = yaml.Unmarshal([]byte(ramenConfigData), &ramenConfig) if err != nil { - logger.Error(err, "failed to unmarshal DR hub operator config data", "config", utils.RamenHubOperatorConfigName, "namespace", ramenHubNamespace) + logger.Error("Failed to unmarshal DR hub operator config data", "error", err) return err } isUpdated := false for i, currentS3Profile := range ramenConfig.S3StoreProfiles { if currentS3Profile.S3ProfileName == expectedS3Profile.S3ProfileName { - if areS3ProfileFieldsEqual(expectedS3Profile, currentS3Profile) { - // no change 
detected on already exiting s3 profile in RamenConfig + logger.Info("No change detected in S3 profile, skipping update", "S3ProfileName", expectedS3Profile.S3ProfileName) return nil } - // changes deducted on existing s3 profile updateS3ProfileFields(&expectedS3Profile, &ramenConfig.S3StoreProfiles[i]) isUpdated = true + logger.Info("S3 profile updated", "S3ProfileName", expectedS3Profile.S3ProfileName) break } } if !isUpdated { - // new s3 profile is deducted ramenConfig.S3StoreProfiles = append(ramenConfig.S3StoreProfiles, expectedS3Profile) + logger.Info("New S3 profile added", "S3ProfileName", expectedS3Profile.S3ProfileName) } - // converting RamenConfig into ramen manager config str ramenConfigDataStr, err := yaml.Marshal(ramenConfig) if err != nil { - logger.Error(err, "failed to marshal DR hub operator config data", "config", utils.RamenHubOperatorConfigName, "namespace", ramenHubNamespace) + logger.Error("Failed to marshal Ramen config", "error", err) return err } - // update ramen hub operator configmap - logger.Info("Updating DR hub operator config with S3 profile", utils.RamenHubOperatorConfigName, expectedS3Profile.S3ProfileName) _, err = controllerutil.CreateOrUpdate(ctx, rc, ¤tRamenConfigMap, func() error { - // attach ramen manager config str into configmap currentRamenConfigMap.Data["ramen_manager_config.yaml"] = string(ramenConfigDataStr) return nil }) + if err != nil { + logger.Error("Failed to update Ramen Hub Operator config map", "error", err) + return err + } - return err + logger.Info("Ramen Hub Operator config updated successfully", "ConfigMapName", namespacedName) + return nil } -func createOrUpdateSecretsFromInternalSecret(ctx context.Context, rc client.Client, secret *corev1.Secret, mirrorPeers []multiclusterv1alpha1.MirrorPeer) error { - logger := log.FromContext(ctx) +func createOrUpdateSecretsFromInternalSecret(ctx context.Context, rc client.Client, secret *corev1.Secret, mirrorPeers []multiclusterv1alpha1.MirrorPeer, logger *slog.Logger) error { + logger.Info("Validating internal secret", "SecretName", secret.Name, "Namespace", secret.Namespace) if err := utils.ValidateInternalSecret(secret, utils.InternalLabel); err != nil { - logger.Error(err, "Provided internal secret is not valid", "secret", secret.Name, "namespace", secret.Namespace) + logger.Error("Provided internal secret is not valid", "error", err, "SecretName", secret.Name, "Namespace", secret.Namespace) return err } - data := make(map[string][]byte) - err := json.Unmarshal(secret.Data[utils.SecretDataKey], &data) - if err != nil { - logger.Error(err, "failed to unmarshal secret data", "secret", secret.Name, "namespace", secret.Namespace) + var data map[string][]byte + if err := json.Unmarshal(secret.Data[utils.SecretDataKey], &data); err != nil { + logger.Error("Failed to unmarshal secret data", "error", err, "SecretName", secret.Name, "Namespace", secret.Namespace) return err } - if string(secret.Data[utils.SecretOriginKey]) == utils.OriginMap["S3Origin"] { + secretOrigin := string(secret.Data[utils.SecretOriginKey]) + logger.Info("Processing secret based on origin", "Origin", secretOrigin, "SecretName", secret.Name) + + if secretOrigin == utils.OriginMap["S3Origin"] { if ok := utils.ValidateS3Secret(data); !ok { - return fmt.Errorf("invalid S3 secret format for secret name %q in namesapce %q", secret.Name, secret.Namespace) + err := fmt.Errorf("invalid S3 secret format for secret name %q in namespace %q", secret.Name, secret.Namespace) + logger.Error("Invalid S3 secret format", "error", err, 
"SecretName", secret.Name, "Namespace", secret.Namespace) + return err } currentNamespace := os.Getenv("POD_NAMESPACE") - // S3 origin secret has two part 1. s3 bucket secret 2. s3 bucket config - // create ramen s3 secret using s3 bucket secret - err := createOrUpdateRamenS3Secret(ctx, rc, secret, data, currentNamespace) - if err != nil { + if err := createOrUpdateRamenS3Secret(ctx, rc, secret, data, currentNamespace, logger); err != nil { + logger.Error("Failed to create or update Ramen S3 secret", "error", err, "SecretName", secret.Name, "Namespace", currentNamespace) return err } - // update ramen hub operator config using s3 bucket config and ramen s3 secret reference - err = updateRamenHubOperatorConfig(ctx, rc, secret, data, mirrorPeers, currentNamespace) - if err != nil { + if err := updateRamenHubOperatorConfig(ctx, rc, secret, data, mirrorPeers, currentNamespace, logger); err != nil { + logger.Error("Failed to update Ramen Hub Operator config", "error", err, "SecretName", secret.Name, "Namespace", currentNamespace) return err } - } - if string(secret.Data[utils.SecretOriginKey]) == utils.OriginMap["RookOrigin"] { + } else if secretOrigin == utils.OriginMap["RookOrigin"] { currentNamespace := os.Getenv("POD_NAMESPACE") - err := createOrUpdateExternalSecret(ctx, rc, secret, data, currentNamespace) - if err != nil { + if err := createOrUpdateExternalSecret(ctx, rc, secret, data, currentNamespace, logger); err != nil { + logger.Error("Failed to create or update external secret", "error", err, "SecretName", secret.Name, "Namespace", currentNamespace) return err } } @@ -408,30 +453,26 @@ func createOrUpdateSecretsFromInternalSecret(ctx context.Context, rc client.Clie // processDeletedSecrets finds out which type of secret is deleted // and do appropriate action -func processDeletedSecrets(ctx context.Context, rc client.Client, req types.NamespacedName) error { - var err error - logger := log.FromContext(ctx, "controller", "MirrorPeerController") - // get all the secrets with the same name +func processDeletedSecrets(ctx context.Context, rc client.Client, req types.NamespacedName, logger *slog.Logger) error { + logger.Info("Processing deleted secrets", "SecretName", req.Name, "Namespace", req.Namespace) + + // Fetch all secrets with a specific label, excluding those with the IgnoreLabel allSecrets, err := utils.FetchAllSecretsWithLabel(ctx, rc, "", utils.IgnoreLabel) if err != nil { - logger.Error(err, "Unable to get the list of secrets") + logger.Error("Failed to fetch secrets", "error", err) return err } - // sameNamedDestinationSecrets will collect the same named destination secrets + var sameNamedDestinationSecrets []corev1.Secret - // sourceSecretPointer will point to the source secret var sourceSecretPointer *corev1.Secret - // append all the secrets which have the same requested name + + // Filter secrets by name and type for _, eachSecret := range allSecrets { if eachSecret.Name == req.Name { - // check similarly named secret is present (or not) - if secretType := eachSecret.Labels[utils.SecretLabelTypeKey]; secretType == string(utils.SourceLabel) { - // if 'sourceSecretPointer' already points to a source secret, - // it is an error. We should not have TWO source - // secrets of same name. 
+ if eachSecret.Labels[utils.SecretLabelTypeKey] == string(utils.SourceLabel) { if sourceSecretPointer != nil { - err = errors.New("multiple source secrets detected") - logger.Error(err, "Cannot have more than one source secrets with the same name", "request", req, "source-secret", sourceSecretPointer.Name, "namespace", sourceSecretPointer.Namespace) + err = errors.New("multiple source secrets detected with the same name") + logger.Error("Multiple source secrets found", "error", err, "SecretName", req.Name, "Namespace", req.Namespace) return err } sourceSecretPointer = eachSecret.DeepCopy() @@ -441,37 +482,26 @@ func processDeletedSecrets(ctx context.Context, rc client.Client, req types.Name } } - logger.V(2).Info("List of secrets with requested name", "secret-name", req.Name, "secret-length", len(sameNamedDestinationSecrets)) - if sourceSecretPointer == nil { - // if there is neither source secret nor any other similarly named secrets, - // that means all 'req.Name'-ed secrets are cleaned up and nothing to be done if len(sameNamedDestinationSecrets) == 0 { + logger.Info("No related secrets need cleaning", "SecretName", req.Name) return nil } - logger.Info("A SOURCE secret deletion detected", "secret-name", req.Name, "namespace", req.Namespace) + logger.Info("Source secret deletion detected, cleaning up destination secrets", "SecretName", req.Name) var anyErr error - // if source secret is not present, remove all the destinations|GREENs for _, eachDestSecret := range sameNamedDestinationSecrets { err = rc.Delete(ctx, &eachDestSecret) if err != nil { - logger.Error(err, "Deletion failed", "secret", eachDestSecret.Name, "namespace", eachDestSecret.Namespace) + logger.Error("Failed to delete destination secret", "error", err, "SecretName", eachDestSecret.Name, "Namespace", eachDestSecret.Namespace) anyErr = err } } - // if any error has happened, - // we will return the last found error - if anyErr != nil { - return anyErr - } + return anyErr } else { - logger.Info("A DESTINATION secret deletion detected", "secret-name", req.Name, "namespace", req.Namespace) - // in this section, one of the destination is removed - // action: use the source secret pointed by 'sourceSecretPointer' - // and restore the missing destination secret - err = createOrUpdateDestinationSecretsFromSource(ctx, rc, sourceSecretPointer) + logger.Info("Destination secret deletion detected, attempting to restore", "SecretName", req.Name) + err = createOrUpdateDestinationSecretsFromSource(ctx, rc, sourceSecretPointer, logger) if err != nil { - logger.Error(err, "Unable to update the destination secret", "source-secret", sourceSecretPointer.Name, "namespace", sourceSecretPointer.Namespace) + logger.Error("Failed to update destination secret from source", "error", err, "SourceSecretName", sourceSecretPointer.Name, "Namespace", sourceSecretPointer.Namespace) return err } } diff --git a/controllers/drpolicy_controller.go b/controllers/drpolicy_controller.go index 836be79c..3c0116d4 100644 --- a/controllers/drpolicy_controller.go +++ b/controllers/drpolicy_controller.go @@ -4,6 +4,7 @@ import ( "context" "encoding/json" "fmt" + "log/slog" "time" "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" @@ -12,13 +13,11 @@ import ( ramenv1alpha1 "github.com/ramendr/ramen/api/v1alpha1" multiclusterv1alpha1 "github.com/red-hat-storage/odf-multicluster-orchestrator/api/v1alpha1" "github.com/red-hat-storage/odf-multicluster-orchestrator/controllers/utils" - "k8s.io/apimachinery/pkg/api/errors" k8serrors 
"k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/types" - "k8s.io/klog/v2" workv1 "open-cluster-management.io/api/work/v1" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/builder" @@ -46,87 +45,110 @@ const ( type DRPolicyReconciler struct { HubClient client.Client Scheme *runtime.Scheme + Logger *slog.Logger } // SetupWithManager sets up the controller with the Manager. func (r *DRPolicyReconciler) SetupWithManager(mgr ctrl.Manager) error { + r.Logger.Info("Setting up DRPolicyReconciler with manager") + dpPredicate := utils.ComposePredicates(predicate.GenerationChangedPredicate{}) - return ctrl.NewControllerManagedBy(mgr). + err := ctrl.NewControllerManagedBy(mgr). For(&ramenv1alpha1.DRPolicy{}, builder.WithPredicates(dpPredicate)). Complete(r) + + if err != nil { + r.Logger.Error("Failed to set up DRPolicyReconciler with manager", "error", err) + return err + } + + r.Logger.Info("Successfully set up DRPolicyReconciler with manager") + return nil } func (r *DRPolicyReconciler) getMirrorPeerForClusterSet(ctx context.Context, clusterSet []string) (*multiclusterv1alpha1.MirrorPeer, error) { + logger := r.Logger.With("method", "getMirrorPeerForClusterSet", "ClusterSet", clusterSet) + var mpList multiclusterv1alpha1.MirrorPeerList err := r.HubClient.List(ctx, &mpList) if err != nil { - klog.Error("could not list mirrorpeers on hub") + logger.Error("Could not list MirrorPeers on hub", "error", err) return nil, err } if len(mpList.Items) == 0 { - klog.Info("no mirrorpeers found on hub yet") + logger.Info("No MirrorPeers found on hub yet") return nil, k8serrors.NewNotFound(schema.GroupResource{Group: multiclusterv1alpha1.GroupVersion.Group, Resource: "MirrorPeer"}, "MirrorPeerList") } + for _, mp := range mpList.Items { if (mp.Spec.Items[0].ClusterName == clusterSet[0] && mp.Spec.Items[1].ClusterName == clusterSet[1]) || (mp.Spec.Items[1].ClusterName == clusterSet[0] && mp.Spec.Items[0].ClusterName == clusterSet[1]) { - klog.Infof("found mirrorpeer %q for drpolicy", mp.Name) + logger.Info("Found MirrorPeer for DRPolicy", "MirrorPeerName", mp.Name) return &mp, nil } } - klog.Info("could not find any mirrorpeer for drpolicy") + logger.Info("Could not find any MirrorPeer for DRPolicy") return nil, k8serrors.NewNotFound(schema.GroupResource{Group: multiclusterv1alpha1.GroupVersion.Group, Resource: "MirrorPeer"}, fmt.Sprintf("ClusterSet-%s-%s", clusterSet[0], clusterSet[1])) } + func (r *DRPolicyReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { - klog.Infof("running DRPolicy reconciler on hub cluster") - // Fetch DRPolicy for given Request + r.Logger.Info("Running DRPolicy reconciler on hub cluster", "RequestNamespace", req.Namespace, "RequestName", req.Name) + + // Fetch DRPolicy for the given request var drpolicy ramenv1alpha1.DRPolicy err := r.HubClient.Get(ctx, req.NamespacedName, &drpolicy) if err != nil { - if errors.IsNotFound(err) { - klog.Info("Could not find DRPolicy. Ignoring since object must have been deleted") + if k8serrors.IsNotFound(err) { + r.Logger.Info("DRPolicy not found. 
Ignoring since the object must have been deleted", "RequestNamespace", req.Namespace, "RequestName", req.Name) return ctrl.Result{}, nil } - klog.Error(err, "Failed to get DRPolicy") + r.Logger.Error("Failed to get DRPolicy", "error", err, "RequestNamespace", req.Namespace, "RequestName", req.Name) return ctrl.Result{}, err } - // find mirrorpeer for clusterset for the storagecluster namespaces + // Find MirrorPeer for clusterset for the storagecluster namespaces mirrorPeer, err := r.getMirrorPeerForClusterSet(ctx, drpolicy.Spec.DRClusters) if err != nil { if k8serrors.IsNotFound(err) { + r.Logger.Info("MirrorPeer not found. Requeuing", "DRClusters", drpolicy.Spec.DRClusters) return ctrl.Result{RequeueAfter: time.Second * 10}, nil } - klog.Error("error occurred while trying to fetch MirrorPeer for given DRPolicy") + r.Logger.Error("Error occurred while trying to fetch MirrorPeer for given DRPolicy", "error", err) return ctrl.Result{}, err } if mirrorPeer.Spec.Type == multiclusterv1alpha1.Async { clusterFSIDs := make(map[string]string) - klog.Infof("Fetching clusterFSIDs") + r.Logger.Info("Fetching cluster FSIDs") err = r.fetchClusterFSIDs(ctx, mirrorPeer, clusterFSIDs) if err != nil { - if errors.IsNotFound(err) { + if k8serrors.IsNotFound(err) { + r.Logger.Info("Cluster FSIDs not found, requeuing") return ctrl.Result{RequeueAfter: 10 * time.Second}, nil } - return ctrl.Result{}, fmt.Errorf("an unknown error occured while fetching the cluster fsids, retrying again: %v", err) + r.Logger.Error("An unknown error occurred while fetching the cluster FSIDs, retrying", "error", err) + return ctrl.Result{}, fmt.Errorf("an unknown error occurred while fetching the cluster FSIDs, retrying: %v", err) } err = r.createOrUpdateManifestWorkForVRC(ctx, mirrorPeer, &drpolicy, clusterFSIDs) if err != nil { + r.Logger.Error("Failed to create VolumeReplicationClass via ManifestWork", "error", err) return ctrl.Result{}, fmt.Errorf("failed to create VolumeReplicationClass via ManifestWork: %v", err) } } + r.Logger.Info("Successfully reconciled DRPolicy", "RequestNamespace", req.Namespace, "RequestName", req.Name) return ctrl.Result{}, nil } func (r *DRPolicyReconciler) createOrUpdateManifestWorkForVRC(ctx context.Context, mp *multiclusterv1alpha1.MirrorPeer, dp *ramenv1alpha1.DRPolicy, clusterFSIDs map[string]string) error { + logger := r.Logger.With("method", "createOrUpdateManifestWorkForVRC", "DRPolicy", dp.Name, "MirrorPeer", mp.Name) replicationId, err := utils.CreateUniqueReplicationId(clusterFSIDs) if err != nil { + logger.Error("Failed to create unique replication ID", "error", err) return err } @@ -144,25 +166,30 @@ func (r *DRPolicyReconciler) createOrUpdateManifestWorkForVRC(ctx context.Contex switch { case err == nil: - klog.Infof("%s already exists. 
updating...", manifestWorkName) - case !errors.IsNotFound(err): - klog.Error(err, "failed to get ManifestWork: %s", manifestWorkName) + logger.Info("ManifestWork already exists, updating", "ManifestWorkName", manifestWorkName) + case !k8serrors.IsNotFound(err): + logger.Error("Failed to get ManifestWork", "ManifestWorkName", manifestWorkName, "error", err) return err } interval := dp.Spec.SchedulingInterval - params := make(map[string]string) - params[MirroringModeKey] = DefaultMirroringMode - params[SchedulingIntervalKey] = interval - params[ReplicationSecretNameKey] = RBDReplicationSecretName - params[ReplicationSecretNamespaceKey] = pr.StorageClusterRef.Namespace + params := map[string]string{ + MirroringModeKey: DefaultMirroringMode, + SchedulingIntervalKey: interval, + ReplicationSecretNameKey: RBDReplicationSecretName, + ReplicationSecretNamespaceKey: pr.StorageClusterRef.Namespace, + } + vrcName := fmt.Sprintf(RBDVolumeReplicationClassNameTemplate, utils.FnvHash(interval)) - labels := make(map[string]string) - labels[fmt.Sprintf(RamenLabelTemplate, ReplicationIDKey)] = replicationId - labels[fmt.Sprintf(RamenLabelTemplate, "maintenancemodes")] = "Failover" + labels := map[string]string{ + fmt.Sprintf(RamenLabelTemplate, ReplicationIDKey): replicationId, + fmt.Sprintf(RamenLabelTemplate, "maintenancemodes"): "Failover", + } + vrc := replicationv1alpha1.VolumeReplicationClass{ TypeMeta: metav1.TypeMeta{ - Kind: "VolumeReplicationClass", APIVersion: "replication.storage.openshift.io/v1alpha1", + Kind: "VolumeReplicationClass", + APIVersion: "replication.storage.openshift.io/v1alpha1", }, ObjectMeta: metav1.ObjectMeta{ Name: vrcName, @@ -179,6 +206,7 @@ func (r *DRPolicyReconciler) createOrUpdateManifestWorkForVRC(ctx context.Contex objJson, err := json.Marshal(vrc) if err != nil { + logger.Error("Failed to marshal VolumeReplicationClass to JSON", "VolumeReplicationClass", vrcName, "error", err) return fmt.Errorf("failed to marshal %v to JSON, error %w", vrc, err) } @@ -210,7 +238,7 @@ func (r *DRPolicyReconciler) createOrUpdateManifestWorkForVRC(ctx context.Contex mw := workv1.ManifestWork{ ObjectMeta: metav1.ObjectMeta{ Name: manifestWorkName, - Namespace: pr.ClusterName, //target cluster + Namespace: pr.ClusterName, OwnerReferences: []metav1.OwnerReference{ { Kind: dp.Kind, @@ -221,6 +249,7 @@ func (r *DRPolicyReconciler) createOrUpdateManifestWorkForVRC(ctx context.Contex }, }, } + _, err = controllerutil.CreateOrUpdate(ctx, r.HubClient, &mw, func() error { mw.Spec = workv1.ManifestWorkSpec{ Workload: workv1.ManifestsTemplate{ @@ -231,35 +260,44 @@ func (r *DRPolicyReconciler) createOrUpdateManifestWorkForVRC(ctx context.Contex }) if err != nil { - klog.Error(err, "failed to create/update ManifestWork: %s", manifestWorkName) + logger.Error("Failed to create/update ManifestWork", "ManifestWorkName", manifestWorkName, "error", err) return err } - klog.Infof("ManifestWork created for %s", vrcName) + logger.Info("ManifestWork created/updated successfully", "ManifestWorkName", manifestWorkName, "VolumeReplicationClassName", vrcName) } return nil } func (r *DRPolicyReconciler) fetchClusterFSIDs(ctx context.Context, peer *multiclusterv1alpha1.MirrorPeer, clusterFSIDs map[string]string) error { + logger := r.Logger.With("method", "fetchClusterFSIDs", "MirrorPeer", peer.Name) + for _, pr := range peer.Spec.Items { rookSecretName := utils.GetSecretNameByPeerRef(pr) - klog.Info("Fetching rook secret ", "Secret Name:", rookSecretName) + logger.Info("Fetching rook secret", "SecretName", 
rookSecretName, "ClusterName", pr.ClusterName) + hs, err := utils.FetchSecretWithName(ctx, r.HubClient, types.NamespacedName{Name: rookSecretName, Namespace: pr.ClusterName}) if err != nil { - if errors.IsNotFound(err) { - klog.Infof("could not find secret %q. will attempt to fetch it again after a delay", rookSecretName) + if k8serrors.IsNotFound(err) { + logger.Info("Secret not found, will attempt to fetch again after a delay", "SecretName", rookSecretName, "ClusterName", pr.ClusterName) + return err } + logger.Error("Failed to fetch rook secret", "SecretName", rookSecretName, "ClusterName", pr.ClusterName, "error", err) return err } - klog.Info("Unmarshalling rook secret ", "Secret Name:", rookSecretName) + + logger.Info("Unmarshalling rook secret", "SecretName", rookSecretName, "ClusterName", pr.ClusterName) rt, err := utils.UnmarshalHubSecret(hs) if err != nil { - klog.Error(err, "Failed to unmarshal rook secret", "Secret", rookSecretName) + logger.Error("Failed to unmarshal rook secret", "SecretName", rookSecretName, "ClusterName", pr.ClusterName, "error", err) return err } + clusterFSIDs[pr.ClusterName] = rt.FSID + logger.Info("Successfully fetched FSID for cluster", "ClusterName", pr.ClusterName, "FSID", rt.FSID) } + logger.Info("Successfully fetched all cluster FSIDs", "MirrorPeer", peer.Name) return nil } diff --git a/controllers/manager.go b/controllers/manager.go index f5e66766..bff3415f 100644 --- a/controllers/manager.go +++ b/controllers/manager.go @@ -3,6 +3,7 @@ package controllers import ( "context" "flag" + "log/slog" "os" consolev1alpha1 "github.com/openshift/api/console/v1alpha1" @@ -120,9 +121,11 @@ func (o *ManagerOptions) runManager() { namespace := os.Getenv("POD_NAMESPACE") + logger := slog.New(slog.NewTextHandler(os.Stdout, nil)) if err = (&MirrorPeerReconciler{ Client: mgr.GetClient(), Scheme: mgr.GetScheme(), + Logger: logger, }).SetupWithManager(mgr); err != nil { setupLog.Error(err, "unable to create controller", "controller", "MirrorPeer") os.Exit(1) @@ -132,6 +135,7 @@ func (o *ManagerOptions) runManager() { if err = (&MirrorPeerSecretReconciler{ Client: mgr.GetClient(), Scheme: mgr.GetScheme(), + Logger: logger, }).SetupWithManager(mgr); err != nil { setupLog.Error(err, "unable to create controller", "controller", "MirrorPeer") os.Exit(1) @@ -224,6 +228,7 @@ func (o *ManagerOptions) runManager() { if err = (&DRPolicyReconciler{ HubClient: mgr.GetClient(), Scheme: mgr.GetScheme(), + Logger: logger, }).SetupWithManager(mgr); err != nil { setupLog.Error(err, "unable to create controller", "controller", "DRPolicy") os.Exit(1) diff --git a/controllers/mirrorpeer_controller.go b/controllers/mirrorpeer_controller.go index 951c03bf..000a5a53 100644 --- a/controllers/mirrorpeer_controller.go +++ b/controllers/mirrorpeer_controller.go @@ -18,6 +18,7 @@ package controllers import ( "context" + "log/slog" "os" "github.com/red-hat-storage/odf-multicluster-orchestrator/addons/setup" @@ -41,7 +42,6 @@ import ( "sigs.k8s.io/controller-runtime/pkg/builder" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" - "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/predicate" "sigs.k8s.io/controller-runtime/pkg/reconcile" ) @@ -50,6 +50,7 @@ import ( type MirrorPeerReconciler struct { client.Client Scheme *runtime.Scheme + Logger *slog.Logger } const mirrorPeerFinalizer = "hub.multicluster.odf.openshift.io" @@ -80,8 +81,8 @@ const spokeClusterRoleBindingName = "spoke-clusterrole-bindings" // For 
more details, check Reconcile and its Result here: // - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.8.3/pkg/reconcile func (r *MirrorPeerReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { - logger := log.FromContext(ctx) - + logger := r.Logger + logger.Info("Reconciling request", "req", req) // Fetch MirrorPeer for given Request var mirrorPeer multiclusterv1alpha1.MirrorPeer err := r.Get(ctx, req.NamespacedName, &mirrorPeer) @@ -94,33 +95,35 @@ func (r *MirrorPeerReconciler) Reconcile(ctx context.Context, req ctrl.Request) return ctrl.Result{}, nil } // Error reading the object - requeue the request. - logger.Error(err, "Failed to get MirrorPeer") + logger.Error("Failed to get MirrorPeer", "error", err) return ctrl.Result{}, err } - logger.V(2).Info("Validating MirrorPeer", "MirrorPeer", req.NamespacedName) + logger.Info("Validating MirrorPeer", "MirrorPeer", req.NamespacedName) // Validate MirrorPeer // MirrorPeer.Spec must be defined if err := undefinedMirrorPeerSpec(mirrorPeer.Spec); err != nil { + logger.Error("MirrorPeer spec is undefined", "error", err) return ctrl.Result{Requeue: false}, err } // MirrorPeer.Spec.Items must be unique if err := uniqueSpecItems(mirrorPeer.Spec); err != nil { + logger.Error("MirrorPeer spec items are not unique", "error", err) return ctrl.Result{Requeue: false}, err } for i := range mirrorPeer.Spec.Items { // MirrorPeer.Spec.Items must not have empty fields if err := emptySpecItems(mirrorPeer.Spec.Items[i]); err != nil { - // return error and do not requeue since user needs to update the spec - // when user updates the spec, new reconcile will be triggered + logger.Error("MirrorPeer spec items have empty fields", "error", err) return reconcile.Result{Requeue: false}, err } // MirrorPeer.Spec.Items[*].ClusterName must be a valid ManagedCluster if err := isManagedCluster(ctx, r.Client, mirrorPeer.Spec.Items[i].ClusterName); err != nil { + logger.Error("Invalid ManagedCluster", "ClusterName", mirrorPeer.Spec.Items[i].ClusterName, "error", err) return ctrl.Result{}, err } } - logger.V(2).Info("All validations for MirrorPeer passed", "MirrorPeer", req.NamespacedName) + logger.Info("All validations for MirrorPeer passed", "MirrorPeer", req.NamespacedName) if mirrorPeer.GetDeletionTimestamp().IsZero() { if !utils.ContainsString(mirrorPeer.GetFinalizers(), mirrorPeerFinalizer) { @@ -132,7 +135,7 @@ func (r *MirrorPeerReconciler) Reconcile(ctx context.Context, req ctrl.Request) mirrorPeer.Status.Phase = multiclusterv1alpha1.Deleting statusErr := r.Client.Status().Update(ctx, &mirrorPeer) if statusErr != nil { - logger.Error(statusErr, "Error occurred while updating the status of mirrorpeer", "MirrorPeer", mirrorPeer) + logger.Error("Error occurred while updating the status of mirrorpeer", "MirrorPeer", mirrorPeer, "Error", statusErr) return ctrl.Result{Requeue: true}, nil } if utils.ContainsString(mirrorPeer.GetFinalizers(), mirrorPeerFinalizer) { @@ -141,12 +144,12 @@ func (r *MirrorPeerReconciler) Reconcile(ctx context.Context, req ctrl.Request) return reconcile.Result{Requeue: true}, err } if err := r.deleteSecrets(ctx, mirrorPeer); err != nil { - logger.Error(err, "Failed to delete resources") + logger.Error("Failed to delete resources", "error", err) return reconcile.Result{Requeue: true}, err } mirrorPeer.Finalizers = utils.RemoveString(mirrorPeer.Finalizers, mirrorPeerFinalizer) if err := r.Client.Update(ctx, &mirrorPeer); err != nil { - logger.Info("Failed to remove finalizer from MirrorPeer") + 
logger.Error("Failed to remove finalizer from MirrorPeer", "error", err) return reconcile.Result{}, err } } @@ -165,9 +168,10 @@ func (r *MirrorPeerReconciler) Reconcile(ctx context.Context, req ctrl.Request) err = r.Client.Update(ctx, mirrorPeerCopy) if err != nil { - return checkK8sUpdateErrors(err, mirrorPeerCopy) + logger.Error("Failed to update mirrorpeer with disaster recovery label", "error", err) + return checkK8sUpdateErrors(err, mirrorPeerCopy, logger) } - logger.Info("Successfully added label to mirrorpeer for disaster recovery", "Mirrorpeer", mirrorPeerCopy.Name) + logger.Info("Successfully added label to mirrorpeer for disaster recovery. Requeuing request...", "Mirrorpeer", mirrorPeerCopy.Name) return ctrl.Result{Requeue: true}, nil } @@ -179,19 +183,20 @@ func (r *MirrorPeerReconciler) Reconcile(ctx context.Context, req ctrl.Request) } statusErr := r.Client.Status().Update(ctx, &mirrorPeer) if statusErr != nil { - logger.Error(statusErr, "Error occurred while updating the status of mirrorpeer", "MirrorPeer", mirrorPeer) + logger.Error("Error occurred while updating the status of mirrorpeer. Requeuing request...", "MirrorPeer", mirrorPeer, "Error", statusErr) // Requeue, but don't throw return ctrl.Result{Requeue: true}, nil } } if err := r.processManagedClusterAddon(ctx, mirrorPeer); err != nil { + logger.Error("Failed to process managedclusteraddon", "MirrorPeer", mirrorPeer.Name, "error", err) return ctrl.Result{}, err } err = r.createClusterRoleBindingsForSpoke(ctx, mirrorPeer) if err != nil { - logger.Error(err, "Failed to create cluster role bindings for spoke") + logger.Error("Failed to create cluster role bindings for spoke", "MirrorPeer", mirrorPeer.Name, "error", err) return ctrl.Result{}, err } @@ -206,31 +211,32 @@ func (r *MirrorPeerReconciler) Reconcile(ctx context.Context, req ctrl.Request) err = r.Client.Get(ctx, namespacedName, &s3Secret) if err != nil { if k8serrors.IsNotFound(err) { - logger.Info("s3 secret is not yet synchronised. retrying till it is available", "peerref", peerRef.ClusterName, "MirrorPeer", mirrorPeer) + logger.Info("S3 secret is not yet synchronised. Retrying till it is available. 
Requeuing request...", "MirrorPeer", mirrorPeer, "Cluster", peerRef.ClusterName) return ctrl.Result{Requeue: true}, nil } - logger.Error(err, "error in fetching s3 internal secret", "peerref", peerRef.ClusterName, "MirrorPeer", mirrorPeer) + logger.Error("Error in fetching s3 internal secret", "MirrorPeer", mirrorPeer, "Cluster", peerRef.ClusterName, "error", err) return ctrl.Result{}, err } - err = createOrUpdateSecretsFromInternalSecret(ctx, r.Client, &s3Secret, []multiclusterv1alpha1.MirrorPeer{mirrorPeer}) + err = createOrUpdateSecretsFromInternalSecret(ctx, r.Client, &s3Secret, []multiclusterv1alpha1.MirrorPeer{mirrorPeer}, logger) if err != nil { - logger.Error(err, "error in updating S3 profile", "peerref", peerRef.ClusterName, "MirrorPeer", mirrorPeer) + logger.Error("Error in updating S3 profile", "MirrorPeer", mirrorPeer, "Cluster", peerRef.ClusterName, "error", err) return ctrl.Result{}, err } } } - if err = processMirrorPeerSecretChanges(ctx, r.Client, mirrorPeer); err != nil { + if err = r.processMirrorPeerSecretChanges(ctx, r.Client, mirrorPeer); err != nil { + logger.Error("Error processing MirrorPeer secret changes", "MirrorPeer", mirrorPeer.Name, "error", err) return ctrl.Result{}, err } err = r.createDRClusters(ctx, &mirrorPeer) if err != nil { if k8serrors.IsNotFound(err) { - logger.Info("secret not synchronised yet, retrying to create DRCluster", "MirrorPeer", mirrorPeer) + logger.Info("Secret not synchronised yet, retrying to create DRCluster", "MirrorPeer", mirrorPeer) return ctrl.Result{Requeue: true}, nil } - logger.Error(err, "failed to create DRClusters for MirrorPeer", "MirrorPeer", mirrorPeer.Name) + logger.Error("Failed to create DRClusters for MirrorPeer", "MirrorPeer", mirrorPeer.Name, "error", err) return ctrl.Result{}, err } @@ -240,37 +246,56 @@ func (r *MirrorPeerReconciler) Reconcile(ctx context.Context, req ctrl.Request) // processManagedClusterAddon creates an addon for the cluster management in all the peer refs, // the resources gets an owner ref of the mirrorpeer to let the garbage collector handle it if the mirrorpeer gets deleted func (r *MirrorPeerReconciler) processManagedClusterAddon(ctx context.Context, mirrorPeer multiclusterv1alpha1.MirrorPeer) error { - logger := log.FromContext(ctx) - // Create or Update ManagedClusterAddon - for i := range mirrorPeer.Spec.Items { + logger := r.Logger + logger.Info("Processing ManagedClusterAddons for MirrorPeer", "MirrorPeer", mirrorPeer.Name) + + for _, item := range mirrorPeer.Spec.Items { + logger.Info("Handling ManagedClusterAddon for cluster", "ClusterName", item.ClusterName) + var managedClusterAddOn addonapiv1alpha1.ManagedClusterAddOn - if err := r.Client.Get(ctx, types.NamespacedName{ + namespacedName := types.NamespacedName{ Name: setup.TokenExchangeName, - Namespace: mirrorPeer.Spec.Items[i].ClusterName, - }, &managedClusterAddOn); err != nil { + Namespace: item.ClusterName, + } + + err := r.Client.Get(ctx, namespacedName, &managedClusterAddOn) + if err != nil { if k8serrors.IsNotFound(err) { - logger.Info("Cannot find managedClusterAddon, creating") + logger.Info("ManagedClusterAddon not found, will create a new one", "ClusterName", item.ClusterName) + annotations := make(map[string]string) annotations[utils.DRModeAnnotationKey] = string(mirrorPeer.Spec.Type) - managedClusterAddOn = addonapiv1alpha1.ManagedClusterAddOn{ ObjectMeta: metav1.ObjectMeta{ Name: setup.TokenExchangeName, - Namespace: mirrorPeer.Spec.Items[i].ClusterName, + Namespace: item.ClusterName, Annotations: annotations, }, } + 
 			} else {
+				logger.Error("Failed to get ManagedClusterAddon", "error", err, "ClusterName", item.ClusterName)
+				return err
 			}
 		}
-		_, err := controllerutil.CreateOrUpdate(ctx, r.Client, &managedClusterAddOn, func() error {
-			managedClusterAddOn.Spec.InstallNamespace = mirrorPeer.Spec.Items[i].StorageClusterRef.Namespace
-			return controllerutil.SetOwnerReference(&mirrorPeer, &managedClusterAddOn, r.Scheme)
+
+		_, err = controllerutil.CreateOrUpdate(ctx, r.Client, &managedClusterAddOn, func() error {
+			managedClusterAddOn.Spec.InstallNamespace = item.StorageClusterRef.Namespace
+			if err := controllerutil.SetOwnerReference(&mirrorPeer, &managedClusterAddOn, r.Scheme); err != nil {
+				logger.Error("Failed to set owner reference on ManagedClusterAddon", "error", err, "ClusterName", item.ClusterName)
+				return err
+			}
+			return nil
 		})
+
 		if err != nil {
-			logger.Error(err, "Failed to reconcile ManagedClusterAddOn.", "ManagedClusterAddOn", klog.KRef(managedClusterAddOn.Namespace, managedClusterAddOn.Name))
+			logger.Error("Failed to reconcile ManagedClusterAddOn", "ManagedClusterAddOn", klog.KRef(managedClusterAddOn.Namespace, managedClusterAddOn.Name), "error", err)
 			return err
 		}
+
+		logger.Info("Successfully reconciled ManagedClusterAddOn", "ClusterName", item.ClusterName)
 	}
+
+	logger.Info("Successfully processed all ManagedClusterAddons for MirrorPeer", "MirrorPeer", mirrorPeer.Name)
 	return nil
 }
@@ -279,26 +304,33 @@ func (r *MirrorPeerReconciler) processManagedClusterAddon(ctx context.Context, m
 // If two mirrorpeers are pointing to the same peer ref, but only gets deleted the orphan green secret in
 // the still standing peer ref gets deleted by the mirrorpeer secret controller
 func (r *MirrorPeerReconciler) deleteSecrets(ctx context.Context, mirrorPeer multiclusterv1alpha1.MirrorPeer) error {
-	logger := log.FromContext(ctx)
-	for i := range mirrorPeer.Spec.Items {
+	logger := r.Logger
+	logger.Info("Starting deletion of secrets for MirrorPeer", "MirrorPeer", mirrorPeer.Name)
+
+	for i, peerRef := range mirrorPeer.Spec.Items {
+		logger.Info("Checking if PeerRef is used by another MirrorPeer", "PeerRef", peerRef.ClusterName)
+
 		peerRefUsed, err := utils.DoesAnotherMirrorPeerPointToPeerRef(ctx, r.Client, &mirrorPeer.Spec.Items[i])
 		if err != nil {
+			logger.Error("Error checking if PeerRef is used by another MirrorPeer", "PeerRef", peerRef.ClusterName, "error", err)
 			return err
 		}
+
 		if !peerRefUsed {
-			secretLabels := []string{string(utils.SourceLabel), string(utils.DestinationLabel)}
+			logger.Info("PeerRef is not used by another MirrorPeer, proceeding to delete secrets", "PeerRef", peerRef.ClusterName)
+			secretLabels := []string{string(utils.SourceLabel), string(utils.DestinationLabel)}
 			if mirrorPeer.Spec.ManageS3 {
 				secretLabels = append(secretLabels, string(utils.InternalLabel))
 			}
 			secretRequirement, err := labels.NewRequirement(utils.SecretLabelTypeKey, selection.In, secretLabels)
 			if err != nil {
-				logger.Error(err, "cannot parse new requirement")
+				logger.Error("Cannot create label requirement for deleting secrets", "error", err)
+				return err
 			}
 			secretSelector := labels.NewSelector().Add(*secretRequirement)
-
 			deleteOpt := client.DeleteAllOfOptions{
 				ListOptions: client.ListOptions{
 					Namespace:     mirrorPeer.Spec.Items[i].ClusterName,
@@ -308,226 +340,275 @@ func (r *MirrorPeerReconciler) deleteSecrets(ctx context.Context, mirrorPeer mul
 			var secret corev1.Secret
 			if err := r.DeleteAllOf(ctx, &secret, &deleteOpt); err != nil {
-				logger.Error(err, "Error while deleting secrets for MirrorPeer", "MirrorPeer", mirrorPeer.Name)
+				logger.Error("Error while deleting secrets for MirrorPeer", "MirrorPeer", mirrorPeer.Name, "PeerRef", peerRef.ClusterName, "error", err)
+				return err
 			}
+
+			logger.Info("Secrets successfully deleted", "PeerRef", peerRef.ClusterName)
+		} else {
+			logger.Info("PeerRef is still used by another MirrorPeer, skipping deletion", "PeerRef", peerRef.ClusterName)
 		}
 	}
+
+	logger.Info("Completed deletion of secrets for MirrorPeer", "MirrorPeer", mirrorPeer.Name)
 	return nil
 }
 
-func processMirrorPeerSecretChanges(ctx context.Context, rc client.Client, mirrorPeerObj multiclusterv1alpha1.MirrorPeer) error {
-	logger := log.FromContext(ctx)
+func (r *MirrorPeerReconciler) processMirrorPeerSecretChanges(ctx context.Context, rc client.Client, mirrorPeerObj multiclusterv1alpha1.MirrorPeer) error {
+	logger := r.Logger
+	logger.Info("Processing mirror peer secret changes", "MirrorPeer", mirrorPeerObj.Name)
+
 	var anyErr error
 	for _, eachPeerRef := range mirrorPeerObj.Spec.Items {
+		logger.Info("Fetching all source secrets", "ClusterName", eachPeerRef.ClusterName)
 		sourceSecrets, err := fetchAllSourceSecrets(ctx, rc, eachPeerRef.ClusterName)
 		if err != nil {
-			logger.Error(err, "Unable to get a list of source secrets", "namespace", eachPeerRef.ClusterName)
+			logger.Error("Unable to get a list of source secrets", "error", err, "namespace", eachPeerRef.ClusterName)
 			anyErr = err
 			continue
 		}
+
 		// get the source secret associated with the PeerRef
 		matchingSourceSecret := utils.FindMatchingSecretWithPeerRef(eachPeerRef, sourceSecrets)
-		// if no match found (ie; no source secret found); just continue
 		if matchingSourceSecret == nil {
+			logger.Info("No matching source secret found for peer ref", "PeerRef", eachPeerRef.ClusterName)
 			continue
 		}
-		err = createOrUpdateDestinationSecretsFromSource(ctx, rc, matchingSourceSecret, mirrorPeerObj)
+
+		err = createOrUpdateDestinationSecretsFromSource(ctx, rc, matchingSourceSecret, logger, mirrorPeerObj)
 		if err != nil {
-			logger.Error(err, "Error while updating Destination secrets", "source-secret", matchingSourceSecret.Name, "namespace", matchingSourceSecret.Namespace)
+			logger.Error("Error while updating destination secrets", "source-secret", matchingSourceSecret.Name, "namespace", matchingSourceSecret.Namespace, "error", err)
 			anyErr = err
 		}
 	}
+
 	if anyErr == nil {
-		// if there are no other errors,
-		// cleanup any other orphan destination secrets
-		anyErr = processDestinationSecretCleanup(ctx, rc)
+		logger.Info("Cleaning up any orphan destination secrets")
+		anyErr = processDestinationSecretCleanup(ctx, rc, logger)
+		if anyErr != nil {
+			logger.Error("Error cleaning up orphan destination secrets", "error", anyErr)
+		}
+	} else {
+		logger.Info("Errors encountered in updating secrets; skipping cleanup")
 	}
+
 	return anyErr
 }
 
 func (r *MirrorPeerReconciler) checkTokenExchangeStatus(ctx context.Context, mp multiclusterv1alpha1.MirrorPeer) (bool, error) {
-	logger := log.FromContext(ctx)
+	logger := r.Logger
+
+	logger.Info("Checking token exchange status for MirrorPeer", "MirrorPeer", mp.Name)
-	//TODO Add support for more peer refs when applicable
+	// Assuming pr1 and pr2 are defined as first two peer refs in spec
 	pr1 := mp.Spec.Items[0]
 	pr2 := mp.Spec.Items[1]
+	// Check for source secret for pr1
 	err := r.checkForSourceSecret(ctx, pr1)
 	if err != nil {
-		logger.Error(err, "Failed to find valid source secret", "PeerRef", pr1)
+		logger.Error("Failed to find valid source secret for the first peer reference", "PeerRef", pr1.ClusterName, "error", err)
 		return false, err
 	}
+	// Check for destination secret for pr1 in pr2's cluster
 	err = r.checkForDestinationSecret(ctx, pr1, pr2.ClusterName)
 	if err != nil {
-		logger.Error(err, "Failed to find valid destination secret", "PeerRef", pr1)
+		logger.Error("Failed to find valid destination secret for the first peer reference", "PeerRef", pr1.ClusterName, "DestinationCluster", pr2.ClusterName, "error", err)
 		return false, err
 	}
+	// Check for source secret for pr2
 	err = r.checkForSourceSecret(ctx, pr2)
 	if err != nil {
-		logger.Error(err, "Failed to find valid source secret", "PeerRef", pr2)
+		logger.Error("Failed to find valid source secret for the second peer reference", "PeerRef", pr2.ClusterName, "error", err)
 		return false, err
 	}
+	// Check for destination secret for pr2 in pr1's cluster
 	err = r.checkForDestinationSecret(ctx, pr2, pr1.ClusterName)
 	if err != nil {
-		logger.Error(err, "Failed to find destination secret", "PeerRef", pr2)
+		logger.Error("Failed to find valid destination secret for the second peer reference", "PeerRef", pr2.ClusterName, "DestinationCluster", pr1.ClusterName, "error", err)
 		return false, err
 	}
+	logger.Info("Successfully validated token exchange status for all peer references", "MirrorPeer", mp.Name)
 	return true, nil
 }
 
 func (r *MirrorPeerReconciler) checkForSourceSecret(ctx context.Context, peerRef multiclusterv1alpha1.PeerRef) error {
-	logger := log.FromContext(ctx)
+	logger := r.Logger
 	prSecretName := utils.GetSecretNameByPeerRef(peerRef)
 	var peerSourceSecret corev1.Secret
 	err := r.Client.Get(ctx, types.NamespacedName{
-		Name: prSecretName,
-		// Source Namespace for the secret
-		Namespace: peerRef.ClusterName,
+		Name:      prSecretName,
+		Namespace: peerRef.ClusterName, // Source Namespace for the secret
 	}, &peerSourceSecret)
 	if err != nil {
 		if k8serrors.IsNotFound(err) {
-			logger.Info("Source secret not found", "Secret", prSecretName)
+			logger.Info("Source secret not found", "Secret", prSecretName, "Namespace", peerRef.ClusterName)
 			return err
 		}
+		logger.Error("Failed to fetch source secret", "error", err, "Secret", prSecretName, "Namespace", peerRef.ClusterName)
+		return err
 	}
 	err = utils.ValidateSourceSecret(&peerSourceSecret)
 	if err != nil {
+		logger.Error("Validation failed for source secret", "error", err, "Secret", prSecretName, "Namespace", peerRef.ClusterName)
 		return err
 	}
+	logger.Info("Source secret validated successfully", "Secret", prSecretName, "Namespace", peerRef.ClusterName)
 	return nil
 }
+
 func (r *MirrorPeerReconciler) checkForDestinationSecret(ctx context.Context, peerRef multiclusterv1alpha1.PeerRef, destNamespace string) error {
-	logger := log.FromContext(ctx)
+	logger := r.Logger
 	prSecretName := utils.GetSecretNameByPeerRef(peerRef)
 	var peerDestinationSecret corev1.Secret
+
 	err := r.Client.Get(ctx, types.NamespacedName{
-		Name: prSecretName,
-		// Destination Namespace for the secret
-		Namespace: destNamespace,
+		Name:      prSecretName,
+		Namespace: destNamespace, // Destination Namespace for the secret
 	}, &peerDestinationSecret)
+
 	if err != nil {
 		if k8serrors.IsNotFound(err) {
-			logger.Info("Destination secret not found", "Secret", prSecretName)
+			logger.Info("Destination secret not found", "Secret", prSecretName, "Namespace", destNamespace)
 			return err
 		}
+		logger.Error("Failed to fetch destination secret", "error", err, "Secret", prSecretName, "Namespace", destNamespace)
+		return err
 	}
+
 	err = utils.ValidateDestinationSecret(&peerDestinationSecret)
 	if err != nil {
+		logger.Error("Validation failed for destination secret", "error", err, "Secret", prSecretName, "Namespace", destNamespace)
 		return err
 	}
+	logger.Info("Destination secret validated successfully", "Secret", prSecretName, "Namespace", destNamespace)
 	return nil
-
 }
 
 // SetupWithManager sets up the controller with the Manager.
 func (r *MirrorPeerReconciler) SetupWithManager(mgr ctrl.Manager) error {
+	logger := r.Logger
+	logger.Info("Setting up controller for MirrorPeer")
+
 	mpPredicate := utils.ComposePredicates(predicate.GenerationChangedPredicate{})
-	return ctrl.NewControllerManagedBy(mgr).
+	err := ctrl.NewControllerManagedBy(mgr).
 		For(&multiclusterv1alpha1.MirrorPeer{}, builder.WithPredicates(mpPredicate)).
 		Complete(r)
+
+	if err != nil {
+		logger.Error("Failed to set up controller for MirrorPeer", "error", err)
+		return err
+	}
+
+	logger.Info("Controller for MirrorPeer successfully set up")
+	return nil
 }
 
 // CheckK8sUpdateErrors checks what type of error occurs when trying to update a k8s object
 // and logs according to the object
-func checkK8sUpdateErrors(err error, obj client.Object) (ctrl.Result, error) {
+func checkK8sUpdateErrors(err error, obj client.Object, logger *slog.Logger) (ctrl.Result, error) {
 	if k8serrors.IsConflict(err) {
-		klog.Info("Object is being updated by another process. Retrying", obj.GetObjectKind(), obj.GetName())
+		logger.Info("Object is being updated by another process. Retrying", "Kind", obj.GetObjectKind().GroupVersionKind().Kind, "Name", obj.GetName())
 		return ctrl.Result{Requeue: true}, nil
 	} else if k8serrors.IsNotFound(err) {
-		klog.Info("Object no longer exists. Ignoring since object must have been deleted", obj.GetObjectKind(), obj.GetName())
+		logger.Info("Object no longer exists. Ignoring since object must have been deleted", "Kind", obj.GetObjectKind().GroupVersionKind().Kind, "Name", obj.GetName())
 		return ctrl.Result{}, nil
 	} else if err != nil {
-		klog.Info("Warning: Failed to update object", obj.GetName(), "Error", err)
+		logger.Error("Failed to update object", "error", err, "Name", obj.GetName())
+		return ctrl.Result{}, err
 	}
 	return ctrl.Result{}, nil
 }
 
 func (r *MirrorPeerReconciler) createDRClusters(ctx context.Context, mp *multiclusterv1alpha1.MirrorPeer) error {
-	logger := log.FromContext(ctx)
+	logger := r.Logger
 	currentNamespace := os.Getenv("POD_NAMESPACE")
+
 	for _, pr := range mp.Spec.Items {
 		clusterName := pr.ClusterName
 		s3SecretName := utils.GetSecretNameByPeerRef(pr, utils.S3ProfilePrefix)
+		rookSecretName := utils.GetSecretNameByPeerRef(pr)
+		var fsid string
-		dc := ramenv1alpha1.DRCluster{
-			ObjectMeta: metav1.ObjectMeta{Name: clusterName},
-		}
+		logger.Info("Fetching Rook secret", "SecretName", rookSecretName, "Namespace", currentNamespace)
-		rookSecretName := utils.GetSecretNameByPeerRef(pr)
+		hs, err := utils.FetchSecretWithName(ctx, r.Client, types.NamespacedName{Name: rookSecretName, Namespace: currentNamespace})
+		if err != nil {
+			logger.Error("Failed to fetch Rook secret", "error", err, "SecretName", rookSecretName, "Namespace", currentNamespace)
+			return err
+		}
-		var fsid string
+		logger.Info("Unmarshalling Rook secret", "SecretName", rookSecretName)
 		if mp.Spec.Type == multiclusterv1alpha1.Sync {
-			logger.Info("Fetching rook secret ", "Secret Name:", rookSecretName)
-			hs, err := utils.FetchSecretWithName(ctx, r.Client, types.NamespacedName{Name: rookSecretName, Namespace: currentNamespace})
-			if err != nil {
-				return err
-			}
-			logger.Info("Unmarshalling rook secret ", "Secret Name:", rookSecretName)
 			rt, err := utils.UnmarshalRookSecretExternal(hs)
 			if err != nil {
-				logger.Error(err, "Failed to unmarshal rook secret", "Secret", rookSecretName)
+				logger.Error("Failed to unmarshal Rook secret", "error", err, "SecretName", rookSecretName)
 				return err
 			}
 			fsid = rt.FSID
 		} else {
-			logger.Info("Fetching rook secret ", "Secret Name:", rookSecretName)
-			hs, err := utils.FetchSecretWithName(ctx, r.Client, types.NamespacedName{Name: rookSecretName, Namespace: clusterName})
-			if err != nil {
-				return err
-			}
-			logger.Info("Unmarshalling rook secret ", "Secret Name:", rookSecretName)
 			rt, err := utils.UnmarshalHubSecret(hs)
 			if err != nil {
-				logger.Error(err, "Failed to unmarshal rook secret", "Secret", rookSecretName)
+				logger.Error("Failed to unmarshal Hub secret", "error", err, "SecretName", rookSecretName)
 				return err
 			}
 			fsid = rt.FSID
 		}
-		dc.Spec.Region = ramenv1alpha1.Region(fsid)
-		logger.Info("Fetching s3 secret ", "Secret Name:", s3SecretName)
+		dc := ramenv1alpha1.DRCluster{
+			ObjectMeta: metav1.ObjectMeta{Name: clusterName},
+			Spec:       ramenv1alpha1.DRClusterSpec{Region: ramenv1alpha1.Region(fsid)},
+		}
+
+		logger.Info("Fetching S3 secret", "SecretName", s3SecretName, "Namespace", clusterName)
 		ss, err := utils.FetchSecretWithName(ctx, r.Client, types.NamespacedName{Name: s3SecretName, Namespace: clusterName})
 		if err != nil {
+			logger.Error("Failed to fetch S3 secret", "error", err, "SecretName", s3SecretName, "Namespace", clusterName)
 			return err
 		}
-		logger.Info("Unmarshalling s3 secret ", "Secret Name:", s3SecretName)
+		logger.Info("Unmarshalling S3 secret", "SecretName", s3SecretName)
 		st, err := utils.UnmarshalS3Secret(ss)
 		if err != nil {
+			logger.Error("Failed to unmarshal S3 secret", "error", err, "SecretName", s3SecretName)
 			return err
 		}
-		logger.Info("creating and updating dr clusters")
+		logger.Info("Creating and updating DR clusters", "ClusterName", clusterName)
 		_, err = controllerutil.CreateOrUpdate(ctx, r.Client, &dc, func() error {
 			dc.Spec.S3ProfileName = st.S3ProfileName
 			return nil
 		})
-
 		if err != nil {
+			logger.Error("Failed to create/update DR cluster", "error", err, "ClusterName", clusterName)
 			return err
 		}
-
 	}
 	return nil
 }
 
 func (r *MirrorPeerReconciler) createClusterRoleBindingsForSpoke(ctx context.Context, peer multiclusterv1alpha1.MirrorPeer) error {
+	logger := r.Logger
+	logger.Info("Starting to create or update ClusterRoleBindings for the spoke", "MirrorPeerName", peer.Name)
+
 	crb := rbacv1.ClusterRoleBinding{}
 	err := r.Client.Get(ctx, types.NamespacedName{Name: spokeClusterRoleBindingName}, &crb)
 	if err != nil {
 		if !k8serrors.IsNotFound(err) {
+			logger.Error("Failed to get ClusterRoleBinding", "error", err, "ClusterRoleBindingName", spokeClusterRoleBindingName)
 			return err
 		}
+		logger.Info("ClusterRoleBinding not found, will be created", "ClusterRoleBindingName", spokeClusterRoleBindingName)
 	}
+
 	var subjects []rbacv1.Subject
 	if crb.Subjects != nil {
 		subjects = crb.Subjects
@@ -539,10 +620,12 @@ func (r *MirrorPeerReconciler) createClusterRoleBindingsForSpoke(ctx context.Con
 		gsub := getSubjectByPeerRef(pr, "Group")
 		if !utils.ContainsSubject(subjects, usub) {
 			subjects = append(subjects, *usub)
+			logger.Info("Adding user subject to ClusterRoleBinding", "User", usub.Name)
 		}
 
 		if !utils.ContainsSubject(subjects, gsub) {
 			subjects = append(subjects, *gsub)
+			logger.Info("Adding group subject to ClusterRoleBinding", "Group", gsub.Name)
 		}
 	}
 
@@ -555,49 +638,53 @@ func (r *MirrorPeerReconciler) createClusterRoleBindingsForSpoke(ctx context.Con
 		spokeRoleBinding.Subjects = subjects
 		if spokeRoleBinding.CreationTimestamp.IsZero() {
-			// RoleRef is immutable. So inject it only while creating new object.
+			// RoleRef is immutable, so it's set only while creating new object
 			spokeRoleBinding.RoleRef = rbacv1.RoleRef{
 				APIGroup: "rbac.authorization.k8s.io",
 				Kind:     "ClusterRole",
 				Name:     "open-cluster-management:token-exchange:agent",
 			}
+			logger.Info("Setting RoleRef for new ClusterRoleBinding", "RoleRef", spokeRoleBinding.RoleRef.Name)
 		}
 		return nil
 	})
 	if err != nil {
+		logger.Error("Failed to create or update ClusterRoleBinding", "error", err, "ClusterRoleBindingName", spokeClusterRoleBindingName)
 		return err
 	}
+
+	logger.Info("Successfully created or updated ClusterRoleBinding", "ClusterRoleBindingName", spokeClusterRoleBindingName)
 	return nil
 }
 
 func (r *MirrorPeerReconciler) updateMirrorPeerStatus(ctx context.Context, mirrorPeer multiclusterv1alpha1.MirrorPeer) (ctrl.Result, error) {
-	logger := log.FromContext(ctx)
+	logger := r.Logger
+
 	if mirrorPeer.Spec.Type == multiclusterv1alpha1.Async {
 		tokensExchanged, err := r.checkTokenExchangeStatus(ctx, mirrorPeer)
 		if err != nil {
 			if k8serrors.IsNotFound(err) {
-				logger.Info("Secrets not found; Attempting to reconcile again")
+				logger.Info("Secrets not found; Attempting to reconcile again", "MirrorPeer", mirrorPeer.Name)
 				return ctrl.Result{Requeue: true}, nil
 			}
-			logger.Info("Error while exchanging tokens", "MirrorPeer", mirrorPeer)
+			logger.Error("Error while exchanging tokens", "error", err, "MirrorPeer", mirrorPeer.Name)
 			mirrorPeer.Status.Message = err.Error()
 			statusErr := r.Client.Status().Update(ctx, &mirrorPeer)
 			if statusErr != nil {
-				logger.Error(statusErr, "Error occurred while updating the status of mirrorpeer", "MirrorPeer", mirrorPeer)
+				logger.Error("Error occurred while updating the status of mirrorpeer", "error", statusErr, "MirrorPeer", mirrorPeer.Name)
 			}
 			return ctrl.Result{}, err
 		}
 		if tokensExchanged {
-			logger.Info("Tokens exchanged", "MirrorPeer", mirrorPeer)
+			logger.Info("Tokens exchanged", "MirrorPeer", mirrorPeer.Name)
 			mirrorPeer.Status.Phase = multiclusterv1alpha1.ExchangedSecret
 			mirrorPeer.Status.Message = ""
 			statusErr := r.Client.Status().Update(ctx, &mirrorPeer)
 			if statusErr != nil {
-				logger.Error(statusErr, "Error occured while updating the status of mirrorpeer", "MirrorPeer", mirrorPeer)
-				// Requeue, but don't throw
+				logger.Error("Error occurred while updating the status of mirrorpeer", "error", statusErr, "MirrorPeer", mirrorPeer.Name)
 				return ctrl.Result{Requeue: true}, nil
 			}
 			return ctrl.Result{}, nil
@@ -607,25 +694,24 @@ func (r *MirrorPeerReconciler) updateMirrorPeerStatus(ctx context.Context, mirro
 		s3ProfileSynced, err := r.checkS3ProfileStatus(ctx, mirrorPeer)
 		if err != nil {
 			if k8serrors.IsNotFound(err) {
-				logger.Info("Secrets not found; Attempting to reconcile again")
+				logger.Info("S3 secrets not found; Attempting to reconcile again", "MirrorPeer", mirrorPeer.Name)
 				return ctrl.Result{Requeue: true}, nil
 			}
-			logger.Info("Error while syncing S3 Profile", "MirrorPeer", mirrorPeer)
+			logger.Error("Error while syncing S3 Profile", "error", err, "MirrorPeer", mirrorPeer.Name)
 			statusErr := r.Client.Status().Update(ctx, &mirrorPeer)
 			if statusErr != nil {
-				logger.Error(statusErr, "Error occurred while updating the status of mirrorpeer", "MirrorPeer", mirrorPeer)
+				logger.Error("Error occurred while updating the status of mirrorpeer", "error", statusErr, "MirrorPeer", mirrorPeer.Name)
 			}
 			return ctrl.Result{}, err
 		}
 		if s3ProfileSynced {
-			logger.Info("S3Profile synced to hub", "MirrorPeer", mirrorPeer)
+			logger.Info("S3 Profile synced to hub", "MirrorPeer", mirrorPeer.Name)
 			mirrorPeer.Status.Phase = multiclusterv1alpha1.S3ProfileSynced
 			mirrorPeer.Status.Message = ""
 			statusErr := r.Client.Status().Update(ctx, &mirrorPeer)
 			if statusErr != nil {
-				logger.Error(statusErr, "Error occurred while updating the status of mirrorpeer", "MirrorPeer", mirrorPeer)
-				// Requeue, but don't throw
+				logger.Error("Error occurred while updating the status of mirrorpeer", "error", statusErr, "MirrorPeer", mirrorPeer.Name)
 				return ctrl.Result{Requeue: true}, nil
 			}
 			return ctrl.Result{}, nil
@@ -636,15 +722,24 @@ func (r *MirrorPeerReconciler) updateMirrorPeerStatus(ctx context.Context, mirro
 }
 
 func (r *MirrorPeerReconciler) checkS3ProfileStatus(ctx context.Context, mp multiclusterv1alpha1.MirrorPeer) (bool, error) {
-	// If S3Profile secret can be fetched for each peerref in the MirrorPeer then the sync is successful
+	logger := r.Logger
+	logger.Info("Checking S3 profile status for each peer reference in the MirrorPeer", "MirrorPeerName", mp.Name)
+
 	for _, pr := range mp.Spec.Items {
 		clusterName := pr.ClusterName
 		s3SecretName := utils.GetSecretNameByPeerRef(pr, utils.S3ProfilePrefix)
+		logger.Info("Attempting to fetch S3 secret", "SecretName", s3SecretName, "ClusterName", clusterName)
+
 		_, err := utils.FetchSecretWithName(ctx, r.Client, types.NamespacedName{Name: s3SecretName, Namespace: clusterName})
 		if err != nil {
+			logger.Error("Failed to fetch S3 secret", "error", err, "SecretName", s3SecretName, "ClusterName", clusterName)
 			return false, err
 		}
+
+		logger.Info("Successfully fetched S3 secret", "SecretName", s3SecretName, "ClusterName", clusterName)
 	}
+
+	logger.Info("Successfully verified S3 profile status for all peer references", "MirrorPeerName", mp.Name)
 	return true, nil
 }
diff --git a/controllers/mirrorpeersecret_controller.go b/controllers/mirrorpeersecret_controller.go
index df48f0d2..6b94e449 100644
--- a/controllers/mirrorpeersecret_controller.go
+++ b/controllers/mirrorpeersecret_controller.go
@@ -18,6 +18,8 @@ package controllers
 import (
 	"context"
+	"fmt"
+	"log/slog"
 
 	"github.com/red-hat-storage/odf-multicluster-orchestrator/controllers/utils"
@@ -27,13 +29,11 @@ import (
 	"k8s.io/apimachinery/pkg/labels"
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/selection"
-	"k8s.io/klog/v2"
 	ctrl "sigs.k8s.io/controller-runtime"
 	"sigs.k8s.io/controller-runtime/pkg/builder"
 	"sigs.k8s.io/controller-runtime/pkg/client"
 	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
 	"sigs.k8s.io/controller-runtime/pkg/handler"
-	"sigs.k8s.io/controller-runtime/pkg/log"
 	"sigs.k8s.io/controller-runtime/pkg/reconcile"
 	"sigs.k8s.io/yaml"
 )
@@ -43,6 +43,7 @@ import (
 type MirrorPeerSecretReconciler struct {
 	client.Client
 	Scheme *runtime.Scheme
+	Logger *slog.Logger
 }
 
 //+kubebuilder:rbac:groups=multicluster.odf.openshift.io,resources=mirrorpeers,verbs=get;list;watch;create;update;patch;delete
@@ -57,120 +58,144 @@ type MirrorPeerSecretReconciler struct {
 // Reconcile standard reconcile function for MirrorPeerSecret controller
 func (r *MirrorPeerSecretReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
-	// reconcile for 'Secrets' (source or destination)
-	return mirrorPeerSecretReconcile(ctx, r.Client, req)
+	logger := r.Logger
+	logger.Info("Starting reconciliation for Secret", "RequestNamespace", req.Namespace, "RequestName", req.Name)
+
+	result, err := r.mirrorPeerSecretReconcile(ctx, r.Client, req)
+
+	if err != nil {
+		logger.Error("Reconciliation error for Secret", "error", err, "RequestNamespace", req.Namespace, "RequestName", req.Name)
+	} else {
+		logger.Info("Reconciliation completed for Secret", "RequestNamespace", req.Namespace, "RequestName", req.Name)
+	}
+
+	return result, err
 }
 
-func updateSecretWithHubRecoveryLabel(ctx context.Context, rc client.Client, peerSecret corev1.Secret) error {
-	logger := log.FromContext(ctx, "controller", "MirrorPeerSecret")
-	logger.Info("Adding backup labels to the secret")
+func (r *MirrorPeerSecretReconciler) updateSecretWithHubRecoveryLabel(ctx context.Context, rc client.Client, peerSecret corev1.Secret) error {
+	logger := r.Logger
+	logger.Info("Adding backup labels to the secret", "SecretName", peerSecret.Name, "Namespace", peerSecret.Namespace)
 	if peerSecret.ObjectMeta.Labels == nil {
 		peerSecret.ObjectMeta.Labels = make(map[string]string)
 	}
 	_, err := controllerutil.CreateOrUpdate(ctx, rc, &peerSecret, func() error {
-		peerSecret.ObjectMeta.Labels[utils.HubRecoveryLabel] = ""
+		peerSecret.ObjectMeta.Labels[utils.HubRecoveryLabel] = "active" // Assuming 'active' is the intended label value
 		return nil
 	})
+	if err != nil {
+		logger.Error("Failed to add or update hub recovery label", "error", err, "SecretName", peerSecret.Name, "Namespace", peerSecret.Namespace)
+	}
+
 	return err
 }
 
-func mirrorPeerSecretReconcile(ctx context.Context, rc client.Client, req ctrl.Request) (ctrl.Result, error) {
-	var err error
-	logger := log.FromContext(ctx, "controller", "MirrorPeerSecret")
+func (r *MirrorPeerSecretReconciler) mirrorPeerSecretReconcile(ctx context.Context, rc client.Client, req ctrl.Request) (ctrl.Result, error) {
+	logger := r.Logger
+	logger.Info("Reconciling secret", "Request", req.NamespacedName)
+
 	var peerSecret corev1.Secret
-	err = rc.Get(ctx, req.NamespacedName, &peerSecret)
+	err := rc.Get(ctx, req.NamespacedName, &peerSecret)
 	if err != nil {
 		if k8serrors.IsNotFound(err) {
-			logger.Info("Secret not found", "req", req)
-			return ctrl.Result{}, processDeletedSecrets(ctx, rc, req.NamespacedName)
+			logger.Info("Secret not found, processing deletion", "Request", req.NamespacedName)
+			return ctrl.Result{}, processDeletedSecrets(ctx, rc, req.NamespacedName, logger)
 		}
-		logger.Error(err, "Error in getting secret", "request", req)
+		logger.Error("Error retrieving secret", "error", err, "Request", req.NamespacedName)
 		return ctrl.Result{}, err
 	}
+
 	if utils.IsSecretSource(&peerSecret) {
 		if err := utils.ValidateSourceSecret(&peerSecret); err != nil {
-			logger.Error(err, "Provided source secret is not valid", "secret", peerSecret.Name, "namespace", peerSecret.Namespace)
+			logger.Error("Source secret validation failed", "error", err, "Secret", peerSecret.Name)
			return ctrl.Result{}, err
 		}
 		if !utils.HasHubRecoveryLabels(&peerSecret) {
-			err = updateSecretWithHubRecoveryLabel(ctx, rc, peerSecret)
+			err = r.updateSecretWithHubRecoveryLabel(ctx, rc, peerSecret)
 			if err != nil {
-				logger.Error(err, "Error occured while adding backup labels to secret. Requeing the request")
+				logger.Error("Failed to add hub recovery labels", "error", err, "Secret", peerSecret.Name)
 				return ctrl.Result{}, err
 			}
 		}
-		err = createOrUpdateDestinationSecretsFromSource(ctx, rc, &peerSecret)
+		err = createOrUpdateDestinationSecretsFromSource(ctx, rc, &peerSecret, logger)
 		if err != nil {
-			logger.Error(err, "Updating the destination secret failed", "secret", peerSecret.Name, "namespace", peerSecret.Namespace)
+			logger.Error("Failed to update destination secret", "error", err, "Secret", peerSecret.Name)
 			return ctrl.Result{}, err
 		}
 	} else if utils.IsSecretDestination(&peerSecret) {
-		// a destination secret updation happened
-		err = processDestinationSecretUpdation(ctx, rc, &peerSecret)
+		err = processDestinationSecretUpdation(ctx, rc, &peerSecret, logger)
 		if err != nil {
-			logger.Error(err, "Restoring destination secret failed", "secret", peerSecret.Name, "namespace", peerSecret.Namespace)
+			logger.Error("Failed to restore destination secret", "error", err, "Secret", peerSecret.Name)
 			return ctrl.Result{}, err
 		}
 	} else if utils.IsSecretInternal(&peerSecret) {
-		err = createOrUpdateSecretsFromInternalSecret(ctx, rc, &peerSecret, nil)
-		if !utils.HasHubRecoveryLabels(&peerSecret) {
-			err = updateSecretWithHubRecoveryLabel(ctx, rc, peerSecret)
-			if err != nil {
-				logger.Error(err, "Error occured while adding backup labels to secret. Requeing the request")
-				return ctrl.Result{}, err
-			}
-		}
+		err = createOrUpdateSecretsFromInternalSecret(ctx, rc, &peerSecret, nil, logger)
 		if err != nil {
-			logger.Error(err, "Updating the secret from internal secret is failed", "secret", peerSecret.Name, "namespace", peerSecret.Namespace)
+			logger.Error("Failed to update from internal secret", "error", err, "Secret", peerSecret.Name)
 			return ctrl.Result{}, err
 		}
 	}
+
 	return ctrl.Result{}, nil
 }
 
 // SetupWithManager sets up the controller with the Manager.
 func (r *MirrorPeerSecretReconciler) SetupWithManager(mgr ctrl.Manager) error {
-	return ctrl.NewControllerManagedBy(mgr).
+	logger := r.Logger
+	logger.Info("Setting up controller with manager", "Controller", "MirrorPeerSecretReconciler")
+
+	err := ctrl.NewControllerManagedBy(mgr).
 		For(&corev1.Secret{}, builder.WithPredicates(utils.SourceOrDestinationPredicate)).
 		Watches(&corev1.ConfigMap{}, handler.EnqueueRequestsFromMapFunc(r.secretConfigMapFunc)).
 		Complete(r)
+
+	if err != nil {
+		logger.Error("Failed to setup controller with manager", "error", err)
+		return err
+	}
+
+	logger.Info("Controller has been successfully set up with manager")
+	return nil
 }
 
 func (r *MirrorPeerSecretReconciler) secretConfigMapFunc(ctx context.Context, obj client.Object) []reconcile.Request {
+	logger := r.Logger
 	ConfigMapRamenConfigKeyName := "ramen_manager_config.yaml"
 	var cm *corev1.ConfigMap
 	cm, ok := obj.(*corev1.ConfigMap)
 	if !ok {
+		logger.Info("Object is not a ConfigMap", "ReceivedType", fmt.Sprintf("%T", obj))
 		return []reconcile.Request{}
 	}
 	if _, ok := cm.Data[ConfigMapRamenConfigKeyName]; !ok {
+		logger.Info("ConfigMap does not contain the expected key", "ConfigMapName", cm.Name, "Key", ConfigMapRamenConfigKeyName)
 		return []reconcile.Request{}
 	}
 	ramenConfig := &ramendrv1alpha1.RamenConfig{}
 	err := yaml.Unmarshal([]byte(cm.Data[ConfigMapRamenConfigKeyName]), ramenConfig)
 	if err != nil {
+		logger.Error("Failed to unmarshal RamenConfig from ConfigMap", "error", err, "ConfigMapName", cm.Name)
 		return []reconcile.Request{}
 	}
-	secrets := &corev1.SecretList{}
 	internalSecretLabel, err := labels.NewRequirement(utils.SecretLabelTypeKey, selection.Equals, []string{string(utils.InternalLabel)})
 	if err != nil {
-		klog.Error(err, "cannot parse new requirement")
+		logger.Error("Failed to create label requirement for internal secrets", "error", err)
 		return []reconcile.Request{}
 	}
 	internalSecretSelector := labels.NewSelector().Add(*internalSecretLabel)
 	listOpts := &client.ListOptions{
-		Namespace:     "", //All Namespaces
 		LabelSelector: internalSecretSelector,
 	}
-	if err := r.Client.List(context.TODO(), secrets, listOpts); err != nil {
+	secrets := &corev1.SecretList{}
+	if err := r.Client.List(ctx, secrets, listOpts); err != nil {
+		logger.Error("Failed to list secrets based on label selector", "error", err, "Selector", internalSecretSelector)
 		return []reconcile.Request{}
 	}
@@ -180,5 +205,6 @@ func (r *MirrorPeerSecretReconciler) secretConfigMapFunc(ctx context.Context, ob
 		requests[i].Namespace = secret.GetNamespace()
 	}
 
+	logger.Info("Generated reconcile requests from internal secrets", "NumberOfRequests", len(requests))
 	return requests
 }
diff --git a/controllers/named_peerref_with_data.go b/controllers/named_peerref_with_data.go
index fc3d7333..07fb9e22 100644
--- a/controllers/named_peerref_with_data.go
+++ b/controllers/named_peerref_with_data.go
@@ -3,6 +3,7 @@ package controllers
 import (
 	"context"
 	"errors"
+	"log/slog"
 	"reflect"
 
 	multiclusterv1alpha1 "github.com/red-hat-storage/odf-multicluster-orchestrator/api/v1alpha1"
@@ -12,7 +13,6 @@ import (
 	"k8s.io/apimachinery/pkg/types"
 	"sigs.k8s.io/controller-runtime/pkg/client"
 	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
-	"sigs.k8s.io/controller-runtime/pkg/log"
 	"sigs.k8s.io/controller-runtime/pkg/reconcile"
 )
@@ -103,33 +103,37 @@ func (nPR *NamedPeerRefWithSecretData) GetAssociatedSecret(ctx context.Context,
 }
 
 // CreateOrUpdateDestinationSecret creates/updates the destination secret from NamedPeerRefWithSecretData object
-func (nPR *NamedPeerRefWithSecretData) CreateOrUpdateDestinationSecret(ctx context.Context, rc client.Client) error {
+func (nPR *NamedPeerRefWithSecretData) CreateOrUpdateDestinationSecret(ctx context.Context, rc client.Client, logger *slog.Logger) error {
 	err := nPR.ErrorOnNilReceiver()
 	if err != nil {
+		logger.Error("Receiver is nil", "error", err)
 		return err
 	}
-	logger := log.FromContext(ctx)
 	expectedDest := nPR.GenerateSecret(utils.DestinationLabel)
 	var currentDest corev1.Secret
 	err = nPR.GetAssociatedSecret(ctx, rc, &currentDest)
 	if err != nil {
 		if k8serrors.IsNotFound(err) {
-			logger.Info("Creating destination secret", "secret", expectedDest.Name, "namespace", expectedDest.Namespace)
+			logger.Info("Creating destination secret", "SecretName", expectedDest.Name, "Namespace", expectedDest.Namespace)
 			return rc.Create(ctx, expectedDest)
 		}
-		logger.Error(err, "Unable to get the destination secret", "destination-ref", nPR.PeerRef)
+		logger.Error("Unable to get the destination secret", "DestinationRef", nPR.PeerRef, "error", err)
 		return err
 	}
-	// recieved a destination secret, now compare
 	if !reflect.DeepEqual(expectedDest.Data, currentDest.Data) {
-		logger.Info("Updating the destination secret", "secret", currentDest.Name, "namespace", currentDest.Namespace)
+		logger.Info("Updating the destination secret", "SecretName", currentDest.Name, "Namespace", currentDest.Namespace)
 		_, err := controllerutil.CreateOrUpdate(ctx, rc, &currentDest, func() error {
 			currentDest.Data = expectedDest.Data
 			return nil
 		})
+		if err != nil {
+			logger.Error("Failed to update destination secret", "SecretName", currentDest.Name, "Namespace", currentDest.Namespace, "error", err)
+		}
 		return err
 	}
+
+	logger.Info("Destination secret is up-to-date", "SecretName", currentDest.Name, "Namespace", currentDest.Namespace)
 	return nil
 }
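Note (not part of this patch): the changes above thread a *slog.Logger through each reconciler instead of pulling a logger from the request context, so the Logger field has to be populated when the controllers are registered. Below is a minimal sketch of how that wiring might look at operator start-up, assuming a Go 1.21+ toolchain; the manager options, the omitted scheme registration, and the rootLogger name are illustrative only and do not reflect the repository's actual main.go.

package main

import (
	"log/slog"
	"os"

	"github.com/red-hat-storage/odf-multicluster-orchestrator/controllers"
	ctrl "sigs.k8s.io/controller-runtime"
)

func main() {
	// Structured JSON logs on stdout; the level could come from a flag instead.
	rootLogger := slog.New(slog.NewJSONHandler(os.Stdout, &slog.HandlerOptions{Level: slog.LevelInfo}))

	// Scheme registration and other manager options are elided in this sketch.
	mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{})
	if err != nil {
		rootLogger.Error("Failed to create manager", "error", err)
		os.Exit(1)
	}

	// Each reconciler receives a child logger tagged with its controller name,
	// which is what the r.Logger calls in the diff above rely on.
	if err := (&controllers.MirrorPeerSecretReconciler{
		Client: mgr.GetClient(),
		Scheme: mgr.GetScheme(),
		Logger: rootLogger.With("controller", "MirrorPeerSecret"),
	}).SetupWithManager(mgr); err != nil {
		rootLogger.Error("Failed to set up MirrorPeerSecret controller", "error", err)
		os.Exit(1)
	}

	if err := mgr.Start(ctrl.SetupSignalHandler()); err != nil {
		rootLogger.Error("Manager exited with error", "error", err)
		os.Exit(1)
	}
}

The same pattern would presumably apply to the MirrorPeerReconciler, whose r.Logger usage appears throughout the first file in this patch.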