Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

create StorageCluster peer token secret on the hub #228

Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
58 changes: 53 additions & 5 deletions addons/agent_mirrorpeer_controller.go
Original file line number Diff line number Diff line change
Expand Up @@ -20,6 +20,7 @@ import (
"context"
"crypto/sha1"
"encoding/hex"
"encoding/json"
"fmt"
"log/slog"
"strconv"
Expand All @@ -45,11 +46,12 @@ import (

// MirrorPeerReconciler reconciles a MirrorPeer object
type MirrorPeerReconciler struct {
HubClient client.Client
Scheme *runtime.Scheme
SpokeClient client.Client
SpokeClusterName string
Logger *slog.Logger
HubClient client.Client
Scheme *runtime.Scheme
SpokeClient client.Client
SpokeClusterName string
OdfOperatorNamespace string
Logger *slog.Logger
}

// Reconcile is part of the main kubernetes reconciliation loop which aims to
Expand Down Expand Up @@ -156,6 +158,52 @@ func (r *MirrorPeerReconciler) Reconcile(ctx context.Context, req ctrl.Request)
return ctrl.Result{}, fmt.Errorf("few failures occurred while labeling RBD StorageClasses: %v", errs)
}
}

if mirrorPeer.Spec.Type == multiclusterv1alpha1.Async {
if mirrorPeer.Status.Phase == multiclusterv1alpha1.ExchangedSecret {
logger.Info("Cleaning up stale onboarding token", "Token", string(mirrorPeer.GetUID()))
err = deleteStorageClusterPeerTokenSecret(ctx, r.HubClient, r.SpokeClusterName, string(mirrorPeer.GetUID()))
if err != nil {
return ctrl.Result{}, err
}
return ctrl.Result{}, nil
}
if mirrorPeer.Status.Phase == multiclusterv1alpha1.ExchangingSecret {
var token corev1.Secret
err = r.HubClient.Get(ctx, types.NamespacedName{Namespace: r.SpokeClusterName, Name: string(mirrorPeer.GetUID())}, &token)
if err != nil && !errors.IsNotFound(err) {
return ctrl.Result{}, err
}
if err == nil {
type OnboardingTicket struct {
ID string `json:"id"`
ExpirationDate int64 `json:"expirationDate,string"`
StorageQuotaInGiB uint `json:"storageQuotaInGiB,omitempty"`
}
var ticketData OnboardingTicket
err = json.Unmarshal(token.Data["storagecluster-peer-token"], &ticketData)
umangachapagain marked this conversation as resolved.
Show resolved Hide resolved
if err != nil {
return ctrl.Result{}, fmt.Errorf("failed to unmarshal onboarding ticket message. %w", err)
}
if ticketData.ExpirationDate > time.Now().Unix() {
logger.Info("Onboarding token has not expired yet. Not renewing it.", "Token", token.Name, "ExpirationDate", ticketData.ExpirationDate)
return ctrl.Result{}, nil
}
logger.Info("Onboarding token has expired. Deleting it", "Token", token.Name)
err = deleteStorageClusterPeerTokenSecret(ctx, r.HubClient, r.SpokeClusterName, string(mirrorPeer.GetUID()))
if err != nil {
return ctrl.Result{}, err
}
}
logger.Info("Creating a new onboarding token", "Token", token.Name)
err = createStorageClusterPeerTokenSecret(ctx, r.HubClient, r.Scheme, r.SpokeClusterName, r.OdfOperatorNamespace, mirrorPeer, scr)
if err != nil {
logger.Error("Failed to create StorageCluster peer token on the hub.", "error", err)
return ctrl.Result{}, err
}
}
}

return ctrl.Result{}, nil
}

Expand Down
13 changes: 8 additions & 5 deletions addons/manager.go
Original file line number Diff line number Diff line change
Expand Up @@ -86,6 +86,7 @@ type AddonAgentOptions struct {
ProbeAddr string
HubKubeconfigFile string
SpokeClusterName string
OdfOperatorNamespace string
DRMode string
DevMode bool
}
Expand All @@ -99,6 +100,7 @@ func (o *AddonAgentOptions) AddFlags(cmd *cobra.Command) {
"Enabling this will ensure there is only one active controller manager.")
flags.StringVar(&o.HubKubeconfigFile, "hub-kubeconfig", o.HubKubeconfigFile, "Location of kubeconfig file to connect to hub cluster.")
flags.StringVar(&o.SpokeClusterName, "cluster-name", o.SpokeClusterName, "Name of spoke cluster.")
flags.StringVar(&o.OdfOperatorNamespace, "odf-operator-namespace", o.OdfOperatorNamespace, "Namespace of ODF operator on the spoke cluster.")
flags.StringVar(&o.DRMode, "mode", o.DRMode, "The DR mode of token exchange addon. Valid values are: 'sync', 'async'")
flags.BoolVar(&o.DevMode, "dev", false, "Set to true for dev environment (Text logging)")
}
Expand Down Expand Up @@ -184,11 +186,12 @@ func runHubManager(ctx context.Context, options AddonAgentOptions, logger *slog.
}

if err = (&MirrorPeerReconciler{
Scheme: mgr.GetScheme(),
HubClient: mgr.GetClient(),
SpokeClient: spokeClient,
SpokeClusterName: options.SpokeClusterName,
Logger: logger.With("controller", "MirrorPeerReconciler"),
Scheme: mgr.GetScheme(),
HubClient: mgr.GetClient(),
SpokeClient: spokeClient,
SpokeClusterName: options.SpokeClusterName,
OdfOperatorNamespace: options.OdfOperatorNamespace,
Logger: logger.With("controller", "MirrorPeerReconciler"),
}).SetupWithManager(mgr); err != nil {
logger.Error("Failed to create MirrorPeer controller", "controller", "MirrorPeer", "error", err)
os.Exit(1)
Expand Down
116 changes: 116 additions & 0 deletions addons/onboarding_token.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,116 @@
package addons

import (
"context"
"crypto/tls"
"fmt"
"io"
"net/http"
"os"
"time"

"github.com/red-hat-storage/odf-multicluster-orchestrator/addons/setup"
"github.com/red-hat-storage/odf-multicluster-orchestrator/api/v1alpha1"
multiclusterv1alpha1 "github.com/red-hat-storage/odf-multicluster-orchestrator/api/v1alpha1"
"github.com/red-hat-storage/odf-multicluster-orchestrator/controllers/utils"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
)

func requestStorageClusterPeerToken(ctx context.Context, proxyServiceNamespace string) (string, error) {
token, err := os.ReadFile("/var/run/secrets/kubernetes.io/serviceaccount/token")
if err != nil {
return "", fmt.Errorf("failed to read token: %w", err)
}
url := fmt.Sprintf("https://ux-backend-proxy.%s.svc.cluster.local:8888/onboarding-tokens", proxyServiceNamespace)
client := &http.Client{
Timeout: 30 * time.Second,
Transport: &http.Transport{
TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Are we planning to use it without certificate verification ? Might raise CVE bug later on.

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Yeah we'll see about that later.

},
}

req, err := http.NewRequestWithContext(ctx, "POST", url, nil)
if err != nil {
return "", fmt.Errorf("failed to create http request: %w", err)
}

req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", string(token)))

resp, err := client.Do(req)
if err != nil {
return "", fmt.Errorf("http request failed: %w", err)
}
defer resp.Body.Close()

body, err := io.ReadAll(resp.Body)
if err != nil {
return "", fmt.Errorf("failed to read http response body: %w", err)
}

if resp.StatusCode != http.StatusOK {
return "", fmt.Errorf("unexpected status code: %s", http.StatusText(resp.StatusCode))
}

return string(body), nil
}

// createStorageClusterPeerTokenSecret creates a secret on the hub, in the
// managed cluster's namespace, carrying a freshly requested StorageCluster
// peer (onboarding) token for the given MirrorPeer. The secret is named after
// the MirrorPeer UID and owned by the MirrorPeer. An AlreadyExists error is
// returned when a secret with that name is already present.
func createStorageClusterPeerTokenSecret(ctx context.Context, client client.Client, scheme *runtime.Scheme, spokeClusterName string, odfOperatorNamespace string, mirrorPeer multiclusterv1alpha1.MirrorPeer, storageClusterRef *v1alpha1.StorageClusterRef) error {
	secretName := string(mirrorPeer.GetUID())

	// Never overwrite an existing token secret; surface it as AlreadyExists
	// so the caller can decide whether to clean up first.
	_, fetchErr := utils.FetchSecretWithName(ctx, client, types.NamespacedName{Namespace: spokeClusterName, Name: secretName})
	switch {
	case fetchErr == nil:
		return errors.NewAlreadyExists(corev1.Resource("secret"), secretName)
	case !errors.IsNotFound(fetchErr):
		return fmt.Errorf("failed to get secret %s/%s: %w", spokeClusterName, secretName, fetchErr)
	}

	onboardingToken, err := requestStorageClusterPeerToken(ctx, odfOperatorNamespace)
	if err != nil {
		return fmt.Errorf("unable to generate StorageClusterPeer token. %w", err)
	}

	secret := &corev1.Secret{
		ObjectMeta: metav1.ObjectMeta{
			Name:      secretName,
			Namespace: spokeClusterName,
			Labels: map[string]string{
				utils.CreatedByLabelKey:  setup.TokenExchangeName,
				utils.SecretLabelTypeKey: string(utils.InternalLabel),
				utils.HubRecoveryLabel:   "",
			},
		},
		Data: map[string][]byte{
			utils.NamespaceKey:          []byte(storageClusterRef.Namespace),
			utils.StorageClusterNameKey: []byte(storageClusterRef.Name),
			utils.SecretDataKey:         []byte(onboardingToken),
		},
	}

	// Tie the secret's lifecycle to the MirrorPeer so it is garbage collected
	// when the MirrorPeer is deleted.
	if err := controllerutil.SetOwnerReference(&mirrorPeer, secret, scheme); err != nil {
		return fmt.Errorf("failed to set owner reference for secret %s/%s: %w", spokeClusterName, secretName, err)
	}

	return client.Create(ctx, secret)
}

// deleteStorageClusterPeerTokenSecret removes the StorageCluster peer token
// secret identified by tokenNamespace/tokenName from the hub. A secret that
// is already gone is not treated as an error, so the call is idempotent.
func deleteStorageClusterPeerTokenSecret(ctx context.Context, client client.Client, tokenNamespace string, tokenName string) error {
	secret := corev1.Secret{
		ObjectMeta: metav1.ObjectMeta{
			Namespace: tokenNamespace,
			Name:      tokenName,
		},
	}
	if err := client.Delete(ctx, &secret); err != nil && !errors.IsNotFound(err) {
		return err
	}
	return nil
}
13 changes: 13 additions & 0 deletions addons/setup/addon_setup.go
Original file line number Diff line number Diff line change
Expand Up @@ -84,17 +84,30 @@ func (a *Addons) Manifests(cluster *clusterv1.ManagedCluster, addon *addonapiv1a
groups := agent.DefaultGroups(cluster.Name, a.AddonName)
user := agent.DefaultUser(cluster.Name, a.AddonName, a.AddonName)

var odfOperatorNamespace string
if utils.HasRequiredODFKey(cluster) {
odfOperatorNamespacedName, err := utils.GetNamespacedNameForClusterInfo(*cluster)
if err != nil {
return objects, fmt.Errorf("error while getting ODF operator namespace on the spoke cluster %q. %w", cluster.Name, err)
}
odfOperatorNamespace = odfOperatorNamespacedName.Namespace
} else {
return objects, fmt.Errorf("error while getting ODF operator namespace on the spoke cluster %q. Expected ClusterClaim does not exist", cluster.Name)
}

Comment on lines +87 to +97
Copy link
Member

@vbnrh vbnrh Aug 2, 2024

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Why not just use addon.Spec.InstallNamespace ? We are always creating the addons in ODF's namespace

Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Even for Provider consumer cases, We will be doing the same. Fetching the namespace again would be redundant operation
https://github.com/red-hat-storage/odf-multicluster-orchestrator/pull/221/files#diff-71a31664c2a326d791c6040801a670cc01383300df3e4378f843bb619d2ea4e8R410-R440

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

You are assuming that addon install namespace will be same as ODF operator namespace. That might not always be true. Instead I am allowing a way to change it dynamically.

Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

@umanga I feel that fetching ODF namespace does not make sense in the manifests func for setting up addons.

Can we make it so that it is calculated by the MirrorPeer and sent further as a configuration option?

If there's no time, then we can merge it and design later.

manifestConfig := struct {
KubeConfigSecret string
ClusterName string
AddonInstallNamespace string
OdfOperatorNamespace string
Image string
DRMode string
Group string
User string
}{
KubeConfigSecret: fmt.Sprintf("%s-hub-kubeconfig", a.AddonName),
AddonInstallNamespace: installNamespace,
OdfOperatorNamespace: odfOperatorNamespace,
ClusterName: cluster.Name,
Image: a.AgentImage,
DRMode: addon.Annotations[utils.DRModeAnnotationKey],
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -14,7 +14,7 @@ rules:
verbs: ["get", "list", "watch", "update"]
- apiGroups: ["ocs.openshift.io"]
resources: ["storageclusters"]
verbs: ["get", "list", "watch", "update"]
verbs: ["get", "list", "watch", "create", "update"]
umangachapagain marked this conversation as resolved.
Show resolved Hide resolved
- apiGroups: ["objectbucket.io"]
resources: ["objectbucketclaims"]
verbs: ["get", "create", "list", "watch", "delete"]
Expand Down
1 change: 1 addition & 0 deletions addons/setup/tokenexchange-manifests/spoke_deployment.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -44,6 +44,7 @@ spec:
- "addons"
- "--hub-kubeconfig=/var/run/hub/kubeconfig"
- "--cluster-name={{ .ClusterName }}"
- "--odf-operator-namespace={{ .OdfOperatorNamespace }}"
- "--mode={{ .DRMode }}"
volumeMounts:
- name: hub-config
Expand Down
37 changes: 3 additions & 34 deletions controllers/managedcluster_controller.go
Original file line number Diff line number Diff line change
Expand Up @@ -4,13 +4,11 @@ import (
"context"
"fmt"
"log/slog"
"strings"

"github.com/red-hat-storage/odf-multicluster-orchestrator/controllers/utils"
viewv1beta1 "github.com/stolostron/multicloud-operators-foundation/pkg/apis/view/v1beta1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
clusterv1 "open-cluster-management.io/api/cluster/v1"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/builder"
Expand All @@ -25,10 +23,6 @@ type ManagedClusterReconciler struct {
Logger *slog.Logger
}

const (
OdfInfoClusterClaimNamespacedName = "odfinfo.odf.openshift.io"
)

func (r *ManagedClusterReconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) {
logger := r.Logger.With("ManagedCluster", req.NamespacedName)
logger.Info("Reconciling ManagedCluster")
Expand All @@ -51,16 +45,6 @@ func (r *ManagedClusterReconciler) Reconcile(ctx context.Context, req reconcile.
return ctrl.Result{}, nil
}

func hasRequiredODFKey(mc *clusterv1.ManagedCluster) bool {
claims := mc.Status.ClusterClaims
for _, claim := range claims {
if claim.Name == OdfInfoClusterClaimNamespacedName {
return true
}
}
return false

}
func (r *ManagedClusterReconciler) SetupWithManager(mgr ctrl.Manager) error {
r.Logger.Info("Setting up ManagedClusterReconciler with manager")
managedClusterPredicate := predicate.Funcs{
Expand All @@ -69,14 +53,14 @@ func (r *ManagedClusterReconciler) SetupWithManager(mgr ctrl.Manager) error {
if !ok {
return false
}
return hasRequiredODFKey(obj)
return utils.HasRequiredODFKey(obj)
},
CreateFunc: func(e event.CreateEvent) bool {
obj, ok := e.Object.(*clusterv1.ManagedCluster)
if !ok {
return false
}
return hasRequiredODFKey(obj)
return utils.HasRequiredODFKey(obj)
},
}

Expand All @@ -89,7 +73,7 @@ func (r *ManagedClusterReconciler) SetupWithManager(mgr ctrl.Manager) error {

func (r *ManagedClusterReconciler) processManagedClusterViews(ctx context.Context, managedCluster clusterv1.ManagedCluster) error {
resourceType := "ConfigMap"
odfInfoConfigMapNamespacedName, err := getNamespacedNameForClusterInfo(managedCluster)
odfInfoConfigMapNamespacedName, err := utils.GetNamespacedNameForClusterInfo(managedCluster)
if err != nil {
return fmt.Errorf("error while getting NamespacedName of the %s. %w", resourceType, err)
}
Expand All @@ -114,18 +98,3 @@ func (r *ManagedClusterReconciler) processManagedClusterViews(ctx context.Contex

return nil
}

func getNamespacedNameForClusterInfo(managedCluster clusterv1.ManagedCluster) (types.NamespacedName, error) {
clusterClaims := managedCluster.Status.ClusterClaims
for _, claim := range clusterClaims {
if claim.Name == OdfInfoClusterClaimNamespacedName {
namespacedName := strings.Split(claim.Value, "/")
if len(namespacedName) != 2 {
return types.NamespacedName{}, fmt.Errorf("invalid format for namespaced name claim: expected 'namespace/name', got '%s'", claim.Value)
}
return types.NamespacedName{Namespace: namespacedName[0], Name: namespacedName[1]}, nil
}
}

return types.NamespacedName{}, fmt.Errorf("cannot find ClusterClaim %q in ManagedCluster status", OdfInfoClusterClaimNamespacedName)
}
Loading
Loading