From 7092b00db74995c6197dd244c4ee40880bdb14e5 Mon Sep 17 00:00:00 2001 From: "Jose A. Rivera" Date: Fri, 3 Jun 2022 14:26:28 -0500 Subject: [PATCH] controllers: add StorageClassClaim api and controller Signed-off-by: Jose A. Rivera --- PROJECT | 9 + api/v1alpha1/storageclassclaim_types.go | 81 +++ api/v1alpha1/zz_generated.deepcopy.go | 89 ++++ ...client-operator.clusterserviceversion.yaml | 53 ++ .../odf.openshift.io_storageclassclaims.yaml | 70 +++ .../odf.openshift.io_storageclassclaims.yaml | 72 +++ config/crd/kustomization.yaml | 1 + ...client-operator.clusterserviceversion.yaml | 5 + config/rbac/role.yaml | 40 ++ config/samples/kustomization.yaml | 1 + .../odf_v1alpha1_storageclassclaim.yaml | 6 + controllers/ocsclient_controller.go | 56 +- controllers/storageclassclaim_controller.go | 484 ++++++++++++++++++ go.mod | 4 +- main.go | 7 + 15 files changed, 975 insertions(+), 3 deletions(-) create mode 100644 api/v1alpha1/storageclassclaim_types.go create mode 100644 bundle/manifests/odf.openshift.io_storageclassclaims.yaml create mode 100644 config/crd/bases/odf.openshift.io_storageclassclaims.yaml create mode 100644 config/samples/odf_v1alpha1_storageclassclaim.yaml create mode 100644 controllers/storageclassclaim_controller.go diff --git a/PROJECT b/PROJECT index 82b84293c..595b8e43b 100644 --- a/PROJECT +++ b/PROJECT @@ -16,4 +16,13 @@ resources: kind: OcsClient path: github.com/red-hat-storage/ocs-client-operator/api/v1alpha1 version: v1alpha1 +- api: + crdVersion: v1 + namespaced: true + controller: true + domain: openshift.io + group: odf + kind: StorageClassClaim + path: github.com/red-hat-storage/ocs-client-operator/api/v1alpha1 + version: v1alpha1 version: "3" diff --git a/api/v1alpha1/storageclassclaim_types.go b/api/v1alpha1/storageclassclaim_types.go new file mode 100644 index 000000000..f03568222 --- /dev/null +++ b/api/v1alpha1/storageclassclaim_types.go @@ -0,0 +1,81 @@ +/* +Copyright 2020 Red Hat, Inc. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized. + +// StorageClassClaimSpec defines the desired state of StorageClassClaim +type StorageClassClaimSpec struct { + //+kubebuilder:validation:Enum=blockpool;sharedfilesystem + Type string `json:"type"` + EncryptionMethod string `json:"encryptionMethod,omitempty"` +} + +type storageClassClaimState string + +const ( + // StorageClassClaimInitializing represents Initializing state of StorageClassClaim + StorageClassClaimInitializing storageClassClaimState = "Initializing" + // StorageClassClaimValidating represents Validating state of StorageClassClaim + StorageClassClaimValidating storageClassClaimState = "Validating" + // StorageClassClaimFailed represents Failed state of StorageClassClaim + StorageClassClaimFailed storageClassClaimState = "Failed" + // StorageClassClaimCreating represents Creating state of StorageClassClaim + StorageClassClaimCreating storageClassClaimState = "Creating" + // StorageClassClaimConfiguring represents Configuring state of StorageClassClaim + StorageClassClaimConfiguring storageClassClaimState = "Configuring" + // StorageClassClaimReady represents Ready state of StorageClassClaim + StorageClassClaimReady storageClassClaimState = "Ready" + // StorageClassClaimDeleting represents Deleting state of StorageClassClaim + 
StorageClassClaimDeleting storageClassClaimState = "Deleting" +) + +// StorageClassClaimStatus defines the observed state of StorageClassClaim +type StorageClassClaimStatus struct { + Phase storageClassClaimState `json:"phase,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:printcolumn:name="StorageType",type="string",JSONPath=".spec.type" +// +kubebuilder:printcolumn:name="Phase",type="string",JSONPath=".status.phase" + +// StorageClassClaim is the Schema for the storageclassclaims API +type StorageClassClaim struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec StorageClassClaimSpec `json:"spec,omitempty"` + Status StorageClassClaimStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// StorageClassClaimList contains a list of StorageClassClaim +type StorageClassClaimList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []StorageClassClaim `json:"items"` +} + +func init() { + SchemeBuilder.Register(&StorageClassClaim{}, &StorageClassClaimList{}) +} diff --git a/api/v1alpha1/zz_generated.deepcopy.go b/api/v1alpha1/zz_generated.deepcopy.go index aefc651d9..ebe018307 100644 --- a/api/v1alpha1/zz_generated.deepcopy.go +++ b/api/v1alpha1/zz_generated.deepcopy.go @@ -119,3 +119,92 @@ func (in *OcsClientStatus) DeepCopy() *OcsClientStatus { in.DeepCopyInto(out) return out } + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StorageClassClaim) DeepCopyInto(out *StorageClassClaim) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + out.Status = in.Status +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageClassClaim. 
+func (in *StorageClassClaim) DeepCopy() *StorageClassClaim { + if in == nil { + return nil + } + out := new(StorageClassClaim) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *StorageClassClaim) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StorageClassClaimList) DeepCopyInto(out *StorageClassClaimList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]StorageClassClaim, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageClassClaimList. +func (in *StorageClassClaimList) DeepCopy() *StorageClassClaimList { + if in == nil { + return nil + } + out := new(StorageClassClaimList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *StorageClassClaimList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StorageClassClaimSpec) DeepCopyInto(out *StorageClassClaimSpec) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageClassClaimSpec. 
+func (in *StorageClassClaimSpec) DeepCopy() *StorageClassClaimSpec { + if in == nil { + return nil + } + out := new(StorageClassClaimSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StorageClassClaimStatus) DeepCopyInto(out *StorageClassClaimStatus) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageClassClaimStatus. +func (in *StorageClassClaimStatus) DeepCopy() *StorageClassClaimStatus { + if in == nil { + return nil + } + out := new(StorageClassClaimStatus) + in.DeepCopyInto(out) + return out +} diff --git a/bundle/manifests/ocs-client-operator.clusterserviceversion.yaml b/bundle/manifests/ocs-client-operator.clusterserviceversion.yaml index 8f5933103..9609ef926 100644 --- a/bundle/manifests/ocs-client-operator.clusterserviceversion.yaml +++ b/bundle/manifests/ocs-client-operator.clusterserviceversion.yaml @@ -11,6 +11,14 @@ metadata: "name": "ocsclient-sample" }, "spec": null + }, + { + "apiVersion": "odf.openshift.io/v1alpha1", + "kind": "StorageClassClaim", + "metadata": { + "name": "storageclassclaim-sample" + }, + "spec": null } ] capabilities: Basic Install @@ -29,6 +37,11 @@ spec: kind: OcsClient name: ocsclients.odf.openshift.io version: v1alpha1 + - description: StorageClassClaim is the Schema for the storageclassclaims API + displayName: Storage Class Claim + kind: StorageClassClaim + name: storageclassclaims.odf.openshift.io + version: v1alpha1 description: foo displayName: OpenShift Data Foundation Client Operator icon: @@ -38,6 +51,14 @@ spec: spec: clusterPermissions: - rules: + - apiGroups: + - "" + resources: + - persistentvolumes + verbs: + - get + - list + - watch - apiGroups: - odf.openshift.io resources: @@ -64,6 +85,38 @@ spec: - get - patch - update + - apiGroups: + - odf.openshift.io + resources: + - storageclassclaims + verbs: + - create + - delete + 
- get + - list + - patch + - update + - watch + - apiGroups: + - odf.openshift.io + resources: + - storageclassclaims/status + verbs: + - get + - patch + - update + - apiGroups: + - storage.k8s.io + resources: + - storageclasses + verbs: + - create + - delete + - get + - list + - patch + - update + - watch - apiGroups: - authentication.k8s.io resources: diff --git a/bundle/manifests/odf.openshift.io_storageclassclaims.yaml b/bundle/manifests/odf.openshift.io_storageclassclaims.yaml new file mode 100644 index 000000000..b53da8cd6 --- /dev/null +++ b/bundle/manifests/odf.openshift.io_storageclassclaims.yaml @@ -0,0 +1,70 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.4.1 + creationTimestamp: null + name: storageclassclaims.odf.openshift.io +spec: + group: odf.openshift.io + names: + kind: StorageClassClaim + listKind: StorageClassClaimList + plural: storageclassclaims + singular: storageclassclaim + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .spec.type + name: StorageType + type: string + - jsonPath: .status.phase + name: Phase + type: string + name: v1alpha1 + schema: + openAPIV3Schema: + description: StorageClassClaim is the Schema for the storageclassclaims API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: StorageClassClaimSpec defines the desired state of StorageClassClaim + properties: + encryptionMethod: + type: string + type: + enum: + - blockpool + - sharedfilesystem + type: string + required: + - type + type: object + status: + description: StorageClassClaimStatus defines the observed state of StorageClassClaim + properties: + phase: + type: string + type: object + type: object + served: true + storage: true + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/config/crd/bases/odf.openshift.io_storageclassclaims.yaml b/config/crd/bases/odf.openshift.io_storageclassclaims.yaml new file mode 100644 index 000000000..7ad26854a --- /dev/null +++ b/config/crd/bases/odf.openshift.io_storageclassclaims.yaml @@ -0,0 +1,72 @@ + +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.4.1 + creationTimestamp: null + name: storageclassclaims.odf.openshift.io +spec: + group: odf.openshift.io + names: + kind: StorageClassClaim + listKind: StorageClassClaimList + plural: storageclassclaims + singular: storageclassclaim + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .spec.type + name: StorageType + type: string + - jsonPath: .status.phase + name: Phase + type: string + name: v1alpha1 + schema: + openAPIV3Schema: + description: StorageClassClaim is the Schema for the storageclassclaims API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: StorageClassClaimSpec defines the desired state of StorageClassClaim + properties: + encryptionMethod: + type: string + type: + enum: + - blockpool + - sharedfilesystem + type: string + required: + - type + type: object + status: + description: StorageClassClaimStatus defines the observed state of StorageClassClaim + properties: + phase: + type: string + type: object + type: object + served: true + storage: true + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/config/crd/kustomization.yaml b/config/crd/kustomization.yaml index 2997fd438..c3cfc8fb1 100644 --- a/config/crd/kustomization.yaml +++ b/config/crd/kustomization.yaml @@ -3,6 +3,7 @@ # It should be run by config/default resources: - bases/odf.openshift.io_ocsclients.yaml +- bases/odf.openshift.io_storageclassclaims.yaml #+kubebuilder:scaffold:crdkustomizeresource patchesStrategicMerge: diff --git a/config/manifests/bases/ocs-client-operator.clusterserviceversion.yaml b/config/manifests/bases/ocs-client-operator.clusterserviceversion.yaml index 97251cf1b..36133ffd5 100644 --- a/config/manifests/bases/ocs-client-operator.clusterserviceversion.yaml +++ b/config/manifests/bases/ocs-client-operator.clusterserviceversion.yaml @@ -15,6 +15,11 @@ spec: kind: OcsClient name: ocsclients.odf.openshift.io version: v1alpha1 + - description: StorageClassClaim is the Schema for the storageclassclaims API + displayName: Storage Class Claim + kind: 
StorageClassClaim + name: storageclassclaims.odf.openshift.io + version: v1alpha1 description: foo displayName: OpenShift Data Foundation Client icon: diff --git a/config/rbac/role.yaml b/config/rbac/role.yaml index 0f19f88ff..100be814e 100644 --- a/config/rbac/role.yaml +++ b/config/rbac/role.yaml @@ -6,6 +6,14 @@ metadata: creationTimestamp: null name: manager-role rules: +- apiGroups: + - "" + resources: + - persistentvolumes + verbs: + - get + - list + - watch - apiGroups: - odf.openshift.io resources: @@ -32,3 +40,35 @@ rules: - get - patch - update +- apiGroups: + - odf.openshift.io + resources: + - storageclassclaims + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - odf.openshift.io + resources: + - storageclassclaims/status + verbs: + - get + - patch + - update +- apiGroups: + - storage.k8s.io + resources: + - storageclasses + verbs: + - create + - delete + - get + - list + - patch + - update + - watch diff --git a/config/samples/kustomization.yaml b/config/samples/kustomization.yaml index 147cd5fb4..64121b166 100644 --- a/config/samples/kustomization.yaml +++ b/config/samples/kustomization.yaml @@ -1,4 +1,5 @@ ## Append samples you want in your CSV to this file as resources ## resources: - odf_v1alpha1_ocsclient.yaml +- odf_v1alpha1_storageclassclaim.yaml #+kubebuilder:scaffold:manifestskustomizesamples diff --git a/config/samples/odf_v1alpha1_storageclassclaim.yaml b/config/samples/odf_v1alpha1_storageclassclaim.yaml new file mode 100644 index 000000000..c0c1e0562 --- /dev/null +++ b/config/samples/odf_v1alpha1_storageclassclaim.yaml @@ -0,0 +1,6 @@ +apiVersion: odf.openshift.io/v1alpha1 +kind: StorageClassClaim +metadata: + name: storageclassclaim-sample +spec: + # TODO(user): Add fields here diff --git a/controllers/ocsclient_controller.go b/controllers/ocsclient_controller.go index e1112d5ef..eb65d7384 100644 --- a/controllers/ocsclient_controller.go +++ b/controllers/ocsclient_controller.go @@ -30,11 
+30,13 @@ import ( "google.golang.org/grpc/status" "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" "k8s.io/klog/v2" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/reconcile" ) @@ -61,6 +63,8 @@ type OcsClientReconciler struct { //+kubebuilder:rbac:groups=odf.openshift.io,resources=ocsclients,verbs=get;list;watch;create;update;patch;delete //+kubebuilder:rbac:groups=odf.openshift.io,resources=ocsclients/status,verbs=get;update;patch //+kubebuilder:rbac:groups=odf.openshift.io,resources=ocsclients/finalizers,verbs=update +//+kubebuilder:rbac:groups=odf.openshift.io,resources=storageclassclaims,verbs=get;list;watch;create;update;patch;delete +//+kubebuilder:rbac:groups=odf.openshift.io,resources=storageclassclaims/status,verbs=get;update;patch // SetupWithManager sets up the controller with the Manager. func (r *OcsClientReconciler) SetupWithManager(mgr ctrl.Manager) error { @@ -388,8 +392,58 @@ func (r *OcsClientReconciler) logGrpcErrorAndReportEvent(instance *v1alpha1.OcsC } } -// TODO: claims should be created only once and should not be created/updated again if user deletes/update it. +func (r *OcsClientReconciler) createAndOwnStorageClassClaim(instance *v1alpha1.OcsClient, claim *v1alpha1.StorageClassClaim) error { + + err := controllerutil.SetOwnerReference(instance, claim, r.Client.Scheme()) + if err != nil { + return err + } + + err = r.Client.Create(context.TODO(), claim) + if err != nil && !errors.IsAlreadyExists(err) { + return err + } + return nil +} + +// claims should be created only once and should not be created/updated again if user deletes/update it. 
func (r *OcsClientReconciler) createDefaultStorageClassClaims(instance *v1alpha1.OcsClient) error { + storageClassClaimFile := &v1alpha1.StorageClassClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: generateNameForCephFilesystemSC(instance.Name), + Namespace: instance.Namespace, + Labels: map[string]string{ + //defaultStorageClassClaimLabel: "true", + }, + }, + Spec: v1alpha1.StorageClassClaimSpec{ + Type: "sharedfilesystem", + }, + } + + storageClassClaimBlock := &v1alpha1.StorageClassClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: generateNameForCephBlockPoolSC(instance.Name), + Namespace: instance.Namespace, + Labels: map[string]string{ + //defaultStorageClassClaimLabel: "true", + }, + }, + Spec: v1alpha1.StorageClassClaimSpec{ + Type: "blockpool", + }, + } + + err := r.createAndOwnStorageClassClaim(instance, storageClassClaimFile) + if err != nil { + return err + } + + err = r.createAndOwnStorageClassClaim(instance, storageClassClaimBlock) + if err != nil { + return err + } + return nil } diff --git a/controllers/storageclassclaim_controller.go b/controllers/storageclassclaim_controller.go new file mode 100644 index 000000000..95ffb06cf --- /dev/null +++ b/controllers/storageclassclaim_controller.go @@ -0,0 +1,484 @@ +/* +Copyright 2020 Red Hat, Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package controllers + +import ( + "context" + "encoding/json" + "fmt" + "reflect" + "strings" + "time" + + v1alpha1 "github.com/red-hat-storage/ocs-client-operator/api/v1alpha1" + + providerclient "github.com/red-hat-storage/ocs-operator/services/provider/client" + + "github.com/go-logr/logr" + corev1 "k8s.io/api/core/v1" + storagev1 "k8s.io/api/storage/v1" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "k8s.io/klog/v2" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/builder" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + "sigs.k8s.io/controller-runtime/pkg/handler" + ctrllog "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/predicate" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + "sigs.k8s.io/controller-runtime/pkg/source" +) + +// StorageClassClaimReconciler reconciles a StorageClassClaim object +// nolint +type StorageClassClaimReconciler struct { + client.Client + Scheme *runtime.Scheme + OperatorNamespace string + + log logr.Logger + ctx context.Context + ocsClient *v1alpha1.OcsClient + storageClassClaim *v1alpha1.StorageClassClaim +} + +const ( + StorageClassClaimFinalizer = "storageclassclaim.odf.openshift.io" + StorageClassClaimAnnotation = "odf.openshift.io/storagesclassclaim" +) + +// +kubebuilder:rbac:groups=odf.openshift.io,resources=storageclassclaims,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=odf.openshift.io,resources=storageclassclaims/status,verbs=get;update;patch +// +kubebuilder:rbac:groups=storage.k8s.io,resources=storageclasses,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=core,resources=persistentvolumes,verbs=get;list;watch + +func (r *StorageClassClaimReconciler) SetupWithManager(mgr ctrl.Manager) error { + 
enqueueStorageConsumerRequest := handler.EnqueueRequestsFromMapFunc( + func(obj client.Object) []reconcile.Request { + annotations := obj.GetAnnotations() + if annotation, found := annotations[StorageClassClaimAnnotation]; found { + parts := strings.Split(annotation, "/") + return []reconcile.Request{{ + NamespacedName: types.NamespacedName{ + Namespace: parts[0], + Name: parts[1], + }, + }} + } + return []reconcile.Request{} + }) + return ctrl.NewControllerManagedBy(mgr). + For(&v1alpha1.StorageClassClaim{}, builder.WithPredicates( + predicate.GenerationChangedPredicate{}, + )). + Watches(&source.Kind{Type: &storagev1.StorageClass{}}, enqueueStorageConsumerRequest). + Complete(r) +} + +func (r *StorageClassClaimReconciler) Reconcile(ctx context.Context, request reconcile.Request) (reconcile.Result, error) { + r.log = ctrllog.FromContext(ctx, "StorageClassClaim", request) + r.ctx = ctrllog.IntoContext(ctx, r.log) + r.log.Info("Reconciling StorageClassClaim.") + + // Fetch the StorageClassClaim instance + r.storageClassClaim = &v1alpha1.StorageClassClaim{} + r.storageClassClaim.Name = request.Name + r.storageClassClaim.Namespace = request.Namespace + + if err := r.get(r.storageClassClaim); err != nil { + if errors.IsNotFound(err) { + r.log.Info("StorageClassClaim resource not found. 
Ignoring since object must be deleted.") + return reconcile.Result{}, nil + } + r.log.Error(err, "Failed to get StorageClassClaim.") + return reconcile.Result{}, err + } + + r.storageClassClaim.Status.Phase = v1alpha1.StorageClassClaimInitializing + + ocsClientList := &v1alpha1.OcsClientList{} + if err := r.list(ocsClientList, client.InNamespace(r.OperatorNamespace)); err != nil { + return reconcile.Result{}, err + } + + switch l := len(ocsClientList.Items); { + case l == 0: + return reconcile.Result{}, fmt.Errorf("no OcsClient found") + case l != 1: + return reconcile.Result{}, fmt.Errorf("multiple OcsClient found") + } + r.ocsClient = &ocsClientList.Items[0] + + var result reconcile.Result + var reconcileError error + + // StorageCluster checks for required fields. + switch ocsClient := r.ocsClient; { + case ocsClient.Status.ConsumerID == "": + return reconcile.Result{}, fmt.Errorf("no external storage consumer id found on the " + + "OcsClient status, cannot determine mode") + case ocsClient.Spec.StorageProviderEndpoint == "": + return reconcile.Result{}, fmt.Errorf("no external storage provider endpoint found on the " + + "OcsClient spec, cannot determine mode") + } + + result, reconcileError = r.reconcileConsumerPhases() + + // Apply status changes to the StorageClassClaim + statusError := r.Client.Status().Update(r.ctx, r.storageClassClaim) + if statusError != nil { + r.log.Info("Failed to update StorageClassClaim status.") + } + + // Reconcile errors have higher priority than status update errors + if reconcileError != nil { + return result, reconcileError + } + + if statusError != nil { + return result, statusError + } + + return result, nil +} + +func (r *StorageClassClaimReconciler) reconcileConsumerPhases() (reconcile.Result, error) { + r.log.Info("Running StorageClassClaim controller in Consumer Mode") + + providerClient, err := providerclient.NewProviderClient( + r.ctx, + r.ocsClient.Spec.StorageProviderEndpoint, + 10*time.Second, + ) + if err != nil { 
+ return reconcile.Result{}, err + } + + // Close client-side connections. + defer providerClient.Close() + + if r.storageClassClaim.GetDeletionTimestamp().IsZero() { + + // TODO: Phases do not have checks at the moment, in order to make them more predictable and less error-prone, at the expense of increased computation cost. + // Validation phase. + r.storageClassClaim.Status.Phase = v1alpha1.StorageClassClaimValidating + + // If a StorageClass already exists: + // StorageClassClaim passes validation and is promoted to the configuring phase if: + // * the StorageClassClaim has the same type as the StorageClass. + // * the StorageClassClaim has no encryption method specified when the type is filesystem. + // * the StorageClassClaim has a blockpool type and: + // * the StorageClassClaim has an encryption method specified. + // * the StorageClassClaim has the same encryption method as the StorageClass. + // StorageClassClaim fails validation and falls back to a failed phase indefinitely (no reconciliation happens). 
+ existing := &storagev1.StorageClass{ + ObjectMeta: metav1.ObjectMeta{ + Name: r.storageClassClaim.Name, + }, + } + if err = r.get(existing); err == nil { + sccType := r.storageClassClaim.Spec.Type + sccEncryptionMethod := r.storageClassClaim.Spec.EncryptionMethod + _, scIsFSType := existing.Parameters["fsName"] + scEncryptionMethod, scHasEncryptionMethod := existing.Parameters["encryptionMethod"] + if !((sccType == "sharedfilesystem" && scIsFSType && !scHasEncryptionMethod) || + (sccType == "blockpool" && !scIsFSType && sccEncryptionMethod == scEncryptionMethod)) { + r.log.Error(fmt.Errorf("storageClassClaim is not compatible with existing StorageClass"), + "StorageClassClaim validation failed.") + r.storageClassClaim.Status.Phase = v1alpha1.StorageClassClaimFailed + return reconcile.Result{}, nil + } + } else if err != nil && !errors.IsNotFound(err) { + return reconcile.Result{}, fmt.Errorf("failed to get StorageClass [%v]: %s", existing.ObjectMeta, err) + } + + // Configuration phase. + r.storageClassClaim.Status.Phase = v1alpha1.StorageClassClaimConfiguring + + // Check if finalizers are present, if not, add them. + if !contains(r.storageClassClaim.GetFinalizers(), StorageClassClaimFinalizer) { + storageClassClaimRef := klog.KRef(r.storageClassClaim.Name, r.storageClassClaim.Namespace) + r.log.Info("Finalizer not found for StorageClassClaim. 
Adding finalizer.", "StorageClassClaim", storageClassClaimRef) + r.storageClassClaim.SetFinalizers(append(r.storageClassClaim.GetFinalizers(), StorageClassClaimFinalizer)) + if err := r.update(r.storageClassClaim); err != nil { + return reconcile.Result{}, fmt.Errorf("failed to update StorageClassClaim [%v] with finalizer: %s", storageClassClaimRef, err) + } + } + + // storageClassClaimStorageType is the storage type of the StorageClassClaim + var storageClassClaimStorageType providerclient.StorageType + switch r.storageClassClaim.Spec.Type { + case "blockpool": + storageClassClaimStorageType = providerclient.StorageTypeBlockpool + case "sharedfilesystem": + storageClassClaimStorageType = providerclient.StorageTypeSharedfilesystem + default: + return reconcile.Result{}, fmt.Errorf("unsupported storage type: %s", r.storageClassClaim.Spec.Type) + } + + // Call the `FulfillStorageClassClaim` service on the provider server with StorageClassClaim as a request message. + _, err = providerClient.FulfillStorageClassClaim( + r.ctx, + r.ocsClient.Status.ConsumerID, + r.storageClassClaim.Name, + r.storageClassClaim.Spec.EncryptionMethod, + storageClassClaimStorageType, + ) + if err != nil { + return reconcile.Result{}, fmt.Errorf("failed to initiate fulfillment of StorageClassClaim: %v", err) + } + + // Call the `GetStorageClassClaimConfig` service on the provider server with StorageClassClaim as a request message. + response, err := providerClient.GetStorageClassClaimConfig( + r.ctx, + r.ocsClient.Status.ConsumerID, + r.storageClassClaim.Name, + ) + if err != nil { + return reconcile.Result{}, fmt.Errorf("failed to get StorageClassClaim config: %v", err) + } + resources := response.ExternalResource + if resources == nil { + return reconcile.Result{}, fmt.Errorf("no configuration data received") + } + + // Go over the received objects and operate on them accordingly. 
+ for _, resource := range resources { + data := map[string]string{} + err = json.Unmarshal(resource.Data, &data) + if err != nil { + return reconcile.Result{}, fmt.Errorf("failed to unmarshal StorageClassClaim configuration response: %v", err) + } + + // Create the received resources, if necessary. + switch resource.Kind { + case "Secret": + secret := &corev1.Secret{} + secret.Name = resource.Name + secret.Namespace = r.storageClassClaim.Namespace + _, err = controllerutil.CreateOrUpdate(r.ctx, r.Client, secret, func() error { + err := r.own(secret) + if err != nil { + return fmt.Errorf("failed to own Secret: %v", err) + } + if secret.Data == nil { + secret.Data = map[string][]byte{} + } + for k, v := range data { + secret.Data[k] = []byte(v) + } + return nil + }) + if err != nil { + return reconcile.Result{}, fmt.Errorf("failed to create or update secret %v: %s", secret, err) + } + case "StorageClass": + var storageClass *storagev1.StorageClass + data["csi.storage.k8s.io/provisioner-secret-namespace"] = r.storageClassClaim.Namespace + data["csi.storage.k8s.io/node-stage-secret-namespace"] = r.storageClassClaim.Namespace + data["csi.storage.k8s.io/controller-expand-secret-namespace"] = r.storageClassClaim.Namespace + + if resource.Name == "cephfs" { + storageClass = r.getCephFSStorageClass(data) + } else if resource.Name == "ceph-rbd" { + storageClass = r.getCephRBDStorageClass(data) + } + storageClassClaimNamespacedName := r.getNamespacedName() + + if annotations := storageClass.GetAnnotations(); annotations == nil { + storageClass.SetAnnotations(map[string]string{ + StorageClassClaimAnnotation: storageClassClaimNamespacedName, + }) + } else { + annotations[StorageClassClaimAnnotation] = storageClassClaimNamespacedName + } + err = r.createOrReplaceStorageClass(storageClass) + if err != nil { + return reconcile.Result{}, fmt.Errorf("failed to create or update StorageClass: %s", err) + } + } + } + + // Readiness phase. + // Update the StorageClassClaim status. 
+ r.storageClassClaim.Status.Phase = v1alpha1.StorageClassClaimReady + + // Initiate deletion phase if the StorageClassClaim exists. + } else if r.storageClassClaim.UID != "" { + + // Deletion phase. + // Update the StorageClassClaim status. + r.storageClassClaim.Status.Phase = v1alpha1.StorageClassClaimDeleting + + // Delete StorageClass. + // Make sure there are no StorageClass consumers left. + // Check if StorageClass is in use, if yes, then fail. + // Wait until all PVs using the StorageClass under deletion are removed. + // Check for any PVs using the StorageClass. + pvList := corev1.PersistentVolumeList{} + err := r.list(&pvList) + if err != nil { + return reconcile.Result{}, fmt.Errorf("failed to list PersistentVolumes: %s", err) + } + for i := range pvList.Items { + pv := &pvList.Items[i] + if pv.Spec.StorageClassName == r.storageClassClaim.Name { + return reconcile.Result{}, fmt.Errorf("StorageClass %s is still in use by one or more PV(s)", + r.storageClassClaim.Name) + } + } + + // Call `RevokeStorageClassClaim` service on the provider server with StorageClassClaim as a request message. + // Check if StorageClassClaim still exists (it might have been manually removed during the StorageClass + // removal above). 
+ _, err = providerClient.RevokeStorageClassClaim( + r.ctx, + r.ocsClient.Status.ConsumerID, + r.storageClassClaim.Name, + ) + if err != nil { + return reconcile.Result{}, err + } + + storageClass := &storagev1.StorageClass{} + storageClass.Name = r.storageClassClaim.Name + if err = r.get(storageClass); err != nil && !errors.IsNotFound(err) { + return reconcile.Result{}, fmt.Errorf("failed to get StorageClass %s: %s", storageClass.Name, err) + } + if storageClass.UID != "" { + + if err = r.delete(storageClass); err != nil { + return reconcile.Result{}, fmt.Errorf("failed to delete StorageClass %s: %s", storageClass.Name, err) + } + } else { + r.log.Info("StorageClass already deleted.") + } + if contains(r.storageClassClaim.GetFinalizers(), StorageClassClaimFinalizer) { + r.storageClassClaim.Finalizers = remove(r.storageClassClaim.Finalizers, StorageClassClaimFinalizer) + if err := r.update(r.storageClassClaim); err != nil { + return ctrl.Result{}, fmt.Errorf("failed to remove finalizer from storageClassClaim: %s", err) + } + } + } + + return reconcile.Result{}, nil +} + +func (r *StorageClassClaimReconciler) getCephFSStorageClass(data map[string]string) *storagev1.StorageClass { + pvReclaimPolicy := corev1.PersistentVolumeReclaimDelete + allowVolumeExpansion := true + storageClass := &storagev1.StorageClass{ + ObjectMeta: metav1.ObjectMeta{ + Name: r.storageClassClaim.Name, + Namespace: r.storageClassClaim.Namespace, + Annotations: map[string]string{ + "description": "Provides RWO and RWX Filesystem volumes", + }, + }, + ReclaimPolicy: &pvReclaimPolicy, + AllowVolumeExpansion: &allowVolumeExpansion, + Provisioner: fmt.Sprintf("%s.cephfs.csi.ceph.com", r.ocsClient.Namespace), + Parameters: data, + } + return storageClass +} + +func (r *StorageClassClaimReconciler) getCephRBDStorageClass(data map[string]string) *storagev1.StorageClass { + pvReclaimPolicy := corev1.PersistentVolumeReclaimDelete + allowVolumeExpansion := true + storageClass := &storagev1.StorageClass{ 
+ ObjectMeta: metav1.ObjectMeta{ + Name: r.storageClassClaim.Name, + Namespace: r.storageClassClaim.Namespace, + Annotations: map[string]string{ + "description": "Provides RWO Filesystem volumes, and RWO and RWX Block volumes", + }, + }, + ReclaimPolicy: &pvReclaimPolicy, + AllowVolumeExpansion: &allowVolumeExpansion, + Provisioner: fmt.Sprintf("%s.rbd.csi.ceph.com", r.ocsClient.Namespace), + Parameters: data, + } + return storageClass +} + +func (r *StorageClassClaimReconciler) createOrReplaceStorageClass(storageClass *storagev1.StorageClass) error { + existing := &storagev1.StorageClass{} + existing.Name = r.storageClassClaim.Name + + if err := r.get(existing); err != nil && !errors.IsNotFound(err) { + return fmt.Errorf("failed to get StorageClass: %v", err) + } + + // If present, then compare the existing StorageClass with the received StorageClass, and only proceed if they differ. + if reflect.DeepEqual(existing.Parameters, storageClass.Parameters) { + return nil + } + + // StorageClass already exists, but parameters have changed. Delete the existing StorageClass and create a new one. + if existing.UID != "" { + + // Since we have to update the existing StorageClass, we will delete the existing StorageClass and create a new one. + r.log.Info("StorageClass needs to be updated, deleting it.", "StorageClass", klog.KRef(storageClass.Namespace, existing.Name)) + + // Delete the StorageClass. 
+ err := r.delete(existing) + if err != nil { + r.log.Error(err, "Failed to delete StorageClass.", "StorageClass", klog.KRef(storageClass.Namespace, existing.Name)) + return err + } + } + r.log.Info("Creating StorageClass.", "StorageClass", klog.KRef(storageClass.Namespace, existing.Name)) + err := r.Client.Create(r.ctx, storageClass) + if err != nil { + return fmt.Errorf("failed to create StorageClass: %v", err) + } + return nil +} + +func (r *StorageClassClaimReconciler) get(obj client.Object) error { + key := client.ObjectKeyFromObject(obj) + return r.Client.Get(r.ctx, key, obj) +} + +func (r *StorageClassClaimReconciler) update(obj client.Object) error { + return r.Client.Update(r.ctx, obj) +} + +func (r *StorageClassClaimReconciler) list(obj client.ObjectList, listOptions ...client.ListOption) error { + return r.Client.List(r.ctx, obj, listOptions...) +} + +func (r *StorageClassClaimReconciler) delete(obj client.Object) error { + if err := r.Client.Delete(r.ctx, obj); err != nil && !errors.IsNotFound(err) { + return err + } + return nil +} + +func (r *StorageClassClaimReconciler) own(resource metav1.Object) error { + // Ensure StorageClassClaim ownership on a resource + return controllerutil.SetOwnerReference(r.storageClassClaim, resource, r.Scheme) +} + +func (r *StorageClassClaimReconciler) getNamespacedName() string { + return fmt.Sprintf("%s/%s", r.storageClassClaim.Namespace, r.storageClassClaim.Name) +} diff --git a/go.mod b/go.mod index b914d6b54..081e3a7bd 100644 --- a/go.mod +++ b/go.mod @@ -23,11 +23,13 @@ replace ( ) require ( + github.com/go-logr/logr v1.2.3 github.com/onsi/ginkgo v1.16.5 github.com/onsi/gomega v1.18.1 github.com/openshift/api v0.0.0-20220421141645-441fe135b2fc github.com/stretchr/testify v1.7.0 google.golang.org/grpc v1.45.0 + k8s.io/api v0.23.6 k8s.io/apimachinery v0.23.6 k8s.io/client-go v12.0.0+incompatible k8s.io/klog/v2 v2.60.1 @@ -47,7 +49,6 @@ require ( github.com/davecgh/go-spew v1.1.1 // indirect 
github.com/evanphx/json-patch v4.12.0+incompatible // indirect github.com/fsnotify/fsnotify v1.5.1 // indirect - github.com/go-logr/logr v1.2.3 // indirect github.com/go-logr/zapr v1.2.0 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang-jwt/jwt/v4 v4.3.0 // indirect @@ -89,7 +90,6 @@ require ( gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect - k8s.io/api v0.23.6 // indirect k8s.io/apiextensions-apiserver v0.23.5 // indirect k8s.io/component-base v0.23.5 // indirect k8s.io/kube-openapi v0.0.0-20220124234850-424119656bbf // indirect diff --git a/main.go b/main.go index e2f4186cc..980f80d01 100644 --- a/main.go +++ b/main.go @@ -85,6 +85,13 @@ func main() { setupLog.Error(err, "unable to create controller", "controller", "OcsClient") os.Exit(1) } + if err = (&controllers.StorageClassClaimReconciler{ + Client: mgr.GetClient(), + Scheme: mgr.GetScheme(), + }).SetupWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create controller", "controller", "StorageClassClaim") + os.Exit(1) + } //+kubebuilder:scaffold:builder if err := mgr.AddHealthzCheck("healthz", healthz.Ping); err != nil {