diff --git a/api/v1/storagecluster_types.go b/api/v1/storagecluster_types.go index c809d32834..cfb480fe3b 100644 --- a/api/v1/storagecluster_types.go +++ b/api/v1/storagecluster_types.go @@ -109,29 +109,15 @@ type StorageClusterSpec struct { // DefaultStorageProfile is the default storage profile to use for // the storageclassrequest as StorageProfile is optional. DefaultStorageProfile string `json:"defaultStorageProfile,omitempty"` - - StorageProfiles []StorageProfile `json:"storageProfiles,omitempty"` -} - -// StorageProfile is the storage profile to use for the storageclassrequest. -type StorageProfile struct { - // +kubebuilder:validation:Required - // Name of the storage profile. - Name string `json:"name"` - // +kubebuilder:validation:Required - // DeviceClass is the deviceclass name. - DeviceClass string `json:"deviceClass"` - // configurations to use for cephfilesystem. - SharedFilesystemConfiguration SharedFilesystemConfigurationSpec `json:"sharedFilesystemConfiguration,omitempty"` - // configurations to use for profile specific blockpool. - BlockPoolConfiguration BlockPoolConfigurationSpec `json:"blockPoolConfiguration,omitempty"` } type SharedFilesystemConfigurationSpec struct { + // +kubebuilder:validation:Optional Parameters map[string]string `json:"parameters,omitempty"` } type BlockPoolConfigurationSpec struct { + // +kubebuilder:validation:Optional Parameters map[string]string `json:"parameters,omitempty"` } diff --git a/api/v1/storageprofile_types.go b/api/v1/storageprofile_types.go new file mode 100644 index 0000000000..5a3706f91c --- /dev/null +++ b/api/v1/storageprofile_types.go @@ -0,0 +1,97 @@ +/* +Copyright 2020 Red Hat OpenShift Container Storage. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "crypto/md5" + "encoding/hex" + "encoding/json" + "fmt" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! +// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized. + +// StorageProfileSpec defines the desired state of StorageProfile +type StorageProfileSpec struct { + // +kubebuilder:validation:Required + // +kubebuilder:validation:MaxLength=512 + // DeviceClass is the deviceclass name. + DeviceClass string `json:"deviceClass"` + + // +kubebuilder:validation:Optional + // configurations to use for cephfilesystem. + SharedFilesystemConfiguration SharedFilesystemConfigurationSpec `json:"sharedFilesystemConfiguration,omitempty"` + + // +kubebuilder:validation:Optional + // configurations to use for profile specific blockpool. 
+ BlockPoolConfiguration BlockPoolConfigurationSpec `json:"blockPoolConfiguration,omitempty"` +} + +// StorageProfileStatus defines the observed state of StorageProfile +type StorageProfileStatus struct { + // Phase describes the Phase of StorageProfile + // This is used by OLM UI to provide status information + // to the user + Phase StorageProfilePhase `json:"phase,omitempty"` +} + +//+kubebuilder:object:root=true +//+kubebuilder:subresource:status + +// StorageProfile is the Schema for the storageprofiles API +type StorageProfile struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + // +kubebuilder:validation:Required + // +kubebuilder:validation:XValidation:rule="oldSelf == self",message="spec is immutable" + Spec StorageProfileSpec `json:"spec"` + Status StorageProfileStatus `json:"status,omitempty"` +} + +//+kubebuilder:object:root=true + +// StorageProfileList contains a list of StorageProfile +type StorageProfileList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []StorageProfile `json:"items"` +} + +// StorageProfilePhase stores a StorageProfile reconciliation phase +type StorageProfilePhase string + +const ( + StorageProfilePhaseRejected StorageProfilePhase = "Rejected" +) + +func init() { + SchemeBuilder.Register(&StorageProfile{}, &StorageProfileList{}) +} + +func (sp *StorageProfile) GetSpecHash() string { + specJSON, err := json.Marshal(sp.Spec) + if err != nil { + errStr := fmt.Errorf("failed to marshal StorageProfile.Spec for %s", sp.Name) + panic(errStr) + } + specHash := md5.Sum(specJSON) + return hex.EncodeToString(specHash[:]) +} diff --git a/api/v1/zz_generated.deepcopy.go b/api/v1/zz_generated.deepcopy.go index 6458abf1bd..b41ea9e70b 100644 --- a/api/v1/zz_generated.deepcopy.go +++ b/api/v1/zz_generated.deepcopy.go @@ -792,13 +792,6 @@ func (in *StorageClusterSpec) DeepCopyInto(out *StorageClusterSpec) { (*in)[i].DeepCopyInto(&(*out)[i]) } } - if in.StorageProfiles != nil { - in, out := &in.StorageProfiles, &out.StorageProfiles - *out = make([]StorageProfile, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageClusterSpec. @@ -898,8 +891,10 @@ func (in *StorageDeviceSetConfig) DeepCopy() *StorageDeviceSetConfig { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *StorageProfile) DeepCopyInto(out *StorageProfile) { *out = *in - in.SharedFilesystemConfiguration.DeepCopyInto(&out.SharedFilesystemConfiguration) - in.BlockPoolConfiguration.DeepCopyInto(&out.BlockPoolConfiguration) + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + out.Status = in.Status } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageProfile. @@ -912,6 +907,78 @@ func (in *StorageProfile) DeepCopy() *StorageProfile { return out } +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *StorageProfile) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *StorageProfileList) DeepCopyInto(out *StorageProfileList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]StorageProfile, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageProfileList. +func (in *StorageProfileList) DeepCopy() *StorageProfileList { + if in == nil { + return nil + } + out := new(StorageProfileList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *StorageProfileList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StorageProfileSpec) DeepCopyInto(out *StorageProfileSpec) { + *out = *in + in.SharedFilesystemConfiguration.DeepCopyInto(&out.SharedFilesystemConfiguration) + in.BlockPoolConfiguration.DeepCopyInto(&out.BlockPoolConfiguration) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageProfileSpec. +func (in *StorageProfileSpec) DeepCopy() *StorageProfileSpec { + if in == nil { + return nil + } + out := new(StorageProfileSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StorageProfileStatus) DeepCopyInto(out *StorageProfileStatus) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageProfileStatus. +func (in *StorageProfileStatus) DeepCopy() *StorageProfileStatus { + if in == nil { + return nil + } + out := new(StorageProfileStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in TopologyLabelValues) DeepCopyInto(out *TopologyLabelValues) { { diff --git a/api/v1alpha1/storageconsumer_types.go b/api/v1alpha1/storageconsumer_types.go index f0ae411b95..b76daa1315 100644 --- a/api/v1alpha1/storageconsumer_types.go +++ b/api/v1alpha1/storageconsumer_types.go @@ -62,6 +62,16 @@ type StorageConsumerStatus struct { CephResources []*CephResourcesSpec `json:"cephResources,omitempty"` // Timestamp of last heartbeat received from consumer LastHeartbeat metav1.Time `json:"lastHeartbeat,omitempty"` + // Information of storage client received from consumer + Client ClientStatus `json:"client,omitempty"` +} + +// ClientStatus is the information pushed from connected storage client +type ClientStatus struct { + // StorageClient Platform Version + PlatformVersion string `json:"platformVersion"` + // StorageClient Operator Version + OperatorVersion string `json:"operatorVersion"` } //+kubebuilder:object:root=true diff --git a/api/v1alpha1/zz_generated.deepcopy.go b/api/v1alpha1/zz_generated.deepcopy.go index 7614dbbfe4..3dae5a8135 100644 --- a/api/v1alpha1/zz_generated.deepcopy.go +++ b/api/v1alpha1/zz_generated.deepcopy.go @@ -47,6 +47,21 @@ func (in *CephResourcesSpec) DeepCopy() *CephResourcesSpec { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ClientStatus) DeepCopyInto(out *ClientStatus) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClientStatus. +func (in *ClientStatus) DeepCopy() *ClientStatus { + if in == nil { + return nil + } + out := new(ClientStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *StorageClassRequest) DeepCopyInto(out *StorageClassRequest) { *out = *in @@ -236,6 +251,7 @@ func (in *StorageConsumerStatus) DeepCopyInto(out *StorageConsumerStatus) { } } in.LastHeartbeat.DeepCopyInto(&out.LastHeartbeat) + out.Client = in.Client } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageConsumerStatus. diff --git a/config/crd/bases/ocs.openshift.io_storageclusters.yaml b/config/crd/bases/ocs.openshift.io_storageclusters.yaml index 46e68c14e9..a3b7a9cedf 100644 --- a/config/crd/bases/ocs.openshift.io_storageclusters.yaml +++ b/config/crd/bases/ocs.openshift.io_storageclusters.yaml @@ -6225,38 +6225,6 @@ spec: - name type: object type: array - storageProfiles: - items: - description: StorageProfile is the storage profile to use for the - storageclassrequest. - properties: - blockPoolConfiguration: - description: configurations to use for profile specific blockpool. - properties: - parameters: - additionalProperties: - type: string - type: object - type: object - deviceClass: - description: DeviceClass is the deviceclass name. - type: string - name: - description: Name of the storage profile. - type: string - sharedFilesystemConfiguration: - description: configurations to use for cephfilesystem. - properties: - parameters: - additionalProperties: - type: string - type: object - type: object - required: - - deviceClass - - name - type: object - type: array version: description: Version specifies the version of StorageCluster type: string diff --git a/config/crd/bases/ocs.openshift.io_storageconsumers.yaml b/config/crd/bases/ocs.openshift.io_storageconsumers.yaml index b53da54942..fe6bc1d457 100644 --- a/config/crd/bases/ocs.openshift.io_storageconsumers.yaml +++ b/config/crd/bases/ocs.openshift.io_storageconsumers.yaml @@ -66,6 +66,19 @@ spec: type: string type: object type: array + client: + description: Information of storage client received from consumer + properties: + operatorVersion: + description: StorageClient Operator Version + type: string + platformVersion: + description: StorageClient Platform Version + type: string + required: + - operatorVersion + - platformVersion + type: object lastHeartbeat: description: Timestamp of last heartbeat received from consumer format: date-time diff --git a/config/crd/bases/ocs.openshift.io_storageprofiles.yaml b/config/crd/bases/ocs.openshift.io_storageprofiles.yaml new file mode 100644 index 0000000000..f0f37aa6e0 --- /dev/null +++ b/config/crd/bases/ocs.openshift.io_storageprofiles.yaml @@ -0,0 +1,78 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.9.2 + creationTimestamp: null + name: storageprofiles.ocs.openshift.io +spec: + group: ocs.openshift.io + names: + kind: StorageProfile + listKind: StorageProfileList + plural: storageprofiles + singular: storageprofile + scope: Namespaced + versions: + - name: v1 + schema: + openAPIV3Schema: + description: StorageProfile is the Schema for the storageprofiles API + properties: + apiVersion: + 
description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: StorageProfileSpec defines the desired state of StorageProfile + properties: + blockPoolConfiguration: + description: configurations to use for profile specific blockpool. + properties: + parameters: + additionalProperties: + type: string + type: object + type: object + deviceClass: + description: DeviceClass is the deviceclass name. + maxLength: 512 + type: string + sharedFilesystemConfiguration: + description: configurations to use for cephfilesystem. + properties: + parameters: + additionalProperties: + type: string + type: object + type: object + required: + - deviceClass + type: object + x-kubernetes-validations: + - message: spec is immutable + rule: oldSelf == self + status: + description: StorageProfileStatus defines the observed state of StorageProfile + properties: + phase: + description: Phase describes the Phase of StorageProfile This is used + by OLM UI to provide status information to the user + type: string + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/config/crd/kustomization.yaml b/config/crd/kustomization.yaml index 73fc7d6944..32087bfd72 100644 --- a/config/crd/kustomization.yaml +++ b/config/crd/kustomization.yaml @@ -6,6 +6,7 @@ resources: - bases/ocs.openshift.io_storageclusters.yaml - bases/ocs.openshift.io_storageconsumers.yaml - bases/ocs.openshift.io_storageclassrequests.yaml +- bases/ocs.openshift.io_storageprofiles.yaml # +kubebuilder:scaffold:crdkustomizeresource # patchesStrategicMerge: @@ -15,6 +16,7 @@ resources: #- patches/webhook_in_storageclusters.yaml #- patches/webhook_in_storageconsumers.yaml #- patches/webhook_in_storageclassrequestss.yaml +#- patches/webhook_in_storageprofiles.yaml # +kubebuilder:scaffold:crdkustomizewebhookpatch # [CERTMANAGER] To enable webhook, uncomment all the sections with [CERTMANAGER] prefix. @@ -23,6 +25,7 @@ resources: #- patches/cainjection_in_storageclusters.yaml #- patches/cainjection_in_storageconsumers.yaml #- patches/cainjection_in_storageclassrequests.yaml +#- patches/cainjection_in_storageprofiles.yaml # +kubebuilder:scaffold:crdkustomizecainjectionpatch # the following config is for teaching kustomize how to do kustomization for CRDs. diff --git a/config/crd/patches/cainjection_in_storageprofiles.yaml b/config/crd/patches/cainjection_in_storageprofiles.yaml new file mode 100644 index 0000000000..e3d618b24c --- /dev/null +++ b/config/crd/patches/cainjection_in_storageprofiles.yaml @@ -0,0 +1,8 @@ +# The following patch adds a directive for certmanager to inject CA into the CRD +# CRD conversion requires k8s 1.13 or later. 
+apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + annotations: + cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME) + name: storageprofiles.ocs.openshift.io diff --git a/config/crd/patches/webhook_in_storageprofiles.yaml b/config/crd/patches/webhook_in_storageprofiles.yaml new file mode 100644 index 0000000000..a08411cf13 --- /dev/null +++ b/config/crd/patches/webhook_in_storageprofiles.yaml @@ -0,0 +1,17 @@ +# The following patch enables conversion webhook for CRD +# CRD conversion requires k8s 1.13 or later. +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: storageprofiles.ocs.openshift.io +spec: + conversion: + strategy: Webhook + webhookClientConfig: + # this is "\n" used as a placeholder, otherwise it will be rejected by the apiserver for being blank, + # but we're going to set it later using the cert-manager (or potentially a patch if not using cert-manager) + caBundle: Cg== + service: + namespace: system + name: webhook-service + path: /convert diff --git a/config/rbac/storageprofile_editor_role.yaml b/config/rbac/storageprofile_editor_role.yaml new file mode 100644 index 0000000000..17c3bffb52 --- /dev/null +++ b/config/rbac/storageprofile_editor_role.yaml @@ -0,0 +1,24 @@ +# permissions for end users to edit storageprofiles. +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: storageprofile-editor-role +rules: +- apiGroups: + - ocs.openshift.io + resources: + - storageprofiles + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - ocs.openshift.io + resources: + - storageprofiles/status + verbs: + - get diff --git a/config/rbac/storageprofile_viewer_role.yaml b/config/rbac/storageprofile_viewer_role.yaml new file mode 100644 index 0000000000..a276c77ed3 --- /dev/null +++ b/config/rbac/storageprofile_viewer_role.yaml @@ -0,0 +1,20 @@ +# permissions for end users to view storageprofiles. 
+apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: storageprofile-viewer-role +rules: +- apiGroups: + - ocs.openshift.io + resources: + - storageprofiles + verbs: + - get + - list + - watch +- apiGroups: + - ocs.openshift.io + resources: + - storageprofiles/status + verbs: + - get diff --git a/config/samples/kustomization.yaml b/config/samples/kustomization.yaml index 521fead1da..bff9db7c1e 100644 --- a/config/samples/kustomization.yaml +++ b/config/samples/kustomization.yaml @@ -3,4 +3,5 @@ resources: - ocs_v1_ocsinitialization.yaml - ocs_v1_storagecluster.yaml - ocs_v1alpha1_storageconsumer.yaml +- ocs_v1_storageprofile.yaml # +kubebuilder:scaffold:manifestskustomizesamples diff --git a/config/samples/ocs_v1_storageprofile.yaml b/config/samples/ocs_v1_storageprofile.yaml new file mode 100644 index 0000000000..4249b59d51 --- /dev/null +++ b/config/samples/ocs_v1_storageprofile.yaml @@ -0,0 +1,6 @@ +apiVersion: ocs.openshift.io/v1 +kind: StorageProfile +metadata: + name: medium +spec: + deviceClass: ssd diff --git a/controllers/storageclassrequest/storageclassrequest_controller.go b/controllers/storageclassrequest/storageclassrequest_controller.go index a7d216c0a7..4f808b6fd8 100644 --- a/controllers/storageclassrequest/storageclassrequest_controller.go +++ b/controllers/storageclassrequest/storageclassrequest_controller.go @@ -61,7 +61,6 @@ type StorageClassRequestReconciler struct { cephClientProvisioner *rookCephv1.CephClient cephClientNode *rookCephv1.CephClient cephResourcesByName map[string]*v1alpha1.CephResourcesSpec - storageProfile *v1.StorageProfile } // +kubebuilder:rbac:groups=ocs.openshift.io,resources=storageclassrequests,verbs=get;list;watch;create;update;patch;delete @@ -190,6 +189,23 @@ func (r *StorageClassRequestReconciler) reconcilePhases() (reconcile.Result, err return reconcile.Result{}, err } + profileName := r.StorageClassRequest.Spec.StorageProfile + if profileName == "" { + profileName = r.storageCluster.Spec.DefaultStorageProfile + } + + // Fetch StorageProfile by name in the StorageCluster's namespace + storageProfile := v1.StorageProfile{ + ObjectMeta: metav1.ObjectMeta{ + Name: profileName, + Namespace: r.storageCluster.Namespace, + }, + } + + if err := r.get(&storageProfile); err != nil { + return reconcile.Result{}, fmt.Errorf("no storage profile CR found for storage profile %s", profileName) + } + // check request status already contains the name of the resource. if not, add it. if r.StorageClassRequest.Spec.Type == "blockpool" { r.cephBlockPool = &rookCephv1.CephBlockPool{} @@ -200,8 +216,29 @@ func (r *StorageClassRequestReconciler) reconcilePhases() (reconcile.Result, err break } } + + // check if a cephblockpool resource exists for the desired storageconsumer and storageprofile. 
if r.cephBlockPool.Name == "" { - r.cephBlockPool.Name = fmt.Sprintf("cephblockpool-%s-%s", r.storageConsumer.Name, generateUUID()) + cephBlockPoolList := &rookCephv1.CephBlockPoolList{} + listOptions := &client.MatchingLabels{ + controllers.StorageConsumerNameLabel: r.storageConsumer.Name, + controllers.StorageProfileSpecLabel: storageProfile.GetSpecHash(), + } + if err := r.list(cephBlockPoolList, client.InNamespace(r.OperatorNamespace), listOptions); err != nil { + return reconcile.Result{}, err + } + + // if we found no CephBlockPools, generate a new name + // if we found only one CephBlockPool with our query, we're good + // if we found more than one CephBlockPool, we can't determine which one to select, so error out + cbpItemsLen := len(cephBlockPoolList.Items) + if cbpItemsLen == 0 { + r.cephBlockPool.Name = fmt.Sprintf("cephblockpool-%s-%s", r.storageConsumer.Name, generateUUID()) + } else if cbpItemsLen == 1 { + r.cephBlockPool.Name = cephBlockPoolList.Items[0].GetName() + } else { + return reconcile.Result{}, fmt.Errorf("invalid number of CephBlockPools for storage consumer %q and storage profile %q: found %d, expecting 0 or 1", r.storageConsumer.Name, profileName, cbpItemsLen) + } } } else if r.StorageClassRequest.Spec.Type == "sharedfilesystem" { @@ -218,23 +255,6 @@ func (r *StorageClassRequestReconciler) reconcilePhases() (reconcile.Result, err } } - profileName := r.StorageClassRequest.Spec.StorageProfile - if profileName == "" { - profileName = r.storageCluster.Spec.DefaultStorageProfile - } - - for i := range r.storageCluster.Spec.StorageProfiles { - profile := &r.storageCluster.Spec.StorageProfiles[i] - if profile.Name == profileName { - r.storageProfile = profile - break - } - } - - if r.storageProfile == nil { - return reconcile.Result{}, fmt.Errorf("no storage profile definition found for storage profile %s", profileName) - } - r.cephClientProvisioner = &rookCephv1.CephClient{} r.cephClientProvisioner.Name = controllers.GenerateHashForCephClient(r.StorageClassRequest.Name, "provisioner") r.cephClientProvisioner.Namespace = r.OperatorNamespace @@ -262,7 +282,7 @@ func (r *StorageClassRequestReconciler) reconcilePhases() (reconcile.Result, err return reconcile.Result{}, err } - if err := r.reconcileCephBlockPool(); err != nil { + if err := r.reconcileCephBlockPool(&storageProfile); err != nil { return reconcile.Result{}, err } @@ -275,7 +295,7 @@ func (r *StorageClassRequestReconciler) reconcilePhases() (reconcile.Result, err return reconcile.Result{}, err } - if err := r.reconcileCephFilesystemSubVolumeGroup(); err != nil { + if err := r.reconcileCephFilesystemSubVolumeGroup(&storageProfile); err != nil { return reconcile.Result{}, err } } @@ -297,7 +317,7 @@ func (r *StorageClassRequestReconciler) reconcilePhases() (reconcile.Result, err return reconcile.Result{}, nil } -func (r *StorageClassRequestReconciler) reconcileCephBlockPool() error { +func (r *StorageClassRequestReconciler) reconcileCephBlockPool(storageProfile *v1.StorageProfile) error { failureDomain := r.storageCluster.Status.FailureDomain @@ -305,7 +325,7 @@ func (r *StorageClassRequestReconciler) reconcileCephBlockPool() error { if err := r.own(r.cephBlockPool); err != nil { return err } - deviceClass := r.storageProfile.DeviceClass + deviceClass := storageProfile.Spec.DeviceClass deviceSetList := r.storageCluster.Spec.StorageDeviceSets var deviceSet *v1.StorageDeviceSet for i := range deviceSetList { @@ -322,6 +342,7 @@ func (r *StorageClassRequestReconciler) reconcileCephBlockPool() error { } 
addLabel(r.cephBlockPool, controllers.StorageConsumerNameLabel, r.storageConsumer.Name) + addLabel(r.cephBlockPool, controllers.StorageProfileSpecLabel, storageProfile.GetSpecHash()) r.cephBlockPool.Spec = rookCephv1.NamedBlockPoolSpec{ PoolSpec: rookCephv1.PoolSpec{ @@ -331,7 +352,7 @@ func (r *StorageClassRequestReconciler) reconcileCephBlockPool() error { Size: 3, ReplicasPerFailureDomain: 1, }, - Parameters: r.storageProfile.BlockPoolConfiguration.Parameters, + Parameters: storageProfile.Spec.BlockPoolConfiguration.Parameters, }, } return nil @@ -361,7 +382,7 @@ func (r *StorageClassRequestReconciler) reconcileCephBlockPool() error { return nil } -func (r *StorageClassRequestReconciler) reconcileCephFilesystemSubVolumeGroup() error { +func (r *StorageClassRequestReconciler) reconcileCephFilesystemSubVolumeGroup(storageProfile *v1.StorageProfile) error { cephFilesystem := rookCephv1.CephFilesystem{ ObjectMeta: metav1.ObjectMeta{ @@ -377,8 +398,8 @@ func (r *StorageClassRequestReconciler) reconcileCephFilesystemSubVolumeGroup() if err := r.own(r.cephFilesystemSubVolumeGroup); err != nil { return err } - deviceClass := r.storageProfile.DeviceClass - dataPool := &rookCephv1.NamedPoolSpec{} + deviceClass := storageProfile.Spec.DeviceClass + var dataPool *rookCephv1.NamedPoolSpec for i := range cephFilesystem.Spec.DataPools { if cephFilesystem.Spec.DataPools[i].DeviceClass == deviceClass { dataPool = &cephFilesystem.Spec.DataPools[i] @@ -390,6 +411,7 @@ func (r *StorageClassRequestReconciler) reconcileCephFilesystemSubVolumeGroup() } addLabel(r.cephFilesystemSubVolumeGroup, controllers.StorageConsumerNameLabel, r.storageConsumer.Name) + addLabel(r.cephFilesystemSubVolumeGroup, controllers.StorageProfileSpecLabel, storageProfile.GetSpecHash()) // This label is required to set the dataPool on the CephFS // storageclass so that each PVC created from CephFS storageclass can // use correct dataPool backed by deviceclass. diff --git a/controllers/storagecluster/cephfilesystem.go b/controllers/storagecluster/cephfilesystem.go index 241239ce01..2a90be4255 100644 --- a/controllers/storagecluster/cephfilesystem.go +++ b/controllers/storagecluster/cephfilesystem.go @@ -11,6 +11,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/klog/v2" + "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" "sigs.k8s.io/controller-runtime/pkg/reconcile" ) @@ -19,59 +20,83 @@ type ocsCephFilesystems struct{} // newCephFilesystemInstances returns the cephFilesystem instances that should be created // on first run. 
-func (r *StorageClusterReconciler) newCephFilesystemInstances(initData *ocsv1.StorageCluster) ([]*cephv1.CephFilesystem, error) { +func (r *StorageClusterReconciler) newCephFilesystemInstances(initStorageCluster *ocsv1.StorageCluster) ([]*cephv1.CephFilesystem, error) { ret := &cephv1.CephFilesystem{ ObjectMeta: metav1.ObjectMeta{ - Name: generateNameForCephFilesystem(initData), - Namespace: initData.Namespace, + Name: generateNameForCephFilesystem(initStorageCluster), + Namespace: initStorageCluster.Namespace, }, Spec: cephv1.FilesystemSpec{ MetadataPool: cephv1.PoolSpec{ - Replicated: generateCephReplicatedSpec(initData, "metadata"), - FailureDomain: initData.Status.FailureDomain, + Replicated: generateCephReplicatedSpec(initStorageCluster, "metadata"), + FailureDomain: initStorageCluster.Status.FailureDomain, }, MetadataServer: cephv1.MetadataServerSpec{ ActiveCount: 1, ActiveStandby: true, - Placement: getPlacement(initData, "mds"), - Resources: defaults.GetDaemonResources("mds", initData.Spec.Resources), + Placement: getPlacement(initStorageCluster, "mds"), + Resources: defaults.GetDaemonResources("mds", initStorageCluster.Spec.Resources), // set PriorityClassName for the MDS pods PriorityClassName: openshiftUserCritical, }, }, } - if initData.Spec.StorageProfiles == nil { - // standalone deployment will not have storageProfile, we need to - // define default dataPool, if storageProfile is set this will be - // overridden. + // not in provider mode + if !initStorageCluster.Spec.AllowRemoteStorageConsumers { + // standalone deployment that isn't in provider cluster will not + // have storageProfile, we need to define default dataPool, if + // storageProfile is set this will be overridden. ret.Spec.DataPools = []cephv1.NamedPoolSpec{ { PoolSpec: cephv1.PoolSpec{ - DeviceClass: generateDeviceClass(initData), - Replicated: generateCephReplicatedSpec(initData, "data"), - FailureDomain: initData.Status.FailureDomain, + DeviceClass: generateDeviceClass(initStorageCluster), + Replicated: generateCephReplicatedSpec(initStorageCluster, "data"), + FailureDomain: initStorageCluster.Status.FailureDomain, }, }, } } else { + // Load all StorageProfile objects in the StorageCluster's namespace + storageProfiles := &ocsv1.StorageProfileList{} + err := r.Client.List(r.ctx, storageProfiles, client.InNamespace(initStorageCluster.GetNamespace())) + if err != nil { + r.Log.Error(err, "unable to list StorageProfile objects") + } // set deviceClass and parameters from storageProfile - for i := range initData.Spec.StorageProfiles { - deviceClass := initData.Spec.StorageProfiles[i].DeviceClass - parameters := initData.Spec.StorageProfiles[i].SharedFilesystemConfiguration.Parameters + for i := range storageProfiles.Items { + storageProfile := storageProfiles.Items[i] + spSpec := &storageProfile.Spec + deviceClass := spSpec.DeviceClass + if len(deviceClass) == 0 { + r.Log.Error(nil, "Storage profile has an empty device class. 
Skipping.", "StorageProfile", klog.KRef(storageProfile.Namespace, storageProfile.Name)) + storageProfile.Status.Phase = ocsv1.StorageProfilePhaseRejected + if updateErr := r.Client.Status().Update(r.ctx, &storageProfile); updateErr != nil { + r.Log.Error(updateErr, "Could not update StorageProfile.", "StorageProfile", klog.KRef(storageProfile.Namespace, storageProfile.Name)) + return nil, updateErr + } + continue + } else { + storageProfile.Status.Phase = "" + if updateErr := r.Client.Status().Update(r.ctx, &storageProfile); updateErr != nil { + r.Log.Error(updateErr, "Could not update StorageProfile.", "StorageProfile", klog.KRef(storageProfile.Namespace, storageProfile.Name)) + return nil, updateErr + } + } + parameters := spSpec.SharedFilesystemConfiguration.Parameters ret.Spec.DataPools = append(ret.Spec.DataPools, cephv1.NamedPoolSpec{ Name: deviceClass, PoolSpec: cephv1.PoolSpec{ - Replicated: generateCephReplicatedSpec(initData, "data"), + Replicated: generateCephReplicatedSpec(initStorageCluster, "data"), DeviceClass: deviceClass, Parameters: parameters, - FailureDomain: initData.Status.FailureDomain, + FailureDomain: initStorageCluster.Status.FailureDomain, }, }) } } - err := controllerutil.SetControllerReference(initData, ret, r.Scheme) + err := controllerutil.SetControllerReference(initStorageCluster, ret, r.Scheme) if err != nil { r.Log.Error(err, "Unable to set Controller Reference for CephFileSystem.", "CephFileSystem", klog.KRef(ret.Namespace, ret.Name)) return nil, err diff --git a/controllers/storagecluster/provider_server.go b/controllers/storagecluster/provider_server.go index 90686e2d2d..3b21f99bfe 100644 --- a/controllers/storagecluster/provider_server.go +++ b/controllers/storagecluster/provider_server.go @@ -146,11 +146,16 @@ func (o *ocsProviderServer) createDeployment(r *StorageClusterReconciler, instan func (o *ocsProviderServer) createService(r *StorageClusterReconciler, instance *ocsv1.StorageCluster) (reconcile.Result, error) { - if instance.Spec.ProviderAPIServerServiceType != "" && instance.Spec.ProviderAPIServerServiceType != corev1.ServiceTypeNodePort && - instance.Spec.ProviderAPIServerServiceType != corev1.ServiceTypeLoadBalancer { - err := fmt.Errorf("providerAPIServer only supports service of type %s and %s", corev1.ServiceTypeNodePort, corev1.ServiceTypeLoadBalancer) - r.Log.Error(err, "Failed to create/update service, Requested ServiceType is", "ServiceType", instance.Spec.ProviderAPIServerServiceType) - return reconcile.Result{}, err + if instance.Spec.ProviderAPIServerServiceType != "" { + switch instance.Spec.ProviderAPIServerServiceType { + case corev1.ServiceTypeClusterIP, corev1.ServiceTypeLoadBalancer, corev1.ServiceTypeNodePort: + default: + err := fmt.Errorf("providerAPIServer only supports service of type %s, %s and %s", + corev1.ServiceTypeNodePort, corev1.ServiceTypeLoadBalancer, corev1.ServiceTypeClusterIP) + r.Log.Error(err, "Failed to create/update service, Requested ServiceType is", "ServiceType", instance.Spec.ProviderAPIServerServiceType) + return reconcile.Result{}, err + } + } desiredService := GetProviderAPIServerService(instance) @@ -186,7 +191,8 @@ func (o *ocsProviderServer) createService(r *StorageClusterReconciler, instance r.Log.Info("Service create/update succeeded") - if instance.Spec.ProviderAPIServerServiceType == corev1.ServiceTypeLoadBalancer { + switch instance.Spec.ProviderAPIServerServiceType { + case corev1.ServiceTypeLoadBalancer: endpoint := o.getLoadBalancerServiceEndpoint(actualService) if endpoint == "" { @@ 
-196,7 +202,11 @@ func (o *ocsProviderServer) createService(r *StorageClusterReconciler, instance } instance.Status.StorageProviderEndpoint = fmt.Sprintf("%s:%d", endpoint, ocsProviderServicePort) - } else { + + case corev1.ServiceTypeClusterIP: + instance.Status.StorageProviderEndpoint = fmt.Sprintf("%s:%d", actualService.Spec.ClusterIP, ocsProviderServicePort) + + default: // Nodeport is the default ServiceType for the provider server nodeAddresses, err := o.getWorkerNodesInternalIPAddresses(r) if err != nil { return reconcile.Result{}, err @@ -384,7 +394,13 @@ func GetProviderAPIServerService(instance *ocsv1.StorageCluster) *corev1.Service }, Ports: []corev1.ServicePort{ { - NodePort: ocsProviderServiceNodePort, + NodePort: func() int32 { + // ClusterIP service doesn't need nodePort + if instance.Spec.ProviderAPIServerServiceType == corev1.ServiceTypeClusterIP { + return 0 + } + return ocsProviderServiceNodePort + }(), Port: ocsProviderServicePort, TargetPort: intstr.FromString("ocs-provider"), }, diff --git a/controllers/storagecluster/provider_server_test.go b/controllers/storagecluster/provider_server_test.go index 054dc38cbd..c3807b3cdb 100644 --- a/controllers/storagecluster/provider_server_test.go +++ b/controllers/storagecluster/provider_server_test.go @@ -149,10 +149,68 @@ func TestOcsProviderServerEnsureCreated(t *testing.T) { assert.NoError(t, r.Client.Get(context.TODO(), client.ObjectKeyFromObject(secret), secret)) }) - t.Run("Ensure that Service is not created when AllowRemoteStorageConsumers is enabled and ProviderAPIServerServiceType is set to any other value than NodePort or LoadBalancer", func(t *testing.T) { + t.Run("Ensure that Deployment,Service,Secret is created when AllowRemoteStorageConsumers and ProviderAPIServerServiceType set to ClusterIP", func(t *testing.T) { r, instance := createSetupForOcsProviderTest(t, true, corev1.ServiceTypeClusterIP) + obj := &ocsProviderServer{} + res, err := obj.ensureCreated(r, instance) + assert.NoError(t, err) + assert.False(t, res.IsZero()) + + // storagecluster controller waits for svc status to fetch the IP and it requeue + // as we are using a fake client and it does not fill the status automatically. + // update the required status field of the svc to overcome the failure and requeue. + service := &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{Name: ocsProviderServerName}, + } + err = r.Update(context.TODO(), service) + assert.NoError(t, err) + + // call ensureCreated again after filling the status of svc, It will fail on deployment now + res, err = obj.ensureCreated(r, instance) + assert.NoError(t, err) + assert.False(t, res.IsZero()) + + // storagecluster controller waits for deployment status to fetch the replica count and it requeue + // as we are using a fake client and it does not fill the status automatically. + // update the required status field of the deployment to overcome the failure and requeue. 
+ deployment := &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{Name: ocsProviderServerName}, + } + deployment.Status.AvailableReplicas = 1 + err = r.Update(context.TODO(), deployment) + assert.NoError(t, err) + + // call ensureCreated again after filling the status of deployment, It will pass now + res, err = obj.ensureCreated(r, instance) + assert.NoError(t, err) + assert.True(t, res.IsZero()) + + deployment = &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{Name: ocsProviderServerName}, + } + assert.NoError(t, r.Client.Get(context.TODO(), client.ObjectKeyFromObject(deployment), deployment)) + expectedDeployment := GetProviderAPIServerDeploymentForTest(instance) + assert.Equal(t, deployment.Spec, expectedDeployment.Spec) + + service = &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{Name: ocsProviderServerName}, + } + assert.NoError(t, r.Client.Get(context.TODO(), client.ObjectKeyFromObject(service), service)) + expectedService := GetClusterIPProviderAPIServerServiceForTest(instance) + assert.Equal(t, expectedService.Spec, service.Spec) + + secret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{Name: ocsProviderServerName}, + } + assert.NoError(t, r.Client.Get(context.TODO(), client.ObjectKeyFromObject(secret), secret)) + }) + + t.Run("Ensure that Service is not created when AllowRemoteStorageConsumers is enabled and ProviderAPIServerServiceType is set to any other value than NodePort, ClusterIP or LoadBalancer", func(t *testing.T) { + + r, instance := createSetupForOcsProviderTest(t, true, corev1.ServiceTypeExternalName) + obj := &ocsProviderServer{} _, err := obj.ensureCreated(r, instance) assert.Errorf(t, err, "only supports service of type") @@ -391,3 +449,28 @@ func GetLoadBalancerProviderAPIServerServiceForTest(instance *ocsv1.StorageClust }, } } + +func GetClusterIPProviderAPIServerServiceForTest(instance *ocsv1.StorageCluster) *corev1.Service { + + return &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: ocsProviderServerName, + Namespace: instance.Namespace, + Annotations: map[string]string{ + "service.beta.openshift.io/serving-cert-secret-name": ocsProviderCertSecretName, + }, + }, + Spec: corev1.ServiceSpec{ + Selector: map[string]string{ + "app": "ocsProviderApiServer", + }, + Ports: []corev1.ServicePort{ + { + Port: ocsProviderServicePort, + TargetPort: intstr.FromString("ocs-provider"), + }, + }, + Type: corev1.ServiceTypeClusterIP, + }, + } +} diff --git a/controllers/storagecluster/storageclasses.go b/controllers/storagecluster/storageclasses.go index ea54a7b3db..118c839324 100644 --- a/controllers/storagecluster/storageclasses.go +++ b/controllers/storagecluster/storageclasses.go @@ -413,12 +413,17 @@ func (r *StorageClusterReconciler) newStorageClassConfigurations(initData *ocsv1 newCephFilesystemStorageClassConfiguration(initData), newCephBlockPoolStorageClassConfiguration(initData), } - // If kubevirt crd is present, we create a specialized rbd storageclass for virtualization environment - kvcrd := &extv1.CustomResourceDefinition{} - err := r.Client.Get(context.TODO(), types.NamespacedName{Name: "virtualmachines.kubevirt.io", Namespace: ""}, kvcrd) - if err == nil { - ret = append(ret, newCephBlockPoolVirtualizationStorageClassConfiguration(initData)) + + // when allowing consumers, creation of storage classes should only be done via storageclassrequests + if !initData.Spec.AllowRemoteStorageConsumers { + // If kubevirt crd is present, we create a specialized rbd storageclass for virtualization environment + kvcrd := &extv1.CustomResourceDefinition{} 
+ err := r.Client.Get(context.TODO(), types.NamespacedName{Name: "virtualmachines.kubevirt.io", Namespace: ""}, kvcrd) + if err == nil { + ret = append(ret, newCephBlockPoolVirtualizationStorageClassConfiguration(initData)) + } } + if initData.Spec.ManagedResources.CephNonResilientPools.Enable { ret = append(ret, newNonResilientCephBlockPoolStorageClassConfiguration(initData)) } diff --git a/controllers/storagecluster/storagecluster_controller.go b/controllers/storagecluster/storagecluster_controller.go index 5f6c280f1d..a35bab8628 100644 --- a/controllers/storagecluster/storagecluster_controller.go +++ b/controllers/storagecluster/storagecluster_controller.go @@ -121,7 +121,7 @@ func (r *StorageClusterReconciler) SetupWithManager(mgr ctrl.Manager) error { } enqueueStorageClusterRequest := handler.EnqueueRequestsFromMapFunc( - func(context context.Context, obj client.Object) []reconcile.Request { + func(_ context.Context, obj client.Object) []reconcile.Request { ocinit, ok := obj.(*ocsv1.OCSInitialization) if !ok { @@ -155,6 +155,36 @@ func (r *StorageClusterReconciler) SetupWithManager(mgr ctrl.Manager) error { }, ) + enqueueFromStorageProfile := handler.EnqueueRequestsFromMapFunc( + func(_ context.Context, obj client.Object) []reconcile.Request { + // only storage profile is being watched + _ = obj.(*ocsv1.StorageProfile) + + // Get the StorageCluster object + scList := &ocsv1.StorageClusterList{} + err := r.Client.List(r.ctx, scList, client.InNamespace(obj.GetNamespace()), client.Limit(1)) + if err != nil { + r.Log.Error(err, "Unable to list StorageCluster objects") + return []reconcile.Request{} + } + + if len(scList.Items) == 0 { + return []reconcile.Request{} + } + + sc := scList.Items[0] + // Return name and namespace of StorageCluster + return []reconcile.Request{ + { + NamespacedName: types.NamespacedName{ + Name: sc.Name, + Namespace: sc.Namespace, + }, + }, + } + }, + ) + builder := ctrl.NewControllerManagedBy(mgr). For(&ocsv1.StorageCluster{}, builder.WithPredicates(scPredicate)). Owns(&cephv1.CephCluster{}). @@ -163,6 +193,7 @@ func (r *StorageClusterReconciler) SetupWithManager(mgr ctrl.Manager) error { Owns(&corev1.Service{}, builder.WithPredicates(predicate.GenerationChangedPredicate{})). Owns(&corev1.ConfigMap{}, builder.WithPredicates(predicate.GenerationChangedPredicate{})). Watches(&ocsv1.OCSInitialization{}, enqueueStorageClusterRequest). + Watches(&ocsv1.StorageProfile{}, enqueueFromStorageProfile). 
Watches( &extv1.CustomResourceDefinition{ ObjectMeta: metav1.ObjectMeta{ diff --git a/controllers/storageconsumer/storageconsumer_controller.go b/controllers/storageconsumer/storageconsumer_controller.go index af0b806f78..469c33acb9 100644 --- a/controllers/storageconsumer/storageconsumer_controller.go +++ b/controllers/storageconsumer/storageconsumer_controller.go @@ -44,6 +44,7 @@ const ( StorageConsumerAnnotation = "ocs.openshift.io.storageconsumer" StorageRequestAnnotation = "ocs.openshift.io.storagerequest" StorageCephUserTypeAnnotation = "ocs.openshift.io.cephusertype" + StorageProfileSpecLabel = "ocs.openshift.io/storageprofile-spec" ConsumerUUIDLabel = "ocs.openshift.io/storageconsumer-uuid" StorageConsumerNameLabel = "ocs.openshift.io/storageconsumer-name" ) diff --git a/deploy/csv-templates/crds/ocs/ocs.openshift.io_storageclusters.yaml b/deploy/csv-templates/crds/ocs/ocs.openshift.io_storageclusters.yaml index 46e68c14e9..a3b7a9cedf 100644 --- a/deploy/csv-templates/crds/ocs/ocs.openshift.io_storageclusters.yaml +++ b/deploy/csv-templates/crds/ocs/ocs.openshift.io_storageclusters.yaml @@ -6225,38 +6225,6 @@ spec: - name type: object type: array - storageProfiles: - items: - description: StorageProfile is the storage profile to use for the - storageclassrequest. - properties: - blockPoolConfiguration: - description: configurations to use for profile specific blockpool. - properties: - parameters: - additionalProperties: - type: string - type: object - type: object - deviceClass: - description: DeviceClass is the deviceclass name. - type: string - name: - description: Name of the storage profile. - type: string - sharedFilesystemConfiguration: - description: configurations to use for cephfilesystem. - properties: - parameters: - additionalProperties: - type: string - type: object - type: object - required: - - deviceClass - - name - type: object - type: array version: description: Version specifies the version of StorageCluster type: string diff --git a/deploy/csv-templates/crds/ocs/ocs.openshift.io_storageconsumers.yaml b/deploy/csv-templates/crds/ocs/ocs.openshift.io_storageconsumers.yaml index b53da54942..fe6bc1d457 100644 --- a/deploy/csv-templates/crds/ocs/ocs.openshift.io_storageconsumers.yaml +++ b/deploy/csv-templates/crds/ocs/ocs.openshift.io_storageconsumers.yaml @@ -66,6 +66,19 @@ spec: type: string type: object type: array + client: + description: Information of storage client received from consumer + properties: + operatorVersion: + description: StorageClient Operator Version + type: string + platformVersion: + description: StorageClient Platform Version + type: string + required: + - operatorVersion + - platformVersion + type: object lastHeartbeat: description: Timestamp of last heartbeat received from consumer format: date-time diff --git a/deploy/csv-templates/crds/ocs/ocs.openshift.io_storageprofiles.yaml b/deploy/csv-templates/crds/ocs/ocs.openshift.io_storageprofiles.yaml new file mode 100644 index 0000000000..f0f37aa6e0 --- /dev/null +++ b/deploy/csv-templates/crds/ocs/ocs.openshift.io_storageprofiles.yaml @@ -0,0 +1,78 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.9.2 + creationTimestamp: null + name: storageprofiles.ocs.openshift.io +spec: + group: ocs.openshift.io + names: + kind: StorageProfile + listKind: StorageProfileList + plural: storageprofiles + singular: storageprofile + scope: Namespaced + versions: + - name: v1 + schema: + openAPIV3Schema: + 
description: StorageProfile is the Schema for the storageprofiles API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: StorageProfileSpec defines the desired state of StorageProfile + properties: + blockPoolConfiguration: + description: configurations to use for profile specific blockpool. + properties: + parameters: + additionalProperties: + type: string + type: object + type: object + deviceClass: + description: DeviceClass is the deviceclass name. + maxLength: 512 + type: string + sharedFilesystemConfiguration: + description: configurations to use for cephfilesystem. + properties: + parameters: + additionalProperties: + type: string + type: object + type: object + required: + - deviceClass + type: object + x-kubernetes-validations: + - message: spec is immutable + rule: oldSelf == self + status: + description: StorageProfileStatus defines the observed state of StorageProfile + properties: + phase: + description: Phase describes the Phase of StorageProfile This is used + by OLM UI to provide status information to the user + type: string + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/deploy/csv-templates/ocs-operator.csv.yaml.in b/deploy/csv-templates/ocs-operator.csv.yaml.in index e9803a7040..81c4748b15 100644 --- a/deploy/csv-templates/ocs-operator.csv.yaml.in +++ b/deploy/csv-templates/ocs-operator.csv.yaml.in @@ -59,6 +59,16 @@ metadata: ] } }, + { + "apiVersion": "ocs.openshift.io/v1", + "kind": "StorageProfile", + "metadata": { + "name": "medium" + }, + "spec": { + "deviceClass": "ssd" + } + }, { "apiVersion": "ocs.openshift.io/v1alpha1", "kind": "StorageConsumer", @@ -110,6 +120,9 @@ spec: kind: StorageConsumer name: storageconsumers.ocs.openshift.io version: v1alpha1 + - kind: StorageProfile + name: storageprofiles.ocs.openshift.io + version: v1 description: | **Red Hat OpenShift Container Storage** deploys three operators. 
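For context on the changes above: the ocs.openshift.io/storageprofile-spec label that the StorageClassRequest controller now stamps on CephBlockPools and CephFilesystemSubVolumeGroups is the hex-encoded MD5 of the JSON-marshalled StorageProfile spec, as implemented by GetSpecHash() in api/v1/storageprofile_types.go. A minimal, self-contained sketch of that computation follows; it uses stand-in types rather than the real api/v1 package, so names here are illustrative only.

package main

import (
	"crypto/md5"
	"encoding/hex"
	"encoding/json"
	"fmt"
)

// Stand-ins for the StorageProfileSpec fields introduced in this patch.
type SharedFilesystemConfigurationSpec struct {
	Parameters map[string]string `json:"parameters,omitempty"`
}

type BlockPoolConfigurationSpec struct {
	Parameters map[string]string `json:"parameters,omitempty"`
}

type StorageProfileSpec struct {
	DeviceClass                   string                            `json:"deviceClass"`
	SharedFilesystemConfiguration SharedFilesystemConfigurationSpec `json:"sharedFilesystemConfiguration,omitempty"`
	BlockPoolConfiguration        BlockPoolConfigurationSpec        `json:"blockPoolConfiguration,omitempty"`
}

// specHash mirrors StorageProfile.GetSpecHash(): marshal the spec to JSON and
// return the hex-encoded MD5 sum of the resulting bytes.
func specHash(spec StorageProfileSpec) string {
	specJSON, err := json.Marshal(spec)
	if err != nil {
		panic(fmt.Errorf("failed to marshal spec: %v", err))
	}
	sum := md5.Sum(specJSON)
	return hex.EncodeToString(sum[:])
}

func main() {
	// Corresponds to the sample StorageProfile "medium" with deviceClass "ssd".
	spec := StorageProfileSpec{DeviceClass: "ssd"}
	fmt.Println(specHash(spec))
}

Because the StorageProfile spec is immutable (the CEL rule "oldSelf == self"), this hash is stable for the lifetime of the profile; that is what lets reconcilePhases() look up an existing CephBlockPool by the StorageConsumerNameLabel/StorageProfileSpecLabel pair instead of keeping a storageProfile pointer on the reconciler.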
diff --git a/deploy/ocs-operator/manifests/ocs-operator.clusterserviceversion.yaml b/deploy/ocs-operator/manifests/ocs-operator.clusterserviceversion.yaml index 511e202def..a785928803 100644 --- a/deploy/ocs-operator/manifests/ocs-operator.clusterserviceversion.yaml +++ b/deploy/ocs-operator/manifests/ocs-operator.clusterserviceversion.yaml @@ -1607,7 +1607,7 @@ metadata: operatorframework.io/suggested-namespace: openshift-storage operators.openshift.io/infrastructure-features: '["disconnected"]' operators.operatorframework.io/builder: operator-sdk-v1.25.4 - operators.operatorframework.io/internal-objects: '["ocsinitializations.ocs.openshift.io","storageclassrequests.ocs.openshift.io","storageconsumers.ocs.openshift.io","cephclusters.ceph.rook.io","cephobjectstores.ceph.rook.io","cephobjectstoreusers.ceph.rook.io","cephnfses.ceph.rook.io","cephclients.ceph.rook.io","cephfilesystems.ceph.rook.io","cephfilesystemmirrors.ceph.rook.io","cephrbdmirrors.ceph.rook.io","cephobjectrealms.ceph.rook.io","cephobjectzonegroups.ceph.rook.io","cephobjectzones.ceph.rook.io","cephbucketnotifications.ceph.rook.io","cephbuckettopics.ceph.rook.io","cephfilesystemsubvolumegroups.ceph.rook.io","cephblockpoolradosnamespaces.ceph.rook.io","cephcosidrivers.ceph.rook.io"]' + operators.operatorframework.io/internal-objects: '["ocsinitializations.ocs.openshift.io","storageclassrequests.ocs.openshift.io","storageconsumers.ocs.openshift.io","storageprofiles.ocs.openshift.io","cephclusters.ceph.rook.io","cephobjectstores.ceph.rook.io","cephobjectstoreusers.ceph.rook.io","cephnfses.ceph.rook.io","cephclients.ceph.rook.io","cephfilesystems.ceph.rook.io","cephfilesystemmirrors.ceph.rook.io","cephrbdmirrors.ceph.rook.io","cephobjectrealms.ceph.rook.io","cephobjectzonegroups.ceph.rook.io","cephobjectzones.ceph.rook.io","cephbucketnotifications.ceph.rook.io","cephbuckettopics.ceph.rook.io","cephfilesystemsubvolumegroups.ceph.rook.io","cephblockpoolradosnamespaces.ceph.rook.io","cephcosidrivers.ceph.rook.io"]' operators.operatorframework.io/operator-type: non-standalone operators.operatorframework.io/project_layout: go.kubebuilder.io/v2 repository: https://github.com/red-hat-storage/ocs-operator @@ -1654,6 +1654,9 @@ spec: kind: StorageConsumer name: storageconsumers.ocs.openshift.io version: v1alpha1 + - kind: StorageProfile + name: storageprofiles.ocs.openshift.io + version: v1 - description: Represents a Ceph cluster. displayName: Ceph Cluster kind: CephCluster diff --git a/deploy/ocs-operator/manifests/storagecluster.crd.yaml b/deploy/ocs-operator/manifests/storagecluster.crd.yaml index ef2b0f1ed9..2f0cabb66a 100644 --- a/deploy/ocs-operator/manifests/storagecluster.crd.yaml +++ b/deploy/ocs-operator/manifests/storagecluster.crd.yaml @@ -6224,38 +6224,6 @@ spec: - name type: object type: array - storageProfiles: - items: - description: StorageProfile is the storage profile to use for the - storageclassrequest. - properties: - blockPoolConfiguration: - description: configurations to use for profile specific blockpool. - properties: - parameters: - additionalProperties: - type: string - type: object - type: object - deviceClass: - description: DeviceClass is the deviceclass name. - type: string - name: - description: Name of the storage profile. - type: string - sharedFilesystemConfiguration: - description: configurations to use for cephfilesystem. 
- properties: - parameters: - additionalProperties: - type: string - type: object - type: object - required: - - deviceClass - - name - type: object - type: array version: description: Version specifies the version of StorageCluster type: string diff --git a/deploy/ocs-operator/manifests/storageconsumer.crd.yaml b/deploy/ocs-operator/manifests/storageconsumer.crd.yaml index d30f49b5d1..2950e94745 100644 --- a/deploy/ocs-operator/manifests/storageconsumer.crd.yaml +++ b/deploy/ocs-operator/manifests/storageconsumer.crd.yaml @@ -65,6 +65,19 @@ spec: type: string type: object type: array + client: + description: Information of storage client received from consumer + properties: + operatorVersion: + description: StorageClient Operator Version + type: string + platformVersion: + description: StorageClient Platform Version + type: string + required: + - operatorVersion + - platformVersion + type: object lastHeartbeat: description: Timestamp of last heartbeat received from consumer format: date-time diff --git a/deploy/ocs-operator/manifests/storageprofile.crd.yaml b/deploy/ocs-operator/manifests/storageprofile.crd.yaml new file mode 100644 index 0000000000..f3b6ef3a58 --- /dev/null +++ b/deploy/ocs-operator/manifests/storageprofile.crd.yaml @@ -0,0 +1,77 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.9.2 + name: storageprofiles.ocs.openshift.io +spec: + group: ocs.openshift.io + names: + kind: StorageProfile + listKind: StorageProfileList + plural: storageprofiles + singular: storageprofile + scope: Namespaced + versions: + - name: v1 + schema: + openAPIV3Schema: + description: StorageProfile is the Schema for the storageprofiles API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: StorageProfileSpec defines the desired state of StorageProfile + properties: + blockPoolConfiguration: + description: configurations to use for profile specific blockpool. + properties: + parameters: + additionalProperties: + type: string + type: object + type: object + deviceClass: + description: DeviceClass is the deviceclass name. + maxLength: 512 + type: string + sharedFilesystemConfiguration: + description: configurations to use for cephfilesystem. 
+ properties: + parameters: + additionalProperties: + type: string + type: object + type: object + required: + - deviceClass + type: object + x-kubernetes-validations: + - message: spec is immutable + rule: oldSelf == self + status: + description: StorageProfileStatus defines the observed state of StorageProfile + properties: + phase: + description: Phase describes the Phase of StorageProfile This is used + by OLM UI to provide status information to the user + type: string + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/go.mod b/go.mod index 066c388a72..d701d0b2b1 100644 --- a/go.mod +++ b/go.mod @@ -20,7 +20,7 @@ require ( github.com/onsi/gomega v1.27.9 github.com/openshift/api v0.0.0-20231010191030-1f9525271dda github.com/openshift/build-machinery-go v0.0.0-20230306181456-d321ffa04533 - github.com/openshift/client-go v0.0.0-20230718165156-6014fb98e86a + github.com/openshift/client-go v0.0.0-20231005121823-e81400b97c46 github.com/openshift/custom-resource-status v1.1.2 github.com/operator-framework/api v0.17.7-0.20230626210316-aa3e49803e7b github.com/operator-framework/operator-lib v0.11.1-0.20230717184314-6efbe3a22f6f diff --git a/go.sum b/go.sum index 15c1c84813..c8a245cc4c 100644 --- a/go.sum +++ b/go.sum @@ -757,8 +757,8 @@ github.com/openshift/build-machinery-go v0.0.0-20200917070002-f171684f77ab/go.mo github.com/openshift/build-machinery-go v0.0.0-20230306181456-d321ffa04533 h1:mh3ZYs7kPIIe3UUY6tJcTExmtjnXXUu0MrBuK2W/Qvw= github.com/openshift/build-machinery-go v0.0.0-20230306181456-d321ffa04533/go.mod h1:b1BuldmJlbA/xYtdZvKi+7j5YGB44qJUJDZ9zwiNCfE= github.com/openshift/client-go v0.0.0-20210112165513-ebc401615f47/go.mod h1:u7NRAjtYVAKokiI9LouzTv4mhds8P4S1TwdVAfbjKSk= -github.com/openshift/client-go v0.0.0-20230718165156-6014fb98e86a h1:ZKewwwEIURDnufm2oBd9rRvSp45BtRzPPrsUIFtm4V8= -github.com/openshift/client-go v0.0.0-20230718165156-6014fb98e86a/go.mod h1:EjhPQjEm8HM3GThz5ywNGLEec1P1IjTn08kwzdvupvA= +github.com/openshift/client-go v0.0.0-20231005121823-e81400b97c46 h1:J7UsTNgyM1krYnfsmijowYqt5I4mDM1qxNAy4eEa0xc= +github.com/openshift/client-go v0.0.0-20231005121823-e81400b97c46/go.mod h1:xM64ClnmCheAmffZZdTSJejy3yPE1nTRWQthKaZQ7JY= github.com/openshift/custom-resource-status v1.1.2 h1:C3DL44LEbvlbItfd8mT5jWrqPfHnSOQoQf/sypqA6A4= github.com/openshift/custom-resource-status v1.1.2/go.mod h1:DB/Mf2oTeiAmVVX1gN+NEqweonAPY0TKUwADizj8+ZA= github.com/operator-framework/api v0.17.7-0.20230626210316-aa3e49803e7b h1:prJEMyFQde4yxxaTuvqx1A/ukuCg/EZ2MbfdZiJwlls= diff --git a/metrics/deploy/prometheus-ocs-rules-external.yaml b/metrics/deploy/prometheus-ocs-rules-external.yaml index 7d2051a1d8..470e280885 100644 --- a/metrics/deploy/prometheus-ocs-rules-external.yaml +++ b/metrics/deploy/prometheus-ocs-rules-external.yaml @@ -67,7 +67,7 @@ spec: rules: - alert: ObcQuotaBytesAlert annotations: - description: ObjectBucketClaim {{$labels.objectbucketclaim}} has crossed 80% of the size limit set by the quota(bytes) and will become read-only on reaching the quota limit. Increase the quota in the {{$labels.objectbucketclaim}} OBC custom resource. + description: ObjectBucketClaim {{ $labels.objectbucketclaim }} has crossed 80% of the size limit set by the quota(bytes) and will become read-only on reaching the quota limit. Increase the quota in the {{ $labels.objectbucketclaim }} OBC custom resource. message: OBC has crossed 80% of the quota(bytes). 
severity_level: warning storage_type: RGW @@ -78,7 +78,7 @@ spec: severity: warning - alert: ObcQuotaObjectsAlert annotations: - description: ObjectBucketClaim {{$labels.objectbucketclaim}} has crossed 80% of the size limit set by the quota(objects) and will become read-only on reaching the quota limit. Increase the quota in the {{$labels.objectbucketclaim}} OBC custom resource. + description: ObjectBucketClaim {{ $labels.objectbucketclaim }} has crossed 80% of the size limit set by the quota(objects) and will become read-only on reaching the quota limit. Increase the quota in the {{ $labels.objectbucketclaim }} OBC custom resource. message: OBC has crossed 80% of the quota(object). severity_level: warning storage_type: RGW @@ -89,7 +89,7 @@ spec: severity: warning - alert: ObcQuotaBytesExhausedAlert annotations: - description: ObjectBucketClaim {{$labels.objectbucketclaim}} has crossed the limit set by the quota(bytes) and will be read-only now. Increase the quota in the {{$labels.objectbucketclaim}} OBC custom resource immediately. + description: ObjectBucketClaim {{ $labels.objectbucketclaim }} has crossed the limit set by the quota(bytes) and will be read-only now. Increase the quota in the {{ $labels.objectbucketclaim }} OBC custom resource immediately. message: OBC reached quota(bytes) limit. severity_level: error storage_type: RGW @@ -100,7 +100,7 @@ spec: severity: critical - alert: ObcQuotaObjectsExhausedAlert annotations: - description: ObjectBucketClaim {{$labels.objectbucketclaim}} has crossed the limit set by the quota(objects) and will be read-only now. Increase the quota in the {{$labels.objectbucketclaim}} OBC custom resource immediately. + description: ObjectBucketClaim {{ $labels.objectbucketclaim }} has crossed the limit set by the quota(objects) and will be read-only now. Increase the quota in the {{ $labels.objectbucketclaim }} OBC custom resource immediately. message: OBC reached quota(object) limit. severity_level: error storage_type: RGW diff --git a/metrics/deploy/prometheus-ocs-rules.yaml b/metrics/deploy/prometheus-ocs-rules.yaml index cae98266b7..f7234be2c0 100644 --- a/metrics/deploy/prometheus-ocs-rules.yaml +++ b/metrics/deploy/prometheus-ocs-rules.yaml @@ -86,6 +86,7 @@ spec: (ocs_pool_mirroring_image_health{job="ocs-metrics-exporter"} * on (namespace) group_left() (max by(namespace) (ocs_pool_mirroring_status{job="ocs-metrics-exporter"}))) == 1 for: 1m labels: + mirroring_image_status: unknown severity: warning - alert: OdfPoolMirroringImageHealth annotations: @@ -97,6 +98,7 @@ spec: (ocs_pool_mirroring_image_health{job="ocs-metrics-exporter"} * on (namespace) group_left() (max by(namespace) (ocs_pool_mirroring_status{job="ocs-metrics-exporter"}))) == 2 for: 1m labels: + mirroring_image_status: warning severity: warning - alert: OdfPoolMirroringImageHealth annotations: @@ -108,6 +110,7 @@ spec: (ocs_pool_mirroring_image_health{job="ocs-metrics-exporter"} * on (namespace) group_left() (max by(namespace) (ocs_pool_mirroring_status{job="ocs-metrics-exporter"}))) == 3 for: 10s labels: + mirroring_image_status: error severity: critical - alert: ODFPersistentVolumeMirrorStatus annotations: @@ -135,7 +138,7 @@ spec: rules: - alert: ObcQuotaBytesAlert annotations: - description: ObjectBucketClaim {{$labels.objectbucketclaim}} has crossed 80% of the size limit set by the quota(bytes) and will become read-only on reaching the quota limit. Increase the quota in the {{$labels.objectbucketclaim}} OBC custom resource. 
+ description: ObjectBucketClaim {{ $labels.objectbucketclaim }} has crossed 80% of the size limit set by the quota(bytes) and will become read-only on reaching the quota limit. Increase the quota in the {{ $labels.objectbucketclaim }} OBC custom resource. message: OBC has crossed 80% of the quota(bytes). severity_level: warning storage_type: RGW @@ -146,7 +149,7 @@ spec: severity: warning - alert: ObcQuotaObjectsAlert annotations: - description: ObjectBucketClaim {{$labels.objectbucketclaim}} has crossed 80% of the size limit set by the quota(objects) and will become read-only on reaching the quota limit. Increase the quota in the {{$labels.objectbucketclaim}} OBC custom resource. + description: ObjectBucketClaim {{ $labels.objectbucketclaim }} has crossed 80% of the size limit set by the quota(objects) and will become read-only on reaching the quota limit. Increase the quota in the {{ $labels.objectbucketclaim }} OBC custom resource. message: OBC has crossed 80% of the quota(object). severity_level: warning storage_type: RGW @@ -157,7 +160,7 @@ spec: severity: warning - alert: ObcQuotaBytesExhausedAlert annotations: - description: ObjectBucketClaim {{$labels.objectbucketclaim}} has crossed the limit set by the quota(bytes) and will be read-only now. Increase the quota in the {{$labels.objectbucketclaim}} OBC custom resource immediately. + description: ObjectBucketClaim {{ $labels.objectbucketclaim }} has crossed the limit set by the quota(bytes) and will be read-only now. Increase the quota in the {{ $labels.objectbucketclaim }} OBC custom resource immediately. message: OBC reached quota(bytes) limit. severity_level: error storage_type: RGW @@ -168,7 +171,7 @@ spec: severity: critical - alert: ObcQuotaObjectsExhausedAlert annotations: - description: ObjectBucketClaim {{$labels.objectbucketclaim}} has crossed the limit set by the quota(objects) and will be read-only now. Increase the quota in the {{$labels.objectbucketclaim}} OBC custom resource immediately. + description: ObjectBucketClaim {{ $labels.objectbucketclaim }} has crossed the limit set by the quota(objects) and will be read-only now. Increase the quota in the {{ $labels.objectbucketclaim }} OBC custom resource immediately. message: OBC reached quota(object) limit. severity_level: error storage_type: RGW @@ -224,3 +227,41 @@ spec: for: 5s labels: severity: critical + - name: storage-client-alerts.rules + rules: + - alert: StorageClientHeartbeatMissed + annotations: + description: Storage Client ({{ $labels.storage_consumer_name }}) heartbeat missed for more than 120 (s). Lossy network connectivity might exist + message: Storage Client ({{ $labels.storage_consumer_name }}) heartbeat missed for more than 120 (s) + severity_level: warning + expr: | + (time() - 120) > (ocs_storage_client_last_heartbeat > 0) + labels: + severity: warning + - alert: StorageClientHeartbeatMissed + annotations: + description: Storage Client ({{ $labels.storage_consumer_name }}) heartbeat missed for more than 300 (s). Client might have lost internet connectivity + message: Storage Client ({{ $labels.storage_consumer_name }}) heartbeat missed for more than 300 (s) + severity_level: critical + expr: | + (time() - 300) > (ocs_storage_client_last_heartbeat > 0) + labels: + severity: critical + - alert: StorageClientIncompatibleOperatorVersion + annotations: + description: Storage Client Operator ({{ $labels.storage_consumer_name }}) lags by 1 minor version. 
Client configuration may be incompatible + message: Storage Client Operator ({{ $labels.storage_consumer_name }}) lags by 1 minor version + severity_level: warning + expr: | + floor((ocs_storage_provider_operator_version>0)/1000) - ignoring(storage_consumer_name) group_right() floor((ocs_storage_client_operator_version>0)/1000) == 1 + labels: + severity: warning + - alert: StorageClientIncompatibleOperatorVersion + annotations: + description: Storage Client Operator ({{ $labels.storage_consumer_name }}) differs by more than 1 minor version. Client configuration may be incompatible and unsupported + message: Storage Client Operator ({{ $labels.storage_consumer_name }}) differs by more than 1 minor version + severity_level: critical + expr: | + floor((ocs_storage_provider_operator_version>0)/1000) - ignoring(storage_consumer_name) group_right() floor((ocs_storage_client_operator_version>0)/1000) > 1 or floor((ocs_storage_client_operator_version>0)/1000) - ignoring(storage_consumer_name) group_left() floor((ocs_storage_provider_operator_version>0)/1000) >= 1 + labels: + severity: critical diff --git a/metrics/internal/collectors/storageconsumer.go b/metrics/internal/collectors/storageconsumer.go index 544376f4bc..b6f1d0a783 100644 --- a/metrics/internal/collectors/storageconsumer.go +++ b/metrics/internal/collectors/storageconsumer.go @@ -1,9 +1,15 @@ package collectors import ( + "fmt" + "strconv" + "strings" + + "github.com/blang/semver/v4" "github.com/prometheus/client_golang/prometheus" ocsv1alpha1 "github.com/red-hat-storage/ocs-operator/v4/api/v1alpha1" "github.com/red-hat-storage/ocs-operator/v4/metrics/internal/options" + "github.com/red-hat-storage/ocs-operator/v4/metrics/internal/version" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/fields" "k8s.io/apimachinery/pkg/labels" @@ -16,6 +22,9 @@ var _ prometheus.Collector = &StorageConsumerCollector{} type StorageConsumerCollector struct { Informer cache.SharedIndexInformer StorageConsumerMetadata *prometheus.Desc + LastHeartbeat *prometheus.Desc + ProviderOperatorVersion *prometheus.Desc + ClientOperatorVersion *prometheus.Desc AllowedNamespace string } @@ -27,11 +36,29 @@ func NewStorageConsumerCollector(opts *options.Options) *StorageConsumerCollecto } lw := cache.NewListWatchFromClient(ocsClient, "storageconsumers", metav1.NamespaceAll, fields.Everything()) sharedIndexInformer := cache.NewSharedIndexInformer(lw, &ocsv1alpha1.StorageConsumer{}, 0, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}) + return &StorageConsumerCollector{ StorageConsumerMetadata: prometheus.NewDesc( prometheus.BuildFQName("ocs", "storage_consumer", "metadata"), `Attributes of OCS Storage Consumers`, - []string{"storage_consumer_name", "capacity", "state", "granted_capacity"}, + []string{"storage_consumer_name", "state"}, + nil, + ), + LastHeartbeat: prometheus.NewDesc( + prometheus.BuildFQName("ocs", "storage_client", "last_heartbeat"), + `Unixtime (in sec) of last heartbeat of OCS Storage Client`, + []string{"storage_consumer_name"}, + nil, + ), + ProviderOperatorVersion: prometheus.NewDesc( + prometheus.BuildFQName("ocs", "storage_provider", "operator_version"), + `OCS StorageProvider encode Operator Version`, + nil, nil, + ), + ClientOperatorVersion: prometheus.NewDesc( + prometheus.BuildFQName("ocs", "storage_client", "operator_version"), + `OCS StorageClient encoded Operator Version`, + []string{"storage_consumer_name"}, nil, ), Informer: sharedIndexInformer, @@ -49,6 +76,9 @@ func (c 
*StorageConsumerCollector) Collect(ch chan<- prometheus.Metric) { func (c *StorageConsumerCollector) Describe(ch chan<- *prometheus.Desc) { ds := []*prometheus.Desc{ c.StorageConsumerMetadata, + c.LastHeartbeat, + c.ProviderOperatorVersion, + c.ClientOperatorVersion, } for _, d := range ds { ch <- d @@ -59,12 +89,55 @@ func (c *StorageConsumerCollector) Run(stopCh <-chan struct{}) { go c.Informer.Run(stopCh) } +// encodes version padding with 3 zeros for each part making suitable +// for numerical comparisons +// ex: 4.10.3 -> 004 010 003 -> 4010003 +func encodeVersion(version string) int { + + fv, err := semver.FinalizeVersion(version) + if err != nil { + klog.Warningf("Failed to parse %q as semver version: %v", version, err) + return -1 + } + + parts := strings.Split(fv, ".") + if len(parts) != 3 { + return -1 + } + sb := make([]string, 3) + for i := range sb { + sb[i] = fmt.Sprintf("%03s", parts[i]) + } + + ver := strings.Join(sb, "") + encode, err := strconv.Atoi(ver) + if err != nil { + return -1 + } + + return encode +} + func (c *StorageConsumerCollector) collectStorageConsumersMetadata(storageConsumers []*ocsv1alpha1.StorageConsumer, ch chan<- prometheus.Metric) { + + ch <- prometheus.MustNewConstMetric(c.ProviderOperatorVersion, + prometheus.GaugeValue, float64(encodeVersion(version.GetVersion())), + ) + for _, storageConsumer := range storageConsumers { ch <- prometheus.MustNewConstMetric(c.StorageConsumerMetadata, prometheus.GaugeValue, 1, storageConsumer.Name, string(storageConsumer.Status.State)) + + ch <- prometheus.MustNewConstMetric(c.LastHeartbeat, + prometheus.GaugeValue, float64(storageConsumer.Status.LastHeartbeat.Time.Unix()), + storageConsumer.Name) + + ch <- prometheus.MustNewConstMetric(c.ClientOperatorVersion, + prometheus.GaugeValue, + float64(encodeVersion(storageConsumer.Status.Client.OperatorVersion)), + storageConsumer.Name) } } diff --git a/metrics/mixin/alerts/alerts.libsonnet b/metrics/mixin/alerts/alerts.libsonnet index 14c60ea17c..5fe4084d96 100644 --- a/metrics/mixin/alerts/alerts.libsonnet +++ b/metrics/mixin/alerts/alerts.libsonnet @@ -2,4 +2,5 @@ (import 'obc.libsonnet') + (import 'services.libsonnet') + (import 'blocklist.libsonnet') + -(import 'encryption.libsonnet') +(import 'encryption.libsonnet') + +(import 'storage-client.libsonnet') diff --git a/metrics/mixin/alerts/storage-client.libsonnet b/metrics/mixin/alerts/storage-client.libsonnet new file mode 100644 index 0000000000..6382d6c564 --- /dev/null +++ b/metrics/mixin/alerts/storage-client.libsonnet @@ -0,0 +1,72 @@ +{ + prometheusAlerts+:: { + groups+: [ + { + name: 'storage-client-alerts.rules', + rules: [ + { + alert: 'StorageClientHeartbeatMissed', + expr: ||| + (time() - %(clientCheckinWarnSec)d) > (ocs_storage_client_last_heartbeat > 0) + ||| % $._config, + labels: { + severity: 'warning', + }, + annotations: { + message: 'Storage Client ({{ $labels.storage_consumer_name }}) heartbeat missed for more than %d (s)' % $._config.clientCheckinWarnSec, + description: 'Storage Client ({{ $labels.storage_consumer_name }}) heartbeat missed for more than %d (s). 
Lossy network connectivity might exist' % $._config.clientCheckinWarnSec, + severity_level: 'warning', + }, + }, + { + alert: 'StorageClientHeartbeatMissed', + expr: ||| + (time() - %(clientCheckinCritSec)d) > (ocs_storage_client_last_heartbeat > 0) + ||| % $._config, + labels: { + severity: 'critical', + }, + annotations: { + message: 'Storage Client ({{ $labels.storage_consumer_name }}) heartbeat missed for more than %d (s)' % $._config.clientCheckinCritSec, + description: 'Storage Client ({{ $labels.storage_consumer_name }}) heartbeat missed for more than %d (s). Client might have lost internet connectivity' % $._config.clientCheckinCritSec, + severity_level: 'critical', + }, + }, + { + # divide by 1000 here removes patch version + # warn if client lags provider by one minor version + alert: 'StorageClientIncompatibleOperatorVersion', + expr: ||| + floor((ocs_storage_provider_operator_version>0)/1000) - ignoring(storage_consumer_name) group_right() floor((ocs_storage_client_operator_version>0)/1000) == %(clientOperatorMinorVerDiff)d + ||| % $._config, + labels: { + severity: 'warning', + }, + annotations: { + message: 'Storage Client Operator ({{ $labels.storage_consumer_name }}) lags by %d minor version' % $._config.clientOperatorMinorVerDiff, + description: 'Storage Client Operator ({{ $labels.storage_consumer_name }}) lags by %d minor version. Client configuration may be incompatible' % $._config.clientOperatorMinorVerDiff, + severity_level: 'warning', + }, + }, + { + # divide by 1000 here removes patch version + # critical if client lags provider by more than one minor version or + # client is ahead of provider + alert: 'StorageClientIncompatibleOperatorVersion', + expr: ||| + floor((ocs_storage_provider_operator_version>0)/1000) - ignoring(storage_consumer_name) group_right() floor((ocs_storage_client_operator_version>0)/1000) > %(clientOperatorMinorVerDiff)d or floor((ocs_storage_client_operator_version>0)/1000) - ignoring(storage_consumer_name) group_left() floor((ocs_storage_provider_operator_version>0)/1000) >= %(clientOperatorMinorVerDiff)d + ||| % $._config, + labels: { + severity: 'critical', + }, + annotations: { + message: 'Storage Client Operator ({{ $labels.storage_consumer_name }}) differs by more than %d minor version' % $._config.clientOperatorMinorVerDiff, + description: 'Storage Client Operator ({{ $labels.storage_consumer_name }}) differs by more than %d minor version. 
Client configuration may be incompatible and unsupported' % $._config.clientOperatorMinorVerDiff, + severity_level: 'critical', + }, + }, + ], + }, + ], + } +} diff --git a/metrics/mixin/config.libsonnet b/metrics/mixin/config.libsonnet index f424780913..46a4c368e9 100644 --- a/metrics/mixin/config.libsonnet +++ b/metrics/mixin/config.libsonnet @@ -22,5 +22,10 @@ jobs: { ocsExporter: $._config.ExporterSelector, }, + + // Storage Consumer + clientCheckinWarnSec: 120, + clientCheckinCritSec: 300, + clientOperatorMinorVerDiff: 1, }, } diff --git a/services/provider/client/client.go b/services/provider/client/client.go index bbfd8db0bf..e11ce16602 100644 --- a/services/provider/client/client.go +++ b/services/provider/client/client.go @@ -6,6 +6,7 @@ import ( "fmt" "time" + cs "github.com/red-hat-storage/ocs-operator/v4/services/provider/clientstatus" pb "github.com/red-hat-storage/ocs-operator/v4/services/provider/pb" "google.golang.org/grpc" @@ -188,13 +189,18 @@ func (cc *OCSProviderClient) GetStorageClassClaimConfig(ctx context.Context, con return cc.Client.GetStorageClassClaimConfig(apiCtx, req) } -func (cc *OCSProviderClient) ReportStatus(ctx context.Context, consumerUUID string) (*pb.ReportStatusResponse, error) { +func NewStorageClientStatus() cs.StorageClientStatus { + return &pb.ReportStatusRequest{} +} + +func (cc *OCSProviderClient) ReportStatus(ctx context.Context, consumerUUID string, status cs.StorageClientStatus) (*pb.ReportStatusResponse, error) { if cc.Client == nil || cc.clientConn == nil { return nil, fmt.Errorf("Provider client is closed") } - req := &pb.ReportStatusRequest{ - StorageConsumerUUID: consumerUUID, - } + + // panic if the request wasn't constructed using "NewStorageClientStatus()" + req := status.(*pb.ReportStatusRequest) + req.StorageConsumerUUID = consumerUUID apiCtx, cancel := context.WithTimeout(ctx, cc.timeout) defer cancel() diff --git a/services/provider/clientstatus/status.go b/services/provider/clientstatus/status.go new file mode 100644 index 0000000000..1b2506891f --- /dev/null +++ b/services/provider/clientstatus/status.go @@ -0,0 +1,9 @@ +package clientstatus + +type StorageClientStatus interface { + GetPlatformVersion() string + GetOperatorVersion() string + + SetPlatformVersion(string) StorageClientStatus + SetOperatorVersion(string) StorageClientStatus +} diff --git a/services/provider/pb/provider.pb.go b/services/provider/pb/provider.pb.go index 4816a535fa..3f89cd85a6 100644 --- a/services/provider/pb/provider.pb.go +++ b/services/provider/pb/provider.pb.go @@ -852,7 +852,9 @@ type ReportStatusRequest struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - StorageConsumerUUID string `protobuf:"bytes,1,opt,name=storageConsumerUUID,proto3" json:"storageConsumerUUID,omitempty"` + StorageConsumerUUID string `protobuf:"bytes,1,opt,name=storageConsumerUUID,proto3" json:"storageConsumerUUID,omitempty"` + ClientPlatformVersion string `protobuf:"bytes,2,opt,name=clientPlatformVersion,proto3" json:"clientPlatformVersion,omitempty"` + ClientOperatorVersion string `protobuf:"bytes,3,opt,name=clientOperatorVersion,proto3" json:"clientOperatorVersion,omitempty"` } func (x *ReportStatusRequest) Reset() { @@ -894,6 +896,20 @@ func (x *ReportStatusRequest) GetStorageConsumerUUID() string { return "" } +func (x *ReportStatusRequest) GetClientPlatformVersion() string { + if x != nil { + return x.ClientPlatformVersion + } + return "" +} + +func (x *ReportStatusRequest) GetClientOperatorVersion() string { + if x != nil { + return 
x.ClientOperatorVersion + } + return "" +} + type ReportStatusResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -1031,65 +1047,72 @@ var file_provider_proto_rawDesc = []byte{ 0x0b, 0x32, 0x1a, 0x2e, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x10, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x22, - 0x47, 0x0a, 0x13, 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x30, 0x0a, 0x13, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, - 0x65, 0x43, 0x6f, 0x6e, 0x73, 0x75, 0x6d, 0x65, 0x72, 0x55, 0x55, 0x49, 0x44, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x13, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x43, 0x6f, 0x6e, 0x73, - 0x75, 0x6d, 0x65, 0x72, 0x55, 0x55, 0x49, 0x44, 0x22, 0x16, 0x0a, 0x14, 0x52, 0x65, 0x70, 0x6f, - 0x72, 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x32, 0xb4, 0x06, 0x0a, 0x0b, 0x4f, 0x43, 0x53, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, - 0x12, 0x58, 0x0a, 0x0f, 0x4f, 0x6e, 0x62, 0x6f, 0x61, 0x72, 0x64, 0x43, 0x6f, 0x6e, 0x73, 0x75, - 0x6d, 0x65, 0x72, 0x12, 0x20, 0x2e, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x2e, 0x4f, - 0x6e, 0x62, 0x6f, 0x61, 0x72, 0x64, 0x43, 0x6f, 0x6e, 0x73, 0x75, 0x6d, 0x65, 0x72, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, - 0x2e, 0x4f, 0x6e, 0x62, 0x6f, 0x61, 0x72, 0x64, 0x43, 0x6f, 0x6e, 0x73, 0x75, 0x6d, 0x65, 0x72, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x55, 0x0a, 0x10, 0x47, 0x65, - 0x74, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x1e, - 0x2e, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x2e, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, - 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, - 0x2e, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x2e, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, - 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, - 0x00, 0x12, 0x5b, 0x0a, 0x10, 0x4f, 0x66, 0x66, 0x62, 0x6f, 0x61, 0x72, 0x64, 0x43, 0x6f, 0x6e, - 0x73, 0x75, 0x6d, 0x65, 0x72, 0x12, 0x21, 0x2e, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, + 0xb3, 0x01, 0x0a, 0x13, 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x30, 0x0a, 0x13, 0x73, 0x74, 0x6f, 0x72, 0x61, + 0x67, 0x65, 0x43, 0x6f, 0x6e, 0x73, 0x75, 0x6d, 0x65, 0x72, 0x55, 0x55, 0x49, 0x44, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x13, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x43, 0x6f, 0x6e, + 0x73, 0x75, 0x6d, 0x65, 0x72, 0x55, 0x55, 0x49, 0x44, 0x12, 0x34, 0x0a, 0x15, 0x63, 0x6c, 0x69, + 0x65, 0x6e, 0x74, 0x50, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x56, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x15, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, + 0x50, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, + 0x34, 0x0a, 0x15, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x6f, + 0x72, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x15, + 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x6f, 0x72, 0x56, 0x65, + 0x72, 0x73, 0x69, 
0x6f, 0x6e, 0x22, 0x16, 0x0a, 0x14, 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x53, + 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x32, 0xb4, 0x06, + 0x0a, 0x0b, 0x4f, 0x43, 0x53, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x12, 0x58, 0x0a, + 0x0f, 0x4f, 0x6e, 0x62, 0x6f, 0x61, 0x72, 0x64, 0x43, 0x6f, 0x6e, 0x73, 0x75, 0x6d, 0x65, 0x72, + 0x12, 0x20, 0x2e, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x2e, 0x4f, 0x6e, 0x62, 0x6f, + 0x61, 0x72, 0x64, 0x43, 0x6f, 0x6e, 0x73, 0x75, 0x6d, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x2e, 0x4f, 0x6e, + 0x62, 0x6f, 0x61, 0x72, 0x64, 0x43, 0x6f, 0x6e, 0x73, 0x75, 0x6d, 0x65, 0x72, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x55, 0x0a, 0x10, 0x47, 0x65, 0x74, 0x53, 0x74, + 0x6f, 0x72, 0x61, 0x67, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x1e, 0x2e, 0x70, 0x72, + 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x2e, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x43, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x70, 0x72, + 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x2e, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x43, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x5b, + 0x0a, 0x10, 0x4f, 0x66, 0x66, 0x62, 0x6f, 0x61, 0x72, 0x64, 0x43, 0x6f, 0x6e, 0x73, 0x75, 0x6d, + 0x65, 0x72, 0x12, 0x21, 0x2e, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x2e, 0x4f, 0x66, + 0x66, 0x62, 0x6f, 0x61, 0x72, 0x64, 0x43, 0x6f, 0x6e, 0x73, 0x75, 0x6d, 0x65, 0x72, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x2e, 0x4f, 0x66, 0x66, 0x62, 0x6f, 0x61, 0x72, 0x64, 0x43, 0x6f, 0x6e, 0x73, 0x75, 0x6d, 0x65, - 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x70, 0x72, 0x6f, 0x76, 0x69, - 0x64, 0x65, 0x72, 0x2e, 0x4f, 0x66, 0x66, 0x62, 0x6f, 0x61, 0x72, 0x64, 0x43, 0x6f, 0x6e, 0x73, - 0x75, 0x6d, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x6a, - 0x0a, 0x15, 0x41, 0x63, 0x6b, 0x6e, 0x6f, 0x77, 0x6c, 0x65, 0x64, 0x67, 0x65, 0x4f, 0x6e, 0x62, - 0x6f, 0x61, 0x72, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x26, 0x2e, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, - 0x65, 0x72, 0x2e, 0x41, 0x63, 0x6b, 0x6e, 0x6f, 0x77, 0x6c, 0x65, 0x64, 0x67, 0x65, 0x4f, 0x6e, - 0x62, 0x6f, 0x61, 0x72, 0x64, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, - 0x27, 0x2e, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x2e, 0x41, 0x63, 0x6b, 0x6e, 0x6f, - 0x77, 0x6c, 0x65, 0x64, 0x67, 0x65, 0x4f, 0x6e, 0x62, 0x6f, 0x61, 0x72, 0x64, 0x69, 0x6e, 0x67, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x73, 0x0a, 0x18, 0x46, 0x75, - 0x6c, 0x66, 0x69, 0x6c, 0x6c, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x43, 0x6c, 0x61, 0x73, - 0x73, 0x43, 0x6c, 0x61, 0x69, 0x6d, 0x12, 0x29, 0x2e, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, - 0x72, 0x2e, 0x46, 0x75, 0x6c, 0x66, 0x69, 0x6c, 0x6c, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, - 0x43, 0x6c, 0x61, 0x73, 0x73, 0x43, 0x6c, 0x61, 0x69, 0x6d, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x1a, 0x2a, 0x2e, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x2e, 0x46, 0x75, 0x6c, - 0x66, 0x69, 0x6c, 0x6c, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, - 0x43, 0x6c, 0x61, 0x69, 0x6d, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, - 0x70, 0x0a, 0x17, 0x52, 0x65, 0x76, 0x6f, 
0x6b, 0x65, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, - 0x43, 0x6c, 0x61, 0x73, 0x73, 0x43, 0x6c, 0x61, 0x69, 0x6d, 0x12, 0x28, 0x2e, 0x70, 0x72, 0x6f, - 0x76, 0x69, 0x64, 0x65, 0x72, 0x2e, 0x52, 0x65, 0x76, 0x6f, 0x6b, 0x65, 0x53, 0x74, 0x6f, 0x72, - 0x61, 0x67, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x43, 0x6c, 0x61, 0x69, 0x6d, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x1a, 0x29, 0x2e, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x2e, + 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x6a, 0x0a, 0x15, 0x41, + 0x63, 0x6b, 0x6e, 0x6f, 0x77, 0x6c, 0x65, 0x64, 0x67, 0x65, 0x4f, 0x6e, 0x62, 0x6f, 0x61, 0x72, + 0x64, 0x69, 0x6e, 0x67, 0x12, 0x26, 0x2e, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x2e, + 0x41, 0x63, 0x6b, 0x6e, 0x6f, 0x77, 0x6c, 0x65, 0x64, 0x67, 0x65, 0x4f, 0x6e, 0x62, 0x6f, 0x61, + 0x72, 0x64, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x27, 0x2e, 0x70, + 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x2e, 0x41, 0x63, 0x6b, 0x6e, 0x6f, 0x77, 0x6c, 0x65, + 0x64, 0x67, 0x65, 0x4f, 0x6e, 0x62, 0x6f, 0x61, 0x72, 0x64, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x73, 0x0a, 0x18, 0x46, 0x75, 0x6c, 0x66, 0x69, + 0x6c, 0x6c, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x43, 0x6c, + 0x61, 0x69, 0x6d, 0x12, 0x29, 0x2e, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x2e, 0x46, + 0x75, 0x6c, 0x66, 0x69, 0x6c, 0x6c, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x43, 0x6c, 0x61, + 0x73, 0x73, 0x43, 0x6c, 0x61, 0x69, 0x6d, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2a, + 0x2e, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x2e, 0x46, 0x75, 0x6c, 0x66, 0x69, 0x6c, + 0x6c, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x43, 0x6c, 0x61, + 0x69, 0x6d, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x70, 0x0a, 0x17, 0x52, 0x65, 0x76, 0x6f, 0x6b, 0x65, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x43, 0x6c, 0x61, - 0x73, 0x73, 0x43, 0x6c, 0x61, 0x69, 0x6d, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, - 0x00, 0x12, 0x73, 0x0a, 0x1a, 0x47, 0x65, 0x74, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x43, - 0x6c, 0x61, 0x73, 0x73, 0x43, 0x6c, 0x61, 0x69, 0x6d, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, - 0x28, 0x2e, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x2e, 0x53, 0x74, 0x6f, 0x72, 0x61, - 0x67, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x43, 0x6c, 0x61, 0x69, 0x6d, 0x43, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x29, 0x2e, 0x70, 0x72, 0x6f, 0x76, - 0x69, 0x64, 0x65, 0x72, 0x2e, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x43, 0x6c, 0x61, 0x73, - 0x73, 0x43, 0x6c, 0x61, 0x69, 0x6d, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x4f, 0x0a, 0x0c, 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, - 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x1d, 0x2e, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, - 0x72, 0x2e, 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, - 0x2e, 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x42, 0x0f, 0x5a, 0x0d, 0x2e, 0x2f, 0x3b, 0x70, 0x72, - 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x70, 0x62, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x73, 0x73, 0x43, 0x6c, 0x61, 0x69, 0x6d, 0x12, 0x28, 0x2e, 0x70, 
0x72, 0x6f, 0x76, 0x69, 0x64, + 0x65, 0x72, 0x2e, 0x52, 0x65, 0x76, 0x6f, 0x6b, 0x65, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, + 0x43, 0x6c, 0x61, 0x73, 0x73, 0x43, 0x6c, 0x61, 0x69, 0x6d, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x1a, 0x29, 0x2e, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x2e, 0x52, 0x65, 0x76, + 0x6f, 0x6b, 0x65, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x43, + 0x6c, 0x61, 0x69, 0x6d, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x73, + 0x0a, 0x1a, 0x47, 0x65, 0x74, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x43, 0x6c, 0x61, 0x73, + 0x73, 0x43, 0x6c, 0x61, 0x69, 0x6d, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x28, 0x2e, 0x70, + 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x2e, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x43, + 0x6c, 0x61, 0x73, 0x73, 0x43, 0x6c, 0x61, 0x69, 0x6d, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x29, 0x2e, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, + 0x72, 0x2e, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x43, 0x6c, + 0x61, 0x69, 0x6d, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x22, 0x00, 0x12, 0x4f, 0x0a, 0x0c, 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x53, 0x74, 0x61, + 0x74, 0x75, 0x73, 0x12, 0x1d, 0x2e, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x2e, 0x52, + 0x65, 0x70, 0x6f, 0x72, 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x2e, 0x52, 0x65, + 0x70, 0x6f, 0x72, 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x22, 0x00, 0x42, 0x0f, 0x5a, 0x0d, 0x2e, 0x2f, 0x3b, 0x70, 0x72, 0x6f, 0x76, 0x69, + 0x64, 0x65, 0x72, 0x70, 0x62, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( diff --git a/services/provider/pb/storageclient_status.go b/services/provider/pb/storageclient_status.go new file mode 100644 index 0000000000..f417fe4454 --- /dev/null +++ b/services/provider/pb/storageclient_status.go @@ -0,0 +1,26 @@ +package providerpb + +import ( + cs "github.com/red-hat-storage/ocs-operator/v4/services/provider/clientstatus" +) + +// ensure ReportStatusRequest satisfies StorageClientStatus interface +var _ cs.StorageClientStatus = &ReportStatusRequest{} + +func (r *ReportStatusRequest) GetPlatformVersion() string { + return r.ClientPlatformVersion +} + +func (r *ReportStatusRequest) GetOperatorVersion() string { + return r.ClientOperatorVersion +} + +func (r *ReportStatusRequest) SetPlatformVersion(version string) cs.StorageClientStatus { + r.ClientPlatformVersion = version + return r +} + +func (r *ReportStatusRequest) SetOperatorVersion(version string) cs.StorageClientStatus { + r.ClientOperatorVersion = version + return r +} diff --git a/services/provider/proto/provider.proto b/services/provider/proto/provider.proto index dc76790d5e..63200f36fd 100644 --- a/services/provider/proto/provider.proto +++ b/services/provider/proto/provider.proto @@ -155,6 +155,8 @@ message StorageClassClaimConfigResponse{ message ReportStatusRequest{ string storageConsumerUUID = 1; + string clientPlatformVersion = 2; + string clientOperatorVersion = 3; } message ReportStatusResponse{} diff --git a/services/provider/server/consumer.go b/services/provider/server/consumer.go index 0d6fca2b91..2690471d56 100644 --- a/services/provider/server/consumer.go +++ b/services/provider/server/consumer.go @@ -2,12 +2,12 @@ package server import ( 
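
As an aside on the version arithmetic behind the new metrics and alerts: the exporter encodes an x.y.z version by zero-padding each part to three digits and concatenating them (4.10.3 becomes 4010003), and the StorageClientIncompatibleOperatorVersion expressions divide the encoded value by 1000 to drop the patch digits before comparing provider and client minor versions. A small self-contained sketch of that arithmetic, mirroring encodeVersion from the collector, with illustrative values that are not taken from this patch:

// Illustrative sketch of the version encoding and minor-version comparison.
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// encode pads each of the three version parts to three digits and joins them,
// e.g. "4.10.3" -> "004"+"010"+"003" -> 4010003.
func encode(version string) int {
	parts := strings.Split(version, ".")
	if len(parts) != 3 {
		return -1
	}
	padded := make([]string, 3)
	for i := range parts {
		padded[i] = fmt.Sprintf("%03s", parts[i])
	}
	n, err := strconv.Atoi(strings.Join(padded, ""))
	if err != nil {
		return -1
	}
	return n
}

func main() {
	provider := encode("4.14.1") // placeholder provider operator version
	client := encode("4.13.5")   // placeholder client operator version

	// Integer division by 1000 discards the patch digits, which is what the
	// alert expressions do with floor(.../1000); a difference of exactly 1
	// fires the warning alert, anything larger (or a client that is ahead of
	// the provider) fires the critical one.
	fmt.Println(provider, client, provider/1000-client/1000) // 4014001 4013005 1
}
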
"context" - "encoding/json" "errors" "fmt" "sync" ocsv1alpha1 "github.com/red-hat-storage/ocs-operator/v4/api/v1alpha1" + cs "github.com/red-hat-storage/ocs-operator/v4/services/provider/clientstatus" kerrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" @@ -199,36 +199,19 @@ func (c *ocsConsumerManager) Get(ctx context.Context, id string) (*ocsv1alpha1.S return consumerObj, nil } -func (c *ocsConsumerManager) UpdateStatusLastHeatbeat(ctx context.Context, id string) error { - uid := types.UID(id) - - c.mutex.RLock() - consumerName, ok := c.nameByUID[uid] - if !ok { - c.mutex.RUnlock() - klog.Warningf("no storageConsumer found with UID %q", id) - return nil +func (c *ocsConsumerManager) UpdateConsumerStatus(ctx context.Context, id string, status cs.StorageClientStatus) error { + consumerObj, err := c.Get(ctx, id) + if err != nil { + return err } - c.mutex.RUnlock() - patchInfo := struct { - Op string `json:"op"` - Path string `json:"path"` - Value interface{} `json:"value"` - }{ - Op: "replace", - Path: "/status/lastHeartbeat", - Value: metav1.Now(), - } - jsonPatchInfo, _ := json.Marshal([]interface{}{patchInfo}) - patch := client.RawPatch(types.JSONPatchType, jsonPatchInfo) + consumerObj.Status.LastHeartbeat = metav1.Now() + consumerObj.Status.Client.PlatformVersion = status.GetPlatformVersion() + consumerObj.Status.Client.OperatorVersion = status.GetOperatorVersion() - consumerObj := &ocsv1alpha1.StorageConsumer{} - consumerObj.Name = consumerName - consumerObj.Namespace = c.namespace - if err := c.client.Status().Patch(ctx, consumerObj, patch); err != nil { - return fmt.Errorf("Failed to patch Status.LastHeartbeat for StorageConsumer %v: %v", consumerName, err) + if err := c.client.Status().Update(ctx, consumerObj); err != nil { + return fmt.Errorf("Failed to patch Status for StorageConsumer %v: %v", consumerObj.Name, err) } - klog.Infof("successfully updated Status.LastHeartbeat for StorageConsumer %v", consumerName) + klog.Infof("successfully updated Status for StorageConsumer %v", consumerObj.Name) return nil } diff --git a/services/provider/server/consumer_test.go b/services/provider/server/consumer_test.go index 088d7acda7..b927c1e030 100644 --- a/services/provider/server/consumer_test.go +++ b/services/provider/server/consumer_test.go @@ -6,12 +6,12 @@ import ( api "github.com/red-hat-storage/ocs-operator/v4/api/v1" ocsv1alpha1 "github.com/red-hat-storage/ocs-operator/v4/api/v1alpha1" + providerClient "github.com/red-hat-storage/ocs-operator/v4/services/provider/client" rookCephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" "github.com/stretchr/testify/assert" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/fake" ) @@ -47,7 +47,7 @@ var ( } ) -func newFakeClient(t *testing.T, obj ...runtime.Object) client.Client { +func newFakeClient(t *testing.T, obj ...client.Object) client.Client { scheme, err := api.SchemeBuilder.Build() assert.NoError(t, err, "unable to build scheme") @@ -60,12 +60,15 @@ func newFakeClient(t *testing.T, obj ...runtime.Object) client.Client { err = rookCephv1.AddToScheme(scheme) assert.NoError(t, err, "failed to add rookCephv1 scheme") - return fake.NewClientBuilder().WithScheme(scheme).WithRuntimeObjects(obj...).Build() + return fake.NewClientBuilder(). + WithScheme(scheme). + WithObjects(obj...). 
+ WithStatusSubresource(obj...).Build() } func TestNewConsumerManager(t *testing.T) { ctx := context.TODO() - obj := []runtime.Object{} + obj := []client.Object{} // Test NewConsumerManager with no StorageConsumer resources client := newFakeClient(t) @@ -89,7 +92,7 @@ func TestNewConsumerManager(t *testing.T) { func TestCreateStorageConsumer(t *testing.T) { ctx := context.TODO() - obj := []runtime.Object{} + obj := []client.Object{} obj = append(obj, consumer1) client := newFakeClient(t, obj...) @@ -115,7 +118,7 @@ func TestCreateStorageConsumer(t *testing.T) { func TestDeleteStorageConsumer(t *testing.T) { ctx := context.TODO() - obj := []runtime.Object{} + obj := []client.Object{} obj = append(obj, consumer1) client := newFakeClient(t, obj...) @@ -142,7 +145,7 @@ func TestDeleteStorageConsumer(t *testing.T) { func TestGetStorageConsumer(t *testing.T) { ctx := context.TODO() - obj := []runtime.Object{} + obj := []client.Object{} obj = append(obj, consumer1) client := newFakeClient(t, obj...) @@ -158,3 +161,34 @@ func TestGetStorageConsumer(t *testing.T) { assert.NoError(t, err) assert.Equal(t, "consumer1", consumer.Name) } + +func TestUpdateConsumerStatus(t *testing.T) { + ctx := context.TODO() + obj := []client.Object{} + + consumer := &ocsv1alpha1.StorageConsumer{} + consumer1.DeepCopyInto(consumer) + + // status should be preserved after update + consumer.Status.State = ocsv1alpha1.StorageConsumerStateReady + + obj = append(obj, consumer) + client := newFakeClient(t, obj...) + consumerManager, err := newConsumerManager(ctx, client, + testNamespace) + assert.NoError(t, err) + + // with fields + fields := providerClient.NewStorageClientStatus(). + SetPlatformVersion("1.0.0"). + SetOperatorVersion("1.0.0") + err = consumerManager.UpdateConsumerStatus(ctx, "uid1", fields) + assert.NoError(t, err) + + c1, err := consumerManager.Get(ctx, "uid1") + assert.NoError(t, err) + assert.NotEmpty(t, c1.Status.LastHeartbeat) + assert.Equal(t, fields.GetPlatformVersion(), c1.Status.Client.PlatformVersion) + assert.Equal(t, fields.GetOperatorVersion(), c1.Status.Client.OperatorVersion) + assert.Equal(t, c1.Status.State, ocsv1alpha1.StorageConsumerStateReady) +} diff --git a/services/provider/server/server.go b/services/provider/server/server.go index 261825a1fc..5643a39998 100644 --- a/services/provider/server/server.go +++ b/services/provider/server/server.go @@ -16,6 +16,7 @@ import ( "strings" "time" + "github.com/blang/semver/v4" "github.com/red-hat-storage/ocs-operator/v4/api/v1alpha1" ocsv1alpha1 "github.com/red-hat-storage/ocs-operator/v4/api/v1alpha1" controllers "github.com/red-hat-storage/ocs-operator/v4/controllers/storageconsumer" @@ -42,6 +43,7 @@ const ( ProviderCertsMountPoint = "/mnt/cert" onboardingTicketKeySecret = "onboarding-ticket-key" storageClassRequestNameLabel = "ocs.openshift.io/storageclassrequest-name" + notAvailable = "N/A" ) const ( @@ -653,11 +655,29 @@ func (s *OCSProviderServer) GetStorageClassClaimConfig(ctx context.Context, req // ReportStatus rpc call to check if a consumer can reach to the provider. 
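
Before the server-side handler that follows, here is a rough sketch of how a storage client would be expected to call the updated RPC: the status payload has to be created through NewStorageClientStatus (the client type-asserts it back to the protobuf request), and non-empty versions must parse as semver or the server rejects the report. The function shape and version values below are placeholders, not code from this patch.

// Rough client-side usage sketch for the extended ReportStatus call.
package heartbeat

import (
	"context"

	providerClient "github.com/red-hat-storage/ocs-operator/v4/services/provider/client"
)

func reportHeartbeat(ctx context.Context, cc *providerClient.OCSProviderClient, consumerUUID string) error {
	// The status must be built via NewStorageClientStatus; passing anything
	// else panics inside ReportStatus when it is asserted back to the
	// underlying protobuf request.
	status := providerClient.NewStorageClientStatus().
		SetPlatformVersion("4.14.0"). // placeholder platform version
		SetOperatorVersion("4.14.1")  // placeholder client operator version

	// The consumer UUID is filled into the request by ReportStatus itself
	// before the gRPC call is made.
	_, err := cc.ReportStatus(ctx, consumerUUID, status)
	return err
}
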
func (s *OCSProviderServer) ReportStatus(ctx context.Context, req *pb.ReportStatusRequest) (*pb.ReportStatusResponse, error) { // Update the status in storageConsumer CR - if err := s.consumerManager.UpdateStatusLastHeatbeat(ctx, req.StorageConsumerUUID); err != nil { + klog.Infof("Client status report received: %+v", req) + + if req.ClientOperatorVersion == "" { + req.ClientOperatorVersion = notAvailable + } else { + if _, err := semver.Parse(req.ClientOperatorVersion); err != nil { + return nil, status.Errorf(codes.InvalidArgument, "Malformed ClientOperatorVersion: %v", err) + } + } + + if req.ClientPlatformVersion == "" { + req.ClientPlatformVersion = notAvailable + } else { + if _, err := semver.Parse(req.ClientPlatformVersion); err != nil { + return nil, status.Errorf(codes.InvalidArgument, "Malformed ClientPlatformVersion: %v", err) + } + } + + if err := s.consumerManager.UpdateConsumerStatus(ctx, req.StorageConsumerUUID, req); err != nil { if kerrors.IsNotFound(err) { - return nil, status.Errorf(codes.NotFound, "Failed to update lastHeartbeat in the storageConsumer resource: %v", err) + return nil, status.Errorf(codes.NotFound, "Failed to update lastHeartbeat payload in the storageConsumer resource: %v", err) } - return nil, status.Errorf(codes.Internal, "Failed to update lastHeartbeat in the storageConsumer resource: %v", err) + return nil, status.Errorf(codes.Internal, "Failed to update lastHeartbeat payload in the storageConsumer resource: %v", err) } return &pb.ReportStatusResponse{}, nil diff --git a/services/provider/server/server_test.go b/services/provider/server/server_test.go index a263032a0c..3fcc80d17b 100644 --- a/services/provider/server/server_test.go +++ b/services/provider/server/server_test.go @@ -17,6 +17,7 @@ import ( v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" + crClient "sigs.k8s.io/controller-runtime/pkg/client" ) type externalResource struct { @@ -138,7 +139,7 @@ var ( func TestGetExternalResources(t *testing.T) { ctx := context.TODO() - objects := []runtime.Object{ + objects := []crClient.Object{ consumerResource, consumerResource1, consumerResource2, @@ -278,7 +279,7 @@ func TestGetExternalResources(t *testing.T) { assert.Nil(t, storageConRes) // When CephClient status is empty - objects = []runtime.Object{ + objects = []crClient.Object{ &rookCephv1.CephClient{}, } s := runtime.NewScheme() @@ -303,7 +304,7 @@ func TestGetExternalResources(t *testing.T) { // When CephClient status info is empty - objects = []runtime.Object{ + objects = []crClient.Object{ &rookCephv1.CephClient{}, } s = runtime.NewScheme() @@ -389,7 +390,7 @@ func TestOCSProviderServerStorageClassRequest(t *testing.T) { } ctx := context.TODO() - objects := []runtime.Object{ + objects := []crClient.Object{ consumerResource, claimResourceUnderDeletion, } @@ -455,7 +456,7 @@ func TestOCSProviderServerRevokeStorageClassClaim(t *testing.T) { } ctx := context.TODO() - objects := []runtime.Object{ + objects := []crClient.Object{ consumerResource, claimResource, } @@ -673,7 +674,7 @@ func TestOCSProviderServerGetStorageClassClaimConfig(t *testing.T) { ) ctx := context.TODO() - objects := []runtime.Object{ + objects := []crClient.Object{ consumerResource, blockPoolClaimResource, sharedFilesystemClaimResource, diff --git a/vendor/github.com/openshift/client-go/security/clientset/versioned/typed/security/v1/fake/fake_podsecuritypolicyreview.go 
b/vendor/github.com/openshift/client-go/security/clientset/versioned/typed/security/v1/fake/fake_podsecuritypolicyreview.go index 85d015d8a2..61c5f9902a 100644 --- a/vendor/github.com/openshift/client-go/security/clientset/versioned/typed/security/v1/fake/fake_podsecuritypolicyreview.go +++ b/vendor/github.com/openshift/client-go/security/clientset/versioned/typed/security/v1/fake/fake_podsecuritypolicyreview.go @@ -7,7 +7,6 @@ import ( v1 "github.com/openshift/api/security/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - schema "k8s.io/apimachinery/pkg/runtime/schema" testing "k8s.io/client-go/testing" ) @@ -17,9 +16,9 @@ type FakePodSecurityPolicyReviews struct { ns string } -var podsecuritypolicyreviewsResource = schema.GroupVersionResource{Group: "security.openshift.io", Version: "v1", Resource: "podsecuritypolicyreviews"} +var podsecuritypolicyreviewsResource = v1.SchemeGroupVersion.WithResource("podsecuritypolicyreviews") -var podsecuritypolicyreviewsKind = schema.GroupVersionKind{Group: "security.openshift.io", Version: "v1", Kind: "PodSecurityPolicyReview"} +var podsecuritypolicyreviewsKind = v1.SchemeGroupVersion.WithKind("PodSecurityPolicyReview") // Create takes the representation of a podSecurityPolicyReview and creates it. Returns the server's representation of the podSecurityPolicyReview, and an error, if there is any. func (c *FakePodSecurityPolicyReviews) Create(ctx context.Context, podSecurityPolicyReview *v1.PodSecurityPolicyReview, opts metav1.CreateOptions) (result *v1.PodSecurityPolicyReview, err error) { diff --git a/vendor/github.com/openshift/client-go/security/clientset/versioned/typed/security/v1/fake/fake_podsecuritypolicyselfsubjectreview.go b/vendor/github.com/openshift/client-go/security/clientset/versioned/typed/security/v1/fake/fake_podsecuritypolicyselfsubjectreview.go index 846ea9daee..80aa29043c 100644 --- a/vendor/github.com/openshift/client-go/security/clientset/versioned/typed/security/v1/fake/fake_podsecuritypolicyselfsubjectreview.go +++ b/vendor/github.com/openshift/client-go/security/clientset/versioned/typed/security/v1/fake/fake_podsecuritypolicyselfsubjectreview.go @@ -7,7 +7,6 @@ import ( v1 "github.com/openshift/api/security/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - schema "k8s.io/apimachinery/pkg/runtime/schema" testing "k8s.io/client-go/testing" ) @@ -17,9 +16,9 @@ type FakePodSecurityPolicySelfSubjectReviews struct { ns string } -var podsecuritypolicyselfsubjectreviewsResource = schema.GroupVersionResource{Group: "security.openshift.io", Version: "v1", Resource: "podsecuritypolicyselfsubjectreviews"} +var podsecuritypolicyselfsubjectreviewsResource = v1.SchemeGroupVersion.WithResource("podsecuritypolicyselfsubjectreviews") -var podsecuritypolicyselfsubjectreviewsKind = schema.GroupVersionKind{Group: "security.openshift.io", Version: "v1", Kind: "PodSecurityPolicySelfSubjectReview"} +var podsecuritypolicyselfsubjectreviewsKind = v1.SchemeGroupVersion.WithKind("PodSecurityPolicySelfSubjectReview") // Create takes the representation of a podSecurityPolicySelfSubjectReview and creates it. Returns the server's representation of the podSecurityPolicySelfSubjectReview, and an error, if there is any. 
func (c *FakePodSecurityPolicySelfSubjectReviews) Create(ctx context.Context, podSecurityPolicySelfSubjectReview *v1.PodSecurityPolicySelfSubjectReview, opts metav1.CreateOptions) (result *v1.PodSecurityPolicySelfSubjectReview, err error) { diff --git a/vendor/github.com/openshift/client-go/security/clientset/versioned/typed/security/v1/fake/fake_podsecuritypolicysubjectreview.go b/vendor/github.com/openshift/client-go/security/clientset/versioned/typed/security/v1/fake/fake_podsecuritypolicysubjectreview.go index dbe5d308e3..9910db95de 100644 --- a/vendor/github.com/openshift/client-go/security/clientset/versioned/typed/security/v1/fake/fake_podsecuritypolicysubjectreview.go +++ b/vendor/github.com/openshift/client-go/security/clientset/versioned/typed/security/v1/fake/fake_podsecuritypolicysubjectreview.go @@ -7,7 +7,6 @@ import ( v1 "github.com/openshift/api/security/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - schema "k8s.io/apimachinery/pkg/runtime/schema" testing "k8s.io/client-go/testing" ) @@ -17,9 +16,9 @@ type FakePodSecurityPolicySubjectReviews struct { ns string } -var podsecuritypolicysubjectreviewsResource = schema.GroupVersionResource{Group: "security.openshift.io", Version: "v1", Resource: "podsecuritypolicysubjectreviews"} +var podsecuritypolicysubjectreviewsResource = v1.SchemeGroupVersion.WithResource("podsecuritypolicysubjectreviews") -var podsecuritypolicysubjectreviewsKind = schema.GroupVersionKind{Group: "security.openshift.io", Version: "v1", Kind: "PodSecurityPolicySubjectReview"} +var podsecuritypolicysubjectreviewsKind = v1.SchemeGroupVersion.WithKind("PodSecurityPolicySubjectReview") // Create takes the representation of a podSecurityPolicySubjectReview and creates it. Returns the server's representation of the podSecurityPolicySubjectReview, and an error, if there is any. 
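
The vendored fake-client churn in this part of the patch is mechanical fallout of the openshift/client-go bump: hard-coded GroupVersionResource and GroupVersionKind literals are replaced with helpers on the API group's SchemeGroupVersion, and some import aliases are swapped, which should produce identical values. A quick standalone check of that equivalence, illustrative only and not part of the patch:

// Illustrative check that the SchemeGroupVersion helper resolves to the same
// GroupVersionResource as the removed hard-coded literal.
package main

import (
	"fmt"

	securityv1 "github.com/openshift/api/security/v1"
	"k8s.io/apimachinery/pkg/runtime/schema"
)

func main() {
	literal := schema.GroupVersionResource{Group: "security.openshift.io", Version: "v1", Resource: "rangeallocations"}
	helper := securityv1.SchemeGroupVersion.WithResource("rangeallocations")
	fmt.Println(literal == helper) // prints true: the regenerated code is equivalent
}
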
func (c *FakePodSecurityPolicySubjectReviews) Create(ctx context.Context, podSecurityPolicySubjectReview *v1.PodSecurityPolicySubjectReview, opts metav1.CreateOptions) (result *v1.PodSecurityPolicySubjectReview, err error) { diff --git a/vendor/github.com/openshift/client-go/security/clientset/versioned/typed/security/v1/fake/fake_rangeallocation.go b/vendor/github.com/openshift/client-go/security/clientset/versioned/typed/security/v1/fake/fake_rangeallocation.go index d05477811f..2692d3b982 100644 --- a/vendor/github.com/openshift/client-go/security/clientset/versioned/typed/security/v1/fake/fake_rangeallocation.go +++ b/vendor/github.com/openshift/client-go/security/clientset/versioned/typed/security/v1/fake/fake_rangeallocation.go @@ -7,11 +7,10 @@ import ( json "encoding/json" "fmt" - securityv1 "github.com/openshift/api/security/v1" - applyconfigurationssecurityv1 "github.com/openshift/client-go/security/applyconfigurations/security/v1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + v1 "github.com/openshift/api/security/v1" + securityv1 "github.com/openshift/client-go/security/applyconfigurations/security/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" - schema "k8s.io/apimachinery/pkg/runtime/schema" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" testing "k8s.io/client-go/testing" @@ -22,24 +21,24 @@ type FakeRangeAllocations struct { Fake *FakeSecurityV1 } -var rangeallocationsResource = schema.GroupVersionResource{Group: "security.openshift.io", Version: "v1", Resource: "rangeallocations"} +var rangeallocationsResource = v1.SchemeGroupVersion.WithResource("rangeallocations") -var rangeallocationsKind = schema.GroupVersionKind{Group: "security.openshift.io", Version: "v1", Kind: "RangeAllocation"} +var rangeallocationsKind = v1.SchemeGroupVersion.WithKind("RangeAllocation") // Get takes name of the rangeAllocation, and returns the corresponding rangeAllocation object, and an error if there is any. -func (c *FakeRangeAllocations) Get(ctx context.Context, name string, options v1.GetOptions) (result *securityv1.RangeAllocation, err error) { +func (c *FakeRangeAllocations) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.RangeAllocation, err error) { obj, err := c.Fake. - Invokes(testing.NewRootGetAction(rangeallocationsResource, name), &securityv1.RangeAllocation{}) + Invokes(testing.NewRootGetAction(rangeallocationsResource, name), &v1.RangeAllocation{}) if obj == nil { return nil, err } - return obj.(*securityv1.RangeAllocation), err + return obj.(*v1.RangeAllocation), err } // List takes label and field selectors, and returns the list of RangeAllocations that match those selectors. -func (c *FakeRangeAllocations) List(ctx context.Context, opts v1.ListOptions) (result *securityv1.RangeAllocationList, err error) { +func (c *FakeRangeAllocations) List(ctx context.Context, opts metav1.ListOptions) (result *v1.RangeAllocationList, err error) { obj, err := c.Fake. 
-		Invokes(testing.NewRootListAction(rangeallocationsResource, rangeallocationsKind, opts), &securityv1.RangeAllocationList{})
+		Invokes(testing.NewRootListAction(rangeallocationsResource, rangeallocationsKind, opts), &v1.RangeAllocationList{})
 	if obj == nil {
 		return nil, err
 	}
@@ -48,8 +47,8 @@ func (c *FakeRangeAllocations) List(ctx context.Context, opts v1.ListOptions) (r
 	if label == nil {
 		label = labels.Everything()
 	}
-	list := &securityv1.RangeAllocationList{ListMeta: obj.(*securityv1.RangeAllocationList).ListMeta}
-	for _, item := range obj.(*securityv1.RangeAllocationList).Items {
+	list := &v1.RangeAllocationList{ListMeta: obj.(*v1.RangeAllocationList).ListMeta}
+	for _, item := range obj.(*v1.RangeAllocationList).Items {
 		if label.Matches(labels.Set(item.Labels)) {
 			list.Items = append(list.Items, item)
 		}
@@ -58,58 +57,58 @@ func (c *FakeRangeAllocations) List(ctx context.Context, opts v1.ListOptions) (r
 }
 
 // Watch returns a watch.Interface that watches the requested rangeAllocations.
-func (c *FakeRangeAllocations) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
+func (c *FakeRangeAllocations) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
 	return c.Fake.
 		InvokesWatch(testing.NewRootWatchAction(rangeallocationsResource, opts))
 }
 
 // Create takes the representation of a rangeAllocation and creates it. Returns the server's representation of the rangeAllocation, and an error, if there is any.
-func (c *FakeRangeAllocations) Create(ctx context.Context, rangeAllocation *securityv1.RangeAllocation, opts v1.CreateOptions) (result *securityv1.RangeAllocation, err error) {
+func (c *FakeRangeAllocations) Create(ctx context.Context, rangeAllocation *v1.RangeAllocation, opts metav1.CreateOptions) (result *v1.RangeAllocation, err error) {
 	obj, err := c.Fake.
-		Invokes(testing.NewRootCreateAction(rangeallocationsResource, rangeAllocation), &securityv1.RangeAllocation{})
+		Invokes(testing.NewRootCreateAction(rangeallocationsResource, rangeAllocation), &v1.RangeAllocation{})
 	if obj == nil {
 		return nil, err
 	}
-	return obj.(*securityv1.RangeAllocation), err
+	return obj.(*v1.RangeAllocation), err
 }
 
 // Update takes the representation of a rangeAllocation and updates it. Returns the server's representation of the rangeAllocation, and an error, if there is any.
-func (c *FakeRangeAllocations) Update(ctx context.Context, rangeAllocation *securityv1.RangeAllocation, opts v1.UpdateOptions) (result *securityv1.RangeAllocation, err error) {
+func (c *FakeRangeAllocations) Update(ctx context.Context, rangeAllocation *v1.RangeAllocation, opts metav1.UpdateOptions) (result *v1.RangeAllocation, err error) {
 	obj, err := c.Fake.
-		Invokes(testing.NewRootUpdateAction(rangeallocationsResource, rangeAllocation), &securityv1.RangeAllocation{})
+		Invokes(testing.NewRootUpdateAction(rangeallocationsResource, rangeAllocation), &v1.RangeAllocation{})
 	if obj == nil {
 		return nil, err
 	}
-	return obj.(*securityv1.RangeAllocation), err
+	return obj.(*v1.RangeAllocation), err
 }
 
 // Delete takes name of the rangeAllocation and deletes it. Returns an error if one occurs.
-func (c *FakeRangeAllocations) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
+func (c *FakeRangeAllocations) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error {
 	_, err := c.Fake.
-		Invokes(testing.NewRootDeleteActionWithOptions(rangeallocationsResource, name, opts), &securityv1.RangeAllocation{})
+		Invokes(testing.NewRootDeleteActionWithOptions(rangeallocationsResource, name, opts), &v1.RangeAllocation{})
 	return err
 }
 
 // DeleteCollection deletes a collection of objects.
-func (c *FakeRangeAllocations) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
+func (c *FakeRangeAllocations) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error {
 	action := testing.NewRootDeleteCollectionAction(rangeallocationsResource, listOpts)
 
-	_, err := c.Fake.Invokes(action, &securityv1.RangeAllocationList{})
+	_, err := c.Fake.Invokes(action, &v1.RangeAllocationList{})
 	return err
 }
 
 // Patch applies the patch and returns the patched rangeAllocation.
-func (c *FakeRangeAllocations) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *securityv1.RangeAllocation, err error) {
+func (c *FakeRangeAllocations) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.RangeAllocation, err error) {
 	obj, err := c.Fake.
-		Invokes(testing.NewRootPatchSubresourceAction(rangeallocationsResource, name, pt, data, subresources...), &securityv1.RangeAllocation{})
+		Invokes(testing.NewRootPatchSubresourceAction(rangeallocationsResource, name, pt, data, subresources...), &v1.RangeAllocation{})
 	if obj == nil {
 		return nil, err
 	}
-	return obj.(*securityv1.RangeAllocation), err
+	return obj.(*v1.RangeAllocation), err
 }
 
 // Apply takes the given apply declarative configuration, applies it and returns the applied rangeAllocation.
-func (c *FakeRangeAllocations) Apply(ctx context.Context, rangeAllocation *applyconfigurationssecurityv1.RangeAllocationApplyConfiguration, opts v1.ApplyOptions) (result *securityv1.RangeAllocation, err error) {
+func (c *FakeRangeAllocations) Apply(ctx context.Context, rangeAllocation *securityv1.RangeAllocationApplyConfiguration, opts metav1.ApplyOptions) (result *v1.RangeAllocation, err error) {
 	if rangeAllocation == nil {
 		return nil, fmt.Errorf("rangeAllocation provided to Apply must not be nil")
 	}
@@ -122,9 +121,9 @@ func (c *FakeRangeAllocations) Apply(ctx context.Context, rangeAllocation *apply
 		return nil, fmt.Errorf("rangeAllocation.Name must be provided to Apply")
 	}
 	obj, err := c.Fake.
-		Invokes(testing.NewRootPatchSubresourceAction(rangeallocationsResource, *name, types.ApplyPatchType, data), &securityv1.RangeAllocation{})
+		Invokes(testing.NewRootPatchSubresourceAction(rangeallocationsResource, *name, types.ApplyPatchType, data), &v1.RangeAllocation{})
 	if obj == nil {
 		return nil, err
 	}
-	return obj.(*securityv1.RangeAllocation), err
+	return obj.(*v1.RangeAllocation), err
 }
diff --git a/vendor/github.com/openshift/client-go/security/clientset/versioned/typed/security/v1/fake/fake_securitycontextconstraints.go b/vendor/github.com/openshift/client-go/security/clientset/versioned/typed/security/v1/fake/fake_securitycontextconstraints.go
index 2b570a4988..ee40b0dce0 100644
--- a/vendor/github.com/openshift/client-go/security/clientset/versioned/typed/security/v1/fake/fake_securitycontextconstraints.go
+++ b/vendor/github.com/openshift/client-go/security/clientset/versioned/typed/security/v1/fake/fake_securitycontextconstraints.go
@@ -7,11 +7,10 @@ import (
 	json "encoding/json"
 	"fmt"
 
-	securityv1 "github.com/openshift/api/security/v1"
-	applyconfigurationssecurityv1 "github.com/openshift/client-go/security/applyconfigurations/security/v1"
-	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	v1 "github.com/openshift/api/security/v1"
+	securityv1 "github.com/openshift/client-go/security/applyconfigurations/security/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	labels "k8s.io/apimachinery/pkg/labels"
-	schema "k8s.io/apimachinery/pkg/runtime/schema"
 	types "k8s.io/apimachinery/pkg/types"
 	watch "k8s.io/apimachinery/pkg/watch"
 	testing "k8s.io/client-go/testing"
@@ -22,24 +21,24 @@ type FakeSecurityContextConstraints struct {
 	Fake *FakeSecurityV1
 }
 
-var securitycontextconstraintsResource = schema.GroupVersionResource{Group: "security.openshift.io", Version: "v1", Resource: "securitycontextconstraints"}
+var securitycontextconstraintsResource = v1.SchemeGroupVersion.WithResource("securitycontextconstraints")
 
-var securitycontextconstraintsKind = schema.GroupVersionKind{Group: "security.openshift.io", Version: "v1", Kind: "SecurityContextConstraints"}
+var securitycontextconstraintsKind = v1.SchemeGroupVersion.WithKind("SecurityContextConstraints")
 
 // Get takes name of the securityContextConstraints, and returns the corresponding securityContextConstraints object, and an error if there is any.
-func (c *FakeSecurityContextConstraints) Get(ctx context.Context, name string, options v1.GetOptions) (result *securityv1.SecurityContextConstraints, err error) {
+func (c *FakeSecurityContextConstraints) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.SecurityContextConstraints, err error) {
 	obj, err := c.Fake.
-		Invokes(testing.NewRootGetAction(securitycontextconstraintsResource, name), &securityv1.SecurityContextConstraints{})
+		Invokes(testing.NewRootGetAction(securitycontextconstraintsResource, name), &v1.SecurityContextConstraints{})
 	if obj == nil {
 		return nil, err
 	}
-	return obj.(*securityv1.SecurityContextConstraints), err
+	return obj.(*v1.SecurityContextConstraints), err
 }
 
 // List takes label and field selectors, and returns the list of SecurityContextConstraints that match those selectors.
-func (c *FakeSecurityContextConstraints) List(ctx context.Context, opts v1.ListOptions) (result *securityv1.SecurityContextConstraintsList, err error) {
+func (c *FakeSecurityContextConstraints) List(ctx context.Context, opts metav1.ListOptions) (result *v1.SecurityContextConstraintsList, err error) {
 	obj, err := c.Fake.
-		Invokes(testing.NewRootListAction(securitycontextconstraintsResource, securitycontextconstraintsKind, opts), &securityv1.SecurityContextConstraintsList{})
+		Invokes(testing.NewRootListAction(securitycontextconstraintsResource, securitycontextconstraintsKind, opts), &v1.SecurityContextConstraintsList{})
 	if obj == nil {
 		return nil, err
 	}
@@ -48,8 +47,8 @@ func (c *FakeSecurityContextConstraints) List(ctx context.Context, opts v1.ListO
 	if label == nil {
 		label = labels.Everything()
 	}
-	list := &securityv1.SecurityContextConstraintsList{ListMeta: obj.(*securityv1.SecurityContextConstraintsList).ListMeta}
-	for _, item := range obj.(*securityv1.SecurityContextConstraintsList).Items {
+	list := &v1.SecurityContextConstraintsList{ListMeta: obj.(*v1.SecurityContextConstraintsList).ListMeta}
+	for _, item := range obj.(*v1.SecurityContextConstraintsList).Items {
 		if label.Matches(labels.Set(item.Labels)) {
 			list.Items = append(list.Items, item)
 		}
@@ -58,58 +57,58 @@ func (c *FakeSecurityContextConstraints) List(ctx context.Context, opts v1.ListO
 }
 
 // Watch returns a watch.Interface that watches the requested securityContextConstraints.
-func (c *FakeSecurityContextConstraints) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
+func (c *FakeSecurityContextConstraints) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
 	return c.Fake.
 		InvokesWatch(testing.NewRootWatchAction(securitycontextconstraintsResource, opts))
 }
 
 // Create takes the representation of a securityContextConstraints and creates it. Returns the server's representation of the securityContextConstraints, and an error, if there is any.
-func (c *FakeSecurityContextConstraints) Create(ctx context.Context, securityContextConstraints *securityv1.SecurityContextConstraints, opts v1.CreateOptions) (result *securityv1.SecurityContextConstraints, err error) {
+func (c *FakeSecurityContextConstraints) Create(ctx context.Context, securityContextConstraints *v1.SecurityContextConstraints, opts metav1.CreateOptions) (result *v1.SecurityContextConstraints, err error) {
 	obj, err := c.Fake.
-		Invokes(testing.NewRootCreateAction(securitycontextconstraintsResource, securityContextConstraints), &securityv1.SecurityContextConstraints{})
+		Invokes(testing.NewRootCreateAction(securitycontextconstraintsResource, securityContextConstraints), &v1.SecurityContextConstraints{})
 	if obj == nil {
 		return nil, err
 	}
-	return obj.(*securityv1.SecurityContextConstraints), err
+	return obj.(*v1.SecurityContextConstraints), err
 }
 
 // Update takes the representation of a securityContextConstraints and updates it. Returns the server's representation of the securityContextConstraints, and an error, if there is any.
-func (c *FakeSecurityContextConstraints) Update(ctx context.Context, securityContextConstraints *securityv1.SecurityContextConstraints, opts v1.UpdateOptions) (result *securityv1.SecurityContextConstraints, err error) {
+func (c *FakeSecurityContextConstraints) Update(ctx context.Context, securityContextConstraints *v1.SecurityContextConstraints, opts metav1.UpdateOptions) (result *v1.SecurityContextConstraints, err error) {
 	obj, err := c.Fake.
-		Invokes(testing.NewRootUpdateAction(securitycontextconstraintsResource, securityContextConstraints), &securityv1.SecurityContextConstraints{})
+		Invokes(testing.NewRootUpdateAction(securitycontextconstraintsResource, securityContextConstraints), &v1.SecurityContextConstraints{})
 	if obj == nil {
 		return nil, err
 	}
-	return obj.(*securityv1.SecurityContextConstraints), err
+	return obj.(*v1.SecurityContextConstraints), err
 }
 
 // Delete takes name of the securityContextConstraints and deletes it. Returns an error if one occurs.
-func (c *FakeSecurityContextConstraints) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
+func (c *FakeSecurityContextConstraints) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error {
 	_, err := c.Fake.
-		Invokes(testing.NewRootDeleteActionWithOptions(securitycontextconstraintsResource, name, opts), &securityv1.SecurityContextConstraints{})
+		Invokes(testing.NewRootDeleteActionWithOptions(securitycontextconstraintsResource, name, opts), &v1.SecurityContextConstraints{})
 	return err
 }
 
 // DeleteCollection deletes a collection of objects.
-func (c *FakeSecurityContextConstraints) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
+func (c *FakeSecurityContextConstraints) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error {
 	action := testing.NewRootDeleteCollectionAction(securitycontextconstraintsResource, listOpts)
 
-	_, err := c.Fake.Invokes(action, &securityv1.SecurityContextConstraintsList{})
+	_, err := c.Fake.Invokes(action, &v1.SecurityContextConstraintsList{})
 	return err
 }
 
 // Patch applies the patch and returns the patched securityContextConstraints.
-func (c *FakeSecurityContextConstraints) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *securityv1.SecurityContextConstraints, err error) {
+func (c *FakeSecurityContextConstraints) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.SecurityContextConstraints, err error) {
 	obj, err := c.Fake.
-		Invokes(testing.NewRootPatchSubresourceAction(securitycontextconstraintsResource, name, pt, data, subresources...), &securityv1.SecurityContextConstraints{})
+		Invokes(testing.NewRootPatchSubresourceAction(securitycontextconstraintsResource, name, pt, data, subresources...), &v1.SecurityContextConstraints{})
 	if obj == nil {
 		return nil, err
 	}
-	return obj.(*securityv1.SecurityContextConstraints), err
+	return obj.(*v1.SecurityContextConstraints), err
 }
 
 // Apply takes the given apply declarative configuration, applies it and returns the applied securityContextConstraints.
-func (c *FakeSecurityContextConstraints) Apply(ctx context.Context, securityContextConstraints *applyconfigurationssecurityv1.SecurityContextConstraintsApplyConfiguration, opts v1.ApplyOptions) (result *securityv1.SecurityContextConstraints, err error) {
+func (c *FakeSecurityContextConstraints) Apply(ctx context.Context, securityContextConstraints *securityv1.SecurityContextConstraintsApplyConfiguration, opts metav1.ApplyOptions) (result *v1.SecurityContextConstraints, err error) {
 	if securityContextConstraints == nil {
 		return nil, fmt.Errorf("securityContextConstraints provided to Apply must not be nil")
 	}
@@ -122,9 +121,9 @@ func (c *FakeSecurityContextConstraints) Apply(ctx context.Context, securityCont
 		return nil, fmt.Errorf("securityContextConstraints.Name must be provided to Apply")
 	}
 	obj, err := c.Fake.
-		Invokes(testing.NewRootPatchSubresourceAction(securitycontextconstraintsResource, *name, types.ApplyPatchType, data), &securityv1.SecurityContextConstraints{})
+		Invokes(testing.NewRootPatchSubresourceAction(securitycontextconstraintsResource, *name, types.ApplyPatchType, data), &v1.SecurityContextConstraints{})
 	if obj == nil {
 		return nil, err
 	}
-	return obj.(*securityv1.SecurityContextConstraints), err
+	return obj.(*v1.SecurityContextConstraints), err
 }
diff --git a/vendor/modules.txt b/vendor/modules.txt
index 38c3578230..a9b9bf775a 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -332,7 +332,7 @@ github.com/openshift/build-machinery-go/make/targets/golang
 github.com/openshift/build-machinery-go/make/targets/openshift
 github.com/openshift/build-machinery-go/make/targets/openshift/operator
 github.com/openshift/build-machinery-go/scripts
-# github.com/openshift/client-go v0.0.0-20230718165156-6014fb98e86a
+# github.com/openshift/client-go v0.0.0-20231005121823-e81400b97c46
 ## explicit; go 1.20
 github.com/openshift/client-go/security/applyconfigurations/internal
 github.com/openshift/client-go/security/applyconfigurations/security/v1