diff --git a/config/argocd-cloudpaks/cp-shared/Chart.yaml b/config/argocd-cloudpaks/cp-shared/Chart.yaml index ea2c3af1..22b4d241 100644 --- a/config/argocd-cloudpaks/cp-shared/Chart.yaml +++ b/config/argocd-cloudpaks/cp-shared/Chart.yaml @@ -16,9 +16,9 @@ type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. # Versions are expected to follow Semantic Versioning (https://semver.org/) -version: 0.6.2 +version: 0.6.3 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. Versions are not expected to # follow Semantic Versioning. They should reflect the version the application is using. -appVersion: "1.3.2" +appVersion: "1.3.3" diff --git a/config/argocd-cloudpaks/cp-shared/templates/0050-sync-common-config-map.yaml b/config/argocd-cloudpaks/cp-shared/templates/0050-sync-common-config-map.yaml index d6d8c382..05913179 100644 --- a/config/argocd-cloudpaks/cp-shared/templates/0050-sync-common-config-map.yaml +++ b/config/argocd-cloudpaks/cp-shared/templates/0050-sync-common-config-map.yaml @@ -14,16 +14,13 @@ spec: - name: config image: quay.io/openshift/origin-cli:latest imagePullPolicy: IfNotPresent - resources: - requests: - memory: "64Mi" - cpu: "150m" - limits: - memory: "64Mi" - cpu: "200m" env: - name: ARGOCD_NAMESPACE value: "openshift-gitops" + - name: STORAGE_CLASS_OVERRIDE_RWO + value: {{ .Values.storageclass.rwo.override }} + - name: STORAGE_CLASS_OVERRIDE_RWX + value: {{ .Values.storageclass.rwx.override }} command: - /bin/sh - -c @@ -38,88 +35,104 @@ spec: exit 1 fi - storage_class_rwo=$(oc get StorageClasses ocs-storagecluster-ceph-rbd -o name 2> /dev/null | cut -d "/" -f 2) || true - storage_class_rwx=$(oc get StorageClasses ocs-storagecluster-cephfs -o name 2> /dev/null | cut -d "/" -f 2) || true - if [ -n "${storage_class_rwo}" ] && 
[ -n "${storage_class_rwx}" ]; then - echo "INFO: Cluster has ODF installed, using ODF storage classes." + storage_class_rwo="${STORAGE_CLASS_OVERRIDE_RWO}" + storage_class_rwx="${STORAGE_CLASS_OVERRIDE_RWX}" + if [ -n "${storage_class_rwx}" ]; then + if [ -z "${storage_class_rwo}" ]; then + storage_class_rwo="${storage_class_rwx}" + fi else - storage_class_rwo=$(oc get StorageClasses rook-ceph-block -o name 2> /dev/null | cut -d "/" -f 2) || true - storage_class_rwx=$(oc get StorageClasses rook-cephfs -o name 2> /dev/null | cut -d "/" -f 2) || true - if [ -n "${storage_class_rwo}" ] && [ -n "${storage_class_rwx}" ]; then - echo "INFO: Cluster has Ceph installed, using Ceph storage classes." - fi - fi + storage_class_rwo=$(oc get StorageClasses ocs-storagecluster-ceph-rbd -o name 2> /dev/null | cut -d "/" -f 2) || true + storage_class_rwx=$(oc get StorageClasses ocs-storagecluster-cephfs -o name 2> /dev/null | cut -d "/" -f 2) || true + if [ -n "${storage_class_rwo}" ] && [ -n "${storage_class_rwx}" ]; then + echo "INFO: Cluster has ODF installed, using ODF storage classes." 
+ else + storage_class_rwo=$(oc get StorageClasses rook-ceph-block -o name 2> /dev/null | cut -d "/" -f 2) || true + storage_class_rwx=$(oc get StorageClasses rook-cephfs -o name 2> /dev/null | cut -d "/" -f 2) || true - # NetApp/Trident support - if [ -z "${storage_class_rwo}" ] || [ -z "${storage_class_rwx}" ]; then - storage_class_rwx=$(oc get storageclass \ - -o=custom-columns='NAME:metadata.name,PROVISIONER:provisioner,BACKEND-TYPE:parameters.backendType' \ - | grep "csi.trident.netapp.io.*ontap-nas" \ - | head -n 1 \ - | cut -d " " -f 1) || true - storage_class_rwo=$(oc get storageclass \ - -o=custom-columns='NAME:metadata.name,PROVISIONER:provisioner,BACKEND-TYPE:parameters.backendType' \ - | grep "csi.trident.netapp.io.*ontap-san" \ - | head -n 1 \ - | cut -d " " -f 1) || true + # If the cluster does not have the dedicated block storage class, the NFS + # driver can address most use cases with the exception of "block" volume + # mode, which is not needed for Cloud Paks + if [ -z "${storage_class_rwo}" ] && [ -n "${storage_class_rwx}" ]; then + storage_class_rwo=${storage_class_rwx} + fi - # If the cluster does not have the dedicated block storage class, the NFS - # driver can address most use cases with the exception of "block" volume - # mode, which is not needed for Cloud Paks - if [ -z "${storage_class_rwo}" ] && [ -n "${storage_class_rwx}" ]; then - storage_class_rwo=${storage_class_rwx} + if [ -n "${storage_class_rwo}" ] && [ -n "${storage_class_rwx}" ]; then + echo "INFO: Cluster has Ceph installed, using Ceph storage classes." + fi fi - if [ -n "${storage_class_rwo}" ] && [ -n "${storage_class_rwx}" ]; then - echo "INFO: Cluster has ONTAP installed, using ONTAP storage classes." 
+ + # NetApp/Trident support + if [ -z "${storage_class_rwo}" ] || [ -z "${storage_class_rwx}" ]; then + storage_class_rwx=$(oc get storageclass \ + -o=custom-columns='NAME:metadata.name,PROVISIONER:provisioner,BACKEND-TYPE:parameters.backendType' \ + | grep "csi.trident.netapp.io.*ontap-nas" \ + | head -n 1 \ + | cut -d " " -f 1) || true + storage_class_rwo=$(oc get storageclass \ + -o=custom-columns='NAME:metadata.name,PROVISIONER:provisioner,BACKEND-TYPE:parameters.backendType' \ + | grep "csi.trident.netapp.io.*ontap-san" \ + | head -n 1 \ + | cut -d " " -f 1) || true + + # If the cluster does not have the dedicated block storage class, the NFS + # driver can address most use cases with the exception of "block" volume + # mode, which is not needed for Cloud Paks + if [ -z "${storage_class_rwo}" ] && [ -n "${storage_class_rwx}" ]; then + storage_class_rwo=${storage_class_rwx} + fi + if [ -n "${storage_class_rwo}" ] && [ -n "${storage_class_rwx}" ]; then + echo "INFO: Cluster has ONTAP installed, using ONTAP storage classes." 
+ fi fi - fi - if [ -z "${storage_class_rwo}" ] || [ -z "${storage_class_rwx}" ] ; then - if [[ "${api_url}" == *fyre.ibm.com* ]]; then - storage_class_rwo="{{.Values.storageclass.rwo.fyre}}" - storage_class_rwx="{{.Values.storageclass.rwx.fyre}}" - else - platform=$(oc get Infrastructure cluster -o jsonpath={.status.platform}) - if [ "${platform}" == "AWS" ]; then - ebs=$(oc get StorageClasses | grep ebs.csi.aws.com | head -n 1 | cut -d " " -f 1) || true - if [ -z "${ebs}" ]; then - ebs=$(oc get StorageClasses | grep kubernetes.io/aws-ebs | cut -d " " -f 1) || true - fi - efs=$(oc get StorageClasses | grep efs.csi.aws.com | head -n 1 | cut -d " " -f 1) || true - if [ -z "${efs}" ]; then - efs=$(oc get StorageClasses | grep openshift.org/aws-efs | cut -d " " -f 1) || true - fi - if [ -n "${ebs}" ] && [ -n "${efs}" ]; then - storage_class_rwo="${ebs}" - storage_class_rwx="${efs}" - else - storage_class_rwo="{{.Values.storageclass.rwo.aws}}" - storage_class_rwx="{{.Values.storageclass.rwx.aws}}" - fi - elif [ "${platform}" == "Azure" ]; then - if oc get StorageClass azure-file 2> /dev/null && - oc get StorageClass managed-premium 2> /dev/null; then - storage_class_rwo=managed-premium - storage_class_rwx=azure-file - else - storage_class_rwo="{{.Values.storageclass.rwo.azure}}" - storage_class_rwx="{{.Values.storageclass.rwx.azure}}" - fi - elif [ "${platform}" == "IBMCloud" ]; then - vpc_class=$(oc get StorageClass | grep -c "ibmc-vpc" || result=0) - if [ ${vpc_class} -gt 0 ]; then - storage_class_rwo="{{.Values.storageclass.rwo.roksgen2}}" - storage_class_rwx="{{.Values.storageclass.rwx.roksgen2}}" + if [ -z "${storage_class_rwo}" ] || [ -z "${storage_class_rwx}" ] ; then + if [[ "${api_url}" == *fyre.ibm.com* ]]; then + storage_class_rwo="{{.Values.storageclass.rwo.fyre}}" + storage_class_rwx="{{.Values.storageclass.rwx.fyre}}" else - storage_class_rwo="{{.Values.storageclass.rwo.roks}}" - storage_class_rwx="{{.Values.storageclass.rwx.roks}}" + platform=$(oc get 
Infrastructure cluster -o jsonpath={.status.platform}) + if [ "${platform}" == "AWS" ]; then + ebs=$(oc get StorageClasses | grep ebs.csi.aws.com | head -n 1 | cut -d " " -f 1) || true + if [ -z "${ebs}" ]; then + ebs=$(oc get StorageClasses | grep kubernetes.io/aws-ebs | cut -d " " -f 1) || true + fi + efs=$(oc get StorageClasses | grep efs.csi.aws.com | head -n 1 | cut -d " " -f 1) || true + if [ -z "${efs}" ]; then + efs=$(oc get StorageClasses | grep openshift.org/aws-efs | cut -d " " -f 1) || true + fi + if [ -n "${ebs}" ] && [ -n "${efs}" ]; then + storage_class_rwo="${ebs}" + storage_class_rwx="${efs}" + else + storage_class_rwo="{{.Values.storageclass.rwo.aws}}" + storage_class_rwx="{{.Values.storageclass.rwx.aws}}" + fi + elif [ "${platform}" == "Azure" ]; then + if oc get StorageClass azure-file 2> /dev/null && + oc get StorageClass managed-premium 2> /dev/null; then + storage_class_rwo=managed-premium + storage_class_rwx=azure-file + else + storage_class_rwo="{{.Values.storageclass.rwo.azure}}" + storage_class_rwx="{{.Values.storageclass.rwx.azure}}" + fi + elif [ "${platform}" == "IBMCloud" ]; then + vpc_class=$(oc get StorageClass | grep -c "ibmc-vpc" || result=0) + if [ ${vpc_class} -gt 0 ]; then + storage_class_rwo="{{.Values.storageclass.rwo.roksgen2}}" + storage_class_rwx="{{.Values.storageclass.rwx.roksgen2}}" + else + storage_class_rwo="{{.Values.storageclass.rwo.roks}}" + storage_class_rwx="{{.Values.storageclass.rwx.roks}}" + fi + fi fi - fi - fi + fi fi if [ -z "${storage_class_rwo}" ] || [ -z "${storage_class_rwx}" ] ; then - echo "ERROR: Did not find storage classes for target platform." + echo "ERROR: Did not find storage classes for target platform." 
exit fi diff --git a/config/argocd-cloudpaks/cp-shared/templates/0050-sync-cp4a-config-map.yaml b/config/argocd-cloudpaks/cp-shared/templates/0050-sync-cp4a-config-map.yaml index da8e20d2..026d6f54 100644 --- a/config/argocd-cloudpaks/cp-shared/templates/0050-sync-cp4a-config-map.yaml +++ b/config/argocd-cloudpaks/cp-shared/templates/0050-sync-cp4a-config-map.yaml @@ -14,16 +14,13 @@ spec: - name: config image: quay.io/openshift/origin-cli:latest imagePullPolicy: IfNotPresent - resources: - requests: - memory: "64Mi" - cpu: "150m" - limits: - memory: "64Mi" - cpu: "200m" env: - name: ARGOCD_NAMESPACE value: openshift-gitops + - name: STORAGE_CLASS_OVERRIDE_RWO + value: {{ .Values.storageclass.rwo.override }} + - name: STORAGE_CLASS_OVERRIDE_RWX + value: {{ .Values.storageclass.rwx.override }} command: - /bin/sh - -c @@ -37,102 +34,118 @@ spec: platform=$(oc get Infrastructure cluster -o jsonpath={.status.platform}) cp4a_platform="${platform}" if [ "${cp4a_platform}" == "IBMCloud" ]; then - cp4a_platform=ROKS + cp4a_platform=ROKS else - cp4a_platform=OCP + cp4a_platform=OCP fi api_url=$(oc get Infrastructure cluster -o jsonpath={.status.apiServerURL}) - storage_class_rwo=$(oc get StorageClasses ocs-storagecluster-ceph-rbd -o name 2> /dev/null | cut -d "/" -f 2) || true - storage_class_rwx=$(oc get StorageClasses ocs-storagecluster-cephfs -o name 2> /dev/null | cut -d "/" -f 2) || true - if [ -n "${storage_class_rwo}" ] && [ -n "${storage_class_rwx}" ]; then - echo "INFO: Cluster has ODF installed, using ODF storage classes." 
+ storage_class_rwo="${STORAGE_CLASS_OVERRIDE_RWO}" + storage_class_rwx="${STORAGE_CLASS_OVERRIDE_RWX}" + if [ -n "${storage_class_rwx}" ]; then + if [ -z "${storage_class_rwo}" ]; then + storage_class_rwo="${storage_class_rwx}" + fi else - storage_class_rwo=$(oc get StorageClasses rook-ceph-block -o name 2> /dev/null | cut -d "/" -f 2) || true - storage_class_rwx=$(oc get StorageClasses rook-cephfs -o name 2> /dev/null | cut -d "/" -f 2) || true - if [ -n "${storage_class_rwo}" ] && [ -n "${storage_class_rwx}" ]; then - echo "INFO: Cluster has Ceph installed, using Ceph storage classes." - fi - fi + storage_class_rwo=$(oc get StorageClasses ocs-storagecluster-ceph-rbd -o name 2> /dev/null | cut -d "/" -f 2) || true + storage_class_rwx=$(oc get StorageClasses ocs-storagecluster-cephfs -o name 2> /dev/null | cut -d "/" -f 2) || true + if [ -n "${storage_class_rwo}" ] && [ -n "${storage_class_rwx}" ]; then + echo "INFO: Cluster has ODF installed, using ODF storage classes." + else + storage_class_rwo=$(oc get StorageClasses rook-ceph-block -o name 2> /dev/null | cut -d "/" -f 2) || true + storage_class_rwx=$(oc get StorageClasses rook-cephfs -o name 2> /dev/null | cut -d "/" -f 2) || true - # NetApp/Trident support - if [ -z "${storage_class_rwo}" ] || [ -z "${storage_class_rwx}" ]; then - storage_class_rwx=$(oc get storageclass \ - -o=custom-columns='NAME:metadata.name,PROVISIONER:provisioner,BACKEND-TYPE:parameters.backendType' \ - | grep "csi.trident.netapp.io.*ontap-nas" \ - | head -n 1 \ - | cut -d " " -f 1) || true - storage_class_rwo=$(oc get storageclass \ - -o=custom-columns='NAME:metadata.name,PROVISIONER:provisioner,BACKEND-TYPE:parameters.backendType' \ - | grep "csi.trident.netapp.io.*ontap-san" \ - | head -n 1 \ - | cut -d " " -f 1) || true + # If the cluster does not have the dedicated block storage class, the NFS + # driver can address most use cases with the exception of "block" volume + # mode, which is not needed for Cloud Paks + if [ -z 
"${storage_class_rwo}" ] && [ -n "${storage_class_rwx}" ]; then + storage_class_rwo=${storage_class_rwx} + fi - # If the cluster does not have the dedicated block storage class, the NFS - # driver can address most use cases with the exception of "block" volume - # mode, which is not needed for Cloud Paks - if [ -z "${storage_class_rwo}" ] && [ -n "${storage_class_rwx}" ]; then - storage_class_rwo=${storage_class_rwx} + if [ -n "${storage_class_rwo}" ] && [ -n "${storage_class_rwx}" ]; then + echo "INFO: Cluster has Ceph installed, using Ceph storage classes." + fi fi - if [ -n "${storage_class_rwo}" ] && [ -n "${storage_class_rwx}" ]; then - echo "INFO: Cluster has ONTAP installed, using ONTAP storage classes." + # NetApp/Trident support + if [ -z "${storage_class_rwo}" ] || [ -z "${storage_class_rwx}" ]; then + storage_class_rwx=$(oc get storageclass \ + -o=custom-columns='NAME:metadata.name,PROVISIONER:provisioner,BACKEND-TYPE:parameters.backendType' \ + | grep "csi.trident.netapp.io.*ontap-nas" \ + | head -n 1 \ + | cut -d " " -f 1) || true + storage_class_rwo=$(oc get storageclass \ + -o=custom-columns='NAME:metadata.name,PROVISIONER:provisioner,BACKEND-TYPE:parameters.backendType' \ + | grep "csi.trident.netapp.io.*ontap-san" \ + | head -n 1 \ + | cut -d " " -f 1) || true + + # If the cluster does not have the dedicated block storage class, the NFS + # driver can address most use cases with the exception of "block" volume + # mode, which is not needed for Cloud Paks + if [ -z "${storage_class_rwo}" ] && [ -n "${storage_class_rwx}" ]; then + storage_class_rwo=${storage_class_rwx} + fi + + if [ -n "${storage_class_rwo}" ] && [ -n "${storage_class_rwx}" ]; then + echo "INFO: Cluster has ONTAP installed, using ONTAP storage classes." 
+ fi fi - fi - if [ -z "${storage_class_rwo}" ] || [ -z "${storage_class_rwx}" ] ; then - if [[ "${api_url}" == *fyre.ibm.com* ]]; then - storage_class_rwo="{{.Values.storageclass.rwo.fyre}}" - storage_class_rwx="{{.Values.storageclass.rwx.fyre}}" - else - platform=$(oc get Infrastructure cluster -o jsonpath={.status.platform}) - if [ "${platform}" == "AWS" ]; then - ebs=$(oc get StorageClasses | grep ebs.csi.aws.com | head -n 1 | cut -d " " -f 1) || true - if [ -z "${ebs}" ]; then - ebs=$(oc get StorageClasses | grep kubernetes.io/aws-ebs | cut -d " " -f 1) || true - fi - efs=$(oc get StorageClasses | grep efs.csi.aws.com | head -n 1 | cut -d " " -f 1) || true - if [ -z "${efs}" ]; then - efs=$(oc get StorageClasses | grep openshift.org/aws-efs | cut -d " " -f 1) || true - fi - if [ -n "${ebs}" ] && [ -n "${efs}" ]; then - storage_class_rwo="${ebs}" - storage_class_rwx="${efs}" - else - storage_class_rwo="{{.Values.storageclass.rwo.aws}}" - storage_class_rwx="{{.Values.storageclass.rwx.aws}}" - fi - elif [ "${platform}" == "Azure" ]; then - if oc get StorageClass azure-file 2> /dev/null && - oc get StorageClass managed-premium 2> /dev/null; then - storage_class_rwo=managed-premium - storage_class_rwx=azure-file - else - storage_class_rwo="{{.Values.storageclass.rwo.azure}}" - storage_class_rwx="{{.Values.storageclass.rwx.azure}}" - fi - elif [ "${platform}" == "IBMCloud" ]; then - vpc_class=$(oc get StorageClass | grep -c "ibmc-vpc" || result=0) - if [ ${vpc_class} -gt 0 ]; then - storage_class_rwo="{{.Values.storageclass.rwo.roksgen2}}" - storage_class_rwx="{{.Values.storageclass.rwx.roksgen2}}" + if [ -z "${storage_class_rwo}" ] || [ -z "${storage_class_rwx}" ] ; then + if [[ "${api_url}" == *fyre.ibm.com* ]]; then + storage_class_rwo="{{.Values.storageclass.rwo.fyre}}" + storage_class_rwx="{{.Values.storageclass.rwx.fyre}}" else - storage_class_rwo="{{.Values.storageclass.rwo.roks}}" - storage_class_rwx="{{.Values.storageclass.rwx.roks}}" + platform=$(oc get 
Infrastructure cluster -o jsonpath={.status.platform}) + if [ "${platform}" == "AWS" ]; then + ebs=$(oc get StorageClasses | grep ebs.csi.aws.com | head -n 1 | cut -d " " -f 1) || true + if [ -z "${ebs}" ]; then + ebs=$(oc get StorageClasses | grep kubernetes.io/aws-ebs | cut -d " " -f 1) || true + fi + efs=$(oc get StorageClasses | grep efs.csi.aws.com | head -n 1 | cut -d " " -f 1) || true + if [ -z "${efs}" ]; then + efs=$(oc get StorageClasses | grep openshift.org/aws-efs | cut -d " " -f 1) || true + fi + if [ -n "${ebs}" ] && [ -n "${efs}" ]; then + storage_class_rwo="${ebs}" + storage_class_rwx="${efs}" + else + storage_class_rwo="{{.Values.storageclass.rwo.aws}}" + storage_class_rwx="{{.Values.storageclass.rwx.aws}}" + fi + elif [ "${platform}" == "Azure" ]; then + if oc get StorageClass azure-file 2> /dev/null && + oc get StorageClass managed-premium 2> /dev/null; then + storage_class_rwo=managed-premium + storage_class_rwx=azure-file + else + storage_class_rwo="{{.Values.storageclass.rwo.azure}}" + storage_class_rwx="{{.Values.storageclass.rwx.azure}}" + fi + elif [ "${platform}" == "IBMCloud" ]; then + vpc_class=$(oc get StorageClass | grep -c "ibmc-vpc" || result=0) + if [ ${vpc_class} -gt 0 ]; then + storage_class_rwo="{{.Values.storageclass.rwo.roksgen2}}" + storage_class_rwx="{{.Values.storageclass.rwx.roksgen2}}" + else + storage_class_rwo="{{.Values.storageclass.rwo.roks}}" + storage_class_rwx="{{.Values.storageclass.rwx.roks}}" + fi + fi fi - fi - fi + fi fi if [ -z "${storage_class_rwo}" ]; then - echo "ERROR: Did not find RWO storage classes for target platform." - exit + echo "ERROR: Did not find RWO storage classes for target platform." + exit fi if [ -z "${storage_class_rwx}" ]; then - echo "ERROR: Did not find RWX storage classes for target platform." - exit + echo "ERROR: Did not find RWX storage classes for target platform." 
+ exit fi storage_class_gold=${storage_class_rwx} diff --git a/config/argocd-cloudpaks/cp-shared/templates/0050-sync-cp4s-config-map.yaml b/config/argocd-cloudpaks/cp-shared/templates/0050-sync-cp4s-config-map.yaml index 823791e9..8ecfb4ea 100644 --- a/config/argocd-cloudpaks/cp-shared/templates/0050-sync-cp4s-config-map.yaml +++ b/config/argocd-cloudpaks/cp-shared/templates/0050-sync-cp4s-config-map.yaml @@ -47,70 +47,78 @@ spec: api_url=$(oc get Infrastructure cluster -o jsonpath={.status.apiServerURL}) - storage_class_rwo=$(oc get StorageClasses ocs-storagecluster-ceph-rbd -o name 2> /dev/null | cut -d "/" -f 2) || true - if [ -n "${storage_class_rwo}" ]; then - echo "INFO: Cluster has ODF installed, using ODF storage classes." - else - storage_class_rwo=$(oc get StorageClasses rook-ceph-block -o name 2> /dev/null | cut -d "/" -f 2) || true - if [ -n "${storage_class_rwo}" ]; then - echo "INFO: Cluster has Ceph installed, using Ceph storage classes." - fi - fi - - # NetApp/Trident support - if [ -z "${storage_class_rwo}" ]; then - storage_class_rwo=$(oc get storageclass \ - -o=custom-columns='NAME:metadata.name,PROVISIONER:provisioner,BACKEND-TYPE:parameters.backendType' \ - | grep "csi.trident.netapp.io.*ontap-san" \ - | head -n 1 \ - | cut -d " " -f 1) || true - # If the cluster does not have the dedicated block storage class, the NFS - # driver can address most use cases with the exception of "block" volume - # mode, which is not needed for Cloud Paks + storage_class_rwo="${STORAGE_CLASS_OVERRIDE_RWO}" + storage_class_rwx="${STORAGE_CLASS_OVERRIDE_RWX}" + if [ -n "${storage_class_rwx}" ]; then if [ -z "${storage_class_rwo}" ]; then - storage_class_rwo=$(oc get storageclass \ - -o=custom-columns='NAME:metadata.name,PROVISIONER:provisioner,BACKEND-TYPE:parameters.backendType' \ - | grep "csi.trident.netapp.io.*ontap-nas" \ - | head -n 1 \ - | cut -d " " -f 1) || true + storage_class_rwo="${storage_class_rwx}" fi + else + storage_class_rwo=$(oc get 
StorageClasses ocs-storagecluster-ceph-rbd -o name 2> /dev/null | cut -d "/" -f 2) || true if [ -n "${storage_class_rwo}" ]; then - echo "INFO: Cluster has ONTAP installed, using ONTAP storage class." + echo "INFO: Cluster has ODF installed, using ODF storage classes." + else + storage_class_rwo=$(oc get StorageClasses rook-ceph-block -o name 2> /dev/null | cut -d "/" -f 2) || true + if [ -n "${storage_class_rwo}" ]; then + echo "INFO: Cluster has Ceph installed, using Ceph storage classes." + fi fi - fi - if [ -z "${storage_class_rwo}" ]; then - if [[ "${api_url}" == *fyre.ibm.com* ]]; then - storage_class_rwo="{{.Values.storageclass.rwo.fyre}}" - else - platform=$(oc get Infrastructure cluster -o jsonpath={.status.platform}) - if [ "${platform}" == "AWS" ]; then - ebs=$(oc get StorageClasses | grep ebs.csi.aws.com | head -n 1 | cut -d " " -f 1) || true - if [ -z "${ebs}" ]; then - ebs=$(oc get StorageClasses | grep kubernetes.io/aws-ebs | cut -d " " -f 1) || true - fi - if [ -n "${ebs}" ]; then - storage_class_rwo="${ebs}" - else - storage_class_rwo="{{.Values.storageclass.rwo.aws}}" - fi - elif [ "${platform}" == "Azure" ]; then - if oc get StorageClass managed-premium 2> /dev/null; then - storage_class_rwo=managed-premium - elif oc get StorageClass managed-csi 2> /dev/null; then - storage_class_rwo=managed-csi - else - storage_class_rwo="{{.Values.storageclass.rwo.azure}}" - fi - elif [ "${platform}" == "IBMCloud" ]; then - vpc_class=$(oc get StorageClass | grep -c "ibmc-vpc" || result=0) - if [ ${vpc_class} -gt 0 ]; then - storage_class_rwo="{{.Values.storageclass.rwo.roksgen2}}" + # NetApp/Trident support + if [ -z "${storage_class_rwo}" ]; then + storage_class_rwo=$(oc get storageclass \ + -o=custom-columns='NAME:metadata.name,PROVISIONER:provisioner,BACKEND-TYPE:parameters.backendType' \ + | grep "csi.trident.netapp.io.*ontap-san" \ + | head -n 1 \ + | cut -d " " -f 1) || true + # If the cluster does not have the dedicated block storage class, the NFS + # 
driver can address most use cases with the exception of "block" volume + # mode, which is not needed for Cloud Paks + if [ -z "${storage_class_rwo}" ]; then + storage_class_rwo=$(oc get storageclass \ + -o=custom-columns='NAME:metadata.name,PROVISIONER:provisioner,BACKEND-TYPE:parameters.backendType' \ + | grep "csi.trident.netapp.io.*ontap-nas" \ + | head -n 1 \ + | cut -d " " -f 1) || true + fi + if [ -n "${storage_class_rwo}" ]; then + echo "INFO: Cluster has ONTAP installed, using ONTAP storage class." + fi + fi + + if [ -z "${storage_class_rwo}" ]; then + if [[ "${api_url}" == *fyre.ibm.com* ]]; then + storage_class_rwo="{{.Values.storageclass.rwo.fyre}}" else - storage_class_rwo="{{.Values.storageclass.rwo.roks}}" + platform=$(oc get Infrastructure cluster -o jsonpath={.status.platform}) + if [ "${platform}" == "AWS" ]; then + ebs=$(oc get StorageClasses | grep ebs.csi.aws.com | head -n 1 | cut -d " " -f 1) || true + if [ -z "${ebs}" ]; then + ebs=$(oc get StorageClasses | grep kubernetes.io/aws-ebs | cut -d " " -f 1) || true + fi + if [ -n "${ebs}" ]; then + storage_class_rwo="${ebs}" + else + storage_class_rwo="{{.Values.storageclass.rwo.aws}}" + fi + elif [ "${platform}" == "Azure" ]; then + if oc get StorageClass managed-premium 2> /dev/null; then + storage_class_rwo=managed-premium + elif oc get StorageClass managed-csi 2> /dev/null; then + storage_class_rwo=managed-csi + else + storage_class_rwo="{{.Values.storageclass.rwo.azure}}" + fi + elif [ "${platform}" == "IBMCloud" ]; then + vpc_class=$(oc get StorageClass | grep -c "ibmc-vpc" || result=0) + if [ ${vpc_class} -gt 0 ]; then + storage_class_rwo="{{.Values.storageclass.rwo.roksgen2}}" + else + storage_class_rwo="{{.Values.storageclass.rwo.roks}}" + fi + fi fi - fi - fi + fi fi if [ -z "${storage_class_rwo}" ]; then diff --git a/config/argocd-cloudpaks/cp-shared/templates/0100-cp-shared-app.yaml b/config/argocd-cloudpaks/cp-shared/templates/0100-cp-shared-app.yaml index 4a827970..deb68cd6 100644 --- 
a/config/argocd-cloudpaks/cp-shared/templates/0100-cp-shared-app.yaml +++ b/config/argocd-cloudpaks/cp-shared/templates/0100-cp-shared-app.yaml @@ -43,6 +43,10 @@ spec: value: "{{.Values.dedicated_cs.namespace_mapping.cp4waiops}}" - name: online_catalog_source_priority value: "{{.Values.online_catalog_source_priority}}" + - name: storageclass.rwo.override + value: "{{.Values.storageclass.rwo.override}}" + - name: storageclass.rwx.override + value: "{{.Values.storageclass.rwx.override}}" - name: repoURL value: ${ARGOCD_APP_SOURCE_REPO_URL} - name: serviceaccount.argocd_application_controller diff --git a/config/argocd-cloudpaks/cp-shared/templates/0100-cp-shared-operators-app.yaml b/config/argocd-cloudpaks/cp-shared/templates/0100-cp-shared-operators-app.yaml index 22da877d..e16948b6 100644 --- a/config/argocd-cloudpaks/cp-shared/templates/0100-cp-shared-operators-app.yaml +++ b/config/argocd-cloudpaks/cp-shared/templates/0100-cp-shared-operators-app.yaml @@ -29,6 +29,10 @@ spec: value: ${ARGOCD_APP_SOURCE_REPO_URL} - name: serviceaccount.argocd_application_controller value: {{.Values.serviceaccount.argocd_application_controller}} + - name: storageclass.rwo.override + value: "{{.Values.storageclass.rwo.override}}" + - name: storageclass.rwx.override + value: "{{.Values.storageclass.rwx.override}}" - name: targetRevision value: ${ARGOCD_APP_SOURCE_TARGET_REVISION} path: config/cloudpaks/cp-shared/operators diff --git a/config/argocd-cloudpaks/cp-shared/values.yaml b/config/argocd-cloudpaks/cp-shared/values.yaml index 398942c7..b55fc151 100644 --- a/config/argocd-cloudpaks/cp-shared/values.yaml +++ b/config/argocd-cloudpaks/cp-shared/values.yaml @@ -19,12 +19,14 @@ dedicated_cs: online_catalog_source_priority: -1 storageclass: rwo: + override: aws: gp2 azure: ocs-storagecluster-ceph-rbd fyre: rook-ceph-block roks: ibmc-block-gold roksgen2: ocs-storagecluster-ceph-rbd rwx: + override: aws: ocs-storagecluster-cephfs azure: ocs-storagecluster-cephfs fyre: rook-cephfs 
diff --git a/config/argocd-cloudpaks/cp4i/templates/0301-cp4i-module-template-app.yaml b/config/argocd-cloudpaks/cp4i/templates/0301-cp4i-module-template-app.yaml index 654fe41a..1b7e3084 100644 --- a/config/argocd-cloudpaks/cp4i/templates/0301-cp4i-module-template-app.yaml +++ b/config/argocd-cloudpaks/cp4i/templates/0301-cp4i-module-template-app.yaml @@ -7,7 +7,7 @@ {{- $targetRevision := .Values.targetRevision -}} {{- range $module_name, $module_enabled := .Values.modules }} {{- if not (eq $module_name "client") }} -{{- if eq ( default false $module_enabled ) true }} +{{- if eq ( default "false" $module_enabled | toString ) "true" }} --- apiVersion: argoproj.io/v1alpha1 kind: Application diff --git a/config/argocd-cloudpaks/cp4i/templates/0400-cp4i-client-app.yaml b/config/argocd-cloudpaks/cp4i/templates/0400-cp4i-client-app.yaml index 416d614d..63c3f479 100644 --- a/config/argocd-cloudpaks/cp4i/templates/0400-cp4i-client-app.yaml +++ b/config/argocd-cloudpaks/cp4i/templates/0400-cp4i-client-app.yaml @@ -1,5 +1,5 @@ -{{- $client := .Values.modules.client }} -{{- if eq ( default false $client ) true }} +{{- $client := .Values.modules.client | toString }} +{{- if eq ( default "false" $client ) "true" }} --- apiVersion: argoproj.io/v1alpha1 kind: Application diff --git a/config/argocd-cloudpaks/cp4i/values.yaml b/config/argocd-cloudpaks/cp4i/values.yaml index d38229a0..851af9ff 100644 --- a/config/argocd-cloudpaks/cp4i/values.yaml +++ b/config/argocd-cloudpaks/cp4i/values.yaml @@ -11,6 +11,6 @@ storageclass: rwo: ocs-storagecluster-ceph-rbd rwx: ocs-storagecluster-cephfs modules: - apic: true - mq: true - client: false + apic: "true" + mq: "true" + client: "false" diff --git a/config/cloudpaks/cp-shared/operators/templates/0000-cert-manager-namespace.yaml b/config/cloudpaks/cp-shared/operators/templates/0000-cert-manager-namespace.yaml deleted file mode 100644 index 6241fb0a..00000000 --- 
a/config/cloudpaks/cp-shared/operators/templates/0000-cert-manager-namespace.yaml +++ /dev/null @@ -1,8 +0,0 @@ ---- -apiVersion: v1 -kind: Namespace -metadata: - creationTimestamp: null - name: {{.Values.metadata.cert_manager_namespace}} -spec: {} -status: {} diff --git a/config/cloudpaks/cp-shared/operators/templates/0100-cert-manager-operator-group.yaml b/config/cloudpaks/cp-shared/operators/templates/0100-cert-manager-operator-group.yaml deleted file mode 100644 index f223fb60..00000000 --- a/config/cloudpaks/cp-shared/operators/templates/0100-cert-manager-operator-group.yaml +++ /dev/null @@ -1,11 +0,0 @@ ---- -apiVersion: operators.coreos.com/v1 -kind: OperatorGroup -metadata: - annotations: - argocd.argoproj.io/sync-wave: "100" - creationTimestamp: null - name: ibm-cert-manager - namespace: {{.Values.metadata.cert_manager_namespace}} -spec: - upgradeStrategy: Default diff --git a/config/cloudpaks/cp-shared/operators/templates/0100-cert-manager-subscription.yaml b/config/cloudpaks/cp-shared/operators/templates/0100-cert-manager-subscription.yaml deleted file mode 100644 index 5cb2b481..00000000 --- a/config/cloudpaks/cp-shared/operators/templates/0100-cert-manager-subscription.yaml +++ /dev/null @@ -1,15 +0,0 @@ ---- -apiVersion: operators.coreos.com/v1alpha1 -kind: Subscription -metadata: - annotations: - argocd.argoproj.io/sync-wave: "100" - creationTimestamp: null - name: ibm-cert-manager-operator - namespace: {{.Values.metadata.cert_manager_namespace}} -spec: - channel: v4.1 - installPlanApproval: Automatic - name: ibm-cert-manager-operator - source: ibm-operator-catalog - sourceNamespace: openshift-marketplace diff --git a/config/cloudpaks/cp-shared/operators/values.yaml b/config/cloudpaks/cp-shared/operators/values.yaml index d3e8b84e..cd891b63 100644 --- a/config/cloudpaks/cp-shared/operators/values.yaml +++ b/config/cloudpaks/cp-shared/operators/values.yaml @@ -1,7 +1,6 @@ --- metadata: argocd_namespace: openshift-gitops - cert_manager_namespace: 
ibm-cert-manager serviceaccount: argocd_application_controller: openshift-gitops-argocd-application-controller online_catalog_source_priority: -1 diff --git a/config/cloudpaks/cp4a/operators/templates/0050-sync-cluster-scoper-operators.yaml b/config/cloudpaks/cp4a/operators/templates/0050-sync-cluster-scoper-operators.yaml new file mode 100644 index 00000000..5e12b766 --- /dev/null +++ b/config/cloudpaks/cp4a/operators/templates/0050-sync-cluster-scoper-operators.yaml @@ -0,0 +1,83 @@ +--- +apiVersion: batch/v1 +kind: Job +metadata: + name: sync-cluster-scoper-operators + annotations: + argocd.argoproj.io/hook: Sync + argocd.argoproj.io/sync-wave: "50" + namespace: openshift-gitops +spec: + template: + spec: + containers: + - name: config + image: quay.io/openshift/origin-cli:latest + imagePullPolicy: IfNotPresent + env: + - name: ARGOCD_NAMESPACE + value: "openshift-gitops" + - name: IBM_CERT_MANAGER + value: {{.Values.metadata.cert_manager_namespace}} + - name: IBM_CERT_MANAGER_CHANNEL + value: {{.Values.metadata.cert_manager_channel}} + command: + - /bin/sh + - -c + - | + set -eo pipefail + set -x + + result=0 + ibm_cert_manager_count=$(oc get Subscription.operators.coreos.com \ + -l operators.coreos.com/ibm-cert-manager-operator.ibm-cert-manager \ + -A \ + -o name | wc -l || result=1) + if [ "${result}" == 0 ]; then + if [ "${ibm_cert_manager_count}" == "0" ]; then + echo "INFO: Installing IBM Cert Manager." 
+ cat << EOF | oc apply -f - + --- + apiVersion: operators.coreos.com/v1 + kind: OperatorGroup + metadata: + name: ibm-cert-manager + namespace: ${IBM_CERT_MANAGER:?} + spec: + upgradeStrategy: Default + --- + apiVersion: operators.coreos.com/v1alpha1 + kind: Subscription + metadata: + name: ibm-cert-manager-operator + namespace: ${IBM_CERT_MANAGER:?} + spec: + channel: ${IBM_CERT_MANAGER_CHANNEL:?} + installPlanApproval: Automatic + name: ibm-cert-manager-operator + source: ibm-operator-catalog + sourceNamespace: openshift-marketplace + EOF + oc wait Subscription.operators.coreos.com ibm-cert-manager-operator -n ${IBM_CERT_MANAGER:?} --for=condition=CatalogSourcesUnhealthy=False \ && oc wait Subscription.operators.coreos.com ibm-cert-manager-operator -n ${IBM_CERT_MANAGER:?} --for=jsonpath='{.status.state}'="AtLatestKnown" \ && echo "INFO: Successfully installed IBM Cert Manager." \ || result=1 + else + echo "INFO: IBM Cert Manager already installed." + oc get Subscription.operators.coreos.com \ -l operators.coreos.com/ibm-cert-manager-operator.ibm-cert-manager \ -A + fi + else + echo "ERROR: Unable to ascertain status of cert managers in the cluster." + fi + + if [ "${result}" -eq 1 ] ; then + echo "ERROR: Unable to locate a running IBM Cert Manager and to install one afterwards." + fi + + exit "${result}" + + restartPolicy: Never + serviceAccountName: {{.Values.serviceaccount.ibm_cloudpaks_installer}} + backoffLimit: 2 diff --git a/config/cloudpaks/cp4a/operators/values.yaml b/config/cloudpaks/cp4a/operators/values.yaml index 8fc805a7..ef0c14ca 100644 --- a/config/cloudpaks/cp4a/operators/values.yaml +++ b/config/cloudpaks/cp4a/operators/values.yaml @@ -1,6 +1,8 @@ --- metadata: argocd_app_namespace: ibm-cloudpaks + cert_manager_namespace: ibm-cert-manager + cert_manager_channel: v4.1 serviceaccount: argocd_application_controller: openshift-gitops-argocd-application-controller storageclass: