From 6bbac3fc97847d12f26efc34e9a2853e0a7413f7 Mon Sep 17 00:00:00 2001 From: kavyashree-r Date: Wed, 4 Sep 2024 13:07:13 +0530 Subject: [PATCH] storage policy quota - 1st set --- tests/e2e/csi_static_provisioning_basic.go | 133 ++++++++- tests/e2e/e2e_common.go | 8 + tests/e2e/improved_csi_idempotency.go | 157 +++++++++- tests/e2e/statefulsets.go | 75 ++++- tests/e2e/tkgs_ha.go | 315 +++++++++++++++++++-- tests/e2e/tkgs_ha_utils.go | 4 +- tests/e2e/util.go | 261 +++++++++++++++-- tests/e2e/vmservice_vm.go | 219 ++++++++++++++ tests/e2e/vsphere_volume_expansion.go | 4 +- 9 files changed, 1111 insertions(+), 65 deletions(-) diff --git a/tests/e2e/csi_static_provisioning_basic.go b/tests/e2e/csi_static_provisioning_basic.go index aada0ccc37..698d05af4d 100644 --- a/tests/e2e/csi_static_provisioning_basic.go +++ b/tests/e2e/csi_static_provisioning_basic.go @@ -38,6 +38,7 @@ import ( admissionapi "k8s.io/pod-security-admission/api" apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" clientset "k8s.io/client-go/kubernetes" restclient "k8s.io/client-go/rest" @@ -78,6 +79,7 @@ var _ = ginkgo.Describe("Basic Static Provisioning", func() { ctx context.Context nonSharedDatastoreURL string fullSyncWaitTime int + isStorageQuotaFSSEnabled bool ) ginkgo.BeforeEach(func() { @@ -140,6 +142,9 @@ var _ = ginkgo.Describe("Basic Static Provisioning", func() { } else { fullSyncWaitTime = defaultFullSyncWaitTime } + // vcAddress := e2eVSphere.Config.Global.VCenterHostname + ":" + sshdPort + // isStorageQuotaFSSEnabled = isFssEnabled(ctx, vcAddress, "STORAGE_QUOTA_M2") + isStorageQuotaFSSEnabled = true }) ginkgo.AfterEach(func() { @@ -868,8 +873,9 @@ var _ = ginkgo.Describe("Basic Static Provisioning", func() { // 11. Verify CRD deleted automatically. 
ginkgo.It("[csi-supervisor] [stretched-svc] Verify static provisioning workflow on SVC import "+ "FCD", ginkgo.Label(p0, block, wcp), func() { - var err error + var totalquota_used_before, storagepolicyquota_pvc_before, storagepolicy_usage_pvc_before, totalquota_used_after *resource.Quantity + var storagepolicyquota_pvc_after, storagepolicy_usage_pvc_after *resource.Quantity ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -879,7 +885,20 @@ var _ = ginkgo.Describe("Basic Static Provisioning", func() { framework.Logf("pvc name :%s", pvcName) namespace = getNamespaceToRunTests(f) - restConfig, _, profileID := staticProvisioningPreSetUpUtil(ctx) + restConfig, storageclass, profileID := staticProvisioningPreSetUpUtil(ctx) + + if isStorageQuotaFSSEnabled { + totalquota_used_before, _ = getTotalQuotaConsumedByStoragePolicy(ctx, restConfig, storageclass.Name, namespace) + framework.Logf("totalUsedQuota_Before :%v", totalquota_used_before) + + storagepolicyquota_pvc_before, _ = getStoragePolicyQuotaForSpecificResourceType(ctx, restConfig, + storagePolicyName, namespace, volExtensionName) + framework.Logf("volume ********** storagepolicyquota_pvc_before :%v ", storagepolicyquota_pvc_before) + + storagepolicy_usage_pvc_before, _ = getStoragePolicyUsageForSpecificResourceType(ctx, restConfig, + storagePolicyName, namespace, pvcUsage) + framework.Logf("volume ********** storagepolicy_usage_pvc_before :%v", storagepolicy_usage_pvc_before) + } ginkgo.By("Creating FCD Disk") fcdID, err := e2eVSphere.createFCDwithValidProfileID(ctx, @@ -904,6 +923,27 @@ var _ = ginkgo.Describe("Basic Static Provisioning", func() { pv := getPvFromClaim(client, namespace, pvcName) verifyBidirectionalReferenceOfPVandPVC(ctx, client, pvc, pv, fcdID) + if isStorageQuotaFSSEnabled { + totalquota_used_after, _ = getTotalQuotaConsumedByStoragePolicy(ctx, restConfig, storageclass.Name, namespace) + framework.Logf("totalquota_used_after :%v", totalquota_used_after) + + storagepolicyquota_pvc_after, _ = getStoragePolicyQuotaForSpecificResourceType(ctx, restConfig, + storagePolicyName, namespace, volExtensionName) + framework.Logf("********** storagepolicyquota_pvc_after :%v", storagepolicyquota_pvc_after) + + storagepolicy_usage_pvc_after, _ = getStoragePolicyUsageForSpecificResourceType(ctx, restConfig, + storagePolicyName, namespace, pvcUsage) + framework.Logf("********** pvc_Usage_Quota_After :%v", storagepolicy_usage_pvc_after) + + quotavalidationStatus := validate_totalStoragequota(ctx, diskSizeInMb, totalquota_used_before, totalquota_used_after) + gomega.Expect(quotavalidationStatus).NotTo(gomega.BeFalse()) + quotavalidationStatus = validate_totalStoragequota(ctx, diskSizeInMb, storagepolicyquota_pvc_before, storagepolicyquota_pvc_after) + gomega.Expect(quotavalidationStatus).NotTo(gomega.BeFalse()) + quotavalidationStatus = validate_totalStoragequota(ctx, diskSizeInMb, storagepolicy_usage_pvc_before, storagepolicy_usage_pvc_after) + gomega.Expect(quotavalidationStatus).NotTo(gomega.BeFalse()) + + } + ginkgo.By("Creating pod") pod, err := createPod(ctx, client, namespace, nil, []*v1.PersistentVolumeClaim{pvc}, false, "") gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -933,6 +973,29 @@ var _ = ginkgo.Describe("Basic Static Provisioning", func() { defer func() { testCleanUpUtil(ctx, restConfig, cnsRegisterVolume, namespace, pvc.Name, pv.Name) + + if isStorageQuotaFSSEnabled { + totalquota_used_after_Cleanup, totalReservedQuota_after_Cleanup := getTotalQuotaConsumedByStoragePolicy(ctx, restConfig, 
storageclass.Name, namespace) + framework.Logf("totalquota_used_after :%v, totalReservedQuota_after: %v totalQuota", totalquota_used_after_Cleanup, totalReservedQuota_after_Cleanup) + + storagepolicyquota_pvc_after_cleanup, pvc_reservedQuota_after_cleanup := getStoragePolicyQuotaForSpecificResourceType(ctx, restConfig, + storagePolicyName, namespace, volExtensionName) + framework.Logf("volume ********** storagepolicyquota_pvc_after_cleanup :%v, pvc_reservedQuota_after__cleanup: %v PolicyQuota", + storagepolicyquota_pvc_after_cleanup, pvc_reservedQuota_after_cleanup) + + pvc_Usage_Quota_After_cleanup, pvc_reserved_Quota_After_cleanup := getStoragePolicyUsageForSpecificResourceType(ctx, restConfig, + storagePolicyName, namespace, pvcUsage) + framework.Logf("volume ********** pvc_Usage_Quota_After :%v, pvc_reserved_Quota_After: %v ", pvc_Usage_Quota_After_cleanup, pvc_reserved_Quota_After_cleanup) + + quotavalidationStatus_afterCleanup := validate_totalStoragequota_afterCleanUp(ctx, diskSizeInMb, totalquota_used_after, totalquota_used_after_Cleanup) + gomega.Expect(quotavalidationStatus_afterCleanup).NotTo(gomega.BeFalse()) + quotavalidationStatus_afterCleanup = validate_totalStoragequota_afterCleanUp(ctx, diskSizeInMb, storagepolicyquota_pvc_after, storagepolicyquota_pvc_after_cleanup) + gomega.Expect(quotavalidationStatus_afterCleanup).NotTo(gomega.BeFalse()) + quotavalidationStatus_afterCleanup = validate_totalStoragequota_afterCleanUp(ctx, diskSizeInMb, storagepolicy_usage_pvc_after, pvc_Usage_Quota_After_cleanup) + gomega.Expect(quotavalidationStatus_afterCleanup).NotTo(gomega.BeFalse()) + reservedQuota := validate_reservedQuota_afterCleanUp(ctx, totalReservedQuota_after_Cleanup, pvc_reservedQuota_after_cleanup, pvc_reserved_Quota_After_cleanup) + gomega.Expect(reservedQuota).NotTo(gomega.BeFalse()) + } }() }) @@ -957,6 +1020,10 @@ var _ = ginkgo.Describe("Basic Static Provisioning", func() { // 14. Verify CRD deleted automatically. 
ginkgo.It("[csi-supervisor] Verify static provisioning workflow on svc - when there is no "+ "resourcequota available", ginkgo.Label(p1, block, wcp), func() { + + var totalquota_used_before, storagepolicyquota_pvc_before, storagepolicy_usage_pvc_before, totalquota_used_after *resource.Quantity + var storagepolicyquota_pvc_after, storagepolicy_usage_pvc_after *resource.Quantity + var err error ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -967,6 +1034,19 @@ var _ = ginkgo.Describe("Basic Static Provisioning", func() { restConfig, _, profileID := staticProvisioningPreSetUpUtil(ctx) + if isStorageQuotaFSSEnabled { + totalquota_used_before, _ = getTotalQuotaConsumedByStoragePolicy(ctx, restConfig, storagePolicyName, namespace) + framework.Logf("totalUsedQuota_Before :%v", totalquota_used_before) + + storagepolicyquota_pvc_before, _ = getStoragePolicyQuotaForSpecificResourceType(ctx, restConfig, + storagePolicyName, namespace, volExtensionName) + framework.Logf("volume ********** storagepolicyquota_pvc_before :%v ", storagepolicyquota_pvc_before) + + storagepolicy_usage_pvc_before, _ = getStoragePolicyUsageForSpecificResourceType(ctx, restConfig, + storagePolicyName, namespace, pvcUsage) + framework.Logf("volume ********** storagepolicy_usage_pvc_before :%v", storagepolicy_usage_pvc_before) + } + ginkgo.By("Create FCD with valid storage policy.") fcdID, err := e2eVSphere.createFCDwithValidProfileID(ctx, "staticfcd"+curtimeinstring, profileID, diskSizeInMb, defaultDatastore.Reference()) @@ -989,6 +1069,7 @@ var _ = ginkgo.Describe("Basic Static Provisioning", func() { framework.Logf("Wait till the PVC creation succeeds after increasing resource quota") framework.ExpectNoError(waitForCNSRegisterVolumeToGetCreated(ctx, restConfig, namespace, cnsRegisterVolume, poll, pollTimeout)) + cnsRegisterVolumeName := cnsRegisterVolume.GetName() framework.Logf("CNS register volume name : %s", cnsRegisterVolumeName) @@ -1014,6 +1095,28 @@ var _ = ginkgo.Describe("Basic Static Provisioning", func() { _, err = e2eVSphere.getVMByUUID(ctx, vmUUID) gomega.Expect(err).NotTo(gomega.HaveOccurred()) + if isStorageQuotaFSSEnabled { + totalquota_used_after, _ = getTotalQuotaConsumedByStoragePolicy(ctx, restConfig, storagePolicyName, namespace) + framework.Logf("totalquota_used_after :%v", totalquota_used_after) + + storagepolicyquota_pvc_after, _ = getStoragePolicyQuotaForSpecificResourceType(ctx, restConfig, + storagePolicyName, namespace, volExtensionName) + framework.Logf("********** storagepolicyquota_pvc_after :%v", storagepolicyquota_pvc_after) + + storagepolicy_usage_pvc_after, _ = getStoragePolicyUsageForSpecificResourceType(ctx, restConfig, + storagePolicyName, namespace, pvcUsage) + framework.Logf("********** pvc_Usage_Quota_After :%v", storagepolicy_usage_pvc_after) + + quotavalidationStatus := validate_totalStoragequota(ctx, diskSizeInMb, totalquota_used_before, totalquota_used_after) + gomega.Expect(quotavalidationStatus).NotTo(gomega.BeFalse()) + quotavalidationStatus = validate_totalStoragequota(ctx, diskSizeInMb, storagepolicyquota_pvc_before, storagepolicyquota_pvc_after) + gomega.Expect(quotavalidationStatus).NotTo(gomega.BeFalse()) + quotavalidationStatus = validate_totalStoragequota(ctx, diskSizeInMb, storagepolicy_usage_pvc_before, storagepolicy_usage_pvc_after) + framework.Logf("quotavalidationStatus :%v", quotavalidationStatus) + gomega.Expect(quotavalidationStatus).NotTo(gomega.BeFalse()) + + } + ginkgo.By("Deleting the pod") err = fpod.DeletePodWithWait(ctx, client, 
pod) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -1027,6 +1130,30 @@ var _ = ginkgo.Describe("Basic Static Provisioning", func() { vmUUID, pv.Spec.CSI.VolumeHandle)) defer func() { testCleanUpUtil(ctx, restConfig, cnsRegisterVolume, namespace, pvc.Name, pv.Name) + + if isStorageQuotaFSSEnabled { + totalquota_used_after_Cleanup, totalReservedQuota_after_Cleanup := getTotalQuotaConsumedByStoragePolicy(ctx, restConfig, storagePolicyName, namespace) + framework.Logf("totalquota_used_after :%v, totalReservedQuota_after: %v totalQuota", totalquota_used_after_Cleanup, totalReservedQuota_after_Cleanup) + + storagepolicyquota_pvc_after_cleanup, pvc_reservedQuota_after_cleanup := getStoragePolicyQuotaForSpecificResourceType(ctx, restConfig, + storagePolicyName, namespace, volExtensionName) + framework.Logf("volume ********** storagepolicyquota_pvc_after_cleanup :%v, pvc_reservedQuota_after__cleanup: %v PolicyQuota", storagepolicyquota_pvc_after_cleanup, pvc_reservedQuota_after_cleanup) + + pvc_Usage_Quota_After_cleanup, pvc_reserved_Quota_After_cleanup := getStoragePolicyUsageForSpecificResourceType(ctx, restConfig, + storagePolicyName, namespace, pvcUsage) + framework.Logf("volume ********** pvc_Usage_Quota_After_cleanup :%v, pvc_reserved_Quota_After_cleanup: %v ", pvc_Usage_Quota_After_cleanup, pvc_reserved_Quota_After_cleanup) + + quotavalidationStatus_afterCleanup := validate_totalStoragequota_afterCleanUp(ctx, diskSizeInMb, totalquota_used_after, totalquota_used_after_Cleanup) + gomega.Expect(quotavalidationStatus_afterCleanup).NotTo(gomega.BeFalse()) + quotavalidationStatus_afterCleanup = validate_totalStoragequota_afterCleanUp(ctx, diskSizeInMb, storagepolicyquota_pvc_after, storagepolicyquota_pvc_after_cleanup) + gomega.Expect(quotavalidationStatus_afterCleanup).NotTo(gomega.BeFalse()) + quotavalidationStatus_afterCleanup = validate_totalStoragequota_afterCleanUp(ctx, diskSizeInMb, storagepolicy_usage_pvc_after, pvc_Usage_Quota_After_cleanup) + gomega.Expect(quotavalidationStatus_afterCleanup).NotTo(gomega.BeFalse()) + reservedQuota := validate_reservedQuota_afterCleanUp(ctx, totalReservedQuota_after_Cleanup, pvc_reservedQuota_after_cleanup, pvc_reserved_Quota_After_cleanup) + gomega.Expect(reservedQuota).NotTo(gomega.BeFalse()) + framework.Logf("quotavalidationStatus :%v reservedQuota:%v", quotavalidationStatus_afterCleanup, reservedQuota) + } + }() }) @@ -2272,7 +2399,7 @@ var _ = ginkgo.Describe("Basic Static Provisioning", func() { }) /* - VMDK is deleted from datastore but CNS volume is still present + VMDK is deleted from datastore but CNS volume is still present STEPS: 1.Create FCD disk. 2.Creating Static PV with FCD ID and PVC from it.
diff --git a/tests/e2e/e2e_common.go b/tests/e2e/e2e_common.go index 5e5b7650db..3b4c8418b7 100644 --- a/tests/e2e/e2e_common.go +++ b/tests/e2e/e2e_common.go @@ -254,6 +254,14 @@ const ( podVMOnStretchedSupervisor = "stretched-svc" stretchedSVCTopologyLevels = 1 envZonalStoragePolicyName2 = "ZONAL2_STORAGECLASS" + volExtensionName = "volume.cns.vsphere.vmware.com" + snapshotExtensionName = "snapshot.cns.vsphere.vmware.com" + vmServiceExtensionName = "vmservice.cns.vsphere.vmware.com" + pvcUsage = "-pvc-usage" + snapshotUsage = "-snapshot-usage" + vmUsage = "-vm-usage" + diskSize1Gi = int64(1024) + storageQuotaWebhookPrefix = "storage-quota-webhook" ) /* diff --git a/tests/e2e/improved_csi_idempotency.go b/tests/e2e/improved_csi_idempotency.go index 396e708794..e7f3cddbc3 100644 --- a/tests/e2e/improved_csi_idempotency.go +++ b/tests/e2e/improved_csi_idempotency.go @@ -304,6 +304,23 @@ var _ = ginkgo.Describe("[csi-block-vanilla] [csi-file-vanilla] "+ extendVolumeWithServiceDown(serviceName, namespace, client, storagePolicyName, scParameters, volumeOpsScale, true, isServiceStopped, c) }) + + /* + Create volume when storage-quota-webhook goes down + 1. Create a SC using a thick provisioned policy + 2. Create PVCs using the SC + 3. Bring down the storage-quota-webhook and wait for 5mins (default provisioner timeout) + 4. Bring up storage-quota-webhook + 5. Wait for pvcs to be bound + 6. Delete pvcs and SC + 7. Verify no orphan volumes are left + */ + ginkgo.It("create volume when storage-quota-webhook goes down", func() { + serviceName = storageQuotaWebhookPrefix + createVolumeWithServiceDown(serviceName, namespace, client, storagePolicyName, + scParameters, volumeOpsScale, isServiceStopped, c) + }) + }) // createVolumesByReducingProvisionerTime creates the volumes by reducing the provisioner timeout @@ -425,6 +442,8 @@ func createVolumesByReducingProvisionerTime(namespace string, client clientset.I func createVolumeWithServiceDown(serviceName string, namespace string, client clientset.Interface, storagePolicyName string, scParameters map[string]string, volumeOpsScale int, isServiceStopped bool, c clientset.Interface) { + var totalquota_used_before, storagepolicyquota_pvc_before, storagepolicy_usage_pvc_before, totalquota_used_after *resource.Quantity + var storagepolicyquota_pvc_after, storagepolicy_usage_pvc_after *resource.Quantity ctx, cancel := context.WithCancel(context.Background()) defer cancel() ginkgo.By(fmt.Sprintf("Invoking Test for create volume when %v goes down", serviceName)) @@ -460,7 +479,9 @@ func createVolumeWithServiceDown(serviceName string, namespace string, client cl profileID := e2eVSphere.GetSpbmPolicyID(thickProvPolicy) scParameters[scParamStoragePolicyID] = profileID // create resource quota - createResourceQuota(client, namespace, rqLimit, thickProvPolicy) + //createResourceQuota(client, namespace, rqLimit, thickProvPolicy) + restConfig = getRestConfigClient() + setStoragePolicyQuota(ctx, restConfig, storagePolicyName, namespace, rqLimit) storageclass, err = createStorageClass(client, scParameters, nil, "", "", false, thickProvPolicy) } else { ginkgo.By("CNS_TEST: Running for GC setup") @@ -486,6 +507,21 @@ func createVolumeWithServiceDown(serviceName string, namespace string, client cl } }() + if !vanillaCluster { + restConfig := getRestConfigClient() + totalquota_used_before, _ = getTotalQuotaConsumedByStoragePolicy(ctx, restConfig, storageclass.Name, namespace) + framework.Logf("totalUsedQuota_Before :%v", totalquota_used_before) + + storagepolicyquota_pvc_before, _ =
getStoragePolicyQuotaForSpecificResourceType(ctx, restConfig, + storageclass.Name, namespace, volExtensionName) + framework.Logf("volume ********** storagepolicyquota_pvc_before :%v ", storagepolicyquota_pvc_before) + + storagepolicy_usage_pvc_before, _ = getStoragePolicyUsageForSpecificResourceType(ctx, restConfig, + storageclass.Name, namespace, pvcUsage) + framework.Logf("volume ********** storagepolicy_usage_pvc_before :%v", storagepolicy_usage_pvc_before) + + } + ginkgo.By("Creating PVCs using the Storage Class") framework.Logf("VOLUME_OPS_SCALE is set to %v", volumeOpsScale) for i := 0; i < volumeOpsScale; i++ { @@ -565,6 +601,41 @@ func createVolumeWithServiceDown(serviceName string, namespace string, client cl startHostDOnHost(ctx, hostIP) } isServiceStopped = false + } else if serviceName == "storage-quota-webhook" { + // Get CSI Controller's replica count from the setup + deployment, err := c.AppsV1().Deployments(kubeSystemNamespace).Get(ctx, + storageQuotaWebhookPrefix, metav1.GetOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + csiReplicaCount := *deployment.Spec.Replicas + + ginkgo.By("Stopping webhook driver") + isServiceStopped, err = stopKubeSystemPods(ctx, c, kubeSystemNamespace) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + defer func() { + if isServiceStopped { + framework.Logf("Starting storage-quota-webhook driver") + isServiceStopped, err = startKubeSystemPods(ctx, c, csiReplicaCount, kubeSystemNamespace) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + }() + framework.Logf("Starting storage-quota-webhook ") + isServiceStopped, err = startKubeSystemPods(ctx, c, csiReplicaCount, kubeSystemNamespace) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + if os.Getenv(envFullSyncWaitTime) != "" { + fullSyncWaitTime, err = strconv.Atoi(os.Getenv(envFullSyncWaitTime)) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + // Full sync interval can be 1 min at minimum so full sync wait time has to be more than 120s + if fullSyncWaitTime < 120 || fullSyncWaitTime > defaultFullSyncWaitTime { + framework.Failf("The FullSync Wait time %v is not set correctly", fullSyncWaitTime) + } + } else { + fullSyncWaitTime = defaultFullSyncWaitTime + } + + ginkgo.By(fmt.Sprintf("Sleeping for %v seconds to allow full sync finish", fullSyncWaitTime)) + time.Sleep(time.Duration(fullSyncWaitTime) * time.Second) } else { ginkgo.By(fmt.Sprintf("Stopping %v on the vCenter host", serviceName)) vcAddress := e2eVSphere.Config.Global.VCenterHostname + ":" + sshdPort @@ -625,6 +696,28 @@ func createVolumeWithServiceDown(serviceName string, namespace string, client cl "kubernetes", volumeID)) } }() + + if !vanillaCluster { + totalquota_used_after, _ = getTotalQuotaConsumedByStoragePolicy(ctx, restConfig, storageclass.Name, namespace) + framework.Logf("totalquota_used_after :%v", totalquota_used_after) + + storagepolicyquota_pvc_after, _ = getStoragePolicyQuotaForSpecificResourceType(ctx, restConfig, + storageclass.Name, namespace, volExtensionName) + framework.Logf("********** storagepolicyquota_pvc_after :%v", storagepolicyquota_pvc_after) + + storagepolicy_usage_pvc_after, _ = getStoragePolicyUsageForSpecificResourceType(ctx, restConfig, + storageclass.Name, namespace, pvcUsage) + framework.Logf("********** pvc_Usage_Quota_After :%v", storagepolicy_usage_pvc_after) + + quotavalidationStatus := validate_totalStoragequota(ctx, diskSizeInMb*int64(volumeOpsScale), totalquota_used_before, totalquota_used_after) + gomega.Expect(quotavalidationStatus).NotTo(gomega.BeFalse()) + 
quotavalidationStatus = validate_totalStoragequota(ctx, diskSizeInMb*int64(volumeOpsScale), storagepolicyquota_pvc_before, storagepolicyquota_pvc_after) + gomega.Expect(quotavalidationStatus).NotTo(gomega.BeFalse()) + quotavalidationStatus = validate_totalStoragequota(ctx, diskSizeInMb*int64(volumeOpsScale), storagepolicy_usage_pvc_before, storagepolicy_usage_pvc_after) + gomega.Expect(quotavalidationStatus).NotTo(gomega.BeFalse()) + + } + } // extendVolumeWithServiceDown extends the volumes and immediately stops the service and wait for @@ -632,6 +725,8 @@ func createVolumeWithServiceDown(serviceName string, namespace string, client cl func extendVolumeWithServiceDown(serviceName string, namespace string, client clientset.Interface, storagePolicyName string, scParameters map[string]string, volumeOpsScale int, extendVolume bool, isServiceStopped bool, c clientset.Interface) { + var totalquota_used_before, storagepolicyquota_pvc_before, storagepolicy_usage_pvc_before, totalquota_used_after *resource.Quantity + var storagepolicyquota_pvc_after, storagepolicy_usage_pvc_after *resource.Quantity ctx, cancel := context.WithCancel(context.Background()) defer cancel() ginkgo.By(fmt.Sprintf("Invoking Test for create volume when %v goes down", serviceName)) @@ -667,7 +762,8 @@ func extendVolumeWithServiceDown(serviceName string, namespace string, client cl profileID := e2eVSphere.GetSpbmPolicyID(thickProvPolicy) scParameters[scParamStoragePolicyID] = profileID // create resource quota - createResourceQuota(client, namespace, rqLimit, thickProvPolicy) + restConfig = getRestConfigClient() + setStoragePolicyQuota(ctx, restConfig, storagePolicyName, namespace, rqLimit) storageclass, err = createStorageClass(client, scParameters, nil, "", "", true, thickProvPolicy) gomega.Expect(err).NotTo(gomega.HaveOccurred()) } else { @@ -698,6 +794,21 @@ func extendVolumeWithServiceDown(serviceName string, namespace string, client cl } }() + if !vanillaCluster { + restConfig := getRestConfigClient() + totalquota_used_before, _ = getTotalQuotaConsumedByStoragePolicy(ctx, restConfig, storageclass.Name, namespace) + framework.Logf("totalUsedQuota_Before :%v", totalquota_used_before) + + storagepolicyquota_pvc_before, _ = getStoragePolicyQuotaForSpecificResourceType(ctx, restConfig, + storageclass.Name, namespace, volExtensionName) + framework.Logf("volume ********** storagepolicyquota_pvc_before :%v ", storagepolicyquota_pvc_before) + + storagepolicy_usage_pvc_before, _ = getStoragePolicyUsageForSpecificResourceType(ctx, restConfig, + storageclass.Name, namespace, pvcUsage) + framework.Logf("volume ********** storagepolicy_usage_pvc_before :%v", storagepolicy_usage_pvc_before) + + } + ginkgo.By("Creating PVCs using the Storage Class") framework.Logf("VOLUME_OPS_SCALE is set to %v", volumeOpsScale) for i := 0; i < volumeOpsScale; i++ { @@ -731,6 +842,27 @@ func extendVolumeWithServiceDown(serviceName string, namespace string, client cl } }() + if !vanillaCluster { + totalquota_used_after, _ = getTotalQuotaConsumedByStoragePolicy(ctx, restConfig, storageclass.Name, namespace) + framework.Logf("totalquota_used_after :%v", totalquota_used_after) + + storagepolicyquota_pvc_after, _ = getStoragePolicyQuotaForSpecificResourceType(ctx, restConfig, + storageclass.Name, namespace, volExtensionName) + framework.Logf("********** storagepolicyquota_pvc_after :%v", storagepolicyquota_pvc_after) + + storagepolicy_usage_pvc_after, _ = getStoragePolicyUsageForSpecificResourceType(ctx, restConfig, + storageclass.Name, namespace, 
pvcUsage) + framework.Logf("********** pvc_Usage_Quota_After :%v", storagepolicy_usage_pvc_after) + + quotavalidationStatus := validate_totalStoragequota(ctx, diskSizeInMb*int64(volumeOpsScale), totalquota_used_before, totalquota_used_after) + gomega.Expect(quotavalidationStatus).NotTo(gomega.BeFalse()) + quotavalidationStatus = validate_totalStoragequota(ctx, diskSizeInMb*int64(volumeOpsScale), storagepolicyquota_pvc_before, storagepolicyquota_pvc_after) + gomega.Expect(quotavalidationStatus).NotTo(gomega.BeFalse()) + quotavalidationStatus = validate_totalStoragequota(ctx, diskSizeInMb*int64(volumeOpsScale), storagepolicy_usage_pvc_before, storagepolicy_usage_pvc_after) + gomega.Expect(quotavalidationStatus).NotTo(gomega.BeFalse()) + + } + ginkgo.By("Create POD") pod, err := createPod(ctx, client, namespace, nil, pvclaims, false, "") gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -834,6 +966,27 @@ func extendVolumeWithServiceDown(serviceName string, namespace string, client cl pvcConditions := claims.Status.Conditions expectEqual(len(pvcConditions), 0, "pvc should not have conditions") } + + if !vanillaCluster { + totalquota_used_after_expansion, _ := getTotalQuotaConsumedByStoragePolicy(ctx, restConfig, storageclass.Name, namespace) + framework.Logf("totalquota_used_after_expansion :%v", totalquota_used_after_expansion) + + storagepolicyquota_pvc_after_expansion, _ := getStoragePolicyQuotaForSpecificResourceType(ctx, restConfig, + storageclass.Name, namespace, volExtensionName) + framework.Logf("********** storagepolicyquota_pvc_after_expansion :%v", storagepolicyquota_pvc_after_expansion) + + storagepolicy_usage_pvc_after_expansion, _ := getStoragePolicyUsageForSpecificResourceType(ctx, restConfig, + storageclass.Name, namespace, pvcUsage) + framework.Logf("********** storagepolicy_usage_pvc_after_expansion :%v", storagepolicy_usage_pvc_after_expansion) + + //New size is 6Gi, diskSizeInMb is 2Gi so multiplying by 3 to make the expected quota consumption value + quotavalidationStatus := validate_totalStoragequota(ctx, diskSizeInMb*3*int64(volumeOpsScale), totalquota_used_before, totalquota_used_after_expansion) + gomega.Expect(quotavalidationStatus).NotTo(gomega.BeFalse()) + quotavalidationStatus = validate_totalStoragequota(ctx, diskSizeInMb*3*int64(volumeOpsScale), storagepolicyquota_pvc_before, storagepolicyquota_pvc_after_expansion) + gomega.Expect(quotavalidationStatus).NotTo(gomega.BeFalse()) + quotavalidationStatus = validate_totalStoragequota(ctx, diskSizeInMb*3*int64(volumeOpsScale), storagepolicy_usage_pvc_before, storagepolicy_usage_pvc_after_expansion) + gomega.Expect(quotavalidationStatus).NotTo(gomega.BeFalse()) + } } // stopHostD is a function for waitGroup to run stop hostd parallelly diff --git a/tests/e2e/statefulsets.go b/tests/e2e/statefulsets.go index 7614a45ac6..80d0b99314 100644 --- a/tests/e2e/statefulsets.go +++ b/tests/e2e/statefulsets.go @@ -68,19 +68,20 @@ var _ = ginkgo.Describe("statefulset", func() { f := framework.NewDefaultFramework("e2e-vsphere-statefulset") f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged var ( - namespace string - client clientset.Interface - storagePolicyName string - scParameters map[string]string - storageClassName string - zonalPolicy string - zonalWffcPolicy string - categories []string - labels_ns map[string]string - allowedTopologyHAMap map[string][]string - nodeList *v1.NodeList - stsReplicas int32 - allowedTopologies []v1.TopologySelectorLabelRequirement + namespace string + client clientset.Interface + 
storagePolicyName string + scParameters map[string]string + storageClassName string + zonalPolicy string + zonalWffcPolicy string + categories []string + labels_ns map[string]string + allowedTopologyHAMap map[string][]string + nodeList *v1.NodeList + stsReplicas int32 + allowedTopologies []v1.TopologySelectorLabelRequirement + isStorageQuotaFSSEnabled bool ) ginkgo.BeforeEach(func() { @@ -128,6 +129,11 @@ var _ = ginkgo.Describe("statefulset", func() { nodeList, err = fnodes.GetReadySchedulableNodes(ctx, client) framework.ExpectNoError(err, "Unable to find ready and schedulable Node") } + + // vcAddress := e2eVSphere.Config.Global.VCenterHostname + ":" + sshdPort + // isStorageQuotaFSSEnabled = isFssEnabled(ctx, vcAddress, "STORAGE_QUOTA_M2") + isStorageQuotaFSSEnabled = true + }) ginkgo.AfterEach(func() { @@ -164,6 +170,9 @@ var _ = ginkgo.Describe("statefulset", func() { ginkgo.It("[csi-block-vanilla] [csi-supervisor] [csi-block-vanilla-parallelized] [stretched-svc] Statefulset "+ "testing with default podManagementPolicy", ginkgo.Label(p0, vanilla, block, wcp, core), func() { + var totalquota_used_before, storagepolicyquota_pvc_before, storagepolicy_usage_pvc_before, totalquota_used_after *resource.Quantity + var storagepolicyquota_pvc_after, storagepolicy_usage_pvc_after *resource.Quantity + ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -177,7 +186,7 @@ var _ = ginkgo.Describe("statefulset", func() { sc, err := client.StorageV1().StorageClasses().Create(ctx, scSpec, metav1.CreateOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer func() { - err := client.StorageV1().StorageClasses().Delete(ctx, sc.Name, *metav1.NewDeleteOptions(0)) + err = client.StorageV1().StorageClasses().Delete(ctx, sc.Name, *metav1.NewDeleteOptions(0)) gomega.Expect(err).NotTo(gomega.HaveOccurred()) }() } else { @@ -187,6 +196,20 @@ var _ = ginkgo.Describe("statefulset", func() { scParameters[scParamStoragePolicyID] = profileID } + restConfig := getRestConfigClient() + if supervisorCluster && isStorageQuotaFSSEnabled { + totalquota_used_before, _ = getTotalQuotaConsumedByStoragePolicy(ctx, restConfig, storagePolicyName, namespace) + framework.Logf("totalUsedQuota_Before :%v", totalquota_used_before) + + storagepolicyquota_pvc_before, _ = getStoragePolicyQuotaForSpecificResourceType(ctx, restConfig, + storagePolicyName, namespace, volExtensionName) + framework.Logf("volume ********** storagepolicyquota_pvc_before :%v ", storagepolicyquota_pvc_before) + + storagepolicy_usage_pvc_before, _ = getStoragePolicyUsageForSpecificResourceType(ctx, restConfig, + storagePolicyName, namespace, pvcUsage) + framework.Logf("volume ********** storagepolicy_usage_pvc_before :%v", storagepolicy_usage_pvc_before) + } + ginkgo.By("Creating service") service := CreateService(namespace, client) defer func() { @@ -220,6 +243,30 @@ var _ = ginkgo.Describe("statefulset", func() { gomega.Expect(len(ssPodsBeforeScaleDown.Items) == int(replicas)).To(gomega.BeTrue(), "Number of Pods in the statefulset should match with number of replicas") + if supervisorCluster && isStorageQuotaFSSEnabled { + totalquota_used_after, _ = getTotalQuotaConsumedByStoragePolicy(ctx, restConfig, storagePolicyName, namespace) + framework.Logf("totalquota_used_after :%v", totalquota_used_after) + + storagepolicyquota_pvc_after, _ = getStoragePolicyQuotaForSpecificResourceType(ctx, restConfig, + storagePolicyName, namespace, volExtensionName) + framework.Logf("********** storagepolicyquota_pvc_after :%v", 
storagepolicyquota_pvc_after) + + storagepolicy_usage_pvc_after, _ = getStoragePolicyUsageForSpecificResourceType(ctx, restConfig, + storagePolicyName, namespace, pvcUsage) + framework.Logf("********** pvc_Usage_Quota_After :%v", storagepolicy_usage_pvc_after) + + quotavalidationStatus := validate_totalStoragequota(ctx, diskSize1Gi*3, totalquota_used_before, totalquota_used_after) + gomega.Expect(quotavalidationStatus).NotTo(gomega.BeFalse()) + framework.Logf("********** quotavalidationStatus :%v", quotavalidationStatus) + quotavalidationStatus = validate_totalStoragequota(ctx, diskSize1Gi*3, storagepolicyquota_pvc_before, storagepolicyquota_pvc_after) + gomega.Expect(quotavalidationStatus).NotTo(gomega.BeFalse()) + framework.Logf("********** pvc_Usage_Quota_After :%v", quotavalidationStatus) + quotavalidationStatus = validate_totalStoragequota(ctx, diskSize1Gi*3, storagepolicy_usage_pvc_before, storagepolicy_usage_pvc_after) + framework.Logf("quotavalidationStatus :%v", quotavalidationStatus) + gomega.Expect(quotavalidationStatus).NotTo(gomega.BeFalse()) + + } + // Get the list of Volumes attached to Pods before scale down var volumesBeforeScaleDown []string for _, sspod := range ssPodsBeforeScaleDown.Items { diff --git a/tests/e2e/tkgs_ha.go b/tests/e2e/tkgs_ha.go index d9e4d6363d..8802066d73 100644 --- a/tests/e2e/tkgs_ha.go +++ b/tests/e2e/tkgs_ha.go @@ -71,6 +71,7 @@ var _ = ginkgo.Describe("[csi-tkgs-ha] Tkgs-HA-SanityTests", func() { labels_ns map[string]string isVcRebooted bool vcAddress string + isStorageQuotaFSSEnabled bool ) ginkgo.BeforeEach(func() { client = f.ClientSet @@ -141,6 +142,10 @@ var _ = ginkgo.Describe("[csi-tkgs-ha] Tkgs-HA-SanityTests", func() { } else { pandoraSyncWaitTime = defaultPandoraSyncWaitTime } + + //vcAddress := e2eVSphere.Config.Global.VCenterHostname + ":" + sshdPort + //isStorageQuotaFSSEnabled = isFssEnabled(ctx, vcAddress, "STORAGE_QUOTA_M2") + isStorageQuotaFSSEnabled = true }) ginkgo.AfterEach(func() { @@ -189,6 +194,10 @@ var _ = ginkgo.Describe("[csi-tkgs-ha] Tkgs-HA-SanityTests", func() { 18. 
Delete PVC,POD,SC */ ginkgo.It("Dynamic PVC - Zonal storage and Immediate binding", func() { + var totalquota_used_before, storagepolicyquota_pvc_before, storagepolicy_usage_pvc_before, totalquota_used_after *resource.Quantity + var storagepolicyquota_volSnapshot_before, storagepolicy_usage_volSnapshot_before, storagepolicyquota_volSnapshot_after, storagepolicy_usage_volSnapshot_after *resource.Quantity + var storagepolicyquota_pvc_after, storagepolicy_usage_pvc_after *resource.Quantity + ctx, cancel := context.WithCancel(context.Background()) defer cancel() framework.Logf("snapc: %v", snapc) @@ -198,12 +207,37 @@ var _ = ginkgo.Describe("[csi-tkgs-ha] Tkgs-HA-SanityTests", func() { framework.ExpectNoError(err, "Unable to find ready and schedulable Node") ginkgo.By("Creating Pvc with Immediate topology storageclass") - createResourceQuota(client, namespace, rqLimit, zonalPolicy) + //createResourceQuota(client, namespace, rqLimit, zonalPolicy) + svcClient, svNamespace := getSvcClientAndNamespace() + setResourceQuota(svcClient, svNamespace, rqLimit) + restConfig := getRestConfigClient() scParameters[svStorageClassName] = zonalPolicy storageclass, err := client.StorageV1().StorageClasses().Get(ctx, zonalPolicy, metav1.GetOptions{}) if !apierrors.IsNotFound(err) { gomega.Expect(err).NotTo(gomega.HaveOccurred()) } + + if isStorageQuotaFSSEnabled { + totalquota_used_before, _ = getTotalQuotaConsumedByStoragePolicy(ctx, restConfig, storageclass.Name, svNamespace) + framework.Logf("totalUsedQuota_Before :%v", totalquota_used_before) + + storagepolicyquota_pvc_before, _ = getStoragePolicyQuotaForSpecificResourceType(ctx, restConfig, + storageclass.Name, svNamespace, volExtensionName) + framework.Logf("volume ********** storagepolicyquota_pvc_before :%v ", storagepolicyquota_pvc_before) + + storagepolicy_usage_pvc_before, _ = getStoragePolicyUsageForSpecificResourceType(ctx, restConfig, + storageclass.Name, svNamespace, pvcUsage) + framework.Logf("volume ********** storagepolicy_usage_pvc_before :%v", storagepolicy_usage_pvc_before) + + storagepolicyquota_volSnapshot_before, _ = getStoragePolicyQuotaForSpecificResourceType(ctx, restConfig, + storageclass.Name, svNamespace, snapshotExtensionName) + framework.Logf("volume ********** storagepolicyquota_pvc_before :%v ", storagepolicyquota_volSnapshot_before) + + storagepolicy_usage_volSnapshot_before, _ = getStoragePolicyUsageForSpecificResourceType(ctx, restConfig, + storageclass.Name, svNamespace, snapshotUsage) + framework.Logf("volume ********** storagepolicy_usage_pvc_before :%v", storagepolicy_usage_volSnapshot_before) + } + pvclaim, err := createPVC(ctx, client, namespace, nil, "", storageclass, "") gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -218,6 +252,27 @@ var _ = ginkgo.Describe("[csi-tkgs-ha] Tkgs-HA-SanityTests", func() { svcPVCName := pv.Spec.CSI.VolumeHandle svcPVC := getPVCFromSupervisorCluster(svcPVCName) + if isStorageQuotaFSSEnabled { + totalquota_used_after, _ = getTotalQuotaConsumedByStoragePolicy(ctx, restConfig, storageclass.Name, namespace) + framework.Logf("totalquota_used_after :%v", totalquota_used_after) + + storagepolicyquota_pvc_after, _ = getStoragePolicyQuotaForSpecificResourceType(ctx, restConfig, + storageclass.Name, svNamespace, volExtensionName) + framework.Logf("********** storagepolicyquota_pvc_after :%v", storagepolicyquota_pvc_after) + + storagepolicy_usage_pvc_after, _ = getStoragePolicyUsageForSpecificResourceType(ctx, restConfig, + storageclass.Name, svNamespace, pvcUsage) + framework.Logf("********** 
pvc_Usage_Quota_After :%v", storagepolicy_usage_pvc_after) + + quotavalidationStatus := validate_totalStoragequota(ctx, diskSizeInMb, totalquota_used_before, totalquota_used_after) + gomega.Expect(quotavalidationStatus).NotTo(gomega.BeFalse()) + quotavalidationStatus = validate_totalStoragequota(ctx, diskSizeInMb, storagepolicyquota_pvc_before, storagepolicyquota_pvc_after) + gomega.Expect(quotavalidationStatus).NotTo(gomega.BeFalse()) + quotavalidationStatus = validate_totalStoragequota(ctx, diskSizeInMb, storagepolicy_usage_pvc_before, storagepolicy_usage_pvc_after) + gomega.Expect(quotavalidationStatus).NotTo(gomega.BeFalse()) + + } + defer func() { err := fpv.DeletePersistentVolumeClaim(ctx, client, pvclaim.Name, namespace) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -270,6 +325,23 @@ var _ = ginkgo.Describe("[csi-tkgs-ha] Tkgs-HA-SanityTests", func() { snapc, volumeSnapshotClass, pvclaim, volHandle, diskSize, false) gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + if isStorageQuotaFSSEnabled { + storagepolicy_usage_volSnapshot_after, _ = getStoragePolicyUsageForSpecificResourceType(ctx, restConfig, + storageclass.Name, svNamespace, snapshotUsage) + framework.Logf("********** pvc_Usage_Quota_After :%v", storagepolicy_usage_volSnapshot_after) + + storagepolicyquota_volSnapshot_after, _ = getStoragePolicyQuotaForSpecificResourceType(ctx, restConfig, + storageclass.Name, svNamespace, snapshotExtensionName) + framework.Logf("volume ********** storagepolicyquota_pvc_before :%v ", storagepolicyquota_volSnapshot_after) + + quotavalidationStatus := validate_totalStoragequota(ctx, diskSizeInMb, storagepolicyquota_volSnapshot_before, storagepolicyquota_volSnapshot_after) + gomega.Expect(quotavalidationStatus).NotTo(gomega.BeFalse()) + + quotavalidationStatus = validate_totalStoragequota(ctx, diskSizeInMb, storagepolicy_usage_volSnapshot_before, storagepolicy_usage_volSnapshot_after) + gomega.Expect(quotavalidationStatus).NotTo(gomega.BeFalse()) + + } defer func() { if snapshotContentCreated { err = deleteVolumeSnapshotContent(ctx, snapshotContent, snapc, pandoraSyncWaitTime) @@ -321,6 +393,29 @@ var _ = ginkgo.Describe("[csi-tkgs-ha] Tkgs-HA-SanityTests", func() { snapshotCreated, snapshotContentCreated, err = deleteVolumeSnapshot(ctx, snapc, namespace, volumeSnapshot, pandoraSyncWaitTime, volHandle, dynamicSnapshotId) gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + if isStorageQuotaFSSEnabled { + totalquota_used_after_Cleanup, totalReservedQuota_after_Cleanup := getTotalQuotaConsumedByStoragePolicy(ctx, restConfig, storageclass.Name, svNamespace) + framework.Logf("totalquota_used_after :%v, totalReservedQuota_after: %v totalQuota", totalquota_used_after_Cleanup, totalReservedQuota_after_Cleanup) + + storagepolicyquota_pvc_after_cleanup, pvc_reservedQuota_after_cleanup := getStoragePolicyQuotaForSpecificResourceType(ctx, + restConfig, storageclass.Name, svNamespace, volExtensionName) + framework.Logf("volume ********** storagepolicyquota_pvc_after_cleanup :%v, pvc_reservedQuota_after__cleanup: %v PolicyQuota", + storagepolicyquota_pvc_after_cleanup, pvc_reservedQuota_after_cleanup) + + pvc_Usage_Quota_After_cleanup, pvc_reserved_Quota_After_cleanup := getStoragePolicyUsageForSpecificResourceType(ctx, restConfig, + storageclass.Name, svNamespace, pvcUsage) + framework.Logf("volume ********** pvc_Usage_Quota_After :%v, pvc_reserved_Quota_After: %v ", pvc_Usage_Quota_After_cleanup, pvc_reserved_Quota_After_cleanup) + + quotavalidationStatus_afterCleanup := 
validate_totalStoragequota_afterCleanUp(ctx, diskSizeInMb, totalquota_used_after, totalquota_used_after_Cleanup) + gomega.Expect(quotavalidationStatus_afterCleanup).NotTo(gomega.BeFalse()) + quotavalidationStatus_afterCleanup = validate_totalStoragequota_afterCleanUp(ctx, diskSizeInMb, storagepolicyquota_pvc_after, storagepolicyquota_pvc_after_cleanup) + gomega.Expect(quotavalidationStatus_afterCleanup).NotTo(gomega.BeFalse()) + quotavalidationStatus_afterCleanup = validate_totalStoragequota_afterCleanUp(ctx, diskSizeInMb, storagepolicy_usage_pvc_after, pvc_Usage_Quota_After_cleanup) + gomega.Expect(quotavalidationStatus_afterCleanup).NotTo(gomega.BeFalse()) + reservedQuota := validate_reservedQuota_afterCleanUp(ctx, totalReservedQuota_after_Cleanup, pvc_reservedQuota_after_cleanup, pvc_reserved_Quota_After_cleanup) + gomega.Expect(reservedQuota).NotTo(gomega.BeFalse()) + } }) /* @@ -345,6 +440,10 @@ var _ = ginkgo.Describe("[csi-tkgs-ha] Tkgs-HA-SanityTests", func() { */ ginkgo.It("Stateful set - storage class with Zonal storage and wffc and"+ " with parallel pod management policy", func() { + + var totalquota_used_before, storagepolicyquota_pvc_before, storagepolicy_usage_pvc_before, totalquota_used_after *resource.Quantity + var storagepolicyquota_pvc_after, storagepolicy_usage_pvc_after *resource.Quantity + ctx, cancel := context.WithCancel(context.Background()) defer cancel() ginkgo.By("CNS_TEST: Running for GC setup") @@ -352,7 +451,10 @@ var _ = ginkgo.Describe("[csi-tkgs-ha] Tkgs-HA-SanityTests", func() { framework.ExpectNoError(err, "Unable to find ready and schedulable Node") ginkgo.By("Create statefulset with parallel pod management policy with replica 3") - createResourceQuota(client, namespace, rqLimit, zonalWffcPolicy) + //createResourceQuota(client, namespace, rqLimit, zonalWffcPolicy) + svcClient, svNamespace := getSvcClientAndNamespace() + setResourceQuota(svcClient, svNamespace, rqLimit) + restConfig := getRestConfigClient() scParameters[svStorageClassName] = zonalWffcPolicy storageclass, err := client.StorageV1().StorageClasses().Get(ctx, zonalWffcPolicy, metav1.GetOptions{}) if !apierrors.IsNotFound(err) { @@ -366,6 +468,20 @@ var _ = ginkgo.Describe("[csi-tkgs-ha] Tkgs-HA-SanityTests", func() { deleteService(namespace, client, service) }() + //on GC side though it uses labebinding sc , it points to immediate storage class in SVC + if isStorageQuotaFSSEnabled { + totalquota_used_before, _ = getTotalQuotaConsumedByStoragePolicy(ctx, restConfig, zonalPolicy, svNamespace) + framework.Logf("totalUsedQuota_Before :%v", totalquota_used_before) + + storagepolicyquota_pvc_before, _ = getStoragePolicyQuotaForSpecificResourceType(ctx, restConfig, + zonalPolicy, svNamespace, volExtensionName) + framework.Logf("volume ********** storagepolicyquota_pvc_before :%v ", storagepolicyquota_pvc_before) + + storagepolicy_usage_pvc_before, _ = getStoragePolicyUsageForSpecificResourceType(ctx, restConfig, + zonalPolicy, svNamespace, pvcUsage) + framework.Logf("volume ********** storagepolicy_usage_pvc_before :%v", storagepolicy_usage_pvc_before) + } + statefulset := GetStatefulSetFromManifest(namespace) ginkgo.By("Creating statefulset") statefulset.Spec.PodManagementPolicy = appsv1.ParallelPodManagement @@ -393,6 +509,29 @@ var _ = ginkgo.Describe("[csi-tkgs-ha] Tkgs-HA-SanityTests", func() { fmt.Sprintf("Volume: %s should not be present in the CNS after it is deleted from "+ "kubernetes", volumeHandle)) } + + if isStorageQuotaFSSEnabled { + totalquota_used_after_Cleanup, 
totalReservedQuota_after_Cleanup := getTotalQuotaConsumedByStoragePolicy(ctx, restConfig, zonalPolicy, svNamespace) + framework.Logf("totalquota_used_after :%v, totalReservedQuota_after: %v totalQuota", totalquota_used_after_Cleanup, totalReservedQuota_after_Cleanup) + + storagepolicyquota_pvc_after_cleanup, pvc_reservedQuota_after_cleanup := getStoragePolicyQuotaForSpecificResourceType(ctx, + restConfig, zonalPolicy, svNamespace, volExtensionName) + framework.Logf("volume ********** storagepolicyquota_pvc_after_cleanup :%v, pvc_reservedQuota_after__cleanup: %v PolicyQuota", + storagepolicyquota_pvc_after_cleanup, pvc_reservedQuota_after_cleanup) + + pvc_Usage_Quota_After_cleanup, pvc_reserved_Quota_After_cleanup := getStoragePolicyUsageForSpecificResourceType(ctx, restConfig, + zonalPolicy, svNamespace, pvcUsage) + framework.Logf("volume ********** pvc_Usage_Quota_After :%v, pvc_reserved_Quota_After: %v ", pvc_Usage_Quota_After_cleanup, pvc_reserved_Quota_After_cleanup) + + quotavalidationStatus_afterCleanup := validate_totalStoragequota_afterCleanUp(ctx, diskSizeInMb*3, totalquota_used_after, totalquota_used_after_Cleanup) + gomega.Expect(quotavalidationStatus_afterCleanup).NotTo(gomega.BeFalse()) + quotavalidationStatus_afterCleanup = validate_totalStoragequota_afterCleanUp(ctx, diskSizeInMb*3, storagepolicyquota_pvc_after, storagepolicyquota_pvc_after_cleanup) + gomega.Expect(quotavalidationStatus_afterCleanup).NotTo(gomega.BeFalse()) + quotavalidationStatus_afterCleanup = validate_totalStoragequota_afterCleanUp(ctx, diskSizeInMb*3, storagepolicy_usage_pvc_after, pvc_Usage_Quota_After_cleanup) + gomega.Expect(quotavalidationStatus_afterCleanup).NotTo(gomega.BeFalse()) + reservedQuota := validate_reservedQuota_afterCleanUp(ctx, totalReservedQuota_after_Cleanup, pvc_reservedQuota_after_cleanup, pvc_reserved_Quota_After_cleanup) + gomega.Expect(reservedQuota).NotTo(gomega.BeFalse()) + } }() verifyVolumeMetadataOnStatefulsets(client, ctx, namespace, statefulset, replicas, @@ -421,7 +560,9 @@ var _ = ginkgo.Describe("[csi-tkgs-ha] Tkgs-HA-SanityTests", func() { ginkgo.By("CNS_TEST: Running for GC setup") ginkgo.By("Creating Pvc with Immediate topology storageclass") - createResourceQuota(client, namespace, rqLimit, zonalPolicy) + //createResourceQuota(client, namespace, rqLimit, zonalPolicy) + svcClient, svNamespace := getSvcClientAndNamespace() + setResourceQuota(svcClient, svNamespace, rqLimit) scParameters[svStorageClassName] = zonalPolicy storageclass, err := client.StorageV1().StorageClasses().Get(ctx, zonalPolicy, metav1.GetOptions{}) if !apierrors.IsNotFound(err) { @@ -557,6 +698,7 @@ var _ = ginkgo.Describe("[csi-tkgs-ha] Tkgs-HA-SanityTests", func() { 11.Clear all PVC,POD and sc */ ginkgo.It("Verify Online Volume expansion using zonal storage", func() { + var totalquota_used_before, storagepolicyquota_pvc_before, storagepolicy_usage_pvc_before *resource.Quantity ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -564,12 +706,16 @@ var _ = ginkgo.Describe("[csi-tkgs-ha] Tkgs-HA-SanityTests", func() { nodeList, _ := fnodes.GetReadySchedulableNodes(ctx, client) ginkgo.By("Creating Pvc with Immediate topology storageclass") - createResourceQuota(client, namespace, rqLimit, zonalPolicy) + //createResourceQuota(client, namespace, rqLimit, zonalPolicy) + svcClient, svNamespace := getSvcClientAndNamespace() + setResourceQuota(svcClient, svNamespace, rqLimit) + restConfig := getRestConfigClient() scParameters[svStorageClassName] = zonalPolicy storageclass, err := 
client.StorageV1().StorageClasses().Get(ctx, zonalPolicy, metav1.GetOptions{}) if !apierrors.IsNotFound(err) { gomega.Expect(err).NotTo(gomega.HaveOccurred()) } + pvclaim, err := createPVC(ctx, client, namespace, nil, "", storageclass, "") gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -584,6 +730,19 @@ var _ = ginkgo.Describe("[csi-tkgs-ha] Tkgs-HA-SanityTests", func() { svcPVCName := pv.Spec.CSI.VolumeHandle svcPVC := getPVCFromSupervisorCluster(svcPVCName) + if isStorageQuotaFSSEnabled { + totalquota_used_before, _ = getTotalQuotaConsumedByStoragePolicy(ctx, restConfig, storageclass.Name, svNamespace) + framework.Logf("totalUsedQuota_Before :%v", totalquota_used_before) + + storagepolicyquota_pvc_before, _ = getStoragePolicyQuotaForSpecificResourceType(ctx, restConfig, + storageclass.Name, svNamespace, volExtensionName) + framework.Logf("volume ********** storagepolicyquota_pvc_before :%v ", storagepolicyquota_pvc_before) + + storagepolicy_usage_pvc_before, _ = getStoragePolicyUsageForSpecificResourceType(ctx, restConfig, + storageclass.Name, svNamespace, pvcUsage) + framework.Logf("volume ********** storagepolicy_usage_pvc_before :%v", storagepolicy_usage_pvc_before) + } + defer func() { err := fpv.DeletePersistentVolumeClaim(ctx, client, pvclaim.Name, namespace) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -628,6 +787,25 @@ var _ = ginkgo.Describe("[csi-tkgs-ha] Tkgs-HA-SanityTests", func() { verifyOnlineVolumeExpansionOnGc(client, namespace, svcPVCName, volHandle, pvclaim, pod, f) + if isStorageQuotaFSSEnabled { + totalquota_used_afterExpansion, _ := getTotalQuotaConsumedByStoragePolicy(ctx, restConfig, storageclass.Name, svNamespace) + framework.Logf("totalquota_used_after :%v", totalquota_used_afterExpansion) + quotavalidationStatus_afterexpansion := validate_increasedQuota(ctx, diskSizeInMb, totalquota_used_before, totalquota_used_afterExpansion) + gomega.Expect(quotavalidationStatus_afterexpansion).NotTo(gomega.BeFalse()) + + storagepolicyquota_pvc_after_expansion, _ := getStoragePolicyQuotaForSpecificResourceType(ctx, restConfig, + storageclass.Name, svNamespace, volExtensionName) + framework.Logf("volume ********** storagepolicyquota_pvc_after_cleanup :%v", storagepolicyquota_pvc_after_expansion) + quotavalidationStatus_afterexpansion = validate_increasedQuota(ctx, diskSizeInMb, storagepolicyquota_pvc_before, storagepolicyquota_pvc_after_expansion) + gomega.Expect(quotavalidationStatus_afterexpansion).NotTo(gomega.BeFalse()) + + pvc_Usage_Quota_After_expansion, _ := getStoragePolicyUsageForSpecificResourceType(ctx, restConfig, + storageclass.Name, svNamespace, pvcUsage) + framework.Logf("volume ********** pvc_Usage_Quota_After :%v", pvc_Usage_Quota_After_expansion) + quotavalidationStatus_afterexpansion = validate_increasedQuota(ctx, diskSizeInMb, storagepolicyquota_pvc_before, storagepolicyquota_pvc_after_expansion) + gomega.Expect(quotavalidationStatus_afterexpansion).NotTo(gomega.BeFalse()) + } + }) /* @@ -652,6 +830,7 @@ var _ = ginkgo.Describe("[csi-tkgs-ha] Tkgs-HA-SanityTests", func() { 12.Clear all PVC,POD and sc */ ginkgo.It("Verify offline Volume expansion using zonal storage", func() { + var totalquota_used_before, storagepolicyquota_pvc_before, storagepolicy_usage_pvc_before *resource.Quantity ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -660,7 +839,10 @@ var _ = ginkgo.Describe("[csi-tkgs-ha] Tkgs-HA-SanityTests", func() { framework.ExpectNoError(err, "Unable to find ready and schedulable Node") ginkgo.By("Creating Pvc with 
Immediate topology storageclass") - createResourceQuota(client, namespace, rqLimit, zonalPolicy) + //createResourceQuota(client, namespace, rqLimit, zonalPolicy) + svcClient, svNamespace := getSvcClientAndNamespace() + setResourceQuota(svcClient, svNamespace, rqLimit) + restConfig := getRestConfigClient() scParameters[svStorageClassName] = zonalPolicy storageclass, err := client.StorageV1().StorageClasses().Get(ctx, zonalPolicy, metav1.GetOptions{}) if !apierrors.IsNotFound(err) { @@ -680,6 +862,19 @@ var _ = ginkgo.Describe("[csi-tkgs-ha] Tkgs-HA-SanityTests", func() { svcPVCName := pv.Spec.CSI.VolumeHandle svcPVC := getPVCFromSupervisorCluster(svcPVCName) + if isStorageQuotaFSSEnabled { + totalquota_used_before, _ = getTotalQuotaConsumedByStoragePolicy(ctx, restConfig, storageclass.Name, svNamespace) + framework.Logf("totalUsedQuota_Before :%v", totalquota_used_before) + + storagepolicyquota_pvc_before, _ = getStoragePolicyQuotaForSpecificResourceType(ctx, restConfig, + storageclass.Name, svNamespace, volExtensionName) + framework.Logf("volume ********** storagepolicyquota_pvc_before :%v ", storagepolicyquota_pvc_before) + + storagepolicy_usage_pvc_before, _ = getStoragePolicyUsageForSpecificResourceType(ctx, restConfig, + storageclass.Name, svNamespace, pvcUsage) + framework.Logf("volume ********** storagepolicy_usage_pvc_before :%v", storagepolicy_usage_pvc_before) + } + defer func() { err := fpv.DeletePersistentVolumeClaim(ctx, client, pvclaim.Name, namespace) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -723,6 +918,25 @@ var _ = ginkgo.Describe("[csi-tkgs-ha] Tkgs-HA-SanityTests", func() { verifyOfflineVolumeExpansionOnGc(ctx, client, pvclaim, svcPVCName, namespace, volHandle, pod, pv, f) + if isStorageQuotaFSSEnabled { + totalquota_used_afterExpansion, _ := getTotalQuotaConsumedByStoragePolicy(ctx, restConfig, storageclass.Name, svNamespace) + framework.Logf("totalquota_used_after :%v", totalquota_used_afterExpansion) + quotavalidationStatus_afterexpansion := validate_increasedQuota(ctx, diskSizeInMb, totalquota_used_before, totalquota_used_afterExpansion) + gomega.Expect(quotavalidationStatus_afterexpansion).NotTo(gomega.BeFalse()) + + storagepolicyquota_pvc_after_expansion, _ := getStoragePolicyQuotaForSpecificResourceType(ctx, restConfig, + storageclass.Name, svNamespace, volExtensionName) + framework.Logf("volume ********** storagepolicyquota_pvc_after_cleanup :%v", storagepolicyquota_pvc_after_expansion) + quotavalidationStatus_afterexpansion = validate_increasedQuota(ctx, diskSizeInMb, storagepolicyquota_pvc_before, storagepolicyquota_pvc_after_expansion) + gomega.Expect(quotavalidationStatus_afterexpansion).NotTo(gomega.BeFalse()) + + pvc_Usage_Quota_After_expansion, _ := getStoragePolicyUsageForSpecificResourceType(ctx, restConfig, + storageclass.Name, svNamespace, pvcUsage) + framework.Logf("volume ********** pvc_Usage_Quota_After :%v", pvc_Usage_Quota_After_expansion) + quotavalidationStatus_afterexpansion = validate_increasedQuota(ctx, diskSizeInMb, storagepolicyquota_pvc_before, storagepolicyquota_pvc_after_expansion) + gomega.Expect(quotavalidationStatus_afterexpansion).NotTo(gomega.BeFalse()) + } + }) /* @@ -745,14 +959,18 @@ var _ = ginkgo.Describe("[csi-tkgs-ha] Tkgs-HA-SanityTests", func() { 15. Delete pod, gc1-pv and gc1-pvc and svc pvc. 
*/ ginkgo.It("Static volume provisioning using zonal storage", func() { + var totalquota_used_before, storagepolicyquota_pvc_before, storagepolicy_usage_pvc_before, totalquota_used_after *resource.Quantity + var storagepolicyquota_pvc_after, storagepolicy_usage_pvc_after *resource.Quantity + ctx, cancel := context.WithCancel(context.Background()) defer cancel() ginkgo.By("CNS_TEST: Running for GC setup") nodeList, err := fnodes.GetReadySchedulableNodes(ctx, client) framework.ExpectNoError(err, "Unable to find ready and schedulable Node") - svClient, svNamespace := getSvcClientAndNamespace() + setResourceQuota(svcClient, svNamespace, rqLimit) + restConfig := getRestConfigClient() pvcAnnotations := make(map[string]string) annotationVal := "[" var topoList []string @@ -769,12 +987,28 @@ var _ = ginkgo.Describe("[csi-tkgs-ha] Tkgs-HA-SanityTests", func() { framework.Logf("annotationVal :%s, pvcAnnotations: %v", annotationVal, pvcAnnotations) ginkgo.By("Creating Pvc with Immediate topology storageclass") - createResourceQuota(client, namespace, rqLimit, zonalPolicy) + //createResourceQuota(client, namespace, rqLimit, zonalPolicy) + svcClient, svNamespace := getSvcClientAndNamespace() + setResourceQuota(svcClient, svNamespace, rqLimit) scParameters[svStorageClassName] = zonalPolicy storageclass, err := client.StorageV1().StorageClasses().Get(ctx, zonalPolicy, metav1.GetOptions{}) if !apierrors.IsNotFound(err) { gomega.Expect(err).NotTo(gomega.HaveOccurred()) } + + if isStorageQuotaFSSEnabled { + totalquota_used_before, _ = getTotalQuotaConsumedByStoragePolicy(ctx, restConfig, storageclass.Name, svNamespace) + framework.Logf("totalUsedQuota_Before :%v", totalquota_used_before) + + storagepolicyquota_pvc_before, _ = getStoragePolicyQuotaForSpecificResourceType(ctx, restConfig, + storageclass.Name, svNamespace, volExtensionName) + framework.Logf("volume ********** storagepolicyquota_pvc_before :%v ", storagepolicyquota_pvc_before) + + storagepolicy_usage_pvc_before, _ = getStoragePolicyUsageForSpecificResourceType(ctx, restConfig, + storageclass.Name, svNamespace, pvcUsage) + framework.Logf("volume ********** storagepolicy_usage_pvc_before :%v", storagepolicy_usage_pvc_before) + } + pvcSpec := getPersistentVolumeClaimSpecWithStorageClass(svNamespace, "", storageclass, nil, "") pvcSpec.Annotations = pvcAnnotations svPvclaim, err := svClient.CoreV1().PersistentVolumeClaims(svNamespace).Create(context.TODO(), @@ -866,6 +1100,27 @@ var _ = ginkgo.Describe("[csi-tkgs-ha] Tkgs-HA-SanityTests", func() { staticPv.Spec.CSI.VolumeHandle, pod.Spec.NodeName)) }() + if isStorageQuotaFSSEnabled { + totalquota_used_after, _ = getTotalQuotaConsumedByStoragePolicy(ctx, restConfig, storageclass.Name, namespace) + framework.Logf("totalquota_used_after :%v", totalquota_used_after) + + storagepolicyquota_pvc_after, _ = getStoragePolicyQuotaForSpecificResourceType(ctx, restConfig, + storageclass.Name, svNamespace, volExtensionName) + framework.Logf("********** storagepolicyquota_pvc_after :%v", storagepolicyquota_pvc_after) + + storagepolicy_usage_pvc_after, _ = getStoragePolicyUsageForSpecificResourceType(ctx, restConfig, + storageclass.Name, svNamespace, pvcUsage) + framework.Logf("********** pvc_Usage_Quota_After :%v", storagepolicy_usage_pvc_after) + + quotavalidationStatus := validate_totalStoragequota(ctx, diskSizeInMb, totalquota_used_before, totalquota_used_after) + gomega.Expect(quotavalidationStatus).NotTo(gomega.BeFalse()) + quotavalidationStatus = validate_totalStoragequota(ctx, diskSizeInMb, 
storagepolicyquota_pvc_before, storagepolicyquota_pvc_after) + gomega.Expect(quotavalidationStatus).NotTo(gomega.BeFalse()) + quotavalidationStatus = validate_totalStoragequota(ctx, diskSizeInMb, storagepolicy_usage_pvc_before, storagepolicy_usage_pvc_after) + gomega.Expect(quotavalidationStatus).NotTo(gomega.BeFalse()) + + } + _, err = verifyPodLocationLevel5(pod, nodeList, allowedTopologyHAMap) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -971,7 +1226,9 @@ var _ = ginkgo.Describe("[csi-tkgs-ha] Tkgs-HA-SanityTests", func() { nodeList, _ := fnodes.GetReadySchedulableNodes(ctx, client) ginkgo.By("Create statefulset with parallel pod management policy with replica 3") - createResourceQuota(client, namespace, rqLimit, zonalPolicy) + //createResourceQuota(client, namespace, rqLimit, zonalPolicy) + svcClient, svNamespace := getSvcClientAndNamespace() + setResourceQuota(svcClient, svNamespace, rqLimit) scParameters[svStorageClassName] = zonalWffcPolicy storageclass, err := client.StorageV1().StorageClasses().Get(ctx, zonalPolicy, metav1.GetOptions{}) if !apierrors.IsNotFound(err) { @@ -1179,16 +1436,22 @@ var _ = ginkgo.Describe("[csi-tkgs-ha] Tkgs-HA-SanityTests", func() { }() ginkgo.By("Decrease SVC storage policy resource quota") - svcClient, svNamespace := getSvcClientAndNamespace() - quotaName := svcNamespace + "-storagequota" - framework.Logf("quotaName: %s", quotaName) - resourceQuota := newTestResourceQuota(quotaName, "10Mi", zonalPolicy) - resourceQuota, err = svcClient.CoreV1().ResourceQuotas(svNamespace).Update( - ctx, resourceQuota, metav1.UpdateOptions{}) + // svcClient, svNamespace := getSvcClientAndNamespace() + // quotaName := svcNamespace + "-storagequota" + // framework.Logf("quotaName: %s", quotaName) + // resourceQuota := newTestResourceQuota(quotaName, "10Mi", zonalPolicy) + // resourceQuota, err = svcClient.CoreV1().ResourceQuotas(svNamespace).Update( + // ctx, resourceQuota, metav1.UpdateOptions{}) + // ginkgo.By("create resource quota") + setStoragePolicyQuota(ctx, restConfig, storageclass.Name, namespace, "10Mi") gomega.Expect(err).NotTo(gomega.HaveOccurred()) - ginkgo.By(fmt.Sprintf("Create Resource quota: %+v", resourceQuota)) - framework.Logf("Sleeping for 15 seconds to claim resource quota fully") - time.Sleep(time.Duration(15) * time.Second) + defer func() { + setStoragePolicyQuota(ctx, restConfig, storageclass.Name, namespace, rqLimit) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + // ginkgo.By(fmt.Sprintf("Create Resource quota: %+v", resourceQuota)) + // framework.Logf("Sleeping for 15 seconds to claim resource quota fully") + // time.Sleep(time.Duration(15) * time.Second) ginkgo.By("Create statefulset with parallel pod management policy with replica 1") statefulset := GetStatefulSetFromManifest(namespace) @@ -1242,14 +1505,16 @@ var _ = ginkgo.Describe("[csi-tkgs-ha] Tkgs-HA-SanityTests", func() { } ginkgo.By("Increase SVC storagepolicy resource quota") - framework.Logf("quotaName: %s", quotaName) - resourceQuota = newTestResourceQuota(quotaName, rqLimit, zonalPolicy) - resourceQuota, err = svcClient.CoreV1().ResourceQuotas(svNamespace).Update( - ctx, resourceQuota, metav1.UpdateOptions{}) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - ginkgo.By(fmt.Sprintf("ResourceQuota details: %+v", resourceQuota)) - framework.Logf("Sleeping for 15 seconds to claim resource quota fully") - time.Sleep(time.Duration(15) * time.Second) + setStoragePolicyQuota(ctx, restConfig, storageclass.Name, namespace, rqLimit) + 
gomega.Expect(err).NotTo(gomega.HaveOccurred()) + // framework.Logf("quotaName: %s", quotaName) + // resourceQuota = newTestResourceQuota(quotaName, rqLimit, zonalPolicy) + // resourceQuota, err = svcClient.CoreV1().ResourceQuotas(svNamespace).Update( + // ctx, resourceQuota, metav1.UpdateOptions{}) + // gomega.Expect(err).NotTo(gomega.HaveOccurred()) + // ginkgo.By(fmt.Sprintf("ResourceQuota details: %+v", resourceQuota)) + // framework.Logf("Sleeping for 15 seconds to claim resource quota fully") + // time.Sleep(time.Duration(15) * time.Second) ginkgo.By("Verify annotations on SVC PV and required node affinity details on SVC PV and GC PV") ginkgo.By("Verify pod gets scheduled on appropriate nodes preset in the availability zone") diff --git a/tests/e2e/tkgs_ha_utils.go b/tests/e2e/tkgs_ha_utils.go index 82eaf7daa2..c0230307e4 100644 --- a/tests/e2e/tkgs_ha_utils.go +++ b/tests/e2e/tkgs_ha_utils.go @@ -167,7 +167,9 @@ func verifyVolumeProvisioningWithServiceDown(serviceName string, namespace strin }() ginkgo.By("Create statefulset with default pod management policy with replica 3") - createResourceQuota(client, namespace, rqLimit, storagePolicyName) + //createResourceQuota(client, namespace, rqLimit, storagePolicyName) + svcClient, svNamespace := getSvcClientAndNamespace() + setResourceQuota(svcClient, svNamespace, rqLimit) storageclass, err := client.StorageV1().StorageClasses().Get(ctx, storagePolicyName, metav1.GetOptions{}) if !apierrors.IsNotFound(err) { gomega.Expect(err).NotTo(gomega.HaveOccurred()) diff --git a/tests/e2e/util.go b/tests/e2e/util.go index 2456b8deb9..746d3aeb84 100644 --- a/tests/e2e/util.go +++ b/tests/e2e/util.go @@ -1446,25 +1446,21 @@ func invokeVCenterServiceControl(ctx context.Context, command, service, host str } /* - Note: As per PR #2935677, even if cns_new_sync is enabled volume expansion - will not work if sps-service is down. - Keeping this code for reference. Disabling isFssEnabled util method as we won't be using - this util method in testcases. - isFssEnabled invokes the given command to check if vCenter has a particular FSS enabled or not +isFssEnabled invokes the given command to check if vCenter has a particular FSS enabled or not */ -// func isFssEnabled(host, fss string) bool { -// sshCmd := fmt.Sprintf("python /usr/sbin/feature-state-wrapper.py %s", fss) -// framework.Logf("Checking if fss is enabled on vCenter host %v", host) -// result, err := fssh.SSH(ctx, sshCmd, host, framework.TestContext.Provider) -// fssh.LogResult(result) -// if err == nil && result.Code == 0 { -// return strings.TrimSpace(result.Stdout) == "enabled" -// } else { -// ginkgo.By(fmt.Sprintf("couldn't execute command: %s on vCenter host: %v", sshCmd, err)) -// gomega.Expect(err).NotTo(gomega.HaveOccurred()) -// } -// return false -// } +func isFssEnabled(ctx context.Context, host, fss string) bool { + sshCmd := fmt.Sprintf("python /usr/sbin/feature-state-wrapper.py %s", fss) + framework.Logf("Checking if fss is enabled on vCenter host %v", host) + result, err := fssh.SSH(ctx, sshCmd, host, framework.TestContext.Provider) + fssh.LogResult(result) + if err == nil && result.Code == 0 { + return strings.TrimSpace(result.Stdout) == "enabled" + } else { + ginkgo.By(fmt.Sprintf("couldn't execute command: %s on vCenter host: %v", sshCmd, err)) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + return false +} // waitVCenterServiceToBeInState invokes the status check for the given service and waits // via service-control on the given vCenter host over SSH. 
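For reference, the consumer of the re-enabled isFssEnabled helper is the per-suite BeforeEach gating added by this change; a minimal sketch of that gating (assuming the e2eVSphere config, sshdPort, ctx, and the isStorageQuotaFSSEnabled variable already used elsewhere in this patch) looks like the following:

// Sketch only: gate the new storage-quota assertions on the vCenter feature switch.
vcAddress := e2eVSphere.Config.Global.VCenterHostname + ":" + sshdPort
isStorageQuotaFSSEnabled = isFssEnabled(ctx, vcAddress, "STORAGE_QUOTA_M2")
if isStorageQuotaFSSEnabled {
	// capture the "before" quota counters here, as the tests above do
}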
@@ -7026,3 +7022,232 @@ func removeStoragePolicyQuota(ctx context.Context, restClientConfig *rest.Config framework.Logf("Quota after removing: %s", spq.Spec.Limit) } + +// Get storagePolicyQuota consumption based on resourceType (i.e., either volume, snapshot, vmservice) +func getStoragePolicyQuotaForSpecificResourceType(ctx context.Context, restClientConfig *rest.Config, + scName string, namespace string, extensionType string) (*resource.Quantity, *resource.Quantity) { + var usedQuota, reservedQuota *resource.Quantity + cnsOperatorClient, err := k8s.NewClientForGroup(ctx, restClientConfig, cnsoperatorv1alpha1.GroupName) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + spq := &storagepolicyv1alpha2.StoragePolicyQuota{} + err = cnsOperatorClient.Get(ctx, + pkgtypes.NamespacedName{Name: scName + storagePolicyQuota, Namespace: namespace}, spq) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + scLevelQuotaStatusList := spq.Status.SCLevelQuotaStatuses + + for _, item := range scLevelQuotaStatusList { + if item.StorageClassName == scName { + resourceTypeLevelQuotaStatusList := spq.Status.ResourceTypeLevelQuotaStatuses + + for _, item := range resourceTypeLevelQuotaStatusList { + if item.ResourceExtensionName == extensionType { + usedQuota = item.ResourceTypeSCLevelQuotaStatuses[0].SCLevelQuotaUsage.Used + reservedQuota = item.ResourceTypeSCLevelQuotaStatuses[0].SCLevelQuotaUsage.Reserved + ginkgo.By(fmt.Sprintf("usedQuota %v, reservedQuota %v", usedQuota, reservedQuota)) + break + } + + } + } + } + + return usedQuota, reservedQuota +} + +// Get total quota consumption by storagePolicy +func getTotalQuotaConsumedByStoragePolicy(ctx context.Context, restClientConfig *rest.Config, + scName string, namespace string) (*resource.Quantity, *resource.Quantity) { + var usedQuota, reservedQuota *resource.Quantity + cnsOperatorClient, err := k8s.NewClientForGroup(ctx, restClientConfig, cnsoperatorv1alpha1.GroupName) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + spq := &storagepolicyv1alpha2.StoragePolicyQuota{} + err = cnsOperatorClient.Get(ctx, + pkgtypes.NamespacedName{Name: scName + storagePolicyQuota, Namespace: namespace}, spq) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + scLevelQuotaStatusList := spq.Status.SCLevelQuotaStatuses + for _, item := range scLevelQuotaStatusList { + if item.StorageClassName == scName { + usedQuota = item.SCLevelQuotaUsage.Used + reservedQuota = item.SCLevelQuotaUsage.Reserved + ginkgo.By(fmt.Sprintf("usedQuota %v, reservedQuota %v", usedQuota, reservedQuota)) + } + } + return usedQuota, reservedQuota +} + +// Get getStoragePolicyUsageForSpecificResourceType based on resourceType (i.e., either volume, snapshot, vmservice) +// resourceUsage will be either pvcUsage, vmUsage and snapshotUsage +func getStoragePolicyUsageForSpecificResourceType(ctx context.Context, restClientConfig *rest.Config, + scName string, namespace string, resourceUsage string) (*resource.Quantity, *resource.Quantity) { + var usedQuota, reservedQuota *resource.Quantity + cnsOperatorClient, err := k8s.NewClientForGroup(ctx, restClientConfig, cnsoperatorv1alpha1.GroupName) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + //spq := &storagepolicyv1alpha2.StoragePolicyQuota{} + spq := &storagepolicyv1alpha2.StoragePolicyUsage{} + err = cnsOperatorClient.Get(ctx, + pkgtypes.NamespacedName{Name: scName + resourceUsage, Namespace: namespace}, spq) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + if spq.Status.ResourceTypeLevelQuotaUsage.DeepCopy() == nil { + zeroQuantity := 
resource.MustParse("0") + usedQuota = &zeroQuantity + reservedQuota = &zeroQuantity + } else { + usedQuota = spq.Status.ResourceTypeLevelQuotaUsage.Used + reservedQuota = spq.Status.ResourceTypeLevelQuotaUsage.Reserved + } + return usedQuota, reservedQuota +} + +func validate_totalStoragequota(ctx context.Context, diskSize int64, totalUsedQuotaBefore *resource.Quantity, totalUsedQuotaAfter *resource.Quantity) bool { + var validTotalQuota bool + validTotalQuota = false + + out := make([]byte, 0, 64) + result, suffix := totalUsedQuotaBefore.CanonicalizeBytes(out) + value := string(result) + quotaBefore, err := strconv.ParseInt(string(value), 10, 64) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By(fmt.Sprintf(" quotaBefore : %v%s", quotaBefore, string(suffix))) + + result1, suffix1 := totalUsedQuotaAfter.CanonicalizeBytes(out) + value1 := string(result1) + quotaAfter, err := strconv.ParseInt(string(value1), 10, 64) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By(fmt.Sprintf(" quotaAfter : %v%s", quotaAfter, string(suffix1))) + + if string(suffix1) == "Gi" { + var bytes int64 = diskSize + // Convert to Gi + //kibibytes := float64(bytes) / 1024 + diskSize = int64(bytes) / 1024 + fmt.Printf("diskSize: %dGi\n", diskSize) + + } + + if quotaBefore+diskSize == quotaAfter { + validTotalQuota = true + ginkgo.By(fmt.Sprintf("quotaBefore+diskSize: %v, quotaAfter : %v", quotaBefore+diskSize, quotaAfter)) + ginkgo.By(fmt.Sprintf("diskSize: %v", diskSize)) + ginkgo.By(fmt.Sprintf("validTotalQuota on storagePolicy: %v", validTotalQuota)) + + } + return validTotalQuota +} + +func validate_totalStoragequota_afterCleanUp(ctx context.Context, diskSize int64, totalUsedQuotaBefore *resource.Quantity, totalUsedQuotaAfterCleanup *resource.Quantity) bool { + var validTotalQuota bool + validTotalQuota = false + + out := make([]byte, 0, 64) + result, suffix := totalUsedQuotaBefore.CanonicalizeBytes(out) + value := string(result) + quotaBefore, err := strconv.ParseInt(string(value), 10, 64) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By(fmt.Sprintf(" quotaBefore : %v%s", quotaBefore, string(suffix))) + + result1, suffix1 := totalUsedQuotaAfterCleanup.CanonicalizeBytes(out) + value1 := string(result1) + quotaAfter, err := strconv.ParseInt(string(value1), 10, 64) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + ginkgo.By(fmt.Sprintf(" quotaAfter : %v%s", quotaAfter, string(suffix1))) + + if string(suffix) == "Gi" { + var bytes int64 = diskSize + // Convert to Gi + //kibibytes := float64(bytes) / 1024 + diskSize = int64(bytes) / 1024 + fmt.Printf("diskSize: %dGi\n", diskSize) + + } + + if quotaBefore-diskSize == quotaAfter { + validTotalQuota = true + ginkgo.By(fmt.Sprintf("quotaBefore +diskSize: %v, quotaAfter : %v", quotaBefore-diskSize, quotaAfter)) + ginkgo.By(fmt.Sprintf("validTotalQuota on storagePolicy: %v", validTotalQuota)) + + } + return validTotalQuota +} + +// validate_reservedQuota_afterCleanUp after the volume goes to bound state or after teast clean up , expected reserved quota should be "0" +func validate_reservedQuota_afterCleanUp(ctx context.Context, total_reservedQuota *resource.Quantity, policy_reservedQuota *resource.Quantity, storagepolicyUsage_reserved_Quota *resource.Quantity) bool { + ginkgo.By(fmt.Sprintf("reservedQuota on total storageQuota CR: %v,storagePolicyQuota CR: %v, storagePolicyUsage CR: %v ", total_reservedQuota.String(), policy_reservedQuota.String(), storagepolicyUsage_reserved_Quota.String())) + return total_reservedQuota.String() 
== "0" && policy_reservedQuota.String() == "0" && storagepolicyUsage_reserved_Quota.String() == "0" + +} + +func validate_increasedQuota(ctx context.Context, diskSize int64, totalUsedQuotaBefore *resource.Quantity, totalUsedQuotaAfterexpansion *resource.Quantity) bool { + var validTotalQuota bool + validTotalQuota = false + + out := make([]byte, 0, 64) + result, suffix := totalUsedQuotaBefore.CanonicalizeBytes(out) + value := string(result) + quotaBefore, err := strconv.ParseInt(string(value), 10, 64) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By(fmt.Sprintf(" quotaBefore : %v%s", quotaBefore, string(suffix))) + + result1, suffix1 := totalUsedQuotaAfterexpansion.CanonicalizeBytes(out) + value1 := string(result1) + quotaAfter, err := strconv.ParseInt(string(value1), 10, 64) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + ginkgo.By(fmt.Sprintf(" quotaAfter : %v%s", quotaAfter, string(suffix1))) + + if string(suffix) == "Gi" { + var bytes int64 = diskSize + // Convert to Gi + //kibibytes := float64(bytes) / 1024 + diskSize = int64(bytes) / 1024 + fmt.Printf("diskSize: %dGi\n", diskSize) + + } + + if quotaBefore < quotaAfter { + validTotalQuota = true + ginkgo.By(fmt.Sprintf("quotaBefore +diskSize: %v, quotaAfter : %v", quotaBefore, quotaAfter)) + ginkgo.By(fmt.Sprintf("validTotalQuota on storagePolicy: %v", validTotalQuota)) + + } + return validTotalQuota +} + +// stopCSIPods function stops all the running csi pods +func stopKubeSystemPods(ctx context.Context, client clientset.Interface, namespace string) (bool, error) { + collectPodLogs(ctx, client, kubeSystemNamespace) + isServiceStopped := false + err := updateDeploymentReplicawithWait(client, 0, storageQuotaWebhookPrefix, + namespace) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + isServiceStopped = true + return isServiceStopped, err +} + +// startCSIPods function starts the csi pods and waits till all the pods comes up +func startKubeSystemPods(ctx context.Context, client clientset.Interface, csiReplicas int32, + namespace string) (bool, error) { + ignoreLabels := make(map[string]string) + err := updateDeploymentReplicawithWait(client, csiReplicas, storageQuotaWebhookPrefix, + namespace) + if err != nil { + return true, err + } + // Wait for the CSI Pods to be up and Running + list_of_pods, err := fpod.GetPodsInNamespace(ctx, client, namespace, ignoreLabels) + if err != nil { + return true, err + } + num_csi_pods := len(list_of_pods) + err = fpod.WaitForPodsRunningReady(ctx, client, namespace, int32(num_csi_pods), 0, + pollTimeout) + isServiceStopped := false + return isServiceStopped, err +} diff --git a/tests/e2e/vmservice_vm.go b/tests/e2e/vmservice_vm.go index 7673994740..f99e6014db 100644 --- a/tests/e2e/vmservice_vm.go +++ b/tests/e2e/vmservice_vm.go @@ -28,9 +28,13 @@ import ( "github.com/onsi/ginkgo/v2" "github.com/onsi/gomega" + "github.com/vmware/govmomi/find" + "github.com/vmware/govmomi/object" vmopv1 "github.com/vmware-tanzu/vm-operator/api/v1alpha1" v1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" clientset "k8s.io/client-go/kubernetes" @@ -63,6 +67,8 @@ var _ bool = ginkgo.Describe("[vmsvc] vm service with csi vol tests", func() { isVsanHealthServiceStopped bool isSPSserviceStopped bool vcAddress string + isStorageQuotaFSSEnabled bool + defaultDatastore *object.Datastore ) ginkgo.BeforeEach(func() { @@ -120,6 +126,30 @@ var _ bool = 
ginkgo.Describe("[vmsvc] vm service with csi vol tests", func() { namespace, vmImageName) vmi = waitNGetVmiForImageName(ctx, vmopC, namespace, vmImageName) gomega.Expect(vmi).NotTo(gomega.BeEmpty()) + + vcAddress := e2eVSphere.Config.Global.VCenterHostname + ":" + sshdPort + isStorageQuotaFSSEnabled = isFssEnabled(ctx, vcAddress, "STORAGE_QUOTA_M2") + + var datacenters []string + datastoreURL = GetAndExpectStringEnvVar(envSharedDatastoreURL) + + finder := find.NewFinder(e2eVSphere.Client.Client, false) + cfg, err := getConfig() + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + dcList := strings.Split(cfg.Global.Datacenters, ",") + for _, dc := range dcList { + dcName := strings.TrimSpace(dc) + if dcName != "" { + datacenters = append(datacenters, dcName) + } + } + for _, dc := range datacenters { + defaultDatacenter, err := finder.Datacenter(ctx, dc) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + finder.SetDatacenter(defaultDatacenter) + defaultDatastore, err = getDatastoreByURL(ctx, datastoreURL, defaultDatacenter) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } }) ginkgo.AfterEach(func() { @@ -1335,4 +1365,193 @@ var _ bool = ginkgo.Describe("[vmsvc] vm service with csi vol tests", func() { _ = formatNVerifyPvcIsAccessible(vm.Status.Volumes[0].DiskUuid, 1, vmIp) }) + /* + Basic test + Steps: + 1 Assign a spbm policy to test namespace with sufficient quota + 2 Create a PVC say pvc1 + 3 Create a VMservice VM say vm1, pvc1 + 4 verify pvc1 CNS metadata. + 5 Once the vm1 is up verify that the volume is accessible inside vm1 + 6 Delete vm1 + 7 delete pvc1 + 8 Remove spbm policy attached to test namespace + + statically provisioned CSI volumes + Steps: + 1 Assign a spbm policy to test namespace with sufficient quota + 2 Create two FCDs + 3 Create a static PV/PVC using cns register volume API + 4 Create a VMservice VM and with the pvcs created in step 3 + 5 Verify CNS metadata for pvcs. 
+ 6 Write some IO to the CSI volumes and read it back from them and verify the data integrity + 7 Delete VM service VM + 8 delete pvcs + 9 Remove spbm policy attached to test namespace + */ + ginkgo.It("static-vm and verify vm creation and validate storagequota", func() { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + var err error + var totalquota_used_before, storagepolicyquota_pvc_before, storagepolicy_usage_pvc_before, totalquota_used_after *resource.Quantity + var storagepolicyquota_pvc_after, storagepolicy_usage_pvc_after *resource.Quantity + + curtime := time.Now().Unix() + curtimeinstring := strconv.FormatInt(curtime, 10) + pvcName := "cns-pvc-" + curtimeinstring + framework.Logf("pvc name :%s", pvcName) + namespace = getNamespaceToRunTests(f) + + restConfig := getRestConfigClient() + ginkgo.By("Get storage Policy") + ginkgo.By(fmt.Sprintf("storagePolicyName: %s", storagePolicyName)) + profileID := e2eVSphere.GetSpbmPolicyID(storagePolicyName) + framework.Logf("Profile ID :%s", profileID) + scParameters := make(map[string]string) + scParameters["storagePolicyID"] = profileID + + storageclass, err := client.StorageV1().StorageClasses().Get(ctx, storagePolicyName, metav1.GetOptions{}) + if !apierrors.IsNotFound(err) { + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } else { + storageclass, err = createStorageClass(client, scParameters, nil, "", "", true, storagePolicyName) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + framework.Logf("storageclass name :%s", storageclass.GetName()) + + ginkgo.By("create resource quota") + setStoragePolicyQuota(ctx, restConfig, storagePolicyName, namespace, rqLimit) + + //restConfig, storageclass, profileID := staticProvisioningPreSetUpUtil(ctx) + + if isStorageQuotaFSSEnabled { + totalquota_used_before, _ = getTotalQuotaConsumedByStoragePolicy(ctx, restConfig, storagePolicyName, namespace) + framework.Logf("totalUsedQuota_Before :%v", totalquota_used_before) + + storagepolicyquota_pvc_before, _ = getStoragePolicyQuotaForSpecificResourceType(ctx, restConfig, + storagePolicyName, namespace, volExtensionName) + framework.Logf("volume ********** storagepolicyquota_pvc_before :%v ", storagepolicyquota_pvc_before) + + storagepolicy_usage_pvc_before, _ = getStoragePolicyUsageForSpecificResourceType(ctx, restConfig, + storagePolicyName, namespace, pvcUsage) + framework.Logf("volume ********** storagepolicy_usage_pvc_before :%v", storagepolicy_usage_pvc_before) + } + + ginkgo.By("Creating FCD Disk") + fcdID, err := e2eVSphere.createFCDwithValidProfileID(ctx, + "staticfcd"+curtimeinstring, profileID, diskSizeInMb, defaultDatastore.Reference()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.Logf("FCD ID: %s", fcdID) + defer func() { + err := e2eVSphere.deleteFCD(ctx, fcdID, defaultDatastore.Reference()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + ginkgo.By("Create CNS register volume with above created FCD") + cnsRegisterVolume := getCNSRegisterVolumeSpec(ctx, namespace, fcdID, "", pvcName, v1.ReadWriteOnce) + err = createCNSRegisterVolume(ctx, restConfig, cnsRegisterVolume) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.Logf("waiting for some time for FCD to register in CNS and for cnsRegisterVolume to get create") + framework.ExpectNoError(waitForCNSRegisterVolumeToGetCreated(ctx, + restConfig, namespace, cnsRegisterVolume, poll, pollTimeout)) + cnsRegisterVolumeName := cnsRegisterVolume.GetName() + framework.Logf("CNS register volume name : %s", cnsRegisterVolumeName) + 
+ ginkgo.By("verify created PV, PVC and check the bidirectional reference") + pvc, err := client.CoreV1().PersistentVolumeClaims(namespace).Get(ctx, pvcName, metav1.GetOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + pv := getPvFromClaim(client, namespace, pvcName) + verifyBidirectionalReferenceOfPVandPVC(ctx, client, pvc, pv, fcdID) + + if isStorageQuotaFSSEnabled { + totalquota_used_after, _ = getTotalQuotaConsumedByStoragePolicy(ctx, restConfig, storageclass.Name, namespace) + framework.Logf("totalquota_used_after :%v", totalquota_used_after) + + storagepolicyquota_pvc_after, _ = getStoragePolicyQuotaForSpecificResourceType(ctx, restConfig, + storagePolicyName, namespace, volExtensionName) + framework.Logf("********** storagepolicyquota_pvc_after :%v", storagepolicyquota_pvc_after) + + storagepolicy_usage_pvc_after, _ = getStoragePolicyUsageForSpecificResourceType(ctx, restConfig, + storagePolicyName, namespace, pvcUsage) + framework.Logf("********** pvc_Usage_Quota_After :%v", storagepolicy_usage_pvc_after) + + quotavalidationStatus := validate_totalStoragequota(ctx, diskSizeInMb, totalquota_used_before, totalquota_used_after) + gomega.Expect(quotavalidationStatus).NotTo(gomega.BeFalse()) + quotavalidationStatus = validate_totalStoragequota(ctx, diskSizeInMb, storagepolicyquota_pvc_before, storagepolicyquota_pvc_after) + gomega.Expect(quotavalidationStatus).NotTo(gomega.BeFalse()) + quotavalidationStatus = validate_totalStoragequota(ctx, diskSizeInMb, storagepolicy_usage_pvc_before, storagepolicy_usage_pvc_after) + gomega.Expect(quotavalidationStatus).NotTo(gomega.BeFalse()) + + } + + // ginkgo.By("Create a PVC") + // pvc, err := createPVC(ctx, client, namespace, nil, "", storageclass, "") + // gomega.Expect(err).NotTo(gomega.HaveOccurred()) + // ginkgo.By("Waiting for all claims to be in bound state") + // pvs, err := fpv.WaitForPVClaimBoundPhase(ctx, client, []*v1.PersistentVolumeClaim{pvc, staticPvc}, pollTimeout) + // gomega.Expect(err).NotTo(gomega.HaveOccurred()) + // pv := pvs[0] + // volHandle := pv.Spec.CSI.VolumeHandle + // gomega.Expect(volHandle).NotTo(gomega.BeEmpty()) + // defer func() { + // ginkgo.By("Delete PVCs") + // err = fpv.DeletePersistentVolumeClaim(ctx, client, pvc.Name, namespace) + // gomega.Expect(err).NotTo(gomega.HaveOccurred()) + // err = fpv.DeletePersistentVolumeClaim(ctx, client, staticPvc.Name, namespace) + // gomega.Expect(err).NotTo(gomega.HaveOccurred()) + // ginkgo.By("Waiting for CNS volumes to be deleted") + // err = e2eVSphere.waitForCNSVolumeToBeDeleted(volHandle) + // gomega.Expect(err).NotTo(gomega.HaveOccurred()) + // err = e2eVSphere.waitForCNSVolumeToBeDeleted(staticPv.Spec.CSI.VolumeHandle) + // gomega.Expect(err).NotTo(gomega.HaveOccurred()) + // }() + + ginkgo.By("Creating VM bootstrap data") + secretName := createBootstrapSecretForVmsvcVms(ctx, client, namespace) + defer func() { + ginkgo.By("Deleting VM bootstrap data") + err := client.CoreV1().Secrets(namespace).Delete(ctx, secretName, *metav1.NewDeleteOptions(0)) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + ginkgo.By("Creating VM") + vm := createVmServiceVmWithPvcs( + ctx, vmopC, namespace, vmClass, []*v1.PersistentVolumeClaim{pvc}, vmi, storagePolicyName, secretName) + defer func() { + ginkgo.By("Deleting VM") + err = vmopC.Delete(ctx, &vmopv1.VirtualMachine{ObjectMeta: metav1.ObjectMeta{ + Name: vm.Name, + Namespace: namespace, + }}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + ginkgo.By("Creating loadbalancing service for ssh with the 
VM") + vmlbsvc := createService4Vm(ctx, vmopC, namespace, vm.Name) + defer func() { + ginkgo.By("Deleting loadbalancing service for ssh with the VM") + err = vmopC.Delete(ctx, &vmopv1.VirtualMachineService{ObjectMeta: metav1.ObjectMeta{ + Name: vmlbsvc.Name, + Namespace: namespace, + }}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + ginkgo.By("Wait for VM to come up and get an IP") + vmIp, err := waitNgetVmsvcVmIp(ctx, vmopC, namespace, vm.Name) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Wait and verify PVCs are attached to the VM") + gomega.Expect(waitNverifyPvcsAreAttachedToVmsvcVm(ctx, vmopC, cnsopC, vm, + []*v1.PersistentVolumeClaim{pvc})).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Verify PVCs are accessible to the VM") + ginkgo.By("Write some IO to the CSI volumes and read it back from them and verify the data integrity") + vm, err = getVmsvcVM(ctx, vmopC, vm.Namespace, vm.Name) // refresh vm info + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + for i, vol := range vm.Status.Volumes { + volFolder := formatNVerifyPvcIsAccessible(vol.DiskUuid, i+1, vmIp) + verifyDataIntegrityOnVmDisk(vmIp, volFolder) + } + }) + }) diff --git a/tests/e2e/vsphere_volume_expansion.go b/tests/e2e/vsphere_volume_expansion.go index 1840b21113..0efba6a3ac 100644 --- a/tests/e2e/vsphere_volume_expansion.go +++ b/tests/e2e/vsphere_volume_expansion.go @@ -307,7 +307,7 @@ var _ = ginkgo.Describe("Volume Expansion Test", func() { 10. Make sure file system has increased */ - ginkgo.It("[csi-block-vanilla] [csi-supervisor] [csi-block-vanilla-parallelized] [csi-vcp-mig] Verify online "+ + ginkgo.It("[csi-block-vanilla] [csi-supervisor] [csi-block-vanilla-parallelized] [csi-vcp-mig] Verify online "+ "volume expansion on dynamic volume", ginkgo.Label(p0, block, vanilla, wcp, core), func() { ginkgo.By("Invoking Test for Volume Expansion") ctx, cancel := context.WithCancel(context.Background()) @@ -2865,7 +2865,7 @@ func createSCwithVolumeExpansionTrueAndDynamicPVC(ctx context.Context, f *framew gomega.Expect(err).NotTo(gomega.HaveOccurred()) restConfig = getRestConfigClient() - setStoragePolicyQuota(ctx, restConfig, storagePolicyName, namespace, defaultrqLimit) + setStoragePolicyQuota(ctx, restConfig, storagePolicyName, namespace, rqLimit) pvclaim, err = createPVC(ctx, client, namespace, nil, "", storageclass, "") gomega.Expect(err).NotTo(gomega.HaveOccurred())