diff --git a/Makefile b/Makefile index cdb559fd5..e74f3120f 100755 --- a/Makefile +++ b/Makefile @@ -29,10 +29,12 @@ IDENTITY_PROVIDER ?= $(shell kubectl get --raw /.well-known/openid-configuration DRIVER_BINARY = gcs-fuse-csi-driver SIDECAR_BINARY = gcs-fuse-csi-driver-sidecar-mounter WEBHOOK_BINARY = gcs-fuse-csi-driver-webhook +PREFETCH_BINARY = gcs-fuse-csi-driver-metadata-prefetch DRIVER_IMAGE = ${REGISTRY}/${DRIVER_BINARY} SIDECAR_IMAGE = ${REGISTRY}/${SIDECAR_BINARY} WEBHOOK_IMAGE = ${REGISTRY}/${WEBHOOK_BINARY} +PREFETCH_IMAGE = ${REGISTRY}/${PREFETCH_BINARY} DOCKER_BUILDX_ARGS ?= --push --builder multiarch-multiplatform-builder --build-arg STAGINGVERSION=${STAGINGVERSION} ifneq ("$(shell docker buildx build --help | grep 'provenance')", "") @@ -46,7 +48,7 @@ $(info DRIVER_IMAGE is ${DRIVER_IMAGE}) $(info SIDECAR_IMAGE is ${SIDECAR_IMAGE}) $(info WEBHOOK_IMAGE is ${WEBHOOK_IMAGE}) -all: driver sidecar-mounter webhook +all: driver sidecar-mounter webhook metadata-prefetch driver: mkdir -p ${BINDIR} @@ -56,6 +58,10 @@ sidecar-mounter: mkdir -p ${BINDIR} CGO_ENABLED=0 GOOS=linux GOARCH=$(shell dpkg --print-architecture) go build -mod vendor -ldflags "${LDFLAGS}" -o ${BINDIR}/${SIDECAR_BINARY} cmd/sidecar_mounter/main.go +metadata-prefetch: + mkdir -p ${BINDIR} + CGO_ENABLED=0 GOOS=linux GOARCH=$(shell dpkg --print-architecture) go build -mod vendor -ldflags "${LDFLAGS}" -o ${BINDIR}/${PREFETCH_BINARY} cmd/metadata_prefetch/main.go + webhook: mkdir -p ${BINDIR} CGO_ENABLED=0 GOOS=linux GOARCH=$(shell dpkg --print-architecture) go build -mod vendor -ldflags "${LDFLAGS}" -o ${BINDIR}/${WEBHOOK_BINARY} cmd/webhook/main.go @@ -129,18 +135,27 @@ ifeq (${BUILD_ARM}, true) make build-image-linux-arm64 docker manifest create ${DRIVER_IMAGE}:${STAGINGVERSION} ${DRIVER_IMAGE}:${STAGINGVERSION}_linux_amd64 ${DRIVER_IMAGE}:${STAGINGVERSION}_linux_arm64 docker manifest create ${SIDECAR_IMAGE}:${STAGINGVERSION} ${SIDECAR_IMAGE}:${STAGINGVERSION}_linux_amd64 
${SIDECAR_IMAGE}:${STAGINGVERSION}_linux_arm64 + docker manifest create ${PREFETCH_IMAGE}:${STAGINGVERSION} ${PREFETCH_IMAGE}:${STAGINGVERSION}_linux_amd64 ${PREFETCH_IMAGE}:${STAGINGVERSION}_linux_arm64 else docker manifest create ${DRIVER_IMAGE}:${STAGINGVERSION} ${DRIVER_IMAGE}:${STAGINGVERSION}_linux_amd64 docker manifest create ${SIDECAR_IMAGE}:${STAGINGVERSION} ${SIDECAR_IMAGE}:${STAGINGVERSION}_linux_amd64 + docker manifest create ${PREFETCH_IMAGE}:${STAGINGVERSION} ${PREFETCH_IMAGE}:${STAGINGVERSION}_linux_amd64 endif docker manifest create ${WEBHOOK_IMAGE}:${STAGINGVERSION} ${WEBHOOK_IMAGE}:${STAGINGVERSION}_linux_amd64 docker manifest push --purge ${DRIVER_IMAGE}:${STAGINGVERSION} docker manifest push --purge ${SIDECAR_IMAGE}:${STAGINGVERSION} + docker manifest push --purge ${PREFETCH_IMAGE}:${STAGINGVERSION} docker manifest push --purge ${WEBHOOK_IMAGE}:${STAGINGVERSION} build-image-linux-amd64: + docker buildx build ${DOCKER_BUILDX_ARGS} \ + --file ./cmd/metadata_prefetch/Dockerfile \ + --tag ${PREFETCH_IMAGE}:${STAGINGVERSION}_linux_amd64 \ + --platform linux/amd64 \ + --build-arg TARGETPLATFORM=linux/amd64 . + docker buildx build \ --file ./cmd/csi_driver/Dockerfile \ --tag validation_linux_amd64 \ @@ -164,6 +179,12 @@ build-image-linux-amd64: --platform linux/amd64 . build-image-linux-arm64: + docker buildx build ${DOCKER_BUILDX_ARGS} \ + --file ./cmd/metadata_prefetch/Dockerfile \ + --tag ${PREFETCH_IMAGE}:${STAGINGVERSION}_linux_arm64 \ + --platform linux/arm64 \ + --build-arg TARGETPLATFORM=linux/arm64 . 
+ docker buildx build \ --file ./cmd/csi_driver/Dockerfile \ --tag validation_linux_arm64 \ @@ -198,6 +219,7 @@ generate-spec-yaml: cd ./deploy/overlays/${OVERLAY}; ${BINDIR}/kustomize edit set image gke.gcr.io/gcs-fuse-csi-driver=${DRIVER_IMAGE}:${STAGINGVERSION}; cd ./deploy/overlays/${OVERLAY}; ${BINDIR}/kustomize edit set image gke.gcr.io/gcs-fuse-csi-driver-webhook=${WEBHOOK_IMAGE}:${STAGINGVERSION}; cd ./deploy/overlays/${OVERLAY}; ${BINDIR}/kustomize edit add configmap gcsfusecsi-image-config --behavior=merge --disableNameSuffixHash --from-literal=sidecar-image=${SIDECAR_IMAGE}:${STAGINGVERSION}; + cd ./deploy/overlays/${OVERLAY}; ${BINDIR}/kustomize edit add configmap gcsfusecsi-image-config --behavior=merge --disableNameSuffixHash --from-literal=metadata-sidecar-image=${PREFETCH_IMAGE}:${STAGINGVERSION}; echo "[{\"op\": \"replace\",\"path\": \"/spec/tokenRequests/0/audience\",\"value\": \"${PROJECT}.svc.id.goog\"}]" > ./deploy/overlays/${OVERLAY}/project_patch_csi_driver.json echo "[{\"op\": \"replace\",\"path\": \"/webhooks/0/clientConfig/caBundle\",\"value\": \"${CA_BUNDLE}\"}]" > ./deploy/overlays/${OVERLAY}/caBundle_patch_MutatingWebhookConfiguration.json echo "[{\"op\": \"replace\",\"path\": \"/spec/template/spec/containers/0/env/1/value\",\"value\": \"${IDENTITY_PROVIDER}\"}]" > ./deploy/overlays/${OVERLAY}/identity_provider_patch_csi_node.json diff --git a/cmd/metadata_prefetch/Dockerfile b/cmd/metadata_prefetch/Dockerfile new file mode 100644 index 000000000..e6778ce04 --- /dev/null +++ b/cmd/metadata_prefetch/Dockerfile @@ -0,0 +1,54 @@ +# Copyright 2018 The Kubernetes Authors. +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Build metadata-prefetch go binary +FROM --platform=$BUILDPLATFORM golang:1.22.7 AS metadata-prefetch-builder + +ARG STAGINGVERSION + +WORKDIR /gcs-fuse-csi-driver +ADD . . +RUN make metadata-prefetch BINDIR=/bin + +# go/gke-releasing-policies#base-images +FROM gke.gcr.io/debian-base:bookworm-v1.0.4-gke.2 AS debian + +# go/gke-releasing-policies#base-images +FROM gcr.io/distroless/base-debian12 AS output-image + +# Copy existing binaries. +COPY --from=debian /bin/ls /bin/ls + +# Copy dependencies. +COPY --from=debian /lib/x86_64-linux-gnu/libselinux.so.1 /lib/x86_64-linux-gnu/libselinux.so.1 +COPY --from=debian /lib/x86_64-linux-gnu/libc.so.6 /lib/x86_64-linux-gnu/libc.so.6 +COPY --from=debian /lib/x86_64-linux-gnu/libpcre2-8.so.0 /lib/x86_64-linux-gnu/libpcre2-8.so.0 +COPY --from=debian /lib64/ld-linux-x86-64.so.2 /lib64/ld-linux-x86-64.so.2 + +# Validate dependencies +FROM output-image AS validator-image +COPY --from=debian /bin/bash /bin/bash +COPY --from=debian /usr/bin/ldd /usr/bin/ldd +COPY --from=debian /bin/grep /bin/grep +SHELL ["/bin/bash", "-c"] +RUN if ldd /bin/ls | grep "not found"; then echo "!!! Missing deps for ls command !!!" 
&& exit 1; fi + +# Final image +FROM output-image + +# Copy the built binaries +COPY --from=metadata-prefetch-builder /bin/gcs-fuse-csi-driver-metadata-prefetch /gcs-fuse-csi-driver-metadata-prefetch + +ENTRYPOINT ["/gcs-fuse-csi-driver-metadata-prefetch"] diff --git a/cmd/metadata_prefetch/main.go b/cmd/metadata_prefetch/main.go new file mode 100644 index 000000000..14be08dd0 --- /dev/null +++ b/cmd/metadata_prefetch/main.go @@ -0,0 +1,102 @@ +/* +Copyright 2018 The Kubernetes Authors. +Copyright 2024 Google LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package main + +import ( + "context" + "flag" + "os" + "os/exec" + "os/signal" + "strings" + "syscall" + + "k8s.io/klog/v2" +) + +const ( + mountPathsLocation = "/volumes/" +) + +func main() { + klog.InitFlags(nil) + flag.Parse() + + // Create cancellable context to pass into exec. + ctx, cancel := context.WithCancel(context.Background()) + + // Handle SIGTERM signal. + sigs := make(chan os.Signal, 1) + signal.Notify(sigs, syscall.SIGTERM) + + go func() { + <-sigs + klog.Info("Caught SIGTERM signal: Terminating...") + cancel() + + os.Exit(0) // Exit gracefully + }() + + // Start the "ls" command in the background. + // All our volumes are mounted under the /volumes/ directory. + cmd := exec.CommandContext(ctx, "ls", "-R", mountPathsLocation) + cmd.Stdout = nil // Connects file descriptor to the null device (os.DevNull). 
+ + // TODO(hime): We should research strategies to parallelize ls execution and speed up cache population. + err := cmd.Start() + if err == nil { + mountPaths, err := getDirectoryNames(mountPathsLocation) + if err == nil { + klog.Infof("Running ls on mountPath(s): %s", strings.Join(mountPaths, ", ")) + } else { + klog.Warningf("failed to get mountPaths: %v", err) + } + + err = cmd.Wait() + if err != nil { + klog.Errorf("Error while executing ls command: %v", err) + } else { + klog.Info("Metadata prefetch complete") + } + } else { + klog.Errorf("Error starting ls command: %v.", err) + } + + klog.Info("Going to sleep...") + + // Keep the process running. + select {} +} + +// getDirectoryNames returns a list of strings representing the names of +// the directories within the provided path. +func getDirectoryNames(dirPath string) ([]string, error) { + directories := []string{} + items, err := os.ReadDir(dirPath) + if err != nil { + return directories, err + } + + for _, item := range items { + if item.IsDir() { + directories = append(directories, item.Name()) + } + } + + return directories, nil +} diff --git a/cmd/webhook/main.go b/cmd/webhook/main.go index 795d86048..1728d4cee 100644 --- a/cmd/webhook/main.go +++ b/cmd/webhook/main.go @@ -52,6 +52,7 @@ var ( ephemeralStorageRequest = flag.String("sidecar-ephemeral-storage-request", "5Gi", "The default ephemeral storage request for gcsfuse sidecar container.") ephemeralStorageLimit = flag.String("sidecar-ephemeral-storage-limit", "5Gi", "The default ephemeral storage limit for gcsfuse sidecar container.") sidecarImage = flag.String("sidecar-image", "", "The gcsfuse sidecar container image.") + metadataSidecarImage = flag.String("metadata-sidecar-image", "", "The metadata prefetch sidecar container image.") // These are set at compile time. 
webhookVersion = "unknown" @@ -72,7 +73,7 @@ func main() { klog.Infof("Running Google Cloud Storage FUSE CSI driver admission webhook version %v, sidecar container image %v", webhookVersion, *sidecarImage) // Load webhook config - c := wh.LoadConfig(*sidecarImage, *imagePullPolicy, *cpuRequest, *cpuLimit, *memoryRequest, *memoryLimit, *ephemeralStorageRequest, *ephemeralStorageLimit) + c := wh.LoadConfig(*sidecarImage, *metadataSidecarImage, *imagePullPolicy, *cpuRequest, *cpuLimit, *memoryRequest, *memoryLimit, *ephemeralStorageRequest, *ephemeralStorageLimit) // Load config for manager, informers, listers kubeConfig := config.GetConfigOrDie() @@ -103,6 +104,8 @@ func main() { // Setup Informer informerFactory := informers.NewSharedInformerFactory(client, resyncDuration) nodeLister := informerFactory.Core().V1().Nodes().Lister() + pvcLister := informerFactory.Core().V1().PersistentVolumeClaims().Lister() + pvLister := informerFactory.Core().V1().PersistentVolumes().Lister() informerFactory.Start(context.Done()) informerFactory.WaitForCacheSync(context.Done()) @@ -140,6 +143,8 @@ func main() { Config: c, Decoder: admission.NewDecoder(runtime.NewScheme()), NodeLister: nodeLister, + PvLister: pvLister, + PvcLister: pvcLister, ServerVersion: serverVersion, }, }) diff --git a/deploy/base/node/node.yaml b/deploy/base/node/node.yaml index 053239682..a319e9a5d 100755 --- a/deploy/base/node/node.yaml +++ b/deploy/base/node/node.yaml @@ -133,3 +133,4 @@ metadata: name: gcsfusecsi-image-config data: sidecar-image: gke.gcr.io/gcs-fuse-csi-driver-sidecar-mounter + metadata-sidecar-image: gke.gcr.io/gcs-fuse-csi-driver-metadata-prefetch diff --git a/deploy/base/webhook/deployment.yaml b/deploy/base/webhook/deployment.yaml index 2841190a0..e12481699 100644 --- a/deploy/base/webhook/deployment.yaml +++ b/deploy/base/webhook/deployment.yaml @@ -54,6 +54,7 @@ spec: - --sidecar-ephemeral-storage-limit=0 - --sidecar-ephemeral-storage-request=5Gi - --sidecar-image=$(SIDECAR_IMAGE) + - 
--metadata-sidecar-image=$(METADATA_SIDECAR_IMAGE) - --sidecar-image-pull-policy=$(SIDECAR_IMAGE_PULL_POLICY) - --cert-dir=/etc/tls-certs - --port=22030 @@ -66,6 +67,11 @@ spec: configMapKeyRef: name: gcsfusecsi-image-config key: sidecar-image + - name: METADATA_SIDECAR_IMAGE + valueFrom: + configMapKeyRef: + name: gcsfusecsi-image-config + key: metadata-sidecar-image resources: limits: cpu: 200m diff --git a/deploy/base/webhook/webhook_setup.yaml b/deploy/base/webhook/webhook_setup.yaml index 9ca946d3e..227553697 100644 --- a/deploy/base/webhook/webhook_setup.yaml +++ b/deploy/base/webhook/webhook_setup.yaml @@ -34,7 +34,7 @@ metadata: name: gcs-fuse-csi-webhook-role rules: - apiGroups: [""] - resources: ["nodes"] + resources: ["nodes", "persistentvolumes", "persistentvolumeclaims"] verbs: ["get","list","watch"] --- apiVersion: rbac.authorization.k8s.io/v1 diff --git a/pkg/cloud_provider/clientset/fake.go b/pkg/cloud_provider/clientset/fake.go index c1e9b6c4a..3f8c153a5 100644 --- a/pkg/cloud_provider/clientset/fake.go +++ b/pkg/cloud_provider/clientset/fake.go @@ -46,7 +46,7 @@ func (c *FakeClientset) GetPod(namespace, name string) (*corev1.Pod, error) { Status: corev1.PodStatus{ ContainerStatuses: []corev1.ContainerStatus{ { - Name: webhook.SidecarContainerName, + Name: webhook.GcsFuseSidecarName, State: corev1.ContainerState{ Running: &corev1.ContainerStateRunning{}, }, diff --git a/pkg/csi_driver/utils.go b/pkg/csi_driver/utils.go index 7c84ab643..d053cc931 100644 --- a/pkg/csi_driver/utils.go +++ b/pkg/csi_driver/utils.go @@ -313,7 +313,7 @@ func putExitFile(pod *corev1.Pod, targetPath string) error { for _, cs := range pod.Status.ContainerStatuses { switch { // skip the sidecar container itself - case cs.Name == webhook.SidecarContainerName: + case cs.Name == webhook.GcsFuseSidecarName: continue // If the Pod is terminating, the container status from Kubernetes API is not reliable @@ -453,7 +453,7 @@ func getSidecarContainerStatus(isInitContainer bool, pod 
*corev1.Pod) (*corev1.C } for _, cs := range containerStatusList { - if cs.Name == webhook.SidecarContainerName { + if cs.Name == webhook.GcsFuseSidecarName { return &cs, nil } } diff --git a/pkg/webhook/client.go b/pkg/webhook/client.go new file mode 100644 index 000000000..1f475fdaf --- /dev/null +++ b/pkg/webhook/client.go @@ -0,0 +1,83 @@ +/* +Copyright 2018 The Kubernetes Authors. +Copyright 2024 Google LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package webhook + +import ( + "errors" + + corev1 "k8s.io/api/core/v1" +) + +// IsPreprovisionCSIVolume checks whether the volume is a pre-provisioned volume for the desired csiDriver. +func (si *SidecarInjector) IsPreprovisionCSIVolume(csiDriver string, pvc *corev1.PersistentVolumeClaim) (bool, error) { + _, ok, err := si.GetPreprovisionCSIVolume(csiDriver, pvc) + + return ok, err +} + +// GetPreprovisionCSIVolume gets the pre-provisioned persistentVolume when backed by the desired csiDriver. +func (si *SidecarInjector) GetPreprovisionCSIVolume(csiDriver string, pvc *corev1.PersistentVolumeClaim) (*corev1.PersistentVolume, bool, error) { + if csiDriver == "" { + return nil, false, errors.New("csiDriver is empty, cannot verify storage type") + } + + if pvc == nil { + return nil, false, errors.New("pvc is nil, cannot get pv") + } + + // We return a nil error because this volume is still valid. + // A pvc can have this field missing when requesting a dynamically provisioned volume and said PV it is not yet bound. 
+ if pvc.Spec.VolumeName == "" { + return nil, false, nil + } + + // GetPV returns an error if pvc.Spec.VolumeName is not empty and the associated PV object is not found in the API server. + pv, err := si.GetPV(pvc.Spec.VolumeName) + if err != nil { + return nil, false, err // no additional context needed for error. + } + + if pv == nil { + return nil, false, errors.New("pv is nil, cannot get storage type") + } + + // Returns false when PV - PVC pair was created for a different csi driver or different storage type. + if pv.Spec.CSI != nil && pv.Spec.CSI.Driver == csiDriver { + return pv, true, nil + } + + return pv, false, nil +} + +func (si *SidecarInjector) GetPV(name string) (*corev1.PersistentVolume, error) { + pv, err := si.PvLister.Get(name) + if err != nil { + return nil, err + } + + return pv, nil +} + +func (si *SidecarInjector) GetPVC(namespace, name string) (*corev1.PersistentVolumeClaim, error) { + pvc, err := si.PvcLister.PersistentVolumeClaims(namespace).Get(name) + if err != nil { + return nil, err + } + + return pvc, nil +} diff --git a/pkg/webhook/client_test.go b/pkg/webhook/client_test.go new file mode 100644 index 000000000..a9a354b04 --- /dev/null +++ b/pkg/webhook/client_test.go @@ -0,0 +1,415 @@ +/* +Copyright 2018 The Kubernetes Authors. +Copyright 2024 Google LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package webhook + +import ( + "context" + "errors" + "testing" + "time" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/informers" + "k8s.io/client-go/kubernetes/fake" + "k8s.io/klog/v2" +) + +const resyncDuration = time.Second * 1 + +func TestIsPreprovisionCSIVolume(t *testing.T) { + t.Parallel() + + testcases := []struct { + testName string + csiDriverName string + pvc *corev1.PersistentVolumeClaim + pvsInK8s []corev1.PersistentVolume + expectedResponse bool + expectedError error + }{ + { + testName: "nil pvc passed into IsPreprovisionCSIVolume", + csiDriverName: "fake-csi-driver", + pvc: nil, + expectedResponse: false, + expectedError: errors.New(`pvc is nil, cannot get pv`), + }, + { + testName: "given blank csiDriver name", + csiDriverName: "", + pvc: &corev1.PersistentVolumeClaim{}, + expectedResponse: false, + expectedError: errors.New("csiDriver is empty, cannot verify storage type"), + }, + { + testName: "not preprovisioned pvc", + csiDriverName: "fake-csi-driver", + pvc: &corev1.PersistentVolumeClaim{}, + expectedResponse: false, + expectedError: nil, + }, + { + testName: "preprovisioned pvc volume not found", + csiDriverName: "fake-csi-driver", + pvc: &corev1.PersistentVolumeClaim{ + Spec: corev1.PersistentVolumeClaimSpec{ + VolumeName: "pv-1234", + }, + }, + expectedResponse: false, + expectedError: errors.New(`persistentvolume "pv-1234" not found`), + }, + { + testName: "preprovisioned pvc for different csi driver", + csiDriverName: "fake-csi-driver", + pvc: &corev1.PersistentVolumeClaim{ + Spec: corev1.PersistentVolumeClaimSpec{ + VolumeName: "pv-1234", + }, + }, + pvsInK8s: []corev1.PersistentVolume{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "pv-1234", + }, + Spec: corev1.PersistentVolumeSpec{ + PersistentVolumeSource: corev1.PersistentVolumeSource{ + CSI: &corev1.CSIPersistentVolumeSource{ + Driver: "other-csi-driver", + }, + }, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: 
"pv-135", + }, + Spec: corev1.PersistentVolumeSpec{ + PersistentVolumeSource: corev1.PersistentVolumeSource{ + CSI: &corev1.CSIPersistentVolumeSource{ + Driver: "fake-csi-driver", + }, + }, + }, + }, + }, + expectedResponse: false, + }, + { + testName: "preprovisioned pvc for different csi driver", + csiDriverName: "fake-csi-driver", + pvc: &corev1.PersistentVolumeClaim{ + Spec: corev1.PersistentVolumeClaimSpec{ + VolumeName: "pv-1234", + }, + }, + pvsInK8s: []corev1.PersistentVolume{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "pv-1234", + }, + Spec: corev1.PersistentVolumeSpec{ + PersistentVolumeSource: corev1.PersistentVolumeSource{ + CSI: &corev1.CSIPersistentVolumeSource{ + Driver: "fake-csi-driver", + }, + }, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "pv-135", + }, + Spec: corev1.PersistentVolumeSpec{ + PersistentVolumeSource: corev1.PersistentVolumeSource{ + CSI: &corev1.CSIPersistentVolumeSource{ + Driver: "fake-csi-driver", + }, + }, + }, + }, + }, + expectedResponse: true, + expectedError: nil, + }, + { + testName: "preprovisioned pvc with no csi specified", + csiDriverName: "fake-csi-driver", + pvc: &corev1.PersistentVolumeClaim{ + Spec: corev1.PersistentVolumeClaimSpec{ + VolumeName: "pv-1234", + }, + }, + pvsInK8s: []corev1.PersistentVolume{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "pv-1234", + }, + Spec: corev1.PersistentVolumeSpec{ + PersistentVolumeSource: corev1.PersistentVolumeSource{}, + }, + }, + }, + expectedResponse: false, + }, + } + + for _, testcase := range testcases { + t.Run(testcase.testName, func(t *testing.T) { + t.Parallel() + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + fakeClient := fake.NewSimpleClientset() + + for _, pvInK8s := range testcase.pvsInK8s { + _, err := fakeClient.CoreV1().PersistentVolumes().Create(context.TODO(), &pvInK8s, metav1.CreateOptions{}) + if err != nil { + klog.Errorf("test setup failed: %v", err) + } + } + + csiGroupClient := SidecarInjector{} + + informer 
:= informers.NewSharedInformerFactoryWithOptions(fakeClient, resyncDuration, informers.WithNamespace(metav1.NamespaceAll)) + csiGroupClient.PvLister = informer.Core().V1().PersistentVolumes().Lister() + csiGroupClient.PvcLister = informer.Core().V1().PersistentVolumeClaims().Lister() + + informer.Start(ctx.Done()) + informer.WaitForCacheSync(ctx.Done()) + + response, err := csiGroupClient.IsPreprovisionCSIVolume(testcase.csiDriverName, testcase.pvc) + if err != nil && testcase.expectedError != nil { + if err.Error() != testcase.expectedError.Error() { + t.Error("for test: ", testcase.testName, ", want: ", testcase.expectedError.Error(), " but got: ", err.Error()) + } + } else if err != nil || testcase.expectedError != nil { + // if one of them is nil, both must be nil to pass + t.Error("for test: ", testcase.testName, ", want: ", testcase.expectedError, " but got: ", err) + } + + if response != testcase.expectedResponse { + t.Error("for test: ", testcase.testName, ", want: ", testcase.expectedResponse, " but got: ", response) + } + }) + } +} + +func TestGetPV(t *testing.T) { + t.Parallel() + + testcases := []struct { + testName string + pvName string + pvsInK8s []corev1.PersistentVolume + expectedResponse *corev1.PersistentVolume + expectedError error + }{ + { + testName: "pv not in k8s", + pvName: "pvc-5678", + pvsInK8s: []corev1.PersistentVolume{}, + expectedResponse: nil, + expectedError: errors.New(`persistentvolume "pvc-5678" not found`), + }, + { + testName: "pv in k8s", + pvName: "pvc-12345", + pvsInK8s: []corev1.PersistentVolume{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "pvc-13", + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "pvc-124", + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "pvc-12345", + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "pvc-135", + }, + }, + }, + expectedResponse: &corev1.PersistentVolume{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pvc-12345", + }, + }, + expectedError: nil, + }, + } + + for _, testcase := 
range testcases { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + fakeClient := fake.NewSimpleClientset() + for _, pvInK8s := range testcase.pvsInK8s { + _, err := fakeClient.CoreV1().PersistentVolumes().Create(context.TODO(), &pvInK8s, metav1.CreateOptions{}) + if err != nil { + klog.Errorf("test setup failed: %v", err) + } + } + csiGroupClient := SidecarInjector{} + informer := informers.NewSharedInformerFactoryWithOptions(fakeClient, resyncDuration, informers.WithNamespace(metav1.NamespaceAll)) + csiGroupClient.PvLister = informer.Core().V1().PersistentVolumes().Lister() + csiGroupClient.PvcLister = informer.Core().V1().PersistentVolumeClaims().Lister() + + informer.Start(ctx.Done()) + informer.WaitForCacheSync(ctx.Done()) + + response, err := csiGroupClient.GetPV(testcase.pvName) + if err != nil && testcase.expectedError != nil { + if err.Error() != testcase.expectedError.Error() { + t.Error("for test: ", testcase.testName, ", want: ", testcase.expectedError.Error(), " but got: ", err.Error()) + } + } else if err != nil || testcase.expectedError != nil { + // if one of them is nil, both must be nil to pass + t.Error("for test: ", testcase.testName, ", want: ", testcase.expectedError, " but got: ", err) + } + + if response.String() != testcase.expectedResponse.String() { + t.Error("for test: ", testcase.testName, ", want: ", testcase.expectedResponse, " but got: ", response) + } + } +} + +func TestGetPVC(t *testing.T) { + t.Parallel() + + testcases := []struct { + testName string + pvcName string + pvcNamespace string + pvcsInK8s []corev1.PersistentVolumeClaim + expectedResponse *corev1.PersistentVolumeClaim + expectedError error + }{ + { + testName: "pvc not in k8s", + pvcName: "pvc-5678", + pvcNamespace: metav1.NamespaceAll, + pvcsInK8s: []corev1.PersistentVolumeClaim{}, + expectedResponse: nil, + expectedError: errors.New(`persistentvolumeclaim "pvc-5678" not found`), + }, + { + testName: "pvc in k8s on different namespace", + 
pvcName: "pvc-12345", + pvcNamespace: metav1.NamespaceSystem, + pvcsInK8s: []corev1.PersistentVolumeClaim{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "pvc-13", + Namespace: metav1.NamespaceDefault, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "pvc-12345", + Namespace: metav1.NamespaceDefault, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "pvc-124", + Namespace: metav1.NamespaceDefault, + }, + }, + }, + expectedResponse: nil, + expectedError: errors.New(`persistentvolumeclaim "pvc-12345" not found`), + }, + { + testName: "pvc in k8s", + pvcName: "pvc-12345", + pvcNamespace: metav1.NamespaceDefault, + pvcsInK8s: []corev1.PersistentVolumeClaim{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "pvc-13", + Namespace: metav1.NamespaceDefault, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "pvc-12345", + Namespace: metav1.NamespaceDefault, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "pvc-124", + Namespace: metav1.NamespaceDefault, + }, + }, + }, + expectedResponse: &corev1.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pvc-12345", + Namespace: metav1.NamespaceDefault, + }, + }, + expectedError: nil, + }, + } + + for _, testcase := range testcases { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + fakeClient := fake.NewSimpleClientset() + for _, pvcInK8s := range testcase.pvcsInK8s { + _, err := fakeClient.CoreV1().PersistentVolumeClaims(pvcInK8s.Namespace).Create(context.TODO(), &pvcInK8s, metav1.CreateOptions{}) + if err != nil { + klog.Errorf("failed to setup test: %v", err) + } + } + + csiGroupClient := SidecarInjector{} + + informer := informers.NewSharedInformerFactoryWithOptions(fakeClient, resyncDuration, informers.WithNamespace(metav1.NamespaceAll)) + csiGroupClient.PvLister = informer.Core().V1().PersistentVolumes().Lister() + csiGroupClient.PvcLister = informer.Core().V1().PersistentVolumeClaims().Lister() + + informer.Start(ctx.Done()) + informer.WaitForCacheSync(ctx.Done()) + + 
response, err := csiGroupClient.GetPVC(testcase.pvcNamespace, testcase.pvcName) + if err != nil && testcase.expectedError != nil { + if err.Error() != testcase.expectedError.Error() { + t.Error("for test: ", testcase.testName, ", want: ", testcase.expectedError.Error(), " but got: ", err.Error()) + } + } else if err != nil || testcase.expectedError != nil { + // if one of them is nil, both must be nil to pass + t.Error("for test: ", testcase.testName, ", want: ", testcase.expectedError, " but got: ", err) + } + + if response.String() != testcase.expectedResponse.String() { + t.Error("for test: ", testcase.testName, ", want: ", testcase.expectedResponse, " but got: ", response) + } + } +} diff --git a/pkg/webhook/config.go b/pkg/webhook/config.go index ff2f3385c..0b69743cc 100644 --- a/pkg/webhook/config.go +++ b/pkg/webhook/config.go @@ -27,8 +27,9 @@ import ( ) type Config struct { - ContainerImage string `json:"-"` - ImagePullPolicy string `json:"-"` + ContainerImage string `json:"-"` + MetadataContainerImage string `json:"-"` + ImagePullPolicy string `json:"-"` //nolint:tagliatelle CPURequest resource.Quantity `json:"gke-gcsfuse/cpu-request,omitempty"` //nolint:tagliatelle @@ -43,9 +44,10 @@ type Config struct { EphemeralStorageLimit resource.Quantity `json:"gke-gcsfuse/ephemeral-storage-limit,omitempty"` } -func LoadConfig(containerImage, imagePullPolicy, cpuRequest, cpuLimit, memoryRequest, memoryLimit, ephemeralStorageRequest, ephemeralStorageLimit string) *Config { +func LoadConfig(containerImage, metadataContainerImage, imagePullPolicy, cpuRequest, cpuLimit, memoryRequest, memoryLimit, ephemeralStorageRequest, ephemeralStorageLimit string) *Config { return &Config{ ContainerImage: containerImage, + MetadataContainerImage: metadataContainerImage, ImagePullPolicy: imagePullPolicy, CPURequest: resource.MustParse(cpuRequest), CPULimit: resource.MustParse(cpuLimit), @@ -57,7 +59,10 @@ func LoadConfig(containerImage, imagePullPolicy, cpuRequest, cpuLimit, 
memoryReq } func FakeConfig() *Config { - return LoadConfig("fake-repo/fake-sidecar-image:v999.999.999-gke.0@sha256:c9cd4cde857ab8052f416609184e2900c0004838231ebf1c3817baa37f21d847", "Always", "250m", "250m", "256Mi", "256Mi", "5Gi", "5Gi") + fakeImage1 := "fake-repo/fake-sidecar-image:v999.999.999-gke.0@sha256:c9cd4cde857ab8052f416609184e2900c0004838231ebf1c3817baa37f21d847" + fakeImage2 := "fake-repo/fake-sidecar-image:v888.888.888-gke.0@sha256:c9cd4cde857ab8052f416609184e2900c0004838231ebf1c3817baa37f21d847" + + return LoadConfig(fakeImage1, fakeImage2, "Always", "250m", "250m", "256Mi", "256Mi", "5Gi", "5Gi") } func prepareResourceList(c *Config) (corev1.ResourceList, corev1.ResourceList) { @@ -112,8 +117,9 @@ func populateResource(requestQuantity, limitQuantity *resource.Quantity, default // remaining values that are not specified by user are kept as the default config values. func (si *SidecarInjector) prepareConfig(annotations map[string]string) (*Config, error) { config := &Config{ - ContainerImage: si.Config.ContainerImage, - ImagePullPolicy: si.Config.ImagePullPolicy, + ContainerImage: si.Config.ContainerImage, + MetadataContainerImage: si.Config.MetadataContainerImage, + ImagePullPolicy: si.Config.ImagePullPolicy, } jsonData, err := json.Marshal(annotations) diff --git a/pkg/webhook/injection.go b/pkg/webhook/injection.go index 443377f21..355857f15 100644 --- a/pkg/webhook/injection.go +++ b/pkg/webhook/injection.go @@ -28,6 +28,29 @@ import ( const IstioSidecarName = "istio-proxy" +func (si *SidecarInjector) injectAsNativeSidecar(pod *corev1.Pod) (bool, error) { + supportsNativeSidecar, err := si.supportsNativeSidecar() + if err != nil { + return false, fmt.Errorf("failed to determine native sidecar injection: %w", err) + } + + nativeSidecarEnabled := true + if enable, ok := pod.Annotations[GcsFuseNativeSidecarEnableAnnotation]; ok { + parsedAnnotation, err := ParseBool(enable) + if err != nil { + klog.Errorf("failed to parse enableNativeSidecar 
annotation: %v", err) + } else { + nativeSidecarEnabled = parsedAnnotation + // Warn the user if they used annotation incorrectly. + if nativeSidecarEnabled && !supportsNativeSidecar { + klog.Errorf("attempting to enable native sidecar on a cluster that does not support it, this is not allowed") + } + } + } + + return nativeSidecarEnabled && supportsNativeSidecar, nil +} + func (si *SidecarInjector) supportsNativeSidecar() (bool, error) { if si.ServerVersion != nil && !si.ServerVersion.AtLeast(minimumSupportedVersion) { return false, nil @@ -63,23 +86,65 @@ func (si *SidecarInjector) supportsNativeSidecar() (bool, error) { return supportsNativeSidecar, nil } -func injectSidecarContainer(pod *corev1.Pod, config *Config, supportsNativeSidecar bool) { - nativeSidecarEnabled := true - if enable, ok := pod.Annotations[GcsFuseNativeSidecarEnableAnnotation]; ok { - parsedAnnotation, err := ParseBool(enable) - if err != nil { - klog.Errorf("failed to parse enableNativeSidecar annotation... ignoring annotation: %v", err) - } else { - nativeSidecarEnabled = parsedAnnotation - } - } - if supportsNativeSidecar && nativeSidecarEnabled { +func injectSidecarContainer(pod *corev1.Pod, config *Config, injectAsNativeSidecar bool) { + if injectAsNativeSidecar { pod.Spec.InitContainers = insert(pod.Spec.InitContainers, GetNativeSidecarContainerSpec(config), getInjectIndex(pod.Spec.InitContainers)) } else { pod.Spec.Containers = insert(pod.Spec.Containers, GetSidecarContainerSpec(config), getInjectIndex(pod.Spec.Containers)) } } +func (si *SidecarInjector) injectMetadataPrefetchSidecarContainer(pod *corev1.Pod, config *Config, injectAsNativeSidecar bool) { + var containerSpec corev1.Container + var index int + + injected, _ := validatePodHasSidecarContainerInjected(MetadataPrefetchSidecarName, pod, []corev1.Volume{}, []corev1.VolumeMount{}) + if injected { + klog.Infof("%s is already injected, skipping injection", MetadataPrefetchSidecarName) + + return + } + + // Extract user provided 
metadata prefetch sidecar image. + userProvidedMetadataPrefetchSidecarImage, err := ExtractImageAndDeleteContainer(&pod.Spec, MetadataPrefetchSidecarName) + if err != nil { + klog.Errorf("failed to get user provided metadata prefetch image. skipping injection") + + return + } + + if userProvidedMetadataPrefetchSidecarImage != "" { + config.MetadataContainerImage = userProvidedMetadataPrefetchSidecarImage + } + + if injectAsNativeSidecar { + containerSpec = si.GetNativeMetadataPrefetchSidecarContainerSpec(pod, config.MetadataContainerImage) + index = getInjectIndexAfterContainer(pod.Spec.InitContainers, GcsFuseSidecarName) + } else { + containerSpec = si.GetMetadataPrefetchSidecarContainerSpec(pod, config.MetadataContainerImage) + index = getInjectIndexAfterContainer(pod.Spec.Containers, GcsFuseSidecarName) + } + + if len(containerSpec.VolumeMounts) == 0 { + klog.Info("no volumes are requesting metadata prefetch, skipping metadata prefetch sidecar injection") + + return + } + + // This should not happen as we always inject the sidecar after injecting our primary gcsfuse sidecar. + if index == 0 { + klog.Warningf("%s not found when attempting to inject metadata prefetch sidecar. 
skipping injection", GcsFuseSidecarName) + + return + } + + if injectAsNativeSidecar { + pod.Spec.InitContainers = insert(pod.Spec.InitContainers, containerSpec, index) + } else { + pod.Spec.Containers = insert(pod.Spec.Containers, containerSpec, index) + } +} + func insert(a []corev1.Container, value corev1.Container, index int) []corev1.Container { // For index == len(a) if len(a) == index { @@ -94,7 +159,11 @@ func insert(a []corev1.Container, value corev1.Container, index int) []corev1.Co } func getInjectIndex(containers []corev1.Container) int { - idx, present := containerPresent(containers, IstioSidecarName) + return getInjectIndexAfterContainer(containers, IstioSidecarName) +} + +func getInjectIndexAfterContainer(containers []corev1.Container, containerName string) int { + idx, present := containerPresent(containers, containerName) if present { return idx + 1 } diff --git a/pkg/webhook/injection_test.go b/pkg/webhook/injection_test.go index 3193228c2..017d97d5e 100644 --- a/pkg/webhook/injection_test.go +++ b/pkg/webhook/injection_test.go @@ -19,6 +19,7 @@ package webhook import ( "context" + "reflect" "testing" "time" @@ -28,10 +29,176 @@ import ( "k8s.io/apimachinery/pkg/util/version" "k8s.io/client-go/informers" "k8s.io/client-go/kubernetes/fake" + "k8s.io/utils/ptr" ) var UnsupportedVersion = version.MustParseGeneric("1.28.0") +func TestInjectAsNativeSidecar(t *testing.T) { + t.Parallel() + + testCases := []struct { + testName string + cpVersion *version.Version + nodes []corev1.Node + pod *corev1.Pod + expect bool + expectedError error + }{ + { + testName: "test should allow native sidecar", + pod: &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + GcsFuseNativeSidecarEnableAnnotation: "true", + }, + }, + }, + cpVersion: minimumSupportedVersion, + nodes: nativeSupportNodes(), + expect: true, + }, + { + testName: "test should not native sidecar by user request", + pod: &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + 
Annotations: map[string]string{ + GcsFuseNativeSidecarEnableAnnotation: "false", + }, + }, + }, + cpVersion: minimumSupportedVersion, + nodes: nativeSupportNodes(), + expect: false, + }, + { + testName: "test should be native sidecar, user sent malformed request", + pod: &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + GcsFuseNativeSidecarEnableAnnotation: "maybe", + }, + }, + }, + cpVersion: minimumSupportedVersion, + nodes: nativeSupportNodes(), + expect: true, + }, + { + testName: "test should not allow native sidecar, skew", + pod: &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + GcsFuseNativeSidecarEnableAnnotation: "true", + }, + }, + }, + cpVersion: minimumSupportedVersion, + nodes: skewVersionNodes(), + expect: false, + }, + { + testName: "test should not allow native sidecar, all under 1.29", + cpVersion: minimumSupportedVersion, + pod: &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + GcsFuseNativeSidecarEnableAnnotation: "true", + }, + }, + }, + nodes: regularSidecarSupportNodes(), + expect: false, + }, + { + testName: "test should not allow native sidecar, all nodes are 1.29, cp is 1.28", + cpVersion: UnsupportedVersion, + pod: &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + GcsFuseNativeSidecarEnableAnnotation: "true", + }, + }, + }, + nodes: nativeSupportNodes(), + expect: false, + }, + { + testName: "test no nodes present, native sidecar support false", + cpVersion: UnsupportedVersion, + pod: &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + GcsFuseNativeSidecarEnableAnnotation: "true", + }, + }, + }, + nodes: []corev1.Node{}, + expect: false, + }, + { + testName: "test no nodes present, allow native sidecar support true", + cpVersion: minimumSupportedVersion, + pod: &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + GcsFuseNativeSidecarEnableAnnotation: 
"true", + }, + }, + }, + nodes: []corev1.Node{}, + expect: true, + }, + { + testName: "test no nodes present, allow native sidecar support false", + cpVersion: minimumSupportedVersion, + pod: &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + GcsFuseNativeSidecarEnableAnnotation: "false", + }, + }, + }, + nodes: []corev1.Node{}, + expect: false, + }, + } + for _, tc := range testCases { + t.Run(tc.testName, func(t *testing.T) { + t.Parallel() + + fakeClient := fake.NewSimpleClientset() + // Create the nodes. + for _, node := range tc.nodes { + n := node + _, err := fakeClient.CoreV1().Nodes().Create(context.Background(), &n, metav1.CreateOptions{}) + if err != nil { + t.Error("failed to setup/create nodes") + } + } + + informerFactory := informers.NewSharedInformerFactoryWithOptions(fakeClient, time.Second*1, informers.WithNamespace(metav1.NamespaceAll)) + lister := informerFactory.Core().V1().Nodes().Lister() + si := &SidecarInjector{ + NodeLister: lister, + ServerVersion: tc.cpVersion, + } + + stopCh := make(<-chan struct{}) + informerFactory.Start(stopCh) + informerFactory.WaitForCacheSync(stopCh) + + result, err := si.injectAsNativeSidecar(tc.pod) + if result != tc.expect { + t.Errorf("\nfor %s, got native sidecar support to be: %t, but want: %t", tc.testName, result, tc.expect) + if err != nil { + t.Errorf("error returned from method: %v", err) + } + } + }) + } +} + func TestSupportsNativeSidecar(t *testing.T) { t.Parallel() @@ -343,3 +510,859 @@ func TestGetInjectIndex(t *testing.T) { } } } + +func TestInjectMetadataPrefetchSidecar(t *testing.T) { + t.Parallel() + + limits, requests := prepareResourceList(getMetadataPrefetchConfig("fake-image")) + + testCases := []struct { + testName string + pod *corev1.Pod + config Config + nativeSidecar *bool + expectedPod *corev1.Pod + }{ + { + testName: "no injection", + pod: &corev1.Pod{ + Spec: corev1.PodSpec{ + InitContainers: []corev1.Container{ + { + Name: "one", + }, + { + Name: 
"two", + }, + { + Name: "three", + }, + }, + Containers: []corev1.Container{ + { + Name: "workload-one", + }, + { + Name: "workload-two", + }, + { + Name: "workload-three", + }, + }, + }, + }, + expectedPod: &corev1.Pod{ + Spec: corev1.PodSpec{ + InitContainers: []corev1.Container{ + { + Name: "one", + }, + { + Name: "two", + }, + { + Name: "three", + }, + }, + Containers: []corev1.Container{ + { + Name: "workload-one", + }, + { + Name: "workload-two", + }, + { + Name: "workload-three", + }, + }, + }, + }, + }, + { + testName: "fuse sidecar present, no injection", + pod: &corev1.Pod{ + Spec: corev1.PodSpec{ + InitContainers: []corev1.Container{ + { + Name: GcsFuseSidecarName, + }, + { + Name: "two", + }, + { + Name: "three", + }, + }, + Containers: []corev1.Container{ + { + Name: "workload-one", + }, + { + Name: "workload-two", + }, + { + Name: "workload-three", + }, + }, + }, + }, + expectedPod: &corev1.Pod{ + Spec: corev1.PodSpec{ + InitContainers: []corev1.Container{ + { + Name: GcsFuseSidecarName, + }, + { + Name: "two", + }, + { + Name: "three", + }, + }, + Containers: []corev1.Container{ + { + Name: "workload-one", + }, + { + Name: "workload-two", + }, + { + Name: "workload-three", + }, + }, + }, + }, + }, + { + testName: "fuse sidecar present, no injection due to different driver", + pod: &corev1.Pod{ + Spec: corev1.PodSpec{ + InitContainers: []corev1.Container{ + { + Name: GcsFuseSidecarName, + }, + { + Name: "two", + }, + { + Name: "three", + }, + }, + Containers: []corev1.Container{ + { + Name: "workload-one", + }, + { + Name: "workload-two", + }, + { + Name: "workload-three", + }, + }, + Volumes: []corev1.Volume{ + { + Name: "my-volume", + VolumeSource: corev1.VolumeSource{ + CSI: &corev1.CSIVolumeSource{ + Driver: "other-csi", + VolumeAttributes: map[string]string{ + gcsFuseMetadataPrefetchOnMountVolumeAttribute: "false", + }, + }, + }, + }, + }, + }, + }, + expectedPod: &corev1.Pod{ + Spec: corev1.PodSpec{ + InitContainers: []corev1.Container{ + { + 
Name: GcsFuseSidecarName, + }, + { + Name: "two", + }, + { + Name: "three", + }, + }, + Containers: []corev1.Container{ + { + Name: "workload-one", + }, + { + Name: "workload-two", + }, + { + Name: "workload-three", + }, + }, + Volumes: []corev1.Volume{ + { + Name: "my-volume", + VolumeSource: corev1.VolumeSource{ + CSI: &corev1.CSIVolumeSource{ + Driver: "other-csi", + VolumeAttributes: map[string]string{ + gcsFuseMetadataPrefetchOnMountVolumeAttribute: "false", + }, + }, + }, + }, + }, + }, + }, + }, + { + testName: "fuse sidecar present, no injection with volume annotation", + pod: &corev1.Pod{ + Spec: corev1.PodSpec{ + InitContainers: []corev1.Container{ + { + Name: GcsFuseSidecarName, + }, + { + Name: "two", + }, + { + Name: "three", + }, + }, + Containers: []corev1.Container{ + { + Name: "workload-one", + }, + { + Name: "workload-two", + }, + { + Name: "workload-three", + }, + }, + Volumes: []corev1.Volume{ + { + Name: "my-volume", + VolumeSource: corev1.VolumeSource{ + CSI: &corev1.CSIVolumeSource{ + Driver: gcsFuseCsiDriverName, + VolumeAttributes: map[string]string{ + gcsFuseMetadataPrefetchOnMountVolumeAttribute: "false", + }, + }, + }, + }, + }, + }, + }, + expectedPod: &corev1.Pod{ + Spec: corev1.PodSpec{ + InitContainers: []corev1.Container{ + { + Name: GcsFuseSidecarName, + }, + { + Name: "two", + }, + { + Name: "three", + }, + }, + Containers: []corev1.Container{ + { + Name: "workload-one", + }, + { + Name: "workload-two", + }, + { + Name: "workload-three", + }, + }, + Volumes: []corev1.Volume{ + { + Name: "my-volume", + VolumeSource: corev1.VolumeSource{ + CSI: &corev1.CSIVolumeSource{ + Driver: gcsFuseCsiDriverName, + VolumeAttributes: map[string]string{ + gcsFuseMetadataPrefetchOnMountVolumeAttribute: "false", + }, + }, + }, + }, + }, + }, + }, + }, + { + testName: "fuse sidecar not present, already injected", + pod: &corev1.Pod{ + Spec: corev1.PodSpec{ + InitContainers: []corev1.Container{ + { + Name: MetadataPrefetchSidecarName, + Image: 
"my-private-image", + SecurityContext: GetSecurityContext(), + }, + { + Name: "two", + }, + { + Name: "three", + }, + }, + Containers: []corev1.Container{ + { + Name: "workload-one", + }, + { + Name: "workload-two", + }, + { + Name: "workload-three", + }, + }, + }, + }, + expectedPod: &corev1.Pod{ + Spec: corev1.PodSpec{ + InitContainers: []corev1.Container{ + { + Name: MetadataPrefetchSidecarName, + Image: "my-private-image", + SecurityContext: GetSecurityContext(), + }, + { + Name: "two", + }, + { + Name: "three", + }, + }, + Containers: []corev1.Container{ + { + Name: "workload-one", + }, + { + Name: "workload-two", + }, + { + Name: "workload-three", + }, + }, + }, + }, + }, + { + testName: "fuse sidecar not present, privately hosted image", + pod: &corev1.Pod{ + Spec: corev1.PodSpec{ + InitContainers: []corev1.Container{ + { + Name: MetadataPrefetchSidecarName, + Image: "my-private-image", + }, + { + Name: "two", + }, + { + Name: "three", + }, + }, + Containers: []corev1.Container{ + { + Name: "workload-one", + }, + { + Name: "workload-two", + }, + { + Name: "workload-three", + }, + }, + Volumes: []corev1.Volume{ + { + Name: "my-volume", + VolumeSource: corev1.VolumeSource{ + CSI: &corev1.CSIVolumeSource{ + Driver: gcsFuseCsiDriverName, + VolumeAttributes: map[string]string{ + gcsFuseMetadataPrefetchOnMountVolumeAttribute: "true", + }, + }, + }, + }, + }, + }, + }, + expectedPod: &corev1.Pod{ + Spec: corev1.PodSpec{ + InitContainers: []corev1.Container{ + { + Name: "two", + }, + { + Name: "three", + }, + }, + Containers: []corev1.Container{ + { + Name: "workload-one", + }, + { + Name: "workload-two", + }, + { + Name: "workload-three", + }, + }, + Volumes: []corev1.Volume{ + { + Name: "my-volume", + VolumeSource: corev1.VolumeSource{ + CSI: &corev1.CSIVolumeSource{ + Driver: gcsFuseCsiDriverName, + VolumeAttributes: map[string]string{ + gcsFuseMetadataPrefetchOnMountVolumeAttribute: "true", + }, + }, + }, + }, + }, + }, + }, + }, + { + testName: "fuse sidecar 
present, injection successful", + pod: &corev1.Pod{ + Spec: corev1.PodSpec{ + InitContainers: []corev1.Container{ + { + Name: GcsFuseSidecarName, + }, + { + Name: "two", + }, + { + Name: "three", + }, + }, + Containers: []corev1.Container{ + { + Name: "workload-one", + }, + { + Name: "workload-two", + }, + { + Name: "workload-three", + }, + }, + Volumes: []corev1.Volume{ + { + Name: "my-volume", + VolumeSource: corev1.VolumeSource{ + CSI: &corev1.CSIVolumeSource{ + Driver: gcsFuseCsiDriverName, + VolumeAttributes: map[string]string{ + gcsFuseMetadataPrefetchOnMountVolumeAttribute: "true", + }, + }, + }, + }, + }, + }, + }, + expectedPod: &corev1.Pod{ + Spec: corev1.PodSpec{ + InitContainers: []corev1.Container{ + { + Name: GcsFuseSidecarName, + }, + { + Name: MetadataPrefetchSidecarName, + Env: []corev1.EnvVar{{Name: "NATIVE_SIDECAR", Value: "TRUE"}}, + RestartPolicy: ptr.To(corev1.ContainerRestartPolicyAlways), + SecurityContext: GetSecurityContext(), + Resources: corev1.ResourceRequirements{ + Requests: requests, + Limits: limits, + }, + VolumeMounts: []corev1.VolumeMount{{Name: "my-volume", ReadOnly: true, MountPath: "/volumes/my-volume"}}, + }, + { + Name: "two", + }, + { + Name: "three", + }, + }, + Containers: []corev1.Container{ + { + Name: "workload-one", + }, + { + Name: "workload-two", + }, + { + Name: "workload-three", + }, + }, + Volumes: []corev1.Volume{ + { + Name: "my-volume", + VolumeSource: corev1.VolumeSource{ + CSI: &corev1.CSIVolumeSource{ + Driver: gcsFuseCsiDriverName, + VolumeAttributes: map[string]string{ + gcsFuseMetadataPrefetchOnMountVolumeAttribute: "true", + }, + }, + }, + }, + }, + }, + }, + }, + { + testName: "fuse sidecar present with many volumes and config, injection successful", + config: *FakeConfig(), + pod: &corev1.Pod{ + Spec: corev1.PodSpec{ + InitContainers: []corev1.Container{ + { + Name: GcsFuseSidecarName, + }, + { + Name: "two", + }, + { + Name: "three", + }, + }, + Containers: []corev1.Container{ + { + Name: 
"workload-one", + }, + { + Name: "workload-two", + }, + { + Name: "workload-three", + }, + }, + Volumes: []corev1.Volume{ + { + Name: "my-volume", + VolumeSource: corev1.VolumeSource{ + CSI: &corev1.CSIVolumeSource{ + Driver: gcsFuseCsiDriverName, + VolumeAttributes: map[string]string{ + gcsFuseMetadataPrefetchOnMountVolumeAttribute: "true", + }, + }, + }, + }, + { + Name: "my-other-volume", + VolumeSource: corev1.VolumeSource{ + CSI: &corev1.CSIVolumeSource{ + Driver: gcsFuseCsiDriverName, + VolumeAttributes: map[string]string{ + gcsFuseMetadataPrefetchOnMountVolumeAttribute: "false", + }, + }, + }, + }, + { + Name: "other-csi-vol", + VolumeSource: corev1.VolumeSource{ + CSI: &corev1.CSIVolumeSource{ + Driver: "other-csi", + }, + }, + }, + { + Name: "my-emptydir", + VolumeSource: corev1.VolumeSource{ + EmptyDir: &corev1.EmptyDirVolumeSource{}, + }, + }, + }, + }, + }, + expectedPod: &corev1.Pod{ + Spec: corev1.PodSpec{ + InitContainers: []corev1.Container{ + { + Name: GcsFuseSidecarName, + }, + { + Name: MetadataPrefetchSidecarName, + Image: FakeConfig().MetadataContainerImage, + Env: []corev1.EnvVar{{Name: "NATIVE_SIDECAR", Value: "TRUE"}}, + RestartPolicy: ptr.To(corev1.ContainerRestartPolicyAlways), + SecurityContext: GetSecurityContext(), + Resources: corev1.ResourceRequirements{ + Requests: requests, + Limits: limits, + }, + VolumeMounts: []corev1.VolumeMount{{Name: "my-volume", ReadOnly: true, MountPath: "/volumes/my-volume"}}, + }, + { + Name: "two", + }, + { + Name: "three", + }, + }, + Containers: []corev1.Container{ + { + Name: "workload-one", + }, + { + Name: "workload-two", + }, + { + Name: "workload-three", + }, + }, + Volumes: []corev1.Volume{ + { + Name: "my-volume", + VolumeSource: corev1.VolumeSource{ + CSI: &corev1.CSIVolumeSource{ + Driver: gcsFuseCsiDriverName, + VolumeAttributes: map[string]string{ + gcsFuseMetadataPrefetchOnMountVolumeAttribute: "true", + }, + }, + }, + }, + { + Name: "my-other-volume", + VolumeSource: corev1.VolumeSource{ + 
CSI: &corev1.CSIVolumeSource{ + Driver: gcsFuseCsiDriverName, + VolumeAttributes: map[string]string{ + gcsFuseMetadataPrefetchOnMountVolumeAttribute: "false", + }, + }, + }, + }, + { + Name: "other-csi-vol", + VolumeSource: corev1.VolumeSource{ + CSI: &corev1.CSIVolumeSource{ + Driver: "other-csi", + }, + }, + }, + { + Name: "my-emptydir", + VolumeSource: corev1.VolumeSource{ + EmptyDir: &corev1.EmptyDirVolumeSource{}, + }, + }, + }, + }, + }, + }, + { + testName: "fuse sidecar present & using privately hosted image, injection successful", + pod: &corev1.Pod{ + Spec: corev1.PodSpec{ + InitContainers: []corev1.Container{ + { + Name: GcsFuseSidecarName, + }, + { + Name: "two", + }, + { + Name: "three", + }, + }, + Containers: []corev1.Container{ + { + Name: MetadataPrefetchSidecarName, + Image: "my-private-image", + }, + { + Name: "workload-one", + }, + { + Name: "workload-two", + }, + { + Name: "workload-three", + }, + }, + Volumes: []corev1.Volume{ + { + Name: "my-volume", + VolumeSource: corev1.VolumeSource{ + CSI: &corev1.CSIVolumeSource{ + Driver: gcsFuseCsiDriverName, + VolumeAttributes: map[string]string{ + gcsFuseMetadataPrefetchOnMountVolumeAttribute: "true", + }, + }, + }, + }, + }, + }, + }, + expectedPod: &corev1.Pod{ + Spec: corev1.PodSpec{ + InitContainers: []corev1.Container{ + { + Name: GcsFuseSidecarName, + }, + { + Name: MetadataPrefetchSidecarName, + Env: []corev1.EnvVar{{Name: "NATIVE_SIDECAR", Value: "TRUE"}}, + RestartPolicy: ptr.To(corev1.ContainerRestartPolicyAlways), + SecurityContext: GetSecurityContext(), + Resources: corev1.ResourceRequirements{ + Requests: requests, + Limits: limits, + }, + Image: "my-private-image", + VolumeMounts: []corev1.VolumeMount{{Name: "my-volume", ReadOnly: true, MountPath: "/volumes/my-volume"}}, + }, + { + Name: "two", + }, + { + Name: "three", + }, + }, + Containers: []corev1.Container{ + { + Name: "workload-one", + }, + { + Name: "workload-two", + }, + { + Name: "workload-three", + }, + }, + Volumes: 
[]corev1.Volume{ + { + Name: "my-volume", + VolumeSource: corev1.VolumeSource{ + CSI: &corev1.CSIVolumeSource{ + Driver: gcsFuseCsiDriverName, + VolumeAttributes: map[string]string{ + gcsFuseMetadataPrefetchOnMountVolumeAttribute: "true", + }, + }, + }, + }, + }, + }, + }, + }, + { + testName: "fuse sidecar present & using privately hosted image, injection fail", + pod: &corev1.Pod{ + Spec: corev1.PodSpec{ + InitContainers: []corev1.Container{ + { + Name: GcsFuseSidecarName, + }, + { + Name: "two", + }, + { + Name: "three", + }, + }, + Containers: []corev1.Container{ + { + Name: MetadataPrefetchSidecarName, + Image: "a:a:a:a", + }, + { + Name: "workload-one", + }, + { + Name: "workload-two", + }, + { + Name: "workload-three", + }, + }, + Volumes: []corev1.Volume{ + { + Name: "my-volume", + VolumeSource: corev1.VolumeSource{ + CSI: &corev1.CSIVolumeSource{ + Driver: gcsFuseCsiDriverName, + VolumeAttributes: map[string]string{ + gcsFuseMetadataPrefetchOnMountVolumeAttribute: "true", + }, + }, + }, + }, + }, + }, + }, + expectedPod: &corev1.Pod{ + Spec: corev1.PodSpec{ + InitContainers: []corev1.Container{ + { + Name: GcsFuseSidecarName, + }, + { + Name: "two", + }, + { + Name: "three", + }, + }, + Containers: []corev1.Container{ + { + Name: "workload-one", + }, + { + Name: "workload-two", + }, + { + Name: "workload-three", + }, + }, + Volumes: []corev1.Volume{ + { + Name: "my-volume", + VolumeSource: corev1.VolumeSource{ + CSI: &corev1.CSIVolumeSource{ + Driver: gcsFuseCsiDriverName, + VolumeAttributes: map[string]string{ + gcsFuseMetadataPrefetchOnMountVolumeAttribute: "true", + }, + }, + }, + }, + }, + }, + }, + }, + } + for _, tc := range testCases { + t.Run(tc.testName, func(t *testing.T) { + t.Parallel() + if tc.nativeSidecar == nil { + tc.nativeSidecar = ptr.To(true) + } + si := SidecarInjector{} + si.injectMetadataPrefetchSidecarContainer(tc.pod, &tc.config, *tc.nativeSidecar) + if !reflect.DeepEqual(tc.pod, tc.expectedPod) { + t.Errorf(`failed to run %s, 
expected: "%v", but got "%v". Diff: %s`, tc.testName, tc.expectedPod, tc.pod, cmp.Diff(tc.expectedPod, tc.pod)) + } + }) + } +} diff --git a/pkg/webhook/mutatingwebhook.go b/pkg/webhook/mutatingwebhook.go index d8ad71cb9..83d01f4b1 100644 --- a/pkg/webhook/mutatingwebhook.go +++ b/pkg/webhook/mutatingwebhook.go @@ -49,6 +49,8 @@ type SidecarInjector struct { Config *Config Decoder admission.Decoder NodeLister listersv1.NodeLister + PvcLister listersv1.PersistentVolumeClaimLister + PvLister listersv1.PersistentVolumeLister ServerVersion *version.Version } @@ -92,27 +94,30 @@ func (si *SidecarInjector) Handle(_ context.Context, req admission.Request) admi return admission.Errored(http.StatusBadRequest, err) } - if image, err := parseSidecarContainerImage(pod); err == nil { - if image != "" { - config.ContainerImage = image + if userProvidedGcsFuseSidecarImage, err := ExtractImageAndDeleteContainer(&pod.Spec, GcsFuseSidecarName); err == nil { + if userProvidedGcsFuseSidecarImage != "" { + config.ContainerImage = userProvidedGcsFuseSidecarImage } } else { return admission.Errored(http.StatusBadRequest, err) } // Check support for native sidecar. - supportsNativeSidecar, err := si.supportsNativeSidecar() + injectAsNativeSidecar, err := si.injectAsNativeSidecar(pod) if err != nil { return admission.Errored(http.StatusInternalServerError, fmt.Errorf("failed to verify native sidecar support: %w", err)) } // Inject container. - injectSidecarContainer(pod, config, supportsNativeSidecar) + injectSidecarContainer(pod, config, injectAsNativeSidecar) pod.Spec.Volumes = append(GetSidecarContainerVolumeSpec(pod.Spec.Volumes...), pod.Spec.Volumes...) // Log pod mutation. LogPodMutation(pod, config) + // Inject metadata prefetch sidecar. 
+ si.injectMetadataPrefetchSidecarContainer(pod, config, injectAsNativeSidecar) + marshaledPod, err := json.Marshal(pod) if err != nil { return admission.Errored(http.StatusBadRequest, fmt.Errorf("failed to marshal pod: %w", err)) diff --git a/pkg/webhook/mutatingwebhook_test.go b/pkg/webhook/mutatingwebhook_test.go index dfa021b58..584cc6aea 100644 --- a/pkg/webhook/mutatingwebhook_test.go +++ b/pkg/webhook/mutatingwebhook_test.go @@ -59,6 +59,7 @@ func TestPrepareConfig(t *testing.T) { }, wantConfig: &Config{ ContainerImage: FakeConfig().ContainerImage, + MetadataContainerImage: FakeConfig().MetadataContainerImage, ImagePullPolicy: FakeConfig().ImagePullPolicy, CPULimit: FakeConfig().CPULimit, CPURequest: FakeConfig().CPURequest, @@ -79,6 +80,7 @@ func TestPrepareConfig(t *testing.T) { }, wantConfig: &Config{ ContainerImage: FakeConfig().ContainerImage, + MetadataContainerImage: FakeConfig().MetadataContainerImage, ImagePullPolicy: FakeConfig().ImagePullPolicy, CPULimit: resource.MustParse("500m"), CPURequest: resource.MustParse("500m"), @@ -99,6 +101,7 @@ func TestPrepareConfig(t *testing.T) { }, wantConfig: &Config{ ContainerImage: FakeConfig().ContainerImage, + MetadataContainerImage: FakeConfig().MetadataContainerImage, ImagePullPolicy: FakeConfig().ImagePullPolicy, CPULimit: resource.MustParse("500m"), CPURequest: resource.MustParse("500m"), @@ -119,6 +122,7 @@ func TestPrepareConfig(t *testing.T) { }, wantConfig: &Config{ ContainerImage: FakeConfig().ContainerImage, + MetadataContainerImage: FakeConfig().MetadataContainerImage, ImagePullPolicy: FakeConfig().ImagePullPolicy, CPULimit: resource.Quantity{}, CPURequest: FakeConfig().CPURequest, @@ -139,6 +143,7 @@ func TestPrepareConfig(t *testing.T) { }, wantConfig: &Config{ ContainerImage: FakeConfig().ContainerImage, + MetadataContainerImage: FakeConfig().MetadataContainerImage, ImagePullPolicy: FakeConfig().ImagePullPolicy, CPULimit: resource.Quantity{}, CPURequest: resource.Quantity{}, @@ -162,6 +167,7 @@ 
func TestPrepareConfig(t *testing.T) { }, wantConfig: &Config{ ContainerImage: FakeConfig().ContainerImage, + MetadataContainerImage: FakeConfig().MetadataContainerImage, ImagePullPolicy: FakeConfig().ImagePullPolicy, CPULimit: resource.MustParse("500m"), CPURequest: resource.MustParse("100m"), @@ -185,6 +191,7 @@ func TestPrepareConfig(t *testing.T) { }, wantConfig: &Config{ ContainerImage: FakeConfig().ContainerImage, + MetadataContainerImage: FakeConfig().MetadataContainerImage, ImagePullPolicy: FakeConfig().ImagePullPolicy, CPULimit: resource.Quantity{}, CPURequest: resource.MustParse("100m"), @@ -208,6 +215,7 @@ func TestPrepareConfig(t *testing.T) { }, wantConfig: &Config{ ContainerImage: FakeConfig().ContainerImage, + MetadataContainerImage: FakeConfig().MetadataContainerImage, ImagePullPolicy: FakeConfig().ImagePullPolicy, CPULimit: resource.MustParse("500m"), CPURequest: resource.Quantity{}, @@ -350,134 +358,173 @@ func TestValidateMutatingWebhookResponse(t *testing.T) { }, wantResponse: admission.Allowed("The sidecar container was injected, no injection required."), }, + { + name: "native container injection successful test with multiple sidecar entries present", + operation: admissionv1.Create, + inputPod: getDuplicateDeclarationPodSpec(), + wantResponse: generatePatch(t, getDuplicateDeclarationPodSpec(), getDuplicateDeclarationPodSpecResponse(false)), + nodes: nativeSupportNodes(), + }, + { + name: "regular container injection successful test with custom image", + operation: admissionv1.Create, + inputPod: getDuplicateDeclarationPodSpec(), + wantResponse: generatePatch(t, getDuplicateDeclarationPodSpec(), getDuplicateDeclarationPodSpecResponse(false)), + nodes: nativeSupportNodes(), + }, { name: "container injection successful test with multiple sidecar entries present", operation: admissionv1.Create, inputPod: getDuplicateDeclarationPodSpec(), - wantResponse: generatePatch(t, getDuplicateDeclarationPodSpec(), getDuplicateDeclarationPodSpecResponse()), 
+ wantResponse: generatePatch(t, getDuplicateDeclarationPodSpec(), getDuplicateDeclarationPodSpecResponse(false)), nodes: nativeSupportNodes(), }, { name: "regular container injection successful test.", operation: admissionv1.Create, - inputPod: validInputPod(false), - wantResponse: wantResponse(t, false, false), + inputPod: validInputPod(), + wantResponse: wantResponse(t, false, false, false), nodes: skewVersionNodes(), }, { name: "native container set via annotation injection successful test.", operation: admissionv1.Create, - inputPod: validInputPodWithNativeAnnotation(false, "true"), - wantResponse: wantResponse(t, false, true), + inputPod: validInputPodWithNativeAnnotation(false, false, "true"), + wantResponse: wantResponse(t, false, false, true), + nodes: nativeSupportNodes(), + }, + { + name: "native container set via annotation injection successful test.", + operation: admissionv1.Create, + inputPod: validInputPodWithNativeAnnotation(true, true, "true"), + wantResponse: wantResponse(t, true, true, true), nodes: nativeSupportNodes(), }, { name: "native container set via annotation injection successful with custom image test.", operation: admissionv1.Create, - inputPod: validInputPodWithNativeAnnotation(true, "true"), - wantResponse: wantResponse(t, true, true), + inputPod: validInputPodWithNativeAnnotation(true, false, "true"), + wantResponse: wantResponse(t, true, false, true), nodes: nativeSupportNodes(), }, { name: "regular container set via annotation injection successful test.", operation: admissionv1.Create, - inputPod: validInputPodWithNativeAnnotation(false, "false"), - wantResponse: wantResponse(t, false, false), + inputPod: validInputPodWithNativeAnnotation(false, false, "false"), + wantResponse: wantResponse(t, false, false, false), nodes: nativeSupportNodes(), }, { name: "native container set via invalid annotation injection successful test.", operation: admissionv1.Create, - inputPod: validInputPodWithNativeAnnotation(false, "maybe"), - 
wantResponse: wantResponse(t, false, true), + inputPod: validInputPodWithNativeAnnotation(false, false, "maybe"), + wantResponse: wantResponse(t, false, false, true), nodes: nativeSupportNodes(), }, { name: "native container set via annotation injection unsupported test.", operation: admissionv1.Create, - inputPod: validInputPodWithNativeAnnotation(false, "true"), - wantResponse: wantResponse(t, false, false), + inputPod: validInputPodWithNativeAnnotation(false, false, "true"), + wantResponse: wantResponse(t, false, false, false), nodes: skewVersionNodes(), }, { - name: "Injection with custom sidecar container image successful test.", + name: "Injection with custom sidecar container image successful test with regular nodes.", operation: admissionv1.Create, - inputPod: validInputPod(true), - wantResponse: wantResponse(t, true, false), + inputPod: validInputPodWithCustomImage(false), + wantResponse: wantResponse(t, true, false, false), + nodes: regularSidecarSupportNodes(), + }, + { + name: "Injection with custom native sidecar container image successful as regular container.", + operation: admissionv1.Create, + inputPod: validInputPodWithSettings(true, true), + wantResponse: wantResponse(t, true, true, false), nodes: regularSidecarSupportNodes(), }, { name: "native container injection successful test.", operation: admissionv1.Create, - inputPod: validInputPod(false), - wantResponse: wantResponse(t, false, true), + inputPod: validInputPod(), + wantResponse: wantResponse(t, false, false, true), + nodes: nativeSupportNodes(), + }, + { + name: "Injection with custom sidecar container image successful test with native nodes.", + operation: admissionv1.Create, + inputPod: validInputPodWithCustomImage(false), + wantResponse: wantResponse(t, true, false, true), nodes: nativeSupportNodes(), }, { - name: "Injection with custom sidecar container image successful test.", + name: "Injection with custom native sidecar container image successful test as native sidecar.", 
operation: admissionv1.Create, - inputPod: validInputPod(true), - wantResponse: wantResponse(t, true, true), + inputPod: validInputPodWithCustomImage(true), + wantResponse: wantResponse(t, true, true, true), nodes: nativeSupportNodes(), }, { name: "regular container injection with istio present success test.", operation: admissionv1.Create, - inputPod: validInputPodWithIstio(false, false), - wantResponse: wantResponseWithIstio(t, false, false), + inputPod: validInputPodWithIstio(false, false, false), + wantResponse: wantResponseWithIstio(t, false, false, false), nodes: skewVersionNodes(), }, { - name: "Injection with custom sidecar container image successful test.", + name: "Injection with custom sidecar container image successful test with istio.", operation: admissionv1.Create, - inputPod: validInputPodWithIstio(true, true), - wantResponse: wantResponseWithIstio(t, true, true), + inputPod: validInputPodWithIstio(true, false, true), + wantResponse: wantResponseWithIstio(t, true, false, true), nodes: nativeSupportNodes(), }, } for _, tc := range testCases { - fakeClient := fake.NewSimpleClientset() - - // Create the nodes. - for _, node := range tc.nodes { - n := node - _, err := fakeClient.CoreV1().Nodes().Create(context.Background(), &n, metav1.CreateOptions{}) - if err != nil { - t.Error("failed to setup/create nodes") + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + + fakeClient := fake.NewSimpleClientset() + + // Create the nodes. 
+ for _, node := range tc.nodes { + n := node + _, err := fakeClient.CoreV1().Nodes().Create(context.Background(), &n, metav1.CreateOptions{}) + if err != nil { + t.Error("failed to setup/create nodes") + } } - } - informerFactory := informers.NewSharedInformerFactoryWithOptions(fakeClient, time.Second*1, informers.WithNamespace(metav1.NamespaceAll)) - lister := informerFactory.Core().V1().Nodes().Lister() + informerFactory := informers.NewSharedInformerFactoryWithOptions(fakeClient, time.Second*1, informers.WithNamespace(metav1.NamespaceAll)) + lister := informerFactory.Core().V1().Nodes().Lister() - si := SidecarInjector{ - Client: nil, - Config: FakeConfig(), - Decoder: admission.NewDecoder(runtime.NewScheme()), - NodeLister: lister, - } + si := SidecarInjector{ + Client: nil, + Config: FakeConfig(), + Decoder: admission.NewDecoder(runtime.NewScheme()), + NodeLister: lister, + } - stopCh := make(<-chan struct{}) - informerFactory.Start(stopCh) - informerFactory.WaitForCacheSync(stopCh) + stopCh := make(<-chan struct{}) + informerFactory.Start(stopCh) + informerFactory.WaitForCacheSync(stopCh) - request := &admission.Request{ - AdmissionRequest: admissionv1.AdmissionRequest{ - Operation: tc.operation, - }, - } - if tc.inputPod != nil { - request.Object = runtime.RawExtension{ - Raw: serialize(t, tc.inputPod), + request := &admission.Request{ + AdmissionRequest: admissionv1.AdmissionRequest{ + Operation: tc.operation, + }, + } + if tc.inputPod != nil { + request.Object = runtime.RawExtension{ + Raw: serialize(t, tc.inputPod), + } } - } - gotResponse := si.Handle(context.Background(), *request) + gotResponse := si.Handle(context.Background(), *request) - if err := compareResponses(tc.wantResponse, gotResponse); err != nil { - t.Errorf("for test: %s\nGot injection result: %v, but want: %v. 
details: %v", tc.name, gotResponse, tc.wantResponse, err) - } + if err := compareResponses(tc.wantResponse, gotResponse); err != nil { + t.Errorf("for test: %s\nGot injection result: %v, but want: %v. details: %v", tc.name, gotResponse, tc.wantResponse, err) + } + }) } } @@ -522,7 +569,7 @@ func getDuplicateDeclarationPodSpec() *corev1.Pod { Spec: corev1.PodSpec{ InitContainers: []corev1.Container{ { - Name: SidecarContainerName, + Name: GcsFuseSidecarName, Image: "private-repo/fake-sidecar-image:v999.999.999", }, }, @@ -534,7 +581,7 @@ func getDuplicateDeclarationPodSpec() *corev1.Pod { Name: "FakeContainer2", }, { - Name: SidecarContainerName, + Name: GcsFuseSidecarName, Image: "private-repo/fake-sidecar-image:v999.999.999", }, }, @@ -556,20 +603,20 @@ func getDuplicateDeclarationPodSpec() *corev1.Pod { } } -func getDuplicateDeclarationPodSpecResponse() *corev1.Pod { - result := modifySpec(*validInputPod(true), true, true) +func getDuplicateDeclarationPodSpecResponse(nativeCustomImage bool) *corev1.Pod { + result := modifySpec(*validInputPodWithCustomImage(nativeCustomImage), true, nativeCustomImage, true) return result } -func validInputPodWithNativeAnnotation(customImage bool, enableNativeSidecarAnnotation string) *corev1.Pod { - pod := validInputPod(customImage) +func validInputPodWithNativeAnnotation(customImage, customNativeImage bool, enableNativeSidecarAnnotation string) *corev1.Pod { + pod := validInputPodWithSettings(customImage, customNativeImage) pod.ObjectMeta.Annotations[GcsFuseNativeSidecarEnableAnnotation] = enableNativeSidecarAnnotation return pod } -func validInputPod(customImage bool) *corev1.Pod { +func validInputPodWithSettings(customImage, native bool) *corev1.Pod { pod := &corev1.Pod{ Spec: corev1.PodSpec{ Containers: []corev1.Container{ @@ -597,16 +644,30 @@ func validInputPod(customImage bool) *corev1.Pod { }, } + userProvidedSidecar := corev1.Container{ + Name: GcsFuseSidecarName, + Image: "private-repo/fake-sidecar-image:v999.999.999", + 
} + if customImage { - pod.Spec.Containers = append(pod.Spec.Containers, corev1.Container{ - Name: SidecarContainerName, - Image: "private-repo/fake-sidecar-image:v999.999.999", - }) + if native { + pod.Spec.InitContainers = append(pod.Spec.InitContainers, userProvidedSidecar) + } else { + pod.Spec.Containers = append(pod.Spec.Containers, userProvidedSidecar) + } } return pod } +func validInputPodWithCustomImage(native bool) *corev1.Pod { + return validInputPodWithSettings(true, native) +} + +func validInputPod() *corev1.Pod { + return validInputPodWithSettings(false, false) +} + func getWorkloadSpec(name string) corev1.Container { return corev1.Container{ Name: name, @@ -614,8 +675,8 @@ func getWorkloadSpec(name string) corev1.Container { } } -func validInputPodWithIstio(customImage, nativeIstio bool) *corev1.Pod { - pod := validInputPod(customImage) +func validInputPodWithIstio(customImage, nativeCustomImage, nativeIstio bool) *corev1.Pod { + pod := validInputPodWithSettings(customImage, nativeCustomImage) if nativeIstio { pod.Spec.InitContainers = append([]corev1.Container{istioContainer}, pod.Spec.InitContainers...) 
@@ -626,19 +687,24 @@ func validInputPodWithIstio(customImage, nativeIstio bool) *corev1.Pod { return pod } -func wantResponse(t *testing.T, customImage bool, native bool) admission.Response { +func wantResponse(t *testing.T, customImage bool, nativeCustomImage bool, native bool) admission.Response { t.Helper() - pod := *validInputPod(customImage) - newPod := *modifySpec(*validInputPod(customImage), customImage, native) + pod := validInputPodWithSettings(customImage, nativeCustomImage) + newPod := *modifySpec(*validInputPodWithSettings(customImage, nativeCustomImage), customImage, nativeCustomImage, native) - return generatePatch(t, &pod, &newPod) + return generatePatch(t, pod, &newPod) } -func modifySpec(newPod corev1.Pod, customImage bool, native bool) *corev1.Pod { +func modifySpec(newPod corev1.Pod, customImage bool, nativeCustomImage, native bool) *corev1.Pod { config := FakeConfig() if customImage { - config.ContainerImage = newPod.Spec.Containers[len(newPod.Spec.Containers)-1].Image - newPod.Spec.Containers = newPod.Spec.Containers[:len(newPod.Spec.Containers)-1] + if nativeCustomImage { + config.ContainerImage = newPod.Spec.InitContainers[len(newPod.Spec.InitContainers)-1].Image + newPod.Spec.InitContainers = newPod.Spec.InitContainers[:len(newPod.Spec.InitContainers)-1] + } else { + config.ContainerImage = newPod.Spec.Containers[len(newPod.Spec.Containers)-1].Image + newPod.Spec.Containers = newPod.Spec.Containers[:len(newPod.Spec.Containers)-1] + } } if native { @@ -657,17 +723,17 @@ func generatePatch(t *testing.T, originalPod *corev1.Pod, newPod *corev1.Pod) ad return admission.PatchResponseFromRaw(serialize(t, originalPod), serialize(t, newPod)) } -func wantResponseWithIstio(t *testing.T, customImage bool, native bool) admission.Response { +func wantResponseWithIstio(t *testing.T, customImage bool, nativeCustomImage, native bool) admission.Response { t.Helper() - originalPod := validInputPod(customImage) + originalPod := 
validInputPodWithSettings(customImage, nativeCustomImage) if native { originalPod.Spec.InitContainers = append([]corev1.Container{istioContainer}, originalPod.Spec.InitContainers...) } else { originalPod.Spec.Containers = append([]corev1.Container{istioContainer}, originalPod.Spec.Containers...) } - newPod := validInputPod(customImage) + newPod := validInputPodWithSettings(customImage, nativeCustomImage) config := FakeConfig() if customImage { config.ContainerImage = newPod.Spec.Containers[len(newPod.Spec.Containers)-1].Image diff --git a/pkg/webhook/parsers.go b/pkg/webhook/parsers.go index b3cf06ab0..93cdc0381 100644 --- a/pkg/webhook/parsers.go +++ b/pkg/webhook/parsers.go @@ -38,34 +38,44 @@ func ParseBool(str string) (bool, error) { } } -// parseSidecarContainerImage supports our Privately Hosted Sidecar Image option -// by iterating the container list and finding a container named "gke-gcsfuse-sidecar" -// If we find "gke-gcsfuse-sidecar": -// - extract the container image and check if the image is valid +// ExtractImageAndDeleteContainer supports the injection of custom sidecar images. +// We iterate the container list and find a container named "containerName" +// If we find "containerName": +// - extract the container image // - removes the container definition from the container list. -// - remove any mentions of "gke-gcsfuse-sidecar" from initContainer list. +// - verifies if the image is valid // - return image -func parseSidecarContainerImage(pod *corev1.Pod) (string, error) { +// +// We support custom sidecar images because: +// - Requirement for Privately Hosted Sidecar Image feature, for clusters running with limited internet access. +// - Allow fast testing of new sidecar image on a production environment, usually related to a new gcsfuse binary. 
+func ExtractImageAndDeleteContainer(podSpec *corev1.PodSpec, containerName string) (string, error) { var image string - // Find container named "gke-gcsfuse-sidecar" (SidecarContainerName), extract its image, and remove from list. - if index, present := containerPresent(pod.Spec.Containers, SidecarContainerName); present { - image = pod.Spec.Containers[index].Image + // Find Container named containerName, extract its image, and remove from list. + if index, present := containerPresent(podSpec.Containers, containerName); present { + image = podSpec.Containers[index].Image + + // The next webhook step is to reinject the sidecar, removing user declaration to prevent dual injection creation failures. + copy(podSpec.Containers[index:], podSpec.Containers[index+1:]) + podSpec.Containers = podSpec.Containers[:len(podSpec.Containers)-1] if _, _, _, err := parsers.ParseImageName(image); err != nil { return "", fmt.Errorf("could not parse input image: %q, error: %w", image, err) } - - if image != "" { - copy(pod.Spec.Containers[index:], pod.Spec.Containers[index+1:]) - pod.Spec.Containers = pod.Spec.Containers[:len(pod.Spec.Containers)-1] - } } - // Remove any mention of gke-gcsfuse-sidecar from init container list. - if index, present := containerPresent(pod.Spec.InitContainers, SidecarContainerName); present { - copy(pod.Spec.InitContainers[index:], pod.Spec.InitContainers[index+1:]) - pod.Spec.InitContainers = pod.Spec.InitContainers[:len(pod.Spec.InitContainers)-1] + // Find initContainer named containerName, extract its image, and remove from list. + if index, present := containerPresent(podSpec.InitContainers, containerName); present { + image = podSpec.InitContainers[index].Image + + // The next webhook step is to reinject the sidecar, removing user declaration to prevent dual injection creation failures. 
+ copy(podSpec.InitContainers[index:], podSpec.InitContainers[index+1:]) + podSpec.InitContainers = podSpec.InitContainers[:len(podSpec.InitContainers)-1] + + if _, _, _, err := parsers.ParseImageName(image); err != nil { + return "", fmt.Errorf("could not parse input image: %q, error: %w", image, err) + } } return image, nil diff --git a/pkg/webhook/parsers_test.go b/pkg/webhook/parsers_test.go index 3409eb8bb..d497867a4 100644 --- a/pkg/webhook/parsers_test.go +++ b/pkg/webhook/parsers_test.go @@ -25,18 +25,18 @@ import ( corev1 "k8s.io/api/core/v1" ) -func TestParseSidecarContainerImage(t *testing.T) { +func TestExtractImageAndDeleteContainer(t *testing.T) { t.Parallel() testCases := []struct { - testName string + name string pod corev1.Pod expectedPod corev1.Pod expectedImage string expectedError error }{ { - testName: "no declarations present", + name: "no declarations present", pod: corev1.Pod{ Spec: corev1.PodSpec{ Containers: []corev1.Container{ @@ -61,12 +61,12 @@ func TestParseSidecarContainerImage(t *testing.T) { expectedError: nil, }, { - testName: "one declaration present", + name: "one declaration present", pod: corev1.Pod{ Spec: corev1.PodSpec{ Containers: []corev1.Container{ { - Name: SidecarContainerName, + Name: GcsFuseSidecarName, Image: "busybox", }, }, @@ -81,18 +81,18 @@ func TestParseSidecarContainerImage(t *testing.T) { expectedError: nil, }, { - testName: "dual declaration present", // This is invalid but we should cover + name: "dual declaration present", // This is invalid but we should cover pod: corev1.Pod{ Spec: corev1.PodSpec{ InitContainers: []corev1.Container{ { - Name: SidecarContainerName, - Image: "other", + Name: GcsFuseSidecarName, + Image: "busybox2", }, }, Containers: []corev1.Container{ { - Name: SidecarContainerName, + Name: GcsFuseSidecarName, Image: "busybox", }, }, @@ -104,11 +104,11 @@ func TestParseSidecarContainerImage(t *testing.T) { Containers: []corev1.Container{}, }, }, - expectedImage: "busybox", + expectedImage: 
"busybox2", expectedError: nil, }, { - testName: "one declaration present, many containers", + name: "one declaration present, many containers", pod: corev1.Pod{ Spec: corev1.PodSpec{ InitContainers: []corev1.Container{ @@ -127,7 +127,7 @@ func TestParseSidecarContainerImage(t *testing.T) { }, Containers: []corev1.Container{ { - Name: SidecarContainerName, + Name: GcsFuseSidecarName, Image: "our-image", }, { @@ -173,8 +173,7 @@ func TestParseSidecarContainerImage(t *testing.T) { expectedError: nil, }, { - // We do not extract image as we dont support init container privately hosted sidecar image declaration. - testName: "one init declaration present, many containers", + name: "one init declaration present, many containers", pod: corev1.Pod{ Spec: corev1.PodSpec{ InitContainers: []corev1.Container{ @@ -187,7 +186,7 @@ func TestParseSidecarContainerImage(t *testing.T) { Image: "busybox", }, { - Name: SidecarContainerName, + Name: GcsFuseSidecarName, Image: "our-image", }, { @@ -235,11 +234,11 @@ func TestParseSidecarContainerImage(t *testing.T) { }, }, }, - expectedImage: "", + expectedImage: "our-image", expectedError: nil, }, { - testName: "dual declaration present, many containers", // This is invalid but we should cover anyway. + name: "dual declaration present, many containers", // This is invalid but we should cover anyway. 
pod: corev1.Pod{ Spec: corev1.PodSpec{ InitContainers: []corev1.Container{ @@ -256,8 +255,8 @@ func TestParseSidecarContainerImage(t *testing.T) { Image: "busybox", }, { - Name: SidecarContainerName, - Image: "other", + Name: GcsFuseSidecarName, + Image: "another-one", }, }, Containers: []corev1.Container{ @@ -270,7 +269,7 @@ func TestParseSidecarContainerImage(t *testing.T) { Image: "busybox", }, { - Name: SidecarContainerName, + Name: GcsFuseSidecarName, Image: "custom-image", }, }, @@ -304,27 +303,23 @@ func TestParseSidecarContainerImage(t *testing.T) { }, }, }, - expectedImage: "custom-image", + expectedImage: "another-one", expectedError: nil, }, { - testName: "no image present", + name: "no image present", pod: corev1.Pod{ Spec: corev1.PodSpec{ Containers: []corev1.Container{ { - Name: SidecarContainerName, + Name: GcsFuseSidecarName, }, }, }, }, expectedPod: corev1.Pod{ Spec: corev1.PodSpec{ - Containers: []corev1.Container{ - { - Name: SidecarContainerName, - }, - }, + Containers: []corev1.Container{}, }, }, expectedImage: "", @@ -332,24 +327,28 @@ func TestParseSidecarContainerImage(t *testing.T) { }, } for _, tc := range testCases { - pod := tc.pod + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + + pod := tc.pod - image, err := parseSidecarContainerImage(&pod) - if image != tc.expectedImage { - t.Errorf(`unexpected image: want: "%s" but got: "%s"`, tc.expectedImage, image) - } - if err != nil && tc.expectedError != nil { - if err.Error() != tc.expectedError.Error() { - t.Error("for test: ", tc.testName, ", want: ", tc.expectedError.Error(), " but got: ", err.Error()) + image, err := ExtractImageAndDeleteContainer(&pod.Spec, GcsFuseSidecarName) + if image != tc.expectedImage { + t.Errorf(`unexpected image: want: "%s" but got: "%s"`, tc.expectedImage, image) + } + if err != nil && tc.expectedError != nil { + if err.Error() != tc.expectedError.Error() { + t.Error("for test: ", tc.name, ", want: ", tc.expectedError.Error(), " but got: ", err.Error()) + 
} + } else if err != nil || tc.expectedError != nil { + // if one of them is nil, both must be nil to pass + t.Error("for test: ", tc.name, ", want: ", tc.expectedError, " but got: ", err) } - } else if err != nil || tc.expectedError != nil { - // if one of them is nil, both must be nil to pass - t.Error("for test: ", tc.testName, ", want: ", tc.expectedError, " but got: ", err) - } - // verifyPod - if diff := cmp.Diff(pod, tc.expectedPod); diff != "" { - t.Errorf(`unexpected pod: diff "%s" want: "%v" but got: "%v"`, diff, tc.expectedPod, pod) - } + // verifyPod + if diff := cmp.Diff(pod, tc.expectedPod); diff != "" { + t.Errorf(`unexpected pod: diff "%s" want: "%v" but got: "%v"`, diff, tc.expectedPod, pod) + } + }) } } diff --git a/pkg/webhook/sidecar_spec.go b/pkg/webhook/sidecar_spec.go index 2ad2af741..a75bc2321 100644 --- a/pkg/webhook/sidecar_spec.go +++ b/pkg/webhook/sidecar_spec.go @@ -18,13 +18,17 @@ limitations under the License. package webhook import ( + "path/filepath" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" "k8s.io/klog/v2" "k8s.io/utils/ptr" ) const ( - SidecarContainerName = "gke-gcsfuse-sidecar" + GcsFuseSidecarName = "gke-gcsfuse-sidecar" + MetadataPrefetchSidecarName = "gke-gcsfuse-metadata-prefetch" SidecarContainerTmpVolumeName = "gke-gcsfuse-tmp" SidecarContainerTmpVolumeMountPath = "/gcsfuse-tmp" SidecarContainerBufferVolumeName = "gke-gcsfuse-buffer" @@ -32,12 +36,24 @@ const ( SidecarContainerCacheVolumeName = "gke-gcsfuse-cache" SidecarContainerCacheVolumeMountPath = "/gcsfuse-cache" + // Webhook relevant volume attributes. + gcsFuseMetadataPrefetchOnMountVolumeAttribute = "gcsfuseMetadataPrefetchOnMount" + // See the nonroot user discussion: https://github.com/GoogleContainerTools/distroless/issues/443 NobodyUID = 65534 NobodyGID = 65534 ) var ( + // Metadata prefetch container resources. 
+ metadataPrefetchCPURequest = resource.MustParse("10m") + metadataPrefetchCPULimit = resource.MustParse("50m") + metadataPrefetchMemoryRequest = resource.MustParse("10Mi") + metadataPrefetchMemoryLimit = resource.MustParse("10Mi") + metadataPrefetchEphemeralStorageRequest = resource.MustParse("10Mi") + metadataPrefetchEphemeralStorageLimit = resource.MustParse("10Mi") + + // gke-gcsfuse-sidecar volumes. tmpVolume = corev1.Volume{ Name: SidecarContainerTmpVolumeName, VolumeSource: corev1.VolumeSource{ @@ -59,6 +75,7 @@ var ( }, } + // gke-gcsfuse-sidecar volumeMounts. TmpVolumeMount = corev1.VolumeMount{ Name: SidecarContainerTmpVolumeName, MountPath: SidecarContainerTmpVolumeMountPath, @@ -89,22 +106,10 @@ func GetSidecarContainerSpec(c *Config) corev1.Container { // The sidecar container follows Restricted Pod Security Standard, // see https://kubernetes.io/docs/concepts/security/pod-security-standards/#restricted container := corev1.Container{ - Name: SidecarContainerName, + Name: GcsFuseSidecarName, Image: c.ContainerImage, ImagePullPolicy: corev1.PullPolicy(c.ImagePullPolicy), - SecurityContext: &corev1.SecurityContext{ - AllowPrivilegeEscalation: ptr.To(false), - ReadOnlyRootFilesystem: ptr.To(true), - Capabilities: &corev1.Capabilities{ - Drop: []corev1.Capability{ - corev1.Capability("ALL"), - }, - }, - SeccompProfile: &corev1.SeccompProfile{Type: corev1.SeccompProfileTypeRuntimeDefault}, - RunAsNonRoot: ptr.To(true), - RunAsUser: ptr.To(int64(NobodyUID)), - RunAsGroup: ptr.To(int64(NobodyGID)), - }, + SecurityContext: GetSecurityContext(), Args: []string{ "--v=5", }, @@ -118,6 +123,101 @@ func GetSidecarContainerSpec(c *Config) corev1.Container { return container } +// GetSecurityContext ensures the sidecar that uses it follows Restricted Pod Security Standard. 
+// See https://kubernetes.io/docs/concepts/security/pod-security-standards/#restricted +func GetSecurityContext() *corev1.SecurityContext { + return &corev1.SecurityContext{ + AllowPrivilegeEscalation: ptr.To(false), + ReadOnlyRootFilesystem: ptr.To(true), + Capabilities: &corev1.Capabilities{ + Drop: []corev1.Capability{ + corev1.Capability("ALL"), + }, + }, + SeccompProfile: &corev1.SeccompProfile{Type: corev1.SeccompProfileTypeRuntimeDefault}, + RunAsNonRoot: ptr.To(true), + RunAsUser: ptr.To(int64(NobodyUID)), + RunAsGroup: ptr.To(int64(NobodyGID)), + } +} + +func (si *SidecarInjector) GetNativeMetadataPrefetchSidecarContainerSpec(pod *corev1.Pod, image string) corev1.Container { + container := si.GetMetadataPrefetchSidecarContainerSpec(pod, image) + container.Env = append(container.Env, corev1.EnvVar{Name: "NATIVE_SIDECAR", Value: "TRUE"}) + container.RestartPolicy = ptr.To(corev1.ContainerRestartPolicyAlways) + + return container +} + +func getMetadataPrefetchConfig(image string) *Config { + return &Config{ + CPURequest: metadataPrefetchCPURequest, + CPULimit: metadataPrefetchCPULimit, + MemoryRequest: metadataPrefetchMemoryRequest, + MemoryLimit: metadataPrefetchMemoryLimit, + EphemeralStorageRequest: metadataPrefetchEphemeralStorageRequest, + EphemeralStorageLimit: metadataPrefetchEphemeralStorageLimit, + MetadataContainerImage: image, + } +} + +func (si *SidecarInjector) GetMetadataPrefetchSidecarContainerSpec(pod *corev1.Pod, image string) corev1.Container { + if pod == nil { + klog.Warning("failed to get metadata prefetch container spec: pod is nil") + + return corev1.Container{} + } + + c := getMetadataPrefetchConfig(image) + limits, requests := prepareResourceList(c) + + // The sidecar container follows Restricted Pod Security Standard, + // see https://kubernetes.io/docs/concepts/security/pod-security-standards/#restricted + container := corev1.Container{ + Name: MetadataPrefetchSidecarName, + Image: c.MetadataContainerImage, + ImagePullPolicy: 
corev1.PullPolicy(c.ImagePullPolicy), + SecurityContext: GetSecurityContext(), + Resources: corev1.ResourceRequirements{ + Limits: limits, + Requests: requests, + }, + VolumeMounts: []corev1.VolumeMount{}, + } + + for _, v := range pod.Spec.Volumes { + isGcsFuseCSIVolume, isDynamicMount, volumeAttributes, err := si.isGcsFuseCSIVolume(v, pod.Namespace) + if err != nil { + klog.Errorf("failed to determine if %s is a GcsFuseCSI backed volume: %v", v.Name, err) + } + + if isDynamicMount { + klog.Warningf("dynamic mount set for %s, this is not supported for metadata prefetch. skipping volume", v.Name) + + continue + } + + if isGcsFuseCSIVolume { + enableMetaPrefetchRaw, ok := volumeAttributes[gcsFuseMetadataPrefetchOnMountVolumeAttribute] + // We disable metadata prefetch by default, so we skip injection of volume mount when not set. + if !ok { + continue + } + + enableMetaPrefetch, err := ParseBool(enableMetaPrefetchRaw) + if err != nil { + klog.Errorf(`failed to determine if metadata prefetch is needed for volume "%s": %v`, v.Name, err) + } + + if enableMetaPrefetch { + container.VolumeMounts = append(container.VolumeMounts, corev1.VolumeMount{Name: v.Name, MountPath: filepath.Join("/volumes/", v.Name), ReadOnly: true}) + } + } + } + + return container +} + // GetSidecarContainerVolumeSpec returns volumes required by the sidecar container, // skipping the existing custom volumes. func GetSidecarContainerVolumeSpec(existingVolumes ...corev1.Volume) []corev1.Volume { @@ -155,7 +255,7 @@ func GetSidecarContainerVolumeSpec(existingVolumes ...corev1.Volume) []corev1.Vo // 1. True when either native or regular sidecar is present. // 2. True iff the sidecar present is a native sidecar container. 
func ValidatePodHasSidecarContainerInjected(pod *corev1.Pod) (bool, bool) { - return validatePodHasSidecarContainerInjected(SidecarContainerName, pod, []corev1.Volume{tmpVolume}, []corev1.VolumeMount{TmpVolumeMount}) + return validatePodHasSidecarContainerInjected(GcsFuseSidecarName, pod, []corev1.Volume{tmpVolume}, []corev1.VolumeMount{TmpVolumeMount}) } func sidecarContainerPresent(containerName string, containers []corev1.Container, volumeMounts []corev1.VolumeMount) bool { diff --git a/pkg/webhook/sidecar_spec_test.go b/pkg/webhook/sidecar_spec_test.go index da95ce0dd..cb1ec328e 100644 --- a/pkg/webhook/sidecar_spec_test.go +++ b/pkg/webhook/sidecar_spec_test.go @@ -74,7 +74,7 @@ func TestValidatePodHasSidecarContainerInjectedForAutoInjection(t *testing.T) { Spec: corev1.PodSpec{ Containers: []corev1.Container{ { - Name: SidecarContainerName, + Name: GcsFuseSidecarName, Image: FakeConfig().ContainerImage, SecurityContext: &corev1.SecurityContext{ RunAsUser: ptr.To(int64(NobodyUID)), @@ -108,7 +108,7 @@ func TestValidatePodHasSidecarContainerInjectedForAutoInjection(t *testing.T) { Spec: corev1.PodSpec{ Containers: []corev1.Container{ { - Name: SidecarContainerName, + Name: GcsFuseSidecarName, Image: FakeConfig().ContainerImage, SecurityContext: &corev1.SecurityContext{ RunAsUser: ptr.To(int64(NobodyUID)), @@ -135,7 +135,7 @@ func TestValidatePodHasSidecarContainerInjectedForAutoInjection(t *testing.T) { Spec: corev1.PodSpec{ Containers: []corev1.Container{ { - Name: SidecarContainerName, + Name: GcsFuseSidecarName, Image: "private-repo/sidecar-image", SecurityContext: &corev1.SecurityContext{ RunAsUser: ptr.To(int64(NobodyUID)), @@ -164,7 +164,7 @@ func TestValidatePodHasSidecarContainerInjectedForAutoInjection(t *testing.T) { Spec: corev1.PodSpec{ InitContainers: []corev1.Container{ { - Name: SidecarContainerName, + Name: GcsFuseSidecarName, Image: "private-repo/sidecar-image", SecurityContext: &corev1.SecurityContext{ RunAsUser: ptr.To(int64(NobodyUID)), @@ 
-194,7 +194,7 @@ func TestValidatePodHasSidecarContainerInjectedForAutoInjection(t *testing.T) { Spec: corev1.PodSpec{ Containers: []corev1.Container{ { - Name: SidecarContainerName, + Name: GcsFuseSidecarName, Image: FakeConfig().ContainerImage, SecurityContext: &corev1.SecurityContext{ RunAsUser: ptr.To(int64(1234)), @@ -237,7 +237,7 @@ func TestValidatePodHasSidecarContainerInjectedForAutoInjection(t *testing.T) { Spec: corev1.PodSpec{ Containers: []corev1.Container{ { - Name: SidecarContainerName, + Name: GcsFuseSidecarName, Image: FakeConfig().ContainerImage, SecurityContext: &corev1.SecurityContext{ RunAsUser: ptr.To(int64(NobodyUID)), @@ -266,7 +266,7 @@ func TestValidatePodHasSidecarContainerInjectedForAutoInjection(t *testing.T) { Spec: corev1.PodSpec{ Containers: []corev1.Container{ { - Name: SidecarContainerName, + Name: GcsFuseSidecarName, Image: FakeConfig().ContainerImage, SecurityContext: &corev1.SecurityContext{ RunAsUser: ptr.To(int64(NobodyUID)), diff --git a/pkg/webhook/validating_admission_policy_test.go b/pkg/webhook/validating_admission_policy_test.go index 11807af70..6b3f571d7 100644 --- a/pkg/webhook/validating_admission_policy_test.go +++ b/pkg/webhook/validating_admission_policy_test.go @@ -40,7 +40,7 @@ import ( func testPod(initContainer bool, annotation map[string]string, restartPolicy *corev1.ContainerRestartPolicy, env []corev1.EnvVar) *corev1.Pod { container := corev1.Container{ - Name: SidecarContainerName, + Name: GcsFuseSidecarName, RestartPolicy: restartPolicy, Env: env, } diff --git a/pkg/webhook/volumes.go b/pkg/webhook/volumes.go new file mode 100644 index 000000000..e242cc3df --- /dev/null +++ b/pkg/webhook/volumes.go @@ -0,0 +1,82 @@ +/* +Copyright 2018 The Kubernetes Authors. +Copyright 2024 Google LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package webhook + +import ( + corev1 "k8s.io/api/core/v1" + "k8s.io/klog/v2" +) + +const ( + gcsFuseCsiDriverName = "gcsfuse.csi.storage.gke.io" +) + +// isGcsFuseCSIVolume checks if the given volume is backed by gcsfuse csi driver. +// +// Returns the following (in order): +// - isGcsFuseCSIVolume - (bool) whether volume is backed by gcsfuse csi driver. +// - isDynamicMount - (bool) True if volume is attempting to mount all of the buckets in the project. +// - volumeAttributes (map[string]string) +// - error - non-nil only if the PVC lookup failed +func (si *SidecarInjector) isGcsFuseCSIVolume(volume corev1.Volume, namespace string) (bool, bool, map[string]string, error) { + var isDynamicMount bool + + // Check if it is ephemeral volume. + if volume.CSI != nil { + if volume.CSI.Driver == gcsFuseCsiDriverName { + // Ephemeral volume is using dynamic mounting, + // See details: https://cloud.google.com/storage/docs/gcsfuse-mount#dynamic-mount + if val, ok := volume.CSI.VolumeAttributes["bucketName"]; ok && val == "_" { + isDynamicMount = true + } + + return true, isDynamicMount, volume.CSI.VolumeAttributes, nil + } + + return false, false, nil, nil + } + + // Check if it's a persistent volume. + pvc := volume.PersistentVolumeClaim + if pvc == nil { + return false, false, nil, nil + } + pvcName := pvc.ClaimName + pvcObj, err := si.GetPVC(namespace, pvcName) + if err != nil { + return false, false, nil, err + } + + // Check if the PVC is a preprovisioned gcsfuse volume. 
+ pv, ok, err := si.GetPreprovisionCSIVolume(gcsFuseCsiDriverName, pvcObj) + if err != nil || pv == nil { + klog.Warningf("unable to determine if PVC %s/%s is a pre-provisioned gcsfuse volume: %v", namespace, pvcName, err) + + return false, false, nil, nil + } + + if ok { + if pv.Spec.CSI != nil && pv.Spec.CSI.VolumeHandle == "_" { + isDynamicMount = true + } + + return true, isDynamicMount, pv.Spec.CSI.VolumeAttributes, nil + } + + return false, false, nil, nil +} diff --git a/test/e2e/e2e_test.go b/test/e2e/e2e_test.go index e5fea509d..349ca14bb 100644 --- a/test/e2e/e2e_test.go +++ b/test/e2e/e2e_test.go @@ -111,6 +111,7 @@ var _ = ginkgo.Describe("E2E Test Suite", func() { testsuites.InitGcsFuseCSIGCSFuseIntegrationFileCacheParallelDownloadsTestSuite, testsuites.InitGcsFuseCSIIstioTestSuite, testsuites.InitGcsFuseCSIMetricsTestSuite, + testsuites.InitGcsFuseCSIMetadataPrefetchTestSuite, } testDriver := specs.InitGCSFuseCSITestDriver(c, m, *bucketLocation, *skipGcpSaTest, false) diff --git a/test/e2e/specs/specs.go b/test/e2e/specs/specs.go index 90f875862..2c7b1e87e 100644 --- a/test/e2e/specs/specs.go +++ b/test/e2e/specs/specs.go @@ -67,6 +67,8 @@ const ( EnableFileCachePrefix = "gcsfuse-csi-enable-file-cache" EnableFileCacheAndMetricsPrefix = "gcsfuse-csi-enable-file-cache-and-metrics" EnableFileCacheWithLargeCapacityPrefix = "gcsfuse-csi-enable-file-cache-large-capacity" + EnableMetadataPrefetchPrefix = "gcsfuse-csi-enable-metadata-prefetch" + EnableMetadataPrefetchPrefixForceNewBucketPrefix = "gcsfuse-csi-enable-metadata-prefetch-and-force-new-bucket" ImplicitDirsPath = "implicit-dir" InvalidVolume = "" SkipCSIBucketAccessCheckPrefix = "gcsfuse-csi-skip-bucket-access-check" @@ -264,7 +266,7 @@ func (t *TestPod) CheckSidecarNeverTerminatedAfterAWhile(ctx context.Context, is var sidecarContainerStatus corev1.ContainerStatus for _, cs := range containerStatusList { - if cs.Name == webhook.SidecarContainerName { + if cs.Name == webhook.GcsFuseSidecarName { 
sidecarContainerStatus = cs break @@ -490,7 +492,7 @@ func (t *TestPod) SetResource(cpuLimit, memoryLimit, storageLimit string) { func (t *TestPod) SetCustomSidecarContainerImage() { t.pod.Spec.Containers = append(t.pod.Spec.Containers, corev1.Container{ - Name: webhook.SidecarContainerName, + Name: webhook.GcsFuseSidecarName, Image: LastPublishedSidecarContainerImage, }) } @@ -498,15 +500,26 @@ func (t *TestPod) SetCustomSidecarContainerImage() { func (t *TestPod) VerifyCustomSidecarContainerImage(isNativeSidecar bool) { if isNativeSidecar { gomega.Expect(t.pod.Spec.InitContainers).To(gomega.HaveLen(1)) - gomega.Expect(t.pod.Spec.InitContainers[0].Name).To(gomega.Equal(webhook.SidecarContainerName)) + gomega.Expect(t.pod.Spec.InitContainers[0].Name).To(gomega.Equal(webhook.GcsFuseSidecarName)) gomega.Expect(t.pod.Spec.InitContainers[0].Image).To(gomega.Equal(LastPublishedSidecarContainerImage)) } else { gomega.Expect(t.pod.Spec.Containers).To(gomega.HaveLen(2)) - gomega.Expect(t.pod.Spec.Containers[0].Name).To(gomega.Equal(webhook.SidecarContainerName)) + gomega.Expect(t.pod.Spec.Containers[0].Name).To(gomega.Equal(webhook.GcsFuseSidecarName)) gomega.Expect(t.pod.Spec.Containers[0].Image).To(gomega.Equal(LastPublishedSidecarContainerImage)) } } +func (t *TestPod) VerifyMetadataPrefetchPresence() { + gomega.Expect(t.pod.Spec.InitContainers).To(gomega.HaveLen(2)) + gomega.Expect(t.pod.Spec.InitContainers[1].Name).To(gomega.Equal(webhook.MetadataPrefetchSidecarName)) + gomega.Expect(t.pod.Spec.Containers).ToNot(gomega.ContainElement(gomega.HaveField("Name", webhook.MetadataPrefetchSidecarName))) +} + +func (t *TestPod) VerifyMetadataPrefetchNotPresent() { + gomega.Expect(t.pod.Spec.InitContainers).ToNot(gomega.ContainElement(gomega.HaveField("Name", webhook.MetadataPrefetchSidecarName))) + gomega.Expect(t.pod.Spec.Containers).ToNot(gomega.ContainElement(gomega.HaveField("Name", webhook.MetadataPrefetchSidecarName))) +} + func (t *TestPod) 
SetInitContainerWithCommand(cmd string) { cpu, _ := resource.ParseQuantity("100m") mem, _ := resource.ParseQuantity("20Mi") @@ -1138,7 +1151,7 @@ func GetGCSFuseVersion(ctx context.Context, client clientset.Interface) string { FieldSelector: "metadata.name=gcsfusecsi-image-config", }) framework.ExpectNoError(err) - gomega.Expect(configMaps.Items).To(gomega.HaveLen(1)) + gomega.Expect(configMaps.Items).To(gomega.HaveLen(2)) sidecarImageConfig := configMaps.Items[0] image := sidecarImageConfig.Data["sidecar-image"] @@ -1157,7 +1170,7 @@ func GetGCSFuseVersion(ctx context.Context, client clientset.Interface) string { TerminationGracePeriodSeconds: ptr.To(int64(0)), Containers: []corev1.Container{ { - Name: webhook.SidecarContainerName, + Name: webhook.GcsFuseSidecarName, Image: image, }, }, @@ -1172,7 +1185,7 @@ func GetGCSFuseVersion(ctx context.Context, client clientset.Interface) string { tPod.WaitForRunning(ctx) defer tPod.Cleanup(ctx) - stdout, stderr, err := e2epod.ExecCommandInContainerWithFullOutput(f, tPod.pod.Name, webhook.SidecarContainerName, "/gcsfuse", "--version") + stdout, stderr, err := e2epod.ExecCommandInContainerWithFullOutput(f, tPod.pod.Name, webhook.GcsFuseSidecarName, "/gcsfuse", "--version") framework.ExpectNoError(err, "/gcsfuse --version should succeed, but failed with error message %q\nstdout: %s\nstderr: %s", err, stdout, stderr) diff --git a/test/e2e/specs/testdriver.go b/test/e2e/specs/testdriver.go index 0dbeeb18a..f582912f8 100644 --- a/test/e2e/specs/testdriver.go +++ b/test/e2e/specs/testdriver.go @@ -57,6 +57,7 @@ type gcsVolume struct { shared bool readOnly bool skipBucketAccessCheck bool + metadataPrefetch bool enableMetrics bool } @@ -152,7 +153,7 @@ func (n *GCSFuseCSITestDriver) CreateVolume(ctx context.Context, config *storage bucketName = uuid.NewString() case InvalidVolumePrefix, SkipCSIBucketAccessCheckAndInvalidVolumePrefix: bucketName = InvalidVolume - case ForceNewBucketPrefix, EnableFileCacheForceNewBucketPrefix, 
EnableFileCacheForceNewBucketAndMetricsPrefix: + case ForceNewBucketPrefix, EnableFileCacheForceNewBucketPrefix, EnableMetadataPrefetchPrefixForceNewBucketPrefix, EnableFileCacheForceNewBucketAndMetricsPrefix: bucketName = n.createBucket(ctx, config.Framework.Namespace.Name) case MultipleBucketsPrefix: isMultipleBucketsPrefix = true @@ -224,6 +225,9 @@ func (n *GCSFuseCSITestDriver) CreateVolume(ctx context.Context, config *storage CreateImplicitDirInBucket(ImplicitDirsPath, bucketName) mountOptions += ",implicit-dirs" v.skipBucketAccessCheck = true + case EnableMetadataPrefetchPrefix: + mountOptions += ",file-system:kernel-list-cache-ttl-secs:-1" + v.metadataPrefetch = true } v.mountOptions = mountOptions @@ -258,6 +262,13 @@ func (n *GCSFuseCSITestDriver) GetPersistentVolumeSource(readOnly bool, _ string driver.VolumeContextKeyMountOptions: gv.mountOptions, } + if gv.metadataPrefetch { + va["gcsfuseMetadataPrefetchOnMount"] = "true" + va["metadataStatCacheCapacity"] = "-1" + va["metadataTypeCacheCapacity"] = "-1" + va["metadataCacheTtlSeconds"] = "-1" + } + if gv.fileCacheCapacity != "" { va[driver.VolumeContextKeyFileCacheCapacity] = gv.fileCacheCapacity } @@ -297,6 +308,13 @@ func (n *GCSFuseCSITestDriver) GetVolume(config *storageframework.PerTestConfig, va[driver.VolumeContextKeySkipCSIBucketAccessCheck] = util.TrueStr } + if gv.metadataPrefetch { + va["gcsfuseMetadataPrefetchOnMount"] = "true" + va["metadataStatCacheCapacity"] = "-1" + va["metadataTypeCacheCapacity"] = "-1" + va["metadataCacheTtlSeconds"] = "-1" + } + if gv.enableMetrics { va[driver.VolumeContextKeyDisableMetrics] = util.FalseStr } diff --git a/test/e2e/testsuites/failed_mount.go b/test/e2e/testsuites/failed_mount.go index b6250f603..2f1c8809d 100644 --- a/test/e2e/testsuites/failed_mount.go +++ b/test/e2e/testsuites/failed_mount.go @@ -20,12 +20,16 @@ package testsuites import ( "context" "fmt" + "os" + "strconv" 
"github.com/googlecloudplatform/gcs-fuse-csi-driver/pkg/cloud_provider/storage" "github.com/googlecloudplatform/gcs-fuse-csi-driver/test/e2e/specs" + "github.com/googlecloudplatform/gcs-fuse-csi-driver/test/e2e/utils" "github.com/onsi/ginkgo/v2" "google.golang.org/grpc/codes" utilerrors "k8s.io/apimachinery/pkg/util/errors" + "k8s.io/klog/v2" "k8s.io/kubernetes/test/e2e/framework" e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper" e2evolume "k8s.io/kubernetes/test/e2e/framework/volume" @@ -59,6 +63,11 @@ func (t *gcsFuseCSIFailedMountTestSuite) SkipUnsupportedTests(_ storageframework } func (t *gcsFuseCSIFailedMountTestSuite) DefineTests(driver storageframework.TestDriver, pattern storageframework.TestPattern) { + envVar := os.Getenv(utils.TestWithNativeSidecarEnvVar) + supportsNativeSidecar, err := strconv.ParseBool(envVar) + if err != nil { + klog.Fatalf(`env variable "%s" could not be converted to boolean`, envVar) + } type local struct { config *storageframework.PerTestConfig volumeResource *storageframework.VolumeResource @@ -128,6 +137,12 @@ func (t *gcsFuseCSIFailedMountTestSuite) DefineTests(driver storageframework.Tes } testCaseNonExistentBucket(specs.FakeVolumePrefix) }) + ginkgo.It("[metadata prefetch]should fail when the specified GCS bucket does not exist", func() { + if pattern.VolType == storageframework.DynamicPV || !supportsNativeSidecar { + e2eskipper.Skipf("skip for volume type %v", storageframework.DynamicPV) + } + testCaseNonExistentBucket(specs.EnableMetadataPrefetchPrefix) // mirrors the FakeVolumePrefix test case above + }) ginkgo.It("[csi-skip-bucket-access-check] should fail when the specified GCS bucket does not exist", func() { if pattern.VolType == storageframework.DynamicPV { @@ -207,6 +222,12 @@ func (t *gcsFuseCSIFailedMountTestSuite) DefineTests(driver storageframework.Tes ginkgo.It("should fail when the specified service account does not have access to the GCS bucket", func() { testCaseSAInsufficientAccess("") }) + ginkgo.It("[metadata prefetch] should 
fail when the specified service account does not have access to the GCS bucket", func() { + if pattern.VolType == storageframework.DynamicPV || !supportsNativeSidecar { + e2eskipper.Skipf("skip for volume type %v", storageframework.DynamicPV) + } + testCaseSAInsufficientAccess(specs.EnableMetadataPrefetchPrefix) + }) ginkgo.It("[csi-skip-bucket-access-check] should fail when the specified service account does not have access to the GCS bucket", func() { testCaseSAInsufficientAccess(specs.SkipCSIBucketAccessCheckPrefix) @@ -278,6 +299,12 @@ func (t *gcsFuseCSIFailedMountTestSuite) DefineTests(driver storageframework.Tes ginkgo.It("should fail when the sidecar container is not injected", func() { testCaseSidecarNotInjected("") }) + ginkgo.It("[metadata prefetch] should fail when the sidecar container is not injected", func() { + if pattern.VolType == storageframework.DynamicPV || !supportsNativeSidecar { + e2eskipper.Skipf("skip for volume type %v", storageframework.DynamicPV) + } + testCaseSidecarNotInjected(specs.EnableMetadataPrefetchPrefix) + }) ginkgo.It("[csi-skip-bucket-access-check] should fail when the sidecar container is not injected", func() { testCaseSidecarNotInjected(specs.SkipCSIBucketAccessCheckPrefix) @@ -308,6 +335,12 @@ func (t *gcsFuseCSIFailedMountTestSuite) DefineTests(driver storageframework.Tes ginkgo.It("should fail when the gcsfuse processes got killed due to OOM", func() { testCaseGCSFuseOOM("") }) + ginkgo.It("[metadata prefetch] should fail when the gcsfuse processes got killed due to OOM", func() { + if pattern.VolType == storageframework.DynamicPV || !supportsNativeSidecar { + e2eskipper.Skipf("skip for volume type %v", storageframework.DynamicPV) + } + testCaseGCSFuseOOM(specs.EnableMetadataPrefetchPrefix) + }) ginkgo.It("[csi-skip-bucket-access-check] should fail when the gcsfuse processes got killed due to OOM", func() { testCaseGCSFuseOOM(specs.SkipCSIBucketAccessCheckPrefix) @@ -334,6 +367,13 @@ func (t 
*gcsFuseCSIFailedMountTestSuite) DefineTests(driver storageframework.Tes testcaseInvalidMountOptions(specs.InvalidMountOptionsVolumePrefix) }) + ginkgo.It("[metadata prefetch] should fail when invalid mount options are passed", func() { + if pattern.VolType == storageframework.DynamicPV || !supportsNativeSidecar { + e2eskipper.Skipf("skip for volume type %v", storageframework.DynamicPV) + } + testcaseInvalidMountOptions(specs.EnableMetadataPrefetchPrefix) // mirrors the InvalidMountOptionsVolumePrefix test case above + }) + ginkgo.It("[csi-skip-bucket-access-check] should fail when invalid mount options are passed", func() { testcaseInvalidMountOptions(specs.SkipCSIBucketAccessCheckAndInvalidMountOptionsVolumePrefix) }) diff --git a/test/e2e/testsuites/file_cache.go b/test/e2e/testsuites/file_cache.go index a31576bfe..f48025a1d 100644 --- a/test/e2e/testsuites/file_cache.go +++ b/test/e2e/testsuites/file_cache.go @@ -263,7 +263,7 @@ func (t *gcsFuseCSIFileCacheTestSuite) DefineTests(driver storageframework.TestD tPod.VerifyExecInPodSucceed(f, specs.TesterContainerName, fmt.Sprintf("mount | grep %v | grep rw,", mountPath)) tPod.VerifyExecInPodSucceed(f, specs.TesterContainerName, fmt.Sprintf("cat %v/%v > /dev/null", mountPath, fileName)) - tPod.WaitForLog(ctx, webhook.SidecarContainerName, "while inserting into the cache: size of the entry is more than the cache's maxSize") + tPod.WaitForLog(ctx, webhook.GcsFuseSidecarName, "while inserting into the cache: size of the entry is more than the cache's maxSize") }) ginkgo.It("should have cache miss when the fileCacheCapacity is larger than underlying storage", func() { @@ -300,6 +300,6 @@ func (t *gcsFuseCSIFileCacheTestSuite) DefineTests(driver storageframework.TestD tPod.VerifyExecInPodSucceed(f, specs.TesterContainerName, fmt.Sprintf("mount | grep %v | grep rw,", mountPath)) tPod.VerifyExecInPodSucceed(f, specs.TesterContainerName, fmt.Sprintf("cat %v/%v > /dev/null", mountPath, fileName)) - tPod.WaitForLog(ctx, webhook.SidecarContainerName, 
"no space left on device") + tPod.WaitForLog(ctx, webhook.GcsFuseSidecarName, "no space left on device") }) } diff --git a/test/e2e/testsuites/istio.go b/test/e2e/testsuites/istio.go index b7940d7b2..e624c4e84 100644 --- a/test/e2e/testsuites/istio.go +++ b/test/e2e/testsuites/istio.go @@ -20,12 +20,17 @@ package testsuites import ( "context" "fmt" + "os" + "strconv" "github.com/googlecloudplatform/gcs-fuse-csi-driver/test/e2e/specs" + "github.com/googlecloudplatform/gcs-fuse-csi-driver/test/e2e/utils" "github.com/onsi/ginkgo/v2" "google.golang.org/grpc/codes" utilerrors "k8s.io/apimachinery/pkg/util/errors" + "k8s.io/klog/v2" "k8s.io/kubernetes/test/e2e/framework" + e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper" e2evolume "k8s.io/kubernetes/test/e2e/framework/volume" storageframework "k8s.io/kubernetes/test/e2e/storage/framework" admissionapi "k8s.io/pod-security-admission/api" @@ -57,6 +62,11 @@ func (t *gcsFuseCSIIstioTestSuite) SkipUnsupportedTests(_ storageframework.TestD } func (t *gcsFuseCSIIstioTestSuite) DefineTests(driver storageframework.TestDriver, pattern storageframework.TestPattern) { + envVar := os.Getenv(utils.TestWithNativeSidecarEnvVar) + supportsNativeSidecar, err := strconv.ParseBool(envVar) + if err != nil { + klog.Fatalf(`env variable "%s" could not be converted to boolean`, envVar) + } type local struct { config *storageframework.PerTestConfig volumeResource *storageframework.VolumeResource @@ -83,8 +93,8 @@ func (t *gcsFuseCSIIstioTestSuite) DefineTests(driver storageframework.TestDrive framework.ExpectNoError(err, "while cleaning up") } - testGCSFuseWithIstio := func(holdApplicationUntilProxyStarts, registryOnly bool) { - init() + testGCSFuseWithIstio := func(configPrefix string, holdApplicationUntilProxyStarts, registryOnly bool) { + init(configPrefix) defer cleanup() ginkgo.By("Configuring the pod") @@ -120,15 +130,27 @@ func (t *gcsFuseCSIIstioTestSuite) DefineTests(driver storageframework.TestDrive } ginkgo.It("should store 
data with istio injected at index 0", func() { - testGCSFuseWithIstio(true, false) + testGCSFuseWithIstio("", true, false) + }) + ginkgo.It("[metadata prefetch] should store data with istio injected at index 0", func() { + if pattern.VolType == storageframework.DynamicPV || !supportsNativeSidecar { + e2eskipper.Skipf("skip for volume type %v", storageframework.DynamicPV) + } + testGCSFuseWithIstio(specs.EnableMetadataPrefetchPrefix, true, false) }) ginkgo.It("[flaky] should store data with istio injected at the last index", func() { - testGCSFuseWithIstio(false, false) + testGCSFuseWithIstio("", false, false) }) ginkgo.It("should store data with istio registry only outbound traffic policy mode", func() { - testGCSFuseWithIstio(true, true) + testGCSFuseWithIstio("", true, true) + }) + ginkgo.It("[metadata prefetch] should store data with istio registry only outbound traffic policy mode", func() { + if pattern.VolType == storageframework.DynamicPV || !supportsNativeSidecar { + e2eskipper.Skipf("skip for volume type %v", storageframework.DynamicPV) + } + testGCSFuseWithIstio(specs.EnableMetadataPrefetchPrefix, true, true) }) ginkgo.It("[flaky] should fail with istio registry only outbound traffic policy mode missing Pod annotation", func() { diff --git a/test/e2e/testsuites/metadata_prefetch.go b/test/e2e/testsuites/metadata_prefetch.go new file mode 100644 index 000000000..48ba49004 --- /dev/null +++ b/test/e2e/testsuites/metadata_prefetch.go @@ -0,0 +1,150 @@ +/* +Copyright 2018 The Kubernetes Authors. +Copyright 2024 Google LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package testsuites + +import ( + "context" + "fmt" + "os" + "strconv" + + "github.com/googlecloudplatform/gcs-fuse-csi-driver/test/e2e/specs" + "github.com/googlecloudplatform/gcs-fuse-csi-driver/test/e2e/utils" + "github.com/onsi/ginkgo/v2" + utilerrors "k8s.io/apimachinery/pkg/util/errors" + "k8s.io/klog/v2" + "k8s.io/kubernetes/test/e2e/framework" + e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper" + e2evolume "k8s.io/kubernetes/test/e2e/framework/volume" + storageframework "k8s.io/kubernetes/test/e2e/storage/framework" + admissionapi "k8s.io/pod-security-admission/api" +) + +type gcsFuseCSIMetadataPrefetchTestSuite struct { + tsInfo storageframework.TestSuiteInfo +} + +// InitGcsFuseCSIMetadataPrefetchTestSuite returns gcsFuseCSIMetadataPrefetchTestSuite that implements TestSuite interface. +func InitGcsFuseCSIMetadataPrefetchTestSuite() storageframework.TestSuite { + return &gcsFuseCSIMetadataPrefetchTestSuite{ + tsInfo: storageframework.TestSuiteInfo{ + Name: "metadataPrefetch", + TestPatterns: []storageframework.TestPattern{ + storageframework.DefaultFsCSIEphemeralVolume, + storageframework.DefaultFsPreprovisionedPV, + }, + }, + } +} + +func (t *gcsFuseCSIMetadataPrefetchTestSuite) GetTestSuiteInfo() storageframework.TestSuiteInfo { + return t.tsInfo +} + +func (t *gcsFuseCSIMetadataPrefetchTestSuite) SkipUnsupportedTests(_ storageframework.TestDriver, _ storageframework.TestPattern) { +} + +func (t *gcsFuseCSIMetadataPrefetchTestSuite) DefineTests(driver storageframework.TestDriver, pattern storageframework.TestPattern) { + envVar := os.Getenv(utils.TestWithNativeSidecarEnvVar) + supportsNativeSidecar, err := strconv.ParseBool(envVar) + if err != nil { + klog.Fatalf(`env variable "%s" could not be converted to boolean`, envVar) + } + + type local struct { + config *storageframework.PerTestConfig + volumeResource 
*storageframework.VolumeResource + } + var l local + ctx := context.Background() + + // Beware that it also registers an AfterEach which renders f unusable. Any code using + // f must run inside an It or Context callback. + f := framework.NewFrameworkWithCustomTimeouts("volumes", storageframework.GetDriverTimeouts(driver)) + f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged + + init := func(configPrefix ...string) { + l = local{} + l.config = driver.PrepareTest(ctx, f) + if len(configPrefix) > 0 { + l.config.Prefix = configPrefix[0] + } + l.volumeResource = storageframework.CreateVolumeResource(ctx, driver, l.config, pattern, e2evolume.SizeRange{}) + } + + cleanup := func() { + var cleanUpErrs []error + cleanUpErrs = append(cleanUpErrs, l.volumeResource.CleanupResource(ctx)) + err := utilerrors.NewAggregate(cleanUpErrs) + framework.ExpectNoError(err, "while cleaning up") + } + + testCaseStoreAndRetainData := func(configPrefix string) { + init(configPrefix) + defer cleanup() + + ginkgo.By("Configuring the first pod") + tPod1 := specs.NewTestPod(f.ClientSet, f.Namespace) + tPod1.SetupVolume(l.volumeResource, volumeName, mountPath, false) + + ginkgo.By("Deploying the first pod") + tPod1.Create(ctx) + + ginkgo.By("Checking that the first pod is running") + tPod1.WaitForRunning(ctx) + + ginkgo.By("Checking that the first pod command exits with no error") + tPod1.VerifyExecInPodSucceed(f, specs.TesterContainerName, fmt.Sprintf("mount | grep %v | grep rw,", mountPath)) + tPod1.VerifyExecInPodSucceed(f, specs.TesterContainerName, fmt.Sprintf("echo 'hello world' > %v/data && grep 'hello world' %v/data", mountPath, mountPath)) + + ginkgo.By("Deleting the first pod") + tPod1.Cleanup(ctx) + + ginkgo.By("Configuring the second pod") + tPod2 := specs.NewTestPod(f.ClientSet, f.Namespace) + tPod2.SetupVolume(l.volumeResource, volumeName, mountPath, false) + + ginkgo.By("Deploying the second pod") + tPod2.Create(ctx) + defer tPod2.Cleanup(ctx) + + 
ginkgo.By("Checking that the second pod is running") + tPod2.WaitForRunning(ctx) + + ginkgo.By("Checking that the second pod command exits with no error") + tPod2.VerifyExecInPodSucceed(f, specs.TesterContainerName, fmt.Sprintf("mount | grep %v | grep rw,", mountPath)) + tPod2.VerifyExecInPodSucceed(f, specs.TesterContainerName, fmt.Sprintf("grep 'hello world' %v/data", mountPath)) + + if supportsNativeSidecar { + ginkgo.By("Checking metadata prefetch sidecar present on the second pod") + tPod2.VerifyMetadataPrefetchPresence() + } else { + ginkgo.By("Checking metadata prefetch sidecar not present on the second pod") + tPod2.VerifyMetadataPrefetchNotPresent() + } + + tPod2.Cleanup(ctx) + } + + ginkgo.It("[metadata prefetch] should store data and retain the data", func() { + if pattern.VolType == storageframework.DynamicPV { + e2eskipper.Skipf("skip for volume type %v", storageframework.DynamicPV) + } + testCaseStoreAndRetainData(specs.EnableMetadataPrefetchPrefix) + }) +} diff --git a/test/e2e/testsuites/multivolume.go b/test/e2e/testsuites/multivolume.go index b8f0f6b31..1f6676cdb 100644 --- a/test/e2e/testsuites/multivolume.go +++ b/test/e2e/testsuites/multivolume.go @@ -20,12 +20,16 @@ package testsuites import ( "context" "fmt" + "os" + "strconv" "strings" "time" "github.com/googlecloudplatform/gcs-fuse-csi-driver/test/e2e/specs" + "github.com/googlecloudplatform/gcs-fuse-csi-driver/test/e2e/utils" "github.com/onsi/ginkgo/v2" utilerrors "k8s.io/apimachinery/pkg/util/errors" + "k8s.io/klog/v2" "k8s.io/kubernetes/test/e2e/framework" e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper" e2evolume "k8s.io/kubernetes/test/e2e/framework/volume" @@ -58,6 +62,11 @@ func (t *gcsFuseCSIMultiVolumeTestSuite) SkipUnsupportedTests(_ storageframework } func (t *gcsFuseCSIMultiVolumeTestSuite) DefineTests(driver storageframework.TestDriver, pattern storageframework.TestPattern) { + envVar := os.Getenv(utils.TestWithNativeSidecarEnvVar) + supportsNativeSidecar, err := 
strconv.ParseBool(envVar) + if err != nil { + klog.Fatalf(`env variable "%s" could not be converted to boolean`, envVar) + } type local struct { config *storageframework.PerTestConfig volumeResourceList []*storageframework.VolumeResource @@ -248,6 +257,15 @@ func (t *gcsFuseCSIMultiVolumeTestSuite) DefineTests(driver storageframework.Tes testOnePodTwoVols() }) + ginkgo.It("[metadata prefetch] should access multiple volumes backed by different buckets from the same Pod", func() { + if pattern.VolType == storageframework.DynamicPV || !supportsNativeSidecar { + e2eskipper.Skipf("skip for volume type %v", storageframework.DynamicPV) + } + init(2, specs.EnableMetadataPrefetchPrefixForceNewBucketPrefix) + defer cleanup() + + testOnePodTwoVols() + }) // This tests below configuration: // [pod1] diff --git a/test/e2e/testsuites/volumes.go b/test/e2e/testsuites/volumes.go index 5da193115..45e524d6f 100644 --- a/test/e2e/testsuites/volumes.go +++ b/test/e2e/testsuites/volumes.go @@ -147,6 +147,13 @@ func (t *gcsFuseCSIVolumesTestSuite) DefineTests(driver storageframework.TestDri testCaseStoreAndRetainData(specs.SkipCSIBucketAccessCheckPrefix) }) + ginkgo.It("[metadata prefetch] should store data and retain the data", func() { + if pattern.VolType == storageframework.DynamicPV || !supportsNativeSidecar { + e2eskipper.Skipf("skip for volume type %v", storageframework.DynamicPV) + } + testCaseStoreAndRetainData(specs.EnableMetadataPrefetchPrefix) + }) + testCaseReadOnlyFailedWrite := func(configPrefix string) { init(configPrefix) defer cleanup() @@ -198,6 +205,12 @@ func (t *gcsFuseCSIVolumesTestSuite) DefineTests(driver storageframework.TestDri ginkgo.It("[read-only][csi-skip-bucket-access-check] should fail when write", func() { testCaseReadOnlyFailedWrite(specs.SkipCSIBucketAccessCheckPrefix) }) + ginkgo.It("[read-only][metadata prefetch] should fail when write", func() { + if pattern.VolType == storageframework.DynamicPV || !supportsNativeSidecar { + e2eskipper.Skipf("skip 
for volume type %v", storageframework.DynamicPV) + } + testCaseReadOnlyFailedWrite(specs.EnableMetadataPrefetchPrefix) + }) testCaseStoreRetainData := func(configPrefix string, uid, gid, fsgroup int) { init(configPrefix) @@ -252,6 +265,13 @@ func (t *gcsFuseCSIVolumesTestSuite) DefineTests(driver storageframework.TestDri testCaseStoreRetainData(specs.SkipCSIBucketAccessCheckPrefix, 1001, 2002, 3003) }) + ginkgo.It("[metadata prefetch] should store data and retain the data", func() { + if pattern.VolType == storageframework.DynamicPV || !supportsNativeSidecar { + e2eskipper.Skipf("skip for volume type %v", storageframework.DynamicPV) + } + testCaseStoreRetainData(specs.EnableMetadataPrefetchPrefix, 1001, 2002, 3003) + }) + testCaseImplicitDir := func(configPrefix string) { init(configPrefix) defer cleanup() @@ -316,6 +336,12 @@ func (t *gcsFuseCSIVolumesTestSuite) DefineTests(driver storageframework.TestDri ginkgo.It("[csi-skip-bucket-access-check] should store data using custom sidecar container image", func() { testCaseStoreDataCustomContainerImage(specs.SkipCSIBucketAccessCheckPrefix) }) + ginkgo.It("[metadata prefetch] should store data using custom sidecar container image", func() { + if pattern.VolType == storageframework.DynamicPV || !supportsNativeSidecar { + e2eskipper.Skipf("skip for volume type %v", storageframework.DynamicPV) + } + testCaseStoreDataCustomContainerImage(specs.EnableMetadataPrefetchPrefix) + }) testCaseCustomBufferVol := func(configPrefix string) { init(configPrefix) @@ -380,4 +406,10 @@ func (t *gcsFuseCSIVolumesTestSuite) DefineTests(driver storageframework.TestDri ginkgo.It("[csi-skip-bucket-access-check] should store data and retain the data in init container", func() { testCaseStoreDataInitContainer(specs.SkipCSIBucketAccessCheckPrefix) }) + ginkgo.It("[metadata prefetch] should store data and retain the data in init container", func() { + if pattern.VolType == storageframework.DynamicPV || !supportsNativeSidecar { + 
e2eskipper.Skipf("skip for volume type %v", storageframework.DynamicPV) + } + testCaseStoreDataInitContainer(specs.EnableMetadataPrefetchPrefix) + }) } diff --git a/test/e2e/utils/handler.go b/test/e2e/utils/handler.go index 4f9b69cd1..d62284a9d 100644 --- a/test/e2e/utils/handler.go +++ b/test/e2e/utils/handler.go @@ -220,10 +220,13 @@ func generateTestSkip(testParams *TestParameters) string { if !testParams.SupportsNativeSidecar { skipTests = append(skipTests, "init.container", "fast.termination") + skipTests = append(skipTests, "metadata.prefetch") } if testParams.UseGKEManagedDriver { skipTests = append(skipTests, "metrics") + // TODO(jaimebz): Skip this test until Managed Driver has changes released. + skipTests = append(skipTests, "metadata.prefetch") // TODO(saikatroyc) remove this skip when GCSFuse CSI v1.4.3 is back-ported to the below GKE versions. if strings.HasPrefix(testParams.GkeClusterVersion, "1.27") || strings.HasPrefix(testParams.GkeClusterVersion, "1.28") {