Skip to content

Commit

Permalink
add peerpod controller
Browse files Browse the repository at this point in the history
Fixes: #872

Signed-off-by: Cathy Avery <[email protected]>
  • Loading branch information
caavery committed Oct 5, 2023
1 parent 6f66f63 commit 92eb08a
Show file tree
Hide file tree
Showing 17 changed files with 368 additions and 0 deletions.
39 changes: 39 additions & 0 deletions Dockerfile.peerpods-libvirt
Original file line number Diff line number Diff line change
@@ -0,0 +1,39 @@
# Golang image and version (defaults are provided).
# Use e.g. `golang` for multi-arch support.
ARG IMG_NAME
ARG IMG_VERSION

# Build the manager binary
FROM ${IMG_NAME:-golang}:${IMG_VERSION:-1.20} AS builder

WORKDIR /workspace
# Copy the Go Modules manifests
COPY go.mod go.mod
COPY go.sum go.sum
# cache deps before building and copying source so that we don't need to re-download as much
# and so that source changes don't invalidate our downloaded layer
RUN go mod download

# Copy the go source
COPY main.go main.go
COPY api/ api/
COPY controllers/ controllers/

# The container running the operator/controllers needs the libvirt libraries,
# as the peerpod-ctrl needs to dynamically link with libvirt.
# Remove the apt lists afterwards to keep the layer small.
RUN apt-get update -y && \
    apt-get install -y libvirt-dev && \
    rm -rf /var/lib/apt/lists/*

# Build
RUN CGO_ENABLED=1 GOOS=linux go build -tags=libvirt -a -o manager main.go

# Libvirt cannot be installed to distroless for packaging so remove it for now
# Use distroless as minimal base image to package the manager binary
# Refer to https://github.com/GoogleContainerTools/distroless for more details
# FROM gcr.io/distroless/static:nonroot

FROM ${IMG_NAME:-golang}:${IMG_VERSION:-1.20}
# The runtime image also needs the libvirt shared libraries for the
# dynamically linked manager binary; clean the apt lists here as well.
RUN apt-get update -y && \
    apt-get install -y libvirt-dev && \
    rm -rf /var/lib/apt/lists/*
WORKDIR /
COPY --from=builder /workspace/manager .

ENTRYPOINT ["/manager"]
5 changes: 5 additions & 0 deletions Makefile
Original file line number Diff line number Diff line change
Expand Up @@ -141,7 +141,12 @@ run: manifests generate fmt vet ## Run a controller from your host.

.PHONY: docker-build
docker-build: test ## Build docker image with the manager.
# When PEERPODS is set (to any non-empty value), build the image that bundles
# the peerpod-ctrl and its libvirt runtime dependencies; otherwise build the
# default manager image.
ifneq (, $(PEERPODS))
	@echo PEERPODS is enabled
	docker build -t ${IMG} -f Dockerfile.peerpods-libvirt .
else
	docker build -t ${IMG} .
endif

.PHONY: docker-push
docker-push: ## Push docker image with the manager.
Expand Down
8 changes: 8 additions & 0 deletions PROJECT
Original file line number Diff line number Diff line change
Expand Up @@ -14,4 +14,12 @@ resources:
kind: CcRuntime
path: github.com/confidential-containers/operator/api/v1beta1
version: v1beta1
- api:
crdVersion: v1
namespaced: true
controller: true
domain: confidentialcontainers.org
kind: PeerPod
path: github.com/confidential-containers/cloud-api-adaptor/peerpod-ctrl/api/v1alpha1
version: v1alpha1
version: "3"
1 change: 1 addition & 0 deletions config/crd/kustomization.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -7,6 +7,7 @@ kind: Kustomization
resources:
- bases/confidentialcontainers.org_ccruntimes.yaml
- github.com/confidential-containers/cloud-api-adaptor//peerpodconfig-ctrl/config/crd?ref=v0.7.0
- github.com/confidential-containers/cloud-api-adaptor//peerpod-ctrl/config/crd?ref=v0.7.0
#+kubebuilder:scaffold:crdkustomizeresource

patches:
Expand Down
3 changes: 3 additions & 0 deletions config/default/kustomization.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -38,6 +38,9 @@ patches:
#- manager_config_patch.yaml

# [PEERPODS] To enable the peerpod controllers uncomment this section.
#- path: peerpods/runasnonroot.yaml
#- path: peerpods/ssh-volume.yaml
#- path: peerpods/ssh-mount.yaml
#- path: peerpods/peerpods-namespace.yaml
#- path: peerpods/enable-peerpods.yaml
# target:
Expand Down
10 changes: 10 additions & 0 deletions config/default/peerpods/runasnonroot.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,10 @@
---
# Strategic-merge patch for the operator Deployment used when peer pods are
# enabled: it relaxes the pod securityContext so the manager may run as root.
# NOTE(review): presumably required by the libvirt-linked peerpod-ctrl —
# confirm against the enable-peerpods patch set.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: controller-manager
  namespace: system
spec:
  template:
    spec:
      securityContext:
        runAsNonRoot: false
14 changes: 14 additions & 0 deletions config/default/peerpods/ssh-mount.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,14 @@
---
# Strategic-merge patch for the operator Deployment: mounts the "ssh" volume
# (defined in the companion ssh-volume.yaml patch) read-only into the manager
# container at /root/.ssh/.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: controller-manager
  namespace: system
spec:
  template:
    spec:
      containers:
        - name: manager
          volumeMounts:
            - mountPath: /root/.ssh/
              name: ssh
              readOnly: true
14 changes: 14 additions & 0 deletions config/default/peerpods/ssh-volume.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,14 @@
---
# Strategic-merge patch for the operator Deployment: defines the "ssh" volume
# backed by the (optional) ssh-key-secret Secret.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: controller-manager
  namespace: system
spec:
  template:
    spec:
      volumes:
        - name: ssh
          secret:
            # 384 decimal == 0600 octal: key files readable/writable by owner only.
            defaultMode: 384
            # optional: the Deployment still starts if the Secret is absent.
            optional: true
            secretName: ssh-key-secret
29 changes: 29 additions & 0 deletions config/rbac/caa_rbac.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,29 @@
# This file is based on https://github.com/confidential-containers/cloud-api-adaptor/blob/staging/install/rbac/peer-pod.yaml
# It adds the required rules to the default ServiceAccount, which is used by
# the cloud-api-adaptor (CAA) daemon.
# When an owner reference is set, both objects must be in the same namespace;
# hence CAA needs cluster-wide permissions to support pods in any namespace.
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: caa-role
rules:
  - apiGroups: [""]
    resources: ["pods", "pods/finalizers"]
    verbs: ["get", "create", "patch", "update"]
  - apiGroups: ["confidentialcontainers.org"]
    resources: ["peerpods", "pods"]
    verbs: ["create", "patch", "update"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: caa-rolebinding
subjects:
  - kind: ServiceAccount
    name: default
    namespace: system
roleRef:
  kind: ClusterRole
  name: caa-role
  apiGroup: rbac.authorization.k8s.io
2 changes: 2 additions & 0 deletions config/rbac/ccruntime_editor_role.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -10,6 +10,7 @@ rules:
resources:
- ccruntimes
- peerpodconfigs
- peerpods
verbs:
- create
- delete
Expand All @@ -24,5 +25,6 @@ rules:
resources:
- ccruntimes/status
- peerpodconfigs/status
- peerpods/status
verbs:
- get
2 changes: 2 additions & 0 deletions config/rbac/ccruntime_viewer_role.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -10,6 +10,7 @@ rules:
resources:
- ccruntimes
- peerpodconfigs
- peerpods
verbs:
- get
- list
Expand All @@ -20,5 +21,6 @@ rules:
resources:
- ccruntimes/status
- peerpodconfigs/status
- peerpods/status
verbs:
- get
3 changes: 3 additions & 0 deletions config/rbac/kustomization.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -16,3 +16,6 @@ resources:
- auth_proxy_role.yaml
- auth_proxy_role_binding.yaml
- auth_proxy_client_clusterrole.yaml
# the following is custom rbac manifests required for
# cloud-api-adaptor when peerpod-ctrl is used
- caa_rbac.yaml
26 changes: 26 additions & 0 deletions config/rbac/role.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -102,6 +102,32 @@ rules:
- get
- patch
- update
- apiGroups:
- confidentialcontainers.org
resources:
- peerpods
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- confidentialcontainers.org
resources:
- peerpods/finalizers
verbs:
- update
- apiGroups:
- confidentialcontainers.org
resources:
- peerpods/status
verbs:
- get
- patch
- update
- apiGroups:
- ""
resources:
Expand Down
3 changes: 3 additions & 0 deletions controllers/ccruntime_controller.go
Original file line number Diff line number Diff line change
Expand Up @@ -73,6 +73,9 @@ const (
//+kubebuilder:rbac:groups=confidentialcontainers.org,resources=peerpodconfigs,verbs=get;list;watch;create;update;patch;delete
//+kubebuilder:rbac:groups=confidentialcontainers.org,resources=peerpodconfigs/status,verbs=get;update;patch
//+kubebuilder:rbac:groups=confidentialcontainers.org,resources=peerpodconfigs/finalizers,verbs=update
//+kubebuilder:rbac:groups=confidentialcontainers.org,resources=peerpods,verbs=get;list;watch;create;update;patch;delete
//+kubebuilder:rbac:groups=confidentialcontainers.org,resources=peerpods/status,verbs=get;update;patch
//+kubebuilder:rbac:groups=confidentialcontainers.org,resources=peerpods/finalizers,verbs=update
//+kubebuilder:rbac:groups="",resources=configmaps,verbs=create;get;update;list;watch
//+kubebuilder:rbac:groups="",resources=secrets,verbs=create;get;update;list;watch

Expand Down
112 changes: 112 additions & 0 deletions docs/PEERPODS-TEST.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,112 @@
## Introduction
This document describes testing to confirm that the operators are functioning.
These tests were performed using the libvirt provider as an example.

## Check that the peerpod-config CR was created
```
kubectl get peerpodconfigs -n confidential-containers-system
NAME AGE
coco-config-peer-pods 15m
```

## Check that the peerpodconfig-ctrl controller created the cloud adapter daemon and it is running
```
kubectl get pods -n confidential-containers-system
NAME READY STATUS RESTARTS AGE
cc-operator-controller-manager-68b5979488-x8bhb 2/2 Running 0 31m
cc-operator-daemon-install-lrwc5 1/1 Running 0 12s
cc-operator-pre-install-daemon-2cllk 1/1 Running 0 14s
peerpodconfig-ctrl-caa-daemon-bv45g 1/1 Running 0 14s
```

## Create a Peerpod
```
kubectl apply -f fedora-sleep.yaml
```
```
fedora-sleep.yaml
apiVersion: v1
kind: Pod
metadata:
name: fedora-sleep
spec:
runtimeClassName: kata-remote
restartPolicy: Never
containers:
- name: sleep-forever
image: registry.fedoraproject.org/fedora
command: ["sleep"]
args: [ "infinity"]
```

## Check that the peerpod CR was created as a result of creating the peerpod
```
kubectl get peerpods
NAME AGE
fedora-sleep-resource-gqjl6 7m26s
```

## Check that the peerpod pod is running and that a VM was created and is up
```
kubectl get pods
NAME READY STATUS RESTARTS AGE
fedora-sleep 1/1 Running 0 7m7s
kcli get vms
+-----------------------------+--------+-----------------+------------+----------+---------+
| Name | Status | Ip | Source | Plan | Profile |
+-----------------------------+--------+-----------------+------------+----------+---------+
| coco-k8s-ctlplane-0 | up | 192.168.122.251 | ubuntu2004 | coco-k8s | kvirt |
| coco-k8s-worker-0 | up | 192.168.122.68 | ubuntu2004 | coco-k8s | kvirt |
| podvm-fedora-sleep-59d239f2 | up | 192.168.122.23 | | | |
+-----------------------------+--------+-----------------+------------+----------+---------+
```

## Check that the peerpod-ctrl controller is working
First delete the cloud adapter daemon.
```
kubectl delete pod peerpodconfig-ctrl-caa-daemon-bv45g -n confidential-containers-system
pod "peerpodconfig-ctrl-caa-daemon-bv45g" deleted
```

This will cause the peer pod to go into the Error state.
```
kubectl get pods
NAME READY STATUS RESTARTS AGE
fedora-sleep 0/1 Error 0 13m
```
A new caa-daemon will start up but it will not know about the existing peerpod
VM. The peerpod-ctrl controller will clean up the orphaned peerpod resources once the pod
is deleted.
```
kubectl delete pod fedora-sleep
pod "fedora-sleep" deleted
kubectl get peerpod
No resources found in default namespace.
kcli get vms
+---------------------+--------+-----------------+------------+----------+---------+
| Name | Status | Ip | Source | Plan | Profile |
+---------------------+--------+-----------------+------------+----------+---------+
| coco-k8s-ctlplane-0 | up | 192.168.122.251 | ubuntu2004 | coco-k8s | kvirt |
| coco-k8s-worker-0 | up | 192.168.122.68 | ubuntu2004 | coco-k8s | kvirt |
+---------------------+--------+-----------------+------------+----------+---------+
```

In the operator log look for [adaptor/cloud/libvirt] and you should see references to deleting the instance resources:
```
kubectl logs cc-operator-controller-manager-68b5979488-x8bhb -f -n confidential-containers-system --all-containers=true
INFO deleting instance {"controller": "peerpod", "controllerGroup": "confidentialcontainers.org", "controllerKind": "PeerPod", "PeerPod": {"name":"fedora-sleep-resource-gqjl6","namespace":"default"}, "namespace": "default", "name": "fedora-sleep-resource-gqjl6", "reconcileID": "520379c0-e37a-4628-9c85-67955502a7d0", "InstanceID": "42", "CloudProvider": "libvirt"}
[adaptor/cloud/libvirt] Deleting instance (42)
[adaptor/cloud/libvirt] Checking if instance (42) exists
[adaptor/cloud/libvirt] domainDef [{{ disk} disk 0xc00096f8c0 <nil> 0xc0006ac870 <nil> <nil> <nil> <nil> <nil> 0xc0000d7f90 <nil> <nil> <nil> <nil> <nil> 0xc0005c41f8 <nil> <nil> 0xc0005b5200} {{ disk} cdrom 0xc00096fe60 <nil> 0xc0006ac900 <nil> <nil> <nil> <nil> <nil> 0xc00088c000 <nil> 0x308fd88 <nil> <nil> <nil> <nil> <nil> <nil> 0xc0005b5440}]
[adaptor/cloud/libvirt] Check if podvm-fedora-sleep-59d239f2-root.qcow2 volume exists
[adaptor/cloud/libvirt] Deleting volume podvm-fedora-sleep-59d239f2-root.qcow2
[adaptor/cloud/libvirt] Check if podvm-fedora-sleep-59d239f2-cloudinit.iso volume exists
[adaptor/cloud/libvirt] Deleting volume podvm-fedora-sleep-59d239f2-cloudinit.iso
[adaptor/cloud/libvirt] deleted an instance 42
INFO instance deleted {"controller": "peerpod", "controllerGroup": "confidentialcontainers.org", "controllerKind": "PeerPod", "PeerPod": {"name":"fedora-sleep-resource-gqjl6","namespace":"default"}, "namespace": "default", "name": "fedora-sleep-resource-gqjl6", "reconcileID": "520379c0-e37a-4628-9c85-67955502a7d0", "InstanceID": "42", "CloudProvider": "libvirt"}
```
Loading

0 comments on commit 92eb08a

Please sign in to comment.