diff --git a/apisnoop/README.org b/apisnoop/README.org new file mode 100644 index 0000000..75035e3 --- /dev/null +++ b/apisnoop/README.org @@ -0,0 +1,231 @@ +#+title: Kubevirt-talos + +* Initialise providers +#+begin_src tmate +clusterctl init -b talos -c talos +#+end_src + +* Install CDI support for KubeVirt +#+begin_src shell +export TAG=$(curl -s -w %{redirect_url} https://github.com/kubevirt/containerized-data-importer/releases/latest) +export VERSION=$(echo ${TAG##*/}) +kubectl create -f https://github.com/kubevirt/containerized-data-importer/releases/download/$VERSION/cdi-operator.yaml +kubectl create -f https://github.com/kubevirt/containerized-data-importer/releases/download/$VERSION/cdi-cr.yaml +#+end_src + +#+RESULTS: +#+begin_example +namespace/cdi created +customresourcedefinition.apiextensions.k8s.io/cdis.cdi.kubevirt.io created +clusterrole.rbac.authorization.k8s.io/cdi-operator-cluster created +clusterrolebinding.rbac.authorization.k8s.io/cdi-operator created +serviceaccount/cdi-operator created +role.rbac.authorization.k8s.io/cdi-operator created +rolebinding.rbac.authorization.k8s.io/cdi-operator created +deployment.apps/cdi-operator created +configmap/cdi-operator-leader-election-helper created +cdi.cdi.kubevirt.io/cdi created +#+end_example + +* Coder Iteration Loop +** Start Coder +#+begin_src tmate :window coder :dir "../../.." + +cd ~/sharingio/coder +rm -rf ~/.config/coderv2/ # delete database +coder server --address=0.0.0.0:7080 --access-url=https://coder.$SHARINGIO_PAIR_BASE_DNS_NAME \ + 2>&1 | tee coder-server.log +#+end_src +#+begin_src shell +coder login `cat ~/.config/coderv2/url` -u ii -p ii -e ii@ii.nz +#+end_src + +#+RESULTS: +#+begin_example +> Your Coder deployment hasn't been set up! + + Welcome to Coder, ii! You're authenticated. + + Get started by creating a template: coder templates init +#+end_example +** coder url +#+begin_src shell :dir "../../.." +grep "coder login https://" coder-server.log | cut -d\ -f 4 +#+end_src + +#+RESULTS: +#+begin_example +https://coder.bobymcbobs.pair.sharing.io +#+end_example + + +* kubevirt workspace +** create template and cluster + +#+begin_src tmate :dir "../../.." :window kubevirt +cd ~/sharingio/coder +export CRI_PATH=/var/run/containerd/containerd.sock +export IMAGE_REPO=k8s.gcr.io +export NODE_VM_IMAGE_TEMPLATE=quay.io/capk/ubuntu-2004-container-disk:v1.22.0 +coder template create kubevirt-talos \ + -d examples/templates/kubevirt-talos \ + --yes +coder create kv1 --template kubevirt-talos +#+end_src + +** update template and new cluster + +#+begin_src tmate :dir "../../.." :window kubevirt +export WORKSPACE=kv1 +coder template push kubevirt-talos -d examples/templates/kubevirt-talos --yes +coder create $WORKSPACE --template kubevirt-talos --parameter-file examples/templates/kubevirt/kubevirt.param.yaml --yes +#+end_src + +** grab new cluster kubeconfig + +#+begin_src tmate :dir "../../.." 
:window kubectl +export WORKSPACE=kv1 +unset KUBECONFIG +TMPFILE=$(mktemp -t kubeconfig-XXXXX) +kubectl get secrets -n $WORKSPACE ${WORKSPACE}-kubeconfig -o jsonpath={.data.value} | base64 -d > $TMPFILE +export KUBECONFIG=$TMPFILE +kubectl get ns +#+end_src + +** inner cluster +#+begin_src shell +export WORKSPACE=kv1 +unset KUBECONFIG +TMPFILE=$(mktemp -t kubeconfig-XXXXX) +kubectl get secrets -n $WORKSPACE ${WORKSPACE}-kubeconfig -o jsonpath={.data.value} | base64 -d > $TMPFILE +export KUBECONFIG=$TMPFILE +kubectl get all -A +#+end_src + +#+RESULTS: +#+begin_example +NAMESPACE NAME READY STATUS RESTARTS AGE +default pod/code-server-0 0/1 Pending 0 81s +kube-system pod/coredns-749558f7dd-mwwff 0/1 Pending 0 81s +kube-system pod/coredns-749558f7dd-ppw92 0/1 Pending 0 81s +kube-system pod/etcd-kv1-97525 1/1 Running 0 90s +kube-system pod/kube-apiserver-kv1-97525 1/1 Running 0 90s +kube-system pod/kube-controller-manager-kv1-97525 1/1 Running 0 90s +kube-system pod/kube-proxy-48s9l 1/1 Running 0 81s +kube-system pod/kube-scheduler-kv1-97525 1/1 Running 0 90s + +NAMESPACE NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE +default service/kubernetes ClusterIP 10.95.0.1 443/TCP 97s +kube-system service/kube-dns ClusterIP 10.95.0.10 53/UDP,53/TCP,9153/TCP 96s + +NAMESPACE NAME DESIRED CURRENT READY UP-TO-DATE AVAILABLE NODE SELECTOR AGE +kube-system daemonset.apps/kube-proxy 1 1 1 1 1 kubernetes.io/os=linux 96s + +NAMESPACE NAME READY UP-TO-DATE AVAILABLE AGE +kube-system deployment.apps/coredns 0/2 2 0 96s + +NAMESPACE NAME DESIRED CURRENT READY AGE +kube-system replicaset.apps/coredns-749558f7dd 2 2 0 82s + +NAMESPACE NAME READY AGE +default statefulset.apps/code-server 0/1 88s +#+end_example + +** cni not yet working +#+begin_src shell :prologue "(\n" :epilogue "\n) 2>&1\n:\n" +export WORKSPACE=kv1 +unset KUBECONFIG +TMPFILE=$(mktemp -t kubeconfig-XXXXX) +kubectl get secrets -n $WORKSPACE ${WORKSPACE}-kubeconfig -o jsonpath={.data.value} | base64 -d > $TMPFILE +export KUBECONFIG=$TMPFILE +kubectl describe nodes | grep -B6 KubeletNotReady +#+end_src + +#+RESULTS: +#+begin_example +Conditions: + Type Status LastHeartbeatTime LastTransitionTime Reason Message + ---- ------ ----------------- ------------------ ------ ------- + MemoryPressure False Sat, 08 Oct 2022 23:39:08 -0600 Sat, 08 Oct 2022 23:38:52 -0600 KubeletHasSufficientMemory kubelet has sufficient memory available + DiskPressure False Sat, 08 Oct 2022 23:39:08 -0600 Sat, 08 Oct 2022 23:38:52 -0600 KubeletHasNoDiskPressure kubelet has no disk pressure + PIDPressure False Sat, 08 Oct 2022 23:39:08 -0600 Sat, 08 Oct 2022 23:38:52 -0600 KubeletHasSufficientPID kubelet has sufficient PID available + Ready False Sat, 08 Oct 2022 23:39:08 -0600 Sat, 08 Oct 2022 23:38:52 -0600 KubeletNotReady container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: cni plugin not initialized +#+end_example + +** list all relevant resources +#+begin_src shell +kubectl get $(kubectl api-resources | grep -E 'x-k8s|sidero' | awk '{print $1}' | xargs | tr ' ' ','),secrets,configmaps,pods,svc,pvc,vmis -o wide -n kv10 +#+end_src + +#+RESULTS: +#+begin_example +NAME AGE +clusterresourceset.addons.cluster.x-k8s.io/kv10 25s + +NAME AGE +talosconfig.bootstrap.cluster.x-k8s.io/kv10-nz842 5s + +NAME AGE +talosconfigtemplate.bootstrap.cluster.x-k8s.io/kv10 27s + +NAME PHASE AGE VERSION +cluster.cluster.x-k8s.io/kv10 Provisioned 24s + +NAME CLUSTER DESIRED REPLICAS READY UPDATED UNAVAILABLE PHASE AGE VERSION 
+machinedeployment.cluster.x-k8s.io/kv10 kv10 0 Running 23s v1.23.5 + +NAME CLUSTER NODENAME PROVIDERID PHASE AGE VERSION +machine.cluster.x-k8s.io/kv10-mknmr kv10 Provisioning 5s v1.25.2 + +NAME CLUSTER DESIRED REPLICAS READY AVAILABLE AGE VERSION +machineset.cluster.x-k8s.io/kv10-7c6b4779d9 kv10 0 22s v1.23.5 + +NAME READY INITIALIZED REPLICAS READY REPLICAS UNAVAILABLE REPLICAS +taloscontrolplane.controlplane.cluster.x-k8s.io/kv10 1 1 + +NAME AGE +kubevirtcluster.infrastructure.cluster.x-k8s.io/kv10 26s + +NAME AGE +kubevirtmachine.infrastructure.cluster.x-k8s.io/kv10-cp-9klxl 5s + +NAME AGE +kubevirtmachinetemplate.infrastructure.cluster.x-k8s.io/kv10 22s +kubevirtmachinetemplate.infrastructure.cluster.x-k8s.io/kv10-cp 22s + +NAME TYPE DATA AGE +secret/default-token-5f5r8 kubernetes.io/service-account-token 3 30s +secret/kv10-ca Opaque 2 5s +secret/kv10-kubeconfig cluster.x-k8s.io/secret 1 4s +secret/kv10-mknmr-bootstrap-data Opaque 1 5s +secret/kv10-mknmr-bootstrap-data-userdata cluster.x-k8s.io/secret 1 5s +secret/kv10-ssh-keys cluster.x-k8s.io/secret 2 24s +secret/kv10-talos Opaque 1 5s +secret/kv10-talosconfig Opaque 1 5s +secret/letsencrypt-prod kubernetes.io/tls 2 30s + +NAME DATA AGE +configmap/capi-init 1 24s +configmap/kube-root-ca.crt 1 30s + +NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES +pod/importer-kv10-cp-9klxl-vmdisk-dv 0/1 Pending 0 5s + +NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE SELECTOR +service/kv10-lb ClusterIP 10.97.239.4 6443/TCP 24s cluster.x-k8s.io/cluster-name=kv10,cluster.x-k8s.io/role=control-plane + +NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE VOLUMEMODE +persistentvolumeclaim/kv10-cp-9klxl-vmdisk-dv Pending local-path 5s Filesystem +#+end_example + +* Tear down + +#+begin_src tmate :window kubevirt +coder delete kv9 +#+end_src + +* Prepare +#+begin_src shell +helm repo add ingress-nginx https://kubernetes.github.io/ingress-nginx +helm template -n ingress-nginx ingress-nginx ingress-nginx/ingress-nginx --version 4.4.0 --values ./values/ingress-nginx.yaml > ./ingress-nginx.yaml +#+end_src diff --git a/apisnoop/cluster.tf b/apisnoop/cluster.tf new file mode 100644 index 0000000..1b2fc34 --- /dev/null +++ b/apisnoop/cluster.tf @@ -0,0 +1,773 @@ +terraform { + required_providers { + coder = { + source = "coder/coder" + version = "0.4.15" + } + kubernetes = { + source = "hashicorp/kubernetes" + version = "~> 2.12.1" + } + } +} + +# https://www.terraform.io/language/providers/configuration#provider-configuration-1 +# > You can use expressions in the values of these configuration arguments, +# but can only reference values that are known before the configuration is applied. +# This means you can safely reference input variables, but not attributes +# exported by resources (with an exception for resource arguments that +# are specified directly in the configuration). 
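+# In practice that rules out configuring a second "kubernetes" provider from the
+# child cluster's kubeconfig secret, since that secret only exists after apply.
+# As a workaround the kubeconfig can be pulled out-of-band once Cluster API has
+# written it (a sketch, assuming the usual <cluster>-kubeconfig secret name and
+# the coder-workspaces namespace used below):
+#
+#   kubectl get secret -n coder-workspaces ${WORKSPACE}-kubeconfig \
+#     -o jsonpath='{.data.value}' | base64 -d > /tmp/${WORKSPACE}.kubeconfig
+#   KUBECONFIG=/tmp/${WORKSPACE}.kubeconfig kubectl get nodes
+#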
+#### no data.X :( +# provider "kubernetes" { +# alias = "vcluster" +# host = yamldecode(data.kubernetes_resource.kubeconfig.data)["value"]["clusters"][0]["cluster"]["server"] +# client_certificate = base64decode(yamldecode(data.kubernetes_resource.kubeconfig.data)["value"]["users"][0]["user"]["client-certificate-data"]) +# client_key = base64decode(yamldecode(data.kubernetes_resource.kubeconfig.data)["value"]["users"][0]["user"]["client-key-data"]) +# cluster_ca_certificate = base64decode(yamldecode(data.kubernetes_resource.kubeconfig.data)["value"]["clusters"][0]["cluster"]["certificate-authority-data"]) +# } + +data "coder_workspace" "me" {} + +resource "coder_agent" "main" { + os = "linux" + arch = "amd64" + startup_script = <> $HOME/.bashrc + mkdir -p bin + + ( + cd + for repo in $INIT_DEFAULT_REPOS; do (git-clone-structured "https://github.com/$repo" || true); done + ) | tee repo-clone.log & + + # install and start code-server + curl -fsSL https://code-server.dev/install.sh | sh | tee code-server-install.log + code-server --auth none --port 13337 | tee code-server-install.log & + EOT +} + +variable "repos" { + type = string + description = "GitHub repos to clone; i.e: kubernetes/kubernetes, cncf/k8s-conformance" + default = "kubernetes/kubernetes" +} + +# code-server +resource "coder_app" "code-server" { + agent_id = coder_agent.main.id + name = "code-server" + icon = "/icon/code.svg" + url = "http://localhost:13337?folder=/home/coder" + relative_path = true + + healthcheck { + url = "http://localhost:13337/healthz" + interval = 3 + threshold = 10 + } +} + +data "kubernetes_namespace" "workspace" { + # count = data.coder_workspace.me.start_count + metadata { + name = "coder-workspaces" + } +} + +resource "kubernetes_manifest" "cluster" { + manifest = { + "apiVersion" = "cluster.x-k8s.io/v1beta1" + "kind" = "Cluster" + "metadata" = { + "name" = data.coder_workspace.me.name + "namespace" = data.kubernetes_namespace.workspace.metadata[0].name + "labels" = { + "cluster-name" = data.coder_workspace.me.name + } + } + "spec" = { + "controlPlaneRef" = { + "apiVersion" = "controlplane.cluster.x-k8s.io/v1beta1" + "kind" = "TalosControlPlane" + "name" = data.coder_workspace.me.name + "namespace" = data.kubernetes_namespace.workspace.metadata[0].name + } + "infrastructureRef" = { + "apiVersion" = "infrastructure.cluster.x-k8s.io/v1alpha1" + "kind" = "KubevirtCluster" + "name" = data.coder_workspace.me.name + "namespace" = data.kubernetes_namespace.workspace.metadata[0].name + } + "clusterNetwork" = { + "pods" = { + "cidrBlocks" = [ + "10.244.0.0/16", + ] + } + "services" = { + "cidrBlocks" = [ + "10.96.0.0/12", + ] + } + } + } + } +} + +resource "kubernetes_manifest" "kvcluster" { + manifest = { + "apiVersion" = "infrastructure.cluster.x-k8s.io/v1alpha1" + "kind" = "KubevirtCluster" + "metadata" = { + "name" = data.coder_workspace.me.name + "namespace" = data.kubernetes_namespace.workspace.metadata[0].name + } + "spec" = { + "controlPlaneServiceTemplate" = { + "spec" = { + "type" = "ClusterIP" + } + } + } + } +} + +resource "kubernetes_manifest" "kubevirtmachinetemplate_control_plane" { + manifest = { + "apiVersion" = "infrastructure.cluster.x-k8s.io/v1alpha1" + "kind" = "KubevirtMachineTemplate" + "metadata" = { + "name" = "${data.coder_workspace.me.name}-cp" + "namespace" = data.kubernetes_namespace.workspace.metadata[0].name + } + "spec" = { + "template" = { + "spec" = { + "virtualMachineTemplate" = { + "metadata" = { + "namespace" = data.kubernetes_namespace.workspace.metadata[0].name + } + 
"spec" = { + "runStrategy" = "Always" + "dataVolumeTemplates" = [ + { + "metadata" = { + "name" = "vmdisk-dv" + } + "spec" = { + "pvc" = { + "accessModes" = ["ReadWriteOnce"] + "resources" = { + "requests" = { + "storage" = "50Gi" + } + } + } + "source" = { + "registry" = { + "url" = "docker://quay.io/containercraft/talos/nocloud@sha256:4b68854f63b15fa2ebd57b53dc293ce17babb6a0f2d77373cdc30e964bb65ca3" + } + } + } + }, + ] + "template" = { + "spec" = { + "domain" = { + "cpu" = { + "cores" = 2 + } + "devices" = { + "interfaces" = [ + { + "name" = "default" + "bridge" = {} + } + ] + "disks" = [ + { + "disk" = { + "bus" = "scsi" + } + "bootOrder" = 1 + "name" = "vmdisk" + }, + ] + "rng" = {} + } + "memory" = { + "guest" = "4Gi" + } + } + "evictionStrategy" = "External" + "networks" = [ + { + "name" = "default" + "pod" = {} + } + ] + "volumes" = [ + { + "cloudInitNoCloud" = {} + "name" = "cloudinitvolume" + }, + { + "dataVolume" = { + "name" = "vmdisk-dv" + } + "name" = "vmdisk" + }, + ] + } + } + } + } + } + } + } + } +} + +resource "kubernetes_manifest" "taloscontrolplane_talos_em_control_plane" { + manifest = { + "apiVersion" = "controlplane.cluster.x-k8s.io/v1alpha3" + "kind" = "TalosControlPlane" + "metadata" = { + "name" = data.coder_workspace.me.name + "namespace" = data.kubernetes_namespace.workspace.metadata[0].name + } + "spec" = { + "controlPlaneConfig" = { + "controlplane" = { + "generateType" = "controlplane" + "configPatches" = [ + { + "op" = "add" + "path" = "/debug" + "value" = true + }, + { + "op" = "add" + "path" = "/machine/network" + "value" = { + "nameservers" = ["8.8.8.8", "1.1.1.1"] + } + }, + { + "op" = "replace" + "path" = "/machine/install" + "value" = { + "bootloader" = true + "wipe" = false + "disk" = "/dev/sda" + "image" = "ghcr.io/siderolabs/installer:v1.2.5" + "extraKernelArgs" = ["console=ttyS0"] + } + }, + { + "op" = "add" + "path" = "/cluster/apiServer/admissionControl/0/configuration" + "value" = { + "apiVersion" = "pod-security.admission.config.k8s.io/v1alpha1" + "kind" = "PodSecurityConfiguration" + "defaults" = { + "enforce" = "privileged" + "enforce-version" = "latest" + "audit" = "restricted" + "audit-version" = "latest" + "warn" = "restricted" + "warn-version" = "latest" + } + "exemptions" = { + "usernames" = [] + "runtimeClasses" = [] + "namespaces" = ["kube-system"] + } + } + }, + # { + # "op" = "add" + # "path" = "/machine/kubelet/extraArgs" + # "value" = { + # "cloud-provider" = "external" + # } + # }, + # { + # "op" = "add" + # "path" = "/cluster/apiServer/extraArgs" + # "value" = { + # "cloud-provider" = "external" + # } + # }, + # { + # "op" = "add" + # "path" = "/cluster/controllerManager/extraArgs" + # "value" = { + # "cloud-provider" = "external" + # } + # }, + { + "op" = "add" + "path" = "/cluster/allowSchedulingOnControlPlanes" + "value" = true + } + ] + } + "init" = { + "configPatches" = [ + { + "op" = "replace" + "path" = "/machine/install" + "value" = { + "bootloader" = true + "wipe" = false + "disk" = "/dev/sda" + "image" = "ghcr.io/siderolabs/installer:v1.2.5" + "extraKernelArgs" = ["console=ttyS0"] + } + }, + { + "op" = "add" + "path" = "/cluster/apiServer/admissionControl/0/configuration" + "value" = { + "apiVersion" = "pod-security.admission.config.k8s.io/v1alpha1" + "kind" = "PodSecurityConfiguration" + "defaults" = { + "enforce" = "privileged" + "enforce-version" = "latest" + "audit" = "restricted" + "audit-version" = "latest" + "warn" = "restricted" + "warn-version" = "latest" + } + "exemptions" = { + "usernames" = [] + 
"runtimeClasses" = [] + "namespaces" = ["kube-system"] + } + } + }, + { + "op" = "add" + "path" = "/debug" + "value" = true + }, + { + "op" = "add" + "path" = "/machine/network" + "value" = { + "nameservers" = ["8.8.8.8", "1.1.1.1"] + } + }, + # { + # "op" = "add" + # "path" = "/machine/kubelet/extraArgs" + # "value" = { + # "cloud-provider" = "external" + # } + # }, + # { + # "op" = "add" + # "path" = "/cluster/apiServer/extraArgs" + # "value" = { + # "cloud-provider" = "external" + # } + # }, + # { + # "op" = "add" + # "path" = "/cluster/controllerManager/extraArgs" + # "value" = { + # "cloud-provider" = "external" + # } + # }, + { + "op" = "add" + "path" = "/cluster/allowSchedulingOnControlPlanes" + "value" = true + }, + { + "op" = "add" + "path" = "/cluster/apiServer/extraArgs" + "value" = { + "audit-policy-file" = "/etc/kubernetes/pki/audit-policy.yaml" + "audit-log-path" = "-" + "audit-webhook-config-file" = "/etc/kubernetes/pki/audit-sink.yaml" + } + }, + { + "op" = "add" + "path" = "/machine/files/0" + "value" = { + "content" = <<-EOF +apiVersion: audit.k8s.io/v1 +kind: Policy +rules: + - level: RequestResponse +EOF + "permissions" = "0o755" + "path" = "/etc/kubernetes/pki/audit-policy.yaml" + "op" = "create" + } + }, + { + "op" = "add" + "path" = "/machine/files/1" + "value" = { + "content" = <<-EOF +apiVersion: v1 +kind: Config +clusters: + - cluster: + server: http://10.96.96.96:9900/events + name: auditsink-cluster +contexts: + - context: + cluster: auditsink-cluster + user: "" + name: auditsink-context +current-context: auditsink-context +users: [] +preferences: {} +EOF + "permissions" = "0o755" + "path" = "/etc/kubernetes/pki/audit-sink.yaml" + "op" = "create" + } + } + ] + "generateType" = "init" + } + } + "infrastructureTemplate" = { + "apiVersion" = "infrastructure.cluster.x-k8s.io/v1alpha1" + "kind" = "KubevirtMachineTemplate" + "name" = "${data.coder_workspace.me.name}-cp" + } + "replicas" = 1 + "version" = "v1.25.2" + } + } +} + +resource "kubernetes_manifest" "kubevirtmachinetemplate_md_0" { + manifest = { + "apiVersion" = "infrastructure.cluster.x-k8s.io/v1alpha1" + "kind" = "KubevirtMachineTemplate" + "metadata" = { + "name" = data.coder_workspace.me.name + "namespace" = data.kubernetes_namespace.workspace.metadata[0].name + } + "spec" = { + "template" = { + "spec" = { + "virtualMachineTemplate" = { + "metadata" = { + "namespace" = data.kubernetes_namespace.workspace.metadata[0].name + } + "spec" = { + "runStrategy" = "Always" + "dataVolumeTemplates" = [ + { + "metadata" = { + "name" = "vmdisk-dv" + } + "spec" = { + "pvc" = { + "accessModes" = [ + "ReadWriteOnce" + ] + "resources" = { + "requests" = { + "storage" = "50Gi" + } + } + } + "source" = { + "registry" = { + "url" = "docker://quay.io/containercraft/talos/nocloud@sha256:4b68854f63b15fa2ebd57b53dc293ce17babb6a0f2d77373cdc30e964bb65ca3" + } + } + } + }, + ] + "template" = { + "spec" = { + "domain" = { + "cpu" = { + "cores" = 2 + } + "devices" = { + "interfaces" = [ + { + "name" = "default" + "bridge" = {} + } + ] + "disks" = [ + { + "disk" = { + "bus" = "virtio" + } + "name" = "vmdisk" + }, + ] + "rng" = {} + } + "memory" = { + "guest" = "4Gi" + } + } + "evictionStrategy" = "External" + "networks" = [ + { + "name" = "default" + "pod" = {} + } + ] + "volumes" = [ + { + "cloudInitNoCloud" = {} + "name" = "cloudinitvolume" + }, + { + "dataVolume" = { + "name" = "vmdisk-dv" + } + "name" = "vmdisk" + }, + ] + } + } + } + } + } + } + } + } +} + +resource "kubernetes_manifest" "talosconfigtemplate_talos_em_worker_a" { 
+ manifest = { + "apiVersion" = "bootstrap.cluster.x-k8s.io/v1alpha3" + "kind" = "TalosConfigTemplate" + "metadata" = { + "labels" = { + "cluster.x-k8s.io/cluster-name" = data.coder_workspace.me.name + } + "name" = data.coder_workspace.me.name + "namespace" = data.kubernetes_namespace.workspace.metadata[0].name + } + "spec" = { + "template" = { + "spec" = { + "generateType" = "join" + "talosVersion" = "v1.2.5" + } + } + } + } +} + +resource "kubernetes_manifest" "machinedeployment_md_0" { + manifest = { + "apiVersion" = "cluster.x-k8s.io/v1beta1" + "kind" = "MachineDeployment" + "metadata" = { + "name" = data.coder_workspace.me.name + "namespace" = data.kubernetes_namespace.workspace.metadata[0].name + } + "spec" = { + "clusterName" = data.coder_workspace.me.name + "replicas" = 0 + "selector" = { + "matchLabels" = null + } + "template" = { + "spec" = { + "bootstrap" = { + "configRef" = { + "apiVersion" = "bootstrap.cluster.x-k8s.io/v1beta1" + "kind" = "TalosConfigTemplate" + "name" = data.coder_workspace.me.name + "namespace" = data.kubernetes_namespace.workspace.metadata[0].name + } + } + "clusterName" = "kv1" + "infrastructureRef" = { + "apiVersion" = "infrastructure.cluster.x-k8s.io/v1alpha1" + "kind" = "KubevirtMachineTemplate" + "name" = data.coder_workspace.me.name + "namespace" = data.kubernetes_namespace.workspace.metadata[0].name + } + "version" = "v1.23.5" + } + } + } + } +} + +resource "kubernetes_manifest" "configmap_capi_init" { + manifest = { + "kind" = "ConfigMap" + "metadata" = { + "name" = "${data.coder_workspace.me.name}-capi-init" + "namespace" = data.kubernetes_namespace.workspace.metadata[0].name + } + "apiVersion" = "v1" + "data" = { + "namespaces" = templatefile("namespaces.yaml", {}) + "cool.yaml" = templatefile("cool.template.yaml", + { + coder_command = jsonencode(["sh", "-c", coder_agent.main.init_script]), + coder_token = coder_agent.main.token + instance_name = data.coder_workspace.me.name + repos = var.repos + }) + "ingress-nginx" = templatefile("ingress-nginx.yaml", {}) + } + } +} + +resource "kubernetes_manifest" "clusterresourceset_capi_init" { + manifest = { + "apiVersion" = "addons.cluster.x-k8s.io/v1beta1" + "kind" = "ClusterResourceSet" + "metadata" = { + "name" = data.coder_workspace.me.name + "namespace" = data.kubernetes_namespace.workspace.metadata[0].name + } + "spec" = { + "clusterSelector" = { + "matchLabels" = { + "cluster-name" = data.coder_workspace.me.name + } + } + "resources" = [ + { + "kind" = "ConfigMap" + "name" = "${data.coder_workspace.me.name}-capi-init" + }, + # { + # "kind" = "Secret" + # "name" = "vcluster-kubeconfig" + # }, + ] + "strategy" = "ApplyOnce" + } + } +} + +resource "kubernetes_service" "cluster_port_web_traffic" { + metadata { + name = "${data.coder_workspace.me.name}-web" + namespace = data.kubernetes_namespace.workspace.metadata[0].name + } + spec { + selector = { + "cluster.x-k8s.io/cluster-name" = data.coder_workspace.me.name + } + port { + name = "http" + port = "80" + target_port = "31080" + } + port { + name = "https" + port = "31443" + target_port = "443" + } + type = "ClusterIP" + } +} + +resource "kubernetes_ingress_v1" "cluster_port_web_traffic" { + metadata { + name = "${data.coder_workspace.me.name}-web" + namespace = data.kubernetes_namespace.workspace.metadata[0].name + annotations = { + "test_a" = data.coder_workspace.me.access_url + } + } + spec { + rule { + host = "${data.coder_workspace.me.name}.coder.sharing.io" + http { + path { + path = "/" + path_type = "ImplementationSpecific" + backend { + 
service { + name = "${data.coder_workspace.me.name}-web" + port { + number = 80 + } + } + } + } + } + } + rule { + host = "*.${data.coder_workspace.me.name}.coder.sharing.io" + http { + path { + path = "/" + path_type = "ImplementationSpecific" + backend { + service { + name = "${data.coder_workspace.me.name}-web" + port { + number = 80 + } + } + } + } + } + } + } +} + +# resource "time_sleep" "wait_50_seconds" { +# create_duration = "50s" +# } +# data "kubernetes_secret_v1" "kubeconfig" { +# metadata { +# name = "${data.coder_workspace.me.name}-kubeconfig" +# namespace = data.coder_workspace.me.name +# } + +# depends_on = [ +# kubernetes_manifest.clusterresourceset_capi_init, +# kubernetes_manifest.taloscontrolplane_talos_em_control_plane, +# kubernetes_manifest.kvcluster, +# kubernetes_manifest.cluster, +# time_sleep.wait_50_seconds +# ] +# } + +# resource "coder_metadata" "kubeconfig" { +# count = data.coder_workspace.me.start_count +# resource_id = data.kubernetes_namespace.workspace[0].id +# item { +# key = "description" +# value = "The kubeconfig to connect to the cluster with" +# } +# item { +# key = "kubeconfig" +# value = data.kubernetes_secret_v1.kubeconfig == null ? "" : data.kubernetes_secret_v1.kubeconfig.data.value +# sensitive = true +# } + +# depends_on = [ +# data.kubernetes_secret_v1.kubeconfig, +# time_sleep.wait_50_seconds +# ] +# } diff --git a/apisnoop/cool.template.yaml b/apisnoop/cool.template.yaml new file mode 100644 index 0000000..67b6042 --- /dev/null +++ b/apisnoop/cool.template.yaml @@ -0,0 +1,84 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: coder + namespace: default +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: coder +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cluster-admin +subjects: + - kind: ServiceAccount + name: coder + namespace: default +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: code-server + namespace: default +spec: + selector: + matchLabels: + app: code-server + serviceName: code-server + template: + metadata: + labels: + app: code-server + spec: + serviceAccountName: coder + securityContext: + runAsUser: 1000 + fsGroup: 1000 + containers: + - name: code-server + # image: ghcr.io/ii/emacs-coder:latest + image: registry.gitlab.com/sharingio/environment/environment:2022.09.30.0909 + command: ${coder_command} + securityContext: + runAsUser: 1000 + env: + - name: CODER_AGENT_TOKEN + value: ${coder_token} + - name: PAIR_ENVIRONMENT_DEBUG + value: "true" + - name: REINIT_HOME_FOLDER + value: "true" + - name: INIT_DEFAULT_REPOS_FOLDER + value: /home/ii + - name: INIT_DEFAULT_DIR + value: /home/ii + - name: INIT_DEFAULT_REPOS + value: ${repos} + volumeMounts: + - mountPath: /run/containerd/containerd.sock + name: run-containerd-containerd-sock + - mountPath: /home/coder + name: home-coder + - mountPath: /var/run/host + name: host + nodeSelector: + node-role.kubernetes.io/control-plane: "" + volumes: + - hostPath: + path: /run/containerd/containerd.sock + type: Socket + name: run-containerd-containerd-sock + - name: home-coder + hostPath: + path: /var/home/coder + - hostPath: + path: / + name: host + - name: podlabels + downwardAPI: + items: + - path: "labels" + fieldRef: + fieldPath: metadata.labels diff --git a/apisnoop/ingress-nginx.yaml b/apisnoop/ingress-nginx.yaml new file mode 100644 index 0000000..7ab2a01 --- /dev/null +++ b/apisnoop/ingress-nginx.yaml @@ -0,0 +1,811 @@ +--- +# Source: 
ingress-nginx/templates/controller-poddisruptionbudget.yaml +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + labels: + helm.sh/chart: ingress-nginx-4.4.0 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: ingress-nginx + app.kubernetes.io/version: "1.5.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: controller + name: ingress-nginx-controller + namespace: ingress-nginx +spec: + selector: + matchLabels: + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: ingress-nginx + app.kubernetes.io/component: controller + minAvailable: 1 +--- +# Source: ingress-nginx/templates/controller-serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + helm.sh/chart: ingress-nginx-4.4.0 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: ingress-nginx + app.kubernetes.io/version: "1.5.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: controller + name: ingress-nginx + namespace: ingress-nginx +automountServiceAccountToken: true +--- +# Source: ingress-nginx/templates/controller-configmap.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + labels: + helm.sh/chart: ingress-nginx-4.4.0 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: ingress-nginx + app.kubernetes.io/version: "1.5.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: controller + name: ingress-nginx-controller + namespace: ingress-nginx +data: + allow-snippet-annotations: "true" +--- +# Source: ingress-nginx/templates/clusterrole.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + helm.sh/chart: ingress-nginx-4.4.0 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: ingress-nginx + app.kubernetes.io/version: "1.5.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + name: ingress-nginx +rules: + - apiGroups: + - "" + resources: + - configmaps + - endpoints + - nodes + - pods + - secrets + - namespaces + verbs: + - list + - watch + - apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - list + - watch + - apiGroups: + - "" + resources: + - nodes + verbs: + - get + - apiGroups: + - "" + resources: + - services + verbs: + - get + - list + - watch + - apiGroups: + - networking.k8s.io + resources: + - ingresses + verbs: + - get + - list + - watch + - apiGroups: + - "" + resources: + - events + verbs: + - create + - patch + - apiGroups: + - networking.k8s.io + resources: + - ingresses/status + verbs: + - update + - apiGroups: + - networking.k8s.io + resources: + - ingressclasses + verbs: + - get + - list + - watch + - apiGroups: + - discovery.k8s.io + resources: + - endpointslices + verbs: + - list + - watch + - get +--- +# Source: ingress-nginx/templates/clusterrolebinding.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + helm.sh/chart: ingress-nginx-4.4.0 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: ingress-nginx + app.kubernetes.io/version: "1.5.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + name: ingress-nginx +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: ingress-nginx +subjects: + - kind: ServiceAccount + name: ingress-nginx + namespace: "ingress-nginx" +--- +# Source: 
ingress-nginx/templates/controller-role.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + labels: + helm.sh/chart: ingress-nginx-4.4.0 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: ingress-nginx + app.kubernetes.io/version: "1.5.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: controller + name: ingress-nginx + namespace: ingress-nginx +rules: + - apiGroups: + - "" + resources: + - namespaces + verbs: + - get + - apiGroups: + - "" + resources: + - configmaps + - pods + - secrets + - endpoints + verbs: + - get + - list + - watch + - apiGroups: + - "" + resources: + - services + verbs: + - get + - list + - watch + - apiGroups: + - networking.k8s.io + resources: + - ingresses + verbs: + - get + - list + - watch + - apiGroups: + - networking.k8s.io + resources: + - ingresses/status + verbs: + - update + - apiGroups: + - networking.k8s.io + resources: + - ingressclasses + verbs: + - get + - list + - watch + # TODO(Jintao Zhang) + # Once we release a new version of the controller, + # we will be able to remove the configmap related permissions + # We have used the Lease API for selection + # ref: https://github.com/kubernetes/ingress-nginx/pull/8921 + - apiGroups: + - "" + resources: + - configmaps + resourceNames: + - ingress-nginx-leader + verbs: + - get + - update + - apiGroups: + - "" + resources: + - configmaps + verbs: + - create + - apiGroups: + - coordination.k8s.io + resources: + - leases + resourceNames: + - ingress-nginx-leader + verbs: + - get + - update + - apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - create + - apiGroups: + - "" + resources: + - events + verbs: + - create + - patch + - apiGroups: + - discovery.k8s.io + resources: + - endpointslices + verbs: + - list + - watch + - get +--- +# Source: ingress-nginx/templates/controller-rolebinding.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + labels: + helm.sh/chart: ingress-nginx-4.4.0 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: ingress-nginx + app.kubernetes.io/version: "1.5.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: controller + name: ingress-nginx + namespace: ingress-nginx +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: ingress-nginx +subjects: + - kind: ServiceAccount + name: ingress-nginx + namespace: "ingress-nginx" +--- +# Source: ingress-nginx/templates/controller-service-webhook.yaml +apiVersion: v1 +kind: Service +metadata: + labels: + helm.sh/chart: ingress-nginx-4.4.0 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: ingress-nginx + app.kubernetes.io/version: "1.5.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: controller + name: ingress-nginx-controller-admission + namespace: ingress-nginx +spec: + type: ClusterIP + ports: + - name: https-webhook + port: 443 + targetPort: webhook + appProtocol: https + selector: + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: ingress-nginx + app.kubernetes.io/component: controller +--- +# Source: ingress-nginx/templates/controller-service.yaml +apiVersion: v1 +kind: Service +metadata: + annotations: + labels: + helm.sh/chart: ingress-nginx-4.4.0 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: ingress-nginx + app.kubernetes.io/version: "1.5.1" + app.kubernetes.io/part-of: 
ingress-nginx + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: controller + name: ingress-nginx-controller + namespace: ingress-nginx +spec: + type: NodePort + externalTrafficPolicy: Local + ports: + - name: http + port: 80 + protocol: TCP + targetPort: http + appProtocol: http + nodePort: 31080 + - name: https + port: 443 + protocol: TCP + targetPort: https + appProtocol: https + nodePort: 31443 + selector: + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: ingress-nginx + app.kubernetes.io/component: controller +--- +# Source: ingress-nginx/templates/controller-deployment.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + helm.sh/chart: ingress-nginx-4.4.0 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: ingress-nginx + app.kubernetes.io/version: "1.5.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: controller + name: ingress-nginx-controller + namespace: ingress-nginx +spec: + selector: + matchLabels: + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: ingress-nginx + app.kubernetes.io/component: controller + revisionHistoryLimit: 10 + minReadySeconds: 0 + template: + metadata: + labels: + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: ingress-nginx + app.kubernetes.io/component: controller + spec: + dnsPolicy: ClusterFirst + containers: + - name: controller + image: "registry.k8s.io/ingress-nginx/controller:v1.5.1@sha256:4ba73c697770664c1e00e9f968de14e08f606ff961c76e5d7033a4a9c593c629" + imagePullPolicy: IfNotPresent + lifecycle: + preStop: + exec: + command: + - /wait-shutdown + args: + - /nginx-ingress-controller + - --publish-service=$(POD_NAMESPACE)/ingress-nginx-controller + - --election-id=ingress-nginx-leader + - --controller-class=k8s.io/ingress-nginx + - --ingress-class=nginx + - --configmap=$(POD_NAMESPACE)/ingress-nginx-controller + - --validating-webhook=:8443 + - --validating-webhook-certificate=/usr/local/certificates/cert + - --validating-webhook-key=/usr/local/certificates/key + securityContext: + capabilities: + drop: + - ALL + add: + - NET_BIND_SERVICE + runAsUser: 101 + allowPrivilegeEscalation: true + env: + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: LD_PRELOAD + value: /usr/local/lib/libmimalloc.so + livenessProbe: + failureThreshold: 5 + httpGet: + path: /healthz + port: 10254 + scheme: HTTP + initialDelaySeconds: 10 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 1 + readinessProbe: + failureThreshold: 3 + httpGet: + path: /healthz + port: 10254 + scheme: HTTP + initialDelaySeconds: 10 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 1 + ports: + - name: http + containerPort: 80 + protocol: TCP + - name: https + containerPort: 443 + protocol: TCP + - name: webhook + containerPort: 8443 + protocol: TCP + volumeMounts: + - name: webhook-cert + mountPath: /usr/local/certificates/ + readOnly: true + resources: + requests: + cpu: 100m + memory: 90Mi + nodeSelector: + kubernetes.io/os: linux + affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - podAffinityTerm: + labelSelector: + matchExpressions: + - key: app.kubernetes.io/name + operator: In + values: + - ingress-nginx + topologyKey: kubernetes.io/hostname + weight: 1 + serviceAccountName: ingress-nginx + terminationGracePeriodSeconds: 300 + volumes: + - name: webhook-cert 
+ secret: + secretName: ingress-nginx-admission +--- +# Source: ingress-nginx/templates/controller-hpa.yaml +apiVersion: autoscaling/v2beta2 +kind: HorizontalPodAutoscaler +metadata: + annotations: + labels: + helm.sh/chart: ingress-nginx-4.4.0 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: ingress-nginx + app.kubernetes.io/version: "1.5.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: controller + name: ingress-nginx-controller + namespace: ingress-nginx +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: ingress-nginx-controller + minReplicas: 3 + maxReplicas: 3 + metrics: + - type: Resource + resource: + name: memory + target: + type: Utilization + averageUtilization: 50 + - type: Resource + resource: + name: cpu + target: + type: Utilization + averageUtilization: 80 +--- +# Source: ingress-nginx/templates/controller-ingressclass.yaml +# We don't support namespaced ingressClass yet +# So a ClusterRole and a ClusterRoleBinding is required +apiVersion: networking.k8s.io/v1 +kind: IngressClass +metadata: + labels: + helm.sh/chart: ingress-nginx-4.4.0 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: ingress-nginx + app.kubernetes.io/version: "1.5.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: controller + name: nginx + annotations: + ingressclass.kubernetes.io/is-default-class: "true" +spec: + controller: k8s.io/ingress-nginx +--- +# Source: ingress-nginx/templates/admission-webhooks/validating-webhook.yaml +# before changing this value, check the required kubernetes version +# https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/#prerequisites +apiVersion: admissionregistration.k8s.io/v1 +kind: ValidatingWebhookConfiguration +metadata: + labels: + helm.sh/chart: ingress-nginx-4.4.0 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: ingress-nginx + app.kubernetes.io/version: "1.5.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: admission-webhook + name: ingress-nginx-admission +webhooks: + - name: validate.nginx.ingress.kubernetes.io + matchPolicy: Equivalent + rules: + - apiGroups: + - networking.k8s.io + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - ingresses + failurePolicy: Fail + sideEffects: None + admissionReviewVersions: + - v1 + clientConfig: + service: + namespace: "ingress-nginx" + name: ingress-nginx-controller-admission + path: /networking/v1/ingresses +--- +# Source: ingress-nginx/templates/admission-webhooks/job-patch/serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: ingress-nginx-admission + namespace: ingress-nginx + annotations: + "helm.sh/hook": pre-install,pre-upgrade,post-install,post-upgrade + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded + labels: + helm.sh/chart: ingress-nginx-4.4.0 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: ingress-nginx + app.kubernetes.io/version: "1.5.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: admission-webhook +--- +# Source: ingress-nginx/templates/admission-webhooks/job-patch/clusterrole.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: ingress-nginx-admission + annotations: + "helm.sh/hook": 
pre-install,pre-upgrade,post-install,post-upgrade + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded + labels: + helm.sh/chart: ingress-nginx-4.4.0 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: ingress-nginx + app.kubernetes.io/version: "1.5.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: admission-webhook +rules: + - apiGroups: + - admissionregistration.k8s.io + resources: + - validatingwebhookconfigurations + verbs: + - get + - update +--- +# Source: ingress-nginx/templates/admission-webhooks/job-patch/clusterrolebinding.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: ingress-nginx-admission + annotations: + "helm.sh/hook": pre-install,pre-upgrade,post-install,post-upgrade + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded + labels: + helm.sh/chart: ingress-nginx-4.4.0 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: ingress-nginx + app.kubernetes.io/version: "1.5.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: admission-webhook +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: ingress-nginx-admission +subjects: + - kind: ServiceAccount + name: ingress-nginx-admission + namespace: "ingress-nginx" +--- +# Source: ingress-nginx/templates/admission-webhooks/job-patch/role.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: ingress-nginx-admission + namespace: ingress-nginx + annotations: + "helm.sh/hook": pre-install,pre-upgrade,post-install,post-upgrade + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded + labels: + helm.sh/chart: ingress-nginx-4.4.0 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: ingress-nginx + app.kubernetes.io/version: "1.5.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: admission-webhook +rules: + - apiGroups: + - "" + resources: + - secrets + verbs: + - get + - create +--- +# Source: ingress-nginx/templates/admission-webhooks/job-patch/rolebinding.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: ingress-nginx-admission + namespace: ingress-nginx + annotations: + "helm.sh/hook": pre-install,pre-upgrade,post-install,post-upgrade + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded + labels: + helm.sh/chart: ingress-nginx-4.4.0 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: ingress-nginx + app.kubernetes.io/version: "1.5.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: admission-webhook +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: ingress-nginx-admission +subjects: + - kind: ServiceAccount + name: ingress-nginx-admission + namespace: "ingress-nginx" +--- +# Source: ingress-nginx/templates/admission-webhooks/job-patch/job-createSecret.yaml +apiVersion: batch/v1 +kind: Job +metadata: + name: ingress-nginx-admission-create + namespace: ingress-nginx + annotations: + "helm.sh/hook": pre-install,pre-upgrade + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded + labels: + helm.sh/chart: ingress-nginx-4.4.0 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: ingress-nginx + app.kubernetes.io/version: "1.5.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: 
Helm + app.kubernetes.io/component: admission-webhook +spec: + template: + metadata: + name: ingress-nginx-admission-create + labels: + helm.sh/chart: ingress-nginx-4.4.0 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: ingress-nginx + app.kubernetes.io/version: "1.5.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: admission-webhook + spec: + containers: + - name: create + image: "registry.k8s.io/ingress-nginx/kube-webhook-certgen:v20220916-gd32f8c343@sha256:39c5b2e3310dc4264d638ad28d9d1d96c4cbb2b2dcfb52368fe4e3c63f61e10f" + imagePullPolicy: IfNotPresent + args: + - create + - --host=ingress-nginx-controller-admission,ingress-nginx-controller-admission.$(POD_NAMESPACE).svc + - --namespace=$(POD_NAMESPACE) + - --secret-name=ingress-nginx-admission + env: + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + securityContext: + allowPrivilegeEscalation: false + restartPolicy: OnFailure + serviceAccountName: ingress-nginx-admission + nodeSelector: + kubernetes.io/os: linux + securityContext: + fsGroup: 2000 + runAsNonRoot: true + runAsUser: 2000 +--- +# Source: ingress-nginx/templates/admission-webhooks/job-patch/job-patchWebhook.yaml +apiVersion: batch/v1 +kind: Job +metadata: + name: ingress-nginx-admission-patch + namespace: ingress-nginx + annotations: + "helm.sh/hook": post-install,post-upgrade + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded + labels: + helm.sh/chart: ingress-nginx-4.4.0 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: ingress-nginx + app.kubernetes.io/version: "1.5.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: admission-webhook +spec: + template: + metadata: + name: ingress-nginx-admission-patch + labels: + helm.sh/chart: ingress-nginx-4.4.0 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: ingress-nginx + app.kubernetes.io/version: "1.5.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: admission-webhook + spec: + containers: + - name: patch + image: "registry.k8s.io/ingress-nginx/kube-webhook-certgen:v20220916-gd32f8c343@sha256:39c5b2e3310dc4264d638ad28d9d1d96c4cbb2b2dcfb52368fe4e3c63f61e10f" + imagePullPolicy: IfNotPresent + args: + - patch + - --webhook-name=ingress-nginx-admission + - --namespace=$(POD_NAMESPACE) + - --patch-mutating=false + - --secret-name=ingress-nginx-admission + - --patch-failure-policy=Fail + env: + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + securityContext: + allowPrivilegeEscalation: false + restartPolicy: OnFailure + serviceAccountName: ingress-nginx-admission + nodeSelector: + kubernetes.io/os: linux + securityContext: + fsGroup: 2000 + runAsNonRoot: true + runAsUser: 2000 diff --git a/apisnoop/manifest/kvtalos.yaml b/apisnoop/manifest/kvtalos.yaml new file mode 100644 index 0000000..c67a200 --- /dev/null +++ b/apisnoop/manifest/kvtalos.yaml @@ -0,0 +1,213 @@ +apiVersion: cluster.x-k8s.io/v1beta1 +kind: Cluster +metadata: + labels: + cluster-name: talos + name: talos +spec: + controlPlaneRef: + apiVersion: controlplane.cluster.x-k8s.io/v1beta1 + kind: TalosControlPlane + name: talos + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: KubevirtCluster + name: talos + clusterNetwork: + pods: + cidrBlocks: + - 192.168.0.0/16 + services: + cidrBlocks: + - 172.26.0.0/16 +--- 
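+# The KubevirtCluster below is what the Cluster's infrastructureRef points at; the
+# KubeVirt provider creates the control-plane Service (ClusterIP here) and fills in
+# the cluster's controlPlaneEndpoint from it. A rough way to apply this standalone
+# manifest and watch it converge (assuming the talos and kubevirt providers are
+# already initialised via clusterctl):
+#
+#   kubectl apply -f manifest/kvtalos.yaml
+#   kubectl get cluster,kubevirtcluster,taloscontrolplane,machinedeployment -w
+#   clusterctl get kubeconfig talos > talos.kubeconfig
+#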
+apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1 +kind: KubevirtCluster +metadata: + name: talos +spec: + controlPlaneServiceTemplate: + spec: + type: ClusterIP +--- +apiVersion: cluster.x-k8s.io/v1beta1 +kind: MachineDeployment +metadata: + name: talos +spec: + clusterName: talos + replicas: 0 + selector: + matchLabes: null + template: + spec: + clusterName: talos + version: v1.23.5 + bootstrap: + configRef: + apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 + kind: TalosConfigTemplate + name: talos + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1 + kind: KubevirtMachineTemplate + name: talos +--- +apiVersion: controlplane.cluster.x-k8s.io/v1alpha3 +kind: TalosControlPlane +metadata: + name: talos +spec: + replicas: 1 + version: "v1.25.2" + infrastructureTemplate: + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1 + kind: KubevirtMachineTemplate + name: talos-cp + controlPlaneConfig: + init: + generateType: init + configPatches: + - path: /machine/install + op: replace + value: + bootloader: true + disk: /dev/vda + image: ghcr.io/siderolabs/installer:v1.2.5 + wipe: false + extraKernelArgs: + - console=ttyS0 + - path: /machine/kubelet/extraArgs + op: add + value: + cloud-provider: external + - path: /machine/apiServer/extraArgs + op: add + value: + cloud-provider: external + - path: /machine/controllerManager/extraArgs + op: add + value: + cloud-provider: external + - path: /cluster/allowSchedulingOnMasters + op: add + value: true + controlplane: + generateType: controlplane + configPatches: + - path: /machine/install + op: replace + value: + bootloader: true + disk: /dev/vda + image: ghcr.io/siderolabs/installer:v1.2.5 + wipe: false + extraKernelArgs: + - console=ttyS0 + - path: /machine/kubelet/extraArgs + op: add + value: + cloud-provider: external + - path: /machine/apiServer/extraArgs + op: add + value: + cloud-provider: external + - path: /machine/controllerManager/extraArgs + op: add + value: + cloud-provider: external + - path: /cluster/allowSchedulingOnMasters + op: add + value: true +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1 +kind: KubevirtMachineTemplate +metadata: + name: talos-cp +spec: + template: + spec: + virtualMachineTemplate: + spec: + runStrategy: Always + template: + spec: + evictionStrategy: External + volumes: + - name: vmdisk + dataVolume: + name: vmdisk-dv + domain: + cpu: + cores: 2 + devices: + disks: + - name: vmdisk + disk: + bus: scsi + memory: + guest: "4Gi" + dataVolumeTemplates: + - metadata: + name: vmdisk-dv + spec: + pvc: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: "15Gi" + source: + http: + url: "https://github.com/siderolabs/talos/releases/download/v1.2.5/nocloud-amd64.raw.xz" +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1 +kind: KubevirtMachineTemplate +metadata: + name: talos +spec: + template: + spec: + virtualMachineTemplate: + spec: + runStrategy: Always + template: + spec: + evictionStrategy: External + volumes: + - name: vmdisk + dataVolume: + name: vmdisk-dv + domain: + cpu: + cores: 16 + devices: + disks: + - name: vmdisk + disk: + bus: scsi + memory: + guest: "64Gi" + dataVolumeTemplates: + - metadata: + name: vmdisk-dv + spec: + pvc: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: "100Gi" + source: + http: + url: "https://github.com/siderolabs/talos/releases/download/v1.2.5/nocloud-amd64.raw.xz" +--- +apiVersion: bootstrap.cluster.x-k8s.io/v1alpha3 +kind: TalosConfigTemplate +metadata: + name: talos +spec: + template: + spec: + 
generateType: join + talosVersion: v1.2.5 diff --git a/apisnoop/namespaces.yaml b/apisnoop/namespaces.yaml new file mode 100644 index 0000000..6878f0b --- /dev/null +++ b/apisnoop/namespaces.yaml @@ -0,0 +1,4 @@ +apiVersion: v1 +kind: Namespace +metadata: + name: ingress-nginx diff --git a/apisnoop/role+binding.yaml b/apisnoop/role+binding.yaml new file mode 100644 index 0000000..519bbc4 --- /dev/null +++ b/apisnoop/role+binding.yaml @@ -0,0 +1,120 @@ +# Requires: +# clusterctl init --infrastructure kubevirt +# clusterctl init --bootstrap talos --control-plane talos +# Some are at Cluster Level, some are at the coder namespace level +# +# Error: Failed to determine if resource "kv1/kv1" exists +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: coder-clusterapi-perms +rules: + - apiGroups: + - "apiextensions.k8s.io" + resources: + - "customresourcedefinitions" + verbs: + - "list" + - "get" + - apiGroups: + - "" + - "cluster.x-k8s.io" + - "bootstrap.cluster.x-k8s.io" + - "controlplane.cluster.x-k8s.io" + - "infrastructure.cluster.x-k8s.io" + - "addons.cluster.x-k8s.io" + - "cdi.kubevirt.io" + resources: + - "namespaces" + - "configmaps" + - "clusters" + - "machinedeployments" + - "talosconfigtemplates" + - "taloscontrolplanes" + - "kubevirtclusters" + - "kubevirtmachinetemplates" + - "clusterresourcesets" + - "kubeadmconfigtemplates" + - "kubeadmcontrolplanes" + - "datavolumes" + verbs: + - "list" + - "get" + - "patch" + - "create" + - "delete" + - apiGroups: + - "" + resources: + - secrets + verbs: + - get + - list + - apiGroups: + - "networking.k8s.io" + - "" + resources: + - ingresses + - services + verbs: + - get + - list + - create + - patch + - update + - delete +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: coder-clusterapi + namespace: coder +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: coder-clusterapi-perms +subjects: + - kind: ServiceAccount + name: coder + namespace: coder +# --- +# apiVersion: rbac.authorization.k8s.io/v1 +# kind: Role +# metadata: +# name: coder-clusterapi-perms +# namespace: coder +# rules: +# - apiGroups: +# - "" +# resources: +# - "configmaps" +# verbs: +# - "list" +# - "get" +# --- +# apiVersion: rbac.authorization.k8s.io/v1 +# kind: RoleBinding +# metadata: +# name: coder-clusterapi +# namespace: coder +# roleRef: +# apiGroup: rbac.authorization.k8s.io +# kind: Role +# name: coder-clusterapi-perms +# subjects: +# - kind: ServiceAccount +# name: coder +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: coder-clusterapi-cluster + namespace: coder +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: coder-clusterapi-perms +subjects: + - kind: ServiceAccount + name: coder + namespace: coder diff --git a/apisnoop/values/ingress-nginx.yaml b/apisnoop/values/ingress-nginx.yaml new file mode 100644 index 0000000..ac7c8cf --- /dev/null +++ b/apisnoop/values/ingress-nginx.yaml @@ -0,0 +1,23 @@ +podSecurityPolicy: + enabled: false +controller: + affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - podAffinityTerm: + labelSelector: + matchExpressions: + - key: app.kubernetes.io/name + operator: In + values: + - ingress-nginx + topologyKey: kubernetes.io/hostname + weight: 1 + ingressClassResource: + default: true + service: + externalTrafficPolicy: Local + type: NodePort + nodePorts: + http: "31080" + https: "31443"
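+# These fixed NodePorts (31080/31443) are what the <workspace>-web Service and
+# Ingress in cluster.tf target on the KubeVirt VM pods, so web traffic from the
+# management cluster reaches the child cluster's ingress-nginx without a
+# LoadBalancer. A quick sanity check from inside the child cluster (sketch):
+#
+#   kubectl -n ingress-nginx get svc ingress-nginx-controller \
+#     -o jsonpath='{range .spec.ports[*]}{.name}={.nodePort}{"\n"}{end}'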