diff --git a/.github/smol_k8s_lab_config_k3d.yaml b/.github/smol_k8s_lab_config_k3d.yaml new file mode 100644 index 000000000..526746083 --- /dev/null +++ b/.github/smol_k8s_lab_config_k3d.yaml @@ -0,0 +1,1115 @@ +--- +smol_k8s_lab: + # Terminal User Interface with clickable buttons. + # Useful for learning smol-k8s-lab or verifying your configuration + tui: + # if set to true, we'll always launch smol-k8s-lab in interactive mode :) + # else you need to pass in --interactive or -i to use the TUI + enabled: false + # show bottom footer help bar + show_footer: true + # accessibility options for users that benefit from TTS and Bell sounds + accessibility: + bell: + # ring the built in terminal bell on focus to new elements on the screen + on_focus: true + # ring the built in terminal bell when something is wrong + on_error: true + text_to_speech: + # use a specific program for text to speech - needs to be a full path + # macOS default: say + speech_program: say + # read aloud the screen title and description + screen_titles: true + # read aloud the element id, value, and tooltip each time you switch focus + on_focus: false + # press f5 to read the element id and selected row of DataTables + on_key_press: true + + # enable text to speech + # k9s is a terminal UI dashboard and interface for interacting with Kubernetes + k9s: + # when set to true, if smol-k8s-lab is in interactive mode, it runs k9s + # immediately after the cluster is up and enabled apps have been installed + enabled: false + # default command to run when k9s launches. Defaults to application.argoproj.io + # so that you can view the status of all of your argo apps immediately. 
This + # default results in running: k9s --command applications.argoproj.io + command: applications.argoproj.io + + # logging config for the smol-k8s-lab CLI + log: + # path of file to log to if console logging is NOT desired + file: "" + # logging level, Options: debug, info, warn, error + level: info + + # store your password and tokens directly in your local password manager + local_password_manager: + enabled: false + # enable the use of Bitwarden as your password manager. + # To use Bitwarden, you must export the following environment variables: + # BW_PASSWORD, BW_CLIENTID, BW_CLIENTSECRET, BW_SESSION + # If you're missing any of these, smol-k8s-lab will prompt for them + name: bitwarden + # if existing items are found in your password manager, do one of: + # + # ask: (default in tui mode) display a dialog window asking you how to proceed + # edit: edit item, if there's one item found, ask if multiple found + # duplicate: create an additional item with the same name + # no_action: don't do anything, just continue on with the script + duplicate_strategy: ask + +# which distros of Kubernetes to deploy. Options: kind, k3s, k3d +# NOTE: only kind and k3d are available on macOS at this time +k8s_distros: + k3s: + # set to true to enable deploying a Kubernetes cluster using k3s + enabled: false + # if k8s_distro set to k3s/k3d, you can add an array of extra arguments to pass + # to the k3s install script as a k3s.yaml file. 
If you enable cilium, we + # automatically pass in flannel-backend: none and disable-network-policy: true + k3s_yaml: + # if you enable MetalLB, we automatically add servicelb to the disable list + # enables encryption at rest for Kubernetes secrets + secrets-encryption: true + # disables traefik so we can enable ingress-nginx, remove if you're using traefik + disable: + - "traefik" + node-label: + - "ingress-ready=true" + kubelet-arg: + - "max-pods=150" + nodes: {} + + k3d: + # set to true to enable deploying a Kubernetes cluster using k3d + enabled: true + # if k8s_distro set to k3s/k3d, you can add an array of extra arguments to pass + # to the k3s install script as a k3s.yaml file. if you enable cilium, we + # automatically pass in flannel-backend: none and disable-network-policy: true + k3s_yaml: + # if you enable MetalLB, we automatically add servicelb to the disable list + # enables encryption at rest for Kubernetes secrets + secrets-encryption: true + # disables traefik so we can enable ingress-nginx, remove if you're using traefik + disable: + - "traefik" + kubelet-arg: + - "max-pods=150" + node-label: + - "ingress-ready=true" + # how many dockerized k3s nodes to deploy + nodes: + control_plane: 1 + workers: 0 + + kind: + # set to true to enable deploying a Kubernetes cluster using kind + enabled: false + # change the kubelet config for this node in k3s, feel free to add more values + kubelet_extra_args: + node-labels: "ingress-ready=true" + max-pods: 110 + pods-per-core: 0 + resolv-conf: "/etc/resolv.conf" + networking_args: + ipFamily: "ipv4" + disableDefaultCNI: false + apiServerAddress: "127.0.0.1" + podSubnet: "10.244.0.0/16" + # how many dockerized kind nodes to deploy + nodes: + control_plane: 1 + workers: 0 + +# anything here gets set for all apps if you're using our default repos +apps_global_config: + # setting this changes all the below domains to use the following cluster_issuer + # change to letsencrypt-prod when you're ready to go live with 
your infra + cluster_issuer: "letsencrypt-staging" + # change to your tz: https://en.wikipedia.org/wiki/List_of_tz_database_time_zones#List + time_zone: "Europe/Amsterdam" + # always deploy external secrets. *Must be a string of "none" (don't use external secrets) or "bitwarden" to use bitwarden for external secrets* + external_secrets: "none" + +# --------------------------------------------------------------------------- +# Argo CD deployed and maintained Applications to run on Kubernetes +# --------------------------------------------------------------------------- +apps: + argo_cd: + # Set to false if you just want a bare cluster with only the above apps + enabled: true + description: | + [link=https://argo-cd.readthedocs.io/en/stable/]Argo CD[/link] is a declarative, GitOps continuous delivery tool for Kubernetes. + + smol-k8s-lab installs Argo CD with helm initially to support initial configuration of your admin user and disabling of dex. After your OIDC provider is configured, Argo CD begins managing itself using the below configured Argo CD repo. + + The Appset Secret Plugin is required if you want to use the default [link="https://github.com/small-hack/argocd-apps"]small-hack/argocd-apps[/link] [gold3]argo.repo[/gold3] and default enabled if Argo CD is enabled, so we can create a k8s Secret with your more private info such as hostnames, IP addresses, and emails in a deployment that runs alongside Argo CD to provide Argo CD ApplicationSets. This plugin has no ingress and cannot be reached from outside the cluster. + + To disable Appset Secret Plugin, please set directory recursion to false. 
+ + Learn more: [link=https://github.com/small-hack/appset-secret-plugin]https://github.com/small-hack/appset-secret-plugin[/link] + argo: + # secrets keys to make available to Argo CD ApplicationSets + secret_keys: + # FQDN hostname for accessing the Argo CD web interface + hostname: "argocd.test.com" + # which oidc provider to use for Argo CD: defaults to Zitadel + oidc_provider: "zitadel" + # git repo to install the Argo CD app from + repo: "https://github.com/small-hack/argocd-apps" + # path in the argo repo to point to. Trailing slash very important! + # change to argocd/argocd to not use app of apps with secret plugin + path: "argocd/app_of_apps/" + # either the branch or tag to point at in the argo repo above + revision: main + # namespace to install the k8s app in + namespace: "argocd" + # recurse directories in the provided git repo, if true, we also deploy the appset secret plugin + directory_recursion: true + # source repos for Argo CD argo-cd Project (in addition to argo_cd.argo.repo) + project: + source_repos: + - https://argoproj.github.io/argo-helm + - https://small-hack.github.io/appset-secret-plugin + destination: + # automatically includes the app's namespace and argocd's namespace + namespaces: + - prometheus + + # This app is installed with helm or manifests depending on what is recommended + # for your k8s distro. Becomes managed by Argo CD if you enable it below + cert_manager: + # ! NOTE: you currently can't set this to false. It is necessary to deploy + # most of our supported Argo CD apps since they often have TLS enabled either + # for pod connectivity or ingress + enabled: true + description: | + [link=https://cert-manager.io/]cert-manager[/link] let's you use LetsEncrypt to generate TLS certs for all your apps with ingress. 
+ + smol-k8s-lab supports initialization by creating two [link=https://cert-manager.io/docs/concepts/issuer/]ClusterIssuers[/link] for both staging and production using a provided email address as the account ID for acme. + + # Initialize of the app through smol-k8s-lab + init: + # Deploys staging and prod ClusterIssuers and prompts you for + # values if they were not set. Switch to false if you don't want + # to deploy any ClusterIssuers + enabled: true + values: + # Used for to generate certs and alert you if they're going to expire + email: "cicdtest@test.com" + # choose between "http01" or "dns01" + cluster_issuer_acme_challenge_solver: http01 + # only needed if cluster_issuer_challenge_solver set to dns01 + # currently only cloudflare is supported + cluster_issuer_acme_dns01_provider: cloudflare + sensitive_values: + # can be passed in as env vars if you pre-pend CERT_MANAGER_ + # e.g. CERT_MANAGER_CLOUDFLARE_API_TOKEN + - CLOUDFLARE_API_TOKEN + argo: + secret_keys: {} + # git repo to install the Argo CD app from + repo: "https://github.com/small-hack/argocd-apps" + # path in the argo repo to point to. Trailing slash very important! + path: "cert-manager/" + # either the branch or tag to point at in the argo repo above + revision: main + # namespace to install the k8s app in + namespace: "cert-manager" + # recurse directories in the provided git repo + directory_recursion: false + # source repos for cert-manager CD App Project (in addition to argo.repo) + project: + source_repos: + - https://charts.jetstack.io + destination: + # automatically includes the app's namespace and argocd's namespace + namespaces: + - kube-system + + # This app is installed with helm or manifests depending on what is recommended + # for your k8s distro. 
Becomes managed by Argo CD if you enable it below + cilium: + enabled: false + description: | + [link=https://cilium.io/]Cilium[/link] is an open source, cloud native solution for providing, securing, and observing network connectivity between workloads, fueled by the revolutionary Kernel technology eBPF. + # Initialize of the app through smol-k8s-lab + init: + enabled: true + argo: + secret_keys: + hostname: "" + # git repo to install the Argo CD app from + repo: "https://github.com/small-hack/argocd-apps" + # path in the argo repo to point to. Trailing slash very important! + path: "demo/cilium/" + # either the branch or tag to point at in the argo repo above + revision: main + # namespace to install the k8s app in + namespace: "cilium" + # recurse directories in the provided git repo + directory_recursion: false + # source repos for Argo CD cilium Project + project: + source_repos: + - "https://helm.cilium.io/" + destination: + # automatically includes the app's namespace and argocd's namespace + namespaces: [] + + cnpg_operator: + description: | + CloudNative PostgreSQL Operator for Kubernetes. This lets you create and + manage many clusters of postgresql, including backups to s3. + # default disabled while a helm chart is being actively developed + enabled: false + argo: + # secret keys to provide for the argocd secret plugin app, none by default + secret_keys: {} + # git repo to install the Argo CD app from + repo: https://github.com/small-hack/argocd-apps + # path in the argo repo to point to. Trailing slash very important! 
+ path: postgres/operators/cloud-native-postgres/ + # either the branch or tag to point at in the argo repo above + revision: main + # namespace to install the k8s app in + namespace: cnpg-system + # recurse directories in the provided git repo + directory_recursion: false + # source repos for Argo CD App Project (in addition to argo.repo) + project: + source_repos: + - https://github.com/small-hack/argocd-apps + - https://cloudnative-pg.github.io/charts + destination: + # automatically includes the app's namespace and argocd's namespace + namespaces: [] + + external_secrets_operator: + enabled: true + description: | + [link=https://external-secrets.io/latest/]External Secrets Operator[/link] is a Kubernetes operator that integrates external secret management systems like HashiCorp Vault, CyberArk Conjur, Bitwarden, Gitlab, and many more. The operator reads information from external APIs and automatically injects the values into a Kubernetes Secret. + + To deploy the Bitwarden provider, please set apps_global_config.external_secrets to "bitwarden". + + The [link="https://github.com/jessebot/bitwarden-eso-provider/"]Bitwarden External Secrets Provider[/link] is used to store k8s secrets in Bitwarden®. This deployment has no ingress and can't be connected to from outside the cluster. There is a networkPolicy that only allows the pod to communicate with the External Secrets Operator in the same namespaces. + + smol-k8s-lab supports initialization by creating a Kubernetes secret with your Bitwarden credentials so that the provider can unlock your vault. You will need to set up an [link=https://bitwarden.com/help/personal-api-key/]API key[/link] ahead of time. 
You can pass these credentials in by setting the following environment variables: + + BITWARDEN_PASSWORD, BITWARDEN_CLIENTSECRET, BITWARDEN_CLIENTID + # Initialization of the app through smol-k8s-lab + init: + enabled: false + argo: + # git repo to install the Argo CD app from + repo: https://github.com/small-hack/argocd-apps + # path in the argo repo to point to. Trailing slash very important! + # change to external-secrets-operator/external-secrets-operator/ to deploy + # ONLY the external-secrets-operator, so this will not use app of apps and + # therefore we will not deploy the Bitwarden ESO provider. Use if you want to use + # a different provider + path: external-secrets-operator/app_of_apps/ + # either the branch or tag to point at in the argo repo above + revision: main + # namespace to install the k8s app in + namespace: external-secrets + # recurse directories in the provided git repo + directory_recursion: false + # secret keys to provide for the Argo CD Appset secret plugin, none by default + secret_keys: {} + # source repos for Argo CD App Project (in addition to app.argo.repo) + project: + source_repos: + - https://charts.external-secrets.io + # you can remove this one if you're not using bitwarden to store your k8s secrets + - https://small-hack.github.io/bitwarden-eso-provider + destination: + # automatically includes the app's namespace and argocd's namespace + namespaces: [] + + infisical: + enabled: false + description: | + ⚠️ [magenta]demo Status[/magenta] + + [link=https://infisical.com/]Infisical[/link] is an open-source, end-to-end encrypted secret management platform that enables teams to easily manage and sync their env vars. + + This app will remain in demo status till there is a way to create an initial user easily. 
+ # Initialization of the app through smol-k8s-lab + init: + enabled: true + argo: + secret_keys: + hostname: "" + # git repo to install the Argo CD app from + repo: "https://github.com/small-hack/argocd-apps" + # path in the argo repo to point to. Trailing slash very important! + path: "demo/infisical/" + # either the branch or tag to point at in the argo repo above + revision: main + # namespace to install the k8s app in + namespace: "infisical" + # recurse directories in the provided git repo + directory_recursion: false + # source repos for Argo CD App Project (in addition to app.argo.repo) + project: + source_repos: + - "registry-1.docker.io" + - "https://dl.cloudsmith.io/public/infisical/helm-charts/helm/charts/" + destination: + # automatically includes the app's namespace and argocd's namespace + namespaces: [] + + # This app is installed with helm or manifests depending on what is recommended + # for your k8s distro. Becomes managed by Argo CD if you enable it below + ingress_nginx: + enabled: true + description: | + By default, deploys [link=https://github.com/kubernetes/ingress-nginx]ingress-nginx[/link] and cert-manager as one argocd app of apps and project in the same namespace + Ingress-nginx cannot be disabled unless you provide your own ingress controller app + argo: + # git repo to install the Argo CD app from + repo: "https://github.com/small-hack/argocd-apps" + # path in the argo repo to point to. Trailing slash very important! 
+ # change to "ingress/ingress-nginx/" to not use app of apps + path: "ingress-nginx/" + # either the branch or tag to point at in the argo repo above + revision: main + # namespace to install the k8s app in + namespace: "ingress-nginx" + # recurse directories in the provided git repo + directory_recursion: false + # secret keys to provide for the Argo CD Appset secret plugin, none by default + secret_keys: {} + # source repos for Argo CD App Project (in addition to argo.repo) + project: + source_repos: + - https://charts.jetstack.io + - "https://kubernetes.github.io/ingress-nginx" + - "https://jessebot.github.io/vouch-helm-chart" + - "https://github.com/kubernetes/ingress-nginx" + destination: + # automatically includes the app's namespace and argocd's namespace + namespaces: [] + + k8tz: + enabled: false + description: | + [link=https://github.com/k8tz/k8tz]k8tz[/link] is a Kubernetes admission controller and a CLI tool to inject timezones into Pods and CronJobs. This solves the issue of the default timezone for most images being UTC, yet not being guaranteed from container to container. + + smol-k8s-lab will use your globally defined timezone to set k8tz's timezone. You can view and change the default value of Europe/Amsterdam by clicking the [cornflower_blue]Modify Globals[/] button above. You can find your timezone identifier on [link=https://wikipedia.org/wiki/List_of_tz_database_time_zones#List]this wikipedia list[/link]. + init: + enabled: true + argo: + secret_keys: [] + # git repo to install the Argo CD app from + repo: "https://github.com/small-hack/argocd-apps" + # path in the argo repo to point to. Trailing slash very important! 
+ path: "k8tz/" + # either the branch or tag to point at in the argo repo above + revision: main + # namespace to install the k8s app in + namespace: "k8tz" + # recurse directories in the provided git repo + directory_recursion: false + # source repos for Argo CD App Project (in addition to app.argo.repo) + project: + source_repos: + - "https://k8tz.github.io/k8tz/" + destination: + # automatically includes the app's namespace and argocd's namespace + namespaces: [] + + k8up: + enabled: false + description: | + [link=https://k8up.io]K8up[/link] ([i]pronounced "ketchup?"[/]) is a Kubernetes Operator based on Restic for backups of Persistent Volumes in k8s into S3 compatible storage like MinIO. Backs up all PVCs marked as ReadWriteMany, ReadWriteOnce or with a specific label. Can also perform "Application Aware" backups, containing the output of any tool capable of writing to stdout. You can also perform individual, on-demand backups, and restores from the k8up CLI tool. + + smol-k8s-lab will use your globally defined timezone to set k8up's timezone. You can view and change the default value of Europe/Amsterdam by clicking the [cornflower_blue]Modify Globals[/] button above. You can find your timezone identifier on [link=https://wikipedia.org/wiki/List_of_tz_database_time_zones#List]this wikipedia list[/link]. + init: + enabled: true + argo: + secret_keys: [] + # git repo to install the Argo CD app from + repo: "https://github.com/small-hack/argocd-apps" + # path in the argo repo to point to. Trailing slash very important! 
+ path: "k8up/" + # either the branch or tag to point at in the argo repo above + revision: main + # namespace to install the k8s app in + namespace: "k8up" + # recurse directories in the provided git repo + directory_recursion: false + # source repos for Argo CD App Project (in addition to app.argo.repo) + project: + source_repos: + - "https://k8up-io.github.io/k8up" + - "https://github.com/k8up-io/k8up.git" + destination: + # automatically includes the app's namespace and argocd's namespace + namespaces: [] + + kepler: + description: | + [link=https://github.com/sustainable-computing-io/kepler]Kepler[/link] (Kubernetes Efficient Power Level Exporter) uses eBPF to probe energy-related system stats and exports them as Prometheus metrics. + enabled: false + # Initialization of the app through smol-k8s-lab + init: + enabled: false + argo: + # secret keys to provide for the argocd secret plugin app, none by default + secret_keys: {} + # git repo to install the Argo CD app from + repo: "https://github.com/small-hack/argocd-apps" + # path in the argo repo to point to. Trailing slash very important! + path: "demo/kepler/" + # either the branch or tag to point at in the argo repo above + revision: main + # namespace to install the k8s app in + namespace: "kepler" + # recurse directories in the provided git repo + directory_recursion: false + # source repos for Argo CD App Project (in addition to argo.repo) + project: + source_repos: + - "https://sustainable-computing-io.github.io/kepler-helm-chart" + destination: + # automatically includes the app's namespace and argocd's namespace + namespaces: [] + + kubevirt: + description: | + [link=https://kubevirt.io/]kubevirt[/link] lets you manage virtual machines via Kubernetes. 
+ + # default disabled while a helm chart is being actively developed + enabled: false + argo: + # secret keys to provide for the argocd secret plugin app, none by default + secret_keys: {} + # git repo to install the Argo CD app from + repo: "https://github.com/small-hack/argocd-apps" + # path in the argo repo to point to. Trailing slash very important! + path: "kubevirt/" + # either the branch or tag to point at in the argo repo above + revision: main + # namespace to install the k8s app in + namespace: "kubevirt" + # recurse directories in the provided git repo + directory_recursion: false + # source repos for Argo CD App Project (in addition to argo.repo) + project: + source_repos: + - "https://github.com/small-hack/argocd-apps" + destination: + # automatically includes the app's namespace and argocd's namespace + namespaces: [] + + kyverno: + description: | + [link=https://kyverno.io/]Kyverno[/link] is a native policy manager for Kubernetes. + enabled: false + argo: + # secret keys to provide for the argocd secret plugin app, none by default + secret_keys: {} + # git repo to install the Argo CD app from + repo: "https://github.com/small-hack/argocd-apps" + # path in the argo repo to point to. Trailing slash very important! + path: "kyverno/" + # either the branch or tag to point at in the argo repo above + revision: main + # namespace to install the k8s app in + namespace: "kyverno" + # recurse directories in the provided git repo + directory_recursion: false + # source repos for Argo CD App Project (in addition to argo.repo) + project: + source_repos: [] + destination: + # automatically includes the app's namespace and argocd's namespace + namespaces: [] + + mastodon: + description: | + [link=https://joinmastodon.org/]Mastodon[/link] is an open source self hosted social media network. + + smol-k8s-lab supports initializing mastodon, by setting up your hostname, SMTP credentials, redis credentials, postgresql credentials, and admin user credentials. 
We pass all credentials as secrets in the namespace and optionally save them to Bitwarden. + + smol-k8s-lab also creates a local s3 endpoint and as well as S3 bucket and credentials if you enable set mastodon.argo.secret_keys.s3_provider to "minio" or "seaweedfs". Both seaweedfs and minio require you to specify a remote s3 endpoint, bucket, region, and accessID/secretKey so that we can make sure you have remote backups. + + To provide sensitive values via environment variables to smol-k8s-lab use: + - MASTODON_SMTP_PASSWORD + - MASTODON_S3_BACKUP_ACCESS_ID + - MASTODON_S3_BACKUP_SECRET_KEY + - MASTODON_RESTIC_REPO_PASSWORD + enabled: false + init: + enabled: true + values: + # admin user + admin_user: "tootadmin" + # admin user's email + admin_email: "" + # mail server to send verification and notification emails + smtp_host: "change@me-to-enable.mail" + # mail user for smtp host + smtp_user: "change me to enable mail" + sensitive_values: + # these can be passed in as env vars if you pre-pend MASTODON_ to each one + - SMTP_PASSWORD + - S3_BACKUP_ACCESS_ID + - S3_BACKUP_SECRET_KEY + - RESTIC_REPO_PASSWORD + argo: + # secrets keys to make available to Argo CD ApplicationSets + secret_keys: + admin_user: tootadmin + # hostname that users go to in the browser + hostname: "" + # set the local s3 provider for mastodon's public data in one bucket + # and private database backups in another. 
can be minio or seaweedfs + s3_provider: seaweedfs + # how large the backing pvc's capacity should be for minio or seaweedfs + s3_pvc_capacity: 120Gi + # local s3 endpoint for postgresql backups, backed up constantly + s3_endpoint: "" + s3_region: eu-west-1 + # Remote S3 configuration, for pushing remote backups of your local postgresql backups + # these are done only nightly right now, for speed and cost optimization + s3_backup_endpoint: "" + s3_backup_region: "" + s3_backup_bucket: "" + # git repo to install the Argo CD app from + repo: https://github.com/small-hack/argocd-apps + # path in the argo repo to point to. Trailing slash very important! + path: mastodon/small-hack/app_of_apps/ + # either the branch or tag to point at in the argo repo above + revision: main + # namespace to install the k8s app in + namespace: mastodon + # recurse directories in the git repo + directory_recursion: false + # source repos for Argo CD App Project (in addition to argo.repo) + project: + # depending on if you use seaweedfs or minio, you can remove the other source repo + source_repos: + - registry-1.docker.io + - https://small-hack.github.io/cloudnative-pg-cluster-chart + - https://operator.min.io/ + - https://seaweedfs.github.io/seaweedfs/helm + - https://small-hack.github.io/mastodon-helm-chart + destination: + # automatically includes the app's namespace and argocd's namespace + namespaces: [] + + matrix: + description: | + [link=https://matrix.org/]Matrix[/link] is an open protocol for decentralised, secure communications. + This deploys a matrix synapse server, element (web frontend), and turn server (voice) + + smol-k8s-lab supports initialization by creating initial secrets for your: + - matrix, element, and federation hostnames, + - credentials for: postgresql, admin user, S3 storage, and SMTP + + smol-k8s-lab also sets up an OIDC application via Zitadel. 
+ + To provide sensitive values via environment variables to smol-k8s-lab use: + - MATRIX_SMTP_PASSWORD + - MATRIX_S3_BACKUP_ACCESS_ID + - MATRIX_S3_BACKUP_SECRET_KEY + - MATRIX_RESTIC_REPO_PASSWORD + enabled: false + init: + enabled: true + values: + smtp_user: "change me to enable mail" + smtp_host: "change@me-to-enable.mail" + sensitive_values: + - SMTP_PASSWORD + - S3_BACKUP_ACCESS_ID + - S3_BACKUP_SECRET_KEY + - RESTIC_REPO_PASSWORD + argo: + # secrets keys to make available to Argo CD ApplicationSets + secret_keys: + # hostname of the synapse matrix server + hostname: "" + # the hostname of the element web interface + element_hostname: "" + # hostname for federation, that others can see you on the fediverse + federation_hostname: "" + # email for of the admin user + admin_email: "" + # choose S3 as the local primary object store from either: seaweedfs, or minio + # SeaweedFS - deploy SeaweedFS filer/s3 gateway + # MinIO - deploy MinIO vanilla helm chart + s3_provider: seaweedfs + # local s3 provider bucket name + s3_bucket: matrix + # the endpoint you'd like to use for your minio or SeaweedFS instance + s3_endpoint: "" + # how large the backing pvc's capacity should be for minio or seaweedfs + s3_pvc_capacity: 100Gi + s3_region: eu-west-1 + # these are for pushing remote backups of your local s3 storage, for speed and cost optimization + s3_backup_endpoint: "" + s3_backup_bucket: "" + s3_backup_region: "" + # git repo to install the Argo CD app from + repo: https://github.com/small-hack/argocd-apps + # path in the argo repo to point to. Trailing slash very important! 
+ path: matrix/app_of_apps/ + # either the branch or tag to point at in the argo repo above + revision: main + # namespace to install the k8s app in + namespace: matrix + # recurse directories in the git repo + directory_recursion: false + # source repos for Argo CD App Project (in addition to argo.repo) + project: + source_repos: + - https://small-hack.github.io/cloudnative-pg-cluster-chart + - https://small-hack.github.io/matrix-chart + - https://operator.min.io/ + - https://seaweedfs.github.io/seaweedfs/helm + destination: + # automatically includes the app's namespace and argocd's namespace + namespaces: [] + + # This app is installed with helm or manifests depending on what is recommended + # for your k8s distro. Becomes managed by Argo CD if you enable it below + metallb: + enabled: false + description: | + [link=https://metallb.org/]MetalLB[/link] Helps expose IP addresses for loadbalancers on metal if you're on a vm or container where you can't get an IP. + + smol-k8s-lab support initialization by deploying a default l2Advertisement and IPAddressPool. + + Cloud Compatibility: [link=https://metallb.org/installation/clouds/]https://metallb.org/installation/clouds/[/link] + + # Initialize of the app through smol-k8s-lab + init: + enabled: true + values: + address_pool: [] + argo: + # secret keys to provide for the argocd secret plugin app, none by default + secret_keys: {} + # git repo to install the Argo CD app from + repo: "https://github.com/small-hack/argocd-apps" + # path in the argo repo to point to. Trailing slash very important! 
+ path: "metallb/" + # either the branch or tag to point at in the argo repo above + revision: main + # namespace to install the k8s app in + namespace: "metallb-system" + # recurse directories in the provided git repo + directory_recursion: false + # source repos for Argo CD metallb Project (in addition to metallb.argo.repo) + project: + source_repos: + - "https://github.com/metallb/metallb.git" + destination: + # automatically includes the app's namespace and argocd's namespace + namespaces: [] + + minio_operator: + enabled: false + description: | + [link=https://min.io/]MinIO®️[/link] is a high-performance, self hosted S3 compatible object store. MinIO is dual-licensed under open source GNU AGPL v3 and a commercial enterprise license. + + smol-k8s-lab deploys MinIO Operator and admin Console. The operator creates Custom Resource Definitions for MinIO Tenants, which are isolated instances of minio with their own API and console endpoints. This is useful to isolate different teams, apps, regions, etc. Below we also have a default tenant to get you started, but keep in mind, if you're using the default social apps (Nextcloud, Matrix, and Mastodon), we already create tenants for those. + init: + enabled: true + argo: + # secrets keys to make available to Argo CD ApplicationSets + secret_keys: + admin_console_hostname: "" + # git repo to install the Argo CD app from + repo: "https://github.com/small-hack/argocd-apps" + # path in the argo repo to point to. Trailing slash very important! 
+ path: "minio/operator/" + # either the branch or tag to point at in the argo repo above + revision: main + # namespace to install the k8s app in + namespace: "minio" + # recurse directories in the provided git repo + directory_recursion: false + # source repos for Argo CD App Project (in addition to argo.repo) + project: + source_repos: + - https://operator.min.io/ + destination: + # automatically includes the app's namespace and argocd's namespace + namespaces: [] + + minio_tenant: + enabled: false + description: | + [link=https://min.io/]MinIO®️[/link] is a high-performance, self hosted S3 compatible object store. MinIO is dual-licensed under open source GNU AGPL v3 and a commercial enterprise license. + + smol-k8s-lab deploys the MinIO API and User Console for a tenant, along with an OIDC app provided by Zitadel. + init: + enabled: true + values: + # this is the admin user for the tenant + root_user: "minio-admin" + argo: + # secrets keys to make available to Argo CD ApplicationSets + secret_keys: + tenant_name: "" + api_hostname: "" + user_console_hostname: "" + # options: "local" or "s3". local is for a local filesystem mount. s3 is for using an s3 bucket + backup_method: "local" + s3_backup_endpoint: "none" + s3_backup_bucket: "set me to your bucket name" + # git repo to install the Argo CD app from + repo: "https://github.com/small-hack/argocd-apps" + # path in the argo repo to point to. Trailing slash very important! 
+ path: "minio/tenant/" + # either the branch or tag to point at in the argo repo above + revision: main + # namespace to install the k8s app in + namespace: "minio" + # recurse directories in the provided git repo + directory_recursion: false + # source repos for Argo CD App Project (in addition to argo.repo) + project: + source_repos: + - https://operator.min.io/ + destination: + # automatically includes the app's namespace and argocd's namespace + namespaces: [] + + nextcloud: + enabled: false + description: | + [link=https://nextcloud.com/]Nextcloud Hub[/link] is the industry-leading, fully open-source, on-premises content collaboration platform. Teams access, share and edit their documents, chat and participate in video calls and manage their mail and calendar and projects across mobile, desktop and web interfaces + + smol-k8s-lab supports initialization by setting up your admin username, password, and SMTP username and password, as well as your redis and postgresql credentials. + + To avoid providing sensitive values everytime you run smol-k8s-lab, consider exporting the following environment variables before running smol-k8s-lab: + - NEXTCLOUD_SMTP_PASSWORD + - NEXTCLOUD_S3_BACKUP_ACCESS_KEY + - NEXTCLOUD_S3_BACKUP_ACCESS_ID + - NEXTCLOUD_RESTIC_REPO_PASSWORD + + Note: smol-k8s-lab is not affiliated with Nextcloud GmbH. This is a community-supported-only install method. + # initialize the app by setting up new k8s secrets and/or bitwarden items + init: + enabled: true + values: + # change the name of your admin user to whatever you like. 
This is used in an admin credentials k8s secret + admin_user: "nextcloud_admin" + smtp_user: "change me to enable mail" + smtp_host: "change-me-to-real-email@to-enable.mail" + sensitive_values: + - SMTP_PASSWORD + - S3_BACKUP_ACCESS_KEY + - S3_BACKUP_ACCESS_ID + - RESTIC_REPO_PASSWORD + argo: + # secrets keys to make available to Argo CD ApplicationSets + secret_keys: + hostname: "" + # choose S3 as the local primary object store from either: seaweedfs, or minio + # SeaweedFS - deploy SeaweedFS filer/s3 gateway + # MinIO - deploy MinIO vanilla helm chart + s3_provider: seaweedfs + # the endpoint you'd like to use for your minio or SeaweedFS instance + s3_endpoint: "" + # how large the backing pvc's capacity should be for minio or seaweedfs + s3_pvc_capacity: 100Gi + s3_region: eu-west-1 + # these are for pushing remote backups of your local s3 storage, for speed and cost optimization + s3_backup_endpoint: "" + s3_backup_bucket: "" + s3_backup_region: "" + # git repo to install the Argo CD app from + repo: https://github.com/small-hack/argocd-apps + # path in the argo repo to point to. Trailing slash very important! + path: nextcloud/app_of_apps/ + # either the branch or tag to point at in the argo repo above + revision: main + # namespace to install the k8s app in + namespace: nextcloud + # recurse directories in the provided git repo + directory_recursion: false + # source repos for Argo CD App Project (in addition to argo.repo) + project: + source_repos: + - registry-1.docker.io + - https://nextcloud.github.io/helm + - https://small-hack.github.io/cloudnative-pg-cluster-chart + - https://seaweedfs.github.io/seaweedfs/helm + - https://github.com/seaweedfs/seaweedfs + destination: + # automatically includes the app's namespace and argocd's namespace + namespaces: [] + + prometheus: + description: | + Full monitoring stack with [link=https://prometheus.io/docs/introduction/overview/]Prometheus[/link], grafana, loki, and alert manager. 
+ + smol-k8s-lab supports initialization by setting up your ingress hostnames. + + enabled: false + argo: + # secrets keys to make available to Argo CD ApplicationSets + secret_keys: + # FQDN to use for Prometheus web interface + hostname: "" + # FQDN to use for grafana + grafana_hostname: "" + # FQDN to use for Alert Manager web interface + alert_manager_hostname: "" + # FQDN to use for the prometheus push gateway + push_gateway_hostname: "" + # git repo to install the Argo CD app from + repo: https://github.com/small-hack/argocd-apps + # path in the argo repo to point to. Trailing slash very important! This + # is an app of apps. Change to "monitoring/kube-prometheus-stack/" to + # only install kube-prometheus-stack (foregoing loki and push gateway) + path: prometheus/ + # either the branch or tag to point at in the argo repo above + revision: main + # namespace to install the k8s app in + namespace: "prometheus" + # recurse directories in the provided git repo + directory_recursion: false + # source repos for Argo CD App Project (in addition to argo.repo) + project: + source_repos: + - "registry-1.docker.io" + - "https://github.com/prometheus-community/helm-charts.git" + - "https://prometheus-community.github.io/helm-charts" + destination: + # automatically includes the app's namespace and argocd's namespace + namespaces: + - kube-system + + seaweedfs: + enabled: false + description: | + [link=https://github.com/seaweedfs/seaweedfs]seaweedfs[/link] is a filesystem with an exposable S3 endpoint. + + This is mostly meant to be for testing, but have at it :D + + If directory_recursion is set to true, we will also deploy the csi driver. + init: + enabled: true + values: + root_user: admin + argo: + # secrets keys to make available to Argo CD ApplicationSets + secret_keys: + hostname: "" + s3_endpoint: "" + s3_region: eu-west-1 + # git repo to install the Argo CD app from + repo: https://github.com/small-hack/argocd-apps + # path in the argo repo to point to. 
Trailing slash very important! + path: seaweedfs/app_of_apps/ + # either the branch or tag to point at in the argo repo above + revision: main + # namespace to install the k8s app in + namespace: seaweedfs + # recurse directories in the provided git repo + # if set to false, we will not deploy the CSI driver + directory_recursion: true + # source repos for Argo CD App Project (in addition to argo.repo) + project: + source_repos: + - https://seaweedfs.github.io/seaweedfs/helm + - https://seaweedfs.github.io/seaweedfs-csi-driver/helm + - https://github.com/seaweedfs/seaweedfs + destination: + # automatically includes the app's namespace and argocd's namespace + namespaces: [] + + seaweedfs_csi_driver: + enabled: false + description: | + [link=https://github.com/seaweedfs/seaweedfs]seaweedfs[/link] is a filesystem with an exposable S3 endpoint. This app deploys ONLY the CSI driver. + + init: + enabled: true + argo: + # secrets keys to make available to Argo CD ApplicationSets + secret_keys: [] + # git repo to install the Argo CD app from + repo: https://github.com/small-hack/argocd-apps + # path in the argo repo to point to. Trailing slash very important! + path: demo/seaweedfs/app_of_apps/csi_driver/ + # either the branch or tag to point at in the argo repo above + revision: main + # namespace to install the k8s app in + namespace: seaweedfs + # recurse directories in the provided git repo + # if set to false, we will not deploy the CSI driver + directory_recursion: false + # source repos for Argo CD App Project (in addition to argo.repo) + project: + source_repos: + - https://seaweedfs.github.io/seaweedfs-csi-driver/helm + destination: + # automatically includes the app's namespace and argocd's namespace + namespaces: [] + + vault: + description: | + [link=https://www.hashicorp.com/products/vault]Vault[/link] is a secrets management tool by Hashicorp. + + Legal Disclaimer: Vault is open source, but does not use a standard FOSS license. 
smol-k8s-lab is not affiliated with Hashicorp and does not offer any kind of paid support for anything Hashicorp publishes. If you'd like help with Hashicorp's Vault, you can ask for support via the smol-k8s-lab community, or via official Hashicorp sources, such as via their paid support contracts. Keep in mind though, smol-k8s-lab is not supported by Hashicorp.
+    enabled: false
+    # Initialization of the app through smol-k8s-lab using bitwarden and/or k8s secrets
+    init:
+      enabled: true
+    argo:
+      # secrets keys to make available to Argo CD ApplicationSets
+      secret_keys:
+        # name of the cluster that vault is associated with, can be any unique name
+        cluster_name: my-cool-cluster
+      repo: https://github.com/small-hack/argocd-apps
+      # path in the argo repo to point to. Trailing slash very important!
+      path: demo/vault/
+      # either the branch or tag to point at in the argo repo above
+      revision: main
+      # namespace to install the k8s app in
+      namespace: vault
+      # recurse directories in the provided git repo
+      directory_recursion: false
+      # source repos for Argo CD App Project (in addition to argo.repo)
+      project:
+        source_repos:
+          - https://helm.releases.hashicorp.com
+        destination:
+          # automatically includes the app's namespace and argocd's namespace
+          namespaces: []
+
+  vouch:
+    description: |
+      [link=https://github.com/vouch/vouch-proxy]vouch-proxy[/link] can help you forward requests for OIDC authentication to any ingress source that doesn't already have it. Super useful for web pages like prometheus's UI.
+
+      smol-k8s-lab supports the initialization of vouch if you also enable zitadel by creating OIDC applications and credentials and your vouch-proxy Kubernetes Secret.
+    enabled: false
+    # Initialization of the app through smol-k8s-lab using bitwarden and/or k8s secrets
+    init:
+      enabled: true
+      values:
+        # list of domains allowed to be behind vouch such as example.com
+        domains: []
+        # - example.com
+        # email addresses allowed to authenticate via vouch
+        emails: []
+        # - beep@boop.com
+    argo:
+      # secrets keys to make available to Argo CD ApplicationSets
+      secret_keys:
+        # FQDN to use for vouch
+        hostname: ""
+      # repo to install the Argo CD app from
+      # git repo to install the Argo CD app from
+      repo: "https://github.com/small-hack/argocd-apps"
+      # path in the argo repo to point to. Trailing slash very important!
+      path: "vouch-proxy/app_of_apps/"
+      # either the branch or tag to point at in the argo repo above
+      revision: main
+      # namespace to install the k8s app in
+      namespace: "vouch"
+      # recurse directories in the provided git repo
+      directory_recursion: false
+      # source repos for Argo CD App Project (in addition to argo.repo)
+      project:
+        source_repos:
+          - https://jessebot.github.io/vouch-helm-chart
+        destination:
+          # automatically includes the app's namespace and argocd's namespace
+          namespaces: []
+
+  zitadel:
+    enabled: false
+    description: |
+      [link=https://zitadel.com/opensource]ZITADEL[/link] is an open source self hosted IAM platform for the cloud era
+
+      smol-k8s-lab supports initialization of:
+      - an admin service account
+      - a human admin user (including an autogenerated password)
+      - a project with a name of your choosing
+      - 2 OIDC applications for Argo CD and Vouch
+      - 2 Argo CD groups (admins and users), 1 vouch group
+      - groupsClaim action to enforce group roles on authentication
+      - updates your appset_secret_plugin secret and refreshes the pod
+
+      The default app will also deploy SeaweedFS to back up your database which in turn is backed up to a remote s3 provider of your choice.
+
+      To provide sensitive values via environment variables to smol-k8s-lab use:
+      - ZITADEL_S3_BACKUP_ACCESS_ID
+      - ZITADEL_S3_BACKUP_SECRET_KEY
+      - ZITADEL_RESTIC_REPO_PASSWORD
+    init:
+      # Switch to false if you don't want to create initial secrets or use the
+      # API via a service account to create the above described resources
+      enabled: true
+      values:
+        # initial human user's username
+        username: ""
+        # initial human user's email
+        email: ""
+        # initial human's first name
+        first_name: ""
+        # initial human's last name
+        last_name: ""
+        # options: GENDER_UNSPECIFIED, GENDER_MALE, GENDER_FEMALE, GENDER_DIVERSE
+        # more coming soon, see: https://github.com/zitadel/zitadel/issues/6355
+        gender: GENDER_UNSPECIFIED
+        # name of the default project to create OIDC applications in
+        project: core
+      sensitive_values:
+        # sensitive values to provide via environment variables or via the TUI
+        - S3_BACKUP_ACCESS_ID
+        - S3_BACKUP_SECRET_KEY
+        - RESTIC_REPO_PASSWORD
+        # coming soon after we refactor a bit
+        # - SMTP_PASSWORD
+    argo:
+      # secrets keys to make available to Argo CD ApplicationSets
+      secret_keys:
+        # FQDN to use for zitadel
+        hostname: ""
+        # set the local s3 provider for zitadel's database backups. can be minio or seaweedfs
+        s3_provider: seaweedfs
+        # local s3 endpoint for postgresql backups, backed up constantly
+        s3_endpoint: ""
+        # capacity for the PVC backing your local s3 instance
+        s3_pvc_capacity: 2Gi
+        # Remote S3 configuration, for pushing remote backups of your local postgresql backups
+        # these are done only nightly right now, for speed and cost optimization
+        s3_backup_endpoint: ""
+        s3_backup_region: ""
+        s3_backup_bucket: ""
+      # repo to install the Argo CD app from
+      # git repo to install the Argo CD app from
+      repo: "https://github.com/small-hack/argocd-apps"
+      # path in the argo repo to point to. Trailing slash very important!
+ # if you want to use cockroachdb, change to zitadel/zitadel_and_cockroachdb + path: "zitadel/app_of_apps/" + # either the branch or tag to point at in the argo repo above + revision: main + # namespace to install the k8s app in + namespace: "zitadel" + # recurse directories in the provided git repo + directory_recursion: true + # source repos for Argo CD App Project (in addition to argo.repo) + project: + source_repos: + - https://charts.zitadel.com + - https://zitadel.github.io/zitadel-charts + - https://small-hack.github.io/cloudnative-pg-cluster-chart + - https://operator.min.io/ + - https://seaweedfs.github.io/seaweedfs/helm + destination: + # automatically includes the app's namespace and argocd's namespace + namespaces: [] diff --git a/.github/smol_k8s_lab_config_k3s.yaml b/.github/smol_k8s_lab_config_k3s.yaml index 4ab9a37af..b09cd307a 100644 --- a/.github/smol_k8s_lab_config_k3s.yaml +++ b/.github/smol_k8s_lab_config_k3s.yaml @@ -80,10 +80,7 @@ k8s_distros: - "ingress-ready=true" kubelet-arg: - "max-pods=150" - # not yet adjustable on k3s at this time - nodes: - control_plane: 1 - workers: 0 + nodes: {} k3d: # set to true to enable deploying a Kubernetes cluster using k3d @@ -196,13 +193,23 @@ apps: # Initialize of the app through smol-k8s-lab init: # Deploys staging and prod ClusterIssuers and prompts you for - # cert-manager.argo.secret_keys if they were not set. Switch to false if - # you don't want to deploy any ClusterIssuers + # values if they were not set. 
Switch to false if you don't want + # to deploy any ClusterIssuers enabled: true - argo: - secret_keys: - # Used for letsencrypt-staging, to generate certs + values: + # Used for to generate certs and alert you if they're going to expire email: "cicdtest@test.com" + # choose between "http01" or "dns01" + cluster_issuer_acme_challenge_solver: http01 + # only needed if cluster_issuer_challenge_solver set to dns01 + # currently only cloudflare is supported + cluster_issuer_acme_dns01_provider: cloudflare + sensitive_values: + # can be passed in as env vars if you pre-pend CERT_MANAGER_ + # e.g. CERT_MANAGER_CLOUDFLARE_API_TOKEN + - CLOUDFLARE_API_TOKEN + argo: + secret_keys: {} # git repo to install the Argo CD app from repo: "https://github.com/small-hack/argocd-apps" # path in the argo repo to point to. Trailing slash very important! diff --git a/.github/smol_k8s_lab_config_kind.yaml b/.github/smol_k8s_lab_config_kind.yaml index f4db30af5..f78124b7e 100644 --- a/.github/smol_k8s_lab_config_kind.yaml +++ b/.github/smol_k8s_lab_config_kind.yaml @@ -196,13 +196,23 @@ apps: # Initialize of the app through smol-k8s-lab init: # Deploys staging and prod ClusterIssuers and prompts you for - # cert-manager.argo.secret_keys if they were not set. Switch to false if - # you don't want to deploy any ClusterIssuers + # values if they were not set. Switch to false if you don't want + # to deploy any ClusterIssuers enabled: true - argo: - secret_keys: - # Used for letsencrypt-staging, to generate certs + values: + # Used for to generate certs and alert you if they're going to expire email: "cicdtest@test.com" + # choose between "http01" or "dns01" + cluster_issuer_acme_challenge_solver: http01 + # only needed if cluster_issuer_challenge_solver set to dns01 + # currently only cloudflare is supported + cluster_issuer_acme_dns01_provider: cloudflare + sensitive_values: + # can be passed in as env vars if you pre-pend CERT_MANAGER_ + # e.g. 
CERT_MANAGER_CLOUDFLARE_API_TOKEN + - CLOUDFLARE_API_TOKEN + argo: + secret_keys: {} # git repo to install the Argo CD app from repo: "https://github.com/small-hack/argocd-apps" # path in the argo repo to point to. Trailing slash very important! diff --git a/.github/workflows/test-smol-k8s-lab.yml b/.github/workflows/test-smol-k8s-lab.yml index dfd05b0be..98da71c1b 100644 --- a/.github/workflows/test-smol-k8s-lab.yml +++ b/.github/workflows/test-smol-k8s-lab.yml @@ -4,10 +4,11 @@ on: pull_request: paths-ignore: - ".github/**" + - "mkdocs.yaml" - "README.md" - "docs/**" jobs: - smol_k8s_lab_test: + test_kind: runs-on: ubuntu-latest steps: - uses: actions/checkout@v3 @@ -16,7 +17,7 @@ jobs: id: setup-python uses: actions/setup-python@v4 with: - python-version: '3.11' + python-version: '3.12' - name: Install Poetry uses: snok/install-poetry@v1 @@ -39,7 +40,72 @@ jobs: source .venv/bin/activate smol-k8s-lab -c .github/smol_k8s_lab_config_kind.yaml + test_k3s: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + + - name: Set up Python + id: setup-python + uses: actions/setup-python@v4 + with: + python-version: '3.12' + + - name: Install Poetry + uses: snok/install-poetry@v1 + with: + virtualenvs-create: true + virtualenvs-in-project: true + installer-parallel: true + + - name: Install smol-k8s-lab via poetry + run: poetry install + + - name: Install Argo CD cli + run: | + curl -sSL -o argocd-linux-amd64 https://github.com/argoproj/argo-cd/releases/latest/download/argocd-linux-amd64 + sudo install -m 555 argocd-linux-amd64 /usr/local/bin/argocd + rm argocd-linux-amd64 + - name: test smol-k8s-lab with k3s run: | source .venv/bin/activate + export KUBECONFIG="/home/runner/.config/kube/config" smol-k8s-lab -c .github/smol_k8s_lab_config_k3s.yaml + + test_k3d: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + + - name: Set up Python + id: setup-python + uses: actions/setup-python@v4 + with: + python-version: '3.12' + + - name: Install Poetry + 
uses: snok/install-poetry@v1 + with: + virtualenvs-create: true + virtualenvs-in-project: true + installer-parallel: true + + - name: Install smol-k8s-lab via poetry + run: poetry install + + - name: Install Argo CD cli + run: | + curl -sSL -o argocd-linux-amd64 https://github.com/argoproj/argo-cd/releases/latest/download/argocd-linux-amd64 + sudo install -m 555 argocd-linux-amd64 /usr/local/bin/argocd + rm argocd-linux-amd64 + + - name: Install k3d + run: | + curl -s https://raw.githubusercontent.com/k3d-io/k3d/main/install.sh | bash + + - name: test smol-k8s-lab with k3d + run: | + source .venv/bin/activate + export KUBECONFIG="/home/runner/.config/kube/config" + smol-k8s-lab -c .github/smol_k8s_lab_config_k3d.yaml diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index d049b378e..3ac5b9827 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -24,7 +24,7 @@ If it's a code fix, please: We also utilize the [Bitwarden cli], for a password manager so you never have to see/know your Argo CD password. -NOTE: We're open to unit, integration, and ci testing btw! We just don't have any because we weren't stable enough to justify them yet. 🤦 +NOTE: We're open to unit and integration tests btw! We just don't have anything but ci via Github Actions, because we weren't stable enough to justify them yet. 🤦 ### Prereqs diff --git a/README.md b/README.md index 8809619d9..a34def9de 100644 --- a/README.md +++ b/README.md @@ -72,6 +72,263 @@ After you've followed the installation instructions, if you're *new* to `smol-k8 # saving the config and deploying it for you smol-k8s-lab ``` +
+

Upgrading config from v2.2.4 to v3.x

+
+If you've installed smol-k8s-lab prior to `v3.0.0`, please back up your old configuration, and then remove the `~/.config/smol-k8s-lab/config.yaml` (or `$XDG_CONFIG_HOME/smol-k8s-lab/config.yaml`) file entirely, then run the following with either pip or pipx:
+
+*if using pip*:
+```bash
+# this upgrades smol-k8s-lab
+pip3.11 install --upgrade smol-k8s-lab
+
+# this initializes a new configuration
+smol-k8s-lab
+```
+
+*or if using pipx*:
+```bash
+# this upgrades smol-k8s-lab
+pipx upgrade smol-k8s-lab
+
+# this initializes a new configuration
+smol-k8s-lab
+```
+
+The main breaking changes between `v2.2.4` and `v3.0` are as follows:
+
+- *home assistant has graduated from demo app to live app*
+
+You'll need to change `apps.home_assistant.argo.path` to either `home-assistant/toleration_and_affinity/` if you're using node labels and taints, or `home-assistant/` if you're deploying to a single node cluster. Here's an example with no tolerations or node affinity:
+
+```yaml
+apps:
+  home_assistant:
+    enabled: false
+    description: |
+      [link=https://home-assistant.io]Home Assistant[/link] is a home IOT management solution.
+
+      By default, we assume you want to use node affinity and tolerations to keep home assistant pods on certain nodes and keep other pods off said nodes. If you don't want to use either of these features but still want to use the small-hack/argocd-apps repo, first change the argo path to /home-assistant/ and then remove the 'toleration_' and 'affinity' secret_keys from the yaml file under apps.home_assistant.description.
+ argo: + secret_keys: + hostname: "home-assistant.coolestdogintheworld.dog" + repo: https://github.com/small-hack/argocd-apps + path: home-assistant/ + revision: main + namespace: home-assistant + directory_recursion: false + project: + source_repos: + - http://jessebot.github.io/home-assistant-helm + destination: + namespaces: + - argocd +``` + +And here's an example for labeled and tainted nodes, where your pod can use tolerations and node affinity: + +```yaml +apps: + home_assistant: + enabled: false + description: | + [link=https://home-assistant.io]Home Assistant[/link] is a home IOT management solution. + + By default, we assume you want to use node affinity and tolerations to keep home assistant pods on certain nodes and keep other pods off said nodes. If you don't want to use either of these features but still want to use the small-hack/argocd-apps repo, first change the argo path to /home-assistant/ and then remove the 'toleration_' and 'affinity' secret_keys from the yaml file under apps.home_assistant.description. + argo: + secret_keys: + hostname: "home-assistant.coolestdogintheworld.dog" + toleration_key: "blutooth" + toleration_operator: "Equals" + toleration_value: "True" + toleration_effect: "NoSchedule" + affinity_key: "blutooth" + affinity_value: "True" + repo: https://github.com/small-hack/argocd-apps + path: home-assistant/toleration_and_affinity/ + revision: main + namespace: home-assistant + directory_recursion: false + project: + source_repos: + - http://jessebot.github.io/home-assistant-helm + destination: + namespaces: + - argocd +``` + + +- *new k3s feature for adding additional nodes* + +This feature changes `k8s_distros.k3s.nodes` to be a dictionary so that you can include additional nodes for us to join to the cluster after we create it, but before we install apps. 
Here's an example of how you can add a new node to k3s on installation: + + +```yaml +k8s_distros: + k3s: + enabled: false + k3s_yaml: + # if you enable MetalLB, we automatically add servicelb to the disable list + # enables encryption at rest for Kubernetes secrets + secrets-encryption: true + # disables traefik so we can enable ingress-nginx, remove if you're using traefik + disable: + - "traefik" + node-label: + - "ingress-ready=true" + kubelet-arg: + - "max-pods=150" + # nodes to SSH to and join to cluster. example: + nodes: + # name can be a hostname or ip address + serverfriend1.lan: + # change ssh_key to the name of a local private key to use + ssh_key: id_rsa + # must be node type of "worker" or "control_plane" + node_type: worker + # labels are optional, but may be useful for pod node affinity + node_labels: + - iot=true + # taints are optional, but may be useful for pod tolerations + node_taints: + - iot=true:NoSchedule +``` + +if you don't want to add any nodes, this is what you should change your nodes section to be: + +```yaml +k8s_distros: + k3s: + enabled: false + k3s_yaml: + # if you enable MetalLB, we automatically add servicelb to the disable list + # enables encryption at rest for Kubernetes secrets + secrets-encryption: true + # disables traefik so we can enable ingress-nginx, remove if you're using traefik + disable: + - "traefik" + node-label: + - "ingress-ready=true" + kubelet-arg: + - "max-pods=150" + # nodes to SSH to and join to cluster. example: + nodes: {} +``` + + +- *cert-manager now supports DNS01 challenge solver using the Cloudflare provider* + +This feature reworks the `apps.cert_manager.init` and `apps.cert_manager.argo.secret_keys` sections. 
+ +Here's an example of using the HTTP01 challenge solver, which would be the only previously supported challenge solver, so if you want everything to just work how it did before your config file should look like this: + +```yaml +apps: + cert_manager: + enabled: true + description: | + [link=https://cert-manager.io/]cert-manager[/link] let's you use LetsEncrypt to generate TLS certs for all your apps with ingress. + + smol-k8s-lab supports optional initialization by creating [link=https://cert-manager.io/docs/configuration/acme/]ACME Issuer type[/link] [link=https://cert-manager.io/docs/concepts/issuer/]ClusterIssuers[/link] using either the HTTP01 or DNS01 challenge solvers. We create two ClusterIssuers: letsencrypt-staging and letsencrypt-staging. + + For the DNS01 challange solver, you will need to either export $CLOUDFLARE_API_TOKEN as an env var, or fill in the sensitive value for it each time you run smol-k8s-lab. + + Currently, Cloudflare is the only supported DNS provider for the DNS01 challenge solver. If you'd like to use a different DNS provider or use a different Issuer type all together, please either set one up outside of smol-k8s-lab. We also welcome [link=https://github.com/small-hack/smol-k8s-lab/pulls]PRs[/link] to add these features :) + + # Initialize of the app through smol-k8s-lab + init: + # Deploys staging and prod ClusterIssuers and prompts you for + # values if they were not set. 
Switch to false if you don't want
+      # to deploy any ClusterIssuers
+      enabled: true
+      values:
+        # Used to generate certs and alert you if they're going to expire
+        email: "you@emailsforfriends.com"
+        # choose between "http01" or "dns01"
+        cluster_issuer_acme_challenge_solver: http01
+        # only needed if cluster_issuer_challenge_solver set to dns01,
+        # currently only cloudflare is supported
+        cluster_issuer_acme_dns01_provider: cloudflare
+      sensitive_values: []
+    argo:
+      secret_keys: {}
+      # git repo to install the Argo CD app from
+      repo: "https://github.com/small-hack/argocd-apps"
+      # path in the argo repo to point to. Trailing slash very important!
+      path: "cert-manager/"
+      # either the branch or tag to point at in the argo repo above
+      revision: main
+      # namespace to install the k8s app in
+      namespace: "cert-manager"
+      # recurse directories in the provided git repo
+      directory_recursion: false
+      # source repos for cert-manager CD App Project (in addition to argo.repo)
+      project:
+        source_repos:
+          - https://charts.jetstack.io
+        destination:
+          # automatically includes the app's namespace and argocd's namespace
+          namespaces:
+            - kube-system
+```
+
+And here's how you'd use the new DNS01 feature (keep in mind you need to either provide a sensitive value each time you run `smol-k8s-lab`, OR you need to export `$CLOUDFLARE_API_TOKEN` as an env var prior to running `smol-k8s-lab`):
+
+
+```yaml
+apps:
+  cert_manager:
+    enabled: true
+    description: |
+      [link=https://cert-manager.io/]cert-manager[/link] let's you use LetsEncrypt to generate TLS certs for all your apps with ingress.
+
+      smol-k8s-lab supports optional initialization by creating [link=https://cert-manager.io/docs/configuration/acme/]ACME Issuer type[/link] [link=https://cert-manager.io/docs/concepts/issuer/]ClusterIssuers[/link] using either the HTTP01 or DNS01 challenge solvers. We create two ClusterIssuers: letsencrypt-staging and letsencrypt-prod.
+ + For the DNS01 challange solver, you will need to either export $CLOUDFLARE_API_TOKEN as an env var, or fill in the sensitive value for it each time you run smol-k8s-lab. + + Currently, Cloudflare is the only supported DNS provider for the DNS01 challenge solver. If you'd like to use a different DNS provider or use a different Issuer type all together, please either set one up outside of smol-k8s-lab. We also welcome [link=https://github.com/small-hack/smol-k8s-lab/pulls]PRs[/link] to add these features :) + + # Initialize of the app through smol-k8s-lab + init: + # Deploys staging and prod ClusterIssuers and prompts you for + # values if they were not set. Switch to false if you don't want + # to deploy any ClusterIssuers + enabled: true + values: + # Used for to generate certs and alert you if they're going to expire + email: "you@emailsforfriends.com" + # choose between "http01" or "dns01" + cluster_issuer_acme_challenge_solver: dns01 + # only needed if cluster_issuer_challenge_solver set to dns01 + # currently only cloudflare is supported + cluster_issuer_acme_dns01_provider: cloudflare + sensitive_values: + # can be passed in as env vars if you pre-pend CERT_MANAGER_ + # e.g. CERT_MANAGER_CLOUDFLARE_API_TOKEN + - CLOUDFLARE_API_TOKEN + argo: + secret_keys: {} + # git repo to install the Argo CD app from + repo: "https://github.com/small-hack/argocd-apps" + # path in the argo repo to point to. Trailing slash very important! + path: "cert-manager/" + # either the branch or tag to point at in the argo repo above + revision: main + # namespace to install the k8s app in + namespace: "cert-manager" + # recurse directories in the provided git repo + directory_recursion: false + # source repos for cert-manager CD App Project (in addition to argo.repo) + project: + source_repos: + - https://charts.jetstack.io + destination: + # automatically includes the app's namespace and argocd's namespace + namespaces: + - kube-system +``` + +

Upgrading config from v1.x to v2.x

diff --git a/docs/assets/images/screenshots/add_node_k3s_tab.svg b/docs/assets/images/screenshots/add_node_k3s_tab.svg new file mode 100644 index 000000000..32033784a --- /dev/null +++ b/docs/assets/images/screenshots/add_node_k3s_tab.svg @@ -0,0 +1,308 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + BaseApp + + + + + + + + + + ʕ ᵔᴥᵔʔ smol k8s lab — Kubernetes distro config + + + 🌱 Select a k8s distro────────────────────────────────────────────────────── + +▔▔▔▔▔▔▔▔▔▔▔▔▔K3s, by Rancher Labs, is a minimal Kubernetes distro that  +k3sfits in about 70MB. (it's also optomized for ARM) Learn more: +▁▁▁▁▁▁▁▁▁▁▁▁▁k3s.io. + +───────────────────────────────────────────────────Inputs below are optional + + +Customize k3s install with extra optionsandnodes────────────────────────── + +k3s.yamlKubelet Config Options🆕 Add Remote Nodes +━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ + +Add a node below for something to appear here... 
+ + +               _____ +              /     \ +              vvvvvvv  /|__/| +                 I   /O,O   | +                 I /_____   |      /|/| +                C|/^ ^ ^ \  |    /oo  |    _//| +                 |^ ^ ^ ^ |W|   |/^^\ |   /oo | +                  \m___m__|_|    \m_m_|   \mm_| + +                "Totoros" (from "My Neighbor Totoro") +                    --- Duke Lee + + + + + +🖥️ Add a new node + +▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔ +host:hostname or ip addressnode type:worker +▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁ + +▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔ +ssh key:id_rsanode labels:labels to apply to thi +▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁ + +▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔ +node taints:taints to apply to th➕ new node +▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁ + + +────────────────────────────────────────────────────────────── ➕ k3s option + + + + + + diff --git a/docs/assets/images/screenshots/bitwarden_credentials_screen.svg b/docs/assets/images/screenshots/bitwarden_credentials_screen.svg new file mode 100644 index 000000000..2cb4f5e2f --- /dev/null +++ b/docs/assets/images/screenshots/bitwarden_credentials_screen.svg @@ -0,0 +1,276 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + BaseApp + + + + + + + + + + ʕ ᵔᴥᵔʔ smol k8s lab — Review your configuration (last step!) + + +ReviewAllValues──────────────────────────────────────────────────────── + +Core ConfigK8s Distro ConfigApps ConfigGlobal Parameters Config +━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ + +smol_k8s_lab: +# Terminal User Interface with clickable buttons. 
+# Useful for learning smol-k8s-lab or verifying your configuration +tui: +🛡️ Enter Bitwarden Vault Credentials────────────────────────────────────── + +Requires personal API credentials. To avoid this prompt, export the  +following env vars before running smol-k8s-lab: BW_PASSWORD, BW_CLIENTID, + +▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔ +password:password +▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁ + +▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔ +client ID:client_id +▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁ + +▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔ +client secret:client_secret +▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁ + + +▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔ +▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁ + + +──────────────────────────────────────────────────────────────────────────── +# enable text to speech +# k9s is a terminal UI dashboard and interface for interacting wit + +──────────────────────────────────────────────────────────────────────────── + + + +▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔ +🚊 Let's roll!✋Go Back +▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁ + + + + + diff --git a/docs/assets/images/screenshots/help_text.svg b/docs/assets/images/screenshots/help_text.svg index 33f4059eb..c73e0d2f5 100644 --- a/docs/assets/images/screenshots/help_text.svg +++ b/docs/assets/images/screenshots/help_text.svg @@ -1,4 +1,4 @@ - + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - term + term - + - -                           🧸smol k8s lab - -Install slim Kubernetes distros + plus all your apps via Argo CD. - -Usage:smol-k8s-lab[OPTIONS] - -╭─ ʕ ᵔᴥᵔʔ Options ──────────────────────────────────────────────────────────────────────────────╮ - --c--config CONFIG_FILEFull path and name of the YAML config file to parse.  -Defaults to $XDG_CONFIG_HOME/smol-k8s-lab/config.yaml - --D--delete CLUSTER_NAMEDelete an existing cluster by name.           
        - --i--interactiveNew! ⚙️ Interactively configures  smol-k8s-lab - --v--versionPrint the version of smol-k8s-lab (v2.2.4)            - --h--helpShow this message and exit.                           -╰────────────────────────────────────────────────── ♥ docs: github.com/small-hack/smol-k8s-lab─╯ + +                           🧸smol k8s lab + +Install slim Kubernetes distros + plus all your apps via Argo CD. + +Usage:smol-k8s-lab[OPTIONS] + +╭─ ʕ ᵔᴥᵔʔ Options ───────────────────────────────────────────────────────────────────────────╮ + +-c--config CONFIG_FILEFull path and name of the YAML config file to parse.  +Defaults to $XDG_CONFIG_HOME/smol-k8s-lab/config.yaml + +-D--delete CLUSTER_NAMEDelete an existing cluster by name.                   + +-i--interactiveNew! ⚙️ Interactively configures  smol-k8s-lab + +-v--versionPrint the version of smol-k8s-lab (v3.0.0)            + +-h--helpShow this message and exit.                           +╰─────────────────────────────────────────────── ♥ docs: github.com/small-hack/smol-k8s-lab─╯ diff --git a/docs/config_file.md b/docs/config_file.md index 215284f66..c2944413f 100644 --- a/docs/config_file.md +++ b/docs/config_file.md @@ -99,10 +99,21 @@ k8s_distros: - "ingress-ready=true" kubelet-arg: - "max-pods=150" - # not yet adjustable on k3s at this time + # list of nodes to SSH to and join to cluster + # if using single node, set to nodes: {} nodes: - control_plane: 1 - workers: 0 + # name can be a hostname or ip address + serverfriend1.lan: + # change ssh_key to the name of a local private key to use + ssh_key: id_rsa + # must be node type of "worker" or "control_plane" + node_type: worker + # labels are optional, but may be useful for pod node affinity + node_labels: + - iot=true + # taints are optional, but may be useful for pod tolerations + node_taints: + - iot=true:NoSchedule ``` ### k3d diff --git a/docs/installation.md b/docs/installation.md index a61d30196..283756f9b 100644 --- a/docs/installation.md +++ 
b/docs/installation.md @@ -84,13 +84,22 @@ smol-k8s-lab ```
-

Upgrading to v1.x or v2.x

+

Upgrading to v1.x, v2.x, or v3.x

-If you've installed smol-k8s-lab prior to v1.0.0, please backup your old configuration, ~/.config/smol-k8s-lab/config.yaml (or $XDG_CONFIG_HOME/smol-k8s-lab/config.yaml), and then remove the file entirely. Then, run the following: +If you've installed smol-k8s-lab prior to v3.0.0, please backup your old configuration, ~/.config/smol-k8s-lab/config.yaml (or $XDG_CONFIG_HOME/smol-k8s-lab/config.yaml), and then remove the file entirely. Then, run the following if you're using pip: ```bash # this upgrades smol-k8s-lab -pip3.11 install --upgrade smol-k8s-lab +pip install --upgrade smol-k8s-lab + +# this initializes a new configuration +smol-k8s-lab +``` + +if you're using pipx: +```bash +# this upgrades smol-k8s-lab +pipx upgrade smol-k8s-lab # this initializes a new configuration smol-k8s-lab diff --git a/docs/k8s_apps/cert_manager.md b/docs/k8s_apps/cert_manager.md index d0bae0778..b7cc2040e 100644 --- a/docs/k8s_apps/cert_manager.md +++ b/docs/k8s_apps/cert_manager.md @@ -4,13 +4,140 @@ We use [cert-manager](https://cert-manager.io) to generate TLS certs for the web Argo CD web interface screenshot of cert manager in tree view mode showing cert-manager-helm-chart with three of its children. The screenshot does not show the entire Argo CD application because it contains well over 10 different roles and cluster roles and does not fit on one page, so instead we've chosen to show only the deployment children which are cert-manager, cert-manager-caininjector, and cert-manager-webhook each with their own replicasets and pods. -By default, we install two cluster issuers: +By default, we create two ClusterIssuers using the HTTP01 challenge solver: - `letsencrypt-staging` - `letsencrypt-prod` All applications will use `letsencrypt-staging` by default, until you change this setting via the [TUI](/tui/apps_screen/#modifying-globally-available-templating-parameters-for-argo-cd-applicationsets) or [config file](/config_file/#globally-available-argo-cd-applicationset). 
We default to the staging server, because `letsencrypt-prod` has very tight rate limiting and when testing, as one does in a lab, you can easily exceed this, which can issue you a ban for at least a week. +Alternatively, you can also use the DNS01 challenge solver with cloudflare only. If you'd like to use a different DNS provider for the DNS01 challenge solver type, please submit a PR as the devs only have regular access to cloudflare and can't test other providers at this time. + +## Example configs + +### HTTP01 Challenge Solver + +This is the default challenge solver. + +```yaml +apps: + # This app is installed with helm or manifests depending on what is recommended + # for your k8s distro. Becomes managed by Argo CD if you enable it below + cert_manager: + # ! NOTE: you currently can't set this to false. It is necessary to deploy + # most of our supported Argo CD apps since they often have TLS enabled either + # for pod connectivity or ingress. IF set to false, you need an alternative SSL pipeline + enabled: true + description: | + [link=https://cert-manager.io/]cert-manager[/link] let's you use LetsEncrypt to generate TLS certs for all your apps with ingress. + + smol-k8s-lab supports optional initialization by creating [link=https://cert-manager.io/docs/configuration/acme/]ACME Issuer type[/link] [link=https://cert-manager.io/docs/concepts/issuer/]ClusterIssuers[/link] using either the HTTP01 or DNS01 challenge solvers. We create two ClusterIssuers: letsencrypt-staging and letsencrypt-staging. + + For the DNS01 challange solver, you will need to either export $CLOUDFLARE_API_TOKEN as an env var, or fill in the sensitive value for it each time you run smol-k8s-lab. + + Currently, Cloudflare is the only supported DNS provider for the DNS01 challenge solver. If you'd like to use a different DNS provider or use a different Issuer type all together, please either set one up outside of smol-k8s-lab. 
We also welcome [link=https://github.com/small-hack/smol-k8s-lab/pulls]PRs[/link] to add these features :) + + # Initialize of the app through smol-k8s-lab + init: + # Deploys staging and prod ClusterIssuers and prompts you for + # values if they were not set. Switch to false if you don't want + # to deploy any ClusterIssuers + enabled: true + values: + # Used for to generate certs and alert you if they're going to expire + email: coolfriend@amazingdogs.dog + # choose between "http01" or "dns01" + cluster_issuer_acme_challenge_solver: http01 + # only needed if cluster_issuer_challenge_solver set to dns01 + # currently only cloudflare is supported + cluster_issuer_acme_dns01_provider: cloudflare + sensitive_values: [] + argo: + secret_keys: {} + # git repo to install the Argo CD app from + repo: "https://github.com/small-hack/argocd-apps" + # path in the argo repo to point to. Trailing slash very important! + path: "cert-manager/" + # either the branch or tag to point at in the argo repo above + revision: main + # namespace to install the k8s app in + namespace: "cert-manager" + # recurse directories in the provided git repo + directory_recursion: false + # source repos for cert-manager CD App Project (in addition to argo.repo) + project: + source_repos: + - https://charts.jetstack.io + destination: + # automatically includes the app's namespace and argocd's namespace + namespaces: + - kube-system +``` + + +### DNS01 Challenge Solver + +For the DNS01 challange solver, you will need to either export `$CLOUDFLARE_API_TOKEN` as an env var, or fill in the sensitive value for it each time you run `smol-k8s-lab`. + +```yaml +apps: + # This app is installed with helm or manifests depending on what is recommended + # for your k8s distro. Becomes managed by Argo CD if you enable it below + cert_manager: + # ! NOTE: you currently can't set this to false. 
It is necessary to deploy + # most of our supported Argo CD apps since they often have TLS enabled either + # for pod connectivity or ingress. IF set to false, you need an alternative SSL pipeline + enabled: true + description: | + [link=https://cert-manager.io/]cert-manager[/link] let's you use LetsEncrypt to generate TLS certs for all your apps with ingress. + + smol-k8s-lab supports optional initialization by creating [link=https://cert-manager.io/docs/configuration/acme/]ACME Issuer type[/link] [link=https://cert-manager.io/docs/concepts/issuer/]ClusterIssuers[/link] using either the HTTP01 or DNS01 challenge solvers. We create two ClusterIssuers: letsencrypt-staging and letsencrypt-staging. + + For the DNS01 challange solver, you will need to either export $CLOUDFLARE_API_TOKEN as an env var, or fill in the sensitive value for it each time you run smol-k8s-lab. + + Currently, Cloudflare is the only supported DNS provider for the DNS01 challenge solver. If you'd like to use a different DNS provider or use a different Issuer type all together, please either set one up outside of smol-k8s-lab. We also welcome [link=https://github.com/small-hack/smol-k8s-lab/pulls]PRs[/link] to add these features :) + + # Initialize of the app through smol-k8s-lab + init: + # Deploys staging and prod ClusterIssuers and prompts you for + # values if they were not set. Switch to false if you don't want + # to deploy any ClusterIssuers + enabled: true + values: + # Used for to generate certs and alert you if they're going to expire + email: coolfriend@amazingdogs.dog + # choose between "http01" or "dns01" + cluster_issuer_acme_challenge_solver: dns01 + # only needed if cluster_issuer_challenge_solver set to dns01 + # currently only cloudflare is supported + cluster_issuer_acme_dns01_provider: cloudflare + sensitive_values: + # you can remove this if you're not using cloudflare as your DNS01 provider + # can be passed in as env vars if you pre-pend CERT_MANAGER_ + # e.g. 
CERT_MANAGER_CLOUDFLARE_API_TOKEN + - CLOUDFLARE_API_TOKEN + argo: + secret_keys: {} + # git repo to install the Argo CD app from + repo: "https://github.com/small-hack/argocd-apps" + # path in the argo repo to point to. Trailing slash very important! + path: "cert-manager/" + # either the branch or tag to point at in the argo repo above + revision: main + # namespace to install the k8s app in + namespace: "cert-manager" + # recurse directories in the provided git repo + directory_recursion: false + # source repos for cert-manager CD App Project (in addition to argo.repo) + project: + source_repos: + - https://charts.jetstack.io + destination: + # automatically includes the app's namespace and argocd's namespace + namespaces: + - kube-system +``` + ## Troubleshooting Follow the steps in the [cert-manager common error troubleshooting guide](https://cert-manager.io/docs/faq/acme/#common-errors)), you can also change the `letsencrypt-staging` value to `letsencrypt-prod` for any domains you own and can configure to point to your cluster via DNS. diff --git a/docs/k8s_apps/experimental/home_assistant.md b/docs/k8s_apps/experimental/home_assistant.md deleted file mode 100644 index 9c3769111..000000000 --- a/docs/k8s_apps/experimental/home_assistant.md +++ /dev/null @@ -1,31 +0,0 @@ -[Home Assistant](https://www.home-assistant.io/) is an open source IoT management solution. We deploy a [small-hack maintained helm chart](https://github.com/small-hack/home-assistant-chart/) by default. - -The main variable you need to worry about when setting up home assistant is your `hostname`. - -## Example config - -```yaml -apps: - home_assistant: - enabled: false - description: | - ⚠️ [magenta]demo Status[/magenta] - - Home Assistant is a home IOT management solution. 
- argo: - secret_keys: - hostname: "home-assistant.cooldomainfordogs.biz" - repo: https://github.com/small-hack/argocd-apps - # note: this path may change after the home assistant app is certified - # as production ready in the small-hack/argocd-apps repo - path: demo/home-assistant/ - revision: main - namespace: home-assistant - directory_recursion: false - project: - source_repos: - - http://jessebot.github.io/home-assistant-helm - destination: - namespaces: - - argocd -``` diff --git a/docs/k8s_apps/home_assistant.md b/docs/k8s_apps/home_assistant.md new file mode 100644 index 000000000..23a657221 --- /dev/null +++ b/docs/k8s_apps/home_assistant.md @@ -0,0 +1,61 @@ +[Home Assistant](https://www.home-assistant.io/) is an open source IoT management solution. We deploy a [small-hack maintained helm chart](https://github.com/small-hack/home-assistant-chart/) by default. + +The main variable you need to worry about when setting up home assistant is your `hostname`. + +## Example configs + +### Using tolerations and node affinity + +```yaml +apps: + home_assistant: + enabled: false + description: | + [link=https://home-assistant.io]Home Assistant[/link] is a home IOT management solution. + + By default, we assume you want to use node affinity and tolerations to keep home assistant pods on certain nodes and keep other pods off said nodes. If you don't want to use either of these features but still want to use the small-hack/argocd-apps repo, first change the argo path to /home-assistant/ and then remove the 'toleration_' and 'affinity' secret_keys from the yaml file under apps.home_assistant.description. 
+ argo: + secret_keys: + hostname: "home-assistant.cooldomainfordogs.biz" + toleration_key: "iot" + toleration_operator: "Equals" + toleration_value: "true" + toleration_effect: "NoSchedule" + affinity_key: "iot" + affinity_value: "true" + repo: https://github.com/small-hack/argocd-apps + path: home-assistant/toleration_and_affinity/ + revision: main + namespace: home-assistant + directory_recursion: false + project: + source_repos: + - http://jessebot.github.io/home-assistant-helm + destination: + namespaces: + - argocd +``` + +### Without tolerations and node affinity + +```yaml +apps: + home_assistant: + enabled: false + description: | + [link=https://home-assistant.io]Home Assistant[/link] is a home IOT management solution. + argo: + secret_keys: + hostname: "home-assistant.cooldomainfordogs.biz" + repo: https://github.com/small-hack/argocd-apps + path: home-assistant/ + revision: main + namespace: home-assistant + directory_recursion: false + project: + source_repos: + - http://jessebot.github.io/home-assistant-helm + destination: + namespaces: + - argocd +``` diff --git a/docs/tui/confirmation_screen.md b/docs/tui/confirmation_screen.md new file mode 100644 index 000000000..c41245b76 --- /dev/null +++ b/docs/tui/confirmation_screen.md @@ -0,0 +1,11 @@ +## Confirming your configuration + +After you've taken a look through all the configuration screens, you'll be blessed by this final overview screen that lets you check out each section of the config file without the TUI: + +![terminal screenshot of the smol-k8s-lab confirmation screen. At the top it says smol k8s lab - Review your configuration (last step!) and then there is one main large box titled Review All Values that contains 4 tabs: Core config, K8s Distro Config, Apps Config, and Global Parameters Config. Under each tab is that section of the smol-k8s-lab config file with syntax highlighting. 
Below the main box on the screen are two buttons: 🚆 Let's roll!, ✋Go Back.](../../assets/images/screenshots/confirm_screen.svg) + +## Bitwarden screen + +If you haven't exported your Bitwarden credentials as env vars (`BW_PASSWORD`, `BW_CLIENTID`, and `BW_CLIENTSECRET`), then after you hit the "Let's Go" button, you'll see this screen: + +![terminal screenshot of the smol-k8s-lab confirmation screen. At the top it says smol k8s lab - Review your configuration (last step!). Below that is a large modal for filling out your bitwarden credentials. Behind that modal is the confirmation screen which is detailed above in the alt text for the first image on this page](../../assets/images/screenshots/bitwarden_credentials_screen.svg) diff --git a/docs/tui/distro_screen.md b/docs/tui/distro_screen.md index ac0b30342..8ab9440ca 100644 --- a/docs/tui/distro_screen.md +++ b/docs/tui/distro_screen.md @@ -44,3 +44,13 @@ Enter the name of the new option you'd like to add. !!! Tip To turn off bells, visit the [TUI config screen](/tui/tui_config.md) + +## Add Nodes to k3s clusters (🆕 in v3.0) + +The ability to other metal nodes to your cluster is exclusively to k3s in smol-k8s-lab. To add a new node in the TUI, make sure k3s is selected via the dropdown on the distro config screen. In the second box on the screen, there are three tabs, the final tab called "Add Remote Nodes" is the one you want to click. You can also use the left and right arrow keys to navigate the tabs. + +Once there, you should see a list of any existing nodes you've added via your [config file](/config_file.md). + +If you haven't added any clusters to your config file, you will see a random ascii art from a time in the past. 
The second half of the tab has a small form for you to add new nodes: + +![terminal screenshot showing smol-k8s-lab "add remote nodes" tab for the k3s distro config screen](../../assets/images/screenshots/add_node_k3s_tab.svg) diff --git a/mkdocs.yaml b/mkdocs.yaml index 1c5ce8318..210d1016a 100644 --- a/mkdocs.yaml +++ b/mkdocs.yaml @@ -68,6 +68,7 @@ nav: - K8s Apps: tui/apps_screen.md - Config: tui/tui_config.md - Getting Help: tui/help_screen.md + - Confirmation: tui/confirmation_screen.md - Config File: config_file.md - Default Apps: - Argo CD: @@ -81,12 +82,12 @@ nav: - Bitwarden ESO Provider: k8s_apps/bitwarden_eso_provider.md - Experimental: - Cilium: k8s_apps/experimental/cilium.md - - Home Assistant: k8s_apps/experimental/home_assistant.md - Infisical: k8s_apps/experimental/infisical.md - Kepler: k8s_apps/experimental/kepler.md - Kyverno: k8s_apps/experimental/kyverno.md - Kubevirt: k8s_apps/experimental/kubevirt.md - MinIO: k8s_apps/experimental/minio.md + - Home Assistant: k8s_apps/home_assistant.md - Ingress Nginx: k8s_apps/ingress_nginx.md - K8tz: k8s_apps/k8tz.md - K8up: k8s_apps/k8up.md diff --git a/poetry.lock b/poetry.lock index 85fb24d72..af907ec6a 100644 --- a/poetry.lock +++ b/poetry.lock @@ -837,13 +837,13 @@ source = ["Cython (>=3.0.7)"] [[package]] name = "markdown" -version = "3.5.2" +version = "3.6" description = "Python implementation of John Gruber's Markdown." 
optional = false python-versions = ">=3.8" files = [ - {file = "Markdown-3.5.2-py3-none-any.whl", hash = "sha256:d43323865d89fc0cb9b20c75fc8ad313af307cc087e84b657d9eec768eddeadd"}, - {file = "Markdown-3.5.2.tar.gz", hash = "sha256:e1ac7b3dc550ee80e602e71c1d168002f062e49f1b11e26a36264dafd4df2ef8"}, + {file = "Markdown-3.6-py3-none-any.whl", hash = "sha256:48f276f4d8cfb8ce6527c8f79e2ee29708508bf4d40aa410fbc3b4ee832c850f"}, + {file = "Markdown-3.6.tar.gz", hash = "sha256:ed4f41f6daecbeeb96e576ce414c41d2d876daa9a16cb35fa8ed8c2ddfad0224"}, ] [package.extras] diff --git a/pyproject.toml b/pyproject.toml index f25db32c0..8b48d8a89 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "smol_k8s_lab" -version = "2.2.4" +version = "3.0.0" description = "CLI and TUI to quickly install slimmer Kubernetes distros and then manage apps declaratively using Argo CD" authors = ["Jesse Hitch ", "Max Roby "] diff --git a/smol_k8s_lab/config/default_config.yaml b/smol_k8s_lab/config/default_config.yaml index c0a2f4687..b27aacb90 100644 --- a/smol_k8s_lab/config/default_config.yaml +++ b/smol_k8s_lab/config/default_config.yaml @@ -80,10 +80,21 @@ k8s_distros: - "ingress-ready=true" kubelet-arg: - "max-pods=150" - # not yet adjustable on k3s at this time - nodes: - control_plane: 1 - workers: 0 + # list of nodes to SSH to and join to cluster. 
example: + # nodes: + # # name can be a hostname or ip address + # serverfriend1.lan: + # # change ssh_key to the name of a local private key to use + # ssh_key: id_rsa + # # must be node type of "worker" or "control_plane" + # node_type: worker + # # labels are optional, but may be useful for pod node affinity + # node_labels: + # - iot=true + # # taints are optional, but may be useful for pod tolerations + # node_taints: + # - iot=true:NoSchedule + nodes: {} k3d: # set to true to enable deploying a Kubernetes cluster using k3d @@ -191,18 +202,32 @@ apps: description: | [link=https://cert-manager.io/]cert-manager[/link] let's you use LetsEncrypt to generate TLS certs for all your apps with ingress. - smol-k8s-lab supports initialization by creating two [link=https://cert-manager.io/docs/concepts/issuer/]ClusterIssuers[/link] for both staging and production using a provided email address as the account ID for acme. + smol-k8s-lab supports optional initialization by creating [link=https://cert-manager.io/docs/configuration/acme/]ACME Issuer type[/link] [link=https://cert-manager.io/docs/concepts/issuer/]ClusterIssuers[/link] using either the HTTP01 or DNS01 challenge solvers. We create two ClusterIssuers: letsencrypt-staging and letsencrypt-staging. + + For the DNS01 challange solver, you will need to either export $CLOUDFLARE_API_TOKEN as an env var, or fill in the sensitive value for it each time you run smol-k8s-lab. + + Currently, Cloudflare is the only supported DNS provider for the DNS01 challenge solver. If you'd like to use a different DNS provider or use a different Issuer type all together, please either set one up outside of smol-k8s-lab. We also welcome [link=https://github.com/small-hack/smol-k8s-lab/pulls]PRs[/link] to add these features :) # Initialize of the app through smol-k8s-lab init: # Deploys staging and prod ClusterIssuers and prompts you for - # cert-manager.argo.secret_keys if they were not set. 
Switch to false if - # you don't want to deploy any ClusterIssuers + # values if they were not set. Switch to false if you don't want + # to deploy any ClusterIssuers enabled: true + values: + # Used for to generate certs and alert you if they're going to expire + email: "" + # choose between "http01" or "dns01" + cluster_issuer_acme_challenge_solver: http01 + # only needed if cluster_issuer_challenge_solver set to dns01 + # currently only cloudflare is supported + cluster_issuer_acme_dns01_provider: cloudflare + sensitive_values: + # can be passed in as env vars if you pre-pend CERT_MANAGER_ + # e.g. CERT_MANAGER_CLOUDFLARE_API_TOKEN + - CLOUDFLARE_API_TOKEN argo: - secret_keys: - # Used for letsencrypt-staging, to generate certs - email: "" + secret_keys: {} # git repo to install the Argo CD app from repo: "https://github.com/small-hack/argocd-apps" # path in the argo repo to point to. Trailing slash very important! @@ -325,14 +350,20 @@ apps: home_assistant: enabled: false description: | - ⚠️ [magenta]demo Status[/magenta] + [link=https://home-assistant.io]Home Assistant[/link] is a home IOT management solution. - Home Assistant is a home IOT management solution. + By default, we assume you want to use node affinity and tolerations to keep home assistant pods on certain nodes and keep other pods off said nodes. If you don't want to use either of these features but still want to use the small-hack/argocd-apps repo, first change the argo path to /home-assistant/ and then remove the 'toleration_' and 'affinity' secret_keys from the yaml file under apps.home_assistant.description. 
argo: secret_keys: hostname: "" + toleration_key: "" + toleration_operator: "" + toleration_value: "" + toleration_effect: "" + affinity_key: "" + affinity_value: "" repo: https://github.com/small-hack/argocd-apps - path: demo/home-assistant/ + path: home-assistant/toleration_and_affinity/ revision: main namespace: home-assistant directory_recursion: false diff --git a/smol_k8s_lab/k8s_apps/__init__.py b/smol_k8s_lab/k8s_apps/__init__.py index 6ac36b2cc..4328b3824 100644 --- a/smol_k8s_lab/k8s_apps/__init__.py +++ b/smol_k8s_lab/k8s_apps/__init__.py @@ -185,12 +185,7 @@ def setup_base_apps(k8s_obj: K8s, # manager SSL/TLS certificates via lets-encrypt header("Installing [green]cert-manager[/green] for TLS certificates...", '📜') if cert_manager_dict["enabled"]: - cert_manager_init = cert_manager_dict['init']['enabled'] - if cert_manager_init: - email = cert_manager_dict['argo']['secret_keys']['email'] - else: - email = "" - configure_cert_manager(k8s_obj, email) + configure_cert_manager(k8s_obj, cert_manager_dict['init']) # then we install argo cd if it's enabled if argo_enabled: diff --git a/smol_k8s_lab/k8s_apps/ingress/cert_manager.py b/smol_k8s_lab/k8s_apps/ingress/cert_manager.py index c4694f36e..145ccfd2d 100644 --- a/smol_k8s_lab/k8s_apps/ingress/cert_manager.py +++ b/smol_k8s_lab/k8s_apps/ingress/cert_manager.py @@ -10,7 +10,8 @@ import logging as log -def configure_cert_manager(k8s_obj: K8s, email_addr: str = "") -> None: +def configure_cert_manager(k8s_obj: K8s, + init_dict: dict = {}) -> None: """ Installs cert-manager helm chart and optionally creates letsencrypt acme ClusterIssuers for both staging and production if email_addr is passed in @@ -23,38 +24,63 @@ def configure_cert_manager(k8s_obj: K8s, email_addr: str = "") -> None: set_options={'installCRDs': 'true'}) release.install(True) - if email_addr: - log.info("Creating ClusterIssuers for staging and production.") - # we create a ClusterIssuer for both staging and prod - acme_staging = 
"https://acme-staging-v02.api.letsencrypt.org/directory" - private_key_ref = "letsencrypt-staging" - for issuer in ['letsencrypt-staging', 'letsencrypt-prod']: - if issuer == "letsencrypt-prod": - acme_staging = acme_staging.replace("staging-", "") - private_key_ref = private_key_ref.replace("-staging", "-prod") - issuers_dict = { - 'apiVersion': "cert-manager.io/v1", - 'kind': 'ClusterIssuer', - 'metadata': {'name': issuer}, - 'spec': { - 'acme': {'email': email_addr, - 'server': acme_staging, - 'privateKeySecretRef': { - 'name': "letsencrypt-staging" - }, - 'solvers': [ - {'http01': {'ingress': {'class': 'nginx'}}} - ] - } - } - } + if init_dict['enabled']: + init_values = init_dict['values'] + create_cluster_issuers(init_values, k8s_obj) + + +def create_cluster_issuers(init_values: dict, k8s_obj: K8s = None) -> None: + """ + create ClusterIssuers for cert manager + """ + solver = init_values.get('cluster_issuer_acme_challenge_solver', "http01").lower() + if solver == "dns01": + # create the cloudflare api token secret + provider = init_values.get("cluster_issuer_acme_dns01_provider", "") + if provider == "cloudflare": + token_dict = {"token": init_values['CLOUDFLARE_API_TOKEN']} + k8s_obj.create_secret("cloudflare-api-token", + "cert-manager", + token_dict) + challenge = {"cloudflare": { + "apiTokenSecretRef": { + "name": "cloudflare-api-token", + "key": "token" + } + } + } + else: + log.error("We currently only support cloudflare as the DNS " + "provider for the ACME Issuer type in cert-manager. 
" + f"If you'd like to see {provider} supported, please " + "submit a PR and we'll take a look!") + else: + challenge = {'ingress': {'class': 'nginx'}} - # not working: https://github.com/kubernetes-client/python/issues/2103 - # k8s_obj.create_from_manifest_dict(api_group="cert-manager.io", - # api_version="v1", - # namespace='ingress', - # plural_obj_name='clusterissuers', - # manifest_dict=issuers_dict) + log.info("Creating ClusterIssuers for staging and production.") + + # we create a ClusterIssuer for both staging and prod + acme_staging = "https://acme-staging-v02.api.letsencrypt.org/directory" + private_key_ref = "letsencrypt-staging" + + for issuer in ['letsencrypt-staging', 'letsencrypt-prod']: + if issuer == "letsencrypt-prod": + acme_staging = acme_staging.replace("staging-", "") + private_key_ref = private_key_ref.replace("-staging", "-prod") + + issuers_dict = { + 'apiVersion': "cert-manager.io/v1", + 'kind': 'ClusterIssuer', + 'metadata': {'name': issuer}, + 'spec': { + 'acme': { + 'email': init_values['email'], + 'server': acme_staging, + 'privateKeySecretRef': {'name': private_key_ref}, + 'solvers': [{solver: challenge}] + } + } + } - # backup plan till above issue is resolved - k8s_obj.apply_custom_resources([issuers_dict]) + # backup plan till above issue is resolved + k8s_obj.apply_custom_resources([issuers_dict]) diff --git a/smol_k8s_lab/k8s_distros/__init__.py b/smol_k8s_lab/k8s_distros/__init__.py index 8a296feab..e080551a9 100644 --- a/smol_k8s_lab/k8s_distros/__init__.py +++ b/smol_k8s_lab/k8s_distros/__init__.py @@ -140,8 +140,7 @@ def create_k8s_distro(cluster_name: str, k3s_args['disable-network-policy'] = True if k8s_distro == "k3s": - install_k3s_cluster(cluster_name, - k3s_args) + install_k3s_cluster(cluster_name, k3s_args, distro_metadata['nodes']) # curently unsupported - in alpha state if k8s_distro == "k3d": diff --git a/smol_k8s_lab/k8s_distros/k3s.py b/smol_k8s_lab/k8s_distros/k3s.py index 3ebdf7f89..f4ce5e5aa 100644 --- 
a/smol_k8s_lab/k8s_distros/k3s.py +++ b/smol_k8s_lab/k8s_distros/k3s.py @@ -21,7 +21,8 @@ def install_k3s_cluster(cluster_name: str, extra_k3s_parameters: dict = { "write-kubeconfig-mode": 700 - } + }, + extra_nodes: dict = {} ) -> None: """ python installation for k3s, emulates curl -sfL https://get.k3s.io | sh - @@ -60,6 +61,52 @@ def install_k3s_cluster(cluster_name: str, # remove the script after we're done remove('./install.sh') + # if we have extra remote nodes to join to the cluster... + if extra_nodes: + join_k3s_nodes(extra_nodes) + + +def join_k3s_nodes(extra_nodes: dict) -> None: + """ + process extra remote nodes to join to the cluster as well as apply any labels, + or taints, after we're done joining the node + """ + # this gets the internal ip address of our current control plane node + ip_cmd = ("kubectl get nodes -o custom-columns=NAME:.status.addresses[0].address" + " -l node-role.kubernetes.io/master --no-headers") + k3s_control_plane_ip = subproc([ip_cmd]).strip() + + # token from the server is needed for the new agent + k3s_token = subproc(["sudo cat /var/lib/rancher/k3s/server/node-token"]).strip() + k3s_cmd = ('\'curl -sfL https://get.k3s.io | ' + f'K3S_URL="https://{k3s_control_plane_ip}:6443" ' + f'K3S_TOKEN="{k3s_token}" sh -\'') + + # for each node and it's meta data, ssh in and join the node + for node, metadata in extra_nodes.items(): + ssh_cmd = "ssh -o StrictHostKeyChecking=no " + ssh_key = metadata.get('ssh_key', 'id_rsa') + if ssh_key != "id_rsa": + ssh_cmd += f"-i {metadata['ssh_key']} {node} " + else: + ssh_cmd += f"{node} " + + # join node to cluster + subproc([ssh_cmd + k3s_cmd], shell=True, universal_newlines=True) + + labels = metadata.get('node_labels', None) + taints = metadata.get('node_taints', None) + + # after joining the node make sure the labels are up to date + if labels: + for label in labels: + subproc([f"kubectl label nodes {node} {label}"]) + + # after joining the node make sure the taints are up to date + if 
taints: + for taint in taints: + subproc([f"kubectl taint nodes {node} {taint}"]) + def uninstall_k3s(cluster_name: str) -> str: """ diff --git a/smol_k8s_lab/k8s_tools/k8s_lib.py b/smol_k8s_lab/k8s_tools/k8s_lib.py index 7c2a0f260..5f9123e0e 100644 --- a/smol_k8s_lab/k8s_tools/k8s_lib.py +++ b/smol_k8s_lab/k8s_tools/k8s_lib.py @@ -29,7 +29,7 @@ def __init__(self): def create_secret(self, name: str, namespace: str, - str_data: str, + str_data: dict, inline_key: str = "", labels: dict = {}) -> None: """ diff --git a/smol_k8s_lab/tui/base.py b/smol_k8s_lab/tui/base.py index 5659fd97b..f66f79def 100644 --- a/smol_k8s_lab/tui/base.py +++ b/smol_k8s_lab/tui/base.py @@ -51,7 +51,7 @@ class BaseApp(App): Binding(key="f", key_display="f", action="toggle_footer", - description="Toggle footer"), + description="Hide footer"), Binding(key="q,escape", action="quit", show=False), @@ -76,6 +76,7 @@ def __init__(self, user_config: dict = INITIAL_USR_CONFIG) -> None: self.cluster_names = [] self.current_cluster = "" self.sensitive_values = { + 'cert_manager': {}, 'nextcloud': {}, 'matrix': {}, 'mastodon': {}, @@ -417,13 +418,23 @@ def check_for_invalid_inputs(self, apps_dict: dict = {}) -> list: empty_fields.append(key) # sensitive inputs - init_sensitive_values = init_dict.get('sensitive_values', None) + init_sensitive_values = init_dict.get('sensitive_values', + None) if init_sensitive_values: + # cert manager is special + if app == "cert_manager": + solver = init_values['cluster_issuer_acme_challenge_solver'] + if solver == "http01": + skip = True + else: + skip = False + prompts = self.check_for_env_vars(app, metadata) if prompts: for value in prompts: if not self.sensitive_values[app].get(value, ""): - empty_fields.append(value) + if not skip: + empty_fields.append(value) # check for empty secret key fields (some apps don't have secret keys) secret_keys = metadata['argo'].get('secret_keys', None) diff --git a/smol_k8s_lab/tui/css/add_nodes_widget.tcss 
b/smol_k8s_lab/tui/css/add_nodes_widget.tcss new file mode 100644 index 000000000..074154748 --- /dev/null +++ b/smol_k8s_lab/tui/css/add_nodes_widget.tcss @@ -0,0 +1,47 @@ +$bluish_white: rgb(189,216,255); +$lavender: rgb(174,168,248); +$spacechalk_red: rgb(255,141,135); +$neon_magenta: rgb(242,137,249); +$light_pink: rgb(255,175,249); +$orange: rgb(253,205,54); +$soft_yellow: rgb(249,249,134); +$soft_green: rgb(193,255,135); +$spacechalk_lime: rgb(168,253,87); +$sky_blue: rgb(92,201,253); +$cornflower: rgb(95,135,255); +$light_cornflower: rgb(122,162,247); +$blue_gray: rgb(86,95,137); +$dark_gray: rgb(58,58,58); +$navy: rgb(35,35,54); + +/* number of nodes box */ +#add-nodes-box { + background: $navy 60%; + padding-left: 1; + grid-rows: 0.9fr 0.1fr 0.55fr; + align: center middle; + content-align: center middle; +} + +#nodes-data-table { +} + +#nodes-tab { + width: auto; + height: auto; +} + +#new-node-text { + background: $dark_gray 80%; + width: 100%; + color: $light_cornflower; +} + +.k3s-node-input-row { + grid-size: 2; + grid-gutter: 1; +} + +#new-node-button { + margin-left: 10; +} diff --git a/smol_k8s_lab/tui/css/base.tcss b/smol_k8s_lab/tui/css/base.tcss index 683a53417..81438f60d 100644 --- a/smol_k8s_lab/tui/css/base.tcss +++ b/smol_k8s_lab/tui/css/base.tcss @@ -297,3 +297,17 @@ DataTable { color: $bluish_white; content-align: left middle; } + +/* Input in "label: input" row */ +.input-row-input { + align: left middle; +} + +/* Label in "label: input" row */ +.input-row-label { + align: left middle; + padding-top: 1; + padding-bottom: 1; + color: $bluish_white; +} + diff --git a/smol_k8s_lab/tui/css/k3s.tcss b/smol_k8s_lab/tui/css/k3s.tcss index 7508aa983..24180aaa9 100644 --- a/smol_k8s_lab/tui/css/k3s.tcss +++ b/smol_k8s_lab/tui/css/k3s.tcss @@ -46,6 +46,11 @@ $navy: rgb(35,35,54); height: auto; } +/* tabpane 2 */ +#k3s-nodes-tab { + height: auto; +} + /* k3s container itself */ .k3s-arg-scroll { align: center middle; diff --git 
a/smol_k8s_lab/tui/css/node_modal.tcss b/smol_k8s_lab/tui/css/node_modal.tcss new file mode 100644 index 000000000..d515253df --- /dev/null +++ b/smol_k8s_lab/tui/css/node_modal.tcss @@ -0,0 +1,57 @@ +$bluish_white: rgb(189,216,255); +$lavender: rgb(174,168,248); +$spacechalk_red: rgb(255,141,135); +$neon_magenta: rgb(242,137,249); +$light_pink: rgb(255,175,249); +$orange: rgb(253,205,54); +$soft_yellow: rgb(249,249,134); +$soft_green: rgb(193,255,135); +$spacechalk_lime: rgb(168,253,87); +$sky_blue: rgb(92,201,253); +$cornflower: rgb(95,135,255); +$light_cornflower: rgb(122,162,247); +$blue_gray: rgb(86,95,137); +$dark_gray: rgb(58,58,58); +$navy: rgb(35,35,54); + + +/* whole modal screen */ +#node-question-modal-screen { + width: 80%; + align: center middle; +} + +.modify-delete-size { + height: 14; +} + +#node-modal-text { + color: $cornflower; +} + +#node-question-box { + align: center middle; + content-align: center middle; + background: $navy 75%; + border: round $cornflower; + padding: 1; +} + +#modal-button-box { + grid-size: 3; + align: center middle; + height: auto; +} + +#node-modal-text { + content-align: center middle; + width: 100%; + color: $sky_blue; + padding-top: 1; + padding-bottom: 1; + background: $dark_gray 30%; +} + +#node-name-input { + margin-bottom: 1; +} diff --git a/smol_k8s_lab/tui/css/smol_k8s_cfg.tcss b/smol_k8s_lab/tui/css/smol_k8s_cfg.tcss index 26c915954..6cb86aeaa 100644 --- a/smol_k8s_lab/tui/css/smol_k8s_cfg.tcss +++ b/smol_k8s_lab/tui/css/smol_k8s_cfg.tcss @@ -130,11 +130,6 @@ $navy: rgb(35,35,54); color: $bluish_white; } -/* Input in "label: input" row */ -.input-row-input { - align: left middle; -} - /* password manager config box */ #password-manager-config { padding: 1; diff --git a/smol_k8s_lab/tui/css/tui_config.tcss b/smol_k8s_lab/tui/css/tui_config.tcss index b8d2565b6..74c02d458 100644 --- a/smol_k8s_lab/tui/css/tui_config.tcss +++ b/smol_k8s_lab/tui/css/tui_config.tcss @@ -67,20 +67,6 @@ $navy: rgb(35,35,54); 
grid-columns: 0.2fr 1fr; } -/* Label in "label: input" row */ -.input-row-label { - align: left middle; - padding-top: 1; - padding-bottom: 1; - width: 100%; - color: $bluish_white; -} - -/* Input in "label: input" row */ -.input-row-input { - align: left middle; -} - /* this is the help-text at the top of each module */ .soft-text { color: $bluish_white 60%; diff --git a/smol_k8s_lab/tui/distro_screen.py b/smol_k8s_lab/tui/distro_screen.py index e29fabe70..81ce84c8c 100755 --- a/smol_k8s_lab/tui/distro_screen.py +++ b/smol_k8s_lab/tui/distro_screen.py @@ -39,6 +39,7 @@ class DistroConfigScreen(Screen): """ CSS_PATH = ["./css/distro_config.tcss", "./css/node_inputs_widget.tcss", + "./css/add_nodes_widget.tcss", "./css/k3s.tcss", "./css/kind.tcss"] diff --git a/smol_k8s_lab/tui/distro_widgets/add_nodes.py b/smol_k8s_lab/tui/distro_widgets/add_nodes.py new file mode 100644 index 000000000..875e85cdd --- /dev/null +++ b/smol_k8s_lab/tui/distro_widgets/add_nodes.py @@ -0,0 +1,287 @@ +#!/usr/bin/env python3.11 +from smol_k8s_lab.constants import HOME_DIR +from smol_k8s_lab.tui.util import input_field, drop_down +from smol_k8s_lab.tui.distro_widgets.modify_node_modal import NodeModalScreen + +from os.path import join +from rich.text import Text +from textual import on +from textual.app import ComposeResult, Widget +from textual.containers import Grid +from textual.widgets import Label, DataTable, Button + +placeholder = """ +[grey53] + _____ + / \\ + vvvvvvv /|__/| + I /O,O | + I /_____ | /|/| + C|/^ ^ ^ \ | /oo | _//| + |^ ^ ^ ^ |W| |/^^\ | /oo | + \m___m__|_| \m_m_| \mm_| + + "Totoros" (from "My Neighbor Totoro") + --- Duke Lee +""" + +class AddNodesBox(Widget): + """ + widget for adding new nodes to a local k3s cluster + """ + def __init__(self, nodes: dict = {}, id: str = "") -> None: + # this is just to take a few variables for class organizing + self.nodes = nodes + super().__init__(id=id) + + def compose(self) -> ComposeResult: + with Grid(id="add-nodes-box"): + 
yield Label( + "Add a node below for something to appear here...\n" + placeholder, + id="nodes-placeholder" + ) + yield Label("🖥️ Add a new node", id="new-node-text") + yield self.add_node_row() + + def on_mount(self) -> None: + """ + generate nodes table + """ + if self.nodes: + self.get_widget_by_id("nodes-placeholder").display = False + self.generate_nodes_table() + + def generate_nodes_table(self) -> None: + """ + generate a readable table for all the nodes. + + Each row has a height of 3 and is centered to make it easier to read + for people with dyslexia + """ + data_table = DataTable(zebra_stripes=True, + id="nodes-data-table", + cursor_type="row") + + # then fill in the cluster table + data_table.add_column(Text("Node", justify="center")) + data_table.add_column(Text("Type", justify="center")) + data_table.add_column(Text("SSH Key", justify="center")) + data_table.add_column(Text("Labels", justify="center")) + data_table.add_column(Text("Taints", justify="center")) + + for node, metadata in self.nodes.items(): + # labels can be a list or CommentedSeq, so we convert to str + labels = metadata.get('node_labels', "") + if not isinstance(labels, str): + if labels: + if len(labels) == 1: + labels = labels[0] + else: + labels = ",".join(labels) + else: + labels = "" + + # taints can be a list or CommentedSeq, so we convert to str + taints = metadata.get('node_taints', "") + if not isinstance(taints, str): + if taints: + if len(taints) == 1: + taints = taints[0] + else: + taints = ",".join(taints) + else: + taints = "" + + row = [node, metadata['node_type'], metadata['ssh_key'], labels, taints] + # we use an extra line to center the rows vertically + styled_row = [Text(str("\n" + cell), justify="center") for cell in row] + + # we add extra height to make the rows more readable + data_table.add_row(*styled_row, height=3, key=row[0]) + + # grid for the cluster data table + table_grid = Grid(data_table, id="table-grid") + + # the actual little box in the middle of
screen + main_grid = Grid(table_grid, id="node-table-box-grid") + + # modify clusters box title + main_grid.border_title = ("Select a row to [#ffaff9]modify[/] or [#ffaff9]" + "delete[/] an [i]existing[/] [#C1FF87]node[/]") + + nodes_container = self.get_widget_by_id("add-nodes-box") + nodes_container.mount(main_grid, before="#new-node-text") + + @on(DataTable.RowHighlighted) + def node_row_highlighted(self, event: DataTable.RowHighlighted) -> None: + """ + check which row was selected to read it aloud + """ + if self.app.speak_on_focus: + self.say_row(event.data_table) + + @on(DataTable.RowSelected) + def node_row_selected(self, event: DataTable.RowSelected) -> None: + """ + check which row was selected to launch a modal screen to modify or delete it + """ + if event.data_table.id == "nodes-data-table": + def update_nodes(response: list = []): + """ + check if cluster has been deleted + """ + node = response[0] + node_metadata = response[1] + + # make sure we actually got anything, because the user may have + # hit the cancel button + if node and not node_metadata: + data_table = self.get_widget_by_id("nodes-data-table") + data_table.remove_row(node) + + if data_table.row_count < 1: + data_table.remove() + self.get_widget_by_id("nodes-placeholder").display = True + + self.delete_from_parent_yaml(node) + + row_index = event.cursor_row + row = event.data_table.get_row_at(row_index) + + # get the row's first column (the name of the node) and remove whitespace + node = row[0].plain.strip() + + # launch modal UI to ask if they'd like to modify or delete a node + self.app.push_screen(NodeModalScreen(node, self.nodes[node]), update_nodes) + + def update_parent_yaml(self, node_name: str, node_metadata: dict): + """ + update the base parent app yaml with new nodes + """ + distro_cfg = self.app.cfg['k8s_distros']['k3s']['nodes'] + + # make sure the taints and labels are written as lists + for node_list in ['node_taints', 'node_labels']: + value = 
node_metadata.get(node_list, []) + if value: + node_metadata[node_list] = value.split(',') + else: + node_metadata[node_list] = [] + + distro_cfg[node_name] = node_metadata + self.app.write_yaml() + + def delete_from_parent_yaml(self, node_name: str): + """ + delete an extra node and update the base parent app yaml + """ + distro_cfg = self.app.cfg['k8s_distros']['k3s']['nodes'] + distro_cfg.pop(node_name, None) + self.app.write_yaml() + + def add_node_row(self, node: str = "", node_dict: dict = {}) -> Grid: + """ + add a node input section for k3s + """ + hostname = node + + # hostname label and input + host_label_tooltip = ( + "The hostname or ip address of the node you'd like to " + "join to the cluster" + ) + host_input = input_field(label="host", + initial_value=hostname, + name="host", + placeholder="hostname or ip address", + tooltip=host_label_tooltip) + + # node type label and input + node_type_tooltip = ("The type for this Kubernetes node. " + "Choose between worker or control_plane.") + + node_type_dropdown = drop_down( + ['worker', 'control_plane'], + select_value=node_dict.get('node_type', 'worker'), + name="node_type", + tooltip=node_type_tooltip, + label="node_type" + ) + + # ssh key label and input + default_ssh_key = join(HOME_DIR, ".ssh/id_rsa") + ssh_key_label_tooltip = ( + "The SSH key to use to connect to the other node. This " + f"defaults to {default_ssh_key}" + ) + ssh_key = node_dict.get('ssh_key', "id_rsa") + ssh_key_input = input_field(label="ssh_key", + initial_value=ssh_key, + name="ssh_key", + placeholder="SSH key to connect to host", + tooltip=ssh_key_label_tooltip) + + # node labels label and input + node_labels_label_tooltip = ( + "Any labels you'd like to apply to this node (useful for node " + "affinity). For multiple labels, use commas to separate them."
+ ) + node_labels = node_dict.get('node_labels', "") + node_labels_input = input_field( + label="node_labels", + initial_value=node_labels, + name="node_labels", + placeholder="labels to apply to this node", + tooltip=node_labels_label_tooltip) + + # taints label and input + taints_label_tooltip = ( + "Any taints you'd like to apply to this node (useful for pod " + "tolerations). For multiple taints, use commas to separate them." + ) + taints = node_dict.get('node_taints', "") + taints_input = input_field( + label="node_taints", + initial_value=taints, + name="node_taints", + placeholder="taints to apply to this node", + tooltip=taints_label_tooltip) + + # submit button + submit = Button("➕ new node", id="new-node-button") + submit.tooltip = "Submit new node to cluster to be joined on cluster creation" + + return Grid(host_input, node_type_dropdown, ssh_key_input, + node_labels_input, taints_input, submit, + id=f"{hostname}-row", classes="k3s-node-input-row") + + @on(Button.Pressed) + def submit_new_node(self, event: Button.Pressed): + """ + submit new node to cluster + """ + if event.button.id == "new-node-button": + host = self.get_widget_by_id("host").value + node_type = self.get_widget_by_id("node-type").value + ssh_key = self.get_widget_by_id("ssh-key").value + node_labels = self.get_widget_by_id("node-labels").value + taints = self.get_widget_by_id("node-taints").value + node_metadata = {"node_type": node_type, + "ssh_key": ssh_key, + "node_labels": node_labels, + "node_taints": taints} + + if not self.nodes: + self.nodes = {host: node_metadata} + self.generate_nodes_table() + self.get_widget_by_id("nodes-placeholder").display = False + else: + self.nodes[host] = node_metadata + data_table = self.get_widget_by_id("nodes-data-table") + row = [host, node_type, ssh_key, node_labels, taints] + # we use an extra line to center the rows vertically + styled_row = [Text(str("\n" + cell), justify="center") for cell in row] + # we add extra height to make the rows more
readable + data_table.add_row(*styled_row, height=3, key=row[0]) + + self.update_parent_yaml(host, node_metadata) diff --git a/smol_k8s_lab/tui/distro_widgets/k3s_config.py b/smol_k8s_lab/tui/distro_widgets/k3s_config.py index 1f6611fc7..0eb928e33 100644 --- a/smol_k8s_lab/tui/distro_widgets/k3s_config.py +++ b/smol_k8s_lab/tui/distro_widgets/k3s_config.py @@ -2,7 +2,7 @@ # internal library from smol_k8s_lab.constants import XDG_CACHE_DIR, DEFAULT_DISTRO_OPTIONS from smol_k8s_lab.tui.distro_widgets.kubelet_config import KubeletConfig -from smol_k8s_lab.tui.distro_widgets.node_adjustment import NodeAdjustmentBox +from smol_k8s_lab.tui.distro_widgets.add_nodes import AddNodesBox from smol_k8s_lab.tui.util import create_sanitized_list from smol_k8s_lab.tui.validators.already_exists import CheckIfNameAlreadyInUse @@ -67,17 +67,6 @@ def compose(self) -> ComposeResult: self.metadata = DEFAULT_DISTRO_OPTIONS[self.distro] with Grid(classes="k8s-distro-config", id=f"{self.distro}-box"): - - # take number of nodes from config and make string - nodes = self.metadata.get('nodes', - {'control_plane': 1, 'workers': 0}) - control_nodes = str(nodes.get('control_plane', '1')) - worker_nodes = str(nodes.get('workers', '0')) - - # node input row - yield NodeAdjustmentBox(self.distro, control_nodes, worker_nodes) - - # Add the TabbedContent widget for kind config with TabbedContent(initial="k3s-yaml-tab", id="k3s-tabbed-content"): # tab 1 - networking options @@ -89,10 +78,16 @@ def compose(self) -> ComposeResult: # tab 2 - kubelet options with TabPane("Kubelet Config Options", id="k3s-kubelet-tab"): - # kubelet config section for kind only + # kubelet config section for kind only? 
kubelet_args = self.metadata['k3s_yaml'].get('kubelet-arg', '') yield KubeletConfig('k3s', kubelet_args) + if self.distro == "k3s": + # tab 3 - add remote nodes + with TabPane("🆕 Add [i]Remote[/i] Nodes", id="k3s-nodes-tab"): + yield AddNodesBox(self.metadata.get('nodes', []), + id="nodes-tab") + def on_mount(self) -> None: """ screen and box border styling @@ -100,9 +95,14 @@ def on_mount(self) -> None: # update tabbed content box tabbed_content = self.query_one(TabbedContent) - tabbed_content.border_title = ( - "[i]Add extra[/] options for the [#C1FF87]k3s[/] install script" - ) + if self.distro == "k3s": + top_title = ("[#ffaff9]Customize[/] k3s install with extra " + "[#C1FF87]options[/] [i]and[/i] [#C1FF87]nodes[/]") + else: + top_title = ("[#ffaff9]Add[/] [i]extra[/] options for the " + "[#C1FF87]k3s[/] install script") + + tabbed_content.border_title = top_title subtitle = ( "[b][@click=screen.launch_new_option_modal()] ➕ k3s option[/][/]" @@ -115,13 +115,29 @@ def on_mount(self) -> None: def action_show_tab(self, tab: str) -> None: """Switch to a new tab.""" - self.get_widget_by_id("k3s-tabbed-content").show_tab(tab) - self.get_widget_by_id("k3s-tabbed-content").active = tab + tabbed_content = self.get_widget_by_id("k3s-tabbed-content") + tabbed_content.show_tab(tab) + tabbed_content.active = tab @on(TabbedContent.TabActivated) - def speak_when_tab_selected(self, event: TabbedContent.TabActivated) -> None: + def when_tab_selected(self, event: TabbedContent.TabActivated) -> None: + """ + speaks name of tab if tts is on and changes button at the button of border + """ + tab = event.tab.id if self.app.speak_on_focus: - self.app.action_say(f"Selected tab is {event.tab.id}") + self.app.action_say(f"Selected tab is {tab}") + + # change border subtitle button depending on the tab activated + tabbed_content = self.query_one(TabbedContent) + if tab == "k3s-nodes-tab": + tabbed_content.border_subtitle = ( + "[b][@click=screen.launch_new_option_modal()] ➕ node[/][/]" 
+ ) + else: + tabbed_content.border_subtitle = ( + "[b][@click=screen.launch_new_option_modal()] ➕ k3s option[/][/]" + ) class K3sConfig(Static): diff --git a/smol_k8s_lab/tui/distro_widgets/modify_node_modal.py b/smol_k8s_lab/tui/distro_widgets/modify_node_modal.py new file mode 100644 index 000000000..2ed5a0d86 --- /dev/null +++ b/smol_k8s_lab/tui/distro_widgets/modify_node_modal.py @@ -0,0 +1,111 @@ +# external libraries +from os import system +from textual import on +from textual.app import ComposeResult, NoMatches +from textual.binding import Binding +from textual.containers import Grid +from textual.screen import ModalScreen +from textual.widgets import Button, Label + + +class NodeModalScreen(ModalScreen): + CSS_PATH = ["../css/node_modal.tcss"] + BINDINGS = [Binding(key="b,escape,q", + key_display="b", + action="press_cancel", + description="Back")] + + def __init__(self, node: str, node_metadata: dict) -> None: + self.node = node + self.node_metadata = node_metadata + super().__init__() + + def compose(self) -> ComposeResult: + + question = ('What would you like to do with ' + f'[#C1FF87]{self.node}[/]?') + # base screen grid + with Grid(id="node-question-modal-screen", classes="modify-delete-size"): + # grid for node question and buttons + with Grid(id="node-question-box"): + yield Label(question, id="node-modal-text") + + with Grid(id="modal-button-box"): + # modify button allows user to change apps (and soon distro details) + modify_button = Button("✏️ Modify", id="modify-node-button") + modify_button.tooltip = "Modify the node's metadata" + yield modify_button + + # delete button deletes the node + delete_button = Button("🚮 Delete", id="delete-node-first-try") + delete_button.tooltip = "[magenta]Delete[/] the node 😱" + yield delete_button + + cancel = Button("🤷 Cancel", id="cancel") + cancel.tooltip = "Return to previous screen" + yield cancel + + def on_mount(self): + """ + say the title if that self.app.speak_screen_titles is set to True + """ + if 
self.app.speak_screen_titles: + self.app.action_say( + f"Screen title: What would you like to do with {self.node}?" + ) + + def action_press_cancel(self) -> None: + """ + presses the cancel button + """ + self.get_widget_by_id("cancel").action_press() + + @on(Button.Pressed) + def button_pressed(self, event: Button.Pressed) -> None: + if event.button.id == "modify-node-button": + print("ok") + + elif event.button.id == "delete-node-first-try": + # don't display the first delete button or the modify button + event.button.display = False + self.get_widget_by_id("modify-node-button").display = False + + # are you sure, the text + confirm_txt = ('Are you [b][i]sure[/][/] you want to [#ffaff9]delete[/]' + f' [#C1FF87]{self.node}[/]?') + self.get_widget_by_id("node-modal-text").update(confirm_txt) + if self.app.speak_screen_titles: + self.app.action_say(f"Are you sure you want to delete {self.node}?") + + # are you sure, the button + sure_button = Button("🚮 Yes", id="delete-button-second-try") + self.get_widget_by_id("modal-button-box").mount(sure_button, + before="#cancel") + + # if the user really wants to delete a node, we do it + elif event.button.id == "delete-button-second-try": + # after deleting pop the screen + self.dismiss([self.node, None]) + + elif event.button.id == "cancel": + # resets the modal + try: + delete_2nd_try = self.get_widget_by_id("delete-button-second-try") + + if not delete_2nd_try.display: + self.app.pop_screen() + else: + delete_2nd_try.display = False + self.get_widget_by_id("modify-node-button").display = True + + question = f'What would you like to do with [#C1FF87]{self.node}[/]?' + self.get_widget_by_id("node-modal-text").update(question) + if self.app.bell_on_error: + self.app.action_say( + f"What would you like to do with {self.node}?" 
+ ) + + self.get_widget_by_id("delete-node-first-try").display = True + except NoMatches: + pass + self.app.pop_screen() diff --git a/smol_k8s_lab/tui/make_screenshots.py b/smol_k8s_lab/tui/make_screenshots.py index 4b6516ebe..68b4ca62a 100755 --- a/smol_k8s_lab/tui/make_screenshots.py +++ b/smol_k8s_lab/tui/make_screenshots.py @@ -11,7 +11,7 @@ async def make_base_screenshots() -> None: """ - make all the screenshots for the start screen, help screen, and config screen + make all the screenshots for the start screen, help screen, and TUI config screen """ async with BaseApp().run_test(size=(87, 47)) as pilot: pilot.app.save_screenshot(f"{screenshot_path}/start_screen.svg") @@ -27,14 +27,14 @@ async def make_base_screenshots() -> None: async def make_distro_screen_screenshots() -> None: """ - make all the screenshots + make all the screenshots for the kubernetes distribution config screen """ async with BaseApp().run_test(size=(90, 55)) as pilot: - # press the "enter" key and then f key to go to the distro screen and then hide the footer + # press the "enter" key and then f key to go to the distro screen, then hide the footer await pilot.press("enter", "f") pilot.app.save_screenshot(f"{screenshot_path}/distro_config_screen.svg") - # Test selecting another distro from the drop down + # Test selecting another distro from the top drop down await pilot.press("enter", "down", "enter") pilot.app.save_screenshot(f"{screenshot_path}/kind_config_screen.svg") @@ -43,6 +43,10 @@ async def make_distro_screen_screenshots() -> None: pilot.app.save_screenshot(f"{screenshot_path}/add_k3s_option_screen.svg") await pilot.press("escape") + # Test selecting the new node tab for k3s + await pilot.press("tab","right","right") + pilot.app.save_screenshot(f"{screenshot_path}/add_node_k3s_tab.svg") + # Test selecting another distro from the drop down await pilot.press("tab", "tab", "tab", "right") pilot.app.save_screenshot(f"{screenshot_path}/kind_config_screen2.svg") @@ -50,7 +54,7 @@ 
async def make_distro_screen_screenshots() -> None: async def make_apps_screen_screenshots() -> None: """ - make all the screenshots + Make all the screenshots for the Argo CD ApplicationSet configuration screen """ async with BaseApp().run_test(size=(90, 55)) as pilot: # press the "enter" key and then the "n" key @@ -65,20 +69,26 @@ async def make_apps_screen_screenshots() -> None: await pilot.press("escape","tab","tab","enter") pilot.app.save_screenshot(f"{screenshot_path}/modify_global_parameters_modal_screen.svg") + async def make_confirmation_screen_screenshots() -> None: """ - make all the screenshots + make all the screenshots for the confirmation screen """ async with BaseApp().run_test(size=(87, 47)) as pilot: # logging and password config - await pilot.press("enter", "n", "n") + await pilot.press("enter", "n", "n", "n") pilot.app.save_screenshot(f"{screenshot_path}/logging_password_config.svg") # confirmation screen finally await pilot.press("n") pilot.app.save_screenshot(f"{screenshot_path}/confirm_screen.svg") + # enter bitwarden credentials + await pilot.press("n", "tab", "tab", "enter") + pilot.app.save_screenshot(f"{screenshot_path}/bitwarden_credentials_screen.svg") + if __name__ == "__main__": asyncio.run(make_base_screenshots()) asyncio.run(make_distro_screen_screenshots()) asyncio.run(make_apps_screen_screenshots()) + asyncio.run(make_confirmation_screen_screenshots()) diff --git a/smol_k8s_lab/tui/tui_config_screen.py b/smol_k8s_lab/tui/tui_config_screen.py index 36bf7ee6b..d7ba15b8f 100755 --- a/smol_k8s_lab/tui/tui_config_screen.py +++ b/smol_k8s_lab/tui/tui_config_screen.py @@ -83,7 +83,7 @@ def compose(self) -> ComposeResult: classes="soft-text") with Grid(classes="triple-switch-row"): yield bool_option( - label="enabled:", + label="enabled", name="enabled", switch_value=self.cfg['enabled'], tooltip=("Enable tui mode by default. 
Otherwise, you" @@ -92,14 +92,14 @@ def compose(self) -> ComposeResult: ) yield bool_option( - label="footer:", + label="footer", name="show_footer", switch_value=self.cfg['show_footer'], tooltip="show the footer at the bottom of the screen" ) yield bool_option( - label="k9s:", + label="k9s", name="k9s-enabled", switch_value=self.cfg['k9s']['enabled'], tooltip="launch k9s, a k8s TUI dashboard when cluster is up" @@ -107,7 +107,7 @@ def compose(self) -> ComposeResult: with Grid(classes="k9s-input-row"): yield input_field( - label="k9s command:", + label="k9s_command", name="k9s-command", initial_value=self.cfg['k9s']['command'], placeholder="command to run when k9s starts", @@ -177,7 +177,7 @@ def compose(self) -> ComposeResult: with Grid(id="bell-row"): yield bool_option( - label="bell on focus:", + label="bell_on_focus", name="bell-on_focus", switch_value=self.cfg['bell']['on_focus'], tooltip=( @@ -186,7 +186,7 @@ def compose(self) -> ComposeResult: ) yield bool_option( - label="bell on error:", + label="bell_on_error", name="bell-on_error", switch_value=self.cfg['bell']['on_error'], tooltip=( @@ -197,7 +197,7 @@ def compose(self) -> ComposeResult: with Grid(id="tts-row"): yield bool_option( - label="TTS screen titles:", + label="TTS_screen_titles", name="text-to-speech-screen_titles", switch_value=self.cfg['text_to_speech']['screen_titles'], tooltip=( @@ -206,7 +206,7 @@ def compose(self) -> ComposeResult: ) yield bool_option( - label="TTS on key press:", + label="TTS_on_key_press", name="text-to-speech-on_key_press", switch_value=self.cfg['text_to_speech']['on_key_press'], tooltip=( @@ -216,7 +216,7 @@ def compose(self) -> ComposeResult: ) yield bool_option( - label="TTS on focus:", + label="TTS_on_focus", name="text-to-speech-on_focus", switch_value=self.cfg['text_to_speech']['on_focus'], tooltip=( @@ -227,7 +227,7 @@ def compose(self) -> ComposeResult: ) yield input_field( - label="speech program:", + label="speech_program", 
name="text-to-speech-speech_program", initial_value=self.cfg['text_to_speech']['speech_program'], placeholder="name of program for speech", diff --git a/smol_k8s_lab/tui/util.py b/smol_k8s_lab/tui/util.py index 4c8e628ca..80dc8d53c 100644 --- a/smol_k8s_lab/tui/util.py +++ b/smol_k8s_lab/tui/util.py @@ -10,7 +10,7 @@ from textual.screen import ModalScreen from textual.suggester import SuggestFromList from textual.validation import Length -from textual.widgets import Input, Button, Label, Switch +from textual.widgets import Input, Button, Label, Switch, Select KUBELET_SUGGESTIONS = SuggestFromList(( @@ -227,24 +227,55 @@ def bool_option(label: str, switch_value: bool, name: str, tooltip: str) -> Hori switch = Switch(value=switch_value, classes="bool-switch-row-switch", name=name, - id=label) + id=label.replace("_", "-").replace(" ", "-")) switch.tooltip = tooltip extra_class = name.replace('_',"-") return Horizontal(bool_label, switch, classes=f"bool-switch-row {extra_class}") +def drop_down(values: list, + name: str, + tooltip: str, + select_value: str = "", + label: str = "") -> Horizontal: + """ + returns a label and switch row in a Horizontal container + """ + if label: + select_label = Label(label.replace("_", " ") + ":", classes="input-row-label") + select_label.tooltip = tooltip + id = label.replace("_", "-") + else: + id = name.replace("_", "-") + + select = Select.from_values(values, + name=name, + value=select_value, + classes="dropdown-row-dropdown", + id=id + ) + select.tooltip = tooltip + + extra_class = name.replace('_',"-") + if label: + return Horizontal(select_label, select, + classes=f"{extra_class}") + else: + return Horizontal(select, classes=f"{extra_class}") + + def input_field(label: str, initial_value: str, name: str, placeholder: str, tooltip: str = "") -> Horizontal: """ returns an input label and field within a Horizontal container """ - input_label = Label(label + ":", classes="input-row-label") + input_label = Label(label.replace("_", " 
") + ":", classes="input-row-label") input_label.tooltip = tooltip input_dict = {"placeholder": placeholder, "classes": "input-row-input", - "id": label, + "id": label.replace("_","-"), "name": name} if initial_value: input_dict["value"] = initial_value