From 5d469d14605f560d93a5ee787cb540c3bc832ced Mon Sep 17 00:00:00 2001 From: obaydullahmhs Date: Fri, 6 Sep 2024 15:52:02 +0600 Subject: [PATCH] Add kcc tls docs Signed-off-by: obaydullahmhs --- .../kafka/tls/connectcluster-issuer.yaml | 8 + .../kafka/tls/connectcluster-tls.yaml | 21 ++ docs/examples/kafka/tls/kafka-dev-tls.yaml | 23 ++ docs/examples/kafka/tls/kafka-prod-tls.yaml | 34 +++ docs/guides/kafka/README.md | 2 +- .../clustering/topology-cluster/index.md | 47 ++-- docs/guides/kafka/concepts/connector.md | 4 +- docs/guides/kafka/concepts/kafka.md | 3 +- .../kafka/concepts/kafkaconnectorversion.md | 2 +- .../kafka/concepts/schemaregistryversion.md | 2 +- .../kafka/connectcluster/connectcluster.md | 6 +- docs/guides/kafka/connectcluster/overview.md | 8 +- docs/guides/kafka/quickstart/kafka/index.md | 2 +- docs/guides/kafka/restproxy/overview.md | 2 +- docs/guides/kafka/schemaregistry/overview.md | 2 +- docs/guides/kafka/tls/combined.md | 250 +++++++++++++++++ docs/guides/kafka/tls/connectcluster.md | 224 ++++++++++++++++ docs/guides/kafka/tls/overview.md | 4 +- docs/guides/kafka/tls/topology.md | 253 ++++++++++++++++++ 19 files changed, 856 insertions(+), 41 deletions(-) create mode 100644 docs/examples/kafka/tls/connectcluster-issuer.yaml create mode 100644 docs/examples/kafka/tls/connectcluster-tls.yaml create mode 100644 docs/examples/kafka/tls/kafka-dev-tls.yaml create mode 100644 docs/examples/kafka/tls/kafka-prod-tls.yaml create mode 100644 docs/guides/kafka/tls/combined.md create mode 100644 docs/guides/kafka/tls/connectcluster.md create mode 100644 docs/guides/kafka/tls/topology.md diff --git a/docs/examples/kafka/tls/connectcluster-issuer.yaml b/docs/examples/kafka/tls/connectcluster-issuer.yaml new file mode 100644 index 0000000000..a8777926f2 --- /dev/null +++ b/docs/examples/kafka/tls/connectcluster-issuer.yaml @@ -0,0 +1,8 @@ +apiVersion: cert-manager.io/v1 +kind: Issuer +metadata: + name: connectcluster-ca-issuer + namespace: demo +spec: + ca: + 
secretName: connectcluster-ca \ No newline at end of file diff --git a/docs/examples/kafka/tls/connectcluster-tls.yaml b/docs/examples/kafka/tls/connectcluster-tls.yaml new file mode 100644 index 0000000000..5ac77544d5 --- /dev/null +++ b/docs/examples/kafka/tls/connectcluster-tls.yaml @@ -0,0 +1,21 @@ +apiVersion: kafka.kubedb.com/v1alpha1 +kind: ConnectCluster +metadata: + name: connectcluster-tls + namespace: demo +spec: + version: 3.6.1 + enableSSL: true + tls: + issuerRef: + apiGroup: cert-manager.io + kind: Issuer + name: connectcluster-ca-issuer + replicas: 3 + connectorPlugins: + - postgres-2.4.2.final + - jdbc-2.6.1.final + kafkaRef: + name: kafka-prod + namespace: demo + deletionPolicy: WipeOut \ No newline at end of file diff --git a/docs/examples/kafka/tls/kafka-dev-tls.yaml b/docs/examples/kafka/tls/kafka-dev-tls.yaml new file mode 100644 index 0000000000..c3c163b83a --- /dev/null +++ b/docs/examples/kafka/tls/kafka-dev-tls.yaml @@ -0,0 +1,23 @@ +apiVersion: kubedb.com/v1 +kind: Kafka +metadata: + name: kafka-dev-tls + namespace: demo +spec: + version: 3.6.1 + enableSSL: true + tls: + issuerRef: + apiGroup: "cert-manager.io" + kind: Issuer + name: kafka-ca-issuer + replicas: 3 + storage: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi + storageClassName: standard + storageType: Durable + deletionPolicy: WipeOut \ No newline at end of file diff --git a/docs/examples/kafka/tls/kafka-prod-tls.yaml b/docs/examples/kafka/tls/kafka-prod-tls.yaml new file mode 100644 index 0000000000..f939caa1d3 --- /dev/null +++ b/docs/examples/kafka/tls/kafka-prod-tls.yaml @@ -0,0 +1,34 @@ +apiVersion: kubedb.com/v1 +kind: Kafka +metadata: + name: kafka-prod-tls + namespace: demo +spec: + version: 3.6.1 + enableSSL: true + tls: + issuerRef: + apiGroup: "cert-manager.io" + kind: Issuer + name: kafka-ca-issuer + topology: + broker: + replicas: 2 + storage: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi + storageClassName: 
standard + controller: + replicas: 2 + storage: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi + storageClassName: standard + storageType: Durable + deletionPolicy: WipeOut \ No newline at end of file diff --git a/docs/guides/kafka/README.md b/docs/guides/kafka/README.md index 37e20b7ab0..0038f83e05 100644 --- a/docs/guides/kafka/README.md +++ b/docs/guides/kafka/README.md @@ -81,7 +81,7 @@ KubeDB supports The following Kafka versions. Supported version are applicable f ## User Guide - [Quickstart Kafka](/docs/guides/kafka/quickstart/kafka/index.md) with KubeDB Operator. -- [Quickstart ConnectCluster](/docs/guides/kafka/quickstart/connectcluster/index.md) with KubeDB Operator. +- [Quickstart ConnectCluster](/docs/guides/kafka/connectcluster/overview.md) with KubeDB Operator. - Kafka Clustering supported by KubeDB - [Combined Clustering](/docs/guides/kafka/clustering/combined-cluster/index.md) - [Topology Clustering](/docs/guides/kafka/clustering/topology-cluster/index.md) diff --git a/docs/guides/kafka/clustering/topology-cluster/index.md b/docs/guides/kafka/clustering/topology-cluster/index.md index b40de66213..93e7d72e98 100644 --- a/docs/guides/kafka/clustering/topology-cluster/index.md +++ b/docs/guides/kafka/clustering/topology-cluster/index.md @@ -141,22 +141,21 @@ Hence, the cluster is ready to use. 
Let's check the k8s resources created by the operator on the deployment of Kafka CRO: ```bash -$ kubectl get all,secret,pvc -n demo -l 'app.kubernetes.io/instance=kafka-prod' +$ kubectl get all,petset,secret,pvc -n demo -l 'app.kubernetes.io/instance=kafka-prod' NAME READY STATUS RESTARTS AGE pod/kafka-prod-broker-0 1/1 Running 0 4m10s pod/kafka-prod-broker-1 1/1 Running 0 4m4s pod/kafka-prod-broker-2 1/1 Running 0 3m57s pod/kafka-prod-controller-0 1/1 Running 0 4m8s -pod/kafka-prod-controller-1 1/1 Running 2 (3m35s ago) 4m +pod/kafka-prod-controller-1 1/1 Running 0 4m pod/kafka-prod-controller-2 1/1 Running 0 3m53s -NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE -service/kafka-prod-broker ClusterIP None 9092/TCP,29092/TCP 4m14s -service/kafka-prod-controller ClusterIP None 9093/TCP 4m14s +NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE +service/kafka-prod-pods ClusterIP None 9092/TCP,9093/TCP,29092/TCP 4m14s NAME READY AGE -petset.apps/kafka-prod-broker 3/3 4m10s -petset.apps/kafka-prod-controller 3/3 4m8s +petset.apps.k8s.appscode.com/kafka-prod-broker 3/3 4m10s +petset.apps.k8s.appscode.com/kafka-prod-controller 3/3 4m8s NAME TYPE VERSION AGE appbinding.appcatalog.appscode.com/kafka-prod kubedb.com/kafka 3.6.1 4m8s @@ -202,25 +201,28 @@ ssl.truststore.password=*********** Now, we have to use a bootstrap server to perform operations in a kafka broker. For this demo, we are going to use the http endpoint of the headless service `kafka-prod-broker` as bootstrap server for publishing & consuming messages to kafka brokers. These endpoints are pointing to all the kafka broker pods. We will set an environment variable for the `clientauth.properties` filepath as well. At first, describe the service to get the http endpoints. 
```bash -$ kubectl describe svc -n demo kafka-prod-broker -Name: kafka-prod-broker +$ kubectl describe svc -n demo kafka-prod-pods +Name: kafka-prod-pods Namespace: demo Labels: app.kubernetes.io/component=database app.kubernetes.io/instance=kafka-prod app.kubernetes.io/managed-by=kubedb.com app.kubernetes.io/name=kafkas.kubedb.com Annotations: -Selector: app.kubernetes.io/instance=kafka-prod,app.kubernetes.io/managed-by=kubedb.com,app.kubernetes.io/name=kafkas.kubedb.com,kubedb.com/role=broker +Selector: app.kubernetes.io/instance=kafka-prod,app.kubernetes.io/managed-by=kubedb.com,app.kubernetes.io/name=kafkas.kubedb.com Type: ClusterIP IP Family Policy: SingleStack IP Families: IPv4 IP: None IPs: None -Port: http 9092/TCP -TargetPort: http/TCP +Port: broker 9092/TCP +TargetPort: broker/TCP Endpoints: 10.244.0.33:9092,10.244.0.37:9092,10.244.0.41:9092 -Port: internal 29092/TCP -TargetPort: internal/TCP +Port: controller 9093/TCP +TargetPort: controller/TCP +Endpoints: 10.244.0.16:9093,10.244.0.20:9093,10.244.0.24:9093 +Port: local 29092/TCP +TargetPort: local/TCP Endpoints: 10.244.0.33:29092,10.244.0.37:29092,10.244.0.41:29092 Session Affinity: None Events: @@ -229,7 +231,7 @@ Events: Use the `http endpoints` and `clientauth.properties` file to set environment variables. These environment variables will be useful for handling console command operations easily. 
```bash -root@kafka-prod-broker-0:~# export SERVER="10.244.0.100:9092,10.244.0.104:9092,10.244.0.108:9092" +root@kafka-prod-broker-0:~# export SERVER=" 10.244.0.33:9092,10.244.0.37:9092,10.244.0.41:9092" root@kafka-prod-broker-0:~# export CLIENTAUTHCONFIG="$HOME/config/clientauth.properties" ``` @@ -243,17 +245,17 @@ LeaderEpoch: 15 HighWatermark: 1820 MaxFollowerLag: 0 MaxFollowerLagTimeMs: 159 -CurrentVoters: [0,1,2] -CurrentObservers: [3,4,5] +CurrentVoters: [1000,1001,1002] +CurrentObservers: [0,1,2] ``` It will show you important metadata information like clusterID, current leader ID, broker IDs which are participating in leader election voting and IDs of those brokers who are observers. It is important to mention that each broker is assigned a numeric ID which is called its broker ID. The ID is assigned sequentially with respect to the host pod name. In this case, The pods assigned broker IDs are as follows: | Pods | Broker ID | |---------------------|:---------:| -| kafka-prod-broker-0 | 3 | -| kafka-prod-broker-1 | 4 | -| kafka-prod-broker-2 | 5 | +| kafka-prod-broker-0 | 0 | +| kafka-prod-broker-1 | 1 | +| kafka-prod-broker-2 | 2 | Let's create a topic named `sample` with 1 partitions and a replication factor of 1. Describe the topic once it's created. You will see the leader ID for each partition and their replica IDs along with in-sync-replicas(ISR). @@ -264,12 +266,12 @@ Created topic sample. root@kafka-prod-broker-0:~# kafka-topics.sh --command-config $CLIENTAUTHCONFIG --describe --topic sample --bootstrap-server localhost:9092 Topic: sample TopicId: mqlupmBhQj6OQxxG9m51CA PartitionCount: 1 ReplicationFactor: 1 Configs: segment.bytes=1073741824 - Topic: sample Partition: 0 Leader: 4 Replicas: 4 Isr: 4 + Topic: sample Partition: 0 Leader: 1 Replicas: 1 Isr: 1 ``` Now, we are going to start a producer and a consumer for topic `sample` using console. Let's use this current terminal for producing messages and open a new terminal for consuming messages. 
Let's set the environment variables for bootstrap server and the configuration file in consumer terminal also. -From the topic description we can see that the leader partition for partition 0 is 4 that is `kafka-prod-broker-1`. If we produce messages to `kafka-prod-broker-1` broker(brokerID=4) it will store those messages in partition 0. Let's produce messages in the producer terminal and consume them from the consumer terminal. +From the topic description we can see that the leader partition for partition 0 is 1 that is `kafka-prod-broker-1`. If we produce messages to `kafka-prod-broker-1` broker(brokerID=1) it will store those messages in partition 0. Let's produce messages in the producer terminal and consume them from the consumer terminal. ```bash root@kafka-prod-broker-1:~# kafka-console-producer.sh --producer.config $CLIENTAUTHCONFIG --topic sample --request-required-acks all --bootstrap-server localhost:9092 @@ -290,7 +292,6 @@ I hope it's received by console consumer Notice that, messages are coming to the consumer as you continue sending messages via producer. So, we have created a kafka topic and used kafka console producer and consumer to test message publishing and consuming successfully. - ## Cleaning Up TO clean up the k8s resources created by this tutorial, run: diff --git a/docs/guides/kafka/concepts/connector.md b/docs/guides/kafka/concepts/connector.md index d4806d6121..8f132a49f0 100644 --- a/docs/guides/kafka/concepts/connector.md +++ b/docs/guides/kafka/concepts/connector.md @@ -70,8 +70,8 @@ Deletion policy `WipeOut` will delete the connector from the ConnectCluster when ## Next Steps -- Learn how to use KubeDB to run a Apache Kafka cluster [here](/docs/guides/kafka/quickstart/kafka/index.md). -- Learn how to use KubeDB to run a Apache Kafka Connect cluster [here](/docs/guides/kafka/quickstart/connectcluster/index.md). +- Learn how to use KubeDB to run Apache Kafka cluster [here](/docs/guides/kafka/quickstart/kafka/index.md). 
+- Learn how to use KubeDB to run Apache Kafka Connect cluster [here](/docs/guides/kafka/connectcluster/overview.md). - Detail concepts of [KafkaConnectorVersion object](/docs/guides/kafka/concepts/kafkaconnectorversion.md). - Learn to use KubeDB managed Kafka objects using [CLIs](/docs/guides/kafka/cli/cli.md). - Want to hack on KubeDB? Check our [contribution guidelines](/docs/CONTRIBUTING.md). diff --git a/docs/guides/kafka/concepts/kafka.md b/docs/guides/kafka/concepts/kafka.md index 361d4f65f5..048a28b612 100644 --- a/docs/guides/kafka/concepts/kafka.md +++ b/docs/guides/kafka/concepts/kafka.md @@ -302,7 +302,8 @@ NB. If `spec.topology` is set, then `spec.storage` needs to be empty. Instead us ### spec.monitor Kafka managed by KubeDB can be monitored with Prometheus operator out-of-the-box. To learn more, -- [Monitor Apache with Prometheus operator](/docs/guides/kafka/monitoring/using-prometheus-operator.md) +- [Monitor Apache Kafka with Prometheus operator](/docs/guides/kafka/monitoring/using-prometheus-operator.md) +- [Monitor Apache Kafka with Built-in Prometheus](/docs/guides/kafka/monitoring/using-builtin-prometheus.md) ### spec.podTemplate diff --git a/docs/guides/kafka/concepts/kafkaconnectorversion.md b/docs/guides/kafka/concepts/kafkaconnectorversion.md index 1bdc181e24..fe06dde4ec 100644 --- a/docs/guides/kafka/concepts/kafkaconnectorversion.md +++ b/docs/guides/kafka/concepts/kafkaconnectorversion.md @@ -88,4 +88,4 @@ helm upgrade -i kubedb oci://ghcr.io/appscode-charts/kubedb \ - Learn about Kafka CRD [here](/docs/guides/kafka/concepts/kafka.md). - Learn about ConnectCluster CRD [here](/docs/guides/kafka/concepts/connectcluster.md). -- Deploy your first ConnectCluster with KubeDB by following the guide [here](/docs/guides/kafka/quickstart/connectcluster/index.md). +- Deploy your first ConnectCluster with KubeDB by following the guide [here](/docs/guides/kafka/connectcluster/overview.md). 
diff --git a/docs/guides/kafka/concepts/schemaregistryversion.md b/docs/guides/kafka/concepts/schemaregistryversion.md index d7833c7e9d..d1a84915d4 100644 --- a/docs/guides/kafka/concepts/schemaregistryversion.md +++ b/docs/guides/kafka/concepts/schemaregistryversion.md @@ -90,4 +90,4 @@ helm upgrade -i kubedb oci://ghcr.io/appscode-charts/kubedb \ - Learn about Kafka CRD [here](/docs/guides/kafka/concepts/kafka.md). - Learn about SchemaRegistry CRD [here](/docs/guides/kafka/concepts/schemaregistry.md). -- Deploy your first ConnectCluster with KubeDB by following the guide [here](/docs/guides/kafka/quickstart/connectcluster/index.md). +- Deploy your first ConnectCluster with KubeDB by following the guide [here](/docs/guides/kafka/connectcluster/overview.md). diff --git a/docs/guides/kafka/connectcluster/connectcluster.md b/docs/guides/kafka/connectcluster/connectcluster.md index 2d31c26a0d..36d2d2d5a8 100644 --- a/docs/guides/kafka/connectcluster/connectcluster.md +++ b/docs/guides/kafka/connectcluster/connectcluster.md @@ -182,7 +182,7 @@ Hence, the cluster is ready to use. 
Let's check the k8s resources created by the operator on the deployment of ConnectCluster: ```bash -$ kubectl get all,secret -n demo -l 'app.kubernetes.io/instance=connectcluster-distributed' +$ kubectl get all,petset,secret -n demo -l 'app.kubernetes.io/instance=connectcluster-distributed' NAME READY STATUS RESTARTS AGE pod/connectcluster-distributed-0 1/1 Running 0 8m55s pod/connectcluster-distributed-1 1/1 Running 0 8m52s @@ -191,8 +191,8 @@ NAME TYPE CLUSTER-IP EXTERNAL-IP service/connectcluster-distributed ClusterIP 10.128.238.9 8083/TCP 17m service/connectcluster-distributed-pods ClusterIP None 8083/TCP 17m -NAME READY AGE -petset.apps/connectcluster-distributed 2/2 8m56s +NAME READY AGE +petset.apps.k8s.appscode.com/connectcluster-distributed 2/2 8m56s NAME TYPE VERSION AGE appbinding.appcatalog.appscode.com/connectcluster-distributed kafka.kubedb.com/connectcluster 3.6.1 8m56s diff --git a/docs/guides/kafka/connectcluster/overview.md b/docs/guides/kafka/connectcluster/overview.md index ef89b1ae1b..5cec9c8e62 100644 --- a/docs/guides/kafka/connectcluster/overview.md +++ b/docs/guides/kafka/connectcluster/overview.md @@ -39,7 +39,7 @@ demo Active 9s > Note: YAML files used in this tutorial are stored in [guides/kafka/quickstart/connectcluster/yamls](https://github.com/kubedb/docs/tree/{{< param "info.version" >}}/docs/guides/kafka/quickstart/connectcluster/yamls) folder in GitHub repository [kubedb/docs](https://github.com/kubedb/docs). -> We have designed this tutorial to demonstrate a production setup of KubeDB managed Apache Kafka Connect Cluster. If you just want to try out KubeDB, you can bypass some safety features following the tips [here](/docs/guides/kafka/quickstart/connectcluster/index.md#tips-for-testing). +> We have designed this tutorial to demonstrate a production setup of KubeDB managed Apache Kafka Connect Cluster. 
If you just want to try out KubeDB, you can bypass some safety features following the tips [here](/docs/guides/kafka/connectcluster/overview.md#tips-for-testing). ## Find Available ConnectCluster Versions @@ -336,7 +336,7 @@ Events: On deployment of a ConnectCluster CR, the operator creates the following resources: ```bash -$ kubectl get all,secret -n demo -l 'app.kubernetes.io/instance=connectcluster-quickstart' +$ kubectl get all,petset,secret -n demo -l 'app.kubernetes.io/instance=connectcluster-quickstart' NAME READY STATUS RESTARTS AGE pod/connectcluster-quickstart-0 1/1 Running 0 3m50s pod/connectcluster-quickstart-1 1/1 Running 0 3m7s @@ -346,8 +346,8 @@ NAME TYPE CLUSTER-IP EXTERNAL-IP service/connectcluster-quickstart ClusterIP 10.128.221.44 8083/TCP 3m55s service/connectcluster-quickstart-pods ClusterIP None 8083/TCP 3m55s -NAME READY AGE -petset.apps/connectcluster-quickstart 3/3 3m50s +NAME READY AGE +petset.apps.k8s.appscode.com/connectcluster-quickstart 3/3 3m50s NAME TYPE VERSION AGE appbinding.appcatalog.appscode.com/connectcluster-quickstart kafka.kubedb.com/connectcluster 3.6.1 3m50s diff --git a/docs/guides/kafka/quickstart/kafka/index.md b/docs/guides/kafka/quickstart/kafka/index.md index 709814ea6f..48cb813564 100644 --- a/docs/guides/kafka/quickstart/kafka/index.md +++ b/docs/guides/kafka/quickstart/kafka/index.md @@ -435,7 +435,7 @@ If you are just testing some basic functionalities, you might want to avoid addi ## Next Steps - [Quickstart Kafka](/docs/guides/kafka/quickstart/kafka/index.md) with KubeDB Operator. -- [Quickstart ConnectCluster](/docs/guides/kafka/quickstart/connectcluster/index.md) with KubeDB Operator. +- [Quickstart ConnectCluster](/docs/guides/kafka/connectcluster/overview.md) with KubeDB Operator. 
- Kafka Clustering supported by KubeDB - [Combined Clustering](/docs/guides/kafka/clustering/combined-cluster/index.md) - [Topology Clustering](/docs/guides/kafka/clustering/topology-cluster/index.md) diff --git a/docs/guides/kafka/restproxy/overview.md b/docs/guides/kafka/restproxy/overview.md index 91075b1fb3..7c54381889 100644 --- a/docs/guides/kafka/restproxy/overview.md +++ b/docs/guides/kafka/restproxy/overview.md @@ -39,7 +39,7 @@ demo Active 9s > Note: YAML files used in this tutorial are stored in [examples/kafka/restproxy/](https://github.com/kubedb/docs/tree/{{< param "info.version" >}}/docs/examples/kafka/restproxy) folder in GitHub repository [kubedb/docs](https://github.com/kubedb/docs). -> We have designed this tutorial to demonstrate a production setup of KubeDB managed Schema Registry. If you just want to try out KubeDB, you can bypass some safety features following the tips [here](/docs/guides/kafka/quickstart/connectcluster/index.md#tips-for-testing). +> We have designed this tutorial to demonstrate a production setup of KubeDB managed Schema Registry. If you just want to try out KubeDB, you can bypass some safety features following the tips [here](/docs/guides/kafka/restproxy/overview.md#tips-for-testing). ## Find Available RestProxy Versions diff --git a/docs/guides/kafka/schemaregistry/overview.md b/docs/guides/kafka/schemaregistry/overview.md index 9b2c62260a..017d78a9ba 100644 --- a/docs/guides/kafka/schemaregistry/overview.md +++ b/docs/guides/kafka/schemaregistry/overview.md @@ -39,7 +39,7 @@ demo Active 9s > Note: YAML files used in this tutorial are stored in [examples/kafka/schemaregistry/](https://github.com/kubedb/docs/tree/{{< param "info.version" >}}/docs/examples/kafka/schemaregistry) folder in GitHub repository [kubedb/docs](https://github.com/kubedb/docs). -> We have designed this tutorial to demonstrate a production setup of KubeDB managed Schema Registry. 
If you just want to try out KubeDB, you can bypass some safety features following the tips [here](/docs/guides/kafka/quickstart/connectcluster/index.md#tips-for-testing). +> We have designed this tutorial to demonstrate a production setup of KubeDB managed Schema Registry. If you just want to try out KubeDB, you can bypass some safety features following the tips [here](/docs/guides/kafka/schemaregistry/overview.md#tips-for-testing). ## Find Available SchemaRegistry Versions diff --git a/docs/guides/kafka/tls/combined.md b/docs/guides/kafka/tls/combined.md new file mode 100644 index 0000000000..529392ee91 --- /dev/null +++ b/docs/guides/kafka/tls/combined.md @@ -0,0 +1,250 @@ +--- +title: Kafka Combined TLS/SSL Encryption +menu: + docs_{{ .version }}: + identifier: kf-tls-combined + name: Combined Cluster + parent: kf-tls + weight: 30 +menu_name: docs_{{ .version }} +section_menu_id: guides +--- + +> New to KubeDB? Please start [here](/docs/README.md). + +# Run Kafka with TLS/SSL (Transport Encryption) + +KubeDB supports providing TLS/SSL encryption for Kafka. This tutorial will show you how to use KubeDB to run a Kafka cluster with TLS/SSL encryption. + +## Before You Begin + +- At first, you need to have a Kubernetes cluster, and the kubectl command-line tool must be configured to communicate with your cluster. If you do not already have a cluster, you can create one by using [kind](https://kind.sigs.k8s.io/docs/user/quick-start/). + +- Install [`cert-manger`](https://cert-manager.io/docs/installation/) v1.0.0 or later to your cluster to manage your SSL/TLS certificates. + +- Now, install KubeDB cli on your workstation and KubeDB operator in your cluster following the steps [here](/docs/setup/README.md). + +- To keep things isolated, this tutorial uses a separate namespace called `demo` throughout this tutorial. 
+ + ```bash + $ kubectl create ns demo + namespace/demo created + ``` + +> Note: YAML files used in this tutorial are stored in [docs/examples/kafka](https://github.com/kubedb/docs/tree/{{< param "info.version" >}}/docs/examples/kafka) folder in GitHub repository [kubedb/docs](https://github.com/kubedb/docs). + +## Overview + +KubeDB uses following crd fields to enable SSL/TLS encryption in Kafka. + +- `spec:` + - `enableSSL` + - `tls:` + - `issuerRef` + - `certificate` + +Read about the fields in details in [kafka concept](/docs/guides/kafka/concepts/kafka.md), + +`tls` is applicable for all types of Kafka (i.e., `combined` and `topology`). + +Users must specify the `tls.issuerRef` field. KubeDB uses the `issuer` or `clusterIssuer` referenced in the `tls.issuerRef` field, and the certificate specs provided in `tls.certificate` to generate certificate secrets. These certificate secrets are then used to generate required certificates including `ca.crt`, `tls.crt`, `tls.key`, `keystore.jks` and `truststore.jks`. + +## Create Issuer/ ClusterIssuer + +We are going to create an example `Issuer` that will be used throughout the duration of this tutorial to enable SSL/TLS in Kafka. Alternatively, you can follow this [cert-manager tutorial](https://cert-manager.io/docs/configuration/ca/) to create your own `Issuer`. + +- Start off by generating you ca certificates using openssl. + +```bash +openssl req -x509 -nodes -days 365 -newkey rsa:2048 -keyout ./ca.key -out ./ca.crt -subj "/CN=kafka/O=kubedb" +``` + +- Now create a ca-secret using the certificate files you have just generated. + +```bash +kubectl create secret tls kafka-ca \ + --cert=ca.crt \ + --key=ca.key \ + --namespace=demo +``` + +Now, create an `Issuer` using the `ca-secret` you have just created. 
The `YAML` file looks like this:
+
+```yaml
+apiVersion: cert-manager.io/v1
+kind: Issuer
+metadata:
+  name: kafka-ca-issuer
+  namespace: demo
+spec:
+  ca:
+    secretName: kafka-ca
+```
+
+Apply the `YAML` file:
+
+```bash
+$ kubectl create -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/kafka/tls/kf-issuer.yaml
+issuer.cert-manager.io/kafka-ca-issuer created
+```
+
+## TLS/SSL encryption in Kafka Combined Cluster
+
+```yaml
+apiVersion: kubedb.com/v1
+kind: Kafka
+metadata:
+  name: kafka-dev-tls
+  namespace: demo
+spec:
+  version: 3.6.1
+  enableSSL: true
+  tls:
+    issuerRef:
+      apiGroup: "cert-manager.io"
+      kind: Issuer
+      name: kafka-ca-issuer
+  replicas: 3
+  storage:
+    accessModes:
+      - ReadWriteOnce
+    resources:
+      requests:
+        storage: 1Gi
+    storageClassName: standard
+  storageType: Durable
+  deletionPolicy: WipeOut
+```
+
+### Deploy Kafka Combined Cluster
+
+```bash
+$ kubectl create -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/kafka/tls/kafka-dev-tls.yaml
+kafka.kubedb.com/kafka-dev-tls created
+```
+
+Now, wait until `kafka-dev-tls` has status `Ready`, i.e.,
+
+```bash
+$ watch kubectl get kafka -n demo
+
+Every 2.0s: kubectl get kafka -n demo                            aadee: Fri Sep  6 12:34:51 2024
+NAME            TYPE            VERSION   STATUS         AGE
+kafka-dev-tls   kubedb.com/v1   3.6.1     Provisioning   0s
+kafka-dev-tls   kubedb.com/v1   3.6.1     Provisioning   12s
+.
+.
+kafka-dev-tls kubedb.com/v1 3.6.1 Ready 77s +``` + +### Verify TLS/SSL in Kafka Combined Cluster + +```bash +$ kubectl describe secret -n demo kafka-dev-tls-client-cert + +Name: kafka-dev-tls-client-cert +Namespace: demo +Labels: app.kubernetes.io/component=database + app.kubernetes.io/instance=kafka-dev-tls + app.kubernetes.io/managed-by=kubedb.com + app.kubernetes.io/name=kafkas.kubedb.com + controller.cert-manager.io/fao=true +Annotations: cert-manager.io/alt-names: + *.kafka-dev-tls-pods.demo.svc.cluster.local,kafka-dev-tls-pods,kafka-dev-tls-pods.demo.svc,kafka-dev-tls-pods.demo.svc.cluster.local,local... + cert-manager.io/certificate-name: kafka-dev-tls-client-cert + cert-manager.io/common-name: kafka-dev-tls-pods.demo.svc + cert-manager.io/ip-sans: 127.0.0.1 + cert-manager.io/issuer-group: cert-manager.io + cert-manager.io/issuer-kind: Issuer + cert-manager.io/issuer-name: kafka-ca-issuer + cert-manager.io/uri-sans: + +Type: kubernetes.io/tls + +Data +==== +truststore.jks: 891 bytes +ca.crt: 1184 bytes +keystore.jks: 3245 bytes +tls.crt: 1452 bytes +tls.key: 1704 bytes +``` + +Now, Let's exec into a kafka broker pod and verify the configuration that the TLS is enabled. 
+ +```bash +$ kubectl exec -it -n demo kafka-dev-tls-0 -- kafka-configs.sh --bootstrap-server localhost:9092 --command-config /opt/kafka/config/clientauth.properties --describe --entity-type brokers --all | grep 'ssl.keystore' + ssl.keystore.certificate.chain=null sensitive=true synonyms={} + ssl.keystore.key=null sensitive=true synonyms={} + ssl.keystore.location=/var/private/ssl/server.keystore.jks sensitive=false synonyms={STATIC_BROKER_CONFIG:ssl.keystore.location=/var/private/ssl/server.keystore.jks} + ssl.keystore.password=null sensitive=true synonyms={STATIC_BROKER_CONFIG:ssl.keystore.password=null} + ssl.keystore.type=JKS sensitive=false synonyms={DEFAULT_CONFIG:ssl.keystore.type=JKS} + zookeeper.ssl.keystore.location=null sensitive=false synonyms={} + zookeeper.ssl.keystore.password=null sensitive=true synonyms={} + zookeeper.ssl.keystore.type=null sensitive=false synonyms={} + ssl.keystore.certificate.chain=null sensitive=true synonyms={} + ssl.keystore.key=null sensitive=true synonyms={} + ssl.keystore.location=/var/private/ssl/server.keystore.jks sensitive=false synonyms={STATIC_BROKER_CONFIG:ssl.keystore.location=/var/private/ssl/server.keystore.jks} + ssl.keystore.password=null sensitive=true synonyms={STATIC_BROKER_CONFIG:ssl.keystore.password=null} + ssl.keystore.type=JKS sensitive=false synonyms={DEFAULT_CONFIG:ssl.keystore.type=JKS} + zookeeper.ssl.keystore.location=null sensitive=false synonyms={} + zookeeper.ssl.keystore.password=null sensitive=true synonyms={} + zookeeper.ssl.keystore.type=null sensitive=false synonyms={} + ssl.keystore.certificate.chain=null sensitive=true synonyms={} + ssl.keystore.key=null sensitive=true synonyms={} + ssl.keystore.location=/var/private/ssl/server.keystore.jks sensitive=false synonyms={STATIC_BROKER_CONFIG:ssl.keystore.location=/var/private/ssl/server.keystore.jks} + ssl.keystore.password=null sensitive=true synonyms={STATIC_BROKER_CONFIG:ssl.keystore.password=null} + ssl.keystore.type=JKS sensitive=false 
synonyms={DEFAULT_CONFIG:ssl.keystore.type=JKS} + zookeeper.ssl.keystore.location=null sensitive=false synonyms={} + zookeeper.ssl.keystore.password=null sensitive=true synonyms={} + zookeeper.ssl.keystore.type=null sensitive=false synonyms={} +``` + +We can see from the above output that, keystore location is `/var/private/ssl/server.keystore.jks` which means that TLS is enabled. + +You will find a file named `clientauth.properties` in the config directory. This file is generated by the operator which contains necessary authentication/authorization/certificate configurations that are required during connect to the Kafka cluster. + +```bash +root@kafka-dev-tls-0:~# cat config/clientauth.properties +sasl.jaas.config=org.apache.kafka.common.security.plain.PlainLoginModule required username="admin" password="*************"; +security.protocol=SASL_SSL +sasl.mechanism=PLAIN +ssl.truststore.location=/var/private/ssl/server.truststore.jks +ssl.truststore.password=*********** +``` + +Now, let's exec into the kafka pod and connect using this configuration to verify the TLS is enabled. + +```bash +$ kubectl exec -it -n demo kafka-dev-tls-0 -- bash +kafka@kafka-dev-tls-0:~$ kafka-metadata-quorum.sh --command-config config/clientauth.properties --bootstrap-server localhost:9092 describe --status +ClusterId: 11ef-921c-f2a07f85765w +LeaderId: 1 +LeaderEpoch: 17 +HighWatermark: 1292 +MaxFollowerLag: 0 +MaxFollowerLagTimeMs: 16 +CurrentVoters: [0,1,2] +CurrentObservers: [] +``` + +From the above output, we can see that we are able to connect to the Kafka cluster using the TLS configuration. + +## Cleaning up + +To cleanup the Kubernetes resources created by this tutorial, run: + +```bash +kubectl delete kafka -n demo kafka-dev-tls +kubectl delete issuer -n demo kafka-ca-issuer +kubectl delete ns demo +``` + +## Next Steps + +- Detail concepts of [Kafka object](/docs/guides/kafka/concepts/kafka.md). 
+- Monitor your Kafka cluster with KubeDB using [out-of-the-box Prometheus operator](/docs/guides/kafka/monitoring/using-prometheus-operator.md). +- Monitor your Kafka cluster with KubeDB using [out-of-the-box builtin-Prometheus](/docs/guides/kafka/monitoring/using-builtin-prometheus.md). +- Use [kubedb cli](/docs/guides/kafka/cli/cli.md) to manage databases like kubectl for Kubernetes. +- Detail concepts of [Kafka object](/docs/guides/kafka/concepts/kafka.md). +- Want to hack on KubeDB? Check our [contribution guidelines](/docs/CONTRIBUTING.md). diff --git a/docs/guides/kafka/tls/connectcluster.md b/docs/guides/kafka/tls/connectcluster.md new file mode 100644 index 0000000000..64be177ebc --- /dev/null +++ b/docs/guides/kafka/tls/connectcluster.md @@ -0,0 +1,224 @@ +--- +title: Kafka ConnectCluster TLS/SSL Encryption +menu: + docs_{{ .version }}: + identifier: kf-tls-connectcluster + name: ConnectCluster + parent: kf-tls + weight: 30 +menu_name: docs_{{ .version }} +section_menu_id: guides +--- + +> New to KubeDB? Please start [here](/docs/README.md). + +# Run Kafka ConnectCluster with TLS/SSL (Transport Encryption) + +KubeDB supports providing TLS/SSL encryption for Kafka ConnectCluster. This tutorial will show you how to use KubeDB to run a Kafka ConnectCluster with TLS/SSL encryption. + +## Before You Begin + +- At first, you need to have a Kubernetes cluster, and the kubectl command-line tool must be configured to communicate with your cluster. If you do not already have a cluster, you can create one by using [kind](https://kind.sigs.k8s.io/docs/user/quick-start/). + +- Install [`cert-manger`](https://cert-manager.io/docs/installation/) v1.0.0 or later to your cluster to manage your SSL/TLS certificates. + +- Now, install KubeDB cli on your workstation and KubeDB operator in your cluster following the steps [here](/docs/setup/README.md). + +- To keep things isolated, this tutorial uses a separate namespace called `demo` throughout this tutorial. 
+ + ```bash + $ kubectl create ns demo + namespace/demo created + ``` + +> Note: YAML files used in this tutorial are stored in [docs/examples/kafka](https://github.com/kubedb/docs/tree/{{< param "info.version" >}}/docs/examples/kafka) folder in GitHub repository [kubedb/docs](https://github.com/kubedb/docs). + +## Overview + +KubeDB uses following crd fields to enable SSL/TLS encryption in Kafka. + +- `spec:` + - `enableSSL` + - `tls:` + - `issuerRef` + - `certificate` + +Read about the fields in details in [kafka concept](/docs/guides/kafka/concepts/kafka.md), + +`tls` is applicable for all types of Kafka (i.e., `combined` and `topology`). + +Users must specify the `tls.issuerRef` field. KubeDB uses the `issuer` or `clusterIssuer` referenced in the `tls.issuerRef` field, and the certificate specs provided in `tls.certificate` to generate certificate secrets. These certificate secrets are then used to generate required certificates including `ca.crt`, `tls.crt`, `tls.key`, `keystore.jks` and `truststore.jks`. + +## Create Issuer/ ClusterIssuer + +We are going to create an example `Issuer` that will be used throughout the duration of this tutorial to enable SSL/TLS in Kafka. Alternatively, you can follow this [cert-manager tutorial](https://cert-manager.io/docs/configuration/ca/) to create your own `Issuer`. + +- Start off by generating you ca certificates using openssl. + +```bash +openssl req -x509 -nodes -days 365 -newkey rsa:2048 -keyout ./ca.key -out ./ca.crt -subj "/CN=connectcluster/O=kubedb" +``` + +- Now create a ca-secret using the certificate files you have just generated. + +```bash +kubectl create secret tls connectcluster-ca \ + --cert=ca.crt \ + --key=ca.key \ + --namespace=demo +``` + +Now, create an `Issuer` using the `ca-secret` you have just created. 
The `YAML` file looks like this: + +```yaml +apiVersion: cert-manager.io/v1 +kind: Issuer +metadata: + name: connectcluster-ca-issuer + namespace: demo +spec: + ca: + secretName: connectcluster-ca +``` + +Apply the `YAML` file: + +```bash +$ kubectl create -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/kafka/tls/connectcluster-issuer.yaml +issuer.cert-manager.io/connectcluster-ca-issuer created +``` + +## TLS/SSL encryption in Kafka ConnectCluster + +> **Note:** Before creating Kafka ConnectCluster, make sure you have a Kafka cluster with/without TLS/SSL enabled. If you don't have a Kafka cluster, you can follow the steps [here](/docs/guides/kafka/tls/topology.md). + +```yaml +apiVersion: kafka.kubedb.com/v1alpha1 +kind: ConnectCluster +metadata: + name: connectcluster-tls + namespace: demo +spec: + version: 3.6.1 + enableSSL: true + tls: + issuerRef: + apiGroup: cert-manager.io + kind: Issuer + name: connectcluster-ca-issuer + replicas: 3 + connectorPlugins: + - postgres-2.4.2.final + - jdbc-2.6.1.final + kafkaRef: + name: kafka-prod-tls + namespace: demo + deletionPolicy: WipeOut +``` + +Here, +- `spec.enableSSL` is set to `true` to enable TLS/SSL encryption. +- `spec.tls.issuerRef` refers to the `Issuer` that we have created in the previous step. +- `spec.kafkaRef` refers to the Kafka cluster that we have created from [here](/docs/guides/kafka/tls/topology.md). + +### Deploy Kafka ConnectCluster with TLS/SSL + +```bash +$ kubectl create -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/kafka/tls/connectcluster-tls.yaml +connectcluster.kafka.kubedb.com/connectcluster-tls created +``` + +Now, wait until `connectcluster-tls` has status `Ready`.
i.e, + +```bash +$ watch kubectl get connectcluster -n demo + +Every 2.0s: kubectl get connectcluster -n demo aadee: Fri Sep 6 14:59:32 2024 + +NAME TYPE VERSION STATUS AGE +connectcluster-tls kafka.kubedb.com/v1alpha1 3.6.1 Provisioning 0s +connectcluster-tls kafka.kubedb.com/v1alpha1 3.6.1 Provisioning 34s +. +. +connectcluster-tls kafka.kubedb.com/v1alpha1 3.6.1 Ready 2m +``` + +### Verify TLS/SSL in Kafka ConnectCluster + +```bash +$ kubectl describe secret -n demo connectcluster-tls-client-connect-cert + +Name: connectcluster-tls-client-connect-cert +Namespace: demo +Labels: app.kubernetes.io/component=kafka + app.kubernetes.io/instance=connectcluster-tls + app.kubernetes.io/managed-by=kafka.kubedb.com + app.kubernetes.io/name=connectclusters.kafka.kubedb.com + controller.cert-manager.io/fao=true +Annotations: cert-manager.io/alt-names: + *.connectcluster-tls-pods.demo.svc,*.connectcluster-tls-pods.demo.svc.cluster.local,connectcluster-tls,connectcluster-tls-pods.demo.svc,co... + cert-manager.io/certificate-name: connectcluster-tls-client-connect-cert + cert-manager.io/common-name: connectcluster-tls-pods.demo.svc + cert-manager.io/ip-sans: 127.0.0.1 + cert-manager.io/issuer-group: cert-manager.io + cert-manager.io/issuer-kind: Issuer + cert-manager.io/issuer-name: connectcluster-ca-issuer + cert-manager.io/uri-sans: + +Type: kubernetes.io/tls + +Data +==== +ca.crt: 1184 bytes +tls.crt: 1566 bytes +tls.key: 1704 bytes +``` + +Now, Let's exec into a ConnectCluster pod and verify the configuration that the TLS is enabled. + +```bash +$ kubectl exec -it connectcluster-tls-0 -n demo -- bash +kafka@connectcluster-tls-0:~$ curl -u "$CONNECT_CLUSTER_USER:$CONNECT_CLUSTER_PASSWORD" http://localhost:8083 +curl: (1) Received HTTP/0.9 when not allowed +``` + +From the above output, we can see that we are unable to connect to the Kafka cluster using the HTTP protocol. 
+ +```bash +kafka@connectcluster-tls-0:~$ curl -u "$CONNECT_CLUSTER_USER:$CONNECT_CLUSTER_PASSWORD" https://localhost:8083 +curl: (60) SSL certificate problem: unable to get local issuer certificate +More details here: https://curl.se/docs/sslcerts.html + +curl failed to verify the legitimacy of the server and therefore could not +establish a secure connection to it. To learn more about this situation and +how to fix it, please visit the web page mentioned above. +``` + +Here, we can see that we are unable to connect to the Kafka cluster using the HTTPS protocol. This is because the client does not have the CA certificate to verify the server certificate. + +```bash +kafka@connectcluster-tls-0:~$ curl --cacert /var/private/ssl/ca.crt -u "$CONNECT_CLUSTER_USER:$CONNECT_CLUSTER_PASSWORD" https://localhost:8083 +{"version":"3.6.1","commit":"5e3c2b738d253ff5","kafka_cluster_id":"11ef-8f52-c284f2efe29w"} +``` + +From the above output, we can see that we are able to connect to the Kafka ConnectCluster using the TLS configuration. + +## Cleaning up + +To cleanup the Kubernetes resources created by this tutorial, run: + +```bash +kubectl delete kafka -n demo kafka-prod-tls +kubectl delete connectcluster -n demo connectcluster-tls +kubectl delete issuer -n demo connectcluster-ca-issuer +kubectl delete ns demo +``` + +## Next Steps + +- Detail concepts of [Kafka object](/docs/guides/kafka/concepts/kafka.md). +- Monitor your Kafka cluster with KubeDB using [out-of-the-box Prometheus operator](/docs/guides/kafka/monitoring/using-prometheus-operator.md). +- Monitor your Kafka cluster with KubeDB using [out-of-the-box builtin-Prometheus](/docs/guides/kafka/monitoring/using-builtin-prometheus.md). +- Use [kubedb cli](/docs/guides/kafka/cli/cli.md) to manage databases like kubectl for Kubernetes. +- Detail concepts of [Kafka object](/docs/guides/kafka/concepts/kafka.md). +- Want to hack on KubeDB? Check our [contribution guidelines](/docs/CONTRIBUTING.md). 
diff --git a/docs/guides/kafka/tls/overview.md b/docs/guides/kafka/tls/overview.md index b9977b9e2c..f094edb746 100644 --- a/docs/guides/kafka/tls/overview.md +++ b/docs/guides/kafka/tls/overview.md @@ -51,9 +51,9 @@ Deploying Kafka with TLS/SSL configuration process consists of the following ste 2. Then the user creates a `Kafka` CR which refers to the `Issuer/ClusterIssuer` CR that the user created in the previous step. -3. `KubeDB` Provisioner operator watches for the `Kafka` cr. +3. `KubeDB` Provisioner operator watches for the `Kafka` cr. -4. When it finds one, it creates `Secret`, `Service`, etc. for the `Kafka` database. +4. When it finds one, it creates `Secret`, `Service`, etc. for the `Kafka` cluster. 5. `KubeDB` Ops-manager operator watches for `Kafka`(5c), `Issuer/ClusterIssuer`(5b), `Secret` and `Service`(5a). diff --git a/docs/guides/kafka/tls/topology.md b/docs/guides/kafka/tls/topology.md new file mode 100644 index 0000000000..2c94878d98 --- /dev/null +++ b/docs/guides/kafka/tls/topology.md @@ -0,0 +1,253 @@ +--- +title: Kafka Topology TLS/SSL Encryption +menu: + docs_{{ .version }}: + identifier: kf-tls-topology + name: Topology Cluster + parent: kf-tls + weight: 30 +menu_name: docs_{{ .version }} +section_menu_id: guides +--- + +> New to KubeDB? Please start [here](/docs/README.md). + +# Run Kafka with TLS/SSL (Transport Encryption) + +KubeDB supports providing TLS/SSL encryption for Kafka. This tutorial will show you how to use KubeDB to run a Kafka cluster with TLS/SSL encryption. + +## Before You Begin + +- At first, you need to have a Kubernetes cluster, and the kubectl command-line tool must be configured to communicate with your cluster. If you do not already have a cluster, you can create one by using [kind](https://kind.sigs.k8s.io/docs/user/quick-start/). + +- Install [`cert-manager`](https://cert-manager.io/docs/installation/) v1.0.0 or later to your cluster to manage your SSL/TLS certificates.
+ +- Now, install KubeDB cli on your workstation and KubeDB operator in your cluster following the steps [here](/docs/setup/README.md). + +- To keep things isolated, this tutorial uses a separate namespace called `demo` throughout this tutorial. + + ```bash + $ kubectl create ns demo + namespace/demo created + ``` + +> Note: YAML files used in this tutorial are stored in [docs/examples/kafka](https://github.com/kubedb/docs/tree/{{< param "info.version" >}}/docs/examples/kafka) folder in GitHub repository [kubedb/docs](https://github.com/kubedb/docs). + +## Overview + +KubeDB uses following crd fields to enable SSL/TLS encryption in Kafka. + +- `spec:` + - `enableSSL` + - `tls:` + - `issuerRef` + - `certificate` + +Read about the fields in details in [kafka concept](/docs/guides/kafka/concepts/kafka.md), + +`tls` is applicable for all types of Kafka (i.e., `combined` and `topology`). + +Users must specify the `tls.issuerRef` field. KubeDB uses the `issuer` or `clusterIssuer` referenced in the `tls.issuerRef` field, and the certificate specs provided in `tls.certificate` to generate certificate secrets. These certificate secrets are then used to generate required certificates including `ca.crt`, `tls.crt`, `tls.key`, `keystore.jks` and `truststore.jks`. + +## Create Issuer/ ClusterIssuer + +We are going to create an example `Issuer` that will be used throughout the duration of this tutorial to enable SSL/TLS in Kafka. Alternatively, you can follow this [cert-manager tutorial](https://cert-manager.io/docs/configuration/ca/) to create your own `Issuer`. + +- Start off by generating you ca certificates using openssl. + +```bash +openssl req -x509 -nodes -days 365 -newkey rsa:2048 -keyout ./ca.key -out ./ca.crt -subj "/CN=kafka/O=kubedb" +``` + +- Now create a ca-secret using the certificate files you have just generated. 
+ +```bash +kubectl create secret tls kafka-ca \ + --cert=ca.crt \ + --key=ca.key \ + --namespace=demo +``` + +Now, create an `Issuer` using the `ca-secret` you have just created. The `YAML` file looks like this: + +```yaml +apiVersion: cert-manager.io/v1 +kind: Issuer +metadata: + name: kafka-ca-issuer + namespace: demo +spec: + ca: + secretName: kafka-ca +``` + +Apply the `YAML` file: + +```bash +$ kubectl create -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/kafka/tls/kf-issuer.yaml +issuer.cert-manager.io/kafka-ca-issuer created +``` + +## TLS/SSL encryption in Kafka Topology Cluster + +```yaml +apiVersion: kubedb.com/v1 +kind: Kafka +metadata: + name: kafka-prod-tls + namespace: demo +spec: + version: 3.6.1 + enableSSL: true + tls: + issuerRef: + apiGroup: "cert-manager.io" + kind: Issuer + name: kafka-ca-issuer + topology: + broker: + replicas: 2 + storage: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi + storageClassName: standard + controller: + replicas: 2 + storage: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi + storageClassName: standard + storageType: Durable + deletionPolicy: WipeOut +``` + +### Deploy Kafka Topology Cluster with TLS/SSL + +```bash +$ kubectl create -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/kafka/tls/kafka-prod-tls.yaml +kafka.kubedb.com/kafka-prod-tls created +``` + +Now, wait until `kafka-prod-tls created` has status `Ready`. i.e, + +```bash +$ watch kubectl get kafka -n demo + +Every 2.0s: kubectl get kafka -n demo aadee: Fri Sep 6 12:34:51 2024 +NAME TYPE VERSION STATUS AGE +kafka-prod-tls kubedb.com/v1 3.6.1 Provisioning 17s +kafka-prod-tls kubedb.com/v1 3.6.1 Provisioning 12s +. +. 
+kafka-prod-tls kubedb.com/v1 3.6.1 Ready 2m1s +``` + +### Verify TLS/SSL in Kafka Topology Cluster + +```bash +$ kubectl describe secret kafka-prod-tls-client-cert -n demo + +Name: kafka-prod-tls-client-cert +Namespace: demo +Labels: app.kubernetes.io/component=database + app.kubernetes.io/instance=kafka-prod-tls + app.kubernetes.io/managed-by=kubedb.com + app.kubernetes.io/name=kafkas.kubedb.com + controller.cert-manager.io/fao=true +Annotations: cert-manager.io/alt-names: + *.kafka-prod-tls-pods.demo.svc.cluster.local,kafka-prod-tls-pods,kafka-prod-tls-pods.demo.svc,kafka-prod-tls-pods.demo.svc.cluster.local,l... + cert-manager.io/certificate-name: kafka-prod-tls-client-cert + cert-manager.io/common-name: kafka-prod-tls-pods.demo.svc + cert-manager.io/ip-sans: 127.0.0.1 + cert-manager.io/issuer-group: cert-manager.io + cert-manager.io/issuer-kind: Issuer + cert-manager.io/issuer-name: kafka-ca-issuer + cert-manager.io/uri-sans: + +Type: kubernetes.io/tls + +Data +==== +ca.crt: 1184 bytes +keystore.jks: 3254 bytes +tls.crt: 1460 bytes +tls.key: 1708 bytes +truststore.jks: 891 bytes +``` + +Now, Let's exec into a kafka broker pod and verify the configuration that the TLS is enabled. 
+ +```bash +$ kubectl exec -it -n demo kafka-prod-tls-broker-0 -- kafka-configs.sh --bootstrap-server localhost:9092 --command-config /opt/kafka/config/clientauth.properties --describe --entity-type brokers --all | grep 'ssl.keystore' + ssl.keystore.certificate.chain=null sensitive=true synonyms={} + ssl.keystore.key=null sensitive=true synonyms={} + ssl.keystore.location=/var/private/ssl/server.keystore.jks sensitive=false synonyms={STATIC_BROKER_CONFIG:ssl.keystore.location=/var/private/ssl/server.keystore.jks} + ssl.keystore.password=null sensitive=true synonyms={STATIC_BROKER_CONFIG:ssl.keystore.password=null} + ssl.keystore.type=JKS sensitive=false synonyms={DEFAULT_CONFIG:ssl.keystore.type=JKS} + zookeeper.ssl.keystore.location=null sensitive=false synonyms={} + zookeeper.ssl.keystore.password=null sensitive=true synonyms={} + zookeeper.ssl.keystore.type=null sensitive=false synonyms={} + ssl.keystore.certificate.chain=null sensitive=true synonyms={} + ssl.keystore.key=null sensitive=true synonyms={} + ssl.keystore.location=/var/private/ssl/server.keystore.jks sensitive=false synonyms={STATIC_BROKER_CONFIG:ssl.keystore.location=/var/private/ssl/server.keystore.jks} + ssl.keystore.password=null sensitive=true synonyms={STATIC_BROKER_CONFIG:ssl.keystore.password=null} + ssl.keystore.type=JKS sensitive=false synonyms={DEFAULT_CONFIG:ssl.keystore.type=JKS} + zookeeper.ssl.keystore.location=null sensitive=false synonyms={} + zookeeper.ssl.keystore.password=null sensitive=true synonyms={} + zookeeper.ssl.keystore.type=null sensitive=false synonyms={} +``` + +We can see from the above output that, keystore location is `/var/private/ssl/server.keystore.jks` which means that TLS is enabled. + +You will find a file named `clientauth.properties` in the config directory. This file is generated by the operator which contains necessary authentication/authorization/certificate configurations that are required during connect to the Kafka cluster. 
+ +```bash +root@kafka-prod-broker-tls-0:~# cat config/clientauth.properties +sasl.jaas.config=org.apache.kafka.common.security.plain.PlainLoginModule required username="admin" password="*************"; +security.protocol=SASL_SSL +sasl.mechanism=PLAIN +ssl.truststore.location=/var/private/ssl/server.truststore.jks +ssl.truststore.password=*********** +``` + +Now, let's exec into the kafka pod and connect using this configuration to verify the TLS is enabled. + +```bash +$ kubectl exec -it -n demo kafka-prod-broker-tls-0 -- bash +kafka@kafka-prod-broker-tls-0:~$ kafka-metadata-quorum.sh --command-config config/clientauth.properties --bootstrap-server localhost:9092 describe --status +ClusterId: 11ef-921c-f2a07f85765w +LeaderId: 1001 +LeaderEpoch: 17 +HighWatermark: 390 +MaxFollowerLag: 0 +MaxFollowerLagTimeMs: 18 +CurrentVoters: [1000,1001] +CurrentObservers: [0,1] +``` + +From the above output, we can see that we are able to connect to the Kafka cluster using the TLS configuration. + +## Cleaning up + +To cleanup the Kubernetes resources created by this tutorial, run: + +```bash +kubectl delete kafka -n demo kafka-prod-tls +kubectl delete issuer -n demo kafka-ca-issuer +kubectl delete ns demo +``` + +## Next Steps + +- Detail concepts of [Kafka object](/docs/guides/kafka/concepts/kafka.md). +- Monitor your Kafka cluster with KubeDB using [out-of-the-box Prometheus operator](/docs/guides/kafka/monitoring/using-prometheus-operator.md). +- Monitor your Kafka cluster with KubeDB using [out-of-the-box builtin-Prometheus](/docs/guides/kafka/monitoring/using-builtin-prometheus.md). +- Use [kubedb cli](/docs/guides/kafka/cli/cli.md) to manage databases like kubectl for Kubernetes. +- Detail concepts of [Kafka object](/docs/guides/kafka/concepts/kafka.md). +- Want to hack on KubeDB? Check our [contribution guidelines](/docs/CONTRIBUTING.md).