diff --git a/docs/examples/kafka/configuration/kafka-combined.yaml b/docs/examples/kafka/configuration/kafka-combined.yaml index d458714d65..fd61f4701b 100644 --- a/docs/examples/kafka/configuration/kafka-combined.yaml +++ b/docs/examples/kafka/configuration/kafka-combined.yaml @@ -6,17 +6,8 @@ metadata: spec: replicas: 2 version: 3.6.1 - podTemplate: - spec: - containers: - - name: kafka - resources: - limits: - cpu: 1 - memory: 2Gi - requests: - cpu: 500m - memory: 1Gi + configSecret: + name: configsecret-combined storage: accessModes: - ReadWriteOnce @@ -25,4 +16,4 @@ spec: storage: 1Gi storageClassName: standard storageType: Durable - deletionPolicy: DoNotTerminate \ No newline at end of file + deletionPolicy: WipeOut \ No newline at end of file diff --git a/docs/examples/kafka/configuration/kafka-topology.yaml b/docs/examples/kafka/configuration/kafka-topology.yaml index 331d2ba015..6359857f64 100644 --- a/docs/examples/kafka/configuration/kafka-topology.yaml +++ b/docs/examples/kafka/configuration/kafka-topology.yaml @@ -10,14 +10,6 @@ spec: topology: broker: replicas: 2 - podTemplate: - spec: - containers: - - name: kafka - resources: - requests: - cpu: "500m" - memory: "1Gi" storage: accessModes: - ReadWriteOnce @@ -27,14 +19,6 @@ spec: storageClassName: standard controller: replicas: 2 - podTemplate: - spec: - containers: - - name: kafka - resources: - requests: - cpu: "500m" - memory: "1Gi" storage: accessModes: - ReadWriteOnce diff --git a/docs/examples/kafka/reconfigure/kafka-combined-custom-config.yaml b/docs/examples/kafka/reconfigure/kafka-combined-custom-config.yaml new file mode 100644 index 0000000000..18f8cf53df --- /dev/null +++ b/docs/examples/kafka/reconfigure/kafka-combined-custom-config.yaml @@ -0,0 +1,8 @@ +apiVersion: v1 +kind: Secret +metadata: + name: kf-combined-custom-config + namespace: demo +stringData: + server.properties: |- + log.retention.hours=100 \ No newline at end of file diff --git a/docs/examples/kafka/reconfigure/kafka-combined.yaml b/docs/examples/kafka/reconfigure/kafka-combined.yaml new file mode 100644 index 0000000000..9f5fcbe740 --- /dev/null +++ b/docs/examples/kafka/reconfigure/kafka-combined.yaml @@ -0,0 +1,19 @@ +apiVersion: kubedb.com/v1 +kind: Kafka +metadata: + name: kafka-dev + namespace: demo +spec: + replicas: 2 + version: 3.6.1 + configSecret: + name: kf-combined-custom-config + storage: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi + storageClassName: standard + storageType: Durable + deletionPolicy: WipeOut \ No newline at end of file diff --git a/docs/examples/kafka/reconfigure/kafka-reconfigure-apply-combined.yaml b/docs/examples/kafka/reconfigure/kafka-reconfigure-apply-combined.yaml new file mode 100644 index 0000000000..c945a4d15d --- /dev/null +++ b/docs/examples/kafka/reconfigure/kafka-reconfigure-apply-combined.yaml @@ -0,0 +1,15 @@ +apiVersion: ops.kubedb.com/v1alpha1 +kind: KafkaOpsRequest +metadata: + name: kfops-reconfigure-apply-combined + namespace: demo +spec: + type: Reconfigure + databaseRef: + name: kafka-dev + configuration: + applyConfig: + server.properties: |- + log.retention.hours=150 + timeout: 5m + apply: IfReady \ No newline at end of file diff --git a/docs/examples/kafka/reconfigure/kafka-reconfigure-apply-topology.yaml b/docs/examples/kafka/reconfigure/kafka-reconfigure-apply-topology.yaml new file mode 100644 index 0000000000..162b149bfb --- /dev/null +++ b/docs/examples/kafka/reconfigure/kafka-reconfigure-apply-topology.yaml @@ -0,0 +1,18 @@ +apiVersion: 
ops.kubedb.com/v1alpha1 +kind: KafkaOpsRequest +metadata: + name: kfops-reconfigure-apply-topology + namespace: demo +spec: + type: Reconfigure + databaseRef: + name: kafka-prod + configuration: + applyConfig: + broker.properties: |- + log.retention.hours=150 + controller.properties: |- + controller.quorum.election.timeout.ms=4000 + controller.quorum.fetch.timeout.ms=5000 + timeout: 5m + apply: IfReady \ No newline at end of file diff --git a/docs/examples/kafka/reconfigure/kafka-reconfigure-update-combined.yaml b/docs/examples/kafka/reconfigure/kafka-reconfigure-update-combined.yaml new file mode 100644 index 0000000000..9382a2b025 --- /dev/null +++ b/docs/examples/kafka/reconfigure/kafka-reconfigure-update-combined.yaml @@ -0,0 +1,14 @@ +apiVersion: ops.kubedb.com/v1alpha1 +kind: KafkaOpsRequest +metadata: + name: kfops-reconfigure-combined + namespace: demo +spec: + type: Reconfigure + databaseRef: + name: kafka-dev + configuration: + configSecret: + name: new-kf-combined-custom-config + timeout: 5m + apply: IfReady \ No newline at end of file diff --git a/docs/examples/kafka/reconfigure/kafka-reconfigure-update-topology.yaml b/docs/examples/kafka/reconfigure/kafka-reconfigure-update-topology.yaml new file mode 100644 index 0000000000..f4b9f5cc0d --- /dev/null +++ b/docs/examples/kafka/reconfigure/kafka-reconfigure-update-topology.yaml @@ -0,0 +1,14 @@ +apiVersion: ops.kubedb.com/v1alpha1 +kind: KafkaOpsRequest +metadata: + name: kfops-reconfigure-topology + namespace: demo +spec: + type: Reconfigure + databaseRef: + name: kafka-prod + configuration: + configSecret: + name: new-kf-topology-custom-config + timeout: 5m + apply: IfReady \ No newline at end of file diff --git a/docs/examples/kafka/reconfigure/kafka-topology-custom-config.yaml b/docs/examples/kafka/reconfigure/kafka-topology-custom-config.yaml new file mode 100644 index 0000000000..a113be5ae3 --- /dev/null +++ b/docs/examples/kafka/reconfigure/kafka-topology-custom-config.yaml @@ -0,0 +1,10 @@ +apiVersion: v1 +kind: Secret +metadata: + name: kf-topology-custom-config + namespace: demo +stringData: + broker.properties: |- + log.retention.hours=100 + controller.properties: |- + controller.quorum.election.timeout.ms=2000 \ No newline at end of file diff --git a/docs/examples/kafka/reconfigure/kafka-topology.yaml b/docs/examples/kafka/reconfigure/kafka-topology.yaml new file mode 100644 index 0000000000..20488615a8 --- /dev/null +++ b/docs/examples/kafka/reconfigure/kafka-topology.yaml @@ -0,0 +1,30 @@ +apiVersion: kubedb.com/v1 +kind: Kafka +metadata: + name: kafka-prod + namespace: demo +spec: + version: 3.6.1 + configSecret: + name: kf-topology-custom-config + topology: + broker: + replicas: 2 + storage: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi + storageClassName: standard + controller: + replicas: 2 + storage: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi + storageClassName: standard + storageType: Durable + deletionPolicy: WipeOut \ No newline at end of file diff --git a/docs/examples/kafka/reconfigure/new-kafka-combined-custom-config.yaml b/docs/examples/kafka/reconfigure/new-kafka-combined-custom-config.yaml new file mode 100644 index 0000000000..b7daa9beb4 --- /dev/null +++ b/docs/examples/kafka/reconfigure/new-kafka-combined-custom-config.yaml @@ -0,0 +1,8 @@ +apiVersion: v1 +kind: Secret +metadata: + name: new-kf-combined-custom-config + namespace: demo +stringData: + server.properties: |- + log.retention.hours=125 \ No newline at end of file diff --git 
a/docs/examples/kafka/reconfigure/new-kafka-topology-custom-config.yaml b/docs/examples/kafka/reconfigure/new-kafka-topology-custom-config.yaml new file mode 100644 index 0000000000..3bf34a3ded --- /dev/null +++ b/docs/examples/kafka/reconfigure/new-kafka-topology-custom-config.yaml @@ -0,0 +1,8 @@ +apiVersion: v1 +kind: Secret +metadata: + name: new-kf-topology-custom-config + namespace: demo +stringData: + broker.properties: |- + log.retention.hours=125 \ No newline at end of file diff --git a/docs/examples/kafka/scaling/horizontal-scaling/kafka-hscale-down-combined.yaml b/docs/examples/kafka/scaling/horizontal-scaling/kafka-hscale-down-combined.yaml new file mode 100644 index 0000000000..0cb298d523 --- /dev/null +++ b/docs/examples/kafka/scaling/horizontal-scaling/kafka-hscale-down-combined.yaml @@ -0,0 +1,11 @@ +apiVersion: ops.kubedb.com/v1alpha1 +kind: KafkaOpsRequest +metadata: + name: kfops-hscale-down-combined + namespace: demo +spec: + type: HorizontalScaling + databaseRef: + name: kafka-dev + horizontalScaling: + node: 2 \ No newline at end of file diff --git a/docs/examples/kafka/scaling/horizontal-scaling/kafka-hscale-down-topology.yaml b/docs/examples/kafka/scaling/horizontal-scaling/kafka-hscale-down-topology.yaml new file mode 100644 index 0000000000..0706afe78c --- /dev/null +++ b/docs/examples/kafka/scaling/horizontal-scaling/kafka-hscale-down-topology.yaml @@ -0,0 +1,13 @@ +apiVersion: ops.kubedb.com/v1alpha1 +kind: KafkaOpsRequest +metadata: + name: kfops-hscale-down-topology + namespace: demo +spec: + type: HorizontalScaling + databaseRef: + name: kafka-prod + horizontalScaling: + topology: + broker: 2 + controller: 2 \ No newline at end of file diff --git a/docs/examples/kafka/scaling/horizontal-scaling/kafka-hscale-up-combined.yaml b/docs/examples/kafka/scaling/horizontal-scaling/kafka-hscale-up-combined.yaml new file mode 100644 index 0000000000..e302cbd2fe --- /dev/null +++ b/docs/examples/kafka/scaling/horizontal-scaling/kafka-hscale-up-combined.yaml @@ -0,0 +1,11 @@ +apiVersion: ops.kubedb.com/v1alpha1 +kind: KafkaOpsRequest +metadata: + name: kfops-hscale-up-combined + namespace: demo +spec: + type: HorizontalScaling + databaseRef: + name: kafka-dev + horizontalScaling: + node: 3 \ No newline at end of file diff --git a/docs/examples/kafka/scaling/horizontal-scaling/kafka-hscale-up-topology.yaml b/docs/examples/kafka/scaling/horizontal-scaling/kafka-hscale-up-topology.yaml new file mode 100644 index 0000000000..0a71039967 --- /dev/null +++ b/docs/examples/kafka/scaling/horizontal-scaling/kafka-hscale-up-topology.yaml @@ -0,0 +1,13 @@ +apiVersion: ops.kubedb.com/v1alpha1 +kind: KafkaOpsRequest +metadata: + name: kfops-hscale-up-topology + namespace: demo +spec: + type: HorizontalScaling + databaseRef: + name: kafka-prod + horizontalScaling: + topology: + broker: 3 + controller: 3 \ No newline at end of file diff --git a/docs/examples/kafka/scaling/kafka-combined.yaml b/docs/examples/kafka/scaling/kafka-combined.yaml new file mode 100644 index 0000000000..f401c1440e --- /dev/null +++ b/docs/examples/kafka/scaling/kafka-combined.yaml @@ -0,0 +1,17 @@ +apiVersion: kubedb.com/v1 +kind: Kafka +metadata: + name: kafka-dev + namespace: demo +spec: + replicas: 2 + version: 3.6.1 + storage: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi + storageClassName: standard + storageType: Durable + deletionPolicy: WipeOut \ No newline at end of file diff --git a/docs/examples/kafka/scaling/kafka-topology.yaml 
b/docs/examples/kafka/scaling/kafka-topology.yaml new file mode 100644 index 0000000000..e8112984dc --- /dev/null +++ b/docs/examples/kafka/scaling/kafka-topology.yaml @@ -0,0 +1,28 @@ +apiVersion: kubedb.com/v1 +kind: Kafka +metadata: + name: kafka-prod + namespace: demo +spec: + version: 3.6.1 + topology: + broker: + replicas: 2 + storage: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi + storageClassName: standard + controller: + replicas: 2 + storage: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi + storageClassName: standard + storageType: Durable + deletionPolicy: WipeOut \ No newline at end of file diff --git a/docs/examples/kafka/scaling/vertical-scaling/kafka-vertical-scaling-combined.yaml b/docs/examples/kafka/scaling/vertical-scaling/kafka-vertical-scaling-combined.yaml new file mode 100644 index 0000000000..38d51a3376 --- /dev/null +++ b/docs/examples/kafka/scaling/vertical-scaling/kafka-vertical-scaling-combined.yaml @@ -0,0 +1,20 @@ +apiVersion: ops.kubedb.com/v1alpha1 +kind: KafkaOpsRequest +metadata: + name: kfops-vscale-combined + namespace: demo +spec: + type: VerticalScaling + databaseRef: + name: kafka-dev + verticalScaling: + node: + resources: + requests: + memory: "1.2Gi" + cpu: "0.6" + limits: + memory: "1.2Gi" + cpu: "0.6" + timeout: 5m + apply: IfReady \ No newline at end of file diff --git a/docs/examples/kafka/scaling/vertical-scaling/kafka-vertical-scaling-topology.yaml b/docs/examples/kafka/scaling/vertical-scaling/kafka-vertical-scaling-topology.yaml new file mode 100644 index 0000000000..3b890be76d --- /dev/null +++ b/docs/examples/kafka/scaling/vertical-scaling/kafka-vertical-scaling-topology.yaml @@ -0,0 +1,28 @@ +apiVersion: ops.kubedb.com/v1alpha1 +kind: KafkaOpsRequest +metadata: + name: kfops-vscale-topology + namespace: demo +spec: + type: VerticalScaling + databaseRef: + name: kafka-prod + verticalScaling: + broker: + resources: + requests: + memory: "1.2Gi" + cpu: "0.6" + limits: + memory: "1.2Gi" + cpu: "0.6" + controller: + resources: + requests: + memory: "1.1Gi" + cpu: "0.6" + limits: + memory: "1.1Gi" + cpu: "0.6" + timeout: 5m + apply: IfReady \ No newline at end of file diff --git a/docs/guides/kafka/configuration/kafka-combined.md b/docs/guides/kafka/configuration/kafka-combined.md index bc39764963..fe51efa6e3 100644 --- a/docs/guides/kafka/configuration/kafka-combined.md +++ b/docs/guides/kafka/configuration/kafka-combined.md @@ -91,17 +91,6 @@ spec: version: 3.6.1 configSecret: name: configsecret-combined - podTemplate: - spec: - containers: - - name: kafka - resources: - limits: - cpu: 1 - memory: 2Gi - requests: - cpu: 500m - memory: 1Gi storage: accessModes: - ReadWriteOnce @@ -160,9 +149,7 @@ To cleanup the Kubernetes resources created by this tutorial, run: ```bash $ kubectl delete kf -n demo kafka-dev - $ kubectl delete secret -n demo configsecret-combined - $ kubectl delete namespace demo ``` diff --git a/docs/guides/kafka/configuration/kafka-topology.md b/docs/guides/kafka/configuration/kafka-topology.md index d9a0a62ee0..c3161647d5 100644 --- a/docs/guides/kafka/configuration/kafka-topology.md +++ b/docs/guides/kafka/configuration/kafka-topology.md @@ -103,14 +103,6 @@ spec: topology: broker: replicas: 2 - podTemplate: - spec: - containers: - - name: kafka - resources: - requests: - cpu: "500m" - memory: "1Gi" storage: accessModes: - ReadWriteOnce @@ -120,14 +112,6 @@ spec: storageClassName: standard controller: replicas: 2 - podTemplate: - spec: - containers: - - name: kafka - 
resources: - requests: - cpu: "500m" - memory: "1Gi" storage: accessModes: - ReadWriteOnce diff --git a/docs/guides/kafka/reconfigure/combined.md b/docs/guides/kafka/reconfigure/combined.md deleted file mode 100644 index 7920f3ba7e..0000000000 --- a/docs/guides/kafka/reconfigure/combined.md +++ /dev/null @@ -1,645 +0,0 @@ ---- -title: Reconfigure Kafka Combined -menu: - docs_{{ .version }}: - identifier: kf-reconfigure-combined - name: Combined - parent: kf-reconfigure - weight: 30 -menu_name: docs_{{ .version }} -section_menu_id: guides ---- - -> New to KubeDB? Please start [here](/docs/README.md). - -# Reconfigure Kafka Combined Cluster - -This guide will show you how to use `KubeDB` Ops-manager operator to reconfigure a Kafka Combined cluster. - -## Before You Begin - -- At first, you need to have a Kubernetes cluster, and the `kubectl` command-line tool must be configured to communicate with your cluster. - -- Install `KubeDB` Provisioner and Ops-manager operator in your cluster following the steps [here](/docs/setup/README.md). - -- You should be familiar with the following `KubeDB` concepts: - - [Kafka](/docs/guides/kafka/concepts/kafka.md) - - [Combined](/docs/guides/kafka/clustering/combined-cluster/index.md) - - [KafkaOpsRequest](/docs/guides/kafka/concepts/kafkaopsrequest.md) - - [Reconfigure Overview](/docs/guides/kafka/reconfigure/overview.md) - -To keep everything isolated, we are going to use a separate namespace called `demo` throughout this tutorial. - -```bash -$ kubectl create ns demo -namespace/demo created -``` - -> **Note:** YAML files used in this tutorial are stored in [docs/examples/mongodb](/docs/examples/mongodb) directory of [kubedb/docs](https://github.com/kubedb/docs) repository. - -Now, we are going to deploy a `Kafka` Replicaset using a supported version by `KubeDB` operator. Then we are going to apply `KafkaOpsRequest` to reconfigure its configuration. - -### Prepare Kafka Replicaset - -Now, we are going to deploy a `Kafka` Replicaset database with version `4.4.26`. - -### Deploy Kafka - -At first, we will create `mongod.conf` file containing required configuration settings. - -```ini -$ cat mongod.conf -net: - maxIncomingConnections: 10000 -``` -Here, `maxIncomingConnections` is set to `10000`, whereas the default value is `65536`. - -Now, we will create a secret with this configuration file. - -```bash -$ kubectl create secret generic -n demo mg-custom-config --from-file=./mongod.conf -secret/mg-custom-config created -``` - -In this section, we are going to create a Kafka object specifying `spec.configSecret` field to apply this custom configuration. Below is the YAML of the `Kafka` CR that we are going to create, - -```yaml -apiVersion: kubedb.com/v1 -kind: Kafka -metadata: - name: mg-replicaset - namespace: demo -spec: - version: "4.4.26" - replicas: 3 - replicaSet: - name: rs0 - storageType: Durable - storage: - storageClassName: "standard" - accessModes: - - ReadWriteOnce - resources: - requests: - storage: 1Gi - configSecret: - name: mg-custom-config -``` - -Let's create the `Kafka` CR we have shown above, - -```bash -$ kubectl create -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/mongodb/reconfigure/mg-replicaset-config.yaml -mongodb.kubedb.com/mg-replicaset created -``` - -Now, wait until `mg-replicaset` has status `Ready`. 
i.e, - -```bash -$ kubectl get mg -n demo -NAME VERSION STATUS AGE -mg-replicaset 4.4.26 Ready 19m -``` - -Now, we will check if the database has started with the custom configuration we have provided. - -First we need to get the username and password to connect to a mongodb instance, -```bash -$ kubectl get secrets -n demo mg-replicaset-auth -o jsonpath='{.data.\username}' | base64 -d -root - -$ kubectl get secrets -n demo mg-replicaset-auth -o jsonpath='{.data.\password}' | base64 -d -nrKuxni0wDSMrgwy -``` - -Now let's connect to a mongodb instance and run a mongodb internal command to check the configuration we have provided. - -```bash -$ kubectl exec -n demo mg-replicaset-0 -- mongo admin -u root -p nrKuxni0wDSMrgwy --eval "db._adminCommand( {getCmdLineOpts: 1})" --quiet -{ - "argv" : [ - "mongod", - "--dbpath=/data/db", - "--auth", - "--ipv6", - "--bind_ip_all", - "--port=27017", - "--tlsMode=disabled", - "--replSet=rs0", - "--keyFile=/data/configdb/key.txt", - "--clusterAuthMode=keyFile", - "--config=/data/configdb/mongod.conf" - ], - "parsed" : { - "config" : "/data/configdb/mongod.conf", - "net" : { - "bindIp" : "*", - "ipv6" : true, - "maxIncomingConnections" : 10000, - "port" : 27017, - "tls" : { - "mode" : "disabled" - } - }, - "replication" : { - "replSet" : "rs0" - }, - "security" : { - "authorization" : "enabled", - "clusterAuthMode" : "keyFile", - "keyFile" : "/data/configdb/key.txt" - }, - "storage" : { - "dbPath" : "/data/db" - } - }, - "ok" : 1, - "$clusterTime" : { - "clusterTime" : Timestamp(1614668500, 1), - "signature" : { - "hash" : BinData(0,"7sh886HhsNYajGxYGp5Jxi52IzA="), - "keyId" : NumberLong("6934943333319966722") - } - }, - "operationTime" : Timestamp(1614668500, 1) -} -``` - -As we can see from the configuration of ready mongodb, the value of `maxIncomingConnections` has been set to `10000`. - -### Reconfigure using new config secret - -Now we will reconfigure this database to set `maxIncomingConnections` to `20000`. - -Now, we will edit the `mongod.conf` file containing required configuration settings. - -```ini -$ cat mongod.conf -net: - maxIncomingConnections: 20000 -``` - -Then, we will create a new secret with this configuration file. - -```bash -$ kubectl create secret generic -n demo new-custom-config --from-file=./mongod.conf -secret/new-custom-config created -``` - -#### Create KafkaOpsRequest - -Now, we will use this secret to replace the previous secret using a `KafkaOpsRequest` CR. The `KafkaOpsRequest` yaml is given below, - -```yaml -apiVersion: ops.kubedb.com/v1alpha1 -kind: KafkaOpsRequest -metadata: - name: mops-reconfigure-replicaset - namespace: demo -spec: - type: Reconfigure - databaseRef: - name: mg-replicaset - configuration: - replicaSet: - configSecret: - name: new-custom-config - readinessCriteria: - oplogMaxLagSeconds: 20 - objectsCountDiffPercentage: 10 - timeout: 5m - apply: IfReady -``` - -Here, - -- `spec.databaseRef.name` specifies that we are reconfiguring `mops-reconfigure-replicaset` database. -- `spec.type` specifies that we are performing `Reconfigure` on our database. -- `spec.customConfig.replicaSet.configSecret.name` specifies the name of the new secret. -- `spec.customConfig.arbiter.configSecret.name` could also be specified with a config-secret. -- Have a look [here](/docs/guides/mongodb/concepts/opsrequest.md#specreadinesscriteria) on the respective sections to understand the `readinessCriteria`, `timeout` & `apply` fields. 
- -Let's create the `KafkaOpsRequest` CR we have shown above, - -```bash -$ kubectl apply -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/mongodb/reconfigure/mops-reconfigure-replicaset.yaml -mongodbopsrequest.ops.kubedb.com/mops-reconfigure-replicaset created -``` - -#### Verify the new configuration is working - -If everything goes well, `KubeDB` Ops-manager operator will update the `configSecret` of `Kafka` object. - -Let's wait for `KafkaOpsRequest` to be `Successful`. Run the following command to watch `KafkaOpsRequest` CR, - -```bash -$ watch kubectl get mongodbopsrequest -n demo -Every 2.0s: kubectl get mongodbopsrequest -n demo -NAME TYPE STATUS AGE -mops-reconfigure-replicaset Reconfigure Successful 113s -``` - -We can see from the above output that the `KafkaOpsRequest` has succeeded. If we describe the `KafkaOpsRequest` we will get an overview of the steps that were followed to reconfigure the database. - -```bash -$ kubectl describe mongodbopsrequest -n demo mops-reconfigure-replicaset -Name: mops-reconfigure-replicaset -Namespace: demo -Labels: -Annotations: -API Version: ops.kubedb.com/v1alpha1 -Kind: KafkaOpsRequest -Metadata: - Creation Timestamp: 2021-03-02T07:04:31Z - Generation: 1 - Managed Fields: - API Version: ops.kubedb.com/v1alpha1 - Fields Type: FieldsV1 - fieldsV1: - f:metadata: - f:annotations: - .: - f:kubectl.kubernetes.io/last-applied-configuration: - f:spec: - .: - f:apply: - f:configuration: - .: - f:replicaSet: - .: - f:configSecret: - .: - f:name: - f:databaseRef: - .: - f:name: - f:readinessCriteria: - .: - f:objectsCountDiffPercentage: - f:oplogMaxLagSeconds: - f:timeout: - f:type: - Manager: kubectl-client-side-apply - Operation: Update - Time: 2021-03-02T07:04:31Z - API Version: ops.kubedb.com/v1alpha1 - Fields Type: FieldsV1 - fieldsV1: - f:spec: - f:configuration: - f:replicaSet: - f:podTemplate: - .: - f:controller: - f:metadata: - f:spec: - .: - f:resources: - f:status: - .: - f:conditions: - f:observedGeneration: - f:phase: - Manager: kubedb-enterprise - Operation: Update - Time: 2021-03-02T07:04:31Z - Resource Version: 29869 - Self Link: /apis/ops.kubedb.com/v1alpha1/namespaces/demo/mongodbopsrequests/mops-reconfigure-replicaset - UID: 064733d6-19db-4153-82f7-bc0580116ee6 -Spec: - Apply: IfReady - Configuration: - Replica Set: - Config Secret: - Name: new-custom-config - Database Ref: - Name: mg-replicaset - Readiness Criteria: - Objects Count Diff Percentage: 10 - Oplog Max Lag Seconds: 20 - Timeout: 5m - Type: Reconfigure -Status: - Conditions: - Last Transition Time: 2021-03-02T07:04:31Z - Message: Kafka ops request is reconfiguring database - Observed Generation: 1 - Reason: Reconfigure - Status: True - Type: Reconfigure - Last Transition Time: 2021-03-02T07:06:21Z - Message: Successfully Reconfigured Kafka - Observed Generation: 1 - Reason: ReconfigureReplicaset - Status: True - Type: ReconfigureReplicaset - Last Transition Time: 2021-03-02T07:06:21Z - Message: Successfully completed the modification process. 
- Observed Generation: 1 - Reason: Successful - Status: True - Type: Successful - Observed Generation: 1 - Phase: Successful -Events: - Type Reason Age From Message - ---- ------ ---- ---- ------- - Normal PauseDatabase 2m55s KubeDB Ops-manager operator Pausing Kafka demo/mg-replicaset - Normal PauseDatabase 2m55s KubeDB Ops-manager operator Successfully paused Kafka demo/mg-replicaset - Normal ReconfigureReplicaset 65s KubeDB Ops-manager operator Successfully Reconfigured Kafka - Normal ResumeDatabase 65s KubeDB Ops-manager operator Resuming Kafka demo/mg-replicaset - Normal ResumeDatabase 65s KubeDB Ops-manager operator Successfully resumed Kafka demo/mg-replicaset - Normal Successful 65s KubeDB Ops-manager operator Successfully Reconfigured Database -``` - -Now let's connect to a mongodb instance and run a mongodb internal command to check the new configuration we have provided. - -```bash -$ kubectl exec -n demo mg-replicaset-0 -- mongo admin -u root -p nrKuxni0wDSMrgwy --eval "db._adminCommand( {getCmdLineOpts: 1})" --quiet -{ - "argv" : [ - "mongod", - "--dbpath=/data/db", - "--auth", - "--ipv6", - "--bind_ip_all", - "--port=27017", - "--tlsMode=disabled", - "--replSet=rs0", - "--keyFile=/data/configdb/key.txt", - "--clusterAuthMode=keyFile", - "--config=/data/configdb/mongod.conf" - ], - "parsed" : { - "config" : "/data/configdb/mongod.conf", - "net" : { - "bindIp" : "*", - "ipv6" : true, - "maxIncomingConnections" : 20000, - "port" : 27017, - "tls" : { - "mode" : "disabled" - } - }, - "replication" : { - "replSet" : "rs0" - }, - "security" : { - "authorization" : "enabled", - "clusterAuthMode" : "keyFile", - "keyFile" : "/data/configdb/key.txt" - }, - "storage" : { - "dbPath" : "/data/db" - } - }, - "ok" : 1, - "$clusterTime" : { - "clusterTime" : Timestamp(1614668887, 1), - "signature" : { - "hash" : BinData(0,"5q35Y51+YpbVHFKoaU7lUWi38oY="), - "keyId" : NumberLong("6934943333319966722") - } - }, - "operationTime" : Timestamp(1614668887, 1) -} -``` - -As we can see from the configuration of ready mongodb, the value of `maxIncomingConnections` has been changed from `10000` to `20000`. So the reconfiguration of the database is successful. - - -### Reconfigure using apply config - -Now we will reconfigure this database again to set `maxIncomingConnections` to `30000`. This time we won't use a new secret. We will use the `applyConfig` field of the `KafkaOpsRequest`. This will merge the new config in the existing secret. - -#### Create KafkaOpsRequest - -Now, we will use the new configuration in the `applyConfig` field in the `KafkaOpsRequest` CR. The `KafkaOpsRequest` yaml is given below, - -```yaml -apiVersion: ops.kubedb.com/v1alpha1 -kind: KafkaOpsRequest -metadata: - name: mops-reconfigure-apply-replicaset - namespace: demo -spec: - type: Reconfigure - databaseRef: - name: mg-replicaset - configuration: - replicaSet: - applyConfig: - mongod.conf: |- - net: - maxIncomingConnections: 30000 - readinessCriteria: - oplogMaxLagSeconds: 20 - objectsCountDiffPercentage: 10 - timeout: 5m - apply: IfReady -``` - -Here, - -- `spec.databaseRef.name` specifies that we are reconfiguring `mops-reconfigure-apply-replicaset` database. -- `spec.type` specifies that we are performing `Reconfigure` on our database. -- `spec.configuration.replicaSet.applyConfig` specifies the new configuration that will be merged in the existing secret. -- `spec.customConfig.arbiter.configSecret.name` could also be specified with a config-secret. 
-- Have a look [here](/docs/guides/mongodb/concepts/opsrequest.md#specreadinesscriteria) on the respective sections to understand the `readinessCriteria`, `timeout` & `apply` fields. - -Let's create the `KafkaOpsRequest` CR we have shown above, - -```bash -$ kubectl apply -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/mongodb/reconfigure/mops-reconfigure-apply-replicaset.yaml -mongodbopsrequest.ops.kubedb.com/mops-reconfigure-apply-replicaset created -``` - -#### Verify the new configuration is working - -If everything goes well, `KubeDB` Ops-manager operator will merge this new config with the existing configuration. - -Let's wait for `KafkaOpsRequest` to be `Successful`. Run the following command to watch `KafkaOpsRequest` CR, - -```bash -$ watch kubectl get mongodbopsrequest -n demo -Every 2.0s: kubectl get mongodbopsrequest -n demo -NAME TYPE STATUS AGE -mops-reconfigure-apply-replicaset Reconfigure Successful 109s -``` - -We can see from the above output that the `KafkaOpsRequest` has succeeded. If we describe the `KafkaOpsRequest` we will get an overview of the steps that were followed to reconfigure the database. - -```bash -$ kubectl describe mongodbopsrequest -n demo mops-reconfigure-apply-replicaset -Name: mops-reconfigure-apply-replicaset -Namespace: demo -Labels: -Annotations: -API Version: ops.kubedb.com/v1alpha1 -Kind: KafkaOpsRequest -Metadata: - Creation Timestamp: 2021-03-02T07:09:39Z - Generation: 1 - Managed Fields: - API Version: ops.kubedb.com/v1alpha1 - Fields Type: FieldsV1 - fieldsV1: - f:metadata: - f:annotations: - .: - f:kubectl.kubernetes.io/last-applied-configuration: - f:spec: - .: - f:apply: - f:configuration: - .: - f:replicaSet: - .: - f:applyConfig: - f:databaseRef: - .: - f:name: - f:readinessCriteria: - .: - f:objectsCountDiffPercentage: - f:oplogMaxLagSeconds: - f:timeout: - f:type: - Manager: kubectl-client-side-apply - Operation: Update - Time: 2021-03-02T07:09:39Z - API Version: ops.kubedb.com/v1alpha1 - Fields Type: FieldsV1 - fieldsV1: - f:spec: - f:configuration: - f:replicaSet: - f:podTemplate: - .: - f:controller: - f:metadata: - f:spec: - .: - f:resources: - f:status: - .: - f:conditions: - f:observedGeneration: - f:phase: - Manager: kubedb-enterprise - Operation: Update - Time: 2021-03-02T07:09:39Z - Resource Version: 31005 - Self Link: /apis/ops.kubedb.com/v1alpha1/namespaces/demo/mongodbopsrequests/mops-reconfigure-apply-replicaset - UID: 0137442b-1b04-43ed-8de7-ecd913b44065 -Spec: - Apply: IfReady - Configuration: - Replica Set: - Apply Config: net: - maxIncomingConnections: 30000 - - Database Ref: - Name: mg-replicaset - Readiness Criteria: - Objects Count Diff Percentage: 10 - Oplog Max Lag Seconds: 20 - Timeout: 5m - Type: Reconfigure -Status: - Conditions: - Last Transition Time: 2021-03-02T07:09:39Z - Message: Kafka ops request is reconfiguring database - Observed Generation: 1 - Reason: Reconfigure - Status: True - Type: Reconfigure - Last Transition Time: 2021-03-02T07:11:14Z - Message: Successfully Reconfigured Kafka - Observed Generation: 1 - Reason: ReconfigureReplicaset - Status: True - Type: ReconfigureReplicaset - Last Transition Time: 2021-03-02T07:11:14Z - Message: Successfully completed the modification process. 
- Observed Generation: 1 - Reason: Successful - Status: True - Type: Successful - Observed Generation: 1 - Phase: Successful -Events: - Type Reason Age From Message - ---- ------ ---- ---- ------- - Normal PauseDatabase 9m20s KubeDB Ops-manager operator Pausing Kafka demo/mg-replicaset - Normal PauseDatabase 9m20s KubeDB Ops-manager operator Successfully paused Kafka demo/mg-replicaset - Normal ReconfigureReplicaset 7m45s KubeDB Ops-manager operator Successfully Reconfigured Kafka - Normal ResumeDatabase 7m45s KubeDB Ops-manager operator Resuming Kafka demo/mg-replicaset - Normal ResumeDatabase 7m45s KubeDB Ops-manager operator Successfully resumed Kafka demo/mg-replicaset - Normal Successful 7m45s KubeDB Ops-manager operator Successfully Reconfigured Database -``` - -Now let's connect to a mongodb instance and run a mongodb internal command to check the new configuration we have provided. - -```bash -$ kubectl exec -n demo mg-replicaset-0 -- mongo admin -u root -p nrKuxni0wDSMrgwy --eval "db._adminCommand( {getCmdLineOpts: 1})" --quiet -{ - "argv" : [ - "mongod", - "--dbpath=/data/db", - "--auth", - "--ipv6", - "--bind_ip_all", - "--port=27017", - "--tlsMode=disabled", - "--replSet=rs0", - "--keyFile=/data/configdb/key.txt", - "--clusterAuthMode=keyFile", - "--config=/data/configdb/mongod.conf" - ], - "parsed" : { - "config" : "/data/configdb/mongod.conf", - "net" : { - "bindIp" : "*", - "ipv6" : true, - "maxIncomingConnections" : 30000, - "port" : 27017, - "tls" : { - "mode" : "disabled" - } - }, - "replication" : { - "replSet" : "rs0" - }, - "security" : { - "authorization" : "enabled", - "clusterAuthMode" : "keyFile", - "keyFile" : "/data/configdb/key.txt" - }, - "storage" : { - "dbPath" : "/data/db" - } - }, - "ok" : 1, - "$clusterTime" : { - "clusterTime" : Timestamp(1614669580, 1), - "signature" : { - "hash" : BinData(0,"u/xTAa4aW/8bsRvBYPffwQCeTF0="), - "keyId" : NumberLong("6934943333319966722") - } - }, - "operationTime" : Timestamp(1614669580, 1) -} -``` - -As we can see from the configuration of ready mongodb, the value of `maxIncomingConnections` has been changed from `20000` to `30000`. So the reconfiguration of the database using the `applyConfig` field is successful. - - -## Cleaning Up - -To clean up the Kubernetes resources created by this tutorial, run: - -```bash -kubectl delete mg -n demo mg-replicaset -kubectl delete mongodbopsrequest -n demo mops-reconfigure-replicaset mops-reconfigure-apply-replicaset -``` \ No newline at end of file diff --git a/docs/guides/kafka/reconfigure/kafka-combined.md b/docs/guides/kafka/reconfigure/kafka-combined.md new file mode 100644 index 0000000000..d209dea624 --- /dev/null +++ b/docs/guides/kafka/reconfigure/kafka-combined.md @@ -0,0 +1,506 @@ +--- +title: Reconfigure Kafka Combined +menu: + docs_{{ .version }}: + identifier: kf-reconfigure-combined + name: Combined + parent: kf-reconfigure + weight: 30 +menu_name: docs_{{ .version }} +section_menu_id: guides +--- + +> New to KubeDB? Please start [here](/docs/README.md). + +# Reconfigure Kafka Combined Cluster + +This guide will show you how to use `KubeDB` Ops-manager operator to reconfigure a Kafka Combined cluster. + +## Before You Begin + +- At first, you need to have a Kubernetes cluster, and the `kubectl` command-line tool must be configured to communicate with your cluster. + +- Install `KubeDB` Provisioner and Ops-manager operator in your cluster following the steps [here](/docs/setup/README.md). 
+ +- You should be familiar with the following `KubeDB` concepts: + - [Kafka](/docs/guides/kafka/concepts/kafka.md) + - [Combined](/docs/guides/kafka/clustering/combined-cluster/index.md) + - [KafkaOpsRequest](/docs/guides/kafka/concepts/kafkaopsrequest.md) + - [Reconfigure Overview](/docs/guides/kafka/reconfigure/overview.md) + +To keep everything isolated, we are going to use a separate namespace called `demo` throughout this tutorial. + +```bash +$ kubectl create ns demo +namespace/demo created +``` + +> **Note:** YAML files used in this tutorial are stored in [docs/examples/kafka](/docs/examples/kafka) directory of [kubedb/docs](https://github.com/kubedb/docs) repository. + +Now, we are going to deploy a `Kafka` Combined cluster using a supported version by `KubeDB` operator. Then we are going to apply `KafkaOpsRequest` to reconfigure its configuration. + +### Prepare Kafka Combined Cluster + +Now, we are going to deploy a `Kafka` combined cluster with version `3.6.1`. + +### Deploy Kafka + +At first, we will create a secret with the `server.properties` file containing required configuration settings. + +**server.properties:** + +```properties +log.retention.hours=100 +``` +Here, `log.retention.hours` is set to `100`, whereas the default value is `168`. + +Let's create a k8s secret containing the above configuration where the file name will be the key and the file-content as the value: + +```yaml +apiVersion: v1 +kind: Secret +metadata: + name: kf-combined-custom-config + namespace: demo +stringData: + server.properties: |- + log.retention.hours=100 +``` + +```bash +$ kubectl apply -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/kafka/reconfigure/kafka-combined-custom-config.yaml +secret/kf-combined-custom-config created +``` + +In this section, we are going to create a Kafka object specifying `spec.configSecret` field to apply this custom configuration. Below is the YAML of the `Kafka` CR that we are going to create, + +```yaml +apiVersion: kubedb.com/v1 +kind: Kafka +metadata: + name: kafka-dev + namespace: demo +spec: + replicas: 2 + version: 3.6.1 + configSecret: + name: kf-combined-custom-config + storage: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi + storageClassName: standard + storageType: Durable + deletionPolicy: WipeOut +``` + +Let's create the `Kafka` CR we have shown above, + +```bash +$ kubectl create -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/kafka/reconfigure/kafka-combined.yaml +kafka.kubedb.com/kafka-dev created +``` + +Now, wait until `kafka-dev` has status `Ready`. i.e, + +```bash +$ kubectl get kf -n demo -w +NAME TYPE VERSION STATUS AGE +kafka-dev kubedb.com/v1 3.6.1 Provisioning 0s +kafka-dev kubedb.com/v1 3.6.1 Provisioning 24s +. +. +kafka-dev kubedb.com/v1 3.6.1 Ready 92s +``` + +Now, we will check if the kafka has started with the custom configuration we have provided. 
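+
+As an optional quick check (not part of the original walkthrough), you can first confirm that the Kafka object actually references the custom config secret before exec-ing into a pod. The command below assumes the `kafka-dev` object and secret name used above:
+
+```bash
+$ kubectl get kf -n demo kafka-dev -o jsonpath='{.spec.configSecret.name}'
+kf-combined-custom-config
+```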
+ +Exec into the Kafka pod and execute the following commands to see the configurations: +```bash +$ kubectl exec -it -n demo kafka-dev-0 -- bash +kafka@kafka-dev-0:~$ kafka-configs.sh --bootstrap-server localhost:9092 --command-config /opt/kafka/config/clientauth.properties --describe --entity-type brokers --all | grep log.retention.hours + log.retention.hours=100 sensitive=false synonyms={STATIC_BROKER_CONFIG:log.retention.hours=100, DEFAULT_CONFIG:log.retention.hours=168} + log.retention.hours=100 sensitive=false synonyms={STATIC_BROKER_CONFIG:log.retention.hours=100, DEFAULT_CONFIG:log.retention.hours=168} +``` +Here, we can see that our given configuration is applied to the Kafka cluster for all brokers. `log.retention.hours` is set to `100` from the default value `168`. + +### Reconfigure using new config secret + +Now we will reconfigure this cluster to set `log.retention.hours` to `125`. + +Now, update our `server.properties` file with the new configuration. + +**server.properties:** + +```properties +log.retention.hours=125 +``` + +Then, we will create a new secret with this configuration file. + +```yaml +apiVersion: v1 +kind: Secret +metadata: + name: new-kf-combined-custom-config + namespace: demo +stringData: + server.properties: |- + log.retention.hours=125 +``` + +```bash +$ kubectl apply -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/kafka/reconfigure/new-kafka-combined-custom-config.yaml +secret/new-kf-combined-custom-config created +``` + +#### Create KafkaOpsRequest + +Now, we will use this secret to replace the previous secret using a `KafkaOpsRequest` CR. The `KafkaOpsRequest` yaml is given below, + +```yaml +apiVersion: ops.kubedb.com/v1alpha1 +kind: KafkaOpsRequest +metadata: + name: kfops-reconfigure-combined + namespace: demo +spec: + type: Reconfigure + databaseRef: + name: kafka-dev + configuration: + configSecret: + name: new-kf-combined-custom-config + timeout: 5m + apply: IfReady +``` + +Here, + +- `spec.databaseRef.name` specifies that we are reconfiguring `kafka-dev` database. +- `spec.type` specifies that we are performing `Reconfigure` on our database. +- `spec.configSecret.name` specifies the name of the new secret. + +Let's create the `KafkaOpsRequest` CR we have shown above, + +```bash +$ kubectl apply -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/kafka/reconfigure/kafka-reconfigure-update-combined.yaml +kafkaopsrequest.ops.kubedb.com/kfops-reconfigure-combined created +``` + +#### Verify the new configuration is working + +If everything goes well, `KubeDB` Ops-manager operator will update the `configSecret` of `Kafka` object. + +Let's wait for `KafkaOpsRequest` to be `Successful`. Run the following command to watch `KafkaOpsRequest` CR, + +```bash +$ kubectl get kafkaopsrequests -n demo +NAME TYPE STATUS AGE +kfops-reconfigure-combined Reconfigure Successful 4m55s +``` + +We can see from the above output that the `KafkaOpsRequest` has succeeded. If we describe the `KafkaOpsRequest` we will get an overview of the steps that were followed to reconfigure the database. 
+ +```bash +$ kubectl describe kafkaopsrequest -n demo kfops-reconfigure-combined +Name: kfops-reconfigure-combined +Namespace: demo +Labels: +Annotations: +API Version: ops.kubedb.com/v1alpha1 +Kind: KafkaOpsRequest +Metadata: + Creation Timestamp: 2024-08-01T09:14:46Z + Generation: 1 + Resource Version: 258361 + UID: ac2147ba-51cf-4ebf-8328-76253379108c +Spec: + Apply: IfReady + Configuration: + Config Secret: + Name: new-kf-combined-custom-config + Database Ref: + Name: kafka-dev + Timeout: 5m + Type: Reconfigure +Status: + Conditions: + Last Transition Time: 2024-08-01T09:14:46Z + Message: Kafka ops-request has started to reconfigure kafka nodes + Observed Generation: 1 + Reason: Reconfigure + Status: True + Type: Reconfigure + Last Transition Time: 2024-08-01T09:14:55Z + Message: successfully reconciled the Kafka with new configure + Observed Generation: 1 + Reason: UpdatePetSets + Status: True + Type: UpdatePetSets + Last Transition Time: 2024-08-01T09:15:00Z + Message: get pod; ConditionStatus:True; PodName:kafka-dev-0 + Observed Generation: 1 + Status: True + Type: GetPod--kafka-dev-0 + Last Transition Time: 2024-08-01T09:15:00Z + Message: evict pod; ConditionStatus:True; PodName:kafka-dev-0 + Observed Generation: 1 + Status: True + Type: EvictPod--kafka-dev-0 + Last Transition Time: 2024-08-01T09:16:15Z + Message: check pod running; ConditionStatus:True; PodName:kafka-dev-0 + Observed Generation: 1 + Status: True + Type: CheckPodRunning--kafka-dev-0 + Last Transition Time: 2024-08-01T09:16:20Z + Message: get pod; ConditionStatus:True; PodName:kafka-dev-1 + Observed Generation: 1 + Status: True + Type: GetPod--kafka-dev-1 + Last Transition Time: 2024-08-01T09:16:20Z + Message: evict pod; ConditionStatus:True; PodName:kafka-dev-1 + Observed Generation: 1 + Status: True + Type: EvictPod--kafka-dev-1 + Last Transition Time: 2024-08-01T09:17:20Z + Message: check pod running; ConditionStatus:True; PodName:kafka-dev-1 + Observed Generation: 1 + Status: True + Type: CheckPodRunning--kafka-dev-1 + Last Transition Time: 2024-08-01T09:17:25Z + Message: Successfully restarted all nodes + Observed Generation: 1 + Reason: RestartNodes + Status: True + Type: RestartNodes + Last Transition Time: 2024-08-01T09:17:25Z + Message: Successfully completed reconfigure kafka + Observed Generation: 1 + Reason: Successful + Status: True + Type: Successful + Observed Generation: 1 + Phase: Successful +Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Normal Starting 5m32s KubeDB Ops-manager Operator Start processing for KafkaOpsRequest: demo/kfops-reconfigure-combined + Normal Starting 5m32s KubeDB Ops-manager Operator Pausing Kafka databse: demo/kafka-dev + Normal Successful 5m32s KubeDB Ops-manager Operator Successfully paused Kafka database: demo/kafka-dev for KafkaOpsRequest: kfops-reconfigure-combined + Normal UpdatePetSets 5m23s KubeDB Ops-manager Operator successfully reconciled the Kafka with new configure + Warning get pod; ConditionStatus:True; PodName:kafka-dev-0 5m18s KubeDB Ops-manager Operator get pod; ConditionStatus:True; PodName:kafka-dev-0 + Warning evict pod; ConditionStatus:True; PodName:kafka-dev-0 5m18s KubeDB Ops-manager Operator evict pod; ConditionStatus:True; PodName:kafka-dev-0 + Warning check pod running; ConditionStatus:False; PodName:kafka-dev-0 5m13s KubeDB Ops-manager Operator check pod running; ConditionStatus:False; PodName:kafka-dev-0 + Warning check pod running; ConditionStatus:True; PodName:kafka-dev-0 4m3s KubeDB Ops-manager Operator check pod 
running; ConditionStatus:True; PodName:kafka-dev-0 + Warning get pod; ConditionStatus:True; PodName:kafka-dev-1 3m58s KubeDB Ops-manager Operator get pod; ConditionStatus:True; PodName:kafka-dev-1 + Warning evict pod; ConditionStatus:True; PodName:kafka-dev-1 3m58s KubeDB Ops-manager Operator evict pod; ConditionStatus:True; PodName:kafka-dev-1 + Warning check pod running; ConditionStatus:False; PodName:kafka-dev-1 3m53s KubeDB Ops-manager Operator check pod running; ConditionStatus:False; PodName:kafka-dev-1 + Warning check pod running; ConditionStatus:True; PodName:kafka-dev-1 2m58s KubeDB Ops-manager Operator check pod running; ConditionStatus:True; PodName:kafka-dev-1 + Normal RestartNodes 2m53s KubeDB Ops-manager Operator Successfully restarted all nodes + Normal Starting 2m53s KubeDB Ops-manager Operator Resuming Kafka database: demo/kafka-dev + Normal Successful 2m53s KubeDB Ops-manager Operator Successfully resumed Kafka database: demo/kafka-dev for KafkaOpsRequest: kfops-reconfigure-combined +``` + +Now let's exec one of the instance and run a kafka-configs.sh command to check the new configuration we have provided. + +```bash +$ kubectl exec -it -n demo kafka-dev-0 -- kafka-configs.sh --bootstrap-server localhost:9092 --command-config /opt/kafka/config/clientauth.properties --describe --entity-type brokers --all | grep 'log.retention.hours' + log.retention.hours=125 sensitive=false synonyms={STATIC_BROKER_CONFIG:log.retention.hours=125, DEFAULT_CONFIG:log.retention.hours=168} + log.retention.hours=125 sensitive=false synonyms={STATIC_BROKER_CONFIG:log.retention.hours=125, DEFAULT_CONFIG:log.retention.hours=168} +``` + +As we can see from the configuration of ready kafka, the value of `log.retention.hours` has been changed from `100` to `125`. So the reconfiguration of the cluster is successful. + + +### Reconfigure using apply config + +Now we will reconfigure this cluster again to set `log.retention.hours` to `150`. This time we won't use a new secret. We will use the `applyConfig` field of the `KafkaOpsRequest`. This will merge the new config in the existing secret. + +#### Create KafkaOpsRequest + +Now, we will use the new configuration in the `applyConfig` field in the `KafkaOpsRequest` CR. The `KafkaOpsRequest` yaml is given below, + +```yaml +apiVersion: ops.kubedb.com/v1alpha1 +kind: KafkaOpsRequest +metadata: + name: kfops-reconfigure-apply-combined + namespace: demo +spec: + type: Reconfigure + databaseRef: + name: kafka-dev + configuration: + applyConfig: + server.properties: |- + log.retention.hours=150 + timeout: 5m + apply: IfReady +``` + +Here, + +- `spec.databaseRef.name` specifies that we are reconfiguring `kafka-dev` cluster. +- `spec.type` specifies that we are performing `Reconfigure` on kafka. +- `spec.configuration.applyConfig` specifies the new configuration that will be merged in the existing secret. + +Let's create the `KafkaOpsRequest` CR we have shown above, + +```bash +$ kubectl apply -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/kafka/reconfigure/kafka-reconfigure-apply-combined.yaml +kafkaopsrequest.ops.kubedb.com/kfops-reconfigure-apply-combined created +``` + +#### Verify the new configuration is working + +If everything goes well, `KubeDB` Ops-manager operator will merge this new config with the existing configuration. + +Let's wait for `KafkaOpsRequest` to be `Successful`. 
Run the following command to watch `KafkaOpsRequest` CR, + +```bash +$ kubectl get kafkaopsrequests -n demo kfops-reconfigure-apply-combined +NAME TYPE STATUS AGE +kfops-reconfigure-apply-combined Reconfigure Successful 55s +``` + +We can see from the above output that the `KafkaOpsRequest` has succeeded. If we describe the `KafkaOpsRequest` we will get an overview of the steps that were followed to reconfigure the cluster. + +```bash +$ kubectl describe kafkaopsrequest -n demo kfops-reconfigure-apply-combined +Name: kfops-reconfigure-apply-combined +Namespace: demo +Labels: +Annotations: +API Version: ops.kubedb.com/v1alpha1 +Kind: KafkaOpsRequest +Metadata: + Creation Timestamp: 2024-08-01T09:27:03Z + Generation: 1 + Resource Version: 259123 + UID: fdc46ef0-e2ae-490a-aab8-6a3380ec09d1 +Spec: + Apply: IfReady + Configuration: + Apply Config: + server.properties: log.retention.hours=150 + Database Ref: + Name: kafka-dev + Timeout: 5m + Type: Reconfigure +Status: + Conditions: + Last Transition Time: 2024-08-01T09:27:03Z + Message: Kafka ops-request has started to reconfigure kafka nodes + Observed Generation: 1 + Reason: Reconfigure + Status: True + Type: Reconfigure + Last Transition Time: 2024-08-01T09:27:06Z + Message: Successfully prepared user provided custom config secret + Observed Generation: 1 + Reason: PrepareCustomConfig + Status: True + Type: PrepareCustomConfig + Last Transition Time: 2024-08-01T09:27:12Z + Message: successfully reconciled the Kafka with new configure + Observed Generation: 1 + Reason: UpdatePetSets + Status: True + Type: UpdatePetSets + Last Transition Time: 2024-08-01T09:27:17Z + Message: get pod; ConditionStatus:True; PodName:kafka-dev-0 + Observed Generation: 1 + Status: True + Type: GetPod--kafka-dev-0 + Last Transition Time: 2024-08-01T09:27:17Z + Message: evict pod; ConditionStatus:True; PodName:kafka-dev-0 + Observed Generation: 1 + Status: True + Type: EvictPod--kafka-dev-0 + Last Transition Time: 2024-08-01T09:27:27Z + Message: check pod running; ConditionStatus:True; PodName:kafka-dev-0 + Observed Generation: 1 + Status: True + Type: CheckPodRunning--kafka-dev-0 + Last Transition Time: 2024-08-01T09:27:32Z + Message: get pod; ConditionStatus:True; PodName:kafka-dev-1 + Observed Generation: 1 + Status: True + Type: GetPod--kafka-dev-1 + Last Transition Time: 2024-08-01T09:27:32Z + Message: evict pod; ConditionStatus:True; PodName:kafka-dev-1 + Observed Generation: 1 + Status: True + Type: EvictPod--kafka-dev-1 + Last Transition Time: 2024-08-01T09:27:52Z + Message: check pod running; ConditionStatus:True; PodName:kafka-dev-1 + Observed Generation: 1 + Status: True + Type: CheckPodRunning--kafka-dev-1 + Last Transition Time: 2024-08-01T09:27:57Z + Message: Successfully restarted all nodes + Observed Generation: 1 + Reason: RestartNodes + Status: True + Type: RestartNodes + Last Transition Time: 2024-08-01T09:27:57Z + Message: Successfully completed reconfigure kafka + Observed Generation: 1 + Reason: Successful + Status: True + Type: Successful + Observed Generation: 1 + Phase: Successful +Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Normal Starting 2m7s KubeDB Ops-manager Operator Start processing for KafkaOpsRequest: demo/kfops-reconfigure-apply-combined + Normal Starting 2m7s KubeDB Ops-manager Operator Pausing Kafka databse: demo/kafka-dev + Normal Successful 2m7s KubeDB Ops-manager Operator Successfully paused Kafka database: demo/kafka-dev for KafkaOpsRequest: kfops-reconfigure-apply-combined + Normal UpdatePetSets 
118s KubeDB Ops-manager Operator successfully reconciled the Kafka with new configure + Warning get pod; ConditionStatus:True; PodName:kafka-dev-0 113s KubeDB Ops-manager Operator get pod; ConditionStatus:True; PodName:kafka-dev-0 + Warning evict pod; ConditionStatus:True; PodName:kafka-dev-0 113s KubeDB Ops-manager Operator evict pod; ConditionStatus:True; PodName:kafka-dev-0 + Warning check pod running; ConditionStatus:False; PodName:kafka-dev-0 108s KubeDB Ops-manager Operator check pod running; ConditionStatus:False; PodName:kafka-dev-0 + Warning check pod running; ConditionStatus:True; PodName:kafka-dev-0 103s KubeDB Ops-manager Operator check pod running; ConditionStatus:True; PodName:kafka-dev-0 + Warning get pod; ConditionStatus:True; PodName:kafka-dev-1 98s KubeDB Ops-manager Operator get pod; ConditionStatus:True; PodName:kafka-dev-1 + Warning evict pod; ConditionStatus:True; PodName:kafka-dev-1 98s KubeDB Ops-manager Operator evict pod; ConditionStatus:True; PodName:kafka-dev-1 + Warning check pod running; ConditionStatus:False; PodName:kafka-dev-1 93s KubeDB Ops-manager Operator check pod running; ConditionStatus:False; PodName:kafka-dev-1 + Warning check pod running; ConditionStatus:True; PodName:kafka-dev-1 78s KubeDB Ops-manager Operator check pod running; ConditionStatus:True; PodName:kafka-dev-1 + Normal RestartNodes 73s KubeDB Ops-manager Operator Successfully restarted all nodes + Normal Starting 73s KubeDB Ops-manager Operator Resuming Kafka database: demo/kafka-dev + Normal Successful 73s KubeDB Ops-manager Operator Successfully resumed Kafka database: demo/kafka-dev for KafkaOpsRequest: kfops-reconfigure-apply-combined +``` + +Now let's exec into one of the instance and run a `kafka-configs.sh` command to check the new configuration we have provided. + +```bash +$ kubectl exec -it -n demo kafka-dev-0 -- kafka-configs.sh --bootstrap-server localhost:9092 --command-config /opt/kafka/config/clientauth.properties --describe --entity-type brokers --all | grep 'log.retention.hours' + log.retention.hours=150 sensitive=false synonyms={STATIC_BROKER_CONFIG:log.retention.hours=150, DEFAULT_CONFIG:log.retention.hours=168} + log.retention.hours=150 sensitive=false synonyms={STATIC_BROKER_CONFIG:log.retention.hours=150, DEFAULT_CONFIG:log.retention.hours=168} +``` + +As we can see from the configuration of ready kafka, the value of `log.retention.hours` has been changed from `125` to `150`. So the reconfiguration of the database using the `applyConfig` field is successful. + + +## Cleaning Up + +To clean up the Kubernetes resources created by this tutorial, run: + +```bash +kubectl delete kf -n demo kafka-dev +kubectl delete kafkaopsrequest -n demo kfops-reconfigure-apply-combined kfops-reconfigure-combined +kubectl delete secret -n demo kf-combined-custom-config new-kf-combined-custom-config +kubectl delete namespace demo +``` + +## Next Steps + +- Detail concepts of [Kafka object](/docs/guides/kafka/concepts/kafka.md). +- Different Kafka topology clustering modes [here](/docs/guides/kafka/clustering/_index.md). +- Monitor your Kafka database with KubeDB using [out-of-the-box Prometheus operator](/docs/guides/kafka/monitoring/using-prometheus-operator.md). + +[//]: # (- Monitor your Kafka database with KubeDB using [out-of-the-box builtin-Prometheus](/docs/guides/kafka/monitoring/using-builtin-prometheus.md).) +- Want to hack on KubeDB? Check our [contribution guidelines](/docs/CONTRIBUTING.md). 
diff --git a/docs/guides/kafka/reconfigure/kafka-topology.md b/docs/guides/kafka/reconfigure/kafka-topology.md new file mode 100644 index 0000000000..b9167a1e77 --- /dev/null +++ b/docs/guides/kafka/reconfigure/kafka-topology.md @@ -0,0 +1,625 @@ +--- +title: Reconfigure Kafka Topology +menu: + docs_{{ .version }}: + identifier: kf-reconfigure-topology + name: Topology + parent: kf-reconfigure + weight: 30 +menu_name: docs_{{ .version }} +section_menu_id: guides +--- + +> New to KubeDB? Please start [here](/docs/README.md). + +# Reconfigure Kafka Topology Cluster + +This guide will show you how to use `KubeDB` Ops-manager operator to reconfigure a Kafka Topology cluster. + +## Before You Begin + +- At first, you need to have a Kubernetes cluster, and the `kubectl` command-line tool must be configured to communicate with your cluster. + +- Install `KubeDB` Provisioner and Ops-manager operator in your cluster following the steps [here](/docs/setup/README.md). + +- You should be familiar with the following `KubeDB` concepts: + - [Kafka](/docs/guides/kafka/concepts/kafka.md) + - [Topology](/docs/guides/kafka/clustering/topology-cluster/index.md) + - [KafkaOpsRequest](/docs/guides/kafka/concepts/kafkaopsrequest.md) + - [Reconfigure Overview](/docs/guides/kafka/reconfigure/overview.md) + +To keep everything isolated, we are going to use a separate namespace called `demo` throughout this tutorial. + +```bash +$ kubectl create ns demo +namespace/demo created +``` + +> **Note:** YAML files used in this tutorial are stored in [docs/examples/kafka](/docs/examples/kafka) directory of [kubedb/docs](https://github.com/kubedb/docs) repository. + +Now, we are going to deploy a `Kafka` Topology cluster using a version supported by the `KubeDB` operator. Then we are going to apply `KafkaOpsRequest` to reconfigure its configuration. + +### Prepare Kafka Topology Cluster + +Now, we are going to deploy a `Kafka` topology cluster with version `3.6.1`. + +### Deploy Kafka + +At first, we will create a secret with the `broker.properties` and `controller.properties` files containing the required configuration settings. + +**broker.properties:** + +```properties +log.retention.hours=100 +``` + +**controller.properties:** + +```properties +controller.quorum.election.timeout.ms=2000 +``` + +Here, `log.retention.hours` is set to `100` for the broker, whereas the default value is `168`, and `controller.quorum.election.timeout.ms` is set to `2000` for the controller. + +Let's create a Kubernetes secret containing the above configuration, where each file name is the key and the file content is the value: + +```yaml +apiVersion: v1 +kind: Secret +metadata: + name: kf-topology-custom-config + namespace: demo +stringData: + broker.properties: |- + log.retention.hours=100 + controller.properties: |- + controller.quorum.election.timeout.ms=2000 +``` + +```bash +$ kubectl apply -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/kafka/reconfigure/kafka-topology-custom-config.yaml +secret/kf-topology-custom-config created +``` + +In this section, we are going to create a Kafka object specifying the `spec.configSecret` field to apply this custom configuration.
Below is the YAML of the `Kafka` CR that we are going to create, + +```yaml +apiVersion: kubedb.com/v1 +kind: Kafka +metadata: + name: kafka-prod + namespace: demo +spec: + version: 3.6.1 + configSecret: + name: kf-topology-custom-config + topology: + broker: + replicas: 2 + storage: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi + storageClassName: standard + controller: + replicas: 2 + storage: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi + storageClassName: standard + storageType: Durable + deletionPolicy: WipeOut +``` + +Let's create the `Kafka` CR we have shown above, + +```bash +$ kubectl create -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/kafka/reconfigure/kafka-topology.yaml +kafka.kubedb.com/kafka-prod created +``` + +Now, wait until `kafka-prod` has status `Ready`. i.e, + +```bash +$ kubectl get kf -n demo -w +NAME TYPE VERSION STATUS AGE +kafka-prod kubedb.com/v1 3.6.1 Provisioning 0s +kafka-prod kubedb.com/v1 3.6.1 Provisioning 24s +. +. +kafka-prod kubedb.com/v1 3.6.1 Ready 92s +``` + +Now, we will check if the kafka has started with the custom configuration we have provided. + +Exec into the Kafka pod and execute the following commands to see the configurations: +```bash +$ kubectl exec -it -n demo kafka-prod-broker-0 -- bash +kafka@kafka-prod-broker-0:~$ kafka-configs.sh --bootstrap-server localhost:9092 --command-config /opt/kafka/config/clientauth.properties --describe --entity-type brokers --all | grep log.retention.hours + log.retention.hours=100 sensitive=false synonyms={STATIC_BROKER_CONFIG:log.retention.hours=100, DEFAULT_CONFIG:log.retention.hours=168} + log.retention.hours=100 sensitive=false synonyms={STATIC_BROKER_CONFIG:log.retention.hours=100, DEFAULT_CONFIG:log.retention.hours=168} +``` +Here, we can see that our given configuration is applied to the Kafka cluster for all brokers. `log.retention.hours` is set to `100` from the default value `168`. + +### Reconfigure using new config secret + +Now we will reconfigure this cluster to set `log.retention.hours` to `125`. + +Now, update our `broker.properties` and `controller.properties` file with the new configuration. + +**broker.properties:** + +```properties +log.retention.hours=125 +``` + +**controller.properties:** + +```properties +controller.quorum.election.timeout.ms=3000 +controller.quorum.fetch.timeout.ms=4000 +``` + +Then, we will create a new secret with this configuration file. + +```yaml +apiVersion: v1 +kind: Secret +metadata: + name: new-kf-topology-custom-config + namespace: demo +stringData: + broker.properties: |- + log.retention.hours=125 + controller.properties: |- + controller.quorum.election.timeout.ms=3000 + controller.quorum.fetch.timeout.ms=4000 +``` + +```bash +$ kubectl apply -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/kafka/reconfigure/new-kafka-topology-custom-config.yaml +secret/new-kf-topology-custom-config created +``` + +#### Create KafkaOpsRequest + +Now, we will use this secret to replace the previous secret using a `KafkaOpsRequest` CR. 
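+Before applying the ops request, it can be useful to note which secret the Kafka object currently references; once the reconfigure succeeds, this field should point to the new secret. A minimal check (the field path comes from the Kafka CR shown earlier):
+
+```bash
+# Optional: print the config secret currently referenced by the Kafka object.
+$ kubectl get kafka -n demo kafka-prod -o jsonpath='{.spec.configSecret.name}'
+kf-topology-custom-config
+```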
The `KafkaOpsRequest` yaml is given below, + +```yaml +apiVersion: ops.kubedb.com/v1alpha1 +kind: KafkaOpsRequest +metadata: + name: kfops-reconfigure-topology + namespace: demo +spec: + type: Reconfigure + databaseRef: + name: kafka-prod + configuration: + configSecret: + name: new-kf-topology-custom-config + timeout: 5m + apply: IfReady +``` + +Here, + +- `spec.databaseRef.name` specifies that we are reconfiguring `kafka-prod` database. +- `spec.type` specifies that we are performing `Reconfigure` on our database. +- `spec.configSecret.name` specifies the name of the new secret. + +Let's create the `KafkaOpsRequest` CR we have shown above, + +```bash +$ kubectl apply -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/kafka/reconfigure/kafka-reconfigure-update-topology.yaml +kafkaopsrequest.ops.kubedb.com/kfops-reconfigure-topology created +``` + +#### Verify the new configuration is working + +If everything goes well, `KubeDB` Ops-manager operator will update the `configSecret` of `Kafka` object. + +Let's wait for `KafkaOpsRequest` to be `Successful`. Run the following command to watch `KafkaOpsRequest` CR, + +```bash +$ kubectl get kafkaopsrequests -n demo +NAME TYPE STATUS AGE +kfops-reconfigure-topology Reconfigure Successful 4m55s +``` + +We can see from the above output that the `KafkaOpsRequest` has succeeded. If we describe the `KafkaOpsRequest` we will get an overview of the steps that were followed to reconfigure the database. + +```bash +$ kubectl describe kafkaopsrequest -n demo kfops-reconfigure-topology +Name: kfops-reconfigure-topology +Namespace: demo +Labels: +Annotations: +API Version: ops.kubedb.com/v1alpha1 +Kind: KafkaOpsRequest +Metadata: + Creation Timestamp: 2024-08-02T05:08:37Z + Generation: 1 + Resource Version: 332491 + UID: b6e8cb1b-d29f-445e-bb01-60d29012c7eb +Spec: + Apply: IfReady + Configuration: + Config Secret: + Name: new-kf-topology-custom-config + Database Ref: + Name: kafka-prod + Timeout: 5m + Type: Reconfigure +Status: + Conditions: + Last Transition Time: 2024-08-02T05:08:37Z + Message: Kafka ops-request has started to reconfigure kafka nodes + Observed Generation: 1 + Reason: Reconfigure + Status: True + Type: Reconfigure + Last Transition Time: 2024-08-02T05:08:45Z + Message: check reconcile; ConditionStatus:False + Observed Generation: 1 + Status: False + Type: CheckReconcile + Last Transition Time: 2024-08-02T05:09:42Z + Message: successfully reconciled the Kafka with new configure + Observed Generation: 1 + Reason: UpdatePetSets + Status: True + Type: UpdatePetSets + Last Transition Time: 2024-08-02T05:09:47Z + Message: get pod; ConditionStatus:True; PodName:kafka-prod-controller-0 + Observed Generation: 1 + Status: True + Type: GetPod--kafka-prod-controller-0 + Last Transition Time: 2024-08-02T05:09:47Z + Message: evict pod; ConditionStatus:True; PodName:kafka-prod-controller-0 + Observed Generation: 1 + Status: True + Type: EvictPod--kafka-prod-controller-0 + Last Transition Time: 2024-08-02T05:10:02Z + Message: check pod running; ConditionStatus:True; PodName:kafka-prod-controller-0 + Observed Generation: 1 + Status: True + Type: CheckPodRunning--kafka-prod-controller-0 + Last Transition Time: 2024-08-02T05:10:07Z + Message: get pod; ConditionStatus:True; PodName:kafka-prod-controller-1 + Observed Generation: 1 + Status: True + Type: GetPod--kafka-prod-controller-1 + Last Transition Time: 2024-08-02T05:10:07Z + Message: evict pod; ConditionStatus:True; PodName:kafka-prod-controller-1 + Observed Generation: 1 + 
Status: True + Type: EvictPod--kafka-prod-controller-1 + Last Transition Time: 2024-08-02T05:10:22Z + Message: check pod running; ConditionStatus:True; PodName:kafka-prod-controller-1 + Observed Generation: 1 + Status: True + Type: CheckPodRunning--kafka-prod-controller-1 + Last Transition Time: 2024-08-02T05:10:27Z + Message: get pod; ConditionStatus:True; PodName:kafka-prod-broker-0 + Observed Generation: 1 + Status: True + Type: GetPod--kafka-prod-broker-0 + Last Transition Time: 2024-08-02T05:10:27Z + Message: evict pod; ConditionStatus:True; PodName:kafka-prod-broker-0 + Observed Generation: 1 + Status: True + Type: EvictPod--kafka-prod-broker-0 + Last Transition Time: 2024-08-02T05:11:12Z + Message: check pod running; ConditionStatus:True; PodName:kafka-prod-broker-0 + Observed Generation: 1 + Status: True + Type: CheckPodRunning--kafka-prod-broker-0 + Last Transition Time: 2024-08-02T05:11:17Z + Message: get pod; ConditionStatus:True; PodName:kafka-prod-broker-1 + Observed Generation: 1 + Status: True + Type: GetPod--kafka-prod-broker-1 + Last Transition Time: 2024-08-02T05:11:17Z + Message: evict pod; ConditionStatus:True; PodName:kafka-prod-broker-1 + Observed Generation: 1 + Status: True + Type: EvictPod--kafka-prod-broker-1 + Last Transition Time: 2024-08-02T05:11:32Z + Message: check pod running; ConditionStatus:True; PodName:kafka-prod-broker-1 + Observed Generation: 1 + Status: True + Type: CheckPodRunning--kafka-prod-broker-1 + Last Transition Time: 2024-08-02T05:11:37Z + Message: Successfully restarted all nodes + Observed Generation: 1 + Reason: RestartNodes + Status: True + Type: RestartNodes + Last Transition Time: 2024-08-02T05:11:39Z + Message: Successfully completed reconfigure kafka + Observed Generation: 1 + Reason: Successful + Status: True + Type: Successful + Observed Generation: 1 + Phase: Successful +Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Normal Starting 3m7s KubeDB Ops-manager Operator Start processing for KafkaOpsRequest: demo/kfops-reconfigure-topology + Normal Starting 3m7s KubeDB Ops-manager Operator Pausing Kafka databse: demo/kafka-prod + Normal Successful 3m7s KubeDB Ops-manager Operator Successfully paused Kafka database: demo/kafka-prod for KafkaOpsRequest: kfops-reconfigure-topology + Warning check reconcile; ConditionStatus:False 2m59s KubeDB Ops-manager Operator check reconcile; ConditionStatus:False + Normal UpdatePetSets 2m2s KubeDB Ops-manager Operator successfully reconciled the Kafka with new configure + Warning get pod; ConditionStatus:True; PodName:kafka-prod-controller-0 117s KubeDB Ops-manager Operator get pod; ConditionStatus:True; PodName:kafka-prod-controller-0 + Warning evict pod; ConditionStatus:True; PodName:kafka-prod-controller-0 117s KubeDB Ops-manager Operator evict pod; ConditionStatus:True; PodName:kafka-prod-controller-0 + Warning check pod running; ConditionStatus:False; PodName:kafka-prod-controller-0 112s KubeDB Ops-manager Operator check pod running; ConditionStatus:False; PodName:kafka-prod-controller-0 + Warning check pod running; ConditionStatus:True; PodName:kafka-prod-controller-0 102s KubeDB Ops-manager Operator check pod running; ConditionStatus:True; PodName:kafka-prod-controller-0 + Warning get pod; ConditionStatus:True; PodName:kafka-prod-controller-1 97s KubeDB Ops-manager Operator get pod; ConditionStatus:True; PodName:kafka-prod-controller-1 + Warning evict pod; ConditionStatus:True; PodName:kafka-prod-controller-1 97s KubeDB Ops-manager Operator evict pod; 
ConditionStatus:True; PodName:kafka-prod-controller-1 + Warning check pod running; ConditionStatus:False; PodName:kafka-prod-controller-1 92s KubeDB Ops-manager Operator check pod running; ConditionStatus:False; PodName:kafka-prod-controller-1 + Warning check pod running; ConditionStatus:True; PodName:kafka-prod-controller-1 82s KubeDB Ops-manager Operator check pod running; ConditionStatus:True; PodName:kafka-prod-controller-1 + Warning get pod; ConditionStatus:True; PodName:kafka-prod-broker-0 77s KubeDB Ops-manager Operator get pod; ConditionStatus:True; PodName:kafka-prod-broker-0 + Warning evict pod; ConditionStatus:True; PodName:kafka-prod-broker-0 77s KubeDB Ops-manager Operator evict pod; ConditionStatus:True; PodName:kafka-prod-broker-0 + Warning check pod running; ConditionStatus:False; PodName:kafka-prod-broker-0 72s KubeDB Ops-manager Operator check pod running; ConditionStatus:False; PodName:kafka-prod-broker-0 + Warning check pod running; ConditionStatus:True; PodName:kafka-prod-broker-0 32s KubeDB Ops-manager Operator check pod running; ConditionStatus:True; PodName:kafka-prod-broker-0 + Warning get pod; ConditionStatus:True; PodName:kafka-prod-broker-1 27s KubeDB Ops-manager Operator get pod; ConditionStatus:True; PodName:kafka-prod-broker-1 + Warning evict pod; ConditionStatus:True; PodName:kafka-prod-broker-1 27s KubeDB Ops-manager Operator evict pod; ConditionStatus:True; PodName:kafka-prod-broker-1 + Warning check pod running; ConditionStatus:False; PodName:kafka-prod-broker-1 22s KubeDB Ops-manager Operator check pod running; ConditionStatus:False; PodName:kafka-prod-broker-1 + Warning check pod running; ConditionStatus:True; PodName:kafka-prod-broker-1 12s KubeDB Ops-manager Operator check pod running; ConditionStatus:True; PodName:kafka-prod-broker-1 + Normal RestartNodes 7s KubeDB Ops-manager Operator Successfully restarted all nodes + Normal Starting 5s KubeDB Ops-manager Operator Resuming Kafka database: demo/kafka-prod + Normal Successful 5s KubeDB Ops-manager Operator Successfully resumed Kafka database: demo/kafka-prod for KafkaOpsRequest: kfops-reconfigure-topology +``` + +Now let's exec one of the instance and run a kafka-configs.sh command to check the new configuration we have provided. + +```bash +$ kubectl exec -it -n demo kafka-prod-broker-0 -- kafka-configs.sh --bootstrap-server localhost:9092 --command-config /opt/kafka/config/clientauth.properties --describe --entity-type brokers --all | grep 'log.retention.hours' + log.retention.hours=125 sensitive=false synonyms={STATIC_BROKER_CONFIG:log.retention.hours=125, DEFAULT_CONFIG:log.retention.hours=168} + log.retention.hours=125 sensitive=false synonyms={STATIC_BROKER_CONFIG:log.retention.hours=125, DEFAULT_CONFIG:log.retention.hours=168} +``` + +As we can see from the configuration of ready kafka, the value of `log.retention.hours` has been changed from `100` to `125`. So the reconfiguration of the cluster is successful. + + +### Reconfigure using apply config + +Now we will reconfigure this cluster again to set `log.retention.hours` to `150`. This time we won't use a new secret. We will use the `applyConfig` field of the `KafkaOpsRequest`. This will merge the new config in the existing secret. + +#### Create KafkaOpsRequest + +Now, we will use the new configuration in the `applyConfig` field in the `KafkaOpsRequest` CR. 
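+Since `applyConfig` merges keys into the configuration the cluster is already using rather than swapping the secret out, it can help to look at the current contents first. A small sketch, assuming the cluster still references `new-kf-topology-custom-config` from the previous step (dots in a key name must be escaped in the jsonpath expression):
+
+```bash
+# Optional: show the broker properties currently stored in the referenced secret.
+$ kubectl get secret -n demo new-kf-topology-custom-config \
+    -o jsonpath='{.data.broker\.properties}' | base64 -d
+log.retention.hours=125
+```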
The `KafkaOpsRequest` yaml is given below, + +```yaml +apiVersion: ops.kubedb.com/v1alpha1 +kind: KafkaOpsRequest +metadata: + name: kfops-reconfigure-apply-topology + namespace: demo +spec: + type: Reconfigure + databaseRef: + name: kafka-prod + configuration: + applyConfig: + broker.properties: |- + log.retention.hours=150 + controller.properties: |- + controller.quorum.election.timeout.ms=4000 + controller.quorum.fetch.timeout.ms=5000 + timeout: 5m + apply: IfReady +``` + +Here, + +- `spec.databaseRef.name` specifies that we are reconfiguring `kafka-prod` cluster. +- `spec.type` specifies that we are performing `Reconfigure` on kafka. +- `spec.configuration.applyConfig` specifies the new configuration that will be merged in the existing secret. + +Let's create the `KafkaOpsRequest` CR we have shown above, + +```bash +$ kubectl apply -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/kafka/reconfigure/kafka-reconfigure-apply-topology.yaml +kafkaopsrequest.ops.kubedb.com/kfops-reconfigure-apply-topology created +``` + +#### Verify the new configuration is working + +If everything goes well, `KubeDB` Ops-manager operator will merge this new config with the existing configuration. + +Let's wait for `KafkaOpsRequest` to be `Successful`. Run the following command to watch `KafkaOpsRequest` CR, + +```bash +$ kubectl get kafkaopsrequests -n demo kfops-reconfigure-apply-topology +NAME TYPE STATUS AGE +kfops-reconfigure-apply-topology Reconfigure Successful 55s +``` + +We can see from the above output that the `KafkaOpsRequest` has succeeded. If we describe the `KafkaOpsRequest` we will get an overview of the steps that were followed to reconfigure the cluster. + +```bash +$ kubectl describe kafkaopsrequest -n demo kfops-reconfigure-apply-topology +Name: kfops-reconfigure-apply-topology +Namespace: demo +Labels: +Annotations: +API Version: ops.kubedb.com/v1alpha1 +Kind: KafkaOpsRequest +Metadata: + Creation Timestamp: 2024-08-02T05:14:42Z + Generation: 1 + Resource Version: 332996 + UID: 551d2c92-9431-47a7-a699-8f8115131b49 +Spec: + Apply: IfReady + Configuration: + Apply Config: + broker.properties: log.retention.hours=150 + controller.properties: controller.quorum.election.timeout.ms=4000 +controller.quorum.fetch.timeout.ms=5000 + Database Ref: + Name: kafka-prod + Timeout: 5m + Type: Reconfigure +Status: + Conditions: + Last Transition Time: 2024-08-02T05:14:42Z + Message: Kafka ops-request has started to reconfigure kafka nodes + Observed Generation: 1 + Reason: Reconfigure + Status: True + Type: Reconfigure + Last Transition Time: 2024-08-02T05:14:45Z + Message: Successfully prepared user provided custom config secret + Observed Generation: 1 + Reason: PrepareCustomConfig + Status: True + Type: PrepareCustomConfig + Last Transition Time: 2024-08-02T05:14:52Z + Message: successfully reconciled the Kafka with new configure + Observed Generation: 1 + Reason: UpdatePetSets + Status: True + Type: UpdatePetSets + Last Transition Time: 2024-08-02T05:14:57Z + Message: get pod; ConditionStatus:True; PodName:kafka-prod-controller-0 + Observed Generation: 1 + Status: True + Type: GetPod--kafka-prod-controller-0 + Last Transition Time: 2024-08-02T05:14:57Z + Message: evict pod; ConditionStatus:True; PodName:kafka-prod-controller-0 + Observed Generation: 1 + Status: True + Type: EvictPod--kafka-prod-controller-0 + Last Transition Time: 2024-08-02T05:15:07Z + Message: check pod running; ConditionStatus:True; PodName:kafka-prod-controller-0 + Observed Generation: 1 + Status: 
True + Type: CheckPodRunning--kafka-prod-controller-0 + Last Transition Time: 2024-08-02T05:15:12Z + Message: get pod; ConditionStatus:True; PodName:kafka-prod-controller-1 + Observed Generation: 1 + Status: True + Type: GetPod--kafka-prod-controller-1 + Last Transition Time: 2024-08-02T05:15:12Z + Message: evict pod; ConditionStatus:True; PodName:kafka-prod-controller-1 + Observed Generation: 1 + Status: True + Type: EvictPod--kafka-prod-controller-1 + Last Transition Time: 2024-08-02T05:15:27Z + Message: check pod running; ConditionStatus:True; PodName:kafka-prod-controller-1 + Observed Generation: 1 + Status: True + Type: CheckPodRunning--kafka-prod-controller-1 + Last Transition Time: 2024-08-02T05:15:32Z + Message: get pod; ConditionStatus:True; PodName:kafka-prod-broker-0 + Observed Generation: 1 + Status: True + Type: GetPod--kafka-prod-broker-0 + Last Transition Time: 2024-08-02T05:15:32Z + Message: evict pod; ConditionStatus:True; PodName:kafka-prod-broker-0 + Observed Generation: 1 + Status: True + Type: EvictPod--kafka-prod-broker-0 + Last Transition Time: 2024-08-02T05:16:07Z + Message: check pod running; ConditionStatus:True; PodName:kafka-prod-broker-0 + Observed Generation: 1 + Status: True + Type: CheckPodRunning--kafka-prod-broker-0 + Last Transition Time: 2024-08-02T05:16:12Z + Message: get pod; ConditionStatus:True; PodName:kafka-prod-broker-1 + Observed Generation: 1 + Status: True + Type: GetPod--kafka-prod-broker-1 + Last Transition Time: 2024-08-02T05:16:12Z + Message: evict pod; ConditionStatus:True; PodName:kafka-prod-broker-1 + Observed Generation: 1 + Status: True + Type: EvictPod--kafka-prod-broker-1 + Last Transition Time: 2024-08-02T05:16:27Z + Message: check pod running; ConditionStatus:True; PodName:kafka-prod-broker-1 + Observed Generation: 1 + Status: True + Type: CheckPodRunning--kafka-prod-broker-1 + Last Transition Time: 2024-08-02T05:16:32Z + Message: Successfully restarted all nodes + Observed Generation: 1 + Reason: RestartNodes + Status: True + Type: RestartNodes + Last Transition Time: 2024-08-02T05:16:35Z + Message: Successfully completed reconfigure kafka + Observed Generation: 1 + Reason: Successful + Status: True + Type: Successful + Observed Generation: 1 + Phase: Successful +Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Normal Starting 2m6s KubeDB Ops-manager Operator Start processing for KafkaOpsRequest: demo/kfops-reconfigure-apply-topology + Normal Starting 2m6s KubeDB Ops-manager Operator Pausing Kafka databse: demo/kafka-prod + Normal Successful 2m6s KubeDB Ops-manager Operator Successfully paused Kafka database: demo/kafka-prod for KafkaOpsRequest: kfops-reconfigure-apply-topology + Normal UpdatePetSets 116s KubeDB Ops-manager Operator successfully reconciled the Kafka with new configure + Warning get pod; ConditionStatus:True; PodName:kafka-prod-controller-0 111s KubeDB Ops-manager Operator get pod; ConditionStatus:True; PodName:kafka-prod-controller-0 + Warning evict pod; ConditionStatus:True; PodName:kafka-prod-controller-0 111s KubeDB Ops-manager Operator evict pod; ConditionStatus:True; PodName:kafka-prod-controller-0 + Warning check pod running; ConditionStatus:False; PodName:kafka-prod-controller-0 106s KubeDB Ops-manager Operator check pod running; ConditionStatus:False; PodName:kafka-prod-controller-0 + Warning check pod running; ConditionStatus:True; PodName:kafka-prod-controller-0 101s KubeDB Ops-manager Operator check pod running; ConditionStatus:True; PodName:kafka-prod-controller-0 + Warning get 
pod; ConditionStatus:True; PodName:kafka-prod-controller-1 96s KubeDB Ops-manager Operator get pod; ConditionStatus:True; PodName:kafka-prod-controller-1 + Warning evict pod; ConditionStatus:True; PodName:kafka-prod-controller-1 96s KubeDB Ops-manager Operator evict pod; ConditionStatus:True; PodName:kafka-prod-controller-1 + Warning check pod running; ConditionStatus:False; PodName:kafka-prod-controller-1 91s KubeDB Ops-manager Operator check pod running; ConditionStatus:False; PodName:kafka-prod-controller-1 + Warning check pod running; ConditionStatus:True; PodName:kafka-prod-controller-1 81s KubeDB Ops-manager Operator check pod running; ConditionStatus:True; PodName:kafka-prod-controller-1 + Warning get pod; ConditionStatus:True; PodName:kafka-prod-broker-0 76s KubeDB Ops-manager Operator get pod; ConditionStatus:True; PodName:kafka-prod-broker-0 + Warning evict pod; ConditionStatus:True; PodName:kafka-prod-broker-0 76s KubeDB Ops-manager Operator evict pod; ConditionStatus:True; PodName:kafka-prod-broker-0 + Warning check pod running; ConditionStatus:False; PodName:kafka-prod-broker-0 71s KubeDB Ops-manager Operator check pod running; ConditionStatus:False; PodName:kafka-prod-broker-0 + Warning check pod running; ConditionStatus:True; PodName:kafka-prod-broker-0 41s KubeDB Ops-manager Operator check pod running; ConditionStatus:True; PodName:kafka-prod-broker-0 + Warning get pod; ConditionStatus:True; PodName:kafka-prod-broker-1 36s KubeDB Ops-manager Operator get pod; ConditionStatus:True; PodName:kafka-prod-broker-1 + Warning evict pod; ConditionStatus:True; PodName:kafka-prod-broker-1 36s KubeDB Ops-manager Operator evict pod; ConditionStatus:True; PodName:kafka-prod-broker-1 + Warning check pod running; ConditionStatus:False; PodName:kafka-prod-broker-1 31s KubeDB Ops-manager Operator check pod running; ConditionStatus:False; PodName:kafka-prod-broker-1 + Warning check pod running; ConditionStatus:True; PodName:kafka-prod-broker-1 21s KubeDB Ops-manager Operator check pod running; ConditionStatus:True; PodName:kafka-prod-broker-1 + Normal RestartNodes 15s KubeDB Ops-manager Operator Successfully restarted all nodes + Normal Starting 14s KubeDB Ops-manager Operator Resuming Kafka database: demo/kafka-prod + Normal Successful 14s KubeDB Ops-manager Operator Successfully resumed Kafka database: demo/kafka-prod for KafkaOpsRequest: kfops-reconfigure-apply-topology +``` + +Now let's exec into one of the instance and run a `kafka-configs.sh` command to check the new configuration we have provided. + +```bash +$ $ kubectl exec -it -n demo kafka-prod-broker-0 -- kafka-configs.sh --bootstrap-server localhost:9092 --command-config /opt/kafka/config/clientauth.properties --describe --entity-type brokers --all | grep 'log.retention.hours' + log.retention.hours=150 sensitive=false synonyms={STATIC_BROKER_CONFIG:log.retention.hours=150, DEFAULT_CONFIG:log.retention.hours=168} + log.retention.hours=150 sensitive=false synonyms={STATIC_BROKER_CONFIG:log.retention.hours=150, DEFAULT_CONFIG:log.retention.hours=168} +``` + +As we can see from the configuration of ready kafka, the value of `log.retention.hours` has been changed from `125` to `150`. So the reconfiguration of the database using the `applyConfig` field is successful. 
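+The command above only verifies the broker-side setting; the controller quorum values supplied through `applyConfig` are not broker configs, so they will not appear in that output. A rough way to spot-check them is to look at the rendered properties file on a controller pod. This is only a sketch: the exact file path inside the container is an assumption, not something documented here.
+
+```bash
+# Hypothetical spot check: inspect the rendered controller properties on a
+# controller pod. The /opt/kafka/config path mirrors where clientauth.properties
+# lives in these images, but it may differ between versions.
+# If the merge was applied, expect to see:
+#   controller.quorum.election.timeout.ms=4000
+#   controller.quorum.fetch.timeout.ms=5000
+$ kubectl exec -it -n demo kafka-prod-controller-0 -- \
+    grep 'controller.quorum' /opt/kafka/config/controller.properties
+```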
+ + +## Cleaning Up + +To clean up the Kubernetes resources created by this tutorial, run: + +```bash +kubectl delete kf -n demo kafka-prod +kubectl delete kafkaopsrequest -n demo kfops-reconfigure-apply-topology kfops-reconfigure-topology +kubectl delete secret -n demo kf-topology-custom-config new-kf-topology-custom-config +kubectl delete ns demo +``` + +## Next Steps + +- Detail concepts of [Kafka object](/docs/guides/kafka/concepts/kafka.md). +- Different Kafka topology clustering modes [here](/docs/guides/kafka/clustering/_index.md). +- Monitor your Kafka database with KubeDB using [out-of-the-box Prometheus operator](/docs/guides/kafka/monitoring/using-prometheus-operator.md). + +[//]: # (- Monitor your Kafka database with KubeDB using [out-of-the-box builtin-Prometheus](/docs/guides/kafka/monitoring/using-builtin-prometheus.md).) +- Want to hack on KubeDB? Check our [contribution guidelines](/docs/CONTRIBUTING.md). diff --git a/docs/guides/kafka/scaling/_index.md b/docs/guides/kafka/scaling/_index.md new file mode 100644 index 0000000000..98b83c7106 --- /dev/null +++ b/docs/guides/kafka/scaling/_index.md @@ -0,0 +1,10 @@ +--- +title: Scaling Kafka +menu: + docs_{{ .version }}: + identifier: kf-scaling + name: Scaling + parent: kf-kafka-guides + weight: 43 +menu_name: docs_{{ .version }} +--- \ No newline at end of file diff --git a/docs/guides/kafka/scaling/horizontal-scaling/_index.md b/docs/guides/kafka/scaling/horizontal-scaling/_index.md new file mode 100644 index 0000000000..30adbd72dd --- /dev/null +++ b/docs/guides/kafka/scaling/horizontal-scaling/_index.md @@ -0,0 +1,10 @@ +--- +title: Horizontal Scaling +menu: + docs_{{ .version }}: + identifier: kf-horizontal-scaling + name: Horizontal Scaling + parent: kf-scaling + weight: 10 +menu_name: docs_{{ .version }} +--- \ No newline at end of file diff --git a/docs/guides/kafka/scaling/horizontal-scaling/combined.md b/docs/guides/kafka/scaling/horizontal-scaling/combined.md new file mode 100644 index 0000000000..4ded4cffb7 --- /dev/null +++ b/docs/guides/kafka/scaling/horizontal-scaling/combined.md @@ -0,0 +1,969 @@ +--- +title: Horizontal Scaling Combined Kafka +menu: + docs_{{ .version }}: + identifier: kf-horizontal-scaling-combined + name: Combined Cluster + parent: kf-horizontal-scaling + weight: 20 +menu_name: docs_{{ .version }} +section_menu_id: guides +--- + +> New to KubeDB? Please start [here](/docs/README.md). + +# Horizontal Scale Kafka Combined Cluster + +This guide will show you how to use `KubeDB` Ops-manager operator to scale the Kafka combined cluster. + +## Before You Begin + +- At first, you need to have a Kubernetes cluster, and the `kubectl` command-line tool must be configured to communicate with your cluster. If you do not already have a cluster, you can create one by using [kind](https://kind.sigs.k8s.io/docs/user/quick-start/). + +- Install `KubeDB` Provisioner and Ops-manager operator in your cluster following the steps [here](/docs/setup/README.md). + +- You should be familiar with the following `KubeDB` concepts: + - [Kafka](/docs/guides/kafka/concepts/kafka.md) + - [Combined](/docs/guides/kafka/clustering/combined-cluster/index.md) + - [KafkaOpsRequest](/docs/guides/kafka/concepts/kafkaopsrequest.md) + - [Horizontal Scaling Overview](/docs/guides/kafka/scaling/horizontal-scaling/overview.md) + +To keep everything isolated, we are going to use a separate namespace called `demo` throughout this tutorial.
+ +```bash +$ kubectl create ns demo +namespace/demo created +``` + +> **Note:** YAML files used in this tutorial are stored in [docs/examples/kafka](/docs/examples/kafka) directory of [kubedb/docs](https://github.com/kubedb/docs) repository. + +## Apply Horizontal Scaling on Combined Cluster + +Here, we are going to deploy a `Kafka` combined cluster using a supported version by `KubeDB` operator. Then we are going to apply horizontal scaling on it. + +### Prepare Kafka Combined cluster + +Now, we are going to deploy a `Kafka` combined cluster with version `3.6.1`. + +### Deploy Kafka combined cluster + +In this section, we are going to deploy a Kafka combined cluster. Then, in the next section we will scale the cluster using `KafkaOpsRequest` CRD. Below is the YAML of the `Kafka` CR that we are going to create, + +```yaml +apiVersion: kubedb.com/v1 +kind: Kafka +metadata: + name: kafka-dev + namespace: demo +spec: + replicas: 2 + version: 3.6.1 + storage: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi + storageClassName: standard + storageType: Durable + deletionPolicy: WipeOut +``` + +Let's create the `Kafka` CR we have shown above, + +```bash +$ kubectl create -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/kafka/scaling/kafka-combined.yaml +kafka.kubedb.com/kafka-dev created +``` + +Now, wait until `kafka-dev` has status `Ready`. i.e, + +```bash +$ kubectl get kf -n demo -w +NAME TYPE VERSION STATUS AGE +kafka-dev kubedb.com/v1 3.6.1 Provisioning 0s +kafka-dev kubedb.com/v1 3.6.1 Provisioning 24s +. +. +kafka-dev kubedb.com/v1 3.6.1 Ready 92s +``` + +Let's check the number of replicas has from kafka object, number of pods the petset have, + +```bash +$ kubectl get kafka -n demo kafka-dev -o json | jq '.spec.replicas' +2 + +$ kubectl get petset -n demo kafka-dev -o json | jq '.spec.replicas' +2 +``` + +We can see from both command that the cluster has 2 replicas. + +Also, we can verify the replicas of the combined from an internal kafka command by exec into a replica. 
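+A plain pod listing gives the same headcount without exec-ing into anything; the label selector below assumes the standard `app.kubernetes.io/instance` label that KubeDB puts on the pods it manages.
+
+```bash
+# Optional: count the combined-cluster pods directly.
+$ kubectl get pods -n demo -l app.kubernetes.io/instance=kafka-dev
+```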
+ +Now let's exec to a instance and run a kafka internal command to check the number of replicas, + +```bash +$ kubectl exec -it -n demo kafka-dev-0 -- kafka-broker-api-versions.sh --bootstrap-server localhost:9092 --command-config config/clientauth.properties +kafka-dev-0.kafka-dev-pods.demo.svc.cluster.local:9092 (id: 0 rack: null) -> ( + Produce(0): 0 to 9 [usable: 9], + Fetch(1): 0 to 15 [usable: 15], + ListOffsets(2): 0 to 8 [usable: 8], + Metadata(3): 0 to 12 [usable: 12], + LeaderAndIsr(4): UNSUPPORTED, + StopReplica(5): UNSUPPORTED, + UpdateMetadata(6): UNSUPPORTED, + ControlledShutdown(7): UNSUPPORTED, + OffsetCommit(8): 0 to 8 [usable: 8], + OffsetFetch(9): 0 to 8 [usable: 8], + FindCoordinator(10): 0 to 4 [usable: 4], + JoinGroup(11): 0 to 9 [usable: 9], + Heartbeat(12): 0 to 4 [usable: 4], + LeaveGroup(13): 0 to 5 [usable: 5], + SyncGroup(14): 0 to 5 [usable: 5], + DescribeGroups(15): 0 to 5 [usable: 5], + ListGroups(16): 0 to 4 [usable: 4], + SaslHandshake(17): 0 to 1 [usable: 1], + ApiVersions(18): 0 to 3 [usable: 3], + CreateTopics(19): 0 to 7 [usable: 7], + DeleteTopics(20): 0 to 6 [usable: 6], + DeleteRecords(21): 0 to 2 [usable: 2], + InitProducerId(22): 0 to 4 [usable: 4], + OffsetForLeaderEpoch(23): 0 to 4 [usable: 4], + AddPartitionsToTxn(24): 0 to 4 [usable: 4], + AddOffsetsToTxn(25): 0 to 3 [usable: 3], + EndTxn(26): 0 to 3 [usable: 3], + WriteTxnMarkers(27): 0 to 1 [usable: 1], + TxnOffsetCommit(28): 0 to 3 [usable: 3], + DescribeAcls(29): 0 to 3 [usable: 3], + CreateAcls(30): 0 to 3 [usable: 3], + DeleteAcls(31): 0 to 3 [usable: 3], + DescribeConfigs(32): 0 to 4 [usable: 4], + AlterConfigs(33): 0 to 2 [usable: 2], + AlterReplicaLogDirs(34): 0 to 2 [usable: 2], + DescribeLogDirs(35): 0 to 4 [usable: 4], + SaslAuthenticate(36): 0 to 2 [usable: 2], + CreatePartitions(37): 0 to 3 [usable: 3], + CreateDelegationToken(38): 0 to 3 [usable: 3], + RenewDelegationToken(39): 0 to 2 [usable: 2], + ExpireDelegationToken(40): 0 to 2 [usable: 2], + DescribeDelegationToken(41): 0 to 3 [usable: 3], + DeleteGroups(42): 0 to 2 [usable: 2], + ElectLeaders(43): 0 to 2 [usable: 2], + IncrementalAlterConfigs(44): 0 to 1 [usable: 1], + AlterPartitionReassignments(45): 0 [usable: 0], + ListPartitionReassignments(46): 0 [usable: 0], + OffsetDelete(47): 0 [usable: 0], + DescribeClientQuotas(48): 0 to 1 [usable: 1], + AlterClientQuotas(49): 0 to 1 [usable: 1], + DescribeUserScramCredentials(50): 0 [usable: 0], + AlterUserScramCredentials(51): 0 [usable: 0], + DescribeQuorum(55): 0 to 1 [usable: 1], + AlterPartition(56): UNSUPPORTED, + UpdateFeatures(57): 0 to 1 [usable: 1], + Envelope(58): UNSUPPORTED, + DescribeCluster(60): 0 [usable: 0], + DescribeProducers(61): 0 [usable: 0], + UnregisterBroker(64): 0 [usable: 0], + DescribeTransactions(65): 0 [usable: 0], + ListTransactions(66): 0 [usable: 0], + AllocateProducerIds(67): UNSUPPORTED, + ConsumerGroupHeartbeat(68): UNSUPPORTED +) +kafka-dev-1.kafka-dev-pods.demo.svc.cluster.local:9092 (id: 1 rack: null) -> ( + Produce(0): 0 to 9 [usable: 9], + Fetch(1): 0 to 15 [usable: 15], + ListOffsets(2): 0 to 8 [usable: 8], + Metadata(3): 0 to 12 [usable: 12], + LeaderAndIsr(4): UNSUPPORTED, + StopReplica(5): UNSUPPORTED, + UpdateMetadata(6): UNSUPPORTED, + ControlledShutdown(7): UNSUPPORTED, + OffsetCommit(8): 0 to 8 [usable: 8], + OffsetFetch(9): 0 to 8 [usable: 8], + FindCoordinator(10): 0 to 4 [usable: 4], + JoinGroup(11): 0 to 9 [usable: 9], + Heartbeat(12): 0 to 4 [usable: 4], + LeaveGroup(13): 0 to 5 [usable: 5], + SyncGroup(14): 0 to 5 [usable: 
5], + DescribeGroups(15): 0 to 5 [usable: 5], + ListGroups(16): 0 to 4 [usable: 4], + SaslHandshake(17): 0 to 1 [usable: 1], + ApiVersions(18): 0 to 3 [usable: 3], + CreateTopics(19): 0 to 7 [usable: 7], + DeleteTopics(20): 0 to 6 [usable: 6], + DeleteRecords(21): 0 to 2 [usable: 2], + InitProducerId(22): 0 to 4 [usable: 4], + OffsetForLeaderEpoch(23): 0 to 4 [usable: 4], + AddPartitionsToTxn(24): 0 to 4 [usable: 4], + AddOffsetsToTxn(25): 0 to 3 [usable: 3], + EndTxn(26): 0 to 3 [usable: 3], + WriteTxnMarkers(27): 0 to 1 [usable: 1], + TxnOffsetCommit(28): 0 to 3 [usable: 3], + DescribeAcls(29): 0 to 3 [usable: 3], + CreateAcls(30): 0 to 3 [usable: 3], + DeleteAcls(31): 0 to 3 [usable: 3], + DescribeConfigs(32): 0 to 4 [usable: 4], + AlterConfigs(33): 0 to 2 [usable: 2], + AlterReplicaLogDirs(34): 0 to 2 [usable: 2], + DescribeLogDirs(35): 0 to 4 [usable: 4], + SaslAuthenticate(36): 0 to 2 [usable: 2], + CreatePartitions(37): 0 to 3 [usable: 3], + CreateDelegationToken(38): 0 to 3 [usable: 3], + RenewDelegationToken(39): 0 to 2 [usable: 2], + ExpireDelegationToken(40): 0 to 2 [usable: 2], + DescribeDelegationToken(41): 0 to 3 [usable: 3], + DeleteGroups(42): 0 to 2 [usable: 2], + ElectLeaders(43): 0 to 2 [usable: 2], + IncrementalAlterConfigs(44): 0 to 1 [usable: 1], + AlterPartitionReassignments(45): 0 [usable: 0], + ListPartitionReassignments(46): 0 [usable: 0], + OffsetDelete(47): 0 [usable: 0], + DescribeClientQuotas(48): 0 to 1 [usable: 1], + AlterClientQuotas(49): 0 to 1 [usable: 1], + DescribeUserScramCredentials(50): 0 [usable: 0], + AlterUserScramCredentials(51): 0 [usable: 0], + DescribeQuorum(55): 0 to 1 [usable: 1], + AlterPartition(56): UNSUPPORTED, + UpdateFeatures(57): 0 to 1 [usable: 1], + Envelope(58): UNSUPPORTED, + DescribeCluster(60): 0 [usable: 0], + DescribeProducers(61): 0 [usable: 0], + UnregisterBroker(64): 0 [usable: 0], + DescribeTransactions(65): 0 [usable: 0], + ListTransactions(66): 0 [usable: 0], + AllocateProducerIds(67): UNSUPPORTED, + ConsumerGroupHeartbeat(68): UNSUPPORTED +) +``` + +We can see from the above output that the kafka has 2 nodes. + +We are now ready to apply the `KafkaOpsRequest` CR to scale this cluster. + +## Scale Up Replicas + +Here, we are going to scale up the replicas of the combined cluster to meet the desired number of replicas after scaling. + +#### Create KafkaOpsRequest + +In order to scale up the replicas of the combined cluster, we have to create a `KafkaOpsRequest` CR with our desired replicas. Below is the YAML of the `KafkaOpsRequest` CR that we are going to create, + +```yaml +apiVersion: ops.kubedb.com/v1alpha1 +kind: KafkaOpsRequest +metadata: + name: kfops-hscale-up-combined + namespace: demo +spec: + type: HorizontalScaling + databaseRef: + name: kafka-dev + horizontalScaling: + node: 3 +``` + +Here, + +- `spec.databaseRef.name` specifies that we are performing horizontal scaling operation on `kafka-dev` cluster. +- `spec.type` specifies that we are performing `HorizontalScaling` on kafka. +- `spec.horizontalScaling.node` specifies the desired replicas after scaling. 
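+While the request is being processed, it can be helpful to watch the pods from a second terminal; once the scale-up completes you should see a third pod, `kafka-dev-2`, come up and join the cluster.
+
+```bash
+# In a separate terminal: watch the pods while the ops request is reconciled.
+$ kubectl get pods -n demo -w
+```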
+ +Let's create the `KafkaOpsRequest` CR we have shown above, + +```bash +$ kubectl apply -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/kafka/scaling/horizontal-scaling/kafka-hscale-up-combined.yaml +kafkaopsrequest.ops.kubedb.com/kfops-hscale-up-combined created +``` + +#### Verify Combined cluster replicas scaled up successfully + +If everything goes well, `KubeDB` Ops-manager operator will update the replicas of `Kafka` object and related `PetSets` and `Pods`. + +Let's wait for `KafkaOpsRequest` to be `Successful`. Run the following command to watch `KafkaOpsRequest` CR, + +```bash +$ watch kubectl get kafkaopsrequest -n demo +NAME TYPE STATUS AGE +kfops-hscale-up-combined HorizontalScaling Successful 106s +``` + +We can see from the above output that the `KafkaOpsRequest` has succeeded. If we describe the `KafkaOpsRequest` we will get an overview of the steps that were followed to scale the cluster. + +```bash +$ kubectl describe kafkaopsrequests -n demo kfops-hscale-up-combined +Name: kfops-hscale-up-combined +Namespace: demo +Labels: +Annotations: +API Version: ops.kubedb.com/v1alpha1 +Kind: KafkaOpsRequest +Metadata: + Creation Timestamp: 2024-08-02T10:19:56Z + Generation: 1 + Resource Version: 353093 + UID: f91de2da-82c4-4175-aab4-de0f3e1ce498 +Spec: + Apply: IfReady + Database Ref: + Name: kafka-dev + Horizontal Scaling: + Node: 3 + Type: HorizontalScaling +Status: + Conditions: + Last Transition Time: 2024-08-02T10:19:57Z + Message: Kafka ops-request has started to horizontally scaling the nodes + Observed Generation: 1 + Reason: HorizontalScaling + Status: True + Type: HorizontalScaling + Last Transition Time: 2024-08-02T10:20:05Z + Message: get pod; ConditionStatus:True; PodName:kafka-dev-0 + Observed Generation: 1 + Status: True + Type: GetPod--kafka-dev-0 + Last Transition Time: 2024-08-02T10:20:05Z + Message: evict pod; ConditionStatus:True; PodName:kafka-dev-0 + Observed Generation: 1 + Status: True + Type: EvictPod--kafka-dev-0 + Last Transition Time: 2024-08-02T10:20:15Z + Message: check pod running; ConditionStatus:True; PodName:kafka-dev-0 + Observed Generation: 1 + Status: True + Type: CheckPodRunning--kafka-dev-0 + Last Transition Time: 2024-08-02T10:20:20Z + Message: get pod; ConditionStatus:True; PodName:kafka-dev-1 + Observed Generation: 1 + Status: True + Type: GetPod--kafka-dev-1 + Last Transition Time: 2024-08-02T10:20:20Z + Message: evict pod; ConditionStatus:True; PodName:kafka-dev-1 + Observed Generation: 1 + Status: True + Type: EvictPod--kafka-dev-1 + Last Transition Time: 2024-08-02T10:21:00Z + Message: check pod running; ConditionStatus:True; PodName:kafka-dev-1 + Observed Generation: 1 + Status: True + Type: CheckPodRunning--kafka-dev-1 + Last Transition Time: 2024-08-02T10:21:05Z + Message: Successfully restarted all nodes + Observed Generation: 1 + Reason: RestartNodes + Status: True + Type: RestartNodes + Last Transition Time: 2024-08-02T10:22:15Z + Message: Successfully Scaled Up Server Node + Observed Generation: 1 + Reason: ScaleUpCombined + Status: True + Type: ScaleUpCombined + Last Transition Time: 2024-08-02T10:21:10Z + Message: patch pet setkafka-dev; ConditionStatus:True + Observed Generation: 1 + Status: True + Type: PatchPetSetkafka-dev + Last Transition Time: 2024-08-02T10:22:10Z + Message: node in cluster; ConditionStatus:True + Observed Generation: 1 + Status: True + Type: NodeInCluster + Last Transition Time: 2024-08-02T10:22:15Z + Message: Successfully completed horizontally scale kafka cluster + 
Observed Generation: 1 + Reason: Successful + Status: True + Type: Successful + Observed Generation: 1 + Phase: Successful +Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Normal Starting 4m34s KubeDB Ops-manager Operator Start processing for KafkaOpsRequest: demo/kfops-hscale-up-combined + Normal Starting 4m34s KubeDB Ops-manager Operator Pausing Kafka databse: demo/kafka-dev + Normal Successful 4m34s KubeDB Ops-manager Operator Successfully paused Kafka database: demo/kafka-dev for KafkaOpsRequest: kfops-hscale-up-combined + Warning get pod; ConditionStatus:True; PodName:kafka-dev-0 4m26s KubeDB Ops-manager Operator get pod; ConditionStatus:True; PodName:kafka-dev-0 + Warning evict pod; ConditionStatus:True; PodName:kafka-dev-0 4m26s KubeDB Ops-manager Operator evict pod; ConditionStatus:True; PodName:kafka-dev-0 + Warning check pod running; ConditionStatus:False; PodName:kafka-dev-0 4m21s KubeDB Ops-manager Operator check pod running; ConditionStatus:False; PodName:kafka-dev-0 + Warning check pod running; ConditionStatus:True; PodName:kafka-dev-0 4m16s KubeDB Ops-manager Operator check pod running; ConditionStatus:True; PodName:kafka-dev-0 + Warning get pod; ConditionStatus:True; PodName:kafka-dev-1 4m11s KubeDB Ops-manager Operator get pod; ConditionStatus:True; PodName:kafka-dev-1 + Warning evict pod; ConditionStatus:True; PodName:kafka-dev-1 4m11s KubeDB Ops-manager Operator evict pod; ConditionStatus:True; PodName:kafka-dev-1 + Warning check pod running; ConditionStatus:False; PodName:kafka-dev-1 4m6s KubeDB Ops-manager Operator check pod running; ConditionStatus:False; PodName:kafka-dev-1 + Warning check pod running; ConditionStatus:True; PodName:kafka-dev-1 3m31s KubeDB Ops-manager Operator check pod running; ConditionStatus:True; PodName:kafka-dev-1 + Normal RestartNodes 3m26s KubeDB Ops-manager Operator Successfully restarted all nodes + Warning patch pet setkafka-dev; ConditionStatus:True 3m21s KubeDB Ops-manager Operator patch pet setkafka-dev; ConditionStatus:True + Warning node in cluster; ConditionStatus:False 2m46s KubeDB Ops-manager Operator node in cluster; ConditionStatus:False + Warning node in cluster; ConditionStatus:True 2m21s KubeDB Ops-manager Operator node in cluster; ConditionStatus:True + Normal ScaleUpCombined 2m16s KubeDB Ops-manager Operator Successfully Scaled Up Server Node + Normal Starting 2m16s KubeDB Ops-manager Operator Resuming Kafka database: demo/kafka-dev + Normal Successful 2m16s KubeDB Ops-manager Operator Successfully resumed Kafka database: demo/kafka-dev for KafkaOpsRequest: kfops-hscale-up-combined +``` + +Now, we are going to verify the number of replicas this cluster has from the Kafka object, number of pods the petset have, + +```bash +$ kubectl get kafka -n demo kafka-dev -o json | jq '.spec.replicas' +3 + +$ kubectl get petset -n demo kafka-dev -o json | jq '.spec.replicas' +3 +``` + +Now let's connect to a kafka instance and run a kafka internal command to check the number of replicas, +```bash +$ kubectl exec -it -n demo kafka-dev-0 -- kafka-broker-api-versions.sh --bootstrap-server localhost:9092 --command-config config/clientauth.properties +kafka-dev-0.kafka-dev-pods.demo.svc.cluster.local:9092 (id: 0 rack: null) -> ( + Produce(0): 0 to 9 [usable: 9], + Fetch(1): 0 to 15 [usable: 15], + ListOffsets(2): 0 to 8 [usable: 8], + Metadata(3): 0 to 12 [usable: 12], + LeaderAndIsr(4): UNSUPPORTED, + StopReplica(5): UNSUPPORTED, + UpdateMetadata(6): UNSUPPORTED, + ControlledShutdown(7): UNSUPPORTED, + 
OffsetCommit(8): 0 to 8 [usable: 8], + OffsetFetch(9): 0 to 8 [usable: 8], + FindCoordinator(10): 0 to 4 [usable: 4], + JoinGroup(11): 0 to 9 [usable: 9], + Heartbeat(12): 0 to 4 [usable: 4], + LeaveGroup(13): 0 to 5 [usable: 5], + SyncGroup(14): 0 to 5 [usable: 5], + DescribeGroups(15): 0 to 5 [usable: 5], + ListGroups(16): 0 to 4 [usable: 4], + SaslHandshake(17): 0 to 1 [usable: 1], + ApiVersions(18): 0 to 3 [usable: 3], + CreateTopics(19): 0 to 7 [usable: 7], + DeleteTopics(20): 0 to 6 [usable: 6], + DeleteRecords(21): 0 to 2 [usable: 2], + InitProducerId(22): 0 to 4 [usable: 4], + OffsetForLeaderEpoch(23): 0 to 4 [usable: 4], + AddPartitionsToTxn(24): 0 to 4 [usable: 4], + AddOffsetsToTxn(25): 0 to 3 [usable: 3], + EndTxn(26): 0 to 3 [usable: 3], + WriteTxnMarkers(27): 0 to 1 [usable: 1], + TxnOffsetCommit(28): 0 to 3 [usable: 3], + DescribeAcls(29): 0 to 3 [usable: 3], + CreateAcls(30): 0 to 3 [usable: 3], + DeleteAcls(31): 0 to 3 [usable: 3], + DescribeConfigs(32): 0 to 4 [usable: 4], + AlterConfigs(33): 0 to 2 [usable: 2], + AlterReplicaLogDirs(34): 0 to 2 [usable: 2], + DescribeLogDirs(35): 0 to 4 [usable: 4], + SaslAuthenticate(36): 0 to 2 [usable: 2], + CreatePartitions(37): 0 to 3 [usable: 3], + CreateDelegationToken(38): 0 to 3 [usable: 3], + RenewDelegationToken(39): 0 to 2 [usable: 2], + ExpireDelegationToken(40): 0 to 2 [usable: 2], + DescribeDelegationToken(41): 0 to 3 [usable: 3], + DeleteGroups(42): 0 to 2 [usable: 2], + ElectLeaders(43): 0 to 2 [usable: 2], + IncrementalAlterConfigs(44): 0 to 1 [usable: 1], + AlterPartitionReassignments(45): 0 [usable: 0], + ListPartitionReassignments(46): 0 [usable: 0], + OffsetDelete(47): 0 [usable: 0], + DescribeClientQuotas(48): 0 to 1 [usable: 1], + AlterClientQuotas(49): 0 to 1 [usable: 1], + DescribeUserScramCredentials(50): 0 [usable: 0], + AlterUserScramCredentials(51): 0 [usable: 0], + DescribeQuorum(55): 0 to 1 [usable: 1], + AlterPartition(56): UNSUPPORTED, + UpdateFeatures(57): 0 to 1 [usable: 1], + Envelope(58): UNSUPPORTED, + DescribeCluster(60): 0 [usable: 0], + DescribeProducers(61): 0 [usable: 0], + UnregisterBroker(64): 0 [usable: 0], + DescribeTransactions(65): 0 [usable: 0], + ListTransactions(66): 0 [usable: 0], + AllocateProducerIds(67): UNSUPPORTED, + ConsumerGroupHeartbeat(68): UNSUPPORTED +) +kafka-dev-1.kafka-dev-pods.demo.svc.cluster.local:9092 (id: 1 rack: null) -> ( + Produce(0): 0 to 9 [usable: 9], + Fetch(1): 0 to 15 [usable: 15], + ListOffsets(2): 0 to 8 [usable: 8], + Metadata(3): 0 to 12 [usable: 12], + LeaderAndIsr(4): UNSUPPORTED, + StopReplica(5): UNSUPPORTED, + UpdateMetadata(6): UNSUPPORTED, + ControlledShutdown(7): UNSUPPORTED, + OffsetCommit(8): 0 to 8 [usable: 8], + OffsetFetch(9): 0 to 8 [usable: 8], + FindCoordinator(10): 0 to 4 [usable: 4], + JoinGroup(11): 0 to 9 [usable: 9], + Heartbeat(12): 0 to 4 [usable: 4], + LeaveGroup(13): 0 to 5 [usable: 5], + SyncGroup(14): 0 to 5 [usable: 5], + DescribeGroups(15): 0 to 5 [usable: 5], + ListGroups(16): 0 to 4 [usable: 4], + SaslHandshake(17): 0 to 1 [usable: 1], + ApiVersions(18): 0 to 3 [usable: 3], + CreateTopics(19): 0 to 7 [usable: 7], + DeleteTopics(20): 0 to 6 [usable: 6], + DeleteRecords(21): 0 to 2 [usable: 2], + InitProducerId(22): 0 to 4 [usable: 4], + OffsetForLeaderEpoch(23): 0 to 4 [usable: 4], + AddPartitionsToTxn(24): 0 to 4 [usable: 4], + AddOffsetsToTxn(25): 0 to 3 [usable: 3], + EndTxn(26): 0 to 3 [usable: 3], + WriteTxnMarkers(27): 0 to 1 [usable: 1], + TxnOffsetCommit(28): 0 to 3 [usable: 3], + DescribeAcls(29): 0 to 3 [usable: 
3], + CreateAcls(30): 0 to 3 [usable: 3], + DeleteAcls(31): 0 to 3 [usable: 3], + DescribeConfigs(32): 0 to 4 [usable: 4], + AlterConfigs(33): 0 to 2 [usable: 2], + AlterReplicaLogDirs(34): 0 to 2 [usable: 2], + DescribeLogDirs(35): 0 to 4 [usable: 4], + SaslAuthenticate(36): 0 to 2 [usable: 2], + CreatePartitions(37): 0 to 3 [usable: 3], + CreateDelegationToken(38): 0 to 3 [usable: 3], + RenewDelegationToken(39): 0 to 2 [usable: 2], + ExpireDelegationToken(40): 0 to 2 [usable: 2], + DescribeDelegationToken(41): 0 to 3 [usable: 3], + DeleteGroups(42): 0 to 2 [usable: 2], + ElectLeaders(43): 0 to 2 [usable: 2], + IncrementalAlterConfigs(44): 0 to 1 [usable: 1], + AlterPartitionReassignments(45): 0 [usable: 0], + ListPartitionReassignments(46): 0 [usable: 0], + OffsetDelete(47): 0 [usable: 0], + DescribeClientQuotas(48): 0 to 1 [usable: 1], + AlterClientQuotas(49): 0 to 1 [usable: 1], + DescribeUserScramCredentials(50): 0 [usable: 0], + AlterUserScramCredentials(51): 0 [usable: 0], + DescribeQuorum(55): 0 to 1 [usable: 1], + AlterPartition(56): UNSUPPORTED, + UpdateFeatures(57): 0 to 1 [usable: 1], + Envelope(58): UNSUPPORTED, + DescribeCluster(60): 0 [usable: 0], + DescribeProducers(61): 0 [usable: 0], + UnregisterBroker(64): 0 [usable: 0], + DescribeTransactions(65): 0 [usable: 0], + ListTransactions(66): 0 [usable: 0], + AllocateProducerIds(67): UNSUPPORTED, + ConsumerGroupHeartbeat(68): UNSUPPORTED +) +kafka-dev-2.kafka-dev-pods.demo.svc.cluster.local:9092 (id: 2 rack: null) -> ( + Produce(0): 0 to 9 [usable: 9], + Fetch(1): 0 to 15 [usable: 15], + ListOffsets(2): 0 to 8 [usable: 8], + Metadata(3): 0 to 12 [usable: 12], + LeaderAndIsr(4): UNSUPPORTED, + StopReplica(5): UNSUPPORTED, + UpdateMetadata(6): UNSUPPORTED, + ControlledShutdown(7): UNSUPPORTED, + OffsetCommit(8): 0 to 8 [usable: 8], + OffsetFetch(9): 0 to 8 [usable: 8], + FindCoordinator(10): 0 to 4 [usable: 4], + JoinGroup(11): 0 to 9 [usable: 9], + Heartbeat(12): 0 to 4 [usable: 4], + LeaveGroup(13): 0 to 5 [usable: 5], + SyncGroup(14): 0 to 5 [usable: 5], + DescribeGroups(15): 0 to 5 [usable: 5], + ListGroups(16): 0 to 4 [usable: 4], + SaslHandshake(17): 0 to 1 [usable: 1], + ApiVersions(18): 0 to 3 [usable: 3], + CreateTopics(19): 0 to 7 [usable: 7], + DeleteTopics(20): 0 to 6 [usable: 6], + DeleteRecords(21): 0 to 2 [usable: 2], + InitProducerId(22): 0 to 4 [usable: 4], + OffsetForLeaderEpoch(23): 0 to 4 [usable: 4], + AddPartitionsToTxn(24): 0 to 4 [usable: 4], + AddOffsetsToTxn(25): 0 to 3 [usable: 3], + EndTxn(26): 0 to 3 [usable: 3], + WriteTxnMarkers(27): 0 to 1 [usable: 1], + TxnOffsetCommit(28): 0 to 3 [usable: 3], + DescribeAcls(29): 0 to 3 [usable: 3], + CreateAcls(30): 0 to 3 [usable: 3], + DeleteAcls(31): 0 to 3 [usable: 3], + DescribeConfigs(32): 0 to 4 [usable: 4], + AlterConfigs(33): 0 to 2 [usable: 2], + AlterReplicaLogDirs(34): 0 to 2 [usable: 2], + DescribeLogDirs(35): 0 to 4 [usable: 4], + SaslAuthenticate(36): 0 to 2 [usable: 2], + CreatePartitions(37): 0 to 3 [usable: 3], + CreateDelegationToken(38): 0 to 3 [usable: 3], + RenewDelegationToken(39): 0 to 2 [usable: 2], + ExpireDelegationToken(40): 0 to 2 [usable: 2], + DescribeDelegationToken(41): 0 to 3 [usable: 3], + DeleteGroups(42): 0 to 2 [usable: 2], + ElectLeaders(43): 0 to 2 [usable: 2], + IncrementalAlterConfigs(44): 0 to 1 [usable: 1], + AlterPartitionReassignments(45): 0 [usable: 0], + ListPartitionReassignments(46): 0 [usable: 0], + OffsetDelete(47): 0 [usable: 0], + DescribeClientQuotas(48): 0 to 1 [usable: 1], + AlterClientQuotas(49): 0 to 1 
[usable: 1], + DescribeUserScramCredentials(50): 0 [usable: 0], + AlterUserScramCredentials(51): 0 [usable: 0], + DescribeQuorum(55): 0 to 1 [usable: 1], + AlterPartition(56): UNSUPPORTED, + UpdateFeatures(57): 0 to 1 [usable: 1], + Envelope(58): UNSUPPORTED, + DescribeCluster(60): 0 [usable: 0], + DescribeProducers(61): 0 [usable: 0], + UnregisterBroker(64): 0 [usable: 0], + DescribeTransactions(65): 0 [usable: 0], + ListTransactions(66): 0 [usable: 0], + AllocateProducerIds(67): UNSUPPORTED, + ConsumerGroupHeartbeat(68): UNSUPPORTED +) +``` + +From all the above outputs we can see that the brokers of the combined kafka is `3`. That means we have successfully scaled up the replicas of the Kafka combined cluster. + +### Scale Down Replicas + +Here, we are going to scale down the replicas of the kafka combined cluster to meet the desired number of replicas after scaling. + +#### Create KafkaOpsRequest + +In order to scale down the replicas of the kafka combined cluster, we have to create a `KafkaOpsRequest` CR with our desired replicas. Below is the YAML of the `KafkaOpsRequest` CR that we are going to create, + +```yaml +apiVersion: ops.kubedb.com/v1alpha1 +kind: KafkaOpsRequest +metadata: + name: kfops-hscale-down-combined + namespace: demo +spec: + type: HorizontalScaling + databaseRef: + name: kafka-dev + horizontalScaling: + node: 2 +``` + +Here, + +- `spec.databaseRef.name` specifies that we are performing horizontal scaling down operation on `kafka-dev` cluster. +- `spec.type` specifies that we are performing `HorizontalScaling` on kafka. +- `spec.horizontalScaling.node` specifies the desired replicas after scaling. + +Let's create the `KafkaOpsRequest` CR we have shown above, + +```bash +$ kubectl apply -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/kafka/scaling/horizontal-scaling/kafka-hscale-down-combined.yaml +kafkaopsrequest.ops.kubedb.com/kfops-hscale-down-combined created +``` + +#### Verify Combined cluster replicas scaled down successfully + +If everything goes well, `KubeDB` Ops-manager operator will update the replicas of `Kafka` object and related `PetSets` and `Pods`. + +Let's wait for `KafkaOpsRequest` to be `Successful`. Run the following command to watch `KafkaOpsRequest` CR, + +```bash +$ watch kubectl get kafkaopsrequest -n demo +NAME TYPE STATUS AGE +kfops-hscale-down-combined HorizontalScaling Successful 2m32s +``` + +We can see from the above output that the `KafkaOpsRequest` has succeeded. If we describe the `KafkaOpsRequest` we will get an overview of the steps that were followed to scale the cluster. 
+ +```bash +$ kubectl describe kafkaopsrequests -n demo kfops-hscale-down-combined +Name: kfops-hscale-down-combined +Namespace: demo +Labels: +Annotations: +API Version: ops.kubedb.com/v1alpha1 +Kind: KafkaOpsRequest +Metadata: + Creation Timestamp: 2024-08-02T10:46:39Z + Generation: 1 + Resource Version: 354924 + UID: f1a0b85d-1a86-463c-a3e4-72947badd108 +Spec: + Apply: IfReady + Database Ref: + Name: kafka-dev + Horizontal Scaling: + Node: 2 + Type: HorizontalScaling +Status: + Conditions: + Last Transition Time: 2024-08-02T10:46:39Z + Message: Kafka ops-request has started to horizontally scaling the nodes + Observed Generation: 1 + Reason: HorizontalScaling + Status: True + Type: HorizontalScaling + Last Transition Time: 2024-08-02T10:47:07Z + Message: Successfully Scaled Down Server Node + Observed Generation: 1 + Reason: ScaleDownCombined + Status: True + Type: ScaleDownCombined + Last Transition Time: 2024-08-02T10:46:57Z + Message: reassign partitions; ConditionStatus:True + Observed Generation: 1 + Status: True + Type: ReassignPartitions + Last Transition Time: 2024-08-02T10:46:57Z + Message: is pet set patched; ConditionStatus:True + Observed Generation: 1 + Status: True + Type: IsPetSetPatched + Last Transition Time: 2024-08-02T10:46:57Z + Message: get pod; ConditionStatus:True + Observed Generation: 1 + Status: True + Type: GetPod + Last Transition Time: 2024-08-02T10:46:58Z + Message: delete pvc; ConditionStatus:True + Observed Generation: 1 + Status: True + Type: DeletePvc + Last Transition Time: 2024-08-02T10:47:02Z + Message: get pvc; ConditionStatus:True + Observed Generation: 1 + Status: True + Type: GetPvc + Last Transition Time: 2024-08-02T10:47:13Z + Message: successfully reconciled the Kafka with modified node + Observed Generation: 1 + Reason: UpdatePetSets + Status: True + Type: UpdatePetSets + Last Transition Time: 2024-08-02T10:47:18Z + Message: get pod; ConditionStatus:True; PodName:kafka-dev-0 + Observed Generation: 1 + Status: True + Type: GetPod--kafka-dev-0 + Last Transition Time: 2024-08-02T10:47:18Z + Message: evict pod; ConditionStatus:True; PodName:kafka-dev-0 + Observed Generation: 1 + Status: True + Type: EvictPod--kafka-dev-0 + Last Transition Time: 2024-08-02T10:47:28Z + Message: check pod running; ConditionStatus:True; PodName:kafka-dev-0 + Observed Generation: 1 + Status: True + Type: CheckPodRunning--kafka-dev-0 + Last Transition Time: 2024-08-02T10:47:33Z + Message: get pod; ConditionStatus:True; PodName:kafka-dev-1 + Observed Generation: 1 + Status: True + Type: GetPod--kafka-dev-1 + Last Transition Time: 2024-08-02T10:47:33Z + Message: evict pod; ConditionStatus:True; PodName:kafka-dev-1 + Observed Generation: 1 + Status: True + Type: EvictPod--kafka-dev-1 + Last Transition Time: 2024-08-02T10:48:53Z + Message: check pod running; ConditionStatus:True; PodName:kafka-dev-1 + Observed Generation: 1 + Status: True + Type: CheckPodRunning--kafka-dev-1 + Last Transition Time: 2024-08-02T10:48:58Z + Message: Successfully restarted all nodes + Observed Generation: 1 + Reason: RestartNodes + Status: True + Type: RestartNodes + Last Transition Time: 2024-08-02T10:48:58Z + Message: Successfully completed horizontally scale kafka cluster + Observed Generation: 1 + Reason: Successful + Status: True + Type: Successful + Observed Generation: 1 + Phase: Successful +Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Normal Starting 2m39s KubeDB Ops-manager Operator Start processing for KafkaOpsRequest: demo/kfops-hscale-down-combined + 
Normal Starting 2m39s KubeDB Ops-manager Operator Pausing Kafka databse: demo/kafka-dev + Normal Successful 2m39s KubeDB Ops-manager Operator Successfully paused Kafka database: demo/kafka-dev for KafkaOpsRequest: kfops-hscale-down-combined + Warning reassign partitions; ConditionStatus:True 2m21s KubeDB Ops-manager Operator reassign partitions; ConditionStatus:True + Warning is pet set patched; ConditionStatus:True 2m21s KubeDB Ops-manager Operator is pet set patched; ConditionStatus:True + Warning get pod; ConditionStatus:True 2m21s KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning delete pvc; ConditionStatus:True 2m20s KubeDB Ops-manager Operator delete pvc; ConditionStatus:True + Warning get pvc; ConditionStatus:False 2m20s KubeDB Ops-manager Operator get pvc; ConditionStatus:False + Warning get pod; ConditionStatus:True 2m16s KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning delete pvc; ConditionStatus:True 2m16s KubeDB Ops-manager Operator delete pvc; ConditionStatus:True + Warning get pvc; ConditionStatus:True 2m16s KubeDB Ops-manager Operator get pvc; ConditionStatus:True + Normal ScaleDownCombined 2m11s KubeDB Ops-manager Operator Successfully Scaled Down Server Node + Normal UpdatePetSets 2m5s KubeDB Ops-manager Operator successfully reconciled the Kafka with modified node + Warning get pod; ConditionStatus:True; PodName:kafka-dev-0 2m KubeDB Ops-manager Operator get pod; ConditionStatus:True; PodName:kafka-dev-0 + Warning evict pod; ConditionStatus:True; PodName:kafka-dev-0 2m KubeDB Ops-manager Operator evict pod; ConditionStatus:True; PodName:kafka-dev-0 + Warning check pod running; ConditionStatus:False; PodName:kafka-dev-0 115s KubeDB Ops-manager Operator check pod running; ConditionStatus:False; PodName:kafka-dev-0 + Warning check pod running; ConditionStatus:True; PodName:kafka-dev-0 110s KubeDB Ops-manager Operator check pod running; ConditionStatus:True; PodName:kafka-dev-0 + Warning get pod; ConditionStatus:True; PodName:kafka-dev-1 105s KubeDB Ops-manager Operator get pod; ConditionStatus:True; PodName:kafka-dev-1 + Warning evict pod; ConditionStatus:True; PodName:kafka-dev-1 105s KubeDB Ops-manager Operator evict pod; ConditionStatus:True; PodName:kafka-dev-1 + Warning check pod running; ConditionStatus:False; PodName:kafka-dev-1 100s KubeDB Ops-manager Operator check pod running; ConditionStatus:False; PodName:kafka-dev-1 + Warning check pod running; ConditionStatus:True; PodName:kafka-dev-1 25s KubeDB Ops-manager Operator check pod running; ConditionStatus:True; PodName:kafka-dev-1 + Normal RestartNodes 20s KubeDB Ops-manager Operator Successfully restarted all nodes + Normal Starting 20s KubeDB Ops-manager Operator Resuming Kafka database: demo/kafka-dev + Normal Successful 20s KubeDB Ops-manager Operator Successfully resumed Kafka database: demo/kafka-dev for KafkaOpsRequest: kfops-hscale-down-combined +``` + +Now, we are going to verify the number of replicas this cluster has from the Kafka object, number of pods the petset have, + +```bash +$ kubectl get kafka -n demo kafka-dev -o json | jq '.spec.replicas' +2 + +$ kubectl get petset -n demo kafka-dev -o json | jq '.spec.replicas' +2 +``` + +Now let's connect to a kafka instance and run a kafka internal command to check the number of replicas, + +```bash +$ kubectl exec -it -n demo kafka-dev-0 -- kafka-broker-api-versions.sh --bootstrap-server localhost:9092 --command-config config/clientauth.properties +kafka-dev-0.kafka-dev-pods.demo.svc.cluster.local:9092 (id: 0 rack: null) 
-> ( + Produce(0): 0 to 9 [usable: 9], + Fetch(1): 0 to 15 [usable: 15], + ListOffsets(2): 0 to 8 [usable: 8], + Metadata(3): 0 to 12 [usable: 12], + LeaderAndIsr(4): UNSUPPORTED, + StopReplica(5): UNSUPPORTED, + UpdateMetadata(6): UNSUPPORTED, + ControlledShutdown(7): UNSUPPORTED, + OffsetCommit(8): 0 to 8 [usable: 8], + OffsetFetch(9): 0 to 8 [usable: 8], + FindCoordinator(10): 0 to 4 [usable: 4], + JoinGroup(11): 0 to 9 [usable: 9], + Heartbeat(12): 0 to 4 [usable: 4], + LeaveGroup(13): 0 to 5 [usable: 5], + SyncGroup(14): 0 to 5 [usable: 5], + DescribeGroups(15): 0 to 5 [usable: 5], + ListGroups(16): 0 to 4 [usable: 4], + SaslHandshake(17): 0 to 1 [usable: 1], + ApiVersions(18): 0 to 3 [usable: 3], + CreateTopics(19): 0 to 7 [usable: 7], + DeleteTopics(20): 0 to 6 [usable: 6], + DeleteRecords(21): 0 to 2 [usable: 2], + InitProducerId(22): 0 to 4 [usable: 4], + OffsetForLeaderEpoch(23): 0 to 4 [usable: 4], + AddPartitionsToTxn(24): 0 to 4 [usable: 4], + AddOffsetsToTxn(25): 0 to 3 [usable: 3], + EndTxn(26): 0 to 3 [usable: 3], + WriteTxnMarkers(27): 0 to 1 [usable: 1], + TxnOffsetCommit(28): 0 to 3 [usable: 3], + DescribeAcls(29): 0 to 3 [usable: 3], + CreateAcls(30): 0 to 3 [usable: 3], + DeleteAcls(31): 0 to 3 [usable: 3], + DescribeConfigs(32): 0 to 4 [usable: 4], + AlterConfigs(33): 0 to 2 [usable: 2], + AlterReplicaLogDirs(34): 0 to 2 [usable: 2], + DescribeLogDirs(35): 0 to 4 [usable: 4], + SaslAuthenticate(36): 0 to 2 [usable: 2], + CreatePartitions(37): 0 to 3 [usable: 3], + CreateDelegationToken(38): 0 to 3 [usable: 3], + RenewDelegationToken(39): 0 to 2 [usable: 2], + ExpireDelegationToken(40): 0 to 2 [usable: 2], + DescribeDelegationToken(41): 0 to 3 [usable: 3], + DeleteGroups(42): 0 to 2 [usable: 2], + ElectLeaders(43): 0 to 2 [usable: 2], + IncrementalAlterConfigs(44): 0 to 1 [usable: 1], + AlterPartitionReassignments(45): 0 [usable: 0], + ListPartitionReassignments(46): 0 [usable: 0], + OffsetDelete(47): 0 [usable: 0], + DescribeClientQuotas(48): 0 to 1 [usable: 1], + AlterClientQuotas(49): 0 to 1 [usable: 1], + DescribeUserScramCredentials(50): 0 [usable: 0], + AlterUserScramCredentials(51): 0 [usable: 0], + DescribeQuorum(55): 0 to 1 [usable: 1], + AlterPartition(56): UNSUPPORTED, + UpdateFeatures(57): 0 to 1 [usable: 1], + Envelope(58): UNSUPPORTED, + DescribeCluster(60): 0 [usable: 0], + DescribeProducers(61): 0 [usable: 0], + UnregisterBroker(64): 0 [usable: 0], + DescribeTransactions(65): 0 [usable: 0], + ListTransactions(66): 0 [usable: 0], + AllocateProducerIds(67): UNSUPPORTED, + ConsumerGroupHeartbeat(68): UNSUPPORTED +) +kafka-dev-1.kafka-dev-pods.demo.svc.cluster.local:9092 (id: 1 rack: null) -> ( + Produce(0): 0 to 9 [usable: 9], + Fetch(1): 0 to 15 [usable: 15], + ListOffsets(2): 0 to 8 [usable: 8], + Metadata(3): 0 to 12 [usable: 12], + LeaderAndIsr(4): UNSUPPORTED, + StopReplica(5): UNSUPPORTED, + UpdateMetadata(6): UNSUPPORTED, + ControlledShutdown(7): UNSUPPORTED, + OffsetCommit(8): 0 to 8 [usable: 8], + OffsetFetch(9): 0 to 8 [usable: 8], + FindCoordinator(10): 0 to 4 [usable: 4], + JoinGroup(11): 0 to 9 [usable: 9], + Heartbeat(12): 0 to 4 [usable: 4], + LeaveGroup(13): 0 to 5 [usable: 5], + SyncGroup(14): 0 to 5 [usable: 5], + DescribeGroups(15): 0 to 5 [usable: 5], + ListGroups(16): 0 to 4 [usable: 4], + SaslHandshake(17): 0 to 1 [usable: 1], + ApiVersions(18): 0 to 3 [usable: 3], + CreateTopics(19): 0 to 7 [usable: 7], + DeleteTopics(20): 0 to 6 [usable: 6], + DeleteRecords(21): 0 to 2 [usable: 2], + InitProducerId(22): 0 to 4 [usable: 4], + 
OffsetForLeaderEpoch(23): 0 to 4 [usable: 4], + AddPartitionsToTxn(24): 0 to 4 [usable: 4], + AddOffsetsToTxn(25): 0 to 3 [usable: 3], + EndTxn(26): 0 to 3 [usable: 3], + WriteTxnMarkers(27): 0 to 1 [usable: 1], + TxnOffsetCommit(28): 0 to 3 [usable: 3], + DescribeAcls(29): 0 to 3 [usable: 3], + CreateAcls(30): 0 to 3 [usable: 3], + DeleteAcls(31): 0 to 3 [usable: 3], + DescribeConfigs(32): 0 to 4 [usable: 4], + AlterConfigs(33): 0 to 2 [usable: 2], + AlterReplicaLogDirs(34): 0 to 2 [usable: 2], + DescribeLogDirs(35): 0 to 4 [usable: 4], + SaslAuthenticate(36): 0 to 2 [usable: 2], + CreatePartitions(37): 0 to 3 [usable: 3], + CreateDelegationToken(38): 0 to 3 [usable: 3], + RenewDelegationToken(39): 0 to 2 [usable: 2], + ExpireDelegationToken(40): 0 to 2 [usable: 2], + DescribeDelegationToken(41): 0 to 3 [usable: 3], + DeleteGroups(42): 0 to 2 [usable: 2], + ElectLeaders(43): 0 to 2 [usable: 2], + IncrementalAlterConfigs(44): 0 to 1 [usable: 1], + AlterPartitionReassignments(45): 0 [usable: 0], + ListPartitionReassignments(46): 0 [usable: 0], + OffsetDelete(47): 0 [usable: 0], + DescribeClientQuotas(48): 0 to 1 [usable: 1], + AlterClientQuotas(49): 0 to 1 [usable: 1], + DescribeUserScramCredentials(50): 0 [usable: 0], + AlterUserScramCredentials(51): 0 [usable: 0], + DescribeQuorum(55): 0 to 1 [usable: 1], + AlterPartition(56): UNSUPPORTED, + UpdateFeatures(57): 0 to 1 [usable: 1], + Envelope(58): UNSUPPORTED, + DescribeCluster(60): 0 [usable: 0], + DescribeProducers(61): 0 [usable: 0], + UnregisterBroker(64): 0 [usable: 0], + DescribeTransactions(65): 0 [usable: 0], + ListTransactions(66): 0 [usable: 0], + AllocateProducerIds(67): UNSUPPORTED, + ConsumerGroupHeartbeat(68): UNSUPPORTED +) +``` + +From all the above outputs we can see that the replicas of the combined cluster is `2`. That means we have successfully scaled down the replicas of the Kafka combined cluster. + +## Cleaning Up + +To clean up the Kubernetes resources created by this tutorial, run: + +```bash +kubectl delete kf -n demo kafka-dev +kubectl delete kafkaopsrequest -n demo kfops-hscale-up-combined kfops-hscale-down-combined +kubectl delete ns demo +``` + +## Next Steps + +- Detail concepts of [Kafka object](/docs/guides/kafka/concepts/kafka.md). +- Different Kafka topology clustering modes [here](/docs/guides/kafka/clustering/_index.md). +- Monitor your Kafka with KubeDB using [out-of-the-box Prometheus operator](/docs/guides/kafka/monitoring/using-prometheus-operator.md). + +[//]: # (- Monitor your Kafka with KubeDB using [out-of-the-box builtin-Prometheus](/docs/guides/kafka/monitoring/using-builtin-prometheus.md).) +- Want to hack on KubeDB? Check our [contribution guidelines](/docs/CONTRIBUTING.md). diff --git a/docs/guides/kafka/scaling/horizontal-scaling/overview.md b/docs/guides/kafka/scaling/horizontal-scaling/overview.md new file mode 100644 index 0000000000..b492ef34de --- /dev/null +++ b/docs/guides/kafka/scaling/horizontal-scaling/overview.md @@ -0,0 +1,54 @@ +--- +title: Kafka Horizontal Scaling Overview +menu: + docs_{{ .version }}: + identifier: kf-horizontal-scaling-overview + name: Overview + parent: kf-horizontal-scaling + weight: 10 +menu_name: docs_{{ .version }} +section_menu_id: guides +--- + +> New to KubeDB? Please start [here](/docs/README.md). + +# Kafka Horizontal Scaling + +This guide will give an overview on how KubeDB Ops-manager operator scales up or down `Kafka` cluster replicas of various component such as Combined, Broker, Controller. 
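+
+In practice, each scaling operation is requested declaratively through a `KafkaOpsRequest`. As a quick orientation, a minimal request for a topology cluster might look like the sketch below (the object name and replica counts are illustrative only; full step-by-step guides follow in the next docs):
+
+```yaml
+apiVersion: ops.kubedb.com/v1alpha1
+kind: KafkaOpsRequest
+metadata:
+  name: sample-kafka-hscale    # illustrative name
+  namespace: demo
+spec:
+  type: HorizontalScaling
+  databaseRef:
+    name: kafka-prod           # the Kafka object to scale
+  horizontalScaling:
+    topology:
+      broker: 3                # desired broker replicas
+      controller: 3            # desired controller replicas
+```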
+ +## Before You Begin + +- You should be familiar with the following `KubeDB` concepts: + - [Kafka](/docs/guides/kafka/concepts/kafka.md) + - [KafkaOpsRequest](/docs/guides/kafka/concepts/kafkaopsrequest.md) + +## How Horizontal Scaling Process Works + +The following diagram shows how KubeDB Ops-manager operator scales up or down `Kafka` database components. Open the image in a new tab to see the enlarged version. + +
+  Horizontal scaling process of Kafka +
Fig: Horizontal scaling process of Kafka
+
+
+ +The Horizontal scaling process consists of the following steps: + +1. At first, a user creates a `Kafka` Custom Resource (CR). + +2. `KubeDB` Provisioner operator watches the `Kafka` CR. + +3. When the operator finds a `Kafka` CR, it creates the required number of `PetSets` and related necessary resources like secrets, services, etc. + +4. Then, in order to scale the various components (i.e. Combined, Broker, Controller) of the `Kafka` cluster, the user creates a `KafkaOpsRequest` CR with desired information. + +5. `KubeDB` Ops-manager operator watches the `KafkaOpsRequest` CR. + +6. When it finds a `KafkaOpsRequest` CR, it halts the `Kafka` object which is referred from the `KafkaOpsRequest`. So, the `KubeDB` Provisioner operator doesn't perform any operations on the `Kafka` object during the horizontal scaling process. + +7. Then the `KubeDB` Ops-manager operator will scale the related PetSet Pods to reach the expected number of replicas defined in the `KafkaOpsRequest` CR. + +8. After successfully scaling the replicas of the related PetSet Pods, the `KubeDB` Ops-manager operator updates the number of replicas in the `Kafka` object to reflect the updated state. + +9. After the successful scaling of the `Kafka` replicas, the `KubeDB` Ops-manager operator resumes the `Kafka` object so that the `KubeDB` Provisioner operator resumes its usual operations. + +In the next docs, we are going to show a step-by-step guide on horizontal scaling of Kafka cluster using `KafkaOpsRequest` CRD. \ No newline at end of file diff --git a/docs/guides/kafka/scaling/horizontal-scaling/topology.md b/docs/guides/kafka/scaling/horizontal-scaling/topology.md new file mode 100644 index 0000000000..2ec7ce487f --- /dev/null +++ b/docs/guides/kafka/scaling/horizontal-scaling/topology.md @@ -0,0 +1,1151 @@ +--- +title: Horizontal Scaling Topology Kafka +menu: + docs_{{ .version }}: + identifier: kf-horizontal-scaling-topology + name: Topology Cluster + parent: kf-horizontal-scaling + weight: 20 +menu_name: docs_{{ .version }} +section_menu_id: guides +--- + +> New to KubeDB? Please start [here](/docs/README.md). + +# Horizontal Scale Kafka Topology Cluster + +This guide will show you how to use `KubeDB` Ops-manager operator to scale the Kafka topology cluster. + +## Before You Begin + +- At first, you need to have a Kubernetes cluster, and the `kubectl` command-line tool must be configured to communicate with your cluster. If you do not already have a cluster, you can create one by using [kind](https://kind.sigs.k8s.io/docs/user/quick-start/). + +- Install `KubeDB` Provisioner and Ops-manager operator in your cluster following the steps [here](/docs/setup/README.md). + +- You should be familiar with the following `KubeDB` concepts: + - [Kafka](/docs/guides/kafka/concepts/kafka.md) + - [Topology](/docs/guides/kafka/clustering/topology-cluster/index.md) + - [KafkaOpsRequest](/docs/guides/kafka/concepts/kafkaopsrequest.md) + - [Horizontal Scaling Overview](/docs/guides/kafka/scaling/horizontal-scaling/overview.md) + +To keep everything isolated, we are going to use a separate namespace called `demo` throughout this tutorial. + +```bash +$ kubectl create ns demo +namespace/demo created +``` + +> **Note:** YAML files used in this tutorial are stored in [docs/examples/kafka](/docs/examples/kafka) directory of [kubedb/docs](https://github.com/kubedb/docs) repository.
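+
+Before moving on, it can also help to confirm that both operators are up and running. Assuming KubeDB was installed in the default `kubedb` namespace, a quick check looks like this (pod names will vary by installation):
+
+```bash
+$ kubectl get pods -n kubedb
+```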
+ +## Apply Horizontal Scaling on Topology Cluster + +Here, we are going to deploy a `Kafka` topology cluster using a version supported by the `KubeDB` operator. Then we are going to apply horizontal scaling on it. + +### Prepare Kafka Topology Cluster + +Now, we are going to deploy a `Kafka` topology cluster with version `3.6.1`. + +### Deploy Kafka Topology Cluster + +In this section, we are going to deploy a Kafka topology cluster. Then, in the next section we will scale the cluster using `KafkaOpsRequest` CRD. Below is the YAML of the `Kafka` CR that we are going to create, + +```yaml +apiVersion: kubedb.com/v1 +kind: Kafka +metadata: + name: kafka-prod + namespace: demo +spec: + version: 3.6.1 + topology: + broker: + replicas: 2 + storage: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi + storageClassName: standard + controller: + replicas: 2 + storage: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi + storageClassName: standard + storageType: Durable + deletionPolicy: WipeOut +``` + +Let's create the `Kafka` CR we have shown above, + +```bash +$ kubectl create -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/kafka/scaling/kafka-topology.yaml +kafka.kubedb.com/kafka-prod created +``` + +Now, wait until `kafka-prod` has status `Ready`, i.e., + +```bash +$ kubectl get kf -n demo -w +NAME TYPE VERSION STATUS AGE +kafka-prod kubedb.com/v1 3.6.1 Provisioning 0s +kafka-prod kubedb.com/v1 3.6.1 Provisioning 24s +. +. +kafka-prod kubedb.com/v1 3.6.1 Ready 92s +``` + +Let's check the number of replicas this cluster has from the Kafka object, and the number of replicas the PetSets have, + +**Broker Replicas** + +```bash +$ kubectl get kafka -n demo kafka-prod -o json | jq '.spec.topology.broker.replicas' +2 + +$ kubectl get petset -n demo kafka-prod-broker -o json | jq '.spec.replicas' +2 +``` + +**Controller Replicas** + +```bash +$ kubectl get kafka -n demo kafka-prod -o json | jq '.spec.topology.controller.replicas' +2 + +$ kubectl get petset -n demo kafka-prod-controller -o json | jq '.spec.replicas' +2 +``` + +We can see from the commands above that the cluster has 2 replicas for both broker and controller. + +Also, we can verify the replicas of the topology from an internal kafka command by exec-ing into a replica.
+ +Now let's exec to a broker instance and run a kafka internal command to check the number of replicas for broker and controller., + +**Broker** + +```bash +$ kubectl exec -it -n demo kafka-prod-broker-0 -- kafka-broker-api-versions.sh --bootstrap-server localhost:9092 --command-config config/clientauth.properties +kafka-prod-broker-0.kafka-prod-pods.demo.svc.cluster.local:9092 (id: 0 rack: null) -> ( + Produce(0): 0 to 9 [usable: 9], + Fetch(1): 0 to 15 [usable: 15], + ListOffsets(2): 0 to 8 [usable: 8], + Metadata(3): 0 to 12 [usable: 12], + LeaderAndIsr(4): UNSUPPORTED, + StopReplica(5): UNSUPPORTED, + UpdateMetadata(6): UNSUPPORTED, + ControlledShutdown(7): UNSUPPORTED, + OffsetCommit(8): 0 to 8 [usable: 8], + OffsetFetch(9): 0 to 8 [usable: 8], + FindCoordinator(10): 0 to 4 [usable: 4], + JoinGroup(11): 0 to 9 [usable: 9], + Heartbeat(12): 0 to 4 [usable: 4], + LeaveGroup(13): 0 to 5 [usable: 5], + SyncGroup(14): 0 to 5 [usable: 5], + DescribeGroups(15): 0 to 5 [usable: 5], + ListGroups(16): 0 to 4 [usable: 4], + SaslHandshake(17): 0 to 1 [usable: 1], + ApiVersions(18): 0 to 3 [usable: 3], + CreateTopics(19): 0 to 7 [usable: 7], + DeleteTopics(20): 0 to 6 [usable: 6], + DeleteRecords(21): 0 to 2 [usable: 2], + InitProducerId(22): 0 to 4 [usable: 4], + OffsetForLeaderEpoch(23): 0 to 4 [usable: 4], + AddPartitionsToTxn(24): 0 to 4 [usable: 4], + AddOffsetsToTxn(25): 0 to 3 [usable: 3], + EndTxn(26): 0 to 3 [usable: 3], + WriteTxnMarkers(27): 0 to 1 [usable: 1], + TxnOffsetCommit(28): 0 to 3 [usable: 3], + DescribeAcls(29): 0 to 3 [usable: 3], + CreateAcls(30): 0 to 3 [usable: 3], + DeleteAcls(31): 0 to 3 [usable: 3], + DescribeConfigs(32): 0 to 4 [usable: 4], + AlterConfigs(33): 0 to 2 [usable: 2], + AlterReplicaLogDirs(34): 0 to 2 [usable: 2], + DescribeLogDirs(35): 0 to 4 [usable: 4], + SaslAuthenticate(36): 0 to 2 [usable: 2], + CreatePartitions(37): 0 to 3 [usable: 3], + CreateDelegationToken(38): 0 to 3 [usable: 3], + RenewDelegationToken(39): 0 to 2 [usable: 2], + ExpireDelegationToken(40): 0 to 2 [usable: 2], + DescribeDelegationToken(41): 0 to 3 [usable: 3], + DeleteGroups(42): 0 to 2 [usable: 2], + ElectLeaders(43): 0 to 2 [usable: 2], + IncrementalAlterConfigs(44): 0 to 1 [usable: 1], + AlterPartitionReassignments(45): 0 [usable: 0], + ListPartitionReassignments(46): 0 [usable: 0], + OffsetDelete(47): 0 [usable: 0], + DescribeClientQuotas(48): 0 to 1 [usable: 1], + AlterClientQuotas(49): 0 to 1 [usable: 1], + DescribeUserScramCredentials(50): 0 [usable: 0], + AlterUserScramCredentials(51): 0 [usable: 0], + DescribeQuorum(55): 0 to 1 [usable: 1], + AlterPartition(56): UNSUPPORTED, + UpdateFeatures(57): 0 to 1 [usable: 1], + Envelope(58): UNSUPPORTED, + DescribeCluster(60): 0 [usable: 0], + DescribeProducers(61): 0 [usable: 0], + UnregisterBroker(64): 0 [usable: 0], + DescribeTransactions(65): 0 [usable: 0], + ListTransactions(66): 0 [usable: 0], + AllocateProducerIds(67): UNSUPPORTED, + ConsumerGroupHeartbeat(68): UNSUPPORTED +) +kafka-prod-broker-1.kafka-prod-pods.demo.svc.cluster.local:9092 (id: 1 rack: null) -> ( + Produce(0): 0 to 9 [usable: 9], + Fetch(1): 0 to 15 [usable: 15], + ListOffsets(2): 0 to 8 [usable: 8], + Metadata(3): 0 to 12 [usable: 12], + LeaderAndIsr(4): UNSUPPORTED, + StopReplica(5): UNSUPPORTED, + UpdateMetadata(6): UNSUPPORTED, + ControlledShutdown(7): UNSUPPORTED, + OffsetCommit(8): 0 to 8 [usable: 8], + OffsetFetch(9): 0 to 8 [usable: 8], + FindCoordinator(10): 0 to 4 [usable: 4], + JoinGroup(11): 0 to 9 [usable: 9], + Heartbeat(12): 0 to 4 [usable: 
4], + LeaveGroup(13): 0 to 5 [usable: 5], + SyncGroup(14): 0 to 5 [usable: 5], + DescribeGroups(15): 0 to 5 [usable: 5], + ListGroups(16): 0 to 4 [usable: 4], + SaslHandshake(17): 0 to 1 [usable: 1], + ApiVersions(18): 0 to 3 [usable: 3], + CreateTopics(19): 0 to 7 [usable: 7], + DeleteTopics(20): 0 to 6 [usable: 6], + DeleteRecords(21): 0 to 2 [usable: 2], + InitProducerId(22): 0 to 4 [usable: 4], + OffsetForLeaderEpoch(23): 0 to 4 [usable: 4], + AddPartitionsToTxn(24): 0 to 4 [usable: 4], + AddOffsetsToTxn(25): 0 to 3 [usable: 3], + EndTxn(26): 0 to 3 [usable: 3], + WriteTxnMarkers(27): 0 to 1 [usable: 1], + TxnOffsetCommit(28): 0 to 3 [usable: 3], + DescribeAcls(29): 0 to 3 [usable: 3], + CreateAcls(30): 0 to 3 [usable: 3], + DeleteAcls(31): 0 to 3 [usable: 3], + DescribeConfigs(32): 0 to 4 [usable: 4], + AlterConfigs(33): 0 to 2 [usable: 2], + AlterReplicaLogDirs(34): 0 to 2 [usable: 2], + DescribeLogDirs(35): 0 to 4 [usable: 4], + SaslAuthenticate(36): 0 to 2 [usable: 2], + CreatePartitions(37): 0 to 3 [usable: 3], + CreateDelegationToken(38): 0 to 3 [usable: 3], + RenewDelegationToken(39): 0 to 2 [usable: 2], + ExpireDelegationToken(40): 0 to 2 [usable: 2], + DescribeDelegationToken(41): 0 to 3 [usable: 3], + DeleteGroups(42): 0 to 2 [usable: 2], + ElectLeaders(43): 0 to 2 [usable: 2], + IncrementalAlterConfigs(44): 0 to 1 [usable: 1], + AlterPartitionReassignments(45): 0 [usable: 0], + ListPartitionReassignments(46): 0 [usable: 0], + OffsetDelete(47): 0 [usable: 0], + DescribeClientQuotas(48): 0 to 1 [usable: 1], + AlterClientQuotas(49): 0 to 1 [usable: 1], + DescribeUserScramCredentials(50): 0 [usable: 0], + AlterUserScramCredentials(51): 0 [usable: 0], + DescribeQuorum(55): 0 to 1 [usable: 1], + AlterPartition(56): UNSUPPORTED, + UpdateFeatures(57): 0 to 1 [usable: 1], + Envelope(58): UNSUPPORTED, + DescribeCluster(60): 0 [usable: 0], + DescribeProducers(61): 0 [usable: 0], + UnregisterBroker(64): 0 [usable: 0], + DescribeTransactions(65): 0 [usable: 0], + ListTransactions(66): 0 [usable: 0], + AllocateProducerIds(67): UNSUPPORTED, + ConsumerGroupHeartbeat(68): UNSUPPORTED +) +``` + +**Controller** + +```bash +$ kubectl exec -it -n demo kafka-prod-broker-0 -- kafka-metadata-quorum.sh --bootstrap-server localhost:9092 --command-config config/clientauth.properties describe --status | grep CurrentObservers +CurrentObservers: [0,1] +``` + +We can see from the above output that the kafka has 2 nodes for broker and 2 nodes for controller. + +We are now ready to apply the `KafkaOpsRequest` CR to scale this cluster. + +## Scale Up Replicas + +Here, we are going to scale up the replicas of the topology cluster to meet the desired number of replicas after scaling. + +#### Create KafkaOpsRequest + +In order to scale up the replicas of the topology cluster, we have to create a `KafkaOpsRequest` CR with our desired replicas. Below is the YAML of the `KafkaOpsRequest` CR that we are going to create, + +```yaml +apiVersion: ops.kubedb.com/v1alpha1 +kind: KafkaOpsRequest +metadata: + name: kfops-hscale-up-topology + namespace: demo +spec: + type: HorizontalScaling + databaseRef: + name: kafka-prod + horizontalScaling: + topology: + broker: 3 + controller: 3 +``` + +Here, + +- `spec.databaseRef.name` specifies that we are performing horizontal scaling operation on `kafka-prod` cluster. +- `spec.type` specifies that we are performing `HorizontalScaling` on kafka. +- `spec.horizontalScaling.topology.broker` specifies the desired replicas after scaling for broker. 
+- `spec.horizontalScaling.topology.controller` specifies the desired replicas after scaling for controller. + +Let's create the `KafkaOpsRequest` CR we have shown above, + +```bash +$ kubectl apply -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/kafka/scaling/horizontal-scaling/kafka-hscale-up-topology.yaml +kafkaopsrequest.ops.kubedb.com/kfops-hscale-up-topology created +``` + +> **Note:** If you want to scale down only broker or controller, you can specify the desired replicas for only broker or controller in the `KafkaOpsRequest` CR. You can specify one at a time. If you want to scale broker only, no node will need restart to apply the changes. But if you want to scale controller, all nodes will need restart to apply the changes. + +#### Verify Topology cluster replicas scaled up successfully + +If everything goes well, `KubeDB` Ops-manager operator will update the replicas of `Kafka` object and related `PetSets` and `Pods`. + +Let's wait for `KafkaOpsRequest` to be `Successful`. Run the following command to watch `KafkaOpsRequest` CR, + +```bash +$ watch kubectl get kafkaopsrequest -n demo +NAME TYPE STATUS AGE +kfops-hscale-up-topology HorizontalScaling Successful 106s +``` + +We can see from the above output that the `KafkaOpsRequest` has succeeded. If we describe the `KafkaOpsRequest` we will get an overview of the steps that were followed to scale the cluster. + +```bash +$ kubectl describe kafkaopsrequests -n demo kfops-hscale-up-topology +Name: kfops-hscale-up-topology +Namespace: demo +Labels: +Annotations: +API Version: ops.kubedb.com/v1alpha1 +Kind: KafkaOpsRequest +Metadata: + Creation Timestamp: 2024-08-02T11:02:51Z + Generation: 1 + Resource Version: 356503 + UID: 44e0db0c-2094-4c13-a3be-9ca680888545 +Spec: + Apply: IfReady + Database Ref: + Name: kafka-prod + Horizontal Scaling: + Topology: + Broker: 3 + Controller: 3 + Type: HorizontalScaling +Status: + Conditions: + Last Transition Time: 2024-08-02T11:02:51Z + Message: Kafka ops-request has started to horizontally scaling the nodes + Observed Generation: 1 + Reason: HorizontalScaling + Status: True + Type: HorizontalScaling + Last Transition Time: 2024-08-02T11:02:59Z + Message: get pod; ConditionStatus:True; PodName:kafka-prod-controller-0 + Observed Generation: 1 + Status: True + Type: GetPod--kafka-prod-controller-0 + Last Transition Time: 2024-08-02T11:03:00Z + Message: evict pod; ConditionStatus:True; PodName:kafka-prod-controller-0 + Observed Generation: 1 + Status: True + Type: EvictPod--kafka-prod-controller-0 + Last Transition Time: 2024-08-02T11:03:09Z + Message: check pod running; ConditionStatus:True; PodName:kafka-prod-controller-0 + Observed Generation: 1 + Status: True + Type: CheckPodRunning--kafka-prod-controller-0 + Last Transition Time: 2024-08-02T11:03:14Z + Message: get pod; ConditionStatus:True; PodName:kafka-prod-controller-1 + Observed Generation: 1 + Status: True + Type: GetPod--kafka-prod-controller-1 + Last Transition Time: 2024-08-02T11:03:14Z + Message: evict pod; ConditionStatus:True; PodName:kafka-prod-controller-1 + Observed Generation: 1 + Status: True + Type: EvictPod--kafka-prod-controller-1 + Last Transition Time: 2024-08-02T11:03:24Z + Message: check pod running; ConditionStatus:True; PodName:kafka-prod-controller-1 + Observed Generation: 1 + Status: True + Type: CheckPodRunning--kafka-prod-controller-1 + Last Transition Time: 2024-08-02T11:03:29Z + Message: get pod; ConditionStatus:True; PodName:kafka-prod-broker-0 + Observed Generation: 1 + Status: 
True + Type: GetPod--kafka-prod-broker-0 + Last Transition Time: 2024-08-02T11:03:30Z + Message: evict pod; ConditionStatus:True; PodName:kafka-prod-broker-0 + Observed Generation: 1 + Status: True + Type: EvictPod--kafka-prod-broker-0 + Last Transition Time: 2024-08-02T11:03:59Z + Message: check pod running; ConditionStatus:True; PodName:kafka-prod-broker-0 + Observed Generation: 1 + Status: True + Type: CheckPodRunning--kafka-prod-broker-0 + Last Transition Time: 2024-08-02T11:04:04Z + Message: get pod; ConditionStatus:True; PodName:kafka-prod-broker-1 + Observed Generation: 1 + Status: True + Type: GetPod--kafka-prod-broker-1 + Last Transition Time: 2024-08-02T11:04:05Z + Message: evict pod; ConditionStatus:True; PodName:kafka-prod-broker-1 + Observed Generation: 1 + Status: True + Type: EvictPod--kafka-prod-broker-1 + Last Transition Time: 2024-08-02T11:04:19Z + Message: check pod running; ConditionStatus:True; PodName:kafka-prod-broker-1 + Observed Generation: 1 + Status: True + Type: CheckPodRunning--kafka-prod-broker-1 + Last Transition Time: 2024-08-02T11:04:24Z + Message: Successfully restarted all nodes + Observed Generation: 1 + Reason: RestartNodes + Status: True + Type: RestartNodes + Last Transition Time: 2024-08-02T11:04:59Z + Message: Successfully Scaled Up Broker + Observed Generation: 1 + Reason: ScaleUpBroker + Status: True + Type: ScaleUpBroker + Last Transition Time: 2024-08-02T11:04:30Z + Message: patch pet setkafka-prod-broker; ConditionStatus:True + Observed Generation: 1 + Status: True + Type: PatchPetSetkafka-prod-broker + Last Transition Time: 2024-08-02T11:04:55Z + Message: node in cluster; ConditionStatus:True + Observed Generation: 1 + Status: True + Type: NodeInCluster + Last Transition Time: 2024-08-02T11:05:15Z + Message: Successfully Scaled Up Controller + Observed Generation: 1 + Reason: ScaleUpController + Status: True + Type: ScaleUpController + Last Transition Time: 2024-08-02T11:05:05Z + Message: patch pet setkafka-prod-controller; ConditionStatus:True + Observed Generation: 1 + Status: True + Type: PatchPetSetkafka-prod-controller + Last Transition Time: 2024-08-02T11:05:15Z + Message: Successfully completed horizontally scale kafka cluster + Observed Generation: 1 + Reason: Successful + Status: True + Type: Successful + Observed Generation: 1 + Phase: Successful +Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Normal Starting 4m19s KubeDB Ops-manager Operator Start processing for KafkaOpsRequest: demo/kfops-hscale-up-topology + Normal Starting 4m19s KubeDB Ops-manager Operator Pausing Kafka databse: demo/kafka-prod + Normal Successful 4m19s KubeDB Ops-manager Operator Successfully paused Kafka database: demo/kafka-prod for KafkaOpsRequest: kfops-hscale-up-topology + Warning get pod; ConditionStatus:True; PodName:kafka-prod-controller-0 4m11s KubeDB Ops-manager Operator get pod; ConditionStatus:True; PodName:kafka-prod-controller-0 + Warning evict pod; ConditionStatus:True; PodName:kafka-prod-controller-0 4m10s KubeDB Ops-manager Operator evict pod; ConditionStatus:True; PodName:kafka-prod-controller-0 + Warning check pod running; ConditionStatus:False; PodName:kafka-prod-controller-0 4m6s KubeDB Ops-manager Operator check pod running; ConditionStatus:False; PodName:kafka-prod-controller-0 + Warning check pod running; ConditionStatus:True; PodName:kafka-prod-controller-0 4m1s KubeDB Ops-manager Operator check pod running; ConditionStatus:True; PodName:kafka-prod-controller-0 + Warning get pod; ConditionStatus:True; 
PodName:kafka-prod-controller-1 3m56s KubeDB Ops-manager Operator get pod; ConditionStatus:True; PodName:kafka-prod-controller-1 + Warning evict pod; ConditionStatus:True; PodName:kafka-prod-controller-1 3m56s KubeDB Ops-manager Operator evict pod; ConditionStatus:True; PodName:kafka-prod-controller-1 + Warning check pod running; ConditionStatus:False; PodName:kafka-prod-controller-1 3m50s KubeDB Ops-manager Operator check pod running; ConditionStatus:False; PodName:kafka-prod-controller-1 + Warning check pod running; ConditionStatus:True; PodName:kafka-prod-controller-1 3m46s KubeDB Ops-manager Operator check pod running; ConditionStatus:True; PodName:kafka-prod-controller-1 + Warning get pod; ConditionStatus:True; PodName:kafka-prod-broker-0 3m41s KubeDB Ops-manager Operator get pod; ConditionStatus:True; PodName:kafka-prod-broker-0 + Warning evict pod; ConditionStatus:True; PodName:kafka-prod-broker-0 3m41s KubeDB Ops-manager Operator evict pod; ConditionStatus:True; PodName:kafka-prod-broker-0 + Warning check pod running; ConditionStatus:False; PodName:kafka-prod-broker-0 3m36s KubeDB Ops-manager Operator check pod running; ConditionStatus:False; PodName:kafka-prod-broker-0 + Warning check pod running; ConditionStatus:True; PodName:kafka-prod-broker-0 3m11s KubeDB Ops-manager Operator check pod running; ConditionStatus:True; PodName:kafka-prod-broker-0 + Warning get pod; ConditionStatus:True; PodName:kafka-prod-broker-1 3m6s KubeDB Ops-manager Operator get pod; ConditionStatus:True; PodName:kafka-prod-broker-1 + Warning evict pod; ConditionStatus:True; PodName:kafka-prod-broker-1 3m5s KubeDB Ops-manager Operator evict pod; ConditionStatus:True; PodName:kafka-prod-broker-1 + Warning check pod running; ConditionStatus:False; PodName:kafka-prod-broker-1 3m1s KubeDB Ops-manager Operator check pod running; ConditionStatus:False; PodName:kafka-prod-broker-1 + Warning check pod running; ConditionStatus:True; PodName:kafka-prod-broker-1 2m51s KubeDB Ops-manager Operator check pod running; ConditionStatus:True; PodName:kafka-prod-broker-1 + Normal RestartNodes 2m46s KubeDB Ops-manager Operator Successfully restarted all nodes + Warning patch pet setkafka-prod-broker; ConditionStatus:True 2m40s KubeDB Ops-manager Operator patch pet setkafka-prod-broker; ConditionStatus:True + Warning node in cluster; ConditionStatus:False 2m36s KubeDB Ops-manager Operator node in cluster; ConditionStatus:False + Warning node in cluster; ConditionStatus:True 2m15s KubeDB Ops-manager Operator node in cluster; ConditionStatus:True + Normal ScaleUpBroker 2m10s KubeDB Ops-manager Operator Successfully Scaled Up Broker + Warning patch pet setkafka-prod-controller; ConditionStatus:True 2m5s KubeDB Ops-manager Operator patch pet setkafka-prod-controller; ConditionStatus:True + Warning node in cluster; ConditionStatus:True 2m KubeDB Ops-manager Operator node in cluster; ConditionStatus:True + Normal ScaleUpController 115s KubeDB Ops-manager Operator Successfully Scaled Up Controller + Normal Starting 115s KubeDB Ops-manager Operator Resuming Kafka database: demo/kafka-prod + Normal Successful 115s KubeDB Ops-manager Operator Successfully resumed Kafka database: demo/kafka-prod for KafkaOpsRequest: kfops-hscale-up-topology +``` + +Now, we are going to verify the number of replicas this cluster has from the Kafka object, number of pods the petset have, + +**Broker Replicas** + +```bash +$ kubectl get kafka -n demo kafka-prod -o json | jq '.spec.topology.broker.replicas' +3 + +$ kubectl get petset -n demo kafka-prod-broker 
-o json | jq '.spec.replicas' +3 +``` + +Now let's connect to a kafka instance and run a kafka internal command to check the number of replicas of topology cluster for both broker and controller., + +**Broker** + +```bash +$ kubectl exec -it -n demo kafka-prod-broker-0 -- kafka-broker-api-versions.sh --bootstrap-server localhost:9092 --command-config config/clientauth.properties +kafka-prod-broker-0.kafka-prod-pods.demo.svc.cluster.local:9092 (id: 0 rack: null) -> ( + Produce(0): 0 to 9 [usable: 9], + Fetch(1): 0 to 15 [usable: 15], + ListOffsets(2): 0 to 8 [usable: 8], + Metadata(3): 0 to 12 [usable: 12], + LeaderAndIsr(4): UNSUPPORTED, + StopReplica(5): UNSUPPORTED, + UpdateMetadata(6): UNSUPPORTED, + ControlledShutdown(7): UNSUPPORTED, + OffsetCommit(8): 0 to 8 [usable: 8], + OffsetFetch(9): 0 to 8 [usable: 8], + FindCoordinator(10): 0 to 4 [usable: 4], + JoinGroup(11): 0 to 9 [usable: 9], + Heartbeat(12): 0 to 4 [usable: 4], + LeaveGroup(13): 0 to 5 [usable: 5], + SyncGroup(14): 0 to 5 [usable: 5], + DescribeGroups(15): 0 to 5 [usable: 5], + ListGroups(16): 0 to 4 [usable: 4], + SaslHandshake(17): 0 to 1 [usable: 1], + ApiVersions(18): 0 to 3 [usable: 3], + CreateTopics(19): 0 to 7 [usable: 7], + DeleteTopics(20): 0 to 6 [usable: 6], + DeleteRecords(21): 0 to 2 [usable: 2], + InitProducerId(22): 0 to 4 [usable: 4], + OffsetForLeaderEpoch(23): 0 to 4 [usable: 4], + AddPartitionsToTxn(24): 0 to 4 [usable: 4], + AddOffsetsToTxn(25): 0 to 3 [usable: 3], + EndTxn(26): 0 to 3 [usable: 3], + WriteTxnMarkers(27): 0 to 1 [usable: 1], + TxnOffsetCommit(28): 0 to 3 [usable: 3], + DescribeAcls(29): 0 to 3 [usable: 3], + CreateAcls(30): 0 to 3 [usable: 3], + DeleteAcls(31): 0 to 3 [usable: 3], + DescribeConfigs(32): 0 to 4 [usable: 4], + AlterConfigs(33): 0 to 2 [usable: 2], + AlterReplicaLogDirs(34): 0 to 2 [usable: 2], + DescribeLogDirs(35): 0 to 4 [usable: 4], + SaslAuthenticate(36): 0 to 2 [usable: 2], + CreatePartitions(37): 0 to 3 [usable: 3], + CreateDelegationToken(38): 0 to 3 [usable: 3], + RenewDelegationToken(39): 0 to 2 [usable: 2], + ExpireDelegationToken(40): 0 to 2 [usable: 2], + DescribeDelegationToken(41): 0 to 3 [usable: 3], + DeleteGroups(42): 0 to 2 [usable: 2], + ElectLeaders(43): 0 to 2 [usable: 2], + IncrementalAlterConfigs(44): 0 to 1 [usable: 1], + AlterPartitionReassignments(45): 0 [usable: 0], + ListPartitionReassignments(46): 0 [usable: 0], + OffsetDelete(47): 0 [usable: 0], + DescribeClientQuotas(48): 0 to 1 [usable: 1], + AlterClientQuotas(49): 0 to 1 [usable: 1], + DescribeUserScramCredentials(50): 0 [usable: 0], + AlterUserScramCredentials(51): 0 [usable: 0], + DescribeQuorum(55): 0 to 1 [usable: 1], + AlterPartition(56): UNSUPPORTED, + UpdateFeatures(57): 0 to 1 [usable: 1], + Envelope(58): UNSUPPORTED, + DescribeCluster(60): 0 [usable: 0], + DescribeProducers(61): 0 [usable: 0], + UnregisterBroker(64): 0 [usable: 0], + DescribeTransactions(65): 0 [usable: 0], + ListTransactions(66): 0 [usable: 0], + AllocateProducerIds(67): UNSUPPORTED, + ConsumerGroupHeartbeat(68): UNSUPPORTED +) +kafka-prod-broker-1.kafka-prod-pods.demo.svc.cluster.local:9092 (id: 1 rack: null) -> ( + Produce(0): 0 to 9 [usable: 9], + Fetch(1): 0 to 15 [usable: 15], + ListOffsets(2): 0 to 8 [usable: 8], + Metadata(3): 0 to 12 [usable: 12], + LeaderAndIsr(4): UNSUPPORTED, + StopReplica(5): UNSUPPORTED, + UpdateMetadata(6): UNSUPPORTED, + ControlledShutdown(7): UNSUPPORTED, + OffsetCommit(8): 0 to 8 [usable: 8], + OffsetFetch(9): 0 to 8 [usable: 8], + FindCoordinator(10): 0 to 4 [usable: 4], + 
JoinGroup(11): 0 to 9 [usable: 9], + Heartbeat(12): 0 to 4 [usable: 4], + LeaveGroup(13): 0 to 5 [usable: 5], + SyncGroup(14): 0 to 5 [usable: 5], + DescribeGroups(15): 0 to 5 [usable: 5], + ListGroups(16): 0 to 4 [usable: 4], + SaslHandshake(17): 0 to 1 [usable: 1], + ApiVersions(18): 0 to 3 [usable: 3], + CreateTopics(19): 0 to 7 [usable: 7], + DeleteTopics(20): 0 to 6 [usable: 6], + DeleteRecords(21): 0 to 2 [usable: 2], + InitProducerId(22): 0 to 4 [usable: 4], + OffsetForLeaderEpoch(23): 0 to 4 [usable: 4], + AddPartitionsToTxn(24): 0 to 4 [usable: 4], + AddOffsetsToTxn(25): 0 to 3 [usable: 3], + EndTxn(26): 0 to 3 [usable: 3], + WriteTxnMarkers(27): 0 to 1 [usable: 1], + TxnOffsetCommit(28): 0 to 3 [usable: 3], + DescribeAcls(29): 0 to 3 [usable: 3], + CreateAcls(30): 0 to 3 [usable: 3], + DeleteAcls(31): 0 to 3 [usable: 3], + DescribeConfigs(32): 0 to 4 [usable: 4], + AlterConfigs(33): 0 to 2 [usable: 2], + AlterReplicaLogDirs(34): 0 to 2 [usable: 2], + DescribeLogDirs(35): 0 to 4 [usable: 4], + SaslAuthenticate(36): 0 to 2 [usable: 2], + CreatePartitions(37): 0 to 3 [usable: 3], + CreateDelegationToken(38): 0 to 3 [usable: 3], + RenewDelegationToken(39): 0 to 2 [usable: 2], + ExpireDelegationToken(40): 0 to 2 [usable: 2], + DescribeDelegationToken(41): 0 to 3 [usable: 3], + DeleteGroups(42): 0 to 2 [usable: 2], + ElectLeaders(43): 0 to 2 [usable: 2], + IncrementalAlterConfigs(44): 0 to 1 [usable: 1], + AlterPartitionReassignments(45): 0 [usable: 0], + ListPartitionReassignments(46): 0 [usable: 0], + OffsetDelete(47): 0 [usable: 0], + DescribeClientQuotas(48): 0 to 1 [usable: 1], + AlterClientQuotas(49): 0 to 1 [usable: 1], + DescribeUserScramCredentials(50): 0 [usable: 0], + AlterUserScramCredentials(51): 0 [usable: 0], + DescribeQuorum(55): 0 to 1 [usable: 1], + AlterPartition(56): UNSUPPORTED, + UpdateFeatures(57): 0 to 1 [usable: 1], + Envelope(58): UNSUPPORTED, + DescribeCluster(60): 0 [usable: 0], + DescribeProducers(61): 0 [usable: 0], + UnregisterBroker(64): 0 [usable: 0], + DescribeTransactions(65): 0 [usable: 0], + ListTransactions(66): 0 [usable: 0], + AllocateProducerIds(67): UNSUPPORTED, + ConsumerGroupHeartbeat(68): UNSUPPORTED +) +kafka-prod-broker-2.kafka-prod-pods.demo.svc.cluster.local:9092 (id: 2 rack: null) -> ( + Produce(0): 0 to 9 [usable: 9], + Fetch(1): 0 to 15 [usable: 15], + ListOffsets(2): 0 to 8 [usable: 8], + Metadata(3): 0 to 12 [usable: 12], + LeaderAndIsr(4): UNSUPPORTED, + StopReplica(5): UNSUPPORTED, + UpdateMetadata(6): UNSUPPORTED, + ControlledShutdown(7): UNSUPPORTED, + OffsetCommit(8): 0 to 8 [usable: 8], + OffsetFetch(9): 0 to 8 [usable: 8], + FindCoordinator(10): 0 to 4 [usable: 4], + JoinGroup(11): 0 to 9 [usable: 9], + Heartbeat(12): 0 to 4 [usable: 4], + LeaveGroup(13): 0 to 5 [usable: 5], + SyncGroup(14): 0 to 5 [usable: 5], + DescribeGroups(15): 0 to 5 [usable: 5], + ListGroups(16): 0 to 4 [usable: 4], + SaslHandshake(17): 0 to 1 [usable: 1], + ApiVersions(18): 0 to 3 [usable: 3], + CreateTopics(19): 0 to 7 [usable: 7], + DeleteTopics(20): 0 to 6 [usable: 6], + DeleteRecords(21): 0 to 2 [usable: 2], + InitProducerId(22): 0 to 4 [usable: 4], + OffsetForLeaderEpoch(23): 0 to 4 [usable: 4], + AddPartitionsToTxn(24): 0 to 4 [usable: 4], + AddOffsetsToTxn(25): 0 to 3 [usable: 3], + EndTxn(26): 0 to 3 [usable: 3], + WriteTxnMarkers(27): 0 to 1 [usable: 1], + TxnOffsetCommit(28): 0 to 3 [usable: 3], + DescribeAcls(29): 0 to 3 [usable: 3], + CreateAcls(30): 0 to 3 [usable: 3], + DeleteAcls(31): 0 to 3 [usable: 3], + DescribeConfigs(32): 0 to 4 
[usable: 4], + AlterConfigs(33): 0 to 2 [usable: 2], + AlterReplicaLogDirs(34): 0 to 2 [usable: 2], + DescribeLogDirs(35): 0 to 4 [usable: 4], + SaslAuthenticate(36): 0 to 2 [usable: 2], + CreatePartitions(37): 0 to 3 [usable: 3], + CreateDelegationToken(38): 0 to 3 [usable: 3], + RenewDelegationToken(39): 0 to 2 [usable: 2], + ExpireDelegationToken(40): 0 to 2 [usable: 2], + DescribeDelegationToken(41): 0 to 3 [usable: 3], + DeleteGroups(42): 0 to 2 [usable: 2], + ElectLeaders(43): 0 to 2 [usable: 2], + IncrementalAlterConfigs(44): 0 to 1 [usable: 1], + AlterPartitionReassignments(45): 0 [usable: 0], + ListPartitionReassignments(46): 0 [usable: 0], + OffsetDelete(47): 0 [usable: 0], + DescribeClientQuotas(48): 0 to 1 [usable: 1], + AlterClientQuotas(49): 0 to 1 [usable: 1], + DescribeUserScramCredentials(50): 0 [usable: 0], + AlterUserScramCredentials(51): 0 [usable: 0], + DescribeQuorum(55): 0 to 1 [usable: 1], + AlterPartition(56): UNSUPPORTED, + UpdateFeatures(57): 0 to 1 [usable: 1], + Envelope(58): UNSUPPORTED, + DescribeCluster(60): 0 [usable: 0], + DescribeProducers(61): 0 [usable: 0], + UnregisterBroker(64): 0 [usable: 0], + DescribeTransactions(65): 0 [usable: 0], + ListTransactions(66): 0 [usable: 0], + AllocateProducerIds(67): UNSUPPORTED, + ConsumerGroupHeartbeat(68): UNSUPPORTED +) +``` + +**Controller** + +```bash +$ kubectl exec -it -n demo kafka-prod-broker-0 -- kafka-metadata-quorum.sh --bootstrap-server localhost:9092 --command-config config/clientauth.properties describe --status | grep CurrentObservers +CurrentObservers: [0,1,2] +``` + +From all the above outputs we can see that the both brokers and controller of the topology kafka is `3`. That means we have successfully scaled up the replicas of the Kafka topology cluster. + +### Scale Down Replicas + +Here, we are going to scale down the replicas of the kafka topology cluster to meet the desired number of replicas after scaling. + +#### Create KafkaOpsRequest + +In order to scale down the replicas of the kafka topology cluster, we have to create a `KafkaOpsRequest` CR with our desired replicas. Below is the YAML of the `KafkaOpsRequest` CR that we are going to create, + +```yaml +apiVersion: ops.kubedb.com/v1alpha1 +kind: KafkaOpsRequest +metadata: + name: kfops-hscale-down-topology + namespace: demo +spec: + type: HorizontalScaling + databaseRef: + name: kafka-prod + horizontalScaling: + topology: + broker: 2 + controller: 2 +``` + +Here, + +- `spec.databaseRef.name` specifies that we are performing horizontal scaling down operation on `kafka-prod` cluster. +- `spec.type` specifies that we are performing `HorizontalScaling` on kafka. +- `spec.horizontalScaling.topology.broker` specifies the desired replicas after scaling for the broker nodes. +- `spec.horizontalScaling.topology.controller` specifies the desired replicas after scaling for the controller nodes. + +Let's create the `KafkaOpsRequest` CR we have shown above, + +```bash +$ kubectl apply -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/kafka/scaling/horizontal-scaling/kafka-hscale-down-topology.yaml +kafkaopsrequest.ops.kubedb.com/kfops-hscale-down-topology created +``` + +#### Verify Topology cluster replicas scaled down successfully + +If everything goes well, `KubeDB` Ops-manager operator will update the replicas of `Kafka` object and related `PetSets` and `Pods`. + +Let's wait for `KafkaOpsRequest` to be `Successful`. 
Run the following command to watch `KafkaOpsRequest` CR, + +```bash +$ watch kubectl get kafkaopsrequest -n demo +NAME TYPE STATUS AGE +kfops-hscale-down-topology HorizontalScaling Successful 2m32s +``` + +We can see from the above output that the `KafkaOpsRequest` has succeeded. If we describe the `KafkaOpsRequest` we will get an overview of the steps that were followed to scale the cluster. + +```bash +$ kubectl describe kafkaopsrequests -n demo kfops-hscale-down-topology +Name: kfops-hscale-down-topology +Namespace: demo +Labels: +Annotations: +API Version: ops.kubedb.com/v1alpha1 +Kind: KafkaOpsRequest +Metadata: + Creation Timestamp: 2024-08-02T11:14:18Z + Generation: 1 + Resource Version: 357545 + UID: b786d791-6ba8-4f1c-ade8-9443e049cede +Spec: + Apply: IfReady + Database Ref: + Name: kafka-prod + Horizontal Scaling: + Topology: + Broker: 2 + Controller: 2 + Type: HorizontalScaling +Status: + Conditions: + Last Transition Time: 2024-08-02T11:14:18Z + Message: Kafka ops-request has started to horizontally scaling the nodes + Observed Generation: 1 + Reason: HorizontalScaling + Status: True + Type: HorizontalScaling + Last Transition Time: 2024-08-02T11:14:46Z + Message: Successfully Scaled Down Broker + Observed Generation: 1 + Reason: ScaleDownBroker + Status: True + Type: ScaleDownBroker + Last Transition Time: 2024-08-02T11:14:36Z + Message: reassign partitions; ConditionStatus:True + Observed Generation: 1 + Status: True + Type: ReassignPartitions + Last Transition Time: 2024-08-02T11:14:36Z + Message: is pet set patched; ConditionStatus:True + Observed Generation: 1 + Status: True + Type: IsPetSetPatched + Last Transition Time: 2024-08-02T11:14:37Z + Message: get pod; ConditionStatus:True + Observed Generation: 1 + Status: True + Type: GetPod + Last Transition Time: 2024-08-02T11:14:37Z + Message: delete pvc; ConditionStatus:True + Observed Generation: 1 + Status: True + Type: DeletePvc + Last Transition Time: 2024-08-02T11:15:26Z + Message: get pvc; ConditionStatus:True + Observed Generation: 1 + Status: True + Type: GetPvc + Last Transition Time: 2024-08-02T11:15:31Z + Message: Successfully Scaled Down Controller + Observed Generation: 1 + Reason: ScaleDownController + Status: True + Type: ScaleDownController + Last Transition Time: 2024-08-02T11:15:38Z + Message: successfully reconciled the Kafka with modified node + Observed Generation: 1 + Reason: UpdatePetSets + Status: True + Type: UpdatePetSets + Last Transition Time: 2024-08-02T11:15:43Z + Message: get pod; ConditionStatus:True; PodName:kafka-prod-controller-0 + Observed Generation: 1 + Status: True + Type: GetPod--kafka-prod-controller-0 + Last Transition Time: 2024-08-02T11:15:44Z + Message: evict pod; ConditionStatus:True; PodName:kafka-prod-controller-0 + Observed Generation: 1 + Status: True + Type: EvictPod--kafka-prod-controller-0 + Last Transition Time: 2024-08-02T11:15:53Z + Message: check pod running; ConditionStatus:True; PodName:kafka-prod-controller-0 + Observed Generation: 1 + Status: True + Type: CheckPodRunning--kafka-prod-controller-0 + Last Transition Time: 2024-08-02T11:15:58Z + Message: get pod; ConditionStatus:True; PodName:kafka-prod-controller-1 + Observed Generation: 1 + Status: True + Type: GetPod--kafka-prod-controller-1 + Last Transition Time: 2024-08-02T11:15:58Z + Message: evict pod; ConditionStatus:True; PodName:kafka-prod-controller-1 + Observed Generation: 1 + Status: True + Type: EvictPod--kafka-prod-controller-1 + Last Transition Time: 2024-08-02T11:16:08Z + Message: check pod running; 
ConditionStatus:True; PodName:kafka-prod-controller-1 + Observed Generation: 1 + Status: True + Type: CheckPodRunning--kafka-prod-controller-1 + Last Transition Time: 2024-08-02T11:16:13Z + Message: get pod; ConditionStatus:True; PodName:kafka-prod-broker-0 + Observed Generation: 1 + Status: True + Type: GetPod--kafka-prod-broker-0 + Last Transition Time: 2024-08-02T11:16:13Z + Message: evict pod; ConditionStatus:True; PodName:kafka-prod-broker-0 + Observed Generation: 1 + Status: True + Type: EvictPod--kafka-prod-broker-0 + Last Transition Time: 2024-08-02T11:16:58Z + Message: check pod running; ConditionStatus:True; PodName:kafka-prod-broker-0 + Observed Generation: 1 + Status: True + Type: CheckPodRunning--kafka-prod-broker-0 + Last Transition Time: 2024-08-02T11:17:03Z + Message: get pod; ConditionStatus:True; PodName:kafka-prod-broker-1 + Observed Generation: 1 + Status: True + Type: GetPod--kafka-prod-broker-1 + Last Transition Time: 2024-08-02T11:17:03Z + Message: evict pod; ConditionStatus:True; PodName:kafka-prod-broker-1 + Observed Generation: 1 + Status: True + Type: EvictPod--kafka-prod-broker-1 + Last Transition Time: 2024-08-02T11:17:13Z + Message: check pod running; ConditionStatus:True; PodName:kafka-prod-broker-1 + Observed Generation: 1 + Status: True + Type: CheckPodRunning--kafka-prod-broker-1 + Last Transition Time: 2024-08-02T11:17:18Z + Message: Successfully restarted all nodes + Observed Generation: 1 + Reason: RestartNodes + Status: True + Type: RestartNodes + Last Transition Time: 2024-08-02T11:17:19Z + Message: Successfully completed horizontally scale kafka cluster + Observed Generation: 1 + Reason: Successful + Status: True + Type: Successful + Observed Generation: 1 + Phase: Successful +Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Normal Starting 8m35s KubeDB Ops-manager Operator Start processing for KafkaOpsRequest: demo/kfops-hscale-down-topology + Normal Starting 8m35s KubeDB Ops-manager Operator Pausing Kafka databse: demo/kafka-prod + Normal Successful 8m35s KubeDB Ops-manager Operator Successfully paused Kafka database: demo/kafka-prod for KafkaOpsRequest: kfops-hscale-down-topology + Warning reassign partitions; ConditionStatus:True 8m17s KubeDB Ops-manager Operator reassign partitions; ConditionStatus:True + Warning is pet set patched; ConditionStatus:True 8m17s KubeDB Ops-manager Operator is pet set patched; ConditionStatus:True + Warning get pod; ConditionStatus:True 8m16s KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning delete pvc; ConditionStatus:True 8m16s KubeDB Ops-manager Operator delete pvc; ConditionStatus:True + Warning get pvc; ConditionStatus:False 8m16s KubeDB Ops-manager Operator get pvc; ConditionStatus:False + Warning get pod; ConditionStatus:True 8m12s KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning delete pvc; ConditionStatus:True 8m12s KubeDB Ops-manager Operator delete pvc; ConditionStatus:True + Warning get pvc; ConditionStatus:True 8m12s KubeDB Ops-manager Operator get pvc; ConditionStatus:True + Normal ScaleDownBroker 8m7s KubeDB Ops-manager Operator Successfully Scaled Down Broker + Warning reassign partitions; ConditionStatus:True 7m31s KubeDB Ops-manager Operator reassign partitions; ConditionStatus:True + Warning is pet set patched; ConditionStatus:True 7m31s KubeDB Ops-manager Operator is pet set patched; ConditionStatus:True + Warning get pod; ConditionStatus:True 7m31s KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning delete pvc; 
ConditionStatus:True 7m31s KubeDB Ops-manager Operator delete pvc; ConditionStatus:True + Warning get pvc; ConditionStatus:False 7m31s KubeDB Ops-manager Operator get pvc; ConditionStatus:False + Warning get pod; ConditionStatus:True 7m27s KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning delete pvc; ConditionStatus:True 7m27s KubeDB Ops-manager Operator delete pvc; ConditionStatus:True + Warning get pvc; ConditionStatus:True 7m27s KubeDB Ops-manager Operator get pvc; ConditionStatus:True + Normal ScaleDownController 7m22s KubeDB Ops-manager Operator Successfully Scaled Down Controller + Normal UpdatePetSets 7m15s KubeDB Ops-manager Operator successfully reconciled the Kafka with modified node + Warning get pod; ConditionStatus:True; PodName:kafka-prod-controller-0 7m10s KubeDB Ops-manager Operator get pod; ConditionStatus:True; PodName:kafka-prod-controller-0 + Warning evict pod; ConditionStatus:True; PodName:kafka-prod-controller-0 7m9s KubeDB Ops-manager Operator evict pod; ConditionStatus:True; PodName:kafka-prod-controller-0 + Warning check pod running; ConditionStatus:False; PodName:kafka-prod-controller-0 7m5s KubeDB Ops-manager Operator check pod running; ConditionStatus:False; PodName:kafka-prod-controller-0 + Warning check pod running; ConditionStatus:True; PodName:kafka-prod-controller-0 7m KubeDB Ops-manager Operator check pod running; ConditionStatus:True; PodName:kafka-prod-controller-0 + Warning get pod; ConditionStatus:True; PodName:kafka-prod-controller-1 6m55s KubeDB Ops-manager Operator get pod; ConditionStatus:True; PodName:kafka-prod-controller-1 + Warning evict pod; ConditionStatus:True; PodName:kafka-prod-controller-1 6m55s KubeDB Ops-manager Operator evict pod; ConditionStatus:True; PodName:kafka-prod-controller-1 + Warning check pod running; ConditionStatus:False; PodName:kafka-prod-controller-1 6m50s KubeDB Ops-manager Operator check pod running; ConditionStatus:False; PodName:kafka-prod-controller-1 + Warning check pod running; ConditionStatus:True; PodName:kafka-prod-controller-1 6m45s KubeDB Ops-manager Operator check pod running; ConditionStatus:True; PodName:kafka-prod-controller-1 + Warning get pod; ConditionStatus:True; PodName:kafka-prod-broker-0 6m40s KubeDB Ops-manager Operator get pod; ConditionStatus:True; PodName:kafka-prod-broker-0 + Warning evict pod; ConditionStatus:True; PodName:kafka-prod-broker-0 6m40s KubeDB Ops-manager Operator evict pod; ConditionStatus:True; PodName:kafka-prod-broker-0 + Warning check pod running; ConditionStatus:False; PodName:kafka-prod-broker-0 6m35s KubeDB Ops-manager Operator check pod running; ConditionStatus:False; PodName:kafka-prod-broker-0 + Warning check pod running; ConditionStatus:True; PodName:kafka-prod-broker-0 5m55s KubeDB Ops-manager Operator check pod running; ConditionStatus:True; PodName:kafka-prod-broker-0 + Warning get pod; ConditionStatus:True; PodName:kafka-prod-broker-1 5m50s KubeDB Ops-manager Operator get pod; ConditionStatus:True; PodName:kafka-prod-broker-1 + Warning evict pod; ConditionStatus:True; PodName:kafka-prod-broker-1 5m50s KubeDB Ops-manager Operator evict pod; ConditionStatus:True; PodName:kafka-prod-broker-1 + Warning check pod running; ConditionStatus:False; PodName:kafka-prod-broker-1 5m45s KubeDB Ops-manager Operator check pod running; ConditionStatus:False; PodName:kafka-prod-broker-1 + Warning check pod running; ConditionStatus:True; PodName:kafka-prod-broker-1 5m40s KubeDB Ops-manager Operator check pod running; ConditionStatus:True; 
PodName:kafka-prod-broker-1 + Normal RestartNodes 5m35s KubeDB Ops-manager Operator Successfully restarted all nodes + Normal Starting 5m35s KubeDB Ops-manager Operator Resuming Kafka database: demo/kafka-prod + Normal Successful 5m34s KubeDB Ops-manager Operator Successfully resumed Kafka database: demo/kafka-prod for KafkaOpsRequest: kfops-hscale-down-topology +``` + +Now, we are going to verify the number of replicas this cluster has from the Kafka object, number of pods the petset have, + +**Broker Replicas** + +```bash +$ kubectl get kafka -n demo kafka-prod -o json | jq '.spec.topology.broker.replicas' +2 + +$ kubectl get petset -n demo kafka-prod-broker -o json | jq '.spec.replicas' +2 +``` + +**Controller Replicas** + +```bash +$ kubectl get kafka -n demo kafka-prod -o json | jq '.spec.topology.controller.replicas' +2 + +$ kubectl get petset -n demo kafka-prod-controller -o json | jq '.spec.replicas' +2 +``` + +Now let's connect to a kafka instance and run a kafka internal command to check the number of replicas for both broker and controller nodes, + +**Broker** + +```bash +$ kubectl exec -it -n demo kafka-prod-broker-0 -- kafka-broker-api-versions.sh --bootstrap-server localhost:9092 --command-config config/clientauth.properties +kafka-prod-broker-0.kafka-prod-pods.demo.svc.cluster.local:9092 (id: 0 rack: null) -> ( + Produce(0): 0 to 9 [usable: 9], + Fetch(1): 0 to 15 [usable: 15], + ListOffsets(2): 0 to 8 [usable: 8], + Metadata(3): 0 to 12 [usable: 12], + LeaderAndIsr(4): UNSUPPORTED, + StopReplica(5): UNSUPPORTED, + UpdateMetadata(6): UNSUPPORTED, + ControlledShutdown(7): UNSUPPORTED, + OffsetCommit(8): 0 to 8 [usable: 8], + OffsetFetch(9): 0 to 8 [usable: 8], + FindCoordinator(10): 0 to 4 [usable: 4], + JoinGroup(11): 0 to 9 [usable: 9], + Heartbeat(12): 0 to 4 [usable: 4], + LeaveGroup(13): 0 to 5 [usable: 5], + SyncGroup(14): 0 to 5 [usable: 5], + DescribeGroups(15): 0 to 5 [usable: 5], + ListGroups(16): 0 to 4 [usable: 4], + SaslHandshake(17): 0 to 1 [usable: 1], + ApiVersions(18): 0 to 3 [usable: 3], + CreateTopics(19): 0 to 7 [usable: 7], + DeleteTopics(20): 0 to 6 [usable: 6], + DeleteRecords(21): 0 to 2 [usable: 2], + InitProducerId(22): 0 to 4 [usable: 4], + OffsetForLeaderEpoch(23): 0 to 4 [usable: 4], + AddPartitionsToTxn(24): 0 to 4 [usable: 4], + AddOffsetsToTxn(25): 0 to 3 [usable: 3], + EndTxn(26): 0 to 3 [usable: 3], + WriteTxnMarkers(27): 0 to 1 [usable: 1], + TxnOffsetCommit(28): 0 to 3 [usable: 3], + DescribeAcls(29): 0 to 3 [usable: 3], + CreateAcls(30): 0 to 3 [usable: 3], + DeleteAcls(31): 0 to 3 [usable: 3], + DescribeConfigs(32): 0 to 4 [usable: 4], + AlterConfigs(33): 0 to 2 [usable: 2], + AlterReplicaLogDirs(34): 0 to 2 [usable: 2], + DescribeLogDirs(35): 0 to 4 [usable: 4], + SaslAuthenticate(36): 0 to 2 [usable: 2], + CreatePartitions(37): 0 to 3 [usable: 3], + CreateDelegationToken(38): 0 to 3 [usable: 3], + RenewDelegationToken(39): 0 to 2 [usable: 2], + ExpireDelegationToken(40): 0 to 2 [usable: 2], + DescribeDelegationToken(41): 0 to 3 [usable: 3], + DeleteGroups(42): 0 to 2 [usable: 2], + ElectLeaders(43): 0 to 2 [usable: 2], + IncrementalAlterConfigs(44): 0 to 1 [usable: 1], + AlterPartitionReassignments(45): 0 [usable: 0], + ListPartitionReassignments(46): 0 [usable: 0], + OffsetDelete(47): 0 [usable: 0], + DescribeClientQuotas(48): 0 to 1 [usable: 1], + AlterClientQuotas(49): 0 to 1 [usable: 1], + DescribeUserScramCredentials(50): 0 [usable: 0], + AlterUserScramCredentials(51): 0 [usable: 0], + DescribeQuorum(55): 0 to 1 [usable: 1], + 
AlterPartition(56): UNSUPPORTED, + UpdateFeatures(57): 0 to 1 [usable: 1], + Envelope(58): UNSUPPORTED, + DescribeCluster(60): 0 [usable: 0], + DescribeProducers(61): 0 [usable: 0], + UnregisterBroker(64): 0 [usable: 0], + DescribeTransactions(65): 0 [usable: 0], + ListTransactions(66): 0 [usable: 0], + AllocateProducerIds(67): UNSUPPORTED, + ConsumerGroupHeartbeat(68): UNSUPPORTED +) +kafka-prod-broker-1.kafka-prod-pods.demo.svc.cluster.local:9092 (id: 1 rack: null) -> ( + Produce(0): 0 to 9 [usable: 9], + Fetch(1): 0 to 15 [usable: 15], + ListOffsets(2): 0 to 8 [usable: 8], + Metadata(3): 0 to 12 [usable: 12], + LeaderAndIsr(4): UNSUPPORTED, + StopReplica(5): UNSUPPORTED, + UpdateMetadata(6): UNSUPPORTED, + ControlledShutdown(7): UNSUPPORTED, + OffsetCommit(8): 0 to 8 [usable: 8], + OffsetFetch(9): 0 to 8 [usable: 8], + FindCoordinator(10): 0 to 4 [usable: 4], + JoinGroup(11): 0 to 9 [usable: 9], + Heartbeat(12): 0 to 4 [usable: 4], + LeaveGroup(13): 0 to 5 [usable: 5], + SyncGroup(14): 0 to 5 [usable: 5], + DescribeGroups(15): 0 to 5 [usable: 5], + ListGroups(16): 0 to 4 [usable: 4], + SaslHandshake(17): 0 to 1 [usable: 1], + ApiVersions(18): 0 to 3 [usable: 3], + CreateTopics(19): 0 to 7 [usable: 7], + DeleteTopics(20): 0 to 6 [usable: 6], + DeleteRecords(21): 0 to 2 [usable: 2], + InitProducerId(22): 0 to 4 [usable: 4], + OffsetForLeaderEpoch(23): 0 to 4 [usable: 4], + AddPartitionsToTxn(24): 0 to 4 [usable: 4], + AddOffsetsToTxn(25): 0 to 3 [usable: 3], + EndTxn(26): 0 to 3 [usable: 3], + WriteTxnMarkers(27): 0 to 1 [usable: 1], + TxnOffsetCommit(28): 0 to 3 [usable: 3], + DescribeAcls(29): 0 to 3 [usable: 3], + CreateAcls(30): 0 to 3 [usable: 3], + DeleteAcls(31): 0 to 3 [usable: 3], + DescribeConfigs(32): 0 to 4 [usable: 4], + AlterConfigs(33): 0 to 2 [usable: 2], + AlterReplicaLogDirs(34): 0 to 2 [usable: 2], + DescribeLogDirs(35): 0 to 4 [usable: 4], + SaslAuthenticate(36): 0 to 2 [usable: 2], + CreatePartitions(37): 0 to 3 [usable: 3], + CreateDelegationToken(38): 0 to 3 [usable: 3], + RenewDelegationToken(39): 0 to 2 [usable: 2], + ExpireDelegationToken(40): 0 to 2 [usable: 2], + DescribeDelegationToken(41): 0 to 3 [usable: 3], + DeleteGroups(42): 0 to 2 [usable: 2], + ElectLeaders(43): 0 to 2 [usable: 2], + IncrementalAlterConfigs(44): 0 to 1 [usable: 1], + AlterPartitionReassignments(45): 0 [usable: 0], + ListPartitionReassignments(46): 0 [usable: 0], + OffsetDelete(47): 0 [usable: 0], + DescribeClientQuotas(48): 0 to 1 [usable: 1], + AlterClientQuotas(49): 0 to 1 [usable: 1], + DescribeUserScramCredentials(50): 0 [usable: 0], + AlterUserScramCredentials(51): 0 [usable: 0], + DescribeQuorum(55): 0 to 1 [usable: 1], + AlterPartition(56): UNSUPPORTED, + UpdateFeatures(57): 0 to 1 [usable: 1], + Envelope(58): UNSUPPORTED, + DescribeCluster(60): 0 [usable: 0], + DescribeProducers(61): 0 [usable: 0], + UnregisterBroker(64): 0 [usable: 0], + DescribeTransactions(65): 0 [usable: 0], + ListTransactions(66): 0 [usable: 0], + AllocateProducerIds(67): UNSUPPORTED, + ConsumerGroupHeartbeat(68): UNSUPPORTED +) +``` + +**Controller** + +```bash +$ kubectl exec -it -n demo kafka-prod-controller-0 -- kafka-metadata-quorum.sh --bootstrap-server localhost:9092 --command-config config/clientauth.properties describe --status | grep CurrentObservers +CurrentObservers: [0,1] +``` + +From all the above outputs we can see that the replicas of both broker and controller of the topology cluster is `2`. That means we have successfully scaled down the replicas of the Kafka topology cluster. 
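+
+Optionally, before cleaning up, you can double-check that the scale-down left no partitions under-replicated. Reusing the same client auth config as in the commands above, a quick check might look like this:
+
+```bash
+$ kubectl exec -it -n demo kafka-prod-broker-0 -- kafka-topics.sh --bootstrap-server localhost:9092 --command-config config/clientauth.properties --describe --under-replicated-partitions
+```
+
+An empty result means every partition has its full set of in-sync replicas after the reassignment.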
+ +## Cleaning Up + +To clean up the Kubernetes resources created by this tutorial, run: + +```bash +kubectl delete kf -n demo kafka-prod +kubectl delete kafkaopsrequest -n demo kfops-hscale-up-topology kfops-hscale-down-topology +kubectl delete ns demo +``` + +## Next Steps + +- Detail concepts of [Kafka object](/docs/guides/kafka/concepts/kafka.md). +- Different Kafka topology clustering modes [here](/docs/guides/kafka/clustering/_index.md). +- Monitor your Kafka with KubeDB using [out-of-the-box Prometheus operator](/docs/guides/kafka/monitoring/using-prometheus-operator.md). + +[//]: # (- Monitor your Kafka with KubeDB using [out-of-the-box builtin-Prometheus](/docs/guides/kafka/monitoring/using-builtin-prometheus.md).) +- Want to hack on KubeDB? Check our [contribution guidelines](/docs/CONTRIBUTING.md). diff --git a/docs/guides/kafka/scaling/vertical-scaling/_index.md b/docs/guides/kafka/scaling/vertical-scaling/_index.md new file mode 100644 index 0000000000..8eeb4e12f0 --- /dev/null +++ b/docs/guides/kafka/scaling/vertical-scaling/_index.md @@ -0,0 +1,10 @@ +--- +title: Vertical Scaling +menu: + docs_{{ .version }}: + identifier: kf-vertical-scaling + name: Vertical Scaling + parent: kf-scaling + weight: 20 +menu_name: docs_{{ .version }} +--- \ No newline at end of file diff --git a/docs/guides/kafka/scaling/vertical-scaling/combined.md b/docs/guides/kafka/scaling/vertical-scaling/combined.md new file mode 100644 index 0000000000..5554224259 --- /dev/null +++ b/docs/guides/kafka/scaling/vertical-scaling/combined.md @@ -0,0 +1,308 @@ +--- +title: Vertical Scaling Kafka Combined Cluster +menu: + docs_{{ .version }}: + identifier: kf-vertical-scaling-combined + name: Combined Cluster + parent: kf-vertical-scaling + weight: 30 +menu_name: docs_{{ .version }} +section_menu_id: guides +--- + +> New to KubeDB? Please start [here](/docs/README.md). + +# Vertical Scale Kafka Combined Cluster + +This guide will show you how to use `KubeDB` Ops-manager operator to update the resources of a Kafka combined cluster. + +## Before You Begin + +- At first, you need to have a Kubernetes cluster, and the `kubectl` command-line tool must be configured to communicate with your cluster. If you do not already have a cluster, you can create one by using [kind](https://kind.sigs.k8s.io/docs/user/quick-start/). + +- Install `KubeDB` Provisioner and Ops-manager operator in your cluster following the steps [here](/docs/setup/README.md). + +- You should be familiar with the following `KubeDB` concepts: + - [Kafka](/docs/guides/kafka/concepts/kafka.md) + - [Combined](/docs/guides/kafka/clustering/combined-cluster/index.md) + - [KafkaOpsRequest](/docs/guides/kafka/concepts/kafkaopsrequest.md) + - [Vertical Scaling Overview](/docs/guides/kafka/scaling/vertical-scaling/overview.md) + +To keep everything isolated, we are going to use a separate namespace called `demo` throughout this tutorial. + +```bash +$ kubectl create ns demo +namespace/demo created +``` + +> **Note:** YAML files used in this tutorial are stored in [docs/examples/mongodb](/docs/examples/kafka) directory of [kubedb/docs](https://github.com/kubedb/docs) repository. + +## Apply Vertical Scaling on Combined Cluster + +Here, we are going to deploy a `Kafka` combined cluster using a supported version by `KubeDB` operator. Then we are going to apply vertical scaling on it. + +### Prepare Kafka Combined Cluster + +Now, we are going to deploy a `Kafka` combined cluster database with version `3.6.1`. 
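
Before creating the cluster, you may want to confirm that the version you picked is available in your KubeDB catalog. A quick way to do that (the output columns depend on the catalog you installed) is:

```bash
# List the Kafka versions supported by the installed KubeDB catalog
$ kubectl get kafkaversion

# Inspect a specific version entry
$ kubectl get kafkaversion 3.6.1 -o yaml
```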
+ +### Deploy Kafka Combined Cluster + +In this section, we are going to deploy a Kafka combined cluster. Then, in the next section we will update the resources of the database using `KafkaOpsRequest` CRD. Below is the YAML of the `Kafka` CR that we are going to create, + +```yaml +apiVersion: kubedb.com/v1 +kind: Kafka +metadata: + name: kafka-dev + namespace: demo +spec: + replicas: 2 + version: 3.6.1 + storage: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi + storageClassName: standard + storageType: Durable + deletionPolicy: WipeOut +``` + +Let's create the `Kafka` CR we have shown above, + +```bash +$ kubectl create -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/kafka/scaling/kafka-combined.yaml +kafka.kubedb.com/kafka-dev created +``` + +Now, wait until `kafka-dev` has status `Ready`. i.e, + +```bash +$ kubectl get kf -n demo -w +NAME TYPE VERSION STATUS AGE +kafka-dev kubedb.com/v1 3.6.1 Provisioning 0s +kafka-dev kubedb.com/v1 3.6.1 Provisioning 24s +. +. +kafka-dev kubedb.com/v1 3.6.1 Ready 92s +``` + +Let's check the Pod containers resources, + +```bash +$ kubectl get pod -n demo kafka-dev-0 -o json | jq '.spec.containers[].resources' +{ + "limits": { + "memory": "1Gi" + }, + "requests": { + "cpu": "500m", + "memory": "1Gi" + } +} +``` +This is the default resources of the Kafka combined cluster set by the `KubeDB` operator. + +We are now ready to apply the `KafkaOpsRequest` CR to update the resources of this database. + +### Vertical Scaling + +Here, we are going to update the resources of the combined cluster to meet the desired resources after scaling. + +#### Create KafkaOpsRequest + +In order to update the resources of the database, we have to create a `KafkaOpsRequest` CR with our desired resources. Below is the YAML of the `KafkaOpsRequest` CR that we are going to create, + +```yaml +apiVersion: ops.kubedb.com/v1alpha1 +kind: KafkaOpsRequest +metadata: + name: kfops-vscale-combined + namespace: demo +spec: + type: VerticalScaling + databaseRef: + name: kafka-dev + verticalScaling: + node: + resources: + requests: + memory: "1.2Gi" + cpu: "0.6" + limits: + memory: "1.2Gi" + cpu: "0.6" + timeout: 5m + apply: IfReady +``` + +Here, + +- `spec.databaseRef.name` specifies that we are performing vertical scaling operation on `kafka-dev` cluster. +- `spec.type` specifies that we are performing `VerticalScaling` on kafka. +- `spec.VerticalScaling.node` specifies the desired resources after scaling. + +Let's create the `KafkaOpsRequest` CR we have shown above, + +```bash +$ kubectl apply -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/kafka/scaling/vertical-scaling/kafka-vertical-scaling-combined.yaml +kafkaopsrequest.ops.kubedb.com/kfops-vscale-combined created +``` + +#### Verify Kafka Combined cluster resources updated successfully + +If everything goes well, `KubeDB` Ops-manager operator will update the resources of `Kafka` object and related `PetSets` and `Pods`. + +Let's wait for `KafkaOpsRequest` to be `Successful`. Run the following command to watch `KafkaOpsRequest` CR, + +```bash +$ kubectl get kafkaopsrequest -n demo +NAME TYPE STATUS AGE +kfops-vscale-combined VerticalScaling Successful 3m56s +``` + +We can see from the above output that the `KafkaOpsRequest` has succeeded. If we describe the `KafkaOpsRequest` we will get an overview of the steps that were followed to scale the cluster. 
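
Instead of polling `kubectl get`, you can also block until the request reaches the `Successful` phase with a JSONPath-based wait. This is a sketch and assumes a kubectl version (v1.23 or later) that supports JSONPath conditions:

```bash
$ kubectl wait kafkaopsrequest/kfops-vscale-combined -n demo \
    --for=jsonpath='{.status.phase}'=Successful --timeout=10m
```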
+ +```bash +$ kubectl describe kafkaopsrequest -n demo kfops-vscale-combined +Name: kfops-vscale-combined +Namespace: demo +Labels: +Annotations: +API Version: ops.kubedb.com/v1alpha1 +Kind: KafkaOpsRequest +Metadata: + Creation Timestamp: 2024-08-02T05:59:06Z + Generation: 1 + Resource Version: 336197 + UID: 5fd90feb-eed2-4130-8762-442f2f4d2698 +Spec: + Apply: IfReady + Database Ref: + Name: kafka-dev + Timeout: 5m + Type: VerticalScaling + Vertical Scaling: + Node: + Resources: + Limits: + Cpu: 0.6 + Memory: 1.2Gi + Requests: + Cpu: 0.6 + Memory: 1.2Gi +Status: + Conditions: + Last Transition Time: 2024-08-02T05:59:06Z + Message: Kafka ops-request has started to vertically scaling the kafka nodes + Observed Generation: 1 + Reason: VerticalScaling + Status: True + Type: VerticalScaling + Last Transition Time: 2024-08-02T05:59:09Z + Message: Successfully updated PetSets Resources + Observed Generation: 1 + Reason: UpdatePetSets + Status: True + Type: UpdatePetSets + Last Transition Time: 2024-08-02T05:59:14Z + Message: get pod; ConditionStatus:True; PodName:kafka-dev-0 + Observed Generation: 1 + Status: True + Type: GetPod--kafka-dev-0 + Last Transition Time: 2024-08-02T05:59:14Z + Message: evict pod; ConditionStatus:True; PodName:kafka-dev-0 + Observed Generation: 1 + Status: True + Type: EvictPod--kafka-dev-0 + Last Transition Time: 2024-08-02T05:59:29Z + Message: check pod running; ConditionStatus:True; PodName:kafka-dev-0 + Observed Generation: 1 + Status: True + Type: CheckPodRunning--kafka-dev-0 + Last Transition Time: 2024-08-02T05:59:34Z + Message: get pod; ConditionStatus:True; PodName:kafka-dev-1 + Observed Generation: 1 + Status: True + Type: GetPod--kafka-dev-1 + Last Transition Time: 2024-08-02T05:59:34Z + Message: evict pod; ConditionStatus:True; PodName:kafka-dev-1 + Observed Generation: 1 + Status: True + Type: EvictPod--kafka-dev-1 + Last Transition Time: 2024-08-02T06:00:59Z + Message: check pod running; ConditionStatus:True; PodName:kafka-dev-1 + Observed Generation: 1 + Status: True + Type: CheckPodRunning--kafka-dev-1 + Last Transition Time: 2024-08-02T06:01:04Z + Message: Successfully Restarted Pods With Resources + Observed Generation: 1 + Reason: RestartPods + Status: True + Type: RestartPods + Last Transition Time: 2024-08-02T06:01:04Z + Message: Successfully completed the vertical scaling for kafka + Observed Generation: 1 + Reason: Successful + Status: True + Type: Successful + Observed Generation: 1 + Phase: Successful +Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Normal Starting 2m38s KubeDB Ops-manager Operator Start processing for KafkaOpsRequest: demo/kfops-vscale-combined + Normal Starting 2m38s KubeDB Ops-manager Operator Pausing Kafka databse: demo/kafka-dev + Normal Successful 2m38s KubeDB Ops-manager Operator Successfully paused Kafka database: demo/kafka-dev for KafkaOpsRequest: kfops-vscale-combined + Normal UpdatePetSets 2m35s KubeDB Ops-manager Operator Successfully updated PetSets Resources + Warning get pod; ConditionStatus:True; PodName:kafka-dev-0 2m30s KubeDB Ops-manager Operator get pod; ConditionStatus:True; PodName:kafka-dev-0 + Warning evict pod; ConditionStatus:True; PodName:kafka-dev-0 2m30s KubeDB Ops-manager Operator evict pod; ConditionStatus:True; PodName:kafka-dev-0 + Warning check pod running; ConditionStatus:False; PodName:kafka-dev-0 2m25s KubeDB Ops-manager Operator check pod running; ConditionStatus:False; PodName:kafka-dev-0 + Warning check pod running; ConditionStatus:True; PodName:kafka-dev-0 2m15s 
KubeDB Ops-manager Operator check pod running; ConditionStatus:True; PodName:kafka-dev-0 + Warning get pod; ConditionStatus:True; PodName:kafka-dev-1 2m10s KubeDB Ops-manager Operator get pod; ConditionStatus:True; PodName:kafka-dev-1 + Warning evict pod; ConditionStatus:True; PodName:kafka-dev-1 2m10s KubeDB Ops-manager Operator evict pod; ConditionStatus:True; PodName:kafka-dev-1 + Warning check pod running; ConditionStatus:False; PodName:kafka-dev-1 2m5s KubeDB Ops-manager Operator check pod running; ConditionStatus:False; PodName:kafka-dev-1 + Warning check pod running; ConditionStatus:True; PodName:kafka-dev-1 45s KubeDB Ops-manager Operator check pod running; ConditionStatus:True; PodName:kafka-dev-1 + Normal RestartPods 40s KubeDB Ops-manager Operator Successfully Restarted Pods With Resources + Normal Starting 40s KubeDB Ops-manager Operator Resuming Kafka database: demo/kafka-dev + Normal Successful 40s KubeDB Ops-manager Operator Successfully resumed Kafka database: demo/kafka-dev for KafkaOpsRequest: kfops-vscale-combined +``` + +Now, we are going to verify from one of the Pod yaml whether the resources of the combined cluster has updated to meet up the desired state, Let's check, + +```bash +$ kubectl get pod -n demo kafka-dev-1 -o json | jq '.spec.containers[].resources' +{ + "limits": { + "cpu": "600m", + "memory": "1288490188800m" + }, + "requests": { + "cpu": "600m", + "memory": "1288490188800m" + } +} +``` + +The above output verifies that we have successfully scaled up the resources of the Kafka combined cluster. + +## Cleaning Up + +To clean up the Kubernetes resources created by this tutorial, run: + +```bash +kubectl delete mg -n demo kafka-dev +kubectl delete kafkaopsrequest -n demo kfops-vscale-combined +kubectl delete ns demo +``` + +## Next Steps + +- Detail concepts of [Kafka object](/docs/guides/kafka/concepts/kafka.md). +- Different Kafka topology clustering modes [here](/docs/guides/kafka/clustering/_index.md). +- Monitor your Kafka database with KubeDB using [out-of-the-box Prometheus operator](/docs/guides/kafka/monitoring/using-prometheus-operator.md). + +[//]: # (- Monitor your Kafka database with KubeDB using [out-of-the-box builtin-Prometheus](/docs/guides/kafka/monitoring/using-builtin-prometheus.md).) +- Want to hack on KubeDB? Check our [contribution guidelines](/docs/CONTRIBUTING.md). diff --git a/docs/guides/kafka/scaling/vertical-scaling/overview.md b/docs/guides/kafka/scaling/vertical-scaling/overview.md new file mode 100644 index 0000000000..92be3de07e --- /dev/null +++ b/docs/guides/kafka/scaling/vertical-scaling/overview.md @@ -0,0 +1,54 @@ +--- +title: Kafka Vertical Scaling Overview +menu: + docs_{{ .version }}: + identifier: kf-vertical-scaling-overview + name: Overview + parent: kf-vertical-scaling + weight: 10 +menu_name: docs_{{ .version }} +section_menu_id: guides +--- + +> New to KubeDB? Please start [here](/docs/README.md). + +# Kafka Vertical Scaling + +This guide will give an overview on how KubeDB Ops-manager operator updates the resources(for example CPU and Memory etc.) of the `Kafka`. + +## Before You Begin + +- You should be familiar with the following `KubeDB` concepts: + - [Kafka](/docs/guides/kafka/concepts/kafka.md) + - [KafkaOpsRequest](/docs/guides/kafka/concepts/kafkaopsrequest.md) + +## How Vertical Scaling Process Works + +The following diagram shows how KubeDB Ops-manager operator updates the resources of the `Kafka`. Open the image in a new tab to see the enlarged version. + +
Fig: Vertical scaling process of Kafka
+ +The vertical scaling process consists of the following steps: + +1. At first, a user creates a `Kafka` Custom Resource (CR). + +2. `KubeDB` Provisioner operator watches the `Kafka` CR. + +3. When the operator finds a `Kafka` CR, it creates required number of `PetSets` and related necessary stuff like secrets, services, etc. + +4. Then, in order to update the resources(for example `CPU`, `Memory` etc.) of the `Kafka` cluster, the user creates a `KafkaOpsRequest` CR with desired information. + +5. `KubeDB` Ops-manager operator watches the `KafkaOpsRequest` CR. + +6. When it finds a `KafkaOpsRequest` CR, it halts the `Kafka` object which is referred from the `KafkaOpsRequest`. So, the `KubeDB` Provisioner operator doesn't perform any operations on the `Kafka` object during the vertical scaling process. + +7. Then the `KubeDB` Ops-manager operator will update resources of the PetSet Pods to reach desired state. + +8. After the successful update of the resources of the PetSet's replica, the `KubeDB` Ops-manager operator updates the `Kafka` object to reflect the updated state. + +9. After the successful update of the `Kafka` resources, the `KubeDB` Ops-manager operator resumes the `Kafka` object so that the `KubeDB` Provisioner operator resumes its usual operations. + +In the next docs, we are going to show a step by step guide on updating resources of Kafka database using `KafkaOpsRequest` CRD. \ No newline at end of file diff --git a/docs/guides/kafka/scaling/vertical-scaling/topology.md b/docs/guides/kafka/scaling/vertical-scaling/topology.md new file mode 100644 index 0000000000..2629b1eb08 --- /dev/null +++ b/docs/guides/kafka/scaling/vertical-scaling/topology.md @@ -0,0 +1,395 @@ +--- +title: Vertical Scaling Kafka Topology Cluster +menu: + docs_{{ .version }}: + identifier: kf-vertical-scaling-topology + name: Topology Cluster + parent: kf-vertical-scaling + weight: 30 +menu_name: docs_{{ .version }} +section_menu_id: guides +--- + +> New to KubeDB? Please start [here](/docs/README.md). + +# Vertical Scale Kafka Topology Cluster + +This guide will show you how to use `KubeDB` Ops-manager operator to update the resources of a Kafka topology cluster. + +## Before You Begin + +- At first, you need to have a Kubernetes cluster, and the `kubectl` command-line tool must be configured to communicate with your cluster. If you do not already have a cluster, you can create one by using [kind](https://kind.sigs.k8s.io/docs/user/quick-start/). + +- Install `KubeDB` Provisioner and Ops-manager operator in your cluster following the steps [here](/docs/setup/README.md). + +- You should be familiar with the following `KubeDB` concepts: + - [Kafka](/docs/guides/kafka/concepts/kafka.md) + - [Topology](/docs/guides/kafka/clustering/topology-cluster/index.md) + - [KafkaOpsRequest](/docs/guides/kafka/concepts/kafkaopsrequest.md) + - [Vertical Scaling Overview](/docs/guides/kafka/scaling/vertical-scaling/overview.md) + +To keep everything isolated, we are going to use a separate namespace called `demo` throughout this tutorial. + +```bash +$ kubectl create ns demo +namespace/demo created +``` + +> **Note:** YAML files used in this tutorial are stored in [docs/examples/mongodb](/docs/examples/kafka) directory of [kubedb/docs](https://github.com/kubedb/docs) repository. + +## Apply Vertical Scaling on Topology Cluster + +Here, we are going to deploy a `Kafka` topology cluster using a supported version by `KubeDB` operator. Then we are going to apply vertical scaling on it. 
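
Before applying any ops request, it is a good idea to verify that both the KubeDB Provisioner and Ops-manager operators are running. A minimal check (the namespace and pod names depend on how KubeDB was installed) is:

```bash
# Both the provisioner and the ops-manager pods should be in Running state
$ kubectl get pods --all-namespaces | grep kubedb
```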
+ +### Prepare Kafka Topology Cluster + +Now, we are going to deploy a `Kafka` topology cluster database with version `3.6.1`. + +### Deploy Kafka Topology Cluster + +In this section, we are going to deploy a Kafka topology cluster. Then, in the next section we will update the resources of the database using `KafkaOpsRequest` CRD. Below is the YAML of the `Kafka` CR that we are going to create, + +```yaml +apiVersion: kubedb.com/v1 +kind: Kafka +metadata: + name: kafka-prod + namespace: demo +spec: + version: 3.6.1 + topology: + broker: + replicas: 2 + storage: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi + storageClassName: standard + controller: + replicas: 2 + storage: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi + storageClassName: standard + storageType: Durable + deletionPolicy: WipeOut +``` + +Let's create the `Kafka` CR we have shown above, + +```bash +$ kubectl create -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/kafka/scaling/kafka-topology.yaml +kafka.kubedb.com/kafka-prod created +``` + +Now, wait until `kafka-prod` has status `Ready`. i.e, + +```bash +$ kubectl get kf -n demo -w +NAME TYPE VERSION STATUS AGE +kafka-prod kubedb.com/v1 3.6.1 Provisioning 0s +kafka-prod kubedb.com/v1 3.6.1 Provisioning 24s +. +. +kafka-prod kubedb.com/v1 3.6.1 Ready 92s +``` + +Let's check the Pod containers resources for both `broker` and `controller` of the Kafka topology cluster. Run the following command to get the resources of the `broker` and `controller` containers of the Kafka topology cluster + +```bash +$ kubectl get pod -n demo kafka-prod-broker-0 -o json | jq '.spec.containers[].resources' +{ + "limits": { + "memory": "1Gi" + }, + "requests": { + "cpu": "500m", + "memory": "1Gi" + } +} +``` + +```bash +$ kubectl get pod -n demo kafka-prod-controller-0 -o json | jq '.spec.containers[].resources' +{ + "limits": { + "memory": "1Gi" + }, + "requests": { + "cpu": "500m", + "memory": "1Gi" + } +} +``` +This is the default resources of the Kafka topology cluster set by the `KubeDB` operator. + +We are now ready to apply the `KafkaOpsRequest` CR to update the resources of this database. + +### Vertical Scaling + +Here, we are going to update the resources of the topology cluster to meet the desired resources after scaling. + +#### Create KafkaOpsRequest + +In order to update the resources of the database, we have to create a `KafkaOpsRequest` CR with our desired resources. Below is the YAML of the `KafkaOpsRequest` CR that we are going to create, + +```yaml +apiVersion: ops.kubedb.com/v1alpha1 +kind: KafkaOpsRequest +metadata: + name: kfops-vscale-topology + namespace: demo +spec: + type: VerticalScaling + databaseRef: + name: kafka-prod + verticalScaling: + broker: + resources: + requests: + memory: "1.2Gi" + cpu: "0.6" + limits: + memory: "1.2Gi" + cpu: "0.6" + controller: + resources: + requests: + memory: "1.1Gi" + cpu: "0.6" + limits: + memory: "1.1Gi" + cpu: "0.6" + timeout: 5m + apply: IfReady +``` + +Here, + +- `spec.databaseRef.name` specifies that we are performing vertical scaling operation on `kafka-prod` cluster. +- `spec.type` specifies that we are performing `VerticalScaling` on kafka. +- `spec.VerticalScaling.node` specifies the desired resources after scaling. 
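
Note that, unlike the combined cluster where a single `spec.verticalScaling.node` section is used, a topology cluster takes its resources per node type under `spec.verticalScaling.broker` and `spec.verticalScaling.controller`, as shown in the YAML above. If you only need to resize one node type, it should be possible to submit just that section. Below is a minimal sketch with a hypothetical request name; verify it against the `KafkaOpsRequest` spec of your KubeDB version before relying on it:

```yaml
apiVersion: ops.kubedb.com/v1alpha1
kind: KafkaOpsRequest
metadata:
  name: kfops-vscale-broker-only   # hypothetical name, not part of this tutorial
  namespace: demo
spec:
  type: VerticalScaling
  databaseRef:
    name: kafka-prod
  verticalScaling:
    broker:
      resources:
        requests:
          cpu: "0.6"
          memory: "1.2Gi"
        limits:
          cpu: "0.6"
          memory: "1.2Gi"
```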
+ +Let's create the `KafkaOpsRequest` CR we have shown above, + +```bash +$ kubectl apply -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/kafka/scaling/vertical-scaling/kafka-vertical-scaling-topology.yaml +kafkaopsrequest.ops.kubedb.com/kfops-vscale-topology created +``` + +#### Verify Kafka Topology cluster resources updated successfully + +If everything goes well, `KubeDB` Ops-manager operator will update the resources of `Kafka` object and related `PetSets` and `Pods`. + +Let's wait for `KafkaOpsRequest` to be `Successful`. Run the following command to watch `KafkaOpsRequest` CR, + +```bash +$ kubectl get kafkaopsrequest -n demo +NAME TYPE STATUS AGE +kfops-vscale-topology VerticalScaling Successful 3m56s +``` + +We can see from the above output that the `KafkaOpsRequest` has succeeded. If we describe the `KafkaOpsRequest` we will get an overview of the steps that were followed to scale the cluster. + +```bash +$ kubectl describe kafkaopsrequest -n demo kfops-vscale-topology +Name: kfops-vscale-topology +Namespace: demo +Labels: +Annotations: +API Version: ops.kubedb.com/v1alpha1 +Kind: KafkaOpsRequest +Metadata: + Creation Timestamp: 2024-08-02T06:09:46Z + Generation: 1 + Resource Version: 337300 + UID: ca298c0a-e08d-4c78-acbc-40eb5e96532d +Spec: + Apply: IfReady + Database Ref: + Name: kafka-prod + Timeout: 5m + Type: VerticalScaling + Vertical Scaling: + Broker: + Resources: + Limits: + Cpu: 0.6 + Memory: 1.2Gi + Requests: + Cpu: 0.6 + Memory: 1.2Gi + Controller: + Resources: + Limits: + Cpu: 0.6 + Memory: 1.1Gi + Requests: + Cpu: 0.6 + Memory: 1.1Gi +Status: + Conditions: + Last Transition Time: 2024-08-02T06:09:46Z + Message: Kafka ops-request has started to vertically scaling the kafka nodes + Observed Generation: 1 + Reason: VerticalScaling + Status: True + Type: VerticalScaling + Last Transition Time: 2024-08-02T06:09:50Z + Message: Successfully updated PetSets Resources + Observed Generation: 1 + Reason: UpdatePetSets + Status: True + Type: UpdatePetSets + Last Transition Time: 2024-08-02T06:09:55Z + Message: get pod; ConditionStatus:True; PodName:kafka-prod-broker-0 + Observed Generation: 1 + Status: True + Type: GetPod--kafka-prod-broker-0 + Last Transition Time: 2024-08-02T06:09:55Z + Message: evict pod; ConditionStatus:True; PodName:kafka-prod-broker-0 + Observed Generation: 1 + Status: True + Type: EvictPod--kafka-prod-broker-0 + Last Transition Time: 2024-08-02T06:10:00Z + Message: check pod running; ConditionStatus:True; PodName:kafka-prod-broker-0 + Observed Generation: 1 + Status: True + Type: CheckPodRunning--kafka-prod-broker-0 + Last Transition Time: 2024-08-02T06:10:05Z + Message: get pod; ConditionStatus:True; PodName:kafka-prod-broker-1 + Observed Generation: 1 + Status: True + Type: GetPod--kafka-prod-broker-1 + Last Transition Time: 2024-08-02T06:10:05Z + Message: evict pod; ConditionStatus:True; PodName:kafka-prod-broker-1 + Observed Generation: 1 + Status: True + Type: EvictPod--kafka-prod-broker-1 + Last Transition Time: 2024-08-02T06:10:15Z + Message: check pod running; ConditionStatus:True; PodName:kafka-prod-broker-1 + Observed Generation: 1 + Status: True + Type: CheckPodRunning--kafka-prod-broker-1 + Last Transition Time: 2024-08-02T06:10:20Z + Message: get pod; ConditionStatus:True; PodName:kafka-prod-controller-0 + Observed Generation: 1 + Status: True + Type: GetPod--kafka-prod-controller-0 + Last Transition Time: 2024-08-02T06:10:20Z + Message: evict pod; ConditionStatus:True; PodName:kafka-prod-controller-0 + 
Observed Generation: 1 + Status: True + Type: EvictPod--kafka-prod-controller-0 + Last Transition Time: 2024-08-02T06:10:35Z + Message: check pod running; ConditionStatus:True; PodName:kafka-prod-controller-0 + Observed Generation: 1 + Status: True + Type: CheckPodRunning--kafka-prod-controller-0 + Last Transition Time: 2024-08-02T06:10:40Z + Message: get pod; ConditionStatus:True; PodName:kafka-prod-controller-1 + Observed Generation: 1 + Status: True + Type: GetPod--kafka-prod-controller-1 + Last Transition Time: 2024-08-02T06:10:40Z + Message: evict pod; ConditionStatus:True; PodName:kafka-prod-controller-1 + Observed Generation: 1 + Status: True + Type: EvictPod--kafka-prod-controller-1 + Last Transition Time: 2024-08-02T06:10:55Z + Message: check pod running; ConditionStatus:True; PodName:kafka-prod-controller-1 + Observed Generation: 1 + Status: True + Type: CheckPodRunning--kafka-prod-controller-1 + Last Transition Time: 2024-08-02T06:11:00Z + Message: Successfully Restarted Pods With Resources + Observed Generation: 1 + Reason: RestartPods + Status: True + Type: RestartPods + Last Transition Time: 2024-08-02T06:11:00Z + Message: Successfully completed the vertical scaling for kafka + Observed Generation: 1 + Reason: Successful + Status: True + Type: Successful + Observed Generation: 1 + Phase: Successful +Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Normal Starting 3m32s KubeDB Ops-manager Operator Start processing for KafkaOpsRequest: demo/kfops-vscale-topology + Normal Starting 3m32s KubeDB Ops-manager Operator Pausing Kafka databse: demo/kafka-prod + Normal Successful 3m32s KubeDB Ops-manager Operator Successfully paused Kafka database: demo/kafka-prod for KafkaOpsRequest: kfops-vscale-topology + Normal UpdatePetSets 3m28s KubeDB Ops-manager Operator Successfully updated PetSets Resources + Warning get pod; ConditionStatus:True; PodName:kafka-prod-broker-0 3m23s KubeDB Ops-manager Operator get pod; ConditionStatus:True; PodName:kafka-prod-broker-0 + Warning evict pod; ConditionStatus:True; PodName:kafka-prod-broker-0 3m23s KubeDB Ops-manager Operator evict pod; ConditionStatus:True; PodName:kafka-prod-broker-0 + Warning check pod running; ConditionStatus:True; PodName:kafka-prod-broker-0 3m18s KubeDB Ops-manager Operator check pod running; ConditionStatus:True; PodName:kafka-prod-broker-0 + Warning get pod; ConditionStatus:True; PodName:kafka-prod-broker-1 3m13s KubeDB Ops-manager Operator get pod; ConditionStatus:True; PodName:kafka-prod-broker-1 + Warning evict pod; ConditionStatus:True; PodName:kafka-prod-broker-1 3m13s KubeDB Ops-manager Operator evict pod; ConditionStatus:True; PodName:kafka-prod-broker-1 + Warning check pod running; ConditionStatus:False; PodName:kafka-prod-broker-1 3m8s KubeDB Ops-manager Operator check pod running; ConditionStatus:False; PodName:kafka-prod-broker-1 + Warning check pod running; ConditionStatus:True; PodName:kafka-prod-broker-1 3m3s KubeDB Ops-manager Operator check pod running; ConditionStatus:True; PodName:kafka-prod-broker-1 + Warning get pod; ConditionStatus:True; PodName:kafka-prod-controller-0 2m58s KubeDB Ops-manager Operator get pod; ConditionStatus:True; PodName:kafka-prod-controller-0 + Warning evict pod; ConditionStatus:True; PodName:kafka-prod-controller-0 2m58s KubeDB Ops-manager Operator evict pod; ConditionStatus:True; PodName:kafka-prod-controller-0 + Warning check pod running; ConditionStatus:False; PodName:kafka-prod-controller-0 2m53s KubeDB Ops-manager Operator check pod running; 
ConditionStatus:False; PodName:kafka-prod-controller-0 + Warning check pod running; ConditionStatus:True; PodName:kafka-prod-controller-0 2m43s KubeDB Ops-manager Operator check pod running; ConditionStatus:True; PodName:kafka-prod-controller-0 + Warning get pod; ConditionStatus:True; PodName:kafka-prod-controller-1 2m38s KubeDB Ops-manager Operator get pod; ConditionStatus:True; PodName:kafka-prod-controller-1 + Warning evict pod; ConditionStatus:True; PodName:kafka-prod-controller-1 2m38s KubeDB Ops-manager Operator evict pod; ConditionStatus:True; PodName:kafka-prod-controller-1 + Warning check pod running; ConditionStatus:False; PodName:kafka-prod-controller-1 2m33s KubeDB Ops-manager Operator check pod running; ConditionStatus:False; PodName:kafka-prod-controller-1 + Warning check pod running; ConditionStatus:True; PodName:kafka-prod-controller-1 2m23s KubeDB Ops-manager Operator check pod running; ConditionStatus:True; PodName:kafka-prod-controller-1 + Normal RestartPods 2m18s KubeDB Ops-manager Operator Successfully Restarted Pods With Resources + Normal Starting 2m18s KubeDB Ops-manager Operator Resuming Kafka database: demo/kafka-prod + Normal Successful 2m18s KubeDB Ops-manager Operator Successfully resumed Kafka database: demo/kafka-prod for KafkaOpsRequest: kfops-vscale-topology +``` +Now, we are going to verify from one of the Pod yaml whether the resources of the topology cluster has updated to meet up the desired state, Let's check, + +```bash +$ kubectl get pod -n demo kafka-prod-broker-1 -o json | jq '.spec.containers[].resources' +{ + "limits": { + "cpu": "600m", + "memory": "1288490188800m" + }, + "requests": { + "cpu": "600m", + "memory": "1288490188800m" + } +} +$ kubectl get pod -n demo kafka-prod-controller-1 -o json | jq '.spec.containers[].resources' +{ + "limits": { + "cpu": "600m", + "memory": "1181116006400m" + }, + "requests": { + "cpu": "600m", + "memory": "1181116006400m" + } +} +``` + +The above output verifies that we have successfully scaled up the resources of the Kafka topology cluster. + +## Cleaning Up + +To clean up the Kubernetes resources created by this tutorial, run: + +```bash +kubectl delete kf -n demo kafka-prod +kubectl delete kafkaopsrequest -n demo kfops-vscale-topology +kubectl delete ns demo +``` + +## Next Steps + +- Detail concepts of [Kafka object](/docs/guides/kafka/concepts/kafka.md). +- Different Kafka topology clustering modes [here](/docs/guides/kafka/clustering/_index.md). +- Monitor your Kafka database with KubeDB using [out-of-the-box Prometheus operator](/docs/guides/kafka/monitoring/using-prometheus-operator.md). + +[//]: # (- Monitor your Kafka database with KubeDB using [out-of-the-box builtin-Prometheus](/docs/guides/kafka/monitoring/using-builtin-prometheus.md).) +- Want to hack on KubeDB? Check our [contribution guidelines](/docs/CONTRIBUTING.md).