From ab90098dc66df9e2e78ab6670b9c4b189fd22054 Mon Sep 17 00:00:00 2001 From: obaydullahmhs Date: Fri, 26 Jul 2024 17:59:11 +0600 Subject: [PATCH] Add Kafka Opsrequest and Autoscaler Docs Signed-off-by: obaydullahmhs --- .../configuration/configsecret-combined.yaml | 9 + .../configuration/configsecret-topology.yaml | 11 + .../kafka/configuration/kafka-combined.yaml | 28 + .../kafka/configuration/kafka-topology.yaml | 46 ++ docs/examples/kafka/restart/kafka.yaml | 44 ++ docs/examples/kafka/restart/ops.yaml | 11 + docs/guides/kafka/concepts/kafkaopsrequest.md | 622 ++++++++++++++++++ docs/guides/kafka/configuration/_index.md | 10 + .../kafka/configuration/kafka-combined.md | 176 +++++ .../kafka/configuration/kafka-topology.md | 219 ++++++ docs/guides/kafka/restart/_index.md | 10 + docs/guides/kafka/restart/restart.md | 251 +++++++ 12 files changed, 1437 insertions(+) create mode 100644 docs/examples/kafka/configuration/configsecret-combined.yaml create mode 100644 docs/examples/kafka/configuration/configsecret-topology.yaml create mode 100644 docs/examples/kafka/configuration/kafka-combined.yaml create mode 100644 docs/examples/kafka/configuration/kafka-topology.yaml create mode 100644 docs/examples/kafka/restart/kafka.yaml create mode 100644 docs/examples/kafka/restart/ops.yaml create mode 100644 docs/guides/kafka/concepts/kafkaopsrequest.md create mode 100644 docs/guides/kafka/configuration/_index.md create mode 100644 docs/guides/kafka/configuration/kafka-combined.md create mode 100644 docs/guides/kafka/configuration/kafka-topology.md create mode 100644 docs/guides/kafka/restart/_index.md create mode 100644 docs/guides/kafka/restart/restart.md diff --git a/docs/examples/kafka/configuration/configsecret-combined.yaml b/docs/examples/kafka/configuration/configsecret-combined.yaml new file mode 100644 index 0000000000..b32e9c98a7 --- /dev/null +++ b/docs/examples/kafka/configuration/configsecret-combined.yaml @@ -0,0 +1,9 @@ +apiVersion: v1 +kind: Secret +metadata: + name: configsecret-combined + namespace: demo +stringData: + server.properties: |- + log.retention.hours=100 + default.replication.factor=2 \ No newline at end of file diff --git a/docs/examples/kafka/configuration/configsecret-topology.yaml b/docs/examples/kafka/configuration/configsecret-topology.yaml new file mode 100644 index 0000000000..c32be5103c --- /dev/null +++ b/docs/examples/kafka/configuration/configsecret-topology.yaml @@ -0,0 +1,11 @@ +apiVersion: v1 +kind: Secret +metadata: + name: configsecret-topology + namespace: demo +stringData: + broker.properties: |- + log.retention.hours=100 + default.replication.factor=2 + controller.properties: |- + metadata.log.dir=/var/log/kafka/metadata-custom \ No newline at end of file diff --git a/docs/examples/kafka/configuration/kafka-combined.yaml b/docs/examples/kafka/configuration/kafka-combined.yaml new file mode 100644 index 0000000000..d458714d65 --- /dev/null +++ b/docs/examples/kafka/configuration/kafka-combined.yaml @@ -0,0 +1,28 @@ +apiVersion: kubedb.com/v1 +kind: Kafka +metadata: + name: kafka-dev + namespace: demo +spec: + replicas: 2 + version: 3.6.1 + podTemplate: + spec: + containers: + - name: kafka + resources: + limits: + cpu: 1 + memory: 2Gi + requests: + cpu: 500m + memory: 1Gi + storage: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi + storageClassName: standard + storageType: Durable + deletionPolicy: DoNotTerminate \ No newline at end of file diff --git a/docs/examples/kafka/configuration/kafka-topology.yaml 
b/docs/examples/kafka/configuration/kafka-topology.yaml new file mode 100644 index 0000000000..331d2ba015 --- /dev/null +++ b/docs/examples/kafka/configuration/kafka-topology.yaml @@ -0,0 +1,46 @@ +apiVersion: kubedb.com/v1 +kind: Kafka +metadata: + name: kafka-prod + namespace: demo +spec: + version: 3.6.1 + configSecret: + name: configsecret-topology + topology: + broker: + replicas: 2 + podTemplate: + spec: + containers: + - name: kafka + resources: + requests: + cpu: "500m" + memory: "1Gi" + storage: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi + storageClassName: standard + controller: + replicas: 2 + podTemplate: + spec: + containers: + - name: kafka + resources: + requests: + cpu: "500m" + memory: "1Gi" + storage: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi + storageClassName: standard + storageType: Durable + deletionPolicy: WipeOut \ No newline at end of file diff --git a/docs/examples/kafka/restart/kafka.yaml b/docs/examples/kafka/restart/kafka.yaml new file mode 100644 index 0000000000..b395dbecc3 --- /dev/null +++ b/docs/examples/kafka/restart/kafka.yaml @@ -0,0 +1,44 @@ +apiVersion: kubedb.com/v1 +kind: Kafka +metadata: + name: kafka-prod + namespace: demo +spec: + version: 3.6.1 + topology: + broker: + replicas: 2 + podTemplate: + spec: + containers: + - name: kafka + resources: + requests: + cpu: "500m" + memory: "1Gi" + storage: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi + storageClassName: standard + controller: + replicas: 2 + podTemplate: + spec: + containers: + - name: kafka + resources: + requests: + cpu: "500m" + memory: "1Gi" + storage: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi + storageClassName: standard + storageType: Durable + deletionPolicy: DoNotTerminate \ No newline at end of file diff --git a/docs/examples/kafka/restart/ops.yaml b/docs/examples/kafka/restart/ops.yaml new file mode 100644 index 0000000000..8772b0f77e --- /dev/null +++ b/docs/examples/kafka/restart/ops.yaml @@ -0,0 +1,11 @@ +apiVersion: ops.kubedb.com/v1alpha1 +kind: KafkaOpsRequest +metadata: + name: restart + namespace: demo +spec: + type: Restart + databaseRef: + name: kafka-prod + timeout: 5m + apply: Always \ No newline at end of file diff --git a/docs/guides/kafka/concepts/kafkaopsrequest.md b/docs/guides/kafka/concepts/kafkaopsrequest.md new file mode 100644 index 0000000000..d6934de198 --- /dev/null +++ b/docs/guides/kafka/concepts/kafkaopsrequest.md @@ -0,0 +1,622 @@ +--- +title: KafkaOpsRequests CRD +menu: + docs_{{ .version }}: + identifier: kf-opsrequest-concepts + name: Kafka + parent: kf-concepts-kafka + weight: 10 +menu_name: docs_{{ .version }} +section_menu_id: guides +--- + + +> New to KubeDB? Please start [here](/docs/README.md). + +# KafkaOpsRequest + +## What is KafkaOpsRequest + +`KafkaOpsRequest` is a Kubernetes `Custom Resource Definitions` (CRD). It provides a declarative configuration for [Kafka](https://kafka.apache.org/) administrative operations like database version updating, horizontal scaling, vertical scaling etc. in a Kubernetes native way. + +## KafkaOpsRequest CRD Specifications + +Like any official Kubernetes resource, a `KafkaOpsRequest` has `TypeMeta`, `ObjectMeta`, `Spec` and `Status` sections. 
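+
+A minimal skeleton of the object looks like the following (the values here are placeholders for illustration, not a runnable operation):
+
+```yaml
+apiVersion: ops.kubedb.com/v1alpha1
+kind: KafkaOpsRequest
+metadata:
+  name: <ops-request-name>
+  namespace: <namespace>
+spec:
+  type: <operation-type>      # e.g. Restart, UpdateVersion, HorizontalScaling
+  databaseRef:
+    name: <kafka-object-name>
+status:
+  phase: <phase>              # populated by the operator
+```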
+ +Here, some sample `KafkaOpsRequest` CRs for different administrative operations is given below: + +**Sample `KafkaOpsRequest` for updating database:** + +```yaml +apiVersion: ops.kubedb.com/v1alpha1 +kind: KafkaOpsRequest +metadata: + name: update-version + namespace: demo +spec: + type: UpdateVersion + databaseRef: + name: kafka-prod + updateVersion: + targetVersion: 3.6.1 +status: + conditions: + - lastTransitionTime: "2024-07-25T18:22:38Z" + message: Successfully completed the modification process + observedGeneration: 1 + reason: Successful + status: "True" + type: Successful + observedGeneration: 1 + phase: Successful +``` + +**Sample `KafkaOpsRequest` Objects for Horizontal Scaling of different component of the database:** + +```yaml +apiVersion: ops.kubedb.com/v1alpha1 +kind: KafkaOpsRequest +metadata: + name: kfops-hscale-combined + namespace: demo +spec: + type: HorizontalScaling + databaseRef: + name: kafka-dev + horizontalScaling: + node: 3 +status: + conditions: + - lastTransitionTime: "2024-07-25T18:22:38Z" + message: Successfully completed the modification process + observedGeneration: 1 + reason: Successful + status: "True" + type: Successful + observedGeneration: 1 + phase: Successful +``` + +```yaml +apiVersion: ops.kubedb.com/v1alpha1 +kind: KafkaOpsRequest +metadata: + name: kfops-hscale-down-topology + namespace: demo +spec: + type: HorizontalScaling + databaseRef: + name: kafka-prod + horizontalScaling: + topology: + broker: 2 + controller: 2 +status: + conditions: + - lastTransitionTime: "2024-07-25T18:22:38Z" + message: Successfully completed the modification process + observedGeneration: 1 + reason: Successful + status: "True" + type: Successful + observedGeneration: 1 + phase: Successful +``` + +**Sample `KafkaOpsRequest` Objects for Vertical Scaling of different component of the database:** + +```yaml +apiVersion: ops.kubedb.com/v1alpha1 +kind: KafkaOpsRequest +metadata: + name: kfops-vscale-combined + namespace: demo +spec: + type: VerticalScaling + databaseRef: + name: kafka-dev + verticalScaling: + node: + resources: + requests: + memory: "1.5Gi" + cpu: "0.7" + limits: + memory: "2Gi" + cpu: "1" +status: + conditions: + - lastTransitionTime: "2024-07-25T18:22:38Z" + message: Successfully completed the modification process + observedGeneration: 1 + reason: Successful + status: "True" + type: Successful + observedGeneration: 1 + phase: Successful +``` + +```yaml +apiVersion: ops.kubedb.com/v1alpha1 +kind: KafkaOpsRequest +metadata: + name: kfops-vscale-topology + namespace: demo +spec: + type: VerticalScaling + databaseRef: + name: kafka-prod + verticalScaling: + broker: + resources: + requests: + memory: "1.5Gi" + cpu: "0.7" + limits: + memory: "2Gi" + cpu: "1" + controller: + resources: + requests: + memory: "1.5Gi" + cpu: "0.7" + limits: + memory: "2Gi" + cpu: "1" +status: + conditions: + - lastTransitionTime: "2024-07-25T18:22:38Z" + message: Successfully completed the modification process + observedGeneration: 1 + reason: Successful + status: "True" + type: Successful + observedGeneration: 1 + phase: Successful +``` + +**Sample `KafkaOpsRequest` Objects for Reconfiguring different kafka mode:** + +```yaml +apiVersion: ops.kubedb.com/v1alpha1 +kind: KafkaOpsRequest +metadata: + name: kfops-reconfiugre-combined + namespace: demo +spec: + type: Reconfigure + databaseRef: + name: kafka-dev + configuration: + applyConfig: + server.properties: | + log.retention.hours=100 + default.replication.factor=2 +status: + conditions: + - lastTransitionTime: 
"2024-07-25T18:22:38Z" + message: Successfully completed the modification process + observedGeneration: 1 + reason: Successful + status: "True" + type: Successful + observedGeneration: 1 + phase: Successful +``` + +```yaml +apiVersion: ops.kubedb.com/v1alpha1 +kind: KafkaOpsRequest +metadata: + name: kfops-reconfiugre-topology + namespace: demo +spec: + type: Reconfigure + databaseRef: + name: kafka-prod + configuration: + applyConfig: + broker.properties: | + log.retention.hours=100 + default.replication.factor=2 + controller.properties: | + metadata.log.dir=/var/log/kafka/metadata-custom +status: + conditions: + - lastTransitionTime: "2024-07-25T18:22:38Z" + message: Successfully completed the modification process + observedGeneration: 1 + reason: Successful + status: "True" + type: Successful + observedGeneration: 1 + phase: Successful +``` + +```yaml +apiVersion: ops.kubedb.com/v1alpha1 +kind: KafkaOpsRequest +metadata: + name: kfops-reconfiugre-combined + namespace: demo +spec: + type: Reconfigure + databaseRef: + name: kafka-dev + configuration: + configSecret: + name: new-configsecret-combined +status: + conditions: + - lastTransitionTime: "2024-07-25T18:22:38Z" + message: Successfully completed the modification process + observedGeneration: 1 + reason: Successful + status: "True" + type: Successful + observedGeneration: 1 + phase: Successful +``` + +```yaml +apiVersion: ops.kubedb.com/v1alpha1 +kind: KafkaOpsRequest +metadata: + name: kfops-reconfiugre-topology + namespace: demo +spec: + type: Reconfigure + databaseRef: + name: kafka-prod + configuration: + configSecret: + name: new-configsecret-topology +status: + conditions: + - lastTransitionTime: "2024-07-25T18:22:38Z" + message: Successfully completed the modification process + observedGeneration: 1 + reason: Successful + status: "True" + type: Successful + observedGeneration: 1 + phase: Successful +``` + +**Sample `KafkaOpsRequest` Objects for Volume Expansion of different database components:** + +```yaml +apiVersion: ops.kubedb.com/v1alpha1 +kind: KafkaOpsRequest +metadata: + name: kfops-volume-exp-combined + namespace: demo +spec: + type: VolumeExpansion + databaseRef: + name: kafka-dev + volumeExpansion: + mode: "Online" + node: 2Gi +status: + conditions: + - lastTransitionTime: "2024-07-25T18:22:38Z" + message: Successfully completed the modification process + observedGeneration: 1 + reason: Successful + status: "True" + type: Successful + observedGeneration: 1 + phase: Successful +``` + +```yaml +apiVersion: ops.kubedb.com/v1alpha1 +kind: KafkaOpsRequest +metadata: + name: kfops-volume-exp-topology + namespace: demo +spec: + type: VolumeExpansion + databaseRef: + name: kafka-prod + volumeExpansion: + mode: "Online" + broker: 2Gi + controller: 2Gi +status: + conditions: + - lastTransitionTime: "2024-07-25T18:22:38Z" + message: Successfully completed the modification process + observedGeneration: 1 + reason: Successful + status: "True" + type: Successful + observedGeneration: 1 + phase: Successful +``` + +**Sample `KafkaOpsRequest` Objects for Reconfiguring TLS of the database:** + +```yaml +apiVersion: ops.kubedb.com/v1alpha1 +kind: KafkaOpsRequest +metadata: + name: kfops-add-tls + namespace: demo +spec: + type: ReconfigureTLS + databaseRef: + name: kafka-prod + tls: + issuerRef: + name: kf-issuer + kind: Issuer + apiGroup: "cert-manager.io" + certificates: + - alias: client + emailAddresses: + - abc@appscode.com +``` + +```yaml +apiVersion: ops.kubedb.com/v1alpha1 +kind: KafkaOpsRequest +metadata: + name: kfops-rotate 
+  namespace: demo
+spec:
+  type: ReconfigureTLS
+  databaseRef:
+    name: kafka-dev
+  tls:
+    rotateCertificates: true
+```
+
+```yaml
+apiVersion: ops.kubedb.com/v1alpha1
+kind: KafkaOpsRequest
+metadata:
+  name: kfops-change-issuer
+  namespace: demo
+spec:
+  type: ReconfigureTLS
+  databaseRef:
+    name: kafka-prod
+  tls:
+    issuerRef:
+      name: kf-new-issuer
+      kind: Issuer
+      apiGroup: "cert-manager.io"
+```
+
+```yaml
+apiVersion: ops.kubedb.com/v1alpha1
+kind: KafkaOpsRequest
+metadata:
+  name: kfops-remove
+  namespace: demo
+spec:
+  type: ReconfigureTLS
+  databaseRef:
+    name: kafka-prod
+  tls:
+    remove: true
+```
+
+Here, we are going to describe the various sections of a `KafkaOpsRequest` CRD.
+
+A `KafkaOpsRequest` object has the following fields in the `spec` section.
+
+### spec.databaseRef
+
+`spec.databaseRef` is a required field that points to the [Kafka](/docs/guides/kafka/concepts/kafka.md) object for which the administrative operations will be performed. This field consists of the following sub-field:
+
+- **spec.databaseRef.name :** specifies the name of the [Kafka](/docs/guides/kafka/concepts/kafka.md) object.
+
+### spec.type
+
+`spec.type` specifies the kind of operation that will be applied to the database. Currently, the following types of operations are allowed in `KafkaOpsRequest`:
+
+- `UpdateVersion`
+- `HorizontalScaling`
+- `VerticalScaling`
+- `VolumeExpansion`
+- `Reconfigure`
+- `ReconfigureTLS`
+- `Restart`
+
+> You can perform only one type of operation in a single `KafkaOpsRequest` CR. For example, if you want to update your database and scale up its replicas, you have to create two separate `KafkaOpsRequest` objects. First, create a `KafkaOpsRequest` for the version update. Once it is completed, you can create another `KafkaOpsRequest` for scaling.
+
+### spec.updateVersion
+
+If you want to update your Kafka version, you have to specify the `spec.updateVersion` section that specifies the desired version information. This field consists of the following sub-field:
+
+- `spec.updateVersion.targetVersion` refers to a [KafkaVersion](/docs/guides/kafka/concepts/kafkaversion.md) CR that contains the Kafka version information to which you want to update.
+
+> You can only update between Kafka versions. KubeDB does not support downgrades for Kafka.
+
+### spec.horizontalScaling
+
+If you want to scale up or scale down your Kafka cluster or its different components, you have to specify the `spec.horizontalScaling` section. This field consists of the following sub-fields:
+
+- `spec.horizontalScaling.node` indicates the desired number of nodes for a Kafka combined cluster after scaling. For example, if your combined cluster currently has 4 nodes and you want to add 2 more, you have to specify 6 in the `spec.horizontalScaling.node` field. Similarly, if you want to remove one node from the cluster, you have to specify 3.
+- `spec.horizontalScaling.topology` indicates the desired configuration of the topology nodes for a Kafka topology cluster after scaling. This field consists of the following sub-fields:
+  - `spec.horizontalScaling.topology.broker` indicates the desired number of broker nodes for a Kafka topology cluster after scaling.
+  - `spec.horizontalScaling.topology.controller` indicates the desired number of controller nodes for a Kafka topology cluster after scaling.
+
+> If the referenced Kafka object is a combined cluster, you can only specify the `spec.horizontalScaling.node` field.
+> If the referenced Kafka object is a topology cluster, you can only specify the `spec.horizontalScaling.topology` field. You cannot specify both fields at the same time.
+
+### spec.verticalScaling
+
+`spec.verticalScaling` is a required field for a `VerticalScaling` operation, specifying the `Kafka` resources like `cpu` and `memory` that will be scaled. This field consists of the following sub-fields:
+
+- `spec.verticalScaling.node` indicates the desired resources for a combined Kafka cluster after scaling.
+- `spec.verticalScaling.broker` indicates the desired resources for the brokers of a Kafka topology cluster after scaling.
+- `spec.verticalScaling.controller` indicates the desired resources for the controllers of a Kafka topology cluster after scaling.
+
+> If the referenced Kafka object is a combined cluster, you can only specify the `spec.verticalScaling.node` field. If the referenced Kafka object is a topology cluster, you can specify `spec.verticalScaling.broker`, `spec.verticalScaling.controller`, or both. You cannot combine `spec.verticalScaling.node` with the other fields, but you can specify `spec.verticalScaling.broker` and `spec.verticalScaling.controller` at the same time.
+
+Each of them has the following structure:
+
+```yaml
+requests:
+  memory: "200Mi"
+  cpu: "0.1"
+limits:
+  memory: "300Mi"
+  cpu: "0.2"
+```
+
+Here, when you specify a resource request, the scheduler uses this information to decide which node to place the Pod's container on. When you specify a resource limit for the container, the `kubelet` enforces that limit so that the running container is not allowed to use more of that resource than the limit you set. You can find more details [here](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/).
+
+### spec.volumeExpansion
+
+> To use the volume expansion feature, the storage class must support volume expansion.
+
+If you want to expand the volume of your Kafka cluster or its different components, you have to specify the `spec.volumeExpansion` section. This field consists of the following sub-fields:
+
+- `spec.volumeExpansion.mode` specifies the volume expansion mode. Supported values are `Online` & `Offline`. The default is `Online`.
+- `spec.volumeExpansion.node` indicates the desired size for the persistent volumes of a combined Kafka cluster.
+- `spec.volumeExpansion.broker` indicates the desired size for the persistent volumes of the brokers of a Kafka topology cluster.
+- `spec.volumeExpansion.controller` indicates the desired size for the persistent volumes of the controllers of a Kafka topology cluster.
+
+> If the referenced Kafka object is a combined cluster, you can only specify the `spec.volumeExpansion.node` field. If the referenced Kafka object is a topology cluster, you can specify `spec.volumeExpansion.broker`, `spec.volumeExpansion.controller`, or both. You cannot combine `spec.volumeExpansion.node` with the other fields, but you can specify `spec.volumeExpansion.broker` and `spec.volumeExpansion.controller` at the same time.
+
+All of them refer to the [Quantity](https://v1-22.docs.kubernetes.io/docs/reference/generated/kubernetes-api/v1.22/#quantity-resource-core) type of Kubernetes.
+
+Example usage of this field is given below:
+
+```yaml
+spec:
+  volumeExpansion:
+    node: "2Gi"
+```
+
+This will expand the volume size of all the combined nodes to 2 GB.
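+
+For a topology cluster, the broker and controller volumes are expanded separately, mirroring the `kfops-volume-exp-topology` sample above. A minimal sketch (the sizes here are only illustrative):
+
+```yaml
+spec:
+  volumeExpansion:
+    mode: "Online"
+    broker: "3Gi"
+    controller: "2Gi"
+```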
+
+### spec.configuration
+
+If you want to reconfigure your running Kafka cluster or its different components with a new custom configuration, you have to specify the `spec.configuration` section. This field consists of the following sub-fields:
+
+- `spec.configuration.configSecret` points to a secret in the same namespace as the Kafka resource, which contains the new custom configurations. If any configSecret was set on the database before, this secret will replace it. The value of the `stringData` field of the secret looks like below:
+
+```yaml
+server.properties: |
+  default.replication.factor=3
+  offsets.topic.replication.factor=3
+  log.retention.hours=100
+broker.properties: |
+  default.replication.factor=3
+  offsets.topic.replication.factor=3
+  log.retention.hours=100
+controller.properties: |
+  default.replication.factor=3
+  offsets.topic.replication.factor=3
+  log.retention.hours=100
+```
+
+> If you want to reconfigure a combined Kafka cluster, you can only specify the `server.properties` key. If you want to reconfigure a topology Kafka cluster, you can specify `broker.properties`, `controller.properties`, or both. You cannot combine `server.properties` with the other keys, but you can specify `broker.properties` and `controller.properties` at the same time.
+
+- `spec.configuration.applyConfig` contains the new custom configuration as a string, which will be merged with the previous configuration. It is a map whose keys support 3 values, namely `server.properties`, `broker.properties` and `controller.properties`, and whose values contain the corresponding configurations:
+
+```yaml
+  applyConfig:
+    server.properties: |
+      default.replication.factor=3
+      offsets.topic.replication.factor=3
+      log.retention.hours=100
+    broker.properties: |
+      default.replication.factor=3
+      offsets.topic.replication.factor=3
+      log.retention.hours=100
+    controller.properties: |
+      metadata.log.dir=/var/log/kafka/metadata-custom
+```
+
+- `spec.configuration.removeCustomConfig` is a boolean field. Set this field to `true` if you want to remove all the custom configuration from the deployed Kafka cluster.
+
+### spec.tls
+
+If you want to reconfigure the TLS configuration of your Kafka, i.e. add TLS, remove TLS, update the issuer/cluster issuer or certificates, or rotate the certificates, you have to specify the `spec.tls` section. This field consists of the following sub-fields:
+
+- `spec.tls.issuerRef` specifies the issuer name, kind and API group.
+- `spec.tls.certificates` specifies the certificates. You can learn more about this field from [here](/docs/guides/kafka/concepts/kafka.md#spectls).
+- `spec.tls.rotateCertificates` specifies that we want to rotate the certificates of this Kafka.
+- `spec.tls.remove` specifies that we want to remove TLS from this Kafka.
+
+### spec.timeout
+
+As the ops request steps are internally retried multiple times, this `timeout` field lets users specify a timeout (in seconds) for those steps of the ops request.
+If a step doesn't finish within the specified timeout, the ops request will result in failure.
+
+### spec.apply
+
+This field controls the execution of the opsRequest depending on the database state. It has two supported values: `Always` & `IfReady`.
+Use `IfReady` if you want to process the opsRequest only when the database is `Ready`, and use `Always` if you want to process the opsRequest irrespective of the database state.
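+
+For example, the snippet below (values are illustrative, taken from the restart guide) allows each step up to five minutes and processes the request regardless of the database state:
+
+```yaml
+spec:
+  timeout: 5m
+  apply: Always
+```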
+
+### KafkaOpsRequest `Status`
+
+`.status` describes the current state and progress of a `KafkaOpsRequest` operation. It has the following fields:
+
+### status.phase
+
+`status.phase` indicates the overall phase of the operation for this `KafkaOpsRequest`. It can have the following values:
+
+| Phase       | Meaning                                                                           |
+|-------------|-----------------------------------------------------------------------------------|
+| Successful  | KubeDB has successfully performed the operation requested in the KafkaOpsRequest  |
+| Progressing | KubeDB has started the execution of the applied KafkaOpsRequest                   |
+| Failed      | KubeDB has failed the operation requested in the KafkaOpsRequest                  |
+| Denied      | KubeDB has denied the operation requested in the KafkaOpsRequest                  |
+| Skipped     | KubeDB has skipped the operation requested in the KafkaOpsRequest                 |
+
+Important: The Ops-manager operator can skip an opsRequest only if its execution has not been started yet and a newer opsRequest has been applied in the cluster. In this case, the `spec.type` of the newer request has to be the same as the skipped one.
+
+### status.observedGeneration
+
+`status.observedGeneration` shows the most recent generation observed by the `KafkaOpsRequest` controller.
+
+### status.conditions
+
+`status.conditions` is an array that specifies the conditions of different steps of `KafkaOpsRequest` processing. Each condition entry has the following fields:
+
+- `type` specifies the type of the condition. KafkaOpsRequest has the following types of conditions:
+
+| Type                          | Meaning                                                                    |
+|-------------------------------|-----------------------------------------------------------------------------|
+| `Progressing`                 | Specifies that the operation is currently in the progressing state          |
+| `Successful`                  | Specifies that the operation on the database was successful                 |
+| `HaltDatabase`                | Specifies that the database has been halted by the operator                 |
+| `ResumeDatabase`              | Specifies that the database has been resumed by the operator                |
+| `Failed`                      | Specifies that the operation on the database has failed                     |
+| `Restart`                    | Specifies that the operator has started restarting the Kafka nodes          |
+| `RestartNodes`               | Specifies that the Kafka nodes have been restarted successfully             |
+
+Depending on the `spec.type` of the request, the operator also records operation-specific conditions, for example per-pod `GetPod--<pod-name>`, `EvictPod--<pod-name>` and `CheckPodRunning--<pod-name>` conditions during a restart, as shown in the restart guide.
+
+- The `status` field is a string, with possible values `True`, `False`, and `Unknown`.
+  - `status` will be `True` if the current transition succeeded.
+  - `status` will be `False` if the current transition failed.
+  - `status` will be `Unknown` if the current transition was denied.
+- The `message` field is a human-readable message indicating details about the condition.
+- The `reason` field is a unique, one-word, CamelCase reason for the condition's last transition.
+- The `lastTransitionTime` field provides a timestamp for when the operation last transitioned from one state to another.
+- The `observedGeneration` shows the most recent condition transition generation observed by the controller.
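+
+To inspect these fields on a live object, standard `kubectl` commands work. For example, assuming an ops request named `restart` in the `demo` namespace:
+
+```bash
+# Overall phase of the ops request
+$ kubectl get kfops -n demo restart -o jsonpath='{.status.phase}'
+
+# Full condition list
+$ kubectl get kfops -n demo restart -o jsonpath='{.status.conditions}'
+```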
diff --git a/docs/guides/kafka/configuration/_index.md b/docs/guides/kafka/configuration/_index.md
new file mode 100644
index 0000000000..81167c2af8
--- /dev/null
+++ b/docs/guides/kafka/configuration/_index.md
@@ -0,0 +1,10 @@
+---
+title: Run Kafka with Custom Configuration
+menu:
+  docs_{{ .version }}:
+    identifier: kf-configuration
+    name: Custom Configuration
+    parent: kf-kafka-guides
+    weight: 30
+menu_name: docs_{{ .version }}
+---
diff --git a/docs/guides/kafka/configuration/kafka-combined.md b/docs/guides/kafka/configuration/kafka-combined.md
new file mode 100644
index 0000000000..1a07e96bdf
--- /dev/null
+++ b/docs/guides/kafka/configuration/kafka-combined.md
@@ -0,0 +1,176 @@
+---
+title: Configuring Kafka Combined Cluster
+menu:
+  docs_{{ .version }}:
+    identifier: kf-configuration-combined-cluster
+    name: Combined Cluster
+    parent: kf-configuration
+    weight: 15
+menu_name: docs_{{ .version }}
+section_menu_id: guides
+---
+
+> New to KubeDB? Please start [here](/docs/README.md).
+
+# Configure Kafka Combined Cluster
+
+In a Kafka combined cluster, every node acts as both a broker and a controller simultaneously. In this tutorial, we will see how to configure a combined cluster with a custom configuration.
+
+## Before You Begin
+
+At first, you need to have a Kubernetes cluster, and the `kubectl` command-line tool must be configured to communicate with your cluster. If you do not already have a cluster, you can create one by using [kind](https://kind.sigs.k8s.io/docs/user/quick-start/).
+
+Now, install the KubeDB operator in your cluster following the steps [here](/docs/setup/README.md).
+
+To keep things isolated, this tutorial uses a separate namespace called `demo` throughout.
+
+```bash
+$ kubectl create namespace demo
+namespace/demo created
+
+$ kubectl get namespace
+NAME   STATUS   AGE
+demo   Active   9s
+```
+
+> Note: YAML files used in this tutorial are stored [here](https://github.com/kubedb/docs/tree/{{< param "info.version" >}}/docs/examples/kafka/configuration/) in the GitHub repository [kubedb/docs](https://github.com/kubedb/docs).
+
+## Find Available StorageClass
+
+We will have to provide a `StorageClass` in the Kafka CR specification. Check the available `StorageClass` in your cluster using the following command,
+
+```bash
+$ kubectl get storageclass
+NAME                 PROVISIONER             RECLAIMPOLICY   VOLUMEBINDINGMODE      ALLOWVOLUMEEXPANSION   AGE
+standard (default)   rancher.io/local-path   Delete          WaitForFirstConsumer   false                  1h
+```
+
+Here, we have the `standard` StorageClass in our cluster from the [Local Path Provisioner](https://github.com/rancher/local-path-provisioner).
+
+## Use Custom Configuration
+
+Say we want to change the default log retention time and the default replication factor for newly created topics. Let's create the `server.properties` file with our desired configuration.
+
+**server.properties:**
+
+```properties
+log.retention.hours=100
+default.replication.factor=2
+```
+
+Let's create a k8s secret containing the above configuration where the file name will be the key and the file content will be the value:
+
+```yaml
+apiVersion: v1
+kind: Secret
+metadata:
+  name: configsecret-combined
+  namespace: demo
+stringData:
+  server.properties: |-
+    log.retention.hours=100
+    default.replication.factor=2
+```
+
+```bash
+$ kubectl apply -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/kafka/configuration/configsecret-combined.yaml
+secret/configsecret-combined created
+```
+
+Now that the config secret is created, it needs to be mentioned in the [Kafka](/docs/guides/kafka/concepts/kafka.md) object's yaml:
+
+```yaml
+apiVersion: kubedb.com/v1
+kind: Kafka
+metadata:
+  name: kafka-dev
+  namespace: demo
+spec:
+  replicas: 2
+  version: 3.6.1
+  configSecret:
+    name: configsecret-combined
+  podTemplate:
+    spec:
+      containers:
+        - name: kafka
+          resources:
+            limits:
+              cpu: 1
+              memory: 2Gi
+            requests:
+              cpu: 500m
+              memory: 1Gi
+  storage:
+    accessModes:
+      - ReadWriteOnce
+    resources:
+      requests:
+        storage: 1Gi
+    storageClassName: standard
+  storageType: Durable
+  deletionPolicy: WipeOut
+```
+
+Now, create the Kafka object by the following command:
+
+```bash
+$ kubectl apply -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/kafka/configuration/kafka-combined.yaml
+kafka.kubedb.com/kafka-dev created
+```
+
+Now, wait for the Kafka to become ready:
+
+```bash
+$ kubectl get kf -n demo -w
+NAME        TYPE            VERSION   STATUS         AGE
+kafka-dev   kubedb.com/v1   3.6.1     Provisioning   0s
+kafka-dev   kubedb.com/v1   3.6.1     Provisioning   24s
+.
+.
+kafka-dev   kubedb.com/v1   3.6.1     Ready          92s
+```
+
+## Verify Configuration
+
+Let's exec into one of the Kafka pods that we have created and check whether the configurations have been applied:
+
+```bash
+$ kubectl exec -it -n demo kafka-dev-0 -- bash
+kafka@kafka-dev-0:~$
+```
+
+Now, execute the following commands to see the configurations:
+
+```bash
+kafka@kafka-dev-0:~$ kafka-configs.sh --bootstrap-server localhost:9092 --command-config /opt/kafka/config/clientauth.properties --describe --entity-type brokers --all | grep log.retention.hours
+  log.retention.hours=100 sensitive=false synonyms={STATIC_BROKER_CONFIG:log.retention.hours=100, DEFAULT_CONFIG:log.retention.hours=168}
+  log.retention.hours=100 sensitive=false synonyms={STATIC_BROKER_CONFIG:log.retention.hours=100, DEFAULT_CONFIG:log.retention.hours=168}
+kafka@kafka-dev-0:~$ kafka-configs.sh --bootstrap-server localhost:9092 --command-config /opt/kafka/config/clientauth.properties --describe --entity-type brokers --all | grep default.replication.factor
+  default.replication.factor=2 sensitive=false synonyms={STATIC_BROKER_CONFIG:default.replication.factor=2, DEFAULT_CONFIG:default.replication.factor=1}
+  default.replication.factor=2 sensitive=false synonyms={STATIC_BROKER_CONFIG:default.replication.factor=2, DEFAULT_CONFIG:default.replication.factor=1}
+```
+
+Here, we can see that our given configuration is applied to the Kafka cluster for all brokers.
+
+## Cleanup
+
+To clean up the Kubernetes resources created by this tutorial, run:
+
+```bash
+$ kubectl delete kf -n demo kafka-dev
+
+$ kubectl delete secret -n demo configsecret-combined
+
+$ kubectl delete namespace demo
+```
+
+## Next Steps
+
+- Detail concepts of [Kafka object](/docs/guides/kafka/concepts/kafka.md).
+- Different Kafka topology clustering modes [here](/docs/guides/kafka/clustering/_index.md).
+- Monitor your Kafka database with KubeDB using [out-of-the-box Prometheus operator](/docs/guides/mongodb/monitoring/using-prometheus-operator.md).
+- Monitor your Kafka database with KubeDB using [out-of-the-box builtin-Prometheus](/docs/guides/mongodb/monitoring/using-builtin-prometheus.md).
+- Want to hack on KubeDB? Check our [contribution guidelines](/docs/CONTRIBUTING.md).
+
diff --git a/docs/guides/kafka/configuration/kafka-topology.md b/docs/guides/kafka/configuration/kafka-topology.md
new file mode 100644
index 0000000000..24db4b80cd
--- /dev/null
+++ b/docs/guides/kafka/configuration/kafka-topology.md
@@ -0,0 +1,219 @@
+---
+title: Configuring Kafka Topology Cluster
+menu:
+  docs_{{ .version }}:
+    identifier: kf-configuration-topology-cluster
+    name: Topology Cluster
+    parent: kf-configuration
+    weight: 15
+menu_name: docs_{{ .version }}
+section_menu_id: guides
+---
+
+> New to KubeDB? Please start [here](/docs/README.md).
+
+# Configure Kafka Topology Cluster
+
+In a Kafka topology cluster, broker and controller nodes run separately. In this tutorial, we will see how to configure a topology cluster with a custom configuration.
+
+## Before You Begin
+
+At first, you need to have a Kubernetes cluster, and the `kubectl` command-line tool must be configured to communicate with your cluster. If you do not already have a cluster, you can create one by using [kind](https://kind.sigs.k8s.io/docs/user/quick-start/).
+
+Now, install the KubeDB operator in your cluster following the steps [here](/docs/setup/README.md).
+
+To keep things isolated, this tutorial uses a separate namespace called `demo` throughout.
+
+```bash
+$ kubectl create namespace demo
+namespace/demo created
+
+$ kubectl get namespace
+NAME   STATUS   AGE
+demo   Active   9s
+```
+
+> Note: YAML files used in this tutorial are stored [here](https://github.com/kubedb/docs/tree/{{< param "info.version" >}}/docs/examples/kafka/configuration/) in the GitHub repository [kubedb/docs](https://github.com/kubedb/docs).
+
+## Find Available StorageClass
+
+We will have to provide a `StorageClass` in the Kafka CR specification. Check the available `StorageClass` in your cluster using the following command,
+
+```bash
+$ kubectl get storageclass
+NAME                 PROVISIONER             RECLAIMPOLICY   VOLUMEBINDINGMODE      ALLOWVOLUMEEXPANSION   AGE
+standard (default)   rancher.io/local-path   Delete          WaitForFirstConsumer   false                  1h
+```
+
+Here, we have the `standard` StorageClass in our cluster from the [Local Path Provisioner](https://github.com/rancher/local-path-provisioner).
+
+## Use Custom Configuration
+
+Say we want to change the default log retention time and the default replication factor for newly created topics on the brokers. Let's create the `broker.properties` file with our desired configuration.
+
+**broker.properties:**
+
+```properties
+log.retention.hours=100
+default.replication.factor=2
+```
+
+We also want to change the `metadata.log.dir` of all the controller nodes. Let's create the `controller.properties` file with our desired configuration.
+
+**controller.properties:**
+
+```properties
+metadata.log.dir=/var/log/kafka/metadata-custom
+```
+
+Let's create a k8s secret containing the above configurations where the file names will be the keys and the file contents will be the values:
+
+```yaml
+apiVersion: v1
+kind: Secret
+metadata:
+  name: configsecret-topology
+  namespace: demo
+stringData:
+  broker.properties: |-
+    log.retention.hours=100
+    default.replication.factor=2
+  controller.properties: |-
+    metadata.log.dir=/var/log/kafka/metadata-custom
+```
+
+```bash
+$ kubectl apply -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/kafka/configuration/configsecret-topology.yaml
+secret/configsecret-topology created
+```
+
+Now that the config secret is created, it needs to be mentioned in the [Kafka](/docs/guides/kafka/concepts/kafka.md) object's yaml:
+
+```yaml
+apiVersion: kubedb.com/v1
+kind: Kafka
+metadata:
+  name: kafka-prod
+  namespace: demo
+spec:
+  version: 3.6.1
+  configSecret:
+    name: configsecret-topology
+  topology:
+    broker:
+      replicas: 2
+      podTemplate:
+        spec:
+          containers:
+            - name: kafka
+              resources:
+                requests:
+                  cpu: "500m"
+                  memory: "1Gi"
+      storage:
+        accessModes:
+          - ReadWriteOnce
+        resources:
+          requests:
+            storage: 1Gi
+        storageClassName: standard
+    controller:
+      replicas: 2
+      podTemplate:
+        spec:
+          containers:
+            - name: kafka
+              resources:
+                requests:
+                  cpu: "500m"
+                  memory: "1Gi"
+      storage:
+        accessModes:
+          - ReadWriteOnce
+        resources:
+          requests:
+            storage: 1Gi
+        storageClassName: standard
+  storageType: Durable
+  deletionPolicy: WipeOut
+```
+
+Now, create the Kafka object by the following command:
+
+```bash
+$ kubectl apply -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/kafka/configuration/kafka-topology.yaml
+kafka.kubedb.com/kafka-prod created
+```
+
+Now, wait for the Kafka to become ready:
+
+```bash
+$ kubectl get kf -n demo -w
+NAME         TYPE            VERSION   STATUS         AGE
+kafka-prod   kubedb.com/v1   3.6.1     Provisioning   5s
+kafka-prod   kubedb.com/v1   3.6.1     Provisioning   7s
+.
+.
+kafka-prod   kubedb.com/v1   3.6.1     Ready          2m
+```
+
+## Verify Configuration
+
+Let's exec into one of the Kafka broker pods that we have created and check whether the configurations have been applied:
+
+```bash
+$ kubectl exec -it -n demo kafka-prod-broker-0 -- bash
+kafka@kafka-prod-broker-0:~$
+```
+
+Now, execute the following commands to see the configurations:
+
+```bash
+kafka@kafka-prod-broker-0:~$ kafka-configs.sh --bootstrap-server localhost:9092 --command-config /opt/kafka/config/clientauth.properties --describe --entity-type brokers --all | grep log.retention.hours
+  log.retention.hours=100 sensitive=false synonyms={STATIC_BROKER_CONFIG:log.retention.hours=100, DEFAULT_CONFIG:log.retention.hours=168}
+  log.retention.hours=100 sensitive=false synonyms={STATIC_BROKER_CONFIG:log.retention.hours=100, DEFAULT_CONFIG:log.retention.hours=168}
+kafka@kafka-prod-broker-0:~$ kafka-configs.sh --bootstrap-server localhost:9092 --command-config /opt/kafka/config/clientauth.properties --describe --entity-type brokers --all | grep default.replication.factor
+  default.replication.factor=2 sensitive=false synonyms={STATIC_BROKER_CONFIG:default.replication.factor=2, DEFAULT_CONFIG:default.replication.factor=1}
+  default.replication.factor=2 sensitive=false synonyms={STATIC_BROKER_CONFIG:default.replication.factor=2, DEFAULT_CONFIG:default.replication.factor=1}
+```
+
+Here, we can see that our given configuration is applied to the Kafka cluster for all brokers.
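+
+As a quick sanity check, you can also create a topic without specifying a replication factor and confirm that it picks up the new default of `2`. The commands below are a sketch; the topic name is arbitrary and `clientauth.properties` is the same client config file used above:
+
+```bash
+kafka@kafka-prod-broker-0:~$ kafka-topics.sh --create --topic config-test --bootstrap-server localhost:9092 --command-config /opt/kafka/config/clientauth.properties
+kafka@kafka-prod-broker-0:~$ kafka-topics.sh --describe --topic config-test --bootstrap-server localhost:9092 --command-config /opt/kafka/config/clientauth.properties
+```
+
+The `--describe` output should report `ReplicationFactor: 2` for the new topic.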
+
+Now, let's exec into one of the Kafka controller pods that we have created and check whether the configurations have been applied:
+
+```bash
+$ kubectl exec -it -n demo kafka-prod-controller-0 -- bash
+kafka@kafka-prod-controller-0:~$
+```
+
+Now, execute the following command to see the metadata storage directory:
+
+```bash
+kafka@kafka-prod-controller-0:~$ ls /var/log/kafka/
+1000  cluster_id  metadata-custom
+```
+
+Here, we can see that our given configuration is applied to the controller. The metadata log directory has been changed to `/var/log/kafka/metadata-custom`.
+
+## Cleanup
+
+To clean up the Kubernetes resources created by this tutorial, run:
+
+```bash
+$ kubectl delete kf -n demo kafka-prod
+
+$ kubectl delete secret -n demo configsecret-topology
+
+$ kubectl delete namespace demo
+```
+
+## Next Steps
+
+- Detail concepts of [Kafka object](/docs/guides/kafka/concepts/kafka.md).
+- Different Kafka topology clustering modes [here](/docs/guides/kafka/clustering/_index.md).
+- Monitor your Kafka database with KubeDB using [out-of-the-box Prometheus operator](/docs/guides/mongodb/monitoring/using-prometheus-operator.md).
+- Monitor your Kafka database with KubeDB using [out-of-the-box builtin-Prometheus](/docs/guides/mongodb/monitoring/using-builtin-prometheus.md).
+- Want to hack on KubeDB? Check our [contribution guidelines](/docs/CONTRIBUTING.md).
+
diff --git a/docs/guides/kafka/restart/_index.md b/docs/guides/kafka/restart/_index.md
new file mode 100644
index 0000000000..d0d4240b4d
--- /dev/null
+++ b/docs/guides/kafka/restart/_index.md
@@ -0,0 +1,10 @@
+---
+title: Restart Kafka
+menu:
+  docs_{{ .version }}:
+    identifier: kf-restart
+    name: Restart
+    parent: kf-kafka-guides
+    weight: 46
+menu_name: docs_{{ .version }}
+---
diff --git a/docs/guides/kafka/restart/restart.md b/docs/guides/kafka/restart/restart.md
new file mode 100644
index 0000000000..23a6160aa9
--- /dev/null
+++ b/docs/guides/kafka/restart/restart.md
@@ -0,0 +1,251 @@
+---
+title: Restart Kafka
+menu:
+  docs_{{ .version }}:
+    identifier: kf-restart-details
+    name: Restart Kafka
+    parent: kf-restart
+    weight: 10
+menu_name: docs_{{ .version }}
+section_menu_id: guides
+---
+
+> New to KubeDB? Please start [here](/docs/README.md).
+
+# Restart Kafka
+
+KubeDB supports restarting the Kafka database via a KafkaOpsRequest. Restarting is useful if some pods get stuck in some phase, or they are not working correctly. This tutorial will show you how to use that.
+
+## Before You Begin
+
+- At first, you need to have a Kubernetes cluster, and the kubectl command-line tool must be configured to communicate with your cluster. If you do not already have a cluster, you can create one by using [kind](https://kind.sigs.k8s.io/docs/user/quick-start/).
+
+- Now, install the KubeDB CLI on your workstation and the KubeDB operator in your cluster following the steps [here](/docs/setup/README.md).
+
+- To keep things isolated, this tutorial uses a separate namespace called `demo` throughout.
+
+```bash
+$ kubectl create ns demo
+namespace/demo created
+```
+
+> Note: YAML files used in this tutorial are stored in the [docs/examples/kafka](https://github.com/kubedb/docs/tree/{{< param "info.version" >}}/docs/examples/kafka) folder in the GitHub repository [kubedb/docs](https://github.com/kubedb/docs).
+
+## Deploy Kafka
+
+In this section, we are going to deploy a Kafka database using KubeDB.
+
+```yaml
+apiVersion: kubedb.com/v1
+kind: Kafka
+metadata:
+  name: kafka-prod
+  namespace: demo
+spec:
+  version: 3.6.1
+  topology:
+    broker:
+      replicas: 2
+      podTemplate:
+        spec:
+          containers:
+            - name: kafka
+              resources:
+                requests:
+                  cpu: "500m"
+                  memory: "1Gi"
+      storage:
+        accessModes:
+          - ReadWriteOnce
+        resources:
+          requests:
+            storage: 1Gi
+        storageClassName: standard
+    controller:
+      replicas: 2
+      podTemplate:
+        spec:
+          containers:
+            - name: kafka
+              resources:
+                requests:
+                  cpu: "500m"
+                  memory: "1Gi"
+      storage:
+        accessModes:
+          - ReadWriteOnce
+        resources:
+          requests:
+            storage: 1Gi
+        storageClassName: standard
+  storageType: Durable
+  deletionPolicy: DoNotTerminate
+```
+
+- `spec.topology` represents the specification for the Kafka topology.
+  - `broker` denotes the broker nodes of the Kafka topology.
+  - `controller` denotes the controller nodes of the Kafka topology.
+
+Let's create the `Kafka` CR we have shown above,
+
+```bash
+$ kubectl create -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/kafka/restart/kafka.yaml
+kafka.kubedb.com/kafka-prod created
+```
+
+## Apply Restart opsRequest
+
+```yaml
+apiVersion: ops.kubedb.com/v1alpha1
+kind: KafkaOpsRequest
+metadata:
+  name: restart
+  namespace: demo
+spec:
+  type: Restart
+  databaseRef:
+    name: kafka-prod
+  timeout: 5m
+  apply: Always
+```
+
+- `spec.type` specifies the type of the ops request.
+- `spec.databaseRef` holds the name of the Kafka CR. It should be available in the same namespace as the opsRequest.
+- The meaning of the `spec.timeout` & `spec.apply` fields can be found [here](/docs/guides/kafka/concepts/kafkaopsrequest.md#spectimeout).
+
+> Note: The method of restarting a combined cluster is exactly the same as above. All you need is to specify the corresponding Kafka name in the `spec.databaseRef.name` field.
+
+Let's create the `KafkaOpsRequest` CR we have shown above,
+
+```bash
+$ kubectl create -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/kafka/restart/ops.yaml
+kafkaopsrequest.ops.kubedb.com/restart created
+```
+
+Now the Ops-manager operator will first restart the controller pods, then the broker pods of the referenced Kafka.
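+
+While the request is progressing, you can watch the pods get evicted and recreated one at a time with a plain `kubectl` watch (press `Ctrl+C` to stop):
+
+```bash
+$ kubectl get pods -n demo -w
+```
+
+Once it finishes, check the phase and the recorded conditions of the ops request: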
+ +```shell +$ kubectl get kfops -n demo +NAME TYPE STATUS AGE +restart Restart Successful 119s + +$ kubectl get kfops -n demo restart -oyaml +apiVersion: ops.kubedb.com/v1alpha1 +kind: KafkaOpsRequest +metadata: + annotations: + kubectl.kubernetes.io/last-applied-configuration: | + {"apiVersion":"ops.kubedb.com/v1alpha1","kind":"KafkaOpsRequest","metadata":{"annotations":{},"name":"restart","namespace":"demo"},"spec":{"apply":"Always","databaseRef":{"name":"kafka-prod"},"timeout":"3m","type":"Restart"}} + creationTimestamp: "2024-07-26T10:12:10Z" + generation: 1 + name: restart + namespace: demo + resourceVersion: "24434" + uid: 956a374e-1d6f-4f68-828f-cfed4410b175 +spec: + apply: Always + databaseRef: + name: kafka-prod + timeout: 3m + type: Restart +status: + conditions: + - lastTransitionTime: "2024-07-26T10:12:10Z" + message: Kafka ops-request has started to restart kafka nodes + observedGeneration: 1 + reason: Restart + status: "True" + type: Restart + - lastTransitionTime: "2024-07-26T10:12:18Z" + message: get pod; ConditionStatus:True; PodName:kafka-prod-controller-0 + observedGeneration: 1 + status: "True" + type: GetPod--kafka-prod-controller-0 + - lastTransitionTime: "2024-07-26T10:12:18Z" + message: evict pod; ConditionStatus:True; PodName:kafka-prod-controller-0 + observedGeneration: 1 + status: "True" + type: EvictPod--kafka-prod-controller-0 + - lastTransitionTime: "2024-07-26T10:12:23Z" + message: check pod running; ConditionStatus:True; PodName:kafka-prod-controller-0 + observedGeneration: 1 + status: "True" + type: CheckPodRunning--kafka-prod-controller-0 + - lastTransitionTime: "2024-07-26T10:12:28Z" + message: get pod; ConditionStatus:True; PodName:kafka-prod-controller-1 + observedGeneration: 1 + status: "True" + type: GetPod--kafka-prod-controller-1 + - lastTransitionTime: "2024-07-26T10:12:28Z" + message: evict pod; ConditionStatus:True; PodName:kafka-prod-controller-1 + observedGeneration: 1 + status: "True" + type: EvictPod--kafka-prod-controller-1 + - lastTransitionTime: "2024-07-26T10:12:38Z" + message: check pod running; ConditionStatus:True; PodName:kafka-prod-controller-1 + observedGeneration: 1 + status: "True" + type: CheckPodRunning--kafka-prod-controller-1 + - lastTransitionTime: "2024-07-26T10:12:43Z" + message: get pod; ConditionStatus:True; PodName:kafka-prod-broker-0 + observedGeneration: 1 + status: "True" + type: GetPod--kafka-prod-broker-0 + - lastTransitionTime: "2024-07-26T10:12:43Z" + message: evict pod; ConditionStatus:True; PodName:kafka-prod-broker-0 + observedGeneration: 1 + status: "True" + type: EvictPod--kafka-prod-broker-0 + - lastTransitionTime: "2024-07-26T10:13:18Z" + message: check pod running; ConditionStatus:True; PodName:kafka-prod-broker-0 + observedGeneration: 1 + status: "True" + type: CheckPodRunning--kafka-prod-broker-0 + - lastTransitionTime: "2024-07-26T10:13:23Z" + message: get pod; ConditionStatus:True; PodName:kafka-prod-broker-1 + observedGeneration: 1 + status: "True" + type: GetPod--kafka-prod-broker-1 + - lastTransitionTime: "2024-07-26T10:13:23Z" + message: evict pod; ConditionStatus:True; PodName:kafka-prod-broker-1 + observedGeneration: 1 + status: "True" + type: EvictPod--kafka-prod-broker-1 + - lastTransitionTime: "2024-07-26T10:13:28Z" + message: check pod running; ConditionStatus:True; PodName:kafka-prod-broker-1 + observedGeneration: 1 + status: "True" + type: CheckPodRunning--kafka-prod-broker-1 + - lastTransitionTime: "2024-07-26T10:13:33Z" + message: Successfully Restarted Kafka nodes + observedGeneration: 
1
+    reason: RestartNodes
+    status: "True"
+    type: RestartNodes
+  - lastTransitionTime: "2024-07-26T10:13:33Z"
+    message: Controller has successfully restart the Kafka replicas
+    observedGeneration: 1
+    reason: Successful
+    status: "True"
+    type: Successful
+  observedGeneration: 1
+  phase: Successful
+```
+
+## Cleaning up
+
+To clean up the Kubernetes resources created by this tutorial, run:
+
+```bash
+kubectl delete kafkaopsrequest -n demo restart
+kubectl delete kafka -n demo kafka-prod
+kubectl delete ns demo
+```
+
+## Next Steps
+
+- Detail concepts of [Kafka object](/docs/guides/kafka/concepts/kafka.md).
+- Different Kafka topology clustering modes [here](/docs/guides/kafka/clustering/_index.md).
+- Monitor your Kafka database with KubeDB using [out-of-the-box Prometheus operator](/docs/guides/mongodb/monitoring/using-prometheus-operator.md).
+- Monitor your Kafka database with KubeDB using [out-of-the-box builtin-Prometheus](/docs/guides/mongodb/monitoring/using-builtin-prometheus.md).
+- Want to hack on KubeDB? Check our [contribution guidelines](/docs/CONTRIBUTING.md).