From 59c595189ff56554166946e8a2ef09441de3169f Mon Sep 17 00:00:00 2001 From: David Grier Date: Wed, 6 Nov 2019 02:00:13 +0000 Subject: [PATCH 01/10] initial kafka commit --- playbooks/confluent-community.yml | 87 ++++++++++++ playbooks/create_groups.yml | 116 +++++++++++++++- playbooks/group_vars/kafka-nodes | 14 ++ playbooks/group_vars/kafka-nodes-templates | 126 +++++++++++++++++ playbooks/group_vars/kafka-server-nodes | 15 ++ playbooks/group_vars/kafka-zookeeper-nodes | 15 ++ .../files/confluent-kafka.service | 9 ++ .../confluent-community-broker/tasks/main.yml | 128 ++++++++++++++++++ .../templates/consumer.properties | 25 ++++ .../templates/kafka-rest.properties | 4 + .../templates/ksql-server.properties | 14 ++ .../templates/producer.properties | 45 ++++++ .../templates/server1.properties | 38 ++++++ .../files/confluent.repo | 13 ++ .../files/java_home.sh | 3 + .../handlers/main.yml | 5 + .../confluent-community-common/tasks/main.yml | 128 ++++++++++++++++++ .../tasks/monitoring.yml | 47 +++++++ .../templates/hosts.j2 | 5 + .../vars/redhat-7.yml | 17 +++ .../tasks/main.yml | 1 + .../templates/zookeeper.properties | 15 ++ 22 files changed, 866 insertions(+), 4 deletions(-) create mode 100644 playbooks/confluent-community.yml create mode 100644 playbooks/group_vars/kafka-nodes create mode 100644 playbooks/group_vars/kafka-nodes-templates create mode 100644 playbooks/group_vars/kafka-server-nodes create mode 100644 playbooks/group_vars/kafka-zookeeper-nodes create mode 100644 playbooks/roles/confluent-community-broker/files/confluent-kafka.service create mode 100644 playbooks/roles/confluent-community-broker/tasks/main.yml create mode 100644 playbooks/roles/confluent-community-broker/templates/consumer.properties create mode 100644 playbooks/roles/confluent-community-broker/templates/kafka-rest.properties create mode 100644 playbooks/roles/confluent-community-broker/templates/ksql-server.properties create mode 100644 
playbooks/roles/confluent-community-broker/templates/producer.properties create mode 100644 playbooks/roles/confluent-community-broker/templates/server1.properties create mode 100644 playbooks/roles/confluent-community-common/files/confluent.repo create mode 100644 playbooks/roles/confluent-community-common/files/java_home.sh create mode 100644 playbooks/roles/confluent-community-common/handlers/main.yml create mode 100644 playbooks/roles/confluent-community-common/tasks/main.yml create mode 100644 playbooks/roles/confluent-community-common/tasks/monitoring.yml create mode 100644 playbooks/roles/confluent-community-common/templates/hosts.j2 create mode 100644 playbooks/roles/confluent-community-common/vars/redhat-7.yml create mode 100644 playbooks/roles/confluent-community-zookeeper/tasks/main.yml create mode 100644 playbooks/roles/confluent-community-zookeeper/templates/zookeeper.properties diff --git a/playbooks/confluent-community.yml b/playbooks/confluent-community.yml new file mode 100644 index 0000000..ab78572 --- /dev/null +++ b/playbooks/confluent-community.yml @@ -0,0 +1,87 @@ +--- +- include: create_groups.yml + +- name: Apply the common role to all nodes + hosts: kafka-cluster + any_errors_fatal: true + become: yes + pre_tasks: + - name: Show kafka-cluster info + debug: var=hostvars[inventory_hostname] + when: debug + + - name: include confluent community vars + include_vars: group_vars/confluent-community + + roles: + - confluent-community-common + +- name: Apply the zookeeper role to all zk nodes + hosts: kafka-zookeeper-cluster + any_errors_fatal: true + become: yes + pre_tasks: + - name: Show kafka-zookeeper-cluster info + debug: var=hostvars[inventory_hostname] + when: debug + + - name: include confluent-community vars + include_vars: group_vars/confluent-community + + roles: + - confluent-community-zookeeper + +- name: Apply the broker role to all broker nodes + hosts: kafka-broker-cluster + any_errors_fatal: true + become: yes + pre_tasks: + - 
name: Show hadoop-broker-cluster info + debug: var=hostvars[inventory_hostname] + when: debug + + - name: include confluent community vars + include_vars: group_vars/confluent-community + + roles: + - confluent-community-broker + +- name: "generate site facts" + hosts: localhost + any_errors_fatal: true + become: no + dnmemory: "{{ hostvars[groups['slave-nodes'][0]]['ansible_memtotal_mb'] / 1024 }}" + mnmemory: "{{ hostvars[groups['master-nodes'][0]]['ansible_memtotal_mb'] / 1024 }}" + cores: "{{ hostvars[groups['slave-nodes'][0]]['ansible_processor_count'] }}" + tasks: + - name: "gather site facts" + action: + module: confluentsitefacts.py + dnmemory="{{ dnmemory }}" + mnmemory="{{ mnmemory }}" + cores="{{ cores }}" + manager_server="localhost" + ambari_pass="admin" + cluster_name="{{ cluster_name }}" + compare="false" + current_facts="false" + +- name: Apply the confluent-community manager role to manager node group + hosts: confluent-community-manager + become: yes + pre_tasks: + - name: include confluent community vars + include_vars: group_vars/confluent-community + + roles: + - confluent-community-manager + post_tasks: + - name: Cleanup the temporary files + file: path={{ item }} state=absent + with_items: + - /tmp/cluster_blueprint + - /tmp/cluster_template + - /tmp/alert_targets + - /tmp/confluentrepo +# tags: +# - confluent-community-manager-only diff --git a/playbooks/create_groups.yml b/playbooks/create_groups.yml index 3416a66..114ea9e 100644 --- a/playbooks/create_groups.yml +++ b/playbooks/create_groups.yml @@ -5,7 +5,7 @@ gather_facts: False tasks: - name: Add all cluster nodes to the hadoop-cluster group - always_run: yes +# check_mode: no add_host: name: "{{ hostvars[item].inventory_hostname }}" ansible_host: "{{ hostvars[item].ansible_host|default(hostvars[item].ansible_ssh_host) }}" @@ -18,9 +18,33 @@ - "{{ groups['master-nodes']|default([]) }}" - "{{ groups['slave-nodes']|default([]) }}" - "{{ groups['edge-nodes']|default([]) }}" - register: 
hadoop_cluster + register: hadoop_cluster + when: "'hadoop-cluster' not in groups or groups['hadoop-cluster']|length < 1" + - name: Add all cluster nodes to the common-cluster group +# check_mode: no + add_host: + name: "{{ hostvars[item].inventory_hostname }}" + ansible_host: "{{ hostvars[item].ansible_host|default(hostvars[item].ansible_ssh_host) }}" + ansible_user: "{{ hostvars[item].ansible_user|default('root') }}" + ansible_ssh_pass: "{{ hostvars[item].ansible_ssh_pass|default('') }}" + ansible_become_user: root + ansible_become_pass: "{{ hostvars[item].ansible_ssh_pass|default('') }}" + groups: bd-cluster + with_flattened: + - "{{ groups['master-nodes']|default([]) }}" + - "{{ groups['slave-nodes']|default([]) }}" + - "{{ groups['edge-nodes']|default([]) }}" + - "{{ groups['kafka-nodes']|default([]) }}" + - "{{ groups['kube-nodes']|default([]) }}" + - "{{ groups['kube-masters']|default([]) }}" + register: bd_cluster + when: "'bd-cluster' not in groups or groups['bd-cluster']|length < 1" + + - name: debugging some vars + debug: + var: groups['hadoop-cluster'] + - name: "include cdh vars" include_vars: group_vars/cloudera when: distro == "cdh" @@ -29,8 +53,14 @@ include_vars: group_vars/hortonworks when: distro == "hdp" + - name: "include kube-all vars" + include_vars: group_vars/kube-all.yml + + - name: "include kube-cluster vars" + include_vars: group_vars/kube-cluster.yml + - name: Add the last masternode to ambari-node variable group - always_run: yes +# check_mode: no + add_host: + name: "{{ hostvars[item].inventory_hostname }}" + ansible_host: "{{ hostvars[item].ansible_host|default(hostvars[item].ansible_ssh_host) }}" + ansible_user: "{{ hostvars[item].ansible_user|default('root') }}" + ansible_ssh_pass: "{{ hostvars[item].ansible_ssh_pass|default('') }}" + ansible_become_user: root + ansible_become_pass: "{{ hostvars[item].ansible_ssh_pass|default('') }}" with_items: "{{ groups['master-nodes']|sort|last }}" register: "{{ adminnode }}" when: "'ambari-node' not in groups or groups['ambari-node']|length < 1" - + + - name: Add kafka nodes to the kafka-cluster group +# check_mode: no + add_host: + name: "{{ hostvars[item].inventory_hostname }}" + ansible_host: "{{ 
hostvars[item].ansible_host|default(hostvars[item].ansible_ssh_host) }}" + ansible_user: "{{ hostvars[item].ansible_user|default('root') }}" + ansible_ssh_pass: "{{ hostvars[item].ansible_ssh_pass|default('') }}" + ansible_become_user: root + ansible_become_pass: "{{ hostvars[item].ansible_ssh_pass|default('') }}" + groups: kafka-cluster + with_flattened: + - "{{ groups['kafka-nodes']|default([]) }}" + - "{{ groups['kafka-zk-nodes']|default([]) }}" + register: kafka_cluster + when: "'kafka-cluster' not in groups or groups['kafka-cluster']|length < 1" + + - name: Add kafka nodes to the kafka-zookeeper cluster group +# check_mode: no + add_host: + name: "{{ hostvars[item].inventory_hostname }}" + ansible_host: "{{ hostvars[item].ansible_host|default(hostvars[item].ansible_ssh_host) }}" + ansible_user: "{{ hostvars[item].ansible_user|default('root') }}" + ansible_ssh_pass: "{{ hostvars[item].ansible_ssh_pass|default('') }}" + ansible_become_user: root + ansible_become_pass: "{{ hostvars[item].ansible_ssh_pass|default('') }}" + groups: kafka-zookeeper-cluster + with_flattened: + - "{{ groups['kafka-zk-nodes']|default([]) }}" + register: kafka_zookeeper_cluster + when: "'kafka-zookeeper-cluster' not in groups or groups['kafka-zookeeper-cluster']|length < 1" + + - name: Add kafka nodes to the kafka-broker-cluster group +# check_mode: no + add_host: + name: "{{ hostvars[item].inventory_hostname }}" + ansible_host: "{{ hostvars[item].ansible_host|default(hostvars[item].ansible_ssh_host) }}" + ansible_user: "{{ hostvars[item].ansible_user|default('root') }}" + ansible_ssh_pass: "{{ hostvars[item].ansible_ssh_pass|default('') }}" + ansible_become_user: root + ansible_become_pass: "{{ hostvars[item].ansible_ssh_pass|default('') }}" + groups: kafka-broker-cluster + with_flattened: + - "{{ groups['kafka-nodes']|default([]) }}" + register: kafka_broker_cluster + when: "'kafka-broker-cluster' not in groups or groups['kafka-broker-cluster']|length < 1" + + - name: Add kafka 
nodes to the kafka-manager group +# check_mode: no + add_host: + name: "{{ hostvars[item].inventory_hostname }}" + ansible_host: "{{ hostvars[item].ansible_host|default(hostvars[item].ansible_ssh_host) }}" + ansible_user: "{{ hostvars[item].ansible_user|default('root') }}" + ansible_ssh_pass: "{{ hostvars[item].ansible_ssh_pass|default('') }}" + ansible_become_user: root + ansible_become_pass: "{{ hostvars[item].ansible_ssh_pass|default('') }}" + groups: kafka-manager + with_flattened: + - "{{ groups['kafka-nodes']|sort|last }}" + register: kafka_manager + when: "'kafka-manager' not in groups or groups['kafka-manager']|length < 1" + + - name: Add kube nodes to the kube-cluster group +# check_mode: no + add_host: + name: "{{ hostvars[item].inventory_hostname }}" + ansible_host: "{{ hostvars[item].ansible_host|default(hostvars[item].ansible_ssh_host) }}" + ansible_user: "{{ hostvars[item].ansible_user|default('root') }}" + ansible_ssh_pass: "{{ hostvars[item].ansible_ssh_pass|default('') }}" + ansible_become_user: root + ansible_become_pass: "{{ hostvars[item].ansible_ssh_pass|default('') }}" + groups: kube-cluster + with_flattened: + - "{{ groups['kube-nodes']|default([]) }}" + - "{{ groups['kube-masters']|default([]) }}" + register: kubecluster + when: "'kube-cluster' not in groups or groups['kube-cluster']|length < 1" + + diff --git a/playbooks/group_vars/kafka-nodes b/playbooks/group_vars/kafka-nodes new file mode 100644 index 0000000..7fbb430 --- /dev/null +++ b/playbooks/group_vars/kafka-nodes @@ -0,0 +1,14 @@ +############################################################### +# use template file for example references # +# Default Rackspace kafka server node +############################################################### +cluster_interface: 'eth0' +cloud_nodes_count: 0 +cloud_image: 'CentOS 7 (PVHVM)' +# cloud_image: 'CentOS 6 (PVHVM)' +cloud_flavor: 'performance2-15' +build_kafka_cbs: true +cbs_disks_size: 200 +cbs_disks_type: 'SATA' +hadoop_disk: xvde 
+datanode_disks: ['xvdf', 'xvdg'] diff --git a/playbooks/group_vars/kafka-nodes-templates b/playbooks/group_vars/kafka-nodes-templates new file mode 100644 index 0000000..04dc14f --- /dev/null +++ b/playbooks/group_vars/kafka-nodes-templates @@ -0,0 +1,126 @@ +-- +--- +############################################################### +# use template file for example references # +# Default GCP kafka server node +############################################################### +cluster_interface: 'eth0' +cloud_nodes_count: 3 +cloud_image: 'projects/centos-cloud/global/images/centos-7-v20190916' +cloud_flavor: 'n1-standard-4' +build_extra_disks: true +extra_disks_size: 200 +extra_disks_type: 'SATA' +hadoop_disk: xvde +datanode_disks: ['xvdf', 'xvdg'] + +############################################################### +# use template file for example references # +# Default Rackspace kafka server node +############################################################### +cluster_interface: 'eth0' +cloud_nodes_count: 6 +cloud_image: 'CentOS 7 (PVHVM)' +# cloud_image: 'CentOS 6 (PVHVM)' +cloud_flavor: 'performance2-15' +build_kafka_cbs: true +cbs_disks_size: 200 +cbs_disks_type: 'SATA' +hadoop_disk: xvde +datanode_disks: ['xvdf', 'xvdg'] + +######################################### +## example for Rackspace cloud servers ## +## general1-8 flavor and CentOS 7 ## +## root filesystem used for /hadoop ## +## using the default public network ## +######################################### + +#cluster_interface: 'eth0' +#cloud_nodes_count: 3 +#cloud_image: 'CentOS 7 (PVHVM)' +# cloud_image: 'CentOS 6 (PVHVM)' +#cloud_flavor: 'performance2-15' +#build_datanode_cbs: true +#cbs_disks_size: 200 +#cbs_disks_type: 'SATA' +#hadoop_disk: xvde +#datanode_disks: ['xvdf', 'xvdg'] + +################################## +## example for static inventory ## +################################## + +# cluster_interface: 'bond1' +# bond_interfaces: ['eth4', 'eth6'] +# bond_netmask: '255.255.255.0' +# 
hadoop_disk: sdb + +############################################## +## example for Rackspace cloud servers ## +## performance2-15 flavor and CentOS 6 or 7 ## +## root filesystem used for /hadoop ## +## Namenode and Masterservices extra mounts ## +## using the default public network ## +############################################## +#cluster_interface: 'eth0' +#cloud_nodes_count: 3 +#cloud_image: 'CentOS 7 (PVHVM)' +#cloud_image: 'CentOS 6 (PVHVM)' +#cloud_flavor: 'performance2-15' +#build_datanode_cbs: true +#cbs_disks_size: 200 +#cbs_disks_type: 'SATA' +#hadoop_disk: xvde +#namenode_disk: xvdf +#masterservices_disk: xvdg +#datanode_disks: ['xvdf', 'xvdg'] + +############################################## +## example for Rackspace OnMetal servers ## +## performance2-15 flavor and CentOS 6 or 7 ## +## root filesystem used for /hadoop ## +## Namenode and Masterservices on SSD ## +## using the default public network ## +############################################## +#cluster_interface: 'bond0.101' +#cloud_nodes_count: 3 +#cloud_image: 'OnMetal - CentOS 7' +#cloud_flavor: 'onmetal-io1' +#build_datanode_cbs: true +#cbs_disks_size: 200 +#cbs_disks_type: 'SATA' +#hadoop_disk: sdb +#namenode_disk: sdb +#masterservices_disk: sdc +#datanode_disks: ['sdd', 'sde'] + +############################################# +## example for Rackspace cloud servers ## +## performance2-15 flavor and Ubuntu 14 ## +## ephemeral disk used for /hadoop ## +## using ServiceNet as the cluster network ## +############################################# + +# cluster_interface: 'eth1' +# cloud_nodes_count: 2 +# cloud_image: 'Ubuntu 14.04 LTS (Trusty Tahr) (PVHVM)' +# cloud_flavor: 'performance2-15' +# hadoop_disk: xvde + + +###################################### +## example for Rackspace OnMetal v2 ## +###################################### + +#cluster_interface: bond0 +#cloud_nodes_count: 3 +#cloud_image: 'OnMetal - CentOS 7' +## cloud_image: 'OnMetal - Ubuntu 14.04 LTS (Trusty Tahr)' +# 
cloud_flavor: 'onmetal-general2-small' +#cloud_flavor: 'onmetal-io1' +#hadoop_disk: sdb +#datanode_disks: sdc + + + diff --git a/playbooks/group_vars/kafka-server-nodes b/playbooks/group_vars/kafka-server-nodes new file mode 100644 index 0000000..7e2ff48 --- /dev/null +++ b/playbooks/group_vars/kafka-server-nodes @@ -0,0 +1,15 @@ +--- +############################################################### +# use template file for example references # +# ~/ansible-hadoop/playbooks/group_vars/master-nodes-templates# +############################################################### +cluster_interface: 'eth0' +cloud_nodes_count: 6 +cloud_image: 'CentOS 7 (PVHVM)' +# cloud_image: 'CentOS 6 (PVHVM)' +cloud_flavor: 'performance1-8' +build_kafka_cbs: true +cbs_disks_size: 200 +cbs_disks_type: 'SATA' +kafka_disk: xvde +#datanode_disks: ['xvdf', 'xvdg'] diff --git a/playbooks/group_vars/kafka-zookeeper-nodes b/playbooks/group_vars/kafka-zookeeper-nodes new file mode 100644 index 0000000..56bb115 --- /dev/null +++ b/playbooks/group_vars/kafka-zookeeper-nodes @@ -0,0 +1,15 @@ +--- +############################################################### +# use template file for example references # +# ~/ansible-hadoop/playbooks/group_vars/master-nodes-templates# +############################################################### +cluster_interface: 'eth0' +cloud_nodes_count: 3 +cloud_image: 'CentOS 7 (PVHVM)' +# cloud_image: 'CentOS 6 (PVHVM)' +cloud_flavor: 'performance2-4' +build_kafka_cbs: true +cbs_disks_size: 100 +cbs_disks_type: 'SATA' +zookeeper_disk: xvde +#datanode_disks: ['xvdf', 'xvdg'] diff --git a/playbooks/roles/confluent-community-broker/files/confluent-kafka.service b/playbooks/roles/confluent-community-broker/files/confluent-kafka.service new file mode 100644 index 0000000..5c50124 --- /dev/null +++ b/playbooks/roles/confluent-community-broker/files/confluent-kafka.service @@ -0,0 +1,9 @@ +[Service] +Type=simple +User=cp-kafka +Group=confluent 
+ExecStart=/usr/bin/kafka-server-start /etc/kafka/server.properties +LimitNOFILE=1000000 +TimeoutStopSec=180 +Restart=no +Environment=KAFKA_OPTS=-javaagent:/opt/prometheus/jmx_prometheus_javaagent-0.12.jar=7071:/opt/prometheus/kafka-2_0_0.yml diff --git a/playbooks/roles/confluent-community-broker/tasks/main.yml b/playbooks/roles/confluent-community-broker/tasks/main.yml new file mode 100644 index 0000000..a75b875 --- /dev/null +++ b/playbooks/roles/confluent-community-broker/tasks/main.yml @@ -0,0 +1,128 @@ +--- +- name: Load OS specific variables + include_vars: "{{ item }}" + with_first_found: + - files: + - "{{ ansible_os_family|lower }}-{{ ansible_distribution_major_version }}.yml" + - "{{ ansible_os_family|lower }}-{{ ansible_distribution|lower }}.yml" + - "{{ ansible_os_family|lower }}.yml" + - defaults.yml + paths: + - ../vars + +- include_vars: group_vars/confluent-community + when: distro == "cpc" + +- name: install confluent community repo + copy: + src: confluent.repo + dest: /etc/yum.repos.d/ + notify: yum-clean-metadata + +- name: add gpg key + rpm_key: + state: present + key: https://packages.confluent.io/rpm/5.3/archive.key + +- name: clean yum + command: yum clean all + +- name: Ensure required packages are installed (yum) + yum: + name: "{{ item }}" + update_cache: yes + state: installed + with_items: "{{ packages|default([]) }}" + when: ansible_os_family == "RedHat" + +- name: Ensure required packages are installed (apt) + apt: + name: "{{ item }}" + update_cache: yes + state: installed + with_items: "{{ packages|default([]) }}" + when: ansible_os_family == "Debian" + +- name: Upgrade all packages (yum) + yum: name=* state=latest + when: ansible_os_family == "RedHat" + +- name: Upgrade all packages (apt) + apt: upgrade=dist + when: ansible_os_family == "Debian" + +- name: Set nofile limits + lineinfile: dest=/etc/security/limits.conf + insertbefore="^# End of file" + state=present + line="{{ item }}" + with_items: + - "* soft nofile 32768" + - "* hard nofile 
32768" + when: not azure + +- name: Set nproc limits + lineinfile: dest=/etc/security/limits.d/90-nproc.conf + insertafter=EOF + state=present + create=yes + line="{{ item }}" + mode=0644 + with_items: + - "* soft nproc 32768" + - "* hard nproc 32768" + when: not azure + +- name: Set swappiness to 1 + sysctl: name=vm.swappiness value=1 state=present ignoreerrors=yes + +- name: Set the tuned profile + copy: src=tuned.conf + dest=/etc/tuned/kafka/ + mode=0755 + when: ansible_os_family == "RedHat" and ansible_distribution_major_version == "7" + +- name: Activate the tuned profile + shell: tuned-adm profile kafka + when: ansible_os_family == "RedHat" and ansible_distribution_major_version == "7" + +- name: Get number of kernels in grub.conf + shell: grep -E "^[[:blank:]]*kernel" /boot/grub/grub.conf | grep -v transparent_hugepage; exit 0 + register: grep_result + when: ansible_os_family == "RedHat" and (ansible_distribution == "Amazon" or ansible_distribution_major_version == "6") and not azure + ignore_errors: true + +- name: Disable Transparent Huge Pages in Grub 1 + lineinfile: dest=/boot/grub/grub.conf + backrefs=True + state=present + regexp='(^\s*kernel(\s+(?!transparent_hugepage=never)[\w=/\-\.\,]+)*)\s*$' + line='\1 transparent_hugepage=never' + with_items: "{{ grep_result.stdout_lines | default('') }}" + when: ansible_os_family == "RedHat" and (ansible_distribution == "Amazon" or ansible_distribution_major_version == "6") and not azure + + +- name: Disable Transparent Huge Pages in Grub 2 + lineinfile: dest=/etc/default/grub + state=present + line='GRUB_CMDLINE_LINUX=$GRUB_CMDLINE_LINUX" transparent_hugepage=never"' + when: ansible_distribution_major_version|int > 6 and not azure + notify: Run update-grub + +- meta: flush_handlers + +- name: Disable Transparent Huge Pages until reboot + shell: echo never > /sys/kernel/mm/transparent_hugepage/enabled && echo never > /sys/kernel/mm/transparent_hugepage/defrag + ignore_errors: true + when: not azure + +- name: 
Reconfigure resolv.conf search + lineinfile: dest={{ resolv_conf }} + create=yes + regexp='^search\s+(?! {{ ansible_domain }} ).*$' + line='search {{ ansible_domain }}' + when: ansible_domain != "" and not use_dns + notify: Run resolvconf + +- meta: flush_handlers + diff --git a/playbooks/roles/confluent-community-broker/templates/consumer.properties b/playbooks/roles/confluent-community-broker/templates/consumer.properties new file mode 100644 index 0000000..2eccfb7 --- /dev/null +++ b/playbooks/roles/confluent-community-broker/templates/consumer.properties @@ -0,0 +1,25 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# see org.apache.kafka.clients.consumer.ConsumerConfig for more details + +# list of brokers used for bootstrapping knowledge about the rest of the cluster +# format: host1:port1,host2:port2 ... 
+bootstrap.servers=104.130.220.120:9092,104.130.220.119:9092,104.130.220.122:9092 + +# consumer group id +group.id=test-consumer-group + +# What to do when there is no initial offset in Kafka or if the current +# offset does not exist any more on the server: latest, earliest, none diff --git a/playbooks/roles/confluent-community-broker/templates/kafka-rest.properties b/playbooks/roles/confluent-community-broker/templates/kafka-rest.properties new file mode 100644 index 0000000..ba5319e --- /dev/null +++ b/playbooks/roles/confluent-community-broker/templates/kafka-rest.properties @@ -0,0 +1,4 @@ +# Maintained by Ansible +bootstrap.servers=104.130.220.120:9092,104.130.220.119:9092,104.130.220.122:9092 +listeners=http://0.0.0.0:8082 +client.ssl.endpoint.identification.algorithm= diff --git a/playbooks/roles/confluent-community-broker/templates/ksql-server.properties b/playbooks/roles/confluent-community-broker/templates/ksql-server.properties new file mode 100644 index 0000000..7a2d3b7 --- /dev/null +++ b/playbooks/roles/confluent-community-broker/templates/ksql-server.properties @@ -0,0 +1,14 @@ +# Maintained by Ansible + +bootstrap.servers=104.130.220.120:9092,104.130.220.119:9092,104.130.220.122:9092 + +ksql.schema.registry.url=http://104.130.220.122:8081/ + +application.id=ksql-server + +listeners=http://0.0.0.0:8088 + +ksql.streams.state.dir=/var/lib/kafka-streams + +ssl.endpoint.identification.algorithm= + diff --git a/playbooks/roles/confluent-community-broker/templates/producer.properties b/playbooks/roles/confluent-community-broker/templates/producer.properties new file mode 100644 index 0000000..ad5e180 --- /dev/null +++ b/playbooks/roles/confluent-community-broker/templates/producer.properties @@ -0,0 +1,45 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. 
+# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# see org.apache.kafka.clients.producer.ProducerConfig for more details + +############################# Producer Basics ############################# + +# list of brokers used for bootstrapping knowledge about the rest of the cluster +# format: host1:port1,host2:port2 ... +bootstrap.servers=104.130.220.120:9092,104.130.220.119:9092,104.130.220.122:9092 + +# specify the compression codec for all data generated: none, gzip, snappy, lz4, zstd +compression.type=none + +# name of the partitioner class for partitioning events; default partition spreads data randomly +#partitioner.class= + +# the maximum amount of time the client will wait for the response of a request +#request.timeout.ms= + +# how long `KafkaProducer.send` and `KafkaProducer.partitionsFor` will block for +#max.block.ms= + +# the producer will wait for up to the given delay to allow other records to be sent so that the sends can be batched together +#linger.ms= + +# the maximum size of a request in bytes +#max.request.size= + +# the default batch size in bytes when batching multiple records sent to a partition +#batch.size= + +# the total bytes of memory the producer can use to buffer records waiting to be sent to the server +#buffer.memory= diff --git a/playbooks/roles/confluent-community-broker/templates/server1.properties b/playbooks/roles/confluent-community-broker/templates/server1.properties new file mode 100644 index 
0000000..070da9c --- /dev/null +++ b/playbooks/roles/confluent-community-broker/templates/server1.properties @@ -0,0 +1,38 @@ +# Maintained by Ansible +#listeners=PLAINTEXT://:9092 +listeners=104.130.220.120:9092,104.130.220.119:9092,104.130.220.122:9092 +main_nodes_ips_with_port: "{% set IP_ARR=[] %}{% for host in groups['mainnodes'] %}{% if IP_ARR.insert(loop.index,hostvars[host]['ansible_ssh_host']) %}{% endif %}{% endfor %}{{IP_ARR|join(':3000,')} + + +zookeeper.connect=104.130.220.120:2181,104.130.220.119:2181,104.130.220.122:2181 +main_nodes_ips_with_port: "{% set IP_ARR=[] %}{% for host in groups['mainnodes'] %}{% if IP_ARR.insert(loop.index,hostvars[host]['ansible_ssh_host']) %}{% endif %}{% endfor %}{{IP_ARR|join(':3000,')} + +log.dirs=/kafka/data +change to proper mount +broker.id=1 +{ broker_id } + +log.segment.bytes=1073741824 +socket.receive.buffer.bytes=102400 +socket.send.buffer.bytes=102400 +confluent.metrics.reporter.topic.replicas=3 +num.network.threads=8 +ssl.endpoint.identification.algorithm= +num.io.threads=16 +confluent.metrics.reporter.ssl.endpoint.identification.algorithm= +transaction.state.log.min.isr=2 +zookeeper.connection.timeout.ms=6000 +offsets.topic.replication.factor=3 +socket.request.max.bytes=104857600 +log.retention.check.interval.ms=300000 +group.initial.rebalance.delay.ms=0 +#metric.reporters=io.confluent.metrics.reporter.ConfluentMetricsReporter +num.recovery.threads.per.data.dir=2 +transaction.state.log.replication.factor=3 +#confluent.metrics.reporter.bootstrap.servers=104.130.220.116:9092 +log.retention.hours=168 +num.partitions=1 + +# Confluent Support +#confluent.support.metrics.enable=true +#confluent.support.customer.id=anonymous diff --git a/playbooks/roles/confluent-community-common/files/confluent.repo b/playbooks/roles/confluent-community-common/files/confluent.repo new file mode 100644 index 0000000..6fccc71 --- /dev/null +++ b/playbooks/roles/confluent-community-common/files/confluent.repo @@ -0,0 +1,13 @@ 
+[Confluent.dist] +name=Confluent repository (dist) +baseurl=https://packages.confluent.io/rpm/5.3/7 +gpgcheck=1 +gpgkey=https://packages.confluent.io/rpm/5.3/archive.key +enabled=1 + +[Confluent] +name=Confluent repository +baseurl=https://packages.confluent.io/rpm/5.3 +gpgcheck=1 +gpgkey=https://packages.confluent.io/rpm/5.3/archive.key +enabled=1 diff --git a/playbooks/roles/confluent-community-common/files/java_home.sh b/playbooks/roles/confluent-community-common/files/java_home.sh new file mode 100644 index 0000000..06b05ee --- /dev/null +++ b/playbooks/roles/confluent-community-common/files/java_home.sh @@ -0,0 +1,3 @@ +#set java home for all users + +export JAVA_HOME=/usr/lib/jvm/java-1.8.0-openjdk-1.8.0.222.b10-1.el7_7.x86_64 diff --git a/playbooks/roles/confluent-community-common/handlers/main.yml b/playbooks/roles/confluent-community-common/handlers/main.yml new file mode 100644 index 0000000..9fcfb28 --- /dev/null +++ b/playbooks/roles/confluent-community-common/handlers/main.yml @@ -0,0 +1,5 @@ +--- +- name: yum-clean-metadata + command: yum clean metadata + args: + warn: no diff --git a/playbooks/roles/confluent-community-common/tasks/main.yml b/playbooks/roles/confluent-community-common/tasks/main.yml new file mode 100644 index 0000000..a75b875 --- /dev/null +++ b/playbooks/roles/confluent-community-common/tasks/main.yml @@ -0,0 +1,128 @@ +--- +- name: Load OS specific variables + include_vars: "{{ item }}" + with_first_found: + - files: + - "{{ ansible_os_family|lower }}-{{ ansible_distribution_major_version }}.yml" + - "{{ ansible_os_family|lower }}-{{ ansible_distribution|lower }}.yml" + - "{{ ansible_os_family|lower }}.yml" + - defaults.yml + paths: + - ../vars + +- include_vars: group_vars/confluent-community + when: distro == "cpc" + +- name: install confluent community repo + copy: + src: confluent.repo + dest: /etc/yum.repos.d/ + notify: yum-clean-metadata + +- name: add gpg key + rpm_key: + state: present + key: 
https://packages.confluent.io/rpm/5.3/archive.key + +- name: clean yum + command: yum clean all + +- name: Ensure required packages are installed (yum) + yum: + name: "{{ item }}" + update_cache: yes + state: installed + with_items: "{{ packages|default([]) }}" + when: ansible_os_family == "RedHat" + +- name: Ensure required packages are installed (apt) + apt: + name: "{{ item }}" + update_cache: yes + state: installed + with_items: "{{ packages|default([]) }}" + when: ansible_os_family == "Debian" + +- name: Upgrade all packages (yum) + yum: name=* state=latest + when: ansible_os_family == "RedHat" + +- name: Upgrade all packages (apt) + apt: upgrade=dist + when: ansible_os_family == "Debian" + +- name: Set nofile limits + lineinfile: dest=/etc/security/limits.conf + insertbefore="^# End of file" + state=present + line="{{ item }}" + with_items: + - "* soft nofile 32768" + - "* hard nofile 32768" + when: not azure + +- name: Set nproc limits + lineinfile: dest=/etc/security/limits.d/90-nproc.conf + insertafter=EOF + state=present + create=yes + line="{{ item }}" + mode=0644 + with_items: + - "* soft nproc 32768" + - "* hard nproc 32768" + when: not azure + +- name: Set swappiness to 1 + sysctl: name=vm.swappiness value=1 state=present ignoreerrors=yes + +- name: Set the tuned profile + copy: src=tuned.conf + dest=/etc/tuned/kafka/ + mode=0755 + when: ansible_os_family == "RedHat" and ansible_distribution_major_version == "7" + +- name: Activate the tuned profile + shell: tuned-adm profile kafka + when: ansible_os_family == "RedHat" and ansible_distribution_major_version == "7" + +- name: Get number of kernels in grub.conf + shell: grep -E "^[[:blank:]]*kernel" /boot/grub/grub.conf | grep -v transparent_hugepage; exit 0 + register: grep_result + when: ansible_os_family == "RedHat" and (ansible_distribution == "Amazon" or ansible_distribution_major_version == "6") and not azure + ignore_errors: true + +- name: Disable Transparent Huge Pages in Grub 1 + lineinfile: 
dest=/boot/grub/grub.conf + backrefs=True + state=present + regexp='(^\s*kernel(\s+(?!transparent_hugepage=never)[\w=/\-\.\,]+)*)\s*$' + line='\1 transparent_hugepage=never' + with_items: "{{ grep_result.stdout_lines | default('') }}" + when: ansible_os_family == "RedHat" and (ansible_distribution == "Amazon" or ansible_distribution_major_version == "6") and not azure + + +- name: Disable Transparent Huge Pages in Grub 2 + lineinfile: dest=/etc/default/grub + state=present + line='GRUB_CMDLINE_LINUX=$GRUB_CMDLINE_LINUX" transparent_hugepage=never"' + when: ansible_distribution_major_version|int > 6 and not azure + notify: Run update-grub + +- meta: flush_handlers + +- name: Disable Transparent Huge Pages until reboot + shell: echo never > /sys/kernel/mm/transparent_hugepage/enabled && echo never > /sys/kernel/mm/transparent_hugepage/defrag + ignore_errors: true + when: not azure + +- name: Reconfigure resolv.conf search + lineinfile: dest={{ resolv_conf }} + create=yes + regexp='^search\s+(?! 
{{ ansible_domain }} ).*$' + line='search {{ ansible_domain }}' + when: ansible_domain != "" and not use_dns + notify: Run resolvconf + +- meta: flush_handlers + diff --git a/playbooks/roles/confluent-community-common/tasks/monitoring.yml b/playbooks/roles/confluent-community-common/tasks/monitoring.yml new file mode 100644 index 0000000..bdfd4be --- /dev/null +++ b/playbooks/roles/confluent-community-common/tasks/monitoring.yml @@ -0,0 +1,47 @@ +--- +- name: Load OS specific variables + include_vars: "{{ item }}" + with_first_found: + - files: + - "{{ ansible_os_family|lower }}-{{ ansible_distribution_major_version }}.yml" + - "{{ ansible_os_family|lower }}-{{ ansible_distribution|lower }}.yml" + - "{{ ansible_os_family|lower }}.yml" + - defaults.yml + paths: + - ../vars + +- include_vars: group_vars/confluent-community + when: distro == "cpc" + +- name: Create Jolokia directory + file: + path: /opt/jolokia + state: directory + mode: 0755 + when: jolokia_enabled|bool + +- name: Download Jolokia Jar + get_url: + url: "http://search.maven.org/remotecontent?filepath=org/jolokia/jolokia-jvm/{{jolokia_version}}/jolokia-jvm-{{jolokia_version}}-agent.jar" + dest: "{{ jolokia_jar_path }}" + when: jolokia_enabled|bool + +- name: Create Prometheus install directory + file: + path: /opt/prometheus + state: directory + mode: 0755 + when: jmxexporter_enabled|bool + +- name: Download Prometheus JMX Exporter Jar + get_url: + url: https://repo1.maven.org/maven2/io/prometheus/jmx/jmx_prometheus_javaagent/0.12.0/jmx_prometheus_javaagent-0.12.0.jar + dest: "{{ jmxexporter_jar_path }}" + when: jmxexporter_enabled|bool + +- name: Download kafka prom config + get_url: + url: https://github.com/prometheus/jmx_exporter/raw/master/example_configs/kafka-2_0_0.yml + dest "{{ prom_jxm_conf_path }} + when: jmxexporter_enabled|bool + diff --git a/playbooks/roles/confluent-community-common/templates/hosts.j2 b/playbooks/roles/confluent-community-common/templates/hosts.j2 new file mode 100644 
index 0000000..bffd093 --- /dev/null +++ b/playbooks/roles/confluent-community-common/templates/hosts.j2 @@ -0,0 +1,5 @@ +127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4 +::1 localhost localhost.localdomain localhost6 localhost6.localdomain6 +{% for node in groups['hadoop-cluster'] %} +{{ hostvars[node]['ansible_'~hostvars[node].cluster_interface|default(hostvars[node].ansible_default_ipv4.alias)]['ipv4']['address'] }} {{ hostvars[node]['ansible_nodename'] }} {{ hostvars[node]['ansible_hostname'] }} +{% endfor %} diff --git a/playbooks/roles/confluent-community-common/vars/redhat-7.yml b/playbooks/roles/confluent-community-common/vars/redhat-7.yml new file mode 100644 index 0000000..ab3cdff --- /dev/null +++ b/playbooks/roles/confluent-community-common/vars/redhat-7.yml @@ -0,0 +1,17 @@ +packages: + - curl + - wget + - xfsprogs + - confluent-community-2.12 + - java-1.8.0-openjdk-devel + + +epel_rpm_url: "http://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm" + +epel_yum: "epel-release" + +resolv_conf: "/etc/resolv.conf" + +networking_path: "/etc/sysconfig/network-scripts" +networking_eth: "redhat-ifcfg-eth.j2" +networking_bond: "redhat-ifcfg-bond.j2" diff --git a/playbooks/roles/confluent-community-zookeeper/tasks/main.yml b/playbooks/roles/confluent-community-zookeeper/tasks/main.yml new file mode 100644 index 0000000..ed97d53 --- /dev/null +++ b/playbooks/roles/confluent-community-zookeeper/tasks/main.yml @@ -0,0 +1 @@ +--- diff --git a/playbooks/roles/confluent-community-zookeeper/templates/zookeeper.properties b/playbooks/roles/confluent-community-zookeeper/templates/zookeeper.properties new file mode 100644 index 0000000..f6d588d --- /dev/null +++ b/playbooks/roles/confluent-community-zookeeper/templates/zookeeper.properties @@ -0,0 +1,15 @@ +tickTime=2000 +dataDir=/zookeeper +clientPort=2181 +initLimit=5 +syncLimit=2 +server.1=104.130.220.122:2888:3888 +server.2=104.130.220.119:2888:3888 
+server.3=104.130.220.120:2888:3888 +{% for node in groups['hadoop-cluster'] %} + +server. {{ hostvars[node]['ansible_'~hostvars[node].cluster_interface|default(hostvars[node].ansible_default_ipv4.alias)]['ipv4']['address'] }} + +{% endfor %} +autopurge.snapRetainCount=3 +autopurge.purgeInterval=24 From b9f4415121b5633255cb723a33fff83f9167b60b Mon Sep 17 00:00:00 2001 From: David Grier Date: Wed, 6 Nov 2019 02:53:37 +0000 Subject: [PATCH 02/10] confluent community vars --- playbooks/group_vars/confluent-community | 25 ++++++++++++++++++++++++ 1 file changed, 25 insertions(+) create mode 100644 playbooks/group_vars/confluent-community diff --git a/playbooks/group_vars/confluent-community b/playbooks/group_vars/confluent-community new file mode 100644 index 0000000..19e4420 --- /dev/null +++ b/playbooks/group_vars/confluent-community @@ -0,0 +1,25 @@ +--- +cluster_name: 'KafkaPoc' +managernode: 'manager-node' +confluent_community_version: '3.1' +admin_password: 'admin' +services_password: 'AsdQwe123' +alerts_contact: 'root@localhost.localdomain' +wait: true +wait_timeout: 1800 # 30 minutes + +data_disks_filesystem: xfs +configure_firewall: false +custom_blueprint: false +custom_repo: false +custom_repo_url: '' + +zookeeper_starting_id: 1 + +broker_starting_myid: 1 + +jolokia_enabled: true +jmxexporter_enabled: true + +# set to true to show host variables +debug: false From cf82bd1c7be8d382cfe2a2d59819715300afc5dd Mon Sep 17 00:00:00 2001 From: David Grier Date: Wed, 6 Nov 2019 04:20:27 +0000 Subject: [PATCH 03/10] kafka groups --- playbooks/create_groups.yml | 33 ++++----------------------------- 1 file changed, 4 insertions(+), 29 deletions(-) diff --git a/playbooks/create_groups.yml b/playbooks/create_groups.yml index 114ea9e..41b02c6 100644 --- a/playbooks/create_groups.yml +++ b/playbooks/create_groups.yml @@ -35,9 +35,8 @@ - "{{ groups['master-nodes']|default([]) }}" - "{{ groups['slave-nodes']|default([]) }}" - "{{ groups['edge-nodes']|default([]) }}" - - "{{ 
groups['kafka-nodes']|default([]) }}" - - "{{ groups['kube-nodes']|default([]) }}" - - "{{ groups['kube-masters']|default([]) }}" + - "{{ groups['kafka-server-nodes']|default([]) }}" + - "{{ groups['kafka-zk-nodes']|default([]) }}" register: bd-cluster when: "'bd-cluster' not in groups or groups['bd-cluster']|length < 1" @@ -53,12 +52,6 @@ include_vars: group_vars/hortonworks when: distro == "hdp" - - name: "include hdp vars" - include_vars: group_vars/kube-all.yml - - - name: "include hdp vars" - include_vars: group_vars/kube-cluster.yml - - name: Add the last masternode to ambari-node variable group # check_mode: no add_host: @@ -84,7 +77,7 @@ ansible_become_pass: "{{ hostvars[item].ansible_ssh_pass|default('') }}" groups: kafka-cluster with_flattened: - - "{{ groups['kafka-nodes']|default([]) }}" + - "{{ groups['kafka-server-nodes']|default([]) }}" - "{{ groups['kafka-zk-nodes']|default([]) }}" register: kafka-cluster when: "'kafka-cluster' not in groups or groups['kafka-cluster']|length < 1" @@ -115,7 +108,7 @@ ansible_become_pass: "{{ hostvars[item].ansible_ssh_pass|default('') }}" groups: kafka-broker-cluster with_flattened: - - "{{ groups['kafka-nodes']|default([]) }}" + - "{{ groups['kafka-server-nodes']|default([]) }}" register: kafka-broker-cluster when: "'kafka-broker-cluster' not in groups or groups['kafka-broker-cluster']|length < 1" @@ -133,21 +126,3 @@ - "{{ groups['kafka-nodes']|sort|last }}" register: kafka-manager when: "'kafka-manager' not in groups or groups['kafka-manager']|length < 1" - - - name: Add kube nodes to the kube-cluster group -# check_mode: no - add_host: - name: "{{ hostvars[item].inventory_hostname }}" - ansible_host: "{{ hostvars[item].ansible_host|default(hostvars[item].ansible_ssh_host) }}" - ansible_user: "{{ hostvars[item].ansible_user|default('root') }}" - ansible_ssh_pass: "{{ hostvars[item].ansible_ssh_pass|default('') }}" - ansible_become_user: root - ansible_become_pass: "{{ hostvars[item].ansible_ssh_pass|default('') 
}}"
-    groups: kube-cluster
-    with_flattened:
-      - "{{ groups['kube-nodes']|default([]) }}"
-      - "{{ groups['kube-masters']|default([]) }}"
-    register: kubecluster
-    when: "'kube-cluster' not in groups or groups['kube-cluster']|length < 1"
-
-
From a7b8412eae9c800b82e96a41822fa11707942916 Mon Sep 17 00:00:00 2001
From: David Grier
Date: Thu, 7 Nov 2019 03:31:26 +0000
Subject: [PATCH 04/10] fixed path var

---
 playbooks/roles/confluent-community-common/tasks/monitoring.yml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/playbooks/roles/confluent-community-common/tasks/monitoring.yml b/playbooks/roles/confluent-community-common/tasks/monitoring.yml
index bdfd4be..af034e1 100644
--- a/playbooks/roles/confluent-community-common/tasks/monitoring.yml
+++ b/playbooks/roles/confluent-community-common/tasks/monitoring.yml
@@ -42,6 +42,6 @@
 - name: Download kafka prom config
   get_url:
     url: https://github.com/prometheus/jmx_exporter/raw/master/example_configs/kafka-2_0_0.yml
-    dest "{{ prom_jxm_conf_path }}
+    dest: "{{ prom_jxm_conf_path }}"
   when: jmxexporter_enabled|bool

From d4977c4134d67f02947b071fefc14b35852aed12 Mon Sep 17 00:00:00 2001
From: David Grier
Date: Thu, 7 Nov 2019 03:35:29 +0000
Subject: [PATCH 05/10] templates and task testing

---
 .../confluent-community-zookeeper/tasks/main.yml     | 11 +++++++++++
 .../confluent-community-zookeeper/templates/myid.j2  |  1 +
 .../templates/zookeeper.properties                   |  8 ++++----
 3 files changed, 16 insertions(+), 4 deletions(-)
 create mode 100644 playbooks/roles/confluent-community-zookeeper/templates/myid.j2

diff --git a/playbooks/roles/confluent-community-zookeeper/tasks/main.yml b/playbooks/roles/confluent-community-zookeeper/tasks/main.yml
index ed97d53..8ec99aa 100644
--- a/playbooks/roles/confluent-community-zookeeper/tasks/main.yml
+++ b/playbooks/roles/confluent-community-zookeeper/tasks/main.yml
@@ -1 +1,12 @@
 ---
+- name: Set Zookeeper Id
+  set_fact: zk_id={{item.0 + 1}}
+  with_indexed_items: "{{ 
groups['kafka-zk-nodes'] }}"
+  when: item.1 == inventory_hostname
+
+- name: Template zk id
+  template:
+    src: myid.j2
+    dest: /zookeeper/myid
+
+- debug: var=ansible_facts
diff --git a/playbooks/roles/confluent-community-zookeeper/templates/myid.j2 b/playbooks/roles/confluent-community-zookeeper/templates/myid.j2
new file mode 100644
index 0000000..c327c75
--- /dev/null
+++ b/playbooks/roles/confluent-community-zookeeper/templates/myid.j2
@@ -0,0 +1 @@
+{{ zk_id }}
diff --git a/playbooks/roles/confluent-community-zookeeper/templates/zookeeper.properties b/playbooks/roles/confluent-community-zookeeper/templates/zookeeper.properties
index f6d588d..bc3ca97 100644
--- a/playbooks/roles/confluent-community-zookeeper/templates/zookeeper.properties
+++ b/playbooks/roles/confluent-community-zookeeper/templates/zookeeper.properties
@@ -3,12 +3,12 @@ dataDir=/zookeeper
 clientPort=2181
 initLimit=5
 syncLimit=2
-server.1=104.130.220.122:2888:3888
-server.2=104.130.220.119:2888:3888
-server.3=104.130.220.120:2888:3888
+#server.1=10.0.0.10:2888:3888
+#server.2=10.0.0.11:2888:3888
+#server.3=10.0.0.12:2888:3888
 {% for node in groups['hadoop-cluster'] %}
 
-server. 
{{ hostvars[node]['ansible_'~hostvars[node].cluster_interface|default(hostvars[node].ansible_default_ipv4.alias)]['ipv4']['address'] }}
+server.{{ hostvars[node]['zk_id'] }}={{ hostvars[node]['ansible_'~hostvars[node].cluster_interface|default(hostvars[node].ansible_default_ipv4.alias)]['ipv4']['address'] }}:2888:3888
 
 {% endfor %}
 autopurge.snapRetainCount=3
From 4e366ca5ca57ac7d3564ea8ca2d1c66575f8a03a Mon Sep 17 00:00:00 2001
From: David Grier
Date: Thu, 7 Nov 2019 03:45:13 +0000
Subject: [PATCH 06/10] provision rax for kafka nodes

---
 .../templates/server1.properties              | 38 -------------------
 1 file changed, 38 deletions(-)
 delete mode 100644 playbooks/roles/confluent-community-broker/templates/server1.properties

diff --git a/playbooks/roles/confluent-community-broker/templates/server1.properties b/playbooks/roles/confluent-community-broker/templates/server1.properties
deleted file mode 100644
index 070da9c..0000000
--- a/playbooks/roles/confluent-community-broker/templates/server1.properties
+++ /dev/null
@@ -1,38 +0,0 @@
-# Maintained by Ansible
-#listeners=PLAINTEXT://:9092
-listeners=104.130.220.120:9092,104.130.220.119:9092,104.130.220.122:9092
-main_nodes_ips_with_port: "{% set IP_ARR=[] %}{% for host in groups['mainnodes'] %}{% if IP_ARR.insert(loop.index,hostvars[host]['ansible_ssh_host']) %}{% endif %}{% endfor %}{{IP_ARR|join(':3000,')}
-
-
-zookeeper.connect=104.130.220.120:2181,104.130.220.119:2181,104.130.220.122:2181
-main_nodes_ips_with_port: "{% set IP_ARR=[] %}{% for host in groups['mainnodes'] %}{% if IP_ARR.insert(loop.index,hostvars[host]['ansible_ssh_host']) %}{% endif %}{% endfor %}{{IP_ARR|join(':3000,')}
-
-log.dirs=/kafka/data
-change to proper mount
-broker.id=1
-{ broker_id }
-
-log.segment.bytes=1073741824
-socket.receive.buffer.bytes=102400
-socket.send.buffer.bytes=102400
-confluent.metrics.reporter.topic.replicas=3
-num.network.threads=8
-ssl.endpoint.identification.algorithm=
-num.io.threads=16
-confluent.metrics.reporter.ssl.endpoint.identification.algorithm= -transaction.state.log.min.isr=2 -zookeeper.connection.timeout.ms=6000 -offsets.topic.replication.factor=3 -socket.request.max.bytes=104857600 -log.retention.check.interval.ms=300000 -group.initial.rebalance.delay.ms=0 -#metric.reporters=io.confluent.metrics.reporter.ConfluentMetricsReporter -num.recovery.threads.per.data.dir=2 -transaction.state.log.replication.factor=3 -#confluent.metrics.reporter.bootstrap.servers=104.130.220.116:9092 -log.retention.hours=168 -num.partitions=1 - -# Confluent Support -#confluent.support.metrics.enable=true -#confluent.support.customer.id=anonymous From 0df789da40cc54bab1da0013961885a91c46e2f4 Mon Sep 17 00:00:00 2001 From: David Grier Date: Thu, 7 Nov 2019 03:55:10 +0000 Subject: [PATCH 07/10] fixed kafka groups --- playbooks/provision_rax.yml | 44 +++++++++++++++++++++++++++++++++++++ 1 file changed, 44 insertions(+) diff --git a/playbooks/provision_rax.yml b/playbooks/provision_rax.yml index c761fbe..3510147 100644 --- a/playbooks/provision_rax.yml +++ b/playbooks/provision_rax.yml @@ -85,3 +85,47 @@ wait_timeout: 900 register: rax when: cloud_nodes_count > 0 + + - name: Include kafka-server-nodes group variables + include_vars: group_vars/kafka-server-nodes + + - name: Create kafka server nodes + local_action: + module: rax + credentials: "{{ cloud_config.rax_credentials_file }}" + name: "kafka-%02d.{{ cloud_config.domain }}" + image: "{{ cloud_image }}" + flavor: "{{ cloud_flavor }}" + count: "{{ cloud_nodes_count }}" + region: "{{ cloud_config.rax_region }}" + key_name: "{{ cloud_config.ssh.keyname }}" + exact_count: yes + auto_increment: true + group: kafka-server-nodes + state: present + wait: true + wait_timeout: 900 + register: rax + when: cloud_nodes_count > 0 + + - name: Include kafka-zk-nodes group variables + include_vars: group_vars/kafka-zk-nodes + + - name: Create kafka zookeeper nodes + local_action: + module: rax + credentials: "{{ 
cloud_config.rax_credentials_file }}" + name: "kafka-zk-%02d.{{ cloud_config.domain }}" + image: "{{ cloud_image }}" + flavor: "{{ cloud_flavor }}" + count: "{{ cloud_nodes_count }}" + region: "{{ cloud_config.rax_region }}" + key_name: "{{ cloud_config.ssh.keyname }}" + exact_count: yes + auto_increment: true + group: kafka-zk-nodes + state: present + wait: true + wait_timeout: 900 + register: rax + when: cloud_nodes_count > 0 From b65cc37cd3a92510a0dadb84f9cd3d1d39ed9081 Mon Sep 17 00:00:00 2001 From: David Grier Date: Thu, 7 Nov 2019 15:31:26 +0000 Subject: [PATCH 08/10] kafka broker setup --- .../confluent-community-broker/tasks/main.yml | 138 ++---------------- .../templates/consumer.properties | 2 +- .../templates/kafka-rest.properties | 3 +- .../templates/ksql-server.properties | 6 +- .../templates/producer.properties | 2 +- .../templates/server.properties | 41 ++++++ 6 files changed, 60 insertions(+), 132 deletions(-) create mode 100644 playbooks/roles/confluent-community-broker/templates/server.properties diff --git a/playbooks/roles/confluent-community-broker/tasks/main.yml b/playbooks/roles/confluent-community-broker/tasks/main.yml index a75b875..7da46f2 100644 --- a/playbooks/roles/confluent-community-broker/tasks/main.yml +++ b/playbooks/roles/confluent-community-broker/tasks/main.yml @@ -1,128 +1,12 @@ --- -- name: Load OS specific variables - include_vars: "{{ item }}" - with_first_found: - - files: - - "{{ ansible_os_family|lower }}-{{ ansible_distribution_major_version }}.yml" - - "{{ ansible_os_family|lower }}-{{ ansible_distribution|lower }}.yml" - - "{{ ansible_os_family|lower }}.yml" - - defaults.yml - paths: - - ../vars - -- include_vars: group_vars/confluent-community - when: distro == "cpc" - -- name: install confluent community repo - copy: - src: confluent.repo - dest: /etc/yum.repos.d/ - notify: yum-clean-metadata - -- name: add gpg key - rpm-key: - state: present - key: https://packages.confluent.io/rpm/5.3/archive.key - -- name: clean 
yum - yum: - -- name: Ensure required packages are installed (yum) - yum: - name: "{{ item }}" - update_cache: yes - state: installed - with_items: "{{ packages|default([]) }}" - when: ansible_os_family == "RedHat" - -- name: Ensure required packages are installed (apt) - apt: - name: "{{ item }}" - update_cache: yes - state: installed - with_items: "{{ packages|default([]) }}" - when: ansible_os_family == "Debian" - -- name: Upgrade all packages (yum) - yum: name=* state=latest - when: ansible_os_family == "RedHat" - -- name: Upgrade all packages (apt) - apt: upgrade=dist - when: ansible_os_family == "Debian" - -- name: Set nofile limits - lineinfile: dest=/etc/security/limits.conf - insertbefore="^# End of file" - state=present - line="{{ item }}" - with_items: - - "* soft nofile 32768" - - "* hard nofile 32768" - when: not azure - -- name: Set nproc limits - lineinfile: dest=/etc/security/limits.d/90-nproc.conf - insertafter=EOF - state=present - create=yes - line="{{ item }}" - mode=0644 - with_items: - - "* soft nproc 32768" - - "* hard nproc 32768" - when: not azure - -- name: Set swappiness to 1 - sysctl: name=vm.swappiness value=1 state=present ignoreerrors=yes - -- name: Set the tuned profile - copy: src=tuned.conf - dest=/etc/tuned/kafka/ - mode=0755 - when: ansible_os_family == "RedHat" and ansible_distribution_major_version == "7" - -- name: Activate the tuned profile - shell: tuned-adm profile kafka - when: ansible_os_family == "RedHat" and ansible_distribution_major_version == "7" - -- name: Get number of kernels in grub.conf - shell: grep -E "^[[:blank:]]*kernel" /boot/grub/grub.conf | grep -v transparent_hugepage; exit 0 - register: grep_result - when: ansible_os_family == "RedHat" and (ansible_distribution == "Amazon" or ansible_distribution_major_version == "6") and not azure - ignore_errors: true - -- name: Disable Transparent Huge Pages in Grub 1 - lineinfile: dest=/boot/grub/grub.conf - backrefs=True - state=present - 
regexp='(^\s*kernel(\s+(?!transparent_hugepage=never)[\w=/\-\.\,]+)*)\s*$' - line='\1 transparent_hugepage=never' - with_items: "{{ grep_result.stdout_lines | default('') }}" - when: ansible_os_family == "RedHat" and (ansible_distribution == "Amazon" or ansible_distribution_major_version == "6") and not azure - - -- name: Disable Transparent Huge Pages in Grub 2 - lineinfile: dest=/etc/default/grub - state=present - line='GRUB_CMDLINE_LINUX=$GRUB_CMDLINE_LINUX" transparent_hugepage=never"' - when: ansible_distribution_major_version|int > 6 and not azure - notify: Run update-grub - -- meta: flush_handlers - -- name: Disable Transparent Huge Pages until reboot - shell: echo never > /sys/kernel/mm/transparent_hugepage/enabled && echo never > /sys/kernel/mm/transparent_hugepage/defrag - ignore_errors: true - when: not azure - -- name: Reconfigure resolv.conf search - lineinfile: dest={{ resolv_conf }} - create=yes - regexp='^search\s+(?! {{ ansible_domain }} ).*$' - line='search {{ ansible_domain }}' - when: ansible_domain != "" and not use_dns - notify: Run resolvconf - -- meta: flush_handlers - +- name: Set Broker Id + set_fact: broker_id={{item.0 + 1}} + with_indexed_items: "{{ groups['kafka-server-nodes'] }}" + when: item.1 == "{{inventory_hostname}}" + +- name: Template kafka server properties + template: + src: server.properties + dest: /etc/kafka/ + +- debug: var=ansible_facts diff --git a/playbooks/roles/confluent-community-broker/templates/consumer.properties b/playbooks/roles/confluent-community-broker/templates/consumer.properties index 2eccfb7..49b0f64 100644 --- a/playbooks/roles/confluent-community-broker/templates/consumer.properties +++ b/playbooks/roles/confluent-community-broker/templates/consumer.properties @@ -16,7 +16,7 @@ # list of brokers used for bootstrapping knowledge about the rest of the cluster # format: host1:port1,host2:port2 ... 
-bootstrap.servers=104.130.220.120:9092,104.130.220.119:9092,104.130.220.122:9092 +bootstrap.servers={% for node in groups['kafka-server-nodes'] %}{{ node }}:9092{% if not loop.last %},{% endif %}{% endfor %} # consumer group id group.id=test-consumer-group diff --git a/playbooks/roles/confluent-community-broker/templates/kafka-rest.properties b/playbooks/roles/confluent-community-broker/templates/kafka-rest.properties index ba5319e..8305475 100644 --- a/playbooks/roles/confluent-community-broker/templates/kafka-rest.properties +++ b/playbooks/roles/confluent-community-broker/templates/kafka-rest.properties @@ -1,4 +1,5 @@ # Maintained by Ansible -bootstrap.servers=104.130.220.120:9092,104.130.220.119:9092,104.130.220.122:9092 +#bootstrap.servers=104.130.220.120:9092,104.130.220.119:9092,104.130.220.122:9092 +bootstrap.servers={% for node in groups['kafka-server-nodes'] %}{{ node }}:9092{% if not loop.last %},{% endif %}{% endfor %} listeners=http://0.0.0.0:8082 client.ssl.endpoint.identification.algorithm= diff --git a/playbooks/roles/confluent-community-broker/templates/ksql-server.properties b/playbooks/roles/confluent-community-broker/templates/ksql-server.properties index 7a2d3b7..b98c07c 100644 --- a/playbooks/roles/confluent-community-broker/templates/ksql-server.properties +++ b/playbooks/roles/confluent-community-broker/templates/ksql-server.properties @@ -1,8 +1,10 @@ # Maintained by Ansible -bootstrap.servers=104.130.220.120:9092,104.130.220.119:9092,104.130.220.122:9092 +#bootstrap.servers=104.130.220.120:9092,104.130.220.119:9092,104.130.220.122:9092 +bootstrap.servers={% for node in groups['kafka-server-nodes'] %}{{ node }}:9092{% if not loop.last %},{% endif %}{% endfor %} -ksql.schema.registry.url=http://104.130.220.122:8081/ +#ksql.schema.registry.url=http://104.130.220.122:8081/ +ksql.schema.registry.url=http://{{ hostvars[groups['kafka-ksql-schema-node'][0]][['ansible_', 
hostvars[groups['kafka-ksql-schema-node'][0]]['cluster_interface']]|join]['ipv4']['address'] }}:8081/ application.id=ksql-server diff --git a/playbooks/roles/confluent-community-broker/templates/producer.properties b/playbooks/roles/confluent-community-broker/templates/producer.properties index ad5e180..c5383cf 100644 --- a/playbooks/roles/confluent-community-broker/templates/producer.properties +++ b/playbooks/roles/confluent-community-broker/templates/producer.properties @@ -18,7 +18,7 @@ # list of brokers used for bootstrapping knowledge about the rest of the cluster # format: host1:port1,host2:port2 ... -bootstrap.servers=104.130.220.120:9092,104.130.220.119:9092,104.130.220.122:9092 +bootstrap.servers={% for node in groups['kafka-server-nodes'] %}{{ node }}:9092{% if not loop.last %},{% endif %}{% endfor %} # specify the compression codec for all data generated: none, gzip, snappy, lz4, zstd compression.type=none diff --git a/playbooks/roles/confluent-community-broker/templates/server.properties b/playbooks/roles/confluent-community-broker/templates/server.properties new file mode 100644 index 0000000..045769b --- /dev/null +++ b/playbooks/roles/confluent-community-broker/templates/server.properties @@ -0,0 +1,41 @@ +# Maintained by Ansible +listeners=PLAINTEXT://:9092 +#listeners=104.130.220.120:9092,104.130.220.119:9092,104.130.220.122:9092 + +#{% for node in groups['kafka-server-nodes'] %} +#listeners={{ hostvars[node]['ansible_'~hostvars[node].cluster_interface|default(hostvars[node].ansible_default_ipv4.alias)]['ipv4']['address'] }}:2888:3888 +#{% endfor %} + +listeners={% for node in groups['kafka-server-nodes'] %}{{ node }}:9092{% if not loop.last %},{% endif %}{% endfor %} + +#zookeeper.connect=104.130.220.120:2181,104.130.220.119:2181,104.130.220.122:2181 +zookeeper.connect={% for node in groups['kafka-zk-nodes'] %}{{ node }}:2181{% if not loop.last %},{% endif %}{% endfor %} + +log.dirs=/kafka/data +change to proper mount +broker.id={{ broker_id }} + 
+log.segment.bytes=1073741824 +socket.receive.buffer.bytes=102400 +socket.send.buffer.bytes=102400 +confluent.metrics.reporter.topic.replicas=3 +num.network.threads=8 +ssl.endpoint.identification.algorithm= +num.io.threads=16 +confluent.metrics.reporter.ssl.endpoint.identification.algorithm= +transaction.state.log.min.isr=2 +zookeeper.connection.timeout.ms=6000 +offsets.topic.replication.factor=3 +socket.request.max.bytes=104857600 +log.retention.check.interval.ms=300000 +group.initial.rebalance.delay.ms=0 +#metric.reporters=io.confluent.metrics.reporter.ConfluentMetricsReporter +num.recovery.threads.per.data.dir=2 +transaction.state.log.replication.factor=3 +#confluent.metrics.reporter.bootstrap.servers=104.130.220.116:9092 +log.retention.hours=168 +num.partitions=1 + +# Confluent Support +#confluent.support.metrics.enable=true +#confluent.support.customer.id=anonymous From 0ae6b5cad2590a05dc3ee7830a21cdbdef52e4f7 Mon Sep 17 00:00:00 2001 From: David Grier Date: Thu, 7 Nov 2019 15:57:49 +0000 Subject: [PATCH 09/10] added hostvar logic for cluster int --- .../confluent-community-broker/templates/consumer.properties | 2 +- .../templates/kafka-rest.properties | 2 +- .../templates/ksql-server.properties | 2 +- .../confluent-community-broker/templates/producer.properties | 2 +- .../confluent-community-broker/templates/server.properties | 4 ++-- 5 files changed, 6 insertions(+), 6 deletions(-) diff --git a/playbooks/roles/confluent-community-broker/templates/consumer.properties b/playbooks/roles/confluent-community-broker/templates/consumer.properties index 49b0f64..5e87533 100644 --- a/playbooks/roles/confluent-community-broker/templates/consumer.properties +++ b/playbooks/roles/confluent-community-broker/templates/consumer.properties @@ -16,7 +16,7 @@ # list of brokers used for bootstrapping knowledge about the rest of the cluster # format: host1:port1,host2:port2 ... 
-bootstrap.servers={% for node in groups['kafka-server-nodes'] %}{{ node }}:9092{% if not loop.last %},{% endif %}{% endfor %} +bootstrap.servers={% for node in groups['kafka-server-nodes'] %}{{ hostvars[node]['ansible_'~hostvars[node].cluster_interface|default(hostvars[node].ansible_default_ipv4.alias)]['ipv4']['address'] }}:9092{% if not loop.last %},{% endif %}{% endfor %} # consumer group id group.id=test-consumer-group diff --git a/playbooks/roles/confluent-community-broker/templates/kafka-rest.properties b/playbooks/roles/confluent-community-broker/templates/kafka-rest.properties index 8305475..37036f6 100644 --- a/playbooks/roles/confluent-community-broker/templates/kafka-rest.properties +++ b/playbooks/roles/confluent-community-broker/templates/kafka-rest.properties @@ -1,5 +1,5 @@ # Maintained by Ansible #bootstrap.servers=104.130.220.120:9092,104.130.220.119:9092,104.130.220.122:9092 -bootstrap.servers={% for node in groups['kafka-server-nodes'] %}{{ node }}:9092{% if not loop.last %},{% endif %}{% endfor %} +bootstrap.servers={% for node in groups['kafka-server-nodes'] %}{{ hostvars[node]['ansible_'~hostvars[node].cluster_interface|default(hostvars[node].ansible_default_ipv4.alias)]['ipv4']['address'] }}:9092{% if not loop.last %},{% endif %}{% endfor %} listeners=http://0.0.0.0:8082 client.ssl.endpoint.identification.algorithm= diff --git a/playbooks/roles/confluent-community-broker/templates/ksql-server.properties b/playbooks/roles/confluent-community-broker/templates/ksql-server.properties index b98c07c..0a2fd87 100644 --- a/playbooks/roles/confluent-community-broker/templates/ksql-server.properties +++ b/playbooks/roles/confluent-community-broker/templates/ksql-server.properties @@ -1,7 +1,7 @@ # Maintained by Ansible #bootstrap.servers=104.130.220.120:9092,104.130.220.119:9092,104.130.220.122:9092 -bootstrap.servers={% for node in groups['kafka-server-nodes'] %}{{ node }}:9092{% if not loop.last %},{% endif %}{% endfor %} +bootstrap.servers={% for 
node in groups['kafka-server-nodes'] %}{{ hostvars[node]['ansible_'~hostvars[node].cluster_interface|default(hostvars[node].ansible_default_ipv4.alias)]['ipv4']['address'] }}:9092{% if not loop.last %},{% endif %}{% endfor %} #ksql.schema.registry.url=http://104.130.220.122:8081/ ksql.schema.registry.url=http://{{ hostvars[groups['kafka-ksql-schema-node'][0]][['ansible_', hostvars[groups['kafka-ksql-schema-node'][0]]['cluster_interface']]|join]['ipv4']['address'] }}:8081/ diff --git a/playbooks/roles/confluent-community-broker/templates/producer.properties b/playbooks/roles/confluent-community-broker/templates/producer.properties index c5383cf..08ade13 100644 --- a/playbooks/roles/confluent-community-broker/templates/producer.properties +++ b/playbooks/roles/confluent-community-broker/templates/producer.properties @@ -18,7 +18,7 @@ # list of brokers used for bootstrapping knowledge about the rest of the cluster # format: host1:port1,host2:port2 ... -bootstrap.servers={% for node in groups['kafka-server-nodes'] %}{{ node }}:9092{% if not loop.last %},{% endif %}{% endfor %} +bootstrap.servers={% for node in groups['kafka-server-nodes'] %}{{ hostvars[node]['ansible_'~hostvars[node].cluster_interface|default(hostvars[node].ansible_default_ipv4.alias)]['ipv4']['address'] }}:9092{% if not loop.last %},{% endif %}{% endfor %} # specify the compression codec for all data generated: none, gzip, snappy, lz4, zstd compression.type=none diff --git a/playbooks/roles/confluent-community-broker/templates/server.properties b/playbooks/roles/confluent-community-broker/templates/server.properties index 045769b..624f6e1 100644 --- a/playbooks/roles/confluent-community-broker/templates/server.properties +++ b/playbooks/roles/confluent-community-broker/templates/server.properties @@ -6,10 +6,10 @@ listeners=PLAINTEXT://:9092 #listeners={{ hostvars[node]['ansible_'~hostvars[node].cluster_interface|default(hostvars[node].ansible_default_ipv4.alias)]['ipv4']['address'] }}:2888:3888 #{% 
endfor %} -listeners={% for node in groups['kafka-server-nodes'] %}{{ node }}:9092{% if not loop.last %},{% endif %}{% endfor %} +listeners={% for node in groups['kafka-server-nodes'] %}{{ hostvars[node]['ansible_'~hostvars[node].cluster_interface|default(hostvars[node].ansible_default_ipv4.alias)]['ipv4']['address'] }}:9092{% if not loop.last %},{% endif %}{% endfor %} #zookeeper.connect=104.130.220.120:2181,104.130.220.119:2181,104.130.220.122:2181 -zookeeper.connect={% for node in groups['kafka-zk-nodes'] %}{{ node }}:2181{% if not loop.last %},{% endif %}{% endfor %} +zookeeper.connect={% for node in groups['kafka-zk-nodes'] %}{{ hostvars[node]['ansible_'~hostvars[node].cluster_interface|default(hostvars[node].ansible_default_ipv4.alias)]['ipv4']['address'] }}:2181{% if not loop.last %},{% endif %}{% endfor %} log.dirs=/kafka/data change to proper mount From 544c74b3f0425f203315cc59b782a082479870f3 Mon Sep 17 00:00:00 2001 From: David Grier Date: Thu, 7 Nov 2019 16:23:39 +0000 Subject: [PATCH 10/10] adding checkmode --- playbooks/create_groups.yml | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/playbooks/create_groups.yml b/playbooks/create_groups.yml index 41b02c6..39d1d9c 100644 --- a/playbooks/create_groups.yml +++ b/playbooks/create_groups.yml @@ -5,7 +5,7 @@ gather_facts: False tasks: - name: Add all cluster nodes to the hadoop-cluster group -# check_mode: no + check_mode: no add_host: name: "{{ hostvars[item].inventory_hostname }}" ansible_host: "{{ hostvars[item].ansible_host|default(hostvars[item].ansible_ssh_host) }}" @@ -22,7 +22,7 @@ when: "'hadoop-cluster' not in groups or groups['hadoop-cluster']|length < 1" - name: Add all cluster nodes to the common-cluster group -# check_mode: no + check_mode: no add_host: name: "{{ hostvars[item].inventory_hostname }}" ansible_host: "{{ hostvars[item].ansible_host|default(hostvars[item].ansible_ssh_host) }}" @@ -53,7 +53,7 @@ when: distro == "hdp" - name: Add the last masternode 
to ambari-node variable group -# check_mode: no + check_mode: no add_host: name: "{{ hostvars[item].inventory_hostname }}" ansible_host: "{{ hostvars[item].ansible_host|default(hostvars[item].ansible_ssh_host) }}" @@ -67,7 +67,7 @@ when: "'ambari-node' not in groups or groups['ambari-node']|length < 1" - name: Add kafka nodes nodes to the kafka-cluster group -# check_mode: no + check_mode: no add_host: name: "{{ hostvars[item].inventory_hostname }}" ansible_host: "{{ hostvars[item].ansible_host|default(hostvars[item].ansible_ssh_host) }}" @@ -83,7 +83,7 @@ when: "'kafka-cluster' not in groups or groups['kafka-cluster']|length < 1" - name: Add kafka nodes to the kafka-zookeeper cluster group -# check_mode: no + check_mode: no add_host: name: "{{ hostvars[item].inventory_hostname }}" ansible_host: "{{ hostvars[item].ansible_host|default(hostvars[item].ansible_ssh_host) }}" @@ -98,7 +98,7 @@ when: "'kafka-zookeeper-cluster' not in groups or groups['kafka-zookeeper-cluster']|length < 1" - name: Add kafka nodes nodes to the kafka-broker-cluster group -# check_mode: no + check_mode: no add_host: name: "{{ hostvars[item].inventory_hostname }}" ansible_host: "{{ hostvars[item].ansible_host|default(hostvars[item].ansible_ssh_host) }}" @@ -113,7 +113,7 @@ when: "'kafka-broker-cluster' not in groups or groups['kafka-broker-cluster']|length < 1" - name: Add kafka nodes nodes to the kafka-manager group -# check_mode: no + check_mode: no add_host: name: "{{ hostvars[item].inventory_hostname }}" ansible_host: "{{ hostvars[item].ansible_host|default(hostvars[item].ansible_ssh_host) }}"