Skip to content

Commit

Permalink
Merge pull request #119 from seraphin/conf_commun_dev
Browse files Browse the repository at this point in the history
Kafka Deployment
  • Loading branch information
magglass1 authored Nov 7, 2019
2 parents 4431ef8 + 544c74b commit 1ade411
Show file tree
Hide file tree
Showing 25 changed files with 812 additions and 4 deletions.
87 changes: 87 additions & 0 deletions playbooks/confluent-community.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,87 @@
---
# Playbook: deploy a Confluent Community (Kafka) cluster.
# Builds the dynamic inventory groups first, then applies the common,
# zookeeper, broker and manager roles to their respective node groups.

# NOTE(review): playbook-level 'include' is deprecated since Ansible 2.8;
# switch to 'import_playbook' once the supported Ansible baseline allows.
- include: create_groups.yml

- name: Apply the common role to all nodes
  hosts: kafka-cluster
  any_errors_fatal: true
  become: yes
  pre_tasks:
    # Dump all hostvars when 'debug: true' is set in the group vars.
    - name: Show kafka-cluster info
      debug: var=hostvars[inventory_hostname]
      when: debug

    - name: include confluent community vars
      include_vars: group_vars/confluent-community
  roles:
    - confluent-community-common

- name: Apply the zookeeper role to all zk nodes
  hosts: kafka-zookeeper-cluster
  any_errors_fatal: true
  become: yes
  pre_tasks:
    - name: Show kafka-zookeeper-cluster info
      debug: var=hostvars[inventory_hostname]
      when: debug

    - name: include confluent-community vars
      include_vars: group_vars/confluent-community
  roles:
    - confluent-community-zookeeper

- name: Apply the broker role to all broker nodes
  hosts: kafka-broker-cluster
  any_errors_fatal: true
  become: yes
  pre_tasks:
    # FIX: task name said "hadoop-broker-cluster"; this play targets
    # kafka-broker-cluster.
    - name: Show kafka-broker-cluster info
      debug: var=hostvars[inventory_hostname]
      when: debug

    - name: include confluent community vars
      include_vars: group_vars/confluent-community
  roles:
    - confluent-community-broker

- name: "generate site facts"
  hosts: localhost
  any_errors_fatal: true
  become: no
  # FIX: dnmemory/mnmemory/cores were written as bare play attributes,
  # which are not valid play keywords; they must live under 'vars:' to be
  # usable as variables in the task below.
  # NOTE(review): these read the hadoop groups (master-nodes/slave-nodes);
  # confirm those groups exist in a kafka-only inventory or this play fails.
  vars:
    dnmemory: "{{ hostvars[groups['slave-nodes'][0]]['ansible_memtotal_mb'] / 1024 }}"
    mnmemory: "{{ hostvars[groups['master-nodes'][0]]['ansible_memtotal_mb'] / 1024 }}"
    cores: "{{ hostvars[groups['slave-nodes'][0]]['ansible_processor_count'] }}"
  tasks:
    # FIX: the original mixed 'module:' with bare key=value lines under
    # 'action:', which is not valid YAML; rewritten as action + args.
    # NOTE(review): custom module — confirm whether it resolves with the
    # '.py' suffix; Ansible modules are normally referenced without it.
    - name: "gather site facts"
      action: confluentsitefacts.py
      args:
        dnmemory: "{{ dnmemory }}"
        mnmemory: "{{ mnmemory }}"
        cores: "{{ cores }}"
        manager_server: "localhost"
        ambari_pass: "admin"
        cluster_name: "{{ cluster_name }}"
        compare: "false"
        current_facts: "false"

- name: Apply the confluent-community manager role to manager node group
  hosts: confluent-community-manager
  become: yes
  pre_tasks:
    - name: include confluent community vars
      include_vars: group_vars/confluent-community
  roles:
    - confluent-community-manager
  post_tasks:
    # Remove generated artifacts left behind on the control host.
    - name: Cleanup the temporary files
      file: path={{ item }} state=absent
      with_items:
        - /tmp/cluster_blueprint
        - /tmp/cluster_template
        - /tmp/alert_targets
        - /tmp/confluentrepo
#  tags:
#    - confluent-community-manager-only
91 changes: 87 additions & 4 deletions playbooks/create_groups.yml
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,7 @@
gather_facts: False
tasks:
- name: Add all cluster nodes to the hadoop-cluster group
always_run: yes
check_mode: no
add_host:
name: "{{ hostvars[item].inventory_hostname }}"
ansible_host: "{{ hostvars[item].ansible_host|default(hostvars[item].ansible_ssh_host) }}"
Expand All @@ -18,9 +18,32 @@
- "{{ groups['master-nodes']|default([]) }}"
- "{{ groups['slave-nodes']|default([]) }}"
- "{{ groups['edge-nodes']|default([]) }}"
# FIX: the diff left both the old and new 'register' lines in place, and the
# new value 'hadoop-cluster' is not a valid Ansible variable name (hyphens
# are not allowed in identifiers) — keep the underscore form.
register: hadoop_cluster
when: "'hadoop-cluster' not in groups or groups['hadoop-cluster']|length < 1"

# Collect every hadoop and kafka node into one umbrella group (bd-cluster).
# FIX: the task name said "common-cluster" but the group being populated is
# bd-cluster; FIX: 'register: bd-cluster' was an invalid variable name
# (hyphens are not allowed in Ansible identifiers).
- name: Add all cluster nodes to the bd-cluster group
  check_mode: no
  add_host:
    name: "{{ hostvars[item].inventory_hostname }}"
    ansible_host: "{{ hostvars[item].ansible_host|default(hostvars[item].ansible_ssh_host) }}"
    ansible_user: "{{ hostvars[item].ansible_user|default('root') }}"
    ansible_ssh_pass: "{{ hostvars[item].ansible_ssh_pass|default('') }}"
    ansible_become_user: root
    ansible_become_pass: "{{ hostvars[item].ansible_ssh_pass|default('') }}"
    groups: bd-cluster
  with_flattened:
    - "{{ groups['master-nodes']|default([]) }}"
    - "{{ groups['slave-nodes']|default([]) }}"
    - "{{ groups['edge-nodes']|default([]) }}"
    - "{{ groups['kafka-server-nodes']|default([]) }}"
    - "{{ groups['kafka-zk-nodes']|default([]) }}"
  register: bd_cluster
  when: "'bd-cluster' not in groups or groups['bd-cluster']|length < 1"

# NOTE(review): leftover debugging aid — prints the resolved hadoop-cluster
# membership on every run; consider removing or gating behind 'when: debug'.
- name: debugging some vars
  debug:
    var: groups['hadoop-cluster']

# Load distro-specific variables (Cloudera) only when targeting CDH.
- name: "include cdh vars"
  include_vars: group_vars/cloudera
  when: distro == "cdh"
Expand All @@ -30,7 +53,7 @@
when: distro == "hdp"

- name: Add the last masternode to ambari-node variable group
always_run: yes
check_mode: no
add_host:
name: "{{ hostvars[item].inventory_hostname }}"
ansible_host: "{{ hostvars[item].ansible_host|default(hostvars[item].ansible_ssh_host) }}"
Expand All @@ -42,4 +65,64 @@
with_items: "{{ groups['master-nodes']|sort|last }}"
# FIX: 'register' takes a literal variable name and is never templated;
# "{{ adminnode }}" is not a valid identifier and would fail at runtime.
register: adminnode
when: "'ambari-node' not in groups or groups['ambari-node']|length < 1"


# Collect all kafka brokers and zookeepers into the kafka-cluster group.
# FIX: "nodes nodes" typo in the task name; FIX: 'register: kafka-cluster'
# was an invalid variable name (hyphens not allowed in Ansible identifiers).
- name: Add kafka nodes to the kafka-cluster group
  check_mode: no
  add_host:
    name: "{{ hostvars[item].inventory_hostname }}"
    ansible_host: "{{ hostvars[item].ansible_host|default(hostvars[item].ansible_ssh_host) }}"
    ansible_user: "{{ hostvars[item].ansible_user|default('root') }}"
    ansible_ssh_pass: "{{ hostvars[item].ansible_ssh_pass|default('') }}"
    ansible_become_user: root
    ansible_become_pass: "{{ hostvars[item].ansible_ssh_pass|default('') }}"
    groups: kafka-cluster
  with_flattened:
    - "{{ groups['kafka-server-nodes']|default([]) }}"
    - "{{ groups['kafka-zk-nodes']|default([]) }}"
  register: kafka_cluster
  when: "'kafka-cluster' not in groups or groups['kafka-cluster']|length < 1"

# Collect zookeeper nodes into the kafka-zookeeper-cluster group.
# FIX: 'register: kafka-zookeeper-cluster' was an invalid variable name
# (hyphens are not allowed in Ansible identifiers).
- name: Add kafka nodes to the kafka-zookeeper cluster group
  check_mode: no
  add_host:
    name: "{{ hostvars[item].inventory_hostname }}"
    ansible_host: "{{ hostvars[item].ansible_host|default(hostvars[item].ansible_ssh_host) }}"
    ansible_user: "{{ hostvars[item].ansible_user|default('root') }}"
    ansible_ssh_pass: "{{ hostvars[item].ansible_ssh_pass|default('') }}"
    ansible_become_user: root
    ansible_become_pass: "{{ hostvars[item].ansible_ssh_pass|default('') }}"
    groups: kafka-zookeeper-cluster
  with_flattened:
    - "{{ groups['kafka-zk-nodes']|default([]) }}"
  register: kafka_zookeeper_cluster
  when: "'kafka-zookeeper-cluster' not in groups or groups['kafka-zookeeper-cluster']|length < 1"

# Collect broker nodes into the kafka-broker-cluster group.
# FIX: "nodes nodes" typo in the task name; FIX: 'register:
# kafka-broker-cluster' was an invalid variable name (hyphens not allowed).
- name: Add kafka nodes to the kafka-broker-cluster group
  check_mode: no
  add_host:
    name: "{{ hostvars[item].inventory_hostname }}"
    ansible_host: "{{ hostvars[item].ansible_host|default(hostvars[item].ansible_ssh_host) }}"
    ansible_user: "{{ hostvars[item].ansible_user|default('root') }}"
    ansible_ssh_pass: "{{ hostvars[item].ansible_ssh_pass|default('') }}"
    ansible_become_user: root
    ansible_become_pass: "{{ hostvars[item].ansible_ssh_pass|default('') }}"
    groups: kafka-broker-cluster
  with_flattened:
    - "{{ groups['kafka-server-nodes']|default([]) }}"
  register: kafka_broker_cluster
  when: "'kafka-broker-cluster' not in groups or groups['kafka-broker-cluster']|length < 1"

# Put the (sorted) last kafka node into the kafka-manager group, mirroring
# the ambari-node selection above.
# FIX: "nodes nodes" typo; FIX: 'register: kafka-manager' was an invalid
# variable name (hyphens not allowed in Ansible identifiers).
# NOTE(review): no 'kafka-nodes' group is defined elsewhere in this change
# (group_vars exist for kafka-server-nodes / kafka-zk-nodes), and there is
# no default([]) guard, so this lookup fails if the group is absent —
# confirm the intended group name.
- name: Add kafka nodes to the kafka-manager group
  check_mode: no
  add_host:
    name: "{{ hostvars[item].inventory_hostname }}"
    ansible_host: "{{ hostvars[item].ansible_host|default(hostvars[item].ansible_ssh_host) }}"
    ansible_user: "{{ hostvars[item].ansible_user|default('root') }}"
    ansible_ssh_pass: "{{ hostvars[item].ansible_ssh_pass|default('') }}"
    ansible_become_user: root
    ansible_become_pass: "{{ hostvars[item].ansible_ssh_pass|default('') }}"
    groups: kafka-manager
  with_flattened:
    - "{{ groups['kafka-nodes']|sort|last }}"
  register: kafka_manager
  when: "'kafka-manager' not in groups or groups['kafka-manager']|length < 1"
25 changes: 25 additions & 0 deletions playbooks/group_vars/confluent-community
Original file line number Diff line number Diff line change
@@ -0,0 +1,25 @@
---
# Shared variables for the confluent-community playbook and roles.
cluster_name: 'KafkaPoc'
managernode: 'manager-node'
confluent_community_version: '3.1'
# NOTE(review): plaintext credentials committed to VCS — consider moving
# these to ansible-vault or an external secret store.
admin_password: 'admin'
services_password: 'AsdQwe123'
# NOTE(review): this address appears redacted/mangled by scraping
# ("[email protected]") — confirm the real alert recipient.
alerts_contact: '[email protected]'
# Wait for services to come up, with a cap.
wait: true
wait_timeout: 1800 # 30 minutes

data_disks_filesystem: xfs
configure_firewall: false
custom_blueprint: false
custom_repo: false
custom_repo_url: ''

# First zookeeper id; subsequent nodes increment from here.
zookeeper_starting_id: 1

# First broker.id; subsequent brokers increment from here.
broker_starting_myid: 1

# JMX metric exposure toggles.
jolokia_enabled: true
jmxexporter_enabled: true

# set to true to show host variables
debug: false
14 changes: 14 additions & 0 deletions playbooks/group_vars/kafka-nodes
Original file line number Diff line number Diff line change
@@ -0,0 +1,14 @@
---
# FIX: added the '---' document-start marker, matching the other
# group_vars files in this change (e.g. kafka-server-nodes).
###############################################################
# use template file for example references                    #
# Default Rackspace kafka server node                         #
###############################################################
cluster_interface: 'eth0'
# 0 = use a static inventory; no cloud servers are built for this group.
cloud_nodes_count: 0
cloud_image: 'CentOS 7 (PVHVM)'
# cloud_image: 'CentOS 6 (PVHVM)'
cloud_flavor: 'performance2-15'
build_kafka_cbs: true
cbs_disks_size: 200
cbs_disks_type: 'SATA'
hadoop_disk: xvde
datanode_disks: ['xvdf', 'xvdg']
126 changes: 126 additions & 0 deletions playbooks/group_vars/kafka-nodes-templates
Original file line number Diff line number Diff line change
@@ -0,0 +1,126 @@
---
# FIX: removed a stray '--' line that preceded the '---' document start.
###############################################################
# Example configurations for kafka nodes.                     #
# Copy ONE section into group_vars/kafka-nodes and adjust.    #
###############################################################

###############################################################
# Default GCP kafka server node                               #
###############################################################
# FIX: this section was left uncommented alongside the Rackspace section
# below, producing duplicate YAML keys (silent last-wins in most parsers);
# commented out like the other example sections. The Rackspace values,
# which won under last-wins, remain the active defaults.
#cluster_interface: 'eth0'
#cloud_nodes_count: 3
#cloud_image: 'projects/centos-cloud/global/images/centos-7-v20190916'
#cloud_flavor: 'n1-standard-4'
#build_extra_disks: true
#extra_disks_size: 200
#extra_disks_type: 'SATA'
#hadoop_disk: xvde
#datanode_disks: ['xvdf', 'xvdg']

###############################################################
# Default Rackspace kafka server node                         #
###############################################################
cluster_interface: 'eth0'
cloud_nodes_count: 6
cloud_image: 'CentOS 7 (PVHVM)'
# cloud_image: 'CentOS 6 (PVHVM)'
cloud_flavor: 'performance2-15'
build_kafka_cbs: true
cbs_disks_size: 200
cbs_disks_type: 'SATA'
hadoop_disk: xvde
datanode_disks: ['xvdf', 'xvdg']

#########################################
## example for Rackspace cloud servers ##
## general1-8 flavor and CentOS 7      ##
## root filesystem used for /hadoop    ##
## using the default public network    ##
#########################################

#cluster_interface: 'eth0'
#cloud_nodes_count: 3
#cloud_image: 'CentOS 7 (PVHVM)'
# cloud_image: 'CentOS 6 (PVHVM)'
#cloud_flavor: 'performance2-15'
#build_datanode_cbs: true
#cbs_disks_size: 200
#cbs_disks_type: 'SATA'
#hadoop_disk: xvde
#datanode_disks: ['xvdf', 'xvdg']

##################################
## example for static inventory ##
##################################

# cluster_interface: 'bond1'
# bond_interfaces: ['eth4', 'eth6']
# bond_netmask: '255.255.255.0'
# hadoop_disk: sdb

##############################################
## example for Rackspace cloud servers      ##
## performance2-15 flavor and CentOS 6 or 7 ##
## root filesystem used for /hadoop         ##
## Namenode and Masterservices extra mounts ##
## using the default public network         ##
##############################################
#cluster_interface: 'eth0'
#cloud_nodes_count: 3
#cloud_image: 'CentOS 7 (PVHVM)'
#cloud_image: 'CentOS 6 (PVHVM)'
#cloud_flavor: 'performance2-15'
#build_datanode_cbs: true
#cbs_disks_size: 200
#cbs_disks_type: 'SATA'
#hadoop_disk: xvde
#namenode_disk: xvdf
#masterservices_disk: xvdg
#datanode_disks: ['xvdf', 'xvdg']

##############################################
## example for Rackspace OnMetal servers    ##
## performance2-15 flavor and CentOS 6 or 7 ##
## root filesystem used for /hadoop         ##
## Namenode and Masterservices on SSD       ##
## using the default public network         ##
##############################################
#cluster_interface: 'bond0.101'
#cloud_nodes_count: 3
#cloud_image: 'OnMetal - CentOS 7'
#cloud_flavor: 'onmetal-io1'
#build_datanode_cbs: true
#cbs_disks_size: 200
#cbs_disks_type: 'SATA'
#hadoop_disk: sdb
#namenode_disk: sdb
#masterservices_disk: sdc
#datanode_disks: ['sdd', 'sde']

#############################################
## example for Rackspace cloud servers     ##
## performance2-15 flavor and Ubuntu 14    ##
## ephemeral disk used for /hadoop         ##
## using ServiceNet as the cluster network ##
#############################################

# cluster_interface: 'eth1'
# cloud_nodes_count: 2
# cloud_image: 'Ubuntu 14.04 LTS (Trusty Tahr) (PVHVM)'
# cloud_flavor: 'performance2-15'
# hadoop_disk: xvde

######################################
## example for Rackspace OnMetal v2 ##
######################################

#cluster_interface: bond0
#cloud_nodes_count: 3
#cloud_image: 'OnMetal - CentOS 7'
## cloud_image: 'OnMetal - Ubuntu 14.04 LTS (Trusty Tahr)'
# cloud_flavor: 'onmetal-general2-small'
#cloud_flavor: 'onmetal-io1'
#hadoop_disk: sdb
#datanode_disks: sdc
15 changes: 15 additions & 0 deletions playbooks/group_vars/kafka-server-nodes
Original file line number Diff line number Diff line change
@@ -0,0 +1,15 @@
---
###############################################################
# use template file for example references                    #
# ~/ansible-hadoop/playbooks/group_vars/kafka-nodes-templates #
###############################################################
# NOTE(review): the header previously pointed at master-nodes-templates;
# this commit adds kafka-nodes-templates, which holds the kafka examples.
cluster_interface: 'eth0'
# number of cloud servers to build for this group
cloud_nodes_count: 6
cloud_image: 'CentOS 7 (PVHVM)'
# cloud_image: 'CentOS 6 (PVHVM)'
cloud_flavor: 'performance1-8'
build_kafka_cbs: true
cbs_disks_size: 200
cbs_disks_type: 'SATA'
kafka_disk: xvde
#datanode_disks: ['xvdf', 'xvdg']
Loading

0 comments on commit 1ade411

Please sign in to comment.