From 75268120e88ccaff2ec0a490be0993b1824245df Mon Sep 17 00:00:00 2001 From: "Jose A. Rivera" Date: Mon, 18 Jan 2016 17:40:11 -0600 Subject: [PATCH] Change the VM automation from phd to vagrant+ansible. --- .gitignore | 10 + 00_ALL_post-install.sh | 79 ---- README.md | 14 +- phd/cluster_definition.conf.sample | 3 - phd/storage-ha-sgog.scenario | 210 --------- phd/storage-ha-smb-ad.scenario | 229 ---------- scripts/watch-ha.sh | 9 + vagrant-ansible/Vagrantfile | 409 ++++++++++++++++++ .../playbooks/files/99-no-dns.conf | 2 + vagrant-ansible/playbooks/files/ctdb | 4 + .../playbooks/files/export.conf.j2 | 20 + vagrant-ansible/playbooks/files/nodes.j2 | 3 + .../playbooks/files/resolv.conf.j2 | 3 + vagrant-ansible/playbooks/files/smb.conf.j2 | 51 +++ .../playbooks/files/storage-ha.conf.j2 | 28 ++ .../playbooks/group_vars/nfs_servers | 2 + .../playbooks/group_vars/smb_servers | 2 + .../playbooks/group_vars/storage-ha | 3 + vagrant-ansible/playbooks/raw-centos.yml | 20 + .../playbooks/roles/common/defaults/main.yml | 13 + .../playbooks/roles/common/handlers/main.yml | 4 + .../playbooks/roles/common/tasks/main.yml | 45 ++ .../roles/common/tasks/setup-Debian.yml | 4 + .../roles/common/tasks/setup-RedHat.yml | 40 ++ .../roles/common/templates/firewall.bash.j2 | 83 ++++ .../roles/common/templates/firewall.j2 | 41 ++ .../roles/glusterfs/defaults/main.yml | 2 + .../roles/glusterfs/files/glusterfs-epel.repo | 25 ++ .../playbooks/roles/glusterfs/tasks/main.yml | 14 + .../roles/glusterfs/tasks/setup-Debian.yml | 9 + .../roles/glusterfs/tasks/setup-RedHat.yml | 16 + .../playbooks/roles/glusterfs/vars/Debian.yml | 2 + .../playbooks/roles/glusterfs/vars/RedHat.yml | 2 + .../roles/storage-ha/defaults/main.yml | 3 + .../roles/storage-ha/files/storage-ha.repo | 7 + .../roles/storage-ha/handlers/main.yml | 5 + .../roles/storage-ha/tasks/conf-NFS.yml | 14 + .../roles/storage-ha/tasks/conf-SMB.yml | 13 + .../playbooks/roles/storage-ha/tasks/main.yml | 24 + .../roles/storage-ha/tasks/setup-AD.yml | 71 +++ .../roles/storage-ha/tasks/setup-RedHat.yml | 4 + .../roles/storage-ha/templates/smb.conf.j2 | 24 + .../playbooks/roles/storage-ha/vars/NFS.yml | 2 + .../playbooks/roles/storage-ha/vars/SMB.yml | 48 ++ .../playbooks/roles/storage-ha/vars/main.yml | 2 + vagrant-ansible/playbooks/storage-ha.yml | 192 ++++++++ 46 files changed, 1280 insertions(+), 530 deletions(-) create mode 100644 .gitignore delete mode 100755 00_ALL_post-install.sh delete mode 100644 phd/cluster_definition.conf.sample delete mode 100644 phd/storage-ha-sgog.scenario delete mode 100644 phd/storage-ha-smb-ad.scenario create mode 100755 scripts/watch-ha.sh create mode 100644 vagrant-ansible/Vagrantfile create mode 100644 vagrant-ansible/playbooks/files/99-no-dns.conf create mode 100644 vagrant-ansible/playbooks/files/ctdb create mode 100644 vagrant-ansible/playbooks/files/export.conf.j2 create mode 100644 vagrant-ansible/playbooks/files/nodes.j2 create mode 100644 vagrant-ansible/playbooks/files/resolv.conf.j2 create mode 100644 vagrant-ansible/playbooks/files/smb.conf.j2 create mode 100644 vagrant-ansible/playbooks/files/storage-ha.conf.j2 create mode 100644 vagrant-ansible/playbooks/group_vars/nfs_servers create mode 100644 vagrant-ansible/playbooks/group_vars/smb_servers create mode 100644 vagrant-ansible/playbooks/group_vars/storage-ha create mode 100644 vagrant-ansible/playbooks/raw-centos.yml create mode 100644 vagrant-ansible/playbooks/roles/common/defaults/main.yml create mode 100644 vagrant-ansible/playbooks/roles/common/handlers/main.yml
create mode 100644 vagrant-ansible/playbooks/roles/common/tasks/main.yml create mode 100644 vagrant-ansible/playbooks/roles/common/tasks/setup-Debian.yml create mode 100644 vagrant-ansible/playbooks/roles/common/tasks/setup-RedHat.yml create mode 100644 vagrant-ansible/playbooks/roles/common/templates/firewall.bash.j2 create mode 100644 vagrant-ansible/playbooks/roles/common/templates/firewall.j2 create mode 100644 vagrant-ansible/playbooks/roles/glusterfs/defaults/main.yml create mode 100644 vagrant-ansible/playbooks/roles/glusterfs/files/glusterfs-epel.repo create mode 100644 vagrant-ansible/playbooks/roles/glusterfs/tasks/main.yml create mode 100644 vagrant-ansible/playbooks/roles/glusterfs/tasks/setup-Debian.yml create mode 100644 vagrant-ansible/playbooks/roles/glusterfs/tasks/setup-RedHat.yml create mode 100644 vagrant-ansible/playbooks/roles/glusterfs/vars/Debian.yml create mode 100644 vagrant-ansible/playbooks/roles/glusterfs/vars/RedHat.yml create mode 100644 vagrant-ansible/playbooks/roles/storage-ha/defaults/main.yml create mode 100644 vagrant-ansible/playbooks/roles/storage-ha/files/storage-ha.repo create mode 100644 vagrant-ansible/playbooks/roles/storage-ha/handlers/main.yml create mode 100644 vagrant-ansible/playbooks/roles/storage-ha/tasks/conf-NFS.yml create mode 100644 vagrant-ansible/playbooks/roles/storage-ha/tasks/conf-SMB.yml create mode 100644 vagrant-ansible/playbooks/roles/storage-ha/tasks/main.yml create mode 100644 vagrant-ansible/playbooks/roles/storage-ha/tasks/setup-AD.yml create mode 100644 vagrant-ansible/playbooks/roles/storage-ha/tasks/setup-RedHat.yml create mode 100644 vagrant-ansible/playbooks/roles/storage-ha/templates/smb.conf.j2 create mode 100644 vagrant-ansible/playbooks/roles/storage-ha/vars/NFS.yml create mode 100644 vagrant-ansible/playbooks/roles/storage-ha/vars/SMB.yml create mode 100644 vagrant-ansible/playbooks/roles/storage-ha/vars/main.yml create mode 100644 vagrant-ansible/playbooks/storage-ha.yml diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..efce8e3 --- /dev/null +++ b/.gitignore @@ -0,0 +1,10 @@ +hacking +.vagrant/ +*.sw* +custom.yml* +custom_files/ +active_vms.yml +active_vms.yaml +vagrant.yaml* +vagrant.yml* +host_vars/ diff --git a/00_ALL_post-install.sh b/00_ALL_post-install.sh deleted file mode 100755 index 8b28404..0000000 --- a/00_ALL_post-install.sh +++ /dev/null @@ -1,79 +0,0 @@ -#!/bin/bash -# Prior to this, install base OS, configure the NICs, and set hostnames - -# Install EPEL -echo "Installing EPEL" - -verarch=(`yum version nogroups | grep "Installed" | cut -d \ -f 2 | tr \/ \ `) -vers=${verarch[0]} -arch=${verarch[1]} - -rpm --import http://ftp-stud.hs-esslingen.de/pub/epel//RPM-GPG-KEY-EPEL-$vers -rpm -Uvh http://ftp-stud.hs-esslingen.de/pub/epel/epel-release-latest-$vers.noarch.rpm - -echo "Adding repos" -# Configure repos for Gluster-related dependencies. -cat >/etc/yum.repos.d/gluster.repo < /etc/glusterfs-ganesha/exports/export.volume_export.conf -EXPORT{ - Export_Id = 90; - Path="/export"; - FSAL { - name = "GLUSTER"; - hostname="$(hostname -s)"; - volume="export"; - } - Access_type = RW; - Squash = No_root_squash; - Disable_ACL = TRUE; - Pseudo="/export"; - Protocols = "3,4" ; - Transports = "UDP,TCP" ; - SecType = "sys"; - Tag = "volume_export"; -} -END - -grep -qs "export.volume_export.conf" /etc/glusterfs-ganesha/nfs-ganesha.conf -if [[ ! $? 
]]; then - cat << END >> /etc/glusterfs-ganesha/nfs-ganesha.conf -%include "/etc/glusterfs-ganesha/exports/export.volume_export.conf" -END -fi - -# Samba config -mkdir -p /data/shares/xfs - -cat << END > /etc/samba/smb.conf -[global] - workgroup = WORKGROUP - netbios name = STORAGE-HA - server string = Samba Server Version %v - security = user - log file = /var/log/samba/log.%m - max log size = 50 - server max protocol = SMB3 - clustering = Yes - load printers = No - disable spoolss = Yes - show add printer wizard = No - stat cache = No - printing = bsd - cups options = raw - print command = lpr -r -P'%p' %s - lpq command = lpq -P'%p' - lprm command = lprm -P'%p' %j - map archive = No - map readonly = no - store dos attributes = Yes - kernel share modes = No - -[gluster-share] - comment = For samba share of volume share - path = / - read only = No - guest ok = Yes - vfs objects = glusterfs - glusterfs:loglevel = 7 - glusterfs:logfile = /var/log/samba/glusterfs-share.%M.log - glusterfs:volume = share -END - -rm -f /etc/ctdb/nodes -for node in $(echo $PHD_ENV_nodes); do - ip=$(getent hosts $node | head -n 1 | awk '{print $1}') - echo "$ip" >> /etc/ctdb/nodes -done - -### CTDB RA -if [[ ! -f CTDB ]]; then - wget https://raw.githubusercontent.com/jarrpa/storage-ha/master/src/CTDB -fi -chmod 755 CTDB -cp -f CTDB /usr/lib/ocf/resource.d/heartbeat/CTDB - -echo -e "storage\nstorage" | passwd hacluster - -# Wipe leftover GlusterFS status -rm -rf /var/lib/glusterd/vols/* -rm -rf /var/lib/glusterd/peers/* - -# clean brick directories -rm -rf /data/bricks -mkdir -p /data/bricks/ctdb -mkdir -p /data/bricks/ganesha -mkdir -p /data/bricks/share -mkdir -p /data/bricks/export - -# remove rdma -sed -i "s/,rdma//" /etc/glusterfs/glusterd.vol - -service glusterd start - -exit 0 -.... - -## -# do peer probe -# create gluster volumes -## -target=$PHD_ENV_nodes1 -.... - -node_count=`wc -w <<< "${PHD_ENV_nodes}"` -gluster peer probe $PHD_ENV_nodes2 - -sleep 2 - -gluster volume create ctdb replica ${node_count} transport tcp \ - ${PHD_ENV_nodes1}:/data/bricks/ctdb \ - ${PHD_ENV_nodes2}:/data/bricks/ctdb \ - force -gluster volume set ctdb network.ping-timeout 10 -gluster volume set ctdb user.smb disable -gluster volume set ctdb nfs.disable on -gluster volume start ctdb -gluster volume list | grep ctdb - -gluster volume create ganesha replica 2 transport tcp \ - ${PHD_ENV_nodes1}:/data/bricks/ganesha \ - ${PHD_ENV_nodes2}:/data/bricks/ganesha \ - force -gluster volume set ganesha network.ping-timeout 10 -gluster volume set ganesha user.smb disable -gluster volume set ganesha nfs.disable on -gluster volume start ganesha -gluster volume list | grep ganesha - -gluster volume create share transport tcp \ - ${PHD_ENV_nodes1}:/data/bricks/share \ - ${PHD_ENV_nodes2}:/data/bricks/share \ - force -gluster volume set share user.smb disable -gluster volume set share nfs.disable on -gluster volume start share -gluster volume list | grep share - -gluster volume create export transport tcp \ - ${PHD_ENV_nodes1}:/data/bricks/export \ - ${PHD_ENV_nodes2}:/data/bricks/export \ - force -gluster volume set export user.smb disable -gluster volume set export nfs.disable on -gluster volume start export -gluster volume list | grep export -.... 
diff --git a/phd/storage-ha-smb-ad.scenario b/phd/storage-ha-smb-ad.scenario deleted file mode 100644 index 275e04d..0000000 --- a/phd/storage-ha-smb-ad.scenario +++ /dev/null @@ -1,229 +0,0 @@ -# Storage-HA-SGoG: Samba and Ganesha over GlusterFS - -################################# -# Scenario Requirements Section # -################################# -= REQUIREMENTS = -nodes=2 -floating_ips=1 - -packages=storage-ha-smb storage-ha-nfs - -cluster_init=0 - -###################### -# Deployment Scripts # -###################### -= SCRIPTS = - -## -# remove previously existing volumes and peers -## -target=$PHD_ENV_nodes1 -.... -chkconfig glusterd on -service glusterd start - -for vol in $(gluster volume list); do - echo y | gluster volume stop $vol force - echo y | gluster volume delete $vol -done - -for f in $(find /var/lib/glusterd/peers/ -type f); do - host=`sed -n 's/^hostname1=\(.*\)/\1/p' $f` - gluster peer detach $host force -done - -exit 0 -.... - -## -# Make sure all managed services are disabled -# setup samba configs -# patch required files -## -target=all -.... -service glusterd stop - -chkconfig ctdb off -chkconfig smb off -chkconfig nmb off -chkconfig winbind off -service ctdb stop -service smb stop -service nmb stop -service winbind stop - -chkconfig pacemaker on - -# AD-related config -grep -qs "PEERDNS" /etc/sysconfig/network-scripts/ifcfg-eth0 | grep -q "no" || cat << END >> /etc/sysconfig/network-scripts/ifcfg-eth0 -PEERDNS="no" -END - -cat << END > /etc/resolv.conf -nameserver 192.168.100.1 -search domain.com -END - - -# Ganesha config -mkdir -p /etc/glusterfs-ganesha/exports -cat << END > /etc/glusterfs-ganesha/exports/export.volume_export.conf -EXPORT{ - Export_Id = 90; - Path="/export"; - FSAL { - name = "GLUSTER"; - hostname="$(hostname -s)"; - volume="export"; - } - Access_type = RW; - Squash = No_root_squash; - Disable_ACL = TRUE; - Pseudo="/export"; - Protocols = "3,4" ; - Transports = "UDP,TCP" ; - SecType = "sys"; - Tag = "volume_export"; -} -END - -grep -qs "export.volume_export.conf" /etc/glusterfs-ganesha/nfs-ganesha.conf -if [[ ! $? 
]]; then - cat << END >> /etc/glusterfs-ganesha/nfs-ganesha.conf -%include "/etc/glusterfs-ganesha/exports/export.volume_export.conf" -END -fi - -# Samba config -mkdir -p /data/shares/xfs - -cat << END > /etc/samba/smb.conf -[global] - workgroup = DOMAIN - realm = DOMAIN.COM - netbios name = STORAGE-HA - server string = Samba Server Version %v - security = ADS - log file = /var/log/samba/log.%m - max log size = 50 - server max protocol = SMB3 - clustering = Yes - load printers = No - disable spoolss = Yes - show add printer wizard = No - stat cache = No - winbind enum users = Yes - winbind enum groups = Yes - winbind use default domain = Yes - idmap config DOMAIN:range = 500-40000 - idmap config DOMAIN:backend = autorid - idmap config *:range = 70001-80000 - idmap config * : backend = tdb - printing = bsd - cups options = raw - print command = lpr -r -P'%p' %s - lpq command = lpq -P'%p' - lprm command = lprm -P'%p' %j - map archive = No - map readonly = no - store dos attributes = Yes - kernel share modes = No - -[gluster-share] - comment = For samba share of volume share - path = / - read only = No - guest ok = Yes - vfs objects = glusterfs - glusterfs:loglevel = 7 - glusterfs:logfile = /var/log/samba/glusterfs-share.%M.log - glusterfs:volume = share -END - -rm -f /etc/ctdb/nodes -for node in $(echo $PHD_ENV_nodes); do - ip=$(getent hosts $node | head -n 1 | awk '{print $1}') - echo "$ip" >> /etc/ctdb/nodes -done - -### CTDB RA -if [[ ! -f CTDB ]]; then - wget https://raw.githubusercontent.com/jarrpa/storage-ha/master/src/CTDB -fi -chmod 755 CTDB -cp -f CTDB /usr/lib/ocf/resource.d/heartbeat/CTDB - -echo -e "storage\nstorage" | passwd hacluster - -# Wipe leftover GlusterFS status -rm -rf /var/lib/glusterd/vols/* -rm -rf /var/lib/glusterd/peers/* - -# clean brick directories -rm -rf /data/bricks -mkdir -p /data/bricks/ctdb -mkdir -p /data/bricks/ganesha -mkdir -p /data/bricks/share -mkdir -p /data/bricks/export - -# remove rdma -sed -i "s/,rdma//" /etc/glusterfs/glusterd.vol - -service glusterd start - -exit 0 -.... - -## -# do peer probe -# create gluster volumes -## -target=$PHD_ENV_nodes1 -.... - -node_count=`wc -w <<< "${PHD_ENV_nodes}"` -gluster peer probe $PHD_ENV_nodes2 - -sleep 2 - -gluster volume create ctdb replica ${node_count} transport tcp \ - ${PHD_ENV_nodes1}:/data/bricks/ctdb \ - ${PHD_ENV_nodes2}:/data/bricks/ctdb \ - force -gluster volume set ctdb network.ping-timeout 10 -gluster volume set ctdb user.smb disable -gluster volume set ctdb nfs.disable on -gluster volume start ctdb -gluster volume list | grep ctdb - -gluster volume create ganesha replica 2 transport tcp \ - ${PHD_ENV_nodes1}:/data/bricks/ganesha \ - ${PHD_ENV_nodes2}:/data/bricks/ganesha \ - force -gluster volume set ganesha network.ping-timeout 10 -gluster volume set ganesha user.smb disable -gluster volume set ganesha nfs.disable on -gluster volume start ganesha -gluster volume list | grep ganesha - -gluster volume create share transport tcp \ - ${PHD_ENV_nodes1}:/data/bricks/share \ - ${PHD_ENV_nodes2}:/data/bricks/share \ - force -gluster volume set share user.smb disable -gluster volume set share nfs.disable on -gluster volume start share -gluster volume list | grep share - -gluster volume create export transport tcp \ - ${PHD_ENV_nodes1}:/data/bricks/export \ - ${PHD_ENV_nodes2}:/data/bricks/export \ - force -gluster volume set export user.smb disable -gluster volume set export nfs.disable on -gluster volume start export -gluster volume list | grep export -.... 
diff --git a/scripts/watch-ha.sh b/scripts/watch-ha.sh new file mode 100755 index 0000000..3c3941a --- /dev/null +++ b/scripts/watch-ha.sh @@ -0,0 +1,9 @@ +#!/bin/bash + +CTDB_STATUS="" + +if [ -e /etc/ctdb/nodes ]; then + CTDB_STATUS="echo; ctdb status;" +fi + +watch -n1 "pcs status; ${CTDB_STATUS}" diff --git a/vagrant-ansible/Vagrantfile b/vagrant-ansible/Vagrantfile new file mode 100644 index 0000000..6081dfb --- /dev/null +++ b/vagrant-ansible/Vagrantfile @@ -0,0 +1,409 @@ +# -*- mode: ruby -*- +# vi: set ft=ruby : + +ENV['VAGRANT_DEFAULT_PROVIDER'] = 'libvirt' + +VAGRANTFILE_API_VERSION = "2" +EL_VER = "7" + +require 'yaml' +require 'io/console' + +projectdir = File.expand_path File.dirname(__FILE__) + +#============================================================================== +# +# Default VM settings +# + +settings = { + :vms => [ + { :name => 'node1', }, + { :name => 'node2', }, + { :name => 'node3', }, + { :name => 'node4', }, + ], + :vms_common => { + :box => 'centos/'+EL_VER, + :memory => 2048, + :cpus => 2, + :networks => [ + { + :netid => :public_network, + :auto_config => false, + :dev => "virbr0", + :type => "bridge", + }, + ], + :disks => [ + { + :size => 2, #gigabytes + :parts => [ + { + :fs => "xfs", + :mount => "/data", + :name => "data", + :size => "100%", + }, + ], + }, + ], + :sync_folders => [ + { :src => "../src", :dest => "/shared/source", }, + { :src => "../repo", :dest => "/shared/repo", }, + ], + }, + :groups => { + :ha_servers => [ "all" ], + :smb_servers => [ "all" ], + :gluster_servers => [ "all" ], + }, + :ha => { + :ha_name => "storage_ha", + :virtual_ips => [ + { :ip => "192.168.121.111" }, + { :ip => "192.168.121.112" }, + ], + }, + :samba => { + :setup_samba => true, + :config => nil, + }, + :ctdb => { + :setup_ctdb => true, + :config => nil, + }, + :ad => { + :setup_ad => false, + :domain => "domain.com", + :dns => "0.0.0.0", + }, + :gluster => { + :setup_gluster => true, + :bricks_dir => "/data/bricks", + :volumes => [ + { name: 'share' }, +# { name: "nfs_ganesha" }, + { name: 'ctdb', replica: "n", mount: "/shared/lock" }, + ], + }, +} + +#============================================================================== +# +# Load (if present) and write out custom settings +# + +custom_settings = {} + +f = File.join(projectdir, 'vagrant.yaml') + +if File.exists?(f) + custom_settings = YAML::load_file f +else + File.open(f, 'w') do |file| + file.write settings.to_yaml + end + puts "Wrote initial config: [ #{f} ]" + puts "Please verify settings and run your command again." 
+ exit +end + +settings.merge!(custom_settings) + +File.open(f, 'w') do |file| + file.write settings.to_yaml +end + +vms = settings[:vms] +vms_common = settings[:vms_common] +groups = settings[:groups] +group_vars = settings[:group_vars] +samba = settings[:samba] +ctdb = settings[:ctdb] +ad = settings[:ad] +gluster = settings[:gluster] +ha = settings[:ha] + +#============================================================================== +# +# Derive virtual disk device names and partition numbers +# + +driveletters = ('b'..'z').to_a + +vms_common[:disks].each_with_index do |disk,disk_num| + disk[:num] = disk_num + disk[:dev_names] = { + :libvirt => "vd#{driveletters[disk[:num]]}", + } + disk[:parts].each_with_index do |part,part_num| + part[:num] = part_num + 1 + end +end + +#============================================================================== +# +# Define required software for groups +# + +group_defs = { + :smb_servers => { + :install_pkgs => " samba samba-winbind", + :services => [], + }, + :gluster_servers => { + :install_pkgs => " glusterfs-server glusterfs-client", + :services => [ "glusterd" ], + }, +} +if gluster[:setup_gluster] + group_defs[:smb_servers][:install_pkgs] << " samba-vfs-glusterfs" + group_defs[:smb_servers][:services].push "glusterd" +end +if ctdb[:setup_ctdb] + group_defs[:smb_servers][:install_pkgs] << " ctdb" + group_defs[:smb_servers][:services].push "ctdb" +else + group_defs[:smb_servers][:services].push "winbind" + group_defs[:smb_servers][:services].push "smb" + group_defs[:smb_servers][:services].push "nmb" +end + +#============================================================================== +# +# active_vms - Keep track of currently running VMs, since vagrant won't tell +# us directly. +# + +active_vms = [] + +f = File.join(projectdir, 'active_vms.yaml') + +if File.exists?(f) + active_vms = YAML::load_file f +end + +if ARGV[0] == "up" + cmd_names = ARGV.drop(1).delete_if { |x| x.start_with?("-") or active_vms.include?(x) } + if cmd_names.length > 0 then + active_vms.push(*cmd_names) + else + vms.each do |x| + if not active_vms.include?(x[:name]) + active_vms.push x[:name] + end + end + end +elsif ARGV[0] == "destroy" or ARGV[0] == "halt" + cmd_names = ARGV.drop(1).delete_if { |x| x.start_with?("-") or not active_vms.include?(x) } + if cmd_names.length > 0 then + active_vms.delete_if { |x| cmd_names.include?(x) } + else + active_vms = [] + end +end + +File.open(f, 'w+') do |file| + file.write active_vms.to_yaml +end + +if ENV['VAGRANT_LOG'] == 'debug' + p "active_vms: #{active_vms}" +end + +#============================================================================== +# +# Build group listings +# + +groups.each do |name,group| + if group.include? "all" + groups[name] = active_vms + else + group.each_with_index do |node,i| + case node + when "first" + groups[name][i] = active_vms[0] + when "last" + groups[name][i] = active_vms[-1] + when "not first" + groups[name] = active_vms.count > 1 ? active_vms[1..-1] : [ active_vms[0] ] + when "not last" + groups[name] = active_vms.count > 1 ? active_vms[0..-2] : [ active_vms[0] ] + when node.is_a?(Integer) + groups[name][i] = active_vms[node] + end + end + end +end +if ad[:setup_ad] and not groups.keys.include? 
"ad_server" + groups[:ad_server] = group[:smb_servers][0] +end + +#============================================================================== +# +# Collect packages to install and services to run +# + +install_pkgs = {} +services = {} +if active_vms.length > 0 + active_vms.each do |name| + install_pkgs[name] = "python python-simplejson libselinux-python xfsprogs gnupg " + if vms_common[:install_pkgs] + install_pkgs[name] << " " + vms_common[:install_pkgs] + end + + services[name] = [] + if vms_common[:services] + services[name].push vms_common[:services] + end + end + groups.each do |name,group| + group.each do |node| + if group_defs and group_defs[name] and group_defs[name][:install_pkgs] + install_pkgs[node] << group_defs[name][:install_pkgs] + end + if group_vars and group_vars[name] and group_vars[name][:install_pkgs] + install_pkgs[node] << " " + group_vars[name][:install_pkgs] + end + + if group_defs and group_defs[name] and group_defs[name][:services] + services[node].push group_defs[name][:services] + end + if group_vars and group_vars[name] and group_vars[name][:services] + services[node].push group_vars[name][:services] + end + end + end + vms.each do |vm| + if vm['install_pkgs'] + install_pkgs[name] << " " + vm['install_pkgs'] + end + if vm['services'] + services[name].push vm[:services] + end + end +end + +#============================================================================== +# +# Vagrant config +# + +Vagrant.configure(VAGRANTFILE_API_VERSION) do |config| + if Vagrant.has_plugin?("vagrant-cachier") + config.cache.scope = :box + config.cache.auto_detect = false + config.cache.enable :yum + end + + config.ssh.insert_key = false + + vms.each do |machine| + config.vm.define machine[:name] do |node| + node.vm.box = vms_common[:box] + node.vm.provider :libvirt do |domain| + domain.memory = vms_common[:memory] + domain.cpus = vms_common[:cpus] + end + + if vms_common[:disks] + vms_common[:disks].each do |disk| + node.vm.provider :libvirt do |lv| + lv.storage :file, :size => "#{disk[:size]}G", :device => "#{disk[:dev_names][:libvirt]}" + disk[:dev] = disk[:dev_names][:libvirt] + end + end + end + + if vms_common[:networks] + vms_common[:networks].each do |net| + netid = net[:netid] + netopts = net.except(:netid) + i = vms_common[:networks].index(net) + if machine[:networks] and i < machine[:networks].length + netopts.merge!(machine[:networks][i]) + end + node.vm.network netid, netopts + end + end + + if vms_common[:sync_folders] + vms_common[:sync_folders].each do |sync| + src = sync[:src] + dest = sync[:dest] + syncopts = sync.except(:src, :dest) + node.vm.synced_folder src, dest, syncopts + end + end + if machine[:sync_folders] + machine[:sync_folders].each do |sync| + src = sync[:src] + dest = sync[:dest] + syncopts = sync.except(:src, :dest) + node.vm.synced_folder src, dest, syncopts + end + end + + end + end + + if active_vms.length > 0 then + config.vm.define active_vms[0], primary: true do |node| + if ad[:setup_ad] + print "AD Administrator password: " + ad_passwd = STDIN.noecho(&:gets) + end + + system 'mkdir', '-p', 'playbooks/host_vars/' + active_vms.each do |node| + host_vars = {} + host_vars['install_pkgs'] = install_pkgs[node] + host_vars['services'] = services[node] + File.open('playbooks/host_vars/' + node.to_s, 'w+') do |file| + file.write host_vars.to_yaml + end + end + + playbooks = [] + playbooks.push("playbooks/raw-el7.yml") + custom_provision = "playbooks/custom.yml" + if File.exists?(custom_provision) + playbooks.push(custom_provision) + end + 
playbooks.push("playbooks/storage-ha.yml") + playbooks.each do |playbook| + node.vm.provision "ansible" do |ansible| +# ansible.verbose = "vvv" + ansible.playbook = playbook + ansible.groups = {} + groups.each do |name,group| + ansible.groups[name.to_s] = group + end + ansible.extra_vars = { + "el_ver" => EL_VER, + "extra_disks" => vms_common[:disks], + "vips" => ha[:virtual_ips], + "ha_name" => ha[:ha_name], + "samba" => samba, + "ctdb" => ctdb, + "ad" => ad, + "gluster" => gluster, + } + if ad[:setup_ad] + ansible.extra_vars['ad_passwd'] = ad_passwd + end + if vms_common[:extra_vars] + ansible.extra_vars.merge! vms_common[:extra_vars] + end + if ENV['EXTRA_VARS'] + ansible.extra_vars.merge! eval ENV['EXTRA_VARS'] + end + ansible.limit = "all" + end + end + end + end +end diff --git a/vagrant-ansible/playbooks/files/99-no-dns.conf b/vagrant-ansible/playbooks/files/99-no-dns.conf new file mode 100644 index 0000000..d435aba --- /dev/null +++ b/vagrant-ansible/playbooks/files/99-no-dns.conf @@ -0,0 +1,2 @@ +[main] +dns=none diff --git a/vagrant-ansible/playbooks/files/ctdb b/vagrant-ansible/playbooks/files/ctdb new file mode 100644 index 0000000..29544b1 --- /dev/null +++ b/vagrant-ansible/playbooks/files/ctdb @@ -0,0 +1,4 @@ +CTDB_NODES=/etc/ctdb/nodes +CTDB_RECOVERY_LOCK=/gluster/lock/lockfile +CTDB_MANAGES_SAMBA="no" +CTDB_MANAGES_WINBIND="no" diff --git a/vagrant-ansible/playbooks/files/export.conf.j2 b/vagrant-ansible/playbooks/files/export.conf.j2 new file mode 100644 index 0000000..53c9892 --- /dev/null +++ b/vagrant-ansible/playbooks/files/export.conf.j2 @@ -0,0 +1,20 @@ +EXPORT{ + Export_Id = 90; + Path="/{{ item.name }}"; +{% if item.FSAL|upper == "GLUSTER" %} + FSAL { + name = "GLUSTER"; + hostname="$(hostname -s)"; + volume="{{ item.name }}"; + } +{% endif %} + Access_type = RW; + Squash = No_root_squash; + Disable_ACL = TRUE; + Pseudo="/{{ item.name }}"; + Protocols = "3,4" ; + Transports = "UDP,TCP" ; + SecType = "sys"; + Tag = "volume_export"; +} +END diff --git a/vagrant-ansible/playbooks/files/nodes.j2 b/vagrant-ansible/playbooks/files/nodes.j2 new file mode 100644 index 0000000..1e40cc9 --- /dev/null +++ b/vagrant-ansible/playbooks/files/nodes.j2 @@ -0,0 +1,3 @@ +{% for host in groups['smb_servers'] %} +{{hostvars[host]['ansible_eth0']['ipv4']['address']}} +{% endfor %} diff --git a/vagrant-ansible/playbooks/files/resolv.conf.j2 b/vagrant-ansible/playbooks/files/resolv.conf.j2 new file mode 100644 index 0000000..ec6dd0f --- /dev/null +++ b/vagrant-ansible/playbooks/files/resolv.conf.j2 @@ -0,0 +1,3 @@ +nameserver {{ ad['dns'] }} +search {{ ad['domain'] }} +domain {{ ad['domain'] }} diff --git a/vagrant-ansible/playbooks/files/smb.conf.j2 b/vagrant-ansible/playbooks/files/smb.conf.j2 new file mode 100644 index 0000000..741794c --- /dev/null +++ b/vagrant-ansible/playbooks/files/smb.conf.j2 @@ -0,0 +1,51 @@ +[global] + netbios name = {{ ha_name|upper }} +{% if ad['setup_ad'] %} + realm = {{ ad['domain']|upper }} + workgroup = {{ ad['domain']|upper|regex_replace('^([^\\.]*).*$', '\\1') }} + security = ADS + winbind enum users = Yes + winbind enum groups = Yes + winbind use default domain = Yes + idmap config {{ ad['domain']|upper|regex_replace('^([^\\.]*).*$', '\\1') }}:range = 500-40000 + idmap config {{ ad['domain']|upper|regex_replace('^([^\\.]*).*$', '\\1') }}:backend = autorid + idmap config *:range = 70001-80000 + idmap config * : backend = tdb +{% else %} + workgroup = WORKGROUP + security = user +{% endif %} +{%- if ctdb['setup_ctdb'] %} + clustering = Yes +{% endif %} 
+ log file = /var/log/samba/log.%m + max log size = 50 + server max protocol = SMB3 + load printers = No + disable spoolss = Yes + show add printer wizard = No + stat cache = No + printing = bsd + cups options = raw + print command = lpr -r -P'%p' %s + lpq command = lpq -P'%p' + lprm command = lprm -P'%p' %j + map archive = No + map readonly = no + store dos attributes = Yes + kernel share modes = No + debug pid = yes +{% if samba['config'] %} +{% for opt in samba['config'] %} + {{ opt }} = {{ samba['config'][opt] }} +{% endfor %} +{% endif %} + +{% if samba['shares'] %} +{% for share in samba['shares'] %} +[{{share}}] +{% for opt in samba['shares'][share] %} + {{ opt }} = {{ samba['shares'][share][opt] }} +{% endfor %} +{% endfor %} +{% endif %} diff --git a/vagrant-ansible/playbooks/files/storage-ha.conf.j2 b/vagrant-ansible/playbooks/files/storage-ha.conf.j2 new file mode 100644 index 0000000..93065a7 --- /dev/null +++ b/vagrant-ansible/playbooks/files/storage-ha.conf.j2 @@ -0,0 +1,28 @@ +# {{ ansible_managed }} +# Name of the HA cluster created. +HA_NAME="{{ ha_name }}" + +# Password of the hacluster user +HA_PASSWORD="hacluster" + +# The server on which cluster-wide configuration is managed. +# IP/Hostname +HA_SERVER="{{ hostvars[groups['ha_servers'][0]]['ansible_hostname'] }}" + +# The set of nodes that forms the HA cluster. +# Comma-deliminated IP/Hostname list +HA_NODES="{%- for host in groups['ha_servers'] -%}{{hostvars[host]['ansible_hostname']}}{% if not loop.last %},{% endif %}{%- endfor -%}" + +# [OPTIONAL] A subset of HA nodes that will serve as storage servers. +# Comma-deliminated IP/Hostname list +STORAGE_NODES="{%- for host in groups['gluster_servers'] -%}{{hostvars[host]['ansible_hostname']}}{% if not loop.last %},{% endif %}{%- endfor -%}" + +# Virtual IPs of each of the nodes specified above. +# Whitespace-deliminated IP address list +HA_VIPS="{{ vips|join(' ') }}" + +# Managed access methods +# Whitespace-delimited list. 
Valid values: +# nfs +# smb +HA_SERVICES="smb" diff --git a/vagrant-ansible/playbooks/group_vars/nfs_servers b/vagrant-ansible/playbooks/group_vars/nfs_servers new file mode 100644 index 0000000..4c38e94 --- /dev/null +++ b/vagrant-ansible/playbooks/group_vars/nfs_servers @@ -0,0 +1,2 @@ +--- +nfs: true diff --git a/vagrant-ansible/playbooks/group_vars/smb_servers b/vagrant-ansible/playbooks/group_vars/smb_servers new file mode 100644 index 0000000..dbbf3a4 --- /dev/null +++ b/vagrant-ansible/playbooks/group_vars/smb_servers @@ -0,0 +1,2 @@ +--- +smb: true diff --git a/vagrant-ansible/playbooks/group_vars/storage-ha b/vagrant-ansible/playbooks/group_vars/storage-ha new file mode 100644 index 0000000..c6a83e7 --- /dev/null +++ b/vagrant-ansible/playbooks/group_vars/storage-ha @@ -0,0 +1,3 @@ +--- +ansible_ssh_user: vagrant +ansible_ssh_private_key_file: ~/.vagrant.d/insecure_private_key diff --git a/vagrant-ansible/playbooks/raw-centos.yml b/vagrant-ansible/playbooks/raw-centos.yml new file mode 100644 index 0000000..8724563 --- /dev/null +++ b/vagrant-ansible/playbooks/raw-centos.yml @@ -0,0 +1,20 @@ +--- +- hosts: all + name: Raw Installation (RedHat) + sudo: yes + gather_facts: False + + tasks: + - name: Make sure playbook dependencies are installed + raw: > + for REPO in `find /home/vagrant/sync/playbooks -name '*.repo'`; do + cp $REPO /etc/yum.repos.d/; + done; + cd /etc/yum.repos.d/; + curl http://download.gluster.org/pub/gluster/glusterfs/LATEST/EPEL.repo/glusterfs-epel.repo; + curl http://download.gluster.org/pub/gluster/glusterfs/nfs-ganesha/2.1.0/EPEL.repo/nfs-ganesha.repo; + curl http://download.gluster.org/pub/gluster/glusterfs/samba/EPEL.repo/glusterfs-samba-epel.repo; + rpm --import http://download.gluster.org/pub/gluster/glusterfs/samba/EPEL.repo/pub.key; + rpm --import http://dl.fedoraproject.org/pub/epel/RPM-GPG-KEY-EPEL-{{ el_ver }}; + yum install -y http://dl.fedoraproject.org/pub/epel/epel-release-latest-{{ el_ver }}.noarch.rpm; + yum install -y {{ install_pkgs }}; diff --git a/vagrant-ansible/playbooks/roles/common/defaults/main.yml b/vagrant-ansible/playbooks/roles/common/defaults/main.yml new file mode 100644 index 0000000..ee8c919 --- /dev/null +++ b/vagrant-ansible/playbooks/roles/common/defaults/main.yml @@ -0,0 +1,13 @@ +--- +firewall_allowed_tcp_ports: + - "22" + - "25" +firewall_allowed_udp_ports: [] +firewall_forwarded_tcp_ports: [] +firewall_forwarded_udp_ports: [] +firewall_additional_rules: [] +firewall_log_dropped_packets: true +firewall_ports: [] +firewall_interfaces: [] +firewall_services: + - "ssh" diff --git a/vagrant-ansible/playbooks/roles/common/handlers/main.yml b/vagrant-ansible/playbooks/roles/common/handlers/main.yml new file mode 100644 index 0000000..af4889b --- /dev/null +++ b/vagrant-ansible/playbooks/roles/common/handlers/main.yml @@ -0,0 +1,4 @@ +--- +- name: restart firewall + command: service iptables save + command: service firewall restart diff --git a/vagrant-ansible/playbooks/roles/common/tasks/main.yml b/vagrant-ansible/playbooks/roles/common/tasks/main.yml new file mode 100644 index 0000000..82a8526 --- /dev/null +++ b/vagrant-ansible/playbooks/roles/common/tasks/main.yml @@ -0,0 +1,45 @@ +--- +# Include variables and define needed variables. +#- name: Include OS-specific variables. +# include_vars: "{{ ansible_os_family }}.yml" + +# Setup/install tasks. 
+#- include: setup-RedHat.yml +# when: ansible_os_family == 'RedHat' + +#- include: setup-Debian.yml +# when: ansible_os_family == 'Debian' + +- name: Enable firewall + service: name=firewalld state=started enabled=yes + +- name: Assign firewall interfaces + command: "firewall-cmd --permanent --add-interface={{ item }}" + with_items: "{{ firewall_interfaces }}" + when: firewall_interfaces + +- name: Enable firewall services + firewalld: service={{ item }} permanent=true state=enabled + with_items: "{{ firewall_services }}" + when: firewall_services + +- name: Enable firewall port + firewalld: port={{ item }} permanent=true state=enabled + with_items: "{{ firewall_ports }}" + when: firewall_ports + +- name: Reload firewall config + command: "firewall-cmd --reload" + +#- name: Flush iptables the first time playbook runs. +# command: iptables -F creates=/etc/init.d/firewall + +#- name: Copy firewall script into place. +# template: src=firewall.bash.j2 dest=/etc/firewall.bash owner=root group=root mode=0744 +# notify: restart firewall + +#- name: Copy firewall init script into place. +# template: src=firewall.j2 dest=/etc/init.d/firewall owner=root group=root mode=0755 + +#- name: Ensure the firewall is enabled and will start on boot. +# service: name=firewall state=started enabled=yes diff --git a/vagrant-ansible/playbooks/roles/common/tasks/setup-Debian.yml b/vagrant-ansible/playbooks/roles/common/tasks/setup-Debian.yml new file mode 100644 index 0000000..2b9f71f --- /dev/null +++ b/vagrant-ansible/playbooks/roles/common/tasks/setup-Debian.yml @@ -0,0 +1,4 @@ +--- +- name: Ensure common packages are installed. + apt: pkg=iptables state=installed + when: ansible_os_family == 'Debian' diff --git a/vagrant-ansible/playbooks/roles/common/tasks/setup-RedHat.yml b/vagrant-ansible/playbooks/roles/common/tasks/setup-RedHat.yml new file mode 100644 index 0000000..be6ec27 --- /dev/null +++ b/vagrant-ansible/playbooks/roles/common/tasks/setup-RedHat.yml @@ -0,0 +1,40 @@ +--- +# EPEL +- name: Import EPEL GPG key. + rpm_key: + key: "http://dl.fedoraproject.org/pub/epel/RPM-GPG-KEY-EPEL-{{ ansible_distribution_version[:1] }}" + state: present + when: ansible_distribution == 'CentOS' or ansible_distribution == 'Red Hat Enterprise Linux' + +- name: Install EPEL repo. + yum: pkg="http://dl.fedoraproject.org/pub/epel/epel-release-latest-{{ ansible_distribution_version[:1] }}.noarch.rpm" state=installed + when: ansible_distribution == 'CentOS' or ansible_distribution == 'Red Hat Enterprise Linux' + +# NFS +- name: Fetch NFS-Ganesha repo file + get_url: dest=/etc/yum.repos.d/nfs-ganesha.repo + url=http://download.gluster.org/pub/gluster/glusterfs/nfs-ganesha/2.1.0/EPEL.repo/nfs-ganesha.repo + +- name: Set NFS-Ganesha repo file permissions + file: owner=root group=root mode=0644 + path=/etc/yum.repos.d/nfs-ganesha.repo + +- name: Import NFS-Ganesha GPG key. + rpm_key: + key: "http://download.gluster.org/pub/gluster/glusterfs/nfs-ganesha/2.1.0/EPEL.repo/pub.key" + state: present + +# SMB +- name: Fetch Gluster Samba repo file + get_url: dest=/etc/yum.repos.d/gluster-samba.repo + url=http://download.gluster.org/pub/gluster/glusterfs/samba/EPEL.repo/glusterfs-samba-epel.repo + +- name: Set Gluster Samba repo file permissions + file: owner=root group=root mode=0644 + path=/etc/yum.repos.d/gluster-samba.repo + +- name: Import Gluster Samba GPG key. 
+ rpm_key: + key: "http://download.gluster.org/pub/gluster/glusterfs/samba/EPEL.repo/pub.key" + state: present + diff --git a/vagrant-ansible/playbooks/roles/common/templates/firewall.bash.j2 b/vagrant-ansible/playbooks/roles/common/templates/firewall.bash.j2 new file mode 100644 index 0000000..bbe6436 --- /dev/null +++ b/vagrant-ansible/playbooks/roles/common/templates/firewall.bash.j2 @@ -0,0 +1,83 @@ +#!/bin/bash +# iptables firewall for common LAMP servers. +# +# This file should be located at /etc/firewall.bash, and is meant to work with +# Jeff Geerling's firewall init script. +# +# Common port reference: +# 22: SSH +# 25: SMTP +# 80: HTTP +# 123: DNS +# 443: HTTPS +# 2222: SSH alternate +# 4949: Munin +# 6082: Varnish admin +# 8080: HTTP alternate (often used with Tomcat) +# 8983: Tomcat HTTP +# 8443: Tomcat HTTPS +# 9000: SonarQube +# +# @author Jeff Geerling + +# No spoofing. +if [ -e /proc/sys/net/ipv4/conf/all/rp_filter ] +then +for filter in /proc/sys/net/ipv4/conf/*/rp_filter +do +echo 1 > $filter +done +fi + +# Remove all rules and chains. +iptables -F +iptables -X + +# Accept traffic from loopback interface (localhost). +iptables -A INPUT -i lo -j ACCEPT + +# Forwarded ports. +{# Add a rule for each forwarded port #} +{% for forwarded_port in firewall_forwarded_tcp_ports %} +iptables -t nat -I PREROUTING -p tcp --dport {{ forwarded_port.src }} -j REDIRECT --to-port {{ forwarded_port.dest }} +iptables -t nat -I OUTPUT -p tcp -o lo --dport {{ forwarded_port.src }} -j REDIRECT --to-port {{ forwarded_port.dest }} +{% endfor %} +{% for forwarded_port in firewall_forwarded_udp_ports %} +iptables -t nat -I PREROUTING -p udp --dport {{ forwarded_port.src }} -j REDIRECT --to-port {{ forwarded_port.dest }} +iptables -t nat -I OUTPUT -p udp -o lo --dport {{ forwarded_port.src }} -j REDIRECT --to-port {{ forwarded_port.dest }} +{% endfor %} + +# Open ports. +{# Add a rule for each open port #} +{% for port in firewall_allowed_tcp_ports %} +iptables -A INPUT -p tcp -m tcp --dport {{ port }} -j ACCEPT +{% endfor %} +{% for port in firewall_allowed_udp_ports %} +iptables -A INPUT -p udp -m udp --dport {{ port }} -j ACCEPT +{% endfor %} + +# Accept icmp ping requests. +iptables -A INPUT -p icmp -j ACCEPT + +# Allow NTP traffic for time synchronization. +iptables -A OUTPUT -p udp --dport 123 -j ACCEPT +iptables -A INPUT -p udp --sport 123 -j ACCEPT + +# Additional custom rules. +{% for rule in firewall_additional_rules %} +{{ rule }} +{% endfor %} + +# Allow established connections: +iptables -A INPUT -m state --state ESTABLISHED,RELATED -j ACCEPT + +# Log EVERYTHING (ONLY for Debug). +# iptables -A INPUT -j LOG + +{% if firewall_log_dropped_packets %} +# Log other incoming requests (all of which are dropped) at 15/minute max. +iptables -A INPUT -m limit --limit 15/minute -j LOG --log-level 7 --log-prefix "Dropped by firewall: " +{% endif %} + +# Drop all other traffic. +iptables -A INPUT -j DROP diff --git a/vagrant-ansible/playbooks/roles/common/templates/firewall.j2 b/vagrant-ansible/playbooks/roles/common/templates/firewall.j2 new file mode 100644 index 0000000..870603b --- /dev/null +++ b/vagrant-ansible/playbooks/roles/common/templates/firewall.j2 @@ -0,0 +1,41 @@ +#! /bin/sh +# /etc/init.d/firewall +# +# Firewall init script, to be used with /etc/firewall.bash by Jeff Geerling. 
+# +# @author Jeff Geerling + +### BEGIN INIT INFO +# Provides: firewall +# Required-Start: $remote_fs $syslog +# Required-Stop: $remote_fs $syslog +# Default-Start: 2 3 4 5 +# Default-Stop: 0 1 6 +# Short-Description: Start firewall at boot time. +# Description: Enable the firewall. +### END INIT INFO + +# Carry out specific functions when asked to by the system +case "$1" in + start) + echo "Starting firewall." + /etc/firewall.bash + ;; + stop) + echo "Stopping firewall." + iptables -F + ;; + restart) + echo "Restarting firewall." + /etc/firewall.bash + ;; + status) + echo -e "`iptables -L -n`" + ;; + *) + echo "Usage: /etc/init.d/firewall {start|stop|status|restart}" + exit 1 + ;; +esac + +exit 0 \ No newline at end of file diff --git a/vagrant-ansible/playbooks/roles/glusterfs/defaults/main.yml b/vagrant-ansible/playbooks/roles/glusterfs/defaults/main.yml new file mode 100644 index 0000000..5521b52 --- /dev/null +++ b/vagrant-ansible/playbooks/roles/glusterfs/defaults/main.yml @@ -0,0 +1,2 @@ +--- +glusterfs_default_release: "" diff --git a/vagrant-ansible/playbooks/roles/glusterfs/files/glusterfs-epel.repo b/vagrant-ansible/playbooks/roles/glusterfs/files/glusterfs-epel.repo new file mode 100644 index 0000000..2fc86df --- /dev/null +++ b/vagrant-ansible/playbooks/roles/glusterfs/files/glusterfs-epel.repo @@ -0,0 +1,25 @@ +# Place this file in your /etc/yum.repos.d/ directory + +[glusterfs-epel] +name=GlusterFS is a clustered file-system capable of scaling to several petabytes. +baseurl=http://download.gluster.org/pub/gluster/glusterfs/LATEST/EPEL.repo/epel-$releasever/$basearch/ +enabled=1 +skip_if_unavailable=1 +gpgcheck=1 +gpgkey=http://download.gluster.org/pub/gluster/glusterfs/LATEST/EPEL.repo/pub.key + +[glusterfs-noarch-epel] +name=GlusterFS is a clustered file-system capable of scaling to several petabytes. +baseurl=http://download.gluster.org/pub/gluster/glusterfs/LATEST/EPEL.repo/epel-$releasever/noarch +enabled=1 +skip_if_unavailable=1 +gpgcheck=1 +gpgkey=http://download.gluster.org/pub/gluster/glusterfs/LATEST/EPEL.repo/pub.key + +[glusterfs-source-epel] +name=GlusterFS is a clustered file-system capable of scaling to several petabytes. - Source +baseurl=http://download.gluster.org/pub/gluster/glusterfs/LATEST/EPEL.repo/epel-$releasever/SRPMS +enabled=0 +skip_if_unavailable=1 +gpgcheck=1 +gpgkey=http://download.gluster.org/pub/gluster/glusterfs/LATEST/EPEL.repo/pub.key diff --git a/vagrant-ansible/playbooks/roles/glusterfs/tasks/main.yml b/vagrant-ansible/playbooks/roles/glusterfs/tasks/main.yml new file mode 100644 index 0000000..5274fab --- /dev/null +++ b/vagrant-ansible/playbooks/roles/glusterfs/tasks/main.yml @@ -0,0 +1,14 @@ +--- +# Include variables and define needed variables. +- name: Include OS-specific variables. + include_vars: "{{ ansible_os_family }}.yml" + +# Setup/install tasks. +#- include: setup-RedHat.yml +# when: ansible_os_family == 'RedHat' + +#- include: setup-Debian.yml +# when: ansible_os_family == 'Debian' + +- name: Ensure GlusterFS is started and enabled at boot. + service: "name={{ glusterfs_daemon }} state=started enabled=yes" diff --git a/vagrant-ansible/playbooks/roles/glusterfs/tasks/setup-Debian.yml b/vagrant-ansible/playbooks/roles/glusterfs/tasks/setup-Debian.yml new file mode 100644 index 0000000..0edd1ae --- /dev/null +++ b/vagrant-ansible/playbooks/roles/glusterfs/tasks/setup-Debian.yml @@ -0,0 +1,9 @@ +--- +- name: Ensure GlusterFS is installed. 
+ apt: + name: "{{ item }}" + state: installed + default_release: "{{ glusterfs_default_release }}" + with_items: + - glusterfs-server + - glusterfs-client diff --git a/vagrant-ansible/playbooks/roles/glusterfs/tasks/setup-RedHat.yml b/vagrant-ansible/playbooks/roles/glusterfs/tasks/setup-RedHat.yml new file mode 100644 index 0000000..1e187f9 --- /dev/null +++ b/vagrant-ansible/playbooks/roles/glusterfs/tasks/setup-RedHat.yml @@ -0,0 +1,16 @@ +--- +- name: Fetch GlusterFS repo file + get_url: dest=/etc/yum.repos.d/glusterfs-epel.repo + url=http://download.gluster.org/pub/gluster/glusterfs/LATEST/EPEL.repo/glusterfs-epel.repo + +- name: Set GlusterFS repo file permissions + file: owner=root group=root mode=0644 + path=/etc/yum.repos.d/glusterfs-epel.repo + +- name: Import GlusterFS GPG key. + rpm_key: + key: "http://download.gluster.org/pub/gluster/glusterfs/LATEST/pub.key" + state: present + +- name: Ensure GlusterFS is installed. + yum: "name=glusterfs-server,glusterfs-client state=installed" diff --git a/vagrant-ansible/playbooks/roles/glusterfs/vars/Debian.yml b/vagrant-ansible/playbooks/roles/glusterfs/vars/Debian.yml new file mode 100644 index 0000000..13c595f --- /dev/null +++ b/vagrant-ansible/playbooks/roles/glusterfs/vars/Debian.yml @@ -0,0 +1,2 @@ +--- +glusterfs_daemon: glusterfs-server diff --git a/vagrant-ansible/playbooks/roles/glusterfs/vars/RedHat.yml b/vagrant-ansible/playbooks/roles/glusterfs/vars/RedHat.yml new file mode 100644 index 0000000..e931068 --- /dev/null +++ b/vagrant-ansible/playbooks/roles/glusterfs/vars/RedHat.yml @@ -0,0 +1,2 @@ +--- +glusterfs_daemon: glusterd diff --git a/vagrant-ansible/playbooks/roles/storage-ha/defaults/main.yml b/vagrant-ansible/playbooks/roles/storage-ha/defaults/main.yml new file mode 100644 index 0000000..a441c8a --- /dev/null +++ b/vagrant-ansible/playbooks/roles/storage-ha/defaults/main.yml @@ -0,0 +1,3 @@ +--- +smb: false +nfs: false diff --git a/vagrant-ansible/playbooks/roles/storage-ha/files/storage-ha.repo b/vagrant-ansible/playbooks/roles/storage-ha/files/storage-ha.repo new file mode 100644 index 0000000..84dcd9d --- /dev/null +++ b/vagrant-ansible/playbooks/roles/storage-ha/files/storage-ha.repo @@ -0,0 +1,7 @@ +[storage-ha] +name=Storage-HA RPMs +baseurl=file:///shared/repo/el$releasever/ +enabled=1 +skip_if_unavailable=0 +gpgcheck=0 + diff --git a/vagrant-ansible/playbooks/roles/storage-ha/handlers/main.yml b/vagrant-ansible/playbooks/roles/storage-ha/handlers/main.yml new file mode 100644 index 0000000..7140473 --- /dev/null +++ b/vagrant-ansible/playbooks/roles/storage-ha/handlers/main.yml @@ -0,0 +1,5 @@ +--- +# handlers file for storage-ha + +#- name: Reload samba +# service: name={{ samba_service_name }} state=reloaded diff --git a/vagrant-ansible/playbooks/roles/storage-ha/tasks/conf-NFS.yml b/vagrant-ansible/playbooks/roles/storage-ha/tasks/conf-NFS.yml new file mode 100644 index 0000000..e8c44cc --- /dev/null +++ b/vagrant-ansible/playbooks/roles/storage-ha/tasks/conf-NFS.yml @@ -0,0 +1,14 @@ +--- +- name: Configure NFS-Ganesha exports + template: src=export.conf.j2 dest=/etc/glusterfs-ganesha/exports/export.{{item.name}}.conf + with_items: "{{ nfs['exports'] }}" + +- name: Enable NFS-Ganesha exports + shell: > +grep -qs "export.{{ item.name }}.conf" /etc/glusterfs-ganesha/nfs-ganesha.conf +if [[ ! $? 
]]; then + cat << END >> /etc/glusterfs-ganesha/nfs-ganesha.conf +%include "/etc/glusterfs-ganesha/exports/export.{{ item.name }}.conf" +END +fi + with_items: "{{ nfs['exports'] }}" diff --git a/vagrant-ansible/playbooks/roles/storage-ha/tasks/conf-SMB.yml b/vagrant-ansible/playbooks/roles/storage-ha/tasks/conf-SMB.yml new file mode 100644 index 0000000..4a5a548 --- /dev/null +++ b/vagrant-ansible/playbooks/roles/storage-ha/tasks/conf-SMB.yml @@ -0,0 +1,13 @@ +--- +- name: Configure SMB services + service: "name={{ item.daemon }} state={{ item.state }} enabled={{ item.enabled }}" + with_items: + - { daemon: 'ctdb', state: 'stopped', enabled: 'no' } + - { daemon: 'smb', state: 'stopped', enabled: 'no' } + - { daemon: 'nmb', state: 'stopped', enabled: 'no' } + - { daemon: 'winbind', state: 'stopped', enabled: 'no' } + +- name: Configure Samba server + template: src=smb.conf.j2 + dest=/etc/samba/smb.conf + owner=root group=root mode=0644 diff --git a/vagrant-ansible/playbooks/roles/storage-ha/tasks/main.yml b/vagrant-ansible/playbooks/roles/storage-ha/tasks/main.yml new file mode 100644 index 0000000..7b10f3d --- /dev/null +++ b/vagrant-ansible/playbooks/roles/storage-ha/tasks/main.yml @@ -0,0 +1,24 @@ +--- +# tasks file for storage-ha + +#- include_vars: NFS.yml +# when: nfs + +- include_vars: SMB.yml + when: smb + +#- include: conf-NFS.yml +# when: nfs + +- include: conf-SMB.yml + when: smb + +- name: Configure HA user + user: name=hacluster password=buqSogFSZLJQM + +- name: Configure HA services + service: "name={{ item.daemon }} state={{ item.state|default(omit) }} enabled={{ item.enabled|default(omit) }}" + with_items: + - { daemon: 'pcsd', state: 'started', enabled: 'yes' } + - { daemon: 'pacemaker', enabled: 'yes' } + diff --git a/vagrant-ansible/playbooks/roles/storage-ha/tasks/setup-AD.yml b/vagrant-ansible/playbooks/roles/storage-ha/tasks/setup-AD.yml new file mode 100644 index 0000000..570380a --- /dev/null +++ b/vagrant-ansible/playbooks/roles/storage-ha/tasks/setup-AD.yml @@ -0,0 +1,71 @@ +--- +- name: Stop all Samba services + service: name={{item.name}} state={{item.state}} + with_items: + - { name: 'smb', state: 'stopped' } + - { name: 'nmb', state: 'stopped' } + - { name: 'winbind', state: 'stopped' } + - { name: 'ctdb', state: 'stopped' } + +- name: Mount CTDB reclock volume + mount: name=/shared/lock src=localhost:/ctdb fstype=glusterfs opts=_netdev,defaults,direct-io-mode=enable,transport=tcp,xlator-option=*client*.ping-timeout=10 state=mounted + when: ctdb['setup_ctdb'] + +- name: Restart CTDB + service: name={{item.name}} state={{item.state}} + with_items: + - { name: 'ctdb', state: 'restarted' } + when: ctdb['setup_ctdb'] + +- name: Verify CTDB is healthy + shell: while true; do sleep 1; status=$(ctdb status 2>/dev/null); rc=$?; if [ $rc -ne 0 ]; then exit $rc; fi; if ! echo $status | grep -qs 'UNHEALTHY (THIS'; then exit; fi; done + when: ctdb['setup_ctdb'] + +- name: Configure resolv.conf for Active Directory + template: src={{item.src}} dest={{item.dest}} owner=root group=root mode=0744 + with_items: + - { src: 'files/resolv.conf.j2', dest: '/etc/resolv.conf' } + +- name: Is NetworkManager installed? + shell: which nmcli 2>&1 >/dev/null; exit $? 
+ register: detect_nm + changed_when: False + failed_when: False + +- name: Disable NetworkManager DNS + template: src={{item.src}} dest={{item.dest}} owner=root group=root mode=0744 + with_items: + - { src: 'files/99-no-dns.conf', dest: '/etc/NetworkManager/conf.d/99-no-dns.conf' } + when: detect_nm.rc == 0 + +- name: Restart NetworkManager + service: name="NetworkManager" state=restarted + when: detect_nm.rc == 0 + +- name: Configure nsswitch + shell: sed -ri '/^(passwd|group)/s/$/ winbind/' /etc/nsswitch.conf + +- name: Join Active Directory domain + shell: "net join -U Administrator%{{ ad_passwd }}" + run_once: true + delegate_to: "{{ groups['smb_servers'][0] }}" + +- name: Register Active Directory DNS + shell: "net ads -P dns register {{ ha_name }}.{{ ad_domain }} {{ vips|join(' ') }}" + run_once: true + delegate_to: "{{ groups['smb_servers'][0] }}" + +- name: Verify Active Directory domain membership + shell: net ads testjoin + run_once: true + delegate_to: "{{ groups['smb_servers'][0] }}" + +- name: Stop CTDB + service: name={{item.name}} state={{item.state}} + with_items: + - { name: 'ctdb', state: 'stopped' } + when: ctdb['setup_ctdb'] + +- name: Unmount CTDB reclock volume + mount: name=/gluster/lock src=localhost:/ctdb fstype=glusterfs state=unmounted + when: ctdb['setup_ctdb'] diff --git a/vagrant-ansible/playbooks/roles/storage-ha/tasks/setup-RedHat.yml b/vagrant-ansible/playbooks/roles/storage-ha/tasks/setup-RedHat.yml new file mode 100644 index 0000000..ac2f57f --- /dev/null +++ b/vagrant-ansible/playbooks/roles/storage-ha/tasks/setup-RedHat.yml @@ -0,0 +1,4 @@ +--- +- name: Install Storage-HA + yum: pkg=storage-ha,storage-ha-smb state=present + diff --git a/vagrant-ansible/playbooks/roles/storage-ha/templates/smb.conf.j2 b/vagrant-ansible/playbooks/roles/storage-ha/templates/smb.conf.j2 new file mode 100644 index 0000000..7922fd0 --- /dev/null +++ b/vagrant-ansible/playbooks/roles/storage-ha/templates/smb.conf.j2 @@ -0,0 +1,24 @@ +## This file is managed by Ansible, all changes will be lost ## +# +{% macro print_hash(hash) %} +{% for key, value in hash.iteritems() %} + {{ "%-30s" | format(key) | replace("_"," ") }} = {{ value }} +{% endfor %} +{% endmacro %} +{% macro print_section(hash, section='global') %} +[{{ section }}] +{{ print_hash(hash) }} + +{% endmacro %} +# Default options +{{ print_section(samba_global) }} +{% if samba_global_custom is defined and samba_global_custom %} + # Custom options +{{ print_hash(hash=samba_global_custom) }} +{% endif %} +# Share definitions +{% if samba_shares is defined and samba_shares %} +{% for share in samba_shares.keys() %} +{{ print_section(samba_shares[share], share) }} +{% endfor %} +{% endif %} diff --git a/vagrant-ansible/playbooks/roles/storage-ha/vars/NFS.yml b/vagrant-ansible/playbooks/roles/storage-ha/vars/NFS.yml new file mode 100644 index 0000000..cd21505 --- /dev/null +++ b/vagrant-ansible/playbooks/roles/storage-ha/vars/NFS.yml @@ -0,0 +1,2 @@ +--- + diff --git a/vagrant-ansible/playbooks/roles/storage-ha/vars/SMB.yml b/vagrant-ansible/playbooks/roles/storage-ha/vars/SMB.yml new file mode 100644 index 0000000..88f8b8c --- /dev/null +++ b/vagrant-ansible/playbooks/roles/storage-ha/vars/SMB.yml @@ -0,0 +1,48 @@ +--- +# Allow access to Samba through firewall for specified networks +# If samba_allow is undefined or False, allow access from all +samba_allow: [] + +samba_workgroup: 'WORKGROUP' +samba_netbios_name: '{{ ansible_hostname }}' +samba_server_string: '%h file server' + +# Name of the /etc/init.d/ service script 
+samba_service_name: 'smb' + +# Which hash variable is used to configure [global] section in smb.conf +samba_global: '{{ samba_default_global }}' + +# You can specify additional options in a separate hash +samba_global_custom: False + +# Which hash of hashes is used to configure shares in smb.conf +samba_shares: '{{ samba_default_shares }}' + +# Default [global] configuration +samba_default_global: + + # Browsing / Identification + workgroup: '{{ samba_workgroup | default("WORKGROUP") }}' + netbios_name: '{{ samba_netbios_name | default(ansible_hostname) }}' + server_string: '{{ samba_server_string | default("%h file server") }}' + + # Authentication + security: 'user' + passdb_backend: 'tdbsam' + + # Disable printing by default + printing: 'bsd' + load_printers: 'no' + printcap_name: '/dev/null' + show_add_printer_wizard: 'no' + disable_spoolss: 'yes' + + +# Hash of hashes of default shares +samba_default_shares: + + 'homes': + comment: 'Home Directories' + browseable: 'no' + writable: 'yes' diff --git a/vagrant-ansible/playbooks/roles/storage-ha/vars/main.yml b/vagrant-ansible/playbooks/roles/storage-ha/vars/main.yml new file mode 100644 index 0000000..7d08b5e --- /dev/null +++ b/vagrant-ansible/playbooks/roles/storage-ha/vars/main.yml @@ -0,0 +1,2 @@ +--- +# vars file for storage-ha diff --git a/vagrant-ansible/playbooks/storage-ha.yml b/vagrant-ansible/playbooks/storage-ha.yml new file mode 100644 index 0000000..c4f9c06 --- /dev/null +++ b/vagrant-ansible/playbooks/storage-ha.yml @@ -0,0 +1,192 @@ +--- +# file: storage-ha.yml +- hosts: ha_servers + name: Initializing + sudo: yes + + tasks: + - name: Detect guest OS family + group_by: key={{ ansible_os_family }} + changed_when: False + + - name: Set hostname + hostname: "name={{ inventory_hostname }}{% if ad['domain'] is defined and ad['domain'] %}.{{ ad['domain'] }}{% endif %}" + + - name: Disable SELinux + selinux: state=disabled + + - name: Create extra disk partitions + shell: "{ blkid | grep -q /dev/{{item[0].dev}}{{item[1].num}} && echo FOUND; } || { parted /dev/{{item[0].dev}} mklabel msdos && parted /dev/{{item[0].dev}} mkpart primary 512 {{item[1].size}}; }" + register: part_result + changed_when: "'FOUND' not in part_result.stdout" + with_subelements: + - extra_disks + - parts + when: extra_disks is defined + + - name: Create extra disk filesystems + filesystem: fstype={{item[1].fs}} dev=/dev/{{item[0].dev}}{{item[1].num}} + with_subelements: + - extra_disks + - parts + when: extra_disks is defined + + - name: Mount extra disk filesystems + mount: name={{item[1].mount}} src=/dev/{{item[0].dev}}{{item[1].num}} fstype={{item[1].fs}} state=mounted + with_subelements: + - extra_disks + - parts + when: extra_disks is defined + +- hosts: RedHat + name: Server Setup (RedHat) + sudo: yes + gather_facts: False + + tasks: + - include: roles/common/tasks/setup-RedHat.yml + - include: roles/glusterfs/tasks/setup-RedHat.yml + when: gluster['setup_gluster'] + +- hosts: none + name: Server Setup (Common) + sudo: yes + + roles: + - common + + vars: + - firewall_services: + - ssh + - glusterfs + - samba + - samba-client + - nfs + - high-availability + - firewall_ports: + - '4379/tcp' + - firewall_interfaces: + - 'eth0' + - 'eth1' + +- hosts: gluster_servers:smb_servers + name: Server Setup (GlusterFS) + sudo: yes + + roles: + - glusterfs + + vars: + - gluster_default_cluster: "{%- for host in groups['gluster_servers'] -%}{{hostvars[host]['ansible_hostname']}}{% if not loop.last %},{% endif %}{%- endfor -%}" + - gluster_default_replica: "{{ 3 
if groups['gluster_servers']|count >= 3 else (2 if groups['gluster_servers']|count == 2 else omit) }}" + + tasks: + - name: Ensure Gluster brick directories exist. + file: "path={{ [gluster['bricks_dir'], item.name]|join('/') }} state=directory mode=0775" + with_items: "{{ gluster['volumes'] }}" + when: "'gluster_servers' in group_names" + + - name: Probe Samba peers + command: gluster peer probe {{ item }} + with_items: "{{ groups['smb_servers'] }}" + run_once: true + when: "'gluster_servers' in group_names" + register: probe_result + changed_when: "'already' not in probe_result.stdout and 'localhost' not in probe_result.stdout" + + - name: Configure Gluster volumes. + gluster_volume: + state: present + name: "{{ item.name }}" + brick: "{{ [gluster['bricks_dir'], item.name]|join('/') }}" + replicas: > + {% if item.replica is defined and item.replica == 'n' -%} + {{ groups['gluster_servers']|count if groups['gluster_servers']|count > 1 else omit }} + {%- elif item.replica is defined and item.replica == 0 -%} + {{ omit }} + {%- else -%} + {{ item.replica | default(gluster_default_replica) }} + {%- endif %} + cluster: "{{ item.cluster | default(gluster_default_cluster) }}" + options: "{{ item.opts|default(omit) }}" + force: yes + run_once: true + when: "'gluster_servers' in group_names" + with_items: "{{ gluster['volumes'] }}" + register: result + until: result|success + + - name: Start Gluster volumes. + gluster_volume: + name: "{{ item.name }}" + state: started + run_once: true + when: "'gluster_servers' in group_names" + with_items: "{{ gluster['volumes'] }}" + + - name: Set volume permissions + shell: "mount -t glusterfs localhost:/{{ item.name }} /mnt && chmod -c {{ item.root_mode|default('777') }} /mnt; umount /mnt" + with_items: "{{ gluster['volumes'] }}" + run_once: true + when: "'gluster_servers' in group_names" + register: perms_result + changed_when: "'changed' in perms_result.stdout" + +- hosts: RedHat + name: Storage-HA Installation (RedHat) + sudo: yes + gather_facts: False + + tasks: + - include: roles/storage-ha/tasks/setup-RedHat.yml + +- hosts: smb_servers + name: Server Setup (SMB) + sudo: yes + +# roles: +# - samba + + tasks: + - name: Copy CTDB config files + template: src={{item.src}} dest={{item.dest}} owner=root group=root mode=0744 + with_items: + - { src: "{% if ctdb['config'] is defined and ctdb['config'] %}{{ ctdb['config'] }}{% else %}'files/ctdb'{% endif %}", dest: '/etc/sysconfig/ctdb' } + - { src: 'files/nodes.j2', dest: '/etc/ctdb/nodes' } + when: ctdb['setup_ctdb'] is defined and ctdb['setup_ctdb'] + + - name: Copy Samba config files + template: src={{item.src}} dest={{item.dest}} owner=root group=root mode=0744 + with_items: + - { src: "files/smb.conf.j2", dest: '/etc/samba/smb.conf' } + when: samba['setup_samba'] is defined and samba['setup_samba'] + +- hosts: ad_server + name: Active Directory Setup + sudo: yes + + tasks: + - include: roles/storage-ha/tasks/setup-AD.yml + when: ad['setup_ad'] + +- hosts: ha_servers + name: Storage-HA Configuration and Initialization + sudo: yes + + roles: + - storage-ha + + tasks: + - name: Copy Storage-HA config files + template: src={{item.src}} dest={{item.dest}} owner=root group=root mode=0744 + with_items: + - { src: 'files/storage-ha.conf.j2', dest: '/etc/sysconfig/storage-ha.conf' } + - { src: 'files/storage-ha', dest: '/sbin/storage-ha' } + - { src: 'files/CTDB', dest: '/usr/lib/ocf/resource.d/heartbeat/CTDB' } + + - name: Teardown any pre-existing cluster. 
+ shell: pcs cluster stop; pcs cluster destroy + + - name: Start Storage-HA + shell: storage-ha setup + run_once: true
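
Usage note (not part of the patch; a minimal sketch under stated assumptions): with the Vagrantfile above, the workflow is driven from the vagrant-ansible/ directory and assumes the libvirt provider (vagrant-libvirt plugin) is available on the host. The first run only writes vagrant.yaml and exits so the generated settings can be reviewed; a subsequent run creates the VMs and applies the raw bootstrap playbook plus playbooks/storage-ha.yml (and playbooks/custom.yml, if present) through the Ansible provisioner.

    cd vagrant-ansible          # assumes the vagrant-libvirt plugin is installed on the host
    vagrant up                  # first invocation writes vagrant.yaml and exits; review it, then rerun
    vagrant up node1 node2      # optionally bring up a subset; active_vms.yaml tracks the running VMs
    vagrant provision           # rerun the Ansible playbooks against the currently active VMs

If ad[:setup_ad] is enabled in vagrant.yaml, the provisioning run prompts for the AD Administrator password before the playbooks start, and scripts/watch-ha.sh can then be run on a cluster node to watch pcs and ctdb status.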