---
# NOTE: before running this, give the VM some time to boot first.
- name: configure additional services on the services vm
hosts: services
become: yes
tasks:
- name: register and subscribe a system
include_tasks: includes/subscribe-and-update.yml
tags:
- rhsm
- name: install packages
package:
name:
- bind
- bind-chroot
- httpd
- haproxy
- tftp
- tftp-server
- syslinux-tftpboot
- dhcp-server
- java-1.8.0-openjdk
- java-1.8.0-openjdk-headless
- nfs-utils
state: latest
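# a quick map of why each package is here: bind/bind-chroot provide dns,
# httpd serves the web and proxy vhosts, haproxy balances the cluster api
# and ingress, dhcp/tftp/syslinux drive pxe boot, java runs nexus, and
# nfs-utils backs the exports at the end of this play.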
# certificate authority management. the existence checks below (ab)use the
# file module with state=file plus ignore_errors: the registered result's
# .failed is true when the file is missing. the CA key/csr/cert only get
# recreated when the cert is absent or expires within three years.
- name: create a directory for cert stuff
file:
path: "{{ x509_ca_location }}"
owner: root
group: root
mode: 0755
state: directory
- name: check that there is a CA cert
file:
path: "{{ x509_ca_location }}/ca.crt"
state: file
ignore_errors: yes
register: ca_cert_exists
- name: check whether a CA key exists (only when the cert is missing)
file:
path: "{{ x509_ca_location }}/ca.key"
state: file
ignore_errors: yes
register: ca_key_exists
when: ca_cert_exists.failed | default(false)
- name: check that the CA cert will live for at least three more years
openssl_certificate:
provider: assertonly
path: "{{ x509_ca_location }}/ca.crt"
valid_in: "+1095d"
ignore_errors: yes
register: ca_cert_valid
when: not (ca_cert_exists.failed | default(false))
- name: create a new private key
openssl_privatekey:
path: "{{ x509_ca_location }}/ca.key"
passphrase: "{{ x509_ca_pass }}"
cipher: AES-256-CBC
size: 8192
state: present
owner: root
group: root
mode: 0600
when:
- ca_cert_exists.failed | default(false)
- ca_key_exists.failed | default(false)
- name: create a new CSR
openssl_csr:
path: "{{ x509_ca_location }}/ca.csr"
privatekey_path: "{{ x509_ca_location }}/ca.key"
privatekey_passphrase: "{{ x509_ca_pass }}"
common_name: Red Hat GLS Course Signing Certificate
organization_name: Red Hat
organizational_unit_name: GLS
locality_name: Europe
country_name: EU
email_address: [email protected]
key_usage:
- cRLSign
- keyCertSign
key_usage_critical: no
basic_constraints:
- "CA:TRUE"
basic_constraints_critical: no
state: present
owner: root
group: root
mode: 0600
when: ca_cert_exists.failed | default(false)
- name: create a new self-signed ca certificate if needed
openssl_certificate:
select_crypto_backend: pyopenssl
provider: selfsigned
selfsigned_not_after: "{{ x509_validity_until }}"
path: "{{ x509_ca_location }}/ca.crt"
csr_path: "{{ x509_ca_location }}/ca.csr"
privatekey_path: "{{ x509_ca_location }}/ca.key"
privatekey_passphrase: "{{ x509_ca_pass }}"
force: yes
state: present
owner: root
group: root
mode: 0644
when: (ca_cert_exists.failed | default(false)) or (ca_cert_valid.failed | default(false))
# trust our own CA cert
- name: trust our own CA cert
copy:
src: "{{ x509_ca_location }}/ca.crt"
dest: /etc/pki/ca-trust/source/anchors/gls.pem
remote_src: yes
owner: root
group: root
mode: 0644
register: new_ca
- name: extract new trust information
command: /usr/bin/update-ca-trust extract
when: new_ca.changed
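# for reference, a quick manual sanity check of the generated CA (paths per
# x509_ca_location and the anchor copied above) might look like:
#   openssl x509 -in ca.crt -noout -subject -enddate
#   openssl verify -CAfile /etc/pki/ca-trust/source/anchors/gls.pem ca.crt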
# generate other service certificates
- name: generate openvpn certs
vars:
cert_service_name: openvpn
cert_common_name: "{{ vms['services'].hostname }}"
include_tasks: includes/create-cert-artifacts.yml
tags:
- openvpn
- name: generate registry certs
vars:
cert_service_name: nexus
cert_common_name: "registry.{{ parent_domain }}"
include_tasks: includes/create-cert-artifacts.yml
# make openvpn certs available on control node for later
- name: pull CA and openvpn cert, and openvpn key
synchronize:
mode: pull
src: /var/www/html/ca/{{ item }}
dest: "{{ binary_artifact_path }}/{{ item }}"
perms: yes
loop:
- ca.crt
- openvpn.crt
- openvpn.key
tags:
- openvpn
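# the pulled artifacts are enough to build a client config later; a minimal
# sketch (the remote name/port and options must match
# templates/openvpn-server.conf.j2, which is not shown here) would be:
#   client
#   dev tun
#   proto udp
#   remote services.<parent_domain> 1194
#   ca ca.crt
#   cert openvpn.crt
#   key openvpn.key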
# openvpn installation. the rpms are staged on the control node, presumably
# because openvpn is not shipped in the standard RHEL repositories, so we
# install them with a direct yum localinstall.
- name: see if we need to install openvpn
command: rpm -q openvpn
args:
warn: no
register: openvpn_installed
# rpm -q prints "package openvpn is not installed" when it is missing, so
# "installed" in stdout actually flags that an install is needed; the tasks
# below key off .failed (the non-zero exit code) instead
changed_when: '"installed" in openvpn_installed.stdout'
ignore_errors: yes
tags:
- openvpn
- name: copy openvpn packages to /tmp
copy:
src: "{{ binary_artifact_path }}/{{ item }}"
dest: /tmp
owner: root
group: root
mode: 0644
loop:
- openvpn-{{ openvpn_release }}.el8.x86_64.rpm
- pkcs11-helper-{{ pkcs11_release }}.el8.x86_64.rpm
when: openvpn_installed.failed
tags:
- openvpn
- name: install openvpn directly
command: >
/usr/bin/yum -y localinstall
/tmp/openvpn-{{ openvpn_release }}.el8.x86_64.rpm
/tmp/pkcs11-helper-{{ pkcs11_release }}.el8.x86_64.rpm
args:
warn: no
when: openvpn_installed.failed
tags:
- openvpn
- name: get rid of the rpm files
file:
path: "/tmp/{{ item }}"
state: absent
loop:
- openvpn-{{ openvpn_release }}.el8.x86_64.rpm
- pkcs11-helper-{{ pkcs11_release }}.el8.x86_64.rpm
when: openvpn_installed.failed
tags:
- openvpn
- name: make sure we have somewhere to put the certs
file:
path: /etc/openvpn/certs
state: directory
owner: openvpn
group: openvpn
mode: 0755
tags:
- openvpn
- name: publish the ca cert for ovpn
copy:
src: "{{ x509_ca_location }}/ca.crt"
dest: /etc/openvpn/certs/ca.crt
remote_src: yes
owner: openvpn
group: openvpn
mode: 0644
notify: (re)start the openvpn server
tags:
- openvpn
- name: publish the ovpn cert
copy:
src: "{{ x509_ca_location }}/openvpn.crt"
dest: /etc/openvpn/certs/cluster.crt
remote_src: yes
owner: openvpn
group: openvpn
mode: 0644
notify: (re)start the openvpn server
tags:
- openvpn
- name: publish the ovpn key
copy:
src: "{{ x509_ca_location }}/openvpn.key"
dest: /etc/openvpn/certs/cluster.key
remote_src: yes
owner: openvpn
group: openvpn
mode: 0600
notify: (re)start the openvpn server
tags:
- openvpn
- name: create DH params file
command: openssl dhparam -out /etc/openvpn/certs/dh.pem 2048
args:
creates: /etc/openvpn/certs/dh.pem
when: ovpn_enforce_dhparam
notify: (re)start the openvpn server
tags:
- openvpn
- name: fix dhparam ownership and permissions
file:
path: /etc/openvpn/certs/dh.pem
state: file
owner: openvpn
group: openvpn
mode: 0600
when: ovpn_enforce_dhparam
tags:
- openvpn
- name: configure openvpn
template:
src: templates/openvpn-server.conf.j2
dest: /etc/openvpn/server/cluster.conf
owner: root
group: root
mode: 0644
notify: (re)start the openvpn server
tags:
- openvpn
- name: make sure the firewall accepts openvpn
firewalld:
service: openvpn
state: enabled
immediate: yes
permanent: yes
tags:
- openvpn
- name: make sure ip forwarding is on
sysctl:
name: net.ipv4.ip_forward
value: "1"
state: present
reload: yes
tags:
- openvpn
- name: add interface tun0 to trusted zone
firewalld:
interface: tun0
zone: trusted
state: enabled
permanent: yes
immediate: yes
tags:
- openvpn
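# (ip forwarding plus the trusted tun0 interface are what let vpn clients
# reach the cluster networks that sit behind this host)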
# dns server configuration
- name: configure named
template:
src: templates/named.conf.j2
dest: /etc/named.conf
owner: root
group: named
mode: 0640
notify: (re)start the dns server
- name: make sure the firewall accepts dns
firewalld:
service: dns
state: enabled
immediate: yes
permanent: yes
- name: make sure the rfc 1912 zone files exist in the chroot
file:
path: /var/named/chroot/var/named/{{ item }}
src: /var/named/{{ item }}
state: hard
loop:
- named.ca
- named.empty
- named.localhost
- named.loopback
notify: (re)start the dns server
- name: publish zone
template:
src: templates/named-ocp-dns.zone.j2
dest: /var/named/chroot/var/named/{{ parent_domain }}.zone
owner: root
group: named
mode: 0640
notify: (re)start the dns server
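# the zone template itself is not shown here, but for an openshift cluster
# it would typically carry at least the api/api-int names and a wildcard
# for apps, e.g. (hypothetical addresses):
#   api.{{ cluster_name }}    IN A 192.168.100.5
#   *.apps.{{ cluster_name }} IN A 192.168.100.5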
# web server configuration
- name: configure web server
template:
src: templates/httpd.conf.j2
dest: /etc/httpd/conf/httpd.conf
owner: root
group: root
mode: 0644
notify: (re)start the web server
- name: configure proxy catch-all vhost
vars:
create_proxy_config: yes
template:
src: templates/vhost.conf.j2
dest: /etc/httpd/conf.d/00_proxy_all.conf
owner: root
group: root
mode: 0644
notify: (re)start the web server
- name: configure the services vhost
template:
src: templates/vhost.conf.j2
dest: /etc/httpd/conf.d/99_services.conf
owner: root
group: root
mode: 0644
notify: (re)start the web server
- name: make sure the firewall accepts http
firewalld:
service: http
state: enabled
immediate: yes
permanent: yes
- name: httpd needs some seboolean love to connect to haproxy
seboolean:
name: httpd_can_network_connect
state: true
persistent: yes
# dhcp server configuration
- name: configure dhcp server
template:
src: templates/dhcpd.conf.j2
dest: /etc/dhcp/dhcpd.conf
owner: root
group: root
mode: 0644
notify: (re)start the dhcp server
- name: make sure the firewall accepts dhcp
firewalld:
service: dhcp
state: enabled
immediate: yes
permanent: yes
# tftp server configuration
- name: enable tftp server
systemd:
name: tftp.socket
enabled: yes
state: started
- name: make sure the firewall accepts tftp
firewalld:
service: tftp
state: enabled
immediate: yes
permanent: yes
- name: copy pxelinux files to tftp root
copy:
src: /tftpboot/{{ item }}
dest: /var/lib/tftpboot/{{ item }}
remote_src: yes
loop:
- pxelinux.0
- ldlinux.c32
# NOTE: Individual machine boot configs are created in their respective
# provisioning playbooks.
- name: create the pxelinux.cfg directory
file:
path: /var/lib/tftpboot/pxelinux.cfg
state: directory
owner: root
group: root
mode: 0755
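# pxelinux looks for per-machine configs by mac address (prefixed with the
# arp hardware type, 01 for ethernet) before falling back to "default", so
# the provisioning playbooks will drop files such as (hypothetical mac):
#   /var/lib/tftpboot/pxelinux.cfg/01-52-54-00-aa-bb-cc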
# nexus configuration
- name: check if nexus is there and operational
file:
path: /opt/sonatype-work/nexus3/lock
state: file
ignore_errors: yes
register: nexus_is_there
- name: extract the nexus archive
unarchive:
src: "{{ binary_artifact_path }}/nexus-{{ nexus_release }}-unix.tar.gz"
dest: /opt
owner: root
group: root
notify: (re)start nexus
when: nexus_is_there.failed
- name: make sure we have somewhere to put the properties
file:
path: /opt/sonatype-work/nexus3/etc
state: directory
- name: publish nexus.properties
copy:
src: files/nexus.properties
dest: /opt/sonatype-work/nexus3/etc/nexus.properties
owner: root
group: root
mode: 0644
notify: (re)start nexus
- name: publish jetty-https.xml
template:
src: templates/nexus.jetty.xml.j2
dest: /opt/nexus-{{ nexus_release }}/etc/jetty/jetty-https.xml
owner: root
group: root
mode: 0644
notify: (re)start nexus
- name: make sure we have somewhere to put the keystore
file:
path: /opt/sonatype-work/nexus3/etc/ssl
state: directory
- name: create a pkcs12-formatted bundle
openssl_pkcs12:
action: export
path: "{{ x509_ca_location }}/nexus.p12"
passphrase: "{{ nexus_keystore_pass }}"
certificate_path: "{{ x509_ca_location }}/nexus.crt"
privatekey_path: "{{ x509_ca_location }}/nexus.key"
other_certificates: "{{ x509_ca_location }}/ca.crt"
friendly_name: "{{ nexus_key_alias }}"
state: present
owner: root
group: root
mode: 0640
register: pkcs12
- name: remove existing keystore if anything about p12 changed
file:
path: /opt/sonatype-work/nexus3/etc/ssl/keystore.jks
state: absent
when: pkcs12.changed
- name: about time we create a keystore
java_cert:
pkcs12_path: "{{ x509_ca_location }}/nexus.p12"
pkcs12_alias: "{{ nexus_key_alias }}"
pkcs12_password: "{{ nexus_keystore_pass }}"
cert_alias: "{{ nexus_key_alias }}"
keystore_create: yes
keystore_pass: "{{ nexus_keystore_pass }}"
keystore_path: /opt/sonatype-work/nexus3/etc/ssl/keystore.jks
keystore_type: JKS
state: present
notify: (re)start nexus
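# a quick way to sanity-check the resulting keystore (it will prompt for
# nexus_keystore_pass):
#   keytool -list -v -keystore /opt/sonatype-work/nexus3/etc/ssl/keystore.jks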
- name: check if there are any backups to speak of - DB check
become: no
delegate_to: localhost
file:
path: "{{ binary_artifact_path }}/nexus-db-backup.tar.gz"
state: file
ignore_errors: yes
register: db_backup_is_there
tags:
- nexus_restore
- name: check if there are any backups to speak of - blob check
become: no
delegate_to: localhost
file:
path: "{{ binary_artifact_path }}/nexus-blob-backup.tar.gz"
state: file
ignore_errors: yes
register: blob_backup_is_there
tags:
- nexus_restore
- name: evaluate the backup checks above
assert:
fail_msg: |
WARNING: Nexus backup is NOT present on the control node. Skipping restore.
You will have to run the first cluster installation with internet access in
order to seed the Nexus repository.
success_msg: |
GOOD: Nexus backup found on the control node. Will do a restore next.
that:
- not db_backup_is_there.failed
- not blob_backup_is_there.failed
ignore_errors: yes
register: to_do_or_not_to_do
tags:
- nexus_restore
# TODO: recreate backups without registry authentication options!
# https://support.sonatype.com/hc/en-us/articles/115013153887-Docker-Repository-Configuration-and-Client-Connection
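# NOTE on the "evaluated_to is undefined" conditions below: a failed assert
# registers evaluated_to: false, while a passing one leaves it unset, so
# "undefined" effectively means "both backups were found, restore them".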
- name: make sure we have somewhere to put the backups
file:
path: /opt/sonatype-work/nexus3/restore-from-backup
state: directory
when: to_do_or_not_to_do.evaluated_to is undefined
tags:
- nexus_restore
- name: put the config backups where they belong
unarchive:
src: "{{ binary_artifact_path }}/nexus-db-backup.tar.gz"
dest: /opt/sonatype-work/nexus3/restore-from-backup
owner: root
group: root
when: to_do_or_not_to_do.evaluated_to is undefined
notify: (re)start nexus
tags:
- nexus_restore
- name: extract the nexus blob backups
unarchive:
src: "{{ binary_artifact_path }}/nexus-blob-backup.tar.gz"
dest: /opt/sonatype-work/nexus3
owner: root
group: root
when: to_do_or_not_to_do.evaluated_to is undefined
notify: (re)start nexus
tags:
- nexus_restore
- name: add nexus.service for systemd
template:
src: templates/nexus.service.j2
dest: /etc/systemd/system/nexus.service
owner: root
group: root
mode: 0644
notify: (re)start nexus
- name: make sure the firewall is open for nexus
firewalld:
port: "{{ item }}"
state: enabled
immediate: yes
permanent: yes
loop:
- 5000/tcp # docker registry connector (matches the registry cert above)
- 8081/tcp # nexus web ui / rest api (http)
- 8443/tcp # nexus web ui / rest api (https)
# add nfs server
- name: create exported directories
file:
path: "{{ nfs_parent }}/{{ item.name }}"
state: directory
owner: nobody
group: nobody
mode: 0770
loop: "{{ nfs_exports }}"
- name: now also generate the exports file
template:
src: templates/nfsexports.j2
dest: /etc/exports.d/ocp.exports
owner: root
group: root
mode: 0644
register: exports
- name: finally, run exportfs if anything got changed
command: /usr/sbin/exportfs -ar
when: exports.changed
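# the rendered exports file should end up with one line per export, roughly
# like this (hypothetical path and network; the real values come from
# nfs_parent and nfs_exports via templates/nfsexports.j2):
#   /exports/registry 192.168.100.0/24(rw,sync,no_root_squash)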
- name: make sure the firewall is open for nfs as well
firewalld:
service: "{{ item }}"
state: enabled
immediate: yes
permanent: yes
loop:
- rpc-bind
- nfs
- nfs3
- mountd
- name: enable the nfs server service and start it if not yet there
service:
name: nfs-server
enabled: yes
state: started
# the heavy duty handlers :)
handlers:
- name: (re)start the openvpn server
service:
name: openvpn-server@cluster
enabled: yes
state: restarted
- name: (re)start the dns server
service:
name: named-chroot
enabled: yes
state: restarted
- name: (re)start the web server
service:
name: httpd
enabled: yes
state: restarted
- name: (re)start the dhcp server
service:
name: dhcpd
enabled: yes
state: restarted
- name: (re)start nexus
service:
name: nexus
enabled: yes
state: restarted
- name: fix some local resolver config after the handlers
hosts: services
become: yes
tasks:
- name: check that named is running and operational
service:
name: named-chroot
state: started
register: named_status
- name: do a simple query of a wildcard record
command: /usr/bin/dig -t A nonexistent.apps.{{ cluster_name }}.{{ parent_domain }}. @127.0.0.1
register: dnsquery
- name: check the dns query response
set_fact:
named_is_running: >
{{ dnsquery.stdout | regex_search('^nonexistent.apps.' ~ cluster_name ~ '.' ~ parent_domain ~ '.', multiline=True) }}
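# if the wildcard resolves, dig's answer section starts with the queried
# name, which is what the regex above captures, e.g. (hypothetical ttl/ip):
#   nonexistent.apps.mycluster.example.com. 3600 IN A 192.168.100.5
# note that the folded block above leaves a trailing newline on the fact,
# hence the '\n' in the comparisons below.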
- name: edit /etc/resolv.conf and point it to localhost
lineinfile:
path: /etc/resolv.conf
regexp: "^nameserver .*"
line: "nameserver 127.0.0.1"
state: present
when: "named_is_running == 'nonexistent.apps.' ~ cluster_name ~ '.' ~ parent_domain ~ '.\n'"
- name: or fail if dns is not working
assert:
fail_msg: |
DNS does not appear to be running (or resolving correctly) on {{ inventory_hostname }}!!!
This will most certainly break the rest of deployment, so please review and fix it!
success_msg: |
Everything resolver-related appears to be in order. You can proceed to cluster deployment.
that:
- "named_is_running == 'nonexistent.apps.' ~ cluster_name ~ '.' ~ parent_domain ~ '.\n'"
# NOTE: there is a chicken-egg problem with haproxy. couldn't configure and
# (re)start haproxy in the main play. until the resolver is working properly,
# that will fail, as haproxy will try to look up backends.
- name: finally, configure haproxy
hosts: services
become: yes
tasks:
# haproxy configuration
- name: configure haproxy
template:
src: templates/haproxy.cfg.j2
dest: /etc/haproxy/haproxy.cfg
owner: root
group: root
mode: 0644
notify: (re)start haproxy
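# the haproxy template is not shown here, but judging from the ports opened
# below it presumably renders frontends along these lines (a sketch only):
#   :6443    -> kubernetes api        -> master nodes
#   :22623   -> machine config server -> master nodes
#   :443/:81 -> cluster ingress       -> router nodes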
- name: make sure the firewall accepts https
firewalld:
service: https
state: enabled
immediate: yes
permanent: yes
- name: also make sure the firewall accepts all the weirdo ports
firewalld:
port: "{{ item }}/tcp"
state: enabled
immediate: yes
permanent: yes
loop:
- 81 # plain-http ingress (httpd already owns port 80)
- 6443 # kubernetes api
- 22623 # machine config server
- name: haproxy needs some seboolean love to connect to backends
seboolean:
name: haproxy_connect_any
state: true
persistent: yes
handlers:
- name: (re)start haproxy
service:
name: haproxy
enabled: yes
state: restarted
# ultimately, we have given nexus enough time to restore the backups, so we
# can now check that it is up and remove the leftover .bak files.
- name: clean up nexus backups
hosts: services
become: yes
tasks:
- name: keep knocking until someone opens the door
become: no
delegate_to: localhost
uri:
method: GET
url: http://services.{{ parent_domain }}:8081/service/rest/v1/repositories
status_code: 200
register: nexus_is_up
until: nexus_is_up.status == 200
retries: 5
delay: 10
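# (5 retries x 10 seconds gives nexus roughly a minute to finish starting
# up and restoring before we give up)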
- name: find the backup files
find:
paths:
- /opt/sonatype-work/nexus3/restore-from-backup
patterns:
- '*.bak'
register: backups
- name: remove the bak files
file:
path: "{{ item.path }}"
state: absent
loop: "{{ backups.files }}"
# produce a note telling everyone to stop drinking coffee and start working
- name: hello! time to wake up!
hosts: localhost
become: no
gather_facts: no
tasks:
- name: produce a notification
pause:
prompt: |
*********************************************************************
* THE SERVICES VM PROVISIONING IS NOW COMPLETE. *
* *
* BEFORE THIS PROCESS CONTINUES, YOU MUST MAKE SURE OF TWO THINGS: *
* - YOUR RESOLVER MUST NOW INCLUDE ALL THE CLUSTER VMS AND LB *
* (you can establish a VPN connection to services for that NOW) *
* - YOU MUST DO YOUR INITIAL NEXUS CONFIGURATION ON THE SERVICES VM *
* (go to http://services:8081/ and log in as admin/admin123) *
* *
* THIS PROCESS WILL NOW PAUSE FOR 30 SECONDS TO GIVE YOU A CHANCE *
* TO CLEANLY INTERRUPT IT OR CHOOSE TO CONTINUE. (press Ctrl-C) *
*********************************************************************
seconds: 30
...