---
# This playbook purges Ceph.
# It removes: packages, configuration files and ALL THE DATA.
#
# Use it like this:
# ansible-playbook purge-cluster.yml
#     Prompts for confirmation to purge. The default answer is 'no',
#     which leaves the cluster untouched; answering 'yes' purges it.
#
# ansible-playbook -e ireallymeanit=yes|no purge-cluster.yml
#     Overrides the prompt using the -e option. Useful in automation
#     scripts to avoid the interactive prompt.
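#
# As an illustrative sketch, a fully non-interactive run that also skips
# zapping block devices could look like the line below. zap_block_devs is
# the play variable defined further down; the JSON form of -e is used so
# the value is passed as a real boolean rather than the string "false":
#
# ansible-playbook -e ireallymeanit=yes -e '{"zap_block_devs": false}' purge-cluster.yml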

- name: confirm whether user really meant to purge the cluster
  hosts: localhost

  vars_prompt:
    - name: ireallymeanit
      prompt: Are you sure you want to purge the cluster?
      default: 'no'
      private: no

  tasks:
    - name: exit playbook, if user did not mean to purge cluster
      fail:
        msg: >
          Exiting purge-cluster playbook, cluster was NOT purged.
          To purge the cluster, either answer 'yes' at the prompt or
          use `-e ireallymeanit=yes` on the command line when
          invoking the playbook.
      when: ireallymeanit != 'yes'

- name: stop ceph cluster
  hosts:
    - mons
    - osds
    - mdss
    - rgws

  become: yes

  vars:
    osd_group_name: osds
    mon_group_name: mons
    rgw_group_name: rgws
    mds_group_name: mdss
    rbdmirror_group_name: rbdmirrors
    # When set to true, both groups of packages are purged.
    # This can cause problems with qemu-kvm.
    purge_all_packages: true
    # When set to true and raw_multi_journal is used, the block devices are also zapped.
    zap_block_devs: true
    ceph_packages:
      - ceph
      - ceph-common
      - ceph-fs-common
      - ceph-fuse
      - ceph-mds
      - ceph-release
      - ceph-radosgw
    ceph_remaining_packages:
      - libcephfs1
      - librados2
      - libradosstriper1
      - librbd1
      - python-cephfs
      - python-rados
      - python-rbd
    cluster: ceph # name of the cluster
    monitor_name: "{{ ansible_hostname }}"
    mds_name: "{{ ansible_hostname }}"

  handlers:
    - name: restart machine
      shell: sleep 2 && shutdown -r now "Ansible updates triggered"
      async: 1
      poll: 0
      ignore_errors: true

    - name: wait for server to boot
      local_action: wait_for port=22 host={{ inventory_hostname }} state=started delay=10 timeout=400

    - name: remove data
      file:
        path: /var/lib/ceph
        state: absent

  tasks:
    - name: check for a device list
      fail:
        msg: "OSD automatic discovery was detected, purge cluster does not support this scenario. If you want to purge the cluster, manually provide the list of devices in group_vars/osds using the devices variable."
      when:
        osd_group_name in group_names and
        devices is not defined and
        osd_auto_discovery

    - name: get osd numbers
      shell: "if [ -d /var/lib/ceph/osd ] ; then ls /var/lib/ceph/osd | cut -d '-' -f 2 ; fi"
      register: osd_ids
      changed_when: false

    - name: are we using systemd
      shell: "if [ -d /usr/lib/systemd ] ; then find /usr/lib/systemd/system -name 'ceph*' | wc -l ; else echo 0 ; fi"
      register: systemd_unit_files

    # after Hammer release
    - name: stop ceph.target with systemd
      service:
        name: ceph.target
        state: stopped
        enabled: no
      when:
        ansible_os_family == 'RedHat' and
        systemd_unit_files.stdout != "0"

    - name: stop ceph-osd with systemd
      service:
        name: ceph-osd@{{ item }}
        state: stopped
        enabled: no
      with_items: "{{ osd_ids.stdout_lines }}"
      when:
        ansible_os_family == 'RedHat' and
        systemd_unit_files.stdout != "0" and
        osd_group_name in group_names

    - name: stop ceph mons with systemd
      service:
        name: ceph-mon@{{ ansible_hostname }}
        state: stopped
        enabled: no
      when:
        ansible_os_family == 'RedHat' and
        systemd_unit_files.stdout != "0" and
        mon_group_name in group_names

    - name: stop ceph mdss with systemd
      service:
        name: ceph-mds@{{ ansible_hostname }}
        state: stopped
      when:
        ansible_os_family == 'RedHat' and
        systemd_unit_files.stdout != "0" and
        mds_group_name in group_names

    - name: stop ceph rgws with systemd
      service:
        name: ceph-radosgw@rgw.{{ ansible_hostname }}
        state: stopped
      when:
        ansible_os_family == 'RedHat' and
        systemd_unit_files.stdout != "0" and
        rgw_group_name in group_names

    - name: stop ceph rbd mirror with systemd
      service:
        name: ceph-rbd-mirror@admin.service
        state: stopped
      when:
        ansible_os_family == 'RedHat' and
        systemd_unit_files.stdout != "0" and
        rbdmirror_group_name in group_names

    # before infernalis release, using sysvinit scripts
    # we use this test so we do not have to know which RPM contains the boot script
    # or where it is placed.
    - name: stop ceph osds
      shell: "service ceph status osd ; if [ $? == 0 ] ; then service ceph stop osd ; else echo ; fi"
      when:
        ansible_os_family == 'RedHat' and
        osd_group_name in group_names

    - name: stop ceph mons
      shell: "service ceph status mon ; if [ $? == 0 ] ; then service ceph stop mon ; else echo ; fi"
      when:
        ansible_os_family == 'RedHat' and
        mon_group_name in group_names

    - name: stop ceph mdss
      shell: "service ceph status mds ; if [ $? == 0 ] ; then service ceph stop mds ; else echo ; fi"
      when:
        ansible_os_family == 'RedHat' and
        mds_group_name in group_names

    - name: stop ceph rgws
      shell: "service ceph-radosgw status ; if [ $? == 0 ] ; then service ceph-radosgw stop ; else echo ; fi"
      when:
        ansible_os_family == 'RedHat' and
        rgw_group_name in group_names

    # Ubuntu 14.04
    - name: stop ceph osds on ubuntu
      shell: |
        for id in $(ls /var/lib/ceph/osd/ | grep -oh '[0-9]*'); do
          initctl stop ceph-osd cluster={{ cluster }} id=$id
        done
      failed_when: false
      when:
        ansible_distribution == 'Ubuntu' and
        osd_group_name in group_names

    - name: stop ceph mons on ubuntu
      command: initctl stop ceph-mon cluster={{ cluster }} id={{ monitor_name }}
      failed_when: false
      when:
        ansible_distribution == 'Ubuntu' and
        mon_group_name in group_names

    - name: stop ceph mdss on ubuntu
      command: initctl stop ceph-mds cluster={{ cluster }} id={{ mds_name }}
      failed_when: false
      when:
        ansible_distribution == 'Ubuntu' and
        mds_group_name in group_names

    - name: stop ceph rgws on ubuntu
      command: initctl stop radosgw cluster={{ cluster }} id={{ ansible_hostname }}
      failed_when: false
      when:
        ansible_distribution == 'Ubuntu' and
        rgw_group_name in group_names

    - name: stop ceph rbd mirror on ubuntu
      command: initctl stop ceph-rbd-mirror cluster={{ cluster }} id=admin
      failed_when: false
      when:
        ansible_distribution == 'Ubuntu' and
        rbdmirror_group_name in group_names

    - name: check for anything running ceph
      shell: "ps awux | grep -v grep | grep -q -- ceph-"
      register: check_for_running_ceph
      failed_when: check_for_running_ceph.rc == 0

    - name: see if ceph-disk-created data partitions are present
      shell: "ls /dev/disk/by-partlabel | grep -q 'ceph\\\\x20data'"
      failed_when: false
      register: ceph_data_partlabels

    - name: see if ceph-disk-created journal partitions are present
      shell: "ls /dev/disk/by-partlabel | grep -q 'ceph\\\\x20journal'"
      failed_when: false
      register: ceph_journal_partlabels

    - name: get osd data mount points
      shell: "(grep /var/lib/ceph/osd /proc/mounts || echo -n) | awk '{ print $2 }'"
      register: mounted_osd
      changed_when: false

    - name: drop all cache
      shell: "sync && sleep 1 && echo 3 > /proc/sys/vm/drop_caches"
      when:
        osd_group_name in group_names

    - name: umount osd data partition
      shell: umount {{ item }}
      with_items:
        - "{{ mounted_osd.stdout_lines }}"
      when:
        osd_group_name in group_names

    - name: remove osd mountpoint tree
      shell: rm -rf /var/lib/ceph/osd
      register: remove_osd_mountpoints
      failed_when: false
      when:
        osd_group_name in group_names

    - name: remove monitor store and bootstrap keys
      shell: rm -rf /var/lib/ceph/
      failed_when: false
      when:
        mon_group_name in group_names

    - name: is reboot needed
      local_action: shell echo requesting reboot
      notify:
        - restart machine
        - wait for server to boot
        - remove data
      when:
        osd_group_name in group_names and
        remove_osd_mountpoints.rc != 0

    - name: see if ceph-disk is installed
      shell: "which ceph-disk"
      failed_when: false
      register: ceph_disk_present

    - name: zap osd disks
      shell: ceph-disk zap "{{ item }}"
      with_items: "{{ devices }}"
      when:
        osd_group_name in group_names and
        ceph_disk_present.rc == 0 and
        ceph_data_partlabels.rc == 0 and
        zap_block_devs

    - name: zap journal devices
      shell: ceph-disk zap "{{ item }}"
      with_items: "{{ raw_journal_devices|default([])|unique }}"
      when:
        osd_group_name in group_names and
        ceph_disk_present.rc == 0 and
        ceph_journal_partlabels.rc == 0 and
        zap_block_devs and
        raw_multi_journal

    - name: purge ceph packages with yum
      yum:
        name: "{{ item }}"
        state: absent
      with_items:
        - "{{ ceph_packages }}"
      when:
        ansible_pkg_mgr == 'yum'

    - name: purge ceph packages with dnf
      dnf:
        name: "{{ item }}"
        state: absent
      with_items:
        - "{{ ceph_packages }}"
      when:
        ansible_pkg_mgr == 'dnf'

    - name: purge ceph packages with apt
      apt:
        name: "{{ item }}"
        state: absent
      with_items:
        - "{{ ceph_packages }}"
      when:
        ansible_pkg_mgr == 'apt'

    - name: purge remaining ceph packages with yum
      yum:
        name: "{{ item }}"
        state: absent
      with_items:
        - "{{ ceph_remaining_packages }}"
      when:
        ansible_pkg_mgr == 'yum' and
        purge_all_packages == true

    - name: purge remaining ceph packages with dnf
      dnf:
        name: "{{ item }}"
        state: absent
      with_items:
        - "{{ ceph_remaining_packages }}"
      when:
        ansible_pkg_mgr == 'dnf' and
        purge_all_packages == true

    - name: purge remaining ceph packages with apt
      apt:
        name: "{{ item }}"
        state: absent
      with_items:
        - "{{ ceph_remaining_packages }}"
      when:
        ansible_pkg_mgr == 'apt' and
        purge_all_packages == true

    - name: remove config
      file:
        path: /etc/ceph
        state: absent

    - name: remove logs
      file:
        path: /var/log/ceph
        state: absent

    - name: remove from SysV
      shell: "update-rc.d -f ceph remove"
      when:
        ansible_distribution == 'Ubuntu'

    - name: remove Upstart and SysV files
      shell: "find /etc -name '*ceph*' -delete"
      when:
        ansible_distribution == 'Ubuntu'

    - name: remove Upstart and apt logs and cache
      shell: "find /var -name '*ceph*' -delete"
      when:
        ansible_distribution == 'Ubuntu'

    - name: request data removal
      local_action: shell echo requesting data removal
      notify:
        - remove data

    - name: purge yum cache
      command: yum clean all
      when:
        ansible_pkg_mgr == 'yum'

    - name: purge dnf cache
      command: dnf clean all
      when:
        ansible_pkg_mgr == 'dnf'

    - name: purge RPM cache in /tmp
      file:
        path: /tmp/rh-storage-repo
        state: absent

    - name: clean apt
      shell: apt-get clean && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/*
      when:
        ansible_pkg_mgr == 'apt'