
Deleting the file system

View the file system

[root@ceph01 ~]# ceph fs ls
name: k8s-cephfs, metadata pool: cephfs_metadata, data pools: [cephfs_data ]
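
Before tearing it down, it can help to confirm the file system's MDS state; a quick check using the k8s-cephfs name from the listing above (output format varies by release):

ceph fs status k8s-cephfs
ceph mds stat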

Delete the file system

[root@ceph01 ~]# ceph fs fail k8s-cephfs
k8s-cephfs marked not joinable; MDS cannot join the cluster. All MDS ranks marked failed.
[root@ceph01 ~]# ceph fs rm k8s-cephfs --yes-i-really-mean-it
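
To verify the removal, list the file systems again; an empty cluster typically reports "No filesystems enabled":

ceph fs ls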

Deleting pools

View the pools

[root@ceph01 ~]# ceph osd pool ls
ssd-demo-pool
nvme-demo-pool
rbd-demo-pool
rbd-k8s-pool
cephfs_data
cephfs_metadata
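
If it is unclear what a pool still holds before deleting it, ceph osd pool ls detail prints each pool's replication settings and flags, and ceph df shows per-pool usage:

ceph osd pool ls detail
ceph df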

Delete the pools

ceph osd pool delete ssd-demo-pool ssd-demo-pool --yes-i-really-really-mean-it
ceph osd pool delete nvme-demo-pool nvme-demo-pool --yes-i-really-really-mean-it
ceph osd pool delete rbd-demo-pool rbd-demo-pool --yes-i-really-really-mean-it
ceph osd pool delete rbd-k8s-pool rbd-k8s-pool --yes-i-really-really-mean-it
ceph osd pool delete cephfs_data cephfs_data --yes-i-really-really-mean-it
ceph osd pool delete cephfs_metadata cephfs_metadata --yes-i-really-really-mean-it
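
These deletes are refused by default, because pool deletion is disabled on the monitors. If the commands above fail with an error saying pool deletion is disabled, enable mon_allow_pool_delete and re-run them; a sketch for recent releases (older clusters can inject the option with ceph tell mon.\* injectargs '--mon-allow-pool-delete=true' instead):

ceph config set mon mon_allow_pool_delete true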

Deleting OSDs

Run on the Ceph admin node

#!/bin/bash
# For every OSD id: mark it out and down, remove it from the CRUSH map,
# delete its cephx key, then remove it from the OSD map.
osd_list=$(ceph osd ls)
for id in $osd_list; do
    ceph osd out "osd.$id"
    ceph osd down "osd.$id"
    ceph osd crush rm "osd.$id"
    ceph auth del "osd.$id"
    ceph osd rm "osd.$id"
done
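
Once the loop finishes, the OSD map and the CRUSH tree should both be empty; a quick verification:

ceph osd ls     # should print nothing
ceph osd tree   # should list no osd.* entries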

Run on every Ceph OSD node
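
Stop the OSD daemons before unmounting, otherwise the mount points stay busy (the fuser workaround below handles any stragglers); a sketch assuming systemd-managed daemons:

systemctl stop ceph-osd.target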

# Unmount every OSD data directory
for i in $(ls /var/lib/ceph/osd/); do
    umount "/var/lib/ceph/osd/$i"
done
  • If umount fails with "umount: /var/lib/ceph/osd/ceph-*: target is busy", do the following:
[root@node2 ~]# fuser -mv /var/lib/ceph/osd/ceph-1
                     USER        PID ACCESS COMMAND
/var/lib/ceph/osd/ceph-1:
                     root     kernel mount /var/lib/ceph/osd/ceph-1
                     ceph       5979 F.... ceph-osd
[root@node2 ~]# kill -9 5979

[root@node2 ~]# fuser -mv /var/lib/ceph/osd/ceph-1
                     USER        PID ACCESS COMMAND
/var/lib/ceph/osd/ceph-1:
                     root     kernel mount /var/lib/ceph/osd/ceph-1

[root@node2 ~]# umount /var/lib/ceph/osd/ceph-1
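
Instead of looking up and killing each PID by hand, fuser -k can kill everything holding the mount in one step; a sketch using the same ceph-1 example (adjust the path per OSD, and note that -k sends SIGKILL by default):

fuser -km /var/lib/ceph/osd/ceph-1
umount /var/lib/ceph/osd/ceph-1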
  • Manually wipe the data on the disks (run on all data nodes):
# Remove the device-mapper mappings left behind by the OSD volumes
dmsetup remove_all
# Zap the partition tables on the data disks
yum install gdisk -y
sgdisk -z /dev/<?>
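
If gdisk is not available, wipefs from util-linux can clear filesystem and partition-table signatures instead; same placeholder device as above:

wipefs --all /dev/<?>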
  • Run on the Ceph admin node:
ceph-deploy disk zap <node-hostname> /dev/<?>

Uninstall the components and clean up the directories

ceph-deploy purge ceph01 ceph02 ceph03        # uninstall the Ceph packages from the nodes
ceph-deploy purgedata ceph01 ceph02 ceph03    # remove the data under /var/lib/ceph and /etc/ceph
ceph-deploy forgetkeys                        # delete the keyring files from the local working directory
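
After purging, a quick loop over the nodes confirms nothing was left behind; a sketch assuming passwordless SSH to the same three hosts, where both paths should now report "No such file or directory":

for node in ceph01 ceph02 ceph03; do
    ssh "$node" 'ls /var/lib/ceph /etc/ceph'
done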