From ee05e82ceaa9ce5a80bb6c06db49d78f2c752131 Mon Sep 17 00:00:00 2001
From: Liang Zheng
Date: Fri, 1 Dec 2023 18:00:30 +0800
Subject: [PATCH] osd: support creating OSDs with a metadata partition

Currently, when Rook provisions OSDs (in the OSD prepare job), it effectively runs a `ceph-volume` command such as the following.

```console
ceph-volume lvm batch --prepare --db-devices 
```

However, `ceph-volume lvm batch` only supports whole disks and LVM logical volumes, not disk partitions. We resort to `ceph-volume lvm prepare` to implement partition support.

Signed-off-by: Liang Zheng
---
 .github/workflows/canary-integration-test.yml | 45 +++
 .../CRDs/Cluster/ceph-cluster-crd.md | 6 +-
 pkg/daemon/ceph/osd/volume.go | 144 +++++---
 pkg/daemon/ceph/osd/volume_test.go | 331 +++++++++++++++++-
 tests/scripts/github-action-helper.sh | 3 +
 5 files changed, 475 insertions(+), 54 deletions(-)

diff --git a/.github/workflows/canary-integration-test.yml b/.github/workflows/canary-integration-test.yml
index 3367cd680ed0..af0b3964cdc8 100644
--- a/.github/workflows/canary-integration-test.yml
+++ b/.github/workflows/canary-integration-test.yml
@@ -391,6 +391,51 @@ jobs:
         with:
           name: canary
 
+  osd-with-metadata-partition-device:
+    runs-on: ubuntu-20.04
+    if: "!contains(github.event.pull_request.labels.*.name, 'skip-ci')"
+    steps:
+      - name: checkout
+        uses: actions/checkout@v4
+        with:
+          fetch-depth: 0
+
+      - name: consider debugging
+        uses: ./.github/workflows/tmate_debug
+        with:
+          use-tmate: ${{ secrets.USE_TMATE }}
+
+      - name: setup cluster resources
+        uses: ./.github/workflows/canary-test-config
+
+      - name: validate-yaml
+        run: tests/scripts/github-action-helper.sh validate_yaml
+
+      - name: use local disk as OSD metadata partition
+        run: |
+          export BLOCK="/dev/$(tests/scripts/github-action-helper.sh find_extra_block_dev)"
+          tests/scripts/github-action-helper.sh use_local_disk
+          tests/scripts/create-bluestore-partitions.sh --disk "$BLOCK" --bluestore-type block.db --osd-count 1
+
+      - name: deploy cluster
+        run: |
+          tests/scripts/github-action-helper.sh deploy_cluster osd_with_metadata_partition_device
+
+      - name: wait for prepare pod
+        run: tests/scripts/github-action-helper.sh wait_for_prepare_pod 1
+
+      - name: wait for ceph to be ready
+        run: tests/scripts/github-action-helper.sh wait_for_ceph_to_be_ready osd 1
+
+      - name: check-ownerreferences
+        run: tests/scripts/github-action-helper.sh check_ownerreferences
+
+      - name: collect common logs
+        if: always()
+        uses: ./.github/workflows/collect-logs
+        with:
+          name: canary
+
   osd-with-metadata-device:
     runs-on: ubuntu-20.04
     if: "!contains(github.event.pull_request.labels.*.name, 'skip-ci')"
diff --git a/Documentation/CRDs/Cluster/ceph-cluster-crd.md b/Documentation/CRDs/Cluster/ceph-cluster-crd.md
index 865c9278be0e..9d732066853b 100755
--- a/Documentation/CRDs/Cluster/ceph-cluster-crd.md
+++ b/Documentation/CRDs/Cluster/ceph-cluster-crd.md
@@ -478,7 +478,7 @@ See the table in [OSD Configuration Settings](#osd-configuration-settings) to kn
 
 The following storage selection settings are specific to Ceph and do not apply to other backends. All variables are key-value pairs represented as strings.
 
-* `metadataDevice`: Name of a device or lvm to use for the metadata of OSDs on each node. Performance can be improved by using a low latency device (such as SSD or NVMe) as the metadata device, while other spinning platter (HDD) devices on a node are used to store data. Provisioning will fail if the user specifies a `metadataDevice` but that device is not used as a metadata device by Ceph. Notably, `ceph-volume` will not use a device of the same device class (HDD, SSD, NVMe) as OSD devices for metadata, resulting in this failure.
+* `metadataDevice`: Name of a device, [partition](#limitations-of-metadata-device) or lvm to use for the metadata of OSDs on each node. Performance can be improved by using a low latency device (such as SSD or NVMe) as the metadata device, while other spinning platter (HDD) devices on a node are used to store data. Provisioning will fail if the user specifies a `metadataDevice` but that device is not used as a metadata device by Ceph. Notably, `ceph-volume` will not use a device of the same device class (HDD, SSD, NVMe) as OSD devices for metadata, resulting in this failure.
 * `databaseSizeMB`: The size in MB of a bluestore database. Include quotes around the size.
 * `walSizeMB`: The size in MB of a bluestore write ahead log (WAL). Include quotes around the size.
 * `deviceClass`: The [CRUSH device class](https://ceph.io/community/new-luminous-crush-device-classes/) to use for this selection of storage devices. (By default, if a device's class has not already been set, OSDs will automatically set a device's class to either `hdd`, `ssd`, or `nvme` based on the hardware properties exposed by the Linux kernel.) These storage classes can then be used to select the devices backing a storage pool by specifying them as the value of [the pool spec's `deviceClass` field](../Block-Storage/ceph-block-pool-crd.md#spec).
@@ -498,6 +498,10 @@ Allowed configurations are:
 | crypt | | |
 | mpath | | |
 
+#### Limitations of metadata device
+- If `metadataDevice` is specified in the global OSD configuration or in the node level OSD configuration, the metadata device will be shared between all OSDs on the same node. In other words, those OSDs will be initialized by `lvm batch`, and a partition cannot be used as the metadata device in this case.
+- If `metadataDevice` is specified in the device local configuration, a partition can be used as the metadata device. In this case, the OSDs are initialized by `lvm prepare`.
+
 ### Annotations and Labels
 
 Annotations and Labels can be specified so that the Rook components will have those annotations / labels added to them.
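To make the device-local form above concrete, here is a minimal, hypothetical `CephCluster` storage snippet; the node, device, and partition names are placeholders chosen for illustration, not taken from this patch.

```yaml
# Hypothetical example: node, device, and partition names are placeholders.
storage:
  useAllNodes: false
  useAllDevices: false
  nodes:
    - name: "node1"
      devices:
        # metadataDevice is set per device, so this OSD is prepared with
        # `ceph-volume lvm prepare`, which accepts a partition.
        - name: "sdb"
          config:
            metadataDevice: "/dev/sdd1"
        # A metadata partition can serve only one data device, so a second
        # OSD needs its own partition.
        - name: "sdc"
          config:
            metadataDevice: "/dev/sdd2"
```

Setting `metadataDevice` once at the cluster or node level instead would route these devices through `ceph-volume lvm batch`, where a partition is rejected.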
diff --git a/pkg/daemon/ceph/osd/volume.go b/pkg/daemon/ceph/osd/volume.go index a1e403e182dc..03b5259dbb48 100644 --- a/pkg/daemon/ceph/osd/volume.go +++ b/pkg/daemon/ceph/osd/volume.go @@ -47,6 +47,10 @@ const ( dbDeviceFlag = "--db-devices" cephVolumeCmd = "ceph-volume" cephVolumeMinDBSize = 1024 // 1GB + + blockDBFlag = "--block.db" + blockDBSizeFlag = "--block.db-size" + dataFlag = "--data" ) // These are not constants because they are used by the tests @@ -665,6 +669,12 @@ func (a *OsdAgent) initializeDevicesLVMMode(context *clusterd.Context, devices * } metadataDevices[md]["devices"] = deviceArg } + if metadataDevice.Type == sys.PartType { + if a.metadataDevice != "" && device.Config.MetadataDevice == "" { + return errors.Errorf("Partition device %s can not be specified as metadataDevice in the global OSD configuration or in the node level OSD configuration", md) + } + metadataDevices[md]["part"] = "true" // ceph-volume lvm batch only supports disk and lvm + } deviceDBSizeMB := getDatabaseSize(a.storeConfig.DatabaseSizeMB, device.Config.DatabaseSizeMB) if a.storeConfig.IsValidStoreType() && deviceDBSizeMB > 0 { if deviceDBSizeMB < cephVolumeMinDBSize { @@ -721,76 +731,110 @@ func (a *OsdAgent) initializeDevicesLVMMode(context *clusterd.Context, devices * for md, conf := range metadataDevices { + // Do not change device names if udev persistent names are passed + mdPath := md + if !strings.HasPrefix(mdPath, "/dev") { + mdPath = path.Join("/dev", md) + } + + var hasPart bool mdArgs := batchArgs osdsPerDevice := 1 - if _, ok := conf["osdsperdevice"]; ok { + if part, ok := conf["part"]; ok && part == "true" { + hasPart = true + } + if hasPart { + // ceph-volume lvm prepare --data {vg/lv} --block.wal {partition} --block.db {/path/to/device} + baseArgs := []string{"-oL", cephVolumeCmd, "--log-path", logPath, "lvm", "prepare", storeFlag} + if a.storeConfig.EncryptedDevice { + baseArgs = append(baseArgs, encryptedFlag) + } + mdArgs = baseArgs + devices := strings.Split(conf["devices"], " ") + if len(devices) > 1 { + logger.Warningf("partition metadataDevice %s can only be used by one data device", md) + } + if _, ok := conf["osdsperdevice"]; ok { + logger.Warningf("`ceph-volume osd prepare` doesn't support multiple OSDs per device") + } mdArgs = append(mdArgs, []string{ - osdsPerDeviceFlag, - conf["osdsperdevice"], + dataFlag, + devices[0], + blockDBFlag, + mdPath, }...) - v, _ := strconv.Atoi(conf["osdsperdevice"]) - if v > 1 { - osdsPerDevice = v + if _, ok := conf["databasesizemb"]; ok { + mdArgs = append(mdArgs, []string{ + blockDBSizeFlag, + conf["databasesizemb"], + }...) + } + } else { + if _, ok := conf["osdsperdevice"]; ok { + mdArgs = append(mdArgs, []string{ + osdsPerDeviceFlag, + conf["osdsperdevice"], + }...) + v, _ := strconv.Atoi(conf["osdsperdevice"]) + if v > 1 { + osdsPerDevice = v + } } + if _, ok := conf["databasesizemb"]; ok { + mdArgs = append(mdArgs, []string{ + databaseSizeFlag, + conf["databasesizemb"], + }...) + } + mdArgs = append(mdArgs, strings.Split(conf["devices"], " ")...) + mdArgs = append(mdArgs, []string{ + dbDeviceFlag, + mdPath, + }...) } + if _, ok := conf["deviceclass"]; ok { mdArgs = append(mdArgs, []string{ crushDeviceClassFlag, conf["deviceclass"], }...) } - if _, ok := conf["databasesizemb"]; ok { - mdArgs = append(mdArgs, []string{ - databaseSizeFlag, - conf["databasesizemb"], - }...) - } - mdArgs = append(mdArgs, strings.Split(conf["devices"], " ")...) 
- // Do not change device names if udev persistent names are passed - mdPath := md - if !strings.HasPrefix(mdPath, "/dev") { - mdPath = path.Join("/dev", md) - } - - mdArgs = append(mdArgs, []string{ - dbDeviceFlag, - mdPath, - }...) - - // Reporting - reportArgs := append(mdArgs, []string{ - "--report", - }...) + if !hasPart { + // Reporting + reportArgs := append(mdArgs, []string{ + "--report", + }...) - if err := context.Executor.ExecuteCommand(baseCommand, reportArgs...); err != nil { - return errors.Wrap(err, "failed ceph-volume report") // fail return here as validation provided by ceph-volume - } + if err := context.Executor.ExecuteCommand(baseCommand, reportArgs...); err != nil { + return errors.Wrap(err, "failed ceph-volume report") // fail return here as validation provided by ceph-volume + } - reportArgs = append(reportArgs, []string{ - "--format", - "json", - }...) + reportArgs = append(reportArgs, []string{ + "--format", + "json", + }...) - cvOut, err := context.Executor.ExecuteCommandWithOutput(baseCommand, reportArgs...) - if err != nil { - return errors.Wrapf(err, "failed ceph-volume json report: %s", cvOut) // fail return here as validation provided by ceph-volume - } + cvOut, err := context.Executor.ExecuteCommandWithOutput(baseCommand, reportArgs...) + if err != nil { + return errors.Wrapf(err, "failed ceph-volume json report: %s", cvOut) // fail return here as validation provided by ceph-volume + } - logger.Debugf("ceph-volume reports: %+v", cvOut) + logger.Debugf("ceph-volume reports: %+v", cvOut) - var cvReports []cephVolReportV2 - if err = json.Unmarshal([]byte(cvOut), &cvReports); err != nil { - return errors.Wrap(err, "failed to unmarshal ceph-volume report json") - } + var cvReports []cephVolReportV2 + if err = json.Unmarshal([]byte(cvOut), &cvReports); err != nil { + return errors.Wrap(err, "failed to unmarshal ceph-volume report json") + } - if len(strings.Split(conf["devices"], " "))*osdsPerDevice != len(cvReports) { - return errors.Errorf("failed to create enough required devices, required: %s, actual: %v", cvOut, cvReports) - } + if len(strings.Split(conf["devices"], " "))*osdsPerDevice != len(cvReports) { + return errors.Errorf("failed to create enough required devices, required: %s, actual: %v", cvOut, cvReports) + } - for _, report := range cvReports { - if report.BlockDB != mdPath && !strings.HasSuffix(mdPath, report.BlockDB) { - return errors.Errorf("wrong db device for %s, required: %s, actual: %s", report.Data, mdPath, report.BlockDB) + for _, report := range cvReports { + if report.BlockDB != mdPath && !strings.HasSuffix(mdPath, report.BlockDB) { + return errors.Errorf("wrong db device for %s, required: %s, actual: %s", report.Data, mdPath, report.BlockDB) + } } } diff --git a/pkg/daemon/ceph/osd/volume_test.go b/pkg/daemon/ceph/osd/volume_test.go index 46f3ea9d4883..6c6cb64e40be 100644 --- a/pkg/daemon/ceph/osd/volume_test.go +++ b/pkg/daemon/ceph/osd/volume_test.go @@ -626,6 +626,7 @@ func TestConfigureCVDevices(t *testing.T) { } func testBaseArgs(args []string) error { + // stdbuf -oL ceph-volume --log-path /tmp/ceph-log lvm batch --prepare --bluestore --yes --osds-per-device 1 --crush-device-class hdd /dev/sda --db-devices /dev/sdl1 --report if args[1] == "ceph-volume" && args[2] == "--log-path" && args[3] == "/tmp/ceph-log" && args[4] == "lvm" && args[5] == "batch" && args[6] == "--prepare" && args[7] == "--bluestore" && args[8] == "--yes" { return nil } @@ -633,6 +634,15 @@ func testBaseArgs(args []string) error { return errors.Errorf("unknown 
args %s ", args) } +func testBasePrepareArgs(args []string) error { + // stdbuf -oL ceph-volume --log-path /tmp/ceph-log lvm prepare --bluestore --crush-device-class hdd --data /dev/sda --block.db /dev/sdj1 + if args[1] == "ceph-volume" && args[2] == "--log-path" && args[3] == "/tmp/ceph-log" && args[4] == "lvm" && args[5] == "prepare" && args[6] == "--bluestore" { + return nil + } + + return errors.Errorf("unknown args %s ", args) +} + func TestInitializeBlock(t *testing.T) { os.Setenv(oposd.OSDStoreTypeVarName, "bluestore") // Common vars for all the tests @@ -895,6 +905,207 @@ func TestInitializeBlock(t *testing.T) { } } + // Test with metadata partition devices specified in the global OSD configuration or in the node level OSD configuration + { + devices := &DeviceOsdMapping{ + Entries: map[string]*DeviceOsdIDEntry{ + "sda": {Data: -1, Metadata: nil, Config: DesiredDevice{Name: "/dev/sda"}}, + "sdc": {Data: -1, Metadata: nil, Config: DesiredDevice{Name: "/dev/sdc"}}, + }, + } + + a := &OsdAgent{clusterInfo: &cephclient.ClusterInfo{CephVersion: cephver.CephVersion{Major: 16, Minor: 2, Extra: 0}}, nodeName: "node1", storeConfig: config.StoreConfig{StoreType: "bluestore"}, metadataDevice: "/dev/sdb1"} + context := &clusterd.Context{ + Devices: []*sys.LocalDisk{ + {Name: "sda"}, + {Name: "sdc"}, + {Name: "sdb1", Type: sys.PartType}, + }, + } + + err := a.initializeDevicesLVMMode(context, devices) + logger.Warning(err) + assert.ErrorContains(t, err, "can not be specified as metadataDevice in the global OSD configuration or in the node level OSD configuration") + } + + // Test with two devices specifies the same partition, and multiple OSDs per device + { + devices := &DeviceOsdMapping{ + Entries: map[string]*DeviceOsdIDEntry{ + "sda": {Data: -1, Metadata: nil, Config: DesiredDevice{Name: "/dev/sda", MetadataDevice: "sdb1"}}, + "sdc": {Data: -1, Metadata: nil, Config: DesiredDevice{Name: "/dev/sdc", MetadataDevice: "sdb1"}}, + }, + } + + executor := &exectest.MockExecutor{} + executor.MockExecuteCommand = func(command string, args ...string) error { + logger.Infof("%s %v", command, args) + + // Validate base common args + err := testBasePrepareArgs(args) + if err != nil { + return err + } + + if args[7] == "--data" && args[8] == "/dev/sda" && args[9] == "--block.db" && args[10] == "/dev/sdb1" { + return nil + } + if args[7] == "--data" && args[8] == "/dev/sdc" && args[9] == "--block.db" && args[10] == "/dev/sdb1" { + return nil + } + + return errors.Errorf("unknown command %s %s", command, args) + } + + a := &OsdAgent{clusterInfo: &cephclient.ClusterInfo{CephVersion: cephver.CephVersion{Major: 16, Minor: 2, Extra: 0}}, nodeName: "node1", storeConfig: config.StoreConfig{OSDsPerDevice: 3, StoreType: "bluestore"}} + context := &clusterd.Context{ + Executor: executor, + Devices: []*sys.LocalDisk{ + {Name: "sda"}, + {Name: "sdc"}, + {Name: "sdb1", Type: sys.PartType}, + }, + } + + err := a.initializeDevicesLVMMode(context, devices) + if err != nil { + assert.NoError(t, err, "failed metadata test") + } else { + logger.Info("success, go to next test") + } + } + + // Test with metadata partition devices + { + devices := &DeviceOsdMapping{ + Entries: map[string]*DeviceOsdIDEntry{ + "sda": {Data: -1, Metadata: nil, Config: DesiredDevice{Name: "/dev/sda", MetadataDevice: "sdb1"}}, + }, + } + + executor := &exectest.MockExecutor{} + executor.MockExecuteCommand = func(command string, args ...string) error { + logger.Infof("%s %v", command, args) + + // Validate base common args + err := 
testBasePrepareArgs(args) + if err != nil { + return err + } + + if args[7] == "--data" && args[8] == "/dev/sda" && args[9] == "--block.db" && args[10] == "/dev/sdb1" { + return nil + } + + return errors.Errorf("unknown command %s %s", command, args) + } + + a := &OsdAgent{clusterInfo: &cephclient.ClusterInfo{CephVersion: cephver.CephVersion{Major: 16, Minor: 2, Extra: 0}}, nodeName: "node1", storeConfig: config.StoreConfig{StoreType: "bluestore"}} + context := &clusterd.Context{ + Executor: executor, + Devices: []*sys.LocalDisk{ + {Name: "sda"}, {Name: "sdb1", Type: sys.PartType}, + }, + } + + err := a.initializeDevicesLVMMode(context, devices) + if err != nil { + assert.NoError(t, err, "failed metadata test") + } else { + logger.Info("success, go to next test") + } + } + + // Test with two metadata partition devices, one is lvm, one is partition + { + metadataDevicePath := "/dev/test-rook-vg/test-rook-lv" + devices := &DeviceOsdMapping{ + Entries: map[string]*DeviceOsdIDEntry{ + "sda": {Data: -1, Metadata: nil, Config: DesiredDevice{Name: "/dev/sda", MetadataDevice: "sdb1"}}, + "sdc": {Data: -1, Metadata: nil, Config: DesiredDevice{Name: "/dev/sdc", MetadataDevice: metadataDevicePath}}, + }, + } + + executor := &exectest.MockExecutor{ + MockExecuteCommand: func(command string, args ...string) error { + logger.Infof("%s %v", command, args) + + var err error + // Validate base common args + if args[8] == "--yes" { + // lvm will use `ceph-volume lvm batch` + err = testBaseArgs(args) + } else { + // partition will use `ceph-volume lvm prepare` + err = testBasePrepareArgs(args) + } + if err != nil { + return err + } + + // for partition + if args[7] == "--data" && args[8] == "/dev/sda" && args[9] == "--block.db" && args[10] == "/dev/sdb1" { + return nil + } + + // First command for lvm + if args[9] == "--osds-per-device" && args[10] == "1" && args[11] == "/dev/sdc" && args[12] == "--db-devices" && args[13] == metadataDevicePath { + return nil + } + + // Second command for lvm + if args[9] == "--osds-per-device" && args[10] == "1" && args[11] == "/dev/sdc" && args[12] == "--db-devices" && args[13] == metadataDevicePath && args[14] == "--report" { + return nil + } + + return errors.Errorf("unknown command %s %s", command, args) + }, + + MockExecuteCommandWithOutput: func(command string, args ...string) (string, error) { + logger.Infof("%s %v", command, args) + + // Validate base common args + err := testBaseArgs(args) + if err != nil { + return "", err + } + + // First command + if args[9] == "--osds-per-device" && args[10] == "1" && args[11] == "/dev/sdc" && args[12] == "--db-devices" && args[13] == metadataDevicePath { + return fmt.Sprintf(`[{"block_db": "%s", "data": "%s"}]`, metadataDevicePath, "/dev/sdc"), nil + } + + return "", errors.Errorf("unknown command %s %s", command, args) + }, + } + + a := &OsdAgent{clusterInfo: &cephclient.ClusterInfo{CephVersion: cephver.CephVersion{Major: 16, Minor: 2, Extra: 0}}, nodeName: "node1", storeConfig: config.StoreConfig{StoreType: "bluestore"}} + context := &clusterd.Context{ + Executor: executor, + Devices: []*sys.LocalDisk{ + {Name: "sda"}, + {Name: "sdb1", Type: sys.PartType}, + { + Name: "sdc", + Type: "disk", + DevLinks: "/dev/disk/by-id/wwn-0x6f4ee080051fd00029bb505f1df6ee3a /dev/disk/by-path/pci-0000:3b:00.0-scsi-0:2:0:0", + }, + { + Name: "vg-test-rook-lv", + Type: "lvm", + DevLinks: "/dev/mapper/test--rook--vg-test--rook--lv /dev/test-rook-vg/test-rook-lv", + }, + }, + } + + err := a.initializeDevicesLVMMode(context, devices) + if err != 
nil { + assert.NoError(t, err, "failed metadata test") + } else { + logger.Info("success, go to next test") + } + } + // Test with metadata devices with dev by-id { metadataDeviceByIDPath := "/dev/disk/by-id/nvme-Samsung_SSD_970_EVO_Plus_1TB_XXX" @@ -969,6 +1180,58 @@ func TestInitializeBlock(t *testing.T) { } } + // Test with metadata partition devices with dev by-id + { + metadataDeviceByIDPath := "/dev/disk/by-id/nvme-BC511_NVMe_SK_hynix_512GB_CD08N413611008838-part1" + metadataDevicePath := "/dev/nvme0n1p1" + devices := &DeviceOsdMapping{ + Entries: map[string]*DeviceOsdIDEntry{ + "sda": {Data: -1, Metadata: nil, Config: DesiredDevice{Name: "/dev/sda", MetadataDevice: metadataDeviceByIDPath}}, + }, + } + + executor := &exectest.MockExecutor{} + executor.MockExecuteCommand = func(command string, args ...string) error { + logger.Infof("%s %v", command, args) + + // Validate base common args + err := testBasePrepareArgs(args) + if err != nil { + return err + } + + if args[7] == "--data" && args[8] == "/dev/sda" && args[9] == "--block.db" && args[10] == metadataDevicePath { + return nil + } + + return errors.Errorf("unknown command %s %s", command, args) + } + + a := &OsdAgent{clusterInfo: &cephclient.ClusterInfo{CephVersion: cephver.CephVersion{Major: 16, Minor: 2, Extra: 0}}, nodeName: "node1", storeConfig: config.StoreConfig{StoreType: "bluestore"}} + context := &clusterd.Context{ + Executor: executor, + Devices: []*sys.LocalDisk{ + { + Name: "sda", + Type: "disk", + DevLinks: "/dev/disk/by-id/wwn-0x6f4ee080051fd00029bb505f1df6ee3a /dev/disk/by-path/pci-0000:3b:00.0-scsi-0:2:0:0", + }, + { + Name: "nvme0n1p1", + Type: "part", + DevLinks: "/dev/disk/by-id/nvme-BC511_NVMe_SK_hynix_512GB_CD08N413611008838-part1 /dev/disk/by-partuuid/7cf003d3-3a56-4011-b736-b5a741b0aabc", + }, + }, + } + + err := a.initializeDevicesLVMMode(context, devices) + if err != nil { + assert.NoError(t, err, "failed metadata partition device by-id test") + } else { + logger.Info("success, go to next test") + } + } + // Test with metadata devices with dev by-path { devices := &DeviceOsdMapping{ @@ -1051,6 +1314,66 @@ func TestInitializeBlock(t *testing.T) { logger.Info("success, go to next test") } + // Test with metadata partition devices with dev by-path + { + devices := &DeviceOsdMapping{ + Entries: map[string]*DeviceOsdIDEntry{ + "sda": { + Data: -1, + Metadata: nil, + Config: DesiredDevice{ + Name: "/dev/sda", + MetadataDevice: "/dev/disk/by-path/pci-0000:3a:00.0-nvme-1-part1", + }, + }, + }, + } + metadataDevicePath := "/dev/nvme0n1p1" + + executor := &exectest.MockExecutor{} + executor.MockExecuteCommand = func(command string, args ...string) error { + logger.Infof("%s %v", command, args) + + // Validate base common args + err := testBasePrepareArgs(args) + if err != nil { + return err + } + + if args[7] == "--data" && args[8] == "/dev/sda" && args[9] == "--block.db" && args[10] == metadataDevicePath { + return nil + } + + return errors.Errorf("unknown command %s %s", command, args) + } + + agent := &OsdAgent{ + clusterInfo: &cephclient.ClusterInfo{ + CephVersion: cephver.CephVersion{Major: 16, Minor: 2, Extra: 0}, + }, + nodeName: "node1", + storeConfig: config.StoreConfig{StoreType: "bluestore"}, + } + context := &clusterd.Context{Executor: executor, + Devices: []*sys.LocalDisk{ + { + Name: "sda", + Type: "disk", + DevLinks: "/dev/disk/by-id/wwn-0x6f4ee080051fd00029bb505f1df6ee3a /dev/disk/by-path/pci-0000:3b:00.0-scsi-0:2:0:0", + }, + { + Name: "nvme0n1p1", + Type: "part", + DevLinks: 
"/dev/disk/by-path/pci-0000:3a:00.0-nvme-1-part1 /dev/disk/by-id/nvme-BC511_NVMe_SK_hynix_512GB_CD08N413611008838-part1 /dev/disk/by-partuuid/7cf003d3-3a56-4011-b736-b5a741b0aabc", + }, + }, + } + + err := agent.initializeDevicesLVMMode(context, devices) + assert.NoError(t, err, "failed metadata device by-path test") + logger.Info("success, go to next test") + } + // Test with metadata devices with lvm { metadataDevicePath := "/dev/test-rook-vg/test-rook-lv" @@ -1121,7 +1444,7 @@ func TestInitializeBlock(t *testing.T) { DevLinks: "/dev/disk/by-id/wwn-0x6f4ee080051fd00029bb505f1df6ee3a /dev/disk/by-path/pci-0000:3b:00.0-scsi-0:2:0:0", }, { - Name: "nvme0n1", + Name: "vg-test-rook-lv", Type: "lvm", DevLinks: "/dev/mapper/test--rook--vg-test--rook--lv /dev/test-rook-vg/test-rook-lv", }, @@ -1262,7 +1585,8 @@ func TestInitializeBlockPVC(t *testing.T) { } executor.MockExecuteCommandWithCombinedOutput = func(command string, args ...string) (string, error) { logger.Infof("%s %v", command, args) - if args[1] == "ceph-volume" && args[2] == "raw" && args[3] == "prepare" && args[4] == "--bluestore" && args[7] == "--osd-id" && args[8] == "3" { + if args[1] == "ceph-volume" && args[2] == "raw" && args[3] == "prepare" && args[4] == "--bluestore" && args[7] == "--osd-id" && args[8] == "3" || + args[1] == "ceph-volume" && args[4] == "raw" && args[5] == "prepare" && args[6] == "--bluestore" && args[9] == "--osd-id" && args[10] == "3" { return initializeBlockPVCTestResult, nil } @@ -1283,7 +1607,8 @@ func TestInitializeBlockPVC(t *testing.T) { } executor.MockExecuteCommandWithCombinedOutput = func(command string, args ...string) (string, error) { logger.Infof("%s %v", command, args) - if args[1] == "ceph-volume" && args[2] == "raw" && args[3] == "prepare" && args[4] == "--bluestore" && args[7] != "--osd-id" && args[8] != "3" { + if args[1] == "ceph-volume" && args[2] == "raw" && args[3] == "prepare" && args[4] == "--bluestore" && args[7] != "--osd-id" && args[8] != "3" || + args[1] == "ceph-volume" && args[4] == "raw" && args[5] == "prepare" && args[6] == "--bluestore" && args[9] != "--osd-id" && args[10] != "3" { return initializeBlockPVCTestResult, nil } diff --git a/tests/scripts/github-action-helper.sh b/tests/scripts/github-action-helper.sh index aa97801eb7b9..c7b3ae0d2017 100755 --- a/tests/scripts/github-action-helper.sh +++ b/tests/scripts/github-action-helper.sh @@ -280,6 +280,9 @@ function deploy_cluster() { sed -i "s|#deviceFilter:|deviceFilter: ${BLOCK/\/dev\//}\n config:\n osdsPerDevice: \"2\"|g" cluster-test.yaml elif [ "$1" = "osd_with_metadata_device" ]; then sed -i "s|#deviceFilter:|deviceFilter: ${BLOCK/\/dev\//}\n config:\n metadataDevice: /dev/test-rook-vg/test-rook-lv|g" cluster-test.yaml + elif [ "$1" = "osd_with_metadata_partition_device" ]; then + yq w -i -d0 cluster-test.yaml spec.storage.devices[0].name ${BLOCK}2 + yq w -i -d0 cluster-test.yaml spec.storage.devices[0].config.metadataDevice ${BLOCK}1 elif [ "$1" = "encryption" ]; then sed -i "s|#deviceFilter:|deviceFilter: ${BLOCK/\/dev\//}\n config:\n encryptedDevice: \"true\"|g" cluster-test.yaml elif [ "$1" = "lvm" ]; then