From fbc56d972ec70f537e06b74cac65ec3af998e8c3 Mon Sep 17 00:00:00 2001 From: Ernesto Puerta Date: Mon, 27 Mar 2023 19:41:00 +0200 Subject: [PATCH 01/19] build: containerize deployment - [x] Containerize and isolate spdk build (ubi8 and ubi9) - [x] Containerize nvme-of build (ubi9) - [x] Manage Python package dependencies and metadata (pdm) - [x] Remove dependencies from /usr/libexec (since those are not intended for external usage). - [x] Simplify build & deployment (docker-compose instead of make) - [x] Add containerized Ceph cluster - [x] Update Makefile - [x] Update docs Signed-off-by: Ernesto Puerta --- .dockerignore | 9 + .env | 44 +++++ .gitignore | 8 +- Dockerfile | 118 ++++++++++++ Dockerfile.ceph | 90 ++++++++++ Dockerfile.spdk | 114 ++++++++++++ Makefile | 69 +++---- README.md | 425 +++++++++++++++++++++++++++----------------- ceph-nvmeof.conf | 13 +- control/__main__.py | 3 +- control/cli.py | 70 +++++--- control/grpc.py | 38 ++-- control/server.py | 36 ++-- control/state.py | 2 +- docker-compose.yaml | 141 +++++++++++++++ mk/autohelp.mk | 24 +++ mk/containerized.mk | 66 +++++++ mk/demo.mk | 24 +++ mk/misc.mk | 11 ++ pdm.lock | 147 +++++++++++++++ pdm.toml | 2 + pyproject.toml | 36 ++++ requirements.txt | 2 - 23 files changed, 1215 insertions(+), 277 deletions(-) create mode 100644 .dockerignore create mode 100644 .env create mode 100644 Dockerfile create mode 100644 Dockerfile.ceph create mode 100644 Dockerfile.spdk create mode 100644 docker-compose.yaml create mode 100644 mk/autohelp.mk create mode 100644 mk/containerized.mk create mode 100644 mk/demo.mk create mode 100644 mk/misc.mk create mode 100644 pdm.lock create mode 100644 pdm.toml create mode 100644 pyproject.toml delete mode 100644 requirements.txt diff --git a/.dockerignore b/.dockerignore new file mode 100644 index 00000000..07450ea7 --- /dev/null +++ b/.dockerignore @@ -0,0 +1,9 @@ +** +# Exclude everything except: +!control/*.py +!proto/*.proto +!pyproject.toml +!pdm.lock +!pdm.toml +!README.md +!LICENSE diff --git a/.env b/.env new file mode 100644 index 00000000..108921f7 --- /dev/null +++ b/.env @@ -0,0 +1,44 @@ +# Globals +VERSION=0.0.1 +CEPH_VERSION=17.2.6 +SPDK_VERSION=23.01 +MAINTAINER=Ceph Developers + +# NVMe-oF +NVMEOF_VERSION=${VERSION} +NVMEOF_CONFIG=./ceph-nvmeof.conf +NVMEOF_SPDK_VERSION=${SPDK_VERSION} +NVMEOF_NAME=ceph-nvmeof +NVMEOF_SUMMARY=Ceph NVMe over Fabrics Gateway +NVMEOF_DESCRIPTION="Service to provide block storage on top of Ceph for +platforms (e.g.: VMWare) without native Ceph support (RBD), replacing +existing approaches (iSCSI) with a newer and more versatile standard +(NVMe-oF)." 
+NVMEOF_URL=https://github.com/ceph/ceph-nvmeof +NVMEOF_TAGS=ceph,nvme-of,nvme-of gateway,rbd,block storage +NVMEOF_WANTS=ceph,rbd +NVMEOF_EXPOSE_SERVICES=4420/tcp:nvme,5500/tcp:grpc,8009/tcp:nvme-disc +NVMEOF_GIT_REPO=https://github.com/ceph/ceph-nvmeof.git + +# NVMe-oF CLI +MVMEOF_CLI_VERSION=${VERSION} +NVMEOF_CLI_NAME=ceph-nvmeof-cli +NVMEOF_CLI_SUMMARY=Ceph NVMe over Fabrics CLI +NVMEOF_CLI_DESCRIPTION=Command line interface for Ceph NVMe over Fabrics Gateway + +# SPDK +SPDK_CEPH_VERSION=${CEPH_VERSION} +SPDK_NAME=SPDK +SPDK_SUMMARY=Build Ultra High-Performance Storage Applications with the Storage Performance Development Kit +SPDK_DESCRIPTION=The Storage Performance Development Kit (SPDK) provides a set of tools and libraries for writing high performance, scalable, user-mode storage applications +SPDK_URL=https://spdk.io + +SPDK_PKGDEP_ARGS=--rbd +SPDK_CONFIGURE_ARGS=--with-rbd --disable-tests --disable-unit-tests --disable-examples +SPDK_MAKEFLAGS= +SPDK_CENTOS_BASE=https://mirror.stream.centos.org/9-stream/BaseOS/x86_64/os/Packages/ +SPDK_CENTOS_REPO_VER=9.0-21.el9 + +# Ceph Cluster +CEPH_CLUSTER_VERSION=${CEPH_VERSION} +CEPH_VSTART_ARGS=--without-dashboard --memstore diff --git a/.gitignore b/.gitignore index b4c6048b..199331c0 100644 --- a/.gitignore +++ b/.gitignore @@ -1,8 +1,8 @@ - +*.swp +*_pb2*.py* __pycache__ -spdk -control/generated/gateway_pb2_grpc.py -control/generated/gateway_pb2.py +__pypackages__ +.pdm-python server.crt server.key client.crt diff --git a/Dockerfile b/Dockerfile new file mode 100644 index 00000000..2b70d721 --- /dev/null +++ b/Dockerfile @@ -0,0 +1,118 @@ +# syntax = docker/dockerfile:1.4 + +ARG NVMEOF_SPDK_VERSION +ARG NVMEOF_TARGET # either 'gateway' or 'cli' + +#------------------------------------------------------------------------------ +# Base image for NVMEOF_TARGET=cli (nvmeof-cli) +FROM registry.access.redhat.com/ubi9/ubi AS base-cli +ENTRYPOINT ["python3", "-m", "control.cli"] +CMD [] + +#------------------------------------------------------------------------------ +# Base image for NVMEOF_TARGET=gateway (nvmeof-gateway) +FROM quay.io/ceph/spdk:${NVMEOF_SPDK_VERSION:-NULL} AS base-gateway +RUN \ + --mount=type=cache,target=/var/cache/dnf \ + --mount=type=cache,target=/var/lib/dnf \ + dnf install -y python3-rados +ENTRYPOINT ["python3", "-m", "control"] +CMD ["-c", "/src/ceph-nvmeof.conf"] + +#------------------------------------------------------------------------------ +# Intermediate layer for Python set-up +FROM base-$NVMEOF_TARGET AS python-intermediate + +RUN \ + --mount=type=cache,target=/var/cache/dnf \ + --mount=type=cache,target=/var/lib/dnf \ + dnf update -y + +ENV PYTHONUNBUFFERED=1 \ + PYTHONIOENCODING=UTF-8 \ + LC_ALL=C.UTF-8 \ + LANG=C.UTF-8 \ + PIP_NO_CACHE_DIR=off \ + PYTHON_MAJOR=3 \ + PYTHON_MINOR=9 \ + PDM_ONLY_BINARY=:all: + +ARG APPDIR=/src + +ARG NVMEOF_NAME \ + NVMEOF_SUMMARY \ + NVMEOF_DESCRIPTION \ + NVMEOF_URL \ + NVMEOF_VERSION \ + NVMEOF_MAINTAINER \ + NVMEOF_TAGS \ + NVMEOF_WANTS \ + NVMEOF_EXPOSE_SERVICES \ + BUILD_DATE \ + NVMEOF_GIT_REPO \ + NVMEOF_GIT_BRANCH \ + NVMEOF_GIT_COMMIT + +# Generic labels +LABEL name="$NVMEOF_NAME" \ + version="$NVMEOF_VERSION" \ + summary="$NVMEOF_SUMMARY" \ + description="$NVMEOF_DESCRIPTION" \ + maintainer="$NVMEOF_MAINTAINER" \ + release="" \ + url="$NVMEOF_URL" \ + build-date="$BUILD_DATE" \ + vcs-ref="$NVMEOF_GIT_COMMIT" + +# k8s-specific labels +LABEL io.k8s.display-name="$NVMEOF_SUMMARY" \ + io.k8s.description="$NVMEOF_DESCRIPTION" + +# k8s-specific labels +LABEL 
io.openshift.tags="$NVMEOF_TAGS" \ + io.openshift.wants="$NVMEOF_WANTS" \ + io.openshift.expose-services="$NVMEOF_EXPOSE_SERVICES" + +# Ceph-specific labels +LABEL io.ceph.component="$NVMEOF_NAME" \ + io.ceph.summary="$NVMEOF_SUMMARY" \ + io.ceph.description="$NVMEOF_DESCRIPTION" \ + io.ceph.url="$NVMEOF_URL" \ + io.ceph.version="$NVMEOF_VERSION" \ + io.ceph.maintainer="$NVMEOF_MAINTAINER" \ + io.ceph.git.repo="$NVMEOF_GIT_REPO" \ + io.ceph.git.branch="$NVMEOF_GIT_BRANCH" \ + io.ceph.git.commit="$NVMEOF_GIT_COMMIT" + +ENV PYTHONPATH=$APPDIR/proto:$APPDIR/__pypackages__/$PYTHON_MAJOR.$PYTHON_MINOR/lib + +WORKDIR $APPDIR + +#------------------------------------------------------------------------------ +FROM python-intermediate AS builder + +ENV PDM_SYNC_FLAGS="-v --no-isolation --no-self --no-editable" + +# https://pdm.fming.dev/latest/usage/advanced/#use-pdm-in-a-multi-stage-dockerfile +RUN \ + --mount=type=cache,target=/var/cache/dnf \ + --mount=type=cache,target=/var/lib/dnf \ + dnf install -y python3-pip +RUN \ + --mount=type=cache,target=/root/.cache/pip \ + pip install -U pip setuptools + +RUN \ + --mount=type=cache,target=/root/.cache/pip \ + pip install pdm +COPY pyproject.toml pdm.lock pdm.toml ./ +RUN \ + --mount=type=cache,target=/root/.cache/pdm \ + pdm sync $PDM_SYNC_FLAGS + +COPY . . +RUN pdm run protoc + +#------------------------------------------------------------------------------ +FROM python-intermediate +COPY --from=builder $APPDIR . \ No newline at end of file diff --git a/Dockerfile.ceph b/Dockerfile.ceph new file mode 100644 index 00000000..aab1bdf5 --- /dev/null +++ b/Dockerfile.ceph @@ -0,0 +1,90 @@ +# syntax = docker/dockerfile:1.4 +FROM quay.io/centos/centos:stream9-minimal AS build + +ARG CEPH_CLUSTER_VERSION + +COPY < $(HUGEPAGES_DIR)' + @echo Actual Hugepages allocation: $$(cat $(HUGEPAGES_DIR)) + @[ $$(cat $(HUGEPAGES_DIR)) -eq $(HUGEPAGES) ] + +build push pull: SVC = spdk nvmeof nvmeof-cli ceph + +build: export NVMEOF_GIT_BRANCH != git name-rev --name-only HEAD +build: export NVMEOF_GIT_COMMIT != git rev-parse HEAD +build: export SPDK_GIT_REPO != git -C spdk remote get-url origin +build: export SPDK_GIT_BRANCH != git -C spdk name-rev --name-only HEAD +build: export SPDK_GIT_COMMIT != git rev-parse HEAD:spdk +build: export BUILD_DATE != date -u +"%Y-%m-%dT%H:%M:%SZ" + + +up: SVC = nvmeof ## Services +up: override OPTS += --no-build --abort-on-container-exit --remove-orphans --scale nvmeof=$(SCALE) + +clean: $(CLEAN) ## Clean-up environment + +help: AUTOHELP_SUMMARY = Makefile to build and deploy the Ceph NVMe-oF Gateway +help: autohelp + +.PHONY: all setup clean help diff --git a/README.md b/README.md index 78ace56c..bfdeba8b 100644 --- a/README.md +++ b/README.md @@ -1,199 +1,288 @@ -# nvmeof-gateway - -Management gateway daemon to setup access to Ceph storage over NVMeoF - -This daemon runs as root. It provides the ability to export existing RBD images as NVMeoF namespaces. Creation of RBD images is not within the scope of this daemon. - -# Initial configuration - -1. The daemon is a gRPC server, so the host running the server will need to install gRPC packages: - - $ make setup - -2. Modify the config file (default ceph-nvmeof.conf) to reflect the IP/ Port where the server can be reached: - - addr = - port = - -3. To [enable mTLS](#mtls-configuration-for-testing-purposes) using self signed certificates, edit the config file to set: - - enable_auth = True # Setting this to False will open an insecure port - -4. 
Compile protobuf files for gRPC: - - $ make grpc - -5. SPDK is included in this repository as a submodule. Edit the config file to set: - - spdk_path = - spdk_tgt = - -6. Setup SPDK - - Navigate to the spdk folder & install dependencies: - - $ ./scripts/pkgdep.sh - - Initialize configuration: - - $ apt install librbd-dev - $ ./configure --with-rbd - - Build the SPDK app: - - $ make - - SPDK requires hugepages to be set up: - - $ sh -c 'echo 4096 > /sys/kernel/mm/hugepages/hugepages-2048kB/nr_hugepages' - -7. Start the gateway server daemon: - - $ python3 -m control [-c CONFIG] - - -# CLI Usage - -The CLI tool can be used to initiate a connection to the gateway and run commands to configure the NVMe targets. - -Run the tool with the -h flag to see a list of available commands: - - $ python3 -m control.cli -h - usage: python3 -m control.cli [-h] [-c CONFIG] - {create_bdev,delete_bdev,create_subsystem,delete_subsystem,add_namespace,remove_namespace,add_host,remove_host,create_listener,delete_listener,get_subsystems} ... - - CLI to manage NVMe gateways - - positional arguments: - {create_bdev,delete_bdev,create_subsystem,delete_subsystem,add_namespace,remove_namespace,add_host,remove_host,create_listener,delete_listener,get_subsystems} - - optional arguments: - -h, --help show this help message and exit - -c CONFIG, --config CONFIG - Path to config file - -Example: - - $ python3 -m control.cli create_bdev -h - usage: python3 -m control.cli create_bdev [-h] -i IMAGE -p POOL [-b BDEV_NAME] [-s BLOCK_SIZE] - - optional arguments: - -h, --help show this help message and exit - -i IMAGE, --image IMAGE - RBD image name - -p POOL, --pool POOL Ceph pool name - -b BDEV_NAME, --bdev BDEV_NAME - Bdev name - -s BLOCK_SIZE, --block_size BLOCK_SIZE - Block size - -# mTLS Configuration for testing purposes +# Ceph NVMe over Fabrics (NVMe-oF) Gateway + +This project provides block storage on top of Ceph for platforms (e.g.: VMWare) without +native Ceph support (RBD), replacing existing approaches (iSCSI) with a newer and [more +versatile standard (NVMe-oF)](https://nvmexpress.org/specification/nvme-of-specification/). + +Essentially, it allows to export existing RBD images as NVMe-oF namespaces. +The [creation and management of RBD images](https://docs.ceph.com/en/latest/rbd/) is not within the scope of this component. + + +## Installation +### Requirements + +* Linux-based system with at least 16 GB of available RAM. [Fedora 37](https://fedoraproject.org/) is recommended. +* `moby-engine` (`docker-engine`) (v20.10) and `docker-compose` (v1.29). These versions are just indicative. +* `make` (only needed to launch `docker-compose` commands). +* SELinux in permissive mode. + +To install these dependencies in Fedora: +```bash +sudo dnf install -y make moby-engine docker-compose +``` +Some [post-installation steps](https://docs.docker.com/engine/install/linux-postinstall/) are be required to use `docker` with regular users: +```bash +sudo groupadd docker +sudo usermod -aG docker $USER +``` +### Steps + +To launch a containerized environment with a Ceph cluster and a NVMe-oF gateway (this is not the [prescribed deployment for production purposes](https://docs.ceph.com/en/quincy/install/#recommended-methods); for testing and development tasks alone): + +1. Get this repo: + ```bash + git clone https://github.com/ceph/ceph-nvmeof.git + cd ceph-nvmeof + git submodule update --init --recursive + ``` +1. 
Configure the environment (basically to allocate huge-pages, which requires entering password): + ```bash + make setup + ``` +1. Download the container images: + ```bash + make pull + ``` +1. Deploy the containers locally: + ```bash + make up + ``` +1. Check that the deployment is up and running: + ```bash + $ make ps + + Name Command State Ports + ----------------------------------------------------------------------------------------------------------------------- + ceph sh -c ./vstart.sh --new $V ... Up (healthy) 5000/tcp, 6789/tcp, 6800/tcp, 6801/tcp, 6802/tcp, + 6803/tcp, 6804/tcp, 6805/tcp, 80/tcp + nvmeof_nvmeof_1 python3 -m control -c ceph ... Up 0.0.0.0:4420->4420/tcp,:::4420->4420/tcp, + 0.0.0.0:5500->5500/tcp,:::5500->5500/tcp, + 0.0.0.0:8009->8009/tcp,:::8009->8009/tcp + ``` +1. The environment is ready to provide block storage on Ceph via NVMe-oF. +## Usage Demo + +### Configuring the NVMe-oF Gateway + +The following command executes all the steps required to set up the NVMe-oF environment: +```bash +$ make demo + +DOCKER_BUILDKIT=1 docker-compose exec ceph-vstart-cluster bash -c "rbd info demo_image || rbd create demo_image --size 10M" +rbd: error opening image demo_image: (2) No such file or directory + +DOCKER_BUILDKIT=1 docker-compose run --rm ceph-nvmeof-cli --server-address ceph-nvmeof --server-port 5500 create_bdev --pool rbd --image demo_image --bdev demo_bdev +Creating nvmeof_ceph-nvmeof-cli_run ... done +INFO:__main__:Created bdev demo_bdev: True + +DOCKER_BUILDKIT=1 docker-compose run --rm ceph-nvmeof-cli --server-address ceph-nvmeof --server-port 5500 create_subsystem --subnqn nqn.2016-06.io.spdk:cnode1 --serial SPDK00000000000001 +Creating nvmeof_ceph-nvmeof-cli_run ... done +INFO:__main__:Created subsystem nqn.2016-06.io.spdk:cnode1: True + +DOCKER_BUILDKIT=1 docker-compose run --rm ceph-nvmeof-cli --server-address ceph-nvmeof --server-port 5500 add_namespace --subnqn nqn.2016-06.io.spdk:cnode1 --bdev demo_bdev +Creating nvmeof_ceph-nvmeof-cli_run ... done +INFO:__main__:Added namespace 1 to nqn.2016-06.io.spdk:cnode1: True + +DOCKER_BUILDKIT=1 docker-compose run --rm ceph-nvmeof-cli --server-address ceph-nvmeof --server-port 5500 create_listener --subnqn nqn.2016-06.io.spdk:cnode1 -s 4420 +Creating nvmeof_ceph-nvmeof-cli_run ... done +INFO:__main__:Created nqn.2016-06.io.spdk:cnode1 listener: True + +DOCKER_BUILDKIT=1 docker-compose run --rm ceph-nvmeof-cli --server-address ceph-nvmeof --server-port 5500 add_host --subnqn nqn.2016-06.io.spdk:cnode1 --host "*" +Creating nvmeof_ceph-nvmeof-cli_run ... done +INFO:__main__:Allowed open host access to nqn.2016-06.io.spdk:cnode1: True +``` +#### Manual Steps + +The same configuration can also be manually run: + +1. First of all, let's create the `nvmeof-cli` shortcut to interact with the NVMe-oF gateway: + ```bash + eval $(make alias) + ``` +1. In order to start working with the NVMe-oF gateway, we need to create an RBD image first (`demo_image` in the `rbd` pool): + ```bash + make rbd + ``` +1. Create a bdev (Block Device) from an RBD image: + ```bash + nvmeof-cli create_bdev --pool rbd --image demo_image --bdev demo_bdev + ``` +1. Create a subsystem: + ```bash + nvmeof-cli create_subsystem --subnqn nqn.2016-06.io.spdk:cnode1 --serial SPDK00000000000001 + ``` +1. Add a namespace: + ```bash + nvmeof-cli add_namespace --subnqn nqn.2016-06.io.spdk:cnode1 --bdev demo_bdev + ``` +1. 
Create a listener so that NVMe initiators can connect to: + ```bash + nvmeof-cli create_listener ---subnqn nqn.2016-06.io.spdk:cnode1 -s 4420 + ``` +1. Define which hosts can connect: + ```bash + nvmeof-cli add_host --subnqn nqn.2016-06.io.spdk:cnode1 --host "*" + ``` + + +### Mounting the NVMe-oF volume + +Once the NVMe-oF target is + +1. Install requisite packages: + ```bash + sudo dnf install nvme-cli + sudo modprobe nvme-fabrics + ``` +1. Ensure that the listener is reachable from the NVMe-oF initiator: + ```bash + $ sudo nvme discover -t tcp -a 192.168.13.3 -s 4420 + + Discovery Log Number of Records 1, Generation counter 2 + =====Discovery Log Entry 0====== + trtype: tcp + adrfam: ipv4 + subtype: nvme subsystem + treq: not required + portid: 0 + trsvcid: 4420 + subnqn: nqn.2016-06.io.spdk:cnode1 + traddr: 192.168.13.3 + eflags: not specified + sectype: none + ``` + +1. Connect to desired subsystem: + ```bash + sudo nvme connect -t tcp --traddr 192.168.13.3 -s 4420 -n nqn.2016-06.io.spdk:cnode1 + ``` +1. List the available NVMe targets: + ```bash + $ sudo nvme list + Node Generic SN Model Namespace Usage Format FW Rev + --------------------- --------------------- -------------------- ---------------------------------------- --------- -------------------------- ---------------- -------- + /dev/nvme1n1 /dev/ng1n1 SPDK00000000000001 SPDK bdev Controller 1 10,49 MB / 10,49 MB 4 KiB + 0 B 23.01 + ... + ``` +1. Create a filesystem on the desired target: + ```bash + $ sudo mkfs /dev/nvme1n1 + mke2fs 1.46.5 (30-Dec-2021) + Discarding device blocks: done + Creating filesystem with 2560 4k blocks and 2560 inodes + + Allocating group tables: done + Writing inode tables: done + Writing superblocks and filesystem accounting information: done + ``` +1. Mount and use the storage volume + ```bash + $ mkdir /mnt/nvmeof + $ sudo mount /dev/nvme1n1 /mnt/nvmeof + + $ ls /mnt/nvmeof + lost+found + + $ sudo bash -c "echo Hello NVMe-oF > /mnt/nvmeof/hello.txt" + + $ cat /mnt/nvmeof/hello.txt + Hello NVMe-oF + ``` + +## Advanced + +### mTLS Configuration for testing purposes For testing purposes, self signed certificates and keys can be generated locally using OpenSSL. For the server, generate credentials for server name 'my.server' in files called server.key and server.crt: - - $ openssl req -x509 -newkey rsa:4096 -nodes -keyout server.key -out server.crt -days 3650 -subj '/CN=my.server' +```bash +$ openssl req -x509 -newkey rsa:4096 -nodes -keyout server.key -out server.crt -days 3650 -subj '/CN=my.server' +``` For client: - - $ openssl req -x509 -newkey rsa:4096 -nodes -keyout client.key -out client.crt -days 3650 -subj '/CN=client1' +```bash +$ openssl req -x509 -newkey rsa:4096 -nodes -keyout client.key -out client.crt -days 3650 -subj '/CN=client1' +``` Indicate the location of the keys and certificates in the config file: +```ini +[mtls] - [mtls] - - server_key = ./server.key - client_key = ./client.key - server_cert = ./server.crt - client_cert = ./client.crt - -# Example NVMe volume access - -1. Start the gateway server: - - $ python3 -m control - INFO:root:SPDK PATH: /path/to/spdk - INFO:root:Starting /path/to/spdk/tgt/nvmf_tgt all -u - INFO:root:Attempting to initialize SPDK: server_addr: /var/tmp/spdk.sock, port: 5260, conn_retries: 3, timeout: 60.0 - INFO: Setting log level to ERROR - INFO:JSONRPCClient(/var/tmp/spdk.sock):Setting log level to ERROR +server_key = ./server.key +client_key = ./client.key +server_cert = ./server.crt +client_cert = ./client.crt +``` +### Huge-Pages -2. 
Run the CLI (ensure a ceph pool 'rbd' with an rbdimage 'mytestdevimage' is created prior to this step): +[DPDK requires hugepages](https://doc.dpdk.org/guides/linux_gsg/sys_reqs.html#linux-gsg-hugepages) to be set up: - $ python3 -m control.cli create_bdev -i mytestdevimage -p rbd -b Ceph0 - INFO:root:Created bdev Ceph0: True - - $ python3 -m control.cli create_subsystem -n nqn.2016-06.io.spdk:cnode1 -s SPDK00000000000001 - INFO:root:Created subsystem nqn.2016-06.io.spdk:cnode1: True - - $ python3 -m control.cli add_namespace -n nqn.2016-06.io.spdk:cnode1 -b Ceph0 - INFO:root:Added namespace 1 to nqn.2016-06.io.spdk:cnode1: True - - $ python3 -m control.cli add_host -n nqn.2016-06.io.spdk:cnode1 -t '*' - INFO:root:Allowed open host access to nqn.2016-06.io.spdk:cnode1: True - - $ python3 -m control.cli create_listener -n nqn.2016-06.io.spdk:cnode1 -s 5001 - INFO:root:Created nqn.2016-06.io.spdk:cnode1 listener: True +```bash +sh -c 'echo 4096 > /sys/kernel/mm/hugepages/hugepages-2048kB/nr_hugepages' +``` -3. On the storage client system (ubuntu-21.04): +This is automatically done in the `make setup` step. - - Install requisite packages +## Development - $ apt install nvme-cli - $ modprobe nvme-fabrics +### Set-up +The development environment relies on containers (specifically `docker-compose`) for building and running the components. This has the benefit that, besides `docker` and `docker-compose`, no more dependencies need to be installed in the host environment. - - Run nvme command to discover available subsystems +Once the GitHub repo has been cloned, remember to initialize its git submodules (`spdk`, which in turn depends on other submodules): +```bash +git submodule update --init --recursive +``` - $ nvme discover -t tcp -a 192.168.50.4 -s 5001 +For building, SELinux might cause issues, so it's better to set it to permissive mode: +```bash +# Change it for the running session +sudo setenforce 0 - Discovery Log Number of Records 1, Generation counter 6 - =====Discovery Log Entry 0====== - trtype: tcp - adrfam: ipv4 - subtype: nvme subsystem - treq: not required - portid: 0 - trsvcid: 5001 - subnqn: nqn.2016-06.io.spdk:cnode1 - traddr: 192.168.50.4 - sectype: none +# Persist the change across boots +sudo sed -i -E 's/^SELINUX=enforcing$/SELINUX=permissive/' /etc/selinux/config +``` +### Building - - Connect to desired subsystem +To avoid having to deal with `docker-compose` commands, this provides a `Makefile` that wraps those as regular `make` targets: - $ nvme connect -t tcp --traddr 192.168.50.4 -s 5001 -n nqn.2016-06.io.spdk:cnode1 +To build the container images from the local sources: - - List targets that are available +```bash +make build +``` - $ nvme list - Node SN Model Namespace Usage Format FW Rev - ---------------- -------------------- ---------------------------------------- --------- -------------------------- ---------------- -------- - /dev/nvme0n1 SPDK00000000000001 SPDK bdev Controller 1 6.44 GB / 6.44 GB 4 KiB + 0 B 21.04 +The resulting images should be like these: +```bash +$ docker images +REPOSITORY TAG IMAGE ID CREATED SIZE +quay.io/ceph/nvmeof-cli 0.0.1 8277cd0cce2d 7 minutes ago 995MB +quay.io/ceph/nvmeof 0.0.1 34d7230dcce8 7 minutes ago 439MB +quay.io/ceph/vstart-cluster 17.2.6 cb2560975055 8 minutes ago 1.27GB +quay.io/ceph/spdk 23.01 929e22e22ffd 8 minutes ago 342MB +``` - - Create a filesystem on the desired target +* `spdk` is an intermediate image that contains an RPM-based installation of spdk with `rbd` support enabled. 
+* `nvmeof` is built from the `spdk` container by installing the Python package. +* `nvmeof-cli` provides a containerized environment to run CLI commands that manage the `nvmeof` service via gRPC. +* `ceph` is a sandboxed (vstart-based) Ceph cluster for testing purposes. - $ mkfs /dev/nvme0n1 +For building a specific service: +```bash +make build SVC=nvmeof +``` - mke2fs 1.45.7 (28-Jan-2021) - Creating filesystem with 1572864 4k blocks and 393216 inodes - Filesystem UUID: 1308f6ff-621b-4d17-b127-65eded31abe2 - Superblock backups stored on blocks: - 32768, 98304, 163840, 229376, 294912, 819200, 884736 +## Troubleshooting - Allocating group tables: done - Writing inode tables: done - Writing superblocks and filesystem accounting information: done +## Contributing and Support - - Mount and use the storage volume +See [`CONTRIBUTING.md`](CONTRIBUTING.md). - $ mount /dev/nvme0n1 /mnt +## Code of Conduct - $ ls /mnt - lost+found +See [Ceph's Code of Conduct](https://ceph.io/en/code-of-conduct/). - $ echo "NVMe volume" > /mnt/test.txt +## License - $ ls /mnt - lost+found test.txt - +See [`LICENSE`](LICENSE). \ No newline at end of file diff --git a/ceph-nvmeof.conf b/ceph-nvmeof.conf index c40f065e..84af0756 100644 --- a/ceph-nvmeof.conf +++ b/ceph-nvmeof.conf @@ -8,34 +8,31 @@ # [gateway] - name = group = -addr = 127.0.0.1 +addr = 192.168.13.3 port = 5500 enable_auth = False state_update_notify = True state_update_interval_sec = 5 [ceph] - pool = rbd +# config_file = /var/lib/ceph/ceph.conf config_file = /etc/ceph/ceph.conf [mtls] - server_key = ./server.key client_key = ./client.key server_cert = ./server.crt client_cert = ./client.crt [spdk] - -spdk_path = /path/to/spdk -tgt_path = spdk/build/bin/nvmf_tgt +tgt_path = /usr/local/bin/nvmf_tgt rpc_socket = /var/tmp/spdk.sock +#tgt_cmd_extra_args = --env-context="--no-huge -m1024" --iova-mode=va timeout = 60.0 -log_level = ERROR +log_level = WARN # conn_retries = 10 # Example value: -m 0x3 -L all diff --git a/control/__main__.py b/control/__main__.py index 240d3457..78b49536 100644 --- a/control/__main__.py +++ b/control/__main__.py @@ -19,7 +19,8 @@ logger.setLevel(logging.DEBUG) parser = argparse.ArgumentParser(prog="python3 -m control", - description="Manage NVMe gateways") + description="Manage NVMe gateways", + formatter_class=argparse.ArgumentDefaultsHelpFormatter) parser.add_argument( "-c", "--config", diff --git a/control/cli.py b/control/cli.py index a6b85571..e252a14e 100644 --- a/control/cli.py +++ b/control/cli.py @@ -11,9 +11,9 @@ import grpc import json import logging -from .generated import gateway_pb2_grpc as pb2_grpc -from .generated import gateway_pb2 as pb2 -from .config import GatewayConfig +import sys +from proto import gateway_pb2_grpc as pb2_grpc +from proto import gateway_pb2 as pb2 def argument(*name_or_flags, **kwargs): @@ -35,11 +35,29 @@ def __init__(self): prog="python3 -m control.cli", description="CLI to manage NVMe gateways") self.parser.add_argument( - "-c", - "--config", - default="ceph-nvmeof.conf", + "--server-address", + default="localhost", type=str, - help="Path to config file", + help="Server address", + ) + self.parser.add_argument( + "--server-port", + default=5500, + type=int, + help="Server port", + ) + self.parser.add_argument( + "--client-key", + type=argparse.FileType("rb"), + help="Path to the client key file") + self.parser.add_argument( + "--client-cert", + type=argparse.FileType("rb"), + help="Path to the client certificate file") + self.parser.add_argument( + "--server-cert", + 
type=argparse.FileType("rb"), + help="Path to the server certificate file" ) self.subparsers = self.parser.add_subparsers(dest="subcommand") @@ -93,24 +111,22 @@ def stub(self): raise AttributeError("stub is None. Set with connect method.") return self._stub - def connect(self, config): + def connect(self, host, port, client_key, client_cert, server_cert): """Connects to server and sets stub.""" - - # Read in configuration parameters - host = config.get("gateway", "addr") - port = config.get("gateway", "port") - enable_auth = config.getboolean("gateway", "enable_auth") server = "{}:{}".format(host, port) - if enable_auth: - + if client_key and client_cert: # Create credentials for mutual TLS and a secure channel - with open(config.get("mtls", "client_cert"), "rb") as f: + self.logger.info("Enable server auth since both --client-key and --client-cert are provided") + with client_cert as f: client_cert = f.read() - with open(config.get("mtls", "client_key"), "rb") as f: + with client_key as f: client_key = f.read() - with open(config.get("mtls", "server_cert"), "rb") as f: - server_cert = f.read() + if server_cert: + with server_cert as f: + server_cert = f.read() + else: + self.logger.warn("No server certificate file was provided") credentials = grpc.ssl_channel_credentials( root_certificates=server_cert, @@ -338,14 +354,18 @@ def get_subsystems(self, args): def main(args=None): client = GatewayClient() parsed_args = client.cli.parser.parse_args(args) - config = GatewayConfig(parsed_args.config) - client.connect(config) if parsed_args.subcommand is None: client.cli.parser.print_help() - else: - call_function = getattr(client, parsed_args.func.__name__) - call_function(parsed_args) + return 0 + server_address = parsed_args.server_address + server_port = parsed_args.server_port + client_key = parsed_args.client_key + client_cert = parsed_args.client_cert + server_cert = parsed_args.server_cert + client.connect(server_address, server_port, client_key, client_cert, server_cert) + call_function = getattr(client, parsed_args.func.__name__) + call_function(parsed_args) if __name__ == "__main__": - main() + sys.exit(main()) diff --git a/control/grpc.py b/control/grpc.py index 6733a45a..08561d79 100644 --- a/control/grpc.py +++ b/control/grpc.py @@ -12,9 +12,13 @@ import json import uuid import logging + +import spdk.rpc.bdev as rpc_bdev +import spdk.rpc.nvmf as rpc_nvmf + from google.protobuf import json_format -from .generated import gateway_pb2 as pb2 -from .generated import gateway_pb2_grpc as pb2_grpc +from proto import gateway_pb2 as pb2 +from proto import gateway_pb2_grpc as pb2_grpc class GatewayService(pb2_grpc.GatewayServicer): @@ -27,16 +31,14 @@ class GatewayService(pb2_grpc.GatewayServicer): logger: Logger instance to track server events gateway_name: Gateway identifier gateway_state: Methods for target state persistence - spdk_rpc: Module methods for SPDK spdk_rpc_client: Client of SPDK RPC server """ - def __init__(self, config, gateway_state, spdk_rpc, spdk_rpc_client): + def __init__(self, config, gateway_state, spdk_rpc_client): self.logger = logging.getLogger(__name__) self.config = config self.gateway_state = gateway_state - self.spdk_rpc = spdk_rpc self.spdk_rpc_client = spdk_rpc_client self.gateway_name = self.config.get("gateway", "name") @@ -51,7 +53,7 @@ def create_bdev(self, request, context=None): f" {request.rbd_pool_name}/{request.rbd_image_name}" f" with block size {request.block_size}") try: - bdev_name = self.spdk_rpc.bdev.bdev_rbd_create( + bdev_name = 
rpc_bdev.bdev_rbd_create( self.spdk_rpc_client, name=name, pool_name=request.rbd_pool_name, @@ -84,7 +86,7 @@ def delete_bdev(self, request, context=None): self.logger.info(f"Received request to delete bdev {request.bdev_name}") try: - ret = self.spdk_rpc.bdev.bdev_rbd_delete( + ret = rpc_bdev.bdev_rbd_delete( self.spdk_rpc_client, request.bdev_name, ) @@ -113,7 +115,7 @@ def create_subsystem(self, request, context=None): self.logger.info( f"Received request to create subsystem {request.subsystem_nqn}") try: - ret = self.spdk_rpc.nvmf.nvmf_create_subsystem( + ret = rpc_nvmf.nvmf_create_subsystem( self.spdk_rpc_client, nqn=request.subsystem_nqn, serial_number=request.serial_number, @@ -147,7 +149,7 @@ def delete_subsystem(self, request, context=None): self.logger.info( f"Received request to delete subsystem {request.subsystem_nqn}") try: - ret = self.spdk_rpc.nvmf.nvmf_delete_subsystem( + ret = rpc_nvmf.nvmf_delete_subsystem( self.spdk_rpc_client, nqn=request.subsystem_nqn, ) @@ -176,7 +178,7 @@ def add_namespace(self, request, context=None): self.logger.info(f"Received request to add {request.bdev_name} to" f" {request.subsystem_nqn}") try: - nsid = self.spdk_rpc.nvmf.nvmf_subsystem_add_ns( + nsid = rpc_nvmf.nvmf_subsystem_add_ns( self.spdk_rpc_client, nqn=request.subsystem_nqn, bdev_name=request.bdev_name, @@ -212,7 +214,7 @@ def remove_namespace(self, request, context=None): self.logger.info(f"Received request to remove {request.nsid} from" f" {request.subsystem_nqn}") try: - ret = self.spdk_rpc.nvmf.nvmf_subsystem_remove_ns( + ret = rpc_nvmf.nvmf_subsystem_remove_ns( self.spdk_rpc_client, nqn=request.subsystem_nqn, nsid=request.nsid, @@ -244,7 +246,7 @@ def add_host(self, request, context=None): if request.host_nqn == "*": # Allow any host access to subsystem self.logger.info(f"Received request to allow any host to" f" {request.subsystem_nqn}") - ret = self.spdk_rpc.nvmf.nvmf_subsystem_allow_any_host( + ret = rpc_nvmf.nvmf_subsystem_allow_any_host( self.spdk_rpc_client, nqn=request.subsystem_nqn, disable=False, @@ -254,7 +256,7 @@ def add_host(self, request, context=None): self.logger.info( f"Received request to add host {request.host_nqn} to" f" {request.subsystem_nqn}") - ret = self.spdk_rpc.nvmf.nvmf_subsystem_add_host( + ret = rpc_nvmf.nvmf_subsystem_add_host( self.spdk_rpc_client, nqn=request.subsystem_nqn, host=request.host_nqn, @@ -289,7 +291,7 @@ def remove_host(self, request, context=None): self.logger.info( f"Received request to disable any host access to" f" {request.subsystem_nqn}") - ret = self.spdk_rpc.nvmf.nvmf_subsystem_allow_any_host( + ret = rpc_nvmf.nvmf_subsystem_allow_any_host( self.spdk_rpc_client, nqn=request.subsystem_nqn, disable=True, @@ -299,7 +301,7 @@ def remove_host(self, request, context=None): self.logger.info( f"Received request to remove host_{request.host_nqn} from" f" {request.subsystem_nqn}") - ret = self.spdk_rpc.nvmf.nvmf_subsystem_remove_host( + ret = rpc_nvmf.nvmf_subsystem_remove_host( self.spdk_rpc_client, nqn=request.subsystem_nqn, host=request.host_nqn, @@ -345,7 +347,7 @@ def create_listener(self, request, context=None): else: traddr = request.traddr - ret = self.spdk_rpc.nvmf.nvmf_subsystem_add_listener( + ret = rpc_nvmf.nvmf_subsystem_add_listener( self.spdk_rpc_client, nqn=request.nqn, trtype=request.trtype, @@ -399,7 +401,7 @@ def delete_listener(self, request, context=None): else: traddr = request.traddr - ret = self.spdk_rpc.nvmf.nvmf_subsystem_remove_listener( + ret = rpc_nvmf.nvmf_subsystem_remove_listener( self.spdk_rpc_client, 
nqn=request.nqn, trtype=request.trtype, @@ -435,7 +437,7 @@ def get_subsystems(self, request, context): self.logger.info(f"Received request to get subsystems") try: - ret = self.spdk_rpc.nvmf.nvmf_get_subsystems(self.spdk_rpc_client) + ret = rpc_nvmf.nvmf_get_subsystems(self.spdk_rpc_client) self.logger.info(f"get_subsystems: {ret}") except Exception as ex: self.logger.error(f"get_subsystems failed with: \n {ex}") diff --git a/control/server.py b/control/server.py index 596586bf..ab7aa8f5 100644 --- a/control/server.py +++ b/control/server.py @@ -20,8 +20,13 @@ import logging from concurrent import futures from google.protobuf import json_format -from .generated import gateway_pb2 as pb2 -from .generated import gateway_pb2_grpc as pb2_grpc + +import spdk.rpc +import spdk.rpc.client as rpc_client +import spdk.rpc.nvmf as rpc_nvmf + +from proto import gateway_pb2 as pb2 +from proto import gateway_pb2_grpc as pb2_grpc from .state import GatewayState, LocalGatewayState, OmapGatewayState, GatewayStateHandler from .grpc import GatewayService @@ -45,7 +50,6 @@ class GatewayServer: logger: Logger instance to track server events gateway_rpc: GatewayService implementation server: gRPC server instance to receive gateway client requests - spdk_rpc: Module methods for SPDK spdk_rpc_client: Client of SPDK RPC server spdk_rpc_ping_client: Ping client of SPDK RPC server spdk_process: Subprocess running SPDK NVMEoF target application @@ -86,6 +90,7 @@ def __exit__(self, exc_type, exc_value, traceback): def serve(self): """Starts gateway server.""" + self.logger.debug("Starting serve") # Start SPDK self._start_spdk() @@ -96,7 +101,7 @@ def serve(self): gateway_state = GatewayStateHandler(self.config, local_state, omap_state, self.gateway_rpc_caller) self.gateway_rpc = GatewayService(self.config, gateway_state, - self.spdk_rpc, self.spdk_rpc_client) + self.spdk_rpc_client) self.server = grpc.server(futures.ThreadPoolExecutor(max_workers=1)) pb2_grpc.add_GatewayServicer_to_server(self.gateway_rpc, self.server) @@ -145,20 +150,15 @@ def _add_server_listener(self): def _start_spdk(self): """Starts SPDK process.""" - # Get path and import SPDK's RPC modules - spdk_path = self.config.get("spdk", "spdk_path") - sys.path.append(os.path.join(spdk_path, "spdk/python")) - self.logger.info(f"SPDK PATH: {spdk_path}") - import spdk.rpc as spdk_rpc - self.spdk_rpc = spdk_rpc - # Start target - tgt_path = self.config.get("spdk", "tgt_path") + self.logger.debug("Configuring server") + spdk_tgt_path = self.config.get("spdk", "tgt_path") + self.logger.info(f"SPDK Target Path: {spdk_tgt_path}") spdk_rpc_socket = self.config.get("spdk", "rpc_socket") + self.logger.info(f"SPDK Socket: {spdk_rpc_socket}") spdk_tgt_cmd_extra_args = self.config.get_with_default( "spdk", "tgt_cmd_extra_args", "") - spdk_cmd = os.path.join(spdk_path, tgt_path) - cmd = [spdk_cmd, "-u", "-r", spdk_rpc_socket] + cmd = [spdk_tgt_path, "-u", "-r", spdk_rpc_socket] if spdk_tgt_cmd_extra_args: cmd += shlex.split(spdk_tgt_cmd_extra_args) self.logger.info(f"Starting {' '.join(cmd)}") @@ -180,14 +180,14 @@ def _start_spdk(self): f" conn_retries: {conn_retries}, timeout: {timeout}", }) try: - self.spdk_rpc_client = self.spdk_rpc.client.JSONRPCClient( + self.spdk_rpc_client = rpc_client.JSONRPCClient( spdk_rpc_socket, None, timeout, log_level=log_level, conn_retries=conn_retries, ) - self.spdk_rpc_ping_client = self.spdk_rpc.client.JSONRPCClient( + self.spdk_rpc_ping_client = rpc_client.JSONRPCClient( spdk_rpc_socket, None, timeout, @@ -221,7 +221,7 @@ def 
_create_transport(self, trtype): raise try: - status = self.spdk_rpc.nvmf.nvmf_create_transport( + status = rpc_nvmf.nvmf_create_transport( self.spdk_rpc_client, **args) except Exception as ex: self.logger.error( @@ -241,7 +241,7 @@ def keep_alive(self): def _ping(self): """Confirms communication with SPDK process.""" try: - ret = self.spdk_rpc.spdk_get_version(self.spdk_rpc_ping_client) + ret = spdk.rpc.spdk_get_version(self.spdk_rpc_ping_client) return True except Exception as ex: self.logger.error(f"spdk_get_version failed with: \n {ex}") diff --git a/control/state.py b/control/state.py index cd0c42ba..3d7928b1 100644 --- a/control/state.py +++ b/control/state.py @@ -146,7 +146,7 @@ def reset(self, omap_state): class OmapGatewayState(GatewayState): """Persists gateway NVMeoF target state to an OMAP object. - Handles reads/writes of persistent NVMeoF target state data in key/value + Handles reads/writes of persistent NVMeoF target state data in key/value format within an OMAP object. Class attributes: diff --git a/docker-compose.yaml b/docker-compose.yaml new file mode 100644 index 00000000..b2f31519 --- /dev/null +++ b/docker-compose.yaml @@ -0,0 +1,141 @@ +version: "3.8" +services: + spdk: + image: quay.io/ceph/spdk:$SPDK_VERSION + profiles: + - build + build: + context: spdk/ + dockerfile: ../Dockerfile.spdk + args: + SPDK_VERSION: + SPDK_CEPH_VERSION: + SPDK_PKGDEP_ARGS: + SPDK_CONFIGURE_ARGS: + SPDK_MAKEFLAGS: + SPDK_NAME: + SPDK_SUMMARY: + SPDK_DESCRIPTION: + SPDK_URL: + SPDK_MAINTAINER: $MAINTAINER + BUILD_DATE: + SPDK_GIT_REPO: + SPDK_GIT_BRANCH: + SPDK_GIT_COMMIT: + labels: + io.ceph.nvmeof: + ceph: + image: quay.io/ceph/vstart-cluster:$CEPH_CLUSTER_VERSION + container_name: ceph + build: + context: . + dockerfile: Dockerfile.ceph + args: + CEPH_CLUSTER_VERSION: + labels: + io.ceph.nvmeof: + environment: + CEPH_VSTART_ARGS: + VSTART_ARGS: --without-dashboard --memstore + TOUCHFILE: /tmp/ceph.touch + entrypoint: >- + sh -c './vstart.sh --new $$VSTART_ARGS && + ceph osd pool create rbd && + sleep infinity' + healthcheck: + test: ceph osd pool stats rbd + start_period: 6s + interval: 3s + volumes: + - ceph-conf:/etc/ceph + networks: + default: + ipv4_address: 192.168.13.2 + nvmeof-base: + image: quay.io/ceph/nvmeof:$NVMEOF_VERSION + build: + context: . 
+ args: + NVMEOF_TARGET: gateway + NVMEOF_SPDK_VERSION: + NVMEOF_NAME: + NVMEOF_SUMMARY: + NVMEOF_DESCRIPTION: + NVMEOF_URL: + NVMEOF_VERSION: + NVMEOF_MAINTAINER: $MAINTAINER + NVMEOF_TAGS: + NVMEOF_WANTS: + NVMEOF_EXPOSE_SERVICES: + BUILD_DATE: + NVMEOF_GIT_REPO: + NVMEOF_GIT_BRANCH: + NVMEOF_GIT_COMMIT: + labels: + io.ceph.nvmeof: + hostname: nvmeof + volumes: + # sudo bash -c 'echo 1024 > /sys/devices/system/node/node0/hugepages/hugepages-2048kB/nr_hugepages' + # https://spdk.io/doc/containers.html + # TODO: Pending of https://github.com/spdk/spdk/issues/2973 + - /dev/hugepages:/dev/hugepages + - ceph-conf:/etc/ceph:ro + - $NVMEOF_CONFIG:/src/ceph-nvmeof.conf + cap_add: + - SYS_ADMIN # huge-pages + - CAP_SYS_NICE # RTE + - SYS_PTRACE # gdb + networks: + default: + ipv4_address: 192.168.13.3 + ports: + - "4420:4420" # I/O controllers + - "5500:5500" # Gateway + - "8009:8009" # Discovery + nvmeof: + extends: + service: nvmeof-base + depends_on: + ceph: + condition: service_healthy + nvmeof-devel: + # Runs from source code in current dir + extends: + service: nvmeof-base + depends_on: + ceph: + condition: service_healthy + volumes: + - ./control:/src/control + nvmeof-cli: + image: quay.io/ceph/nvmeof-cli:$NVMEOF_VERSION + build: + context: . + args: + NVMEOF_TARGET: cli + NVMEOF_NAME: $NVMEOF_CLI_NAME + NVMEOF_SUMMARY: $NVMEOF_CLI_SUMMARY + NVMEOF_DESCRIPTION: $NVMEOF_CLI_DESCRIPTION + NVMEOF_URL: + NVMEOF_VERSION: + NVMEOF_MAINTAINER: $MAINTAINER + NVMEOF_TAGS: "" + NVMEOF_WANTS: "" + NVMEOF_EXPOSE_SERVICES: "" + BUILD_DATE: + NVMEOF_GIT_REPO: + NVMEOF_GIT_BRANCH: + NVMEOF_GIT_COMMIT: + labels: + io.ceph.nvmeof: +volumes: + ceph-conf: + labels: + io.ceph.nvmeof: +networks: + default: + ipam: + config: + - subnet: 192.168.13.0/24 + labels: + io.ceph.nvmeof: \ No newline at end of file diff --git a/mk/autohelp.mk b/mk/autohelp.mk new file mode 100644 index 00000000..f55f6f37 --- /dev/null +++ b/mk/autohelp.mk @@ -0,0 +1,24 @@ +# Auto-generate Makefile help from comments (##) in targets and global +# variables. +# Usage: +# hello: ## This target prints Hello World +# LANGUAGE := esperanto ## Set the language for the Hello World message + +autohelp: BOLD != tput bold +autohelp: NORMAL != tput sgr0 +autohelp: + @echo $(AUTOHELP_SUMMARY) + @echo + @echo "Usage:" + @echo " make $(BOLD)[target] [target]$(NORMAL) ... $(BOLD)OPTION$(NORMAL)=value ..." 
+ @echo + @echo Targets: + @for file in $(MAKEFILE_LIST); do \ + awk 'BEGIN {FS = "## "}; /^##/ {printf "\n %s\n", $$2}' $$file; \ + awk 'BEGIN {FS = ":.*?## "}; \ + /^\w+:.*##/ {printf " $(BOLD)%-15s$(NORMAL) %s\n", $$1, $$2}' $$file | sort; \ + grep -q "^\w.*=.*## " $$file && echo -e "\n Options:"; \ + awk 'BEGIN {FS = "( [!?]?= | ?## )"}; \ + /^\w.*=.*## / {printf " $(BOLD)%-15s$(NORMAL) %s (Default: %s)\n", $$1, $$3, $$2} \ + ' $$file | sort; \ + done diff --git a/mk/containerized.mk b/mk/containerized.mk new file mode 100644 index 00000000..158dc096 --- /dev/null +++ b/mk/containerized.mk @@ -0,0 +1,66 @@ +## Deployment commands (docker-compose): + +# Docker and docker-compose specific commands +DOCKER = docker +DOCKER_COMPOSE = docker-compose ## Docker-compose command +DOCKER_COMPOSE_COMMANDS = pull build push up run exec ps top images logs port \ + pause unpause stop restart down events + +SVC ?= ## Docker-compose services +OPTS ?= ## Docker-compose subcommand options +SCALE ?= 1 ## Number of instances +CMD ?= ## Command to run with run/exec targets + +.PHONY: $(DOCKER_COMPOSE_COMMANDS) shell +$(DOCKER_COMPOSE_COMMANDS): + $(DOCKER_COMPOSE_ENV) $(DOCKER_COMPOSE) $@ $(OPTS) $(SVC) $(CMD) + +pull: ## Download SVC images + +build: ## Build SVC images +build: DOCKER_COMPOSE_ENV = DOCKER_BUILDKIT=1 + +push: ## Push SVC container images to a registry. Requires previous "docker login" + +up: ## Launch services + +run: ## Run command CMD inside SVC containers +run: SVC = +run: override OPTS += --rm + +shell: ## Exec shell inside running SVC containers +shell: CMD = bash +shell: exec + +exec: ## Run command inside an existing container + +ps: ## Display status of SVC containers + +top: ## Display running processes in SVC containers + +port: ## Print public port for a port binding + +logs: ## View SVC logs +logs: MAX_LOGS = 40 +logs: OPTS += --follow --tail=$(MAX_LOGS) + +images: ## List images + +pause: ## Pause running deployment +unpause: ## Resume paused deployment + +stop: ## Stop SVC + +restart: ## Restart SVC + +down: ## Shut down deployment +down: override SVC = +down: override OPTS += --volumes --remove-orphans + +events: ## Receive real-time events from containers + +docker_compose_clean: down + $(DOCKER) system prune --all --force --volumes --filter label="io.ceph.nvmeof" + +CLEAN += docker_compose_clean +ALL += pull up ps diff --git a/mk/demo.mk b/mk/demo.mk new file mode 100644 index 00000000..3434a5d7 --- /dev/null +++ b/mk/demo.mk @@ -0,0 +1,24 @@ +## Demo: + +# rbd +RBD_IMAGE_NAME = demo_image ## Name of the RBD image +RBD_IMAGE_SIZE = 10M ## Size of the RBD image + +rbd: exec +rbd: SVC = ceph +rbd: CMD = bash -c "rbd info $(RBD_IMAGE_NAME) || rbd create $(RBD_IMAGE_NAME) --size $(RBD_IMAGE_SIZE)" + +# demo +BDEV_NAME = demo_bdev ## Name of the bdev +NQN = nqn.2016-06.io.spdk:cnode1 ## NVMe Qualified Name address +SERIAL = SPDK00000000000001 ## Serial number +LISTENER_PORT = 4420 ## Listener port + +demo: rbd ## Expose RBD_IMAGE_NAME as NVMe-oF target + $(NVMEOF_CLI) create_bdev --pool rbd --image $(RBD_IMAGE_NAME) --bdev $(BDEV_NAME) + $(NVMEOF_CLI) create_subsystem --subnqn $(NQN) --serial $(SERIAL) + $(NVMEOF_CLI) add_namespace --subnqn $(NQN) --bdev $(BDEV_NAME) + $(NVMEOF_CLI) create_listener --subnqn $(NQN) -s $(LISTENER_PORT) + $(NVMEOF_CLI) add_host --subnqn $(NQN) --host "*" + +.PHONY: demo rbd diff --git a/mk/misc.mk b/mk/misc.mk new file mode 100644 index 00000000..c9758ed7 --- /dev/null +++ b/mk/misc.mk @@ -0,0 +1,11 @@ +## Miscellaneous: + +# nvmeof_cli 
+SERVER_ADDRESS = nvmeof ## Address of the nvmeof gateway +SERVER_PORT = 5500 ## Port of the nvmeof gateway +NVMEOF_CLI = $(DOCKER_COMPOSE_ENV) $(DOCKER_COMPOSE) run --rm nvmeof-cli --server-address $(SERVER_ADDRESS) --server-port $(SERVER_PORT) + +alias: ## Print bash alias command for the nvmeof-cli. Usage: "eval $(make alias)" + @echo alias nvmeof-cli=\"$(NVMEOF_CLI)\" + +.PHONY: alias diff --git a/pdm.lock b/pdm.lock new file mode 100644 index 00000000..1b7fcef2 --- /dev/null +++ b/pdm.lock @@ -0,0 +1,147 @@ +# This file is @generated by PDM. +# It is not intended for manual editing. + +[[package]] +name = "grpcio" +version = "1.51.3" +requires_python = ">=3.7" +summary = "HTTP/2-based RPC framework" + +[[package]] +name = "grpcio-tools" +version = "1.51.3" +requires_python = ">=3.7" +summary = "Protobuf code generator for gRPC" +dependencies = [ + "grpcio>=1.51.3", + "protobuf<5.0dev,>=4.21.6", + "setuptools", +] + +[[package]] +name = "protobuf" +version = "4.22.3" +requires_python = ">=3.7" +summary = "" + +[[package]] +name = "setuptools" +version = "67.6.1" +requires_python = ">=3.7" +summary = "Easily download, build, install, upgrade, and uninstall Python packages" + +[metadata] +lock_version = "4.2" +groups = ["default"] +content_hash = "sha256:7c5ff98836a77ca8db2cdb86e2527f5325c8b4293934dbc3b50270ec6a71280b" + +[metadata.files] +"grpcio 1.51.3" = [ + {url = "https://files.pythonhosted.org/packages/04/ff/bf51e638082314fd845f48cb761bca09b7ed9b20f2f7b87a6ec64a252f6b/grpcio-1.51.3-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:2f8ff75e61e1227ba7a3f16b2eadbcc11d0a54096d52ab75a6b88cfbe56f55d1"}, + {url = "https://files.pythonhosted.org/packages/06/06/5798d75123f63a7dbe57c99f3bfb63738e0adee867b1842477915d22fd87/grpcio-1.51.3-cp310-cp310-macosx_12_0_universal2.whl", hash = "sha256:eef0450a4b5ed11feab639bf3eb1b6e23d0efa9b911bf7b06fb60e14f5f8a585"}, + {url = "https://files.pythonhosted.org/packages/0b/19/5e50ff77c8afe20089667140f020e20ce54e938bdf12082d835d9c43a7d5/grpcio-1.51.3-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3c1b9f8afa62ff265d86a4747a2990ec5a96e4efce5d5888f245a682d66eca47"}, + {url = "https://files.pythonhosted.org/packages/1f/a9/bb1e03aa439dc69782320d5b54d58e58c9f85c0f2e4a981e9cc33c1bf7d8/grpcio-1.51.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:d81528ffe0e973dc840ec73a4132fd18b8203ad129d7410155d951a0a7e4f5d0"}, + {url = "https://files.pythonhosted.org/packages/35/a1/892b5b7e1a8d976bc37f41fe78f7d087be4e5e08c04a6b7f7a9f8ccf68b4/grpcio-1.51.3-cp311-cp311-win32.whl", hash = "sha256:040eb421613b57c696063abde405916dd830203c184c9000fc8c3b3b3c950325"}, + {url = "https://files.pythonhosted.org/packages/37/33/4dd62ff8b87d025c1a7eef8d8ab3e974786bbe4b955e5acf326e071f7a16/grpcio-1.51.3-cp311-cp311-macosx_10_10_universal2.whl", hash = "sha256:68a7514b754e38e8de9075f7bb4dee919919515ec68628c43a894027e40ddec4"}, + {url = "https://files.pythonhosted.org/packages/39/0f/303234f7c0a948c9198cd90cfc22913b9560d9acda3efd3f23f8cba0cb25/grpcio-1.51.3-cp39-cp39-macosx_10_10_universal2.whl", hash = "sha256:30e09b5e0531685e176f49679b6a3b190762cc225f4565e55a899f5e14b3aa62"}, + {url = "https://files.pythonhosted.org/packages/48/b6/df70d74896f8b0e24720b0550c90f327369419bd608f6fb6f3dad8bceba8/grpcio-1.51.3-cp38-cp38-win32.whl", hash = "sha256:5694448256e3cdfe5bd358f1574a3f2f51afa20cc834713c4b9788d60b7cc646"}, + {url = 
"https://files.pythonhosted.org/packages/4a/2e/e89d0636a408a3c5c422fb314b427379eec71fbf83562a5dd936f2eebeb6/grpcio-1.51.3-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:881ecb34feabf31c6b3b9bbbddd1a5b57e69f805041e5a2c6c562a28574f71c4"}, + {url = "https://files.pythonhosted.org/packages/4b/89/6cce22849467e51dec89967455607cdf2f5383b7bb71d6ae78e65622cf09/grpcio-1.51.3-cp310-cp310-win32.whl", hash = "sha256:6604f614016127ae10969176bbf12eb0e03d2fb3d643f050b3b69e160d144fb4"}, + {url = "https://files.pythonhosted.org/packages/51/b0/f5a341b7472d7c35793a115c5925df8e04d9860b873fbd4d2afd151d2c1f/grpcio-1.51.3-cp38-cp38-linux_armv7l.whl", hash = "sha256:54b0c29bdd9a3b1e1b61443ab152f060fc719f1c083127ab08d03fac5efd51be"}, + {url = "https://files.pythonhosted.org/packages/57/fe/c6d6a173a275bc0ceec783b973fa15df84a3ef72c99ac992683df2f147db/grpcio-1.51.3-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:cd9a5e68e79c5f031500e67793048a90209711e0854a9ddee8a3ce51728de4e5"}, + {url = "https://files.pythonhosted.org/packages/58/0a/2ba9c2ae852f2b03b3fba0c8815158809d0f8b4b699d212f85cb065efc96/grpcio-1.51.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3709048fe0aa23dda09b3e69849a12055790171dab9e399a72ea8f9dfbf9ac80"}, + {url = "https://files.pythonhosted.org/packages/63/49/154520dd3d73bd1cba508e4ffebcaddfd2fea7cd94647c3143552a505501/grpcio-1.51.3-cp37-cp37m-manylinux_2_17_aarch64.whl", hash = "sha256:5eed34994c095e2bf7194ffac7381c6068b057ef1e69f8f08db77771350a7566"}, + {url = "https://files.pythonhosted.org/packages/69/12/d2e6e92ab554cb5d30f58ac897f372fd22f9d9de58bf7a2655f31d7b513c/grpcio-1.51.3-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:6972b009638b40a448d10e1bc18e2223143b8a7aa20d7def0d78dd4af4126d12"}, + {url = "https://files.pythonhosted.org/packages/73/0c/b3457d96a5f23cda9c50512e0e9c2e44003882dcaac3ea1b019753d35bde/grpcio-1.51.3-cp37-cp37m-linux_armv7l.whl", hash = "sha256:d5cd1389669a847555df54177b911d9ff6f17345b2a6f19388707b7a9f724c88"}, + {url = "https://files.pythonhosted.org/packages/74/a5/370aae45076ca9298c1f35f40e9096996ff1608afb72a78faa1c746c8027/grpcio-1.51.3-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2cd2e4cefb724cab1ba2df4b7535a9980531b9ec51b4dbb5f137a1f3a3754ef0"}, + {url = "https://files.pythonhosted.org/packages/79/c3/a27fc8f926d6fb5caedcaddc0b48b4dea4482b7230278fce83f233a2c7f7/grpcio-1.51.3-cp310-cp310-linux_armv7l.whl", hash = "sha256:f601aaeae18dab81930fb8d4f916b0da21e89bb4b5f7367ef793f46b4a76b7b0"}, + {url = "https://files.pythonhosted.org/packages/84/d5/7f50bc084a97acaa3735fd0337d6d6642c3654f0ec7a3561f0db382361d4/grpcio-1.51.3-cp37-cp37m-macosx_10_10_universal2.whl", hash = "sha256:be1bf35ce82cdbcac14e39d5102d8de4079a1c1a6a06b68e41fcd9ef64f9dd28"}, + {url = "https://files.pythonhosted.org/packages/85/3b/0b5a3609750ecd714c8e79cbb5e075113466ee3a69337f478d044d4ebc52/grpcio-1.51.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e860a3222139b41d430939bbec2ec9c3f6c740938bf7a04471a9a8caaa965a2e"}, + {url = "https://files.pythonhosted.org/packages/8b/01/c654c06210bdb3428eedfcbe25e3f7fe52953c69713f13b90d52e82b27b8/grpcio-1.51.3-cp37-cp37m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3f9a7d88082b2a17ae7bd3c2354d13bab0453899e0851733f6afa6918373f476"}, + {url = "https://files.pythonhosted.org/packages/8b/ba/a0994523e2dd3d6747ef1dc9458aba8c907953bed062f809fcbdd5f32020/grpcio-1.51.3-cp310-cp310-manylinux_2_17_aarch64.whl", hash = 
"sha256:82b0ad8ac825d4bb31bff9f638557c045f4a6d824d84b21e893968286f88246b"}, + {url = "https://files.pythonhosted.org/packages/8d/0b/6b75908dac1028c0e7d070088e10951a3fe8f5ecc189ed12175526568a89/grpcio-1.51.3-cp38-cp38-win_amd64.whl", hash = "sha256:3ea4341efe603b049e8c9a5f13c696ca37fcdf8a23ca35f650428ad3606381d9"}, + {url = "https://files.pythonhosted.org/packages/8e/eb/7c8c879bbc1eebdd44bb5b1ab0fd839a93f0032efd8331bbce973e90ece4/grpcio-1.51.3-cp311-cp311-win_amd64.whl", hash = "sha256:2a8e17286c4240137d933b8ca506465472248b4ce0fe46f3404459e708b65b68"}, + {url = "https://files.pythonhosted.org/packages/9c/77/2bb4dedcca48788daccf4494b0dd7863ca19608712c3d27055db447c4197/grpcio-1.51.3-cp38-cp38-manylinux_2_17_aarch64.whl", hash = "sha256:eafbe7501a3268d05f2e450e1ddaffb950d842a8620c13ec328b501d25d2e2c3"}, + {url = "https://files.pythonhosted.org/packages/a1/28/0b99458b0786438d93fc50eedafca6f9e35781eecbfadc45952164b2f61d/grpcio-1.51.3-cp37-cp37m-win32.whl", hash = "sha256:cd0daac21d9ef5e033a5100c1d3aa055bbed28bfcf070b12d8058045c4e821b1"}, + {url = "https://files.pythonhosted.org/packages/a2/9a/f22a1d98cb335886faab72cf28593141a994b58e3f32e702a14e0ad9345b/grpcio-1.51.3-cp37-cp37m-win_amd64.whl", hash = "sha256:2fdd6333ce96435408565a9dbbd446212cd5d62e4d26f6a3c0feb1e3c35f1cc8"}, + {url = "https://files.pythonhosted.org/packages/a4/54/b37502344596eac20781b390e8fc1dd4f7ee5b5f377a9551b0127ffde094/grpcio-1.51.3-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:36c8abbc5f837111e7bd619612eedc223c290b0903b952ce0c7b00840ea70f14"}, + {url = "https://files.pythonhosted.org/packages/a9/5c/1a95a30c168c840378d626c19f3009b9d5a2b383b9c07ead2d5642773f61/grpcio-1.51.3-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3667c06e37d6cd461afdd51cefe6537702f3d1dc5ff4cac07e88d8b4795dc16f"}, + {url = "https://files.pythonhosted.org/packages/ae/37/3be84cf4876015292a62e32b8d2d228e55b21ac3723f930e9bcaf35f6837/grpcio-1.51.3-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:54e36c2ee304ff15f2bfbdc43d2b56c63331c52d818c364e5b5214e5bc2ad9f6"}, + {url = "https://files.pythonhosted.org/packages/b6/b4/8a2753341e9340acb1ee38ad6dfc9a043c5a5d337a82fef5cb9bc17472b6/grpcio-1.51.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f7a0d0bf44438869d307f85a54f25a896ad6b4b0ca12370f76892ad732928d87"}, + {url = "https://files.pythonhosted.org/packages/bb/75/97c76bbe6adb92abb345fcd56cff0992b94593af79b79c1e6d4cb961d287/grpcio-1.51.3-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:b69c7adc7ed60da1cb1b502853db61f453fc745f940cbcc25eb97c99965d8f41"}, + {url = "https://files.pythonhosted.org/packages/be/bb/24135e0756753c4a453ab3674187c49efe5b25d5b99a41cd4407ab937621/grpcio-1.51.3-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:c02abd55409bfb293371554adf6a4401197ec2133dd97727c01180889014ba4d"}, + {url = "https://files.pythonhosted.org/packages/c6/a3/a23bd54dbd82f612b1ba50373c109f48c0fcdcfb2dc39d505d9c61f5e819/grpcio-1.51.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8de30f0b417744288cec65ec8cf84b8a57995cf7f1e84ccad2704d93f05d0aae"}, + {url = "https://files.pythonhosted.org/packages/d3/d3/b7afa10bca20d20802ba8e4d0808226e4a75348ac7e775fca16262baeab5/grpcio-1.51.3-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:165b05af77e6aecb4210ae7663e25acf234ba78a7c1c157fa5f2efeb0d6ec53c"}, + {url = "https://files.pythonhosted.org/packages/d6/cf/2944fb5ab903209d40c528b979419974053029d10f1b203bed3d74cbccb1/grpcio-1.51.3-cp310-cp310-win_amd64.whl", hash = 
"sha256:e95c7ccd4c5807adef1602005513bf7c7d14e5a41daebcf9d8d30d8bf51b8f81"}, + {url = "https://files.pythonhosted.org/packages/db/a4/93808eb20a213c6bb96b78efec5f68a7ace9cc95e5909660492aaecf95b9/grpcio-1.51.3-cp39-cp39-win_amd64.whl", hash = "sha256:22bdfac4f7f27acdd4da359b5e7e1973dc74bf1ed406729b07d0759fde2f064b"}, + {url = "https://files.pythonhosted.org/packages/de/d3/ec1d211accb6f2554c0c92127ee698b7e79732aec432b12725bb1b678796/grpcio-1.51.3-cp39-cp39-manylinux_2_17_aarch64.whl", hash = "sha256:c831f31336e81243f85b6daff3e5e8a123302ce0ea1f2726ad752fd7a59f3aee"}, + {url = "https://files.pythonhosted.org/packages/e5/96/7f33311cf45d528c2de20904a21f7577eedc15d163ddb3b93efab3b81de8/grpcio-1.51.3-cp39-cp39-linux_armv7l.whl", hash = "sha256:6c677581ce129f5fa228b8f418cee10bd28dd449f3a544ea73c8ba590ee49d0b"}, + {url = "https://files.pythonhosted.org/packages/f0/2e/39b7624a4e41685de88691b96ac2bd119a5e233e4daae5769646dcd14d62/grpcio-1.51.3-cp39-cp39-win32.whl", hash = "sha256:6c99a73a6260bdf844b2e5ddad02dcd530310f80e1fa72c300fa19c1c7496962"}, + {url = "https://files.pythonhosted.org/packages/f0/8b/1ca221fba685f4885cab34d6803b530ca8d27e8a4c42101bbe66d6a25467/grpcio-1.51.3-cp38-cp38-macosx_10_10_universal2.whl", hash = "sha256:ffaaf7e93fcb437356b5a4b23bf36e8a3d0221399ff77fd057e4bc77776a24be"}, + {url = "https://files.pythonhosted.org/packages/f9/5d/7370c37383cd2cf948f5fa5861a02f4d953d344e001d590e101f594d74aa/grpcio-1.51.3-cp311-cp311-linux_armv7l.whl", hash = "sha256:5e77ee138100f0bb55cbd147840f87ee6241dbd25f09ea7cd8afe7efff323449"}, + {url = "https://files.pythonhosted.org/packages/fa/2d/0b73ab7f96a82e7cd7287c5cc75bdc762d41e0dca28c03cc372292b765a0/grpcio-1.51.3-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:49ede0528e9dac7e8a9fe30b16c73b630ddd9a576bf4b675eb6b0c53ee5ca00f"}, + {url = "https://files.pythonhosted.org/packages/fd/6b/2bb1192d0c5ddc527ef4ba7ee18f2581149681d373e6294f8fbb1a1b2156/grpcio-1.51.3-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:200d69857f9910f7458b39b9bcf83ee4a180591b40146ba9e49314e3a7419313"}, +] +"grpcio-tools 1.51.3" = [ + {url = "https://files.pythonhosted.org/packages/02/34/2986c6c6dffffc6e79ffe1d870b12c81ecd0fdf352f39174de1145483bf1/grpcio_tools-1.51.3-cp311-cp311-macosx_10_10_universal2.whl", hash = "sha256:89a68adcb4238aba69f3a364ac02c9a46e55b9e3fd8af1c6f384079abfa9347c"}, + {url = "https://files.pythonhosted.org/packages/05/6b/e5aec7fa16d1a73316cefb3ec51a402e3269bee5a6d11fe0c3bb1f15d6bc/grpcio_tools-1.51.3-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:bb554408e0ec5ff5201013f268726d9eef8e5bd1fd4b4e09c46c0b4a9de8b64c"}, + {url = "https://files.pythonhosted.org/packages/09/77/c26101788563ea0c3a3322a3fdd47aaff9fc3aa8aac044b35c6e25db1801/grpcio_tools-1.51.3-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:78d177da43e7f6fde6715df4a3015ae13158166bc2845ac7f9cfb526eafb41b8"}, + {url = "https://files.pythonhosted.org/packages/11/66/3e69e7a6feba8fed82e09226530aee57097fc572a5078d447e95b7165571/grpcio_tools-1.51.3-cp311-cp311-win32.whl", hash = "sha256:7427939455735fbf2ea88c37f1585c9c8b809eec7b447642f34465eb4d26020b"}, + {url = "https://files.pythonhosted.org/packages/13/5d/41ba672b62fd121f125849b4a64a17a06903b59edaa22cf2a3292b1f2816/grpcio_tools-1.51.3-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b486a99bdf2722e68a9d59769389e2fb86878b6f293be5111f7678e364a0c359"}, + {url = 
"https://files.pythonhosted.org/packages/15/55/beaa921f50ad15ee23deda32a5f51a6c663b4b87c36ec7c0c2c252735c50/grpcio_tools-1.51.3-cp310-cp310-manylinux_2_17_aarch64.whl", hash = "sha256:35f885c5afd8e6a77d320f5a9624b439a93f9be2b87fa7b7948c1ad7b2ba0894"}, + {url = "https://files.pythonhosted.org/packages/1c/55/e1d3ee3ecc3b49b28c4f5b618d80e2792818b1a3a1e165ca0c569b3bf061/grpcio_tools-1.51.3-cp311-cp311-linux_armv7l.whl", hash = "sha256:b50f9b8a6482a90c1a41e731a879a130f7dea267065d0a06f47c9160ce5d01c3"}, + {url = "https://files.pythonhosted.org/packages/1c/a3/fc11a7ce111c4458f75db2caba79fb4da73e0d0096d76656d197b6be45b3/grpcio_tools-1.51.3-cp39-cp39-win32.whl", hash = "sha256:980e632710ba05e04364c6f276e905d5d367437f1ce2265ce7b96b5c1eac5693"}, + {url = "https://files.pythonhosted.org/packages/23/68/efa2b4d7a453430df81e11374b0fe9667f4267829dcf86b706d4c90048d1/grpcio_tools-1.51.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c8bbf412c357999f88d87f421fd48b4b114fc037fec7bbaed0cb7620c24a5e44"}, + {url = "https://files.pythonhosted.org/packages/29/97/b37f8528998542f8695d0be87b2e5887ba4a8c1f445a82f971c1b51fc9cc/grpcio_tools-1.51.3-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:1166744c40821bb0aa605d2af2287fac367756f858a3d18f4c3d25bc0b92757b"}, + {url = "https://files.pythonhosted.org/packages/2e/1b/8edc63842cef0b592977a46a4b04aa6004506c778678d3ef3e81d1127596/grpcio_tools-1.51.3-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:253b639fb79a4d28ce494ae40e5695bf1e2cb4a05f205fc433c46b2049ab4d99"}, + {url = "https://files.pythonhosted.org/packages/38/62/a43b23884a342f1435e338c70f4f4259c7425f4a8c35949db04ba7cdba9a/grpcio_tools-1.51.3-cp38-cp38-macosx_10_10_universal2.whl", hash = "sha256:233fc56f054424232e2086f444004413e33c699174ce6ee0e279c25227243fec"}, + {url = "https://files.pythonhosted.org/packages/3c/c8/54e8018ffbca2f1ebc45a1c602e0189d21ac32f2eca874fe5be0b925e77a/grpcio_tools-1.51.3-cp38-cp38-win_amd64.whl", hash = "sha256:584b201fb39307dcb1affcf2647656a0e6244423ef1659cc6caa3ff85c5ae5c1"}, + {url = "https://files.pythonhosted.org/packages/40/8d/0ad5c57bb2c0186ad537f8dd60bac97479f2e3f90545ea1a7d801d7debb7/grpcio_tools-1.51.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:793f9edef82f600a3324f8a3d8cd8318a8d02f28fb54f8236cbb35ce0928d186"}, + {url = "https://files.pythonhosted.org/packages/41/bd/1bcc11b060e0e702e0d7667406670346951c6dfd69502c46bc093256791d/grpcio_tools-1.51.3-cp39-cp39-win_amd64.whl", hash = "sha256:5f4c47b14e66f80365cd5667ecc2f7fb0eb91e02c4e54362041b758feaa00511"}, + {url = "https://files.pythonhosted.org/packages/57/10/a183071449f4dce7a3e585cf6c4b4979adc30647e2913d53d5477882df99/grpcio_tools-1.51.3-cp37-cp37m-macosx_10_10_universal2.whl", hash = "sha256:405656b3cf9639427e6c30a795570cba4a7c06b88a3145866f7d2c05b7e048b4"}, + {url = "https://files.pythonhosted.org/packages/5a/91/f8ff971aa5d2874048ef6e7641071fc6a24ff371355a3ddfa2f46149a7c8/grpcio_tools-1.51.3-cp39-cp39-macosx_10_10_universal2.whl", hash = "sha256:fbb742e10bd548031b8d80f7c28eb70c7c3a9850f8e99c98cd496f19a05f9fee"}, + {url = "https://files.pythonhosted.org/packages/5f/c0/af6dbe9994bb895aaa570792ea81fc6c22868678ae6565f4dd4a13026e07/grpcio_tools-1.51.3-cp310-cp310-linux_armv7l.whl", hash = "sha256:779ac1ad2258b8debaa45595bfb3814806ed8880e3ea7f194e551d76a6255969"}, + {url = "https://files.pythonhosted.org/packages/61/61/d671603df82ae67075196f50511bb276fdbe25579597b7b27e895ddd653e/grpcio_tools-1.51.3-cp38-cp38-linux_armv7l.whl", hash = 
"sha256:7fd18d8d211fbfd337fc12e5bdd57e62368f636addf901d290e68a39f1dfea38"}, + {url = "https://files.pythonhosted.org/packages/64/6d/f4e128c3cf88bcbc33e7c60aedeb7569c6c567f9e8bd59223399aeb1e5a5/grpcio_tools-1.51.3-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:046c0b1e372d4acf552aa0c8f5e830f019d67b75f25aeb0968d15fbdd3eaabd3"}, + {url = "https://files.pythonhosted.org/packages/67/47/655760fb907afc75c661ceb28ae3271c27c16ac83b12d6061f07fc6e2cd6/grpcio_tools-1.51.3-cp37-cp37m-win_amd64.whl", hash = "sha256:ef849687c7f2bd7f3277edc7c7cafc7042823d0fb078e3c01c861eb0c96ed181"}, + {url = "https://files.pythonhosted.org/packages/72/8e/b2ae3338447334418ebba124baf7ca5f6238c4980c483abf3f2682bf62ed/grpcio_tools-1.51.3-cp310-cp310-win_amd64.whl", hash = "sha256:077adaee431c2b040dd77923964577087c32e828908e8fa2e53f8e003ad408c9"}, + {url = "https://files.pythonhosted.org/packages/76/f3/a7b7b9caf525c3ebfb7eb3092c5d79470d04aa118d0c0f11ba2569c12dad/grpcio_tools-1.51.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0c6b145587d6062e2335f0b3286501dd6853a1ea50bd466a913351b7c48e5f20"}, + {url = "https://files.pythonhosted.org/packages/87/a0/4908c43aac558837a5918b3990c9a13028cfebd92249eefe5ac442884fec/grpcio_tools-1.51.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:f2df233a3e7db23d9b516cb5e2bfe029465f40a72978bee0584e44e7860ea73f"}, + {url = "https://files.pythonhosted.org/packages/8b/8f/0834f02287526cd003ad56d100ec41b2c51ad677a2754c7322b258eec2f8/grpcio_tools-1.51.3-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:f7583735542ced7d30baec6cc21bffeaffcec1523bf807e8f8f0047113b6d30a"}, + {url = "https://files.pythonhosted.org/packages/8f/ce/1d2ab7fb53c136869135d178026fb51e227762d2cdf04ad66e629992cdb0/grpcio_tools-1.51.3-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:781896c488e07b9463196045e6725e52d018cd7d0e1062d4ab1eee2647ca9170"}, + {url = "https://files.pythonhosted.org/packages/aa/75/87712b067c7ce43861d0f9a56b846c506f900c0883799cab3484b69253bf/grpcio_tools-1.51.3-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:715c792679224171c0584e9f235b921d76f8990deb38b0d1215d0469301d9cd9"}, + {url = "https://files.pythonhosted.org/packages/aa/fe/6c33cb5e43aa9e9939d0b298bc08e3d8929ebba0561adc61950d9dbccef5/grpcio_tools-1.51.3-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:2fade12de08923b350475ca16d0d0bd68578c30fce89147aa0f94ef5759bc5a9"}, + {url = "https://files.pythonhosted.org/packages/b4/1f/55f65067c44cff92008a8bbc6493f0a5aabfe1532e85b876d3d0cfc7315a/grpcio_tools-1.51.3-cp37-cp37m-win32.whl", hash = "sha256:f8d17271fc58ed3503dd571c79917e126deca51f85f093770a9606e806aac9dc"}, + {url = "https://files.pythonhosted.org/packages/ba/1c/4a546c6632c7b19b06fc7c5bf4ea0ddbc2bb9fd556ae525bea9fcbbcbf8f/grpcio_tools-1.51.3-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:ece44f42b10e0bceb49235be1e361e1ee69afee7f938c82fb656a601a4a720e3"}, + {url = "https://files.pythonhosted.org/packages/ba/c5/0862cc10413cc7be9ab8fd5bb3967fbb3fd26e55bb880b21d45bc6008c2a/grpcio_tools-1.51.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:158c5bfe7e157fd9a944bde9f7dfe3b468416666e4fade77cd17caa3edc8bd81"}, + {url = "https://files.pythonhosted.org/packages/c9/02/a19e7e0ad8b015e4e1b1395c7477e9c638174231fc02213f5298432d9c8e/grpcio_tools-1.51.3-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:efc90b0287908c46281eb61933acaa1b96a575d0160fc98b5c64b9dec46f60d1"}, + {url = 
"https://files.pythonhosted.org/packages/d5/7a/306320eb6597a6f89ee02f73ddf7b7608b8dd2f4da4f7b8b94949d6ca544/grpcio_tools-1.51.3-cp310-cp310-win32.whl", hash = "sha256:8e9df40db7a0edd403b539cc142d6114270e35debf723a5b4a7a93d5c30fffc0"}, + {url = "https://files.pythonhosted.org/packages/dc/6a/02952bc9ce4d094ce50fea3f43859c888c9d0f21f40f7a1a61abb38b06a9/grpcio_tools-1.51.3-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:d4ffb6325ed489065dbdca764cf37c3a29376bc657874116c9af788d7a0d2ee4"}, + {url = "https://files.pythonhosted.org/packages/e3/b2/8c88f5bfd2181b1ecef103f71b8f1411517441a8359bab4895180bbbe2a4/grpcio_tools-1.51.3-cp39-cp39-manylinux_2_17_aarch64.whl", hash = "sha256:a836a72c657f751244cdb358c3461a89627e6d02654079d2450cfe361800428c"}, + {url = "https://files.pythonhosted.org/packages/eb/db/b72f516b4d6aba545dc1b64aad62ba6debf64650db69682ef7332055040b/grpcio_tools-1.51.3-cp38-cp38-win32.whl", hash = "sha256:35c1ee7c766eb586f04ba41fa7711eb847767eb277a1737998374ac57768f1f0"}, + {url = "https://files.pythonhosted.org/packages/ed/eb/1443b5b181d62609b4b45f1e5b270754ea96da81e4b538bb87465a545ea7/grpcio_tools-1.51.3-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:46e8df08b65f9379c3f103147b29542b0141ca84e77d0eee9114ca5f9b3f0d23"}, + {url = "https://files.pythonhosted.org/packages/f4/24/d7417b296b90f1e6464759b4bfb69b873aa7bbe69cc6809a55eb171a19e8/grpcio_tools-1.51.3-cp37-cp37m-linux_armv7l.whl", hash = "sha256:d2212c682529263b3c9e903092d0ccbb9fc6afba820e4c2fa52c2c27720cdcae"}, + {url = "https://files.pythonhosted.org/packages/f5/ec/a0c14f41ac7647baea9cb470f302d4b5984ab91d4f19e92e83e1d020c6a1/grpcio_tools-1.51.3-cp37-cp37m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c7b3374f4a6579c58d16a5fab2e6b4e9bb8625a034a7f4cd6024f4d1cc12f2a0"}, + {url = "https://files.pythonhosted.org/packages/f7/a3/da6a56cfe2dbd07691c0e3b05edac2b1fef2484b70b0da2770db835e9cdd/grpcio_tools-1.51.3-cp311-cp311-win_amd64.whl", hash = "sha256:ba76d15fd149b575170fa32a1f6a9ff2b38ff9db223229a8ad6f53450a452688"}, + {url = "https://files.pythonhosted.org/packages/fb/b4/b01c9fae4ccb629eb47ba09555e89021e3a94185cb673ff4842f1e25c0a1/grpcio_tools-1.51.3-cp38-cp38-manylinux_2_17_aarch64.whl", hash = "sha256:867fa1973fa8b0772077c15425f122f672a18b1c53709a8a2bff9d056db4c20e"}, + {url = "https://files.pythonhosted.org/packages/fb/cb/0aaa1ecdf64dea6f6df1137978212a8d2e109d20fa31b0cafa61d13f8de5/grpcio_tools-1.51.3-cp39-cp39-linux_armv7l.whl", hash = "sha256:e02231e21029f716a1d23a0b5e664fa243d147da33a3f55088a9529b860aa4ac"}, + {url = "https://files.pythonhosted.org/packages/ff/17/5dfbb5dd5d3f0add353e8f4905b7591f7af69b4df1ab2d5fd2c95f812e69/grpcio_tools-1.51.3-cp37-cp37m-manylinux_2_17_aarch64.whl", hash = "sha256:3c445a064b2ef3d3475e26e2add8ddb4ac2933741ecddf71d5b071a3ad078db4"}, + {url = "https://files.pythonhosted.org/packages/ff/fc/daa42e82f8bb30a20e1c8dd20cfd3a58ddb6224237d5c5d38989e2f55689/grpcio_tools-1.51.3-cp310-cp310-macosx_12_0_universal2.whl", hash = "sha256:83bf605fe2b3591d3c8a78646f37c72c5832c4dd84b5f92405c17cb10b136be6"}, +] +"protobuf 4.22.3" = [ + {url = "https://files.pythonhosted.org/packages/25/ca/79af03ceec0f9439d8fb5c2c8d99454c5c4f8c7fe00c8e7dbb280a8177c8/protobuf-4.22.3-cp38-cp38-win_amd64.whl", hash = "sha256:f2f4710543abec186aee332d6852ef5ae7ce2e9e807a3da570f36de5a732d88e"}, + {url = "https://files.pythonhosted.org/packages/2f/db/42950497852aa35940a33e29118d8a2117fb20072bee08728f0948b70d7a/protobuf-4.22.3-cp38-cp38-win32.whl", hash = 
"sha256:f08aa300b67f1c012100d8eb62d47129e53d1150f4469fd78a29fa3cb68c66f2"}, + {url = "https://files.pythonhosted.org/packages/3d/df/045aa99824f00c732410463512c52c2137f0a8cb968be573e63c9a679a84/protobuf-4.22.3-cp37-abi3-macosx_10_9_universal2.whl", hash = "sha256:d14fc1a41d1a1909998e8aff7e80d2a7ae14772c4a70e4bf7db8a36690b54425"}, + {url = "https://files.pythonhosted.org/packages/5b/98/1856887e6b5d707f06bad7a0a19a6f166819cde6df472cd463b3c54b4bc3/protobuf-4.22.3-cp37-cp37m-win32.whl", hash = "sha256:ecae944c6c2ce50dda6bf76ef5496196aeb1b85acb95df5843cd812615ec4b61"}, + {url = "https://files.pythonhosted.org/packages/5d/d5/ce54c05165aee15a3353bff5e18891699ffe5c580a827500412c14097584/protobuf-4.22.3-cp310-abi3-win32.whl", hash = "sha256:8b54f56d13ae4a3ec140076c9d937221f887c8f64954673d46f63751209e839a"}, + {url = "https://files.pythonhosted.org/packages/64/1a/607462fe9bd9815571b92510074a74f2012cca6b564ecc65a9d65ecec6da/protobuf-4.22.3-cp39-cp39-win32.whl", hash = "sha256:7cf56e31907c532e460bb62010a513408e6cdf5b03fb2611e4b67ed398ad046d"}, + {url = "https://files.pythonhosted.org/packages/6d/f6/695ae28e310ea84fbde27cbca851e3dbdc51fcef0f79f622166932b7b7e4/protobuf-4.22.3-cp37-abi3-manylinux2014_aarch64.whl", hash = "sha256:70659847ee57a5262a65954538088a1d72dfc3e9882695cab9f0c54ffe71663b"}, + {url = "https://files.pythonhosted.org/packages/82/48/09fad23447675247e42f4262191505dbb4c9a67b372f593cd1465968c522/protobuf-4.22.3-cp37-cp37m-win_amd64.whl", hash = "sha256:d4b66266965598ff4c291416be429cef7989d8fae88b55b62095a2331511b3fa"}, + {url = "https://files.pythonhosted.org/packages/8f/fd/04c33233cbc10b183f19ac1d57f04e849bf62f80837aa332b95dfd2dc44a/protobuf-4.22.3-cp310-abi3-win_amd64.whl", hash = "sha256:7760730063329d42a9d4c4573b804289b738d4931e363ffbe684716b796bde51"}, + {url = "https://files.pythonhosted.org/packages/d3/4b/ba25359a15db99b12b59c1d68ccbd0c1d2d4c0c874d9b92f19b5f89d8293/protobuf-4.22.3-py3-none-any.whl", hash = "sha256:52f0a78141078077cfe15fe333ac3e3a077420b9a3f5d1bf9b5fe9d286b4d881"}, + {url = "https://files.pythonhosted.org/packages/f4/fd/d8d309382c71c5e83a1920ae9840410396e595e3b36229d96e3ba755687e/protobuf-4.22.3-cp37-abi3-manylinux2014_x86_64.whl", hash = "sha256:13233ee2b9d3bd9a5f216c1fa2c321cd564b93d8f2e4f521a85b585447747997"}, + {url = "https://files.pythonhosted.org/packages/f8/70/6291e75633eeaa24fed46c9f66091bec184644e6159f392ac32eb92b1f65/protobuf-4.22.3-cp39-cp39-win_amd64.whl", hash = "sha256:e0e630d8e6a79f48c557cd1835865b593d0547dce221c66ed1b827de59c66c97"}, +] +"setuptools 67.6.1" = [ + {url = "https://files.pythonhosted.org/packages/0b/fc/8781442def77b0aa22f63f266d4dadd486ebc0c5371d6290caf4320da4b7/setuptools-67.6.1-py3-none-any.whl", hash = "sha256:e728ca814a823bf7bf60162daf9db95b93d532948c4c0bea762ce62f60189078"}, +] diff --git a/pdm.toml b/pdm.toml new file mode 100644 index 00000000..2af9d62c --- /dev/null +++ b/pdm.toml @@ -0,0 +1,2 @@ +[python] +use_venv = false diff --git a/pyproject.toml b/pyproject.toml new file mode 100644 index 00000000..fd24cff3 --- /dev/null +++ b/pyproject.toml @@ -0,0 +1,36 @@ +[build-system] +requires = ["pdm-backend"] +build-backend = "pdm.backend" + +[project] +name = "ceph-nvmeof" +dynamic = ["version"] +description = "Service to provide Ceph storage over NVMe-oF protocol" +readme = "README.md" +requires-python = "~=3.9" +license = {file = "LICENSE"} +authors = [ + {name = "Ilya Dryomov", email = "idryomov@gmail.com"}, + {name = "Mykola Golub", email = "mykola.golub@clyso.com"}, + {name = "Sandy Kaur", email = "sandy.kaur@ibm.com"}, + 
{name = "Ernesto Puerta", email = "epuertat@redhat.com"}, + {name = "Yin Congmin", email = "congmin.yin@intel.com"}, + {name = "Scott Peterson", email = "scott.d.peterson@intel.com"}, + {name = "Jason Dillaman", email = "dillaman@redhat.com"}, + {name = "Anita Shekar", email = "anita.shekar@ibm.com"}, +] +keywords = [] +classifiers = [] # https://pypi.org/classifiers/ +dependencies = [ + "grpcio == 1.51.3", + "grpcio_tools == 1.51.3" +] + +[tool.pdm.scripts] +protoc = {call = "grpc_tools.command:build_package_protos('proto')"} + +[project.urls] +#homepage = "" +# documentation = "" +repository = "https://github.com/ceph/ceph-nvmeof.git" +# changelog = "" diff --git a/requirements.txt b/requirements.txt deleted file mode 100644 index a6646e7b..00000000 --- a/requirements.txt +++ /dev/null @@ -1,2 +0,0 @@ -grpcio -grpcio-tools From 21eee4ba9a29a6dd273a3b2921b72499b30453dc Mon Sep 17 00:00:00 2001 From: Ernesto Puerta Date: Mon, 26 Jun 2023 19:06:44 +0200 Subject: [PATCH 02/19] build: add basic CI test This adds a Github action that tests the basic commands and sets up a minimal Ceph NVMe-oF gateway deployment. Signed-off-by: Alexander Indenbaum Signed-off-by: Ernesto Puerta Co-authored-by: Alexander Indenbaum --- .github/workflows/build-container.yml | 72 +++++++++++++++++++++++++++ pyproject.toml | 1 + 2 files changed, 73 insertions(+) create mode 100644 .github/workflows/build-container.yml diff --git a/.github/workflows/build-container.yml b/.github/workflows/build-container.yml new file mode 100644 index 00000000..75eb5e70 --- /dev/null +++ b/.github/workflows/build-container.yml @@ -0,0 +1,72 @@ +name: Build container Test +on: [push, pull_request, workflow_dispatch] +env: + # Control gRPC port + PORT: 5500 + +jobs: + build-container-test: + runs-on: ubuntu-latest + steps: + - name: Checkout code + uses: actions/checkout@v3 + with: + # git submodule update --init --recursive + submodules: recursive + + - name: Build container images + run: make build + + - name: Setup huge pages + run: make setup + + - name: Compose Up + run: | + SVC=nvmeof OPTS="--detach --scale nvmeof=1" make up || (OPTS="" make logs; exit 1) + + - name: Wait for controller to start + run: | + until nc -z localhost $PORT; do + echo -n . 
+ sleep 1 + done + echo + + - name: List containers + run: | + make ps + + - name: Create RBD image + run: | + echo "💁 ceph list pools:" + SVC=ceph OPTS="-T" CMD="ceph osd lspools" make exec + echo "💁 rbd create:" + SVC=ceph OPTS="-T" CMD="rbd create rbd/mytestdevimage --size 16" make exec + echo "💁 ls rbd:" + SVC=ceph OPTS="-T" CMD="rbd ls rbd" make exec + + - name: Run CLI + run: | + make run + + CMD="create_bdev -i mytestdevimage -p rbd -b Ceph0" make run + CMD="create_subsystem -n nqn.2016-06.io.spdk:cnode1 -s SPDK00000000000001" make run + CMD="add_namespace -n nqn.2016-06.io.spdk:cnode1 -b Ceph0" make run + CMD="add_host -n nqn.2016-06.io.spdk:cnode1 -t '*'" make run + CMD="create_listener -n nqn.2016-06.io.spdk:cnode1 -s 5001" make run + # should fail https://github.com/ceph/ceph-nvmeof/issues/137 + if CMD="create_bdev -i mytestdevimage -p rbd -b Ceph0" make run; then exit 1; fi + if CMD="create_bdev -i wrongimage -p rbd -b Ceph0" make run; then exit 1; fi + + - name: Display Logs + run: | + OPTS="" make logs + + - name: Compose Down + run: make down + + - name: Compose Stop + run: make stop + + - name: Compose Clean + run: make clean diff --git a/pyproject.toml b/pyproject.toml index fd24cff3..07a6bfbe 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -19,6 +19,7 @@ authors = [ {name = "Jason Dillaman", email = "dillaman@redhat.com"}, {name = "Anita Shekar", email = "anita.shekar@ibm.com"}, ] +maintainers = [] keywords = [] classifiers = [] # https://pypi.org/classifiers/ dependencies = [ From eee90772ecac9327c2af7d747c2f5c6b58af29c3 Mon Sep 17 00:00:00 2001 From: Ernesto Puerta Date: Wed, 28 Jun 2023 14:28:51 +0200 Subject: [PATCH 03/19] build: configure nofile As the number of hosts connected to the NVMe-oF gateway increases, the default nofile setting (max number of open file descriptors) of 1024 falls short, and needs to be increased. Signed-off-by: Ernesto Puerta --- .env | 3 +++ docker-compose.yaml | 2 ++ 2 files changed, 5 insertions(+) diff --git a/.env b/.env index 108921f7..82fa1b95 100644 --- a/.env +++ b/.env @@ -4,6 +4,9 @@ CEPH_VERSION=17.2.6 SPDK_VERSION=23.01 MAINTAINER=Ceph Developers +# Performance +NVMEOF_NOFILE = 20480 # Max number of open files (depends on number of hosts connected) + # NVMe-oF NVMEOF_VERSION=${VERSION} NVMEOF_CONFIG=./ceph-nvmeof.conf diff --git a/docker-compose.yaml b/docker-compose.yaml index b2f31519..91d5b5cc 100644 --- a/docker-compose.yaml +++ b/docker-compose.yaml @@ -85,6 +85,8 @@ services: - SYS_ADMIN # huge-pages - CAP_SYS_NICE # RTE - SYS_PTRACE # gdb + ulimits: + nofile: $NVMEOF_NOFILE networks: default: ipv4_address: 192.168.13.3 From 83e424076af8330dc9454c357685ec6a805bc45f Mon Sep 17 00:00:00 2001 From: Ernesto Puerta Date: Wed, 28 Jun 2023 14:32:58 +0200 Subject: [PATCH 04/19] ci: reduce hugepages In Github CI hosts, requesting 4 GB of hugepages is not supported. Reduced that to 1024 x 2 MB hugepages (2 GB). 
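As a quick sanity check of the sizing (a sketch; it assumes the 2 MiB hugepage size behind the `/sys/kernel/mm/hugepages/hugepages-2048kB/nr_hugepages` knob that `make setup` writes to):

```bash
# 1024 hugepages x 2 MiB each = 2048 MiB, i.e. the 2 GB reserved for SPDK
echo "$(( 1024 * 2 )) MiB"

# Inspect how many 2 MiB hugepages are currently allocated on the host
cat /sys/kernel/mm/hugepages/hugepages-2048kB/nr_hugepages
```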
Signed-off-by: Ernesto Puerta --- .github/workflows/build-container.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/build-container.yml b/.github/workflows/build-container.yml index 75eb5e70..89ecfd06 100644 --- a/.github/workflows/build-container.yml +++ b/.github/workflows/build-container.yml @@ -18,7 +18,7 @@ jobs: run: make build - name: Setup huge pages - run: make setup + run: make setup HUGEPAGES=1024 - name: Compose Up run: | From bd1d37f439bc37aeee343b505e6679a41bb261f4 Mon Sep 17 00:00:00 2001 From: Ernesto Puerta Date: Wed, 28 Jun 2023 18:52:19 +0200 Subject: [PATCH 05/19] ci: fix hugepages and workflow - Make .env bash-importable (add double quotes) - Expose ports as .env settings (NVMEOF_GW_PORT, NVMEOF_IO_PORT, NVMEOF_DISC_PORT) - Tune CI workflow to only run on push/pull-requests against 'devel' branch - Add scheduled CI workflow run (nightly) - Set hugepages in CI workflow to 256 - Fix issue with tput in non-TTY Github Action environment - Use native GRPC_DNS_RESOLVER, as the ares one doesn't properly deal with Docker hostname resolution Signed-off-by: Ernesto Puerta --- .env | 64 ++++++++++++----------- .github/workflows/build-container.yml | 75 ++++++++++++--------------- Dockerfile | 3 +- Makefile | 3 +- ceph-nvmeof.conf | 2 +- docker-compose.yaml | 12 ++--- mk/autohelp.mk | 4 +- 7 files changed, 79 insertions(+), 84 deletions(-) diff --git a/.env b/.env index 82fa1b95..abe302c7 100644 --- a/.env +++ b/.env @@ -1,47 +1,51 @@ # Globals -VERSION=0.0.1 -CEPH_VERSION=17.2.6 -SPDK_VERSION=23.01 -MAINTAINER=Ceph Developers +VERSION="0.0.1" +CEPH_VERSION="17.2.6" +SPDK_VERSION="23.01" +MAINTAINER="Ceph Developers " # Performance -NVMEOF_NOFILE = 20480 # Max number of open files (depends on number of hosts connected) +NVMEOF_NOFILE=20480 # Max number of open files (depends on number of hosts connected) # NVMe-oF -NVMEOF_VERSION=${VERSION} -NVMEOF_CONFIG=./ceph-nvmeof.conf -NVMEOF_SPDK_VERSION=${SPDK_VERSION} -NVMEOF_NAME=ceph-nvmeof -NVMEOF_SUMMARY=Ceph NVMe over Fabrics Gateway +NVMEOF_VERSION="${VERSION}" +NVMEOF_CONFIG="./ceph-nvmeof.conf" +NVMEOF_SPDK_VERSION="${SPDK_VERSION}" +NVMEOF_NAME="ceph-nvmeof" +NVMEOF_SUMMARY="Ceph NVMe over Fabrics Gateway" NVMEOF_DESCRIPTION="Service to provide block storage on top of Ceph for platforms (e.g.: VMWare) without native Ceph support (RBD), replacing existing approaches (iSCSI) with a newer and more versatile standard (NVMe-oF)." 
-NVMEOF_URL=https://github.com/ceph/ceph-nvmeof -NVMEOF_TAGS=ceph,nvme-of,nvme-of gateway,rbd,block storage -NVMEOF_WANTS=ceph,rbd -NVMEOF_EXPOSE_SERVICES=4420/tcp:nvme,5500/tcp:grpc,8009/tcp:nvme-disc -NVMEOF_GIT_REPO=https://github.com/ceph/ceph-nvmeof.git +NVMEOF_URL="https://github.com/ceph/ceph-nvmeof" +NVMEOF_TAGS="ceph,nvme-of,nvme-of gateway,rbd,block storage" +NVMEOF_WANTS="ceph,rbd" +NVMEOF_IP_ADDRESS="192.168.13.3" +NVMEOF_IO_PORT=4420 +NVMEOF_GW_PORT=5500 +NVMEOF_DISC_PORT=8009 +NVMEOF_EXPOSE_SERVICES="${NVMEOF_IO_PORT}/tcp:nvme,${NVMEOF_GW_PORT}/tcp:grpc,${NVMEOF_DISC_PORT}/tcp:nvme-disc" +NVMEOF_GIT_REPO="https://github.com/ceph/ceph-nvmeof.git" # NVMe-oF CLI -MVMEOF_CLI_VERSION=${VERSION} -NVMEOF_CLI_NAME=ceph-nvmeof-cli -NVMEOF_CLI_SUMMARY=Ceph NVMe over Fabrics CLI -NVMEOF_CLI_DESCRIPTION=Command line interface for Ceph NVMe over Fabrics Gateway +MVMEOF_CLI_VERSION="${VERSION}" +NVMEOF_CLI_NAME="ceph-nvmeof-cli" +NVMEOF_CLI_SUMMARY="Ceph NVMe over Fabrics CLI" +NVMEOF_CLI_DESCRIPTION="Command line interface for Ceph NVMe over Fabrics Gateway" # SPDK -SPDK_CEPH_VERSION=${CEPH_VERSION} -SPDK_NAME=SPDK -SPDK_SUMMARY=Build Ultra High-Performance Storage Applications with the Storage Performance Development Kit -SPDK_DESCRIPTION=The Storage Performance Development Kit (SPDK) provides a set of tools and libraries for writing high performance, scalable, user-mode storage applications -SPDK_URL=https://spdk.io +SPDK_CEPH_VERSION="${CEPH_VERSION}" +SPDK_NAME="SPDK" +SPDK_SUMMARY="Build Ultra High-Performance Storage Applications with the Storage Performance Development Kit" +SPDK_DESCRIPTION="The Storage Performance Development Kit (SPDK) provides a set of tools and libraries for writing high performance, scalable, user-mode storage applications" +SPDK_URL="https://spdk.io" -SPDK_PKGDEP_ARGS=--rbd -SPDK_CONFIGURE_ARGS=--with-rbd --disable-tests --disable-unit-tests --disable-examples +SPDK_PKGDEP_ARGS="--rbd" +SPDK_CONFIGURE_ARGS="--with-rbd --disable-tests --disable-unit-tests --disable-examples" SPDK_MAKEFLAGS= -SPDK_CENTOS_BASE=https://mirror.stream.centos.org/9-stream/BaseOS/x86_64/os/Packages/ -SPDK_CENTOS_REPO_VER=9.0-21.el9 +SPDK_CENTOS_BASE="https://mirror.stream.centos.org/9-stream/BaseOS/x86_64/os/Packages/" +SPDK_CENTOS_REPO_VER="9.0-21.el9" # Ceph Cluster -CEPH_CLUSTER_VERSION=${CEPH_VERSION} -CEPH_VSTART_ARGS=--without-dashboard --memstore +CEPH_CLUSTER_VERSION="${CEPH_VERSION}" +CEPH_VSTART_ARGS="--without-dashboard --memstore" diff --git a/.github/workflows/build-container.yml b/.github/workflows/build-container.yml index 89ecfd06..47a3432c 100644 --- a/.github/workflows/build-container.yml +++ b/.github/workflows/build-container.yml @@ -1,72 +1,65 @@ -name: Build container Test -on: [push, pull_request, workflow_dispatch] +name: Build, Deploy and Test Containers +on: + push: + branches: + - devel + pull_request: + branches: + - devel + schedule: + - cron: '0 0 * * *' + workflow_dispatch: env: - # Control gRPC port - PORT: 5500 + HUGEPAGES: 256 jobs: - build-container-test: + build: + name: Build and test runs-on: ubuntu-latest steps: - name: Checkout code uses: actions/checkout@v3 with: - # git submodule update --init --recursive submodules: recursive - name: Build container images run: make build - - name: Setup huge pages - run: make setup HUGEPAGES=1024 + - name: Setup huge-pages + run: make setup HUGEPAGES=$HUGEPAGES - - name: Compose Up + - name: Start containers run: | - SVC=nvmeof OPTS="--detach --scale nvmeof=1" make up || (OPTS="" make logs; exit 1) + make 
up OPTS=--detach || (make logs OPTS=''; exit 1) - - name: Wait for controller to start + - name: Wait for the Gateway to be listening + timeout-minutes: 1 run: | - until nc -z localhost $PORT; do + . .env + until nc -z localhost $NVMEOF_GW_PORT; do echo -n . sleep 1 done echo - name: List containers - run: | - make ps + run: make ps - - name: Create RBD image - run: | - echo "💁 ceph list pools:" - SVC=ceph OPTS="-T" CMD="ceph osd lspools" make exec - echo "💁 rbd create:" - SVC=ceph OPTS="-T" CMD="rbd create rbd/mytestdevimage --size 16" make exec - echo "💁 ls rbd:" - SVC=ceph OPTS="-T" CMD="rbd ls rbd" make exec + - name: List processes + run: make top - - name: Run CLI + - name: Ping run: | - make run + make run SVC=nvmeof-cli OPTS=--entrypoint=bash CMD='-c "dnf install -y iputils; ping -c1 nvmeof"' + + - name: Run test + run: . .env && make demo OPTS=-T # Disable TTY - CMD="create_bdev -i mytestdevimage -p rbd -b Ceph0" make run - CMD="create_subsystem -n nqn.2016-06.io.spdk:cnode1 -s SPDK00000000000001" make run - CMD="add_namespace -n nqn.2016-06.io.spdk:cnode1 -b Ceph0" make run - CMD="add_host -n nqn.2016-06.io.spdk:cnode1 -t '*'" make run - CMD="create_listener -n nqn.2016-06.io.spdk:cnode1 -s 5001" make run - # should fail https://github.com/ceph/ceph-nvmeof/issues/137 - if CMD="create_bdev -i mytestdevimage -p rbd -b Ceph0" make run; then exit 1; fi - if CMD="create_bdev -i wrongimage -p rbd -b Ceph0" make run; then exit 1; fi + - name: Display logs + run: make logs OPTS='' - - name: Display Logs - run: | - OPTS="" make logs - - - name: Compose Down + - name: Shut containers down run: make down - - name: Compose Stop - run: make stop - - - name: Compose Clean + - name: Clean up environment run: make clean diff --git a/Dockerfile b/Dockerfile index 2b70d721..8e5436fc 100644 --- a/Dockerfile +++ b/Dockerfile @@ -6,6 +6,7 @@ ARG NVMEOF_TARGET # either 'gateway' or 'cli' #------------------------------------------------------------------------------ # Base image for NVMEOF_TARGET=cli (nvmeof-cli) FROM registry.access.redhat.com/ubi9/ubi AS base-cli +ENV GRPC_DNS_RESOLVER=native ENTRYPOINT ["python3", "-m", "control.cli"] CMD [] @@ -115,4 +116,4 @@ RUN pdm run protoc #------------------------------------------------------------------------------ FROM python-intermediate -COPY --from=builder $APPDIR . \ No newline at end of file +COPY --from=builder $APPDIR . 
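Note on the `ENV GRPC_DNS_RESOLVER=native` line added to the CLI base image above: as an illustrative sketch (service name and CLI flags as defined in this repository's docker-compose.yaml and mk/misc.mk), the same resolver setting can also be overridden for a one-off CLI run:

```bash
# Force the native gRPC DNS resolver for a single nvmeof-cli invocation
docker-compose run --rm -e GRPC_DNS_RESOLVER=native nvmeof-cli \
    --server-address nvmeof --server-port 5500 get_subsystems
```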
diff --git a/Makefile b/Makefile index 855b618b..9bbfaf31 100644 --- a/Makefile +++ b/Makefile @@ -27,7 +27,8 @@ build: export BUILD_DATE != date -u +"%Y-%m-%dT%H:%M:%SZ" up: SVC = nvmeof ## Services -up: override OPTS += --no-build --abort-on-container-exit --remove-orphans --scale nvmeof=$(SCALE) +up: OPTS ?= --abort-on-container-exit +up: override OPTS += --no-build --remove-orphans --scale nvmeof=$(SCALE) clean: $(CLEAN) ## Clean-up environment diff --git a/ceph-nvmeof.conf b/ceph-nvmeof.conf index 84af0756..d007341f 100644 --- a/ceph-nvmeof.conf +++ b/ceph-nvmeof.conf @@ -10,7 +10,7 @@ [gateway] name = group = -addr = 192.168.13.3 +addr = 0.0.0.0 port = 5500 enable_auth = False state_update_notify = True diff --git a/docker-compose.yaml b/docker-compose.yaml index 91d5b5cc..8f2d2fd7 100644 --- a/docker-compose.yaml +++ b/docker-compose.yaml @@ -89,11 +89,11 @@ services: nofile: $NVMEOF_NOFILE networks: default: - ipv4_address: 192.168.13.3 + ipv4_address: $NVMEOF_IP_ADDRESS ports: - - "4420:4420" # I/O controllers - - "5500:5500" # Gateway - - "8009:8009" # Discovery + - "$NVMEOF_IO_PORT:$NVMEOF_IO_PORT" # I/O controllers + - "$NVMEOF_GW_PORT:$NVMEOF_GW_PORT" # Gateway + - "$NVMEOF_DISC_PORT:$NVMEOF_DISC_PORT" # Discovery nvmeof: extends: service: nvmeof-base @@ -132,12 +132,8 @@ services: io.ceph.nvmeof: volumes: ceph-conf: - labels: - io.ceph.nvmeof: networks: default: ipam: config: - subnet: 192.168.13.0/24 - labels: - io.ceph.nvmeof: \ No newline at end of file diff --git a/mk/autohelp.mk b/mk/autohelp.mk index f55f6f37..d6270a73 100644 --- a/mk/autohelp.mk +++ b/mk/autohelp.mk @@ -4,8 +4,8 @@ # hello: ## This target prints Hello World # LANGUAGE := esperanto ## Set the language for the Hello World message -autohelp: BOLD != tput bold -autohelp: NORMAL != tput sgr0 +autohelp: BOLD != [ -z "$$PS1" ] && tput bold +autohelp: NORMAL != [ -z "$$PS1" ] && tput sgr0 autohelp: @echo $(AUTOHELP_SUMMARY) @echo From 8fd153683367b9af9f7f9cc764286cbc8cd72c5d Mon Sep 17 00:00:00 2001 From: Ernesto Puerta Date: Thu, 29 Jun 2023 19:23:50 +0200 Subject: [PATCH 06/19] cli: fix return code on error - Return non-zero error code on client-server errors - Add debug info to the CI job - Fix & improve docs (SELinux config, typos) Signed-off-by: Ernesto Puerta --- .github/workflows/build-container.yml | 14 +- README.md | 10 +- control/cli.py | 213 +++++++++++--------------- 3 files changed, 105 insertions(+), 132 deletions(-) diff --git a/.github/workflows/build-container.yml b/.github/workflows/build-container.yml index 47a3432c..47950d41 100644 --- a/.github/workflows/build-container.yml +++ b/.github/workflows/build-container.yml @@ -1,4 +1,4 @@ -name: Build, Deploy and Test Containers +name: "Container: Build, Deploy, Test" on: push: branches: @@ -48,12 +48,14 @@ jobs: - name: List processes run: make top - - name: Ping - run: | - make run SVC=nvmeof-cli OPTS=--entrypoint=bash CMD='-c "dnf install -y iputils; ping -c1 nvmeof"' - + - name: Print hosts + run: make run SVC=nvmeof-cli OPTS=--entrypoint=bash CMD='-c "cat /etc/hosts"' + + - name: Print DNS config + run: make run SVC=nvmeof-cli OPTS=--entrypoint=bash CMD='-c "cat /etc/resolv.conf"' + - name: Run test - run: . .env && make demo OPTS=-T # Disable TTY + run: . 
.env && make demo SERVER_ADDRESS=$NVMEOF_IP_ADDRESS OPTS=-T # Disable TTY - name: Display logs run: make logs OPTS='' diff --git a/README.md b/README.md index bfdeba8b..cf693b4f 100644 --- a/README.md +++ b/README.md @@ -14,13 +14,17 @@ The [creation and management of RBD images](https://docs.ceph.com/en/latest/rbd/ * Linux-based system with at least 16 GB of available RAM. [Fedora 37](https://fedoraproject.org/) is recommended. * `moby-engine` (`docker-engine`) (v20.10) and `docker-compose` (v1.29). These versions are just indicative. * `make` (only needed to launch `docker-compose` commands). -* SELinux in permissive mode. +* SELinux in permissive mode: + ```bash + sed -i s/^SELINUX=.*$/SELINUX=permissive/ /etc/selinux/config + setenforce 0 + ``` To install these dependencies in Fedora: ```bash sudo dnf install -y make moby-engine docker-compose ``` -Some [post-installation steps](https://docs.docker.com/engine/install/linux-postinstall/) are be required to use `docker` with regular users: +Some [post-installation steps](https://docs.docker.com/engine/install/linux-postinstall/) are required to use `docker` with regular users: ```bash sudo groupadd docker sudo usermod -aG docker $USER @@ -285,4 +289,4 @@ See [Ceph's Code of Conduct](https://ceph.io/en/code-of-conduct/). ## License -See [`LICENSE`](LICENSE). \ No newline at end of file +See [`LICENSE`](LICENSE). diff --git a/control/cli.py b/control/cli.py index e252a14e..4115a09e 100644 --- a/control/cli.py +++ b/control/cli.py @@ -12,6 +12,9 @@ import json import logging import sys + +from functools import wraps + from proto import gateway_pb2_grpc as pb2_grpc from proto import gateway_pb2 as pb2 @@ -76,8 +79,16 @@ def decorator(func): # attribute to point to the subcommand's associated function for arg in args: parser.add_argument(*arg[0], **arg[1]) - parser.set_defaults(func=func) - return func + + @wraps(func) + def wrapper(*args, **kwargs): + try: + return func(*args, **kwargs) + except grpc.RpcError as e: + self.parser.error( + f"{func.__name__} failed: code={e.code()} message={e.details()}") + parser.set_defaults(func=wrapper) + return wrapper return decorator @@ -153,31 +164,23 @@ def connect(self, host, port, client_key, client_cert, server_cert): ]) def create_bdev(self, args): """Creates a bdev from an RBD image.""" - - try: - req = pb2.create_bdev_req( - rbd_pool_name=args.pool, - rbd_image_name=args.image, - block_size=args.block_size, - bdev_name=args.bdev, - ) - ret = self.stub.create_bdev(req) - self.logger.info(f"Created bdev {ret.bdev_name}: {ret.status}") - except Exception as error: - self.logger.error(f"Failed to create bdev: \n {error}") + req = pb2.create_bdev_req( + rbd_pool_name=args.pool, + rbd_image_name=args.image, + block_size=args.block_size, + bdev_name=args.bdev, + ) + ret = self.stub.create_bdev(req) + self.logger.info(f"Created bdev {ret.bdev_name}: {ret.status}") @cli.cmd([ argument("-b", "--bdev", help="Bdev name", required=True), ]) def delete_bdev(self, args): """Deletes a bdev.""" - - try: - req = pb2.delete_bdev_req(bdev_name=args.bdev) - ret = self.stub.delete_bdev(req) - self.logger.info(f"Deleted bdev {args.bdev}: {ret.status}") - except Exception as error: - self.logger.error(f"Failed to delete bdev: \n {error}") + req = pb2.delete_bdev_req(bdev_name=args.bdev) + ret = self.stub.delete_bdev(req) + self.logger.info(f"Deleted bdev {args.bdev}: {ret.status}") @cli.cmd([ argument("-n", "--subnqn", help="Subsystem NQN", required=True), @@ -186,28 +189,20 @@ def delete_bdev(self, args): ]) def 
create_subsystem(self, args): """Creates a subsystem.""" - - try: - req = pb2.create_subsystem_req(subsystem_nqn=args.subnqn, - serial_number=args.serial, - max_namespaces=args.max_namespaces) - ret = self.stub.create_subsystem(req) - self.logger.info(f"Created subsystem {args.subnqn}: {ret.status}") - except Exception as error: - self.logger.error(f"Failed to create subsystem: \n {error}") + req = pb2.create_subsystem_req(subsystem_nqn=args.subnqn, + serial_number=args.serial, + max_namespaces=args.max_namespaces) + ret = self.stub.create_subsystem(req) + self.logger.info(f"Created subsystem {args.subnqn}: {ret.status}") @cli.cmd([ argument("-n", "--subnqn", help="Subsystem NQN", required=True), ]) def delete_subsystem(self, args): """Deletes a subsystem.""" - - try: - req = pb2.delete_subsystem_req(subsystem_nqn=args.subnqn) - ret = self.stub.delete_subsystem(req) - self.logger.info(f"Deleted subsystem {args.subnqn}: {ret.status}") - except Exception as error: - self.logger.error(f"Failed to delete subsystem: \n {error}") + req = pb2.delete_subsystem_req(subsystem_nqn=args.subnqn) + ret = self.stub.delete_subsystem(req) + self.logger.info(f"Deleted subsystem {args.subnqn}: {ret.status}") @cli.cmd([ argument("-n", "--subnqn", help="Subsystem NQN", required=True), @@ -216,16 +211,12 @@ def delete_subsystem(self, args): ]) def add_namespace(self, args): """Adds a namespace to a subsystem.""" - - try: - req = pb2.add_namespace_req(subsystem_nqn=args.subnqn, - bdev_name=args.bdev, - nsid=args.nsid) - ret = self.stub.add_namespace(req) - self.logger.info( - f"Added namespace {ret.nsid} to {args.subnqn}: {ret.status}") - except Exception as error: - self.logger.error(f"Failed to add namespace: \n {error}") + req = pb2.add_namespace_req(subsystem_nqn=args.subnqn, + bdev_name=args.bdev, + nsid=args.nsid) + ret = self.stub.add_namespace(req) + self.logger.info( + f"Added namespace {ret.nsid} to {args.subnqn}: {ret.status}") @cli.cmd([ argument("-n", "--subnqn", help="Subsystem NQN", required=True), @@ -233,16 +224,12 @@ def add_namespace(self, args): ]) def remove_namespace(self, args): """Removes a namespace from a subsystem.""" - - try: - req = pb2.remove_namespace_req(subsystem_nqn=args.subnqn, - nsid=args.nsid) - ret = self.stub.remove_namespace(req) - self.logger.info( - f"Removed namespace {args.nsid} from {args.subnqn}:" - f" {ret.status}") - except Exception as error: - self.logger.error(f"Failed to remove namespace: \n {error}") + req = pb2.remove_namespace_req(subsystem_nqn=args.subnqn, + nsid=args.nsid) + ret = self.stub.remove_namespace(req) + self.logger.info( + f"Removed namespace {args.nsid} from {args.subnqn}:" + f" {ret.status}") @cli.cmd([ argument("-n", "--subnqn", help="Subsystem NQN", required=True), @@ -250,20 +237,16 @@ def remove_namespace(self, args): ]) def add_host(self, args): """Adds a host to a subsystem.""" - - try: - req = pb2.add_host_req(subsystem_nqn=args.subnqn, - host_nqn=args.host) - ret = self.stub.add_host(req) - if args.host == "*": - self.logger.info( - f"Allowed open host access to {args.subnqn}: {ret.status}") - else: - self.logger.info( - f"Added host {args.host} access to {args.subnqn}:" - f" {ret.status}") - except Exception as error: - self.logger.error(f"Failed to add host: \n {error}") + req = pb2.add_host_req(subsystem_nqn=args.subnqn, + host_nqn=args.host) + ret = self.stub.add_host(req) + if args.host == "*": + self.logger.info( + f"Allowed open host access to {args.subnqn}: {ret.status}") + else: + self.logger.info( + f"Added host {args.host} 
access to {args.subnqn}:" + f" {ret.status}") @cli.cmd([ argument("-n", "--subnqn", help="Subsystem NQN", required=True), @@ -271,20 +254,16 @@ def add_host(self, args): ]) def remove_host(self, args): """Removes a host from a subsystem.""" - - try: - req = pb2.remove_host_req(subsystem_nqn=args.subnqn, - host_nqn=args.host) - ret = self.stub.remove_host(req) - if args.host == "*": - self.logger.info( - f"Disabled open host access to {args.subnqn}: {ret.status}") - else: - self.logger.info( - f"Removed host {args.host} access from {args.subnqn}:" - f" {ret.status}") - except Exception as error: - self.logger.error(f"Failed to remove host: \n {error}") + req = pb2.remove_host_req(subsystem_nqn=args.subnqn, + host_nqn=args.host) + ret = self.stub.remove_host(req) + if args.host == "*": + self.logger.info( + f"Disabled open host access to {args.subnqn}: {ret.status}") + else: + self.logger.info( + f"Removed host {args.host} access from {args.subnqn}:" + f" {ret.status}") @cli.cmd([ argument("-n", "--subnqn", help="Subsystem NQN", required=True), @@ -296,20 +275,16 @@ def remove_host(self, args): ]) def create_listener(self, args): """Creates a listener for a subsystem at a given IP/Port.""" - - try: - req = pb2.create_listener_req( - nqn=args.subnqn, - gateway_name=args.gateway_name, - trtype=args.trtype, - adrfam=args.adrfam, - traddr=args.traddr, - trsvcid=args.trsvcid, - ) - ret = self.stub.create_listener(req) - self.logger.info(f"Created {args.subnqn} listener: {ret.status}") - except Exception as error: - self.logger.error(f"Failed to create listener: \n {error}") + req = pb2.create_listener_req( + nqn=args.subnqn, + gateway_name=args.gateway_name, + trtype=args.trtype, + adrfam=args.adrfam, + traddr=args.traddr, + trsvcid=args.trsvcid, + ) + ret = self.stub.create_listener(req) + self.logger.info(f"Created {args.subnqn} listener: {ret.status}") @cli.cmd([ argument("-n", "--subnqn", help="Subsystem NQN", required=True), @@ -321,34 +296,26 @@ def create_listener(self, args): ]) def delete_listener(self, args): """Deletes a listener from a subsystem at a given IP/Port.""" - - try: - req = pb2.delete_listener_req( - nqn=args.subnqn, - gateway_name=args.gateway_name, - trtype=args.trtype, - adrfam=args.adrfam, - traddr=args.traddr, - trsvcid=args.trsvcid, - ) - ret = self.stub.delete_listener(req) - self.logger.info( - f"Deleted {args.traddr} from {args.subnqn}: {ret.status}") - except Exception as error: - self.logger.error(f"Failed to delete listener: \n {error}") + req = pb2.delete_listener_req( + nqn=args.subnqn, + gateway_name=args.gateway_name, + trtype=args.trtype, + adrfam=args.adrfam, + traddr=args.traddr, + trsvcid=args.trsvcid, + ) + ret = self.stub.delete_listener(req) + self.logger.info( + f"Deleted {args.traddr} from {args.subnqn}: {ret.status}") @cli.cmd() def get_subsystems(self, args): """Gets subsystems.""" - - try: - req = pb2.get_subsystems_req() - ret = self.stub.get_subsystems(req) - subsystems = json.loads(ret.subsystems) - formatted_subsystems = json.dumps(subsystems, indent=4) - self.logger.info(f"Get subsystems:\n{formatted_subsystems}") - except Exception as error: - self.logger.error(f"Failed to get subsystems: \n {error}") + req = pb2.get_subsystems_req() + ret = self.stub.get_subsystems(req) + subsystems = json.loads(ret.subsystems) + formatted_subsystems = json.dumps(subsystems, indent=4) + self.logger.info(f"Get subsystems:\n{formatted_subsystems}") def main(args=None): From f789b3dbca7845e992c8ad3955037836fc8f8ef2 Mon Sep 17 00:00:00 2001 From: Ernesto 
Puerta Date: Tue, 4 Jul 2023 13:26:14 +0200 Subject: [PATCH 07/19] build: force install thrift 0.14 Works-Around: https://tracker.ceph.com/issues/61882 Signed-off-by: Ernesto Puerta --- Dockerfile.ceph | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/Dockerfile.ceph b/Dockerfile.ceph index aab1bdf5..3a166983 100644 --- a/Dockerfile.ceph +++ b/Dockerfile.ceph @@ -37,9 +37,15 @@ ARG CEPH_PACKAGES="\ RUN rpm -vih https://dl.fedoraproject.org/pub/epel/epel-release-latest-9.noarch.rpm RUN rpm --import 'https://download.ceph.com/keys/release.asc' RUN cd /etc/yum.repos.d/ && curl -O https://copr.fedorainfracloud.org/coprs/ceph/el9/repo/epel-9/ceph-el9-epel-9.repo + +## WORKAROUND: remove when https://tracker.ceph.com/issues/61882 is fixed +RUN rpm -vih "https://buildlogs.centos.org/centos/9-stream/storage/x86_64/ceph-quincy/Packages/t/thrift-0.14.0-7.el9s.x86_64.rpm" + RUN \ --mount=type=cache,target=/var/cache/microdnf \ microdnf install -y \ + --enablerepo crb \ + --nobest \ --nodocs \ --setopt=install_weak_deps=0 \ --setopt=keepcache=1 \ From 8806e5262bd7ebda2dbe1b30857f494c2058d3b6 Mon Sep 17 00:00:00 2001 From: Ernesto Puerta Date: Tue, 4 Jul 2023 14:16:52 +0200 Subject: [PATCH 08/19] ci: debug Github Action with tmate Signed-off-by: Ernesto Puerta --- .github/workflows/build-container.yml | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/.github/workflows/build-container.yml b/.github/workflows/build-container.yml index 47950d41..454caf10 100644 --- a/.github/workflows/build-container.yml +++ b/.github/workflows/build-container.yml @@ -1,4 +1,4 @@ -name: "Container: Build, Deploy, Test" +name: "CI" on: push: branches: @@ -14,7 +14,6 @@ env: jobs: build: - name: Build and test runs-on: ubuntu-latest steps: - name: Checkout code @@ -54,6 +53,11 @@ jobs: - name: Print DNS config run: make run SVC=nvmeof-cli OPTS=--entrypoint=bash CMD='-c "cat /etc/resolv.conf"' + - name: Setup tmate session + uses: mxschmitt/action-tmate@v3 + with: + limit-access-to-actor: true + - name: Run test run: . .env && make demo SERVER_ADDRESS=$NVMEOF_IP_ADDRESS OPTS=-T # Disable TTY From 488db6d61362614985312bdd8230da487de012c7 Mon Sep 17 00:00:00 2001 From: Ernesto Puerta Date: Tue, 4 Jul 2023 17:04:53 +0200 Subject: [PATCH 09/19] build: enable mlock in container Signed-off-by: Ernesto Puerta --- .github/workflows/build-container.yml | 8 ++++---- docker-compose.yaml | 2 ++ 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/.github/workflows/build-container.yml b/.github/workflows/build-container.yml index 454caf10..8037a199 100644 --- a/.github/workflows/build-container.yml +++ b/.github/workflows/build-container.yml @@ -53,10 +53,10 @@ jobs: - name: Print DNS config run: make run SVC=nvmeof-cli OPTS=--entrypoint=bash CMD='-c "cat /etc/resolv.conf"' - - name: Setup tmate session - uses: mxschmitt/action-tmate@v3 - with: - limit-access-to-actor: true + #- name: Setup tmate session + # uses: mxschmitt/action-tmate@v3 + # with: + # limit-access-to-actor: true - name: Run test run: . 
.env && make demo SERVER_ADDRESS=$NVMEOF_IP_ADDRESS OPTS=-T # Disable TTY diff --git a/docker-compose.yaml b/docker-compose.yaml index 8f2d2fd7..184fc294 100644 --- a/docker-compose.yaml +++ b/docker-compose.yaml @@ -79,6 +79,7 @@ services: # https://spdk.io/doc/containers.html # TODO: Pending of https://github.com/spdk/spdk/issues/2973 - /dev/hugepages:/dev/hugepages + - /dev/vfio/vfio:/dev/vfio/vfio - ceph-conf:/etc/ceph:ro - $NVMEOF_CONFIG:/src/ceph-nvmeof.conf cap_add: @@ -87,6 +88,7 @@ services: - SYS_PTRACE # gdb ulimits: nofile: $NVMEOF_NOFILE + memlock: -1 networks: default: ipv4_address: $NVMEOF_IP_ADDRESS From 063085ec4ef8c92efb82d8b21d91116130d8f585 Mon Sep 17 00:00:00 2001 From: Ernesto Puerta Date: Wed, 5 Jul 2023 16:20:27 +0200 Subject: [PATCH 10/19] build: fix addr=0.0.0.0 issue Signed-off-by: Ernesto Puerta --- .github/workflows/build-container.yml | 8 ++++---- ceph-nvmeof.conf | 3 ++- 2 files changed, 6 insertions(+), 5 deletions(-) diff --git a/.github/workflows/build-container.yml b/.github/workflows/build-container.yml index 8037a199..83d0de75 100644 --- a/.github/workflows/build-container.yml +++ b/.github/workflows/build-container.yml @@ -13,7 +13,7 @@ env: HUGEPAGES: 256 jobs: - build: + build-and-test: runs-on: ubuntu-latest steps: - name: Checkout code @@ -53,14 +53,14 @@ jobs: - name: Print DNS config run: make run SVC=nvmeof-cli OPTS=--entrypoint=bash CMD='-c "cat /etc/resolv.conf"' + - name: Run test + run: . .env && make demo SERVER_ADDRESS=$NVMEOF_IP_ADDRESS OPTS=-T # Disable TTY + #- name: Setup tmate session # uses: mxschmitt/action-tmate@v3 # with: # limit-access-to-actor: true - - name: Run test - run: . .env && make demo SERVER_ADDRESS=$NVMEOF_IP_ADDRESS OPTS=-T # Disable TTY - - name: Display logs run: make logs OPTS='' diff --git a/ceph-nvmeof.conf b/ceph-nvmeof.conf index d007341f..77211487 100644 --- a/ceph-nvmeof.conf +++ b/ceph-nvmeof.conf @@ -10,7 +10,8 @@ [gateway] name = group = -addr = 0.0.0.0 +# Don't use 0.0.0.0 or nvme connect will fail with "no controller found: failed to write to nvme-fabrics device" +addr = 192.168.13.3 port = 5500 enable_auth = False state_update_notify = True From 9dc81f66d2d33d2566743cbcfe250026ea5253cf Mon Sep 17 00:00:00 2001 From: Ernesto Puerta Date: Wed, 5 Jul 2023 16:43:34 +0200 Subject: [PATCH 11/19] ci: test mounting of nvmeof device Signed-off-by: Ernesto Puerta --- .github/workflows/build-container.yml | 29 +++++++++++++++++++++------ 1 file changed, 23 insertions(+), 6 deletions(-) diff --git a/.github/workflows/build-container.yml b/.github/workflows/build-container.yml index 83d0de75..e328ab39 100644 --- a/.github/workflows/build-container.yml +++ b/.github/workflows/build-container.yml @@ -47,14 +47,31 @@ jobs: - name: List processes run: make top - - name: Print hosts - run: make run SVC=nvmeof-cli OPTS=--entrypoint=bash CMD='-c "cat /etc/hosts"' + - name: Test + run: | + . .env + make demo SERVER_ADDRESS=$NVMEOF_IP_ADDRESS OPTS=-T # Disable TTY - - name: Print DNS config - run: make run SVC=nvmeof-cli OPTS=--entrypoint=bash CMD='-c "cat /etc/resolv.conf"' + - name: Get subsystems + run: | + eval $(make alias) + nvmeof-cli get_subsystems - - name: Run test - run: . .env && make demo SERVER_ADDRESS=$NVMEOF_IP_ADDRESS OPTS=-T # Disable TTY + - name: Test mounting nvmeof device locally + run: | + . 
.env
+          sudo modprobe nvme-fabrics
+          sudo apt install -y nvme-cli
+          sudo nvme list
+          sudo nvme discover -t tcp -a $NVMEOF_IP_ADDRESS -s $NVMEOF_IO_PORT
+          sudo nvme connect -t tcp --traddr $NVMEOF_IP_ADDRESS -s $NVMEOF_IO_PORT -n nqn.2016-06.io.spdk:cnode1
+          sudo nvme list
+          NVMEOF_DEVICE=$(sudo nvme list -o json | jq '.Devices[] | select(.ModelNumber=="SPDK bdev Controller").DevicePath')
+          sudo mkfs $NVMEOF_DEVICE
+          MOUNT_POINT=$(mktemp -d)
+          sudo mount $NVMEOF_DEVICE $MOUNT_POINT
+          cd $MOUNT_POINT
+          touch test

      #- name: Setup tmate session
      #  uses: mxschmitt/action-tmate@v3

From 6a760df964ce4720f1f20711324c5b04cce65483 Mon Sep 17 00:00:00 2001
From: Ernesto Puerta
Date: Thu, 6 Jul 2023 14:20:34 +0200
Subject: [PATCH 12/19] ci: fix alias issue in Github Actions

Signed-off-by: Ernesto Puerta
---
 .github/workflows/build-container.yml | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/.github/workflows/build-container.yml b/.github/workflows/build-container.yml
index e328ab39..a071da03 100644
--- a/.github/workflows/build-container.yml
+++ b/.github/workflows/build-container.yml
@@ -54,6 +54,8 @@ jobs:
      - name: Get subsystems
        run: |
+          # https://github.com/actions/toolkit/issues/766
+          shopt -s expand_aliases
          eval $(make alias)
          nvmeof-cli get_subsystems

From 02c57a3147dfae5afabcb43f50f45450a92031e3 Mon Sep 17 00:00:00 2001
From: Ernesto Puerta
Date: Thu, 6 Jul 2023 14:45:04 +0200
Subject: [PATCH 13/19] ci: disable nvme initiator test

Signed-off-by: Ernesto Puerta
---
 .github/workflows/build-container.yml | 30 +++++++++++++--------------
 1 file changed, 15 insertions(+), 15 deletions(-)

diff --git a/.github/workflows/build-container.yml b/.github/workflows/build-container.yml
index a071da03..927d3778 100644
--- a/.github/workflows/build-container.yml
+++ b/.github/workflows/build-container.yml
@@ -59,22 +59,22 @@ jobs:
          eval $(make alias)
          nvmeof-cli get_subsystems
-      - name: Test mounting nvmeof device locally
-        run: |
-          . .env
-          sudo modprobe nvme-fabrics
-          sudo nvme list
-          sudo nvme discover -t tcp -a $NVMEOF_IP_ADDRESS -s $NVMEOF_IO_PORT
-          sudo nvme connect -t tcp --traddr $NVMEOF_IP_ADDRESS -s $NVMEOF_IO_PORT -n nqn.2016-06.io.spdk:cnode1
-          sudo nvme list
-          NVMEOF_DEVICE=$(sudo nvme list -o json | jq '.Devices[] | select(.ModelNumber=="SPDK bdev Controller").DevicePath')
-          sudo mkfs $NVMEOF_DEVICE
-          MOUNT_POINT=$(mktemp -d)
-          sudo mount $NVMEOF_DEVICE $MOUNT_POINT
-          cd $MOUNT_POINT
-          touch test
+      #- name: Test mounting nvmeof device locally
+      #  run: |
+      #    . 
.env + # sudo modprobe nvme-fabrics + # sudo nvme list + # sudo nvme discover -t tcp -a $NVMEOF_IP_ADDRESS -s $NVMEOF_IO_PORT + # sudo nvme connect -t tcp --traddr $NVMEOF_IP_ADDRESS -s $NVMEOF_IO_PORT -n nqn.2016-06.io.spdk:cnode1 + # sudo nvme list + # NVMEOF_DEVICE=$(sudo nvme list -o json | jq '.Devices[] | select(.ModelNumber=="SPDK bdev Controller").DevicePath') + # sudo mkfs $NVMEOF_DEVICE + # MOUNT_POINT=$(mktemp -d) + # sudo mount $NVMEOF_DEVICE $MOUNT_POINT + # cd $MOUNT_POINT + # touch test + # For debugging purposes (provides an SSH connection to the runner) #- name: Setup tmate session # uses: mxschmitt/action-tmate@v3 # with: From 2089458c07cb86f3da665f22df012ba78122031a Mon Sep 17 00:00:00 2001 From: Ernesto Puerta Date: Thu, 6 Jul 2023 15:02:37 +0200 Subject: [PATCH 14/19] docs: update README Signed-off-by: Ernesto Puerta --- README.md | 152 +++++++++++++++++++++++++++++++++++++++++++++++++++--- 1 file changed, 146 insertions(+), 6 deletions(-) diff --git a/README.md b/README.md index cf693b4f..f7fbca52 100644 --- a/README.md +++ b/README.md @@ -7,51 +7,68 @@ versatile standard (NVMe-oF)](https://nvmexpress.org/specification/nvme-of-speci Essentially, it allows to export existing RBD images as NVMe-oF namespaces. The [creation and management of RBD images](https://docs.ceph.com/en/latest/rbd/) is not within the scope of this component. - ## Installation + ### Requirements * Linux-based system with at least 16 GB of available RAM. [Fedora 37](https://fedoraproject.org/) is recommended. -* `moby-engine` (`docker-engine`) (v20.10) and `docker-compose` (v1.29). These versions are just indicative. -* `make` (only needed to launch `docker-compose` commands). * SELinux in permissive mode: + ```bash sed -i s/^SELINUX=.*$/SELINUX=permissive/ /etc/selinux/config setenforce 0 ``` +### Dependencies + +* `moby-engine` (`docker-engine`) (v20.10) and `docker-compose` (v1.29). These versions are just indicative +* `make` (only needed to launch `docker-compose` commands). + To install these dependencies in Fedora: + ```bash sudo dnf install -y make moby-engine docker-compose ``` + Some [post-installation steps](https://docs.docker.com/engine/install/linux-postinstall/) are required to use `docker` with regular users: + ```bash sudo groupadd docker sudo usermod -aG docker $USER ``` + ### Steps -To launch a containerized environment with a Ceph cluster and a NVMe-oF gateway (this is not the [prescribed deployment for production purposes](https://docs.ceph.com/en/quincy/install/#recommended-methods); for testing and development tasks alone): +To launch a containerized environment with a Ceph cluster and a NVMe-oF gateway (this is not the [prescribed deployment for production purposes](https://docs.ceph.com/en/quincy/install/#recommended-methods), but for testing and development tasks alone): 1. Get this repo: + ```bash git clone https://github.com/ceph/ceph-nvmeof.git cd ceph-nvmeof git submodule update --init --recursive ``` -1. Configure the environment (basically to allocate huge-pages, which requires entering password): + +1. Configure the environment (basically to allocate huge-pages, which requires entering the user password): + ```bash make setup ``` + 1. Download the container images: + ```bash make pull ``` + 1. Deploy the containers locally: + ```bash make up ``` + 1. 
Check that the deployment is up and running: + ```bash $ make ps @@ -63,12 +80,15 @@ To launch a containerized environment with a Ceph cluster and a NVMe-oF gateway 0.0.0.0:5500->5500/tcp,:::5500->5500/tcp, 0.0.0.0:8009->8009/tcp,:::8009->8009/tcp ``` + 1. The environment is ready to provide block storage on Ceph via NVMe-oF. + ## Usage Demo ### Configuring the NVMe-oF Gateway The following command executes all the steps required to set up the NVMe-oF environment: + ```bash $ make demo @@ -95,35 +115,49 @@ DOCKER_BUILDKIT=1 docker-compose run --rm ceph-nvmeof-cli --server-address ceph- Creating nvmeof_ceph-nvmeof-cli_run ... done INFO:__main__:Allowed open host access to nqn.2016-06.io.spdk:cnode1: True ``` + #### Manual Steps The same configuration can also be manually run: 1. First of all, let's create the `nvmeof-cli` shortcut to interact with the NVMe-oF gateway: + ```bash eval $(make alias) ``` + 1. In order to start working with the NVMe-oF gateway, we need to create an RBD image first (`demo_image` in the `rbd` pool): + ```bash make rbd ``` + 1. Create a bdev (Block Device) from an RBD image: + ```bash nvmeof-cli create_bdev --pool rbd --image demo_image --bdev demo_bdev ``` + 1. Create a subsystem: + ```bash nvmeof-cli create_subsystem --subnqn nqn.2016-06.io.spdk:cnode1 --serial SPDK00000000000001 ``` + 1. Add a namespace: + ```bash nvmeof-cli add_namespace --subnqn nqn.2016-06.io.spdk:cnode1 --bdev demo_bdev ``` + 1. Create a listener so that NVMe initiators can connect to: + ```bash nvmeof-cli create_listener ---subnqn nqn.2016-06.io.spdk:cnode1 -s 4420 ``` + 1. Define which hosts can connect: + ```bash nvmeof-cli add_host --subnqn nqn.2016-06.io.spdk:cnode1 --host "*" ``` @@ -134,11 +168,14 @@ The same configuration can also be manually run: Once the NVMe-oF target is 1. Install requisite packages: + ```bash sudo dnf install nvme-cli sudo modprobe nvme-fabrics ``` + 1. Ensure that the listener is reachable from the NVMe-oF initiator: + ```bash $ sudo nvme discover -t tcp -a 192.168.13.3 -s 4420 @@ -157,10 +194,13 @@ Once the NVMe-oF target is ``` 1. Connect to desired subsystem: + ```bash sudo nvme connect -t tcp --traddr 192.168.13.3 -s 4420 -n nqn.2016-06.io.spdk:cnode1 ``` + 1. List the available NVMe targets: + ```bash $ sudo nvme list Node Generic SN Model Namespace Usage Format FW Rev @@ -168,7 +208,9 @@ Once the NVMe-oF target is /dev/nvme1n1 /dev/ng1n1 SPDK00000000000001 SPDK bdev Controller 1 10,49 MB / 10,49 MB 4 KiB + 0 B 23.01 ... ``` + 1. Create a filesystem on the desired target: + ```bash $ sudo mkfs /dev/nvme1n1 mke2fs 1.46.5 (30-Dec-2021) @@ -179,7 +221,9 @@ Once the NVMe-oF target is Writing inode tables: done Writing superblocks and filesystem accounting information: done ``` + 1. Mount and use the storage volume + ```bash $ mkdir /mnt/nvmeof $ sudo mount /dev/nvme1n1 /mnt/nvmeof @@ -195,21 +239,33 @@ Once the NVMe-oF target is ## Advanced +### Configuration + +This service comes with a pre-defined configuration that matches the most common use cases. For advanced configuration, please update the settings at the `.env` file. That file is automatically read by `docker-compose`. However, it's a perfectly valid bash source, so that it can also be used as: + +```bash +source .env +echo $NVMEOF_VERSION... +``` + ### mTLS Configuration for testing purposes For testing purposes, self signed certificates and keys can be generated locally using OpenSSL. 
For the server, generate credentials for server name 'my.server' in files called server.key and server.crt: + ```bash $ openssl req -x509 -newkey rsa:4096 -nodes -keyout server.key -out server.crt -days 3650 -subj '/CN=my.server' ``` For client: + ```bash $ openssl req -x509 -newkey rsa:4096 -nodes -keyout client.key -out client.crt -days 3650 -subj '/CN=client1' ``` Indicate the location of the keys and certificates in the config file: + ```ini [mtls] @@ -227,7 +283,7 @@ client_cert = ./client.crt sh -c 'echo 4096 > /sys/kernel/mm/hugepages/hugepages-2048kB/nr_hugepages' ``` -This is automatically done in the `make setup` step. +This is automatically done in the `make setup` step. The amount of hugepages can be configured with `make setup HUGEPAGES=512`. ## Development @@ -235,11 +291,13 @@ This is automatically done in the `make setup` step. The development environment relies on containers (specifically `docker-compose`) for building and running the components. This has the benefit that, besides `docker` and `docker-compose`, no more dependencies need to be installed in the host environment. Once the GitHub repo has been cloned, remember to initialize its git submodules (`spdk`, which in turn depends on other submodules): + ```bash git submodule update --init --recursive ``` For building, SELinux might cause issues, so it's better to set it to permissive mode: + ```bash # Change it for the running session sudo setenforce 0 @@ -247,6 +305,7 @@ sudo setenforce 0 # Persist the change across boots sudo sed -i -E 's/^SELINUX=enforcing$/SELINUX=permissive/' /etc/selinux/config ``` + ### Building To avoid having to deal with `docker-compose` commands, this provides a `Makefile` that wraps those as regular `make` targets: @@ -258,6 +317,7 @@ make build ``` The resulting images should be like these: + ```bash $ docker images REPOSITORY TAG IMAGE ID CREATED SIZE @@ -273,10 +333,90 @@ quay.io/ceph/spdk 23.01 929e22e22ffd 8 minutes ago 342MB * `ceph` is a sandboxed (vstart-based) Ceph cluster for testing purposes. For building a specific service: + ```bash make build SVC=nvmeof ``` +### Development containers + +To avoid having to re-build container on every code change, developer friendly containers are provided: + +```bash +make up SVC="nvmeof-devel" +``` + +Devel containers provide the same base layer as the production containers but with the source code mounted at run-time. + + +## Help + +To obtain a detailed list of `make` targets, run `make help`: + +``` +Makefile to build and deploy the Ceph NVMe-oF Gateway + +Usage: + make [target] [target] ... OPTION=value ... + +Targets: + + Basic targets: + clean Clean-up environment + setup Configure huge-pages (requires sudo/root password) + up Services + + Options: + up: SVC Services (Default: nvmeof) + + Deployment commands (docker-compose): + build Build SVC images + down Shut down deployment + events Receive real-time events from containers + exec Run command inside an existing container + images List images + logs View SVC logs + pause Pause running deployment + port Print public port for a port binding + ps Display status of SVC containers + pull Download SVC images + push Push SVC container images to a registry. 
Requires previous "docker login" + restart Restart SVC + run Run command CMD inside SVC containers + shell Exec shell inside running SVC containers + stop Stop SVC + top Display running processes in SVC containers + unpause Resume paused deployment + up Launch services + + Options: + CMD Command to run with run/exec targets (Default: ) + DOCKER_COMPOSE Docker-compose command (Default: docker-compose) + OPTS Docker-compose subcommand options (Default: ) + SCALE Number of instances (Default: 1) + SVC Docker-compose services (Default: ) + + Demo: + demo Expose RBD_IMAGE_NAME as NVMe-oF target + + Options: + BDEV_NAME Name of the bdev (Default: demo_bdev) + LISTENER_PORT Listener port (Default: 4420) + NQN NVMe Qualified Name address (Default: nqn.2016-06.io.spdk:cnode1) + RBD_IMAGE_NAME Name of the RBD image (Default: demo_image) + RBD_IMAGE_SIZE Size of the RBD image (Default: 10M) + SERIAL Serial number (Default: SPDK00000000000001) + + Miscellaneous: + alias Print bash alias command for the nvmeof-cli. Usage: "eval $(make alias)" + + Options: + SERVER_ADDRESS Address of the nvmeof gateway (Default: nvmeof) + SERVER_PORT Port of the nvmeof gateway (Default: 5500) +``` + +Targets may accept options: `make run SVC=nvme OPTS=--entrypoint=bash`. + ## Troubleshooting ## Contributing and Support From 67e2695b3e7a3c989c835cd2011e75afc2f71570 Mon Sep 17 00:00:00 2001 From: Ernesto Puerta Date: Mon, 10 Jul 2023 18:13:52 +0200 Subject: [PATCH 15/19] build: read .env in Makefile This changes reads .env from Makefile which allows to simplify make configuration with host networking. Signed-off-by: Ernesto Puerta --- .env | 5 +---- Makefile | 1 + mk/misc.mk | 4 +--- 3 files changed, 3 insertions(+), 7 deletions(-) diff --git a/.env b/.env index abe302c7..c6fb74b1 100644 --- a/.env +++ b/.env @@ -13,10 +13,7 @@ NVMEOF_CONFIG="./ceph-nvmeof.conf" NVMEOF_SPDK_VERSION="${SPDK_VERSION}" NVMEOF_NAME="ceph-nvmeof" NVMEOF_SUMMARY="Ceph NVMe over Fabrics Gateway" -NVMEOF_DESCRIPTION="Service to provide block storage on top of Ceph for -platforms (e.g.: VMWare) without native Ceph support (RBD), replacing -existing approaches (iSCSI) with a newer and more versatile standard -(NVMe-oF)." +NVMEOF_DESCRIPTION="Service to provide block storage on top of Ceph for platforms (e.g.: VMWare) without native Ceph support (RBD), replacing existing approaches (iSCSI) with a newer and more versatile standard (NVMe-oF)." NVMEOF_URL="https://github.com/ceph/ceph-nvmeof" NVMEOF_TAGS="ceph,nvme-of,nvme-of gateway,rbd,block storage" NVMEOF_WANTS="ceph,rbd" diff --git a/Makefile b/Makefile index 9bbfaf31..8cc8668b 100644 --- a/Makefile +++ b/Makefile @@ -2,6 +2,7 @@ HUGEPAGES = 2048 # 4 GB HUGEPAGES_DIR = /sys/kernel/mm/hugepages/hugepages-2048kB/nr_hugepages # Includes +include .env include mk/containerized.mk include mk/demo.mk include mk/misc.mk diff --git a/mk/misc.mk b/mk/misc.mk index c9758ed7..021a8340 100644 --- a/mk/misc.mk +++ b/mk/misc.mk @@ -1,9 +1,7 @@ ## Miscellaneous: # nvmeof_cli -SERVER_ADDRESS = nvmeof ## Address of the nvmeof gateway -SERVER_PORT = 5500 ## Port of the nvmeof gateway -NVMEOF_CLI = $(DOCKER_COMPOSE_ENV) $(DOCKER_COMPOSE) run --rm nvmeof-cli --server-address $(SERVER_ADDRESS) --server-port $(SERVER_PORT) +NVMEOF_CLI = $(DOCKER_COMPOSE_ENV) $(DOCKER_COMPOSE) run --rm nvmeof-cli --server-address $(NVMEOF_IP_ADDRESS) --server-port $(NVMEOF_GW_PORT) alias: ## Print bash alias command for the nvmeof-cli. 
Usage: "eval $(make alias)" @echo alias nvmeof-cli=\"$(NVMEOF_CLI)\" From cf6d84f61a8b952663cec210479bfb6c24374192 Mon Sep 17 00:00:00 2001 From: Ernesto Puerta Date: Thu, 13 Jul 2023 18:29:44 +0200 Subject: [PATCH 16/19] build: tune tcp options Resolves: https://github.com/ceph/ceph-nvmeof/issues/160 Other changes: - Move mk/demo.mk constants to .env - Decrease Hugepages to 2 GB Signed-off-by: Ernesto Puerta --- .env | 9 +++++++++ Makefile | 5 +++-- ceph-nvmeof.conf | 4 +--- mk/demo.mk | 14 +++----------- 4 files changed, 16 insertions(+), 16 deletions(-) diff --git a/.env b/.env index c6fb74b1..4a25ba67 100644 --- a/.env +++ b/.env @@ -46,3 +46,12 @@ SPDK_CENTOS_REPO_VER="9.0-21.el9" # Ceph Cluster CEPH_CLUSTER_VERSION="${CEPH_VERSION}" CEPH_VSTART_ARGS="--without-dashboard --memstore" + +# Demo settings +RBD_POOL="rbd" +RBD_IMAGE_NAME="demo_image" +RBD_IMAGE_SIZE="10M" +BDEV_NAME="demo_bdev" +NQN="nqn.2016-06.io.spdk:cnode1" +SERIAL="SPDK00000000000001" +NVMEOF_FIRST_GATEWAY_NAME="gw-1" diff --git a/Makefile b/Makefile index 8cc8668b..2b2e3c07 100644 --- a/Makefile +++ b/Makefile @@ -1,4 +1,4 @@ -HUGEPAGES = 2048 # 4 GB +HUGEPAGES = 1024 # 2 GB HUGEPAGES_DIR = /sys/kernel/mm/hugepages/hugepages-2048kB/nr_hugepages # Includes @@ -31,7 +31,8 @@ up: SVC = nvmeof ## Services up: OPTS ?= --abort-on-container-exit up: override OPTS += --no-build --remove-orphans --scale nvmeof=$(SCALE) -clean: $(CLEAN) ## Clean-up environment +clean: override HUGEPAGES = 0 +clean: $(CLEAN) setup ## Clean-up environment help: AUTOHELP_SUMMARY = Makefile to build and deploy the Ceph NVMe-oF Gateway help: autohelp diff --git a/ceph-nvmeof.conf b/ceph-nvmeof.conf index 77211487..e03a1b3a 100644 --- a/ceph-nvmeof.conf +++ b/ceph-nvmeof.conf @@ -10,7 +10,6 @@ [gateway] name = group = -# Don't use 0.0.0.0 or nvme connect will fail with "no controller found: failed to write to nvme-fabrics device" addr = 192.168.13.3 port = 5500 enable_auth = False @@ -19,7 +18,6 @@ state_update_interval_sec = 5 [ceph] pool = rbd -# config_file = /var/lib/ceph/ceph.conf config_file = /etc/ceph/ceph.conf [mtls] @@ -42,4 +40,4 @@ log_level = WARN # transports = tcp # Example value: {"max_queue_depth" : 16, "max_io_size" : 4194304, "io_unit_size" : 1048576, "zcopy" : false} -# transport_tcp_options = +transport_tcp_options = {"in_capsule_data_size" : 8192, "max_io_qpairs_per_ctrlr" : 7} diff --git a/mk/demo.mk b/mk/demo.mk index 3434a5d7..d9fcf86c 100644 --- a/mk/demo.mk +++ b/mk/demo.mk @@ -1,24 +1,16 @@ ## Demo: # rbd -RBD_IMAGE_NAME = demo_image ## Name of the RBD image -RBD_IMAGE_SIZE = 10M ## Size of the RBD image - rbd: exec rbd: SVC = ceph -rbd: CMD = bash -c "rbd info $(RBD_IMAGE_NAME) || rbd create $(RBD_IMAGE_NAME) --size $(RBD_IMAGE_SIZE)" +rbd: CMD = bash -c "rbd -p $(RBD_POOL) info $(RBD_IMAGE_NAME) || rbd -p $(RBD_POOL) create $(RBD_IMAGE_NAME) --size $(RBD_IMAGE_SIZE)" # demo -BDEV_NAME = demo_bdev ## Name of the bdev -NQN = nqn.2016-06.io.spdk:cnode1 ## NVMe Qualified Name address -SERIAL = SPDK00000000000001 ## Serial number -LISTENER_PORT = 4420 ## Listener port - demo: rbd ## Expose RBD_IMAGE_NAME as NVMe-oF target - $(NVMEOF_CLI) create_bdev --pool rbd --image $(RBD_IMAGE_NAME) --bdev $(BDEV_NAME) + $(NVMEOF_CLI) create_bdev --pool $(RBD_POOL) --image $(RBD_IMAGE_NAME) --bdev $(BDEV_NAME) $(NVMEOF_CLI) create_subsystem --subnqn $(NQN) --serial $(SERIAL) $(NVMEOF_CLI) add_namespace --subnqn $(NQN) --bdev $(BDEV_NAME) - $(NVMEOF_CLI) create_listener --subnqn $(NQN) -s $(LISTENER_PORT) + $(NVMEOF_CLI) create_listener 
--subnqn $(NQN) --trsvcid $(NVMEOF_IO_PORT) $(NVMEOF_CLI) add_host --subnqn $(NQN) --host "*" .PHONY: demo rbd From 6495c0c4a9032c573ab778f7cc32fc8aafa8d88a Mon Sep 17 00:00:00 2001 From: Ernesto Puerta Date: Fri, 14 Jul 2023 10:50:49 +0200 Subject: [PATCH 17/19] build: add update-lockfile rule When a Python package needs to be updated in pyproject.toml, the Python package manager PDM will check that against the lockfile (pdm.lock) and if any changes are found it will fail. This ensure builds are reproducible and consistent. In order to update, remove or add new packages, the lockfile needs to be updated. A new Makefile target (update-lockfile) has been added. It runs on a new container image (nvmeof-builder) and only updates the pdm.lock file: ```bash make update-lockfile git add pdm.lock ``` Signed-off-by: Ernesto Puerta --- .env | 2 ++ Dockerfile | 20 +++++++++++++------- Makefile | 15 ++++++++++----- docker-compose.yaml | 20 +++++++++++++------- mk/autohelp.mk | 2 +- mk/containerized.mk | 7 ++++--- 6 files changed, 43 insertions(+), 23 deletions(-) diff --git a/.env b/.env index 4a25ba67..d3632c92 100644 --- a/.env +++ b/.env @@ -6,6 +6,8 @@ MAINTAINER="Ceph Developers " # Performance NVMEOF_NOFILE=20480 # Max number of open files (depends on number of hosts connected) +HUGEPAGES=1024 # 2 GB +HUGEPAGES_DIR="/sys/kernel/mm/hugepages/hugepages-2048kB/nr_hugepages" # NVMe-oF NVMEOF_VERSION="${VERSION}" diff --git a/Dockerfile b/Dockerfile index 8e5436fc..f661e041 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,7 +1,7 @@ # syntax = docker/dockerfile:1.4 -ARG NVMEOF_SPDK_VERSION -ARG NVMEOF_TARGET # either 'gateway' or 'cli' +ARG NVMEOF_SPDK_VERSION \ + NVMEOF_TARGET # either 'gateway' or 'cli' #------------------------------------------------------------------------------ # Base image for NVMEOF_TARGET=cli (nvmeof-cli) @@ -90,9 +90,11 @@ ENV PYTHONPATH=$APPDIR/proto:$APPDIR/__pypackages__/$PYTHON_MAJOR.$PYTHON_MINOR/ WORKDIR $APPDIR #------------------------------------------------------------------------------ -FROM python-intermediate AS builder - -ENV PDM_SYNC_FLAGS="-v --no-isolation --no-self --no-editable" +FROM python-intermediate AS builder-base +ARG PDM_VERSION=2.7.4 \ + PDM_INSTALL_CMD=sync \ + PDM_INSTALL_FLAGS="-v --no-isolation --no-self --no-editable" +ENV PDM_INSTALL_FLAGS=$PDM_INSTALL_FLAGS # https://pdm.fming.dev/latest/usage/advanced/#use-pdm-in-a-multi-stage-dockerfile RUN \ @@ -105,11 +107,15 @@ RUN \ RUN \ --mount=type=cache,target=/root/.cache/pip \ - pip install pdm + pip install pdm==$PDM_VERSION + +#------------------------------------------------------------------------------ +FROM builder-base AS builder + COPY pyproject.toml pdm.lock pdm.toml ./ RUN \ --mount=type=cache,target=/root/.cache/pdm \ - pdm sync $PDM_SYNC_FLAGS + pdm "$PDM_INSTALL_CMD" $PDM_INSTALL_FLAGS COPY . . 
RUN pdm run protoc diff --git a/Makefile b/Makefile index 2b2e3c07..6a0bf187 100644 --- a/Makefile +++ b/Makefile @@ -1,5 +1,6 @@ -HUGEPAGES = 1024 # 2 GB -HUGEPAGES_DIR = /sys/kernel/mm/hugepages/hugepages-2048kB/nr_hugepages +# Make config +MAKEFLAGS += --no-builtin-rules --no-builtin-variables +.SUFFIXES: # Includes include .env @@ -17,7 +18,7 @@ setup: ## Configure huge-pages (requires sudo/root password) @echo Actual Hugepages allocation: $$(cat $(HUGEPAGES_DIR)) @[ $$(cat $(HUGEPAGES_DIR)) -eq $(HUGEPAGES) ] -build push pull: SVC = spdk nvmeof nvmeof-cli ceph +build push pull: SVC ?= spdk nvmeof nvmeof-cli ceph build: export NVMEOF_GIT_BRANCH != git name-rev --name-only HEAD build: export NVMEOF_GIT_COMMIT != git rev-parse HEAD @@ -26,7 +27,6 @@ build: export SPDK_GIT_BRANCH != git -C spdk name-rev --name-only HEAD build: export SPDK_GIT_COMMIT != git rev-parse HEAD:spdk build: export BUILD_DATE != date -u +"%Y-%m-%dT%H:%M:%SZ" - up: SVC = nvmeof ## Services up: OPTS ?= --abort-on-container-exit up: override OPTS += --no-build --remove-orphans --scale nvmeof=$(SCALE) @@ -34,7 +34,12 @@ up: override OPTS += --no-build --remove-orphans --scale nvmeof=$(SCALE) clean: override HUGEPAGES = 0 clean: $(CLEAN) setup ## Clean-up environment +update-lockfile: SVC=nvmeof-builder +update-lockfile: override OPTS+=--entrypoint=pdm +update-lockfile: CMD=update --no-sync --no-isolation --no-self --no-editable +update-lockfile: pyproject.toml run ## Update dependencies in lockfile (pdm.lock) + help: AUTOHELP_SUMMARY = Makefile to build and deploy the Ceph NVMe-oF Gateway help: autohelp -.PHONY: all setup clean help +.PHONY: all setup clean help update-lockfile diff --git a/docker-compose.yaml b/docker-compose.yaml index 184fc294..0aed574d 100644 --- a/docker-compose.yaml +++ b/docker-compose.yaml @@ -52,7 +52,6 @@ services: default: ipv4_address: 192.168.13.2 nvmeof-base: - image: quay.io/ceph/nvmeof:$NVMEOF_VERSION build: context: . 
args: @@ -89,23 +88,30 @@ services: ulimits: nofile: $NVMEOF_NOFILE memlock: -1 - networks: - default: - ipv4_address: $NVMEOF_IP_ADDRESS ports: - - "$NVMEOF_IO_PORT:$NVMEOF_IO_PORT" # I/O controllers - - "$NVMEOF_GW_PORT:$NVMEOF_GW_PORT" # Gateway - - "$NVMEOF_DISC_PORT:$NVMEOF_DISC_PORT" # Discovery + - "$NVMEOF_IO_PORT" # I/O controllers + - "$NVMEOF_GW_PORT" # Gateway + - "$NVMEOF_DISC_PORT" # Discovery nvmeof: extends: service: nvmeof-base + image: quay.io/ceph/nvmeof:$NVMEOF_VERSION depends_on: ceph: condition: service_healthy + nvmeof-builder: + extends: + service: nvmeof-base + image: nvmeof-builder + build: + target: builder-base + volumes: + - .:/src nvmeof-devel: # Runs from source code in current dir extends: service: nvmeof-base + image: quay.io/ceph/nvmeof:$NVMEOF_VERSION depends_on: ceph: condition: service_healthy diff --git a/mk/autohelp.mk b/mk/autohelp.mk index d6270a73..f81ff4d3 100644 --- a/mk/autohelp.mk +++ b/mk/autohelp.mk @@ -16,7 +16,7 @@ autohelp: @for file in $(MAKEFILE_LIST); do \ awk 'BEGIN {FS = "## "}; /^##/ {printf "\n %s\n", $$2}' $$file; \ awk 'BEGIN {FS = ":.*?## "}; \ - /^\w+:.*##/ {printf " $(BOLD)%-15s$(NORMAL) %s\n", $$1, $$2}' $$file | sort; \ + /^\w.*:.*##/ {printf " $(BOLD)%-15s$(NORMAL) %s\n", $$1, $$2}' $$file | sort; \ grep -q "^\w.*=.*## " $$file && echo -e "\n Options:"; \ awk 'BEGIN {FS = "( [!?]?= | ?## )"}; \ /^\w.*=.*## / {printf " $(BOLD)%-15s$(NORMAL) %s (Default: %s)\n", $$1, $$3, $$2} \ diff --git a/mk/containerized.mk b/mk/containerized.mk index 158dc096..cee510d2 100644 --- a/mk/containerized.mk +++ b/mk/containerized.mk @@ -17,16 +17,17 @@ $(DOCKER_COMPOSE_COMMANDS): pull: ## Download SVC images -build: ## Build SVC images -build: DOCKER_COMPOSE_ENV = DOCKER_BUILDKIT=1 +build: ## Build SVC images +build: DOCKER_COMPOSE_ENV = DOCKER_BUILDKIT=1 COMPOSE_DOCKER_CLI_BUILD=1 push: ## Push SVC container images to a registry. Requires previous "docker login" up: ## Launch services run: ## Run command CMD inside SVC containers -run: SVC = +run: SVC ?= run: override OPTS += --rm +run: DOCKER_COMPOSE_ENV = DOCKER_BUILDKIT=1 COMPOSE_DOCKER_CLI_BUILD=1 shell: ## Exec shell inside running SVC containers shell: CMD = bash From 9f5d4e4c8df54e307341962e7ffbe4a4056cb051 Mon Sep 17 00:00:00 2001 From: Ernesto Puerta Date: Fri, 14 Jul 2023 11:02:35 +0200 Subject: [PATCH 18/19] doc: update dependencies Signed-off-by: Ernesto Puerta --- README.md | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/README.md b/README.md index f7fbca52..ff1e554e 100644 --- a/README.md +++ b/README.md @@ -348,6 +348,17 @@ make up SVC="nvmeof-devel" Devel containers provide the same base layer as the production containers but with the source code mounted at run-time. +### Adding, removing or updating Python depedencies + +Python dependencies are specified in the file `pyproject.toml` +([PEP-621](https://peps.python.org/pep-0621/)), specifically under the `dependencies` list. 
+ +After modifying it, the dependency lockfile (`pdm.lock`) needs to be updated accordingly (otherwise container image builds will fail): + +```bash +make update-lockfile +git add pdm.lock +``` ## Help @@ -365,6 +376,7 @@ Targets: clean Clean-up environment setup Configure huge-pages (requires sudo/root password) up Services + update-lockfile Update dependencies in lockfile (pdm.lock) Options: up: SVC Services (Default: nvmeof) @@ -399,20 +411,8 @@ Targets: Demo: demo Expose RBD_IMAGE_NAME as NVMe-oF target - Options: - BDEV_NAME Name of the bdev (Default: demo_bdev) - LISTENER_PORT Listener port (Default: 4420) - NQN NVMe Qualified Name address (Default: nqn.2016-06.io.spdk:cnode1) - RBD_IMAGE_NAME Name of the RBD image (Default: demo_image) - RBD_IMAGE_SIZE Size of the RBD image (Default: 10M) - SERIAL Serial number (Default: SPDK00000000000001) - Miscellaneous: alias Print bash alias command for the nvmeof-cli. Usage: "eval $(make alias)" - - Options: - SERVER_ADDRESS Address of the nvmeof gateway (Default: nvmeof) - SERVER_PORT Port of the nvmeof gateway (Default: 5500) ``` Targets may accept options: `make run SVC=nvme OPTS=--entrypoint=bash`. From 6c176fd37001b64314d91ece1bacae2b4e3b3467 Mon Sep 17 00:00:00 2001 From: Ernesto Puerta Date: Fri, 14 Jul 2023 11:28:58 +0200 Subject: [PATCH 19/19] build: use ephemereal ports Previously, docker-compose used fixed port mappings (host and container ports matched, e.g: 4420:4420). However, that doesn't allow to deploy multiple instances of the same container (--scale nvmeof=N). Instead, ephemeral ports (randomly assigned by docker-compose) need to be used. Fortunately, docker-compose provides with the 'port' command, which maps container to host ports. Signed-off-by: Ernesto Puerta --- .github/workflows/build-container.yml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/.github/workflows/build-container.yml b/.github/workflows/build-container.yml index 927d3778..9e24bf49 100644 --- a/.github/workflows/build-container.yml +++ b/.github/workflows/build-container.yml @@ -35,7 +35,8 @@ jobs: timeout-minutes: 1 run: | . .env - until nc -z localhost $NVMEOF_GW_PORT; do + HOST_PORT=$(make -s port OPTS="--index=1" CMD="nvmeof $NVMEOF_GW_PORT" | tr ":" " ") + until nc -z $HOST_PORT; do echo -n . sleep 1 done