fix(grpc): throw error on wrong gateway listener #1

name: "CI"
on:
push:
branches:
- '*'
tags:
- 'v*'
pull_request:
workflow_dispatch:
release:
types:
- created
env:
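# Polling interval (seconds) used by the readiness wait loops in the jobs below.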
WAIT_INTERVAL_SECS: 1
jobs:
build:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v3
with:
# equivalent to: git submodule update --init --recursive
submodules: recursive
- name: Build container images
run: make build
- name: Save container images
run: |
. .env
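# .env supplies the image names and tags used below (QUAY_NVMEOF, QUAY_NVMEOFCLI, QUAY_CEPH, NVMEOF_VERSION, CEPH_VERSION).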
docker save $QUAY_NVMEOF:$NVMEOF_VERSION > nvmeof.tar
docker save $QUAY_NVMEOFCLI:$NVMEOF_VERSION > nvmeof-cli.tar
docker save $QUAY_CEPH:$CEPH_VERSION > vstart-cluster.tar
docker save bdevperf > bdevperf.tar
docker save nvmeof-devel > nvmeof-devel.tar
- name: Upload container images
uses: actions/upload-artifact@v3
with:
name: ceph_nvmeof_container_images-${{ github.run_number }}
path: |
nvmeof.tar
nvmeof-cli.tar
nvmeof-devel.tar
vstart-cluster.tar
bdevperf.tar
- name: Build stand-alone packages (RPMs and Python wheel)
id: build-standalone-packages
run: |
export EXPORT_DIR=$(mktemp -d)
make export-rpms
make export-python
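# Propagate EXPORT_DIR to later steps via GITHUB_ENV so the upload step can find the exported packages.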
echo "EXPORT_DIR=$EXPORT_DIR" >> "$GITHUB_ENV"
- name: Upload stand-alone packages
uses: actions/upload-artifact@v3
with:
name: ceph_nvmeof_standalone_packages-${{ github.run_number }}
path: |
${{ env.EXPORT_DIR }}/**
pytest:
needs: build
strategy:
fail-fast: false
matrix:
test: ["cli", "state", "multi_gateway", "server"]
runs-on: ubuntu-latest
env:
HUGEPAGES: 512 # for multi gateway test, approx 256 per gateway instance
steps:
- name: Checkout code
uses: actions/checkout@v3
- name: Setup huge pages
run: |
make setup HUGEPAGES=$HUGEPAGES
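# The SPDK-based gateway needs hugepages; the setup target is assumed to configure vm.nr_hugepages to $HUGEPAGES.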
- name: Download container images
uses: actions/download-artifact@v3
with:
name: ceph_nvmeof_container_images-${{ github.run_number }}
- name: Load container images
run: |
docker load < nvmeof-devel.tar
docker load < vstart-cluster.tar
- name: Start ceph cluster
run: |
make up SVC=ceph OPTS="--detach"
- name: Wait for the ceph cluster container to become healthy
timeout-minutes: 3
run: |
while true; do
container_status=$(docker inspect --format='{{.State.Health.Status}}' ceph)
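# Health.Status assumes the ceph container image defines a Docker HEALTHCHECK; the step-level timeout-minutes bounds this loop.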
if [[ $container_status == "healthy" ]]; then
# success
exit 0
else
# Wait for a specific time before checking again
sleep ${{ env.WAIT_INTERVAL_SECS }}
echo -n .
fi
done
- name: Create RBD image
run: |
echo "💁 ceph list pools:"
make exec SVC=ceph OPTS="-T" CMD="ceph osd lspools"
echo "💁 rbd create:"
make exec SVC=ceph OPTS="-T" CMD="rbd create rbd/mytestdevimage --size 16"
echo "💁 ls rbd:"
make exec SVC=ceph OPTS="-T" CMD="rbd ls rbd"
- name: Run ${{ matrix.test }} test
run: |
# Run the test code from the current checkout
# Managing pytest’s output: https://docs.pytest.org/en/7.1.x/how-to/output.html
make protoc
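# protoc regenerates the gRPC/protobuf bindings; the devel container then runs pytest against the
# host's tests/ directory, which is bind-mounted into /src/tests with python3 overriding the entrypoint.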
make run SVC="nvmeof-devel" OPTS="--volume=$(pwd)/tests:/src/tests --entrypoint=python3" CMD="-m pytest --show-capture=all -s --full-trace -vv -rA tests/test_${{ matrix.test }}.py"
- name: Check coredump existence
if: success() || failure()
id: check_coredumps
uses: andstor/file-existence-action@20b4d2e596410855db8f9ca21e96fbe18e12930b # v2, pinned to SHA for security reasons
with:
files: "/tmp/coredump/core.*"
- name: Upload ${{ matrix.test }} test core dumps
if: steps.check_coredumps.outputs.files_exists == 'true'
uses: actions/upload-artifact@v3
with:
name: ceph_nvmeof_pytest_${{ matrix.test }}_cores-${{ github.run_number }}
path: |
/tmp/coredump/core.*
- name: Display logs
if: success() || failure()
run: |
make logs OPTS=""
- name: Tear down
if: success() || failure()
run: |
make down
make clean
demo:
needs: build
runs-on: ubuntu-latest
env:
HUGEPAGES: 512
steps:
- name: Checkout code
uses: actions/checkout@v3
- name: Setup huge-pages
run: make setup HUGEPAGES=$HUGEPAGES
- name: Download container images
uses: actions/download-artifact@v3
with:
name: ceph_nvmeof_container_images-${{ github.run_number }}
- name: Load container images
run: |
docker load < nvmeof.tar
docker load < nvmeof-cli.tar
docker load < vstart-cluster.tar
docker load < bdevperf.tar
- name: Start containers
run: |
make up OPTS=--detach
- name: Wait for the Gateway to be listening
timeout-minutes: 3
run: |
. .env
echo using gateway $NVMEOF_IP_ADDRESS port $NVMEOF_GW_PORT
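# nc -z only checks that the TCP port accepts connections; keep polling until the gateway's listener is up.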
until nc -z $NVMEOF_IP_ADDRESS $NVMEOF_GW_PORT; do
echo -n .
sleep ${{ env.WAIT_INTERVAL_SECS }}
done
- name: List containers
if: success() || failure()
run: make ps
- name: List processes
if: success() || failure()
run: make top
- name: Test
run: |
make demo OPTS=-T
- name: Get subsystems
run: |
# https://github.com/actions/toolkit/issues/766
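# Aliases are not expanded in non-interactive shells, so enable expansion before evaluating the nvmeof-cli alias defined by 'make alias'.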
shopt -s expand_aliases
eval $(make alias)
nvmeof-cli get_subsystems
- name: Run bdevperf
run: |
# see https://spdk.io/doc/nvmf_multipath_howto.html
. .env
echo -n "ℹ️ Starting bdevperf container"
make up SVC=bdevperf OPTS="--detach"
sleep 10
echo "ℹ️ bdevperf start up logs"
make logs SVC=bdevperf
eval $(make run SVC=bdevperf OPTS="--entrypoint=env" | grep BDEVPERF_SOCKET | tr -d '\n\r' )
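# Extract BDEVPERF_SOCKET from the container environment so the rpc.py calls below target the right RPC socket.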
rpc="/usr/libexec/spdk/scripts/rpc.py"
echo "ℹ️ bdevperf bdev_nvme_set_options"
make exec SVC=bdevperf OPTS=-T CMD="$rpc -v -s $BDEVPERF_SOCKET bdev_nvme_set_options -r -1"
echo "ℹ️ bdevperf tcp connect ip: $NVMEOF_IP_ADDRESS port: $NVMEOF_IO_PORT nqn: $NQN"
make exec SVC=bdevperf OPTS=-T CMD="$rpc -v -s $BDEVPERF_SOCKET bdev_nvme_attach_controller -b Nvme0 -t tcp -a $NVMEOF_IP_ADDRESS -s $NVMEOF_IO_PORT -f ipv4 -n $NQN -l -1 -o 10"
echo "ℹ️ bdevperf perform_tests"
eval $(make run SVC=bdevperf OPTS="--entrypoint=env" | grep BDEVPERF_TEST_DURATION | tr -d '\n\r' )
timeout=$(expr $BDEVPERF_TEST_DURATION \* 2)
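# Allow perform_tests up to twice the configured test duration before giving up.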
bdevperf="/usr/libexec/spdk/scripts/bdevperf.py"
make exec SVC=bdevperf OPTS=-T CMD="$bdevperf -v -t $timeout -s $BDEVPERF_SOCKET perform_tests"
- name: Check coredump existence
if: success() || failure()
id: check_coredumps
uses: andstor/file-existence-action@20b4d2e596410855db8f9ca21e96fbe18e12930b # v2, pinned to SHA for security reasons
with:
files: "/tmp/coredump/core.*"
- name: Upload demo core dumps
if: steps.check_coredumps.outputs.files_exists == 'true'
uses: actions/upload-artifact@v3
with:
name: ceph_nvmeof_demo_cores-${{ github.run_number }}
path: |
/tmp/coredump/core.*
#- name: Test mounting nvmeof device locally
# run: |
# . .env
# sudo modprobe nvme-fabrics
# sudo nvme list
# sudo nvme discover -t tcp -a $NVMEOF_IP_ADDRESS -s $NVMEOF_IO_PORT
# sudo nvme connect -t tcp --traddr $NVMEOF_IP_ADDRESS -s $NVMEOF_IO_PORT -n nqn.2016-06.io.spdk:cnode1
# sudo nvme list
# NVMEOF_DEVICE=$(sudo nvme list -o json | jq '.Devices[] | select(.ModelNumber=="SPDK bdev Controller").DevicePath')
# sudo mkfs $NVMEOF_DEVICE
# MOUNT_POINT=$(mktemp -d)
# sudo mount $NVMEOF_DEVICE $MOUNT_POINT
# cd $MOUNT_POINT
# touch test
# For debugging purposes (provides an SSH connection to the runner)
#- name: Setup tmate session
# uses: mxschmitt/action-tmate@v3
# with:
# limit-access-to-actor: true
- name: Display logs
if: success() || failure()
run: make logs OPTS=''
- name: Tear down
if: success() || failure()
run: |
make down
make clean
push-to-registry:
if: github.event_name == 'release' && startsWith(github.ref, 'refs/tags/v')
needs: [pytest, demo]
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v3
- name: Download container images
uses: actions/download-artifact@v3
with:
name: ceph_nvmeof_container_images-${{ github.run_number }}
- name: Load container images
run: |
docker load < nvmeof.tar
docker load < nvmeof-cli.tar
- name: Login to quay.io
uses: docker/login-action@v2
with:
registry: ${{ vars.CONTAINER_REGISTRY }}
username: '${{ vars.CONTAINER_REGISTRY_USERNAME }}'
password: '${{ secrets.CONTAINER_REGISTRY_PASSWORD }}'
- name: Publish nvmeof containers when release/tag is created
run: |
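# 'make push' is assumed to tag and push the nvmeof and nvmeof-cli images to the registry configured via CONTAINER_REGISTRY.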
make push