Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

chore(tests) move to docker compose v2 #120

Merged
merged 4 commits into from
Aug 4, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion .github/workflows/tests.yml
Original file line number Diff line number Diff line change
Expand Up @@ -171,4 +171,4 @@ jobs:
if: failure()
run: |
pushd t/fixtures
docker-compose logs
docker compose logs
4 changes: 2 additions & 2 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -769,13 +769,13 @@ storage_config = {

### etcd

[etcd](https://etcd.io) based storage. Right now only `v2` protocol is supported.
[etcd](https://etcd.io) based storage. Right now only `v3` protocol is supported, and etcd server
version should be >= v3.4.0.
The default config is:

```lua
storage_config = {
http_host = 'http://127.0.0.1:4001',
protocol = 'v2',
key_prefix = '',
timeout = 60,
ssl_verify = false,
Expand Down
79 changes: 65 additions & 14 deletions lib/resty/acme/storage/etcd.lua
Original file line number Diff line number Diff line change
Expand Up @@ -7,12 +7,16 @@ function _M.new(conf)
conf = conf or {}
local self = setmetatable({}, mt)

if conf.protocol and conf.protocol ~= "v3" then
return nil, "only v3 protocol is supported"
end

local options = {
http_host = conf.http_host or "http://127.0.0.1:4001",
protocol = conf.protocol or "v2",
key_prefix = conf.key_prefix or "",
timeout = conf.timeout or 60,
ssl_verify = conf.ssl_verify,
protocol = "v3",
}

local client, err = etcd.new(options)
Expand All @@ -21,65 +25,112 @@ function _M.new(conf)
end

self.client = client
self.protocol_is_v2 = options.protocol == "v2"
return self, nil
end

-- Obtain a new etcd lease with the given TTL (seconds).
-- Returns the lease ID on success, or nil plus an error string.
local function grant(self, ttl)
  local grant_res, grant_err = self.client:grant(ttl)
  if grant_err ~= nil then
    return nil, grant_err
  end
  return grant_res.body.ID
end

-- set the key regardless of its existence
-- Set key `k` to value `v`, overwriting any existing entry.
-- When `ttl` (seconds) is given, a fresh etcd lease is granted and
-- attached to the key so that it expires automatically (v3 has no
-- per-key TTL; expiry is lease-based).
-- Returns nil on success, or an error string on failure.
function _M:set(k, v, ttl)
  k = "/" .. k

  local lease_id, err
  if ttl then
    lease_id, err = grant(self, ttl)
    if err then
      return err
    end
  end

  -- lease_id is nil when no ttl was requested; the client then puts
  -- the key without a lease.
  local _
  _, err = self.client:set(k, v, { lease = lease_id })
  if err then
    return err
  end
end

-- Set key `k` to value `v` only if the key doesn't already exist.
-- Implemented as a v3 transaction that compares create_revision == 0,
-- i.e. the key has never been created.
-- Note: a key created by etcd:setnx can't be attached to a lease
-- afterwards (appears to be an upstream bug), hence the txn approach.
-- Returns nil on success, "exists" when the key is already present,
-- or an error string on failure.
function _M:add(k, v, ttl)
  k = "/" .. k

  local lease_id, err
  if ttl then
    lease_id, err = grant(self, ttl)
    if err then
      return err
    end
  end

  -- Transaction guard: succeed only when the key was never created.
  local compare = {
    {
      key = k,
      target = "CREATE",
      create_revision = 0,
    }
  }

  local success = {
    {
      requestPut = {
        key = k,
        value = v,
        lease = lease_id,
      }
    }
  }

  -- Use a name that doesn't shadow the `v` parameter.
  local res, txn_err = self.client:txn(compare, success)
  if txn_err then
    -- Match the other mutating methods (set/delete): return only the
    -- error string, not (nil, err), so callers checking the first
    -- return value see the failure.
    return txn_err
  elseif res and res.body and not res.body.succeeded then
    return "exists"
  end
end

-- Delete key `k` from etcd.
-- Returns nil on success, or an error string on failure.
function _M:delete(k)
  local _, del_err = self.client:delete("/" .. k)
  if del_err ~= nil then
    return del_err
  end
end

-- Fetch the value stored at key `k`.
-- Returns (value, nil) on success, (nil, nil) when the key is absent,
-- and (nil, err) on transport or unexpected HTTP errors.
function _M:get(k)
  k = "/" .. k
  local res, err = self.client:get(k)
  if err then
    return nil, err
  elseif res and res.body.kvs == nil then
    -- etcd v3 responds 200 with no `kvs` field when the key is missing.
    return nil, nil
  elseif res.status ~= 200 then
    return nil, "etcd returned status " .. res.status
  end
  local node = res.body.kvs[1]
  if not node then -- would this ever happen?
    return nil, nil
  end
  return node.value
end

local empty_table = {}
function _M:list(prefix)
local res, err = self.client:get("/")
local res, err = self.client:readdir("/" .. prefix)
if err then
return nil, err
elseif not res or not res.body or not res.body.node or not res.body.node.nodes then
elseif not res or not res.body or not res.body.kvs then
return empty_table, nil
end
local ret = {}
-- offset 1 to strip leading "/" in original key
local prefix_length = #prefix + 1
for _, node in ipairs(res.body.node.nodes) do
for _, node in ipairs(res.body.kvs) do
local key = node.key
if key then
-- start from 2 to strip leading "/"
Expand Down
4 changes: 2 additions & 2 deletions t/e2e.t
Original file line number Diff line number Diff line change
Expand Up @@ -204,10 +204,10 @@ __DATA__
}
local out
for i=0,15,1 do
local proc = ngx_pipe.spawn({'bash', '-c', "echo q |openssl s_client -host 127.0.0.1 -servername ".. ngx.var.domain .. " -port 5001 -cipher ECDHE-RSA-AES128-GCM-SHA256|openssl x509 -noout -text && sleep 0.1"}, opts)
local proc = ngx_pipe.spawn({'bash', '-c', "echo q |openssl s_client -host 127.0.0.1 -servername ".. ngx.var.domain .. " -max_protocol TLSv1.2 -port 5001 -cipher ECDHE-RSA-AES128-GCM-SHA256|openssl x509 -noout -text && sleep 0.1"}, opts)
local data, err, partial = proc:stdout_read_all()
if ngx.re.match(data, ngx.var.domain) then
local proc2 = ngx_pipe.spawn({'bash', '-c', "echo q |openssl s_client -host 127.0.0.1 -servername ".. ngx.var.domain .. " -port 5001 -cipher ECDHE-ECDSA-AES128-GCM-SHA256|openssl x509 -noout -text && sleep 0.1"}, opts)
local proc2 = ngx_pipe.spawn({'bash', '-c', "echo q |openssl s_client -host 127.0.0.1 -servername ".. ngx.var.domain .. " -port 5001 -max_protocol TLSv1.2 -cipher ECDHE-ECDSA-AES128-GCM-SHA256|openssl x509 -noout -text && sleep 0.1"}, opts)
local data2, err, partial = proc2:stdout_read_all()
ngx.log(ngx.INFO, data, data2)
local f = io.open("/tmp/test2.1", "w")
Expand Down
69 changes: 69 additions & 0 deletions t/fixtures/docker-compose.yml
Original file line number Diff line number Diff line change
Expand Up @@ -20,6 +20,75 @@ services:
acmenet:
ipv4_address: 10.30.50.3

consul:
image: hashicorp/consul
ports:
- "127.0.0.1:8500:8500"
command: agent -server -bootstrap-expect=1 -client=0.0.0.0
healthcheck:
test: ["CMD", "consul", "members"]
interval: 10s
timeout: 5s
retries: 3

vault:
image: hashicorp/vault
user: root
cap_add:
- IPC_LOCK
environment:
- VAULT_DEV_ROOT_TOKEN_ID=root
- VAULT_LOCAL_CONFIG={"listener":{"tcp":{"tls_key_file":"/tmp/key.pem","tls_cert_file":"/tmp/cert.pem","address":"0.0.0.0:8210"}}}
volumes:
- /tmp/key.pem:/tmp/key.pem
- /tmp/cert.pem:/tmp/cert.pem
ports:
- "127.0.0.1:8200:8200"
- "127.0.0.1:8210:8210"
command: server -dev
healthcheck:
test: ["CMD", "vault", "status", "-address", "http://127.0.0.1:8200"]
interval: 10s
timeout: 5s
retries: 3

etcd:
image: quay.io/coreos/etcd:v3.4.33
volumes:
- /usr/share/ca-certificates/:/etc/ssl/certs
ports:
- "4001:4001"
- "2380:2380"
- "2379:2379"
environment:
- HOST_IP=${HOST_IP}
command: >
etcd
-name etcd0
-advertise-client-urls http://${HOST_IP}:2379,http://${HOST_IP}:4001
-listen-client-urls http://0.0.0.0:2379,http://0.0.0.0:4001
-initial-advertise-peer-urls http://${HOST_IP}:2380
-listen-peer-urls http://0.0.0.0:2380
-initial-cluster-token etcd-cluster-1
-initial-cluster etcd0=http://${HOST_IP}:2380
-initial-cluster-state new
healthcheck:
test: ["CMD", "etcdctl", "endpoint", "health"]
interval: 10s
timeout: 5s
retries: 3

dummy:
image: ubuntu
command: tail -f /dev/null
depends_on:
consul:
condition: service_healthy
vault:
condition: service_healthy
etcd:
condition: service_healthy

networks:
acmenet:
driver: bridge
Expand Down
25 changes: 14 additions & 11 deletions t/fixtures/prepare_env.sh
Original file line number Diff line number Diff line change
@@ -1,31 +1,34 @@
#!/bin/bash

echo "Prepare containers"
docker run -d -e CONSUL_CLIENT_INTERFACE='eth0' -e CONSUL_BIND_INTERFACE='eth0' -p 127.0.0.1:8500:8500 hashicorp/consul agent -server -bootstrap-expect=1
SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
export HOST_IP="$(hostname -I | awk '{print $1}')"

openssl req -x509 -newkey rsa:4096 -keyout /tmp/key.pem -out /tmp/cert.pem -days 1 -nodes -subj '/CN=some.vault'
chmod 777 /tmp/key.pem /tmp/cert.pem
docker run -d --user root --cap-add=IPC_LOCK -e VAULT_DEV_ROOT_TOKEN_ID=root --name=vault -e 'VAULT_LOCAL_CONFIG={"listener":{"tcp":{"tls_key_file":"/tmp/key.pem","tls_cert_file":"/tmp/cert.pem","address":"0.0.0.0:8210"}}}' -v /tmp/key.pem:/tmp/key.pem -v /tmp/cert.pem:/tmp/cert.pem -p 127.0.0.1:8200:8200 -p 127.0.0.1:8210:8210 hashicorp/vault server -dev
docker logs vault
docker run -d -v /usr/share/ca-certificates/:/etc/ssl/certs -p 4001:4001 -p 2380:2380 -p 2379:2379 --name etcd quay.io/coreos/etcd:v2.3.8 -name etcd0 -advertise-client-urls http://${HostIP}:2379,http://${HostIP}:4001 -listen-client-urls http://0.0.0.0:2379,http://0.0.0.0:4001 -initial-advertise-peer-urls http://${HostIP}:2380 -listen-peer-urls http://0.0.0.0:2380 -initial-cluster-token etcd-cluster-1 -initial-cluster etcd0=http://${HostIP}:2380 -initial-cluster-state new
docker logs etcd

echo "Prepare containers"
pushd "$SCRIPT_DIR"
docker compose up -d || (
docker compose logs vault;
docker compose logs etcd;
docker compose logs consul;
exit 1
)
popd


echo "Prepare vault for JWT auth"
curl 'https://localhost:8210/v1/sys/auth/kubernetes.test' -k -X POST -H 'X-Vault-Token: root' -H 'Content-Type: application/json; charset=utf-8' --data-raw '{"path":"kubernetes.test","type":"jwt","config":{}}'
curl 'https://localhost:8210/v1/auth/kubernetes.test/config' -k -X PUT -H 'X-Vault-Token: root' -H 'content-type: application/json; charset=utf-8' --data-raw '{"jwt_validation_pubkeys":["-----BEGIN PUBLIC KEY-----\nMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAtMCbmrsltFKqStOoxl8V\nK5ZlrIMb8d+W62yoXW1DKdg+cPNq0vGD94cxl9NjjRzlSR/NVZq6Q34c1lkbenPw\nf3CYfmbQupOKTJKhBdn9sFCCbW0gi6gQv0BaU3Pa8iGfVcZPctAtdbwmNKVd26hW\nmvnoJYhyewhY+j3ooLdnmh55cZU9w1VO0PaSf2zGSmCUeIao77jWcnkEauK2RrYv\nq5yB6w54Q71+lp2jZil9e4IJP/WqcS1CtmKgiWLoZuWNJXDWaa8LbcgQfsxudn3X\nsgHaYnAdZJOaCsDS/ablKmUOLIiI3TBM6dkUlBUMK9OgAsu+wBdX521rK3u+NNVX\n3wIDAQAB\n-----END PUBLIC KEY-----"],"default_role":"root","namespace_in_state":false,"provider_config":{}}'
curl 'https://localhost:8210/v1/auth/kubernetes.test/role/root' -k -X POST -H 'X-Vault-Token: root' -H 'content-type: application/json; charset=utf-8' --data-raw '{"token_policies":["acme"],"role_type":"jwt","user_claim":"kubernetes.io/serviceaccount/service-account.uid","bound_subject":"system:serviceaccount:kong:gateway-kong"}'
curl 'https://localhost:8210/v1/sys/policies/acl/acme' -k -X PUT -H 'X-Vault-Token: root' -H 'Content-Type: application/json; charset=utf-8' --data-raw '{"name":"acme","policy":"path \"secret/*\" {\n capabilities = [\"create\", \"read\", \"update\", \"delete\"]\n}"}'

echo "Prepare Pebble"
pushd t/fixtures
docker-compose up -d

# on macOS use host.docker.internal
if [[ "$OSTYPE" == 'darwin'* ]]; then
host_ip=$(docker run -it --rm alpine ping host.docker.internal -c1|grep -oE "\d+\.\d+\.\d+\.\d+"|head -n1)
# update the default ip in resolver
curl --request POST --data '{"ip":"'$host_ip'"}' http://localhost:8055/set-default-ipv4
fi
popd

echo "Generate certs"
openssl genpkey -algorithm RSA -pkeyopt rsa_keygen_bits:4096 -out /tmp/account.key
Expand Down
Loading
Loading