diff --git a/README.rst b/README.rst index 14908033ac..09d30f13f4 100644 --- a/README.rst +++ b/README.rst @@ -212,6 +212,7 @@ The following options can be configured on the server: pki.maxupdatefailhours 4 Maximum number of hours that a denylist update can fail pki.softfail true Do not reject certificates if their revocation status cannot be established when softfail is true **Storage** + storage.session.memcached.address [] List of Memcached server addresses. These can be a simple 'host:port' or a Memcached connection URL with scheme, auth and other options. storage.session.redis.address Redis session database server address. This can be a simple 'host:port' or a Redis connection URL with scheme, auth and other options. If not set it, defaults to an in-memory database. storage.session.redis.database Redis session database name, which is used as prefix every key. Can be used to have multiple instances use the same Redis instance. storage.session.redis.password Redis session database password. If set, it overrides the username in the connection URL. diff --git a/docs/index.rst b/docs/index.rst index bcef238e69..c6316da266 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -31,6 +31,7 @@ Nuts documentation pages/deployment/configuration.rst pages/deployment/migration.rst pages/deployment/recommended-deployment.rst + pages/deployment/clustering.rst pages/deployment/certificates.rst pages/deployment/docker.rst pages/deployment/storage.rst diff --git a/docs/pages/deployment/clustering.rst b/docs/pages/deployment/clustering.rst new file mode 100644 index 0000000000..e7520a28fa --- /dev/null +++ b/docs/pages/deployment/clustering.rst @@ -0,0 +1,21 @@ +.. _clustering: + +Clustering +########## + +Clustering is currently limited to nodes that have the ``did:nuts`` method disabled. +To enable clustering, your deployment must provide the following: + +- A clustered SQL database (SQLite is not supported) +- Clustered session storage (Redis Sentinel is recommended) +- Clustered private key storage (HashiCorp Vault or Azure Key Vault) +- Read-only mounts for configuration, policy, discovery and JSON-LD context files + +It's recommended to use a layer 4 load balancer to distribute the load across the nodes. +Each node should have a reverse proxy for TLS termination. + +Clustering will not work if you use one of the following: + +- The ``did:nuts`` method +- SQLite +- Disk-based private key storage diff --git a/docs/pages/deployment/server_options.rst b/docs/pages/deployment/server_options.rst index e4c2c47f3f..f972825bbd 100755 --- a/docs/pages/deployment/server_options.rst +++ b/docs/pages/deployment/server_options.rst @@ -47,6 +47,7 @@ pki.maxupdatefailhours 4 Maximum number of hours that a denylist update can fail pki.softfail true Do not reject certificates if their revocation status cannot be established when softfail is true **Storage** + storage.session.memcached.address [] List of Memcached server addresses. These can be a simple 'host:port' or a Memcached connection URL with scheme, auth and other options. storage.session.redis.address Redis session database server address. This can be a simple 'host:port' or a Redis connection URL with scheme, auth and other options. If not set it, defaults to an in-memory database. storage.session.redis.database Redis session database name, which is used as prefix every key. Can be used to have multiple instances use the same Redis instance. storage.session.redis.password Redis session database password. If set, it overrides the username in the connection URL.
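A minimal sketch of a single cluster node's configuration, tying the clustering requirements above to the new session storage options, is shown below. The host names, credentials and the PostgreSQL connection string are placeholders, ``storage.sql.connection`` is assumed to be the existing SQL connection setting (it is not part of this change), and only one of the Redis and Memcached options may be set, since the storage engine rejects a configuration that sets both.

.. code-block:: yaml

    # Illustrative cluster node configuration; values are placeholders.
    storage:
      sql:
        # Shared, clustered SQL database; SQLite cannot be used for clustering.
        connection: "postgres://nuts:secret@postgres:5432/nuts"
      session:
        # Shared session storage: either Redis Sentinel ...
        redis:
          sentinel:
            master: mymaster
            nodes:
              - sentinel-1:26379
              - sentinel-2:26379
              - sentinel-3:26379
        # ... or Memcached, but not both.
        # memcached:
        #   address:
        #     - memcached-1:11211
        #     - memcached-2:11211

Clustered private key storage (HashiCorp Vault or Azure Key Vault) and the read-only mounts for configuration, policy, discovery and JSON-LD context files are configured separately and are not shown in this sketch.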
diff --git a/docs/pages/deployment/storage.rst b/docs/pages/deployment/storage.rst index 413a48f592..ef83aa4b3b 100644 --- a/docs/pages/deployment/storage.rst +++ b/docs/pages/deployment/storage.rst @@ -44,6 +44,69 @@ Refer to the documentation of the driver for the database you are using for the Usage of SQLite is not recommended for production environments. Connections to a SQLite DB are restricted to 1, which will lead to severe performance reduction. +Session storage +*************** + +Session storage is used for storing access tokens, nonces and other volatile data. +Session data is volatile by nature. There are 3 supported session storage types: + +- In-memory +- Memcached +- Redis (standalone, cluster, sentinel) + +Local +===== + +This is the default and will store data in-memory. Any restart will wipe all data. +Data is also not shared if you run multiple nodes. + +Memcached +========= + +Memcached can be enabled with the following config: + +.. code-block:: yaml + + storage.session.memcached.address: + - localhost:11211 + +You can add multiple memcached servers to the list. +memcached is not capable of clustering. Each piece of data is stored on a single instance. +If you want true HA, you'll need to use Redis. +For more information on Memcached connection strings, refer to the `Memcached documentation `_. + +Redis +===== + +Redis is the only option if you want to run multiple nodes and the cache as HA. +Redis can be configured in standalone or sentinel mode. +Standalone: + +.. code-block:: yaml + + storage: + session: + redis: + address: localhost:6379 + username: user + password: pass + db: 0 + +Sentinel: + +.. code-block:: yaml + + storage: + session: + redis: + sentinel: + master: mymaster + nodes: + - localhost:26379 + - localhost:26380 + - localhost:26381 + + Private Keys ************ diff --git a/e2e-tests/clustering/memcached/docker-compose.yml b/e2e-tests/clustering/memcached/docker-compose.yml new file mode 100644 index 0000000000..bacdc0c1de --- /dev/null +++ b/e2e-tests/clustering/memcached/docker-compose.yml @@ -0,0 +1,65 @@ +services: + memcached: + image: memcached + command: + - --conn-limit=1024 + - --memory-limit=64 + - --threads=4 + nodeA-backend: + image: "${IMAGE_NODE_A:-nutsfoundation/nuts-node:master}" + ports: + - "18081:8081" + environment: + NUTS_CONFIGFILE: /opt/nuts/nuts.yaml + NUTS_URL: "https://nodeA" + NUTS_DISCOVERY_SERVER_IDS: "e2e-test" + NUTS_STORAGE_SESSION_MEMCACHED_ADDRESS: "memcached:11211" + volumes: + - "../shared/nuts.yaml:/opt/nuts/nuts.yaml:ro" + - "../../tls-certs/nodeA-backend-certificate.pem:/opt/nuts/certificate-and-key.pem:ro" + - "../../tls-certs/truststore.pem:/opt/nuts/truststore.pem:ro" + # did:web resolver uses the OS CA bundle, but e2e tests use a self-signed CA which can be found in truststore.pem + # So we need to mount that file to the OS CA bundle location, otherwise did:web resolving will fail due to untrusted certs. 
+ - "../../tls-certs/truststore.pem:/etc/ssl/certs/Nuts_RootCA.pem:ro" + - "../shared/presentationexchangemapping.json:/opt/nuts/policies/presentationexchangemapping.json:ro" + - "../shared/discovery:/nuts/discovery:ro" + healthcheck: + interval: 1s # Make test run quicker by checking health status more often + nodeA: + image: nginx:1.25.1 + ports: + - "10443:443" + volumes: + - "../shared/node-A/nginx.conf:/etc/nginx/nginx.conf:ro" + - "../../tls-certs/nodeA-certificate.pem:/etc/nginx/ssl/server.pem:ro" + - "../../tls-certs/nodeA-certificate.pem:/etc/nginx/ssl/key.pem:ro" + - "../../tls-certs/truststore.pem:/etc/nginx/ssl/truststore.pem:ro" + - "../../scripts/oauth2.js:/etc/nginx/oauth2.js:ro" + nodeB-backend: + image: "${IMAGE_NODE_B:-nutsfoundation/nuts-node:master}" + ports: + - "28081:8081" + environment: + NUTS_CONFIGFILE: /opt/nuts/nuts.yaml + NUTS_URL: "https://nodeB" + volumes: + - "../shared/nuts.yaml:/opt/nuts/nuts.yaml:ro" + - "../../tls-certs/nodeB-certificate.pem:/opt/nuts/certificate-and-key.pem:ro" + - "../../tls-certs/truststore.pem:/opt/nuts/truststore.pem:ro" + - "../../tls-certs/truststore.pem:/etc/ssl/certs/truststore.pem:ro" + # did:web resolver uses the OS CA bundle, but e2e tests use a self-signed CA which can be found in truststore.pem + # So we need to mount that file to the OS CA bundle location, otherwise did:web resolving will fail due to untrusted certs. + - "../../tls-certs/truststore.pem:/etc/ssl/certs/Nuts_RootCA.pem:ro" + - "../shared/presentationexchangemapping.json:/opt/nuts/policies/presentationexchangemapping.json:ro" + - "../shared/discovery:/nuts/discovery:ro" + healthcheck: + interval: 1s # Make test run quicker by checking health status more often + nodeB: + image: nginx:1.25.1 + ports: + - "20443:443" + volumes: + - "../../shared_config/nodeB-http-nginx.conf:/etc/nginx/conf.d/nuts-http.conf:ro" + - "../../tls-certs/nodeB-certificate.pem:/etc/nginx/ssl/server.pem:ro" + - "../../tls-certs/nodeB-certificate.pem:/etc/nginx/ssl/key.pem:ro" + - "../../tls-certs/truststore.pem:/etc/nginx/ssl/truststore.pem:ro" \ No newline at end of file diff --git a/e2e-tests/clustering/memcached/run-test.sh b/e2e-tests/clustering/memcached/run-test.sh new file mode 100755 index 0000000000..a871c2fa20 --- /dev/null +++ b/e2e-tests/clustering/memcached/run-test.sh @@ -0,0 +1,160 @@ +#!/usr/bin/env bash +source ../../util.sh + +echo "------------------------------------" +echo "Cleaning up running Docker containers and volumes, and key material..." +echo "------------------------------------" +# --remove-orphans to ensure that DB containers of previous runs (on e.g. Postgres) are removed when testing with SQLite. +# Nothing breaks otherwise, but it prevents annoying warnings in the log. +docker compose down --remove-orphans +docker compose rm -f -v + +echo "------------------------------------" +echo "Starting Docker containers..." +echo "------------------------------------" +docker compose up -d +docker compose up --wait nodeA nodeA-backend nodeB nodeB-backend memcached + +echo "------------------------------------" +echo "Registering vendors..." 
+echo "------------------------------------" +# Register Vendor A +REQUEST="{\"subject\":\"vendorA\"}" +VENDOR_A_DIDDOC=$(echo $REQUEST | curl -X POST --data-binary @- http://localhost:18081/internal/vdr/v2/subject --header "Content-Type: application/json") +VENDOR_A_DID=$(echo $VENDOR_A_DIDDOC | jq -r .documents[0].id) +echo Vendor A DID: $VENDOR_A_DID + +# Register Vendor B +REQUEST="{\"subject\":\"vendorB\"}" +VENDOR_B_DIDDOC=$(echo $REQUEST | curl -X POST --data-binary @- http://localhost:28081/internal/vdr/v2/subject --header "Content-Type: application/json") +VENDOR_B_DID=$(echo $VENDOR_B_DIDDOC | jq -r .documents[0].id) +echo Vendor B DID: $VENDOR_B_DID + +# Issue NutsOrganizationCredential for Vendor B +REQUEST="{\"type\":\"NutsOrganizationCredential\",\"issuer\":\"${VENDOR_B_DID}\", \"credentialSubject\": {\"id\":\"${VENDOR_B_DID}\", \"organization\":{\"name\":\"Caresoft B.V.\", \"city\":\"Caretown\"}},\"withStatusList2021Revocation\": true}" +VENDOR_B_CREDENTIAL=$(echo $REQUEST | curl -X POST --data-binary @- http://localhost:28081/internal/vcr/v2/issuer/vc -H "Content-Type:application/json") +if echo $VENDOR_B_CREDENTIAL | grep -q "VerifiableCredential"; then + echo "VC issued" +else + echo "FAILED: Could not issue NutsOrganizationCredential to node-B" 1>&2 + echo $VENDOR_B_CREDENTIAL + exitWithDockerLogs 1 +fi + +RESPONSE=$(echo $VENDOR_B_CREDENTIAL | curl -X POST --data-binary @- http://localhost:28081/internal/vcr/v2/holder/vendorB/vc -H "Content-Type:application/json") +if echo $RESPONSE == ""; then + echo "VC stored in wallet" +else + echo "FAILED: Could not load NutsOrganizationCredential in node-B wallet" 1>&2 + echo $RESPONSE + exitWithDockerLogs 1 +fi + +# Register vendor B on Discovery Service +echo "Registering vendor B on Discovery Service..." +REQUEST="{\"registrationParameters\":{\"key\":\"value\"}}" +RESPONSE=$(echo $REQUEST | curl -s -o /dev/null -w "%{http_code}" -X POST --data-binary @- http://localhost:28081/internal/discovery/v1/e2e-test/vendorB) +if [ $RESPONSE -eq 200 ]; then + echo "Vendor B registered on Discovery Service" +else + echo "FAILED: Could not register vendor B on Discovery Service" 1>&2 + echo $RESPONSE + exitWithDockerLogs 1 +fi + +echo "---------------------------------------" +echo "Perform OAuth 2.0 rfc021 flow..." +echo "---------------------------------------" +REQUEST=$( +cat << EOF +{ + "authorization_server": "https://nodeA/oauth2/vendorA", + "client_id": "https://nodeB/oauth2/vendorB", + "scope": "test", + "credentials": [ + { + "@context": [ + "https://www.w3.org/2018/credentials/v1", + "https://nuts.nl/credentials/v1" + ], + "type": ["VerifiableCredential", "EmployeeCredential"], + "credentialSubject": { + "name": "John Doe", + "roleName": "Janitor", + "identifier": "123456" + } + } + ] +} +EOF +) +# Request access token +RESPONSE=$(echo $REQUEST | curl -X POST -s --data-binary @- http://localhost:28081/internal/auth/v2/vendorB/request-service-access-token -H "Content-Type: application/json") +if echo $RESPONSE | grep -q "access_token"; then + echo $RESPONSE | sed -E 's/.*"access_token":"([^"]*).*/\1/' > ./node-B/accesstoken.txt + echo "access token stored in ./node-B/accesstoken.txt" +else + echo "FAILED: Could not get access token from node-A" 1>&2 + echo $RESPONSE + exitWithDockerLogs 1 +fi +DPOP_KID=$(echo $RESPONSE | sed -E 's/.*"dpop_kid":"([^"]*).*/\1/') +DPOP_KID=$(urlencode $DPOP_KID) +ACCESS_TOKEN=$(cat ./node-B/accesstoken.txt) + +echo "------------------------------------" +echo "Create DPoP header..." 
+echo "------------------------------------" +REQUEST="{\"htm\":\"GET\",\"htu\":\"https://nodeA:443/resource\", \"token\":\"$ACCESS_TOKEN\"}" +RESPONSE=$(echo $REQUEST | curl -X POST -s --data-binary @- http://localhost:28081/internal/auth/v2/dpop/$DPOP_KID -H "Content-Type: application/json") +if echo $RESPONSE | grep -q "dpop"; then + echo $RESPONSE | sed -E 's/.*"dpop":"([^"]*).*/\1/' > ./node-B/dpop.txt + echo "dpop token stored in ./node-B/dpop.txt" +else + echo "FAILED: Could not get dpop token from node-B" 1>&2 + echo $RESPONSE + exitWithDockerLogs 1 +fi + +DPOP=$(cat ./node-B/dpop.txt) + +# Introspect access token with a form post +echo "------------------------------------" +echo "Introspect access token..." +echo "------------------------------------" +RESPONSE=$(curl -X POST -s --data "token=$ACCESS_TOKEN" http://localhost:18081/internal/auth/v2/accesstoken/introspect) +echo $RESPONSE +# Check that it contains "active": true +if echo $RESPONSE | grep -q "active.*true"; then + echo "access token is active" +else + echo "FAILED: Access token is not active" 1>&2 + echo $RESPONSE + exitWithDockerLogs 1 +fi +# Check that it contains "employee_name":"John Doe" +if echo $RESPONSE | grep -q "employee_name.*John Doe"; then + echo "employee_name claim is present" +else + echo "FAILED: missing/invalid employee_name" 1>&2 + echo $RESPONSE + exitWithDockerLogs 1 +fi + +echo "------------------------------------" +echo "Retrieving data..." +echo "------------------------------------" +RESPONSE=$(docker compose exec nodeB curl --http1.1 --insecure --cert /etc/nginx/ssl/server.pem --key /etc/nginx/ssl/key.pem https://nodeA:443/resource -H "Authorization: DPoP $ACCESS_TOKEN" -H "DPoP: $DPOP") +if echo $RESPONSE | grep -q "OK"; then + echo "success!" +else + echo "FAILED: Could not get resource from node-A" 1>&2 + echo $RESPONSE + exitWithDockerLogs 1 +fi + +echo "------------------------------------" +echo "Stopping Docker containers..." 
+echo "------------------------------------" +docker compose down +rm node-*/*.txt \ No newline at end of file diff --git a/e2e-tests/clustering/redis/docker-compose.yml b/e2e-tests/clustering/redis/docker-compose.yml new file mode 100644 index 0000000000..e976f918fc --- /dev/null +++ b/e2e-tests/clustering/redis/docker-compose.yml @@ -0,0 +1,100 @@ +services: + redisA: + image: &image redis:latest + command: redis-server /redis/conf/redis.conf + volumes: + - "./master:/redis/conf:ro" + redisB: + image: *image + depends_on: + - redisA + command: redis-server /redis/conf/redis.conf + volumes: + - "./slave:/redis/conf:ro" + redisC: + image: *image + depends_on: + - redisA + command: redis-server /redis/conf/redis.conf + volumes: + - "./slave:/redis/conf:ro" + sentinelA: + image: *image + depends_on: + - redisA + command: redis-server /redis/conf/sentinel.conf --sentinel + volumes: + - "./sentinel/A:/redis/conf" + sentinelB: + image: *image + depends_on: + - redisA + command: redis-server /redis/conf/sentinel.conf --sentinel + volumes: + - "./sentinel/B:/redis/conf" + sentinelC: + image: *image + depends_on: + - redisA + command: redis-server /redis/conf/sentinel.conf --sentinel + volumes: + - "./sentinel/C:/redis/conf" + nodeA-backend: + image: "${IMAGE_NODE_A:-nutsfoundation/nuts-node:master}" + ports: + - "18081:8081" + environment: + NUTS_CONFIGFILE: /opt/nuts/nuts.yaml + NUTS_URL: "https://nodeA" + NUTS_DISCOVERY_SERVER_IDS: "e2e-test" + NUTS_STORAGE_SESSION_REDIS_SENTINEL_MASTER: "mymaster" + NUTS_STORAGE_SESSION_REDIS_SENTINEL_NODES: sentinelA:26379,sentinelB:26379,sentinelC:26379 + volumes: + - "../shared/nuts.yaml:/opt/nuts/nuts.yaml:ro" + - "../../tls-certs/nodeA-backend-certificate.pem:/opt/nuts/certificate-and-key.pem:ro" + - "../../tls-certs/truststore.pem:/opt/nuts/truststore.pem:ro" + # did:web resolver uses the OS CA bundle, but e2e tests use a self-signed CA which can be found in truststore.pem + # So we need to mount that file to the OS CA bundle location, otherwise did:web resolving will fail due to untrusted certs. + - "../../tls-certs/truststore.pem:/etc/ssl/certs/Nuts_RootCA.pem:ro" + - "../shared/presentationexchangemapping.json:/opt/nuts/policies/presentationexchangemapping.json:ro" + - "../shared/discovery:/nuts/discovery:ro" + healthcheck: + interval: 1s # Make test run quicker by checking health status more often + nodeA: + image: nginx:1.25.1 + ports: + - "10443:443" + volumes: + - "../shared/node-A/nginx.conf:/etc/nginx/nginx.conf:ro" + - "../../tls-certs/nodeA-certificate.pem:/etc/nginx/ssl/server.pem:ro" + - "../../tls-certs/nodeA-certificate.pem:/etc/nginx/ssl/key.pem:ro" + - "../../tls-certs/truststore.pem:/etc/nginx/ssl/truststore.pem:ro" + - "../../scripts/oauth2.js:/etc/nginx/oauth2.js:ro" + nodeB-backend: + image: "${IMAGE_NODE_B:-nutsfoundation/nuts-node:master}" + ports: + - "28081:8081" + environment: + NUTS_CONFIGFILE: /opt/nuts/nuts.yaml + NUTS_URL: "https://nodeB" + volumes: + - "../shared/nuts.yaml:/opt/nuts/nuts.yaml:ro" + - "../../tls-certs/nodeB-certificate.pem:/opt/nuts/certificate-and-key.pem:ro" + - "../../tls-certs/truststore.pem:/opt/nuts/truststore.pem:ro" + - "../../tls-certs/truststore.pem:/etc/ssl/certs/truststore.pem:ro" + # did:web resolver uses the OS CA bundle, but e2e tests use a self-signed CA which can be found in truststore.pem + # So we need to mount that file to the OS CA bundle location, otherwise did:web resolving will fail due to untrusted certs. 
+ - "../../tls-certs/truststore.pem:/etc/ssl/certs/Nuts_RootCA.pem:ro" + - "../shared/presentationexchangemapping.json:/opt/nuts/policies/presentationexchangemapping.json:ro" + - "../shared/discovery:/nuts/discovery:ro" + healthcheck: + interval: 1s # Make test run quicker by checking health status more often + nodeB: + image: nginx:1.25.1 + ports: + - "20443:443" + volumes: + - "../../shared_config/nodeB-http-nginx.conf:/etc/nginx/conf.d/nuts-http.conf:ro" + - "../../tls-certs/nodeB-certificate.pem:/etc/nginx/ssl/server.pem:ro" + - "../../tls-certs/nodeB-certificate.pem:/etc/nginx/ssl/key.pem:ro" + - "../../tls-certs/truststore.pem:/etc/nginx/ssl/truststore.pem:ro" \ No newline at end of file diff --git a/e2e-tests/clustering/redis/master/redis.conf b/e2e-tests/clustering/redis/master/redis.conf new file mode 100644 index 0000000000..597381ab58 --- /dev/null +++ b/e2e-tests/clustering/redis/master/redis.conf @@ -0,0 +1,72 @@ + +protected-mode no +port 6379 +tcp-backlog 511 +timeout 0 +tcp-keepalive 300 +daemonize no +pidfile "/var/run/redis_6379.pid" +loglevel notice +logfile "" +databases 16 +always-show-logo no +set-proc-title yes +proc-title-template "{title} {listen-addr} {server-mode}" + +stop-writes-on-bgsave-error yes +rdbcompression yes +rdbchecksum yes +dbfilename "dump.rdb" +rdb-del-sync-files no +dir "/data" +replica-serve-stale-data yes +replica-read-only yes +repl-diskless-sync yes +repl-diskless-sync-delay 5 +repl-diskless-sync-max-replicas 0 +repl-diskless-load disabled +repl-disable-tcp-nodelay no +replica-priority 100 +acllog-max-len 128 +lazyfree-lazy-eviction no +lazyfree-lazy-expire no +lazyfree-lazy-server-del no +replica-lazy-flush no +lazyfree-lazy-user-del no +lazyfree-lazy-user-flush no +oom-score-adj no +oom-score-adj-values 0 200 800 +disable-thp yes +appendonly yes +appendfilename "appendonly.aof" +appenddirname "appendonlydir" +appendfsync everysec +no-appendfsync-on-rewrite no +auto-aof-rewrite-percentage 100 +auto-aof-rewrite-min-size 64mb +aof-load-truncated yes +aof-use-rdb-preamble yes +aof-timestamp-enabled no +slowlog-log-slower-than 10000 +slowlog-max-len 128 +latency-monitor-threshold 0 +notify-keyspace-events "" +hash-max-listpack-entries 512 +hash-max-listpack-value 64 +list-max-listpack-size -2 +list-compress-depth 0 +set-max-intset-entries 512 +zset-max-listpack-entries 128 +zset-max-listpack-value 64 +hll-sparse-max-bytes 3000 +stream-node-max-bytes 4kb +stream-node-max-entries 100 +activerehashing yes +client-output-buffer-limit normal 0 0 0 +client-output-buffer-limit replica 256mb 64mb 60 +client-output-buffer-limit pubsub 32mb 8mb 60 +hz 10 +dynamic-hz yes +aof-rewrite-incremental-fsync yes +rdb-save-incremental-fsync yes +jemalloc-bg-thread yes diff --git a/e2e-tests/clustering/redis/run-test.sh b/e2e-tests/clustering/redis/run-test.sh new file mode 100755 index 0000000000..95a3a61220 --- /dev/null +++ b/e2e-tests/clustering/redis/run-test.sh @@ -0,0 +1,160 @@ +#!/usr/bin/env bash +source ../../util.sh + +echo "------------------------------------" +echo "Cleaning up running Docker containers and volumes, and key material..." +echo "------------------------------------" +# --remove-orphans to ensure that DB containers of previous runs (on e.g. Postgres) are removed when testing with SQLite. +# Nothing breaks otherwise, but it prevents annoying warnings in the log. +docker compose down --remove-orphans +docker compose rm -f -v + +echo "------------------------------------" +echo "Starting Docker containers..." 
+echo "------------------------------------" +docker compose up -d +docker compose up --wait nodeA nodeA-backend nodeB nodeB-backend sentinelA sentinelB sentinelC + +echo "------------------------------------" +echo "Registering vendors..." +echo "------------------------------------" +# Register Vendor A +REQUEST="{\"subject\":\"vendorA\"}" +VENDOR_A_DIDDOC=$(echo $REQUEST | curl -X POST --data-binary @- http://localhost:18081/internal/vdr/v2/subject --header "Content-Type: application/json") +VENDOR_A_DID=$(echo $VENDOR_A_DIDDOC | jq -r .documents[0].id) +echo Vendor A DID: $VENDOR_A_DID + +# Register Vendor B +REQUEST="{\"subject\":\"vendorB\"}" +VENDOR_B_DIDDOC=$(echo $REQUEST | curl -X POST --data-binary @- http://localhost:28081/internal/vdr/v2/subject --header "Content-Type: application/json") +VENDOR_B_DID=$(echo $VENDOR_B_DIDDOC | jq -r .documents[0].id) +echo Vendor B DID: $VENDOR_B_DID + +# Issue NutsOrganizationCredential for Vendor B +REQUEST="{\"type\":\"NutsOrganizationCredential\",\"issuer\":\"${VENDOR_B_DID}\", \"credentialSubject\": {\"id\":\"${VENDOR_B_DID}\", \"organization\":{\"name\":\"Caresoft B.V.\", \"city\":\"Caretown\"}},\"withStatusList2021Revocation\": true}" +VENDOR_B_CREDENTIAL=$(echo $REQUEST | curl -X POST --data-binary @- http://localhost:28081/internal/vcr/v2/issuer/vc -H "Content-Type:application/json") +if echo $VENDOR_B_CREDENTIAL | grep -q "VerifiableCredential"; then + echo "VC issued" +else + echo "FAILED: Could not issue NutsOrganizationCredential to node-B" 1>&2 + echo $VENDOR_B_CREDENTIAL + exitWithDockerLogs 1 +fi + +RESPONSE=$(echo $VENDOR_B_CREDENTIAL | curl -X POST --data-binary @- http://localhost:28081/internal/vcr/v2/holder/vendorB/vc -H "Content-Type:application/json") +if echo $RESPONSE == ""; then + echo "VC stored in wallet" +else + echo "FAILED: Could not load NutsOrganizationCredential in node-B wallet" 1>&2 + echo $RESPONSE + exitWithDockerLogs 1 +fi + +# Register vendor B on Discovery Service +echo "Registering vendor B on Discovery Service..." +REQUEST="{\"registrationParameters\":{\"key\":\"value\"}}" +RESPONSE=$(echo $REQUEST | curl -s -o /dev/null -w "%{http_code}" -X POST --data-binary @- http://localhost:28081/internal/discovery/v1/e2e-test/vendorB) +if [ $RESPONSE -eq 200 ]; then + echo "Vendor B registered on Discovery Service" +else + echo "FAILED: Could not register vendor B on Discovery Service" 1>&2 + echo $RESPONSE + exitWithDockerLogs 1 +fi + +echo "---------------------------------------" +echo "Perform OAuth 2.0 rfc021 flow..." 
+echo "---------------------------------------" +REQUEST=$( +cat << EOF +{ + "authorization_server": "https://nodeA/oauth2/vendorA", + "client_id": "https://nodeB/oauth2/vendorB", + "scope": "test", + "credentials": [ + { + "@context": [ + "https://www.w3.org/2018/credentials/v1", + "https://nuts.nl/credentials/v1" + ], + "type": ["VerifiableCredential", "EmployeeCredential"], + "credentialSubject": { + "name": "John Doe", + "roleName": "Janitor", + "identifier": "123456" + } + } + ] +} +EOF +) +# Request access token +RESPONSE=$(echo $REQUEST | curl -X POST -s --data-binary @- http://localhost:28081/internal/auth/v2/vendorB/request-service-access-token -H "Content-Type: application/json") +if echo $RESPONSE | grep -q "access_token"; then + echo $RESPONSE | sed -E 's/.*"access_token":"([^"]*).*/\1/' > ./node-B/accesstoken.txt + echo "access token stored in ./node-B/accesstoken.txt" +else + echo "FAILED: Could not get access token from node-A" 1>&2 + echo $RESPONSE + exitWithDockerLogs 1 +fi +DPOP_KID=$(echo $RESPONSE | sed -E 's/.*"dpop_kid":"([^"]*).*/\1/') +DPOP_KID=$(urlencode $DPOP_KID) +ACCESS_TOKEN=$(cat ./node-B/accesstoken.txt) + +echo "------------------------------------" +echo "Create DPoP header..." +echo "------------------------------------" +REQUEST="{\"htm\":\"GET\",\"htu\":\"https://nodeA:443/resource\", \"token\":\"$ACCESS_TOKEN\"}" +RESPONSE=$(echo $REQUEST | curl -X POST -s --data-binary @- http://localhost:28081/internal/auth/v2/dpop/$DPOP_KID -H "Content-Type: application/json") +if echo $RESPONSE | grep -q "dpop"; then + echo $RESPONSE | sed -E 's/.*"dpop":"([^"]*).*/\1/' > ./node-B/dpop.txt + echo "dpop token stored in ./node-B/dpop.txt" +else + echo "FAILED: Could not get dpop token from node-B" 1>&2 + echo $RESPONSE + exitWithDockerLogs 1 +fi + +DPOP=$(cat ./node-B/dpop.txt) + +# Introspect access token with a form post +echo "------------------------------------" +echo "Introspect access token..." +echo "------------------------------------" +RESPONSE=$(curl -X POST -s --data "token=$ACCESS_TOKEN" http://localhost:18081/internal/auth/v2/accesstoken/introspect) +echo $RESPONSE +# Check that it contains "active": true +if echo $RESPONSE | grep -q "active.*true"; then + echo "access token is active" +else + echo "FAILED: Access token is not active" 1>&2 + echo $RESPONSE + exitWithDockerLogs 1 +fi +# Check that it contains "employee_name":"John Doe" +if echo $RESPONSE | grep -q "employee_name.*John Doe"; then + echo "employee_name claim is present" +else + echo "FAILED: missing/invalid employee_name" 1>&2 + echo $RESPONSE + exitWithDockerLogs 1 +fi + +echo "------------------------------------" +echo "Retrieving data..." +echo "------------------------------------" +RESPONSE=$(docker compose exec nodeB curl --http1.1 --insecure --cert /etc/nginx/ssl/server.pem --key /etc/nginx/ssl/key.pem https://nodeA:443/resource -H "Authorization: DPoP $ACCESS_TOKEN" -H "DPoP: $DPOP") +if echo $RESPONSE | grep -q "OK"; then + echo "success!" +else + echo "FAILED: Could not get resource from node-A" 1>&2 + echo $RESPONSE + exitWithDockerLogs 1 +fi + +echo "------------------------------------" +echo "Stopping Docker containers..." 
+echo "------------------------------------" +docker compose down +rm node-*/*.txt \ No newline at end of file diff --git a/e2e-tests/clustering/redis/sentinel/A/sentinel.conf b/e2e-tests/clustering/redis/sentinel/A/sentinel.conf new file mode 100644 index 0000000000..44cc6e9e41 --- /dev/null +++ b/e2e-tests/clustering/redis/sentinel/A/sentinel.conf @@ -0,0 +1,14 @@ +protected-mode no +port 26379 +daemonize no +pidfile "/var/run/redis-sentinel.pid" +logfile "" +dir "/tmp" +sentinel monitor mymaster redisA 6379 2 +sentinel down-after-milliseconds mymaster 2000 +acllog-max-len 128 +sentinel deny-scripts-reconfig yes +sentinel resolve-hostnames yes +sentinel announce-hostnames yes + +# Generated by CONFIG REWRITE \ No newline at end of file diff --git a/e2e-tests/clustering/redis/sentinel/B/sentinel.conf b/e2e-tests/clustering/redis/sentinel/B/sentinel.conf new file mode 100644 index 0000000000..44cc6e9e41 --- /dev/null +++ b/e2e-tests/clustering/redis/sentinel/B/sentinel.conf @@ -0,0 +1,14 @@ +protected-mode no +port 26379 +daemonize no +pidfile "/var/run/redis-sentinel.pid" +logfile "" +dir "/tmp" +sentinel monitor mymaster redisA 6379 2 +sentinel down-after-milliseconds mymaster 2000 +acllog-max-len 128 +sentinel deny-scripts-reconfig yes +sentinel resolve-hostnames yes +sentinel announce-hostnames yes + +# Generated by CONFIG REWRITE \ No newline at end of file diff --git a/e2e-tests/clustering/redis/sentinel/C/sentinel.conf b/e2e-tests/clustering/redis/sentinel/C/sentinel.conf new file mode 100644 index 0000000000..44cc6e9e41 --- /dev/null +++ b/e2e-tests/clustering/redis/sentinel/C/sentinel.conf @@ -0,0 +1,14 @@ +protected-mode no +port 26379 +daemonize no +pidfile "/var/run/redis-sentinel.pid" +logfile "" +dir "/tmp" +sentinel monitor mymaster redisA 6379 2 +sentinel down-after-milliseconds mymaster 2000 +acllog-max-len 128 +sentinel deny-scripts-reconfig yes +sentinel resolve-hostnames yes +sentinel announce-hostnames yes + +# Generated by CONFIG REWRITE \ No newline at end of file diff --git a/e2e-tests/clustering/redis/slave/redis.conf b/e2e-tests/clustering/redis/slave/redis.conf new file mode 100644 index 0000000000..d5b33cb0c1 --- /dev/null +++ b/e2e-tests/clustering/redis/slave/redis.conf @@ -0,0 +1,71 @@ +protected-mode no +port 6379 +tcp-backlog 511 +timeout 0 +tcp-keepalive 300 +daemonize no +pidfile "/var/run/redis_6379.pid" +loglevel notice +logfile "" +databases 16 +always-show-logo no +set-proc-title yes +proc-title-template "{title} {listen-addr} {server-mode}" +stop-writes-on-bgsave-error yes +rdbcompression yes +rdbchecksum yes +dbfilename "dump.rdb" +rdb-del-sync-files no +dir "/data" +replicaof redisA 6379 +replica-serve-stale-data yes +replica-read-only yes +repl-diskless-sync yes +repl-diskless-sync-delay 5 +repl-diskless-sync-max-replicas 0 +repl-diskless-load disabled +repl-disable-tcp-nodelay no +replica-priority 100 +acllog-max-len 128 +lazyfree-lazy-eviction no +lazyfree-lazy-expire no +lazyfree-lazy-server-del no +replica-lazy-flush no +lazyfree-lazy-user-del no +lazyfree-lazy-user-flush no +oom-score-adj no +oom-score-adj-values 0 200 800 +disable-thp yes +appendonly yes +appendfilename "appendonly.aof" +appenddirname "appendonlydir" +appendfsync everysec +no-appendfsync-on-rewrite no +auto-aof-rewrite-percentage 100 +auto-aof-rewrite-min-size 64mb +aof-load-truncated yes +aof-use-rdb-preamble yes +aof-timestamp-enabled no +slowlog-log-slower-than 10000 +slowlog-max-len 128 +latency-monitor-threshold 0 +notify-keyspace-events "" +hash-max-listpack-entries 
512 +hash-max-listpack-value 64 +list-max-listpack-size -2 +list-compress-depth 0 +set-max-intset-entries 512 +zset-max-listpack-entries 128 +zset-max-listpack-value 64 +hll-sparse-max-bytes 3000 +stream-node-max-bytes 4kb +stream-node-max-entries 100 +activerehashing yes +client-output-buffer-limit normal 0 0 0 +client-output-buffer-limit replica 256mb 64mb 60 +client-output-buffer-limit pubsub 32mb 8mb 60 +hz 10 +dynamic-hz yes +aof-rewrite-incremental-fsync yes +rdb-save-incremental-fsync yes +jemalloc-bg-thread yes diff --git a/e2e-tests/clustering/run-tests.sh b/e2e-tests/clustering/run-tests.sh new file mode 100755 index 0000000000..9d3bddc61b --- /dev/null +++ b/e2e-tests/clustering/run-tests.sh @@ -0,0 +1,17 @@ +#!/usr/bin/env bash + +set -e # make script fail if any of the tests returns a non-zero exit code + +echo "!!!!!!!!!!!!!!!!!!!!!!!!!" +echo "!! Running test: Redis !!" +echo "!!!!!!!!!!!!!!!!!!!!!!!!!" +pushd redis +./run-test.sh +popd + +echo "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!" +echo "!! Running test: memcached !!" +echo "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!" +pushd memcached +./run-test.sh +popd diff --git a/e2e-tests/clustering/shared/discovery/definition.json b/e2e-tests/clustering/shared/discovery/definition.json new file mode 100644 index 0000000000..f43a884224 --- /dev/null +++ b/e2e-tests/clustering/shared/discovery/definition.json @@ -0,0 +1,51 @@ +{ + "id": "e2e-test", + "endpoint": "http://nodeA-backend:8080/discovery/e2e-test", + "presentation_max_validity": 36000, + "presentation_definition": { + "id": "pd_eoverdracht_dev_care_organization", + "format": { + "ldp_vc": { + "proof_type": [ + "JsonWebSignature2020" + ] + } + }, + "input_descriptors": [ + { + "id": "id_nuts_care_organization_cred", + "constraints": { + "fields": [ + { + "path": [ + "$.type" + ], + "filter": { + "type": "string", + "const": "NutsOrganizationCredential" + } + }, + { + "path": [ + "$.credentialSubject.organization.name", + "$.credentialSubject[0].organization.name" + ], + "filter": { + "type": "string" + } + }, + { + "path": [ + "$.credentialSubject.organization.city", + "$.credentialSubject[0].organization.city" + ], + "filter": { + "type": "string" + } + } + ] + } + } + ] + } +} diff --git a/e2e-tests/clustering/shared/node-A/nginx.conf b/e2e-tests/clustering/shared/node-A/nginx.conf new file mode 100644 index 0000000000..f3a5372982 --- /dev/null +++ b/e2e-tests/clustering/shared/node-A/nginx.conf @@ -0,0 +1,71 @@ +load_module /usr/lib/nginx/modules/ngx_http_js_module.so; + +user nginx; +worker_processes 1; + +error_log /var/log/nginx/error.log debug; +pid /var/run/nginx.pid; + +events { + worker_connections 1024; +} + +http { + js_import oauth2.js; + include /etc/nginx/mime.types; + default_type application/octet-stream; + + log_format main '$remote_addr - $remote_user [$time_local] "$request" ' + '$status $body_bytes_sent "$http_referer" ' + '"$http_user_agent" "$http_x_forwarded_for"'; + + access_log /var/log/nginx/access.log main; + + keepalive_timeout 65; + + include /etc/nginx/conf.d/*.conf; + + upstream nodeA-internal { + server nodeA-backend:8081; + } + upstream nodeA-external { + server nodeA-backend:8080; + } + + server { + server_name nodeA; + listen 443 ssl; + http2 on; + ssl_certificate /etc/nginx/ssl/server.pem; + ssl_certificate_key /etc/nginx/ssl/key.pem; + ssl_client_certificate /etc/nginx/ssl/truststore.pem; + ssl_verify_client optional; + ssl_verify_depth 1; + ssl_protocols TLSv1.3; + + location / { + proxy_set_header X-Ssl-Client-Cert $ssl_client_escaped_cert; + proxy_pass 
http://nodeA-external; + } + + # check access via token introspection as described by https://www.nginx.com/blog/validating-oauth-2-0-access-tokens-nginx/ + location /resource { + js_content oauth2.introspectAccessToken; + } + + # Location in javascript subrequest. + # this is needed to set headers and method + location /_oauth2_send_request { + internal; + proxy_method POST; + proxy_set_header Content-Type "application/x-www-form-urlencoded"; + proxy_pass http://nodeA-internal/internal/auth/v2/accesstoken/introspect; + } + location /_dpop_send_request { + internal; + proxy_method POST; + proxy_set_header Content-Type "application/json"; + proxy_pass http://nodeA-internal/internal/auth/v2/dpop/validate; + } + } +} diff --git a/e2e-tests/clustering/shared/presentationexchangemapping.json b/e2e-tests/clustering/shared/presentationexchangemapping.json new file mode 100644 index 0000000000..992655c56f --- /dev/null +++ b/e2e-tests/clustering/shared/presentationexchangemapping.json @@ -0,0 +1,101 @@ +{ + "test": { + "organization": { + "format": { + "ldp_vp": { + "proof_type": [ + "JsonWebSignature2020" + ] + }, + "ldp_vc": { + "proof_type": [ + "JsonWebSignature2020" + ] + } + }, + "id": "pd_any_care_organization_with_employee", + "name": "Care organization with employee", + "purpose": "Finding a care organization with logged in user for authorizing access to medical metadata", + "input_descriptors": [ + { + "id": "id_nuts_care_organization_cred", + "constraints": { + "fields": [ + { + "path": [ + "$.type" + ], + "filter": { + "type": "string", + "const": "NutsOrganizationCredential" + } + }, + { + "path": [ + "$.credentialSubject.organization.name" + ], + "filter": { + "type": "string" + } + }, + { + "path": [ + "$.credentialSubject.organization.city" + ], + "filter": { + "type": "string" + } + } + ] + } + }, + { + "id": "id_employee_credential_cred", + "constraints": { + "fields": [ + { + "path": [ + "$.type" + ], + "filter": { + "type": "string", + "const": "EmployeeCredential" + } + }, + { + "id": "employee_identifier", + "path": [ + "$.credentialSubject.identifier", + "$.credentialSubject[0].identifier" + ], + "filter": { + "type": "string" + } + }, + { + "id": "employee_name", + "path": [ + "$.credentialSubject.name", + "$.credentialSubject[0].name" + ], + "filter": { + "type": "string" + } + }, + { + "id": "employee_role", + "path": [ + "$.credentialSubject.roleName", + "$.credentialSubject[0].roleName" + ], + "filter": { + "type": "string" + } + } + ] + } + } + ] + } + } +} diff --git a/e2e-tests/oauth-flow/openid4vp/docker-compose.yml b/e2e-tests/oauth-flow/openid4vp/docker-compose.yml index 82e8367111..1540010e7e 100644 --- a/e2e-tests/oauth-flow/openid4vp/docker-compose.yml +++ b/e2e-tests/oauth-flow/openid4vp/docker-compose.yml @@ -53,6 +53,6 @@ services: image: nginx:1.25.1 volumes: - "./resource/nginx.conf:/etc/nginx/nginx.conf:ro" - - "../scripts/oauth2.js:/etc/nginx/oauth2.js:ro" + - "../../scripts/oauth2.js:/etc/nginx/oauth2.js:ro" depends_on: - nodeA-backend diff --git a/e2e-tests/oauth-flow/rfc021/docker-compose.yml b/e2e-tests/oauth-flow/rfc021/docker-compose.yml index 94c3941305..36543dd3ea 100644 --- a/e2e-tests/oauth-flow/rfc021/docker-compose.yml +++ b/e2e-tests/oauth-flow/rfc021/docker-compose.yml @@ -26,7 +26,7 @@ services: - "../../tls-certs/nodeA-certificate.pem:/etc/nginx/ssl/key.pem:ro" - "../../tls-certs/truststore.pem:/etc/nginx/ssl/truststore.pem:ro" - "./node-A/html:/etc/nginx/html:ro" - - "../scripts/oauth2.js:/etc/nginx/oauth2.js:ro" + - 
"../../scripts/oauth2.js:/etc/nginx/oauth2.js:ro" nodeB-backend: image: "${IMAGE_NODE_B:-nutsfoundation/nuts-node:master}" ports: diff --git a/e2e-tests/oauth-flow/scripts/oauth2.js b/e2e-tests/scripts/oauth2.js similarity index 100% rename from e2e-tests/oauth-flow/scripts/oauth2.js rename to e2e-tests/scripts/oauth2.js diff --git a/go.mod b/go.mod index 6e42f3b72e..723c13efdc 100644 --- a/go.mod +++ b/go.mod @@ -202,8 +202,19 @@ require ( ) require ( + github.com/benbjohnson/clock v1.3.0 // indirect + github.com/bradfitz/gomemcache v0.0.0-20230905024940-24af94b03874 // indirect + github.com/daangn/minimemcached v1.2.0 // indirect + github.com/eko/gocache/lib/v4 v4.1.6 // indirect + github.com/eko/gocache/store/go_cache/v4 v4.2.2 // indirect + github.com/eko/gocache/store/memcache/v4 v4.2.2 // indirect + github.com/eko/gocache/store/redis/v4 v4.2.2 // indirect + github.com/golang/mock v1.6.0 // indirect github.com/hashicorp/golang-lru/v2 v2.0.7 // indirect github.com/klauspost/cpuid/v2 v2.2.5 // indirect + github.com/patrickmn/go-cache v2.1.0+incompatible // indirect + github.com/rs/zerolog v1.26.1 // indirect + golang.org/x/exp v0.0.0-20240416160154-fe59bbe5cc7f // indirect modernc.org/gc/v3 v3.0.0-20240107210532-573471604cb6 // indirect modernc.org/libc v1.55.3 // indirect modernc.org/strutil v1.2.0 // indirect diff --git a/go.sum b/go.sum index 85d031fe77..758e11af47 100644 --- a/go.sum +++ b/go.sum @@ -47,10 +47,17 @@ github.com/apapsch/go-jsonmerge/v2 v2.0.0/go.mod h1:lvDnEdqiQrp0O42VQGgmlKpxL1AP github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/avast/retry-go/v4 v4.6.0 h1:K9xNA+KeB8HHc2aWFuLb25Offp+0iVRXEvFx8IinRJA= github.com/avast/retry-go/v4 v4.6.0/go.mod h1:gvWlPhBVsvBbLkVGDg/KwvBv0bEkCOLRRSHKIr2PyOE= +github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A= +github.com/benbjohnson/clock v1.3.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= github.com/bmatcuk/doublestar v1.1.1/go.mod h1:UD6OnuiIn0yFxxA2le/rnRU1G4RaI4UvFv1sNto9p6w= +github.com/bradfitz/gomemcache v0.0.0-20220106215444-fb4bf637b56d/go.mod h1:H0wQNHz2YrLsuXOZozoeDmnHXkNCRmMW0gwFWDfEZDA= +github.com/bradfitz/gomemcache v0.0.0-20230124162541-5f7a7d875746 h1:wAIE/kN63Oig1DdOzN7O+k4AbFh2cCJoKMFXrwRJtzk= +github.com/bradfitz/gomemcache v0.0.0-20230124162541-5f7a7d875746/go.mod h1:H0wQNHz2YrLsuXOZozoeDmnHXkNCRmMW0gwFWDfEZDA= +github.com/bradfitz/gomemcache v0.0.0-20230905024940-24af94b03874 h1:N7oVaKyGp8bttX0bfZGmcGkjz7DLQXhAn3DNd3T0ous= +github.com/bradfitz/gomemcache v0.0.0-20230905024940-24af94b03874/go.mod h1:r5xuitiExdLAJ09PR7vBVENGvp4ZuTBeWTGtxuX3K+c= github.com/bsm/ginkgo/v2 v2.12.0 h1:Ny8MWAHyOepLGlLKYmXG4IEkioBysk6GpaRTLC8zwWs= github.com/bsm/ginkgo/v2 v2.12.0/go.mod h1:SwYbGRRDovPVboqFv0tPTcG1sN61LM1Z4ARdbAV9g4c= github.com/bsm/gomega v1.27.10 h1:yeMWxP2pV2fG3FgAODIY8EiRE3dy0aeFYt4l7wh6yKA= @@ -79,8 +86,11 @@ github.com/chromedp/chromedp v0.11.1 h1:Spca8egFqUlv+JDW+yIs+ijlHlJDPufgrfXPwtq6 github.com/chromedp/chromedp v0.11.1/go.mod h1:lr8dFRLKsdTTWb75C/Ttol2vnBKOSnt0BW8R9Xaupi8= github.com/chromedp/sysutil v1.1.0 h1:PUFNv5EcprjqXZD9nJb9b/c9ibAbxiYo4exNWZyipwM= github.com/chromedp/sysutil v1.1.0/go.mod h1:WiThHUdltqCNKGc4gaU50XgYjwjYIhKWoHGPTUfWTJ8= 
+github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/daangn/minimemcached v1.2.0 h1:QoKTAxxVMu+oc8JqruosJe8grahl6mEBH+yuyfamINk= +github.com/daangn/minimemcached v1.2.0/go.mod h1:ewcvvKcPuzp5tQjELLUXDZJtb3L1UqxtUc8BjhJf4Q4= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -103,6 +113,14 @@ github.com/edsrzf/mmap-go v1.1.0 h1:6EUwBLQ/Mcr1EYLE4Tn1VdW1A4ckqCQWZBw8Hr0kjpQ= github.com/edsrzf/mmap-go v1.1.0/go.mod h1:19H/e8pUPLicwkyNgOykDXkJ9F0MHE+Z52B8EIth78Q= github.com/eknkc/basex v1.0.1 h1:TcyAkqh4oJXgV3WYyL4KEfCMk9W8oJCpmx1bo+jVgKY= github.com/eknkc/basex v1.0.1/go.mod h1:k/F/exNEHFdbs3ZHuasoP2E7zeWwZblG84Y7Z59vQRo= +github.com/eko/gocache/lib/v4 v4.1.6 h1:5WWIGISKhE7mfkyF+SJyWwqa4Dp2mkdX8QsZpnENqJI= +github.com/eko/gocache/lib/v4 v4.1.6/go.mod h1:HFxC8IiG2WeRotg09xEnPD72sCheJiTSr4Li5Ameg7g= +github.com/eko/gocache/store/go_cache/v4 v4.2.2 h1:tAI9nl6TLoJyKG1ujF0CS0n/IgTEMl+NivxtR5R3/hw= +github.com/eko/gocache/store/go_cache/v4 v4.2.2/go.mod h1:T9zkHokzr8K9EiC7RfMbDg6HSwaV6rv3UdcNu13SGcA= +github.com/eko/gocache/store/memcache/v4 v4.2.2 h1:VKfxytQ5bkcfF3LhmgkrqRiEU2yCN2/rJBUvF1fKZJw= +github.com/eko/gocache/store/memcache/v4 v4.2.2/go.mod h1:9lFU3tZPiej8E3J4ueZ0K9kIdiDQpRxu6WhtId5OsZA= +github.com/eko/gocache/store/redis/v4 v4.2.2 h1:Thw31fzGuH3WzJywsdbMivOmP550D6JS7GDHhvCJPA0= +github.com/eko/gocache/store/redis/v4 v4.2.2/go.mod h1:LaTxLKx9TG/YUEybQvPMij++D7PBTIJ4+pzvk0ykz0w= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM= github.com/fatih/color v1.16.0/go.mod h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4NijnWvE= @@ -145,6 +163,7 @@ github.com/gobwas/ws v1.4.0 h1:CTaoG1tojrh4ucGPcoJFiAQUAsEWekEWvLy7GsVNqGs= github.com/gobwas/ws v1.4.0/go.mod h1:G3gNqMNtPppf5XUz7O4shetPpcZ1VJ7zt18dlUeakrc= github.com/goccy/go-json v0.10.3 h1:KZ5WoDbxAIgm2HNbYckL0se1fHD6rz5j4ywS6ebzDqA= github.com/goccy/go-json v0.10.3/go.mod h1:oq7eo15ShAhp70Anwd5lgX2pLfOS3QCiwU/PULtXL6M= +github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang-jwt/jwt v3.2.2+incompatible h1:IfV12K8xAKAnZqdXVzCZ+TOjboZ2keLg81eXfW3O+oY= @@ -161,6 +180,8 @@ github.com/golang-sql/sqlexp v0.1.0 h1:ZCD6MBpcuOVfGVqsEmY5/4FtYiKz6tSyUv9LPEDei github.com/golang-sql/sqlexp v0.1.0/go.mod h1:J4ad9Vo8ZCWQ2GMrC4UCQy1JpCbwU9m3EOqtpKwwwHI= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc= +github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= 
github.com/gomodule/redigo v1.8.9 h1:Sl3u+2BI/kk+VEatbj0scLdrFhjPmbxOc1myhDP41ws= @@ -370,6 +391,8 @@ github.com/onsi/gomega v1.25.0/go.mod h1:r+zV744Re+DiYCIPRlYOTxn0YkOLcAnW8k1xXdM github.com/orisano/pixelmatch v0.0.0-20220722002657-fb0b55479cde h1:x0TT0RDC7UhAVbbWWBzr41ElhJx5tXPWkIHA2HWPRuw= github.com/orisano/pixelmatch v0.0.0-20220722002657-fb0b55479cde/go.mod h1:nZgzbfBr3hhjoZnS66nKrHmduYNpc34ny7RK4z5/HM0= github.com/pascaldekloe/name v0.0.0-20180628100202-0fd16699aae1/go.mod h1:eD5JxqMiuNYyFNmyY9rkJ/slN8y59oEu4Ei7F8OoKWQ= +github.com/patrickmn/go-cache v2.1.0+incompatible h1:HRMgzkcYKYpi3C8ajMPV8OFXaaRUnok+kx1WdO15EQc= +github.com/patrickmn/go-cache v2.1.0+incompatible/go.mod h1:3Qf8kWWT7OJRJbdiICTKqZju1ZixQ/KpMGzzAfe6+WQ= github.com/piprate/json-gold v0.5.1-0.20230111113000-6ddbe6e6f19f h1:HlPa7RcxTCrva5izPfTEfvYecO7LTahgmMRD1Qp13xg= github.com/piprate/json-gold v0.5.1-0.20230111113000-6ddbe6e6f19f/go.mod h1:WZ501QQMbZZ+3pXFPhQKzNwS1+jls0oqov3uQ2WasLs= github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8/go.mod h1:HKlIX3XHQyzLZPlr7++PzdhaXEj94dEiJgZDTsxEqUI= @@ -409,6 +432,9 @@ github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTE github.com/rogpeppe/go-internal v1.8.1/go.mod h1:JeRgkft04UBgHMgCIwADu4Pn6Mtm5d4nPKWu0nJ5d+o= github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= +github.com/rs/xid v1.3.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg= +github.com/rs/zerolog v1.26.1 h1:/ihwxqH+4z8UxyI70wM1z9yCvkWcfz/a3mj48k/Zngc= +github.com/rs/zerolog v1.26.1/go.mod h1:/wSSJWX7lVrsOwlbyTRSOJvqRlc+WjWlfes+CiJ+tmc= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/ryanuber/go-glob v1.0.0 h1:iQh3xXAumdQ+4Ufa5b25cRpC5TYKlno6hsv6Cb3pkBk= @@ -478,6 +504,8 @@ github.com/x-cray/logrus-prefixed-formatter v0.5.2 h1:00txxvfBM9muc0jiLIEAkAcIMJ github.com/x-cray/logrus-prefixed-formatter v0.5.2/go.mod h1:2duySbKsL6M18s5GU7VPsoEPHyzalCE06qoARUCeBBE= github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= +github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +github.com/yuin/goldmark v1.4.0/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= github.com/yuin/gopher-lua v1.1.1 h1:kYKnWBjvbNP4XLT3+bPEwAXJx262OhaHDWDVOPjL46M= github.com/yuin/gopher-lua v1.1.1/go.mod h1:GBR0iDaNXjAgGg9zfCvksxSRnQx76gclCIb7kdAd1Pw= @@ -497,7 +525,9 @@ go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20211215165025-cf75a172585e/go.mod h1:P+XmwS30IXTQdn5tA2iutPOUgjI07+tq3H3K9MVA1s8= 
golang.org/x/crypto v0.0.0-20220518034528-6f7dac969898/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58= @@ -511,6 +541,9 @@ golang.org/x/crypto v0.28.0 h1:GBDwsMXVQi34v5CCYUm2jkJvu4cbtru2U4TN2PSyQnw= golang.org/x/crypto v0.28.0/go.mod h1:rmgy+3RHxRZMyY0jjAJShp2zgEdOqj2AO7U0pYmeQ7U= golang.org/x/exp v0.0.0-20240325151524-a685a6edb6d8 h1:aAcj0Da7eBAtrTp03QXWvm88pSyOt+UgdZw2BFZ+lEw= golang.org/x/exp v0.0.0-20240325151524-a685a6edb6d8/go.mod h1:CQ1k9gNrJ50XIzaKCRR2hssIjF07kZFEiieALBM/ARQ= +golang.org/x/exp v0.0.0-20240416160154-fe59bbe5cc7f h1:99ci1mjWVBWwJiEKYY6jWa4d2nTQVIEhZIptnrVb1XY= +golang.org/x/exp v0.0.0-20240416160154-fe59bbe5cc7f/go.mod h1:/lliqkxwWAhPjf5oSOIJup2XcqJaw8RGS6k3TGEc7GI= +golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.18.0 h1:5+9lSbEzPSdWkH32vYPBwEpX8KwDbM52Ud9xBUvNlb0= @@ -520,6 +553,8 @@ golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= +golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= @@ -534,6 +569,7 @@ golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY= golang.org/x/net v0.30.0 h1:AcW1SDZMkb8IpzCdQUaIq2sP4sZ4zw+55h6ynffypl4= golang.org/x/net v0.30.0/go.mod h1:2wGyMJ5iFasEhkwi13ChkO/t1ECNC4X4eBKkVFyYFlU= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ= @@ -543,9 +579,12 @@ golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5h golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200923182605-d9f96fdee20d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 
+golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210616045830-e2b7044e8c71/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220519141025-dcacdad47464/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -595,11 +634,15 @@ golang.org/x/time v0.7.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190524210228-3d17549cdc6b/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.7/go.mod h1:LGqMHiF4EqQNHR1JncWGqT5BVaXmza+X+BDGol+dOxo= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= golang.org/x/tools v0.22.0 h1:gqSGLZqv+AI9lIQzniJ0nZDRG5GBPsSi+DRNHWNz6yA= golang.org/x/tools v0.22.0/go.mod h1:aCwcsjqvq7Yqt6TNyX7QMU2enbQ/Gt0bo6krSeEri+c= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 h1:pPJltXNxVzT4pK9yD8vR9X75DaWYYmLGMsEvBfFQZzQ= google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU= google.golang.org/grpc v1.68.0 h1:aHQeeJbo8zAkAa3pRzrVjZlbz6uSfeOXlJNQM0RAbz0= diff --git a/storage/cmd/cmd.go b/storage/cmd/cmd.go index 833ed6b97f..8c2e0cd503 100644 --- a/storage/cmd/cmd.go +++ b/storage/cmd/cmd.go @@ -43,12 +43,14 @@ func FlagSet() *pflag.FlagSet { "Note: using SQLite is not recommended in production environments. "+ "If using SQLite anyways, remember to enable foreign keys ('_foreign_keys=on') and the write-ahead-log ('_journal_mode=WAL').") - flagSet.String("storage.session.redis.address", defs.Redis.Address, "Redis session database server address. This can be a simple 'host:port' or a Redis connection URL with scheme, auth and other options. "+ + flagSet.StringSlice("storage.session.memcached.address", defs.Session.Memcached.Address, "List of Memcached server addresses. These can be a simple 'host:port' or a Memcached connection URL with scheme, auth and other options.") + + flagSet.String("storage.session.redis.address", defs.Session.Redis.Address, "Redis session database server address. This can be a simple 'host:port' or a Redis connection URL with scheme, auth and other options. "+ "If not set it, defaults to an in-memory database.") - flagSet.String("storage.session.redis.username", defs.Redis.Username, "Redis session database username. 
If set, it overrides the username in the connection URL.") - flagSet.String("storage.session.redis.password", defs.Redis.Password, "Redis session database password. If set, it overrides the username in the connection URL.") - flagSet.String("storage.session.redis.database", defs.Redis.Database, "Redis session database name, which is used as prefix every key. Can be used to have multiple instances use the same Redis instance.") - flagSet.String("storage.session.redis.tls.truststorefile", defs.Redis.TLS.TrustStoreFile, "PEM file containing the trusted CA certificate(s) for authenticating remote Redis session servers. Can only be used when connecting over TLS (use 'rediss://' as scheme in address).") + flagSet.String("storage.session.redis.username", defs.Session.Redis.Username, "Redis session database username. If set, it overrides the username in the connection URL.") + flagSet.String("storage.session.redis.password", defs.Session.Redis.Password, "Redis session database password. If set, it overrides the password in the connection URL.") + flagSet.String("storage.session.redis.database", defs.Session.Redis.Database, "Redis session database name, which is used as a prefix for every key. Can be used to have multiple instances use the same Redis instance.") + flagSet.String("storage.session.redis.tls.truststorefile", defs.Session.Redis.TLS.TrustStoreFile, "PEM file containing the trusted CA certificate(s) for authenticating remote Redis session servers. Can only be used when connecting over TLS (use 'rediss://' as scheme in address).") return flagSet } diff --git a/storage/config.go b/storage/config.go index d44e814feb..3e33af2c38 100644 --- a/storage/config.go +++ b/storage/config.go @@ -42,6 +42,8 @@ type SQLConfig struct { // SessionConfig specifies config for the session storage engine. type SessionConfig struct { - // Type is the type of session storage engine to use. + // Memcached specifies config for the Memcached session storage engine. + Memcached MemcachedConfig `koanf:"memcached"` + // Redis specifies config for the Redis session storage engine.
Redis RedisConfig `koanf:"redis"` } diff --git a/storage/engine.go b/storage/engine.go index 6826d0a3be..def57482ff 100644 --- a/storage/engine.go +++ b/storage/engine.go @@ -99,6 +99,18 @@ func (e *engine) CheckHealth() map[string]core.Health { } results["sql"] = sqlHealth } + if e.sessionDatabase != nil { + results["session"] = core.Health{ + Status: core.HealthStatusUp, + } + err := e.sessionDatabase.GetStore(defaultSessionDataTTL, "healthcheck").Get("does_not_exist", nil) + if err != nil && !errors.Is(err, ErrNotFound) { + results["session"] = core.Health{ + Status: core.HealthStatusDown, + Details: err.Error(), + } + } + } return results } @@ -178,6 +190,10 @@ func (e *engine) Configure(config core.ServerConfig) error { // session storage redisConfig := e.config.Session.Redis + memcachedConfig := e.config.Session.Memcached + if redisConfig.isConfigured() && memcachedConfig.isConfigured() { + return errors.New("only one of 'storage.session.redis' and 'storage.session.memcached' can be configured") + } if redisConfig.isConfigured() { redisDB, err := createRedisDatabase(redisConfig) if err != nil { @@ -188,6 +204,14 @@ func (e *engine) Configure(config core.ServerConfig) error { return fmt.Errorf("unable to configure redis client: %w", err) } e.sessionDatabase = NewRedisSessionDatabase(client, redisConfig.Database) + log.Logger().Info("Redis session storage support enabled.") + } else if memcachedConfig.isConfigured() { + memcachedClient, err := newMemcachedClient(memcachedConfig) + if err != nil { + return fmt.Errorf("unable to initialize memcached client: %w", err) + } + e.sessionDatabase = NewMemcachedSessionDatabase(memcachedClient) + log.Logger().Info("Memcached session storage support enabled.") } else { e.sessionDatabase = NewInMemorySessionDatabase() } diff --git a/storage/engine_test.go b/storage/engine_test.go index 0e6b68aff3..f45a8ebbb0 100644 --- a/storage/engine_test.go +++ b/storage/engine_test.go @@ -20,6 +20,13 @@ package storage import ( "errors" + "fmt" + "os" + "path" + "strings" + "testing" + "time" + "github.com/alicebob/miniredis/v2" "github.com/nuts-foundation/go-stoabs" "github.com/nuts-foundation/nuts-node/core" @@ -27,11 +34,6 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "go.uber.org/mock/gomock" - "os" - "path" - "strings" - "testing" - "time" ) func Test_New(t *testing.T) { @@ -240,28 +242,49 @@ func TestEngine_CheckHealth(t *testing.T) { expected := core.Health{Status: core.HealthStatusUp} e := setup(t) health := e.CheckHealth() - status, ok := health["sql"] - require.True(t, ok) - assert.Equal(t, expected, status) + t.Run("sql", func(t *testing.T) { + status, ok := health["sql"] + require.True(t, ok) + assert.Equal(t, expected, status) + }) + t.Run("session", func(t *testing.T) { + status, ok := health["session"] + require.True(t, ok) + assert.Equal(t, expected, status) + }) }) t.Run("fails", func(t *testing.T) { - expected := core.Health{ - Status: core.HealthStatusDown, - Details: "sql: database is closed", - } e := setup(t) db, err := e.sqlDB.DB() require.NoError(t, err) require.NoError(t, db.Close()) + e.sessionDatabase = NewErrorSessionDatabase(assert.AnError) health := e.CheckHealth() - status, ok := health["sql"] - require.True(t, ok) - assert.Equal(t, expected, status) + t.Run("sql", func(t *testing.T) { + expected := core.Health{ + Status: core.HealthStatusDown, + Details: "sql: database is closed", + } + + status, ok := health["sql"] + require.True(t, ok) + assert.Equal(t, expected, status) + }) + 
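+ // the session database was replaced with an always-failing stub above, so the session health check must report Down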
t.Run("session", func(t *testing.T) { + expected := core.Health{ + Status: core.HealthStatusDown, + Details: assert.AnError.Error(), + } + + status, ok := health["session"] + require.True(t, ok) + assert.Equal(t, expected, status) + }) }) } -func Test_engine_redisSessionDatabase(t *testing.T) { +func Test_engine_sessionDatabase(t *testing.T) { t.Run("redis", func(t *testing.T) { redis := miniredis.RunT(t) e := New().(*engine) @@ -278,4 +301,31 @@ func Test_engine_redisSessionDatabase(t *testing.T) { }) assert.IsType(t, redisSessionDatabase{}, e.GetSessionDatabase()) }) + t.Run("memcached", func(t *testing.T) { + memcached := memcachedTestServer(t) + e := New().(*engine) + e.config = Config{ + Session: SessionConfig{ + Memcached: MemcachedConfig{Address: []string{fmt.Sprintf("localhost:%d", memcached.Port())}}, + }, + } + dataDir := io.TestDirectory(t) + require.NoError(t, e.Configure(core.ServerConfig{Datadir: dataDir})) + require.NoError(t, e.Start()) + t.Cleanup(func() { + _ = e.Shutdown() + }) + assert.IsType(t, &MemcachedSessionDatabase{}, e.GetSessionDatabase()) + }) + t.Run("error on both redis and memcached", func(t *testing.T) { + e := New().(*engine) + e.config = Config{ + Session: SessionConfig{ + Memcached: MemcachedConfig{Address: []string{"localhost:1111"}}, + Redis: RedisConfig{Address: "localhost:1111"}, + }, + } + dataDir := io.TestDirectory(t) + require.Error(t, e.Configure(core.ServerConfig{Datadir: dataDir})) + }) } diff --git a/storage/interface.go b/storage/interface.go index b6aed53097..762d74b44d 100644 --- a/storage/interface.go +++ b/storage/interface.go @@ -80,6 +80,7 @@ type SessionDatabase interface { GetStore(ttl time.Duration, keys ...string) SessionStore // close stops any background processes and closes the database. close() + getFullKey(prefixes []string, key string) string } // SessionStore is a key-value store that holds session data. diff --git a/storage/memcached.go b/storage/memcached.go new file mode 100644 index 0000000000..0dfaeb1c54 --- /dev/null +++ b/storage/memcached.go @@ -0,0 +1,42 @@ +/* + * Copyright (C) 2024 Nuts community + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + * + */ + +package storage + +import "github.com/bradfitz/gomemcache/memcache" + +// MemcachedConfig holds the configuration for the memcached storage. +type MemcachedConfig struct { + Address []string `koanf:"address"` +} + +// isConfigured returns true if config the indicates Redis support should be enabled. +func (r MemcachedConfig) isConfigured() bool { + return len(r.Address) > 0 +} + +// newMemcachedClient creates a memcache.Client and performs a Ping() +func newMemcachedClient(config MemcachedConfig) (*memcache.Client, error) { + client := memcache.New(config.Address...) 
+ err := client.Ping() + if err != nil { + _ = client.Close() + return nil, err + } + return client, err +} diff --git a/storage/memcached_test.go b/storage/memcached_test.go new file mode 100644 index 0000000000..83c415a86e --- /dev/null +++ b/storage/memcached_test.go @@ -0,0 +1,51 @@ +/* + * Copyright (C) 2024 Nuts community + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + * + */ + +package storage + +import ( + "fmt" + "github.com/daangn/minimemcached" + "github.com/stretchr/testify/require" + "testing" +) + +func Test_newMemcachedClient(t *testing.T) { + port, err := getRandomAvailablePort() + if err != nil { + t.Fatal(err) + } + + cfg := &minimemcached.Config{ + Port: uint16(port), + } + m, err := minimemcached.Run(cfg) + if err != nil { + t.Fatal(err) + } + + client, err := newMemcachedClient(MemcachedConfig{Address: []string{ + fmt.Sprintf("localhost:%d", m.Port()), + }}) + + defer client.Close() + defer m.Close() + + require.NoError(t, err) + require.NotNil(t, client) +} diff --git a/storage/redis.go b/storage/redis.go index 97dc37d44f..e9e77ae450 100644 --- a/storage/redis.go +++ b/storage/redis.go @@ -55,7 +55,7 @@ type RedisConfig struct { // isConfigured returns true if config the indicates Redis support should be enabled. func (r RedisConfig) isConfigured() bool { - return len(r.Address) > 0 + return r.Sentinel.enabled() || len(r.Address) > 0 } func (r RedisConfig) parse() (*redis.Options, error) { diff --git a/storage/session.go b/storage/session.go new file mode 100644 index 0000000000..77d90d5c5a --- /dev/null +++ b/storage/session.go @@ -0,0 +1,100 @@ +/* + * Copyright (C) 2023 Nuts community + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ * + */ + +package storage + +import ( + "context" + "encoding/json" + "errors" + "time" + + "github.com/bradfitz/gomemcache/memcache" + "github.com/eko/gocache/lib/v4/cache" + "github.com/eko/gocache/lib/v4/store" +) + +var _ SessionStore = (*SessionStoreImpl[[]byte])(nil) + +// defaultSessionDataTTL is the default time to live for session data +// some stores require a default +var defaultSessionDataTTL = 15 * time.Minute + +// StringOrBytes is a type that can be either a string or a byte slice +// used for generic type constraints +type StringOrBytes interface { + ~string | ~[]byte +} + +// SessionStoreImpl is an implementation of the SessionStore interface +// It handles logic for all session store types +type SessionStoreImpl[T StringOrBytes] struct { + underlying *cache.Cache[T] + ttl time.Duration + prefixes []string + db SessionDatabase +} + +func (s SessionStoreImpl[T]) Delete(key string) error { + err := s.underlying.Delete(context.Background(), s.db.getFullKey(s.prefixes, key)) + if err != nil { + if errors.Is(err, store.NotFound{}) || errors.Is(err, memcache.ErrCacheMiss) { + return nil + } + return err + } + return nil +} + +func (s SessionStoreImpl[T]) Exists(key string) bool { + val, err := s.underlying.Get(context.Background(), s.db.getFullKey(s.prefixes, key)) + if err != nil { + return false + } + return len(val) > 0 +} + +func (s SessionStoreImpl[T]) Get(key string, target interface{}) error { + val, err := s.underlying.Get(context.Background(), s.db.getFullKey(s.prefixes, key)) + if err != nil { + // memcache.ErrCacheMiss is added here since the abstraction layer doesn't map this error to NotFound + if errors.Is(err, store.NotFound{}) || errors.Is(err, memcache.ErrCacheMiss) { + return ErrNotFound + } + return err + } + if len(val) == 0 { + return ErrNotFound + } + + return json.Unmarshal([]byte(val), target) +} + +func (s SessionStoreImpl[T]) Put(key string, value interface{}) error { + bytes, err := json.Marshal(value) + if err != nil { + return err + } + return s.underlying.Set(context.Background(), s.db.getFullKey(s.prefixes, key), T(bytes), store.WithExpiration(s.ttl)) +} +func (s SessionStoreImpl[T]) GetAndDelete(key string, target interface{}) error { + if err := s.Get(key, target); err != nil { + return err + } + return s.underlying.Delete(context.Background(), s.db.getFullKey(s.prefixes, key)) +} diff --git a/storage/session_inmemory.go b/storage/session_inmemory.go index c5dfa96ccb..c65bbfcdc6 100644 --- a/storage/session_inmemory.go +++ b/storage/session_inmemory.go @@ -19,164 +19,47 @@ package storage import ( - "encoding/json" "strings" - "sync" "time" - "github.com/nuts-foundation/nuts-node/storage/log" + "github.com/eko/gocache/lib/v4/cache" + "github.com/eko/gocache/store/go_cache/v4" + gocacheclient "github.com/patrickmn/go-cache" ) var _ SessionDatabase = (*InMemorySessionDatabase)(nil) -var _ SessionStore = (*InMemorySessionStore)(nil) var sessionStorePruneInterval = 10 * time.Minute -type expiringEntry struct { - // Value stores the actual value as JSON - Value string - Expiry time.Time -} - // InMemorySessionDatabase is an in memory database that holds session data on a KV basis. // Keys could be access tokens, nonce's, authorization codes, etc. // All entries are stored with a TTL, so they will be removed automatically. 
type InMemorySessionDatabase struct { - done chan struct{} - mux sync.RWMutex - routines sync.WaitGroup - entries map[string]expiringEntry + underlying *cache.Cache[[]byte] } // NewInMemorySessionDatabase creates a new in memory session database. func NewInMemorySessionDatabase() *InMemorySessionDatabase { - result := &InMemorySessionDatabase{ - entries: map[string]expiringEntry{}, - done: make(chan struct{}, 10), + gocacheClient := gocacheclient.New(defaultSessionDataTTL, sessionStorePruneInterval) + gocacheStore := go_cache.NewGoCache(gocacheClient) + return &InMemorySessionDatabase{ + underlying: cache.New[[]byte](gocacheStore), } - result.startPruning(sessionStorePruneInterval) - return result } -func (i *InMemorySessionDatabase) GetStore(ttl time.Duration, keys ...string) SessionStore { - return InMemorySessionStore{ - ttl: ttl, - prefixes: keys, - db: i, +func (s *InMemorySessionDatabase) GetStore(ttl time.Duration, keys ...string) SessionStore { + return SessionStoreImpl[[]byte]{ + underlying: s.underlying, + ttl: ttl, + prefixes: keys, + db: s, } } -func (i *InMemorySessionDatabase) close() { - // Signal pruner to stop and wait for it to finish - i.done <- struct{}{} -} - -func (i *InMemorySessionDatabase) startPruning(interval time.Duration) { - ticker := time.NewTicker(interval) - i.routines.Add(1) - go func() { - defer i.routines.Done() - for { - select { - case <-i.done: - ticker.Stop() - return - case <-ticker.C: - valsPruned := i.prune() - if valsPruned > 0 { - log.Logger().Debugf("Pruned %d expired session variables", valsPruned) - } - } - } - }() -} - -func (i *InMemorySessionDatabase) prune() int { - i.mux.Lock() - defer i.mux.Unlock() - - moment := time.Now() - - // Find expired flows and delete them - var count int - for key, entry := range i.entries { - if entry.Expiry.Before(moment) { - count++ - delete(i.entries, key) - } - } - - return count -} - -type InMemorySessionStore struct { - ttl time.Duration - prefixes []string - db *InMemorySessionDatabase -} - -func (i InMemorySessionStore) Delete(key string) error { - i.db.mux.Lock() - defer i.db.mux.Unlock() - - delete(i.db.entries, i.getFullKey(key)) - return nil -} - -func (i InMemorySessionStore) Exists(key string) bool { - i.db.mux.Lock() - defer i.db.mux.Unlock() - - _, ok := i.db.entries[i.getFullKey(key)] - return ok -} - -func (i InMemorySessionStore) Get(key string, target interface{}) error { - i.db.mux.Lock() - defer i.db.mux.Unlock() - return i.get(key, target) -} - -func (i InMemorySessionStore) get(key string, target interface{}) error { - fullKey := i.getFullKey(key) - entry, ok := i.db.entries[fullKey] - if !ok { - return ErrNotFound - } - if entry.Expiry.Before(time.Now()) { - delete(i.db.entries, fullKey) - return ErrNotFound - } - - return json.Unmarshal([]byte(entry.Value), target) -} - -func (i InMemorySessionStore) Put(key string, value interface{}) error { - i.db.mux.Lock() - defer i.db.mux.Unlock() - - bytes, err := json.Marshal(value) - if err != nil { - return err - } - entry := expiringEntry{ - Value: string(bytes), - Expiry: time.Now().Add(i.ttl), - } - - i.db.entries[i.getFullKey(key)] = entry - return nil -} -func (i InMemorySessionStore) GetAndDelete(key string, target interface{}) error { - i.db.mux.Lock() - defer i.db.mux.Unlock() - if err := i.get(key, target); err != nil { - return err - } - delete(i.db.entries, i.getFullKey(key)) - return nil +func (s *InMemorySessionDatabase) close() { + // NOP } -func (i InMemorySessionStore) getFullKey(key string) string { - return 
strings.Join(append(i.prefixes, key), "/") +func (s *InMemorySessionDatabase) getFullKey(prefixes []string, key string) string { + return strings.Join(append(prefixes, key), "/") } diff --git a/storage/session_inmemory_test.go b/storage/session_inmemory_test.go index bfc29aa77a..3222e3c356 100644 --- a/storage/session_inmemory_test.go +++ b/storage/session_inmemory_test.go @@ -22,9 +22,6 @@ import ( "testing" "time" - "github.com/nuts-foundation/nuts-node/test" - "go.uber.org/goleak" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -38,52 +35,16 @@ func TestNewInMemorySessionDatabase(t *testing.T) { func TestInMemorySessionDatabase_GetStore(t *testing.T) { db := createDatabase(t) - store := db.GetStore(time.Minute, "key1", "key2").(InMemorySessionStore) + store := db.GetStore(time.Minute, "key1", "key2").(SessionStoreImpl[[]byte]) require.NotNil(t, store) assert.Equal(t, time.Minute, store.ttl) assert.Equal(t, []string{"key1", "key2"}, store.prefixes) } -func TestInMemorySessionStore_Put(t *testing.T) { - db := createDatabase(t) - store := db.GetStore(time.Minute, "prefix").(InMemorySessionStore) - - t.Run("string value is stored", func(t *testing.T) { - err := store.Put("key", "value") - - require.NoError(t, err) - assert.Equal(t, `"value"`, store.db.entries["prefix/key"].Value) - }) - - t.Run("float value is stored", func(t *testing.T) { - err := store.Put("key", 1.23) - - require.NoError(t, err) - assert.Equal(t, "1.23", store.db.entries["prefix/key"].Value) - }) - - t.Run("struct value is stored", func(t *testing.T) { - value := testStruct{ - Field1: "value", - } - - err := store.Put("key", value) - - require.NoError(t, err) - assert.Equal(t, "{\"field1\":\"value\"}", store.db.entries["prefix/key"].Value) - }) - - t.Run("value is not JSON", func(t *testing.T) { - err := store.Put("key", make(chan int)) - - assert.Error(t, err) - }) -} - func TestInMemorySessionStore_Get(t *testing.T) { db := createDatabase(t) - store := db.GetStore(time.Minute, "prefix").(InMemorySessionStore) + store := db.GetStore(time.Minute, "prefix").(SessionStoreImpl[[]byte]) t.Run("string value is retrieved correctly", func(t *testing.T) { _ = store.Put(t.Name(), "value") @@ -126,30 +87,6 @@ func TestInMemorySessionStore_Get(t *testing.T) { assert.Equal(t, ErrNotFound, err) }) - t.Run("value is expired", func(t *testing.T) { - store.db.entries["prefix/key"] = expiringEntry{ - Value: "", - Expiry: time.Now().Add(-time.Minute), - } - var actual string - - err := store.Get("key", &actual) - - assert.Equal(t, ErrNotFound, err) - }) - - t.Run("value is not JSON", func(t *testing.T) { - store.db.entries["prefix/key"] = expiringEntry{ - Value: "not JSON", - Expiry: time.Now().Add(time.Minute), - } - var actual string - - err := store.Get("key", &actual) - - assert.Error(t, err) - }) - t.Run("value is not a pointer", func(t *testing.T) { _ = store.Put(t.Name(), "value") @@ -161,7 +98,7 @@ func TestInMemorySessionStore_Get(t *testing.T) { func TestInMemorySessionStore_Delete(t *testing.T) { db := createDatabase(t) - store := db.GetStore(time.Minute, "prefix").(InMemorySessionStore) + store := db.GetStore(time.Minute, "prefix").(SessionStoreImpl[[]byte]) t.Run("value is deleted", func(t *testing.T) { _ = store.Put(t.Name(), "value") @@ -169,8 +106,7 @@ func TestInMemorySessionStore_Delete(t *testing.T) { err := store.Delete(t.Name()) require.NoError(t, err) - _, ok := store.db.entries["prefix/key"] - assert.False(t, ok) + assert.False(t, store.Exists("prefix/key")) }) t.Run("value is not 
found", func(t *testing.T) { @@ -182,7 +118,7 @@ func TestInMemorySessionStore_Delete(t *testing.T) { func TestInMemorySessionStore_GetAndDelete(t *testing.T) { db := createDatabase(t) - store := db.GetStore(time.Minute, "prefix").(InMemorySessionStore) + store := db.GetStore(time.Minute, "prefix").(SessionStoreImpl[[]byte]) t.Run("ok", func(t *testing.T) { _ = store.Put(t.Name(), "value") @@ -200,57 +136,6 @@ func TestInMemorySessionStore_GetAndDelete(t *testing.T) { }) } -func TestInMemorySessionDatabase_Close(t *testing.T) { - defer goleak.VerifyNone(t, goleak.IgnoreCurrent()) - - t.Run("assert Close() waits for pruning to finish to avoid leaking goroutines", func(t *testing.T) { - sessionStorePruneInterval = 10 * time.Millisecond - defer func() { - sessionStorePruneInterval = 10 * time.Minute - }() - store := NewInMemorySessionDatabase() - time.Sleep(50 * time.Millisecond) // make sure pruning is running - store.close() - }) -} - -func Test_memoryStore_prune(t *testing.T) { - t.Run("automatic", func(t *testing.T) { - store := createDatabase(t) - // we call startPruning a second time ourselves to speed things up, make sure not to leak the original goroutine - defer func() { - store.done <- struct{}{} - }() - store.startPruning(10 * time.Millisecond) - - err := store.GetStore(time.Millisecond).Put("key", "value") - require.NoError(t, err) - - test.WaitFor(t, func() (bool, error) { - store.mux.Lock() - defer store.mux.Unlock() - _, exists := store.entries["key"] - return !exists, nil - }, time.Second, "time-out waiting for entry to be pruned") - }) - t.Run("prunes expired flows", func(t *testing.T) { - store := createDatabase(t) - defer store.close() - - _ = store.GetStore(0).Put("key1", "value") - _ = store.GetStore(time.Minute).Put("key2", "value") - - count := store.prune() - - assert.Equal(t, 1, count) - - // Second round to assert there's nothing to prune now - count = store.prune() - - assert.Equal(t, 0, count) - }) -} - type testStruct struct { Field1 string `json:"field1"` } diff --git a/storage/session_memcached.go b/storage/session_memcached.go new file mode 100644 index 0000000000..eb1f618972 --- /dev/null +++ b/storage/session_memcached.go @@ -0,0 +1,64 @@ +/* + * Copyright (C) 2024 Nuts community + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + * + */ + +package storage + +import ( + "strings" + "time" + + "github.com/bradfitz/gomemcache/memcache" + "github.com/eko/gocache/lib/v4/cache" + "github.com/eko/gocache/lib/v4/store" + memcachestore "github.com/eko/gocache/store/memcache/v4" +) + +type MemcachedSessionDatabase struct { + client *memcache.Client + underlying *cache.Cache[[]byte] +} + +// NewMemcachedSessionDatabase creates a new MemcachedSessionDatabase using an initialized memcache.Client. 
+func NewMemcachedSessionDatabase(client *memcache.Client) *MemcachedSessionDatabase { + // the defaultSessionDataTTL is used because the memcached store requires a default + // the underlying store will always use the TTL of the store as argument for the PUT. + memcachedStore := memcachestore.NewMemcache(client, store.WithExpiration(defaultSessionDataTTL)) + return &MemcachedSessionDatabase{ + client: client, + underlying: cache.New[[]byte](memcachedStore), + } +} + +func (s MemcachedSessionDatabase) GetStore(ttl time.Duration, keys ...string) SessionStore { + return SessionStoreImpl[[]byte]{ + underlying: s.underlying, + ttl: ttl, + prefixes: keys, + db: s, + } +} + +func (s MemcachedSessionDatabase) close() { + // close the memcached client, if one was set + if s.client != nil { + _ = s.client.Close() + } +} + +func (s MemcachedSessionDatabase) getFullKey(prefixes []string, key string) string { + return strings.Join(append(prefixes, key), "/") +} diff --git a/storage/session_memcached_test.go b/storage/session_memcached_test.go new file mode 100644 index 0000000000..577f715435 --- /dev/null +++ b/storage/session_memcached_test.go @@ -0,0 +1,183 @@ +/* + * Copyright (C) 2023 Nuts community + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see .
+ * + */ + +package storage + +import ( + "fmt" + "github.com/bradfitz/gomemcache/memcache" + "github.com/daangn/minimemcached" + "net" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestNewMemcachedSessionDatabase(t *testing.T) { + db := memcachedTestDatabase(t) + + assert.NotNil(t, db) +} + +func TestNewMemcachedSessionDatabase_GetStore(t *testing.T) { + db := memcachedTestDatabase(t) + + store := db.GetStore(time.Minute, "key1", "key2").(SessionStoreImpl[[]byte]) + + require.NotNil(t, store) + assert.Equal(t, time.Minute, store.ttl) + assert.Equal(t, []string{"key1", "key2"}, store.prefixes) +} + +func TestNewMemcachedSessionDatabase_Get(t *testing.T) { + db := memcachedTestDatabase(t) + store := db.GetStore(time.Minute, "prefix").(SessionStoreImpl[[]byte]) + + t.Run("string value is retrieved correctly", func(t *testing.T) { + _ = store.Put(t.Name(), "value") + var actual string + + err := store.Get(t.Name(), &actual) + + require.NoError(t, err) + assert.Equal(t, "value", actual) + }) + + t.Run("float value is retrieved correctly", func(t *testing.T) { + _ = store.Put(t.Name(), 1.23) + var actual float64 + + err := store.Get(t.Name(), &actual) + + require.NoError(t, err) + assert.Equal(t, 1.23, actual) + }) + + t.Run("struct value is retrieved correctly", func(t *testing.T) { + value := testStruct{ + Field1: "value", + } + _ = store.Put(t.Name(), value) + var actual testStruct + + err := store.Get(t.Name(), &actual) + + require.NoError(t, err) + assert.Equal(t, value, actual) + }) + + t.Run("value is not found", func(t *testing.T) { + var actual string + + err := store.Get(t.Name(), &actual) + + assert.Equal(t, ErrNotFound, err) + }) + + t.Run("value is not a pointer", func(t *testing.T) { + _ = store.Put(t.Name(), "value") + + err := store.Get(t.Name(), "not a pointer") + + assert.Error(t, err) + }) +} + +func TestNewMemcachedSessionDatabase_Delete(t *testing.T) { + db := memcachedTestDatabase(t) + store := db.GetStore(time.Minute, "prefix").(SessionStoreImpl[[]byte]) + + t.Run("value is deleted", func(t *testing.T) { + _ = store.Put(t.Name(), "value") + + err := store.Delete(t.Name()) + + require.NoError(t, err) + assert.False(t, store.Exists("prefix/key")) + }) + + t.Run("value is not found", func(t *testing.T) { + err := store.Delete(t.Name()) + + assert.NoError(t, err) + }) +} + +func TestNewMemcachedSessionDatabase_GetAndDelete(t *testing.T) { + db := memcachedTestDatabase(t) + store := db.GetStore(time.Minute, "prefix").(SessionStoreImpl[[]byte]) + + t.Run("ok", func(t *testing.T) { + _ = store.Put(t.Name(), "value") + var actual string + + err := store.GetAndDelete(t.Name(), &actual) + + require.NoError(t, err) + assert.Equal(t, "value", actual) + // is deleted + assert.ErrorIs(t, store.Get(t.Name(), new(string)), ErrNotFound) + }) + t.Run("error", func(t *testing.T) { + assert.ErrorIs(t, store.GetAndDelete(t.Name(), new(string)), ErrNotFound) + }) +} + +func getRandomAvailablePort() (int, error) { + // Listen on a random port by specifying ":0" + listener, err := net.Listen("tcp", ":0") + if err != nil { + return 0, err + } + defer listener.Close() + + // Extract the assigned port + addr := listener.Addr().(*net.TCPAddr) + return addr.Port, nil +} + +func memcachedTestDatabase(t *testing.T) *MemcachedSessionDatabase { + m := memcachedTestServer(t) + client := memcache.New(fmt.Sprintf("localhost:%d", m.Port())) + t.Cleanup(func() { + _ = client.Close() + }) + return NewMemcachedSessionDatabase(client) +} + +func 
memcachedTestServer(t *testing.T) *minimemcached.MiniMemcached { + // get random available port + port, err := getRandomAvailablePort() + if err != nil { + t.Fatal(err) + } + + cfg := &minimemcached.Config{ + Port: uint16(port), + } + m, err := minimemcached.Run(cfg) + if err != nil { + t.Fatal(err) + } + t.Cleanup(func() { + m.Close() + }) + return m +} diff --git a/storage/session_redis.go b/storage/session_redis.go index 83ca50ffa6..2567a339b5 100644 --- a/storage/session_redis.go +++ b/storage/session_redis.go @@ -19,101 +19,45 @@ package storage import ( - "context" - "encoding/json" - "errors" "strings" "time" - "github.com/nuts-foundation/nuts-node/storage/log" + "github.com/eko/gocache/lib/v4/cache" + redisstore "github.com/eko/gocache/store/redis/v4" "github.com/redis/go-redis/v9" ) +type redisSessionDatabase struct { + underlying *cache.Cache[string] + prefix string +} + func NewRedisSessionDatabase(client *redis.Client, prefix string) SessionDatabase { + redisStore := redisstore.NewRedis(client) return redisSessionDatabase{ - client: client, - prefix: prefix, + underlying: cache.New[string](redisStore), + prefix: prefix, } } -type redisSessionDatabase struct { - client *redis.Client - prefix string -} - func (s redisSessionDatabase) GetStore(ttl time.Duration, keys ...string) SessionStore { var prefixParts []string if len(s.prefix) > 0 { prefixParts = append(prefixParts, s.prefix) } prefixParts = append(prefixParts, keys...) - return redisSessionStore{ - client: s.client, - ttl: ttl, - storeName: strings.Join(prefixParts, "."), + return SessionStoreImpl[string]{ + underlying: s.underlying, + ttl: ttl, + prefixes: prefixParts, + db: s, } } func (s redisSessionDatabase) close() { - err := s.client.Close() - if err != nil { - log.Logger().WithError(err).Error("Failed to close redis client") - } -} - -type redisSessionStore struct { - client *redis.Client - ttl time.Duration - storeName string -} - -func (s redisSessionStore) Delete(key string) error { - return s.client.Del(context.Background(), s.toRedisKey(key)).Err() -} - -func (s redisSessionStore) Exists(key string) bool { - result, err := s.client.Exists(context.Background(), s.toRedisKey(key)).Result() - if err != nil { - log.Logger().WithError(err).Error("Failed to check key existence in Redis session store") - return false - } - return result > 0 -} - -func (s redisSessionStore) Get(key string, target interface{}) error { - result, err := s.client.Get(context.Background(), s.toRedisKey(key)).Result() - if err != nil { - if errors.Is(redis.Nil, err) { - return ErrNotFound - } - return err - } - return json.Unmarshal([]byte(result), target) -} - -func (s redisSessionStore) Put(key string, value interface{}) error { - marshal, err := json.Marshal(value) - if err != nil { - return err - } - return s.client.Set(context.Background(), s.toRedisKey(key), marshal, s.ttl).Err() -} - -func (s redisSessionStore) GetAndDelete(key string, target interface{}) error { - // GetDel requires redis-server version >= 6.2.0. 
- result, err := s.client.GetDel(context.Background(), s.toRedisKey(key)).Result() - if err != nil { - if errors.Is(redis.Nil, err) { - return ErrNotFound - } - return err - } - return json.Unmarshal([]byte(result), target) + // nop } -func (s redisSessionStore) toRedisKey(key string) string { - if len(s.storeName) > 0 { - return strings.Join([]string{s.storeName, key}, ".") - } - return key +func (s redisSessionDatabase) getFullKey(prefixes []string, key string) string { + return strings.Join(append(prefixes, key), ".") } diff --git a/storage/session_redis_test.go b/storage/session_redis_test.go index b957c15b55..224c372d49 100644 --- a/storage/session_redis_test.go +++ b/storage/session_redis_test.go @@ -233,7 +233,7 @@ func TestRedisWithPrefixAll(t *testing.T) { t.Run("test prefix all", func(t *testing.T) { // PUT marshal, _ := json.Marshal(testValue) - mock.ExpectSet(expectedPrefix, marshal, time.Second).SetVal("") + mock.ExpectSet(expectedPrefix, string(marshal), time.Second).SetVal("") err := store.Put("last", testValue) assert.NoError(t, err) @@ -245,12 +245,12 @@ func TestRedisWithPrefixAll(t *testing.T) { assert.Equal(t, testValue, actual) // EXISTS False - mock.ExpectExists(expectedPrefix).SetVal(0) + mock.ExpectGet(expectedPrefix).SetVal("") exists := store.Exists("last") assert.False(t, exists) // EXISTS True - mock.ExpectExists(expectedPrefix).SetVal(1) + mock.ExpectGet(expectedPrefix).SetVal(string(marshal)) exists = store.Exists("last") assert.True(t, exists) @@ -282,7 +282,7 @@ func TestRedisWithPrefixDb(t *testing.T) { t.Run("test prefix db", func(t *testing.T) { // PUT marshal, _ := json.Marshal(testValue) - mock.ExpectSet(expectedPrefix, marshal, time.Second).SetVal("") + mock.ExpectSet(expectedPrefix, string(marshal), time.Second).SetVal("") err := store.Put("three", testValue) assert.NoError(t, err) @@ -294,12 +294,12 @@ func TestRedisWithPrefixDb(t *testing.T) { assert.Equal(t, testValue, actual) // EXISTS False - mock.ExpectExists(expectedPrefix).SetVal(0) + mock.ExpectGet(expectedPrefix).SetVal("") exists := store.Exists("three") assert.False(t, exists) // EXISTS True - mock.ExpectExists(expectedPrefix).SetVal(1) + mock.ExpectGet(expectedPrefix).SetVal(string(marshal)) exists = store.Exists("three") assert.True(t, exists) @@ -330,7 +330,7 @@ func TestRedisWithPrefixesClient(t *testing.T) { t.Run("test prefix db", func(t *testing.T) { // PUT marshal, _ := json.Marshal(testValue) - mock.ExpectSet(expectedPrefix, marshal, time.Second).SetVal("") + mock.ExpectSet(expectedPrefix, string(marshal), time.Second).SetVal("") err := store.Put("three", testValue) assert.NoError(t, err) @@ -342,12 +342,12 @@ func TestRedisWithPrefixesClient(t *testing.T) { assert.Equal(t, testValue, actual) // EXISTS False - mock.ExpectExists(expectedPrefix).SetVal(0) + mock.ExpectGet(expectedPrefix).SetVal("") exists := store.Exists("three") assert.False(t, exists) // EXISTS True - mock.ExpectExists(expectedPrefix).SetVal(1) + mock.ExpectGet(expectedPrefix).SetVal(string(marshal)) exists = store.Exists("three") assert.True(t, exists) @@ -379,7 +379,7 @@ func TestRedisWithPrefixNone(t *testing.T) { t.Run("test prefix none", func(t *testing.T) { // PUT marshal, _ := json.Marshal(testValue) - mock.ExpectSet(expectedPrefix, marshal, time.Second).SetVal("") + mock.ExpectSet(expectedPrefix, string(marshal), time.Second).SetVal("") err := store.Put("three", testValue) assert.NoError(t, err) @@ -391,12 +391,12 @@ func TestRedisWithPrefixNone(t *testing.T) { assert.Equal(t, testValue, actual) // EXISTS 
False - mock.ExpectExists(expectedPrefix).SetVal(0) + mock.ExpectGet(expectedPrefix).SetVal("") exists := store.Exists("three") assert.False(t, exists) // EXISTS True - mock.ExpectExists(expectedPrefix).SetVal(1) + mock.ExpectGet(expectedPrefix).SetVal(string(marshal)) exists = store.Exists("three") assert.True(t, exists) diff --git a/storage/test.go b/storage/test.go index 549b564cdd..164878511b 100644 --- a/storage/test.go +++ b/storage/test.go @@ -26,6 +26,7 @@ import ( "github.com/nuts-foundation/go-stoabs" "github.com/nuts-foundation/nuts-node/test/io" "testing" + "time" "github.com/nuts-foundation/go-did/did" "github.com/nuts-foundation/go-stoabs/bbolt" @@ -138,3 +139,51 @@ type nilGooseLogger struct{} func (m nilGooseLogger) Printf(format string, v ...interface{}) {} func (m nilGooseLogger) Fatalf(format string, v ...interface{}) {} + +var _ SessionDatabase = (*errorSessionDatabase)(nil) +var _ SessionStore = (*errorSessionStore)(nil) + +// NewErrorSessionDatabase creates a SessionDatabase that always returns an error. +func NewErrorSessionDatabase(err error) SessionDatabase { + return errorSessionDatabase{err: err} +} + +type errorSessionDatabase struct { + err error +} + +type errorSessionStore struct { + err error +} + +func (e errorSessionDatabase) GetStore(ttl time.Duration, keys ...string) SessionStore { + return errorSessionStore{err: e.err} +} + +func (e errorSessionDatabase) getFullKey(prefixes []string, key string) string { + return "" +} + +func (e errorSessionDatabase) close() { + // nop +} + +func (e errorSessionStore) Delete(key string) error { + return e.err +} + +func (e errorSessionStore) Exists(key string) bool { + return false +} + +func (e errorSessionStore) Get(key string, target interface{}) error { + return e.err +} + +func (e errorSessionStore) Put(key string, value interface{}) error { + return e.err +} + +func (e errorSessionStore) GetAndDelete(key string, target interface{}) error { + return e.err +}
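Illustration (not part of the patch): a minimal sketch of how calling code might use the session storage API that this change keeps identical across the in-memory, Redis and Memcached back-ends. The example package, token type and storeAndLoad helper below are hypothetical; only SessionDatabase, SessionStore, GetStore, Put, GetAndDelete and ErrNotFound come from the storage package shown above.

package example

import (
	"errors"
	"time"

	"github.com/nuts-foundation/nuts-node/storage"
)

// token is a hypothetical value kept in the session database; Put marshals values to JSON before storing them.
type token struct {
	Subject string `json:"subject"`
}

// storeAndLoad shows the Put/GetAndDelete round trip offered by every SessionDatabase implementation.
func storeAndLoad(db storage.SessionDatabase) error {
	// GetStore namespaces all keys under the given prefixes and applies the TTL to every Put.
	store := db.GetStore(15*time.Minute, "oauth", "accesstoken")
	if err := store.Put("abc123", token{Subject: "did:web:example.com"}); err != nil {
		return err
	}
	var got token
	err := store.GetAndDelete("abc123", &got) // read once; the entry is removed afterwards
	if errors.Is(err, storage.ErrNotFound) {
		return nil // expired or already consumed
	}
	return err
}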