From da5b4975229666404a917df7dd4dc352664b1439 Mon Sep 17 00:00:00 2001
From: Dusan Borovcanin
Date: Fri, 5 Jul 2024 15:49:07 +0200
Subject: [PATCH] MG-2187 - Simplify Magistrala core repository (#1)

Signed-off-by: Dusan Borovcanin
---
 .dockerignore                                 | 7 +
 .github/CODEOWNERS                            | 1 +
 .github/ISSUE_TEMPLATE/bug_report.yml         | 52 +
 .github/ISSUE_TEMPLATE/config.yml             | 11 +
 .github/ISSUE_TEMPLATE/feature_request.yml    | 39 +
 .github/PULL_REQUEST_TEMPLATE.md              | 69 +
 .github/dependabot.yml                        | 19 +
 .github/workflows/build.yml                   | 62 +
 .github/workflows/check-license.yaml          | 31 +
 .github/workflows/tests.yml                   | 161 +
 .gitignore                                    | 3 +
 .golangci.yml                                 | 97 +
 CONTRIBUTING.md                               | 87 +
 Makefile                                      | 253 ++
 certs/README.md                               | 130 +
 certs/api/doc.go                              | 5 +
 certs/api/endpoint.go                         | 104 +
 certs/api/endpoint_test.go                    | 528 ++++
 certs/api/logging.go                          | 131 +
 certs/api/metrics.go                          | 81 +
 certs/api/requests.go                         | 88 +
 certs/api/responses.go                        | 73 +
 certs/api/transport.go                        | 120 +
 certs/certs.go                                | 86 +
 certs/certs_test.go                           | 93 +
 certs/doc.go                                  | 6 +
 certs/mocks/certs.go                          | 162 ++
 certs/mocks/doc.go                            | 5 +
 certs/mocks/pki.go                            | 135 +
 certs/mocks/service.go                        | 172 ++
 certs/pki/doc.go                              | 8 +
 certs/pki/vault.go                            | 271 ++
 certs/postgres/certs.go                       | 201 ++
 certs/postgres/doc.go                         | 6 +
 certs/postgres/init.go                        | 29 +
 certs/postgres/setup_test.go                  | 88 +
 certs/service.go                              | 206 ++
 certs/service_test.go                         | 448 +++
 certs/tracing/doc.go                          | 12 +
 certs/tracing/tracing.go                      | 79 +
 cmd/cassandra-reader/main.go                  | 152 +
 cmd/cassandra-writer/main.go                  | 155 +
 cmd/certs/main.go                             | 194 ++
 cmd/influxdb-reader/main.go                   | 161 +
 cmd/influxdb-writer/main.go                   | 161 +
 cmd/lora/main.go                              | 233 ++
 cmd/mongodb-reader/main.go                    | 147 +
 cmd/mongodb-writer/main.go                    | 148 +
 cmd/opcua/main.go                             | 212 ++
 cmd/postgres-reader/main.go                   | 155 +
 cmd/postgres-writer/main.go                   | 154 +
 cmd/provision/main.go                         | 190 ++
 cmd/smpp-notifier/main.go                     | 189 ++
 cmd/smtp-notifier/main.go                     | 203 ++
 cmd/timescale-reader/main.go                  | 153 +
 cmd/timescale-writer/main.go                  | 156 +
 cmd/twins/main.go                             | 242 ++
 consumers/README.md                           | 18 +
 consumers/consumer.go                         | 30 +
 consumers/doc.go                              | 6 +
 consumers/messages.go                         | 159 +
 consumers/notifiers/README.md                 | 23 +
 consumers/notifiers/api/doc.go                | 6 +
 consumers/notifiers/api/endpoint.go           | 103 +
 consumers/notifiers/api/endpoint_test.go      | 548 ++++
 consumers/notifiers/api/logging.go            | 131 +
 consumers/notifiers/api/metrics.go            | 81 +
 consumers/notifiers/api/requests.go           | 55 +
 consumers/notifiers/api/responses.go          | 88 +
 consumers/notifiers/api/transport.go          | 132 +
 consumers/notifiers/doc.go                    | 6 +
 consumers/notifiers/mocks/doc.go              | 5 +
 consumers/notifiers/mocks/notifier.go         | 47 +
 consumers/notifiers/mocks/repository.go       | 133 +
 consumers/notifiers/mocks/service.go          | 151 +
 consumers/notifiers/notifier.go               | 22 +
 consumers/notifiers/postgres/database.go      | 74 +
 consumers/notifiers/postgres/doc.go           | 6 +
 consumers/notifiers/postgres/init.go          | 28 +
 consumers/notifiers/postgres/setup_test.go    | 89 +
 consumers/notifiers/postgres/subscriptions.go | 164 ++
 .../notifiers/postgres/subscriptions_test.go  | 263 ++
 consumers/notifiers/service.go                | 174 ++
 consumers/notifiers/service_test.go           | 377 +++
 consumers/notifiers/smpp/README.md            | 51 +
 consumers/notifiers/smpp/config.go            | 21 +
 consumers/notifiers/smpp/doc.go               | 6 +
 consumers/notifiers/smpp/notifier.go          | 67 +
 consumers/notifiers/smtp/README.md            | 51 +
 consumers/notifiers/smtp/doc.go               | 6 +
 consumers/notifiers/smtp/notifier.go          | 40 +
 consumers/notifiers/subscriptions.go          | 48 +
 consumers/notifiers/tracing/doc.go            | 12 +
 consumers/notifiers/tracing/subscriptions.go  | 73 +
 consumers/tracing/consumers.go                | 132 +
 consumers/writers/README.md                   | 16 +
 consumers/writers/api/doc.go                  | 6 +
 consumers/writers/api/logging.go              | 47 +
 consumers/writers/api/metrics.go              | 41 +
 consumers/writers/api/transport.go            | 21 +
 consumers/writers/cassandra/README.md         | 81 +
 consumers/writers/cassandra/consumer.go       | 102 +
 consumers/writers/cassandra/consumer_test.go  | 122 +
 consumers/writers/cassandra/doc.go            | 6 +
 consumers/writers/cassandra/init.go           | 36 +
 consumers/writers/cassandra/setup_test.go     | 83 +
 consumers/writers/doc.go                      | 6 +
 consumers/writers/influxdb/README.md          | 105 +
 consumers/writers/influxdb/consumer.go        | 164 ++
 consumers/writers/influxdb/consumer_test.go   | 476 +++
 consumers/writers/influxdb/doc.go             | 6 +
 consumers/writers/influxdb/fields.go          | 35 +
 consumers/writers/influxdb/setup_test.go      | 95 +
 consumers/writers/influxdb/tags.go            | 28 +
 consumers/writers/mongodb/README.md           | 65 +
 consumers/writers/mongodb/consumer.go         | 84 +
 consumers/writers/mongodb/consumer_test.go    | 134 +
 consumers/writers/mongodb/doc.go              | 6 +
 consumers/writers/mongodb/setup_test.go       | 56 +
 consumers/writers/postgres/README.md          | 77 +
 consumers/writers/postgres/consumer.go        | 213 ++
 consumers/writers/postgres/consumer_test.go   | 112 +
 consumers/writers/postgres/doc.go             | 6 +
 consumers/writers/postgres/init.go            | 46 +
 consumers/writers/postgres/setup_test.go      | 85 +
 consumers/writers/timescale/README.md         | 76 +
 consumers/writers/timescale/consumer.go       | 198 ++
 consumers/writers/timescale/consumer_test.go  | 112 +
 consumers/writers/timescale/doc.go            | 6 +
 consumers/writers/timescale/init.go           | 39 +
 consumers/writers/timescale/setup_test.go     | 85 +
 doc.go                                        | 4 +
 docker/.env                                   | 641 ++++
 docker/Dockerfile                             | 23 +
 docker/Dockerfile.dev                         | 8 +
 docker/README.md                              | 134 +
 docker/addons/bootstrap/docker-compose.yml    | 83 +
 .../cassandra-reader/docker-compose.yml       | 77 +
 docker/addons/cassandra-writer/config.toml    | 19 +
 .../cassandra-writer/docker-compose.yml       | 66 +
 docker/addons/cassandra-writer/init.sh        | 11 +
 docker/addons/certs/docker-compose.yml        | 90 +
 .../addons/influxdb-reader/docker-compose.yml | 87 +
 docker/addons/influxdb-writer/config.toml     | 19 +
 .../addons/influxdb-writer/docker-compose.yml | 72 +
 docker/addons/journal/docker-compose.yml      | 67 +
 docker/addons/lora-adapter/docker-compose.yml | 46 +
 .../addons/mongodb-reader/docker-compose.yml  | 76 +
 docker/addons/mongodb-writer/config.toml      | 19 +
 .../addons/mongodb-writer/docker-compose.yml  | 59 +
 .../addons/opcua-adapter/docker-compose.yml   | 49 +
 .../addons/postgres-reader/docker-compose.yml | 80 +
 docker/addons/postgres-writer/config.toml     | 19 +
 .../addons/postgres-writer/docker-compose.yml | 63 +
 docker/addons/prometheus/docker-compose.yml   | 53 +
 .../addons/prometheus/grafana/dashboard.yml   | 15 +
 .../addons/prometheus/grafana/datasource.yml  | 12 +
 .../prometheus/grafana/example-dashboard.json | 1317 +++++++++
 .../addons/prometheus/metrics/prometheus.yml  | 22 +
 docker/addons/provision/configs/config.toml   | 74 +
 docker/addons/provision/docker-compose.yml    | 45 +
 docker/addons/smpp-notifier/config.toml       | 8 +
 .../addons/smpp-notifier/docker-compose.yml   | 91 +
 docker/addons/smtp-notifier/config.toml       | 8 +
 .../addons/smtp-notifier/docker-compose.yml   | 90 +
 .../timescale-reader/docker-compose.yml       | 80 +
 docker/addons/timescale-writer/config.toml    | 8 +
 .../timescale-writer/docker-compose.yml       | 65 +
 docker/addons/twins/docker-compose.yml        | 91 +
 docker/addons/vault/.gitignore                | 5 +
 docker/addons/vault/README.md                 | 170 ++
 docker/addons/vault/config.hcl                | 10 +
 docker/addons/vault/docker-compose.yml        | 39 +
 docker/addons/vault/entrypoint.sh             | 25 +
 ...magistrala_things_certs_issue.template.hcl | 32 +
 docker/addons/vault/vault_cmd.sh              | 24 +
 docker/addons/vault/vault_copy_certs.sh       | 53 +
 docker/addons/vault/vault_copy_env.sh         | 24 +
 docker/addons/vault/vault_create_approle.sh   | 97 +
 docker/addons/vault/vault_init.sh             | 24 +
 docker/addons/vault/vault_set_pki.sh          | 229 ++
 docker/addons/vault/vault_unseal.sh           | 24 +
 docker/docker-compose.yml                     | 765 +++++
 docker/nats/nats.conf                         | 27 +
 docker/nginx/.gitignore                       | 5 +
 docker/nginx/entrypoint.sh                    | 26 +
 docker/nginx/nginx-key.conf                   | 211 ++
 docker/nginx/nginx-x509.conf                  | 232 ++
 docker/nginx/snippets/http_access_log.conf    | 8 +
 .../nginx/snippets/mqtt-upstream-cluster.conf | 9 +
 .../nginx/snippets/mqtt-upstream-single.conf  | 6 +
 .../snippets/mqtt-ws-upstream-cluster.conf    | 9 +
 .../snippets/mqtt-ws-upstream-single.conf     | 6 +
 docker/nginx/snippets/proxy-headers.conf      | 15 +
 docker/nginx/snippets/ssl-client.conf         | 5 +
 docker/nginx/snippets/ssl.conf                | 16 +
 docker/nginx/snippets/stream_access_log.conf  | 7 +
 docker/nginx/snippets/verify-ssl-client.conf  | 9 +
 docker/nginx/snippets/ws-upgrade.conf         | 9 +
 docker/spicedb/schema.zed                     | 78 +
 docker/ssl/.gitignore                         | 7 +
 docker/ssl/Makefile                           | 170 ++
 docker/ssl/authorization.js                   | 181 ++
 docker/ssl/certs/ca.crt                       | 23 +
 docker/ssl/certs/ca.key                       | 28 +
 docker/ssl/certs/magistrala-server.crt        | 26 +
 docker/ssl/certs/magistrala-server.key        | 52 +
 docker/ssl/dhparam.pem                        | 8 +
 docker/templates/smtp-notifier.tmpl           | 8 +
 docker/templates/users.tmpl                   | 13 +
 docker/vernemq/Dockerfile                     | 56 +
 docker/vernemq/bin/vernemq.sh                 | 352 +++
 docker/vernemq/files/vm.args                  | 15 +
 go.mod                                        | 141 +
 go.sum                                        | 571 ++++
 lora/README.md                                | 87 +
 lora/adapter.go                               | 179 ++
 lora/adapter_test.go                          | 478 +++
 lora/api/api.go                               | 21 +
 lora/api/doc.go                               | 6 +
 lora/api/logging.go                           | 189 ++
 lora/api/metrics.go                           | 112 +
 lora/doc.go                                   | 6 +
 lora/events/doc.go                            | 6 +
 lora/events/events.go                         | 27 +
 lora/events/routemap.go                       | 58 +
 lora/events/streams.go                        | 175 ++
 lora/message.go                               | 47 +
 lora/mocks/doc.go                             | 5 +
 lora/mocks/routes.go                          | 94 +
 lora/mqtt/doc.go                              | 6 +
 lora/mqtt/sub.go                              | 62 +
 lora/routemap.go                              | 20 +
 opcua/README.md                               | 77 +
 opcua/adapter.go                              | 200 ++
 opcua/api/doc.go                              | 6 +
 opcua/api/endpoint.go                         | 34 +
 opcua/api/logging.go                          | 191 ++
 opcua/api/metrics.go                          | 112 +
 opcua/api/requests.go                         | 21 +
 opcua/api/responses.go                        | 29 +
 opcua/api/transport.go                        | 132 +
 opcua/browser.go                              | 20 +
 opcua/db/doc.go                               | 5 +
 opcua/db/subs.go                              | 81 +
 opcua/doc.go                                  | 5 +
 opcua/events/doc.go                           | 6 +
 opcua/events/events.go                        | 27 +
 opcua/events/routemap.go                      | 62 +
 opcua/events/streams.go                       | 184 ++
 opcua/gopcua/browser.go                       | 228 ++
 opcua/gopcua/doc.go                           | 5 +
 opcua/gopcua/subscribe.go                     | 251 ++
 opcua/routemap.go                             | 18 +
 opcua/subscriber.go                           | 12 +
 pkg/api/common.go                             | 209 ++
 pkg/api/common_test.go                        | 338 +++
 pkg/api/doc.go                                | 6 +
 pkg/clients/cassandra/cassandra.go            | 72 +
 pkg/clients/cassandra/doc.go                  | 9 +
 pkg/clients/doc.go                            | 6 +
 pkg/clients/influxdb/doc.go                   | 9 +
 pkg/clients/influxdb/influxdb.go              | 57 +
 pkg/clients/mongo/doc.go                      | 9 +
 pkg/clients/mongo/mongo.go                    | 51 +
 pkg/clients/redis/doc.go                      | 9 +
 pkg/clients/redis/redis.go                    | 16 +
 pkg/email/README.md                           | 21 +
 pkg/email/doc.go                              | 6 +
 pkg/email/email.go                            | 110 +
 pkg/groups/api/decode.go                      | 288 ++
 pkg/groups/api/decode_test.go                 | 784 +++++
 pkg/groups/api/doc.go                         | 6 +
 pkg/groups/api/endpoint_test.go               | 1016 +++++++
 pkg/groups/api/endpoints.go                   | 319 ++
 pkg/groups/api/logging.go                     | 250 ++
 pkg/groups/api/metrics.go                     | 129 +
 pkg/groups/api/requests.go                    | 203 +
 pkg/groups/api/requests_test.go               | 516 ++++
 pkg/groups/api/responses.go                   | 231 ++
 pkg/groups/events/doc.go                      | 5 +
 pkg/groups/events/events.go                   | 271 ++
 pkg/groups/events/streams.go                  | 211 ++
 pkg/groups/postgres/doc.go                    | 5 +
 pkg/groups/postgres/groups.go                 | 499 ++++
 pkg/groups/postgres/groups_test.go            | 1213 ++++++++
 pkg/groups/postgres/init.go                   | 38 +
 pkg/groups/postgres/setup_test.go             | 94 +
 pkg/groups/service.go                         | 780 +++++
 pkg/groups/service_test.go                    | 2583 +++++++++++++++++
 pkg/groups/status.go                          | 58 +
 pkg/groups/status_test.go                     | 50 +
 pkg/groups/tracing/doc.go                     | 12 +
 pkg/groups/tracing/tracing.go                 | 112 +
 pkg/testsutil/common.go                       | 19 +
 provision/README.md                           | 194 ++
 provision/api/doc.go                          | 6 +
 provision/api/endpoint.go                     | 54 +
 provision/api/endpoint_test.go                | 210 ++
 provision/api/logging.go                      | 77 +
 provision/api/requests.go                     | 40 +
 provision/api/requests_test.go                | 86 +
 provision/api/responses.go                    | 55 +
 provision/api/transport.go                    | 76 +
 provision/config.go                           | 103 +
 provision/config_test.go                      | 222 ++
 provision/configs/config.toml                 | 47 +
 provision/doc.go                              | 6 +
 provision/mocks/service.go                    | 122 +
 provision/service.go                          | 414 +++
 provision/service_test.go                     | 222 ++
 readers/README.md                             | 7 +
 readers/api/doc.go                            | 6 +
 readers/api/endpoint.go                       | 39 +
 readers/api/endpoint_test.go                  | 1020 +++++++
 readers/api/logging.go                        | 56 +
 readers/api/metrics.go                        | 39 +
 readers/api/requests.go                       | 67 +
 readers/api/responses.go                      | 31 +
 readers/api/transport.go                      | 280 ++
 readers/cassandra/README.md                   | 100 +
 readers/cassandra/doc.go                      | 5 +
 readers/cassandra/messages.go                 | 195 ++
 readers/cassandra/messages_test.go            | 591 ++++
 readers/cassandra/setup_test.go               | 83 +
 readers/doc.go                                | 5 +
 readers/influxdb/README.md                    | 126 +
 readers/influxdb/doc.go                       | 6 +
 readers/influxdb/messages.go                  | 312 ++
 readers/influxdb/messages_test.go             | 726 +++++
 readers/influxdb/setup_test.go                | 100 +
 readers/messages.go                           | 84 +
 readers/mocks/doc.go                          | 5 +
 readers/mocks/messages.go                     | 57 +
 readers/mongodb/README.md                     | 96 +
 readers/mongodb/doc.go                        | 6 +
 readers/mongodb/messages.go                   | 149 +
 readers/mongodb/messages_test.go              | 549 ++++
 readers/mongodb/setup_test.go                 | 59 +
 readers/postgres/README.md                    | 101 +
 readers/postgres/doc.go                       | 6 +
 readers/postgres/init.go                      | 80 +
 readers/postgres/messages.go                  | 199 ++
 readers/postgres/messages_test.go             | 687 +++++
 readers/postgres/setup_test.go                | 83 +
 readers/timescale/README.md                   | 99 +
 readers/timescale/doc.go                      | 6 +
 readers/timescale/init.go                     | 80 +
 readers/timescale/messages.go                 | 201 ++
 readers/timescale/messages_test.go            | 684 +++++
 readers/timescale/setup_test.go               | 84 +
 twins/README.md                               | 103 +
 twins/api/doc.go                              | 6 +
 twins/api/http/doc.go                         | 5 +
 twins/api/http/endpoint.go                    | 179 ++
 twins/api/http/endpoint_states_test.go        | 306 ++
 twins/api/http/endpoint_twins_test.go         | 865 ++++++
 twins/api/http/requests.go                    | 121 +
 twins/api/http/responses.go                   | 146 +
 twins/api/http/transport.go                   | 176 ++
 twins/api/logging.go                          | 168 ++
 twins/api/metrics.go                          | 95 +
 twins/doc.go                                  | 9 +
 twins/events/doc.go                           | 6 +
 twins/events/events.go                        | 252 ++
 twins/events/setup_test.go                    | 61 +
 twins/events/streams.go                       | 155 +
 twins/events/twins.go                         | 128 +
 twins/events/twins_test.go                    | 291 ++
 twins/mocks/cache.go                          | 133 +
 twins/mocks/create.go                         | 55 +
 twins/mocks/doc.go                            | 5 +
 twins/mocks/messages.go                       | 35 +
 twins/mocks/repository.go                     | 181 ++
 twins/mocks/service.go                        | 199 ++
 twins/mocks/states.go                         | 151 +
 twins/mongodb/doc.go                          | 6 +
 twins/mongodb/init.go                         | 33 +
 twins/mongodb/setup_test.go                   | 56 +
 twins/mongodb/states.go                       | 156 +
 twins/mongodb/states_test.go                  | 164 ++
 twins/mongodb/twins.go                        | 210 ++
 twins/mongodb/twins_test.go                   | 388 +++
 twins/service.go                              | 427 +++
 twins/service_test.go                         | 596 ++++
 twins/states.go                               | 45 +
 twins/tracing/doc.go                          | 6 +
 twins/tracing/states.go                       | 69 +
 twins/tracing/twins.go                        | 130 +
 twins/twins.go                                | 100 +
 390 files changed, 50271 insertions(+)
 create mode 100644 .dockerignore
 create mode 100644 .github/CODEOWNERS
 create mode 100644 .github/ISSUE_TEMPLATE/bug_report.yml
 create mode 100644 .github/ISSUE_TEMPLATE/config.yml
 create mode 100644 .github/ISSUE_TEMPLATE/feature_request.yml
 create mode 100644 .github/PULL_REQUEST_TEMPLATE.md
 create mode 100644 .github/dependabot.yml
 create mode 100644 .github/workflows/build.yml
 create mode 100644 .github/workflows/check-license.yaml
 create mode 100644 .github/workflows/tests.yml
 create mode 100644 .golangci.yml
 create mode 100644 CONTRIBUTING.md
 create mode 100644 Makefile
 create mode 100644 certs/README.md
 create mode 100644 certs/api/doc.go
 create mode 100644 certs/api/endpoint.go
 create mode 100644 certs/api/endpoint_test.go
 create mode 100644 certs/api/logging.go
 create mode 100644 certs/api/metrics.go
 create mode 100644 certs/api/requests.go
 create mode 100644 certs/api/responses.go
 create mode 100644 certs/api/transport.go
 create mode 100644 certs/certs.go
 create mode 100644 certs/certs_test.go
 create mode 100644 certs/doc.go
 create mode 100644 certs/mocks/certs.go
 create mode 100644 certs/mocks/doc.go
 create mode 100644 certs/mocks/pki.go
 create mode 100644 certs/mocks/service.go
 create mode 100644 certs/pki/doc.go
 create mode 100644 certs/pki/vault.go
 create mode 100644 certs/postgres/certs.go
 create mode 100644 certs/postgres/doc.go
 create mode 100644 certs/postgres/init.go
 create mode 100644 certs/postgres/setup_test.go
 create mode 100644 certs/service.go
 create mode 100644 certs/service_test.go
 create mode 100644 certs/tracing/doc.go
 create mode 100644 certs/tracing/tracing.go
 create mode 100644 cmd/cassandra-reader/main.go
 create mode 100644 cmd/cassandra-writer/main.go
 create mode 100644 cmd/certs/main.go
 create mode 100644 cmd/influxdb-reader/main.go
 create mode 100644 cmd/influxdb-writer/main.go
 create mode 100644 cmd/lora/main.go
 create mode 100644 cmd/mongodb-reader/main.go
 create mode 100644 cmd/mongodb-writer/main.go
 create mode 100644 cmd/opcua/main.go
 create mode 100644 cmd/postgres-reader/main.go
 create mode 100644 cmd/postgres-writer/main.go
 create mode 100644 cmd/provision/main.go
 create mode 100644 cmd/smpp-notifier/main.go
 create mode 100644 cmd/smtp-notifier/main.go
 create mode 100644 cmd/timescale-reader/main.go
 create mode 100644 cmd/timescale-writer/main.go
 create mode 100644 cmd/twins/main.go
 create mode 100644 consumers/README.md
 create mode 100644 consumers/consumer.go
 create mode 100644 consumers/doc.go
 create mode 100644 consumers/messages.go
 create mode 100644 consumers/notifiers/README.md
 create mode 100644 consumers/notifiers/api/doc.go
 create mode 100644 consumers/notifiers/api/endpoint.go
 create mode 100644 consumers/notifiers/api/endpoint_test.go
 create mode 100644 consumers/notifiers/api/logging.go
 create mode 100644 consumers/notifiers/api/metrics.go
 create mode 100644 consumers/notifiers/api/requests.go
 create mode 100644 consumers/notifiers/api/responses.go
 create mode 100644 consumers/notifiers/api/transport.go
 create mode 100644 consumers/notifiers/doc.go
 create mode 100644 consumers/notifiers/mocks/doc.go
 create mode 100644 consumers/notifiers/mocks/notifier.go
 create mode 100644 consumers/notifiers/mocks/repository.go
 create mode 100644 consumers/notifiers/mocks/service.go
 create mode 100644 consumers/notifiers/notifier.go
 create mode 100644 consumers/notifiers/postgres/database.go
 create mode 100644 consumers/notifiers/postgres/doc.go
 create mode 100644 consumers/notifiers/postgres/init.go
 create mode 100644 consumers/notifiers/postgres/setup_test.go
 create mode 100644 consumers/notifiers/postgres/subscriptions.go
 create mode 100644 consumers/notifiers/postgres/subscriptions_test.go
 create mode 100644 consumers/notifiers/service.go
 create mode 100644 consumers/notifiers/service_test.go
 create mode 100644 consumers/notifiers/smpp/README.md
 create mode 100644 consumers/notifiers/smpp/config.go
 create mode 100644 consumers/notifiers/smpp/doc.go
 create mode 100644 consumers/notifiers/smpp/notifier.go
 create mode 100644 consumers/notifiers/smtp/README.md
 create mode 100644 consumers/notifiers/smtp/doc.go
 create mode 100644 consumers/notifiers/smtp/notifier.go
 create mode 100644 consumers/notifiers/subscriptions.go
 create mode 100644 consumers/notifiers/tracing/doc.go
 create mode 100644 consumers/notifiers/tracing/subscriptions.go
 create mode 100644 consumers/tracing/consumers.go
 create mode 100644 consumers/writers/README.md
 create mode 100644 consumers/writers/api/doc.go
 create mode 100644 consumers/writers/api/logging.go
 create mode 100644 consumers/writers/api/metrics.go
 create mode 100644 consumers/writers/api/transport.go
 create mode 100644 consumers/writers/cassandra/README.md
 create mode 100644 consumers/writers/cassandra/consumer.go
 create mode 100644 consumers/writers/cassandra/consumer_test.go
 create mode 100644 consumers/writers/cassandra/doc.go
 create mode 100644 consumers/writers/cassandra/init.go
 create mode 100644 consumers/writers/cassandra/setup_test.go
 create mode 100644 consumers/writers/doc.go
 create mode 100644 consumers/writers/influxdb/README.md
 create mode 100644 consumers/writers/influxdb/consumer.go
 create mode 100644 consumers/writers/influxdb/consumer_test.go
 create mode 100644 consumers/writers/influxdb/doc.go
 create mode 100644 consumers/writers/influxdb/fields.go
 create mode 100644 consumers/writers/influxdb/setup_test.go
 create mode 100644 consumers/writers/influxdb/tags.go
 create mode 100644 consumers/writers/mongodb/README.md
 create mode 100644 consumers/writers/mongodb/consumer.go
 create mode 100644 consumers/writers/mongodb/consumer_test.go
 create mode 100644 consumers/writers/mongodb/doc.go
 create mode 100644 consumers/writers/mongodb/setup_test.go
 create mode 100644 consumers/writers/postgres/README.md
 create mode 100644 consumers/writers/postgres/consumer.go
 create mode 100644 consumers/writers/postgres/consumer_test.go
 create mode 100644 consumers/writers/postgres/doc.go
 create mode 100644 consumers/writers/postgres/init.go
 create mode 100644 consumers/writers/postgres/setup_test.go
 create mode 100644 consumers/writers/timescale/README.md
 create mode 100644 consumers/writers/timescale/consumer.go
 create mode 100644 consumers/writers/timescale/consumer_test.go
 create mode 100644 consumers/writers/timescale/doc.go
 create mode 100644 consumers/writers/timescale/init.go
 create mode 100644 consumers/writers/timescale/setup_test.go
 create mode 100644 doc.go
 create mode 100644 docker/.env
 create mode 100644 docker/Dockerfile
 create mode 100644 docker/Dockerfile.dev
 create mode 100644 docker/README.md
 create mode 100644 docker/addons/bootstrap/docker-compose.yml
 create mode 100644 docker/addons/cassandra-reader/docker-compose.yml
 create mode 100644 docker/addons/cassandra-writer/config.toml
 create mode 100644 docker/addons/cassandra-writer/docker-compose.yml
 create mode 100755 docker/addons/cassandra-writer/init.sh
 create mode 100644 docker/addons/certs/docker-compose.yml
 create mode 100644 docker/addons/influxdb-reader/docker-compose.yml
 create mode 100644 docker/addons/influxdb-writer/config.toml
 create mode 100644 docker/addons/influxdb-writer/docker-compose.yml
 create mode 100644 docker/addons/journal/docker-compose.yml
 create mode 100644 docker/addons/lora-adapter/docker-compose.yml
 create mode 100644 docker/addons/mongodb-reader/docker-compose.yml
 create mode 100644 docker/addons/mongodb-writer/config.toml
 create mode 100644 docker/addons/mongodb-writer/docker-compose.yml
 create mode 100644 docker/addons/opcua-adapter/docker-compose.yml
 create mode 100644 docker/addons/postgres-reader/docker-compose.yml
 create mode 100644 docker/addons/postgres-writer/config.toml
 create mode 100644 docker/addons/postgres-writer/docker-compose.yml
 create mode 100644 docker/addons/prometheus/docker-compose.yml
 create mode 100644 docker/addons/prometheus/grafana/dashboard.yml
 create mode 100644 docker/addons/prometheus/grafana/datasource.yml
 create mode 100644 docker/addons/prometheus/grafana/example-dashboard.json
 create mode 100644 docker/addons/prometheus/metrics/prometheus.yml
 create mode 100644 docker/addons/provision/configs/config.toml
 create mode 100644 docker/addons/provision/docker-compose.yml
 create mode 100644 docker/addons/smpp-notifier/config.toml
 create mode 100644 docker/addons/smpp-notifier/docker-compose.yml
 create mode 100644 docker/addons/smtp-notifier/config.toml
 create mode 100644 docker/addons/smtp-notifier/docker-compose.yml
 create mode 100644 docker/addons/timescale-reader/docker-compose.yml
 create mode 100644 docker/addons/timescale-writer/config.toml
 create mode 100644 docker/addons/timescale-writer/docker-compose.yml
 create mode 100644 docker/addons/twins/docker-compose.yml
 create mode 100644 docker/addons/vault/.gitignore
 create mode 100644 docker/addons/vault/README.md
 create mode 100644 docker/addons/vault/config.hcl
 create mode 100644 docker/addons/vault/docker-compose.yml
 create mode 100644 docker/addons/vault/entrypoint.sh
 create mode 100644 docker/addons/vault/magistrala_things_certs_issue.template.hcl
 create mode 100644 docker/addons/vault/vault_cmd.sh
 create mode 100755 docker/addons/vault/vault_copy_certs.sh
 create mode 100755 docker/addons/vault/vault_copy_env.sh
 create mode 100755 docker/addons/vault/vault_create_approle.sh
 create mode 100755 docker/addons/vault/vault_init.sh
 create mode 100755 docker/addons/vault/vault_set_pki.sh
 create mode 100755 docker/addons/vault/vault_unseal.sh
 create mode 100644 docker/docker-compose.yml
 create mode 100644 docker/nats/nats.conf
 create mode 100644 docker/nginx/.gitignore
 create mode 100755 docker/nginx/entrypoint.sh
 create mode 100644 docker/nginx/nginx-key.conf
 create mode 100644 docker/nginx/nginx-x509.conf
 create mode 100644 docker/nginx/snippets/http_access_log.conf
 create mode 100644 docker/nginx/snippets/mqtt-upstream-cluster.conf
 create mode 100644 docker/nginx/snippets/mqtt-upstream-single.conf
 create mode 100644 docker/nginx/snippets/mqtt-ws-upstream-cluster.conf
 create mode 100644 docker/nginx/snippets/mqtt-ws-upstream-single.conf
 create mode 100644 docker/nginx/snippets/proxy-headers.conf
 create mode 100644 docker/nginx/snippets/ssl-client.conf
 create mode 100644 docker/nginx/snippets/ssl.conf
 create mode 100644 docker/nginx/snippets/stream_access_log.conf
 create mode 100644 docker/nginx/snippets/verify-ssl-client.conf
 create mode 100644 docker/nginx/snippets/ws-upgrade.conf
 create mode 100644 docker/spicedb/schema.zed
 create mode 100644 docker/ssl/.gitignore
 create mode 100644 docker/ssl/Makefile
 create mode 100644 docker/ssl/authorization.js
 create mode 100644 docker/ssl/certs/ca.crt
 create mode 100644 docker/ssl/certs/ca.key
 create mode 100644 docker/ssl/certs/magistrala-server.crt
 create mode 100644 docker/ssl/certs/magistrala-server.key
 create mode 100644 docker/ssl/dhparam.pem
 create mode 100644 docker/templates/smtp-notifier.tmpl
 create mode 100644 docker/templates/users.tmpl
 create mode 100644 docker/vernemq/Dockerfile
 create mode 100755 docker/vernemq/bin/vernemq.sh
 create mode 100644 docker/vernemq/files/vm.args
 create mode 100644 go.mod
 create mode 100644 go.sum
 create mode 100644 lora/README.md
 create mode 100644 lora/adapter.go
 create mode 100644 lora/adapter_test.go
 create mode 100644 lora/api/api.go
 create mode 100644 lora/api/doc.go
 create mode 100644 lora/api/logging.go
 create mode 100644 lora/api/metrics.go
 create mode 100644 lora/doc.go
 create mode 100644 lora/events/doc.go
 create mode 100644 lora/events/events.go
 create mode 100644 lora/events/routemap.go
 create mode 100644 lora/events/streams.go
 create mode 100644 lora/message.go
 create mode 100644 lora/mocks/doc.go
 create mode 100644 lora/mocks/routes.go
 create mode 100644 lora/mqtt/doc.go
 create mode 100644 lora/mqtt/sub.go
 create mode 100644 lora/routemap.go
 create mode 100644 opcua/README.md
 create mode 100644 opcua/adapter.go
 create mode 100644 opcua/api/doc.go
 create mode 100644 opcua/api/endpoint.go
 create mode 100644 opcua/api/logging.go
 create mode 100644 opcua/api/metrics.go
 create mode 100644 opcua/api/requests.go
 create mode 100644 opcua/api/responses.go
 create mode 100644 opcua/api/transport.go
 create mode 100644 opcua/browser.go
 create mode 100644 opcua/db/doc.go
 create mode 100644 opcua/db/subs.go
 create mode 100644 opcua/doc.go
 create mode 100644 opcua/events/doc.go
 create mode 100644 opcua/events/events.go
 create mode 100644 opcua/events/routemap.go
 create mode 100644 opcua/events/streams.go
 create mode 100644 opcua/gopcua/browser.go
 create mode 100644 opcua/gopcua/doc.go
 create mode 100644 opcua/gopcua/subscribe.go
 create mode 100644 opcua/routemap.go
 create mode 100644 opcua/subscriber.go
 create mode 100644 pkg/api/common.go
 create mode 100644 pkg/api/common_test.go
 create mode 100644 pkg/api/doc.go
 create mode 100644 pkg/clients/cassandra/cassandra.go
 create mode 100644 pkg/clients/cassandra/doc.go
 create mode 100644 pkg/clients/doc.go
 create mode 100644 pkg/clients/influxdb/doc.go
 create mode 100644 pkg/clients/influxdb/influxdb.go
 create mode 100644 pkg/clients/mongo/doc.go
 create mode 100644 pkg/clients/mongo/mongo.go
 create mode 100644 pkg/clients/redis/doc.go
 create mode 100644 pkg/clients/redis/redis.go
 create mode 100644 pkg/email/README.md
 create mode 100644 pkg/email/doc.go
 create mode 100644 pkg/email/email.go
 create mode 100644 pkg/groups/api/decode.go
 create mode 100644 pkg/groups/api/decode_test.go
 create mode 100644 pkg/groups/api/doc.go
 create mode 100644 pkg/groups/api/endpoint_test.go
 create mode 100644 pkg/groups/api/endpoints.go
 create mode 100644 pkg/groups/api/logging.go
 create mode 100644 pkg/groups/api/metrics.go
 create mode 100644 pkg/groups/api/requests.go
 create mode 100644 pkg/groups/api/requests_test.go
 create mode 100644 pkg/groups/api/responses.go
 create mode 100644 pkg/groups/events/doc.go
 create mode 100644 pkg/groups/events/events.go
 create mode 100644 pkg/groups/events/streams.go
 create mode 100644 pkg/groups/postgres/doc.go
 create mode 100644 pkg/groups/postgres/groups.go
 create mode 100644 pkg/groups/postgres/groups_test.go
 create mode 100644 pkg/groups/postgres/init.go
 create mode 100644 pkg/groups/postgres/setup_test.go
 create mode 100644 pkg/groups/service.go
 create mode 100644 pkg/groups/service_test.go
 create mode 100644 pkg/groups/status.go
 create mode 100644 pkg/groups/status_test.go
 create mode 100644 pkg/groups/tracing/doc.go
 create mode 100644 pkg/groups/tracing/tracing.go
 create mode 100644 pkg/testsutil/common.go
 create mode 100644 provision/README.md
 create mode 100644 provision/api/doc.go
 create mode 100644 provision/api/endpoint.go
 create mode 100644 provision/api/endpoint_test.go
 create mode 100644 provision/api/logging.go
 create mode 100644 provision/api/requests.go
 create mode 100644 provision/api/requests_test.go
 create mode 100644 provision/api/responses.go
 create mode 100644 provision/api/transport.go
 create mode 100644 provision/config.go
 create mode 100644 provision/config_test.go
 create mode 100644 provision/configs/config.toml
 create mode 100644 provision/doc.go
 create mode 100644 provision/mocks/service.go
 create mode 100644 provision/service.go
 create mode 100644 provision/service_test.go
 create mode 100644 readers/README.md
 create mode 100644 readers/api/doc.go
 create mode 100644 readers/api/endpoint.go
 create mode 100644 readers/api/endpoint_test.go
 create mode 100644 readers/api/logging.go
 create mode 100644 readers/api/metrics.go
 create mode 100644 readers/api/requests.go
 create mode 100644 readers/api/responses.go
 create mode 100644 readers/api/transport.go
 create mode 100644 readers/cassandra/README.md
 create mode 100644 readers/cassandra/doc.go
 create mode 100644 readers/cassandra/messages.go
 create mode 100644 readers/cassandra/messages_test.go
 create mode 100644 readers/cassandra/setup_test.go
 create mode 100644 readers/doc.go
 create mode 100644 readers/influxdb/README.md
 create mode 100644 readers/influxdb/doc.go
 create mode 100644 readers/influxdb/messages.go
 create mode 100644 readers/influxdb/messages_test.go
 create mode 100644 readers/influxdb/setup_test.go
 create mode 100644 readers/messages.go
 create mode 100644 readers/mocks/doc.go
 create mode 100644 readers/mocks/messages.go
 create mode 100644 readers/mongodb/README.md
 create mode 100644 readers/mongodb/doc.go
 create mode 100644 readers/mongodb/messages.go
 create mode 100644 readers/mongodb/messages_test.go
 create mode 100644 readers/mongodb/setup_test.go
 create mode 100644 readers/postgres/README.md
 create mode 100644 readers/postgres/doc.go
 create mode 100644 readers/postgres/init.go
 create mode 100644 readers/postgres/messages.go
 create mode 100644 readers/postgres/messages_test.go
 create mode 100644 readers/postgres/setup_test.go
 create mode 100644 readers/timescale/README.md
 create mode 100644 readers/timescale/doc.go
 create mode 100644 readers/timescale/init.go
 create mode 100644 readers/timescale/messages.go
 create mode 100644 readers/timescale/messages_test.go
 create mode 100644 readers/timescale/setup_test.go
 create mode 100644 twins/README.md
 create mode 100644 twins/api/doc.go
 create mode 100644 twins/api/http/doc.go
 create mode 100644 twins/api/http/endpoint.go
 create mode 100644 twins/api/http/endpoint_states_test.go
 create mode 100644 twins/api/http/endpoint_twins_test.go
 create mode 100644 twins/api/http/requests.go
 create mode 100644 twins/api/http/responses.go
 create mode 100644 twins/api/http/transport.go
 create mode 100644 twins/api/logging.go
 create mode 100644 twins/api/metrics.go
 create mode 100644 twins/doc.go
 create mode 100644 twins/events/doc.go
 create mode 100644 twins/events/events.go
 create mode 100644 twins/events/setup_test.go
 create mode 100644 twins/events/streams.go
 create mode 100644 twins/events/twins.go
 create mode 100644 twins/events/twins_test.go
 create mode 100644 twins/mocks/cache.go
 create mode 100644 twins/mocks/create.go
 create mode 100644 twins/mocks/doc.go
 create mode 100644 twins/mocks/messages.go
 create mode 100644 twins/mocks/repository.go
 create mode 100644 twins/mocks/service.go
 create mode 100644 twins/mocks/states.go
 create mode 100644 twins/mongodb/doc.go
 create mode 100644 twins/mongodb/init.go
 create mode 100644 twins/mongodb/setup_test.go
 create mode 100644 twins/mongodb/states.go
 create mode 100644 twins/mongodb/states_test.go
 create mode 100644 twins/mongodb/twins.go
 create mode 100644 twins/mongodb/twins_test.go
 create mode 100644 twins/service.go
 create mode 100644 twins/service_test.go
 create mode 100644 twins/states.go
 create mode 100644 twins/tracing/doc.go
 create mode 100644 twins/tracing/states.go
 create mode 100644 twins/tracing/twins.go
 create mode 100644 twins/twins.go

diff --git a/.dockerignore b/.dockerignore
new file mode 100644
index 0000000..3b2eba5
--- /dev/null
+++ b/.dockerignore
@@ -0,0 +1,7 @@
+# Copyright (c) Abstract Machines
+# SPDX-License-Identifier: Apache-2.0
+
+.git
+.github
+build
+docker
diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS
new file mode 100644
index 0000000..bc8cb18
--- /dev/null
+++ b/.github/CODEOWNERS
@@ -0,0 +1 @@
+* @absmach/magistrala
diff --git a/.github/ISSUE_TEMPLATE/bug_report.yml b/.github/ISSUE_TEMPLATE/bug_report.yml
new file mode 100644
index 0000000..ef96f9a
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/bug_report.yml
@@ -0,0 +1,52 @@
+# Copyright (c) Abstract Machines
+# SPDX-License-Identifier: Apache-2.0
+
+name: Bug Report
+description: File a bug/issue report. Make sure to search to see if an issue already exists for the bug you encountered.
+title: "Bug: <title>"
+labels: ["bug", "needs-review", "help wanted"]
+body:
+  - type: textarea
+    attributes:
+      label: What were you trying to achieve?
+      description: A clear and concise description of what the bug is.
+    validations:
+      required: true
+  - type: textarea
+    attributes:
+      label: What are the expected results?
+      description: A concise description of what you expected to happen.
+    validations:
+      required: true
+  - type: textarea
+    attributes:
+      label: What are the received results?
+      description: A concise description of what you received.
+    validations:
+      required: true
+  - type: textarea
+    attributes:
+      label: Steps To Reproduce
+      description: What are the steps to reproduce the issue?
+      placeholder: |
+        1. In this environment...
+        2. With this config...
+        3. Run '...'
+        4. See error...
+    validations:
+      required: false
+  - type: textarea
+    attributes:
+      label: In what environment did you encounter the issue?
+      description: A concise description of the environment you encountered the issue in.
+    validations:
+      required: true
+  - type: textarea
+    attributes:
+      label: Additional information you deem important
+      description: |
+        Links? References? Anything that will give us more context about the issue you are encountering!
+
+        Tip: You can attach images or log files by clicking this area to highlight it and then dragging files in.
+    validations:
+      required: false
diff --git a/.github/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml
new file mode 100644
index 0000000..2fb1e56
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/config.yml
@@ -0,0 +1,11 @@
+# Copyright (c) Abstract Machines
+# SPDX-License-Identifier: Apache-2.0
+
+blank_issues_enabled: false
+contact_links:
+  - name: Google group
+    url: https://groups.google.com/forum/#!forum/mainflux
+    about: Join the Magistrala community on the Google group.
+  - name: Gitter
+    url: https://gitter.im/mainflux/mainflux
+    about: Join the Magistrala community on Gitter.
diff --git a/.github/ISSUE_TEMPLATE/feature_request.yml b/.github/ISSUE_TEMPLATE/feature_request.yml
new file mode 100644
index 0000000..db34ad6
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/feature_request.yml
@@ -0,0 +1,39 @@
+# Copyright (c) Abstract Machines
+# SPDX-License-Identifier: Apache-2.0
+
+name: Feature Request
+description: File a feature request. Make sure to search to see if a request already exists for the feature you are requesting.
+title: "Feature: <title>"
+labels: ["enhancement", "needs-review"]
+body:
+  - type: textarea
+    attributes:
+      label: Is your feature request related to a problem? Please describe.
+      description: A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]
+    validations:
+      required: true
+  - type: textarea
+    attributes:
+      label: Describe the feature you are requesting, as well as the possible use case(s) for it.
+      description: A clear and concise description of what you want to happen.
+    validations:
+      required: true
+  - type: dropdown
+    attributes:
+      label: Indicate the importance of this feature to you.
+      description: This will help us prioritize the feature request.
+      options:
+        - Must-have
+        - Should-have
+        - Nice-to-have
+    validations:
+      required: true
+  - type: textarea
+    attributes:
+      label: Anything else?
+      description: |
+        Links? References? Anything that will give us more context about the feature that you are requesting.
+
+        Tip: You can attach images or log files by clicking this area to highlight it and then dragging files in.
+    validations:
+      required: false
diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md
new file mode 100644
index 0000000..bbe61bd
--- /dev/null
+++ b/.github/PULL_REQUEST_TEMPLATE.md
@@ -0,0 +1,69 @@
+<!-- Copyright (c) Abstract Machines
+SPDX-License-Identifier: Apache-2.0 -->
+
+<!--
+
+Pull request title should be `MG-XXX - description` or `NOISSUE - description`, where XXX is the ID of the issue that this PR relates to.
+Please review the [CONTRIBUTING.md](https://github.com/absmach/magistrala/blob/main/CONTRIBUTING.md) file for detailed contributing guidelines.
+
+For Work In Progress Pull Requests, please use the Draft PR feature; see https://github.blog/2019-02-14-introducing-draft-pull-requests/ for further details.
+
+For a timely review/response, please avoid force-pushing additional commits if your PR already received reviews or comments.
+
+- Provide tests for your changes.
+- Use descriptive commit messages.
+- Comment your code where appropriate.
+- Squash your commits.
+- Update any related documentation.
+-->
+
+# What type of PR is this?
+
+<!--This represents the type of PR you are submitting.
+
+For example:
+This is a bug fix because it fixes the following issue: #1234
+This is a feature because it adds the following functionality: ...
+This is a refactor because it changes the following functionality: ...
+This is a documentation update because it updates the following documentation: ...
+This is a dependency update because it updates the following dependencies: ...
+This is an optimization because it improves the following functionality: ...
+-->
+
+## What does this do?
+
+<!--
+Please provide a brief description of what this PR is intended to do.
+List any changes that modify or break current functionality.
+-->
+
+## Which issue(s) does this PR fix/relate to?
+
+<!--
+For pull requests that relate to or close an issue, please include them below. We like to follow [GitHub's guidance on linking issues to pull requests](https://docs.github.com/en/issues/tracking-your-work-with-issues/linking-a-pull-request-to-an-issue).
+
+For example, having the text "Resolves #1234" would connect the current pull request to issue 1234, and when we merge the pull request, GitHub will automatically close the issue.
+-->
+
+- Related Issue #
+- Resolves #
+
+## Have you included tests for your changes?
+
+<!--If you have not included tests, please explain why.
+For example:
+Yes, I have included tests for my changes.
+No, I have not included tests because I do not know how to.
+-->
+
+## Did you document any new/modified features?
+
+<!--If you have not included documentation, please explain why.
+For example:
+Yes, I have updated the documentation for the new feature.
+No, I have not updated the documentation because I do not know how to.
+-->
+
+### Notes
+
+<!--Please provide any additional information you feel is important.-->
diff --git a/.github/dependabot.yml b/.github/dependabot.yml
new file mode 100644
index 0000000..ce6fd6f
--- /dev/null
+++ b/.github/dependabot.yml
@@ -0,0 +1,19 @@
+# Copyright (c) Abstract Machines
+# SPDX-License-Identifier: Apache-2.0
+
+version: 2
+updates:
+  - package-ecosystem: "github-actions"
+    directory: "./.github/workflows"
+    schedule:
+      interval: "weekly"
+
+  - package-ecosystem: "gomod"
+    directory: "/"
+    schedule:
+      interval: "weekly"
+
+  - package-ecosystem: "docker"
+    directory: "./docker"
+    schedule:
+      interval: "weekly"
diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml
new file mode 100644
index 0000000..ff811a2
--- /dev/null
+++ b/.github/workflows/build.yml
@@ -0,0 +1,62 @@
+# Copyright (c) Abstract Machines
+# SPDX-License-Identifier: Apache-2.0
+
+name: Continuous Delivery
+
+on:
+  push:
+    branches:
+      - main
+
+jobs:
+  build-and-push:
+    name: Build and Push
+    runs-on: ubuntu-latest
+
+    steps:
+      - name: Checkout code
+        uses: actions/checkout@v4
+
+      - name: Fetch tags for the build
+        run: |
+          git fetch --prune --unshallow --tags
+
+      - name: Install Go
+        uses: actions/setup-go@v5
+        with:
+          go-version: 1.22.x
+          cache-dependency-path: "go.sum"
+
+      - name: Run tests
+        run: |
+          make test
+
+      - name: Upload coverage
+        uses: codecov/codecov-action@v4
+        with:
+          token: ${{ secrets.CODECOV }}
+          directory: ./coverage/
+          name: codecov-umbrella
+          verbose: true
+
+      - name: Set up Docker Build
+        uses: docker/setup-buildx-action@v3
+
+      - name: Login to DockerHub
+        uses: docker/login-action@v3
+        with:
+          registry: docker.io
+          username: ${{ secrets.DOCKER_USERNAME }}
+          password: ${{ secrets.DOCKER_TOKEN }}
+
+      - name: Compile check for rabbitmq
+        run: |
+          MG_MESSAGE_BROKER_TYPE=rabbitmq make mqtt
+
+      - name: Compile check for redis
+        run: |
+          MG_ES_TYPE=redis make mqtt
+
+      - name: Build and push Dockers
+        run: |
+          make latest -j $(nproc)
diff --git a/.github/workflows/check-license.yaml b/.github/workflows/check-license.yaml
new file mode 100644
index 0000000..10da327
--- /dev/null
+++ b/.github/workflows/check-license.yaml
@@ -0,0 +1,31 @@
+# Copyright (c) Abstract Machines
+# SPDX-License-Identifier: Apache-2.0
+
+name: Check License Header
+
+on:
+  push:
+    branches:
+      - main
+  pull_request:
+    branches:
+      - main
+
+jobs:
+  check-license:
+    runs-on: ubuntu-latest
+    steps:
+      - name: Checkout code
+        uses: actions/checkout@v4
+
+      - name: Check License Header
+        run: |
+          CHECK=$(grep -rcL --exclude-dir={.git,build,vernemq} \
+            --exclude=\*.{crt,key,pem,zed,hcl,md,json,csv,mod,sum,tmpl,args,gitignore,srl,csr} \
+            --exclude={CODEOWNERS,LICENSE,MAINTAINERS} \
+            --regexp "Copyright (c) Abstract Machines" .)
+          if [ "$CHECK" ]; then
+            echo "License header check failed. Fix the following files:"
+            echo "$CHECK"
+            exit 1
+          fi
diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml
new file mode 100644
index 0000000..7cd8231
--- /dev/null
+++ b/.github/workflows/tests.yml
@@ -0,0 +1,161 @@
+# Copyright (c) Abstract Machines
+# SPDX-License-Identifier: Apache-2.0
+
+name: CI Pipeline
+
+on:
+  pull_request:
+    branches:
+      - main
+
+jobs:
+  lint-and-build: # Linting and building are combined to save time for setting up Go
+    name: Lint and Build
+    runs-on: ubuntu-latest
+
+    steps:
+      - name: Checkout Code
+        uses: actions/checkout@v4
+
+      - name: Setup Go
+        uses: actions/setup-go@v5
+        with:
+          go-version: 1.22.x
+          cache-dependency-path: "go.sum"
+
+      - name: golangci-lint
+        uses: golangci/golangci-lint-action@v6
+        with:
+          version: v1.59.1
+
+      - name: Build all Binaries
+        run: |
+          make all -j $(nproc)
+
+  run-tests:
+    name: Run tests
+    runs-on: ubuntu-latest
+    needs: lint-and-build
+
+    steps:
+      - name: Checkout
+        uses: actions/checkout@v4
+        with:
+          fetch-depth: 0
+
+      - name: Setup Go
+        uses: actions/setup-go@v5
+        with:
+          go-version: 1.22.x
+          cache-dependency-path: "go.sum"
+
+      - name: Check for changes in specific paths
+        uses: dorny/paths-filter@v3
+        id: changes
+        with:
+          base: main
+          filters: |
+            workflow:
+              - ".github/workflows/tests.yml"
+
+            certs:
+              - "certs/**"
+              - "cmd/certs/**"
+              - "auth.pb.go"
+              - "auth_grpc.pb.go"
+              - "auth/**"
+              - "pkg/sdk/**"
+
+            consumers:
+              - "consumers/**"
+              - "cmd/cassandra-writer/**"
+              - "cmd/influxdb-writer/**"
+              - "cmd/mongodb-writer/**"
+              - "cmd/postgres-writer/**"
+              - "cmd/timescale-writer/**"
+              - "cmd/smpp-notifier/**"
+              - "cmd/smtp-notifier/**"
+              - "auth.pb.go"
+              - "auth_grpc.pb.go"
+              - "auth/**"
+              - "pkg/ulid/**"
+              - "pkg/uuid/**"
+              - "pkg/messaging/**"
+
+            lora:
+              - "lora/**"
+              - "cmd/lora/**"
+              - "pkg/messaging/**"
+
+            opcua:
+              - "opcua/**"
+              - "cmd/opcua/**"
+              - "logger/**"
+
+            provision:
+              - "provision/**"
+              - "cmd/provision/**"
+
+            readers:
+              - "readers/**"
+              - "cmd/cassandra-reader/**"
+              - "cmd/influxdb-reader/**"
+              - "cmd/mongodb-reader/**"
+              - "cmd/postgres-reader/**"
+              - "cmd/timescale-reader/**"
+              - "auth.pb.go"
+              - "auth_grpc.pb.go"
+              - "things/**"
+              - "auth/**"
+
+            twins:
+              - "twins/**"
+              - "cmd/twins/**"
+              - "auth.pb.go"
+              - "auth_grpc.pb.go"
+              - "auth/**"
+              - "pkg/messaging/**"
+              - "pkg/ulid/**"
+              - "pkg/uuid/**"
+              - "logger/**"
+
+      - name: Create coverage directory
+        run: |
+          mkdir coverage
+
+      - name: Run certs tests
+        if: steps.changes.outputs.certs == 'true' || steps.changes.outputs.workflow == 'true'
+        run: |
+          go test --race -v -count=1 -coverprofile=coverage/certs.out ./certs/...
+
+      - name: Run consumers tests
+        if: steps.changes.outputs.consumers == 'true' || steps.changes.outputs.workflow == 'true'
+        run: |
+          go test --race -v -count=1 -coverprofile=coverage/consumers.out ./consumers/...
+
+      - name: Run LoRa tests
+        if: steps.changes.outputs.lora == 'true' || steps.changes.outputs.workflow == 'true'
+        run: |
+          go test --race -v -count=1 -coverprofile=coverage/lora.out ./lora/...
+
+      - name: Run OPC-UA tests
+        if: steps.changes.outputs.opcua == 'true' || steps.changes.outputs.workflow == 'true'
+        run: |
+          go test --race -v -count=1 -coverprofile=coverage/opcua.out ./opcua/...
+
+      - name: Run provision tests
+        if: steps.changes.outputs.provision == 'true' || steps.changes.outputs.workflow == 'true'
+        run: |
+          go test --race -v -count=1 -coverprofile=coverage/provision.out ./provision/...
+
+      - name: Run readers tests
+        if: steps.changes.outputs.readers == 'true' || steps.changes.outputs.workflow == 'true'
+        run: |
+          go test --race -v -count=1 -coverprofile=coverage/readers.out ./readers/...
+
+      - name: Run twins tests
+        if: steps.changes.outputs.twins == 'true' || steps.changes.outputs.workflow == 'true'
+        run: |
+          go test --race -v -count=1 -coverprofile=coverage/twins.out ./twins/...
+
+      - name: Upload coverage
+        uses: codecov/codecov-action@v4
+        with:
+          token: ${{ secrets.CODECOV }}
+          directory: ./coverage/
+          name: codecov-umbrella
+          verbose: true
diff --git a/.gitignore b/.gitignore
index 3b735ec..2c13abe 100644
--- a/.gitignore
+++ b/.gitignore
@@ -19,3 +19,6 @@
 # Go workspace file
 go.work
+
+# Project specific files and dirs
+build
\ No newline at end of file
diff --git a/.golangci.yml b/.golangci.yml
new file mode 100644
index 0000000..89ad8f9
--- /dev/null
+++ b/.golangci.yml
@@ -0,0 +1,97 @@
+# Copyright (c) Abstract Machines
+# SPDX-License-Identifier: Apache-2.0
+
+run:
+  timeout: 10m
+  build-tags:
+    - "nats"
+
+issues:
+  max-issues-per-linter: 100
+  max-same-issues: 100
+  exclude:
+    - "string `Usage:\n` has (\\d+) occurrences, make it a constant"
+    - "string `For example:\n` has (\\d+) occurrences, make it a constant"
+
+linters-settings:
+  importas:
+    no-unaliased: true
+    no-extra-aliases: false
+    alias:
+      - pkg: github.com/absmach/callhome/pkg/client
+        alias: chclient
+      - pkg: github.com/absmach/magistrala/logger
+        alias: mglog
+      - pkg: github.com/absmach/magistrala/pkg/errors/service
+        alias: svcerr
+      - pkg: github.com/absmach/magistrala/pkg/errors/repository
+        alias: repoerr
+      - pkg: github.com/absmach/magistrala/pkg/sdk/mocks
+        alias: sdkmocks
+
+  gocritic:
+    enabled-checks:
+      - importShadow
+      - httpNoBody
+      - paramTypeCombine
+      - emptyStringTest
+      - builtinShadow
+      - exposedSyncMutex
+    disabled-checks:
+      - appendAssign
+    enabled-tags:
+      - diagnostic
+    disabled-tags:
+      - performance
+      - style
+      - experimental
+      - opinionated
+  misspell:
+    ignore-words:
+      - "mosquitto"
+  stylecheck:
+    checks: ["-ST1000", "-ST1003", "-ST1020", "-ST1021", "-ST1022"]
+  goheader:
+    template: |-
+      Copyright (c) Abstract Machines
+      SPDX-License-Identifier: Apache-2.0
+
+linters:
+  disable-all: true
+  enable:
+    - gocritic
+    - gosimple
+    - errcheck
+    - govet
+    - unused
+    - goconst
+    - godot
+    - godox
+    - ineffassign
+    - misspell
+    - stylecheck
+    - whitespace
+    - gci
+    - gofmt
+    - goimports
+    - loggercheck
+    - goheader
+    - asasalint
+    - asciicheck
+    - bidichk
+    - contextcheck
+    - decorder
+    - dogsled
+    - errchkjson
+    - errname
+    - execinquery
+    - exportloopref
+    - ginkgolinter
+    - gocheckcompilerdirectives
+    - gofumpt
+    - goprintffuncname
+    - importas
+    - makezero
+    - mirror
+    - nakedret
+    - dupword
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
new file mode 100644
index 0000000..35a196a
--- /dev/null
+++ b/CONTRIBUTING.md
@@ -0,0 +1,87 @@
+# Contributing to Magistrala
+
+The following is a set of guidelines to contribute to Magistrala and its libraries, which are
+hosted on the [Abstract Machines Organization](https://github.com/absmach) on GitHub.
+
+This project adheres to the [Contributor Covenant 1.2](http://contributor-covenant.org/version/1/2/0).
+By participating, you are expected to uphold this code. Please report unacceptable behavior to
+[abuse@magistrala.com](mailto:abuse@magistrala.com).
+
+## Reporting issues
+
+Reporting issues is a great way to contribute to the project. We are perpetually grateful for a well-written,
+thorough bug report.
+
+Before raising a new issue, check [our issue
+list](https://github.com/absmach/magistrala/issues) to determine if it already contains the
+problem that you are facing.
+
+A good bug report shouldn't leave others needing to chase you for more information. Please be as
+detailed as possible. The following questions might serve as a template for writing a detailed
+report:
+
+- What were you trying to achieve?
+- What are the expected results?
+- What are the received results?
+- What are the steps to reproduce the issue?
+- In what environment did you encounter the issue?
+
+## Pull requests
+
+Good pull requests (e.g. patches, improvements, new features) are a fantastic help. They should
+remain focused in scope and avoid unrelated commits.
+
+**Please ask first** before embarking on any significant pull request (e.g. implementing new features,
+refactoring code, etc.), otherwise you risk spending a lot of time working on something that the
+maintainers might not want to merge into the project.
+
+Please adhere to the coding conventions used throughout the project. If in doubt, consult the
+[Effective Go](https://golang.org/doc/effective_go.html) style guide.
+
+To contribute to the project, [fork](https://help.github.com/articles/fork-a-repo/) it,
+clone your fork repository, and configure the remotes:
+
+```
+git clone https://github.com/<your-username>/magistrala.git
+cd magistrala
+git remote add upstream https://github.com/absmach/magistrala.git
+```
+
+If your cloned repository is behind the upstream commits, then get the latest changes from upstream:
+
+```
+git checkout main
+git pull --rebase upstream main
+```
+
+Create a new topic branch from `main` using the naming convention `MG-[issue-number]`
+to help us keep track of your contribution scope:
+
+```
+git checkout -b MG-[issue-number]
+```
+
+Commit your changes in logical chunks. When you are ready to commit, make sure
+to write a Good Commit Messageā„¢. Consult the [Erlang's contributing guide](https://github.com/erlang/otp/wiki/Writing-good-commit-messages)
+if you're unsure of what constitutes a Good Commit Messageā„¢. Use [interactive rebase](https://help.github.com/articles/about-git-rebase)
+to group your commits into logical units of work before making them public.
+
+Note that every commit you make must be signed. By signing off your work you indicate that you
+are accepting the [Developer Certificate of Origin](https://developercertificate.org/).
+
+Use your real name (sorry, no pseudonyms or anonymous contributions). If you set your `user.name`
+and `user.email` git configs, you can sign your commit automatically with `git commit -s` (see the
+verification example at the end of this guide).
+
+Locally merge (or rebase) the upstream development branch into your topic branch:
+
+```
+git pull --rebase upstream main
+```
+
+Push your topic branch up to your fork:
+
+```
+git push origin MG-[issue-number]
+```
+
+[Open a Pull Request](https://help.github.com/articles/using-pull-requests/) with a clear title
+and detailed description.
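+
+Before opening the PR, it can help to verify locally that the branch builds and that every commit
+carries the required `Signed-off-by` trailer. A minimal sketch (the commit-listing format below is
+just one way to surface the trailers; the build targets come from the project Makefile):
+
+```
+# list branch commits together with their Signed-off-by trailers
+git log upstream/main..HEAD --format='%h %s%n%(trailers:key=Signed-off-by)'
+
+# build all binaries and run the test suite before pushing
+make all -j $(nproc)
+make test
+```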
diff --git a/Makefile b/Makefile new file mode 100644 index 0000000..a132224 --- /dev/null +++ b/Makefile @@ -0,0 +1,253 @@ +# Copyright (c) Abstract Machines +# SPDX-License-Identifier: Apache-2.0 + +MG_DOCKER_IMAGE_NAME_PREFIX ?= magistrala +BUILD_DIR = build +SERVICES = opcua lora influxdb-writer influxdb-reader mongodb-writer \ + mongodb-reader cassandra-writer cassandra-reader postgres-writer postgres-reader \ + timescale-writer timescale-reader twins provision certs smtp-notifier smpp-notifier +TEST_API_SERVICES = certs notifiers provision readers twins +TEST_API = $(addprefix test_api_,$(TEST_API_SERVICES)) +DOCKERS = $(addprefix docker_,$(SERVICES)) +DOCKERS_DEV = $(addprefix docker_dev_,$(SERVICES)) +CGO_ENABLED ?= 0 +GOARCH ?= amd64 +VERSION ?= $(shell git describe --abbrev=0 --tags 2>/dev/null || echo 'unknown') +COMMIT ?= $(shell git rev-parse HEAD) +TIME ?= $(shell date +%F_%T) +USER_REPO ?= $(shell git remote get-url origin | sed -e 's/.*\/\([^/]*\)\/\([^/]*\).*/\1_\2/' ) +empty:= +space:= $(empty) $(empty) +# Docker compose project name should follow this guidelines: https://docs.docker.com/compose/reference/#use--p-to-specify-a-project-name +DOCKER_PROJECT ?= $(shell echo $(subst $(space),,$(USER_REPO)) | tr -c -s '[:alnum:][=-=]' '_' | tr '[:upper:]' '[:lower:]') +DOCKER_COMPOSE_COMMANDS_SUPPORTED := up down config +DEFAULT_DOCKER_COMPOSE_COMMAND := up +GRPC_MTLS_CERT_FILES_EXISTS = 0 +MOCKERY_VERSION=v2.43.2 +ifneq ($(MG_MESSAGE_BROKER_TYPE),) + MG_MESSAGE_BROKER_TYPE := $(MG_MESSAGE_BROKER_TYPE) +else + MG_MESSAGE_BROKER_TYPE=nats +endif + +ifneq ($(MG_ES_TYPE),) + MG_ES_TYPE := $(MG_ES_TYPE) +else + MG_ES_TYPE=nats +endif + +define compile_service + CGO_ENABLED=$(CGO_ENABLED) GOOS=$(GOOS) GOARCH=$(GOARCH) GOARM=$(GOARM) \ + go build -tags $(MG_MESSAGE_BROKER_TYPE) --tags $(MG_ES_TYPE) -ldflags "-s -w \ + -X 'github.com/absmach/magistrala.BuildTime=$(TIME)' \ + -X 'github.com/absmach/magistrala.Version=$(VERSION)' \ + -X 'github.com/absmach/magistrala.Commit=$(COMMIT)'" \ + -o ${BUILD_DIR}/$(1) cmd/$(1)/main.go +endef + +define make_docker + $(eval svc=$(subst docker_,,$(1))) + + docker build \ + --no-cache \ + --build-arg SVC=$(svc) \ + --build-arg GOARCH=$(GOARCH) \ + --build-arg GOARM=$(GOARM) \ + --build-arg VERSION=$(VERSION) \ + --build-arg COMMIT=$(COMMIT) \ + --build-arg TIME=$(TIME) \ + --tag=$(MG_DOCKER_IMAGE_NAME_PREFIX)/$(svc) \ + -f docker/Dockerfile . 
+endef + +define make_docker_dev + $(eval svc=$(subst docker_dev_,,$(1))) + + docker build \ + --no-cache \ + --build-arg SVC=$(svc) \ + --tag=$(MG_DOCKER_IMAGE_NAME_PREFIX)/$(svc) \ + -f docker/Dockerfile.dev ./build +endef + +ADDON_SERVICES = bootstrap cassandra-reader cassandra-writer certs \ + influxdb-reader influxdb-writer lora-adapter mongodb-reader mongodb-writer \ + opcua-adapter postgres-reader postgres-writer provision smpp-notifier smtp-notifier \ + timescale-reader timescale-writer twins journal + +EXTERNAL_SERVICES = vault prometheus + +ifneq ($(filter run%,$(firstword $(MAKECMDGOALS))),) + temp_args := $(wordlist 2,$(words $(MAKECMDGOALS)),$(MAKECMDGOALS)) + DOCKER_COMPOSE_COMMAND := $(if $(filter $(DOCKER_COMPOSE_COMMANDS_SUPPORTED),$(temp_args)), $(filter $(DOCKER_COMPOSE_COMMANDS_SUPPORTED),$(temp_args)), $(DEFAULT_DOCKER_COMPOSE_COMMAND)) + $(eval $(DOCKER_COMPOSE_COMMAND):;@) +endif + +ifneq ($(filter run_addons%,$(firstword $(MAKECMDGOALS))),) + temp_args := $(wordlist 2,$(words $(MAKECMDGOALS)),$(MAKECMDGOALS)) + RUN_ADDON_ARGS := $(if $(filter-out $(DOCKER_COMPOSE_COMMANDS_SUPPORTED),$(temp_args)), $(filter-out $(DOCKER_COMPOSE_COMMANDS_SUPPORTED),$(temp_args)),$(ADDON_SERVICES) $(EXTERNAL_SERVICES)) + $(eval $(RUN_ADDON_ARGS):;@) +endif + +ifneq ("$(wildcard docker/ssl/certs/*-grpc-*)","") +GRPC_MTLS_CERT_FILES_EXISTS = 1 +else +GRPC_MTLS_CERT_FILES_EXISTS = 0 +endif + +FILTERED_SERVICES = $(filter-out $(RUN_ADDON_ARGS), $(SERVICES)) + +all: $(SERVICES) + +.PHONY: all $(SERVICES) dockers dockers_dev latest release run run_addons grpc_mtls_certs check_mtls check_certs test_api + +clean: + rm -rf ${BUILD_DIR} + +cleandocker: + # Stops containers and removes containers, networks, volumes, and images created by up + docker compose -f docker/docker-compose.yml -p $(DOCKER_PROJECT) down --rmi all -v --remove-orphans + +ifdef pv + # Remove unused volumes + docker volume ls -f name=$(MG_DOCKER_IMAGE_NAME_PREFIX) -f dangling=true -q | xargs -r docker volume rm +endif + +install: + for file in $(BUILD_DIR)/*; do \ + cp $$file $(GOBIN)/magistrala-`basename $$file`; \ + done + +mocks: + @which mockery > /dev/null || go install github.com/vektra/mockery/v2@$(MOCKERY_VERSION) + @unset MOCKERY_VERSION && go generate ./... + + +DIRS = consumers readers postgres internal opcua +test: mocks + mkdir -p coverage + @for dir in $(DIRS); do \ + go test -v --race -count 1 -tags test -coverprofile=coverage/$$dir.out $$(go list ./... | grep $$dir | grep -v 'cmd'); \ + done + go test -v --race -count 1 -tags test -coverprofile=coverage/coverage.out $$(go list ./... 
| grep -v 'consumers\|readers\|postgres\|internal\|opcua\|cmd') + +define test_api_service + $(eval svc=$(subst test_api_,,$(1))) + @which st > /dev/null || (echo "schemathesis not found, please install it from https://github.com/schemathesis/schemathesis#getting-started" && exit 1) + + @if [ -z "$(USER_TOKEN)" ]; then \ + echo "USER_TOKEN is not set"; \ + echo "Please set it to a valid token"; \ + exit 1; \ + fi + + @if [ "$(svc)" = "http" ] && [ -z "$(THING_SECRET)" ]; then \ + echo "THING_SECRET is not set"; \ + echo "Please set it to a valid secret"; \ + exit 1; \ + fi + + @if [ "$(svc)" = "http" ]; then \ + st run api/openapi/$(svc).yml \ + --checks all \ + --base-url $(2) \ + --header "Authorization: Thing $(THING_SECRET)" \ + --contrib-openapi-formats-uuid \ + --hypothesis-suppress-health-check=filter_too_much \ + --stateful=links; \ + else \ + st run api/openapi/$(svc).yml \ + --checks all \ + --base-url $(2) \ + --header "Authorization: Bearer $(USER_TOKEN)" \ + --contrib-openapi-formats-uuid \ + --hypothesis-suppress-health-check=filter_too_much \ + --stateful=links; \ + fi +endef + +test_api_certs: TEST_API_URL := http://localhost:9019 +test_api_twins: TEST_API_URL := http://localhost:9018 +test_api_provision: TEST_API_URL := http://localhost:9016 +test_api_readers: TEST_API_URL := http://localhost:9009 # This can be the URL of any reader service. +test_api_notifiers: TEST_API_URL := http://localhost:9014 # This can be the URL of any notifier service. + +$(TEST_API): + $(call test_api_service,$(@),$(TEST_API_URL)) + +$(FILTERED_SERVICES): + $(call compile_service,$(@)) + +$(DOCKERS): + $(call make_docker,$(@),$(GOARCH)) + +$(DOCKERS_DEV): + $(call make_docker_dev,$(@)) + +dockers: $(DOCKERS) +dockers_dev: $(DOCKERS_DEV) + +define docker_push + for svc in $(SERVICES); do \ + docker push $(MG_DOCKER_IMAGE_NAME_PREFIX)/$$svc:$(1); \ + done +endef + +changelog: + git log $(shell git describe --tags --abbrev=0)..HEAD --pretty=format:"- %s" + +latest: dockers + $(call docker_push,latest) + +release: + $(eval version = $(shell git describe --abbrev=0 --tags)) + git checkout $(version) + $(MAKE) dockers + for svc in $(SERVICES); do \ + docker tag $(MG_DOCKER_IMAGE_NAME_PREFIX)/$$svc $(MG_DOCKER_IMAGE_NAME_PREFIX)/$$svc:$(version); \ + done + $(call docker_push,$(version)) + +rundev: + cd scripts && ./run.sh + +grpc_mtls_certs: + $(MAKE) -C docker/ssl auth_grpc_certs things_grpc_certs + +check_tls: +ifeq ($(GRPC_TLS),true) + @unset GRPC_MTLS + @echo "gRPC TLS is enabled" + GRPC_MTLS= +else + @unset GRPC_TLS + GRPC_TLS= +endif + +check_mtls: +ifeq ($(GRPC_MTLS),true) + @unset GRPC_TLS + @echo "gRPC MTLS is enabled" + GRPC_TLS= +else + @unset GRPC_MTLS + GRPC_MTLS= +endif + +check_certs: check_mtls check_tls +ifeq ($(GRPC_MTLS_CERT_FILES_EXISTS),0) +ifeq ($(filter true,$(GRPC_MTLS) $(GRPC_TLS)),true) +ifeq ($(filter $(DEFAULT_DOCKER_COMPOSE_COMMAND),$(DOCKER_COMPOSE_COMMAND)),$(DEFAULT_DOCKER_COMPOSE_COMMAND)) + $(MAKE) -C docker/ssl auth_grpc_certs things_grpc_certs +endif +endif +endif + +run: check_certs + docker compose -f docker/docker-compose.yml --env-file docker/.env -p $(DOCKER_PROJECT) $(DOCKER_COMPOSE_COMMAND) $(args) + +run_addons: check_certs + $(foreach SVC,$(RUN_ADDON_ARGS),$(if $(filter $(SVC),$(ADDON_SERVICES) $(EXTERNAL_SERVICES)),,$(error Invalid Service $(SVC)))) + @for SVC in $(RUN_ADDON_ARGS); do \ + MG_ADDONS_CERTS_PATH_PREFIX="../." 
docker compose -f docker/addons/$$SVC/docker-compose.yml -p $(DOCKER_PROJECT) --env-file ./docker/.env $(DOCKER_COMPOSE_COMMAND) $(args) & \
+	done

diff --git a/certs/README.md b/certs/README.md
new file mode 100644
index 0000000..52124be
--- /dev/null
+++ b/certs/README.md
@@ -0,0 +1,130 @@
+# Certs Service
+
+Issues certificates for things. The `Certs` service creates certificates to be used when `Magistrala` is deployed to support mTLS.
+The service issues certificates in PKI mode, i.e. certificates are issued by a PKI: when `Vault` is deployed as the PKI certificate-management backend, the `certs` service proxies requests to `Vault`, first checking access rights and then saving info on each successfully created certificate.
+
+## PKI mode
+
+When `MG_CERTS_VAULT_HOST` is set, it is presumed that `Vault` is installed and the `certs` service will issue certificates using the `Vault` API.
+First you'll need to set up `Vault`.
+To set up `Vault`, follow the steps in [Build Your Own Certificate Authority (CA)](https://learn.hashicorp.com/tutorials/vault/pki-engine).
+
+For lab purposes you can use the docker-compose file and the PKI setup script described in [https://github.com/absmach/magistrala/blob/main/docker/addons/vault/README.md](https://github.com/absmach/magistrala/blob/main/docker/addons/vault/README.md).
+
+```bash
+MG_CERTS_VAULT_HOST=<https://vault-domain:8200>
+MG_CERTS_VAULT_NAMESPACE=<vault_namespace>
+MG_CERTS_VAULT_APPROLE_ROLEID=<vault_approle_roleid>
+MG_CERTS_VAULT_APPROLE_SECRET=<vault_approle_secret>
+MG_CERTS_VAULT_THINGS_CERTS_PKI_PATH=<vault_things_certs_pki_path>
+MG_CERTS_VAULT_THINGS_CERTS_PKI_ROLE_NAME=<vault_things_certs_issue_role_name>
+```
+
+Certificates can also be revoked using the `certs` service. To revoke a certificate, pass the `thing_id` of the thing for which the certificate was issued as the URL parameter of the revoke endpoint (`DELETE /certs/{id}`, as registered in `certs/api/transport.go`):
+
+```bash
+curl -s -S -X DELETE http://localhost:9019/certs/c30b8842-507c-4bcd-973c-74008cef3be5 -H "Authorization: Bearer $TOK"
+```
+
+## Configuration
+
+The service is configured using the environment variables presented in the following table. Note that any unset variables will be replaced with their default values.
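+
+With the service configured (see the table below), issuing a certificate is a single `POST` to the same HTTP API. A hedged example mirroring the revoke call above (the route and payload follow `certs/api/transport.go` in this patch; the token and `thing_id` are placeholders, and `ttl` must be a Go duration string such as `8760h`):
+
+```bash
+curl -s -S -X POST http://localhost:9019/certs -H "Authorization: Bearer $TOK" -H 'Content-Type: application/json' -d '{"thing_id":"c30b8842-507c-4bcd-973c-74008cef3be5","ttl":"8760h"}'
+```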
+
+| Variable                                  | Description                                                                  | Default                                                                 |
+| :---------------------------------------- | ---------------------------------------------------------------------------- | ----------------------------------------------------------------------- |
+| MG_CERTS_LOG_LEVEL                        | Log level for the Certs (debug, info, warn, error)                           | info                                                                    |
+| MG_CERTS_HTTP_HOST                        | Service Certs host                                                           | ""                                                                      |
+| MG_CERTS_HTTP_PORT                        | Service Certs port                                                           | 9019                                                                    |
+| MG_CERTS_HTTP_SERVER_CERT                 | Path to the PEM encoded server certificate file                              | ""                                                                      |
+| MG_CERTS_HTTP_SERVER_KEY                  | Path to the PEM encoded server key file                                      | ""                                                                      |
+| MG_AUTH_GRPC_URL                          | Auth service gRPC URL                                                        | [localhost:8181](localhost:8181)                                        |
+| MG_AUTH_GRPC_TIMEOUT                      | Auth service gRPC request timeout in seconds                                 | 1s                                                                      |
+| MG_AUTH_GRPC_CLIENT_CERT                  | Path to the PEM encoded auth service gRPC client certificate file            | ""                                                                      |
+| MG_AUTH_GRPC_CLIENT_KEY                   | Path to the PEM encoded auth service gRPC client key file                    | ""                                                                      |
+| MG_AUTH_GRPC_SERVER_CERTS                 | Path to the PEM encoded auth server gRPC server trusted CA certificate file  | ""                                                                      |
+| MG_CERTS_SIGN_CA_PATH                     | Path to the PEM encoded CA certificate file                                  | ca.crt                                                                  |
+| MG_CERTS_SIGN_CA_KEY_PATH                 | Path to the PEM encoded CA key file                                          | ca.key                                                                  |
+| MG_CERTS_VAULT_HOST                       | Vault host                                                                   | http://vault:8200                                                       |
+| MG_CERTS_VAULT_NAMESPACE                  | Vault namespace in which pki is present                                      | magistrala                                                              |
+| MG_CERTS_VAULT_APPROLE_ROLEID             | Vault AppRole auth RoleID                                                    | magistrala                                                              |
+| MG_CERTS_VAULT_APPROLE_SECRET             | Vault AppRole auth Secret                                                    | magistrala                                                              |
+| MG_CERTS_VAULT_THINGS_CERTS_PKI_PATH      | Vault PKI path for issuing Things Certificates                               | pki_int                                                                 |
+| MG_CERTS_VAULT_THINGS_CERTS_PKI_ROLE_NAME | Vault PKI Role Name for issuing Things Certificates                          | magistrala_things_certs                                                 |
+| MG_CERTS_DB_HOST                          | Database host                                                                | localhost                                                               |
+| MG_CERTS_DB_PORT                          | Database port                                                                | 5432                                                                    |
+| MG_CERTS_DB_PASS                          | Database password                                                            | magistrala                                                              |
+| MG_CERTS_DB_USER                          | Database user                                                                | magistrala                                                              |
+| MG_CERTS_DB_NAME                          | Database name                                                                | certs                                                                   |
+| MG_CERTS_DB_SSL_MODE                      | Database SSL mode                                                            | disable                                                                 |
+| MG_CERTS_DB_SSL_CERT                      | Database SSL certificate                                                     | ""                                                                      |
+| MG_CERTS_DB_SSL_KEY                       | Database SSL key                                                             | ""                                                                      |
+| MG_CERTS_DB_SSL_ROOT_CERT                 | Database SSL root certificate                                                | ""                                                                      |
+| MG_THINGS_URL                             | Things service URL                                                           | [localhost:9000](localhost:9000)                                        |
+| MG_JAEGER_URL                             | Jaeger server URL                                                            | [http://localhost:14268/api/traces](http://localhost:14268/api/traces)  |
+| MG_JAEGER_TRACE_RATIO                     | Jaeger sampling ratio                                                        | 1.0                                                                     |
+| MG_SEND_TELEMETRY                         | Send telemetry to magistrala call home server                                | true                                                                    |
+| MG_CERTS_INSTANCE_ID                      | Service instance ID                                                          | ""                                                                      |
+
+## Deployment
+
+The service is distributed as a Docker container. Check the [`certs`](https://github.com/absmach/magistrala/blob/main/docker/addons/certs/docker-compose.yml) service section in the docker-compose file to see how the service is deployed.
+
+Running this service outside of a container requires working instances of the auth service, the things service, a Postgres database, Vault, and a Jaeger server.
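+
+Alternatively, the whole addon can be brought up with the repository's compose tooling. A minimal sketch, assuming the `run_addons` target from the Makefile above and the `docker/addons/certs` compose file added in this patch (the compose project name is derived from `DOCKER_PROJECT`; `magistrala` is a placeholder here):
+
+```bash
+# Via the Makefile target (the compose command defaults to `up`):
+make run_addons certs
+
+# Or invoking docker compose directly:
+docker compose -f docker/addons/certs/docker-compose.yml --env-file docker/.env -p magistrala up
+```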
+To start the service outside of the container, execute the following shell script: + +```bash +# download the latest version of the service +git clone https://github.com/absmach/magistrala + +cd magistrala + +# compile the certs +make certs + +# copy binary to bin +make install + +# set the environment variables and run the service +MG_CERTS_LOG_LEVEL=info \ +MG_CERTS_HTTP_HOST=localhost \ +MG_CERTS_HTTP_PORT=9019 \ +MG_CERTS_HTTP_SERVER_CERT="" \ +MG_CERTS_HTTP_SERVER_KEY="" \ +MG_AUTH_GRPC_URL=localhost:8181 \ +MG_AUTH_GRPC_TIMEOUT=1s \ +MG_AUTH_GRPC_CLIENT_CERT="" \ +MG_AUTH_GRPC_CLIENT_KEY="" \ +MG_AUTH_GRPC_SERVER_CERTS="" \ +MG_CERTS_SIGN_CA_PATH=ca.crt \ +MG_CERTS_SIGN_CA_KEY_PATH=ca.key \ +MG_CERTS_VAULT_HOST=http://vault:8200 \ +MG_CERTS_VAULT_NAMESPACE=magistrala \ +MG_CERTS_VAULT_APPROLE_ROLEID=magistrala \ +MG_CERTS_VAULT_APPROLE_SECRET=magistrala \ +MG_CERTS_VAULT_THINGS_CERTS_PKI_PATH=pki_int \ +MG_CERTS_VAULT_THINGS_CERTS_PKI_ROLE_NAME=magistrala_things_certs \ +MG_CERTS_DB_HOST=localhost \ +MG_CERTS_DB_PORT=5432 \ +MG_CERTS_DB_PASS=magistrala \ +MG_CERTS_DB_USER=magistrala \ +MG_CERTS_DB_NAME=certs \ +MG_CERTS_DB_SSL_MODE=disable \ +MG_CERTS_DB_SSL_CERT="" \ +MG_CERTS_DB_SSL_KEY="" \ +MG_CERTS_DB_SSL_ROOT_CERT="" \ +MG_THINGS_URL=localhost:9000 \ +MG_JAEGER_URL=http://localhost:14268/api/traces \ +MG_JAEGER_TRACE_RATIO=1.0 \ +MG_SEND_TELEMETRY=true \ +MG_CERTS_INSTANCE_ID="" \ +$GOBIN/magistrala-certs +``` + +Setting `MG_CERTS_HTTP_SERVER_CERT` and `MG_CERTS_HTTP_SERVER_KEY` will enable TLS against the service. The service expects a file in PEM format for both the certificate and the key. + +Setting `MG_AUTH_GRPC_CLIENT_CERT` and `MG_AUTH_GRPC_CLIENT_KEY` will enable TLS against the auth service. The service expects a file in PEM format for both the certificate and the key. Setting `MG_AUTH_GRPC_SERVER_CERTS` will enable TLS against the auth service trusting only those CAs that are provided. The service expects a file in PEM format of trusted CAs. + +## Usage + +For more information about service capabilities and its usage, please check out the [Certs section](https://docs.magistrala.abstractmachines.fr/certs/). diff --git a/certs/api/doc.go b/certs/api/doc.go new file mode 100644 index 0000000..943cf19 --- /dev/null +++ b/certs/api/doc.go @@ -0,0 +1,5 @@ +// Copyright (c) Abstract Machines +// SPDX-License-Identifier: Apache-2.0 + +// Package api contains implementation of certs service HTTP API. 
+package api diff --git a/certs/api/endpoint.go b/certs/api/endpoint.go new file mode 100644 index 0000000..fea0c63 --- /dev/null +++ b/certs/api/endpoint.go @@ -0,0 +1,104 @@ +// Copyright (c) Abstract Machines +// SPDX-License-Identifier: Apache-2.0 + +package api + +import ( + "context" + + "github.com/absmach/magistrala/certs" + "github.com/absmach/magistrala/pkg/apiutil" + "github.com/absmach/magistrala/pkg/errors" + "github.com/go-kit/kit/endpoint" +) + +func issueCert(svc certs.Service) endpoint.Endpoint { + return func(ctx context.Context, request interface{}) (interface{}, error) { + req := request.(addCertsReq) + if err := req.validate(); err != nil { + return nil, errors.Wrap(apiutil.ErrValidation, err) + } + res, err := svc.IssueCert(ctx, req.token, req.ThingID, req.TTL) + if err != nil { + return certsRes{}, errors.Wrap(apiutil.ErrValidation, err) + } + + return certsRes{ + CertSerial: res.Serial, + ThingID: res.ThingID, + ClientCert: res.ClientCert, + ClientKey: res.ClientKey, + Expiration: res.Expire, + created: true, + }, nil + } +} + +func listSerials(svc certs.Service) endpoint.Endpoint { + return func(ctx context.Context, request interface{}) (interface{}, error) { + req := request.(listReq) + if err := req.validate(); err != nil { + return nil, errors.Wrap(apiutil.ErrValidation, err) + } + + page, err := svc.ListSerials(ctx, req.token, req.thingID, req.offset, req.limit) + if err != nil { + return certsPageRes{}, errors.Wrap(apiutil.ErrValidation, err) + } + res := certsPageRes{ + pageRes: pageRes{ + Total: page.Total, + Offset: page.Offset, + Limit: page.Limit, + }, + Certs: []certsRes{}, + } + + for _, cert := range page.Certs { + cr := certsRes{ + CertSerial: cert.Serial, + } + res.Certs = append(res.Certs, cr) + } + return res, nil + } +} + +func viewCert(svc certs.Service) endpoint.Endpoint { + return func(ctx context.Context, request interface{}) (interface{}, error) { + req := request.(viewReq) + if err := req.validate(); err != nil { + return nil, errors.Wrap(apiutil.ErrValidation, err) + } + + cert, err := svc.ViewCert(ctx, req.token, req.serialID) + if err != nil { + return certsPageRes{}, errors.Wrap(apiutil.ErrValidation, err) + } + + certRes := certsRes{ + CertSerial: cert.Serial, + ThingID: cert.ThingID, + ClientCert: cert.ClientCert, + Expiration: cert.Expire, + } + + return certRes, nil + } +} + +func revokeCert(svc certs.Service) endpoint.Endpoint { + return func(ctx context.Context, request interface{}) (interface{}, error) { + req := request.(revokeReq) + if err := req.validate(); err != nil { + return nil, errors.Wrap(apiutil.ErrValidation, err) + } + res, err := svc.RevokeCert(ctx, req.token, req.certID) + if err != nil { + return nil, err + } + return revokeCertsRes{ + RevocationTime: res.RevocationTime, + }, nil + } +} diff --git a/certs/api/endpoint_test.go b/certs/api/endpoint_test.go new file mode 100644 index 0000000..39aa944 --- /dev/null +++ b/certs/api/endpoint_test.go @@ -0,0 +1,528 @@ +// Copyright (c) Abstract Machines +// SPDX-License-Identifier: Apache-2.0 + +package api_test + +import ( + "encoding/json" + "fmt" + "io" + "net/http" + "net/http/httptest" + "strings" + "testing" + "time" + + "github.com/absmach/magistrala/certs" + httpapi "github.com/absmach/magistrala/certs/api" + "github.com/absmach/magistrala/certs/mocks" + mglog "github.com/absmach/magistrala/logger" + "github.com/absmach/magistrala/pkg/apiutil" + "github.com/absmach/magistrala/pkg/errors" + svcerr "github.com/absmach/magistrala/pkg/errors/service" + 
"github.com/absmach/mg-contrib/pkg/testsutil" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" +) + +var ( + contentType = "application/json" + valid = "valid" + invalid = "invalid" + thingID = testsutil.GenerateUUID(&testing.T{}) + serial = testsutil.GenerateUUID(&testing.T{}) + ttl = "1h" + cert = certs.Cert{ + OwnerID: testsutil.GenerateUUID(&testing.T{}), + ThingID: thingID, + ClientCert: valid, + IssuingCA: valid, + CAChain: []string{valid}, + ClientKey: valid, + PrivateKeyType: valid, + Serial: serial, + Expire: time.Now().Add(time.Hour), + } +) + +type testRequest struct { + client *http.Client + method string + url string + contentType string + token string + body io.Reader +} + +func (tr testRequest) make() (*http.Response, error) { + req, err := http.NewRequest(tr.method, tr.url, tr.body) + if err != nil { + return nil, err + } + if tr.token != "" { + req.Header.Set("Authorization", apiutil.BearerPrefix+tr.token) + } + if tr.contentType != "" { + req.Header.Set("Content-Type", tr.contentType) + } + + return tr.client.Do(req) +} + +func newCertServer() (*httptest.Server, *mocks.Service) { + svc := new(mocks.Service) + logger := mglog.NewMock() + + mux := httpapi.MakeHandler(svc, logger, "") + return httptest.NewServer(mux), svc +} + +func TestIssueCert(t *testing.T) { + cs, svc := newCertServer() + defer cs.Close() + + validReqString := `{"thing_id": "%s","ttl": "%s"}` + invalidReqString := `{"thing_id": "%s","ttl": %s}` + + cases := []struct { + desc string + token string + contentType string + thingID string + ttl string + request string + status int + svcRes certs.Cert + svcErr error + err error + }{ + { + desc: "issue cert successfully", + token: valid, + contentType: contentType, + thingID: thingID, + ttl: ttl, + request: fmt.Sprintf(validReqString, thingID, ttl), + status: http.StatusCreated, + svcRes: cert, + svcErr: nil, + err: nil, + }, + { + desc: "issue with invalid token", + token: invalid, + contentType: contentType, + thingID: thingID, + ttl: ttl, + request: fmt.Sprintf(validReqString, thingID, ttl), + status: http.StatusUnauthorized, + svcRes: certs.Cert{}, + svcErr: svcerr.ErrAuthentication, + err: svcerr.ErrAuthentication, + }, + { + desc: "issue with empty token", + token: "", + contentType: contentType, + request: fmt.Sprintf(validReqString, thingID, ttl), + status: http.StatusUnauthorized, + svcRes: certs.Cert{}, + svcErr: nil, + err: apiutil.ErrBearerToken, + }, + { + desc: "issue with empty thing id", + token: valid, + contentType: contentType, + request: fmt.Sprintf(validReqString, "", ttl), + status: http.StatusBadRequest, + svcRes: certs.Cert{}, + svcErr: nil, + err: apiutil.ErrMissingID, + }, + { + desc: "issue with empty ttl", + token: valid, + contentType: contentType, + request: fmt.Sprintf(validReqString, thingID, ""), + status: http.StatusBadRequest, + svcRes: certs.Cert{}, + svcErr: nil, + err: apiutil.ErrMissingCertData, + }, + { + desc: "issue with invalid ttl", + token: valid, + contentType: contentType, + request: fmt.Sprintf(validReqString, thingID, invalid), + status: http.StatusBadRequest, + svcRes: certs.Cert{}, + svcErr: nil, + err: apiutil.ErrInvalidCertData, + }, + { + desc: "issue with invalid content type", + token: valid, + contentType: "application/xml", + request: fmt.Sprintf(validReqString, thingID, ttl), + status: http.StatusUnsupportedMediaType, + svcRes: certs.Cert{}, + svcErr: nil, + err: apiutil.ErrUnsupportedContentType, + }, + { + desc: "issue with invalid request body", + token: valid, + 
contentType: contentType, + request: fmt.Sprintf(invalidReqString, thingID, ttl), + status: http.StatusInternalServerError, + svcRes: certs.Cert{}, + svcErr: nil, + err: apiutil.ErrValidation, + }, + } + + for _, tc := range cases { + req := testRequest{ + client: cs.Client(), + method: http.MethodPost, + url: fmt.Sprintf("%s/certs", cs.URL), + contentType: tc.contentType, + token: tc.token, + body: strings.NewReader(tc.request), + } + svcCall := svc.On("IssueCert", mock.Anything, tc.token, tc.thingID, tc.ttl).Return(tc.svcRes, tc.svcErr) + res, err := req.make() + assert.Nil(t, err, fmt.Sprintf("%s: unexpected error %s", tc.desc, err)) + var errRes respBody + err = json.NewDecoder(res.Body).Decode(&errRes) + assert.Nil(t, err, fmt.Sprintf("%s: unexpected error while decoding response body: %s", tc.desc, err)) + if errRes.Err != "" || errRes.Message != "" { + err = errors.Wrap(errors.New(errRes.Err), errors.New(errRes.Message)) + } + assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("%s: expected %s got %s\n", tc.desc, tc.err, err)) + assert.Equal(t, tc.status, res.StatusCode, fmt.Sprintf("%s: expected status code %d got %d", tc.desc, tc.status, res.StatusCode)) + svcCall.Unset() + } +} + +func TestViewCert(t *testing.T) { + cs, svc := newCertServer() + defer cs.Close() + + cases := []struct { + desc string + token string + serialID string + status int + svcRes certs.Cert + svcErr error + err error + }{ + { + desc: "view cert successfully", + token: valid, + serialID: serial, + status: http.StatusOK, + svcRes: cert, + svcErr: nil, + err: nil, + }, + { + desc: "view with invalid token", + token: invalid, + serialID: serial, + status: http.StatusUnauthorized, + svcRes: certs.Cert{}, + svcErr: svcerr.ErrAuthentication, + err: svcerr.ErrAuthentication, + }, + { + desc: "view with empty token", + token: "", + serialID: serial, + status: http.StatusUnauthorized, + svcRes: certs.Cert{}, + svcErr: nil, + err: apiutil.ErrBearerToken, + }, + { + desc: "view non-existing cert", + token: valid, + serialID: invalid, + status: http.StatusNotFound, + svcRes: certs.Cert{}, + svcErr: svcerr.ErrNotFound, + err: svcerr.ErrNotFound, + }, + } + for _, tc := range cases { + req := testRequest{ + client: cs.Client(), + method: http.MethodGet, + url: fmt.Sprintf("%s/certs/%s", cs.URL, tc.serialID), + token: tc.token, + } + svcCall := svc.On("ViewCert", mock.Anything, tc.token, tc.serialID).Return(tc.svcRes, tc.svcErr) + res, err := req.make() + assert.Nil(t, err, fmt.Sprintf("%s: unexpected error %s", tc.desc, err)) + var errRes respBody + err = json.NewDecoder(res.Body).Decode(&errRes) + assert.Nil(t, err, fmt.Sprintf("%s: unexpected error while decoding response body: %s", tc.desc, err)) + if errRes.Err != "" || errRes.Message != "" { + err = errors.Wrap(errors.New(errRes.Err), errors.New(errRes.Message)) + } + assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("%s: expected %s got %s\n", tc.desc, tc.err, err)) + assert.Equal(t, tc.status, res.StatusCode, fmt.Sprintf("%s: expected status code %d got %d", tc.desc, tc.status, res.StatusCode)) + svcCall.Unset() + } +} + +func TestRevokeCert(t *testing.T) { + cs, svc := newCertServer() + defer cs.Close() + + cases := []struct { + desc string + token string + serialID string + status int + svcRes certs.Revoke + svcErr error + err error + }{ + { + desc: "revoke cert successfully", + token: valid, + serialID: serial, + status: http.StatusOK, + svcRes: certs.Revoke{RevocationTime: time.Now()}, + svcErr: nil, + err: nil, + }, + { + desc: "revoke with 
invalid token", + token: invalid, + serialID: serial, + status: http.StatusUnauthorized, + svcRes: certs.Revoke{}, + svcErr: svcerr.ErrAuthentication, + err: svcerr.ErrAuthentication, + }, + { + desc: "revoke with empty token", + token: "", + serialID: serial, + status: http.StatusUnauthorized, + svcErr: nil, + err: apiutil.ErrBearerToken, + }, + { + desc: "revoke non-existing cert", + token: valid, + serialID: invalid, + status: http.StatusNotFound, + svcRes: certs.Revoke{}, + svcErr: svcerr.ErrNotFound, + err: svcerr.ErrNotFound, + }, + } + for _, tc := range cases { + req := testRequest{ + client: cs.Client(), + method: http.MethodDelete, + url: fmt.Sprintf("%s/certs/%s", cs.URL, tc.serialID), + token: tc.token, + } + svcCall := svc.On("RevokeCert", mock.Anything, tc.token, tc.serialID).Return(tc.svcRes, tc.svcErr) + res, err := req.make() + assert.Nil(t, err, fmt.Sprintf("%s: unexpected error %s", tc.desc, err)) + var errRes respBody + err = json.NewDecoder(res.Body).Decode(&errRes) + assert.Nil(t, err, fmt.Sprintf("%s: unexpected error while decoding response body: %s", tc.desc, err)) + if errRes.Err != "" || errRes.Message != "" { + err = errors.Wrap(errors.New(errRes.Err), errors.New(errRes.Message)) + } + assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("%s: expected %s got %s\n ", tc.desc, tc.err, err)) + assert.Equal(t, tc.status, res.StatusCode, fmt.Sprintf("%s: expected status code %d got %d", tc.desc, tc.status, res.StatusCode)) + svcCall.Unset() + } +} + +func TestListSerials(t *testing.T) { + cs, svc := newCertServer() + defer cs.Close() + + cases := []struct { + desc string + token string + thingID string + offset uint64 + limit uint64 + query string + status int + svcRes certs.Page + svcErr error + err error + }{ + { + desc: "list certs successfully with default limit", + token: valid, + thingID: thingID, + offset: 0, + limit: 10, + query: "", + status: http.StatusOK, + svcRes: certs.Page{ + Total: 1, + Offset: 0, + Limit: 10, + Certs: []certs.Cert{cert}, + }, + svcErr: nil, + err: nil, + }, + { + desc: "list certs successfully with limit", + token: valid, + thingID: thingID, + offset: 0, + limit: 5, + query: "?limit=5", + status: http.StatusOK, + svcRes: certs.Page{ + Total: 1, + Offset: 0, + Limit: 5, + Certs: []certs.Cert{cert}, + }, + svcErr: nil, + err: nil, + }, + { + desc: "list certs successfully with offset", + token: valid, + thingID: thingID, + offset: 1, + limit: 10, + query: "?offset=1", + status: http.StatusOK, + svcRes: certs.Page{ + Total: 1, + Offset: 1, + Limit: 10, + Certs: []certs.Cert{}, + }, + svcErr: nil, + err: nil, + }, + { + desc: "list certs successfully with offset and limit", + token: valid, + thingID: thingID, + offset: 1, + limit: 5, + query: "?offset=1&limit=5", + status: http.StatusOK, + svcRes: certs.Page{ + Total: 1, + Offset: 1, + Limit: 5, + Certs: []certs.Cert{}, + }, + svcErr: nil, + err: nil, + }, + { + desc: "list with invalid token", + token: invalid, + thingID: thingID, + offset: 0, + limit: 10, + query: "", + status: http.StatusUnauthorized, + svcRes: certs.Page{}, + svcErr: svcerr.ErrAuthentication, + err: svcerr.ErrAuthentication, + }, + { + desc: "list with empty token", + token: "", + thingID: thingID, + offset: 0, + limit: 10, + query: "", + status: http.StatusUnauthorized, + svcRes: certs.Page{}, + svcErr: nil, + err: apiutil.ErrBearerToken, + }, + { + desc: "list with limit exceeding max limit", + token: valid, + thingID: thingID, + query: "?limit=1000", + status: http.StatusBadRequest, + svcRes: certs.Page{}, + 
svcErr: nil, + err: apiutil.ErrLimitSize, + }, + { + desc: "list with invalid offset", + token: valid, + thingID: thingID, + query: "?offset=invalid", + status: http.StatusBadRequest, + svcRes: certs.Page{}, + svcErr: nil, + err: apiutil.ErrValidation, + }, + { + desc: "list with invalid limit", + token: valid, + thingID: thingID, + query: "?limit=invalid", + status: http.StatusBadRequest, + svcRes: certs.Page{}, + svcErr: nil, + err: apiutil.ErrValidation, + }, + { + desc: "list with invalid thing id", + token: valid, + thingID: invalid, + offset: 0, + limit: 10, + query: "", + status: http.StatusNotFound, + svcRes: certs.Page{}, + svcErr: svcerr.ErrNotFound, + err: svcerr.ErrNotFound, + }, + } + for _, tc := range cases { + req := testRequest{ + client: cs.Client(), + method: http.MethodGet, + url: fmt.Sprintf("%s/serials/%s", cs.URL, tc.thingID) + tc.query, + token: tc.token, + } + svcCall := svc.On("ListSerials", mock.Anything, tc.token, tc.thingID, tc.offset, tc.limit).Return(tc.svcRes, tc.svcErr) + res, err := req.make() + assert.Nil(t, err, fmt.Sprintf("%s: unexpected error %s", tc.desc, err)) + var errRes respBody + err = json.NewDecoder(res.Body).Decode(&errRes) + assert.Nil(t, err, fmt.Sprintf("%s: unexpected error while decoding response body: %s", tc.desc, err)) + if errRes.Err != "" || errRes.Message != "" { + err = errors.Wrap(errors.New(errRes.Err), errors.New(errRes.Message)) + } + assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("%s: expected %s got %s\n ", tc.desc, tc.err, err)) + assert.Equal(t, tc.status, res.StatusCode, fmt.Sprintf("%s: expected status code %d got %d", tc.desc, tc.status, res.StatusCode)) + svcCall.Unset() + } +} + +type respBody struct { + Err string `json:"error"` + Message string `json:"message"` +} diff --git a/certs/api/logging.go b/certs/api/logging.go new file mode 100644 index 0000000..8567d65 --- /dev/null +++ b/certs/api/logging.go @@ -0,0 +1,131 @@ +// Copyright (c) Abstract Machines +// SPDX-License-Identifier: Apache-2.0 + +//go:build !test + +package api + +import ( + "context" + "log/slog" + "time" + + "github.com/absmach/magistrala/certs" +) + +var _ certs.Service = (*loggingMiddleware)(nil) + +type loggingMiddleware struct { + logger *slog.Logger + svc certs.Service +} + +// LoggingMiddleware adds logging facilities to the bootstrap service. +func LoggingMiddleware(svc certs.Service, logger *slog.Logger) certs.Service { + return &loggingMiddleware{logger, svc} +} + +// IssueCert logs the issue_cert request. It logs the ttl, thing ID and the time it took to complete the request. +// If the request fails, it logs the error. +func (lm *loggingMiddleware) IssueCert(ctx context.Context, token, thingID, ttl string) (c certs.Cert, err error) { + defer func(begin time.Time) { + args := []any{ + slog.String("duration", time.Since(begin).String()), + slog.String("thing_id", thingID), + slog.String("ttl", ttl), + } + if err != nil { + args = append(args, slog.Any("error", err)) + lm.logger.Warn("Issue certificate failed", args...) + return + } + lm.logger.Info("Issue certificate completed successfully", args...) + }(time.Now()) + + return lm.svc.IssueCert(ctx, token, thingID, ttl) +} + +// ListCerts logs the list_certs request. It logs the thing ID and the time it took to complete the request. 
+func (lm *loggingMiddleware) ListCerts(ctx context.Context, token, thingID string, offset, limit uint64) (cp certs.Page, err error) {
+	defer func(begin time.Time) {
+		args := []any{
+			slog.String("duration", time.Since(begin).String()),
+			slog.String("thing_id", thingID),
+			slog.Group("page",
+				slog.Uint64("offset", cp.Offset),
+				slog.Uint64("limit", cp.Limit),
+				slog.Uint64("total", cp.Total),
+			),
+		}
+		if err != nil {
+			args = append(args, slog.Any("error", err))
+			lm.logger.Warn("List certificates failed", args...)
+			return
+		}
+		lm.logger.Info("List certificates completed successfully", args...)
+	}(time.Now())
+
+	return lm.svc.ListCerts(ctx, token, thingID, offset, limit)
+}
+
+// ListSerials logs the list_serials request. It logs the thing ID and the time it took to complete the request.
+// If the request fails, it logs the error.
+func (lm *loggingMiddleware) ListSerials(ctx context.Context, token, thingID string, offset, limit uint64) (cp certs.Page, err error) {
+	defer func(begin time.Time) {
+		args := []any{
+			slog.String("duration", time.Since(begin).String()),
+			slog.String("thing_id", thingID),
+			slog.Group("page",
+				slog.Uint64("offset", cp.Offset),
+				slog.Uint64("limit", cp.Limit),
+				slog.Uint64("total", cp.Total),
+			),
+		}
+		if err != nil {
+			args = append(args, slog.Any("error", err))
+			lm.logger.Warn("List certificates serials failed", args...)
+			return
+		}
+		lm.logger.Info("List certificates serials completed successfully", args...)
+	}(time.Now())
+
+	return lm.svc.ListSerials(ctx, token, thingID, offset, limit)
+}
+
+// ViewCert logs the view_cert request. It logs the serial ID and the time it took to complete the request.
+// If the request fails, it logs the error.
+func (lm *loggingMiddleware) ViewCert(ctx context.Context, token, serialID string) (c certs.Cert, err error) {
+	defer func(begin time.Time) {
+		args := []any{
+			slog.String("duration", time.Since(begin).String()),
+			slog.String("serial_id", serialID),
+		}
+		if err != nil {
+			args = append(args, slog.Any("error", err))
+			lm.logger.Warn("View certificate failed", args...)
+			return
+		}
+		lm.logger.Info("View certificate completed successfully", args...)
+	}(time.Now())
+
+	return lm.svc.ViewCert(ctx, token, serialID)
+}
+
+// RevokeCert logs the revoke_cert request. It logs the thing ID and the time it took to complete the request.
+// If the request fails, it logs the error.
+func (lm *loggingMiddleware) RevokeCert(ctx context.Context, token, thingID string) (c certs.Revoke, err error) {
+	defer func(begin time.Time) {
+		args := []any{
+			slog.String("duration", time.Since(begin).String()),
+			slog.String("thing_id", thingID),
+		}
+		if err != nil {
+			args = append(args, slog.Any("error", err))
+			lm.logger.Warn("Revoke certificate failed", args...)
+			return
+		}
+		lm.logger.Info("Revoke certificate completed successfully", args...)
+ }(time.Now()) + + return lm.svc.RevokeCert(ctx, token, thingID) +} diff --git a/certs/api/metrics.go b/certs/api/metrics.go new file mode 100644 index 0000000..e1ab83a --- /dev/null +++ b/certs/api/metrics.go @@ -0,0 +1,81 @@ +// Copyright (c) Abstract Machines +// SPDX-License-Identifier: Apache-2.0 + +//go:build !test + +package api + +import ( + "context" + "time" + + "github.com/absmach/magistrala/certs" + "github.com/go-kit/kit/metrics" +) + +var _ certs.Service = (*metricsMiddleware)(nil) + +type metricsMiddleware struct { + counter metrics.Counter + latency metrics.Histogram + svc certs.Service +} + +// MetricsMiddleware instruments core service by tracking request count and latency. +func MetricsMiddleware(svc certs.Service, counter metrics.Counter, latency metrics.Histogram) certs.Service { + return &metricsMiddleware{ + counter: counter, + latency: latency, + svc: svc, + } +} + +// IssueCert instruments IssueCert method with metrics. +func (ms *metricsMiddleware) IssueCert(ctx context.Context, token, thingID, ttl string) (certs.Cert, error) { + defer func(begin time.Time) { + ms.counter.With("method", "issue_cert").Add(1) + ms.latency.With("method", "issue_cert").Observe(time.Since(begin).Seconds()) + }(time.Now()) + + return ms.svc.IssueCert(ctx, token, thingID, ttl) +} + +// ListCerts instruments ListCerts method with metrics. +func (ms *metricsMiddleware) ListCerts(ctx context.Context, token, thingID string, offset, limit uint64) (certs.Page, error) { + defer func(begin time.Time) { + ms.counter.With("method", "list_certs").Add(1) + ms.latency.With("method", "list_certs").Observe(time.Since(begin).Seconds()) + }(time.Now()) + + return ms.svc.ListCerts(ctx, token, thingID, offset, limit) +} + +// ListSerials instruments ListSerials method with metrics. +func (ms *metricsMiddleware) ListSerials(ctx context.Context, token, thingID string, offset, limit uint64) (certs.Page, error) { + defer func(begin time.Time) { + ms.counter.With("method", "list_serials").Add(1) + ms.latency.With("method", "list_serials").Observe(time.Since(begin).Seconds()) + }(time.Now()) + + return ms.svc.ListSerials(ctx, token, thingID, offset, limit) +} + +// ViewCert instruments ViewCert method with metrics. +func (ms *metricsMiddleware) ViewCert(ctx context.Context, token, serialID string) (certs.Cert, error) { + defer func(begin time.Time) { + ms.counter.With("method", "view_cert").Add(1) + ms.latency.With("method", "view_cert").Observe(time.Since(begin).Seconds()) + }(time.Now()) + + return ms.svc.ViewCert(ctx, token, serialID) +} + +// RevokeCert instruments RevokeCert method with metrics. 
+func (ms *metricsMiddleware) RevokeCert(ctx context.Context, token, thingID string) (certs.Revoke, error) { + defer func(begin time.Time) { + ms.counter.With("method", "revoke_cert").Add(1) + ms.latency.With("method", "revoke_cert").Observe(time.Since(begin).Seconds()) + }(time.Now()) + + return ms.svc.RevokeCert(ctx, token, thingID) +} diff --git a/certs/api/requests.go b/certs/api/requests.go new file mode 100644 index 0000000..78ac21d --- /dev/null +++ b/certs/api/requests.go @@ -0,0 +1,88 @@ +// Copyright (c) Abstract Machines +// SPDX-License-Identifier: Apache-2.0 + +package api + +import ( + "time" + + "github.com/absmach/magistrala/pkg/apiutil" +) + +const maxLimitSize = 100 + +type addCertsReq struct { + token string + ThingID string `json:"thing_id"` + TTL string `json:"ttl"` +} + +func (req addCertsReq) validate() error { + if req.token == "" { + return apiutil.ErrBearerToken + } + + if req.ThingID == "" { + return apiutil.ErrMissingID + } + + if req.TTL == "" { + return apiutil.ErrMissingCertData + } + + if _, err := time.ParseDuration(req.TTL); err != nil { + return apiutil.ErrInvalidCertData + } + + return nil +} + +type listReq struct { + thingID string + token string + offset uint64 + limit uint64 +} + +func (req *listReq) validate() error { + if req.token == "" { + return apiutil.ErrBearerToken + } + if req.limit > maxLimitSize { + return apiutil.ErrLimitSize + } + return nil +} + +type viewReq struct { + serialID string + token string +} + +func (req *viewReq) validate() error { + if req.token == "" { + return apiutil.ErrBearerToken + } + if req.serialID == "" { + return apiutil.ErrMissingID + } + + return nil +} + +type revokeReq struct { + token string + certID string +} + +func (req *revokeReq) validate() error { + if req.token == "" { + return apiutil.ErrBearerToken + } + + if req.certID == "" { + return apiutil.ErrMissingID + } + + return nil +} diff --git a/certs/api/responses.go b/certs/api/responses.go new file mode 100644 index 0000000..ce19064 --- /dev/null +++ b/certs/api/responses.go @@ -0,0 +1,73 @@ +// Copyright (c) Abstract Machines +// SPDX-License-Identifier: Apache-2.0 + +package api + +import ( + "net/http" + "time" +) + +type pageRes struct { + Total uint64 `json:"total"` + Offset uint64 `json:"offset"` + Limit uint64 `json:"limit"` +} + +type certsPageRes struct { + pageRes + Certs []certsRes `json:"certs"` +} + +type certsRes struct { + ThingID string `json:"thing_id"` + ClientCert string `json:"client_cert"` + ClientKey string `json:"client_key"` + CertSerial string `json:"cert_serial"` + Expiration time.Time `json:"expiration"` + created bool +} + +type revokeCertsRes struct { + RevocationTime time.Time `json:"revocation_time"` +} + +func (res certsPageRes) Code() int { + return http.StatusOK +} + +func (res certsPageRes) Headers() map[string]string { + return map[string]string{} +} + +func (res certsPageRes) Empty() bool { + return false +} + +func (res certsRes) Code() int { + if res.created { + return http.StatusCreated + } + + return http.StatusOK +} + +func (res certsRes) Headers() map[string]string { + return map[string]string{} +} + +func (res certsRes) Empty() bool { + return false +} + +func (res revokeCertsRes) Code() int { + return http.StatusOK +} + +func (res revokeCertsRes) Headers() map[string]string { + return map[string]string{} +} + +func (res revokeCertsRes) Empty() bool { + return false +} diff --git a/certs/api/transport.go b/certs/api/transport.go new file mode 100644 index 0000000..c5cc717 --- /dev/null +++ 
b/certs/api/transport.go @@ -0,0 +1,120 @@ +// Copyright (c) Abstract Machines +// SPDX-License-Identifier: Apache-2.0 + +package api + +import ( + "context" + "encoding/json" + "log/slog" + "net/http" + + "github.com/absmach/magistrala" + "github.com/absmach/magistrala/certs" + "github.com/absmach/magistrala/pkg/apiutil" + "github.com/absmach/magistrala/pkg/errors" + "github.com/absmach/mg-contrib/pkg/api" + "github.com/go-chi/chi/v5" + kithttp "github.com/go-kit/kit/transport/http" + "github.com/prometheus/client_golang/prometheus/promhttp" + "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp" +) + +const ( + contentType = "application/json" + offsetKey = "offset" + limitKey = "limit" + defOffset = 0 + defLimit = 10 +) + +// MakeHandler returns a HTTP handler for API endpoints. +func MakeHandler(svc certs.Service, logger *slog.Logger, instanceID string) http.Handler { + opts := []kithttp.ServerOption{ + kithttp.ServerErrorEncoder(apiutil.LoggingErrorEncoder(logger, api.EncodeError)), + } + + r := chi.NewRouter() + + r.Route("/certs", func(r chi.Router) { + r.Post("/", otelhttp.NewHandler(kithttp.NewServer( + issueCert(svc), + decodeCerts, + api.EncodeResponse, + opts..., + ), "issue").ServeHTTP) + r.Get("/{certID}", otelhttp.NewHandler(kithttp.NewServer( + viewCert(svc), + decodeViewCert, + api.EncodeResponse, + opts..., + ), "view").ServeHTTP) + r.Delete("/{certID}", otelhttp.NewHandler(kithttp.NewServer( + revokeCert(svc), + decodeRevokeCerts, + api.EncodeResponse, + opts..., + ), "revoke").ServeHTTP) + }) + r.Get("/serials/{thingID}", otelhttp.NewHandler(kithttp.NewServer( + listSerials(svc), + decodeListCerts, + api.EncodeResponse, + opts..., + ), "list_serials").ServeHTTP) + + r.Handle("/metrics", promhttp.Handler()) + r.Get("/health", magistrala.Health("certs", instanceID)) + + return r +} + +func decodeListCerts(_ context.Context, r *http.Request) (interface{}, error) { + l, err := apiutil.ReadNumQuery[uint64](r, limitKey, defLimit) + if err != nil { + return nil, errors.Wrap(apiutil.ErrValidation, err) + } + o, err := apiutil.ReadNumQuery[uint64](r, offsetKey, defOffset) + if err != nil { + return nil, errors.Wrap(apiutil.ErrValidation, err) + } + + req := listReq{ + token: apiutil.ExtractBearerToken(r), + thingID: chi.URLParam(r, "thingID"), + limit: l, + offset: o, + } + return req, nil +} + +func decodeViewCert(_ context.Context, r *http.Request) (interface{}, error) { + req := viewReq{ + token: apiutil.ExtractBearerToken(r), + serialID: chi.URLParam(r, "certID"), + } + + return req, nil +} + +func decodeCerts(_ context.Context, r *http.Request) (interface{}, error) { + if r.Header.Get("Content-Type") != contentType { + return nil, errors.Wrap(apiutil.ErrValidation, apiutil.ErrUnsupportedContentType) + } + + req := addCertsReq{token: apiutil.ExtractBearerToken(r)} + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + return nil, errors.Wrap(apiutil.ErrValidation, err) + } + + return req, nil +} + +func decodeRevokeCerts(_ context.Context, r *http.Request) (interface{}, error) { + req := revokeReq{ + token: apiutil.ExtractBearerToken(r), + certID: chi.URLParam(r, "certID"), + } + + return req, nil +} diff --git a/certs/certs.go b/certs/certs.go new file mode 100644 index 0000000..8bfaa60 --- /dev/null +++ b/certs/certs.go @@ -0,0 +1,86 @@ +// Copyright (c) Abstract Machines +// SPDX-License-Identifier: Apache-2.0 + +package certs + +import ( + "context" + "crypto/tls" + "crypto/x509" + "encoding/pem" + "os" + + "github.com/absmach/magistrala/pkg/errors" 
+)
+
+// Page contains page-related metadata as well as the list of certs.
+type Page struct {
+	Total  uint64
+	Offset uint64
+	Limit  uint64
+	Certs  []Cert
+}
+
+var ErrMissingCerts = errors.New("CA path or CA key path not set")
+
+// Repository specifies a certs persistence API.
+//
+//go:generate mockery --name Repository --output=./mocks --filename certs.go --quiet --note "Copyright (c) Abstract Machines"
+type Repository interface {
+	// Save saves cert for thing into database
+	Save(ctx context.Context, cert Cert) (string, error)
+
+	// RetrieveAll retrieves issued certificates for a given owner ID
+	RetrieveAll(ctx context.Context, ownerID string, offset, limit uint64) (Page, error)
+
+	// Remove removes certificate from DB for a given thing ID
+	Remove(ctx context.Context, ownerID, thingID string) error
+
+	// RetrieveByThing retrieves issued certificates for a given thing ID
+	RetrieveByThing(ctx context.Context, ownerID, thingID string, offset, limit uint64) (Page, error)
+
+	// RetrieveBySerial retrieves a certificate for a given serial ID
+	RetrieveBySerial(ctx context.Context, ownerID, serialID string) (Cert, error)
+}
+
+func LoadCertificates(caPath, caKeyPath string) (tls.Certificate, *x509.Certificate, error) {
+	if caPath == "" || caKeyPath == "" {
+		return tls.Certificate{}, &x509.Certificate{}, ErrMissingCerts
+	}
+
+	_, err := os.Stat(caPath)
+	if os.IsNotExist(err) || os.IsPermission(err) {
+		return tls.Certificate{}, &x509.Certificate{}, err
+	}
+
+	_, err = os.Stat(caKeyPath)
+	if os.IsNotExist(err) || os.IsPermission(err) {
+		return tls.Certificate{}, &x509.Certificate{}, err
+	}
+
+	tlsCert, err := tls.LoadX509KeyPair(caPath, caKeyPath)
+	if err != nil {
+		return tlsCert, &x509.Certificate{}, err
+	}
+
+	b, err := os.ReadFile(caPath)
+	if err != nil {
+		return tlsCert, &x509.Certificate{}, err
+	}
+
+	caCert, err := ReadCert(b)
+	if err != nil {
+		return tlsCert, &x509.Certificate{}, err
+	}
+
+	return tlsCert, caCert, nil
+}
+
+func ReadCert(b []byte) (*x509.Certificate, error) {
+	block, _ := pem.Decode(b)
+	if block == nil {
+		return nil, errors.New("failed to decode PEM data")
+	}
+
+	return x509.ParseCertificate(block.Bytes)
+}

diff --git a/certs/certs_test.go b/certs/certs_test.go
new file mode 100644
index 0000000..3ee7dc7
--- /dev/null
+++ b/certs/certs_test.go
@@ -0,0 +1,93 @@
+// Copyright (c) Abstract Machines
+// SPDX-License-Identifier: Apache-2.0
+
+package certs_test
+
+import (
+	"fmt"
+	"testing"
+
+	"github.com/absmach/magistrala/certs"
+	"github.com/absmach/magistrala/pkg/errors"
+	"github.com/stretchr/testify/assert"
+)
+
+func TestLoadCertificates(t *testing.T) {
+	cases := []struct {
+		desc      string
+		caPath    string
+		caKeyPath string
+		err       error
+	}{
+		{
+			desc:      "load valid tls certificate and valid key",
+			caPath:    "../docker/ssl/certs/ca.crt",
+			caKeyPath: "../docker/ssl/certs/ca.key",
+			err:       nil,
+		},
+		{
+			desc:      "load valid tls certificate and missing key",
+			caPath:    "../docker/ssl/certs/ca.crt",
+			caKeyPath: "",
+			err:       certs.ErrMissingCerts,
+		},
+		{
+			desc:      "load missing tls certificate and valid key",
+			caPath:    "",
+			caKeyPath: "../docker/ssl/certs/ca.key",
+			err:       certs.ErrMissingCerts,
+		},
+		{
+			desc:      "load empty tls certificate and empty key",
+			caPath:    "",
+			caKeyPath: "",
+			err:       certs.ErrMissingCerts,
+		},
+		{
+			desc:      "load valid tls certificate and invalid key",
+			caPath:    "../docker/ssl/certs/ca.crt",
+			caKeyPath: "certs.go",
+			err:       errors.New("tls: failed to find any PEM data in key input"),
+		},
+		{
+			desc: "load invalid tls certificate and
valid key", + caPath: "certs.go", + caKeyPath: "../docker/ssl/certs/ca.key", + err: errors.New("tls: failed to find any PEM data in certificate input"), + }, + { + desc: "load invalid tls certificate and invalid key", + caPath: "certs.go", + caKeyPath: "certs.go", + err: errors.New("tls: failed to find any PEM data in certificate input"), + }, + + { + desc: "load valid tls certificate and non-existing key", + caPath: "../docker/ssl/certs/ca.crt", + caKeyPath: "ca.key", + err: errors.New("stat ca.key: no such file or directory"), + }, + { + desc: "load non-existing tls certificate and valid key", + caPath: "ca.crt", + caKeyPath: "../docker/ssl/certs/ca.key", + err: errors.New("stat ca.crt: no such file or directory"), + }, + { + desc: "load non-existing tls certificate and non-existing key", + caPath: "ca.crt", + caKeyPath: "ca.key", + err: errors.New("stat ca.crt: no such file or directory"), + }, + } + + for _, tc := range cases { + tlsCert, caCert, err := certs.LoadCertificates(tc.caPath, tc.caKeyPath) + assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("%s: expected %s got %s\n", tc.desc, tc.err, err)) + if err == nil { + assert.NotNil(t, tlsCert) + assert.NotNil(t, caCert) + } + } +} diff --git a/certs/doc.go b/certs/doc.go new file mode 100644 index 0000000..24a1987 --- /dev/null +++ b/certs/doc.go @@ -0,0 +1,6 @@ +// Copyright (c) Abstract Machines +// SPDX-License-Identifier: Apache-2.0 + +// Package certs contains the domain concept definitions needed to support +// Magistrala certs service functionality. +package certs diff --git a/certs/mocks/certs.go b/certs/mocks/certs.go new file mode 100644 index 0000000..ea918bb --- /dev/null +++ b/certs/mocks/certs.go @@ -0,0 +1,162 @@ +// Code generated by mockery v2.43.2. DO NOT EDIT. + +// Copyright (c) Abstract Machines + +package mocks + +import ( + context "context" + + certs "github.com/absmach/magistrala/certs" + + mock "github.com/stretchr/testify/mock" +) + +// Repository is an autogenerated mock type for the Repository type +type Repository struct { + mock.Mock +} + +// Remove provides a mock function with given fields: ctx, ownerID, thingID +func (_m *Repository) Remove(ctx context.Context, ownerID string, thingID string) error { + ret := _m.Called(ctx, ownerID, thingID) + + if len(ret) == 0 { + panic("no return value specified for Remove") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, string, string) error); ok { + r0 = rf(ctx, ownerID, thingID) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// RetrieveAll provides a mock function with given fields: ctx, ownerID, offset, limit +func (_m *Repository) RetrieveAll(ctx context.Context, ownerID string, offset uint64, limit uint64) (certs.Page, error) { + ret := _m.Called(ctx, ownerID, offset, limit) + + if len(ret) == 0 { + panic("no return value specified for RetrieveAll") + } + + var r0 certs.Page + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, string, uint64, uint64) (certs.Page, error)); ok { + return rf(ctx, ownerID, offset, limit) + } + if rf, ok := ret.Get(0).(func(context.Context, string, uint64, uint64) certs.Page); ok { + r0 = rf(ctx, ownerID, offset, limit) + } else { + r0 = ret.Get(0).(certs.Page) + } + + if rf, ok := ret.Get(1).(func(context.Context, string, uint64, uint64) error); ok { + r1 = rf(ctx, ownerID, offset, limit) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// RetrieveBySerial provides a mock function with given fields: ctx, ownerID, serialID +func (_m *Repository) RetrieveBySerial(ctx 
context.Context, ownerID string, serialID string) (certs.Cert, error) { + ret := _m.Called(ctx, ownerID, serialID) + + if len(ret) == 0 { + panic("no return value specified for RetrieveBySerial") + } + + var r0 certs.Cert + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, string, string) (certs.Cert, error)); ok { + return rf(ctx, ownerID, serialID) + } + if rf, ok := ret.Get(0).(func(context.Context, string, string) certs.Cert); ok { + r0 = rf(ctx, ownerID, serialID) + } else { + r0 = ret.Get(0).(certs.Cert) + } + + if rf, ok := ret.Get(1).(func(context.Context, string, string) error); ok { + r1 = rf(ctx, ownerID, serialID) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// RetrieveByThing provides a mock function with given fields: ctx, ownerID, thingID, offset, limit +func (_m *Repository) RetrieveByThing(ctx context.Context, ownerID string, thingID string, offset uint64, limit uint64) (certs.Page, error) { + ret := _m.Called(ctx, ownerID, thingID, offset, limit) + + if len(ret) == 0 { + panic("no return value specified for RetrieveByThing") + } + + var r0 certs.Page + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, string, string, uint64, uint64) (certs.Page, error)); ok { + return rf(ctx, ownerID, thingID, offset, limit) + } + if rf, ok := ret.Get(0).(func(context.Context, string, string, uint64, uint64) certs.Page); ok { + r0 = rf(ctx, ownerID, thingID, offset, limit) + } else { + r0 = ret.Get(0).(certs.Page) + } + + if rf, ok := ret.Get(1).(func(context.Context, string, string, uint64, uint64) error); ok { + r1 = rf(ctx, ownerID, thingID, offset, limit) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Save provides a mock function with given fields: ctx, cert +func (_m *Repository) Save(ctx context.Context, cert certs.Cert) (string, error) { + ret := _m.Called(ctx, cert) + + if len(ret) == 0 { + panic("no return value specified for Save") + } + + var r0 string + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, certs.Cert) (string, error)); ok { + return rf(ctx, cert) + } + if rf, ok := ret.Get(0).(func(context.Context, certs.Cert) string); ok { + r0 = rf(ctx, cert) + } else { + r0 = ret.Get(0).(string) + } + + if rf, ok := ret.Get(1).(func(context.Context, certs.Cert) error); ok { + r1 = rf(ctx, cert) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// NewRepository creates a new instance of Repository. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewRepository(t interface { + mock.TestingT + Cleanup(func()) +}) *Repository { + mock := &Repository{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/certs/mocks/doc.go b/certs/mocks/doc.go new file mode 100644 index 0000000..16ed198 --- /dev/null +++ b/certs/mocks/doc.go @@ -0,0 +1,5 @@ +// Copyright (c) Abstract Machines +// SPDX-License-Identifier: Apache-2.0 + +// Package mocks contains mocks for testing purposes. +package mocks diff --git a/certs/mocks/pki.go b/certs/mocks/pki.go new file mode 100644 index 0000000..47ae77b --- /dev/null +++ b/certs/mocks/pki.go @@ -0,0 +1,135 @@ +// Code generated by mockery v2.43.2. DO NOT EDIT. 
+ +// Copyright (c) Abstract Machines + +package mocks + +import ( + context "context" + + pki "github.com/absmach/magistrala/certs/pki" + mock "github.com/stretchr/testify/mock" + + time "time" +) + +// Agent is an autogenerated mock type for the Agent type +type Agent struct { + mock.Mock +} + +// IssueCert provides a mock function with given fields: cn, ttl +func (_m *Agent) IssueCert(cn string, ttl string) (pki.Cert, error) { + ret := _m.Called(cn, ttl) + + if len(ret) == 0 { + panic("no return value specified for IssueCert") + } + + var r0 pki.Cert + var r1 error + if rf, ok := ret.Get(0).(func(string, string) (pki.Cert, error)); ok { + return rf(cn, ttl) + } + if rf, ok := ret.Get(0).(func(string, string) pki.Cert); ok { + r0 = rf(cn, ttl) + } else { + r0 = ret.Get(0).(pki.Cert) + } + + if rf, ok := ret.Get(1).(func(string, string) error); ok { + r1 = rf(cn, ttl) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// LoginAndRenew provides a mock function with given fields: ctx +func (_m *Agent) LoginAndRenew(ctx context.Context) error { + ret := _m.Called(ctx) + + if len(ret) == 0 { + panic("no return value specified for LoginAndRenew") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context) error); ok { + r0 = rf(ctx) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// Read provides a mock function with given fields: serial +func (_m *Agent) Read(serial string) (pki.Cert, error) { + ret := _m.Called(serial) + + if len(ret) == 0 { + panic("no return value specified for Read") + } + + var r0 pki.Cert + var r1 error + if rf, ok := ret.Get(0).(func(string) (pki.Cert, error)); ok { + return rf(serial) + } + if rf, ok := ret.Get(0).(func(string) pki.Cert); ok { + r0 = rf(serial) + } else { + r0 = ret.Get(0).(pki.Cert) + } + + if rf, ok := ret.Get(1).(func(string) error); ok { + r1 = rf(serial) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Revoke provides a mock function with given fields: serial +func (_m *Agent) Revoke(serial string) (time.Time, error) { + ret := _m.Called(serial) + + if len(ret) == 0 { + panic("no return value specified for Revoke") + } + + var r0 time.Time + var r1 error + if rf, ok := ret.Get(0).(func(string) (time.Time, error)); ok { + return rf(serial) + } + if rf, ok := ret.Get(0).(func(string) time.Time); ok { + r0 = rf(serial) + } else { + r0 = ret.Get(0).(time.Time) + } + + if rf, ok := ret.Get(1).(func(string) error); ok { + r1 = rf(serial) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// NewAgent creates a new instance of Agent. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewAgent(t interface { + mock.TestingT + Cleanup(func()) +}) *Agent { + mock := &Agent{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/certs/mocks/service.go b/certs/mocks/service.go new file mode 100644 index 0000000..6bc257d --- /dev/null +++ b/certs/mocks/service.go @@ -0,0 +1,172 @@ +// Code generated by mockery v2.43.2. DO NOT EDIT. 
+ +// Copyright (c) Abstract Machines + +package mocks + +import ( + context "context" + + certs "github.com/absmach/magistrala/certs" + + mock "github.com/stretchr/testify/mock" +) + +// Service is an autogenerated mock type for the Service type +type Service struct { + mock.Mock +} + +// IssueCert provides a mock function with given fields: ctx, token, thingID, ttl +func (_m *Service) IssueCert(ctx context.Context, token string, thingID string, ttl string) (certs.Cert, error) { + ret := _m.Called(ctx, token, thingID, ttl) + + if len(ret) == 0 { + panic("no return value specified for IssueCert") + } + + var r0 certs.Cert + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, string, string, string) (certs.Cert, error)); ok { + return rf(ctx, token, thingID, ttl) + } + if rf, ok := ret.Get(0).(func(context.Context, string, string, string) certs.Cert); ok { + r0 = rf(ctx, token, thingID, ttl) + } else { + r0 = ret.Get(0).(certs.Cert) + } + + if rf, ok := ret.Get(1).(func(context.Context, string, string, string) error); ok { + r1 = rf(ctx, token, thingID, ttl) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ListCerts provides a mock function with given fields: ctx, token, thingID, offset, limit +func (_m *Service) ListCerts(ctx context.Context, token string, thingID string, offset uint64, limit uint64) (certs.Page, error) { + ret := _m.Called(ctx, token, thingID, offset, limit) + + if len(ret) == 0 { + panic("no return value specified for ListCerts") + } + + var r0 certs.Page + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, string, string, uint64, uint64) (certs.Page, error)); ok { + return rf(ctx, token, thingID, offset, limit) + } + if rf, ok := ret.Get(0).(func(context.Context, string, string, uint64, uint64) certs.Page); ok { + r0 = rf(ctx, token, thingID, offset, limit) + } else { + r0 = ret.Get(0).(certs.Page) + } + + if rf, ok := ret.Get(1).(func(context.Context, string, string, uint64, uint64) error); ok { + r1 = rf(ctx, token, thingID, offset, limit) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ListSerials provides a mock function with given fields: ctx, token, thingID, offset, limit +func (_m *Service) ListSerials(ctx context.Context, token string, thingID string, offset uint64, limit uint64) (certs.Page, error) { + ret := _m.Called(ctx, token, thingID, offset, limit) + + if len(ret) == 0 { + panic("no return value specified for ListSerials") + } + + var r0 certs.Page + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, string, string, uint64, uint64) (certs.Page, error)); ok { + return rf(ctx, token, thingID, offset, limit) + } + if rf, ok := ret.Get(0).(func(context.Context, string, string, uint64, uint64) certs.Page); ok { + r0 = rf(ctx, token, thingID, offset, limit) + } else { + r0 = ret.Get(0).(certs.Page) + } + + if rf, ok := ret.Get(1).(func(context.Context, string, string, uint64, uint64) error); ok { + r1 = rf(ctx, token, thingID, offset, limit) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// RevokeCert provides a mock function with given fields: ctx, token, serialID +func (_m *Service) RevokeCert(ctx context.Context, token string, serialID string) (certs.Revoke, error) { + ret := _m.Called(ctx, token, serialID) + + if len(ret) == 0 { + panic("no return value specified for RevokeCert") + } + + var r0 certs.Revoke + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, string, string) (certs.Revoke, error)); ok { + return rf(ctx, token, serialID) + } + if rf, ok := 
ret.Get(0).(func(context.Context, string, string) certs.Revoke); ok {
+		r0 = rf(ctx, token, serialID)
+	} else {
+		r0 = ret.Get(0).(certs.Revoke)
+	}
+
+	if rf, ok := ret.Get(1).(func(context.Context, string, string) error); ok {
+		r1 = rf(ctx, token, serialID)
+	} else {
+		r1 = ret.Error(1)
+	}
+
+	return r0, r1
+}
+
+// ViewCert provides a mock function with given fields: ctx, token, serialID
+func (_m *Service) ViewCert(ctx context.Context, token string, serialID string) (certs.Cert, error) {
+	ret := _m.Called(ctx, token, serialID)
+
+	if len(ret) == 0 {
+		panic("no return value specified for ViewCert")
+	}
+
+	var r0 certs.Cert
+	var r1 error
+	if rf, ok := ret.Get(0).(func(context.Context, string, string) (certs.Cert, error)); ok {
+		return rf(ctx, token, serialID)
+	}
+	if rf, ok := ret.Get(0).(func(context.Context, string, string) certs.Cert); ok {
+		r0 = rf(ctx, token, serialID)
+	} else {
+		r0 = ret.Get(0).(certs.Cert)
+	}
+
+	if rf, ok := ret.Get(1).(func(context.Context, string, string) error); ok {
+		r1 = rf(ctx, token, serialID)
+	} else {
+		r1 = ret.Error(1)
+	}
+
+	return r0, r1
+}
+
+// NewService creates a new instance of Service. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
+// The first argument is typically a *testing.T value.
+func NewService(t interface {
+	mock.TestingT
+	Cleanup(func())
+}) *Service {
+	mock := &Service{}
+	mock.Mock.Test(t)
+
+	t.Cleanup(func() { mock.AssertExpectations(t) })
+
+	return mock
+}
diff --git a/certs/pki/doc.go b/certs/pki/doc.go
new file mode 100644
index 0000000..cbd2d97
--- /dev/null
+++ b/certs/pki/doc.go
@@ -0,0 +1,8 @@
+// Copyright (c) Abstract Machines
+// SPDX-License-Identifier: Apache-2.0
+
+// Package pki contains the domain concept definitions needed to
+// support Magistrala Certs service functionality.
+// It provides the abstraction of the PKI (Public Key Infrastructure)
+// Vault service, which is used to issue and revoke certificates.
+package pki diff --git a/certs/pki/vault.go b/certs/pki/vault.go new file mode 100644 index 0000000..69bb8bb --- /dev/null +++ b/certs/pki/vault.go @@ -0,0 +1,271 @@ +// Copyright (c) Abstract Machines +// SPDX-License-Identifier: Apache-2.0 + +// Package pki wraps vault client +package pki + +import ( + "context" + "encoding/json" + "fmt" + "log/slog" + "time" + + "github.com/absmach/magistrala/pkg/errors" + "github.com/hashicorp/vault/api" + "github.com/hashicorp/vault/api/auth/approle" + "github.com/mitchellh/mapstructure" +) + +const ( + issue = "issue" + cert = "cert" + revoke = "revoke" +) + +var ( + errFailedCertDecoding = errors.New("failed to decode response from vault service") + errFailedToLogin = errors.New("failed to login to Vault") + errFailedAppRole = errors.New("failed to create vault new app role") + errNoAuthInfo = errors.New("no auth information from Vault") + errNonRenewal = errors.New("token is not configured to be renewable") + errRenewWatcher = errors.New("unable to initialize new lifetime watcher for renewing auth token") + errFailedRenew = errors.New("failed to renew token") + errCouldNotRenew = errors.New("token can no longer be renewed") +) + +type Cert struct { + ClientCert string `json:"client_cert" mapstructure:"certificate"` + IssuingCA string `json:"issuing_ca" mapstructure:"issuing_ca"` + CAChain []string `json:"ca_chain" mapstructure:"ca_chain"` + ClientKey string `json:"client_key" mapstructure:"private_key"` + PrivateKeyType string `json:"private_key_type" mapstructure:"private_key_type"` + Serial string `json:"serial" mapstructure:"serial_number"` + Expire int64 `json:"expire" mapstructure:"expiration"` +} + +// Agent represents the Vault PKI interface. +// +//go:generate mockery --name Agent --output=../mocks --filename pki.go --quiet --note "Copyright (c) Abstract Machines" +type Agent interface { + // IssueCert issues certificate on PKI + IssueCert(cn, ttl string) (Cert, error) + + // Read retrieves certificate from PKI + Read(serial string) (Cert, error) + + // Revoke revokes certificate from PKI + Revoke(serial string) (time.Time, error) + + // Login to PKI and renews token + LoginAndRenew(ctx context.Context) error +} + +type pkiAgent struct { + appRole string + appSecret string + namespace string + path string + role string + host string + issueURL string + readURL string + revokeURL string + client *api.Client + secret *api.Secret + logger *slog.Logger +} + +type certReq struct { + CommonName string `json:"common_name"` + TTL string `json:"ttl"` +} + +type certRevokeReq struct { + SerialNumber string `json:"serial_number"` +} + +// NewVaultClient instantiates a Vault client. 
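+// A minimal usage sketch (placeholder values; error handling elided):
+//
+//	agent, _ := NewVaultClient("role-id", "secret-id", "http://vault:8200", "", "pki_int", "magistrala", logger)
+//	go agent.LoginAndRenew(ctx)
+//	crt, _ := agent.IssueCert("common-name", "24h")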
+func NewVaultClient(appRole, appSecret, host, namespace, path, role string, logger *slog.Logger) (Agent, error) {
+	conf := api.DefaultConfig()
+	conf.Address = host
+
+	client, err := api.NewClient(conf)
+	if err != nil {
+		return nil, err
+	}
+	if namespace != "" {
+		client.SetNamespace(namespace)
+	}
+
+	p := pkiAgent{
+		appRole:   appRole,
+		appSecret: appSecret,
+		host:      host,
+		namespace: namespace,
+		role:      role,
+		path:      path,
+		client:    client,
+		logger:    logger,
+		issueURL:  "/" + path + "/" + issue + "/" + role,
+		readURL:   "/" + path + "/" + cert + "/",
+		revokeURL: "/" + path + "/" + revoke,
+	}
+	return &p, nil
+}
+
+func (p *pkiAgent) IssueCert(cn, ttl string) (Cert, error) {
+	cReq := certReq{
+		CommonName: cn,
+		TTL:        ttl,
+	}
+
+	var certIssueReq map[string]interface{}
+	data, err := json.Marshal(cReq)
+	if err != nil {
+		return Cert{}, err
+	}
+	if err := json.Unmarshal(data, &certIssueReq); err != nil {
+		return Cert{}, err
+	}
+
+	s, err := p.client.Logical().Write(p.issueURL, certIssueReq)
+	if err != nil {
+		return Cert{}, err
+	}
+
+	cert := Cert{}
+	if err = mapstructure.Decode(s.Data, &cert); err != nil {
+		return Cert{}, errors.Wrap(errFailedCertDecoding, err)
+	}
+
+	return cert, nil
+}
+
+func (p *pkiAgent) Read(serial string) (Cert, error) {
+	s, err := p.client.Logical().Read(p.readURL + serial)
+	if err != nil {
+		return Cert{}, err
+	}
+	cert := Cert{}
+	if err = mapstructure.Decode(s.Data, &cert); err != nil {
+		return Cert{}, errors.Wrap(errFailedCertDecoding, err)
+	}
+	return cert, nil
+}
+
+func (p *pkiAgent) Revoke(serial string) (time.Time, error) {
+	cReq := certRevokeReq{
+		SerialNumber: serial,
+	}
+
+	var certRevokeReq map[string]interface{}
+	data, err := json.Marshal(cReq)
+	if err != nil {
+		return time.Time{}, err
+	}
+	if err := json.Unmarshal(data, &certRevokeReq); err != nil {
+		return time.Time{}, err
+	}
+
+	s, err := p.client.Logical().Write(p.revokeURL, certRevokeReq)
+	if err != nil {
+		return time.Time{}, err
+	}
+
+	// Vault will return a response without errors but with a warning if the certificate is expired.
+	// The response will not have "revocation_time" in such cases.
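+	// When present, "revocation_time" is a Unix timestamp in seconds delivered
+	// as a json.Number; it is converted to a time.Time below.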
+ if revokeTime, ok := s.Data["revocation_time"]; ok { + switch v := revokeTime.(type) { + case json.Number: + rev, err := v.Float64() + if err != nil { + return time.Time{}, err + } + return time.Unix(0, int64(rev)*int64(time.Second)), nil + + default: + return time.Time{}, fmt.Errorf("unsupported type for revocation_time: %T", v) + } + } + + return time.Time{}, nil +} + +func (p *pkiAgent) LoginAndRenew(ctx context.Context) error { + for { + select { + case <-ctx.Done(): + p.logger.Info("pki login and renew function stopping") + return nil + default: + err := p.login(ctx) + if err != nil { + p.logger.Info("unable to authenticate to Vault", slog.Any("error", err)) + time.Sleep(5 * time.Second) + break + } + tokenErr := p.manageTokenLifecycle() + if tokenErr != nil { + p.logger.Info("unable to start managing token lifecycle", slog.Any("error", tokenErr)) + time.Sleep(5 * time.Second) + } + } + } +} + +func (p *pkiAgent) login(ctx context.Context) error { + secretID := &approle.SecretID{FromString: p.appSecret} + + authMethod, err := approle.NewAppRoleAuth( + p.appRole, + secretID, + ) + if err != nil { + return errors.Wrap(errFailedAppRole, err) + } + if p.namespace != "" { + p.client.SetNamespace(p.namespace) + } + secret, err := p.client.Auth().Login(ctx, authMethod) + if err != nil { + return errors.Wrap(errFailedToLogin, err) + } + if secret == nil { + return errNoAuthInfo + } + p.secret = secret + return nil +} + +func (p *pkiAgent) manageTokenLifecycle() error { + renew := p.secret.Auth.Renewable + if !renew { + return errNonRenewal + } + + watcher, err := p.client.NewLifetimeWatcher(&api.LifetimeWatcherInput{ + Secret: p.secret, + Increment: 3600, // Requesting token for 3600s = 1h, If this is more than token_max_ttl, then response token will have token_max_ttl + }) + if err != nil { + return errors.Wrap(errRenewWatcher, err) + } + + go watcher.Start() + defer watcher.Stop() + + for { + select { + case err := <-watcher.DoneCh(): + if err != nil { + return errors.Wrap(errFailedRenew, err) + } + // This occurs once the token has reached max TTL or if token is disabled for renewal. + return errCouldNotRenew + + case renewal := <-watcher.RenewCh(): + p.logger.Info("Successfully renewed token", slog.Any("renewed_at", renewal.RenewedAt)) + } + } +} diff --git a/certs/postgres/certs.go b/certs/postgres/certs.go new file mode 100644 index 0000000..8f581ea --- /dev/null +++ b/certs/postgres/certs.go @@ -0,0 +1,201 @@ +// Copyright (c) Abstract Machines +// SPDX-License-Identifier: Apache-2.0 + +package postgres + +import ( + "context" + "database/sql" + "fmt" + "log/slog" + "time" + + "github.com/absmach/magistrala/certs" + "github.com/absmach/magistrala/pkg/errors" + repoerr "github.com/absmach/magistrala/pkg/errors/repository" + "github.com/absmach/magistrala/pkg/postgres" + "github.com/jackc/pgerrcode" + "github.com/jackc/pgx/v5/pgconn" + "github.com/jmoiron/sqlx" +) + +var _ certs.Repository = (*certsRepository)(nil) + +// Cert holds info on expiration date for specific cert issued for specific Thing. +type Cert struct { + ThingID string + Serial string + Expire time.Time +} + +type certsRepository struct { + db postgres.Database + log *slog.Logger +} + +// NewRepository instantiates a PostgreSQL implementation of certs +// repository. 
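+// Query and transaction failures are logged with the provided logger before
+// being returned to the caller.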
+func NewRepository(db postgres.Database, log *slog.Logger) certs.Repository {
+	return &certsRepository{db: db, log: log}
+}
+
+func (cr certsRepository) RetrieveAll(ctx context.Context, ownerID string, offset, limit uint64) (certs.Page, error) {
+	q := `SELECT thing_id, owner_id, serial, expire FROM certs WHERE owner_id = $1 ORDER BY expire LIMIT $2 OFFSET $3;`
+	rows, err := cr.db.QueryContext(ctx, q, ownerID, limit, offset)
+	if err != nil {
+		cr.log.Error(fmt.Sprintf("Failed to retrieve certs due to %s", err))
+		return certs.Page{}, err
+	}
+	defer rows.Close()
+
+	certificates := []certs.Cert{}
+	for rows.Next() {
+		c := certs.Cert{}
+		if err := rows.Scan(&c.ThingID, &c.OwnerID, &c.Serial, &c.Expire); err != nil {
+			cr.log.Error(fmt.Sprintf("Failed to read retrieved cert due to %s", err))
+			return certs.Page{}, err
+		}
+		certificates = append(certificates, c)
+	}
+
+	q = `SELECT COUNT(*) FROM certs WHERE owner_id = $1`
+	var total uint64
+	if err := cr.db.QueryRowxContext(ctx, q, ownerID).Scan(&total); err != nil {
+		cr.log.Error(fmt.Sprintf("Failed to count certs due to %s", err))
+		return certs.Page{}, err
+	}
+
+	return certs.Page{
+		Total:  total,
+		Limit:  limit,
+		Offset: offset,
+		Certs:  certificates,
+	}, nil
+}
+
+func (cr certsRepository) Save(ctx context.Context, cert certs.Cert) (string, error) {
+	q := `INSERT INTO certs (thing_id, owner_id, serial, expire) VALUES (:thing_id, :owner_id, :serial, :expire)`
+
+	tx, err := cr.db.BeginTxx(ctx, nil)
+	if err != nil {
+		return "", errors.Wrap(repoerr.ErrCreateEntity, err)
+	}
+
+	dbcrt := toDBCert(cert)
+
+	if _, err := tx.NamedExec(q, dbcrt); err != nil {
+		e := err
+		if pgErr, ok := err.(*pgconn.PgError); ok && pgErr.Code == pgerrcode.UniqueViolation {
+			e = errors.New("error conflict")
+		}
+
+		cr.rollback("Failed to insert a Cert", tx, err)
+
+		return "", errors.Wrap(repoerr.ErrCreateEntity, e)
+	}
+
+	if err := tx.Commit(); err != nil {
+		cr.rollback("Failed to commit Cert save", tx, err)
+		return "", errors.Wrap(repoerr.ErrCreateEntity, err)
+	}
+
+	return cert.Serial, nil
+}
+
+func (cr certsRepository) Remove(ctx context.Context, ownerID, serial string) error {
+	if _, err := cr.RetrieveBySerial(ctx, ownerID, serial); err != nil {
+		return errors.Wrap(repoerr.ErrRemoveEntity, err)
+	}
+	q := `DELETE FROM certs WHERE serial = :serial`
+	var c certs.Cert
+	c.Serial = serial
+	dbcrt := toDBCert(c)
+	if _, err := cr.db.NamedExecContext(ctx, q, dbcrt); err != nil {
+		return errors.Wrap(repoerr.ErrRemoveEntity, err)
+	}
+	return nil
+}
+
+func (cr certsRepository) RetrieveByThing(ctx context.Context, ownerID, thingID string, offset, limit uint64) (certs.Page, error) {
+	q := `SELECT thing_id, owner_id, serial, expire FROM certs WHERE owner_id = $1 AND thing_id = $2 ORDER BY expire LIMIT $3 OFFSET $4;`
+	rows, err := cr.db.QueryContext(ctx, q, ownerID, thingID, limit, offset)
+	if err != nil {
+		cr.log.Error(fmt.Sprintf("Failed to retrieve certs due to %s", err))
+		return certs.Page{}, err
+	}
+	defer rows.Close()
+
+	certificates := []certs.Cert{}
+	for rows.Next() {
+		c := certs.Cert{}
+		if err := rows.Scan(&c.ThingID, &c.OwnerID, &c.Serial, &c.Expire); err != nil {
+			cr.log.Error(fmt.Sprintf("Failed to read retrieved cert due to %s", err))
+			return certs.Page{}, err
+		}
+		certificates = append(certificates, c)
+	}
+
+	q = `SELECT COUNT(*) FROM certs WHERE owner_id = $1 AND thing_id = $2`
+	var total uint64
+	if err := cr.db.QueryRowxContext(ctx, q, ownerID, thingID).Scan(&total); err != nil {
+		cr.log.Error(fmt.Sprintf("Failed to count certs due to %s", err))
+		return
certs.Page{}, err + } + + return certs.Page{ + Total: total, + Limit: limit, + Offset: offset, + Certs: certificates, + }, nil +} + +func (cr certsRepository) RetrieveBySerial(ctx context.Context, ownerID, serialID string) (certs.Cert, error) { + q := `SELECT thing_id, owner_id, serial, expire FROM certs WHERE owner_id = $1 AND serial = $2` + var dbcrt dbCert + var c certs.Cert + + if err := cr.db.QueryRowxContext(ctx, q, ownerID, serialID).StructScan(&dbcrt); err != nil { + pqErr, ok := err.(*pgconn.PgError) + if err == sql.ErrNoRows || ok && pgerrcode.InvalidTextRepresentation == pqErr.Code { + return c, errors.Wrap(repoerr.ErrNotFound, err) + } + + return c, errors.Wrap(repoerr.ErrViewEntity, err) + } + c = toCert(dbcrt) + + return c, nil +} + +func (cr certsRepository) rollback(content string, tx *sqlx.Tx, err error) { + cr.log.Error(fmt.Sprintf("%s %s", content, err)) + + if err := tx.Rollback(); err != nil { + cr.log.Error(fmt.Sprintf("Failed to rollback due to %s", err)) + } +} + +type dbCert struct { + ThingID string `db:"thing_id"` + Serial string `db:"serial"` + Expire time.Time `db:"expire"` + OwnerID string `db:"owner_id"` +} + +func toDBCert(c certs.Cert) dbCert { + return dbCert{ + ThingID: c.ThingID, + OwnerID: c.OwnerID, + Serial: c.Serial, + Expire: c.Expire, + } +} + +func toCert(cdb dbCert) certs.Cert { + var c certs.Cert + c.OwnerID = cdb.OwnerID + c.ThingID = cdb.ThingID + c.Serial = cdb.Serial + c.Expire = cdb.Expire + return c +} diff --git a/certs/postgres/doc.go b/certs/postgres/doc.go new file mode 100644 index 0000000..73a6784 --- /dev/null +++ b/certs/postgres/doc.go @@ -0,0 +1,6 @@ +// Copyright (c) Abstract Machines +// SPDX-License-Identifier: Apache-2.0 + +// Package postgres contains repository implementations using PostgreSQL as +// the underlying database. +package postgres diff --git a/certs/postgres/init.go b/certs/postgres/init.go new file mode 100644 index 0000000..a1f1eda --- /dev/null +++ b/certs/postgres/init.go @@ -0,0 +1,29 @@ +// Copyright (c) Abstract Machines +// SPDX-License-Identifier: Apache-2.0 + +package postgres + +import migrate "github.com/rubenv/sql-migrate" + +// Migration of Certs service. 
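+// It returns the in-memory migration source that creates the certs table and
+// drops it on rollback. A typical wiring (a sketch; dbConfig is assumed to be
+// a configured pgclient.Config):
+//
+//	db, err := pgclient.Setup(dbConfig, *postgres.Migration())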
+func Migration() *migrate.MemoryMigrationSource { + return &migrate.MemoryMigrationSource{ + Migrations: []*migrate.Migration{ + { + Id: "certs_1", + Up: []string{ + `CREATE TABLE IF NOT EXISTS certs ( + thing_id TEXT NOT NULL, + owner_id TEXT NOT NULL, + expire TIMESTAMPTZ NOT NULL, + serial TEXT NOT NULL, + PRIMARY KEY (thing_id, owner_id, serial) + );`, + }, + Down: []string{ + "DROP TABLE IF EXISTS certs;", + }, + }, + }, + } +} diff --git a/certs/postgres/setup_test.go b/certs/postgres/setup_test.go new file mode 100644 index 0000000..1281e08 --- /dev/null +++ b/certs/postgres/setup_test.go @@ -0,0 +1,88 @@ +// Copyright (c) Abstract Machines +// SPDX-License-Identifier: Apache-2.0 + +package postgres_test + +import ( + "database/sql" + "fmt" + "log" + "os" + "testing" + + "github.com/absmach/magistrala/certs/postgres" + mglog "github.com/absmach/magistrala/logger" + pgclient "github.com/absmach/magistrala/pkg/postgres" + "github.com/jmoiron/sqlx" + "github.com/ory/dockertest/v3" + "github.com/ory/dockertest/v3/docker" +) + +var ( + testLog, _ = mglog.New(os.Stdout, "info") + db *sqlx.DB +) + +func TestMain(m *testing.M) { + pool, err := dockertest.NewPool("") + if err != nil { + testLog.Error(fmt.Sprintf("Could not connect to docker: %s", err)) + return + } + + container, err := pool.RunWithOptions(&dockertest.RunOptions{ + Repository: "postgres", + Tag: "16.2-alpine", + Env: []string{ + "POSTGRES_USER=test", + "POSTGRES_PASSWORD=test", + "POSTGRES_DB=test", + "listen_addresses = '*'", + }, + }, func(config *docker.HostConfig) { + config.AutoRemove = true + config.RestartPolicy = docker.RestartPolicy{Name: "no"} + }) + if err != nil { + log.Fatalf("Could not start container: %s", err) + } + + port := container.GetPort("5432/tcp") + + if err := pool.Retry(func() error { + url := fmt.Sprintf("host=localhost port=%s user=test dbname=test password=test sslmode=disable", port) + db, err := sql.Open("pgx", url) + if err != nil { + return err + } + return db.Ping() + }); err != nil { + testLog.Error(fmt.Sprintf("Could not connect to docker: %s", err)) + } + + dbConfig := pgclient.Config{ + Host: "localhost", + Port: port, + User: "test", + Pass: "test", + Name: "test", + SSLMode: "disable", + SSLCert: "", + SSLKey: "", + SSLRootCert: "", + } + + if db, err = pgclient.Setup(dbConfig, *postgres.Migration()); err != nil { + testLog.Error(fmt.Sprintf("Could not setup test DB connection: %s", err)) + } + + code := m.Run() + + // Defers will not be run when using os.Exit + db.Close() + if err := pool.Purge(container); err != nil { + testLog.Error(fmt.Sprintf("Could not purge container: %s", err)) + } + + os.Exit(code) +} diff --git a/certs/service.go b/certs/service.go new file mode 100644 index 0000000..191b328 --- /dev/null +++ b/certs/service.go @@ -0,0 +1,206 @@ +// Copyright (c) Abstract Machines +// SPDX-License-Identifier: Apache-2.0 + +package certs + +import ( + "context" + "time" + + "github.com/absmach/magistrala" + "github.com/absmach/magistrala/certs/pki" + "github.com/absmach/magistrala/pkg/errors" + svcerr "github.com/absmach/magistrala/pkg/errors/service" + mgsdk "github.com/absmach/magistrala/pkg/sdk/go" +) + +var ( + // ErrFailedCertCreation failed to create certificate. + ErrFailedCertCreation = errors.New("failed to create client certificate") + + // ErrFailedCertRevocation failed to revoke certificate. 
+	ErrFailedCertRevocation = errors.New("failed to revoke certificate")
+
+	ErrFailedToRemoveCertFromDB = errors.New("failed to remove cert serial from db")
+
+	ErrFailedReadFromPKI = errors.New("failed to read certificate from PKI")
+)
+
+var _ Service = (*certsService)(nil)
+
+// Service specifies an API that must be fulfilled by the domain service
+// implementation, and all of its decorators (e.g. logging & metrics).
+//
+//go:generate mockery --name Service --output=./mocks --filename service.go --quiet --note "Copyright (c) Abstract Machines"
+type Service interface {
+	// IssueCert issues certificate for given thing id if access is granted with token
+	IssueCert(ctx context.Context, token, thingID, ttl string) (Cert, error)
+
+	// ListCerts lists certificates issued for a given thing ID
+	ListCerts(ctx context.Context, token, thingID string, offset, limit uint64) (Page, error)
+
+	// ListSerials lists certificate serial IDs issued for a given thing ID
+	ListSerials(ctx context.Context, token, thingID string, offset, limit uint64) (Page, error)
+
+	// ViewCert retrieves the certificate issued for a given serial ID
+	ViewCert(ctx context.Context, token, serialID string) (Cert, error)
+
+	// RevokeCert revokes a certificate for a given serial ID
+	RevokeCert(ctx context.Context, token, serialID string) (Revoke, error)
+}
+
+type certsService struct {
+	auth      magistrala.AuthServiceClient
+	certsRepo Repository
+	sdk       mgsdk.SDK
+	pki       pki.Agent
+}
+
+// New returns new Certs service.
+func New(auth magistrala.AuthServiceClient, certs Repository, sdk mgsdk.SDK, pkiAgent pki.Agent) Service {
+	return &certsService{
+		certsRepo: certs,
+		sdk:       sdk,
+		auth:      auth,
+		pki:       pkiAgent,
+	}
+}
+
+// Revoke defines the conditions to revoke a certificate.
+type Revoke struct {
+	RevocationTime time.Time `mapstructure:"revocation_time"`
+}
+
+// Cert defines the certificate parameters.
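+// JSON tags shape API responses, while the mapstructure tags mirror the field
+// names returned by the Vault PKI backend.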
+type Cert struct { + OwnerID string `json:"owner_id" mapstructure:"owner_id"` + ThingID string `json:"thing_id" mapstructure:"thing_id"` + ClientCert string `json:"client_cert" mapstructure:"certificate"` + IssuingCA string `json:"issuing_ca" mapstructure:"issuing_ca"` + CAChain []string `json:"ca_chain" mapstructure:"ca_chain"` + ClientKey string `json:"client_key" mapstructure:"private_key"` + PrivateKeyType string `json:"private_key_type" mapstructure:"private_key_type"` + Serial string `json:"serial" mapstructure:"serial_number"` + Expire time.Time `json:"expire" mapstructure:"-"` +} + +func (cs *certsService) IssueCert(ctx context.Context, token, thingID, ttl string) (Cert, error) { + owner, err := cs.auth.Identify(ctx, &magistrala.IdentityReq{Token: token}) + if err != nil { + return Cert{}, errors.Wrap(svcerr.ErrAuthentication, err) + } + + thing, err := cs.sdk.Thing(thingID, token) + if err != nil { + return Cert{}, errors.Wrap(ErrFailedCertCreation, err) + } + + cert, err := cs.pki.IssueCert(thing.Credentials.Secret, ttl) + if err != nil { + return Cert{}, errors.Wrap(ErrFailedCertCreation, err) + } + + c := Cert{ + ThingID: thingID, + OwnerID: owner.GetId(), + ClientCert: cert.ClientCert, + IssuingCA: cert.IssuingCA, + CAChain: cert.CAChain, + ClientKey: cert.ClientKey, + PrivateKeyType: cert.PrivateKeyType, + Serial: cert.Serial, + Expire: time.Unix(0, int64(cert.Expire)*int64(time.Second)), + } + + _, err = cs.certsRepo.Save(ctx, c) + return c, err +} + +func (cs *certsService) RevokeCert(ctx context.Context, token, thingID string) (Revoke, error) { + var revoke Revoke + u, err := cs.auth.Identify(ctx, &magistrala.IdentityReq{Token: token}) + if err != nil { + return revoke, errors.Wrap(svcerr.ErrAuthentication, err) + } + thing, err := cs.sdk.Thing(thingID, token) + if err != nil { + return revoke, errors.Wrap(ErrFailedCertRevocation, err) + } + + offset, limit := uint64(0), uint64(10000) + cp, err := cs.certsRepo.RetrieveByThing(ctx, u.GetId(), thing.ID, offset, limit) + if err != nil { + return revoke, errors.Wrap(ErrFailedCertRevocation, err) + } + + for _, c := range cp.Certs { + revTime, err := cs.pki.Revoke(c.Serial) + if err != nil { + return revoke, errors.Wrap(ErrFailedCertRevocation, err) + } + revoke.RevocationTime = revTime + if err = cs.certsRepo.Remove(ctx, u.GetId(), c.Serial); err != nil { + return revoke, errors.Wrap(ErrFailedToRemoveCertFromDB, err) + } + } + + return revoke, nil +} + +func (cs *certsService) ListCerts(ctx context.Context, token, thingID string, offset, limit uint64) (Page, error) { + u, err := cs.auth.Identify(ctx, &magistrala.IdentityReq{Token: token}) + if err != nil { + return Page{}, errors.Wrap(svcerr.ErrAuthentication, err) + } + + cp, err := cs.certsRepo.RetrieveByThing(ctx, u.GetId(), thingID, offset, limit) + if err != nil { + return Page{}, errors.Wrap(svcerr.ErrViewEntity, err) + } + + for i, cert := range cp.Certs { + vcert, err := cs.pki.Read(cert.Serial) + if err != nil { + return Page{}, errors.Wrap(ErrFailedReadFromPKI, err) + } + cp.Certs[i].ClientCert = vcert.ClientCert + cp.Certs[i].ClientKey = vcert.ClientKey + } + + return cp, nil +} + +func (cs *certsService) ListSerials(ctx context.Context, token, thingID string, offset, limit uint64) (Page, error) { + u, err := cs.auth.Identify(ctx, &magistrala.IdentityReq{Token: token}) + if err != nil { + return Page{}, errors.Wrap(svcerr.ErrAuthentication, err) + } + + return cs.certsRepo.RetrieveByThing(ctx, u.GetId(), thingID, offset, limit) +} + +func (cs *certsService) 
ViewCert(ctx context.Context, token, serialID string) (Cert, error) { + u, err := cs.auth.Identify(ctx, &magistrala.IdentityReq{Token: token}) + if err != nil { + return Cert{}, errors.Wrap(svcerr.ErrAuthentication, err) + } + + cert, err := cs.certsRepo.RetrieveBySerial(ctx, u.GetId(), serialID) + if err != nil { + return Cert{}, errors.Wrap(svcerr.ErrViewEntity, err) + } + + vcert, err := cs.pki.Read(serialID) + if err != nil { + return Cert{}, errors.Wrap(ErrFailedReadFromPKI, err) + } + + c := Cert{ + ThingID: cert.ThingID, + ClientCert: vcert.ClientCert, + Serial: cert.Serial, + Expire: cert.Expire, + } + + return c, nil +} diff --git a/certs/service_test.go b/certs/service_test.go new file mode 100644 index 0000000..49043eb --- /dev/null +++ b/certs/service_test.go @@ -0,0 +1,448 @@ +// Copyright (c) Abstract Machines +// SPDX-License-Identifier: Apache-2.0 + +package certs_test + +import ( + "context" + "fmt" + "strings" + "testing" + "time" + + "github.com/absmach/magistrala" + authmocks "github.com/absmach/magistrala/auth/mocks" + "github.com/absmach/magistrala/certs" + "github.com/absmach/magistrala/certs/mocks" + "github.com/absmach/magistrala/certs/pki" + "github.com/absmach/magistrala/pkg/errors" + repoerr "github.com/absmach/magistrala/pkg/errors/repository" + svcerr "github.com/absmach/magistrala/pkg/errors/service" + mgsdk "github.com/absmach/magistrala/pkg/sdk/go" + sdkmocks "github.com/absmach/magistrala/pkg/sdk/mocks" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" +) + +const ( + invalid = "invalid" + email = "user@example.com" + token = "token" + thingsNum = 1 + thingKey = "thingKey" + thingID = "1" + ttl = "1h" + certNum = 10 + validID = "d4ebb847-5d0e-4e46-bdd9-b6aceaaa3a22" +) + +func newService(_ *testing.T) (certs.Service, *mocks.Repository, *mocks.Agent, *authmocks.AuthClient, *sdkmocks.SDK) { + repo := new(mocks.Repository) + agent := new(mocks.Agent) + auth := new(authmocks.AuthClient) + sdk := new(sdkmocks.SDK) + + return certs.New(auth, repo, sdk, agent), repo, agent, auth, sdk +} + +var cert = certs.Cert{ + OwnerID: validID, + ThingID: thingID, + Serial: "", + Expire: time.Time{}, +} + +func TestIssueCert(t *testing.T) { + svc, repo, agent, auth, sdk := newService(t) + cases := []struct { + token string + desc string + thingID string + ttl string + key string + pki pki.Cert + identifyRes *magistrala.IdentityRes + identifyErr error + thingErr errors.SDKError + issueCertErr error + repoErr error + err error + }{ + { + desc: "issue new cert", + token: token, + thingID: thingID, + ttl: ttl, + pki: pki.Cert{ + ClientCert: "", + IssuingCA: "", + CAChain: []string{}, + ClientKey: "", + PrivateKeyType: "", + Serial: "", + Expire: 0, + }, + identifyRes: &magistrala.IdentityRes{Id: validID}, + }, + { + desc: "issue new cert for non existing thing id", + token: token, + thingID: "2", + ttl: ttl, + pki: pki.Cert{ + ClientCert: "", + IssuingCA: "", + CAChain: []string{}, + ClientKey: "", + PrivateKeyType: "", + Serial: "", + Expire: 0, + }, + identifyRes: &magistrala.IdentityRes{Id: validID}, + thingErr: errors.NewSDKError(errors.ErrMalformedEntity), + err: certs.ErrFailedCertCreation, + }, + { + desc: "issue new cert for invalid token", + token: invalid, + thingID: thingID, + ttl: ttl, + pki: pki.Cert{ + ClientCert: "", + IssuingCA: "", + CAChain: []string{}, + ClientKey: "", + PrivateKeyType: "", + Serial: "", + Expire: 0, + }, + identifyRes: &magistrala.IdentityRes{Id: validID}, + identifyErr: 
svcerr.ErrAuthentication, + err: svcerr.ErrAuthentication, + }, + } + + for _, tc := range cases { + authCall := auth.On("Identify", context.Background(), &magistrala.IdentityReq{Token: tc.token}).Return(tc.identifyRes, tc.identifyErr) + sdkCall := sdk.On("Thing", tc.thingID, tc.token).Return(mgsdk.Thing{ID: tc.thingID, Credentials: mgsdk.Credentials{Secret: thingKey}}, tc.thingErr) + agentCall := agent.On("IssueCert", thingKey, tc.ttl).Return(tc.pki, tc.issueCertErr) + repoCall := repo.On("Save", context.Background(), mock.Anything).Return("", tc.repoErr) + + c, err := svc.IssueCert(context.Background(), tc.token, tc.thingID, tc.ttl) + assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("%s: expected %s got %s\n", tc.desc, tc.err, err)) + cert, _ := certs.ReadCert([]byte(c.ClientCert)) + if cert != nil { + assert.True(t, strings.Contains(cert.Subject.CommonName, thingKey), fmt.Sprintf("%s: expected %s got %s\n", tc.desc, thingKey, cert.Subject.CommonName)) + } + authCall.Unset() + sdkCall.Unset() + agentCall.Unset() + repoCall.Unset() + } +} + +func TestRevokeCert(t *testing.T) { + svc, repo, _, auth, sdk := newService(t) + cases := []struct { + token string + desc string + thingID string + page certs.Page + identifyRes *magistrala.IdentityRes + identifyErr error + authErr error + thingErr errors.SDKError + repoErr error + err error + }{ + { + desc: "revoke cert", + token: token, + thingID: thingID, + page: certs.Page{Limit: 10000, Offset: 0, Total: 1, Certs: []certs.Cert{cert}}, + identifyRes: &magistrala.IdentityRes{Id: validID}, + }, + { + desc: "revoke cert for invalid token", + token: invalid, + thingID: thingID, + page: certs.Page{}, + identifyRes: &magistrala.IdentityRes{Id: validID}, + identifyErr: svcerr.ErrAuthentication, + err: svcerr.ErrAuthentication, + }, + { + desc: "revoke cert for invalid thing id", + token: token, + thingID: "2", + page: certs.Page{}, + identifyRes: &magistrala.IdentityRes{Id: validID}, + thingErr: errors.NewSDKError(certs.ErrFailedCertCreation), + err: certs.ErrFailedCertRevocation, + }, + } + + for _, tc := range cases { + authCall := auth.On("Identify", context.Background(), &magistrala.IdentityReq{Token: tc.token}).Return(tc.identifyRes, tc.identifyErr) + authCall1 := auth.On("Authorize", context.Background(), mock.Anything).Return(&magistrala.AuthorizeRes{Authorized: true}, tc.authErr) + sdkCall := sdk.On("Thing", tc.thingID, tc.token).Return(mgsdk.Thing{ID: tc.thingID, Credentials: mgsdk.Credentials{Secret: thingKey}}, tc.thingErr) + repoCall := repo.On("RetrieveByThing", context.Background(), validID, tc.thingID, tc.page.Offset, tc.page.Limit).Return(certs.Page{}, tc.repoErr) + + _, err := svc.RevokeCert(context.Background(), tc.token, tc.thingID) + assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("%s: expected %s got %s\n", tc.desc, tc.err, err)) + authCall.Unset() + authCall1.Unset() + sdkCall.Unset() + repoCall.Unset() + } +} + +func TestListCerts(t *testing.T) { + svc, repo, agent, auth, _ := newService(t) + var mycerts []certs.Cert + for i := 0; i < certNum; i++ { + c := certs.Cert{ + OwnerID: validID, + ThingID: thingID, + Serial: fmt.Sprintf("%d", i), + Expire: time.Now().Add(time.Hour), + } + mycerts = append(mycerts, c) + } + + for i := 0; i < certNum; i++ { + agent.On("Read", fmt.Sprintf("%d", i)).Return(pki.Cert{}, nil) + } + + cases := []struct { + token string + desc string + thingID string + page certs.Page + cert certs.Cert + identifyRes *magistrala.IdentityRes + identifyErr error + repoErr error + err error + }{ + { + 
desc: "list all certs with valid token", + token: token, + thingID: thingID, + page: certs.Page{Limit: certNum, Offset: 0, Total: certNum, Certs: mycerts}, + cert: certs.Cert{ + OwnerID: validID, + ThingID: thingID, + Serial: "0", + Expire: time.Now().Add(time.Hour), + }, + identifyRes: &magistrala.IdentityRes{Id: validID}, + }, + { + desc: "list all certs with invalid token", + token: invalid, + thingID: thingID, + page: certs.Page{}, + cert: certs.Cert{ + OwnerID: validID, + ThingID: thingID, + Serial: fmt.Sprintf("%d", certNum-1), + Expire: time.Now().Add(time.Hour), + }, + identifyRes: &magistrala.IdentityRes{Id: validID}, + identifyErr: svcerr.ErrAuthentication, + err: svcerr.ErrAuthentication, + }, + { + desc: "list half certs with valid token", + token: token, + thingID: thingID, + page: certs.Page{Limit: certNum, Offset: certNum / 2, Total: certNum / 2, Certs: mycerts[certNum/2:]}, + cert: certs.Cert{ + OwnerID: validID, + ThingID: thingID, + Serial: fmt.Sprintf("%d", certNum/2), + Expire: time.Now().Add(time.Hour), + }, + identifyRes: &magistrala.IdentityRes{Id: validID}, + }, + { + desc: "list last cert with valid token", + token: token, + thingID: thingID, + page: certs.Page{Limit: certNum, Offset: certNum - 1, Total: 1, Certs: []certs.Cert{mycerts[certNum-1]}}, + cert: certs.Cert{ + OwnerID: validID, + ThingID: thingID, + Serial: fmt.Sprintf("%d", certNum-1), + Expire: time.Now().Add(time.Hour), + }, + identifyRes: &magistrala.IdentityRes{Id: validID}, + }, + } + + for _, tc := range cases { + authCall := auth.On("Identify", context.Background(), &magistrala.IdentityReq{Token: tc.token}).Return(tc.identifyRes, tc.identifyErr) + repoCall := repo.On("RetrieveByThing", context.Background(), validID, thingID, tc.page.Offset, tc.page.Limit).Return(tc.page, tc.repoErr) + + page, err := svc.ListCerts(context.Background(), tc.token, tc.thingID, tc.page.Offset, tc.page.Limit) + size := uint64(len(page.Certs)) + assert.Equal(t, tc.page.Total, size, fmt.Sprintf("%s: expected %d got %d\n", tc.desc, tc.page.Total, size)) + assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("%s: expected %s got %s\n", tc.desc, tc.err, err)) + authCall.Unset() + repoCall.Unset() + } +} + +func TestListSerials(t *testing.T) { + svc, repo, _, auth, _ := newService(t) + + var issuedCerts []certs.Cert + for i := 0; i < certNum; i++ { + crt := certs.Cert{ + OwnerID: cert.OwnerID, + ThingID: cert.ThingID, + Serial: cert.Serial, + Expire: cert.Expire, + } + issuedCerts = append(issuedCerts, crt) + } + + cases := []struct { + token string + desc string + thingID string + offset uint64 + limit uint64 + certs []certs.Cert + identifyRes *magistrala.IdentityRes + identifyErr error + repoErr error + err error + }{ + { + desc: "list all certs with valid token", + token: token, + thingID: thingID, + offset: 0, + limit: certNum, + certs: issuedCerts, + identifyRes: &magistrala.IdentityRes{Id: validID}, + }, + { + desc: "list all certs with invalid token", + token: invalid, + thingID: thingID, + offset: 0, + limit: certNum, + certs: nil, + identifyRes: &magistrala.IdentityRes{Id: validID}, + identifyErr: svcerr.ErrAuthentication, + err: svcerr.ErrAuthentication, + }, + { + desc: "list half certs with valid token", + token: token, + thingID: thingID, + offset: certNum / 2, + limit: certNum, + certs: issuedCerts[certNum/2:], + identifyRes: &magistrala.IdentityRes{Id: validID}, + }, + { + desc: "list last cert with valid token", + token: token, + thingID: thingID, + offset: certNum - 1, + limit: certNum, + certs: 
[]certs.Cert{issuedCerts[certNum-1]}, + identifyRes: &magistrala.IdentityRes{Id: validID}, + }, + } + + for _, tc := range cases { + authCall := auth.On("Identify", context.Background(), &magistrala.IdentityReq{Token: tc.token}).Return(tc.identifyRes, tc.identifyErr) + repoCall := repo.On("RetrieveByThing", context.Background(), mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(certs.Page{Limit: tc.limit, Offset: tc.offset, Total: certNum, Certs: tc.certs}, tc.repoErr) + + page, err := svc.ListSerials(context.Background(), tc.token, tc.thingID, tc.offset, tc.limit) + assert.Equal(t, tc.certs, page.Certs, fmt.Sprintf("%s: expected %v got %v\n", tc.desc, tc.certs, page.Certs)) + assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("%s: expected %s got %s\n", tc.desc, tc.err, err)) + authCall.Unset() + repoCall.Unset() + } +} + +func TestViewCert(t *testing.T) { + svc, repo, agent, auth, sdk := newService(t) + + authCall := auth.On("Identify", context.Background(), &magistrala.IdentityReq{Token: token}).Return(&magistrala.IdentityRes{Id: validID}, nil) + sdkCall := sdk.On("Thing", thingID, token).Return(mgsdk.Thing{ID: thingID, Credentials: mgsdk.Credentials{Secret: thingKey}}, nil) + agentCall := agent.On("IssueCert", thingKey, ttl).Return(pki.Cert{}, nil) + repoCall := repo.On("Save", context.Background(), mock.Anything).Return("", nil) + + ic, err := svc.IssueCert(context.Background(), token, thingID, ttl) + require.Nil(t, err, fmt.Sprintf("unexpected cert creation error: %s\n", err)) + authCall.Unset() + sdkCall.Unset() + agentCall.Unset() + repoCall.Unset() + + cert := certs.Cert{ + ThingID: thingID, + ClientCert: ic.ClientCert, + Serial: ic.Serial, + Expire: ic.Expire, + } + + cases := []struct { + token string + desc string + serialID string + cert certs.Cert + identifyRes *magistrala.IdentityRes + identifyErr error + repoErr error + agentErr error + err error + }{ + { + desc: "list cert with valid token and serial", + token: token, + serialID: cert.Serial, + cert: cert, + identifyRes: &magistrala.IdentityRes{Id: validID}, + }, + { + desc: "list cert with invalid token", + token: invalid, + serialID: cert.Serial, + cert: certs.Cert{}, + identifyRes: &magistrala.IdentityRes{Id: validID}, + identifyErr: svcerr.ErrAuthentication, + err: svcerr.ErrAuthentication, + }, + { + desc: "list cert with invalid serial", + token: token, + serialID: invalid, + cert: certs.Cert{}, + identifyRes: &magistrala.IdentityRes{Id: validID}, + repoErr: repoerr.ErrNotFound, + err: svcerr.ErrNotFound, + }, + } + + for _, tc := range cases { + authCall := auth.On("Identify", context.Background(), &magistrala.IdentityReq{Token: tc.token}).Return(tc.identifyRes, tc.identifyErr) + repoCall := repo.On("RetrieveBySerial", context.Background(), validID, tc.serialID).Return(tc.cert, tc.repoErr) + agentCall := agent.On("Read", tc.serialID).Return(pki.Cert{}, tc.agentErr) + + cert, err := svc.ViewCert(context.Background(), tc.token, tc.serialID) + assert.Equal(t, tc.cert, cert, fmt.Sprintf("%s: expected %v got %v\n", tc.desc, tc.cert, cert)) + assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("%s: expected %s got %s\n", tc.desc, tc.err, err)) + authCall.Unset() + repoCall.Unset() + agentCall.Unset() + } +} diff --git a/certs/tracing/doc.go b/certs/tracing/doc.go new file mode 100644 index 0000000..6a419f3 --- /dev/null +++ b/certs/tracing/doc.go @@ -0,0 +1,12 @@ +// Copyright (c) Abstract Machines +// SPDX-License-Identifier: Apache-2.0 + +// Package tracing provides tracing instrumentation 
for Magistrala Certs service.
+//
+// This package provides tracing middleware for Magistrala Certs service.
+// It can be used to trace incoming requests and add tracing capabilities to
+// Magistrala Certs service.
+//
+// For more details about tracing instrumentation for Magistrala messaging refer
+// to the documentation at https://docs.magistrala.abstractmachines.fr/tracing/.
+package tracing
diff --git a/certs/tracing/tracing.go b/certs/tracing/tracing.go
new file mode 100644
index 0000000..e42614e
--- /dev/null
+++ b/certs/tracing/tracing.go
@@ -0,0 +1,79 @@
+// Copyright (c) Abstract Machines
+// SPDX-License-Identifier: Apache-2.0
+
+package tracing
+
+import (
+	"context"
+
+	"github.com/absmach/magistrala/certs"
+	"go.opentelemetry.io/otel/attribute"
+	"go.opentelemetry.io/otel/trace"
+)
+
+var _ certs.Service = (*tracingMiddleware)(nil)
+
+type tracingMiddleware struct {
+	tracer trace.Tracer
+	svc    certs.Service
+}
+
+// New returns a new certs service with tracing capabilities.
+func New(svc certs.Service, tracer trace.Tracer) certs.Service {
+	return &tracingMiddleware{tracer, svc}
+}
+
+// IssueCert traces the "IssueCert" operation of the wrapped certs.Service.
+func (tm *tracingMiddleware) IssueCert(ctx context.Context, token, thingID, ttl string) (certs.Cert, error) {
+	ctx, span := tm.tracer.Start(ctx, "svc_issue_cert", trace.WithAttributes(
+		attribute.String("thing_id", thingID),
+		attribute.String("ttl", ttl),
+	))
+	defer span.End()
+
+	return tm.svc.IssueCert(ctx, token, thingID, ttl)
+}
+
+// ListCerts traces the "ListCerts" operation of the wrapped certs.Service.
+func (tm *tracingMiddleware) ListCerts(ctx context.Context, token, thingID string, offset, limit uint64) (certs.Page, error) {
+	ctx, span := tm.tracer.Start(ctx, "svc_list_certs", trace.WithAttributes(
+		attribute.String("thing_id", thingID),
+		attribute.Int64("offset", int64(offset)),
+		attribute.Int64("limit", int64(limit)),
+	))
+	defer span.End()
+
+	return tm.svc.ListCerts(ctx, token, thingID, offset, limit)
+}
+
+// ListSerials traces the "ListSerials" operation of the wrapped certs.Service.
+func (tm *tracingMiddleware) ListSerials(ctx context.Context, token, thingID string, offset, limit uint64) (certs.Page, error) {
+	ctx, span := tm.tracer.Start(ctx, "svc_list_serials", trace.WithAttributes(
+		attribute.String("thing_id", thingID),
+		attribute.Int64("offset", int64(offset)),
+		attribute.Int64("limit", int64(limit)),
+	))
+	defer span.End()
+
+	return tm.svc.ListSerials(ctx, token, thingID, offset, limit)
+}
+
+// ViewCert traces the "ViewCert" operation of the wrapped certs.Service.
+func (tm *tracingMiddleware) ViewCert(ctx context.Context, token, serialID string) (certs.Cert, error) {
+	ctx, span := tm.tracer.Start(ctx, "svc_view_cert", trace.WithAttributes(
+		attribute.String("serial_id", serialID),
+	))
+	defer span.End()
+
+	return tm.svc.ViewCert(ctx, token, serialID)
+}
+
+// RevokeCert traces the "RevokeCert" operation of the wrapped certs.Service.
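+// The serial ID is recorded as a span attribute so a revocation can be
+// correlated with the certificate issued by the PKI backend.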
+func (tm *tracingMiddleware) RevokeCert(ctx context.Context, token, serialID string) (certs.Revoke, error) { + ctx, span := tm.tracer.Start(ctx, "svc_revoke_cert", trace.WithAttributes( + attribute.String("serial_id", serialID), + )) + defer span.End() + + return tm.svc.RevokeCert(ctx, token, serialID) +} diff --git a/cmd/cassandra-reader/main.go b/cmd/cassandra-reader/main.go new file mode 100644 index 0000000..8b4a1f5 --- /dev/null +++ b/cmd/cassandra-reader/main.go @@ -0,0 +1,152 @@ +// Copyright (c) Abstract Machines +// SPDX-License-Identifier: Apache-2.0 + +// Package main contains cassandra-reader main function to start the cassandra-reader service. +package main + +import ( + "context" + "fmt" + "log" + "log/slog" + "os" + + chclient "github.com/absmach/callhome/pkg/client" + "github.com/absmach/magistrala" + mglog "github.com/absmach/magistrala/logger" + "github.com/absmach/magistrala/pkg/auth" + "github.com/absmach/magistrala/pkg/prometheus" + "github.com/absmach/magistrala/pkg/server" + httpserver "github.com/absmach/magistrala/pkg/server/http" + "github.com/absmach/magistrala/pkg/uuid" + "github.com/absmach/magistrala/readers" + "github.com/absmach/magistrala/readers/api" + cassandraclient "github.com/absmach/mg-contrib/pkg/clients/cassandra" + "github.com/absmach/mg-contrib/readers/cassandra" + "github.com/caarlos0/env/v10" + "github.com/gocql/gocql" + "golang.org/x/sync/errgroup" +) + +const ( + svcName = "cassandra-reader" + envPrefixDB = "MG_CASSANDRA_" + envPrefixHTTP = "MG_CASSANDRA_READER_HTTP_" + envPrefixAuth = "MG_AUTH_GRPC_" + envPrefixAuthz = "MG_THINGS_AUTH_GRPC_" + defSvcHTTPPort = "9003" +) + +type config struct { + LogLevel string `env:"MG_CASSANDRA_READER_LOG_LEVEL" envDefault:"info"` + SendTelemetry bool `env:"MG_SEND_TELEMETRY" envDefault:"true"` + InstanceID string `env:"MG_CASSANDRA_READER_INSTANCE_ID" envDefault:""` +} + +func main() { + ctx, cancel := context.WithCancel(context.Background()) + g, ctx := errgroup.WithContext(ctx) + + // Create cassandra reader service configurations + cfg := config{} + if err := env.Parse(&cfg); err != nil { + log.Fatalf("failed to load %s service configuration : %s", svcName, err) + } + + logger, err := mglog.New(os.Stdout, cfg.LogLevel) + if err != nil { + log.Fatalf("failed to init logger: %s", err.Error()) + } + + var exitCode int + defer mglog.ExitWithError(&exitCode) + + if cfg.InstanceID == "" { + if cfg.InstanceID, err = uuid.New().ID(); err != nil { + logger.Error(fmt.Sprintf("failed to generate instanceID: %s", err)) + exitCode = 1 + return + } + } + + authConfig := auth.Config{} + if err := env.ParseWithOptions(&authConfig, env.Options{Prefix: envPrefixAuth}); err != nil { + logger.Error(fmt.Sprintf("failed to load %s auth configuration : %s", svcName, err)) + exitCode = 1 + return + } + + ac, acHandler, err := auth.Setup(ctx, authConfig) + if err != nil { + logger.Error(err.Error()) + exitCode = 1 + return + } + defer acHandler.Close() + + logger.Info("Successfully connected to auth grpc server " + acHandler.Secure()) + + authConfig = auth.Config{} + if err := env.ParseWithOptions(&authConfig, env.Options{Prefix: envPrefixAuthz}); err != nil { + logger.Error(fmt.Sprintf("failed to load %s auth configuration : %s", svcName, err)) + exitCode = 1 + return + } + + tc, tcHandler, err := auth.SetupAuthz(ctx, authConfig) + if err != nil { + logger.Error(err.Error()) + exitCode = 1 + return + } + defer tcHandler.Close() + + logger.Info("Successfully connected to things grpc server " + tcHandler.Secure()) + + // Create 
new cassandra client + csdSession, err := cassandraclient.Setup(envPrefixDB) + if err != nil { + logger.Error(err.Error()) + exitCode = 1 + return + } + defer csdSession.Close() + + // Create new service + repo := newService(csdSession, logger) + + // Create new http server + httpServerConfig := server.Config{Port: defSvcHTTPPort} + if err := env.ParseWithOptions(&httpServerConfig, env.Options{Prefix: envPrefixHTTP}); err != nil { + logger.Error(fmt.Sprintf("failed to load %s HTTP server configuration : %s", svcName, err)) + exitCode = 1 + return + } + hs := httpserver.NewServer(ctx, cancel, svcName, httpServerConfig, api.MakeHandler(repo, ac, tc, svcName, cfg.InstanceID), logger) + + if cfg.SendTelemetry { + chc := chclient.New(svcName, magistrala.Version, logger, cancel) + go chc.CallHome(ctx) + } + + // Start servers + g.Go(func() error { + return hs.Start() + }) + + g.Go(func() error { + return server.StopSignalHandler(ctx, cancel, logger, svcName, hs) + }) + + if err := g.Wait(); err != nil { + logger.Error(fmt.Sprintf("Cassandra reader service terminated: %s", err)) + } +} + +func newService(csdSession *gocql.Session, logger *slog.Logger) readers.MessageRepository { + repo := cassandra.New(csdSession) + repo = api.LoggingMiddleware(repo, logger) + counter, latency := prometheus.MakeMetrics("cassandra", "message_reader") + repo = api.MetricsMiddleware(repo, counter, latency) + return repo +} diff --git a/cmd/cassandra-writer/main.go b/cmd/cassandra-writer/main.go new file mode 100644 index 0000000..fcec712 --- /dev/null +++ b/cmd/cassandra-writer/main.go @@ -0,0 +1,155 @@ +// Copyright (c) Abstract Machines +// SPDX-License-Identifier: Apache-2.0 + +// Package main contains cassandra-writer main function to start the cassandra-writer service. +package main + +import ( + "context" + "fmt" + "log" + "log/slog" + "net/url" + "os" + + chclient "github.com/absmach/callhome/pkg/client" + "github.com/absmach/magistrala" + "github.com/absmach/magistrala/consumers" + consumertracing "github.com/absmach/magistrala/consumers/tracing" + "github.com/absmach/magistrala/consumers/writers/api" + mglog "github.com/absmach/magistrala/logger" + jaegerclient "github.com/absmach/magistrala/pkg/jaeger" + "github.com/absmach/magistrala/pkg/messaging/brokers" + brokerstracing "github.com/absmach/magistrala/pkg/messaging/brokers/tracing" + "github.com/absmach/magistrala/pkg/prometheus" + "github.com/absmach/magistrala/pkg/server" + httpserver "github.com/absmach/magistrala/pkg/server/http" + "github.com/absmach/magistrala/pkg/uuid" + "github.com/absmach/mg-contrib/consumers/writers/cassandra" + cassandraclient "github.com/absmach/mg-contrib/pkg/clients/cassandra" + "github.com/caarlos0/env/v10" + "github.com/gocql/gocql" + "golang.org/x/sync/errgroup" +) + +const ( + svcName = "cassandra-writer" + envPrefixDB = "MG_CASSANDRA_" + envPrefixHTTP = "MG_CASSANDRA_WRITER_HTTP_" + defSvcHTTPPort = "9004" +) + +type config struct { + LogLevel string `env:"MG_CASSANDRA_WRITER_LOG_LEVEL" envDefault:"info"` + ConfigPath string `env:"MG_CASSANDRA_WRITER_CONFIG_PATH" envDefault:"/config.toml"` + BrokerURL string `env:"MG_MESSAGE_BROKER_URL" envDefault:"nats://localhost:4222"` + JaegerURL url.URL `env:"MG_JAEGER_URL" envDefault:"http://jaeger:14268/api/traces"` + SendTelemetry bool `env:"MG_SEND_TELEMETRY" envDefault:"true"` + InstanceID string `env:"MG_CASSANDRA_WRITER_INSTANCE_ID" envDefault:""` + TraceRatio float64 `env:"MG_JAEGER_TRACE_RATIO" envDefault:"1.0"` +} + +func main() { + ctx, cancel := 
context.WithCancel(context.Background())
+	g, ctx := errgroup.WithContext(ctx)
+
+	// Create new cassandra writer service configurations
+	cfg := config{}
+	if err := env.Parse(&cfg); err != nil {
+		log.Fatalf("failed to load %s configuration : %s", svcName, err)
+	}
+
+	logger, err := mglog.New(os.Stdout, cfg.LogLevel)
+	if err != nil {
+		log.Fatalf("failed to init logger: %s", err.Error())
+	}
+
+	var exitCode int
+	defer mglog.ExitWithError(&exitCode)
+
+	if cfg.InstanceID == "" {
+		if cfg.InstanceID, err = uuid.New().ID(); err != nil {
+			logger.Error(fmt.Sprintf("failed to generate instanceID: %s", err))
+			exitCode = 1
+			return
+		}
+	}
+
+	httpServerConfig := server.Config{Port: defSvcHTTPPort}
+	if err := env.ParseWithOptions(&httpServerConfig, env.Options{Prefix: envPrefixHTTP}); err != nil {
+		logger.Error(fmt.Sprintf("failed to load %s HTTP server configuration : %s", svcName, err))
+		exitCode = 1
+		return
+	}
+
+	// Create new Cassandra client
+	csdSession, err := cassandraclient.SetupDB(envPrefixDB, cassandra.Table)
+	if err != nil {
+		logger.Error(err.Error())
+		exitCode = 1
+		return
+	}
+	defer csdSession.Close()
+
+	tp, err := jaegerclient.NewProvider(ctx, svcName, cfg.JaegerURL, cfg.InstanceID, cfg.TraceRatio)
+	if err != nil {
+		logger.Error(fmt.Sprintf("Failed to init Jaeger: %s", err))
+		exitCode = 1
+		return
+	}
+	defer func() {
+		if err := tp.Shutdown(ctx); err != nil {
+			logger.Error(fmt.Sprintf("Error shutting down tracer provider: %v", err))
+		}
+	}()
+	tracer := tp.Tracer(svcName)
+
+	// Create new cassandra-writer repo
+	repo := newService(csdSession, logger)
+	repo = consumertracing.NewBlocking(tracer, repo, httpServerConfig)
+
+	// Create new pub sub broker
+	pubSub, err := brokers.NewPubSub(ctx, cfg.BrokerURL, logger)
+	if err != nil {
+		logger.Error(fmt.Sprintf("failed to connect to message broker: %s", err))
+		exitCode = 1
+		return
+	}
+	defer pubSub.Close()
+	pubSub = brokerstracing.NewPubSub(httpServerConfig, tracer, pubSub)
+
+	// Start new consumer
+	if err := consumers.Start(ctx, svcName, pubSub, repo, cfg.ConfigPath, logger); err != nil {
+		logger.Error(fmt.Sprintf("Failed to create Cassandra writer: %s", err))
+		exitCode = 1
+		return
+	}
+
+	hs := httpserver.NewServer(ctx, cancel, svcName, httpServerConfig, api.MakeHandler(svcName, cfg.InstanceID), logger)
+
+	if cfg.SendTelemetry {
+		chc := chclient.New(svcName, magistrala.Version, logger, cancel)
+		go chc.CallHome(ctx)
+	}
+
+	// Start servers
+	g.Go(func() error {
+		return hs.Start()
+	})
+
+	g.Go(func() error {
+		return server.StopSignalHandler(ctx, cancel, logger, svcName, hs)
+	})
+
+	if err := g.Wait(); err != nil {
+		logger.Error(fmt.Sprintf("Cassandra writer service terminated: %s", err))
+	}
+}
+
+func newService(session *gocql.Session, logger *slog.Logger) consumers.BlockingConsumer {
+	repo := cassandra.New(session)
+	repo = api.LoggingMiddleware(repo, logger)
+	counter, latency := prometheus.MakeMetrics("cassandra", "message_writer")
+	repo = api.MetricsMiddleware(repo, counter, latency)
+	return repo
+}
diff --git a/cmd/certs/main.go b/cmd/certs/main.go
new file mode 100644
index 0000000..a0c32e7
--- /dev/null
+++ b/cmd/certs/main.go
@@ -0,0 +1,194 @@
+// Copyright (c) Abstract Machines
+// SPDX-License-Identifier: Apache-2.0
+
+// Package main contains certs main function to start the certs service.
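+// It wires together the Vault PKI agent, the PostgreSQL certs repository,
+// the auth gRPC client and the certs HTTP API server.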
+package main + +import ( + "context" + "fmt" + "log" + "log/slog" + "net/url" + "os" + + chclient "github.com/absmach/callhome/pkg/client" + "github.com/absmach/magistrala" + "github.com/absmach/magistrala/certs" + "github.com/absmach/magistrala/certs/api" + vault "github.com/absmach/magistrala/certs/pki" + certspg "github.com/absmach/magistrala/certs/postgres" + "github.com/absmach/magistrala/certs/tracing" + mglog "github.com/absmach/magistrala/logger" + "github.com/absmach/magistrala/pkg/auth" + jaegerclient "github.com/absmach/magistrala/pkg/jaeger" + "github.com/absmach/magistrala/pkg/postgres" + pgclient "github.com/absmach/magistrala/pkg/postgres" + "github.com/absmach/magistrala/pkg/prometheus" + mgsdk "github.com/absmach/magistrala/pkg/sdk/go" + "github.com/absmach/magistrala/pkg/server" + httpserver "github.com/absmach/magistrala/pkg/server/http" + "github.com/absmach/magistrala/pkg/uuid" + "github.com/caarlos0/env/v10" + "github.com/jmoiron/sqlx" + "go.opentelemetry.io/otel/trace" + "golang.org/x/sync/errgroup" +) + +const ( + svcName = "certs" + envPrefixDB = "MG_CERTS_DB_" + envPrefixHTTP = "MG_CERTS_HTTP_" + envPrefixAuth = "MG_AUTH_GRPC_" + defDB = "certs" + defSvcHTTPPort = "9019" +) + +type config struct { + LogLevel string `env:"MG_CERTS_LOG_LEVEL" envDefault:"info"` + ThingsURL string `env:"MG_THINGS_URL" envDefault:"http://localhost:9000"` + JaegerURL url.URL `env:"MG_JAEGER_URL" envDefault:"http://localhost:14268/api/traces"` + SendTelemetry bool `env:"MG_SEND_TELEMETRY" envDefault:"true"` + InstanceID string `env:"MG_CERTS_INSTANCE_ID" envDefault:""` + TraceRatio float64 `env:"MG_JAEGER_TRACE_RATIO" envDefault:"1.0"` + + // Sign and issue certificates without 3rd party PKI + SignCAPath string `env:"MG_CERTS_SIGN_CA_PATH" envDefault:"ca.crt"` + SignCAKeyPath string `env:"MG_CERTS_SIGN_CA_KEY_PATH" envDefault:"ca.key"` + + // 3rd party PKI API access settings + PkiHost string `env:"MG_CERTS_VAULT_HOST" envDefault:""` + PkiAppRoleID string `env:"MG_CERTS_VAULT_APPROLE_ROLEID" envDefault:""` + PkiAppSecret string `env:"MG_CERTS_VAULT_APPROLE_SECRET" envDefault:""` + PkiNamespace string `env:"MG_CERTS_VAULT_NAMESPACE" envDefault:""` + PkiPath string `env:"MG_CERTS_VAULT_THINGS_CERTS_PKI_PATH" envDefault:"pki_int"` + PkiRole string `env:"MG_CERTS_VAULT_THINGS_CERTS_PKI_ROLE_NAME" envDefault:"magistrala"` +} + +func main() { + ctx, cancel := context.WithCancel(context.Background()) + g, ctx := errgroup.WithContext(ctx) + + cfg := config{} + if err := env.Parse(&cfg); err != nil { + log.Fatalf("failed to load %s configuration : %s", svcName, err) + } + + logger, err := mglog.New(os.Stdout, cfg.LogLevel) + if err != nil { + log.Fatalf("failed to init logger: %s", err.Error()) + } + + var exitCode int + defer mglog.ExitWithError(&exitCode) + + if cfg.InstanceID == "" { + if cfg.InstanceID, err = uuid.New().ID(); err != nil { + logger.Error(fmt.Sprintf("failed to generate instanceID: %s", err)) + exitCode = 1 + return + } + } + + if cfg.PkiHost == "" { + logger.Error("No host specified for PKI engine") + exitCode = 1 + return + } + + pkiclient, err := vault.NewVaultClient(cfg.PkiAppRoleID, cfg.PkiAppSecret, cfg.PkiHost, cfg.PkiNamespace, cfg.PkiPath, cfg.PkiRole, logger) + if err != nil { + logger.Error("failed to configure client for PKI engine") + exitCode = 1 + return + } + + g.Go(func() error { + return pkiclient.LoginAndRenew(ctx) + }) + + dbConfig := pgclient.Config{Name: defDB} + if err := env.ParseWithOptions(&dbConfig, env.Options{Prefix: envPrefixDB}); err != nil { + 
logger.Error(err.Error()) + } + db, err := pgclient.Setup(dbConfig, *certspg.Migration()) + if err != nil { + logger.Error(err.Error()) + exitCode = 1 + return + } + defer db.Close() + + authConfig := auth.Config{} + if err := env.ParseWithOptions(&authConfig, env.Options{Prefix: envPrefixAuth}); err != nil { + logger.Error(fmt.Sprintf("failed to load %s auth configuration : %s", svcName, err)) + exitCode = 1 + return + } + + authClient, authHandler, err := auth.Setup(ctx, authConfig) + if err != nil { + logger.Error(err.Error()) + exitCode = 1 + return + } + defer authHandler.Close() + + logger.Info("Successfully connected to auth grpc server " + authHandler.Secure()) + + tp, err := jaegerclient.NewProvider(ctx, svcName, cfg.JaegerURL, cfg.InstanceID, cfg.TraceRatio) + if err != nil { + logger.Error(fmt.Sprintf("failed to init Jaeger: %s", err)) + exitCode = 1 + return + } + defer func() { + if err := tp.Shutdown(ctx); err != nil { + logger.Error(fmt.Sprintf("error shutting down tracer provider: %v", err)) + } + }() + tracer := tp.Tracer(svcName) + + svc := newService(authClient, db, tracer, logger, cfg, dbConfig, pkiclient) + + httpServerConfig := server.Config{Port: defSvcHTTPPort} + if err := env.ParseWithOptions(&httpServerConfig, env.Options{Prefix: envPrefixHTTP}); err != nil { + logger.Error(fmt.Sprintf("failed to load %s HTTP server configuration : %s", svcName, err)) + exitCode = 1 + return + } + hs := httpserver.NewServer(ctx, cancel, svcName, httpServerConfig, api.MakeHandler(svc, logger, cfg.InstanceID), logger) + + if cfg.SendTelemetry { + chc := chclient.New(svcName, magistrala.Version, logger, cancel) + go chc.CallHome(ctx) + } + + g.Go(func() error { + return hs.Start() + }) + + g.Go(func() error { + return server.StopSignalHandler(ctx, cancel, logger, svcName, hs) + }) + + if err := g.Wait(); err != nil { + logger.Error(fmt.Sprintf("Certs service terminated: %s", err)) + } +} + +func newService(authClient magistrala.AuthServiceClient, db *sqlx.DB, tracer trace.Tracer, logger *slog.Logger, cfg config, dbConfig pgclient.Config, pkiAgent vault.Agent) certs.Service { + database := postgres.NewDatabase(db, dbConfig, tracer) + certsRepo := certspg.NewRepository(database, logger) + config := mgsdk.Config{ + ThingsURL: cfg.ThingsURL, + } + sdk := mgsdk.NewSDK(config) + svc := certs.New(authClient, certsRepo, sdk, pkiAgent) + svc = api.LoggingMiddleware(svc, logger) + counter, latency := prometheus.MakeMetrics(svcName, "api") + svc = api.MetricsMiddleware(svc, counter, latency) + svc = tracing.New(svc, tracer) + + return svc +} diff --git a/cmd/influxdb-reader/main.go b/cmd/influxdb-reader/main.go new file mode 100644 index 0000000..88b4602 --- /dev/null +++ b/cmd/influxdb-reader/main.go @@ -0,0 +1,161 @@ +// Copyright (c) Abstract Machines +// SPDX-License-Identifier: Apache-2.0 + +// Package main contains influxdb-reader main function to start the influxdb-reader service. 
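+// It connects to InfluxDB and serves the readers HTTP API, authorizing
+// requests through the auth and things gRPC clients.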
+package main + +import ( + "context" + "fmt" + "log" + "log/slog" + "os" + + chclient "github.com/absmach/callhome/pkg/client" + "github.com/absmach/magistrala" + mglog "github.com/absmach/magistrala/logger" + "github.com/absmach/magistrala/pkg/auth" + "github.com/absmach/magistrala/pkg/prometheus" + "github.com/absmach/magistrala/pkg/server" + httpserver "github.com/absmach/magistrala/pkg/server/http" + "github.com/absmach/magistrala/pkg/uuid" + "github.com/absmach/magistrala/readers" + "github.com/absmach/magistrala/readers/api" + influxdbclient "github.com/absmach/mg-contrib/pkg/clients/influxdb" + "github.com/absmach/mg-contrib/readers/influxdb" + "github.com/caarlos0/env/v10" + influxdb2 "github.com/influxdata/influxdb-client-go/v2" + "golang.org/x/sync/errgroup" +) + +const ( + svcName = "influxdb-reader" + envPrefixHTTP = "MG_INFLUX_READER_HTTP_" + envPrefixAuth = "MG_AUTH_GRPC_" + envPrefixAuthz = "MG_THINGS_AUTH_GRPC_" + envPrefixDB = "MG_INFLUXDB_" + defSvcHTTPPort = "9005" +) + +type config struct { + LogLevel string `env:"MG_INFLUX_READER_LOG_LEVEL" envDefault:"info"` + SendTelemetry bool `env:"MG_SEND_TELEMETRY" envDefault:"true"` + InstanceID string `env:"MG_INFLUX_READER_INSTANCE_ID" envDefault:""` +} + +func main() { + ctx, cancel := context.WithCancel(context.Background()) + g, ctx := errgroup.WithContext(ctx) + + cfg := config{} + if err := env.Parse(&cfg); err != nil { + log.Fatalf("failed to load %s configuration : %s", svcName, err) + } + + logger, err := mglog.New(os.Stdout, cfg.LogLevel) + if err != nil { + log.Fatalf("failed to init logger: %s", err.Error()) + } + + var exitCode int + defer mglog.ExitWithError(&exitCode) + + if cfg.InstanceID == "" { + if cfg.InstanceID, err = uuid.New().ID(); err != nil { + logger.Error(fmt.Sprintf("failed to generate instanceID: %s", err)) + exitCode = 1 + return + } + } + + authConfig := auth.Config{} + if err := env.ParseWithOptions(&authConfig, env.Options{Prefix: envPrefixAuth}); err != nil { + logger.Error(fmt.Sprintf("failed to load %s auth configuration : %s", svcName, err)) + exitCode = 1 + return + } + + ac, acHandler, err := auth.Setup(ctx, authConfig) + if err != nil { + logger.Error(err.Error()) + exitCode = 1 + return + } + defer acHandler.Close() + + logger.Info("Successfully connected to auth grpc server " + acHandler.Secure()) + + authConfig = auth.Config{} + if err := env.ParseWithOptions(&authConfig, env.Options{Prefix: envPrefixAuthz}); err != nil { + logger.Error(fmt.Sprintf("failed to load %s auth configuration : %s", svcName, err)) + exitCode = 1 + return + } + + tc, tcHandler, err := auth.SetupAuthz(ctx, authConfig) + if err != nil { + logger.Error(err.Error()) + exitCode = 1 + return + } + defer tcHandler.Close() + + logger.Info("Successfully connected to things grpc server " + tcHandler.Secure()) + + influxDBConfig := influxdbclient.Config{} + if err := env.ParseWithOptions(&influxDBConfig, env.Options{Prefix: envPrefixDB}); err != nil { + logger.Error(fmt.Sprintf("failed to load InfluxDB client configuration from environment variable : %s", err)) + exitCode = 1 + return + } + influxDBConfig.DBUrl = fmt.Sprintf("%s://%s:%s", influxDBConfig.Protocol, influxDBConfig.Host, influxDBConfig.Port) + + repocfg := influxdb.RepoConfig{ + Bucket: influxDBConfig.Bucket, + Org: influxDBConfig.Org, + } + + client, err := influxdbclient.Connect(ctx, influxDBConfig) + if err != nil { + logger.Error(fmt.Sprintf("failed to connect to InfluxDB : %s", err)) + exitCode = 1 + return + } + defer client.Close() + + repo := 
newService(client, repocfg, logger) + + httpServerConfig := server.Config{Port: defSvcHTTPPort} + if err := env.ParseWithOptions(&httpServerConfig, env.Options{Prefix: envPrefixHTTP}); err != nil { + logger.Error(fmt.Sprintf("failed to load %s HTTP server configuration : %s", svcName, err)) + exitCode = 1 + return + } + hs := httpserver.NewServer(ctx, cancel, svcName, httpServerConfig, api.MakeHandler(repo, ac, tc, svcName, cfg.InstanceID), logger) + + if cfg.SendTelemetry { + chc := chclient.New(svcName, magistrala.Version, logger, cancel) + go chc.CallHome(ctx) + } + + g.Go(func() error { + return hs.Start() + }) + + g.Go(func() error { + return server.StopSignalHandler(ctx, cancel, logger, svcName, hs) + }) + + if err := g.Wait(); err != nil { + logger.Error(fmt.Sprintf("InfluxDB reader service terminated: %s", err)) + } +} + +func newService(client influxdb2.Client, repocfg influxdb.RepoConfig, logger *slog.Logger) readers.MessageRepository { + repo := influxdb.New(client, repocfg) + repo = api.LoggingMiddleware(repo, logger) + counter, latency := prometheus.MakeMetrics("influxdb", "message_reader") + repo = api.MetricsMiddleware(repo, counter, latency) + + return repo +} diff --git a/cmd/influxdb-writer/main.go b/cmd/influxdb-writer/main.go new file mode 100644 index 0000000..f60f8e5 --- /dev/null +++ b/cmd/influxdb-writer/main.go @@ -0,0 +1,161 @@ +// Copyright (c) Abstract Machines +// SPDX-License-Identifier: Apache-2.0 + +// Package main contains influxdb-writer main function to start the influxdb-writer service. +package main + +import ( + "context" + "fmt" + "log" + "log/slog" + "net/url" + "os" + + chclient "github.com/absmach/callhome/pkg/client" + "github.com/absmach/magistrala" + "github.com/absmach/magistrala/consumers" + consumertracing "github.com/absmach/magistrala/consumers/tracing" + "github.com/absmach/magistrala/consumers/writers/api" + mglog "github.com/absmach/magistrala/logger" + "github.com/absmach/magistrala/pkg/jaeger" + "github.com/absmach/magistrala/pkg/messaging/brokers" + brokerstracing "github.com/absmach/magistrala/pkg/messaging/brokers/tracing" + "github.com/absmach/magistrala/pkg/server" + httpserver "github.com/absmach/magistrala/pkg/server/http" + "github.com/absmach/magistrala/pkg/uuid" + "github.com/absmach/mg-contrib/consumers/writers/influxdb" + influxdbclient "github.com/absmach/mg-contrib/pkg/clients/influxdb" + "github.com/caarlos0/env/v10" + "golang.org/x/sync/errgroup" +) + +const ( + svcName = "influxdb-writer" + envPrefixHTTP = "MG_INFLUX_WRITER_HTTP_" + envPrefixDB = "MG_INFLUXDB_" + defSvcHTTPPort = "9006" +) + +type config struct { + LogLevel string `env:"MG_INFLUX_WRITER_LOG_LEVEL" envDefault:"info"` + ConfigPath string `env:"MG_INFLUX_WRITER_CONFIG_PATH" envDefault:"/config.toml"` + BrokerURL string `env:"MG_MESSAGE_BROKER_URL" envDefault:"nats://localhost:4222"` + JaegerURL url.URL `env:"MG_JAEGER_URL" envDefault:"http://jaeger:14268/api/traces"` + SendTelemetry bool `env:"MG_SEND_TELEMETRY" envDefault:"true"` + InstanceID string `env:"MG_INFLUX_WRITER_INSTANCE_ID" envDefault:""` + TraceRatio float64 `env:"MG_JAEGER_TRACE_RATIO" envDefault:"1.0"` +} + +func main() { + ctx, cancel := context.WithCancel(context.Background()) + g, ctx := errgroup.WithContext(ctx) + + cfg := config{} + if err := env.Parse(&cfg); err != nil { + log.Fatalf("failed to load %s configuration : %s", svcName, err) + } + + logger, err := mglog.New(os.Stdout, cfg.LogLevel) + if err != nil { + log.Fatalf("failed to init logger: %s", err.Error()) + } + + var 
exitCode int
+	defer mglog.ExitWithError(&exitCode)
+
+	if cfg.InstanceID == "" {
+		if cfg.InstanceID, err = uuid.New().ID(); err != nil {
+			logger.Error(fmt.Sprintf("failed to generate instanceID: %s", err))
+			exitCode = 1
+			return
+		}
+	}
+
+	httpServerConfig := server.Config{Port: defSvcHTTPPort}
+	if err := env.ParseWithOptions(&httpServerConfig, env.Options{Prefix: envPrefixHTTP}); err != nil {
+		logger.Error(fmt.Sprintf("failed to load %s HTTP server configuration : %s", svcName, err))
+		exitCode = 1
+		return
+	}
+
+	tp, err := jaeger.NewProvider(ctx, svcName, cfg.JaegerURL, cfg.InstanceID, cfg.TraceRatio)
+	if err != nil {
+		logger.Error(fmt.Sprintf("Failed to init Jaeger: %s", err))
+		exitCode = 1
+		return
+	}
+	defer func() {
+		if err := tp.Shutdown(ctx); err != nil {
+			logger.Error(fmt.Sprintf("Error shutting down tracer provider: %v", err))
+		}
+	}()
+	tracer := tp.Tracer(svcName)
+
+	pubSub, err := brokers.NewPubSub(ctx, cfg.BrokerURL, logger)
+	if err != nil {
+		logger.Error(fmt.Sprintf("failed to connect to message broker: %s", err))
+		exitCode = 1
+		return
+	}
+	defer pubSub.Close()
+	pubSub = brokerstracing.NewPubSub(httpServerConfig, tracer, pubSub)
+
+	influxDBConfig := influxdbclient.Config{}
+	if err := env.ParseWithOptions(&influxDBConfig, env.Options{Prefix: envPrefixDB}); err != nil {
+		logger.Error(fmt.Sprintf("failed to load InfluxDB client configuration from environment variable : %s", err))
+		exitCode = 1
+		return
+	}
+	influxDBConfig.DBUrl = fmt.Sprintf("%s://%s:%s", influxDBConfig.Protocol, influxDBConfig.Host, influxDBConfig.Port)
+
+	repocfg := influxdb.RepoConfig{
+		Bucket: influxDBConfig.Bucket,
+		Org:    influxDBConfig.Org,
+	}
+
+	client, err := influxdbclient.Connect(ctx, influxDBConfig)
+	if err != nil {
+		logger.Error(fmt.Sprintf("failed to connect to InfluxDB : %s", err))
+		exitCode = 1
+		return
+	}
+	defer client.Close()
+
+	repo := influxdb.NewAsync(client, repocfg)
+	repo = consumertracing.NewAsync(tracer, repo, httpServerConfig)
+
+	// Start consuming and logging errors.
+	go func(log *slog.Logger) {
+		for err := range repo.Errors() {
+			if err != nil {
+				log.Error(err.Error())
+			}
+		}
+	}(logger)
+
+	if err := consumers.Start(ctx, svcName, pubSub, repo, cfg.ConfigPath, logger); err != nil {
+		logger.Error(fmt.Sprintf("failed to start InfluxDB writer: %s", err))
+		exitCode = 1
+		return
+	}
+
+	hs := httpserver.NewServer(ctx, cancel, svcName, httpServerConfig, api.MakeHandler(svcName, cfg.InstanceID), logger)
+
+	if cfg.SendTelemetry {
+		chc := chclient.New(svcName, magistrala.Version, logger, cancel)
+		go chc.CallHome(ctx)
+	}
+
+	g.Go(func() error {
+		return hs.Start()
+	})
+
+	g.Go(func() error {
+		return server.StopSignalHandler(ctx, cancel, logger, svcName, hs)
+	})
+
+	if err := g.Wait(); err != nil {
+		logger.Error(fmt.Sprintf("InfluxDB writer service terminated: %s", err))
+	}
+}
diff --git a/cmd/lora/main.go b/cmd/lora/main.go
new file mode 100644
index 0000000..7f68318
--- /dev/null
+++ b/cmd/lora/main.go
@@ -0,0 +1,233 @@
+// Copyright (c) Abstract Machines
+// SPDX-License-Identifier: Apache-2.0
+
+// Package main contains lora main function to start the lora service.
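+// The adapter consumes uplink messages from a LoRa Server MQTT broker and republishes
+// them to the Magistrala message broker, using Redis route maps to match LoRa
+// applications and devices to Magistrala channels and things.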
+package main + +import ( + "context" + "fmt" + "log" + "log/slog" + "net/url" + "os" + "time" + + chclient "github.com/absmach/callhome/pkg/client" + "github.com/absmach/magistrala" + mglog "github.com/absmach/magistrala/logger" + "github.com/absmach/magistrala/lora" + "github.com/absmach/magistrala/lora/api" + loraevents "github.com/absmach/magistrala/lora/events" + "github.com/absmach/magistrala/lora/mqtt" + "github.com/absmach/magistrala/pkg/events" + "github.com/absmach/magistrala/pkg/events/store" + "github.com/absmach/magistrala/pkg/jaeger" + "github.com/absmach/magistrala/pkg/messaging" + "github.com/absmach/magistrala/pkg/messaging/brokers" + brokerstracing "github.com/absmach/magistrala/pkg/messaging/brokers/tracing" + "github.com/absmach/magistrala/pkg/prometheus" + "github.com/absmach/magistrala/pkg/server" + httpserver "github.com/absmach/magistrala/pkg/server/http" + "github.com/absmach/magistrala/pkg/uuid" + redisclient "github.com/absmach/mg-contrib/pkg/clients/redis" + "github.com/caarlos0/env/v10" + mqttpaho "github.com/eclipse/paho.mqtt.golang" + "github.com/go-redis/redis/v8" + "golang.org/x/sync/errgroup" +) + +const ( + svcName = "lora-adapter" + envPrefixHTTP = "MG_LORA_ADAPTER_HTTP_" + defSvcHTTPPort = "9017" + + thingsRMPrefix = "thing" + channelsRMPrefix = "channel" + connsRMPrefix = "connection" + thingsStream = "events.magistrala.things" +) + +type config struct { + LogLevel string `env:"MG_LORA_ADAPTER_LOG_LEVEL" envDefault:"info"` + LoraMsgURL string `env:"MG_LORA_ADAPTER_MESSAGES_URL" envDefault:"tcp://localhost:1883"` + LoraMsgUser string `env:"MG_LORA_ADAPTER_MESSAGES_USER" envDefault:""` + LoraMsgPass string `env:"MG_LORA_ADAPTER_MESSAGES_PASS" envDefault:""` + LoraMsgTopic string `env:"MG_LORA_ADAPTER_MESSAGES_TOPIC" envDefault:"application/+/device/+/event/up"` + LoraMsgTimeout time.Duration `env:"MG_LORA_ADAPTER_MESSAGES_TIMEOUT" envDefault:"30s"` + ESConsumerName string `env:"MG_LORA_ADAPTER_EVENT_CONSUMER" envDefault:"lora-adapter"` + BrokerURL string `env:"MG_MESSAGE_BROKER_URL" envDefault:"nats://localhost:4222"` + JaegerURL url.URL `env:"MG_JAEGER_URL" envDefault:"http://localhost:14268/api/traces"` + SendTelemetry bool `env:"MG_SEND_TELEMETRY" envDefault:"true"` + InstanceID string `env:"MG_LORA_ADAPTER_INSTANCE_ID" envDefault:""` + ESURL string `env:"MG_ES_URL" envDefault:"nats://localhost:4222"` + RouteMapURL string `env:"MG_LORA_ADAPTER_ROUTE_MAP_URL" envDefault:"redis://localhost:6379/0"` + TraceRatio float64 `env:"MG_JAEGER_TRACE_RATIO" envDefault:"1.0"` +} + +func main() { + ctx, cancel := context.WithCancel(context.Background()) + g, ctx := errgroup.WithContext(ctx) + + cfg := config{} + if err := env.Parse(&cfg); err != nil { + log.Fatalf("failed to load %s configuration : %s", svcName, err) + } + + logger, err := mglog.New(os.Stdout, cfg.LogLevel) + if err != nil { + log.Fatalf("failed to init logger: %s", err.Error()) + } + + var exitCode int + defer mglog.ExitWithError(&exitCode) + + if cfg.InstanceID == "" { + if cfg.InstanceID, err = uuid.New().ID(); err != nil { + logger.Error(fmt.Sprintf("failed to generate instanceID: %s", err)) + exitCode = 1 + return + } + } + + httpServerConfig := server.Config{Port: defSvcHTTPPort} + if err := env.ParseWithOptions(&httpServerConfig, env.Options{Prefix: envPrefixHTTP}); err != nil { + logger.Error(fmt.Sprintf("failed to load %s HTTP server configuration : %s", svcName, err)) + exitCode = 1 + return + } + + rmConn, err := redisclient.Connect(cfg.RouteMapURL) + if err != nil { + 
logger.Error(fmt.Sprintf("failed to setup route map redis client : %s", err)) + exitCode = 1 + return + } + defer rmConn.Close() + + tp, err := jaeger.NewProvider(ctx, svcName, cfg.JaegerURL, cfg.InstanceID, cfg.TraceRatio) + if err != nil { + logger.Error(fmt.Sprintf("Failed to init Jaeger: %s", err)) + exitCode = 1 + return + } + defer func() { + if err := tp.Shutdown(ctx); err != nil { + logger.Error(fmt.Sprintf("Error shutting down tracer provider: %v", err)) + } + }() + tracer := tp.Tracer(svcName) + + pub, err := brokers.NewPublisher(ctx, cfg.BrokerURL) + if err != nil { + logger.Error(fmt.Sprintf("failed to connect to message broker: %s", err)) + exitCode = 1 + return + } + defer pub.Close() + pub = brokerstracing.NewPublisher(httpServerConfig, tracer, pub) + + svc := newService(pub, rmConn, thingsRMPrefix, channelsRMPrefix, connsRMPrefix, logger) + + mqttConn, err := connectToMQTTBroker(cfg.LoraMsgURL, cfg.LoraMsgUser, cfg.LoraMsgPass, cfg.LoraMsgTimeout, logger) + if err != nil { + logger.Error(err.Error()) + exitCode = 1 + return + } + + if err = subscribeToLoRaBroker(svc, mqttConn, cfg.LoraMsgTimeout, cfg.LoraMsgTopic, logger); err != nil { + logger.Error(fmt.Sprintf("failed to subscribe to Lora MQTT broker: %s", err)) + exitCode = 1 + return + } + + if err = subscribeToThingsES(ctx, svc, cfg, logger); err != nil { + logger.Error(fmt.Sprintf("failed to subscribe to things event store: %s", err)) + exitCode = 1 + return + } + + logger.Info("Subscribed to Event Store") + + hs := httpserver.NewServer(ctx, cancel, svcName, httpServerConfig, api.MakeHandler(cfg.InstanceID), logger) + + if cfg.SendTelemetry { + chc := chclient.New(svcName, magistrala.Version, logger, cancel) + go chc.CallHome(ctx) + } + + g.Go(func() error { + return hs.Start() + }) + + g.Go(func() error { + return server.StopSignalHandler(ctx, cancel, logger, svcName, hs) + }) + + if err := g.Wait(); err != nil { + logger.Error(fmt.Sprintf("LoRa adapter terminated: %s", err)) + } +} + +func connectToMQTTBroker(burl, user, password string, timeout time.Duration, logger *slog.Logger) (mqttpaho.Client, error) { + opts := mqttpaho.NewClientOptions() + opts.AddBroker(burl) + opts.SetUsername(user) + opts.SetPassword(password) + opts.SetOnConnectHandler(func(_ mqttpaho.Client) { + logger.Info("Connected to Lora MQTT broker") + }) + opts.SetConnectionLostHandler(func(_ mqttpaho.Client, err error) { + logger.Error(fmt.Sprintf("MQTT connection lost: %s", err)) + }) + + client := mqttpaho.NewClient(opts) + + if token := client.Connect(); token.WaitTimeout(timeout) && token.Error() != nil { + return nil, fmt.Errorf("failed to connect to Lora MQTT broker: %s", token.Error()) + } + + return client, nil +} + +func subscribeToLoRaBroker(svc lora.Service, mc mqttpaho.Client, timeout time.Duration, topic string, logger *slog.Logger) error { + mqttBroker := mqtt.NewBroker(svc, mc, timeout, logger) + logger.Info("Subscribed to Lora MQTT broker") + if err := mqttBroker.Subscribe(topic); err != nil { + return fmt.Errorf("failed to subscribe to Lora MQTT broker: %s", err) + } + return nil +} + +func subscribeToThingsES(ctx context.Context, svc lora.Service, cfg config, logger *slog.Logger) error { + subscriber, err := store.NewSubscriber(ctx, cfg.ESURL, logger) + if err != nil { + return err + } + + subConfig := events.SubscriberConfig{ + Stream: thingsStream, + Consumer: cfg.ESConsumerName, + Handler: loraevents.NewEventHandler(svc), + } + return subscriber.Subscribe(ctx, subConfig) +} + +func newRouteMapRepository(client *redis.Client, 
prefix string, logger *slog.Logger) lora.RouteMapRepository { + logger.Info(fmt.Sprintf("Connected to %s Redis Route-map", prefix)) + return loraevents.NewRouteMapRepository(client, prefix) +} + +func newService(pub messaging.Publisher, rmConn *redis.Client, thingsRMPrefix, channelsRMPrefix, connsRMPrefix string, logger *slog.Logger) lora.Service { + thingsRM := newRouteMapRepository(rmConn, thingsRMPrefix, logger) + chansRM := newRouteMapRepository(rmConn, channelsRMPrefix, logger) + connsRM := newRouteMapRepository(rmConn, connsRMPrefix, logger) + + svc := lora.New(pub, thingsRM, chansRM, connsRM) + svc = api.LoggingMiddleware(svc, logger) + counter, latency := prometheus.MakeMetrics("lora_adapter", "api") + svc = api.MetricsMiddleware(svc, counter, latency) + + return svc +} diff --git a/cmd/mongodb-reader/main.go b/cmd/mongodb-reader/main.go new file mode 100644 index 0000000..f0ace25 --- /dev/null +++ b/cmd/mongodb-reader/main.go @@ -0,0 +1,147 @@ +// Copyright (c) Abstract Machines +// SPDX-License-Identifier: Apache-2.0 + +// Package main contains mongodb-reader main function to start the mongodb-reader service. +package main + +import ( + "context" + "fmt" + "log" + "log/slog" + "os" + + chclient "github.com/absmach/callhome/pkg/client" + "github.com/absmach/magistrala" + mglog "github.com/absmach/magistrala/logger" + "github.com/absmach/magistrala/pkg/auth" + "github.com/absmach/magistrala/pkg/prometheus" + "github.com/absmach/magistrala/pkg/server" + httpserver "github.com/absmach/magistrala/pkg/server/http" + "github.com/absmach/magistrala/pkg/uuid" + "github.com/absmach/magistrala/readers" + "github.com/absmach/magistrala/readers/api" + mongoclient "github.com/absmach/mg-contrib/pkg/clients/mongo" + "github.com/absmach/mg-contrib/readers/mongodb" + "github.com/caarlos0/env/v10" + "go.mongodb.org/mongo-driver/mongo" + "golang.org/x/sync/errgroup" +) + +const ( + svcName = "mongodb-reader" + envPrefixDB = "MG_MONGO_" + envPrefixHTTP = "MG_MONGO_READER_HTTP_" + envPrefixAuth = "MG_AUTH_GRPC_" + envPrefixAuthz = "MG_THINGS_AUTH_GRPC_" + defSvcHTTPPort = "9007" +) + +type config struct { + LogLevel string `env:"MG_MONGO_READER_LOG_LEVEL" envDefault:"info"` + SendTelemetry bool `env:"MG_SEND_TELEMETRY" envDefault:"true"` + InstanceID string `env:"MG_MONGO_READER_INSTANCE_ID" envDefault:""` +} + +func main() { + ctx, cancel := context.WithCancel(context.Background()) + g, ctx := errgroup.WithContext(ctx) + + cfg := config{} + if err := env.Parse(&cfg); err != nil { + log.Fatalf("failed to load %s configuration : %s", svcName, err) + } + + logger, err := mglog.New(os.Stdout, cfg.LogLevel) + if err != nil { + log.Fatalf("failed to init logger: %s", err.Error()) + } + + var exitCode int + defer mglog.ExitWithError(&exitCode) + + if cfg.InstanceID == "" { + if cfg.InstanceID, err = uuid.New().ID(); err != nil { + logger.Error(fmt.Sprintf("failed to generate instanceID: %s", err)) + exitCode = 1 + return + } + } + + db, err := mongoclient.Setup(envPrefixDB) + if err != nil { + logger.Error(fmt.Sprintf("failed to setup mongo database : %s", err)) + exitCode = 1 + return + } + + repo := newService(db, logger) + + authConfig := auth.Config{} + if err := env.ParseWithOptions(&authConfig, env.Options{Prefix: envPrefixAuth}); err != nil { + logger.Error(fmt.Sprintf("failed to load %s auth configuration : %s", svcName, err)) + exitCode = 1 + return + } + + ac, acHandler, err := auth.Setup(ctx, authConfig) + if err != nil { + logger.Error(err.Error()) + exitCode = 1 + return + } + defer 
acHandler.Close() + + logger.Info("Successfully connected to auth grpc server " + acHandler.Secure()) + + authConfig = auth.Config{} + if err := env.ParseWithOptions(&authConfig, env.Options{Prefix: envPrefixAuthz}); err != nil { + logger.Error(fmt.Sprintf("failed to load %s auth configuration : %s", svcName, err)) + exitCode = 1 + return + } + + tc, tcHandler, err := auth.SetupAuthz(ctx, authConfig) + if err != nil { + logger.Error(err.Error()) + exitCode = 1 + return + } + defer tcHandler.Close() + + logger.Info("Successfully connected to things grpc server " + tcHandler.Secure()) + + httpServerConfig := server.Config{Port: defSvcHTTPPort} + if err := env.ParseWithOptions(&httpServerConfig, env.Options{Prefix: envPrefixHTTP}); err != nil { + logger.Error(fmt.Sprintf("failed to load %s HTTP server configuration : %s", svcName, err)) + exitCode = 1 + return + } + hs := httpserver.NewServer(ctx, cancel, svcName, httpServerConfig, api.MakeHandler(repo, ac, tc, svcName, cfg.InstanceID), logger) + + if cfg.SendTelemetry { + chc := chclient.New(svcName, magistrala.Version, logger, cancel) + go chc.CallHome(ctx) + } + + g.Go(func() error { + return hs.Start() + }) + + g.Go(func() error { + return server.StopSignalHandler(ctx, cancel, logger, svcName, hs) + }) + + if err := g.Wait(); err != nil { + logger.Error(fmt.Sprintf("MongoDB reader service terminated: %s", err)) + } +} + +func newService(db *mongo.Database, logger *slog.Logger) readers.MessageRepository { + repo := mongodb.New(db) + repo = api.LoggingMiddleware(repo, logger) + counter, latency := prometheus.MakeMetrics("mongodb", "message_reader") + repo = api.MetricsMiddleware(repo, counter, latency) + + return repo +} diff --git a/cmd/mongodb-writer/main.go b/cmd/mongodb-writer/main.go new file mode 100644 index 0000000..bf15acf --- /dev/null +++ b/cmd/mongodb-writer/main.go @@ -0,0 +1,148 @@ +// Copyright (c) Abstract Machines +// SPDX-License-Identifier: Apache-2.0 + +// Package main contains mongodb-writer main function to start the mongodb-writer service. 
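+// The writer consumes messages from the message broker and persists them to MongoDB.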
+package main + +import ( + "context" + "fmt" + "log" + "log/slog" + "net/url" + "os" + + chclient "github.com/absmach/callhome/pkg/client" + "github.com/absmach/magistrala" + "github.com/absmach/magistrala/consumers" + consumertracing "github.com/absmach/magistrala/consumers/tracing" + "github.com/absmach/magistrala/consumers/writers/api" + mglog "github.com/absmach/magistrala/logger" + jaegerclient "github.com/absmach/magistrala/pkg/jaeger" + "github.com/absmach/magistrala/pkg/messaging/brokers" + brokerstracing "github.com/absmach/magistrala/pkg/messaging/brokers/tracing" + "github.com/absmach/magistrala/pkg/prometheus" + "github.com/absmach/magistrala/pkg/server" + httpserver "github.com/absmach/magistrala/pkg/server/http" + "github.com/absmach/magistrala/pkg/uuid" + "github.com/absmach/mg-contrib/consumers/writers/mongodb" + mongoclient "github.com/absmach/mg-contrib/pkg/clients/mongo" + "github.com/caarlos0/env/v10" + "go.mongodb.org/mongo-driver/mongo" + "golang.org/x/sync/errgroup" +) + +const ( + svcName = "mongodb-writer" + envPrefixDB = "MG_MONGO_" + envPrefixHTTP = "MG_MONGO_WRITER_HTTP_" + defSvcHTTPPort = "9008" +) + +type config struct { + LogLevel string `env:"MG_MONGO_WRITER_LOG_LEVEL" envDefault:"info"` + ConfigPath string `env:"MG_MONGO_WRITER_CONFIG_PATH" envDefault:"/config.toml"` + BrokerURL string `env:"MG_MESSAGE_BROKER_URL" envDefault:"nats://localhost:4222"` + JaegerURL url.URL `env:"MG_JAEGER_URL" envDefault:"http://jaeger:14268/api/traces"` + SendTelemetry bool `env:"MG_SEND_TELEMETRY" envDefault:"true"` + InstanceID string `env:"MG_MONGO_WRITER_INSTANCE_ID" envDefault:""` + TraceRatio float64 `env:"MG_JAEGER_TRACE_RATIO" envDefault:"1.0"` +} + +func main() { + ctx, cancel := context.WithCancel(context.Background()) + g, ctx := errgroup.WithContext(ctx) + + cfg := config{} + if err := env.Parse(&cfg); err != nil { + log.Fatalf("failed to load %s configuration : %s", svcName, err) + } + + logger, err := mglog.New(os.Stdout, cfg.LogLevel) + if err != nil { + log.Fatalf("failed to init logger: %s", err.Error()) + } + + var exitCode int + defer mglog.ExitWithError(&exitCode) + + if cfg.InstanceID == "" { + if cfg.InstanceID, err = uuid.New().ID(); err != nil { + logger.Error(fmt.Sprintf("failed to generate instanceID: %s", err)) + exitCode = 1 + return + } + } + + httpServerConfig := server.Config{Port: defSvcHTTPPort} + if err := env.ParseWithOptions(&httpServerConfig, env.Options{Prefix: envPrefixHTTP}); err != nil { + logger.Error(fmt.Sprintf("failed to load %s HTTP server configuration : %s", svcName, err)) + exitCode = 1 + return + } + + tp, err := jaegerclient.NewProvider(ctx, svcName, cfg.JaegerURL, cfg.InstanceID, cfg.TraceRatio) + if err != nil { + logger.Error(fmt.Sprintf("Failed to init Jaeger: %s", err)) + exitCode = 1 + return + } + defer func() { + if err := tp.Shutdown(ctx); err != nil { + logger.Error(fmt.Sprintf("Error shutting down tracer provider: %v", err)) + } + }() + tracer := tp.Tracer(svcName) + + pubSub, err := brokers.NewPubSub(ctx, cfg.BrokerURL, logger) + if err != nil { + logger.Error(fmt.Sprintf("failed to connect to message broker: %s", err)) + exitCode = 1 + return + } + defer pubSub.Close() + pubSub = brokerstracing.NewPubSub(httpServerConfig, tracer, pubSub) + + db, err := mongoclient.Setup(envPrefixDB) + if err != nil { + logger.Error(fmt.Sprintf("failed to setup mongo database : %s", err)) + exitCode = 1 + return + } + + repo := newService(db, logger) + repo = consumertracing.NewBlocking(tracer, repo, httpServerConfig) + + if err 
:= consumers.Start(ctx, svcName, pubSub, repo, cfg.ConfigPath, logger); err != nil { + logger.Error(fmt.Sprintf("failed to start MongoDB writer: %s", err)) + exitCode = 1 + return + } + + hs := httpserver.NewServer(ctx, cancel, svcName, httpServerConfig, api.MakeHandler(svcName, cfg.InstanceID), logger) + + if cfg.SendTelemetry { + chc := chclient.New(svcName, magistrala.Version, logger, cancel) + go chc.CallHome(ctx) + } + + g.Go(func() error { + return hs.Start() + }) + + g.Go(func() error { + return server.StopSignalHandler(ctx, cancel, logger, svcName, hs) + }) + + if err := g.Wait(); err != nil { + logger.Error(fmt.Sprintf("MongoDB writer service terminated: %s", err)) + } +} + +func newService(db *mongo.Database, logger *slog.Logger) consumers.BlockingConsumer { + repo := mongodb.New(db) + repo = api.LoggingMiddleware(repo, logger) + counter, latency := prometheus.MakeMetrics("mongodb", "message_writer") + repo = api.MetricsMiddleware(repo, counter, latency) + return repo +} diff --git a/cmd/opcua/main.go b/cmd/opcua/main.go new file mode 100644 index 0000000..eb0a758 --- /dev/null +++ b/cmd/opcua/main.go @@ -0,0 +1,212 @@ +// Copyright (c) Abstract Machines +// SPDX-License-Identifier: Apache-2.0 + +// Package main contains opcua-adapter main function to start the opcua-adapter service. +package main + +import ( + "context" + "fmt" + "log" + "log/slog" + "net/url" + "os" + + chclient "github.com/absmach/callhome/pkg/client" + "github.com/absmach/magistrala" + mglog "github.com/absmach/magistrala/logger" + "github.com/absmach/magistrala/pkg/events" + "github.com/absmach/magistrala/pkg/events/store" + jaegerclient "github.com/absmach/magistrala/pkg/jaeger" + "github.com/absmach/magistrala/pkg/messaging/brokers" + brokerstracing "github.com/absmach/magistrala/pkg/messaging/brokers/tracing" + "github.com/absmach/magistrala/pkg/prometheus" + "github.com/absmach/magistrala/pkg/server" + httpserver "github.com/absmach/magistrala/pkg/server/http" + "github.com/absmach/magistrala/pkg/uuid" + "github.com/absmach/mg-contrib/opcua" + "github.com/absmach/mg-contrib/opcua/api" + "github.com/absmach/mg-contrib/opcua/db" + opcuaevents "github.com/absmach/mg-contrib/opcua/events" + "github.com/absmach/mg-contrib/opcua/gopcua" + redisclient "github.com/absmach/mg-contrib/pkg/clients/redis" + "github.com/caarlos0/env/v10" + "github.com/go-redis/redis/v8" + "golang.org/x/sync/errgroup" +) + +const ( + svcName = "opc-ua-adapter" + envPrefixHTTP = "MG_OPCUA_ADAPTER_HTTP_" + defSvcHTTPPort = "8180" + + thingsRMPrefix = "thing" + channelsRMPrefix = "channel" + connectionRMPrefix = "connection" + + thingsStream = "events.magistrala.things" +) + +type config struct { + LogLevel string `env:"MG_OPCUA_ADAPTER_LOG_LEVEL" envDefault:"info"` + ESConsumerName string `env:"MG_OPCUA_ADAPTER_EVENT_CONSUMER" envDefault:"opcua-adapter"` + BrokerURL string `env:"MG_MESSAGE_BROKER_URL" envDefault:"nats://localhost:4222"` + JaegerURL url.URL `env:"MG_JAEGER_URL" envDefault:"http://localhost:14268/api/traces"` + SendTelemetry bool `env:"MG_SEND_TELEMETRY" envDefault:"true"` + InstanceID string `env:"MG_OPCUA_ADAPTER_INSTANCE_ID" envDefault:""` + ESURL string `env:"MG_ES_URL" envDefault:"nats://localhost:4222"` + RouteMapURL string `env:"MG_OPCUA_ADAPTER_ROUTE_MAP_URL" envDefault:"redis://localhost:6379/0"` + TraceRatio float64 `env:"MG_JAEGER_TRACE_RATIO" envDefault:"1.0"` +} + +func main() { + ctx, httpCancel := context.WithCancel(context.Background()) + g, ctx := errgroup.WithContext(ctx) + + cfg := config{} + if err := 
env.Parse(&cfg); err != nil {
+		log.Fatalf("failed to load %s configuration : %s", svcName, err)
+	}
+
+	opcConfig := opcua.Config{}
+	if err := env.Parse(&opcConfig); err != nil {
+		log.Fatalf("failed to load %s opcua client configuration : %s", svcName, err)
+	}
+
+	logger, err := mglog.New(os.Stdout, cfg.LogLevel)
+	if err != nil {
+		log.Fatalf("failed to init logger: %s", err.Error())
+	}
+
+	var exitCode int
+	defer mglog.ExitWithError(&exitCode)
+
+	if cfg.InstanceID == "" {
+		if cfg.InstanceID, err = uuid.New().ID(); err != nil {
+			logger.Error(fmt.Sprintf("failed to generate instanceID: %s", err))
+			exitCode = 1
+			return
+		}
+	}
+
+	httpServerConfig := server.Config{Port: defSvcHTTPPort}
+	if err := env.ParseWithOptions(&httpServerConfig, env.Options{Prefix: envPrefixHTTP}); err != nil {
+		logger.Error(fmt.Sprintf("failed to load %s HTTP server configuration : %s", svcName, err))
+		exitCode = 1
+		return
+	}
+
+	rmConn, err := redisclient.Connect(cfg.RouteMapURL)
+	if err != nil {
+		logger.Error(fmt.Sprintf("failed to setup %s route map redis client : %s", svcName, err))
+		exitCode = 1
+		return
+	}
+	defer rmConn.Close()
+
+	thingRM := newRouteMapRepository(rmConn, thingsRMPrefix, logger)
+	chanRM := newRouteMapRepository(rmConn, channelsRMPrefix, logger)
+	connRM := newRouteMapRepository(rmConn, connectionRMPrefix, logger)
+
+	tp, err := jaegerclient.NewProvider(ctx, svcName, cfg.JaegerURL, cfg.InstanceID, cfg.TraceRatio)
+	if err != nil {
+		logger.Error(fmt.Sprintf("Failed to init Jaeger: %s", err))
+		exitCode = 1
+		return
+	}
+	defer func() {
+		if err := tp.Shutdown(ctx); err != nil {
+			logger.Error(fmt.Sprintf("Error shutting down tracer provider: %v", err))
+		}
+	}()
+	tracer := tp.Tracer(svcName)
+
+	pubSub, err := brokers.NewPubSub(ctx, cfg.BrokerURL, logger)
+	if err != nil {
+		logger.Error(fmt.Sprintf("failed to connect to message broker: %s", err))
+		exitCode = 1
+		return
+	}
+	defer pubSub.Close()
+	pubSub = brokerstracing.NewPubSub(httpServerConfig, tracer, pubSub)
+
+	sub := gopcua.NewSubscriber(ctx, pubSub, thingRM, chanRM, connRM, logger)
+	browser := gopcua.NewBrowser(ctx, logger)
+
+	svc := newService(sub, browser, thingRM, chanRM, connRM, opcConfig, logger)
+
+	go subscribeToStoredSubs(ctx, sub, opcConfig, logger)
+
+	if err = subscribeToThingsES(ctx, svc, cfg, logger); err != nil {
+		logger.Error(fmt.Sprintf("failed to subscribe to things event store: %s", err))
+		exitCode = 1
+		return
+	}
+
+	logger.Info("Subscribed to Event Store")
+
+	hs := httpserver.NewServer(ctx, httpCancel, svcName, httpServerConfig, api.MakeHandler(svc, logger, cfg.InstanceID), logger)
+
+	if cfg.SendTelemetry {
+		chc := chclient.New(svcName, magistrala.Version, logger, httpCancel)
+		go chc.CallHome(ctx)
+	}
+
+	g.Go(func() error {
+		return hs.Start()
+	})
+
+	g.Go(func() error {
+		return server.StopSignalHandler(ctx, httpCancel, logger, svcName, hs)
+	})
+
+	if err := g.Wait(); err != nil {
+		logger.Error(fmt.Sprintf("OPC-UA adapter service terminated: %s", err))
+	}
+}
+
+func subscribeToStoredSubs(ctx context.Context, sub opcua.Subscriber, cfg opcua.Config, logger *slog.Logger) {
+	// Get all stored subscriptions
+	nodes, err := db.ReadAll()
+	if err != nil {
+		logger.Warn(fmt.Sprintf("Read stored subscriptions failed: %s", err))
+	}
+
+	for _, n := range nodes {
+		// Copy the config per node so the concurrent subscriptions do not race on shared fields.
+		nodeCfg := cfg
+		nodeCfg.ServerURI = n.ServerURI
+		nodeCfg.NodeID = n.NodeID
+		go func() {
+			if err := sub.Subscribe(ctx, nodeCfg); err != nil {
+				logger.Warn(fmt.Sprintf("Subscription failed: %s", err))
+			}
+		}()
+	}
+}
+
+func 
subscribeToThingsES(ctx context.Context, svc opcua.Service, cfg config, logger *slog.Logger) error {
+	subscriber, err := store.NewSubscriber(ctx, cfg.ESURL, logger)
+	if err != nil {
+		return err
+	}
+
+	subConfig := events.SubscriberConfig{
+		Stream:   thingsStream,
+		Consumer: cfg.ESConsumerName,
+		Handler:  opcuaevents.NewEventHandler(svc),
+	}
+	return subscriber.Subscribe(ctx, subConfig)
+}
+
+func newRouteMapRepository(client *redis.Client, prefix string, logger *slog.Logger) opcua.RouteMapRepository {
+	logger.Info(fmt.Sprintf("Connected to %s Redis Route-map", prefix))
+	return opcuaevents.NewRouteMapRepository(client, prefix)
+}
+
+func newService(sub opcua.Subscriber, browser opcua.Browser, thingRM, chanRM, connRM opcua.RouteMapRepository, opcuaConfig opcua.Config, logger *slog.Logger) opcua.Service {
+	svc := opcua.New(sub, browser, thingRM, chanRM, connRM, opcuaConfig, logger)
+	svc = api.LoggingMiddleware(svc, logger)
+	counter, latency := prometheus.MakeMetrics("opc_ua_adapter", "api")
+	svc = api.MetricsMiddleware(svc, counter, latency)
+
+	return svc
+}
diff --git a/cmd/postgres-reader/main.go b/cmd/postgres-reader/main.go
new file mode 100644
index 0000000..9db1fb8
--- /dev/null
+++ b/cmd/postgres-reader/main.go
@@ -0,0 +1,155 @@
+// Copyright (c) Abstract Machines
+// SPDX-License-Identifier: Apache-2.0
+
+// Package main contains postgres-reader main function to start the postgres-reader service.
+package main
+
+import (
+	"context"
+	"fmt"
+	"log"
+	"log/slog"
+	"os"
+
+	chclient "github.com/absmach/callhome/pkg/client"
+	"github.com/absmach/magistrala"
+	mglog "github.com/absmach/magistrala/logger"
+	"github.com/absmach/magistrala/pkg/auth"
+	pgclient "github.com/absmach/magistrala/pkg/postgres"
+	"github.com/absmach/magistrala/pkg/prometheus"
+	"github.com/absmach/magistrala/pkg/server"
+	httpserver "github.com/absmach/magistrala/pkg/server/http"
+	"github.com/absmach/magistrala/pkg/uuid"
+	"github.com/absmach/magistrala/readers"
+	"github.com/absmach/magistrala/readers/api"
+	"github.com/absmach/mg-contrib/readers/postgres"
+	"github.com/caarlos0/env/v10"
+	"github.com/jmoiron/sqlx"
+	"golang.org/x/sync/errgroup"
+)
+
+const (
+	svcName        = "postgres-reader"
+	envPrefixDB    = "MG_POSTGRES_"
+	envPrefixHTTP  = "MG_POSTGRES_READER_HTTP_"
+	envPrefixAuth  = "MG_AUTH_GRPC_"
+	envPrefixAuthz = "MG_THINGS_AUTH_GRPC_"
+	defDB          = "magistrala"
+	defSvcHTTPPort = "9009"
+)
+
+type config struct {
+	LogLevel      string `env:"MG_POSTGRES_READER_LOG_LEVEL" envDefault:"info"`
+	SendTelemetry bool   `env:"MG_SEND_TELEMETRY" envDefault:"true"`
+	InstanceID    string `env:"MG_POSTGRES_READER_INSTANCE_ID" envDefault:""`
+}
+
+func main() {
+	ctx, cancel := context.WithCancel(context.Background())
+	g, ctx := errgroup.WithContext(ctx)
+
+	cfg := config{}
+	if err := env.Parse(&cfg); err != nil {
+		log.Fatalf("failed to load %s configuration : %s", svcName, err)
+	}
+
+	logger, err := mglog.New(os.Stdout, cfg.LogLevel)
+	if err != nil {
+		log.Fatalf("failed to init logger: %s", err.Error())
+	}
+
+	var exitCode int
+	defer mglog.ExitWithError(&exitCode)
+
+	if cfg.InstanceID == "" {
+		if cfg.InstanceID, err = uuid.New().ID(); err != nil {
+			logger.Error(fmt.Sprintf("failed to generate instanceID: %s", err))
+			exitCode = 1
+			return
+		}
+	}
+
+	dbConfig := pgclient.Config{}
+	if err := env.ParseWithOptions(&dbConfig, env.Options{Prefix: envPrefixDB}); err != nil {
+		logger.Error(err.Error())
+		exitCode = 1
+		return
+	}
+	db, err := pgclient.Connect(dbConfig)
+	if err != nil {
+		
logger.Error(fmt.Sprintf("failed to setup postgres database : %s", err)) + exitCode = 1 + return + } + defer db.Close() + + authConfig := auth.Config{} + if err := env.ParseWithOptions(&authConfig, env.Options{Prefix: envPrefixAuth}); err != nil { + logger.Error(fmt.Sprintf("failed to load %s auth configuration : %s", svcName, err)) + exitCode = 1 + return + } + + ac, acHandler, err := auth.Setup(ctx, authConfig) + if err != nil { + logger.Error(err.Error()) + exitCode = 1 + return + } + defer acHandler.Close() + + logger.Info("Successfully connected to auth grpc server " + acHandler.Secure()) + + authConfig = auth.Config{} + if err := env.ParseWithOptions(&authConfig, env.Options{Prefix: envPrefixAuthz}); err != nil { + logger.Error(fmt.Sprintf("failed to load %s auth configuration : %s", svcName, err)) + exitCode = 1 + return + } + + tc, tcHandler, err := auth.SetupAuthz(ctx, authConfig) + if err != nil { + logger.Error(err.Error()) + exitCode = 1 + return + } + defer tcHandler.Close() + + logger.Info("Successfully connected to things grpc server " + tcHandler.Secure()) + + repo := newService(db, logger) + + httpServerConfig := server.Config{Port: defSvcHTTPPort} + if err := env.ParseWithOptions(&httpServerConfig, env.Options{Prefix: envPrefixHTTP}); err != nil { + logger.Error(fmt.Sprintf("failed to load %s HTTP server configuration : %s", svcName, err)) + exitCode = 1 + return + } + hs := httpserver.NewServer(ctx, cancel, svcName, httpServerConfig, api.MakeHandler(repo, ac, tc, svcName, cfg.InstanceID), logger) + + if cfg.SendTelemetry { + chc := chclient.New(svcName, magistrala.Version, logger, cancel) + go chc.CallHome(ctx) + } + + g.Go(func() error { + return hs.Start() + }) + + g.Go(func() error { + return server.StopSignalHandler(ctx, cancel, logger, svcName, hs) + }) + + if err := g.Wait(); err != nil { + logger.Error(fmt.Sprintf("Postgres reader service terminated: %s", err)) + } +} + +func newService(db *sqlx.DB, logger *slog.Logger) readers.MessageRepository { + svc := postgres.New(db) + svc = api.LoggingMiddleware(svc, logger) + counter, latency := prometheus.MakeMetrics("postgres", "message_reader") + svc = api.MetricsMiddleware(svc, counter, latency) + + return svc +} diff --git a/cmd/postgres-writer/main.go b/cmd/postgres-writer/main.go new file mode 100644 index 0000000..df2f521 --- /dev/null +++ b/cmd/postgres-writer/main.go @@ -0,0 +1,154 @@ +// Copyright (c) Abstract Machines +// SPDX-License-Identifier: Apache-2.0 + +// Package main contains postgres-writer main function to start the postgres-writer service. 
+package main
+
+import (
+	"context"
+	"fmt"
+	"log"
+	"log/slog"
+	"net/url"
+	"os"
+
+	chclient "github.com/absmach/callhome/pkg/client"
+	"github.com/absmach/magistrala"
+	"github.com/absmach/magistrala/consumers"
+	consumertracing "github.com/absmach/magistrala/consumers/tracing"
+	"github.com/absmach/magistrala/consumers/writers/api"
+	mglog "github.com/absmach/magistrala/logger"
+	jaegerclient "github.com/absmach/magistrala/pkg/jaeger"
+	"github.com/absmach/magistrala/pkg/messaging/brokers"
+	brokerstracing "github.com/absmach/magistrala/pkg/messaging/brokers/tracing"
+	pgclient "github.com/absmach/magistrala/pkg/postgres"
+	"github.com/absmach/magistrala/pkg/prometheus"
+	"github.com/absmach/magistrala/pkg/server"
+	httpserver "github.com/absmach/magistrala/pkg/server/http"
+	"github.com/absmach/magistrala/pkg/uuid"
+	writerpg "github.com/absmach/mg-contrib/consumers/writers/postgres"
+	"github.com/caarlos0/env/v10"
+	"github.com/jmoiron/sqlx"
+	"golang.org/x/sync/errgroup"
+)
+
+const (
+	svcName        = "postgres-writer"
+	envPrefixDB    = "MG_POSTGRES_"
+	envPrefixHTTP  = "MG_POSTGRES_WRITER_HTTP_"
+	defDB          = "messages"
+	defSvcHTTPPort = "9010"
+)
+
+type config struct {
+	LogLevel      string  `env:"MG_POSTGRES_WRITER_LOG_LEVEL" envDefault:"info"`
+	ConfigPath    string  `env:"MG_POSTGRES_WRITER_CONFIG_PATH" envDefault:"/config.toml"`
+	BrokerURL     string  `env:"MG_MESSAGE_BROKER_URL" envDefault:"nats://localhost:4222"`
+	JaegerURL     url.URL `env:"MG_JAEGER_URL" envDefault:"http://jaeger:14268/api/traces"`
+	SendTelemetry bool    `env:"MG_SEND_TELEMETRY" envDefault:"true"`
+	InstanceID    string  `env:"MG_POSTGRES_WRITER_INSTANCE_ID" envDefault:""`
+	TraceRatio    float64 `env:"MG_JAEGER_TRACE_RATIO" envDefault:"1.0"`
+}
+
+func main() {
+	ctx, cancel := context.WithCancel(context.Background())
+	g, ctx := errgroup.WithContext(ctx)
+
+	cfg := config{}
+	if err := env.Parse(&cfg); err != nil {
+		log.Fatalf("failed to load %s configuration : %s", svcName, err)
+	}
+
+	logger, err := mglog.New(os.Stdout, cfg.LogLevel)
+	if err != nil {
+		log.Fatalf("failed to init logger: %s", err.Error())
+	}
+
+	var exitCode int
+	defer mglog.ExitWithError(&exitCode)
+
+	if cfg.InstanceID == "" {
+		if cfg.InstanceID, err = uuid.New().ID(); err != nil {
+			logger.Error(fmt.Sprintf("failed to generate instanceID: %s", err))
+			exitCode = 1
+			return
+		}
+	}
+
+	httpServerConfig := server.Config{Port: defSvcHTTPPort}
+	if err := env.ParseWithOptions(&httpServerConfig, env.Options{Prefix: envPrefixHTTP}); err != nil {
+		logger.Error(fmt.Sprintf("failed to load %s HTTP server configuration : %s", svcName, err))
+		exitCode = 1
+		return
+	}
+
+	dbConfig := pgclient.Config{Name: defDB}
+	if err := env.ParseWithOptions(&dbConfig, env.Options{Prefix: envPrefixDB}); err != nil {
+		logger.Error(fmt.Sprintf("failed to load %s Postgres configuration : %s", svcName, err))
+		exitCode = 1
+		return
+	}
+	db, err := pgclient.Setup(dbConfig, *writerpg.Migration())
+	if err != nil {
+		logger.Error(err.Error())
+		exitCode = 1
+		return
+	}
+	defer db.Close()
+
+	tp, err := jaegerclient.NewProvider(ctx, svcName, cfg.JaegerURL, cfg.InstanceID, cfg.TraceRatio)
+	if err != nil {
+		logger.Error(fmt.Sprintf("Failed to init Jaeger: %s", err))
+		exitCode = 1
+		return
+	}
+	defer func() {
+		if err := tp.Shutdown(ctx); err != nil {
+			logger.Error(fmt.Sprintf("Error shutting down tracer provider: %v", err))
+		}
+	}()
+	tracer := tp.Tracer(svcName)
+
+	pubSub, err := brokers.NewPubSub(ctx, cfg.BrokerURL, logger)
+	if err != nil {
+		logger.Error(fmt.Sprintf("failed to connect to message 
broker: %s", err)) + exitCode = 1 + return + } + defer pubSub.Close() + pubSub = brokerstracing.NewPubSub(httpServerConfig, tracer, pubSub) + + repo := newService(db, logger) + repo = consumertracing.NewBlocking(tracer, repo, httpServerConfig) + + if err = consumers.Start(ctx, svcName, pubSub, repo, cfg.ConfigPath, logger); err != nil { + logger.Error(fmt.Sprintf("failed to create Postgres writer: %s", err)) + exitCode = 1 + return + } + + hs := httpserver.NewServer(ctx, cancel, svcName, httpServerConfig, api.MakeHandler(svcName, cfg.InstanceID), logger) + + if cfg.SendTelemetry { + chc := chclient.New(svcName, magistrala.Version, logger, cancel) + go chc.CallHome(ctx) + } + + g.Go(func() error { + return hs.Start() + }) + + g.Go(func() error { + return server.StopSignalHandler(ctx, cancel, logger, svcName, hs) + }) + + if err := g.Wait(); err != nil { + logger.Error(fmt.Sprintf("Postgres writer service terminated: %s", err)) + } +} + +func newService(db *sqlx.DB, logger *slog.Logger) consumers.BlockingConsumer { + svc := writerpg.New(db) + svc = api.LoggingMiddleware(svc, logger) + counter, latency := prometheus.MakeMetrics("postgres", "message_writer") + svc = api.MetricsMiddleware(svc, counter, latency) + return svc +} diff --git a/cmd/provision/main.go b/cmd/provision/main.go new file mode 100644 index 0000000..e6e5b8a --- /dev/null +++ b/cmd/provision/main.go @@ -0,0 +1,190 @@ +// Copyright (c) Abstract Machines +// SPDX-License-Identifier: Apache-2.0 + +// Package main contains provision main function to start the provision service. +package main + +import ( + "context" + "encoding/json" + "fmt" + "log" + "os" + "reflect" + + chclient "github.com/absmach/callhome/pkg/client" + "github.com/absmach/magistrala" + mglog "github.com/absmach/magistrala/logger" + mgclients "github.com/absmach/magistrala/pkg/clients" + "github.com/absmach/magistrala/pkg/errors" + mggroups "github.com/absmach/magistrala/pkg/groups" + mgsdk "github.com/absmach/magistrala/pkg/sdk/go" + "github.com/absmach/magistrala/pkg/server" + httpserver "github.com/absmach/magistrala/pkg/server/http" + "github.com/absmach/magistrala/pkg/uuid" + "github.com/absmach/mg-contrib/provision" + "github.com/absmach/mg-contrib/provision/api" + "github.com/caarlos0/env/v10" + "golang.org/x/sync/errgroup" +) + +const ( + svcName = "provision" + contentType = "application/json" +) + +var ( + errMissingConfigFile = errors.New("missing config file setting") + errFailLoadingConfigFile = errors.New("failed to load config from file") + errFailedToReadBootstrapContent = errors.New("failed to read bootstrap content from envs") +) + +func main() { + ctx, cancel := context.WithCancel(context.Background()) + g, ctx := errgroup.WithContext(ctx) + + cfg, err := loadConfig() + if err != nil { + log.Fatalf(fmt.Sprintf("failed to load %s configuration : %s", svcName, err)) + } + + logger, err := mglog.New(os.Stdout, cfg.Server.LogLevel) + if err != nil { + log.Fatalf("failed to init logger: %s", err.Error()) + } + + var exitCode int + defer mglog.ExitWithError(&exitCode) + + if cfg.InstanceID == "" { + if cfg.InstanceID, err = uuid.New().ID(); err != nil { + logger.Error(fmt.Sprintf("failed to generate instanceID: %s", err)) + exitCode = 1 + return + } + } + + if cfgFromFile, err := loadConfigFromFile(cfg.File); err != nil { + logger.Warn(fmt.Sprintf("Continue with settings from env, failed to load from: %s: %s", cfg.File, err)) + } else { + // Merge environment variables and file settings. 
+ mergeConfigs(&cfgFromFile, &cfg) + cfg = cfgFromFile + logger.Info("Continue with settings from file: " + cfg.File) + } + + SDKCfg := mgsdk.Config{ + UsersURL: cfg.Server.UsersURL, + ThingsURL: cfg.Server.ThingsURL, + BootstrapURL: cfg.Server.MgBSURL, + CertsURL: cfg.Server.MgCertsURL, + MsgContentType: contentType, + TLSVerification: cfg.Server.TLS, + } + SDK := mgsdk.NewSDK(SDKCfg) + + svc := provision.New(cfg, SDK, logger) + svc = api.NewLoggingMiddleware(svc, logger) + + httpServerConfig := server.Config{Host: "", Port: cfg.Server.HTTPPort, KeyFile: cfg.Server.ServerKey, CertFile: cfg.Server.ServerCert} + hs := httpserver.NewServer(ctx, cancel, svcName, httpServerConfig, api.MakeHandler(svc, logger, cfg.InstanceID), logger) + + if cfg.SendTelemetry { + chc := chclient.New(svcName, magistrala.Version, logger, cancel) + go chc.CallHome(ctx) + } + + g.Go(func() error { + return hs.Start() + }) + + g.Go(func() error { + return server.StopSignalHandler(ctx, cancel, logger, svcName, hs) + }) + + if err := g.Wait(); err != nil { + logger.Error(fmt.Sprintf("Provision service terminated: %s", err)) + } +} + +func loadConfigFromFile(file string) (provision.Config, error) { + _, err := os.Stat(file) + if os.IsNotExist(err) { + return provision.Config{}, errors.Wrap(errMissingConfigFile, err) + } + c, err := provision.Read(file) + if err != nil { + return provision.Config{}, errors.Wrap(errFailLoadingConfigFile, err) + } + return c, nil +} + +func loadConfig() (provision.Config, error) { + cfg := provision.Config{} + if err := env.Parse(&cfg); err != nil { + return provision.Config{}, err + } + + if cfg.Bootstrap.AutoWhiteList && !cfg.Bootstrap.Provision { + return provision.Config{}, errors.New("Can't auto whitelist if auto config save is off") + } + + var content map[string]interface{} + if cfg.BSContent != "" { + if err := json.Unmarshal([]byte(cfg.BSContent), &content); err != nil { + return provision.Config{}, errFailedToReadBootstrapContent + } + } + + cfg.Bootstrap.Content = content + // This is default conf for provision if there is no config file + cfg.Channels = []mggroups.Group{ + { + Name: "control-channel", + Metadata: map[string]interface{}{"type": "control"}, + }, { + Name: "data-channel", + Metadata: map[string]interface{}{"type": "data"}, + }, + } + cfg.Things = []mgclients.Client{ + { + Name: "thing", + Metadata: map[string]interface{}{"external_id": "xxxxxx"}, + }, + } + + return cfg, nil +} + +func mergeConfigs(dst, src interface{}) interface{} { + d := reflect.ValueOf(dst).Elem() + s := reflect.ValueOf(src).Elem() + + for i := 0; i < d.NumField(); i++ { + dField := d.Field(i) + sField := s.Field(i) + switch dField.Kind() { + case reflect.Struct: + dst := dField.Addr().Interface() + src := sField.Addr().Interface() + m := mergeConfigs(dst, src) + val := reflect.ValueOf(m).Elem().Interface() + dField.Set(reflect.ValueOf(val)) + case reflect.Slice: + case reflect.Bool: + if dField.Interface() == false { + dField.Set(reflect.ValueOf(sField.Interface())) + } + case reflect.Int: + if dField.Interface() == 0 { + dField.Set(reflect.ValueOf(sField.Interface())) + } + case reflect.String: + if dField.Interface() == "" { + dField.Set(reflect.ValueOf(sField.Interface())) + } + } + } + return dst +} diff --git a/cmd/smpp-notifier/main.go b/cmd/smpp-notifier/main.go new file mode 100644 index 0000000..7b6eed1 --- /dev/null +++ b/cmd/smpp-notifier/main.go @@ -0,0 +1,189 @@ +// Copyright (c) Abstract Machines +// SPDX-License-Identifier: Apache-2.0 + +// Package main contains smpp-notifier 
main function to start the smpp-notifier service. +package main + +import ( + "context" + "fmt" + "log" + "log/slog" + "net/url" + "os" + + chclient "github.com/absmach/callhome/pkg/client" + "github.com/absmach/magistrala" + "github.com/absmach/magistrala/consumers" + "github.com/absmach/magistrala/consumers/notifiers" + "github.com/absmach/magistrala/consumers/notifiers/api" + notifierpg "github.com/absmach/magistrala/consumers/notifiers/postgres" + mgsmpp "github.com/absmach/magistrala/consumers/notifiers/smpp" + "github.com/absmach/magistrala/consumers/notifiers/tracing" + mglog "github.com/absmach/magistrala/logger" + "github.com/absmach/magistrala/pkg/auth" + jaegerclient "github.com/absmach/magistrala/pkg/jaeger" + "github.com/absmach/magistrala/pkg/messaging/brokers" + brokerstracing "github.com/absmach/magistrala/pkg/messaging/brokers/tracing" + pgclient "github.com/absmach/magistrala/pkg/postgres" + "github.com/absmach/magistrala/pkg/prometheus" + "github.com/absmach/magistrala/pkg/server" + httpserver "github.com/absmach/magistrala/pkg/server/http" + "github.com/absmach/magistrala/pkg/ulid" + "github.com/absmach/magistrala/pkg/uuid" + "github.com/caarlos0/env/v10" + "github.com/jmoiron/sqlx" + "go.opentelemetry.io/otel/trace" + "golang.org/x/sync/errgroup" +) + +const ( + svcName = "smpp-notifier" + envPrefixDB = "MG_SMPP_NOTIFIER_DB_" + envPrefixHTTP = "MG_SMPP_NOTIFIER_HTTP_" + envPrefixAuth = "MG_AUTH_GRPC_" + defDB = "subscriptions" + defSvcHTTPPort = "9014" +) + +type config struct { + LogLevel string `env:"MG_SMPP_NOTIFIER_LOG_LEVEL" envDefault:"info"` + From string `env:"MG_SMPP_NOTIFIER_FROM_ADDR" envDefault:""` + ConfigPath string `env:"MG_SMPP_NOTIFIER_CONFIG_PATH" envDefault:"/config.toml"` + BrokerURL string `env:"MG_MESSAGE_BROKER_URL" envDefault:"nats://localhost:4222"` + JaegerURL url.URL `env:"MG_JAEGER_URL" envDefault:"http://jaeger:14268/api/traces"` + SendTelemetry bool `env:"MG_SEND_TELEMETRY" envDefault:"true"` + InstanceID string `env:"MG_SMPP_NOTIFIER_INSTANCE_ID" envDefault:""` + TraceRatio float64 `env:"MG_JAEGER_TRACE_RATIO" envDefault:"1.0"` +} + +func main() { + ctx, cancel := context.WithCancel(context.Background()) + g, ctx := errgroup.WithContext(ctx) + + cfg := config{} + if err := env.Parse(&cfg); err != nil { + log.Fatalf("failed to load %s configuration : %s", svcName, err) + } + + logger, err := mglog.New(os.Stdout, cfg.LogLevel) + if err != nil { + log.Fatalf("failed to init logger: %s", err.Error()) + } + + var exitCode int + defer mglog.ExitWithError(&exitCode) + + if cfg.InstanceID == "" { + if cfg.InstanceID, err = uuid.New().ID(); err != nil { + logger.Error(fmt.Sprintf("failed to generate instanceID: %s", err)) + exitCode = 1 + return + } + } + + dbConfig := pgclient.Config{Name: defDB} + if err := env.ParseWithOptions(&dbConfig, env.Options{Prefix: envPrefixDB}); err != nil { + logger.Error(fmt.Sprintf("failed to load %s Postgres configuration : %s", svcName, err)) + exitCode = 1 + return + } + db, err := pgclient.Setup(dbConfig, *notifierpg.Migration()) + if err != nil { + logger.Error(err.Error()) + exitCode = 1 + return + } + defer db.Close() + + smppConfig := mgsmpp.Config{} + if err := env.Parse(&smppConfig); err != nil { + logger.Error(fmt.Sprintf("failed to load SMPP configuration from environment : %s", err)) + exitCode = 1 + return + } + + httpServerConfig := server.Config{Port: defSvcHTTPPort} + if err := env.ParseWithOptions(&httpServerConfig, env.Options{Prefix: envPrefixHTTP}); err != nil { + 
logger.Error(fmt.Sprintf("failed to load %s HTTP server configuration : %s", svcName, err)) + exitCode = 1 + return + } + + tp, err := jaegerclient.NewProvider(ctx, svcName, cfg.JaegerURL, cfg.InstanceID, cfg.TraceRatio) + if err != nil { + logger.Error(fmt.Sprintf("Failed to init Jaeger: %s", err)) + exitCode = 1 + return + } + defer func() { + if err := tp.Shutdown(ctx); err != nil { + logger.Error(fmt.Sprintf("Error shutting down tracer provider: %v", err)) + } + }() + tracer := tp.Tracer(svcName) + + pubSub, err := brokers.NewPubSub(ctx, cfg.BrokerURL, logger) + if err != nil { + logger.Error(fmt.Sprintf("failed to connect to message broker: %s", err)) + exitCode = 1 + return + } + defer pubSub.Close() + pubSub = brokerstracing.NewPubSub(httpServerConfig, tracer, pubSub) + + authConfig := auth.Config{} + if err := env.ParseWithOptions(&authConfig, env.Options{Prefix: envPrefixAuth}); err != nil { + logger.Error(fmt.Sprintf("failed to load %s auth configuration : %s", svcName, err)) + exitCode = 1 + return + } + + authClient, authHandler, err := auth.Setup(ctx, authConfig) + if err != nil { + logger.Error(err.Error()) + exitCode = 1 + return + } + defer authHandler.Close() + logger.Info("Successfully connected to auth grpc server " + authHandler.Secure()) + + svc := newService(db, tracer, authClient, cfg, smppConfig, logger) + if err = consumers.Start(ctx, svcName, pubSub, svc, cfg.ConfigPath, logger); err != nil { + logger.Error(fmt.Sprintf("failed to create Postgres writer: %s", err)) + exitCode = 1 + return + } + + hs := httpserver.NewServer(ctx, cancel, svcName, httpServerConfig, api.MakeHandler(svc, logger, cfg.InstanceID), logger) + + if cfg.SendTelemetry { + chc := chclient.New(svcName, magistrala.Version, logger, cancel) + go chc.CallHome(ctx) + } + + g.Go(func() error { + return hs.Start() + }) + + g.Go(func() error { + return server.StopSignalHandler(ctx, cancel, logger, svcName, hs) + }) + + if err := g.Wait(); err != nil { + logger.Error(fmt.Sprintf("SMPP notifier service terminated: %s", err)) + } +} + +func newService(db *sqlx.DB, tracer trace.Tracer, authClient magistrala.AuthServiceClient, c config, sc mgsmpp.Config, logger *slog.Logger) notifiers.Service { + database := notifierpg.NewDatabase(db, tracer) + repo := tracing.New(tracer, notifierpg.New(database)) + idp := ulid.New() + notifier := mgsmpp.New(sc) + svc := notifiers.New(authClient, repo, idp, notifier, c.From) + svc = api.LoggingMiddleware(svc, logger) + counter, latency := prometheus.MakeMetrics("notifier", "smpp") + svc = api.MetricsMiddleware(svc, counter, latency) + + return svc +} diff --git a/cmd/smtp-notifier/main.go b/cmd/smtp-notifier/main.go new file mode 100644 index 0000000..ca0821c --- /dev/null +++ b/cmd/smtp-notifier/main.go @@ -0,0 +1,203 @@ +// Copyright (c) Abstract Machines +// SPDX-License-Identifier: Apache-2.0 + +// Package main contains smtp-notifier main function to start the smtp-notifier service. 
+package main + +import ( + "context" + "fmt" + "log" + "log/slog" + "net/url" + "os" + + chclient "github.com/absmach/callhome/pkg/client" + "github.com/absmach/magistrala" + "github.com/absmach/magistrala/consumers" + "github.com/absmach/magistrala/consumers/notifiers" + "github.com/absmach/magistrala/consumers/notifiers/api" + notifierpg "github.com/absmach/magistrala/consumers/notifiers/postgres" + "github.com/absmach/magistrala/consumers/notifiers/tracing" + mglog "github.com/absmach/magistrala/logger" + "github.com/absmach/magistrala/pkg/auth" + jaegerclient "github.com/absmach/magistrala/pkg/jaeger" + "github.com/absmach/magistrala/pkg/messaging/brokers" + brokerstracing "github.com/absmach/magistrala/pkg/messaging/brokers/tracing" + pgclient "github.com/absmach/magistrala/pkg/postgres" + "github.com/absmach/magistrala/pkg/prometheus" + "github.com/absmach/magistrala/pkg/server" + httpserver "github.com/absmach/magistrala/pkg/server/http" + "github.com/absmach/magistrala/pkg/ulid" + "github.com/absmach/magistrala/pkg/uuid" + "github.com/absmach/mg-contrib/consumers/notifiers/smtp" + "github.com/absmach/mg-contrib/pkg/email" + "github.com/caarlos0/env/v10" + "github.com/jmoiron/sqlx" + "go.opentelemetry.io/otel/trace" + "golang.org/x/sync/errgroup" +) + +const ( + svcName = "smtp-notifier" + envPrefixDB = "MG_SMTP_NOTIFIER_DB_" + envPrefixHTTP = "MG_SMTP_NOTIFIER_HTTP_" + envPrefixAuth = "MG_AUTH_GRPC_" + defDB = "subscriptions" + defSvcHTTPPort = "9015" +) + +type config struct { + LogLevel string `env:"MG_SMTP_NOTIFIER_LOG_LEVEL" envDefault:"info"` + ConfigPath string `env:"MG_SMTP_NOTIFIER_CONFIG_PATH" envDefault:"/config.toml"` + From string `env:"MG_SMTP_NOTIFIER_FROM_ADDR" envDefault:""` + BrokerURL string `env:"MG_MESSAGE_BROKER_URL" envDefault:"nats://localhost:4222"` + JaegerURL url.URL `env:"MG_JAEGER_URL" envDefault:"http://jaeger:14268/api/traces"` + SendTelemetry bool `env:"MG_SEND_TELEMETRY" envDefault:"true"` + InstanceID string `env:"MG_SMTP_NOTIFIER_INSTANCE_ID" envDefault:""` + TraceRatio float64 `env:"MG_JAEGER_TRACE_RATIO" envDefault:"1.0"` +} + +func main() { + ctx, cancel := context.WithCancel(context.Background()) + g, ctx := errgroup.WithContext(ctx) + + cfg := config{} + if err := env.Parse(&cfg); err != nil { + log.Fatalf("failed to load %s configuration : %s", svcName, err) + } + + logger, err := mglog.New(os.Stdout, cfg.LogLevel) + if err != nil { + log.Fatalf("failed to init logger: %s", err.Error()) + } + + var exitCode int + defer mglog.ExitWithError(&exitCode) + + if cfg.InstanceID == "" { + if cfg.InstanceID, err = uuid.New().ID(); err != nil { + logger.Error(fmt.Sprintf("failed to generate instanceID: %s", err)) + exitCode = 1 + return + } + } + + dbConfig := pgclient.Config{Name: defDB} + if err := env.ParseWithOptions(&dbConfig, env.Options{Prefix: envPrefixDB}); err != nil { + logger.Error(fmt.Sprintf("failed to load %s Postgres configuration : %s", svcName, err)) + exitCode = 1 + return + } + db, err := pgclient.Setup(dbConfig, *notifierpg.Migration()) + if err != nil { + logger.Error(err.Error()) + exitCode = 1 + return + } + defer db.Close() + + ec := email.Config{} + if err := env.Parse(&ec); err != nil { + logger.Error(fmt.Sprintf("failed to load email configuration : %s", err)) + exitCode = 1 + return + } + + httpServerConfig := server.Config{Port: defSvcHTTPPort} + if err := env.ParseWithOptions(&httpServerConfig, env.Options{Prefix: envPrefixHTTP}); err != nil { + logger.Error(fmt.Sprintf("failed to load %s HTTP server configuration : %s", 
svcName, err))
+		exitCode = 1
+		return
+	}
+
+	tp, err := jaegerclient.NewProvider(ctx, svcName, cfg.JaegerURL, cfg.InstanceID, cfg.TraceRatio)
+	if err != nil {
+		logger.Error(fmt.Sprintf("failed to init Jaeger: %s", err))
+		exitCode = 1
+		return
+	}
+	defer func() {
+		if err := tp.Shutdown(ctx); err != nil {
+			logger.Error(fmt.Sprintf("Error shutting down tracer provider: %v", err))
+		}
+	}()
+	tracer := tp.Tracer(svcName)
+
+	pubSub, err := brokers.NewPubSub(ctx, cfg.BrokerURL, logger)
+	if err != nil {
+		logger.Error(fmt.Sprintf("failed to connect to message broker: %s", err))
+		exitCode = 1
+		return
+	}
+	defer pubSub.Close()
+	pubSub = brokerstracing.NewPubSub(httpServerConfig, tracer, pubSub)
+
+	authConfig := auth.Config{}
+	if err := env.ParseWithOptions(&authConfig, env.Options{Prefix: envPrefixAuth}); err != nil {
+		logger.Error(fmt.Sprintf("failed to load %s auth configuration : %s", svcName, err))
+		exitCode = 1
+		return
+	}
+
+	authClient, authHandler, err := auth.Setup(ctx, authConfig)
+	if err != nil {
+		logger.Error(err.Error())
+		exitCode = 1
+		return
+	}
+	defer authHandler.Close()
+
+	logger.Info("Successfully connected to auth grpc server " + authHandler.Secure())
+
+	svc, err := newService(db, tracer, authClient, cfg, ec, logger)
+	if err != nil {
+		logger.Error(err.Error())
+		exitCode = 1
+		return
+	}
+
+	if err = consumers.Start(ctx, svcName, pubSub, svc, cfg.ConfigPath, logger); err != nil {
+		logger.Error(fmt.Sprintf("failed to start SMTP notifier: %s", err))
+		exitCode = 1
+		return
+	}
+
+	hs := httpserver.NewServer(ctx, cancel, svcName, httpServerConfig, api.MakeHandler(svc, logger, cfg.InstanceID), logger)
+
+	if cfg.SendTelemetry {
+		chc := chclient.New(svcName, magistrala.Version, logger, cancel)
+		go chc.CallHome(ctx)
+	}
+
+	g.Go(func() error {
+		return hs.Start()
+	})
+
+	g.Go(func() error {
+		return server.StopSignalHandler(ctx, cancel, logger, svcName, hs)
+	})
+
+	if err := g.Wait(); err != nil {
+		logger.Error(fmt.Sprintf("SMTP notifier service terminated: %s", err))
+	}
+}
+
+func newService(db *sqlx.DB, tracer trace.Tracer, authClient magistrala.AuthServiceClient, c config, ec email.Config, logger *slog.Logger) (notifiers.Service, error) {
+	database := notifierpg.NewDatabase(db, tracer)
+	repo := tracing.New(tracer, notifierpg.New(database))
+	idp := ulid.New()
+
+	agent, err := email.New(&ec)
+	if err != nil {
+		return nil, fmt.Errorf("failed to create email agent: %s", err)
+	}
+
+	notifier := smtp.New(agent)
+	svc := notifiers.New(authClient, repo, idp, notifier, c.From)
+	svc = api.LoggingMiddleware(svc, logger)
+	counter, latency := prometheus.MakeMetrics("notifier", "smtp")
+	svc = api.MetricsMiddleware(svc, counter, latency)
+
+	return svc, nil
+}
diff --git a/cmd/timescale-reader/main.go b/cmd/timescale-reader/main.go
new file mode 100644
index 0000000..31dbd84
--- /dev/null
+++ b/cmd/timescale-reader/main.go
@@ -0,0 +1,153 @@
+// Copyright (c) Abstract Machines
+// SPDX-License-Identifier: Apache-2.0
+
+// Package main contains the timescale-reader main function to start the timescale-reader service.
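+//
+// As a minimal sketch of the configuration pattern used below (the variable
+// value shown is hypothetical), settings are read from the environment
+// through the env tags on the config struct:
+//
+//	os.Setenv("MG_TIMESCALE_READER_LOG_LEVEL", "debug")
+//
+//	cfg := config{}
+//	if err := env.Parse(&cfg); err != nil {
+//		log.Fatalf("failed to load %s configuration : %s", svcName, err)
+//	}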
+package main
+
+import (
+	"context"
+	"fmt"
+	"log"
+	"log/slog"
+	"os"
+
+	chclient "github.com/absmach/callhome/pkg/client"
+	"github.com/absmach/magistrala"
+	mglog "github.com/absmach/magistrala/logger"
+	"github.com/absmach/magistrala/pkg/auth"
+	pgclient "github.com/absmach/magistrala/pkg/postgres"
+	"github.com/absmach/magistrala/pkg/prometheus"
+	"github.com/absmach/magistrala/pkg/server"
+	httpserver "github.com/absmach/magistrala/pkg/server/http"
+	"github.com/absmach/magistrala/pkg/uuid"
+	"github.com/absmach/magistrala/readers"
+	"github.com/absmach/magistrala/readers/api"
+	"github.com/absmach/mg-contrib/readers/timescale"
+	"github.com/caarlos0/env/v10"
+	"github.com/jmoiron/sqlx"
+	"golang.org/x/sync/errgroup"
+)
+
+const (
+	svcName        = "timescaledb-reader"
+	envPrefixDB    = "MG_TIMESCALE_"
+	envPrefixHTTP  = "MG_TIMESCALE_READER_HTTP_"
+	envPrefixAuth  = "MG_AUTH_GRPC_"
+	envPrefixAuthz = "MG_THINGS_AUTH_GRPC_"
+	defDB          = "messages"
+	defSvcHTTPPort = "9011"
+)
+
+type config struct {
+	LogLevel      string `env:"MG_TIMESCALE_READER_LOG_LEVEL" envDefault:"info"`
+	SendTelemetry bool   `env:"MG_SEND_TELEMETRY" envDefault:"true"`
+	InstanceID    string `env:"MG_TIMESCALE_READER_INSTANCE_ID" envDefault:""`
+}
+
+func main() {
+	ctx, cancel := context.WithCancel(context.Background())
+	g, ctx := errgroup.WithContext(ctx)
+
+	cfg := config{}
+	if err := env.Parse(&cfg); err != nil {
+		log.Fatalf("failed to load %s configuration : %s", svcName, err)
+	}
+
+	logger, err := mglog.New(os.Stdout, cfg.LogLevel)
+	if err != nil {
+		log.Fatalf("failed to init logger: %s", err.Error())
+	}
+
+	var exitCode int
+	defer mglog.ExitWithError(&exitCode)
+
+	if cfg.InstanceID == "" {
+		if cfg.InstanceID, err = uuid.New().ID(); err != nil {
+			logger.Error(fmt.Sprintf("failed to generate instanceID: %s", err))
+			exitCode = 1
+			return
+		}
+	}
+
+	dbConfig := pgclient.Config{Name: defDB}
+	if err := env.ParseWithOptions(&dbConfig, env.Options{Prefix: envPrefixDB}); err != nil {
+		logger.Error(err.Error())
+		exitCode = 1
+		return
+	}
+	db, err := pgclient.Connect(dbConfig)
+	if err != nil {
+		logger.Error(err.Error())
+		exitCode = 1
+		return
+	}
+	defer db.Close()
+
+	repo := newService(db, logger)
+
+	authConfig := auth.Config{}
+	if err := env.ParseWithOptions(&authConfig, env.Options{Prefix: envPrefixAuth}); err != nil {
+		logger.Error(fmt.Sprintf("failed to load %s auth configuration : %s", svcName, err))
+		exitCode = 1
+		return
+	}
+
+	ac, acHandler, err := auth.Setup(ctx, authConfig)
+	if err != nil {
+		logger.Error(err.Error())
+		exitCode = 1
+		return
+	}
+	defer acHandler.Close()
+
+	logger.Info("Successfully connected to auth grpc server " + acHandler.Secure())
+
+	authConfig = auth.Config{}
+	if err := env.ParseWithOptions(&authConfig, env.Options{Prefix: envPrefixAuthz}); err != nil {
+		logger.Error(fmt.Sprintf("failed to load %s auth configuration : %s", svcName, err))
+		exitCode = 1
+		return
+	}
+
+	tc, tcHandler, err := auth.SetupAuthz(ctx, authConfig)
+	if err != nil {
+		logger.Error(err.Error())
+		exitCode = 1
+		return
+	}
+	defer tcHandler.Close()
+
+	logger.Info("Successfully connected to things grpc server " + tcHandler.Secure())
+
+	httpServerConfig := server.Config{Port: defSvcHTTPPort}
+	if err := env.ParseWithOptions(&httpServerConfig, env.Options{Prefix: envPrefixHTTP}); err != nil {
+		logger.Error(fmt.Sprintf("failed to load %s HTTP server configuration : %s", svcName, err))
+		exitCode = 1
+		return
+	}
+	hs := httpserver.NewServer(ctx, cancel, svcName, httpServerConfig, api.MakeHandler(repo, ac, tc, svcName,
cfg.InstanceID), logger) + + if cfg.SendTelemetry { + chc := chclient.New(svcName, magistrala.Version, logger, cancel) + go chc.CallHome(ctx) + } + + g.Go(func() error { + return hs.Start() + }) + + g.Go(func() error { + return server.StopSignalHandler(ctx, cancel, logger, svcName, hs) + }) + + if err := g.Wait(); err != nil { + logger.Error(fmt.Sprintf("Timescale reader service terminated: %s", err)) + } +} + +func newService(db *sqlx.DB, logger *slog.Logger) readers.MessageRepository { + svc := timescale.New(db) + svc = api.LoggingMiddleware(svc, logger) + counter, latency := prometheus.MakeMetrics("timescale", "message_reader") + svc = api.MetricsMiddleware(svc, counter, latency) + + return svc +} diff --git a/cmd/timescale-writer/main.go b/cmd/timescale-writer/main.go new file mode 100644 index 0000000..943a51f --- /dev/null +++ b/cmd/timescale-writer/main.go @@ -0,0 +1,156 @@ +// Copyright (c) Abstract Machines +// SPDX-License-Identifier: Apache-2.0 + +// Package main contains timescale-writer main function to start the timescale-writer service. +package main + +import ( + "context" + "fmt" + "log" + "log/slog" + "net/url" + "os" + + chclient "github.com/absmach/callhome/pkg/client" + "github.com/absmach/magistrala" + "github.com/absmach/magistrala/consumers" + consumertracing "github.com/absmach/magistrala/consumers/tracing" + "github.com/absmach/magistrala/consumers/writers/api" + mglog "github.com/absmach/magistrala/logger" + jaegerclient "github.com/absmach/magistrala/pkg/jaeger" + "github.com/absmach/magistrala/pkg/messaging/brokers" + brokerstracing "github.com/absmach/magistrala/pkg/messaging/brokers/tracing" + pgclient "github.com/absmach/magistrala/pkg/postgres" + "github.com/absmach/magistrala/pkg/prometheus" + "github.com/absmach/magistrala/pkg/server" + httpserver "github.com/absmach/magistrala/pkg/server/http" + "github.com/absmach/magistrala/pkg/uuid" + "github.com/absmach/mg-contrib/consumers/writers/timescale" + "github.com/caarlos0/env/v10" + "github.com/jmoiron/sqlx" + "golang.org/x/sync/errgroup" +) + +const ( + svcName = "timescaledb-writer" + envPrefixDB = "MG_TIMESCALE_" + envPrefixHTTP = "MG_TIMESCALE_WRITER_HTTP_" + defDB = "messages" + defSvcHTTPPort = "9012" +) + +type config struct { + LogLevel string `env:"MG_TIMESCALE_WRITER_LOG_LEVEL" envDefault:"info"` + ConfigPath string `env:"MG_TIMESCALE_WRITER_CONFIG_PATH" envDefault:"/config.toml"` + BrokerURL string `env:"MG_MESSAGE_BROKER_URL" envDefault:"nats://localhost:4222"` + JaegerURL url.URL `env:"MG_JAEGER_URL" envDefault:"http://jaeger:14268/api/traces"` + SendTelemetry bool `env:"MG_SEND_TELEMETRY" envDefault:"true"` + InstanceID string `env:"MG_TIMESCALE_WRITER_INSTANCE_ID" envDefault:""` + TraceRatio float64 `env:"MG_JAEGER_TRACE_RATIO" envDefault:"1.0"` +} + +func main() { + ctx, cancel := context.WithCancel(context.Background()) + g, ctx := errgroup.WithContext(ctx) + + cfg := config{} + if err := env.Parse(&cfg); err != nil { + log.Fatalf("failed to load %s service configuration : %s", svcName, err) + } + + logger, err := mglog.New(os.Stdout, cfg.LogLevel) + if err != nil { + log.Fatalf("failed to init logger: %s", err.Error()) + } + + var exitCode int + defer mglog.ExitWithError(&exitCode) + + if cfg.InstanceID == "" { + if cfg.InstanceID, err = uuid.New().ID(); err != nil { + logger.Error(fmt.Sprintf("failed to generate instanceID: %s", err)) + exitCode = 1 + return + } + } + + httpServerConfig := server.Config{Port: defSvcHTTPPort} + if err := env.ParseWithOptions(&httpServerConfig, 
env.Options{Prefix: envPrefixHTTP}); err != nil { + logger.Error(fmt.Sprintf("failed to load %s HTTP server configuration : %s", svcName, err)) + exitCode = 1 + return + } + + dbConfig := pgclient.Config{Name: defDB} + if err := env.ParseWithOptions(&dbConfig, env.Options{Prefix: envPrefixDB}); err != nil { + logger.Error(fmt.Sprintf("failed to load %s Postgres configuration : %s", svcName, err)) + exitCode = 1 + return + } + db, err := pgclient.Setup(dbConfig, *timescale.Migration()) + if err != nil { + logger.Error(err.Error()) + exitCode = 1 + return + } + defer db.Close() + + tp, err := jaegerclient.NewProvider(ctx, svcName, cfg.JaegerURL, cfg.InstanceID, cfg.TraceRatio) + if err != nil { + logger.Error(fmt.Sprintf("Failed to init Jaeger: %s", err)) + exitCode = 1 + return + } + defer func() { + if err := tp.Shutdown(ctx); err != nil { + logger.Error(fmt.Sprintf("Error shutting down tracer provider: %v", err)) + } + }() + tracer := tp.Tracer(svcName) + + repo := newService(db, logger) + repo = consumertracing.NewBlocking(tracer, repo, httpServerConfig) + + pubSub, err := brokers.NewPubSub(ctx, cfg.BrokerURL, logger) + if err != nil { + logger.Error(fmt.Sprintf("failed to connect to message broker: %s", err)) + exitCode = 1 + return + } + defer pubSub.Close() + pubSub = brokerstracing.NewPubSub(httpServerConfig, tracer, pubSub) + + if err = consumers.Start(ctx, svcName, pubSub, repo, cfg.ConfigPath, logger); err != nil { + logger.Error(fmt.Sprintf("failed to create Timescale writer: %s", err)) + exitCode = 1 + return + } + + hs := httpserver.NewServer(ctx, cancel, svcName, httpServerConfig, api.MakeHandler(svcName, cfg.InstanceID), logger) + + if cfg.SendTelemetry { + chc := chclient.New(svcName, magistrala.Version, logger, cancel) + go chc.CallHome(ctx) + } + + g.Go(func() error { + return hs.Start() + }) + + g.Go(func() error { + return server.StopSignalHandler(ctx, cancel, logger, svcName, hs) + }) + + if err := g.Wait(); err != nil { + logger.Error(fmt.Sprintf("Timescale writer service terminated: %s", err)) + } +} + +func newService(db *sqlx.DB, logger *slog.Logger) consumers.BlockingConsumer { + svc := timescale.New(db) + svc = api.LoggingMiddleware(svc, logger) + counter, latency := prometheus.MakeMetrics("timescale", "message_writer") + svc = api.MetricsMiddleware(svc, counter, latency) + return svc +} diff --git a/cmd/twins/main.go b/cmd/twins/main.go new file mode 100644 index 0000000..725149c --- /dev/null +++ b/cmd/twins/main.go @@ -0,0 +1,242 @@ +// Copyright (c) Abstract Machines +// SPDX-License-Identifier: Apache-2.0 + +// Package main contains twins main function to start the twins service. 
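+//
+// The service can use either the central auth gRPC service or standalone
+// authentication. A rough sketch of the selection logic implemented in
+// main() below (values are illustrative):
+//
+//	var authClient magistrala.AuthServiceClient
+//	if cfg.StandaloneID != "" && cfg.StandaloneToken != "" {
+//		// Standalone mode needs no external auth service.
+//		authClient = localusers.NewAuthService(cfg.StandaloneID, cfg.StandaloneToken)
+//	}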
+package main
+
+import (
+	"context"
+	"fmt"
+	"log"
+	"log/slog"
+	"net/url"
+	"os"
+
+	chclient "github.com/absmach/callhome/pkg/client"
+	"github.com/absmach/magistrala"
+	mglog "github.com/absmach/magistrala/logger"
+	"github.com/absmach/magistrala/pkg/auth"
+	jaegerclient "github.com/absmach/magistrala/pkg/jaeger"
+	"github.com/absmach/magistrala/pkg/messaging"
+	"github.com/absmach/magistrala/pkg/messaging/brokers"
+	brokerstracing "github.com/absmach/magistrala/pkg/messaging/brokers/tracing"
+	"github.com/absmach/magistrala/pkg/prometheus"
+	"github.com/absmach/magistrala/pkg/server"
+	httpserver "github.com/absmach/magistrala/pkg/server/http"
+	"github.com/absmach/magistrala/pkg/uuid"
+	localusers "github.com/absmach/magistrala/things/standalone"
+	mongoclient "github.com/absmach/mg-contrib/pkg/clients/mongo"
+	redisclient "github.com/absmach/mg-contrib/pkg/clients/redis"
+	"github.com/absmach/mg-contrib/twins"
+	"github.com/absmach/mg-contrib/twins/api"
+	twapi "github.com/absmach/mg-contrib/twins/api/http"
+	"github.com/absmach/mg-contrib/twins/events"
+	twmongodb "github.com/absmach/mg-contrib/twins/mongodb"
+	"github.com/absmach/mg-contrib/twins/tracing"
+	"github.com/caarlos0/env/v10"
+	"github.com/go-redis/redis/v8"
+	"go.mongodb.org/mongo-driver/mongo"
+	"go.opentelemetry.io/otel/trace"
+	"golang.org/x/sync/errgroup"
+)
+
+const (
+	svcName        = "twins"
+	envPrefixDB    = "MG_TWINS_DB_"
+	envPrefixHTTP  = "MG_TWINS_HTTP_"
+	envPrefixAuth  = "MG_AUTH_GRPC_"
+	defSvcHTTPPort = "9018"
+)
+
+type config struct {
+	LogLevel        string  `env:"MG_TWINS_LOG_LEVEL" envDefault:"info"`
+	StandaloneID    string  `env:"MG_TWINS_STANDALONE_ID" envDefault:""`
+	StandaloneToken string  `env:"MG_TWINS_STANDALONE_TOKEN" envDefault:""`
+	ChannelID       string  `env:"MG_TWINS_CHANNEL_ID" envDefault:""`
+	BrokerURL       string  `env:"MG_MESSAGE_BROKER_URL" envDefault:"nats://localhost:4222"`
+	JaegerURL       url.URL `env:"MG_JAEGER_URL" envDefault:"http://jaeger:14268/api/traces"`
+	SendTelemetry   bool    `env:"MG_SEND_TELEMETRY" envDefault:"true"`
+	InstanceID      string  `env:"MG_TWINS_INSTANCE_ID" envDefault:""`
+	ESURL           string  `env:"MG_ES_URL" envDefault:"nats://localhost:4222"`
+	CacheURL        string  `env:"MG_TWINS_CACHE_URL" envDefault:"redis://localhost:6379/0"`
+	TraceRatio      float64 `env:"MG_JAEGER_TRACE_RATIO" envDefault:"1.0"`
+}
+
+func main() {
+	ctx, cancel := context.WithCancel(context.Background())
+	g, ctx := errgroup.WithContext(ctx)
+
+	cfg := config{}
+	if err := env.Parse(&cfg); err != nil {
+		log.Fatalf("failed to load %s configuration : %s", svcName, err)
+	}
+
+	logger, err := mglog.New(os.Stdout, cfg.LogLevel)
+	if err != nil {
+		log.Fatalf("failed to init logger: %s", err.Error())
+	}
+
+	var exitCode int
+	defer mglog.ExitWithError(&exitCode)
+
+	if cfg.InstanceID == "" {
+		if cfg.InstanceID, err = uuid.New().ID(); err != nil {
+			logger.Error(fmt.Sprintf("failed to generate instanceID: %s", err))
+			exitCode = 1
+			return
+		}
+	}
+
+	httpServerConfig := server.Config{Port: defSvcHTTPPort}
+	if err := env.ParseWithOptions(&httpServerConfig, env.Options{Prefix: envPrefixHTTP}); err != nil {
+		logger.Error(fmt.Sprintf("failed to load %s HTTP server configuration : %s", svcName, err))
+		exitCode = 1
+		return
+	}
+
+	cacheClient, err := redisclient.Connect(cfg.CacheURL)
+	if err != nil {
+		logger.Error(err.Error())
+		exitCode = 1
+		return
+	}
+	defer cacheClient.Close()
+
+	db, err := mongoclient.Setup(envPrefixDB)
+	if err != nil {
+		logger.Error(fmt.Sprintf("failed to setup mongo database : %s", err))
+		exitCode = 1
return + } + + tp, err := jaegerclient.NewProvider(ctx, svcName, cfg.JaegerURL, cfg.InstanceID, cfg.TraceRatio) + if err != nil { + logger.Error(fmt.Sprintf("failed to init Jaeger: %s", err)) + exitCode = 1 + return + } + defer func() { + if err := tp.Shutdown(ctx); err != nil { + logger.Error(fmt.Sprintf("Error shutting down tracer provider: %v", err)) + } + }() + tracer := tp.Tracer(svcName) + + var authClient magistrala.AuthServiceClient + switch cfg.StandaloneID != "" && cfg.StandaloneToken != "" { + case true: + authClient = localusers.NewAuthService(cfg.StandaloneID, cfg.StandaloneToken) + default: + authConfig := auth.Config{} + if err := env.ParseWithOptions(&authConfig, env.Options{Prefix: envPrefixAuth}); err != nil { + logger.Error(fmt.Sprintf("failed to load %s auth configuration : %s", svcName, err)) + exitCode = 1 + return + } + + authServiceClient, authHandler, err := auth.Setup(ctx, authConfig) + if err != nil { + logger.Error(err.Error()) + exitCode = 1 + return + } + defer authHandler.Close() + authClient = authServiceClient + logger.Info("Successfully connected to auth grpc server " + authHandler.Secure()) + } + + pubSub, err := brokers.NewPubSub(ctx, cfg.BrokerURL, logger) + if err != nil { + logger.Error(fmt.Sprintf("failed to connect to message broker: %s", err)) + exitCode = 1 + return + } + defer pubSub.Close() + pubSub = brokerstracing.NewPubSub(httpServerConfig, tracer, pubSub) + + svc, err := newService(ctx, svcName, pubSub, cfg, authClient, tracer, db, cacheClient, logger) + if err != nil { + logger.Error(fmt.Sprintf("failed to create %s service: %s", svcName, err)) + exitCode = 1 + return + } + + hs := httpserver.NewServer(ctx, cancel, svcName, httpServerConfig, twapi.MakeHandler(svc, logger, cfg.InstanceID), logger) + + if cfg.SendTelemetry { + chc := chclient.New(svcName, magistrala.Version, logger, cancel) + go chc.CallHome(ctx) + } + + g.Go(func() error { + return hs.Start() + }) + + g.Go(func() error { + return server.StopSignalHandler(ctx, cancel, logger, svcName, hs) + }) + + if err := g.Wait(); err != nil { + logger.Error(fmt.Sprintf("Twins service terminated: %s", err)) + } +} + +func newService(ctx context.Context, id string, ps messaging.PubSub, cfg config, users magistrala.AuthServiceClient, tracer trace.Tracer, db *mongo.Database, cacheclient *redis.Client, logger *slog.Logger) (twins.Service, error) { + twinRepo := twmongodb.NewTwinRepository(db) + twinRepo = tracing.TwinRepositoryMiddleware(tracer, twinRepo) + + stateRepo := twmongodb.NewStateRepository(db) + stateRepo = tracing.StateRepositoryMiddleware(tracer, stateRepo) + + idProvider := uuid.New() + twinCache := events.NewTwinCache(cacheclient) + twinCache = tracing.TwinCacheMiddleware(tracer, twinCache) + + svc := twins.New(ps, users, twinRepo, twinCache, stateRepo, idProvider, cfg.ChannelID, logger) + + var err error + svc, err = events.NewEventStoreMiddleware(ctx, svc, cfg.ESURL) + if err != nil { + return nil, err + } + + svc = api.LoggingMiddleware(svc, logger) + counter, latency := prometheus.MakeMetrics(svcName, "api") + svc = api.MetricsMiddleware(svc, counter, latency) + + subCfg := messaging.SubscriberConfig{ + ID: id, + Topic: brokers.SubjectAllChannels, + Handler: handle(ctx, logger, cfg.ChannelID, svc), + } + if err = ps.Subscribe(ctx, subCfg); err != nil { + logger.Error(err.Error()) + } + + return svc, nil +} + +func handle(ctx context.Context, logger *slog.Logger, chanID string, svc twins.Service) handlerFunc { + return func(msg *messaging.Message) error { + if 
msg.GetChannel() == chanID {
+			return nil
+		}
+
+		if err := svc.SaveStates(ctx, msg); err != nil {
+			logger.Error(fmt.Sprintf("State save failed: %s", err))
+			return err
+		}
+
+		return nil
+	}
+}
+
+type handlerFunc func(msg *messaging.Message) error
+
+func (h handlerFunc) Handle(msg *messaging.Message) error {
+	return h(msg)
+}
+
+func (h handlerFunc) Cancel() error {
+	return nil
+}
diff --git a/consumers/README.md b/consumers/README.md
new file mode 100644
index 0000000..f4e2f28
--- /dev/null
+++ b/consumers/README.md
@@ -0,0 +1,18 @@
+# Consumers
+
+Consumers provide an abstraction of various `Magistrala consumers`.
+A Magistrala consumer is a generic service that handles received messages, i.e. consumes them.
+The message is not necessarily a Magistrala message: before consumption, a Magistrala message can
+be transformed into any valid format that the specific consumer understands. For example,
+writers are consumers that take a SenML or JSON message and store it.
+
+Consumers are optional services and are treated as plugins. In order to
+run consumer services, core services must be up and running.
+
+For an in-depth explanation of the usage of `consumers`, as well as a thorough
+understanding of Magistrala, please check out the [official documentation][doc].
+
+For more information about service capabilities and their usage, please check out
+the [API documentation](https://docs.api.magistrala.abstractmachines.fr/?urls.primaryName=consumers-notifiers-openapi.yml).
+
+[doc]: https://docs.magistrala.abstractmachines.fr
diff --git a/consumers/consumer.go b/consumers/consumer.go
new file mode 100644
index 0000000..403f9a3
--- /dev/null
+++ b/consumers/consumer.go
@@ -0,0 +1,30 @@
+// Copyright (c) Abstract Machines
+// SPDX-License-Identifier: Apache-2.0
+
+package consumers
+
+import "context"
+
+// AsyncConsumer specifies a non-blocking message-consuming API,
+// which can be used for writing data to the DB, publishing messages
+// to a broker, sending notifications, or any other asynchronous job.
+type AsyncConsumer interface {
+	// ConsumeAsync method is used to asynchronously consume received messages.
+	ConsumeAsync(ctx context.Context, messages interface{})
+
+	// Errors method returns a channel for reading errors which occur during async writes.
+	// It must be called before performing any writes for errors to be collected.
+	// The channel is buffered (size 1), so it allows only one error without blocking if not drained.
+	// The channel may receive a nil error to indicate success.
+	Errors() <-chan error
+}
+
+// BlockingConsumer specifies a blocking message-consuming API,
+// which can be used for writing data to the DB, publishing messages
+// to a broker, sending notifications, etc. BlockingConsumer implementations
+// might also support concurrent use, but consult the implementation for details.
+type BlockingConsumer interface {
+	// ConsumeBlocking method is used to consume received messages synchronously.
+	// A non-nil error is returned to indicate operation failure.
+	ConsumeBlocking(ctx context.Context, messages interface{}) error
+}
diff --git a/consumers/doc.go b/consumers/doc.go
new file mode 100644
index 0000000..6280125
--- /dev/null
+++ b/consumers/doc.go
@@ -0,0 +1,6 @@
+// Copyright (c) Abstract Machines
+// SPDX-License-Identifier: Apache-2.0
+
+// Package consumers contains the domain concept definitions needed to
+// support Magistrala consumer services functionality.
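+//
+// As an illustration, a minimal BlockingConsumer implementation (the
+// printConsumer type is hypothetical and for demonstration only) could
+// look like this:
+//
+//	type printConsumer struct{}
+//
+//	func (pc printConsumer) ConsumeBlocking(ctx context.Context, messages interface{}) error {
+//		fmt.Printf("consumed: %v\n", messages)
+//		return nil
+//	}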
+package consumers diff --git a/consumers/messages.go b/consumers/messages.go new file mode 100644 index 0000000..0d25edf --- /dev/null +++ b/consumers/messages.go @@ -0,0 +1,159 @@ +// Copyright (c) Abstract Machines +// SPDX-License-Identifier: Apache-2.0 + +package consumers + +import ( + "context" + "fmt" + "log/slog" + "os" + "strings" + + "github.com/absmach/magistrala/pkg/apiutil" + "github.com/absmach/magistrala/pkg/errors" + "github.com/absmach/magistrala/pkg/messaging" + "github.com/absmach/magistrala/pkg/messaging/brokers" + "github.com/absmach/magistrala/pkg/transformers" + "github.com/absmach/magistrala/pkg/transformers/json" + "github.com/absmach/magistrala/pkg/transformers/senml" + "github.com/pelletier/go-toml" +) + +const ( + defContentType = "application/senml+json" + defFormat = "senml" +) + +var ( + errOpenConfFile = errors.New("unable to open configuration file") + errParseConfFile = errors.New("unable to parse configuration file") +) + +// Start method starts consuming messages received from Message broker. +// This method transforms messages to SenML format before +// using MessageRepository to store them. +func Start(ctx context.Context, id string, sub messaging.Subscriber, consumer interface{}, configPath string, logger *slog.Logger) error { + cfg, err := loadConfig(configPath) + if err != nil { + logger.Warn(fmt.Sprintf("Failed to load consumer config: %s", err)) + } + + transformer := makeTransformer(cfg.TransformerCfg, logger) + + for _, subject := range cfg.SubscriberCfg.Subjects { + subCfg := messaging.SubscriberConfig{ + ID: id, + Topic: subject, + DeliveryPolicy: messaging.DeliverAllPolicy, + } + switch c := consumer.(type) { + case AsyncConsumer: + subCfg.Handler = handleAsync(ctx, transformer, c) + if err := sub.Subscribe(ctx, subCfg); err != nil { + return err + } + case BlockingConsumer: + subCfg.Handler = handleSync(ctx, transformer, c) + if err := sub.Subscribe(ctx, subCfg); err != nil { + return err + } + default: + return apiutil.ErrInvalidQueryParams + } + } + return nil +} + +func handleSync(ctx context.Context, t transformers.Transformer, sc BlockingConsumer) handleFunc { + return func(msg *messaging.Message) error { + m := interface{}(msg) + var err error + if t != nil { + m, err = t.Transform(msg) + if err != nil { + return err + } + } + return sc.ConsumeBlocking(ctx, m) + } +} + +func handleAsync(ctx context.Context, t transformers.Transformer, ac AsyncConsumer) handleFunc { + return func(msg *messaging.Message) error { + m := interface{}(msg) + var err error + if t != nil { + m, err = t.Transform(msg) + if err != nil { + return err + } + } + + ac.ConsumeAsync(ctx, m) + return nil + } +} + +type handleFunc func(msg *messaging.Message) error + +func (h handleFunc) Handle(msg *messaging.Message) error { + return h(msg) +} + +func (h handleFunc) Cancel() error { + return nil +} + +type subscriberConfig struct { + Subjects []string `toml:"subjects"` +} + +type transformerConfig struct { + Format string `toml:"format"` + ContentType string `toml:"content_type"` + TimeFields []json.TimeField `toml:"time_fields"` +} + +type config struct { + SubscriberCfg subscriberConfig `toml:"subscriber"` + TransformerCfg transformerConfig `toml:"transformer"` +} + +func loadConfig(configPath string) (config, error) { + cfg := config{ + SubscriberCfg: subscriberConfig{ + Subjects: []string{brokers.SubjectAllChannels}, + }, + TransformerCfg: transformerConfig{ + Format: defFormat, + ContentType: defContentType, + }, + } + + data, err := os.ReadFile(configPath) + if 
err != nil {
+		return cfg, errors.Wrap(errOpenConfFile, err)
+	}
+
+	if err := toml.Unmarshal(data, &cfg); err != nil {
+		return cfg, errors.Wrap(errParseConfFile, err)
+	}
+
+	return cfg, nil
+}
+
+func makeTransformer(cfg transformerConfig, logger *slog.Logger) transformers.Transformer {
+	switch strings.ToUpper(cfg.Format) {
+	case "SENML":
+		logger.Info("Using SenML transformer")
+		return senml.New(cfg.ContentType)
+	case "JSON":
+		logger.Info("Using JSON transformer")
+		return json.New(cfg.TimeFields)
+	default:
+		logger.Error(fmt.Sprintf("Can't create transformer: unknown transformer type %s", cfg.Format))
+		os.Exit(1)
+		return nil
+	}
+}
diff --git a/consumers/notifiers/README.md b/consumers/notifiers/README.md
new file mode 100644
index 0000000..1866719
--- /dev/null
+++ b/consumers/notifiers/README.md
@@ -0,0 +1,23 @@
+# Notifiers service
+
+Notifiers service sends notifications using Notifiers.
+It can be configured to use different types of Notifiers to send
+different types of notifications, such as SMS messages, emails, or push notifications.
+The service is extensible, so new implementations of Notifiers can easily be added.
+Notifiers **are not standalone services** but rather dependencies used by the Notifiers service
+to send notifications over specific protocols.
+
+## Configuration
+
+The service is configured using environment variables.
+The environment variables needed for service configuration depend on the underlying Notifier.
+An example of the service configuration for the SMTP Notifier can be found in the
+[SMTP Notifier documentation](smtp/README.md).
+Note that any unset variables will be replaced with their
+default values.
+
+## Usage
+
+The service starts consuming messages and sends notifications whenever a message is received.
diff --git a/consumers/notifiers/api/doc.go b/consumers/notifiers/api/doc.go
new file mode 100644
index 0000000..2424852
--- /dev/null
+++ b/consumers/notifiers/api/doc.go
@@ -0,0 +1,6 @@
+// Copyright (c) Abstract Machines
+// SPDX-License-Identifier: Apache-2.0
+
+// Package api contains API-related concerns: endpoint definitions, middlewares
+// and all resource representations.
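+//
+// A minimal usage sketch (svc, logger and instanceID are assumed to be
+// initialized elsewhere; the port is hypothetical):
+//
+//	handler := MakeHandler(svc, logger, instanceID)
+//	if err := http.ListenAndServe(":9015", handler); err != nil {
+//		log.Fatal(err)
+//	}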
+package api diff --git a/consumers/notifiers/api/endpoint.go b/consumers/notifiers/api/endpoint.go new file mode 100644 index 0000000..4b411ea --- /dev/null +++ b/consumers/notifiers/api/endpoint.go @@ -0,0 +1,103 @@ +// Copyright (c) Abstract Machines +// SPDX-License-Identifier: Apache-2.0 + +package api + +import ( + "context" + + notifiers "github.com/absmach/magistrala/consumers/notifiers" + "github.com/absmach/magistrala/pkg/apiutil" + "github.com/absmach/magistrala/pkg/errors" + "github.com/go-kit/kit/endpoint" +) + +func createSubscriptionEndpoint(svc notifiers.Service) endpoint.Endpoint { + return func(ctx context.Context, request interface{}) (interface{}, error) { + req := request.(createSubReq) + if err := req.validate(); err != nil { + return createSubRes{}, errors.Wrap(apiutil.ErrValidation, err) + } + sub := notifiers.Subscription{ + Contact: req.Contact, + Topic: req.Topic, + } + id, err := svc.CreateSubscription(ctx, req.token, sub) + if err != nil { + return createSubRes{}, err + } + ucr := createSubRes{ + ID: id, + } + + return ucr, nil + } +} + +func viewSubscriptionEndpint(svc notifiers.Service) endpoint.Endpoint { + return func(ctx context.Context, request interface{}) (interface{}, error) { + req := request.(subReq) + if err := req.validate(); err != nil { + return viewSubRes{}, errors.Wrap(apiutil.ErrValidation, err) + } + sub, err := svc.ViewSubscription(ctx, req.token, req.id) + if err != nil { + return viewSubRes{}, err + } + res := viewSubRes{ + ID: sub.ID, + OwnerID: sub.OwnerID, + Contact: sub.Contact, + Topic: sub.Topic, + } + return res, nil + } +} + +func listSubscriptionsEndpoint(svc notifiers.Service) endpoint.Endpoint { + return func(ctx context.Context, request interface{}) (interface{}, error) { + req := request.(listSubsReq) + if err := req.validate(); err != nil { + return listSubsRes{}, errors.Wrap(apiutil.ErrValidation, err) + } + pm := notifiers.PageMetadata{ + Topic: req.topic, + Contact: req.contact, + Offset: req.offset, + Limit: int(req.limit), + } + page, err := svc.ListSubscriptions(ctx, req.token, pm) + if err != nil { + return listSubsRes{}, err + } + res := listSubsRes{ + Offset: page.Offset, + Limit: page.Limit, + Total: page.Total, + } + for _, sub := range page.Subscriptions { + r := viewSubRes{ + ID: sub.ID, + OwnerID: sub.OwnerID, + Contact: sub.Contact, + Topic: sub.Topic, + } + res.Subscriptions = append(res.Subscriptions, r) + } + + return res, nil + } +} + +func deleteSubscriptionEndpint(svc notifiers.Service) endpoint.Endpoint { + return func(ctx context.Context, request interface{}) (interface{}, error) { + req := request.(subReq) + if err := req.validate(); err != nil { + return nil, errors.Wrap(apiutil.ErrValidation, err) + } + if err := svc.RemoveSubscription(ctx, req.token, req.id); err != nil { + return nil, err + } + return removeSubRes{}, nil + } +} diff --git a/consumers/notifiers/api/endpoint_test.go b/consumers/notifiers/api/endpoint_test.go new file mode 100644 index 0000000..e1148d6 --- /dev/null +++ b/consumers/notifiers/api/endpoint_test.go @@ -0,0 +1,548 @@ +// Copyright (c) Abstract Machines +// SPDX-License-Identifier: Apache-2.0 + +package api_test + +import ( + "encoding/json" + "fmt" + "io" + "net/http" + "net/http/httptest" + "path" + "strings" + "testing" + + authmocks "github.com/absmach/magistrala/auth/mocks" + "github.com/absmach/magistrala/consumers/notifiers" + httpapi "github.com/absmach/magistrala/consumers/notifiers/api" + "github.com/absmach/magistrala/consumers/notifiers/mocks" + mglog 
"github.com/absmach/magistrala/logger" + "github.com/absmach/magistrala/pkg/apiutil" + svcerr "github.com/absmach/magistrala/pkg/errors/service" + "github.com/absmach/magistrala/pkg/uuid" + "github.com/absmach/mg-contrib/pkg/testsutil" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" +) + +const ( + contentType = "application/json" + email = "user@example.com" + contact1 = "email1@example.com" + contact2 = "email2@example.com" + token = "token" + topic = "topic" + instanceID = "5de9b29a-feb9-11ed-be56-0242ac120002" + validID = "d4ebb847-5d0e-4e46-bdd9-b6aceaaa3a22" +) + +var ( + notFoundRes = toJSON(apiutil.ErrorRes{Msg: svcerr.ErrNotFound.Error()}) + unauthRes = toJSON(apiutil.ErrorRes{Msg: svcerr.ErrAuthentication.Error()}) + invalidRes = toJSON(apiutil.ErrorRes{Err: apiutil.ErrInvalidQueryParams.Error(), Msg: apiutil.ErrValidation.Error()}) + missingTokRes = toJSON(apiutil.ErrorRes{Err: apiutil.ErrBearerToken.Error(), Msg: apiutil.ErrValidation.Error()}) +) + +type testRequest struct { + client *http.Client + method string + url string + contentType string + token string + body io.Reader +} + +func (tr testRequest) make() (*http.Response, error) { + req, err := http.NewRequest(tr.method, tr.url, tr.body) + if err != nil { + return nil, err + } + if tr.token != "" { + req.Header.Set("Authorization", apiutil.BearerPrefix+tr.token) + } + if tr.contentType != "" { + req.Header.Set("Content-Type", tr.contentType) + } + return tr.client.Do(req) +} + +func newServer() (*httptest.Server, *mocks.Service) { + logger := mglog.NewMock() + svc := new(mocks.Service) + mux := httpapi.MakeHandler(svc, logger, instanceID) + return httptest.NewServer(mux), svc +} + +func toJSON(data interface{}) string { + jsonData, err := json.Marshal(data) + if err != nil { + return "" + } + return string(jsonData) +} + +func TestCreate(t *testing.T) { + ss, svc := newServer() + defer ss.Close() + + sub := notifiers.Subscription{ + Topic: topic, + Contact: contact1, + } + + data := toJSON(sub) + + emptyTopic := toJSON(notifiers.Subscription{Contact: contact1}) + emptyContact := toJSON(notifiers.Subscription{Topic: "topic123"}) + + cases := []struct { + desc string + req string + contentType string + auth string + status int + location string + err error + }{ + { + desc: "add successfully", + req: data, + contentType: contentType, + auth: token, + status: http.StatusCreated, + location: fmt.Sprintf("/subscriptions/%s%012d", uuid.Prefix, 1), + err: nil, + }, + { + desc: "add an existing subscription", + req: data, + contentType: contentType, + auth: token, + status: http.StatusConflict, + location: "", + err: svcerr.ErrConflict, + }, + { + desc: "add with empty topic", + req: emptyTopic, + contentType: contentType, + auth: token, + status: http.StatusBadRequest, + location: "", + err: svcerr.ErrMalformedEntity, + }, + { + desc: "add with empty contact", + req: emptyContact, + contentType: contentType, + auth: token, + status: http.StatusBadRequest, + location: "", + err: svcerr.ErrMalformedEntity, + }, + { + desc: "add with invalid auth token", + req: data, + contentType: contentType, + auth: authmocks.InvalidValue, + status: http.StatusUnauthorized, + location: "", + err: svcerr.ErrAuthentication, + }, + { + desc: "add with empty auth token", + req: data, + contentType: contentType, + auth: "", + status: http.StatusUnauthorized, + location: "", + err: svcerr.ErrAuthentication, + }, + { + desc: "add with invalid request format", + req: "}", + contentType: contentType, + auth: token, + status: 
http.StatusBadRequest, + location: "", + err: svcerr.ErrMalformedEntity, + }, + { + desc: "add without content type", + req: data, + contentType: "", + auth: token, + status: http.StatusUnsupportedMediaType, + location: "", + err: apiutil.ErrUnsupportedContentType, + }, + } + + for _, tc := range cases { + svcCall := svc.On("CreateSubscription", mock.Anything, tc.auth, sub).Return(path.Base(tc.location), tc.err) + + req := testRequest{ + client: ss.Client(), + method: http.MethodPost, + url: fmt.Sprintf("%s/subscriptions", ss.URL), + contentType: tc.contentType, + token: tc.auth, + body: strings.NewReader(tc.req), + } + res, err := req.make() + assert.Nil(t, err, fmt.Sprintf("%s: unexpected error %s", tc.desc, err)) + + location := res.Header.Get("Location") + assert.Equal(t, tc.status, res.StatusCode, fmt.Sprintf("%s: expected status code %d got %d", tc.desc, tc.status, res.StatusCode)) + assert.Equal(t, tc.location, location, fmt.Sprintf("%s: expected location %s got %s", tc.desc, tc.location, location)) + + svcCall.Unset() + } +} + +func TestView(t *testing.T) { + ss, svc := newServer() + defer ss.Close() + + sub := notifiers.Subscription{ + Topic: topic, + Contact: contact1, + ID: testsutil.GenerateUUID(t), + OwnerID: validID, + } + + sr := subRes{ + ID: sub.ID, + OwnerID: validID, + Contact: sub.Contact, + Topic: sub.Topic, + } + data := toJSON(sr) + + cases := []struct { + desc string + id string + auth string + status int + res string + err error + Sub notifiers.Subscription + }{ + { + desc: "view successfully", + id: sub.ID, + auth: token, + status: http.StatusOK, + res: data, + err: nil, + Sub: sub, + }, + { + desc: "view not existing", + id: "not existing", + auth: token, + status: http.StatusNotFound, + res: notFoundRes, + err: svcerr.ErrNotFound, + }, + { + desc: "view with invalid auth token", + id: sub.ID, + auth: authmocks.InvalidValue, + status: http.StatusUnauthorized, + res: unauthRes, + err: svcerr.ErrAuthentication, + }, + { + desc: "view with empty auth token", + id: sub.ID, + auth: "", + status: http.StatusUnauthorized, + res: missingTokRes, + err: svcerr.ErrAuthentication, + }, + } + + for _, tc := range cases { + svcCall := svc.On("ViewSubscription", mock.Anything, tc.auth, tc.id).Return(tc.Sub, tc.err) + + req := testRequest{ + client: ss.Client(), + method: http.MethodGet, + url: fmt.Sprintf("%s/subscriptions/%s", ss.URL, tc.id), + token: tc.auth, + } + res, err := req.make() + assert.Nil(t, err, fmt.Sprintf("%s: unexpected request error %s", tc.desc, err)) + body, err := io.ReadAll(res.Body) + assert.Nil(t, err, fmt.Sprintf("%s: unexpected read error %s", tc.desc, err)) + data := strings.Trim(string(body), "\n") + assert.Equal(t, tc.status, res.StatusCode, fmt.Sprintf("%s: expected status code %d got %d", tc.desc, tc.status, res.StatusCode)) + assert.Equal(t, tc.res, data, fmt.Sprintf("%s: expected body %s got %s", tc.desc, tc.res, data)) + + svcCall.Unset() + } +} + +func TestList(t *testing.T) { + ss, svc := newServer() + defer ss.Close() + + const numSubs = 100 + var subs []subRes + var sub notifiers.Subscription + + for i := 0; i < numSubs; i++ { + sub = notifiers.Subscription{ + Topic: fmt.Sprintf("topic.subtopic.%d", i), + Contact: contact1, + ID: testsutil.GenerateUUID(t), + } + if i%2 == 0 { + sub.Contact = contact2 + } + sr := subRes{ + ID: sub.ID, + OwnerID: validID, + Contact: sub.Contact, + Topic: sub.Topic, + } + subs = append(subs, sr) + } + noLimit := toJSON(page{Offset: 5, Limit: 20, Total: numSubs, Subscriptions: subs[5:25]}) + one := 
toJSON(page{Offset: 0, Limit: 20, Total: 1, Subscriptions: subs[10:11]}) + + var contact2Subs []subRes + for i := 20; i < 40; i += 2 { + contact2Subs = append(contact2Subs, subs[i]) + } + contactList := toJSON(page{Offset: 10, Limit: 10, Total: 50, Subscriptions: contact2Subs}) + + cases := []struct { + desc string + query map[string]string + auth string + status int + res string + err error + page notifiers.Page + }{ + { + desc: "list default limit", + query: map[string]string{ + "offset": "5", + }, + auth: token, + status: http.StatusOK, + res: noLimit, + err: nil, + page: notifiers.Page{ + PageMetadata: notifiers.PageMetadata{ + Offset: 5, + Limit: 20, + }, + Total: numSubs, + Subscriptions: subscriptionsSlice(subs, 5, 25), + }, + }, + { + desc: "list not existing", + query: map[string]string{ + "topic": "not-found-topic", + }, + auth: token, + status: http.StatusNotFound, + res: notFoundRes, + err: svcerr.ErrNotFound, + }, + { + desc: "list one with topic", + query: map[string]string{ + "topic": "topic.subtopic.10", + }, + auth: token, + status: http.StatusOK, + res: one, + err: nil, + page: notifiers.Page{ + PageMetadata: notifiers.PageMetadata{ + Offset: 0, + Limit: 20, + }, + Total: 1, + Subscriptions: subscriptionsSlice(subs, 10, 11), + }, + }, + { + desc: "list with contact", + query: map[string]string{ + "contact": contact2, + "offset": "10", + "limit": "10", + }, + auth: token, + status: http.StatusOK, + res: contactList, + err: nil, + page: notifiers.Page{ + PageMetadata: notifiers.PageMetadata{ + Offset: 10, + Limit: 10, + }, + Total: 50, + Subscriptions: subscriptionsSlice(contact2Subs, 0, 10), + }, + }, + { + desc: "list with invalid query", + query: map[string]string{ + "offset": "two", + }, + auth: token, + status: http.StatusBadRequest, + res: invalidRes, + err: svcerr.ErrMalformedEntity, + }, + { + desc: "list with invalid auth token", + auth: authmocks.InvalidValue, + status: http.StatusUnauthorized, + res: unauthRes, + err: svcerr.ErrAuthentication, + }, + { + desc: "list with empty auth token", + auth: "", + status: http.StatusUnauthorized, + res: missingTokRes, + err: svcerr.ErrAuthentication, + }, + } + + for _, tc := range cases { + svcCall := svc.On("ListSubscriptions", mock.Anything, tc.auth, mock.Anything).Return(tc.page, tc.err) + req := testRequest{ + client: ss.Client(), + method: http.MethodGet, + url: fmt.Sprintf("%s/subscriptions%s", ss.URL, makeQuery(tc.query)), + token: tc.auth, + } + res, err := req.make() + assert.Nil(t, err, fmt.Sprintf("%s: unexpected error %s", tc.desc, err)) + body, err := io.ReadAll(res.Body) + assert.Nil(t, err, fmt.Sprintf("%s: unexpected error %s", tc.desc, err)) + data := strings.Trim(string(body), "\n") + assert.Equal(t, tc.status, res.StatusCode, fmt.Sprintf("%s: expected status code %d got %d", tc.desc, tc.status, res.StatusCode)) + assert.Equal(t, tc.res, data, fmt.Sprintf("%s: got unexpected body\n", tc.desc)) + + svcCall.Unset() + } +} + +func TestRemove(t *testing.T) { + ss, svc := newServer() + defer ss.Close() + id := testsutil.GenerateUUID(t) + + cases := []struct { + desc string + id string + auth string + status int + res string + err error + }{ + { + desc: "remove successfully", + id: id, + auth: token, + status: http.StatusNoContent, + err: nil, + }, + { + desc: "remove not existing", + id: "not existing", + auth: token, + status: http.StatusNotFound, + err: svcerr.ErrNotFound, + }, + { + desc: "remove empty id", + id: "", + auth: token, + status: http.StatusBadRequest, + err: svcerr.ErrMalformedEntity, + }, + { 
+ desc: "view with invalid auth token", + id: id, + auth: authmocks.InvalidValue, + status: http.StatusUnauthorized, + res: unauthRes, + err: svcerr.ErrAuthentication, + }, + { + desc: "view with empty auth token", + id: id, + auth: "", + status: http.StatusUnauthorized, + res: missingTokRes, + err: svcerr.ErrAuthentication, + }, + } + + for _, tc := range cases { + svcCall := svc.On("RemoveSubscription", mock.Anything, tc.auth, tc.id).Return(tc.err) + + req := testRequest{ + client: ss.Client(), + method: http.MethodDelete, + url: fmt.Sprintf("%s/subscriptions/%s", ss.URL, tc.id), + token: tc.auth, + } + res, err := req.make() + assert.Nil(t, err, fmt.Sprintf("%s: unexpected error %s", tc.desc, err)) + assert.Equal(t, tc.status, res.StatusCode, fmt.Sprintf("%s: expected status code %d got %d", tc.desc, tc.status, res.StatusCode)) + + svcCall.Unset() + } +} + +func makeQuery(m map[string]string) string { + var ret string + for k, v := range m { + ret += fmt.Sprintf("&%s=%s", k, v) + } + if ret != "" { + return fmt.Sprintf("?%s", ret[1:]) + } + return "" +} + +type subRes struct { + ID string `json:"id"` + OwnerID string `json:"owner_id"` + Contact string `json:"contact"` + Topic string `json:"topic"` +} +type page struct { + Offset uint `json:"offset"` + Limit int `json:"limit"` + Total uint `json:"total,omitempty"` + Subscriptions []subRes `json:"subscriptions,omitempty"` +} + +func subscriptionsSlice(subs []subRes, start, end int) []notifiers.Subscription { + var res []notifiers.Subscription + for i := start; i < end; i++ { + sub := subs[i] + res = append(res, notifiers.Subscription{ + ID: sub.ID, + OwnerID: sub.OwnerID, + Contact: sub.Contact, + Topic: sub.Topic, + }) + } + return res +} diff --git a/consumers/notifiers/api/logging.go b/consumers/notifiers/api/logging.go new file mode 100644 index 0000000..e327d92 --- /dev/null +++ b/consumers/notifiers/api/logging.go @@ -0,0 +1,131 @@ +// Copyright (c) Abstract Machines +// SPDX-License-Identifier: Apache-2.0 + +//go:build !test + +package api + +import ( + "context" + "log/slog" + "time" + + "github.com/absmach/magistrala/consumers/notifiers" +) + +var _ notifiers.Service = (*loggingMiddleware)(nil) + +type loggingMiddleware struct { + logger *slog.Logger + svc notifiers.Service +} + +// LoggingMiddleware adds logging facilities to the core service. +func LoggingMiddleware(svc notifiers.Service, logger *slog.Logger) notifiers.Service { + return &loggingMiddleware{logger, svc} +} + +// CreateSubscription logs the create_subscription request. It logs subscription ID and topic and the time it took to complete the request. +// If the request fails, it logs the error. +func (lm *loggingMiddleware) CreateSubscription(ctx context.Context, token string, sub notifiers.Subscription) (id string, err error) { + defer func(begin time.Time) { + args := []any{ + slog.String("duration", time.Since(begin).String()), + slog.Group("subscription", + slog.String("topic", sub.Topic), + slog.String("id", id), + ), + } + if err != nil { + args = append(args, slog.Any("error", err)) + lm.logger.Warn("Create subscription failed", args...) + return + } + lm.logger.Info("Create subscription completed successfully", args...) + }(time.Now()) + + return lm.svc.CreateSubscription(ctx, token, sub) +} + +// ViewSubscription logs the view_subscription request. It logs subscription topic and id and the time it took to complete the request. +// If the request fails, it logs the error. 
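+//
+// The middleware is applied by wrapping the core service, e.g. (a sketch;
+// svc and logger are assumed to be constructed by the caller):
+//
+//	svc = LoggingMiddleware(svc, logger)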
+func (lm *loggingMiddleware) ViewSubscription(ctx context.Context, token, topic string) (sub notifiers.Subscription, err error) { + defer func(begin time.Time) { + args := []any{ + slog.String("duration", time.Since(begin).String()), + slog.Group("subscription", + slog.String("topic", topic), + slog.String("id", sub.ID), + ), + } + if err != nil { + args = append(args, slog.Any("error", err)) + lm.logger.Warn("View subscription failed", args...) + return + } + lm.logger.Info("View subscription completed successfully", args...) + }(time.Now()) + + return lm.svc.ViewSubscription(ctx, token, topic) +} + +// ListSubscriptions logs the list_subscriptions request. It logs page metadata and subscription topic and the time it took to complete the request. +// If the request fails, it logs the error. +func (lm *loggingMiddleware) ListSubscriptions(ctx context.Context, token string, pm notifiers.PageMetadata) (res notifiers.Page, err error) { + defer func(begin time.Time) { + args := []any{ + slog.String("duration", time.Since(begin).String()), + slog.Group("page", + slog.String("topic", pm.Topic), + slog.Int("limit", pm.Limit), + slog.Uint64("offset", uint64(pm.Offset)), + slog.Uint64("total", uint64(res.Total)), + ), + } + if err != nil { + args = append(args, slog.Any("error", err)) + lm.logger.Warn("List subscriptions failed", args...) + return + } + lm.logger.Info("List subscriptions completed successfully", args...) + }(time.Now()) + + return lm.svc.ListSubscriptions(ctx, token, pm) +} + +// RemoveSubscription logs the remove_subscription request. It logs subscription ID and the time it took to complete the request. +// If the request fails, it logs the error. +func (lm *loggingMiddleware) RemoveSubscription(ctx context.Context, token, id string) (err error) { + defer func(begin time.Time) { + args := []any{ + slog.String("duration", time.Since(begin).String()), + slog.String("subscription_id", id), + } + if err != nil { + args = append(args, slog.Any("error", err)) + lm.logger.Warn("Remove subscription failed", args...) + return + } + lm.logger.Info("Remove subscription completed successfully", args...) + }(time.Now()) + + return lm.svc.RemoveSubscription(ctx, token, id) +} + +// ConsumeBlocking logs the consume_blocking request. It logs the time it took to complete the request. +// If the request fails, it logs the error. +func (lm *loggingMiddleware) ConsumeBlocking(ctx context.Context, msg interface{}) (err error) { + defer func(begin time.Time) { + args := []any{ + slog.String("duration", time.Since(begin).String()), + } + if err != nil { + args = append(args, slog.Any("error", err)) + lm.logger.Warn("Blocking consumer failed to consume messages successfully", args...) + return + } + lm.logger.Info("Blocking consumer consumed messages successfully", args...) 
+ }(time.Now()) + + return lm.svc.ConsumeBlocking(ctx, msg) +} diff --git a/consumers/notifiers/api/metrics.go b/consumers/notifiers/api/metrics.go new file mode 100644 index 0000000..2097302 --- /dev/null +++ b/consumers/notifiers/api/metrics.go @@ -0,0 +1,81 @@ +// Copyright (c) Abstract Machines +// SPDX-License-Identifier: Apache-2.0 + +//go:build !test + +package api + +import ( + "context" + "time" + + "github.com/absmach/magistrala/consumers/notifiers" + "github.com/go-kit/kit/metrics" +) + +var _ notifiers.Service = (*metricsMiddleware)(nil) + +type metricsMiddleware struct { + counter metrics.Counter + latency metrics.Histogram + svc notifiers.Service +} + +// MetricsMiddleware instruments core service by tracking request count and latency. +func MetricsMiddleware(svc notifiers.Service, counter metrics.Counter, latency metrics.Histogram) notifiers.Service { + return &metricsMiddleware{ + counter: counter, + latency: latency, + svc: svc, + } +} + +// CreateSubscription instruments CreateSubscription method with metrics. +func (ms *metricsMiddleware) CreateSubscription(ctx context.Context, token string, sub notifiers.Subscription) (string, error) { + defer func(begin time.Time) { + ms.counter.With("method", "create_subscription").Add(1) + ms.latency.With("method", "create_subscription").Observe(time.Since(begin).Seconds()) + }(time.Now()) + + return ms.svc.CreateSubscription(ctx, token, sub) +} + +// ViewSubscription instruments ViewSubscription method with metrics. +func (ms *metricsMiddleware) ViewSubscription(ctx context.Context, token, topic string) (notifiers.Subscription, error) { + defer func(begin time.Time) { + ms.counter.With("method", "view_subscription").Add(1) + ms.latency.With("method", "view_subscription").Observe(time.Since(begin).Seconds()) + }(time.Now()) + + return ms.svc.ViewSubscription(ctx, token, topic) +} + +// ListSubscriptions instruments ListSubscriptions method with metrics. +func (ms *metricsMiddleware) ListSubscriptions(ctx context.Context, token string, pm notifiers.PageMetadata) (notifiers.Page, error) { + defer func(begin time.Time) { + ms.counter.With("method", "list_subscriptions").Add(1) + ms.latency.With("method", "list_subscriptions").Observe(time.Since(begin).Seconds()) + }(time.Now()) + + return ms.svc.ListSubscriptions(ctx, token, pm) +} + +// RemoveSubscription instruments RemoveSubscription method with metrics. +func (ms *metricsMiddleware) RemoveSubscription(ctx context.Context, token, id string) error { + defer func(begin time.Time) { + ms.counter.With("method", "remove_subscription").Add(1) + ms.latency.With("method", "remove_subscription").Observe(time.Since(begin).Seconds()) + }(time.Now()) + + return ms.svc.RemoveSubscription(ctx, token, id) +} + +// ConsumeBlocking instruments ConsumeBlocking method with metrics. 
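+//
+// Like the other methods, it only records the request count and latency and
+// delegates to the wrapped service. A wiring sketch (a hypothetical setup;
+// MakeMetrics returns the Prometheus counter and latency histogram used by
+// this middleware):
+//
+//	counter, latency := prometheus.MakeMetrics("notifier", "smtp")
+//	svc = MetricsMiddleware(svc, counter, latency)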
+func (ms *metricsMiddleware) ConsumeBlocking(ctx context.Context, msg interface{}) error { + defer func(begin time.Time) { + ms.counter.With("method", "consume").Add(1) + ms.latency.With("method", "consume").Observe(time.Since(begin).Seconds()) + }(time.Now()) + + return ms.svc.ConsumeBlocking(ctx, msg) +} diff --git a/consumers/notifiers/api/requests.go b/consumers/notifiers/api/requests.go new file mode 100644 index 0000000..9285f4d --- /dev/null +++ b/consumers/notifiers/api/requests.go @@ -0,0 +1,55 @@ +// Copyright (c) Abstract Machines +// SPDX-License-Identifier: Apache-2.0 + +package api + +import "github.com/absmach/magistrala/pkg/apiutil" + +type createSubReq struct { + token string + Topic string `json:"topic,omitempty"` + Contact string `json:"contact,omitempty"` +} + +func (req createSubReq) validate() error { + if req.token == "" { + return apiutil.ErrBearerToken + } + if req.Topic == "" { + return apiutil.ErrInvalidTopic + } + if req.Contact == "" { + return apiutil.ErrInvalidContact + } + return nil +} + +type subReq struct { + token string + id string +} + +func (req subReq) validate() error { + if req.token == "" { + return apiutil.ErrBearerToken + } + if req.id == "" { + return apiutil.ErrMissingID + } + return nil +} + +type listSubsReq struct { + token string + topic string + contact string + offset uint + limit uint +} + +func (req listSubsReq) validate() error { + if req.token == "" { + return apiutil.ErrBearerToken + } + return nil +} diff --git a/consumers/notifiers/api/responses.go b/consumers/notifiers/api/responses.go new file mode 100644 index 0000000..7d31006 --- /dev/null +++ b/consumers/notifiers/api/responses.go @@ -0,0 +1,88 @@ +// Copyright (c) Abstract Machines +// SPDX-License-Identifier: Apache-2.0 + +package api + +import ( + "fmt" + "net/http" + + "github.com/absmach/magistrala" +) + +var ( + _ magistrala.Response = (*createSubRes)(nil) + _ magistrala.Response = (*viewSubRes)(nil) + _ magistrala.Response = (*listSubsRes)(nil) + _ magistrala.Response = (*removeSubRes)(nil) +) + +type createSubRes struct { + ID string +} + +func (res createSubRes) Code() int { + return http.StatusCreated +} + +func (res createSubRes) Headers() map[string]string { + return map[string]string{ + "Location": fmt.Sprintf("/subscriptions/%s", res.ID), + } +} + +func (res createSubRes) Empty() bool { + return true +} + +type viewSubRes struct { + ID string `json:"id"` + OwnerID string `json:"owner_id"` + Contact string `json:"contact"` + Topic string `json:"topic"` +} + +func (res viewSubRes) Code() int { + return http.StatusOK +} + +func (res viewSubRes) Headers() map[string]string { + return map[string]string{} +} + +func (res viewSubRes) Empty() bool { + return false +} + +type listSubsRes struct { + Offset uint `json:"offset"` + Limit int `json:"limit"` + Total uint `json:"total,omitempty"` + Subscriptions []viewSubRes `json:"subscriptions,omitempty"` +} + +func (res listSubsRes) Code() int { + return http.StatusOK +} + +func (res listSubsRes) Headers() map[string]string { + return map[string]string{} +} + +func (res listSubsRes) Empty() bool { + return false +} + +type removeSubRes struct{} + +func (res removeSubRes) Code() int { + return http.StatusNoContent +} + +func (res removeSubRes) Headers() map[string]string { + return map[string]string{} +} + +func (res removeSubRes) Empty() bool { + return true +} diff --git a/consumers/notifiers/api/transport.go b/consumers/notifiers/api/transport.go new file mode 100644 index 0000000..693819e --- /dev/null +++ 
b/consumers/notifiers/api/transport.go @@ -0,0 +1,132 @@ +// Copyright (c) Abstract Machines +// SPDX-License-Identifier: Apache-2.0 + +package api + +import ( + "context" + "encoding/json" + "log/slog" + "net/http" + "strings" + + "github.com/absmach/magistrala" + "github.com/absmach/magistrala/consumers/notifiers" + "github.com/absmach/magistrala/pkg/apiutil" + "github.com/absmach/magistrala/pkg/errors" + "github.com/absmach/mg-contrib/pkg/api" + "github.com/go-chi/chi/v5" + kithttp "github.com/go-kit/kit/transport/http" + "github.com/prometheus/client_golang/prometheus/promhttp" + "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp" +) + +const ( + contentType = "application/json" + offsetKey = "offset" + limitKey = "limit" + topicKey = "topic" + contactKey = "contact" + defOffset = 0 + defLimit = 20 +) + +// MakeHandler returns a HTTP handler for API endpoints. +func MakeHandler(svc notifiers.Service, logger *slog.Logger, instanceID string) http.Handler { + opts := []kithttp.ServerOption{ + kithttp.ServerErrorEncoder(apiutil.LoggingErrorEncoder(logger, api.EncodeError)), + } + + mux := chi.NewRouter() + + mux.Route("/subscriptions", func(r chi.Router) { + r.Post("/", otelhttp.NewHandler(kithttp.NewServer( + createSubscriptionEndpoint(svc), + decodeCreate, + api.EncodeResponse, + opts..., + ), "create").ServeHTTP) + + r.Get("/", otelhttp.NewHandler(kithttp.NewServer( + listSubscriptionsEndpoint(svc), + decodeList, + api.EncodeResponse, + opts..., + ), "list").ServeHTTP) + + r.Delete("/", otelhttp.NewHandler(kithttp.NewServer( + deleteSubscriptionEndpint(svc), + decodeSubscription, + api.EncodeResponse, + opts..., + ), "delete").ServeHTTP) + + r.Get("/{subID}", otelhttp.NewHandler(kithttp.NewServer( + viewSubscriptionEndpint(svc), + decodeSubscription, + api.EncodeResponse, + opts..., + ), "view").ServeHTTP) + + r.Delete("/{subID}", otelhttp.NewHandler(kithttp.NewServer( + deleteSubscriptionEndpint(svc), + decodeSubscription, + api.EncodeResponse, + opts..., + ), "delete").ServeHTTP) + }) + + mux.Get("/health", magistrala.Health("notifier", instanceID)) + mux.Handle("/metrics", promhttp.Handler()) + + return mux +} + +func decodeCreate(_ context.Context, r *http.Request) (interface{}, error) { + if !strings.Contains(r.Header.Get("Content-Type"), contentType) { + return nil, errors.Wrap(apiutil.ErrValidation, apiutil.ErrUnsupportedContentType) + } + + req := createSubReq{token: apiutil.ExtractBearerToken(r)} + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + return nil, errors.Wrap(apiutil.ErrValidation, errors.Wrap(err, errors.ErrMalformedEntity)) + } + + return req, nil +} + +func decodeSubscription(_ context.Context, r *http.Request) (interface{}, error) { + req := subReq{ + id: chi.URLParam(r, "subID"), + token: apiutil.ExtractBearerToken(r), + } + + return req, nil +} + +func decodeList(_ context.Context, r *http.Request) (interface{}, error) { + req := listSubsReq{token: apiutil.ExtractBearerToken(r)} + vals := r.URL.Query()[topicKey] + if len(vals) > 0 { + req.topic = vals[0] + } + + vals = r.URL.Query()[contactKey] + if len(vals) > 0 { + req.contact = vals[0] + } + + offset, err := apiutil.ReadNumQuery[uint64](r, offsetKey, defOffset) + if err != nil { + return listSubsReq{}, errors.Wrap(apiutil.ErrValidation, err) + } + req.offset = uint(offset) + + limit, err := apiutil.ReadNumQuery[uint64](r, limitKey, defLimit) + if err != nil { + return listSubsReq{}, errors.Wrap(apiutil.ErrValidation, err) + } + req.limit = uint(limit) + + return req, nil +} diff 
--git a/consumers/notifiers/doc.go b/consumers/notifiers/doc.go new file mode 100644 index 0000000..e90c58c --- /dev/null +++ b/consumers/notifiers/doc.go @@ -0,0 +1,6 @@ +// Copyright (c) Abstract Machines +// SPDX-License-Identifier: Apache-2.0 + +// Package notifiers contains the domain concept definitions needed to +// support Magistrala notifications functionality. +package notifiers diff --git a/consumers/notifiers/mocks/doc.go b/consumers/notifiers/mocks/doc.go new file mode 100644 index 0000000..16ed198 --- /dev/null +++ b/consumers/notifiers/mocks/doc.go @@ -0,0 +1,5 @@ +// Copyright (c) Abstract Machines +// SPDX-License-Identifier: Apache-2.0 + +// Package mocks contains mocks for testing purposes. +package mocks diff --git a/consumers/notifiers/mocks/notifier.go b/consumers/notifiers/mocks/notifier.go new file mode 100644 index 0000000..a3dcc56 --- /dev/null +++ b/consumers/notifiers/mocks/notifier.go @@ -0,0 +1,47 @@ +// Code generated by mockery v2.43.2. DO NOT EDIT. + +// Copyright (c) Abstract Machines + +package mocks + +import ( + messaging "github.com/absmach/magistrala/pkg/messaging" + mock "github.com/stretchr/testify/mock" +) + +// Notifier is an autogenerated mock type for the Notifier type +type Notifier struct { + mock.Mock +} + +// Notify provides a mock function with given fields: from, to, msg +func (_m *Notifier) Notify(from string, to []string, msg *messaging.Message) error { + ret := _m.Called(from, to, msg) + + if len(ret) == 0 { + panic("no return value specified for Notify") + } + + var r0 error + if rf, ok := ret.Get(0).(func(string, []string, *messaging.Message) error); ok { + r0 = rf(from, to, msg) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// NewNotifier creates a new instance of Notifier. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewNotifier(t interface { + mock.TestingT + Cleanup(func()) +}) *Notifier { + mock := &Notifier{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/consumers/notifiers/mocks/repository.go b/consumers/notifiers/mocks/repository.go new file mode 100644 index 0000000..49e5727 --- /dev/null +++ b/consumers/notifiers/mocks/repository.go @@ -0,0 +1,133 @@ +// Code generated by mockery v2.43.2. DO NOT EDIT.
+ +// Copyright (c) Abstract Machines + +package mocks + +import ( + context "context" + + notifiers "github.com/absmach/magistrala/consumers/notifiers" + mock "github.com/stretchr/testify/mock" +) + +// SubscriptionsRepository is an autogenerated mock type for the SubscriptionsRepository type +type SubscriptionsRepository struct { + mock.Mock +} + +// Remove provides a mock function with given fields: ctx, id +func (_m *SubscriptionsRepository) Remove(ctx context.Context, id string) error { + ret := _m.Called(ctx, id) + + if len(ret) == 0 { + panic("no return value specified for Remove") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, string) error); ok { + r0 = rf(ctx, id) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// Retrieve provides a mock function with given fields: ctx, id +func (_m *SubscriptionsRepository) Retrieve(ctx context.Context, id string) (notifiers.Subscription, error) { + ret := _m.Called(ctx, id) + + if len(ret) == 0 { + panic("no return value specified for Retrieve") + } + + var r0 notifiers.Subscription + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, string) (notifiers.Subscription, error)); ok { + return rf(ctx, id) + } + if rf, ok := ret.Get(0).(func(context.Context, string) notifiers.Subscription); ok { + r0 = rf(ctx, id) + } else { + r0 = ret.Get(0).(notifiers.Subscription) + } + + if rf, ok := ret.Get(1).(func(context.Context, string) error); ok { + r1 = rf(ctx, id) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// RetrieveAll provides a mock function with given fields: ctx, pm +func (_m *SubscriptionsRepository) RetrieveAll(ctx context.Context, pm notifiers.PageMetadata) (notifiers.Page, error) { + ret := _m.Called(ctx, pm) + + if len(ret) == 0 { + panic("no return value specified for RetrieveAll") + } + + var r0 notifiers.Page + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, notifiers.PageMetadata) (notifiers.Page, error)); ok { + return rf(ctx, pm) + } + if rf, ok := ret.Get(0).(func(context.Context, notifiers.PageMetadata) notifiers.Page); ok { + r0 = rf(ctx, pm) + } else { + r0 = ret.Get(0).(notifiers.Page) + } + + if rf, ok := ret.Get(1).(func(context.Context, notifiers.PageMetadata) error); ok { + r1 = rf(ctx, pm) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Save provides a mock function with given fields: ctx, sub +func (_m *SubscriptionsRepository) Save(ctx context.Context, sub notifiers.Subscription) (string, error) { + ret := _m.Called(ctx, sub) + + if len(ret) == 0 { + panic("no return value specified for Save") + } + + var r0 string + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, notifiers.Subscription) (string, error)); ok { + return rf(ctx, sub) + } + if rf, ok := ret.Get(0).(func(context.Context, notifiers.Subscription) string); ok { + r0 = rf(ctx, sub) + } else { + r0 = ret.Get(0).(string) + } + + if rf, ok := ret.Get(1).(func(context.Context, notifiers.Subscription) error); ok { + r1 = rf(ctx, sub) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// NewSubscriptionsRepository creates a new instance of SubscriptionsRepository. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. 
+func NewSubscriptionsRepository(t interface { + mock.TestingT + Cleanup(func()) +}) *SubscriptionsRepository { + mock := &SubscriptionsRepository{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/consumers/notifiers/mocks/service.go b/consumers/notifiers/mocks/service.go new file mode 100644 index 0000000..9fe9494 --- /dev/null +++ b/consumers/notifiers/mocks/service.go @@ -0,0 +1,151 @@ +// Code generated by mockery v2.43.2. DO NOT EDIT. + +// Copyright (c) Abstract Machines + +package mocks + +import ( + context "context" + + notifiers "github.com/absmach/magistrala/consumers/notifiers" + mock "github.com/stretchr/testify/mock" +) + +// Service is an autogenerated mock type for the Service type +type Service struct { + mock.Mock +} + +// ConsumeBlocking provides a mock function with given fields: ctx, messages +func (_m *Service) ConsumeBlocking(ctx context.Context, messages interface{}) error { + ret := _m.Called(ctx, messages) + + if len(ret) == 0 { + panic("no return value specified for ConsumeBlocking") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, interface{}) error); ok { + r0 = rf(ctx, messages) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// CreateSubscription provides a mock function with given fields: ctx, token, sub +func (_m *Service) CreateSubscription(ctx context.Context, token string, sub notifiers.Subscription) (string, error) { + ret := _m.Called(ctx, token, sub) + + if len(ret) == 0 { + panic("no return value specified for CreateSubscription") + } + + var r0 string + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, string, notifiers.Subscription) (string, error)); ok { + return rf(ctx, token, sub) + } + if rf, ok := ret.Get(0).(func(context.Context, string, notifiers.Subscription) string); ok { + r0 = rf(ctx, token, sub) + } else { + r0 = ret.Get(0).(string) + } + + if rf, ok := ret.Get(1).(func(context.Context, string, notifiers.Subscription) error); ok { + r1 = rf(ctx, token, sub) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ListSubscriptions provides a mock function with given fields: ctx, token, pm +func (_m *Service) ListSubscriptions(ctx context.Context, token string, pm notifiers.PageMetadata) (notifiers.Page, error) { + ret := _m.Called(ctx, token, pm) + + if len(ret) == 0 { + panic("no return value specified for ListSubscriptions") + } + + var r0 notifiers.Page + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, string, notifiers.PageMetadata) (notifiers.Page, error)); ok { + return rf(ctx, token, pm) + } + if rf, ok := ret.Get(0).(func(context.Context, string, notifiers.PageMetadata) notifiers.Page); ok { + r0 = rf(ctx, token, pm) + } else { + r0 = ret.Get(0).(notifiers.Page) + } + + if rf, ok := ret.Get(1).(func(context.Context, string, notifiers.PageMetadata) error); ok { + r1 = rf(ctx, token, pm) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// RemoveSubscription provides a mock function with given fields: ctx, token, id +func (_m *Service) RemoveSubscription(ctx context.Context, token string, id string) error { + ret := _m.Called(ctx, token, id) + + if len(ret) == 0 { + panic("no return value specified for RemoveSubscription") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, string, string) error); ok { + r0 = rf(ctx, token, id) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// ViewSubscription provides a mock function with given fields: ctx, token, id +func (_m *Service) 
ViewSubscription(ctx context.Context, token string, id string) (notifiers.Subscription, error) { + ret := _m.Called(ctx, token, id) + + if len(ret) == 0 { + panic("no return value specified for ViewSubscription") + } + + var r0 notifiers.Subscription + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, string, string) (notifiers.Subscription, error)); ok { + return rf(ctx, token, id) + } + if rf, ok := ret.Get(0).(func(context.Context, string, string) notifiers.Subscription); ok { + r0 = rf(ctx, token, id) + } else { + r0 = ret.Get(0).(notifiers.Subscription) + } + + if rf, ok := ret.Get(1).(func(context.Context, string, string) error); ok { + r1 = rf(ctx, token, id) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// NewService creates a new instance of Service. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewService(t interface { + mock.TestingT + Cleanup(func()) +}) *Service { + mock := &Service{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/consumers/notifiers/notifier.go b/consumers/notifiers/notifier.go new file mode 100644 index 0000000..2c23bc9 --- /dev/null +++ b/consumers/notifiers/notifier.go @@ -0,0 +1,22 @@ +// Copyright (c) Abstract Machines +// SPDX-License-Identifier: Apache-2.0 + +package notifiers + +import ( + "errors" + + "github.com/absmach/magistrala/pkg/messaging" +) + +// ErrNotify wraps sending notification errors. +var ErrNotify = errors.New("error sending notification") + +// Notifier represents an API for sending notifications. +// +//go:generate mockery --name Notifier --output=./mocks --filename notifier.go --quiet --note "Copyright (c) Abstract Machines" +type Notifier interface { + // Notify method is used to send a notification for the + // received message to the provided list of receivers. + Notify(from string, to []string, msg *messaging.Message) error +} diff --git a/consumers/notifiers/postgres/database.go b/consumers/notifiers/postgres/database.go new file mode 100644 index 0000000..2e7ee74 --- /dev/null +++ b/consumers/notifiers/postgres/database.go @@ -0,0 +1,74 @@ +// Copyright (c) Abstract Machines +// SPDX-License-Identifier: Apache-2.0 + +package postgres + +import ( + "context" + "database/sql" + "fmt" + + "github.com/jmoiron/sqlx" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/trace" +) + +var _ Database = (*database)(nil) + +type database struct { + db *sqlx.DB + tracer trace.Tracer +} + +// Database provides a database interface. +type Database interface { + NamedExecContext(context.Context, string, interface{}) (sql.Result, error) + QueryRowxContext(context.Context, string, ...interface{}) *sqlx.Row + NamedQueryContext(context.Context, string, interface{}) (*sqlx.Rows, error) + GetContext(context.Context, interface{}, string, ...interface{}) error +} + +// NewDatabase creates a Database instance.
+func NewDatabase(db *sqlx.DB, tracer trace.Tracer) Database { + return &database{ + db: db, + tracer: tracer, + } +} + +func (dm database) NamedExecContext(ctx context.Context, query string, args interface{}) (sql.Result, error) { + ctx, span := dm.addSpanTags(ctx, "NamedExecContext", query) + defer span.End() + return dm.db.NamedExecContext(ctx, query, args) +} + +func (dm database) QueryRowxContext(ctx context.Context, query string, args ...interface{}) *sqlx.Row { + ctx, span := dm.addSpanTags(ctx, "QueryRowxContext", query) + defer span.End() + return dm.db.QueryRowxContext(ctx, query, args...) +} + +func (dm database) NamedQueryContext(ctx context.Context, query string, args interface{}) (*sqlx.Rows, error) { + ctx, span := dm.addSpanTags(ctx, "NamedQueryContext", query) + defer span.End() + return dm.db.NamedQueryContext(ctx, query, args) +} + +func (dm database) GetContext(ctx context.Context, dest interface{}, query string, args ...interface{}) error { + ctx, span := dm.addSpanTags(ctx, "GetContext", query) + defer span.End() + return dm.db.GetContext(ctx, dest, query, args...) +} + +func (dm database) addSpanTags(ctx context.Context, method, query string) (context.Context, trace.Span) { + ctx, span := dm.tracer.Start(ctx, + fmt.Sprintf("sql_%s", method), + trace.WithAttributes( + attribute.String("sql.statement", query), + attribute.String("span.kind", "client"), + attribute.String("peer.service", "postgres"), + attribute.String("db.type", "sql"), + ), + ) + return ctx, span +} diff --git a/consumers/notifiers/postgres/doc.go b/consumers/notifiers/postgres/doc.go new file mode 100644 index 0000000..73a6784 --- /dev/null +++ b/consumers/notifiers/postgres/doc.go @@ -0,0 +1,6 @@ +// Copyright (c) Abstract Machines +// SPDX-License-Identifier: Apache-2.0 + +// Package postgres contains repository implementations using PostgreSQL as +// the underlying database. +package postgres diff --git a/consumers/notifiers/postgres/init.go b/consumers/notifiers/postgres/init.go new file mode 100644 index 0000000..ac74c3c --- /dev/null +++ b/consumers/notifiers/postgres/init.go @@ -0,0 +1,28 @@ +// Copyright (c) Abstract Machines +// SPDX-License-Identifier: Apache-2.0 + +package postgres + +import migrate "github.com/rubenv/sql-migrate" + +func Migration() *migrate.MemoryMigrationSource { + return &migrate.MemoryMigrationSource{ + Migrations: []*migrate.Migration{ + { + Id: "subscriptions_1", + Up: []string{ + `CREATE TABLE IF NOT EXISTS subscriptions ( + id VARCHAR(254) PRIMARY KEY, + owner_id VARCHAR(254) NOT NULL, + contact VARCHAR(254), + topic TEXT, + UNIQUE(topic, contact) + )`, + }, + Down: []string{ + "DROP TABLE IF EXISTS subscriptions", + }, + }, + }, + } +} diff --git a/consumers/notifiers/postgres/setup_test.go b/consumers/notifiers/postgres/setup_test.go new file mode 100644 index 0000000..b603378 --- /dev/null +++ b/consumers/notifiers/postgres/setup_test.go @@ -0,0 +1,89 @@ +// Copyright (c) Abstract Machines +// SPDX-License-Identifier: Apache-2.0 + +// Package postgres_test contains tests for PostgreSQL repository +// implementations. 
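`Migration()` above returns a `rubenv/sql-migrate` memory source; in the test setup that follows it is applied through `pgclient.Setup`. A minimal sketch of applying it directly with the sql-migrate API (the connection string and logging are illustrative only, and `pgclient.Setup`'s internals are not shown in this patch):

```go
package main

import (
	"log"

	"github.com/absmach/magistrala/consumers/notifiers/postgres"
	_ "github.com/jackc/pgx/v5/stdlib" // pgx driver for database/sql
	"github.com/jmoiron/sqlx"
	migrate "github.com/rubenv/sql-migrate"
)

func main() {
	db, err := sqlx.Open("pgx", "host=localhost port=5432 user=test dbname=test password=test sslmode=disable")
	if err != nil {
		log.Fatal(err)
	}
	// Create the subscriptions table defined in init.go (migrate.Down reverses it).
	n, err := migrate.Exec(db.DB, "postgres", postgres.Migration(), migrate.Up)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("applied %d migration(s)", n)
}
```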
+package postgres_test + +import ( + "fmt" + "log" + "os" + "testing" + + "github.com/absmach/magistrala/consumers/notifiers/postgres" + pgclient "github.com/absmach/magistrala/pkg/postgres" + "github.com/absmach/magistrala/pkg/ulid" + _ "github.com/jackc/pgx/v5/stdlib" // required for SQL access + "github.com/jmoiron/sqlx" + "github.com/ory/dockertest/v3" + "github.com/ory/dockertest/v3/docker" +) + +var ( + idProvider = ulid.New() + db *sqlx.DB +) + +func TestMain(m *testing.M) { + pool, err := dockertest.NewPool("") + if err != nil { + log.Fatalf("Could not connect to docker: %s", err) + } + + container, err := pool.RunWithOptions(&dockertest.RunOptions{ + Repository: "postgres", + Tag: "16.2-alpine", + Env: []string{ + "POSTGRES_USER=test", + "POSTGRES_PASSWORD=test", + "POSTGRES_DB=test", + "listen_addresses = '*'", + }, + }, func(config *docker.HostConfig) { + config.AutoRemove = true + config.RestartPolicy = docker.RestartPolicy{Name: "no"} + }) + if err != nil { + log.Fatalf("Could not start container: %s", err) + } + + port := container.GetPort("5432/tcp") + + url := fmt.Sprintf("host=localhost port=%s user=test dbname=test password=test sslmode=disable", port) + if err := pool.Retry(func() error { + db, err = sqlx.Open("pgx", url) + if err != nil { + return err + } + return db.Ping() + }); err != nil { + log.Fatalf("Could not connect to docker: %s", err) + } + + dbConfig := pgclient.Config{ + Host: "localhost", + Port: port, + User: "test", + Pass: "test", + Name: "test", + SSLMode: "disable", + SSLCert: "", + SSLKey: "", + SSLRootCert: "", + } + + if db, err = pgclient.Setup(dbConfig, *postgres.Migration()); err != nil { + log.Fatalf("Could not setup test DB connection: %s", err) + } + + code := m.Run() + + // Defers will not be run when using os.Exit + db.Close() + if err := pool.Purge(container); err != nil { + log.Fatalf("Could not purge container: %s", err) + } + + os.Exit(code) +} diff --git a/consumers/notifiers/postgres/subscriptions.go b/consumers/notifiers/postgres/subscriptions.go new file mode 100644 index 0000000..1d445d9 --- /dev/null +++ b/consumers/notifiers/postgres/subscriptions.go @@ -0,0 +1,164 @@ +// Copyright (c) Abstract Machines +// SPDX-License-Identifier: Apache-2.0 + +package postgres + +import ( + "context" + "database/sql" + "fmt" + "strings" + + "github.com/absmach/magistrala/consumers/notifiers" + "github.com/absmach/magistrala/pkg/errors" + repoerr "github.com/absmach/magistrala/pkg/errors/repository" + "github.com/jackc/pgerrcode" + "github.com/jackc/pgx/v5/pgconn" +) + +var _ notifiers.SubscriptionsRepository = (*subscriptionsRepo)(nil) + +type subscriptionsRepo struct { + db Database +} + +// New instantiates a PostgreSQL implementation of Subscriptions repository. 
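For orientation before the repository implementation: the `Database` wrapper, the repository constructor (`New`, defined next), and the tracing middleware added later in this patch layer together. A sketch of that composition, mirroring what the tests in this package do:

```go
package example

import (
	"github.com/absmach/magistrala/consumers/notifiers"
	"github.com/absmach/magistrala/consumers/notifiers/postgres"
	"github.com/absmach/magistrala/consumers/notifiers/tracing"
	"github.com/jmoiron/sqlx"
	"go.opentelemetry.io/otel/trace"
)

// newRepository layers the package's pieces: NewDatabase adds a span per SQL
// statement, postgres.New supplies the persistence logic, and tracing.New
// adds a span per repository operation.
func newRepository(db *sqlx.DB, tracer trace.Tracer) notifiers.SubscriptionsRepository {
	database := postgres.NewDatabase(db, tracer)
	repo := postgres.New(database)
	return tracing.New(tracer, repo)
}
```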
+func New(db Database) notifiers.SubscriptionsRepository { + return &subscriptionsRepo{ + db: db, + } +} + +func (repo subscriptionsRepo) Save(ctx context.Context, sub notifiers.Subscription) (string, error) { + q := `INSERT INTO subscriptions (id, owner_id, contact, topic) VALUES (:id, :owner_id, :contact, :topic) RETURNING id` + + dbSub := dbSubscription{ + ID: sub.ID, + OwnerID: sub.OwnerID, + Contact: sub.Contact, + Topic: sub.Topic, + } + + row, err := repo.db.NamedQueryContext(ctx, q, dbSub) + if err != nil { + if pqErr, ok := err.(*pgconn.PgError); ok && pqErr.Code == pgerrcode.UniqueViolation { + return "", errors.Wrap(repoerr.ErrConflict, err) + } + return "", errors.Wrap(repoerr.ErrCreateEntity, err) + } + defer row.Close() + + return sub.ID, nil +} + +func (repo subscriptionsRepo) Retrieve(ctx context.Context, id string) (notifiers.Subscription, error) { + q := `SELECT id, owner_id, contact, topic FROM subscriptions WHERE id = $1` + sub := dbSubscription{} + if err := repo.db.QueryRowxContext(ctx, q, id).StructScan(&sub); err != nil { + if err == sql.ErrNoRows { + return notifiers.Subscription{}, errors.Wrap(repoerr.ErrNotFound, err) + } + return notifiers.Subscription{}, errors.Wrap(repoerr.ErrViewEntity, err) + } + + return fromDBSub(sub), nil +} + +func (repo subscriptionsRepo) RetrieveAll(ctx context.Context, pm notifiers.PageMetadata) (notifiers.Page, error) { + q := `SELECT id, owner_id, contact, topic FROM subscriptions` + args := make(map[string]interface{}) + if pm.Topic != "" { + args["topic"] = pm.Topic + } + if pm.Contact != "" { + args["contact"] = pm.Contact + } + var condition string + if len(args) > 0 { + var cond []string + for k := range args { + cond = append(cond, fmt.Sprintf("%s = :%s", k, k)) + } + condition = fmt.Sprintf(" WHERE %s", strings.Join(cond, " AND ")) + q = fmt.Sprintf("%s%s", q, condition) + } + args["offset"] = pm.Offset + q = fmt.Sprintf("%s OFFSET :offset", q) + if pm.Limit > 0 { + q = fmt.Sprintf("%s LIMIT :limit", q) + args["limit"] = pm.Limit + } + + rows, err := repo.db.NamedQueryContext(ctx, q, args) + if err != nil { + return notifiers.Page{}, errors.Wrap(repoerr.ErrViewEntity, err) + } + defer rows.Close() + + var subs []notifiers.Subscription + for rows.Next() { + sub := dbSubscription{} + if err := rows.StructScan(&sub); err != nil { + return notifiers.Page{}, errors.Wrap(repoerr.ErrViewEntity, err) + } + subs = append(subs, fromDBSub(sub)) + } + + if len(subs) == 0 { + return notifiers.Page{}, repoerr.ErrNotFound + } + + cq := fmt.Sprintf(`SELECT COUNT(*) FROM subscriptions %s`, condition) + total, err := total(ctx, repo.db, cq, args) + if err != nil { + return notifiers.Page{}, errors.Wrap(repoerr.ErrViewEntity, err) + } + + ret := notifiers.Page{ + PageMetadata: pm, + Total: total, + Subscriptions: subs, + } + + return ret, nil +} + +func (repo subscriptionsRepo) Remove(ctx context.Context, id string) error { + q := `DELETE from subscriptions WHERE id = $1` + + if r := repo.db.QueryRowxContext(ctx, q, id); r.Err() != nil { + return errors.Wrap(repoerr.ErrRemoveEntity, r.Err()) + } + return nil +} + +func total(ctx context.Context, db Database, query string, params interface{}) (uint, error) { + rows, err := db.NamedQueryContext(ctx, query, params) + if err != nil { + return 0, err + } + defer rows.Close() + var total uint + if rows.Next() { + if err := rows.Scan(&total); err != nil { + return 0, err + } + } + return total, nil +} + +type dbSubscription struct { + ID string `db:"id"` + OwnerID string `db:"owner_id"` + Contact 
string `db:"contact"` + Topic string `db:"topic"` +} + +func fromDBSub(sub dbSubscription) notifiers.Subscription { + return notifiers.Subscription{ + ID: sub.ID, + OwnerID: sub.OwnerID, + Contact: sub.Contact, + Topic: sub.Topic, + } +} diff --git a/consumers/notifiers/postgres/subscriptions_test.go b/consumers/notifiers/postgres/subscriptions_test.go new file mode 100644 index 0000000..507de04 --- /dev/null +++ b/consumers/notifiers/postgres/subscriptions_test.go @@ -0,0 +1,263 @@ +// Copyright (c) Abstract Machines +// SPDX-License-Identifier: Apache-2.0 + +package postgres_test + +import ( + "context" + "fmt" + "testing" + + "github.com/absmach/magistrala/consumers/notifiers" + "github.com/absmach/magistrala/consumers/notifiers/postgres" + "github.com/absmach/magistrala/pkg/errors" + repoerr "github.com/absmach/magistrala/pkg/errors/repository" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.opentelemetry.io/otel" +) + +const ( + owner = "owner@example.com" + numSubs = 100 +) + +var tracer = otel.Tracer("tests") + +func TestSave(t *testing.T) { + dbMiddleware := postgres.NewDatabase(db, tracer) + repo := postgres.New(dbMiddleware) + + id1, err := idProvider.ID() + assert.Nil(t, err, fmt.Sprintf("got unexpected error: %s", err)) + + id2, err := idProvider.ID() + assert.Nil(t, err, fmt.Sprintf("got unexpected error: %s", err)) + + sub1 := notifiers.Subscription{ + OwnerID: id1, + ID: id1, + Contact: owner, + Topic: "topic.subtopic", + } + + sub2 := sub1 + sub2.ID = id2 + + cases := []struct { + desc string + sub notifiers.Subscription + id string + err error + }{ + { + desc: "save successfully", + sub: sub1, + id: id1, + err: nil, + }, + { + desc: "save duplicate", + sub: sub2, + id: "", + err: repoerr.ErrConflict, + }, + } + + for _, tc := range cases { + id, err := repo.Save(context.Background(), tc.sub) + assert.Equal(t, tc.id, id, fmt.Sprintf("%s: expected id %s got %s\n", tc.desc, tc.id, id)) + assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("%s: expected %s got %s\n", tc.desc, tc.err, err)) + } +} + +func TestView(t *testing.T) { + dbMiddleware := postgres.NewDatabase(db, tracer) + repo := postgres.New(dbMiddleware) + + id, err := idProvider.ID() + require.Nil(t, err, fmt.Sprintf("got an error creating id: %s", err)) + + sub := notifiers.Subscription{ + OwnerID: id, + ID: id, + Contact: owner, + Topic: "view.subtopic", + } + + ret, err := repo.Save(context.Background(), sub) + require.Nil(t, err, fmt.Sprintf("creating subscription must not fail: %s", err)) + require.Equal(t, id, ret, fmt.Sprintf("provided id %s must be the same as the returned id %s", id, ret)) + + cases := []struct { + desc string + sub notifiers.Subscription + id string + err error + }{ + { + desc: "retrieve successfully", + sub: sub, + id: id, + err: nil, + }, + { + desc: "retrieve not existing", + sub: notifiers.Subscription{}, + id: "non-existing", + err: repoerr.ErrNotFound, + }, + } + + for _, tc := range cases { + sub, err := repo.Retrieve(context.Background(), tc.id) + assert.Equal(t, tc.sub, sub, fmt.Sprintf("%s: expected sub %v got %v\n", tc.desc, tc.sub, sub)) + assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("%s: expected %s got %s\n", tc.desc, tc.err, err)) + } +} + +func TestRetrieveAll(t *testing.T) { + _, err := db.Exec("DELETE FROM subscriptions") + require.Nil(t, err, fmt.Sprintf("cleanup must not fail: %s", err)) + + dbMiddleware := postgres.NewDatabase(db, tracer) + repo := postgres.New(dbMiddleware) + + var subs []notifiers.Subscription + 
+ for i := 0; i < numSubs; i++ { + id, err := idProvider.ID() + assert.Nil(t, err, fmt.Sprintf("got unexpected error: %s", err)) + sub := notifiers.Subscription{ + OwnerID: "owner", + ID: id, + Contact: owner, + Topic: fmt.Sprintf("list.subtopic.%d", i), + } + + ret, err := repo.Save(context.Background(), sub) + require.Nil(t, err, fmt.Sprintf("creating subscription must not fail: %s", err)) + require.Equal(t, id, ret, fmt.Sprintf("provided id %s must be the same as the returned id %s", id, ret)) + subs = append(subs, sub) + } + + cases := []struct { + desc string + pageMeta notifiers.PageMetadata + page notifiers.Page + err error + }{ + { + desc: "retrieve successfully", + pageMeta: notifiers.PageMetadata{ + Offset: 10, + Limit: 2, + }, + page: notifiers.Page{ + Total: numSubs, + PageMetadata: notifiers.PageMetadata{ + Offset: 10, + Limit: 2, + }, + Subscriptions: subs[10:12], + }, + err: nil, + }, + { + desc: "retrieve with contact", + pageMeta: notifiers.PageMetadata{ + Offset: 10, + Limit: 2, + Contact: owner, + }, + page: notifiers.Page{ + Total: numSubs, + PageMetadata: notifiers.PageMetadata{ + Offset: 10, + Limit: 2, + Contact: owner, + }, + Subscriptions: subs[10:12], + }, + err: nil, + }, + { + desc: "retrieve with topic", + pageMeta: notifiers.PageMetadata{ + Offset: 0, + Limit: 2, + Topic: "list.subtopic.11", + }, + page: notifiers.Page{ + Total: 1, + PageMetadata: notifiers.PageMetadata{ + Offset: 0, + Limit: 2, + Topic: "list.subtopic.11", + }, + Subscriptions: subs[11:12], + }, + err: nil, + }, + { + desc: "retrieve with no limit", + pageMeta: notifiers.PageMetadata{ + Offset: 0, + Limit: -1, + }, + page: notifiers.Page{ + Total: numSubs, + PageMetadata: notifiers.PageMetadata{ + Limit: -1, + }, + Subscriptions: subs, + }, + err: nil, + }, + } + + for _, tc := range cases { + page, err := repo.RetrieveAll(context.Background(), tc.pageMeta) + assert.Equal(t, tc.page, page, fmt.Sprintf("%s: got unexpected page\n", tc.desc)) + assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("%s: expected %s got %s\n", tc.desc, tc.err, err)) + } +} + +func TestRemove(t *testing.T) { + dbMiddleware := postgres.NewDatabase(db, tracer) + repo := postgres.New(dbMiddleware) + id, err := idProvider.ID() + require.Nil(t, err, fmt.Sprintf("got an error creating id: %s", err)) + sub := notifiers.Subscription{ + OwnerID: id, + ID: id, + Contact: owner, + Topic: "remove.subtopic.%d", + } + + ret, err := repo.Save(context.Background(), sub) + require.Nil(t, err, fmt.Sprintf("creating subscription must not fail: %s", err)) + require.Equal(t, id, ret, fmt.Sprintf("provided id %s must be the same as the returned id %s", id, ret)) + + cases := []struct { + desc string + id string + err error + }{ + { + desc: "remove successfully", + id: id, + err: nil, + }, + { + desc: "remove not existing", + id: "empty", + err: nil, + }, + } + + for _, tc := range cases { + err := repo.Remove(context.Background(), tc.id) + assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("%s: expected %s got %s\n", tc.desc, tc.err, err)) + } +} diff --git a/consumers/notifiers/service.go b/consumers/notifiers/service.go new file mode 100644 index 0000000..563fd59 --- /dev/null +++ b/consumers/notifiers/service.go @@ -0,0 +1,174 @@ +// Copyright (c) Abstract Machines +// SPDX-License-Identifier: Apache-2.0 + +package notifiers + +import ( + "context" + "fmt" + + "github.com/absmach/magistrala" + "github.com/absmach/magistrala/consumers" + "github.com/absmach/magistrala/pkg/errors" + svcerr 
"github.com/absmach/magistrala/pkg/errors/service" + "github.com/absmach/magistrala/pkg/messaging" +) + +// ErrMessage indicates an error converting a message to Magistrala message. +var ErrMessage = errors.New("failed to convert to Magistrala message") + +var _ consumers.AsyncConsumer = (*notifierService)(nil) + +// Service reprents a notification service. +// +//go:generate mockery --name Service --output=./mocks --filename service.go --quiet --note "Copyright (c) Abstract Machines" +type Service interface { + // CreateSubscription persists a subscription. + // Successful operation is indicated by non-nil error response. + CreateSubscription(ctx context.Context, token string, sub Subscription) (string, error) + + // ViewSubscription retrieves the subscription for the given user and id. + ViewSubscription(ctx context.Context, token, id string) (Subscription, error) + + // ListSubscriptions lists subscriptions having the provided user token and search params. + ListSubscriptions(ctx context.Context, token string, pm PageMetadata) (Page, error) + + // RemoveSubscription removes the subscription having the provided identifier. + RemoveSubscription(ctx context.Context, token, id string) error + + consumers.BlockingConsumer +} + +var _ Service = (*notifierService)(nil) + +type notifierService struct { + auth magistrala.AuthServiceClient + subs SubscriptionsRepository + idp magistrala.IDProvider + notifier Notifier + errCh chan error + from string +} + +// New instantiates the subscriptions service implementation. +func New(auth magistrala.AuthServiceClient, subs SubscriptionsRepository, idp magistrala.IDProvider, notifier Notifier, from string) Service { + return ¬ifierService{ + auth: auth, + subs: subs, + idp: idp, + notifier: notifier, + errCh: make(chan error, 1), + from: from, + } +} + +func (ns *notifierService) CreateSubscription(ctx context.Context, token string, sub Subscription) (string, error) { + res, err := ns.auth.Identify(ctx, &magistrala.IdentityReq{Token: token}) + if err != nil { + return "", err + } + sub.ID, err = ns.idp.ID() + if err != nil { + return "", err + } + + sub.OwnerID = res.GetId() + id, err := ns.subs.Save(ctx, sub) + if err != nil { + return "", errors.Wrap(svcerr.ErrCreateEntity, err) + } + return id, nil +} + +func (ns *notifierService) ViewSubscription(ctx context.Context, token, id string) (Subscription, error) { + if _, err := ns.auth.Identify(ctx, &magistrala.IdentityReq{Token: token}); err != nil { + return Subscription{}, err + } + + return ns.subs.Retrieve(ctx, id) +} + +func (ns *notifierService) ListSubscriptions(ctx context.Context, token string, pm PageMetadata) (Page, error) { + if _, err := ns.auth.Identify(ctx, &magistrala.IdentityReq{Token: token}); err != nil { + return Page{}, err + } + + return ns.subs.RetrieveAll(ctx, pm) +} + +func (ns *notifierService) RemoveSubscription(ctx context.Context, token, id string) error { + if _, err := ns.auth.Identify(ctx, &magistrala.IdentityReq{Token: token}); err != nil { + return err + } + + return ns.subs.Remove(ctx, id) +} + +func (ns *notifierService) ConsumeBlocking(ctx context.Context, message interface{}) error { + msg, ok := message.(*messaging.Message) + if !ok { + return ErrMessage + } + topic := msg.GetChannel() + if msg.GetSubtopic() != "" { + topic = fmt.Sprintf("%s.%s", msg.GetChannel(), msg.GetSubtopic()) + } + pm := PageMetadata{ + Topic: topic, + Offset: 0, + Limit: -1, + } + page, err := ns.subs.RetrieveAll(ctx, pm) + if err != nil { + return err + } + + var to []string + for _, sub := 
range page.Subscriptions { + to = append(to, sub.Contact) + } + if len(to) > 0 { + err := ns.notifier.Notify(ns.from, to, msg) + if err != nil { + return errors.Wrap(ErrNotify, err) + } + } + + return nil +} + +func (ns *notifierService) ConsumeAsync(ctx context.Context, message interface{}) { + msg, ok := message.(*messaging.Message) + if !ok { + ns.errCh <- ErrMessage + return + } + topic := msg.GetChannel() + if msg.GetSubtopic() != "" { + topic = fmt.Sprintf("%s.%s", msg.GetChannel(), msg.GetSubtopic()) + } + pm := PageMetadata{ + Topic: topic, + Offset: 0, + Limit: -1, + } + page, err := ns.subs.RetrieveAll(ctx, pm) + if err != nil { + ns.errCh <- err + return + } + + var to []string + for _, sub := range page.Subscriptions { + to = append(to, sub.Contact) + } + if len(to) > 0 { + if err := ns.notifier.Notify(ns.from, to, msg); err != nil { + ns.errCh <- errors.Wrap(ErrNotify, err) + } + } +} + +func (ns *notifierService) Errors() <-chan error { + return ns.errCh +} diff --git a/consumers/notifiers/service_test.go b/consumers/notifiers/service_test.go new file mode 100644 index 0000000..fe3ed7e --- /dev/null +++ b/consumers/notifiers/service_test.go @@ -0,0 +1,377 @@ +// Copyright (c) Abstract Machines +// SPDX-License-Identifier: Apache-2.0 + +package notifiers_test + +import ( + "context" + "fmt" + "testing" + + "github.com/absmach/magistrala" + authmocks "github.com/absmach/magistrala/auth/mocks" + "github.com/absmach/magistrala/consumers/notifiers" + "github.com/absmach/magistrala/consumers/notifiers/mocks" + "github.com/absmach/magistrala/pkg/errors" + repoerr "github.com/absmach/magistrala/pkg/errors/repository" + svcerr "github.com/absmach/magistrala/pkg/errors/service" + "github.com/absmach/magistrala/pkg/messaging" + "github.com/absmach/magistrala/pkg/uuid" + "github.com/absmach/mg-contrib/pkg/testsutil" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" +) + +const ( + total = 100 + exampleUser1 = "token1" + exampleUser2 = "token2" + validID = "d4ebb847-5d0e-4e46-bdd9-b6aceaaa3a22" +) + +func newService() (notifiers.Service, *authmocks.AuthClient, *mocks.SubscriptionsRepository) { + repo := new(mocks.SubscriptionsRepository) + auth := new(authmocks.AuthClient) + notifier := new(mocks.Notifier) + idp := uuid.NewMock() + from := "exampleFrom" + return notifiers.New(auth, repo, idp, notifier, from), auth, repo +} + +func TestCreateSubscription(t *testing.T) { + svc, auth, repo := newService() + + cases := []struct { + desc string + token string + sub notifiers.Subscription + id string + err error + identifyErr error + userID string + }{ + { + desc: "test success", + token: exampleUser1, + sub: notifiers.Subscription{Contact: exampleUser1, Topic: "valid.topic"}, + id: uuid.Prefix + fmt.Sprintf("%012d", 1), + err: nil, + identifyErr: nil, + userID: validID, + }, + { + desc: "test already existing", + token: exampleUser1, + sub: notifiers.Subscription{Contact: exampleUser1, Topic: "valid.topic"}, + id: "", + err: repoerr.ErrConflict, + identifyErr: nil, + userID: validID, + }, + { + desc: "test with empty token", + token: "", + sub: notifiers.Subscription{Contact: exampleUser1, Topic: "valid.topic"}, + id: "", + err: svcerr.ErrAuthentication, + identifyErr: svcerr.ErrAuthentication, + }, + } + + for _, tc := range cases { + repoCall := auth.On("Identify", context.Background(), &magistrala.IdentityReq{Token: tc.token}).Return(&magistrala.IdentityRes{Id: tc.userID}, tc.identifyErr) + repoCall1 := repo.On("Save", context.Background(), 
mock.Anything).Return(tc.id, tc.err) + id, err := svc.CreateSubscription(context.Background(), tc.token, tc.sub) + assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("%s: expected %s got %s\n", tc.desc, tc.err, err)) + assert.Equal(t, tc.id, id, fmt.Sprintf("%s: expected %s got %s\n", tc.desc, tc.id, id)) + repoCall.Unset() + repoCall1.Unset() + } +} + +func TestViewSubscription(t *testing.T) { + svc, auth, repo := newService() + sub := notifiers.Subscription{ + Contact: exampleUser1, + Topic: "valid.topic", + ID: testsutil.GenerateUUID(t), + OwnerID: validID, + } + + cases := []struct { + desc string + token string + id string + sub notifiers.Subscription + err error + identifyErr error + userID string + }{ + { + desc: "test success", + token: exampleUser1, + id: validID, + sub: sub, + err: nil, + identifyErr: nil, + userID: validID, + }, + { + desc: "test not existing", + token: exampleUser1, + id: "not_exist", + sub: notifiers.Subscription{}, + err: svcerr.ErrNotFound, + identifyErr: nil, + userID: validID, + }, + { + desc: "test with empty token", + token: "", + id: validID, + sub: notifiers.Subscription{}, + err: svcerr.ErrAuthentication, + identifyErr: svcerr.ErrAuthentication, + }, + } + + for _, tc := range cases { + repoCall := auth.On("Identify", context.Background(), &magistrala.IdentityReq{Token: tc.token}).Return(&magistrala.IdentityRes{Id: tc.userID}, tc.identifyErr) + repoCall1 := repo.On("Retrieve", context.Background(), tc.id).Return(tc.sub, tc.err) + sub, err := svc.ViewSubscription(context.Background(), tc.token, tc.id) + assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("%s: expected %s got %s\n", tc.desc, tc.err, err)) + assert.Equal(t, tc.sub, sub, fmt.Sprintf("%s: expected %v got %v\n", tc.desc, tc.sub, sub)) + repoCall.Unset() + repoCall1.Unset() + } +} + +func TestListSubscriptions(t *testing.T) { + svc, auth, repo := newService() + sub := notifiers.Subscription{Contact: exampleUser1, OwnerID: exampleUser1} + topic := "topic.subtopic" + var subs []notifiers.Subscription + for i := 0; i < total; i++ { + tmp := sub + if i%2 == 0 { + tmp.Contact = exampleUser2 + tmp.OwnerID = exampleUser2 + } + tmp.Topic = fmt.Sprintf("%s.%d", topic, i) + tmp.ID = testsutil.GenerateUUID(t) + tmp.OwnerID = validID + subs = append(subs, tmp) + } + + var offsetSubs []notifiers.Subscription + for i := 20; i < 40; i += 2 { + offsetSubs = append(offsetSubs, subs[i]) + } + + cases := []struct { + desc string + token string + pageMeta notifiers.PageMetadata + page notifiers.Page + err error + identifyErr error + userID string + }{ + { + desc: "test success", + token: exampleUser1, + pageMeta: notifiers.PageMetadata{ + Offset: 0, + Limit: 3, + }, + err: nil, + page: notifiers.Page{ + PageMetadata: notifiers.PageMetadata{ + Offset: 0, + Limit: 3, + }, + Subscriptions: subs[:3], + Total: total, + }, + identifyErr: nil, + userID: validID, + }, + { + desc: "test not existing", + token: exampleUser1, + pageMeta: notifiers.PageMetadata{ + Limit: 10, + Contact: "empty@example.com", + }, + page: notifiers.Page{}, + err: svcerr.ErrNotFound, + identifyErr: nil, + userID: validID, + }, + { + desc: "test with empty token", + token: "", + pageMeta: notifiers.PageMetadata{ + Offset: 2, + Limit: 12, + Topic: "topic.subtopic.13", + }, + page: notifiers.Page{}, + err: svcerr.ErrAuthentication, + identifyErr: svcerr.ErrAuthentication, + }, + { + desc: "test with topic", + token: exampleUser1, + pageMeta: notifiers.PageMetadata{ + Limit: 10, + Topic: fmt.Sprintf("%s.%d", topic, 4), + }, + page: 
notifiers.Page{ + PageMetadata: notifiers.PageMetadata{ + Limit: 10, + Topic: fmt.Sprintf("%s.%d", topic, 4), + }, + Subscriptions: subs[4:5], + Total: 1, + }, + err: nil, + identifyErr: nil, + userID: validID, + }, + { + desc: "test with contact and offset", + token: exampleUser1, + pageMeta: notifiers.PageMetadata{ + Offset: 10, + Limit: 10, + Contact: exampleUser2, + }, + page: notifiers.Page{ + PageMetadata: notifiers.PageMetadata{ + Offset: 10, + Limit: 10, + Contact: exampleUser2, + }, + Subscriptions: offsetSubs, + Total: uint(total / 2), + }, + err: nil, + identifyErr: nil, + userID: validID, + }, + } + + for _, tc := range cases { + repoCall := auth.On("Identify", context.Background(), &magistrala.IdentityReq{Token: tc.token}).Return(&magistrala.IdentityRes{Id: tc.userID}, tc.identifyErr) + repoCall1 := repo.On("RetrieveAll", context.Background(), tc.pageMeta).Return(tc.page, tc.err) + page, err := svc.ListSubscriptions(context.Background(), tc.token, tc.pageMeta) + assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("%s: expected %s got %s\n", tc.desc, tc.err, err)) + assert.Equal(t, tc.page, page, fmt.Sprintf("%s: got unexpected page\n", tc.desc)) + repoCall.Unset() + repoCall1.Unset() + } +} + +func TestRemoveSubscription(t *testing.T) { + svc, auth, repo := newService() + sub := notifiers.Subscription{ + Contact: exampleUser1, + Topic: "valid.topic", + ID: testsutil.GenerateUUID(t), + OwnerID: validID, + } + + cases := []struct { + desc string + token string + id string + err error + identifyErr error + userID string + }{ + { + desc: "test success", + token: exampleUser1, + id: sub.ID, + err: nil, + identifyErr: nil, + userID: validID, + }, + { + desc: "test not existing", + token: exampleUser1, + id: "not_exist", + err: svcerr.ErrNotFound, + identifyErr: nil, + userID: validID, + }, + { + desc: "test with empty token", + token: "", + id: sub.ID, + err: svcerr.ErrAuthentication, + identifyErr: svcerr.ErrAuthentication, + }, + } + + for _, tc := range cases { + repoCall := auth.On("Identify", context.Background(), &magistrala.IdentityReq{Token: tc.token}).Return(&magistrala.IdentityRes{Id: tc.userID}, tc.identifyErr) + repoCall1 := repo.On("Remove", context.Background(), tc.id).Return(tc.err) + err := svc.RemoveSubscription(context.Background(), tc.token, tc.id) + assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("%s: expected %s got %s\n", tc.desc, tc.err, err)) + repoCall.Unset() + repoCall1.Unset() + } +} + +func TestConsume(t *testing.T) { + svc, _, repo := newService() + sub := notifiers.Subscription{ + Contact: exampleUser1, + OwnerID: validID, + Topic: "topic.subtopic", + } + for i := 0; i < total; i++ { + tmp := sub + tmp.Contact = fmt.Sprintf("contact%d@example.com", i) + if i%2 == 0 { + tmp.Topic = fmt.Sprintf("%s-2", sub.Topic) + } + } + sub.Contact = "invalid@example.com" + sub.Topic = fmt.Sprintf("%s-2", sub.Topic) + + msg := messaging.Message{ + Channel: "topic", + Subtopic: "subtopic", + } + errMsg := messaging.Message{ + Channel: "topic", + Subtopic: "subtopic-2", + } + + cases := []struct { + desc string + msg *messaging.Message + err error + }{ + { + desc: "test success", + msg: &msg, + err: nil, + }, + { + desc: "test fail", + msg: &errMsg, + err: notifiers.ErrNotify, + }, + } + + for _, tc := range cases { + repoCall := repo.On("RetrieveAll", context.TODO(), mock.Anything).Return(notifiers.Page{}, tc.err) + err := svc.ConsumeBlocking(context.TODO(), tc.msg) + assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("%s: expected %s got %s\n", 
tc.desc, tc.err, err)) + repoCall.Unset() + } +} diff --git a/consumers/notifiers/smpp/README.md b/consumers/notifiers/smpp/README.md new file mode 100644 index 0000000..784763d --- /dev/null +++ b/consumers/notifiers/smpp/README.md @@ -0,0 +1,51 @@ +# SMPP Notifier + +SMPP Notifier implements a notifier for sending SMS notifications. + +## Configuration + +The Subscription service using SMPP Notifier is configured using the environment variables presented in the +following table. Note that any unset variables will be replaced with their +default values. + +| Variable | Description | Default | +| --------------------------------- | ---------------------------------------------------------------------------------- | ------------------------------ | +| MG_SMPP_NOTIFIER_LOG_LEVEL | Log level for SMPP Notifier (debug, info, warn, error) | info | +| MG_SMPP_NOTIFIER_FROM_ADDRESS | From address for SMS notifications | | +| MG_SMPP_NOTIFIER_CONFIG_PATH | Config file path with Message broker subjects list, payload type and content-type | /config.toml | +| MG_SMPP_NOTIFIER_HTTP_HOST | Service HTTP host | localhost | +| MG_SMPP_NOTIFIER_HTTP_PORT | Service HTTP port | 9014 | +| MG_SMPP_NOTIFIER_HTTP_SERVER_CERT | Service HTTP server certificate path | "" | +| MG_SMPP_NOTIFIER_HTTP_SERVER_KEY | Service HTTP server key | "" | +| MG_SMPP_NOTIFIER_DB_HOST | Database host address | localhost | +| MG_SMPP_NOTIFIER_DB_PORT | Database host port | 5432 | +| MG_SMPP_NOTIFIER_DB_USER | Database user | magistrala | +| MG_SMPP_NOTIFIER_DB_PASS | Database password | magistrala | +| MG_SMPP_NOTIFIER_DB_NAME | Name of the database used by the service | subscriptions | +| MG_SMPP_NOTIFIER_DB_SSL_MODE | DB connection SSL mode (disable, require, verify-ca, verify-full) | disable | +| MG_SMPP_NOTIFIER_DB_SSL_CERT | Path to the PEM encoded certificate file | "" | +| MG_SMPP_NOTIFIER_DB_SSL_KEY | Path to the PEM encoded key file | "" | +| MG_SMPP_NOTIFIER_DB_SSL_ROOT_CERT | Path to the PEM encoded root certificate file | "" | +| MG_SMPP_ADDRESS | SMPP address [host:port] | | +| MG_SMPP_USERNAME | SMPP Username | | +| MG_SMPP_PASSWORD | SMPP Password | | +| MG_SMPP_SYSTEM_TYPE | SMPP System Type | | +| MG_SMPP_SRC_ADDR_TON | SMPP source address TON | | +| MG_SMPP_DST_ADDR_TON | SMPP destination address TON | | +| MG_SMPP_SRC_ADDR_NPI | SMPP source address NPI | | +| MG_SMPP_DST_ADDR_NPI | SMPP destination address NPI | | +| MG_AUTH_GRPC_URL | Auth service gRPC URL | localhost:7001 | +| MG_AUTH_GRPC_TIMEOUT | Auth service gRPC request timeout in seconds | 1s | +| MG_AUTH_GRPC_CLIENT_TLS | Auth client TLS flag | false | +| MG_AUTH_GRPC_CA_CERT | Path to Auth client CA certs in pem format | "" | +| MG_MESSAGE_BROKER_URL | Message broker URL | nats://127.0.0.1:4222 | +| MG_JAEGER_URL | Jaeger server URL | http://jaeger:14268/api/traces | +| MG_SEND_TELEMETRY | Send telemetry to magistrala call home server | true | +| MG_SMPP_NOTIFIER_INSTANCE_ID | SMPP Notifier instance ID | "" | + +## Usage + +Starting the service will start consuming messages and sending SMS when a message is received. + +[doc]: https://docs.magistrala.abstractmachines.fr + diff --git a/consumers/notifiers/smpp/config.go b/consumers/notifiers/smpp/config.go new file mode 100644 index 0000000..a8af3a6 --- /dev/null +++ b/consumers/notifiers/smpp/config.go @@ -0,0 +1,21 @@ +// Copyright (c) Abstract Machines +// SPDX-License-Identifier: Apache-2.0 + +package smpp + +import ( + "crypto/tls" +) + +// Config represents SMPP transmitter configuration.
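The `Config` struct that follows binds these variables through `env` struct tags. Magistrala services conventionally parse such structs with the caarlos0/env library; a sketch under that assumption (the import path and version are not confirmed by this patch):

```go
package main

import (
	"log"

	"github.com/absmach/magistrala/consumers/notifiers/smpp"
	"github.com/caarlos0/env/v10"
)

func main() {
	// Populate smpp.Config from the MG_SMPP_* environment variables.
	cfg := smpp.Config{}
	if err := env.Parse(&cfg); err != nil {
		log.Fatalf("failed to load SMPP config: %s", err)
	}
	log.Printf("SMPP address: %s", cfg.Address)
}
```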
+type Config struct { + Address string `env:"MG_SMPP_ADDRESS" envDefault:""` + Username string `env:"MG_SMPP_USERNAME" envDefault:""` + Password string `env:"MG_SMPP_PASSWORD" envDefault:""` + SystemType string `env:"MG_SMPP_SYSTEM_TYPE" envDefault:""` + SourceAddrTON uint8 `env:"MG_SMPP_SRC_ADDR_TON" envDefault:"0"` + SourceAddrNPI uint8 `env:"MG_SMPP_SRC_ADDR_NPI" envDefault:"0"` + DestAddrTON uint8 `env:"MG_SMPP_DST_ADDR_TON" envDefault:"0"` + DestAddrNPI uint8 `env:"MG_SMPP_DST_ADDR_NPI" envDefault:"0"` + TLS *tls.Config +} diff --git a/consumers/notifiers/smpp/doc.go b/consumers/notifiers/smpp/doc.go new file mode 100644 index 0000000..c81f3e7 --- /dev/null +++ b/consumers/notifiers/smpp/doc.go @@ -0,0 +1,6 @@ +// Copyright (c) Abstract Machines +// SPDX-License-Identifier: Apache-2.0 + +// Package smpp contains the domain concept definitions needed to +// support Magistrala SMS notifications. +package smpp diff --git a/consumers/notifiers/smpp/notifier.go b/consumers/notifiers/smpp/notifier.go new file mode 100644 index 0000000..bd2dd8f --- /dev/null +++ b/consumers/notifiers/smpp/notifier.go @@ -0,0 +1,67 @@ +// Copyright (c) Abstract Machines +// SPDX-License-Identifier: Apache-2.0 + +package smpp + +import ( + "time" + + "github.com/absmach/magistrala/consumers/notifiers" + "github.com/absmach/magistrala/pkg/messaging" + "github.com/absmach/magistrala/pkg/transformers" + "github.com/absmach/magistrala/pkg/transformers/json" + "github.com/fiorix/go-smpp/smpp" + "github.com/fiorix/go-smpp/smpp/pdu/pdufield" + "github.com/fiorix/go-smpp/smpp/pdu/pdutext" +) + +var _ notifiers.Notifier = (*notifier)(nil) + +type notifier struct { + transmitter *smpp.Transmitter + transformer transformers.Transformer + sourceAddrTON uint8 + sourceAddrNPI uint8 + destAddrTON uint8 + destAddrNPI uint8 +} + +// New instantiates an SMPP message notifier. +func New(cfg Config) notifiers.Notifier { + t := &smpp.Transmitter{ + Addr: cfg.Address, + User: cfg.Username, + Passwd: cfg.Password, + SystemType: cfg.SystemType, + RespTimeout: 3 * time.Second, + } + t.Bind() + ret := &notifier{ + transmitter: t, + transformer: json.New([]json.TimeField{}), + sourceAddrTON: cfg.SourceAddrTON, + destAddrTON: cfg.DestAddrTON, + sourceAddrNPI: cfg.SourceAddrNPI, + destAddrNPI: cfg.DestAddrNPI, + } + return ret +} + +func (n *notifier) Notify(from string, to []string, msg *messaging.Message) error { + send := &smpp.ShortMessage{ + Src: from, + DstList: to, + Validity: 10 * time.Minute, + SourceAddrTON: n.sourceAddrTON, + DestAddrTON: n.destAddrTON, + SourceAddrNPI: n.sourceAddrNPI, + DestAddrNPI: n.destAddrNPI, + Text: pdutext.Raw(msg.GetPayload()), + Register: pdufield.NoDeliveryReceipt, + } + _, err := n.transmitter.Submit(send) + if err != nil { + return err + } + return nil +} diff --git a/consumers/notifiers/smtp/README.md b/consumers/notifiers/smtp/README.md new file mode 100644 index 0000000..ce0c64f --- /dev/null +++ b/consumers/notifiers/smtp/README.md @@ -0,0 +1,51 @@ +# SMTP Notifier + +SMTP Notifier implements a notifier for sending email notifications over SMTP. + +## Configuration + +The Subscription service using SMTP Notifier is configured using the environment variables presented in the +following table. Note that any unset variables will be replaced with their +default values.
+ +| Variable | Description | Default | +| --------------------------------- | ------------------------------------------------------------------------ | ------------------------------ | +| MG_SMTP_NOTIFIER_LOG_LEVEL | Log level for SMTP Notifier (debug, info, warn, error) | info | +| MG_SMTP_NOTIFIER_FROM_ADDRESS | From address for SMTP notifications | | +| MG_SMTP_NOTIFIER_CONFIG_PATH | Path to the config file with message broker subjects configuration | /config.toml | +| MG_SMTP_NOTIFIER_HTTP_HOST | SMTP Notifier service HTTP host | localhost | +| MG_SMTP_NOTIFIER_HTTP_PORT | SMTP Notifier service HTTP port | 9015 | +| MG_SMTP_NOTIFIER_HTTP_SERVER_CERT | SMTP Notifier service HTTP server certificate path | "" | +| MG_SMTP_NOTIFIER_HTTP_SERVER_KEY | SMTP Notifier service HTTP server key | "" | +| MG_SMTP_NOTIFIER_DB_HOST | Database host address | localhost | +| MG_SMTP_NOTIFIER_DB_PORT | Database host port | 5432 | +| MG_SMTP_NOTIFIER_DB_USER | Database user | magistrala | +| MG_SMTP_NOTIFIER_DB_PASS | Database password | magistrala | +| MG_SMTP_NOTIFIER_DB_NAME | Name of the database used by the service | subscriptions | +| MG_SMTP_NOTIFIER_DB_SSL_MODE | Database connection SSL mode (disable, require, verify-ca, verify-full) | disable | +| MG_SMTP_NOTIFIER_DB_SSL_CERT | Path to the PEM encoded cert file | "" | +| MG_SMTP_NOTIFIER_DB_SSL_KEY | Path to the PEM encoded certificate key | "" | +| MG_SMTP_NOTIFIER_DB_SSL_ROOT_CERT | Path to the PEM encoded root certificate file | "" | +| MG_JAEGER_URL | Jaeger server URL | http://jaeger:14268/api/traces | +| MG_MESSAGE_BROKER_URL | Message broker URL | nats://127.0.0.1:4222 | +| MG_EMAIL_HOST | Mail server host | localhost | +| MG_EMAIL_PORT | Mail server port | 25 | +| MG_EMAIL_USERNAME | Mail server username | | +| MG_EMAIL_PASSWORD | Mail server password | | +| MG_EMAIL_FROM_ADDRESS | Email "from" address | | +| MG_EMAIL_FROM_NAME | Email "from" name | | +| MG_EMAIL_TEMPLATE | Email template for sending notification emails | email.tmpl | +| MG_AUTH_GRPC_URL | Auth service gRPC URL | localhost:7001 | +| MG_AUTH_GRPC_TIMEOUT | Auth service gRPC request timeout in seconds | 1s | +| MG_AUTH_GRPC_CLIENT_TLS | Auth service gRPC TLS flag | false | +| MG_AUTH_GRPC_CA_CERT | Path to Auth service CA cert in pem format | "" | +| MG_AUTH_CLIENT_TLS | Auth client TLS flag | false | +| MG_AUTH_CA_CERTS | Path to Auth client CA certs in pem format | "" | +| MG_SEND_TELEMETRY | Send telemetry to magistrala call home server | true | +| MG_SMTP_NOTIFIER_INSTANCE_ID | SMTP Notifier instance ID | "" | + +## Usage + +Starting the service will start consuming messages and sending emails when a message is received. + +[doc]: https://docs.magistrala.abstractmachines.fr diff --git a/consumers/notifiers/smtp/doc.go b/consumers/notifiers/smtp/doc.go new file mode 100644 index 0000000..0acc090 --- /dev/null +++ b/consumers/notifiers/smtp/doc.go @@ -0,0 +1,6 @@ +// Copyright (c) Abstract Machines +// SPDX-License-Identifier: Apache-2.0 + +// Package smtp contains the domain concept definitions needed to +// support Magistrala SMTP notifications.
+package smtp diff --git a/consumers/notifiers/smtp/notifier.go b/consumers/notifiers/smtp/notifier.go new file mode 100644 index 0000000..cbc1b6f --- /dev/null +++ b/consumers/notifiers/smtp/notifier.go @@ -0,0 +1,40 @@ +// Copyright (c) Abstract Machines +// SPDX-License-Identifier: Apache-2.0 + +package smtp + +import ( + "fmt" + + "github.com/absmach/magistrala/consumers/notifiers" + "github.com/absmach/magistrala/pkg/messaging" + "github.com/absmach/mg-contrib/pkg/email" +) + +const ( + footer = "Sent by Magistrala SMTP Notification" + contentTemplate = "A publisher with an id %s sent the message over %s with the following values \n %s" +) + +var _ notifiers.Notifier = (*notifier)(nil) + +type notifier struct { + agent *email.Agent +} + +// New instantiates an SMTP message notifier. +func New(agent *email.Agent) notifiers.Notifier { + return &notifier{agent: agent} +} + +func (n *notifier) Notify(from string, to []string, msg *messaging.Message) error { + subject := fmt.Sprintf(`Notification for Channel %s`, msg.GetChannel()) + if msg.GetSubtopic() != "" { + subject = fmt.Sprintf("%s and subtopic %s", subject, msg.GetSubtopic()) + } + + values := string(msg.GetPayload()) + content := fmt.Sprintf(contentTemplate, msg.GetPublisher(), msg.GetProtocol(), values) + + return n.agent.Send(to, from, subject, "", "", content, footer) +} diff --git a/consumers/notifiers/subscriptions.go b/consumers/notifiers/subscriptions.go new file mode 100644 index 0000000..dcaf4eb --- /dev/null +++ b/consumers/notifiers/subscriptions.go @@ -0,0 +1,48 @@ +// Copyright (c) Abstract Machines +// SPDX-License-Identifier: Apache-2.0 + +package notifiers + +import "context" + +// Subscription represents a user Subscription. +type Subscription struct { + ID string + OwnerID string + Contact string + Topic string +} + +// Page represents page metadata with content. +type Page struct { + PageMetadata + Total uint + Subscriptions []Subscription +} + +// PageMetadata contains page metadata that helps navigation. +type PageMetadata struct { + Offset uint + // Limit values less than 0 indicate no limit. + Limit int + Topic string + Contact string +} + +// SubscriptionsRepository specifies a Subscription persistence API. + +//go:generate mockery --name SubscriptionsRepository --output=./mocks --filename repository.go --quiet --note "Copyright (c) Abstract Machines" +type SubscriptionsRepository interface { + // Save persists a subscription. Successful operation is indicated by a nil + // error response. + Save(ctx context.Context, sub Subscription) (string, error) + + // Retrieve retrieves the subscription for the given id. + Retrieve(ctx context.Context, id string) (Subscription, error) + + // RetrieveAll retrieves all the subscriptions for the given page metadata. + RetrieveAll(ctx context.Context, pm PageMetadata) (Page, error) + + // Remove removes the subscription for the given ID. + Remove(ctx context.Context, id string) error +} diff --git a/consumers/notifiers/tracing/doc.go b/consumers/notifiers/tracing/doc.go new file mode 100644 index 0000000..2d65dbe --- /dev/null +++ b/consumers/notifiers/tracing/doc.go @@ -0,0 +1,12 @@ +// Copyright (c) Abstract Machines +// SPDX-License-Identifier: Apache-2.0 + +// Package tracing provides tracing instrumentation for the Magistrala notifiers service. +// +// This package provides tracing middleware for the Magistrala notifiers service. +// It can be used to trace incoming requests and add tracing capabilities to +// the Magistrala notifiers service.
+// +// For more details about tracing instrumentation for Magistrala messaging refer +// to the documentation at https://docs.magistrala.abstractmachines.fr/tracing/. +package tracing diff --git a/consumers/notifiers/tracing/subscriptions.go b/consumers/notifiers/tracing/subscriptions.go new file mode 100644 index 0000000..c8c2920 --- /dev/null +++ b/consumers/notifiers/tracing/subscriptions.go @@ -0,0 +1,73 @@ +// Copyright (c) Abstract Machines +// SPDX-License-Identifier: Apache-2.0 + +// Package tracing contains middlewares that will add spans +// to existing traces. +package tracing + +import ( + "context" + + "github.com/absmach/magistrala/consumers/notifiers" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/trace" +) + +const ( + saveOp = "save_op" + retrieveOp = "retrieve_op" + retrieveAllOp = "retrieve_all_op" + removeOp = "remove_op" +) + +var _ notifiers.SubscriptionsRepository = (*subRepositoryMiddleware)(nil) + +type subRepositoryMiddleware struct { + tracer trace.Tracer + repo notifiers.SubscriptionsRepository +} + +// New instantiates a new Subscriptions repository that +// tracks requests and their latency, and adds spans to context. +func New(tracer trace.Tracer, repo notifiers.SubscriptionsRepository) notifiers.SubscriptionsRepository { + return subRepositoryMiddleware{ + tracer: tracer, + repo: repo, + } +} + +// Save traces the "Save" operation of the wrapped Subscriptions repository. +func (urm subRepositoryMiddleware) Save(ctx context.Context, sub notifiers.Subscription) (string, error) { + ctx, span := urm.tracer.Start(ctx, saveOp, trace.WithAttributes( + attribute.String("id", sub.ID), + attribute.String("contact", sub.Contact), + attribute.String("topic", sub.Topic), + )) + defer span.End() + + return urm.repo.Save(ctx, sub) +} + +// Retrieve traces the "Retrieve" operation of the wrapped Subscriptions repository. +func (urm subRepositoryMiddleware) Retrieve(ctx context.Context, id string) (notifiers.Subscription, error) { + ctx, span := urm.tracer.Start(ctx, retrieveOp, trace.WithAttributes(attribute.String("id", id))) + defer span.End() + + return urm.repo.Retrieve(ctx, id) +} + +// RetrieveAll traces the "RetrieveAll" operation of the wrapped Subscriptions repository. +func (urm subRepositoryMiddleware) RetrieveAll(ctx context.Context, pm notifiers.PageMetadata) (notifiers.Page, error) { + ctx, span := urm.tracer.Start(ctx, retrieveAllOp) + defer span.End() + + return urm.repo.RetrieveAll(ctx, pm) +} + +// Remove traces the "Remove" operation of the wrapped Subscriptions repository. +func (urm subRepositoryMiddleware) Remove(ctx context.Context, id string) error { + ctx, span := urm.tracer.Start(ctx, removeOp, trace.WithAttributes(attribute.String("id", id))) + defer span.End() + + return urm.repo.Remove(ctx, id) +} diff --git a/consumers/tracing/consumers.go b/consumers/tracing/consumers.go new file mode 100644 index 0000000..c9cb362 --- /dev/null +++ b/consumers/tracing/consumers.go @@ -0,0 +1,132 @@ +// Copyright (c) Abstract Machines +// SPDX-License-Identifier: Apache-2.0 + +package tracing + +import ( + "context" + "fmt" + + "github.com/absmach/magistrala/consumers" + "github.com/absmach/magistrala/pkg/server" + mgjson "github.com/absmach/magistrala/pkg/transformers/json" + "github.com/absmach/magistrala/pkg/transformers/senml" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/trace" +) + +const ( + consumeBlockingOP = "retrieve_blocking" // This is not specified in the open telemetry spec.
+ consumeAsyncOP = "retrieve_async" // This is not specified in the open telemetry spec. +) + +var defaultAttributes = []attribute.KeyValue{ + attribute.String("messaging.system", "nats"), + attribute.Bool("messaging.destination.anonymous", false), + attribute.String("messaging.destination.template", "channels/{channelID}/messages/*"), + attribute.Bool("messaging.destination.temporary", true), + attribute.String("network.protocol.name", "nats"), + attribute.String("network.protocol.version", "2.2.4"), + attribute.String("network.transport", "tcp"), + attribute.String("network.type", "ipv4"), +} + +var ( + _ consumers.AsyncConsumer = (*tracingMiddlewareAsync)(nil) + _ consumers.BlockingConsumer = (*tracingMiddlewareBlock)(nil) +) + +type tracingMiddlewareAsync struct { + consumer consumers.AsyncConsumer + tracer trace.Tracer + host server.Config +} +type tracingMiddlewareBlock struct { + consumer consumers.BlockingConsumer + tracer trace.Tracer + host server.Config +} + +// NewAsync creates a new traced consumers.AsyncConsumer service. +func NewAsync(tracer trace.Tracer, consumerAsync consumers.AsyncConsumer, host server.Config) consumers.AsyncConsumer { + return &tracingMiddlewareAsync{ + consumer: consumerAsync, + tracer: tracer, + host: host, + } +} + +// NewBlocking creates a new traced consumers.BlockingConsumer service. +func NewBlocking(tracer trace.Tracer, consumerBlock consumers.BlockingConsumer, host server.Config) consumers.BlockingConsumer { + return &tracingMiddlewareBlock{ + consumer: consumerBlock, + tracer: tracer, + host: host, + } +} + +// ConsumeBlocking traces consume operations for message/s consumed. +func (tm *tracingMiddlewareBlock) ConsumeBlocking(ctx context.Context, messages interface{}) error { + var span trace.Span + switch m := messages.(type) { + case mgjson.Messages: + if len(m.Data) > 0 { + firstMsg := m.Data[0] + ctx, span = createSpan(ctx, consumeBlockingOP, firstMsg.Publisher, firstMsg.Channel, firstMsg.Subtopic, len(m.Data), tm.host, trace.SpanKindConsumer, tm.tracer) + defer span.End() + } + case []senml.Message: + if len(m) > 0 { + firstMsg := m[0] + ctx, span = createSpan(ctx, consumeBlockingOP, firstMsg.Publisher, firstMsg.Channel, firstMsg.Subtopic, len(m), tm.host, trace.SpanKindConsumer, tm.tracer) + defer span.End() + } + } + return tm.consumer.ConsumeBlocking(ctx, messages) +} + +// ConsumeAsync traces consume operations for message/s consumed. +func (tm *tracingMiddlewareAsync) ConsumeAsync(ctx context.Context, messages interface{}) { + var span trace.Span + switch m := messages.(type) { + case mgjson.Messages: + if len(m.Data) > 0 { + firstMsg := m.Data[0] + ctx, span = createSpan(ctx, consumeAsyncOP, firstMsg.Publisher, firstMsg.Channel, firstMsg.Subtopic, len(m.Data), tm.host, trace.SpanKindConsumer, tm.tracer) + defer span.End() + } + case []senml.Message: + if len(m) > 0 { + firstMsg := m[0] + ctx, span = createSpan(ctx, consumeAsyncOP, firstMsg.Publisher, firstMsg.Channel, firstMsg.Subtopic, len(m), tm.host, trace.SpanKindConsumer, tm.tracer) + defer span.End() + } + } + tm.consumer.ConsumeAsync(ctx, messages) +} + +// Errors traces async consume errors. 
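+// No additional span is created here; the middleware simply exposes the
+// wrapped consumer's error channel.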
+func (tm *tracingMiddlewareAsync) Errors() <-chan error {
+	return tm.consumer.Errors()
+}
+
+func createSpan(ctx context.Context, operation, clientID, topic, subTopic string, noMessages int, cfg server.Config, spanKind trace.SpanKind, tracer trace.Tracer) (context.Context, trace.Span) {
+	subject := fmt.Sprintf("channels.%s.messages", topic)
+	if subTopic != "" {
+		subject = fmt.Sprintf("%s.%s", subject, subTopic)
+	}
+	spanName := fmt.Sprintf("%s %s", subject, operation)
+
+	kvOpts := []attribute.KeyValue{
+		attribute.String("messaging.operation", operation),
+		attribute.String("messaging.client_id", clientID),
+		attribute.String("messaging.destination.name", subject),
+		attribute.String("server.address", cfg.Host),
+		attribute.String("server.socket.port", cfg.Port),
+		attribute.Int("messaging.batch.message_count", noMessages),
+	}
+
+	kvOpts = append(kvOpts, defaultAttributes...)
+
+	return tracer.Start(ctx, spanName, trace.WithAttributes(kvOpts...), trace.WithSpanKind(spanKind))
+}
diff --git a/consumers/writers/README.md b/consumers/writers/README.md
new file mode 100644
index 0000000..3bfd0e6
--- /dev/null
+++ b/consumers/writers/README.md
@@ -0,0 +1,16 @@
+# Writers
+
+Writers provide implementations of various `message writers`.
+Message writers are services that normalize (to the `SenML` format)
+Magistrala messages and store them in a specific data store.
+
+Writers are optional services and are treated as plugins. In order to
+run writer services, the core services must be up and running. For more info
+on the platform core services and their dependencies, please check out
+the [Docker Compose][compose] file.
+
+For an in-depth explanation of the usage of `writers`, as well as a thorough
+understanding of Magistrala, please check out the [official documentation][doc].
+
+[doc]: https://docs.magistrala.abstractmachines.fr
+[compose]: ../docker/docker-compose.yml
diff --git a/consumers/writers/api/doc.go b/consumers/writers/api/doc.go
new file mode 100644
index 0000000..2424852
--- /dev/null
+++ b/consumers/writers/api/doc.go
@@ -0,0 +1,6 @@
+// Copyright (c) Abstract Machines
+// SPDX-License-Identifier: Apache-2.0
+
+// Package api contains API-related concerns: endpoint definitions, middlewares
+// and all resource representations.
+package api
diff --git a/consumers/writers/api/logging.go b/consumers/writers/api/logging.go
new file mode 100644
index 0000000..77e5f91
--- /dev/null
+++ b/consumers/writers/api/logging.go
@@ -0,0 +1,47 @@
+// Copyright (c) Abstract Machines
+// SPDX-License-Identifier: Apache-2.0
+
+//go:build !test
+
+package api
+
+import (
+	"context"
+	"log/slog"
+	"time"
+
+	"github.com/absmach/magistrala/consumers"
+)
+
+var _ consumers.BlockingConsumer = (*loggingMiddleware)(nil)
+
+type loggingMiddleware struct {
+	logger   *slog.Logger
+	consumer consumers.BlockingConsumer
+}
+
+// LoggingMiddleware adds logging facilities to the adapter.
+func LoggingMiddleware(consumer consumers.BlockingConsumer, logger *slog.Logger) consumers.BlockingConsumer {
+	return &loggingMiddleware{
+		logger:   logger,
+		consumer: consumer,
+	}
+}
+
+// ConsumeBlocking logs the consume request. It logs the time it took to complete the request.
+// If the request fails, it logs the error.
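+// The wrapped consumer's error is returned unchanged, so failures remain
+// visible to the caller as well as in the logs.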
+func (lm *loggingMiddleware) ConsumeBlocking(ctx context.Context, msgs interface{}) (err error) {
+	defer func(begin time.Time) {
+		args := []any{
+			slog.String("duration", time.Since(begin).String()),
+		}
+		if err != nil {
+			args = append(args, slog.Any("error", err))
+			lm.logger.Warn("Blocking consumer failed to consume messages successfully", args...)
+			return
+		}
+		lm.logger.Info("Blocking consumer consumed messages successfully", args...)
+	}(time.Now())
+
+	return lm.consumer.ConsumeBlocking(ctx, msgs)
+}
diff --git a/consumers/writers/api/metrics.go b/consumers/writers/api/metrics.go
new file mode 100644
index 0000000..29dfb2f
--- /dev/null
+++ b/consumers/writers/api/metrics.go
@@ -0,0 +1,41 @@
+// Copyright (c) Abstract Machines
+// SPDX-License-Identifier: Apache-2.0
+
+//go:build !test
+
+package api
+
+import (
+	"context"
+	"time"
+
+	"github.com/absmach/magistrala/consumers"
+	"github.com/go-kit/kit/metrics"
+)
+
+var _ consumers.BlockingConsumer = (*metricsMiddleware)(nil)
+
+type metricsMiddleware struct {
+	counter  metrics.Counter
+	latency  metrics.Histogram
+	consumer consumers.BlockingConsumer
+}
+
+// MetricsMiddleware returns a new message consumer
+// with the ConsumeBlocking method wrapped to expose metrics.
+func MetricsMiddleware(consumer consumers.BlockingConsumer, counter metrics.Counter, latency metrics.Histogram) consumers.BlockingConsumer {
+	return &metricsMiddleware{
+		counter:  counter,
+		latency:  latency,
+		consumer: consumer,
+	}
+}
+
+// ConsumeBlocking instruments the ConsumeBlocking method with metrics.
+func (mm *metricsMiddleware) ConsumeBlocking(ctx context.Context, msgs interface{}) error {
+	defer func(begin time.Time) {
+		mm.counter.With("method", "consume").Add(1)
+		mm.latency.With("method", "consume").Observe(time.Since(begin).Seconds())
+	}(time.Now())
+	return mm.consumer.ConsumeBlocking(ctx, msgs)
+}
diff --git a/consumers/writers/api/transport.go b/consumers/writers/api/transport.go
new file mode 100644
index 0000000..3c2fa5d
--- /dev/null
+++ b/consumers/writers/api/transport.go
@@ -0,0 +1,21 @@
+// Copyright (c) Abstract Machines
+// SPDX-License-Identifier: Apache-2.0
+
+package api
+
+import (
+	"net/http"
+
+	"github.com/absmach/magistrala"
+	"github.com/go-chi/chi/v5"
+	"github.com/prometheus/client_golang/prometheus/promhttp"
+)
+
+// MakeHandler returns an HTTP API handler with health check and metrics.
+func MakeHandler(svcName, instanceID string) http.Handler {
+	r := chi.NewRouter()
+	r.Get("/health", magistrala.Health(svcName, instanceID))
+	r.Handle("/metrics", promhttp.Handler())
+
+	return r
+}
diff --git a/consumers/writers/cassandra/README.md b/consumers/writers/cassandra/README.md
new file mode 100644
index 0000000..9862ef4
--- /dev/null
+++ b/consumers/writers/cassandra/README.md
@@ -0,0 +1,81 @@
+# Cassandra writer
+
+Cassandra writer provides a message repository implementation for Cassandra.
+
+## Configuration
+
+The service is configured using the environment variables presented in the
+following table. Note that any unset variables will be replaced with their
+default values.
+
+| Variable                             | Description                                                              | Default                        |
+| ------------------------------------ | ------------------------------------------------------------------------ | ------------------------------ |
+| MG_CASSANDRA_WRITER_LOG_LEVEL        | Log level for Cassandra writer (debug, info, warn, error)                | info                           |
+| MG_CASSANDRA_WRITER_CONFIG_PATH      | Config file path with NATS subjects list, payload type and content-type  | /config.toml                   |
+| MG_CASSANDRA_WRITER_HTTP_HOST        | Cassandra service HTTP host                                              |                                |
+| MG_CASSANDRA_WRITER_HTTP_PORT        | Cassandra service HTTP port                                              | 9004                           |
+| MG_CASSANDRA_WRITER_HTTP_SERVER_CERT | Cassandra service HTTP server certificate path                           |                                |
+| MG_CASSANDRA_WRITER_HTTP_SERVER_KEY  | Cassandra service HTTP server key path                                   |                                |
+| MG_CASSANDRA_CLUSTER                 | Cassandra cluster comma separated addresses                              | 127.0.0.1                      |
+| MG_CASSANDRA_KEYSPACE                | Cassandra keyspace name                                                  | magistrala                     |
+| MG_CASSANDRA_USER                    | Cassandra DB username                                                    | magistrala                     |
+| MG_CASSANDRA_PASS                    | Cassandra DB password                                                    | magistrala                     |
+| MG_CASSANDRA_PORT                    | Cassandra DB port                                                        | 9042                           |
+| MG_MESSAGE_BROKER_URL                | Message broker instance URL                                              | nats://localhost:4222          |
+| MG_JAEGER_URL                        | Jaeger server URL                                                        | http://jaeger:14268/api/traces |
+| MG_SEND_TELEMETRY                    | Send telemetry to magistrala call home server                            | true                           |
+| MG_CASSANDRA_WRITER_INSTANCE_ID      | Cassandra writer instance ID                                             |                                |
+
+## Deployment
+
+The service itself is distributed as a Docker container. Check the [`cassandra-writer`](https://github.com/absmach/magistrala/blob/main/docker/addons/cassandra-writer/docker-compose.yml#L30-L49) service section in the docker-compose file to see how the service is deployed.
+
+To start the service, execute the following shell script:
+
+```bash
+# download the latest version of the service
+git clone https://github.com/absmach/magistrala
+
+cd magistrala
+
+# compile the cassandra writer
+make cassandra-writer
+
+# copy binary to bin
+make install
+
+# Set the environment variables and run the service
+MG_CASSANDRA_WRITER_LOG_LEVEL=[Cassandra writer log level] \
+MG_CASSANDRA_WRITER_CONFIG_PATH=[Config file path with NATS subjects list, payload type and content-type] \
+MG_CASSANDRA_WRITER_HTTP_HOST=[Cassandra service HTTP host] \
+MG_CASSANDRA_WRITER_HTTP_PORT=[Cassandra service HTTP port] \
+MG_CASSANDRA_WRITER_HTTP_SERVER_CERT=[Cassandra service HTTP server cert] \
+MG_CASSANDRA_WRITER_HTTP_SERVER_KEY=[Cassandra service HTTP server key] \
+MG_CASSANDRA_CLUSTER=[Cassandra cluster comma separated addresses] \
+MG_CASSANDRA_KEYSPACE=[Cassandra keyspace name] \
+MG_CASSANDRA_USER=[Cassandra DB username] \
+MG_CASSANDRA_PASS=[Cassandra DB password] \
+MG_CASSANDRA_PORT=[Cassandra DB port] \
+MG_MESSAGE_BROKER_URL=[Message Broker instance URL] \
+MG_JAEGER_URL=[Jaeger server URL] \
+MG_SEND_TELEMETRY=[Send telemetry to magistrala call home server] \
+MG_CASSANDRA_WRITER_INSTANCE_ID=[Cassandra writer instance ID] \
+$GOBIN/magistrala-cassandra-writer
+```
+
+### Using docker-compose
+
+This service can be deployed using docker containers. The docker-compose file is
+available in `<project_root>/docker/addons/cassandra-writer/docker-compose.yml`.
+In order to run all Magistrala core services, as well as the mentioned optional ones,
+execute the following command:
+
+```bash
+./docker/addons/cassandra-writer/init.sh
+```
+
+## Usage
+
+Starting the service will start consuming normalized messages in SenML format; a short
+wiring sketch follows below. For more details, check out the [official documentation][doc].
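+
+The following is a rough sketch (not part of this patch) of how the writer can be
+wired from Go code; the cluster address and keyspace are illustrative values, and
+the import paths follow the test files in this patch:
+
+```go
+package main
+
+import (
+	"context"
+	"log"
+
+	"github.com/absmach/magistrala/pkg/transformers/senml"
+	"github.com/absmach/mg-contrib/consumers/writers/cassandra"
+	"github.com/gocql/gocql"
+)
+
+func main() {
+	// Illustrative connection values; match them to your deployment.
+	cluster := gocql.NewCluster("127.0.0.1")
+	cluster.Keyspace = "magistrala"
+
+	session, err := cluster.CreateSession()
+	if err != nil {
+		log.Fatal(err)
+	}
+	defer session.Close()
+
+	// The writer implements the consumers.BlockingConsumer interface.
+	repo := cassandra.New(session)
+
+	v := 23.5
+	msgs := []senml.Message{{Channel: "chan-1", Publisher: "pub-1", Protocol: "mqtt", Value: &v}}
+	if err := repo.ConsumeBlocking(context.Background(), msgs); err != nil {
+		log.Fatal(err)
+	}
+}
+```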
+ +[doc]: https://docs.magistrala.abstractmachines.fr diff --git a/consumers/writers/cassandra/consumer.go b/consumers/writers/cassandra/consumer.go new file mode 100644 index 0000000..cd35e98 --- /dev/null +++ b/consumers/writers/cassandra/consumer.go @@ -0,0 +1,102 @@ +// Copyright (c) Abstract Machines +// SPDX-License-Identifier: Apache-2.0 + +package cassandra + +import ( + "context" + "encoding/json" + "fmt" + + "github.com/absmach/magistrala/consumers" + "github.com/absmach/magistrala/pkg/errors" + mgjson "github.com/absmach/magistrala/pkg/transformers/json" + "github.com/absmach/magistrala/pkg/transformers/senml" + "github.com/gocql/gocql" +) + +var ( + errSaveMessage = errors.New("failed to save message to cassandra database") + errNoTable = errors.New("table does not exist") +) +var _ consumers.BlockingConsumer = (*cassandraRepository)(nil) + +type cassandraRepository struct { + session *gocql.Session +} + +// New instantiates Cassandra message repository. +func New(session *gocql.Session) consumers.BlockingConsumer { + return &cassandraRepository{session} +} + +func (cr *cassandraRepository) ConsumeBlocking(_ context.Context, message interface{}) error { + switch m := message.(type) { + case mgjson.Messages: + return cr.saveJSON(m) + default: + return cr.saveSenml(m) + } +} + +func (cr *cassandraRepository) saveSenml(messages interface{}) error { + msgs, ok := messages.([]senml.Message) + if !ok { + return errSaveMessage + } + cql := `INSERT INTO messages (id, channel, subtopic, publisher, protocol, + name, unit, value, string_value, bool_value, data_value, sum, + time, update_time) + VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)` + id := gocql.TimeUUID() + + for _, msg := range msgs { + err := cr.session.Query(cql, id, msg.Channel, msg.Subtopic, msg.Publisher, + msg.Protocol, msg.Name, msg.Unit, msg.Value, msg.StringValue, + msg.BoolValue, msg.DataValue, msg.Sum, msg.Time, msg.UpdateTime).Exec() + if err != nil { + return errors.Wrap(errSaveMessage, err) + } + } + + return nil +} + +func (cr *cassandraRepository) saveJSON(msgs mgjson.Messages) error { + if err := cr.insertJSON(msgs); err != nil { + if err == errNoTable { + if err := cr.createTable(msgs.Format); err != nil { + return err + } + return cr.insertJSON(msgs) + } + return err + } + return nil +} + +func (cr *cassandraRepository) insertJSON(msgs mgjson.Messages) error { + cql := `INSERT INTO %s (id, channel, created, subtopic, publisher, protocol, payload) VALUES (?, ?, ?, ?, ?, ?, ?)` + cql = fmt.Sprintf(cql, msgs.Format) + for _, msg := range msgs.Data { + pld, err := json.Marshal(msg.Payload) + if err != nil { + return err + } + id := gocql.TimeUUID() + + err = cr.session.Query(cql, id, msg.Channel, msg.Created, msg.Subtopic, msg.Publisher, msg.Protocol, string(pld)).Exec() + if err != nil { + if err.Error() == fmt.Sprintf("unconfigured table %s", msgs.Format) { + return errNoTable + } + return errors.Wrap(errSaveMessage, err) + } + } + return nil +} + +func (cr *cassandraRepository) createTable(name string) error { + q := fmt.Sprintf(jsonTable, name) + return cr.session.Query(q).Exec() +} diff --git a/consumers/writers/cassandra/consumer_test.go b/consumers/writers/cassandra/consumer_test.go new file mode 100644 index 0000000..9f09b74 --- /dev/null +++ b/consumers/writers/cassandra/consumer_test.go @@ -0,0 +1,122 @@ +// Copyright (c) Abstract Machines +// SPDX-License-Identifier: Apache-2.0 + +package cassandra_test + +import ( + "context" + "fmt" + "testing" + "time" + + 
"github.com/absmach/magistrala/pkg/transformers/json" + "github.com/absmach/magistrala/pkg/transformers/senml" + "github.com/absmach/mg-contrib/consumers/writers/cassandra" + casclient "github.com/absmach/mg-contrib/pkg/clients/cassandra" + "github.com/gofrs/uuid" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +const ( + keyspace = "magistrala" + msgsNum = 42 + valueFields = 5 + subtopic = "topic" +) + +var addr = "localhost" + +var ( + v float64 = 5 + stringV = "value" + boolV = true + dataV = "base64" + sum float64 = 42 +) + +func TestSaveSenml(t *testing.T) { + session, err := casclient.Connect(casclient.Config{ + Hosts: []string{addr}, + Keyspace: keyspace, + }) + require.Nil(t, err, fmt.Sprintf("failed to connect to Cassandra: %s", err)) + err = casclient.InitDB(session, cassandra.Table) + require.Nil(t, err, fmt.Sprintf("failed to initialize to Cassandra: %s", err)) + repo := cassandra.New(session) + now := time.Now().Unix() + msg := senml.Message{ + Channel: "1", + Publisher: "1", + Protocol: "mqtt", + } + var msgs []senml.Message + + for i := 0; i < msgsNum; i++ { + // Mix possible values as well as value sum. + count := i % valueFields + switch count { + case 0: + msg.Subtopic = subtopic + msg.Value = &v + case 1: + msg.BoolValue = &boolV + case 2: + msg.StringValue = &stringV + case 3: + msg.DataValue = &dataV + case 4: + msg.Sum = &sum + } + + msg.Time = float64(now + int64(i)) + msgs = append(msgs, msg) + } + + err = repo.ConsumeBlocking(context.TODO(), msgs) + assert.Nil(t, err, fmt.Sprintf("expected no error, got %s", err)) +} + +func TestSaveJSON(t *testing.T) { + session, err := casclient.Connect(casclient.Config{ + Hosts: []string{addr}, + Keyspace: keyspace, + }) + require.Nil(t, err, fmt.Sprintf("failed to connect to Cassandra: %s", err)) + repo := cassandra.New(session) + chid, err := uuid.NewV4() + require.Nil(t, err, fmt.Sprintf("got unexpected error: %s", err)) + pubid, err := uuid.NewV4() + require.Nil(t, err, fmt.Sprintf("got unexpected error: %s", err)) + + msg := json.Message{ + Channel: chid.String(), + Publisher: pubid.String(), + Created: time.Now().Unix(), + Subtopic: "subtopic/format/some_json", + Protocol: "mqtt", + Payload: map[string]interface{}{ + "field_1": 123, + "field_2": "value", + "field_3": false, + "field_4": 12.344, + "field_5": map[string]interface{}{ + "field_1": "value", + "field_2": 42, + }, + }, + } + + now := time.Now().Unix() + msgs := json.Messages{ + Format: "some_json", + } + + for i := 0; i < msgsNum; i++ { + msg.Created = now + int64(i) + msgs.Data = append(msgs.Data, msg) + } + + err = repo.ConsumeBlocking(context.TODO(), msgs) + assert.Nil(t, err, fmt.Sprintf("expected no error got %s\n", err)) +} diff --git a/consumers/writers/cassandra/doc.go b/consumers/writers/cassandra/doc.go new file mode 100644 index 0000000..0807756 --- /dev/null +++ b/consumers/writers/cassandra/doc.go @@ -0,0 +1,6 @@ +// Copyright (c) Abstract Machines +// SPDX-License-Identifier: Apache-2.0 + +// Package cassandra contains the domain concept definitions needed to +// support Magistrala Cassandra writer service. +package cassandra diff --git a/consumers/writers/cassandra/init.go b/consumers/writers/cassandra/init.go new file mode 100644 index 0000000..f70db53 --- /dev/null +++ b/consumers/writers/cassandra/init.go @@ -0,0 +1,36 @@ +// Copyright (c) Abstract Machines +// SPDX-License-Identifier: Apache-2.0 + +package cassandra + +const ( + // Table contains query for default table created in cassandra db. 
+ Table = `CREATE TABLE IF NOT EXISTS messages ( + id uuid, + channel text, + subtopic text, + publisher text, + protocol text, + name text, + unit text, + value double, + string_value text, + bool_value boolean, + data_value blob, + sum double, + time double, + update_time double, + PRIMARY KEY (publisher, time, subtopic, name) + ) WITH CLUSTERING ORDER BY (time DESC)` + + jsonTable = `CREATE TABLE IF NOT EXISTS %s ( + id uuid, + channel text, + subtopic text, + publisher text, + protocol text, + created bigint, + payload text, + PRIMARY KEY (publisher, created, subtopic) + ) WITH CLUSTERING ORDER BY (created DESC)` +) diff --git a/consumers/writers/cassandra/setup_test.go b/consumers/writers/cassandra/setup_test.go new file mode 100644 index 0000000..6846dc3 --- /dev/null +++ b/consumers/writers/cassandra/setup_test.go @@ -0,0 +1,83 @@ +// Copyright (c) Abstract Machines +// SPDX-License-Identifier: Apache-2.0 + +package cassandra_test + +import ( + "fmt" + "log" + "os" + "testing" + + mglog "github.com/absmach/magistrala/logger" + "github.com/absmach/mg-contrib/pkg/clients/cassandra" + "github.com/gocql/gocql" + "github.com/ory/dockertest/v3" + "github.com/ory/dockertest/v3/docker" +) + +var logger, _ = mglog.New(os.Stdout, "info") + +func TestMain(m *testing.M) { + pool, err := dockertest.NewPool("") + if err != nil { + logger.Error(fmt.Sprintf("Could not connect to docker: %s", err)) + } + + container, err := pool.RunWithOptions(&dockertest.RunOptions{ + Repository: "cassandra", + Tag: "3.11.16", + }, func(config *docker.HostConfig) { + config.AutoRemove = true + config.RestartPolicy = docker.RestartPolicy{Name: "no"} + }) + if err != nil { + log.Fatalf("Could not start container: %s", err) + } + + port := container.GetPort("9042/tcp") + addr = fmt.Sprintf("%s:%s", addr, port) + + if err = pool.Retry(func() error { + if err := createKeyspace([]string{addr}); err != nil { + return err + } + + session, err := cassandra.Connect(cassandra.Config{ + Hosts: []string{addr}, + Keyspace: keyspace, + }) + if err != nil { + return err + } + defer session.Close() + + return nil + }); err != nil { + logger.Error(fmt.Sprintf("Could not connect to docker: %s", err)) + } + + code := m.Run() + + if err := pool.Purge(container); err != nil { + logger.Error(fmt.Sprintf("Could not purge container: %s", err)) + } + + os.Exit(code) +} + +func createKeyspace(hosts []string) error { + cluster := gocql.NewCluster(hosts...) + cluster.Consistency = gocql.Quorum + + session, err := cluster.CreateSession() + if err != nil { + return err + } + defer session.Close() + + keyspaceCQL := fmt.Sprintf(`CREATE KEYSPACE IF NOT EXISTS %s WITH replication = + {'class':'SimpleStrategy','replication_factor':'1'}`, keyspace) + + return session.Query(keyspaceCQL).Exec() +} diff --git a/consumers/writers/doc.go b/consumers/writers/doc.go new file mode 100644 index 0000000..59e88b6 --- /dev/null +++ b/consumers/writers/doc.go @@ -0,0 +1,6 @@ +// Copyright (c) Abstract Machines +// SPDX-License-Identifier: Apache-2.0 + +// Package writers contain the domain concept definitions needed to +// support Magistrala writer services functionality. +package writers diff --git a/consumers/writers/influxdb/README.md b/consumers/writers/influxdb/README.md new file mode 100644 index 0000000..fdf30cf --- /dev/null +++ b/consumers/writers/influxdb/README.md @@ -0,0 +1,105 @@ +# InfluxDB writer + +InfluxDB writer provides message repository implementation for InfluxDB. 
+ +## Configuration + +The service is configured using the environment variables presented in the +following table. Note that any unset variables will be replaced with their +default values. + +| Variable | Description | Default | +| --------------------------------- | --------------------------------------------------------------------------------- | ------------------------------ | +| MG_INFLUX_WRITER_LOG_LEVEL | Log level for InfluxDB writer (debug, info, warn, error) | info | +| MG_INFLUX_WRITER_CONFIG_PATH | Config file path with message broker subjects list, payload type and content-type | /configs.toml | +| MG_INFLUX_WRITER_HTTP_HOST | Service HTTP host | | +| MG_INFLUX_WRITER_HTTP_PORT | Service HTTP port | 9006 | +| MG_INFLUX_WRITER_HTTP_SERVER_CERT | Path to server certificate in pem format | | +| MG_INFLUX_WRITER_HTTP_SERVER_KEY | Path to server key in pem format | | +| MG_INFLUXDB_PROTOCOL | InfluxDB protocol | http | +| MG_INFLUXDB_HOST | InfluxDB host name | magistrala-influxdb | +| MG_INFLUXDB_PORT | Default port of InfluxDB database | 8086 | +| MG_INFLUXDB_ADMIN_USER | Default user of InfluxDB database | magistrala | +| MG_INFLUXDB_ADMIN_PASSWORD | Default password of InfluxDB user | magistrala | +| MG_INFLUXDB_NAME | InfluxDB database name | magistrala | +| MG_INFLUXDB_BUCKET | InfluxDB bucket name | magistrala-bucket | +| MG_INFLUXDB_ORG | InfluxDB organization name | magistrala | +| MG_INFLUXDB_TOKEN | InfluxDB API token | magistrala-token | +| MG_INFLUXDB_DBURL | InfluxDB database URL | | +| MG_INFLUXDB_USER_AGENT | InfluxDB user agent | | +| MG_INFLUXDB_TIMEOUT | InfluxDB client connection readiness timeout | 1s | +| MG_INFLUXDB_INSECURE_SKIP_VERIFY | InfluxDB client connection insecure skip verify | false | +| MG_MESSAGE_BROKER_URL | Message broker instance URL | nats://localhost:4222 | +| MG_JAEGER_URL | Jaeger server URL | http://jaeger:14268/api/traces | +| MG_SEND_TELEMETRY | Send telemetry to magistrala call home server | true | +| MG_INFLUX_WRITER_INSTANCE_ID | InfluxDB writer instance ID | | + +## Deployment + +The service itself is distributed as Docker container. Check the [`influxdb-writer`](https://github.com/absmach/magistrala/blob/main/docker/addons/influxdb-writer/docker-compose.yml#L35-L58) service section in docker-compose file to see how service is deployed. 
+
+To start the service, execute the following shell script:
+
+```bash
+# download the latest version of the service
+git clone https://github.com/absmach/magistrala
+
+cd magistrala
+
+# compile the influxdb
+make influxdb
+
+# copy binary to bin
+make install
+
+# Set the environment variables and run the service
+MG_INFLUX_WRITER_LOG_LEVEL=[Influx writer log level] \
+MG_INFLUX_WRITER_CONFIG_PATH=[Config file path with Message broker subjects list, payload type and content-type] \
+MG_INFLUX_WRITER_HTTP_HOST=[Service HTTP host] \
+MG_INFLUX_WRITER_HTTP_PORT=[Service HTTP port] \
+MG_INFLUX_WRITER_HTTP_SERVER_CERT=[Service HTTP server cert] \
+MG_INFLUX_WRITER_HTTP_SERVER_KEY=[Service HTTP server key] \
+MG_INFLUXDB_PROTOCOL=[InfluxDB protocol] \
+MG_INFLUXDB_HOST=[InfluxDB database host] \
+MG_INFLUXDB_PORT=[InfluxDB database port] \
+MG_INFLUXDB_ADMIN_USER=[InfluxDB admin user] \
+MG_INFLUXDB_ADMIN_PASSWORD=[InfluxDB admin password] \
+MG_INFLUXDB_NAME=[InfluxDB database name] \
+MG_INFLUXDB_BUCKET=[InfluxDB bucket] \
+MG_INFLUXDB_ORG=[InfluxDB org] \
+MG_INFLUXDB_TOKEN=[InfluxDB token] \
+MG_INFLUXDB_DBURL=[InfluxDB database url] \
+MG_INFLUXDB_USER_AGENT=[InfluxDB user agent] \
+MG_INFLUXDB_TIMEOUT=[InfluxDB timeout] \
+MG_INFLUXDB_INSECURE_SKIP_VERIFY=[InfluxDB insecure skip verify] \
+MG_MESSAGE_BROKER_URL=[Message broker instance URL] \
+MG_JAEGER_URL=[Jaeger server URL] \
+MG_SEND_TELEMETRY=[Send telemetry to magistrala call home server] \
+MG_INFLUX_WRITER_INSTANCE_ID=[Influx writer instance ID] \
+$GOBIN/magistrala-influxdb
+```
+
+### Using docker-compose
+
+This service can be deployed using docker containers.
+The docker-compose file is available in `<project_root>/docker/addons/influxdb-writer/docker-compose.yml`. Besides the database
+and the writer service, it contains the InfluxData Web Admin Interface, which can be used for database
+exploration, data visualization, and analytics. In order to run the Magistrala InfluxDB writer, execute the following command:
+
+```bash
+docker compose -f docker/addons/influxdb-writer/docker-compose.yml up -d
+```
+
+To use the default .env file instead, execute the following command:
+
+```bash
+docker compose --env-file docker/.env -f docker/addons/influxdb-writer/docker-compose.yml up -d
+```
+
+_Please note that you need to start core services before the additional ones._
+
+## Usage
+
+Starting the service will start consuming normalized messages in SenML format.
+
+Official docs can be found [here](https://docs.magistrala.abstractmachines.fr).
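+
+The following is a rough sketch (not part of this patch) of driving the asynchronous
+writer from Go; the URL, token, bucket, and org are illustrative values, and the
+import paths follow the test files in this patch:
+
+```go
+package main
+
+import (
+	"context"
+	"log"
+
+	"github.com/absmach/magistrala/pkg/transformers/senml"
+	writer "github.com/absmach/mg-contrib/consumers/writers/influxdb"
+	influxdb2 "github.com/influxdata/influxdb-client-go/v2"
+)
+
+func main() {
+	// Illustrative connection values; match them to your deployment.
+	client := influxdb2.NewClient("http://localhost:8086", "magistrala-token")
+	defer client.Close()
+
+	repo := writer.NewAsync(client, writer.RepoConfig{Bucket: "magistrala-bucket", Org: "magistrala"})
+
+	// ConsumeAsync returns immediately; the outcome arrives on Errors().
+	v := 21.3
+	msgs := []senml.Message{{Channel: "chan-1", Publisher: "pub-1", Protocol: "http", Value: &v}}
+	repo.ConsumeAsync(context.Background(), msgs)
+	if err := <-repo.Errors(); err != nil {
+		log.Fatal(err)
+	}
+}
+```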
diff --git a/consumers/writers/influxdb/consumer.go b/consumers/writers/influxdb/consumer.go new file mode 100644 index 0000000..86ffa3d --- /dev/null +++ b/consumers/writers/influxdb/consumer.go @@ -0,0 +1,164 @@ +// Copyright (c) Abstract Machines +// SPDX-License-Identifier: Apache-2.0 + +package influxdb + +import ( + "context" + "math" + "time" + + "github.com/absmach/magistrala/consumers" + "github.com/absmach/magistrala/pkg/errors" + "github.com/absmach/magistrala/pkg/transformers/json" + "github.com/absmach/magistrala/pkg/transformers/senml" + influxdb2 "github.com/influxdata/influxdb-client-go/v2" + "github.com/influxdata/influxdb-client-go/v2/api" + "github.com/influxdata/influxdb-client-go/v2/api/write" +) + +const senmlPoints = "messages" + +var errSaveMessage = errors.New("failed to save message to influxdb database") + +var ( + _ consumers.AsyncConsumer = (*influxRepo)(nil) + _ consumers.BlockingConsumer = (*influxRepo)(nil) +) + +type RepoConfig struct { + Bucket string + Org string +} + +type influxRepo struct { + client influxdb2.Client + cfg RepoConfig + errCh chan error + writeAPI api.WriteAPI + writeAPIBlocking api.WriteAPIBlocking +} + +// NewSync returns new InfluxDB writer. +func NewSync(client influxdb2.Client, config RepoConfig) consumers.BlockingConsumer { + return &influxRepo{ + client: client, + cfg: config, + writeAPI: nil, + writeAPIBlocking: client.WriteAPIBlocking(config.Org, config.Bucket), + } +} + +func NewAsync(client influxdb2.Client, config RepoConfig) consumers.AsyncConsumer { + return &influxRepo{ + client: client, + cfg: config, + errCh: make(chan error, 1), + writeAPI: client.WriteAPI(config.Org, config.Bucket), + writeAPIBlocking: nil, + } +} + +func (repo *influxRepo) ConsumeAsync(_ context.Context, message interface{}) { + var err error + var pts []*write.Point + switch m := message.(type) { + case json.Messages: + pts, err = repo.jsonPoints(m) + default: + pts, err = repo.senmlPoints(m) + } + if err != nil { + repo.errCh <- err + return + } + + done := make(chan bool) + defer close(done) + + go func(done <-chan bool) { + for { + select { + case err := <-repo.writeAPI.Errors(): + repo.errCh <- err + case <-done: + repo.errCh <- nil // pass nil error to the error channel + return + } + } + }(done) + + for _, pt := range pts { + repo.writeAPI.WritePoint(pt) + } + + repo.writeAPI.Flush() +} + +func (repo *influxRepo) Errors() <-chan error { + if repo.errCh != nil { + return repo.errCh + } + + return nil +} + +func (repo *influxRepo) ConsumeBlocking(ctx context.Context, message interface{}) error { + var err error + var pts []*write.Point + switch m := message.(type) { + case json.Messages: + pts, err = repo.jsonPoints(m) + default: + pts, err = repo.senmlPoints(m) + } + if err != nil { + return err + } + + return repo.writeAPIBlocking.WritePoint(ctx, pts...) 
+} + +func (repo *influxRepo) senmlPoints(messages interface{}) ([]*write.Point, error) { + msgs, ok := messages.([]senml.Message) + if !ok { + return nil, errSaveMessage + } + var pts []*write.Point + for _, msg := range msgs { + tgs, flds := senmlTags(msg), senmlFields(msg) + + sec, dec := math.Modf(msg.Time) + t := time.Unix(int64(sec), int64(dec*(1e9))) + + pt := influxdb2.NewPoint(senmlPoints, tgs, flds, t) + pts = append(pts, pt) + } + + return pts, nil +} + +func (repo *influxRepo) jsonPoints(msgs json.Messages) ([]*write.Point, error) { + var pts []*write.Point + for i, m := range msgs.Data { + t := time.Unix(0, m.Created+int64(i)) + + flat, err := json.Flatten(m.Payload) + if err != nil { + return nil, errors.Wrap(json.ErrTransform, err) + } + m.Payload = flat + + // Copy first-level fields so that the original Payload is unchanged. + fields := make(map[string]interface{}) + for k, v := range m.Payload { + fields[k] = v + } + // At least one known field need to exist so that COUNT can be performed. + fields["protocol"] = m.Protocol + pt := influxdb2.NewPoint(msgs.Format, jsonTags(m), fields, t) + pts = append(pts, pt) + } + + return pts, nil +} diff --git a/consumers/writers/influxdb/consumer_test.go b/consumers/writers/influxdb/consumer_test.go new file mode 100644 index 0000000..4bc0d3b --- /dev/null +++ b/consumers/writers/influxdb/consumer_test.go @@ -0,0 +1,476 @@ +// Copyright (c) Abstract Machines +// SPDX-License-Identifier: Apache-2.0 + +package influxdb_test + +import ( + "context" + "fmt" + "os" + "testing" + "time" + + mglog "github.com/absmach/magistrala/logger" + "github.com/absmach/magistrala/pkg/errors" + "github.com/absmach/magistrala/pkg/transformers/json" + "github.com/absmach/magistrala/pkg/transformers/senml" + "github.com/absmach/magistrala/pkg/uuid" + writer "github.com/absmach/mg-contrib/consumers/writers/influxdb" + influxdata "github.com/influxdata/influxdb-client-go/v2" + "github.com/stretchr/testify/assert" +) + +const valueFields = 5 + +var ( + testLog, _ = mglog.New(os.Stdout, "info") + streamsSize = 250 + rowCountSenml = fmt.Sprintf(`from(bucket: "%s") + |> range(start: -1h, stop: 1h) + |> filter(fn: (r) => r["_measurement"] == "messages") + |> filter(fn: (r) => r["_field"] == "dataValue" or r["_field"] == "stringValue" or r["_field"] == "value" or r["_field"] == "boolValue" or r["_field"] == "sum" ) + |> group(columns: ["_measurement"]) + |> count() + |> yield(name: "count")`, repoCfg.Bucket) + + rowCountJSON = fmt.Sprintf(`from(bucket: "%s") + |> range(start: -1h, stop: 1h) + |> filter(fn: (r) => r["_measurement"] == "some_json") + |> filter(fn: (r) => r["_field"] == "field_1" or r["_field"] == "field_2" or r["_field"] == "field_3" or r["_field"] == "field_4" or r["_field"] == "field_5/field_1" or r["_field"] == "field_5/field_2") + |> count() + |> yield(name: "count")`, repoCfg.Bucket) + subtopic = "topic" + + client influxdata.Client + v float64 = 5 + stringV = "value" + boolV = true + dataV = "base64" + sum float64 = 42 + repoCfg = writer.RepoConfig{ + Bucket: dbBucket, + Org: dbOrg, + } + errUnexpectedType = errors.New("Unexpected response type") + + idProvider = uuid.New() +) + +func deleteBucket() error { + bucketsAPI := client.BucketsAPI() + bucket, err := bucketsAPI.FindBucketByName(context.Background(), repoCfg.Bucket) + if err != nil { + return err + } + + if err = bucketsAPI.DeleteBucket(context.Background(), bucket); err != nil { + return err + } + + return nil +} + +func createBucket() error { + orgAPI := client.OrganizationsAPI() + 
org, err := orgAPI.FindOrganizationByName(context.Background(), repoCfg.Org) + if err != nil { + return err + } + bucketsAPI := client.BucketsAPI() + if _, err = bucketsAPI.CreateBucketWithName(context.Background(), org, repoCfg.Bucket); err != nil { + return err + } + + return nil +} + +func resetBucket() error { + if err := deleteBucket(); err != nil { + return err + } + if err := createBucket(); err != nil { + return err + } + + return nil +} + +func queryDB(fluxQuery string) (int, error) { + rowCount := 0 + queryAPI := client.QueryAPI(repoCfg.Org) + + // get QueryTableResult + result, err := queryAPI.Query(context.Background(), fluxQuery) + if err != nil { + return rowCount, err + } + if result.Next() { + value, ok := result.Record().Value().(int64) + if !ok { + return rowCount, errUnexpectedType + } + rowCount = int(value) + } + if result.Err() != nil { + return rowCount, result.Err() + } + + return rowCount, nil +} + +func TestAsyncSaveSenml(t *testing.T) { + asyncRepo := writer.NewAsync(client, repoCfg) + + cases := []struct { + desc string + msgsNum int + expectedSize int + }{ + { + desc: "save a single message", + msgsNum: 1, + expectedSize: 1, + }, + { + desc: "save a batch of messages", + msgsNum: streamsSize, + expectedSize: streamsSize, + }, + } + + for _, tc := range cases { + err := resetBucket() + assert.Nil(t, err, fmt.Sprintf("Cleaning data from InfluxDB expected to succeed: %s.\n", err)) + now := time.Now().UnixNano() + var msgs []senml.Message + + chanID, err := idProvider.ID() + assert.Nil(t, err, fmt.Sprintf("got unexpected error: %s\n", err)) + pubID, err := idProvider.ID() + assert.Nil(t, err, fmt.Sprintf("got unexpected error: %s\n", err)) + for i := 0; i < tc.msgsNum; i++ { + msg := senml.Message{ + Channel: chanID, + Publisher: pubID, + Protocol: "http", + Name: "test name", + Unit: "km", + UpdateTime: 5456565466, + } + // Mix possible values as well as value sum. 
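+			// Each message gets exactly one of the five SenML value fields here,
+			// which is what the row-count Flux query later filters on.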
+ count := i % valueFields + switch count { + case 0: + msg.Subtopic = subtopic + msg.Value = &v + case 1: + msg.BoolValue = &boolV + case 2: + msg.StringValue = &stringV + case 3: + msg.DataValue = &dataV + case 4: + msg.Sum = &sum + } + + msg.Time = float64(now)/float64(1e9) - float64(i) + msgs = append(msgs, msg) + } + + errs := asyncRepo.Errors() + asyncRepo.ConsumeAsync(context.TODO(), msgs) + err = <-errs + assert.Nil(t, err, fmt.Sprintf("Save operation expected to succeed: %s.\n", err)) + + count, err := queryDB(rowCountSenml) + assert.Nil(t, err, fmt.Sprintf("Querying InfluxDB to retrieve data expected to succeed: %s.\n", err)) + assert.Equal(t, tc.expectedSize, count, fmt.Sprintf("Expected to have %d messages saved, found %d instead.\n", tc.expectedSize, count)) + } +} + +func TestBlockingSaveSenml(t *testing.T) { + syncRepo := writer.NewSync(client, repoCfg) + + cases := []struct { + desc string + msgsNum int + expectedSize int + }{ + { + desc: "save a single message", + msgsNum: 1, + expectedSize: 1, + }, + { + desc: "save a batch of messages", + msgsNum: streamsSize, + expectedSize: streamsSize, + }, + } + + for _, tc := range cases { + err := resetBucket() + assert.Nil(t, err, fmt.Sprintf("Cleaning data from InfluxDB expected to succeed: %s.\n", err)) + now := time.Now().UnixNano() + var msgs []senml.Message + + chanID, err := idProvider.ID() + assert.Nil(t, err, fmt.Sprintf("got unexpected error: %s\n", err)) + pubID, err := idProvider.ID() + assert.Nil(t, err, fmt.Sprintf("got unexpected error: %s\n", err)) + for i := 0; i < tc.msgsNum; i++ { + msg := senml.Message{ + Channel: chanID, + Publisher: pubID, + Protocol: "http", + Name: "test name", + Unit: "km", + UpdateTime: 5456565466, + } + // Mix possible values as well as value sum. 
+			count := i % valueFields
+			switch count {
+			case 0:
+				msg.Subtopic = subtopic
+				msg.Value = &v
+			case 1:
+				msg.BoolValue = &boolV
+			case 2:
+				msg.StringValue = &stringV
+			case 3:
+				msg.DataValue = &dataV
+			case 4:
+				msg.Sum = &sum
+			}
+
+			msg.Time = float64(now)/float64(1e9) - float64(i)
+			msgs = append(msgs, msg)
+		}
+
+		err = syncRepo.ConsumeBlocking(context.TODO(), msgs)
+		assert.Nil(t, err, fmt.Sprintf("Save operation expected to succeed: %s.\n", err))
+
+		count, err := queryDB(rowCountSenml)
+		assert.Nil(t, err, fmt.Sprintf("Querying InfluxDB to retrieve data expected to succeed: %s.\n", err))
+		assert.Equal(t, tc.expectedSize, count, fmt.Sprintf("Expected to have %d messages saved, found %d instead.\n", tc.expectedSize, count))
+	}
+}
+
+func TestAsyncSaveJSON(t *testing.T) {
+	asyncRepo := writer.NewAsync(client, repoCfg)
+
+	chanID, err := idProvider.ID()
+	assert.Nil(t, err, fmt.Sprintf("got unexpected error: %s", err))
+	pubID, err := idProvider.ID()
+	assert.Nil(t, err, fmt.Sprintf("got unexpected error: %s", err))
+
+	msg := json.Message{
+		Channel:   chanID,
+		Publisher: pubID,
+		Created:   time.Now().UnixNano(),
+		Subtopic:  "subtopic/format/some_json",
+		Protocol:  "mqtt",
+		Payload: map[string]interface{}{
+			"field_1": 123,
+			"field_2": "value",
+			"field_3": false,
+			"field_4": 12.344,
+			"field_5": map[string]interface{}{
+				"field_1": "value",
+				"field_2": 42,
+			},
+		},
+	}
+
+	invalidKeySepMsg := msg
+	invalidKeySepMsg.Payload = map[string]interface{}{
+		"field_1": 123,
+		"field_2": "value",
+		"field_3": false,
+		"field_4": 12.344,
+		"field_5": map[string]interface{}{
+			"field_1": "value",
+			"field_2": 42,
+		},
+		"field_6/field_7": "value",
+	}
+	invalidKeyNameMsg := msg
+	invalidKeyNameMsg.Payload = map[string]interface{}{
+		"field_1": 123,
+		"field_2": "value",
+		"field_3": false,
+		"field_4": 12.344,
+		"field_5": map[string]interface{}{
+			"field_1": "value",
+			"field_2": 42,
+		},
+		"publisher": "value",
+	}
+
+	now := time.Now().UnixNano()
+	msgs := json.Messages{
+		Format: "some_json",
+	}
+	invalidKeySepMsgs := json.Messages{
+		Format: "some_json",
+	}
+	invalidKeyNameMsgs := json.Messages{
+		Format: "some_json",
+	}
+
+	for i := 0; i < streamsSize; i++ {
+		msg.Created = now
+		msgs.Data = append(msgs.Data, msg)
+		invalidKeySepMsgs.Data = append(invalidKeySepMsgs.Data, invalidKeySepMsg)
+		invalidKeyNameMsgs.Data = append(invalidKeyNameMsgs.Data, invalidKeyNameMsg)
+	}
+
+	cases := []struct {
+		desc string
+		msgs json.Messages
+		err  error
+	}{
+		{
+			desc: "consume valid json messages",
+			msgs: msgs,
+			err:  nil,
+		},
+		{
+			desc: "consume invalid json messages containing invalid key separator",
+			msgs: invalidKeySepMsgs,
+			err:  json.ErrInvalidKey,
+		},
+		{
+			desc: "consume invalid json messages containing invalid key name",
+			msgs: invalidKeyNameMsgs,
+			err:  json.ErrInvalidKey,
+		},
+	}
+
+	for _, tc := range cases {
+		err := resetBucket()
+		assert.Nil(t, err, fmt.Sprintf("Cleaning data from InfluxDB expected to succeed: %s.\n", err))
+
+		asyncRepo.ConsumeAsync(context.TODO(), tc.msgs)
+		timer := time.NewTimer(1 * time.Millisecond)
+		select {
+		case err = <-asyncRepo.Errors():
+		case <-timer.C:
+			t.Error("errors channel blocked, nothing returned.")
+		}
+		switch err {
+		case nil:
+			count, err := queryDB(rowCountJSON)
+			assert.Nil(t, err, fmt.Sprintf("Querying InfluxDB to retrieve data expected to succeed: %s.\n", err))
+			assert.Equal(t, streamsSize, count, fmt.Sprintf("Expected to have %d messages saved, found %d instead.\n", streamsSize, count))
+		default:
+			assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("%s expected %s, got %s", tc.desc, tc.err, err))
+		}
+	}
+}
+
+func TestBlockingSaveJSON(t *testing.T) {
+	syncRepo := writer.NewSync(client, repoCfg)
+
+	chanID, err := idProvider.ID()
+	assert.Nil(t, err, fmt.Sprintf("got unexpected error: %s", err))
+	pubID, err := idProvider.ID()
+	assert.Nil(t, err, fmt.Sprintf("got unexpected error: %s", err))
+
+	msg := json.Message{
+		Channel:   chanID,
+		Publisher: pubID,
+		Created:   time.Now().UnixNano(),
+		Subtopic:  "subtopic/format/some_json",
+		Protocol:  "mqtt",
+		Payload: map[string]interface{}{
+			"field_1": 123,
+			"field_2": "value",
+			"field_3": false,
+			"field_4": 12.344,
+			"field_5": map[string]interface{}{
+				"field_1": "value",
+				"field_2": 42,
+			},
+		},
+	}
+
+	invalidKeySepMsg := msg
+	invalidKeySepMsg.Payload = map[string]interface{}{
+		"field_1": 123,
+		"field_2": "value",
+		"field_3": false,
+		"field_4": 12.344,
+		"field_5": map[string]interface{}{
+			"field_1": "value",
+			"field_2": 42,
+		},
+		"field_6/field_7": "value",
+	}
+	invalidKeyNameMsg := msg
+	invalidKeyNameMsg.Payload = map[string]interface{}{
+		"field_1": 123,
+		"field_2": "value",
+		"field_3": false,
+		"field_4": 12.344,
+		"field_5": map[string]interface{}{
+			"field_1": "value",
+			"field_2": 42,
+		},
+		"publisher": "value",
+	}
+
+	now := time.Now().UnixNano()
+	msgs := json.Messages{
+		Format: "some_json",
+	}
+	invalidKeySepMsgs := json.Messages{
+		Format: "some_json",
+	}
+	invalidKeyNameMsgs := json.Messages{
+		Format: "some_json",
+	}
+
+	for i := 0; i < streamsSize; i++ {
+		msg.Created = now
+		msgs.Data = append(msgs.Data, msg)
+		invalidKeySepMsgs.Data = append(invalidKeySepMsgs.Data, invalidKeySepMsg)
+		invalidKeyNameMsgs.Data = append(invalidKeyNameMsgs.Data, invalidKeyNameMsg)
+	}
+
+	cases := []struct {
+		desc string
+		msgs json.Messages
+		err  error
+	}{
+		{
+			desc: "consume valid json messages",
+			msgs: msgs,
+			err:  nil,
+		},
+		{
+			desc: "consume invalid json messages containing invalid key separator",
+			msgs: invalidKeySepMsgs,
+			err:  json.ErrInvalidKey,
+		},
+		{
+			desc: "consume invalid json messages containing invalid key name",
+			msgs: invalidKeyNameMsgs,
+			err:  json.ErrInvalidKey,
+		},
+	}
+
+	for _, tc := range cases {
+		err := resetBucket()
+		assert.Nil(t, err, fmt.Sprintf("Cleaning data from InfluxDB expected to succeed: %s.\n", err))
+
+		switch err = syncRepo.ConsumeBlocking(context.TODO(), tc.msgs); err {
+		case nil:
+			count, err := queryDB(rowCountJSON)
+			assert.Nil(t, err, fmt.Sprintf("Querying InfluxDB to retrieve data expected to succeed: %s.\n", err))
+			assert.Equal(t, streamsSize, count, fmt.Sprintf("Expected to have %d messages saved, found %d instead.\n", streamsSize, count))
+		default:
+			assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("%s expected %s, got %s", tc.desc, tc.err, err))
+		}
+	}
+}
diff --git a/consumers/writers/influxdb/doc.go b/consumers/writers/influxdb/doc.go
new file mode 100644
index 0000000..b660754
--- /dev/null
+++ b/consumers/writers/influxdb/doc.go
@@ -0,0 +1,6 @@
+// Copyright (c) Abstract Machines
+// SPDX-License-Identifier: Apache-2.0
+
+// Package influxdb contains the domain concept definitions needed to
+// support Magistrala InfluxDB writer service functionality.
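+//
+// The package provides both a blocking consumer (NewSync) and an asynchronous
+// one (NewAsync); see consumer.go in this package.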
+package influxdb diff --git a/consumers/writers/influxdb/fields.go b/consumers/writers/influxdb/fields.go new file mode 100644 index 0000000..4916402 --- /dev/null +++ b/consumers/writers/influxdb/fields.go @@ -0,0 +1,35 @@ +// Copyright (c) Abstract Machines +// SPDX-License-Identifier: Apache-2.0 + +package influxdb + +import ( + "github.com/absmach/magistrala/pkg/transformers/senml" +) + +type fields map[string]interface{} + +func senmlFields(msg senml.Message) fields { + ret := fields{ + "protocol": msg.Protocol, + "unit": msg.Unit, + "updateTime": msg.UpdateTime, + } + + switch { + case msg.Value != nil: + ret["value"] = *msg.Value + case msg.StringValue != nil: + ret["stringValue"] = *msg.StringValue + case msg.DataValue != nil: + ret["dataValue"] = *msg.DataValue + case msg.BoolValue != nil: + ret["boolValue"] = *msg.BoolValue + } + + if msg.Sum != nil { + ret["sum"] = *msg.Sum + } + + return ret +} diff --git a/consumers/writers/influxdb/setup_test.go b/consumers/writers/influxdb/setup_test.go new file mode 100644 index 0000000..c25f7c5 --- /dev/null +++ b/consumers/writers/influxdb/setup_test.go @@ -0,0 +1,95 @@ +// Copyright (c) Abstract Machines +// SPDX-License-Identifier: Apache-2.0 + +package influxdb_test + +import ( + "context" + "fmt" + "log" + "os" + "os/signal" + "syscall" + "testing" + "time" + + influxdata "github.com/influxdata/influxdb-client-go/v2" + "github.com/ory/dockertest/v3" + "github.com/ory/dockertest/v3/docker" +) + +const ( + dbToken = "test-token" + dbOrg = "test-org" + dbAdmin = "test-admin" + dbPass = "test-password" + dbBucket = "test-bucket" + dbInitMode = "setup" + dbFluxEnabled = "true" + dbBindAddress = ":8088" + port = "8086/tcp" + db = "influxdb" + dbVersion = "2.7-alpine" + poolMaxWait = 120 * time.Second +) + +var address string + +func TestMain(m *testing.M) { + pool, err := dockertest.NewPool("") + if err != nil { + testLog.Error(fmt.Sprintf("Could not connect to docker: %s", err)) + } + + container, err := pool.RunWithOptions(&dockertest.RunOptions{ + Repository: db, + Tag: dbVersion, + Env: []string{ + fmt.Sprintf("DOCKER_INFLUXDB_INIT_MODE=%s", dbInitMode), + fmt.Sprintf("DOCKER_INFLUXDB_INIT_USERNAME=%s", dbAdmin), + fmt.Sprintf("DOCKER_INFLUXDB_INIT_PASSWORD=%s", dbPass), + fmt.Sprintf("DOCKER_INFLUXDB_INIT_ORG=%s", dbOrg), + fmt.Sprintf("DOCKER_INFLUXDB_INIT_BUCKET=%s", dbBucket), + fmt.Sprintf("DOCKER_INFLUXDB_INIT_ADMIN_TOKEN=%s", dbToken), + fmt.Sprintf("INFLUXDB_HTTP_FLUX_ENABLED=%s", dbFluxEnabled), + fmt.Sprintf("INFLUXDB_BIND_ADDRESS=%s", dbBindAddress), + }, + }, func(config *docker.HostConfig) { + config.AutoRemove = true + config.RestartPolicy = docker.RestartPolicy{Name: "no"} + }) + if err != nil { + log.Fatalf("Could not start container: %s", err) + } + handleInterrupt(pool, container) + + address = fmt.Sprintf("%s:%s", "http://localhost", container.GetPort(port)) + pool.MaxWait = poolMaxWait + + if err := pool.Retry(func() error { + client = influxdata.NewClientWithOptions(address, dbToken, influxdata.DefaultOptions()) + _, err = client.Ready(context.Background()) + return err + }); err != nil { + testLog.Error(fmt.Sprintf("Could not connect to docker: %s", err)) + } + + code := m.Run() + if err := pool.Purge(container); err != nil { + testLog.Error(fmt.Sprintf("Could not purge container: %s", err)) + } + + os.Exit(code) +} + +func handleInterrupt(pool *dockertest.Pool, container *dockertest.Resource) { + c := make(chan os.Signal, 2) + signal.Notify(c, os.Interrupt, syscall.SIGTERM) + go func() { + <-c + if err := 
pool.Purge(container); err != nil { + log.Fatalf("Could not purge container: %s", err) + } + os.Exit(0) + }() +} diff --git a/consumers/writers/influxdb/tags.go b/consumers/writers/influxdb/tags.go new file mode 100644 index 0000000..793069b --- /dev/null +++ b/consumers/writers/influxdb/tags.go @@ -0,0 +1,28 @@ +// Copyright (c) Abstract Machines +// SPDX-License-Identifier: Apache-2.0 + +package influxdb + +import ( + "github.com/absmach/magistrala/pkg/transformers/json" + "github.com/absmach/magistrala/pkg/transformers/senml" +) + +type tags map[string]string + +func senmlTags(msg senml.Message) tags { + return tags{ + "channel": msg.Channel, + "subtopic": msg.Subtopic, + "publisher": msg.Publisher, + "name": msg.Name, + } +} + +func jsonTags(msg json.Message) tags { + return tags{ + "channel": msg.Channel, + "subtopic": msg.Subtopic, + "publisher": msg.Publisher, + } +} diff --git a/consumers/writers/mongodb/README.md b/consumers/writers/mongodb/README.md new file mode 100644 index 0000000..f9fbf36 --- /dev/null +++ b/consumers/writers/mongodb/README.md @@ -0,0 +1,65 @@ +# MongoDB writer + +MongoDB writer provides message repository implementation for MongoDB. + +## Configuration + +The service is configured using the environment variables presented in the +following table. Note that any unset variables will be replaced with their +default values. + +| Variable | Description | Default | +| -------------------------------- | --------------------------------------------------------------------------------- | ------------------------------ | +| MG_MONGO_WRITER_LOG_LEVEL | Log level for MongoDB writer | info | +| MG_MONGO_WRITER_CONFIG_PATH | Config file path with Message broker subjects list, payload type and content-type | /config.toml | +| MG_MONGO_WRITER_HTTP_HOST | Service HTTP host | localhost | +| MG_MONGO_WRITER_HTTP_PORT | Service HTTP port | 9010 | +| MG_MONGO_WRITER_HTTP_SERVER_CERT | Service HTTP server certificate path | "" | +| MG_MONGO_WRITER_HTTP_SERVER_KEY | Service HTTP server key | "" | +| MG_MONGO_NAME | Default MongoDB database name | messages | +| MG_MONGO_HOST | Default MongoDB database host | localhost | +| MG_MONGO_PORT | Default MongoDB database port | 27017 | +| MG_MESSAGE_BROKER_URL | Message broker instance URL | nats://localhost:4222 | +| MG_JAEGER_URL | Jaeger server URL | http://jaeger:14268/api/traces | +| MG_SEND_TELEMETRY | Send telemetry to magistrala call home server | true | +| MG_MONGO_WRITER_INSTANCE_ID | MongoDB writer instance ID | "" | + +## Deployment + +The service itself is distributed as Docker container. Check the [`mongodb-writer`](https://github.com/absmach/magistrala/blob/main/docker/addons/mongodb-writer/docker-compose.yml#L36-L55) service section in docker-compose file to see how service is deployed. 
+
+To start the service, execute the following shell script:
+
+```bash
+# download the latest version of the service
+git clone https://github.com/absmach/magistrala
+
+cd magistrala
+
+# compile the mongodb writer
+make mongodb-writer
+
+# copy binary to bin
+make install
+
+# Set the environment variables and run the service
+MG_MONGO_WRITER_LOG_LEVEL=[MongoDB writer log level] \
+MG_MONGO_WRITER_CONFIG_PATH=[Configuration file path with Message broker subjects list] \
+MG_MONGO_WRITER_HTTP_HOST=[Service HTTP host] \
+MG_MONGO_WRITER_HTTP_PORT=[Service HTTP port] \
+MG_MONGO_WRITER_HTTP_SERVER_CERT=[Service HTTP server certificate] \
+MG_MONGO_WRITER_HTTP_SERVER_KEY=[Service HTTP server key] \
+MG_MONGO_NAME=[MongoDB database name] \
+MG_MONGO_HOST=[MongoDB database host] \
+MG_MONGO_PORT=[MongoDB database port] \
+MG_MESSAGE_BROKER_URL=[Message broker instance URL] \
+MG_JAEGER_URL=[Jaeger server URL] \
+MG_SEND_TELEMETRY=[Send telemetry to magistrala call home server] \
+MG_MONGO_WRITER_INSTANCE_ID=[MongoDB writer instance ID] \
+$GOBIN/magistrala-mongodb-writer
+```
+
+## Usage
+
+Starting the service will start consuming normalized messages in SenML format.
diff --git a/consumers/writers/mongodb/consumer.go b/consumers/writers/mongodb/consumer.go
new file mode 100644
index 0000000..d655809
--- /dev/null
+++ b/consumers/writers/mongodb/consumer.go
@@ -0,0 +1,84 @@
+// Copyright (c) Abstract Machines
+// SPDX-License-Identifier: Apache-2.0
+
+package mongodb
+
+import (
+	"context"
+
+	"github.com/absmach/magistrala/consumers"
+	"github.com/absmach/magistrala/pkg/errors"
+	"github.com/absmach/magistrala/pkg/transformers/json"
+	"github.com/absmach/magistrala/pkg/transformers/senml"
+	"go.mongodb.org/mongo-driver/bson"
+	"go.mongodb.org/mongo-driver/mongo"
+)
+
+const senmlCollection string = "messages"
+
+var errSaveMessage = errors.New("failed to save message to mongodb database")
+
+var _ consumers.BlockingConsumer = (*mongoRepo)(nil)
+
+type mongoRepo struct {
+	db *mongo.Database
+}
+
+// New returns a new MongoDB writer.
+func New(db *mongo.Database) consumers.BlockingConsumer {
+	return &mongoRepo{db}
+}
+
+func (repo *mongoRepo) ConsumeBlocking(ctx context.Context, message interface{}) error {
+	switch m := message.(type) {
+	case json.Messages:
+		return repo.saveJSON(ctx, m)
+	default:
+		return repo.saveSenml(ctx, m)
+	}
+}
+
+func (repo *mongoRepo) saveSenml(ctx context.Context, messages interface{}) error {
+	msgs, ok := messages.([]senml.Message)
+	if !ok {
+		return errSaveMessage
+	}
+	coll := repo.db.Collection(senmlCollection)
+	var dbMsgs []interface{}
+	for _, msg := range msgs {
+		// Check if message is already in database.
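+		// A message counts as a duplicate when time, publisher, subtopic and
+		// name all match an existing document.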
+ filter := bson.M{"time": msg.Time, "publisher": msg.Publisher, "subtopic": msg.Subtopic, "name": msg.Name} + + count, err := coll.CountDocuments(ctx, filter) + if err != nil { + return errors.Wrap(errSaveMessage, err) + } + + if count == 0 { + dbMsgs = append(dbMsgs, msg) + } + } + + _, err := coll.InsertMany(ctx, dbMsgs) + if err != nil { + return errors.Wrap(errSaveMessage, err) + } + + return nil +} + +func (repo *mongoRepo) saveJSON(ctx context.Context, msgs json.Messages) error { + m := []interface{}{} + for _, msg := range msgs.Data { + m = append(m, msg) + } + + coll := repo.db.Collection(msgs.Format) + + _, err := coll.InsertMany(ctx, m) + if err != nil { + return errors.Wrap(errSaveMessage, err) + } + + return nil +} diff --git a/consumers/writers/mongodb/consumer_test.go b/consumers/writers/mongodb/consumer_test.go new file mode 100644 index 0000000..4507376 --- /dev/null +++ b/consumers/writers/mongodb/consumer_test.go @@ -0,0 +1,134 @@ +// Copyright (c) Abstract Machines +// SPDX-License-Identifier: Apache-2.0 + +package mongodb_test + +import ( + "context" + "fmt" + "os" + "testing" + "time" + + mglog "github.com/absmach/magistrala/logger" + "github.com/absmach/magistrala/pkg/transformers/json" + "github.com/absmach/magistrala/pkg/transformers/senml" + "github.com/absmach/mg-contrib/consumers/writers/mongodb" + "github.com/gofrs/uuid" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.mongodb.org/mongo-driver/bson" + "go.mongodb.org/mongo-driver/mongo" + "go.mongodb.org/mongo-driver/mongo/options" +) + +var ( + port string + addr string + testLog, _ = mglog.New(os.Stdout, "info") + testDB = "test" + collection = "messages" + msgsNum = 100 + valueFields = 5 + subtopic = "topic" +) + +var ( + v float64 = 5 + stringV = "value" + boolV = true + dataV = "base64" + sum float64 = 42 +) + +func TestSaveSenml(t *testing.T) { + client, err := mongo.Connect(context.Background(), options.Client().ApplyURI(addr)) + require.Nil(t, err, fmt.Sprintf("Creating new MongoDB client expected to succeed: %s.\n", err)) + + db := client.Database(testDB) + repo := mongodb.New(db) + + now := time.Now().Unix() + msg := senml.Message{ + Channel: "45", + Publisher: "2580", + Protocol: "http", + Name: "test name", + Unit: "km", + Time: 13451312, + UpdateTime: 5456565466, + } + var msgs []senml.Message + + for i := 0; i < msgsNum; i++ { + // Mix possible values as well as value sum. 
+			count := i % valueFields
+			switch count {
+			case 0:
+				msg.Subtopic = subtopic
+				msg.Value = &v
+			case 1:
+				msg.BoolValue = &boolV
+			case 2:
+				msg.StringValue = &stringV
+			case 3:
+				msg.DataValue = &dataV
+			case 4:
+				msg.Sum = &sum
+			}
+
+			msg.Time = float64(now + int64(i))
+			msgs = append(msgs, msg)
+		}
+
+	err = repo.ConsumeBlocking(context.TODO(), msgs)
+	require.Nil(t, err, fmt.Sprintf("Save operation expected to succeed: %s.\n", err))
+
+	count, err := db.Collection(collection).CountDocuments(context.Background(), bson.D{})
+	assert.Nil(t, err, fmt.Sprintf("Querying database expected to succeed: %s.\n", err))
+	assert.Equal(t, int64(msgsNum), count, fmt.Sprintf("Expected to have %d values, found %d instead.\n", msgsNum, count))
+}
+
+func TestSaveJSON(t *testing.T) {
+	client, err := mongo.Connect(context.Background(), options.Client().ApplyURI(addr))
+	require.Nil(t, err, fmt.Sprintf("Creating new MongoDB client expected to succeed: %s.\n", err))
+
+	db := client.Database(testDB)
+	repo := mongodb.New(db)
+
+	chid, err := uuid.NewV4()
+	assert.Nil(t, err, fmt.Sprintf("got unexpected error: %s", err))
+	pubid, err := uuid.NewV4()
+	assert.Nil(t, err, fmt.Sprintf("got unexpected error: %s", err))
+
+	msg := json.Message{
+		Channel:   chid.String(),
+		Publisher: pubid.String(),
+		Created:   time.Now().Unix(),
+		Subtopic:  "subtopic/format/some_json",
+		Protocol:  "mqtt",
+		Payload: map[string]interface{}{
+			"field_1": 123,
+			"field_2": "value",
+			"field_3": false,
+			"field_4": 12.344,
+			"field_5": map[string]interface{}{
+				"field_1": "value",
+				"field_2": 42,
+			},
+		},
+	}
+
+	now := time.Now().Unix()
+	msgs := json.Messages{
+		Format: "some_json",
+	}
+
+	for i := 0; i < msgsNum; i++ {
+		msg.Created = now + int64(i)
+		msgs.Data = append(msgs.Data, msg)
+	}
+
+	err = repo.ConsumeBlocking(context.TODO(), msgs)
+	assert.Nil(t, err, fmt.Sprintf("expected no error, got %s\n", err))
+}
diff --git a/consumers/writers/mongodb/doc.go b/consumers/writers/mongodb/doc.go
new file mode 100644
index 0000000..d809074
--- /dev/null
+++ b/consumers/writers/mongodb/doc.go
@@ -0,0 +1,6 @@
+// Copyright (c) Abstract Machines
+// SPDX-License-Identifier: Apache-2.0
+
+// Package mongodb contains the domain concept definitions needed to
+// support Magistrala MongoDB writer service functionality.
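+//
+// SenML batches are checked against existing documents before insertion,
+// while JSON batches are stored as-is; see consumer.go in this package.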
+package mongodb diff --git a/consumers/writers/mongodb/setup_test.go b/consumers/writers/mongodb/setup_test.go new file mode 100644 index 0000000..2d1dfae --- /dev/null +++ b/consumers/writers/mongodb/setup_test.go @@ -0,0 +1,56 @@ +// Copyright (c) Abstract Machines +// SPDX-License-Identifier: Apache-2.0 + +package mongodb_test + +import ( + "context" + "fmt" + "log" + "os" + "testing" + + "github.com/ory/dockertest/v3" + "github.com/ory/dockertest/v3/docker" + "go.mongodb.org/mongo-driver/mongo" + "go.mongodb.org/mongo-driver/mongo/options" +) + +func TestMain(m *testing.M) { + pool, err := dockertest.NewPool("") + if err != nil { + testLog.Error(fmt.Sprintf("Could not connect to docker: %s", err)) + } + + container, err := pool.RunWithOptions(&dockertest.RunOptions{ + Repository: "mongo", + Tag: "7.0.5", + Env: []string{ + "MONGO_INITDB_DATABASE=test", + }, + }, func(config *docker.HostConfig) { + config.AutoRemove = true + config.RestartPolicy = docker.RestartPolicy{Name: "no"} + }) + if err != nil { + log.Fatalf("Could not start container: %s", err) + } + + port = container.GetPort("27017/tcp") + addr = fmt.Sprintf("mongodb://localhost:%s", port) + + if err := pool.Retry(func() error { + _, err := mongo.Connect(context.Background(), options.Client().ApplyURI(addr)) + return err + }); err != nil { + testLog.Error(fmt.Sprintf("Could not connect to docker: %s", err)) + } + + code := m.Run() + + if err := pool.Purge(container); err != nil { + testLog.Error(fmt.Sprintf("Could not purge container: %s", err)) + } + + os.Exit(code) +} diff --git a/consumers/writers/postgres/README.md b/consumers/writers/postgres/README.md new file mode 100644 index 0000000..bd9907e --- /dev/null +++ b/consumers/writers/postgres/README.md @@ -0,0 +1,77 @@ +# Postgres writer + +Postgres writer provides message repository implementation for Postgres. + +## Configuration + +The service is configured using the environment variables presented in the +following table. Note that any unset variables will be replaced with their +default values. 
+
+| Variable                            | Description                                                                        | Default                        |
+| ----------------------------------- | ---------------------------------------------------------------------------------- | ------------------------------ |
+| MG_POSTGRES_WRITER_LOG_LEVEL        | Service log level                                                                  | info                           |
+| MG_POSTGRES_WRITER_CONFIG_PATH      | Config file path with Message broker subjects list, payload type and content-type  | /config.toml                   |
+| MG_POSTGRES_WRITER_HTTP_HOST        | Service HTTP host                                                                  | localhost                      |
+| MG_POSTGRES_WRITER_HTTP_PORT        | Service HTTP port                                                                  | 9010                           |
+| MG_POSTGRES_WRITER_HTTP_SERVER_CERT | Service HTTP server certificate path                                               | ""                             |
+| MG_POSTGRES_WRITER_HTTP_SERVER_KEY  | Service HTTP server key                                                            | ""                             |
+| MG_POSTGRES_HOST                    | Postgres DB host                                                                   | postgres                       |
+| MG_POSTGRES_PORT                    | Postgres DB port                                                                   | 5432                           |
+| MG_POSTGRES_USER                    | Postgres user                                                                      | magistrala                     |
+| MG_POSTGRES_PASS                    | Postgres password                                                                  | magistrala                     |
+| MG_POSTGRES_NAME                    | Postgres database name                                                             | messages                       |
+| MG_POSTGRES_SSL_MODE                | Postgres SSL mode                                                                  | disabled                       |
+| MG_POSTGRES_SSL_CERT                | Postgres SSL certificate path                                                      | ""                             |
+| MG_POSTGRES_SSL_KEY                 | Postgres SSL key                                                                   | ""                             |
+| MG_POSTGRES_SSL_ROOT_CERT           | Postgres SSL root certificate path                                                 | ""                             |
+| MG_MESSAGE_BROKER_URL               | Message broker instance URL                                                        | nats://localhost:4222          |
+| MG_JAEGER_URL                       | Jaeger server URL                                                                  | http://jaeger:14268/api/traces |
+| MG_SEND_TELEMETRY                   | Send telemetry to magistrala call home server                                      | true                           |
+| MG_POSTGRES_WRITER_INSTANCE_ID      | Service instance ID                                                                | ""                             |
+
+## Deployment
+
+The service itself is distributed as a Docker container. Check the [`postgres-writer`](https://github.com/absmach/magistrala/blob/main/docker/addons/postgres-writer/docker-compose.yml#L34-L59) service section in the docker-compose file to see how the service is deployed.
+
+To start the service, execute the following shell script:
+
+```bash
+# download the latest version of the service
+git clone https://github.com/absmach/magistrala
+
+cd magistrala
+
+# compile the postgres writer
+make postgres-writer
+
+# copy binary to bin
+make install
+
+# Set the environment variables and run the service
+MG_POSTGRES_WRITER_LOG_LEVEL=[Service log level] \
+MG_POSTGRES_WRITER_CONFIG_PATH=[Config file path with Message broker subjects list, payload type and content-type] \
+MG_POSTGRES_WRITER_HTTP_HOST=[Service HTTP host] \
+MG_POSTGRES_WRITER_HTTP_PORT=[Service HTTP port] \
+MG_POSTGRES_WRITER_HTTP_SERVER_CERT=[Service HTTP server cert] \
+MG_POSTGRES_WRITER_HTTP_SERVER_KEY=[Service HTTP server key] \
+MG_POSTGRES_HOST=[Postgres host] \
+MG_POSTGRES_PORT=[Postgres port] \
+MG_POSTGRES_USER=[Postgres user] \
+MG_POSTGRES_PASS=[Postgres password] \
+MG_POSTGRES_NAME=[Postgres database name] \
+MG_POSTGRES_SSL_MODE=[Postgres SSL mode] \
+MG_POSTGRES_SSL_CERT=[Postgres SSL cert] \
+MG_POSTGRES_SSL_KEY=[Postgres SSL key] \
+MG_POSTGRES_SSL_ROOT_CERT=[Postgres SSL Root cert] \
+MG_MESSAGE_BROKER_URL=[Message broker instance URL] \
+MG_JAEGER_URL=[Jaeger server URL] \
+MG_SEND_TELEMETRY=[Send telemetry to magistrala call home server] \
+MG_POSTGRES_WRITER_INSTANCE_ID=[Service instance ID] \
+$GOBIN/magistrala-postgres-writer
+```
+
+## Usage
+
+Once started, the service will consume normalized messages in SenML format.
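+For a programmatic view of what the service does with each message, the writer can also be driven directly as a blocking consumer. The following is a minimal sketch, not the service entry point, assuming a reachable Postgres instance with the writer migrations already applied; the connection URL and message IDs are illustrative:
+
+```go
+package main
+
+import (
+	"context"
+	"log"
+
+	"github.com/absmach/magistrala/pkg/transformers/senml"
+	"github.com/absmach/mg-contrib/consumers/writers/postgres"
+	"github.com/jmoiron/sqlx"
+
+	_ "github.com/jackc/pgx/v5/stdlib" // registers the "pgx" database/sql driver
+)
+
+func main() {
+	// Illustrative DSN; adjust host, credentials and database name.
+	db, err := sqlx.Open("pgx", "host=localhost port=5432 user=magistrala dbname=messages password=magistrala sslmode=disable")
+	if err != nil {
+		log.Fatalf("failed to open DB: %s", err)
+	}
+	defer db.Close()
+
+	repo := postgres.New(db) // a consumers.BlockingConsumer
+
+	v := 23.5
+	msgs := []senml.Message{{
+		Channel:   "channel-id",
+		Publisher: "publisher-id",
+		Protocol:  "mqtt",
+		Name:      "temperature",
+		Unit:      "C",
+		Value:     &v,
+	}}
+
+	// The whole batch is stored within a single transaction.
+	if err := repo.ConsumeBlocking(context.Background(), msgs); err != nil {
+		log.Fatalf("failed to store messages: %s", err)
+	}
+}
+```
+
+Because the batch shares one transaction, a single malformed message rolls back the whole write.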
diff --git a/consumers/writers/postgres/consumer.go b/consumers/writers/postgres/consumer.go new file mode 100644 index 0000000..28e73e7 --- /dev/null +++ b/consumers/writers/postgres/consumer.go @@ -0,0 +1,213 @@ +// Copyright (c) Abstract Machines +// SPDX-License-Identifier: Apache-2.0 + +package postgres + +import ( + "context" + "encoding/json" + "fmt" + + "github.com/absmach/magistrala/consumers" + "github.com/absmach/magistrala/pkg/errors" + mgjson "github.com/absmach/magistrala/pkg/transformers/json" + "github.com/absmach/magistrala/pkg/transformers/senml" + "github.com/gofrs/uuid" + "github.com/jackc/pgerrcode" + "github.com/jackc/pgx/v5/pgconn" + "github.com/jmoiron/sqlx" // required for DB access +) + +var ( + errInvalidMessage = errors.New("invalid message representation") + errSaveMessage = errors.New("failed to save message to postgres database") + errTransRollback = errors.New("failed to rollback transaction") + errNoTable = errors.New("relation does not exist") +) + +var _ consumers.BlockingConsumer = (*postgresRepo)(nil) + +type postgresRepo struct { + db *sqlx.DB +} + +// New returns new PostgreSQL writer. +func New(db *sqlx.DB) consumers.BlockingConsumer { + return &postgresRepo{db: db} +} + +func (pr postgresRepo) ConsumeBlocking(ctx context.Context, message interface{}) (err error) { + switch m := message.(type) { + case mgjson.Messages: + return pr.saveJSON(ctx, m) + default: + return pr.saveSenml(ctx, m) + } +} + +func (pr postgresRepo) saveSenml(ctx context.Context, messages interface{}) (err error) { + msgs, ok := messages.([]senml.Message) + if !ok { + return errSaveMessage + } + q := `INSERT INTO messages (id, channel, subtopic, publisher, protocol, + name, unit, value, string_value, bool_value, data_value, sum, + time, update_time) + VALUES (:id, :channel, :subtopic, :publisher, :protocol, :name, :unit, + :value, :string_value, :bool_value, :data_value, :sum, + :time, :update_time);` + + tx, err := pr.db.BeginTxx(ctx, nil) + if err != nil { + return errors.Wrap(errSaveMessage, err) + } + defer func() { + if err != nil { + if txErr := tx.Rollback(); txErr != nil { + err = errors.Wrap(err, errors.Wrap(errTransRollback, txErr)) + } + return + } + + if err = tx.Commit(); err != nil { + err = errors.Wrap(errSaveMessage, err) + } + }() + + for _, msg := range msgs { + id, err := uuid.NewV4() + if err != nil { + return err + } + m := senmlMessage{Message: msg, ID: id.String()} + if _, err := tx.NamedExec(q, m); err != nil { + pgErr, ok := err.(*pgconn.PgError) + if ok { + if pgErr.Code == pgerrcode.InvalidTextRepresentation { + return errors.Wrap(errSaveMessage, errInvalidMessage) + } + } + + return errors.Wrap(errSaveMessage, err) + } + } + return err +} + +func (pr postgresRepo) saveJSON(ctx context.Context, msgs mgjson.Messages) error { + if err := pr.insertJSON(ctx, msgs); err != nil { + if err == errNoTable { + if err := pr.createTable(msgs.Format); err != nil { + return err + } + return pr.insertJSON(ctx, msgs) + } + return err + } + return nil +} + +func (pr postgresRepo) insertJSON(ctx context.Context, msgs mgjson.Messages) error { + tx, err := pr.db.BeginTxx(ctx, nil) + if err != nil { + return errors.Wrap(errSaveMessage, err) + } + defer func() { + if err != nil { + if txErr := tx.Rollback(); txErr != nil { + err = errors.Wrap(err, errors.Wrap(errTransRollback, txErr)) + } + return + } + + if err = tx.Commit(); err != nil { + err = errors.Wrap(errSaveMessage, err) + } + }() + + q := `INSERT INTO %s (id, channel, created, subtopic, publisher, protocol, 
payload) + VALUES (:id, :channel, :created, :subtopic, :publisher, :protocol, :payload);` + q = fmt.Sprintf(q, msgs.Format) + + for _, m := range msgs.Data { + var dbmsg jsonMessage + dbmsg, err = toJSONMessage(m) + if err != nil { + return errors.Wrap(errSaveMessage, err) + } + + if _, err = tx.NamedExec(q, dbmsg); err != nil { + pgErr, ok := err.(*pgconn.PgError) + if ok { + switch pgErr.Code { + case pgerrcode.InvalidTextRepresentation: + return errors.Wrap(errSaveMessage, errInvalidMessage) + case pgerrcode.UndefinedTable: + return errNoTable + } + } + return err + } + } + return nil +} + +func (pr postgresRepo) createTable(name string) error { + q := `CREATE TABLE IF NOT EXISTS %s ( + id UUID, + created BIGINT, + channel VARCHAR(254), + subtopic VARCHAR(254), + publisher VARCHAR(254), + protocol TEXT, + payload JSONB, + PRIMARY KEY (id) + )` + q = fmt.Sprintf(q, name) + + _, err := pr.db.Exec(q) + return err +} + +type senmlMessage struct { + senml.Message + ID string `db:"id"` +} + +type jsonMessage struct { + ID string `db:"id"` + Channel string `db:"channel"` + Created int64 `db:"created"` + Subtopic string `db:"subtopic"` + Publisher string `db:"publisher"` + Protocol string `db:"protocol"` + Payload []byte `db:"payload"` +} + +func toJSONMessage(msg mgjson.Message) (jsonMessage, error) { + id, err := uuid.NewV4() + if err != nil { + return jsonMessage{}, err + } + + data := []byte("{}") + if msg.Payload != nil { + b, err := json.Marshal(msg.Payload) + if err != nil { + return jsonMessage{}, errors.Wrap(errSaveMessage, err) + } + data = b + } + + m := jsonMessage{ + ID: id.String(), + Channel: msg.Channel, + Created: msg.Created, + Subtopic: msg.Subtopic, + Publisher: msg.Publisher, + Protocol: msg.Protocol, + Payload: data, + } + + return m, nil +} diff --git a/consumers/writers/postgres/consumer_test.go b/consumers/writers/postgres/consumer_test.go new file mode 100644 index 0000000..300f518 --- /dev/null +++ b/consumers/writers/postgres/consumer_test.go @@ -0,0 +1,112 @@ +// Copyright (c) Abstract Machines +// SPDX-License-Identifier: Apache-2.0 + +package postgres_test + +import ( + "context" + "fmt" + "testing" + "time" + + "github.com/absmach/magistrala/pkg/transformers/json" + "github.com/absmach/magistrala/pkg/transformers/senml" + "github.com/absmach/mg-contrib/consumers/writers/postgres" + "github.com/gofrs/uuid" + "github.com/stretchr/testify/assert" +) + +const ( + msgsNum = 42 + valueFields = 5 + subtopic = "topic" +) + +var ( + v float64 = 5 + stringV = "value" + boolV = true + dataV = "base64" + sum float64 = 42 +) + +func TestSaveSenml(t *testing.T) { + repo := postgres.New(db) + + chid, err := uuid.NewV4() + assert.Nil(t, err, fmt.Sprintf("got unexpected error: %s", err)) + + msg := senml.Message{} + msg.Channel = chid.String() + + pubid, err := uuid.NewV4() + assert.Nil(t, err, fmt.Sprintf("got unexpected error: %s", err)) + msg.Publisher = pubid.String() + + now := time.Now().Unix() + var msgs []senml.Message + + for i := 0; i < msgsNum; i++ { + // Mix possible values as well as value sum. 
+ count := i % valueFields + switch count { + case 0: + msg.Subtopic = subtopic + msg.Value = &v + case 1: + msg.BoolValue = &boolV + case 2: + msg.StringValue = &stringV + case 3: + msg.DataValue = &dataV + case 4: + msg.Sum = &sum + } + + msg.Time = float64(now + int64(i)) + msgs = append(msgs, msg) + } + + err = repo.ConsumeBlocking(context.TODO(), msgs) + assert.Nil(t, err, fmt.Sprintf("expected no error got %s\n", err)) +} + +func TestSaveJSON(t *testing.T) { + repo := postgres.New(db) + + chid, err := uuid.NewV4() + assert.Nil(t, err, fmt.Sprintf("got unexpected error: %s", err)) + pubid, err := uuid.NewV4() + assert.Nil(t, err, fmt.Sprintf("got unexpected error: %s", err)) + + msg := json.Message{ + Channel: chid.String(), + Publisher: pubid.String(), + Created: time.Now().Unix(), + Subtopic: "subtopic/format/some_json", + Protocol: "mqtt", + Payload: map[string]interface{}{ + "field_1": 123, + "field_2": "value", + "field_3": false, + "field_4": 12.344, + "field_5": map[string]interface{}{ + "field_1": "value", + "field_2": 42, + }, + }, + } + + now := time.Now().Unix() + msgs := json.Messages{ + Format: "some_json", + } + + for i := 0; i < msgsNum; i++ { + msg.Created = now + int64(i) + msgs.Data = append(msgs.Data, msg) + } + + err = repo.ConsumeBlocking(context.TODO(), msgs) + assert.Nil(t, err, fmt.Sprintf("expected no error got %s\n", err)) +} diff --git a/consumers/writers/postgres/doc.go b/consumers/writers/postgres/doc.go new file mode 100644 index 0000000..a92d4f9 --- /dev/null +++ b/consumers/writers/postgres/doc.go @@ -0,0 +1,6 @@ +// Copyright (c) Abstract Machines +// SPDX-License-Identifier: Apache-2.0 + +// Package postgres contains repository implementations using Postgres as +// the underlying database. +package postgres diff --git a/consumers/writers/postgres/init.go b/consumers/writers/postgres/init.go new file mode 100644 index 0000000..de140b2 --- /dev/null +++ b/consumers/writers/postgres/init.go @@ -0,0 +1,46 @@ +// Copyright (c) Abstract Machines +// SPDX-License-Identifier: Apache-2.0 + +package postgres + +import migrate "github.com/rubenv/sql-migrate" + +// Migration of postgres-writer. +func Migration() *migrate.MemoryMigrationSource { + return &migrate.MemoryMigrationSource{ + Migrations: []*migrate.Migration{ + { + Id: "messages_1", + Up: []string{ + `CREATE TABLE IF NOT EXISTS messages ( + id UUID, + channel UUID, + subtopic VARCHAR(254), + publisher UUID, + protocol TEXT, + name TEXT, + unit TEXT, + value FLOAT, + string_value TEXT, + bool_value BOOL, + data_value BYTEA, + sum FLOAT, + time FLOAT, + update_time FLOAT, + PRIMARY KEY (id) + )`, + }, + Down: []string{ + "DROP TABLE messages", + }, + }, + { + Id: "messages_2", + Up: []string{ + `ALTER TABLE messages DROP CONSTRAINT messages_pkey`, + `ALTER TABLE messages ADD PRIMARY KEY (time, publisher, subtopic, name)`, + }, + }, + }, + } +} diff --git a/consumers/writers/postgres/setup_test.go b/consumers/writers/postgres/setup_test.go new file mode 100644 index 0000000..b98b746 --- /dev/null +++ b/consumers/writers/postgres/setup_test.go @@ -0,0 +1,85 @@ +// Copyright (c) Abstract Machines +// SPDX-License-Identifier: Apache-2.0 + +// Package postgres_test contains tests for PostgreSQL repository +// implementations. 
+package postgres_test + +import ( + "fmt" + "log" + "os" + "testing" + + pgclient "github.com/absmach/magistrala/pkg/postgres" + "github.com/absmach/mg-contrib/consumers/writers/postgres" + "github.com/jmoiron/sqlx" + "github.com/ory/dockertest/v3" + "github.com/ory/dockertest/v3/docker" +) + +var db *sqlx.DB + +func TestMain(m *testing.M) { + pool, err := dockertest.NewPool("") + if err != nil { + log.Fatalf("Could not connect to docker: %s", err) + } + + container, err := pool.RunWithOptions(&dockertest.RunOptions{ + Repository: "postgres", + Tag: "16.2-alpine", + Env: []string{ + "POSTGRES_USER=test", + "POSTGRES_PASSWORD=test", + "POSTGRES_DB=test", + "listen_addresses = '*'", + }, + }, func(config *docker.HostConfig) { + config.AutoRemove = true + config.RestartPolicy = docker.RestartPolicy{Name: "no"} + }) + if err != nil { + log.Fatalf("Could not start container: %s", err) + } + + port := container.GetPort("5432/tcp") + + if err := pool.Retry(func() error { + url := fmt.Sprintf("host=localhost port=%s user=test dbname=test password=test sslmode=disable", port) + db, err = sqlx.Open("pgx", url) + if err != nil { + return err + } + return db.Ping() + }); err != nil { + log.Fatalf("Could not connect to docker: %s", err) + } + + dbConfig := pgclient.Config{ + Host: "localhost", + Port: port, + User: "test", + Pass: "test", + Name: "test", + SSLMode: "disable", + SSLCert: "", + SSLKey: "", + SSLRootCert: "", + } + + db, err = pgclient.Setup(dbConfig, *postgres.Migration()) + if err != nil { + log.Fatalf("Could not setup test DB connection: %s", err) + } + + code := m.Run() + + // Defers will not be run when using os.Exit + db.Close() + if err := pool.Purge(container); err != nil { + log.Fatalf("Could not purge container: %s", err) + } + + os.Exit(code) +} diff --git a/consumers/writers/timescale/README.md b/consumers/writers/timescale/README.md new file mode 100644 index 0000000..4cbbddd --- /dev/null +++ b/consumers/writers/timescale/README.md @@ -0,0 +1,76 @@ +# Timescale writer + +Timescale writer provides message repository implementation for Timescale. + +## Configuration + +The service is configured using the environment variables presented in the +following table. Note that any unset variables will be replaced with their +default values. 
+
+| Variable                              | Description                                                | Default                        |
+| ------------------------------------- | ---------------------------------------------------------- | ------------------------------ |
+| MG_TIMESCALE_WRITER_LOG_LEVEL         | Service log level                                          | info                           |
+| MG_TIMESCALE_WRITER_CONFIG_PATH       | Configuration file path with Message broker subjects list  | /config.toml                   |
+| MG_TIMESCALE_WRITER_HTTP_HOST         | Service HTTP host                                          | localhost                      |
+| MG_TIMESCALE_WRITER_HTTP_PORT         | Service HTTP port                                          | 9012                           |
+| MG_TIMESCALE_WRITER_HTTP_SERVER_CERT  | Service HTTP server certificate path                       | ""                             |
+| MG_TIMESCALE_WRITER_HTTP_SERVER_KEY   | Service HTTP server key                                    | ""                             |
+| MG_TIMESCALE_HOST                     | Timescale DB host                                          | timescale                      |
+| MG_TIMESCALE_PORT                     | Timescale DB port                                          | 5432                           |
+| MG_TIMESCALE_USER                     | Timescale user                                             | magistrala                     |
+| MG_TIMESCALE_PASS                     | Timescale password                                         | magistrala                     |
+| MG_TIMESCALE_NAME                     | Timescale database name                                    | messages                       |
+| MG_TIMESCALE_SSL_MODE                 | Timescale SSL mode                                         | disabled                       |
+| MG_TIMESCALE_SSL_CERT                 | Timescale SSL certificate path                             | ""                             |
+| MG_TIMESCALE_SSL_KEY                  | Timescale SSL key                                          | ""                             |
+| MG_TIMESCALE_SSL_ROOT_CERT            | Timescale SSL root certificate path                        | ""                             |
+| MG_MESSAGE_BROKER_URL                 | Message broker instance URL                                | nats://localhost:4222          |
+| MG_JAEGER_URL                         | Jaeger server URL                                          | http://jaeger:14268/api/traces |
+| MG_SEND_TELEMETRY                     | Send telemetry to magistrala call home server              | true                           |
+| MG_TIMESCALE_WRITER_INSTANCE_ID       | Timescale writer instance ID                               | ""                             |
+
+## Deployment
+
+The service itself is distributed as a Docker container. Check the [`timescale-writer`](https://github.com/absmach/magistrala/blob/main/docker/addons/timescale-writer/docker-compose.yml#L34-L59) service section in the docker-compose file to see how the service is deployed.
+
+To start the service, execute the following shell script:
+
+```bash
+# download the latest version of the service
+git clone https://github.com/absmach/magistrala
+
+cd magistrala
+
+# compile the timescale writer
+make timescale-writer
+
+# copy binary to bin
+make install
+
+# Set the environment variables and run the service
+MG_TIMESCALE_WRITER_LOG_LEVEL=[Service log level] \
+MG_TIMESCALE_WRITER_CONFIG_PATH=[Configuration file path with Message broker subjects list] \
+MG_TIMESCALE_WRITER_HTTP_HOST=[Service HTTP host] \
+MG_TIMESCALE_WRITER_HTTP_PORT=[Service HTTP port] \
+MG_TIMESCALE_WRITER_HTTP_SERVER_CERT=[Service HTTP server cert] \
+MG_TIMESCALE_WRITER_HTTP_SERVER_KEY=[Service HTTP server key] \
+MG_TIMESCALE_HOST=[Timescale host] \
+MG_TIMESCALE_PORT=[Timescale port] \
+MG_TIMESCALE_USER=[Timescale user] \
+MG_TIMESCALE_PASS=[Timescale password] \
+MG_TIMESCALE_NAME=[Timescale database name] \
+MG_TIMESCALE_SSL_MODE=[Timescale SSL mode] \
+MG_TIMESCALE_SSL_CERT=[Timescale SSL cert] \
+MG_TIMESCALE_SSL_KEY=[Timescale SSL key] \
+MG_TIMESCALE_SSL_ROOT_CERT=[Timescale SSL Root cert] \
+MG_MESSAGE_BROKER_URL=[Message broker instance URL] \
+MG_JAEGER_URL=[Jaeger server URL] \
+MG_SEND_TELEMETRY=[Send telemetry to magistrala call home server] \
+MG_TIMESCALE_WRITER_INSTANCE_ID=[Timescale writer instance ID] \
+$GOBIN/magistrala-timescale-writer
+```
+
+## Usage
+
+Once started, the service will consume normalized messages in SenML format.
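+In addition to SenML, the writer stores JSON messages: each `json.Messages` batch is written to a table named after its `Format` field, which the writer creates on first use. The following is a minimal sketch, not the service entry point, assuming a reachable Timescale instance with the writer migrations already applied; the connection URL and message IDs are illustrative:
+
+```go
+package main
+
+import (
+	"context"
+	"log"
+	"time"
+
+	"github.com/absmach/magistrala/pkg/transformers/json"
+	"github.com/absmach/mg-contrib/consumers/writers/timescale"
+	"github.com/jmoiron/sqlx"
+
+	_ "github.com/jackc/pgx/v5/stdlib" // registers the "pgx" database/sql driver
+)
+
+func main() {
+	// Illustrative DSN; adjust host, credentials and database name.
+	db, err := sqlx.Open("pgx", "host=localhost port=5432 user=magistrala dbname=messages password=magistrala sslmode=disable")
+	if err != nil {
+		log.Fatalf("failed to open DB: %s", err)
+	}
+	defer db.Close()
+
+	repo := timescale.New(db) // a consumers.BlockingConsumer
+
+	msgs := json.Messages{
+		Format: "some_json", // destination table name
+		Data: []json.Message{{
+			Channel:   "channel-id",
+			Publisher: "publisher-id",
+			Created:   time.Now().Unix(),
+			Subtopic:  "subtopic",
+			Protocol:  "mqtt",
+			Payload:   map[string]interface{}{"temperature": 23.5},
+		}},
+	}
+
+	// The destination table is created automatically if it does not exist.
+	if err := repo.ConsumeBlocking(context.Background(), msgs); err != nil {
+		log.Fatalf("failed to store messages: %s", err)
+	}
+}
+```
+
+If the destination table is missing, the writer creates it and retries the insert once.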
diff --git a/consumers/writers/timescale/consumer.go b/consumers/writers/timescale/consumer.go
new file mode 100644
index 0000000..070fe5d
--- /dev/null
+++ b/consumers/writers/timescale/consumer.go
@@ -0,0 +1,198 @@
+// Copyright (c) Abstract Machines
+// SPDX-License-Identifier: Apache-2.0
+
+package timescale
+
+import (
+	"context"
+	"encoding/json"
+	"fmt"
+
+	"github.com/absmach/magistrala/consumers"
+	"github.com/absmach/magistrala/pkg/errors"
+	mgjson "github.com/absmach/magistrala/pkg/transformers/json"
+	"github.com/absmach/magistrala/pkg/transformers/senml"
+	"github.com/jackc/pgerrcode"
+	"github.com/jackc/pgx/v5/pgconn"
+	"github.com/jmoiron/sqlx" // required for DB access
+)
+
+var (
+	errInvalidMessage = errors.New("invalid message representation")
+	errSaveMessage    = errors.New("failed to save message to timescale database")
+	errTransRollback  = errors.New("failed to rollback transaction")
+	errNoTable        = errors.New("relation does not exist")
+)
+
+var _ consumers.BlockingConsumer = (*timescaleRepo)(nil)
+
+type timescaleRepo struct {
+	db *sqlx.DB
+}
+
+// New returns new TimescaleSQL writer.
+func New(db *sqlx.DB) consumers.BlockingConsumer {
+	return &timescaleRepo{db: db}
+}
+
+func (tr *timescaleRepo) ConsumeBlocking(ctx context.Context, message interface{}) (err error) {
+	switch m := message.(type) {
+	case mgjson.Messages:
+		return tr.saveJSON(ctx, m)
+	default:
+		return tr.saveSenml(ctx, m)
+	}
+}
+
+func (tr timescaleRepo) saveSenml(ctx context.Context, messages interface{}) (err error) {
+	msgs, ok := messages.([]senml.Message)
+	if !ok {
+		return errSaveMessage
+	}
+	q := `INSERT INTO messages (channel, subtopic, publisher, protocol,
+		name, unit, value, string_value, bool_value, data_value, sum,
+		time, update_time)
+		VALUES (:channel, :subtopic, :publisher, :protocol, :name, :unit,
+		:value, :string_value, :bool_value, :data_value, :sum,
+		:time, :update_time);`
+
+	tx, err := tr.db.BeginTxx(ctx, nil)
+	if err != nil {
+		return errors.Wrap(errSaveMessage, err)
+	}
+	defer func() {
+		if err != nil {
+			if txErr := tx.Rollback(); txErr != nil {
+				err = errors.Wrap(err, errors.Wrap(errTransRollback, txErr))
+			}
+			return
+		}
+
+		if err = tx.Commit(); err != nil {
+			err = errors.Wrap(errSaveMessage, err)
+		}
+	}()
+
+	for _, msg := range msgs {
+		m := senmlMessage{Message: msg}
+		if _, err := tx.NamedExec(q, m); err != nil {
+			pgErr, ok := err.(*pgconn.PgError)
+			if ok {
+				if pgErr.Code == pgerrcode.InvalidTextRepresentation {
+					return errors.Wrap(errSaveMessage, errInvalidMessage)
+				}
+			}
+
+			return errors.Wrap(errSaveMessage, err)
+		}
+	}
+	return err
+}
+
+func (tr timescaleRepo) saveJSON(ctx context.Context, msgs mgjson.Messages) error {
+	if err := tr.insertJSON(ctx, msgs); err != nil {
+		if err == errNoTable {
+			if err := tr.createTable(msgs.Format); err != nil {
+				return err
+			}
+			return tr.insertJSON(ctx, msgs)
+		}
+		return err
+	}
+	return nil
+}
+
+func (tr timescaleRepo) insertJSON(ctx context.Context, msgs mgjson.Messages) error {
+	tx, err := tr.db.BeginTxx(ctx, nil)
+	if err != nil {
+		return errors.Wrap(errSaveMessage, err)
+	}
+	defer func() {
+		if err != nil {
+			if txErr := tx.Rollback(); txErr != nil {
+				err = errors.Wrap(err, errors.Wrap(errTransRollback, txErr))
+			}
+			return
+		}
+
+		if err = tx.Commit(); err != nil {
+			err = errors.Wrap(errSaveMessage, err)
+		}
+	}()
+
+	q := `INSERT INTO %s (channel, created, subtopic, publisher, protocol, payload)
+		VALUES (:channel, :created, :subtopic, :publisher, :protocol, :payload);`
+	q = fmt.Sprintf(q,
msgs.Format) + + for _, m := range msgs.Data { + var dbmsg jsonMessage + dbmsg, err = toJSONMessage(m) + if err != nil { + return errors.Wrap(errSaveMessage, err) + } + if _, err = tx.NamedExec(q, dbmsg); err != nil { + pgErr, ok := err.(*pgconn.PgError) + if ok { + switch pgErr.Code { + case pgerrcode.InvalidTextRepresentation: + return errors.Wrap(errSaveMessage, errInvalidMessage) + case pgerrcode.UndefinedTable: + return errNoTable + } + } + return err + } + } + return nil +} + +func (tr timescaleRepo) createTable(name string) error { + q := `CREATE TABLE IF NOT EXISTS %s ( + created BIGINT NOT NULL, + channel VARCHAR(254), + subtopic VARCHAR(254), + publisher VARCHAR(254), + protocol TEXT, + payload JSONB, + PRIMARY KEY (created, publisher, subtopic) + );` + q = fmt.Sprintf(q, name) + + _, err := tr.db.Exec(q) + return err +} + +type senmlMessage struct { + senml.Message +} + +type jsonMessage struct { + Channel string `db:"channel"` + Created int64 `db:"created"` + Subtopic string `db:"subtopic"` + Publisher string `db:"publisher"` + Protocol string `db:"protocol"` + Payload []byte `db:"payload"` +} + +func toJSONMessage(msg mgjson.Message) (jsonMessage, error) { + data := []byte("{}") + if msg.Payload != nil { + b, err := json.Marshal(msg.Payload) + if err != nil { + return jsonMessage{}, errors.Wrap(errSaveMessage, err) + } + data = b + } + + m := jsonMessage{ + Channel: msg.Channel, + Created: msg.Created, + Subtopic: msg.Subtopic, + Publisher: msg.Publisher, + Protocol: msg.Protocol, + Payload: data, + } + + return m, nil +} diff --git a/consumers/writers/timescale/consumer_test.go b/consumers/writers/timescale/consumer_test.go new file mode 100644 index 0000000..4d509dc --- /dev/null +++ b/consumers/writers/timescale/consumer_test.go @@ -0,0 +1,112 @@ +// Copyright (c) Abstract Machines +// SPDX-License-Identifier: Apache-2.0 + +package timescale_test + +import ( + "context" + "fmt" + "testing" + "time" + + "github.com/absmach/magistrala/pkg/transformers/json" + "github.com/absmach/magistrala/pkg/transformers/senml" + "github.com/absmach/mg-contrib/consumers/writers/timescale" + "github.com/gofrs/uuid" + "github.com/stretchr/testify/assert" +) + +const ( + msgsNum = 42 + valueFields = 5 + subtopic = "topic" +) + +var ( + v float64 = 5 + stringV = "value" + boolV = true + dataV = "base64" + sum float64 = 42 +) + +func TestSaveSenml(t *testing.T) { + repo := timescale.New(db) + + chid, err := uuid.NewV4() + assert.Nil(t, err, fmt.Sprintf("got unexpected error: %s", err)) + + msg := senml.Message{} + msg.Channel = chid.String() + + pubid, err := uuid.NewV4() + assert.Nil(t, err, fmt.Sprintf("got unexpected error: %s", err)) + msg.Publisher = pubid.String() + + now := time.Now().Unix() + var msgs []senml.Message + + for i := 0; i < msgsNum; i++ { + // Mix possible values as well as value sum. 
+ count := i % valueFields + switch count { + case 0: + msg.Subtopic = subtopic + msg.Value = &v + case 1: + msg.BoolValue = &boolV + case 2: + msg.StringValue = &stringV + case 3: + msg.DataValue = &dataV + case 4: + msg.Sum = &sum + } + + msg.Time = float64(now + int64(i)) + msgs = append(msgs, msg) + } + + err = repo.ConsumeBlocking(context.TODO(), msgs) + assert.Nil(t, err, fmt.Sprintf("expected no error got %s\n", err)) +} + +func TestSaveJSON(t *testing.T) { + repo := timescale.New(db) + + chid, err := uuid.NewV4() + assert.Nil(t, err, fmt.Sprintf("got unexpected error: %s", err)) + pubid, err := uuid.NewV4() + assert.Nil(t, err, fmt.Sprintf("got unexpected error: %s", err)) + + msg := json.Message{ + Channel: chid.String(), + Publisher: pubid.String(), + Created: time.Now().Unix(), + Subtopic: "subtopic/format/some_json", + Protocol: "mqtt", + Payload: map[string]interface{}{ + "field_1": 123, + "field_2": "value", + "field_3": false, + "field_4": 12.344, + "field_5": map[string]interface{}{ + "field_1": "value", + "field_2": 42, + }, + }, + } + + now := time.Now().Unix() + msgs := json.Messages{ + Format: "some_json", + } + + for i := 0; i < msgsNum; i++ { + msg.Created = now + int64(i) + msgs.Data = append(msgs.Data, msg) + } + + err = repo.ConsumeBlocking(context.TODO(), msgs) + assert.Nil(t, err, fmt.Sprintf("expected no error got %s\n", err)) +} diff --git a/consumers/writers/timescale/doc.go b/consumers/writers/timescale/doc.go new file mode 100644 index 0000000..302be6e --- /dev/null +++ b/consumers/writers/timescale/doc.go @@ -0,0 +1,6 @@ +// Copyright (c) Abstract Machines +// SPDX-License-Identifier: Apache-2.0 + +// Package timescale contains repository implementations using Timescale as +// the underlying database. +package timescale diff --git a/consumers/writers/timescale/init.go b/consumers/writers/timescale/init.go new file mode 100644 index 0000000..cfd7156 --- /dev/null +++ b/consumers/writers/timescale/init.go @@ -0,0 +1,39 @@ +// Copyright (c) Abstract Machines +// SPDX-License-Identifier: Apache-2.0 + +package timescale + +import migrate "github.com/rubenv/sql-migrate" + +// Migration of timescale-writer. +func Migration() *migrate.MemoryMigrationSource { + return &migrate.MemoryMigrationSource{ + Migrations: []*migrate.Migration{ + { + Id: "messages_1", + Up: []string{ + `CREATE TABLE IF NOT EXISTS messages ( + time BIGINT NOT NULL, + channel UUID, + subtopic VARCHAR(254), + publisher UUID, + protocol TEXT, + name VARCHAR(254), + unit TEXT, + value FLOAT, + string_value TEXT, + bool_value BOOL, + data_value BYTEA, + sum FLOAT, + update_time FLOAT, + PRIMARY KEY (time, publisher, subtopic, name) + ); + SELECT create_hypertable('messages', 'time', create_default_indexes => FALSE, chunk_time_interval => 86400000, if_not_exists => TRUE);`, + }, + Down: []string{ + "DROP TABLE messages", + }, + }, + }, + } +} diff --git a/consumers/writers/timescale/setup_test.go b/consumers/writers/timescale/setup_test.go new file mode 100644 index 0000000..803acc7 --- /dev/null +++ b/consumers/writers/timescale/setup_test.go @@ -0,0 +1,85 @@ +// Copyright (c) Abstract Machines +// SPDX-License-Identifier: Apache-2.0 + +// Package timescale_test contains tests for TimescaleSQL repository +// implementations. 
+package timescale_test
+
+import (
+	"fmt"
+	"log"
+	"os"
+	"testing"
+
+	pgclient "github.com/absmach/magistrala/pkg/postgres"
+	"github.com/absmach/mg-contrib/consumers/writers/timescale"
+	"github.com/jmoiron/sqlx"
+	"github.com/ory/dockertest/v3"
+	"github.com/ory/dockertest/v3/docker"
+)
+
+var db *sqlx.DB
+
+func TestMain(m *testing.M) {
+	pool, err := dockertest.NewPool("")
+	if err != nil {
+		log.Fatalf("Could not connect to docker: %s", err)
+	}
+
+	container, err := pool.RunWithOptions(&dockertest.RunOptions{
+		Repository: "timescale/timescaledb",
+		Tag:        "2.13.1-pg16",
+		Env: []string{
+			"POSTGRES_USER=test",
+			"POSTGRES_PASSWORD=test",
+			"POSTGRES_DB=test",
+			"listen_addresses = '*'",
+		},
+	}, func(config *docker.HostConfig) {
+		config.AutoRemove = true
+		config.RestartPolicy = docker.RestartPolicy{Name: "no"}
+	})
+	if err != nil {
+		log.Fatalf("Could not start container: %s", err)
+	}
+
+	port := container.GetPort("5432/tcp")
+
+	if err := pool.Retry(func() error {
+		url := fmt.Sprintf("host=localhost port=%s user=test dbname=test password=test sslmode=disable", port)
+		db, err = sqlx.Open("pgx", url)
+		if err != nil {
+			return err
+		}
+		return db.Ping()
+	}); err != nil {
+		log.Fatalf("Could not connect to docker: %s", err)
+	}
+
+	dbConfig := pgclient.Config{
+		Host:        "localhost",
+		Port:        port,
+		User:        "test",
+		Pass:        "test",
+		Name:        "test",
+		SSLMode:     "disable",
+		SSLCert:     "",
+		SSLKey:      "",
+		SSLRootCert: "",
+	}
+
+	db, err = pgclient.Setup(dbConfig, *timescale.Migration())
+	if err != nil {
+		log.Fatalf("Could not setup test DB connection: %s", err)
+	}
+
+	code := m.Run()
+
+	// Defers will not be run when using os.Exit
+	db.Close()
+	if err := pool.Purge(container); err != nil {
+		log.Fatalf("Could not purge container: %s", err)
+	}
+
+	os.Exit(code)
+}
diff --git a/doc.go b/doc.go
new file mode 100644
index 0000000..6eedf65
--- /dev/null
+++ b/doc.go
@@ -0,0 +1,4 @@
+// Copyright (c) Abstract Machines
+// SPDX-License-Identifier: Apache-2.0
+
+package contrib
diff --git a/docker/.env b/docker/.env
new file mode 100644
index 0000000..4e830f9
--- /dev/null
+++ b/docker/.env
@@ -0,0 +1,641 @@
+# Copyright (c) Abstract Machines
+# SPDX-License-Identifier: Apache-2.0
+# Docker: Environment variables in Compose
+
+## NginX
+MG_NGINX_HTTP_PORT=80
+MG_NGINX_SSL_PORT=443
+MG_NGINX_MQTT_PORT=1883
+MG_NGINX_MQTTS_PORT=8883
+
+## Nats
+MG_NATS_PORT=4222
+MG_NATS_HTTP_PORT=8222
+MG_NATS_JETSTREAM_KEY=u7wFoAPgXpDueXOFldBnXDh4xjnSOyEJ2Cb8Z5SZvGLzIZ3U4exWhhoIBZHzuNvh
+MG_NATS_URL=nats://nats:${MG_NATS_PORT}
+# Configs for nats as MQTT broker
+MG_NATS_HEALTH_CHECK=http://nats:${MG_NATS_HTTP_PORT}/healthz
+MG_NATS_WS_TARGET_PATH=
+MG_NATS_MQTT_QOS=1
+
+## RabbitMQ
+MG_RABBITMQ_PORT=5672
+MG_RABBITMQ_HTTP_PORT=15672
+MG_RABBITMQ_USER=magistrala
+MG_RABBITMQ_PASS=magistrala
+MG_RABBITMQ_COOKIE=magistrala
+MG_RABBITMQ_VHOST=/
+MG_RABBITMQ_URL=amqp://${MG_RABBITMQ_USER}:${MG_RABBITMQ_PASS}@rabbitmq:${MG_RABBITMQ_PORT}${MG_RABBITMQ_VHOST}
+
+## Message Broker
+MG_MESSAGE_BROKER_TYPE=nats
+MG_MESSAGE_BROKER_URL=${MG_NATS_URL}
+
+## VERNEMQ
+MG_DOCKER_VERNEMQ_ALLOW_ANONYMOUS=on
+MG_DOCKER_VERNEMQ_LOG__CONSOLE__LEVEL=error
+MG_VERNEMQ_HEALTH_CHECK=http://vernemq:8888/health
+MG_VERNEMQ_WS_TARGET_PATH=/mqtt
+MG_VERNEMQ_MQTT_QOS=2
+
+## MQTT Broker
+MG_MQTT_BROKER_TYPE=vernemq
+MG_MQTT_BROKER_HEALTH_CHECK=${MG_VERNEMQ_HEALTH_CHECK}
+MG_MQTT_ADAPTER_MQTT_QOS=${MG_VERNEMQ_MQTT_QOS}
+MG_MQTT_ADAPTER_MQTT_TARGET_HOST=${MG_MQTT_BROKER_TYPE}
+MG_MQTT_ADAPTER_MQTT_TARGET_PORT=1883
+MG_MQTT_ADAPTER_MQTT_TARGET_HEALTH_CHECK=${MG_MQTT_BROKER_HEALTH_CHECK} +MG_MQTT_ADAPTER_WS_TARGET_HOST=${MG_MQTT_BROKER_TYPE} +MG_MQTT_ADAPTER_WS_TARGET_PORT=8080 +MG_MQTT_ADAPTER_WS_TARGET_PATH=${MG_VERNEMQ_WS_TARGET_PATH} + +## Redis +MG_REDIS_TCP_PORT=6379 +MG_REDIS_URL=redis://es-redis:${MG_REDIS_TCP_PORT}/0 + +## Event Store +MG_ES_TYPE=${MG_MESSAGE_BROKER_TYPE} +MG_ES_URL=${MG_MESSAGE_BROKER_URL} + +## Jaeger +MG_JAEGER_COLLECTOR_OTLP_ENABLED=true +MG_JAEGER_FRONTEND=16686 +MG_JAEGER_OLTP_HTTP=4318 +MG_JAEGER_URL=http://jaeger:4318 +MG_JAEGER_TRACE_RATIO=1.0 +MG_JAEGER_MEMORY_MAX_TRACES=5000 + +## Call home +MG_SEND_TELEMETRY=true + +## Postgres +MG_POSTGRES_MAX_CONNECTIONS=100 + +## Core Services + +### Auth +MG_AUTH_LOG_LEVEL=debug +MG_AUTH_HTTP_HOST=auth +MG_AUTH_HTTP_PORT=8189 +MG_AUTH_HTTP_SERVER_CERT= +MG_AUTH_HTTP_SERVER_KEY= +MG_AUTH_GRPC_HOST=auth +MG_AUTH_GRPC_PORT=8181 +MG_AUTH_GRPC_SERVER_CERT=${GRPC_MTLS:+./ssl/certs/auth-grpc-server.crt}${GRPC_TLS:+./ssl/certs/auth-grpc-server.crt} +MG_AUTH_GRPC_SERVER_KEY=${GRPC_MTLS:+./ssl/certs/auth-grpc-server.key}${GRPC_TLS:+./ssl/certs/auth-grpc-server.key} +MG_AUTH_GRPC_SERVER_CA_CERTS=${GRPC_MTLS:+./ssl/certs/ca.crt}${GRPC_TLS:+./ssl/certs/ca.crt} +MG_AUTH_DB_HOST=auth-db +MG_AUTH_DB_PORT=5432 +MG_AUTH_DB_USER=magistrala +MG_AUTH_DB_PASS=magistrala +MG_AUTH_DB_NAME=auth +MG_AUTH_DB_SSL_MODE=disable +MG_AUTH_DB_SSL_CERT= +MG_AUTH_DB_SSL_KEY= +MG_AUTH_DB_SSL_ROOT_CERT= +MG_AUTH_SECRET_KEY=HyE2D4RUt9nnKG6v8zKEqAp6g6ka8hhZsqUpzgKvnwpXrNVQSH +MG_AUTH_ACCESS_TOKEN_DURATION="1h" +MG_AUTH_REFRESH_TOKEN_DURATION="24h" +MG_AUTH_INVITATION_DURATION="168h" +MG_AUTH_ADAPTER_INSTANCE_ID= + +#### Auth GRPC Client Config +MG_AUTH_GRPC_URL=auth:8181 +MG_AUTH_GRPC_TIMEOUT=300s +MG_AUTH_GRPC_CLIENT_CERT=${GRPC_MTLS:+./ssl/certs/auth-grpc-client.crt} +MG_AUTH_GRPC_CLIENT_KEY=${GRPC_MTLS:+./ssl/certs/auth-grpc-client.key} +MG_AUTH_GRPC_CLIENT_CA_CERTS=${GRPC_MTLS:+./ssl/certs/ca.crt} + +#### Domains Client Config +MG_DOMAINS_URL=http://auth:8189 + +### SpiceDB Datastore config +MG_SPICEDB_DB_USER=magistrala +MG_SPICEDB_DB_PASS=magistrala +MG_SPICEDB_DB_NAME=spicedb +MG_SPICEDB_DB_PORT=5432 + +### SpiceDB config +MG_SPICEDB_PRE_SHARED_KEY="12345678" +MG_SPICEDB_SCHEMA_FILE="/schema.zed" +MG_SPICEDB_HOST=magistrala-spicedb +MG_SPICEDB_PORT=50051 +MG_SPICEDB_DATASTORE_ENGINE=postgres + +### Invitations +MG_INVITATIONS_LOG_LEVEL=info +MG_INVITATIONS_HTTP_HOST=invitations +MG_INVITATIONS_HTTP_PORT=9020 +MG_INVITATIONS_HTTP_SERVER_CERT= +MG_INVITATIONS_HTTP_SERVER_KEY= +MG_INVITATIONS_DB_HOST=invitations-db +MG_INVITATIONS_DB_PORT=5432 +MG_INVITATIONS_DB_USER=magistrala +MG_INVITATIONS_DB_PASS=magistrala +MG_INVITATIONS_DB_NAME=invitations +MG_INVITATIONS_DB_SSL_MODE=disable +MG_INVITATIONS_DB_SSL_CERT= +MG_INVITATIONS_DB_SSL_KEY= +MG_INVITATIONS_DB_SSL_ROOT_CERT= +MG_INVITATIONS_INSTANCE_ID= + +### UI +MG_UI_LOG_LEVEL=debug +MG_UI_PORT=9095 +MG_HTTP_ADAPTER_URL=http://http-adapter:8008 +MG_READER_URL=http://timescale-reader:9011 +MG_THINGS_URL=http://things:9000 +MG_USERS_URL=http://users:9002 +MG_INVITATIONS_URL=http://invitations:9020 +MG_DOMAINS_URL=http://auth:8189 +MG_BOOTSTRAP_URL=http://bootstrap:9013 +MG_UI_HOST_URL=http://localhost:9095 +MG_UI_VERIFICATION_TLS=false +MG_UI_CONTENT_TYPE=application/senml+json +MG_UI_INSTANCE_ID= +MG_UI_DB_HOST=ui-db +MG_UI_DB_PORT=5432 +MG_UI_DB_USER=magistrala +MG_UI_DB_PASS=magistrala +MG_UI_DB_NAME=ui +MG_UI_DB_SSL_MODE=disable +MG_UI_DB_SSL_CERT= +MG_UI_DB_SSL_KEY= +MG_UI_DB_SSL_ROOT_CERT= 
+MG_UI_HASH_KEY=5jx4x2Qg9OUmzpP5dbveWQ +MG_UI_BLOCK_KEY=UtgZjr92jwRY6SPUndHXiyl9QY8qTUyZ +MG_UI_PATH_PREFIX=/ui + +### Users +MG_USERS_LOG_LEVEL=debug +MG_USERS_SECRET_KEY=HyE2D4RUt9nnKG6v8zKEqAp6g6ka8hhZsqUpzgKvnwpXrNVQSH +MG_USERS_ADMIN_EMAIL=admin@example.com +MG_USERS_ADMIN_PASSWORD=12345678 +MG_USERS_PASS_REGEX=^.{8,}$ +MG_USERS_ACCESS_TOKEN_DURATION=15m +MG_USERS_REFRESH_TOKEN_DURATION=24h +MG_TOKEN_RESET_ENDPOINT=/reset-request +MG_USERS_HTTP_HOST=users +MG_USERS_HTTP_PORT=9002 +MG_USERS_HTTP_SERVER_CERT= +MG_USERS_HTTP_SERVER_KEY= +MG_USERS_DB_HOST=users-db +MG_USERS_DB_PORT=5432 +MG_USERS_DB_USER=magistrala +MG_USERS_DB_PASS=magistrala +MG_USERS_DB_NAME=users +MG_USERS_DB_SSL_MODE=disable +MG_USERS_DB_SSL_CERT= +MG_USERS_DB_SSL_KEY= +MG_USERS_DB_SSL_ROOT_CERT= +MG_USERS_RESET_PWD_TEMPLATE=users.tmpl +MG_USERS_INSTANCE_ID= +MG_USERS_ALLOW_SELF_REGISTER=true +MG_OAUTH_UI_REDIRECT_URL=http://localhost:9095${MG_UI_PATH_PREFIX}/tokens/secure +MG_OAUTH_UI_ERROR_URL=http://localhost:9095${MG_UI_PATH_PREFIX}/error +MG_USERS_DELETE_INTERVAL=24h +MG_USERS_DELETE_AFTER=720h + +### Email utility +MG_EMAIL_HOST=smtp.mailtrap.io +MG_EMAIL_PORT=2525 +MG_EMAIL_USERNAME=18bf7f70705139 +MG_EMAIL_PASSWORD=2b0d302e775b1e +MG_EMAIL_FROM_ADDRESS=from@example.com +MG_EMAIL_FROM_NAME=Example +MG_EMAIL_TEMPLATE=email.tmpl + +### Google OAuth2 +MG_GOOGLE_CLIENT_ID= +MG_GOOGLE_CLIENT_SECRET= +MG_GOOGLE_REDIRECT_URL= +MG_GOOGLE_STATE= + +### Things +MG_THINGS_LOG_LEVEL=debug +MG_THINGS_STANDALONE_ID= +MG_THINGS_STANDALONE_TOKEN= +MG_THINGS_CACHE_KEY_DURATION=10m +MG_THINGS_HTTP_HOST=things +MG_THINGS_HTTP_PORT=9000 +MG_THINGS_AUTH_GRPC_HOST=things +MG_THINGS_AUTH_GRPC_PORT=7000 +MG_THINGS_AUTH_GRPC_SERVER_CERT=${GRPC_MTLS:+./ssl/certs/things-grpc-server.crt}${GRPC_TLS:+./ssl/certs/things-grpc-server.crt} +MG_THINGS_AUTH_GRPC_SERVER_KEY=${GRPC_MTLS:+./ssl/certs/things-grpc-server.key}${GRPC_TLS:+./ssl/certs/things-grpc-server.key} +MG_THINGS_AUTH_GRPC_SERVER_CA_CERTS=${GRPC_MTLS:+./ssl/certs/ca.crt}${GRPC_TLS:+./ssl/certs/ca.crt} +MG_THINGS_CACHE_URL=redis://things-redis:${MG_REDIS_TCP_PORT}/0 +MG_THINGS_DB_HOST=things-db +MG_THINGS_DB_PORT=5432 +MG_THINGS_DB_USER=magistrala +MG_THINGS_DB_PASS=magistrala +MG_THINGS_DB_NAME=things +MG_THINGS_DB_SSL_MODE=disable +MG_THINGS_DB_SSL_CERT= +MG_THINGS_DB_SSL_KEY= +MG_THINGS_DB_SSL_ROOT_CERT= +MG_THINGS_INSTANCE_ID= + +#### Things Client Config +MG_THINGS_URL=http://things:9000 +MG_THINGS_AUTH_GRPC_URL=things:7000 +MG_THINGS_AUTH_GRPC_TIMEOUT=1s +MG_THINGS_AUTH_GRPC_CLIENT_CERT=${GRPC_MTLS:+./ssl/certs/things-grpc-client.crt} +MG_THINGS_AUTH_GRPC_CLIENT_KEY=${GRPC_MTLS:+./ssl/certs/things-grpc-client.key} +MG_THINGS_AUTH_GRPC_CLIENT_CA_CERTS=${GRPC_MTLS:+./ssl/certs/ca.crt} + +### HTTP +MG_HTTP_ADAPTER_LOG_LEVEL=debug +MG_HTTP_ADAPTER_HOST=http-adapter +MG_HTTP_ADAPTER_PORT=8008 +MG_HTTP_ADAPTER_SERVER_CERT= +MG_HTTP_ADAPTER_SERVER_KEY= +MG_HTTP_ADAPTER_INSTANCE_ID= + +### MQTT +MG_MQTT_ADAPTER_LOG_LEVEL=debug +MG_MQTT_ADAPTER_MQTT_PORT=1883 +MG_MQTT_ADAPTER_FORWARDER_TIMEOUT=30s +MG_MQTT_ADAPTER_WS_PORT=8080 +MG_MQTT_ADAPTER_INSTANCE= +MG_MQTT_ADAPTER_INSTANCE_ID= +MG_MQTT_ADAPTER_ES_DB=0 + +### CoAP +MG_COAP_ADAPTER_LOG_LEVEL=debug +MG_COAP_ADAPTER_HOST=coap-adapter +MG_COAP_ADAPTER_PORT=5683 +MG_COAP_ADAPTER_SERVER_CERT= +MG_COAP_ADAPTER_SERVER_KEY= +MG_COAP_ADAPTER_HTTP_HOST=coap-adapter +MG_COAP_ADAPTER_HTTP_PORT=5683 +MG_COAP_ADAPTER_HTTP_SERVER_CERT= +MG_COAP_ADAPTER_HTTP_SERVER_KEY= +MG_COAP_ADAPTER_INSTANCE_ID= + +### WS +MG_WS_ADAPTER_LOG_LEVEL=debug 
+MG_WS_ADAPTER_HTTP_HOST=ws-adapter +MG_WS_ADAPTER_HTTP_PORT=8186 +MG_WS_ADAPTER_HTTP_SERVER_CERT= +MG_WS_ADAPTER_HTTP_SERVER_KEY= +MG_WS_ADAPTER_INSTANCE_ID= + +## Addons Services +### Bootstrap +MG_BOOTSTRAP_LOG_LEVEL=debug +MG_BOOTSTRAP_ENCRYPT_KEY=v7aT0HGxJxt2gULzr3RHwf4WIf6DusPp +MG_BOOTSTRAP_EVENT_CONSUMER=bootstrap +MG_BOOTSTRAP_HTTP_HOST=bootstrap +MG_BOOTSTRAP_HTTP_PORT=9013 +MG_BOOTSTRAP_HTTP_SERVER_CERT= +MG_BOOTSTRAP_HTTP_SERVER_KEY= +MG_BOOTSTRAP_DB_HOST=bootstrap-db +MG_BOOTSTRAP_DB_PORT=5432 +MG_BOOTSTRAP_DB_USER=magistrala +MG_BOOTSTRAP_DB_PASS=magistrala +MG_BOOTSTRAP_DB_NAME=bootstrap +MG_BOOTSTRAP_DB_SSL_MODE=disable +MG_BOOTSTRAP_DB_SSL_CERT= +MG_BOOTSTRAP_DB_SSL_KEY= +MG_BOOTSTRAP_DB_SSL_ROOT_CERT= +MG_BOOTSTRAP_INSTANCE_ID= + +### Provision +MG_PROVISION_CONFIG_FILE=/configs/config.toml +MG_PROVISION_LOG_LEVEL=debug +MG_PROVISION_HTTP_PORT=9016 +MG_PROVISION_ENV_CLIENTS_TLS=false +MG_PROVISION_SERVER_CERT= +MG_PROVISION_SERVER_KEY= +MG_PROVISION_USERS_LOCATION=http://users:9002 +MG_PROVISION_THINGS_LOCATION=http://things:9000 +MG_PROVISION_USER= +MG_PROVISION_PASS= +MG_PROVISION_API_KEY= +MG_PROVISION_CERTS_SVC_URL=http://certs:9019 +MG_PROVISION_X509_PROVISIONING=false +MG_PROVISION_BS_SVC_URL=http://bootstrap:9013 +MG_PROVISION_BS_CONFIG_PROVISIONING=true +MG_PROVISION_BS_AUTO_WHITELIST=true +MG_PROVISION_BS_CONTENT= +MG_PROVISION_CERTS_HOURS_VALID=2400h +MG_PROVISION_CERTS_RSA_BITS=2048 +MG_PROVISION_INSTANCE_ID= + +### Vault +MG_VAULT_HOST=vault +MG_VAULT_PORT=8200 +MG_VAULT_ADDR=http://vault:8200 +MG_VAULT_NAMESPACE=magistrala +MG_VAULT_UNSEAL_KEY_1= +MG_VAULT_UNSEAL_KEY_2= +MG_VAULT_UNSEAL_KEY_3= +MG_VAULT_TOKEN= + +MG_VAULT_PKI_PATH=pki +MG_VAULT_PKI_ROLE_NAME=magistrala_int_ca +MG_VAULT_PKI_FILE_NAME=mg_root +MG_VAULT_PKI_CA_CN='Magistrala Root Certificate Authority' +MG_VAULT_PKI_CA_OU='Magistrala' +MG_VAULT_PKI_CA_O='Magistrala' +MG_VAULT_PKI_CA_C='FRANCE' +MG_VAULT_PKI_CA_L='PARIS' +MG_VAULT_PKI_CA_ST='PARIS' +MG_VAULT_PKI_CA_ADDR='5 Av. Anatole' +MG_VAULT_PKI_CA_PO='75007' +MG_VAULT_PKI_CLUSTER_PATH=http://localhost +MG_VAULT_PKI_CLUSTER_AIA_PATH=http://localhost + +MG_VAULT_PKI_INT_PATH=pki_int +MG_VAULT_PKI_INT_SERVER_CERTS_ROLE_NAME=magistrala_server_certs +MG_VAULT_PKI_INT_THINGS_CERTS_ROLE_NAME=magistrala_things_certs +MG_VAULT_PKI_INT_FILE_NAME=mg_int +MG_VAULT_PKI_INT_CA_CN='Magistrala Intermediate Certificate Authority' +MG_VAULT_PKI_INT_CA_OU='Magistrala' +MG_VAULT_PKI_INT_CA_O='Magistrala' +MG_VAULT_PKI_INT_CA_C='FRANCE' +MG_VAULT_PKI_INT_CA_L='PARIS' +MG_VAULT_PKI_INT_CA_ST='PARIS' +MG_VAULT_PKI_INT_CA_ADDR='5 Av. 
Anatole' +MG_VAULT_PKI_INT_CA_PO='75007' +MG_VAULT_PKI_INT_CLUSTER_PATH=http://localhost +MG_VAULT_PKI_INT_CLUSTER_AIA_PATH=http://localhost + +MG_VAULT_THINGS_CERTS_ISSUER_ROLEID=magistrala +MG_VAULT_THINGS_CERTS_ISSUER_SECRET=magistrala + +# Certs +MG_CERTS_LOG_LEVEL=debug +MG_CERTS_SIGN_CA_PATH=/etc/ssl/certs/ca.crt +MG_CERTS_SIGN_CA_KEY_PATH=/etc/ssl/certs/ca.key +MG_CERTS_VAULT_HOST=${MG_VAULT_ADDR} +MG_CERTS_VAULT_NAMESPACE=${MG_VAULT_NAMESPACE} +MG_CERTS_VAULT_APPROLE_ROLEID=${MG_VAULT_THINGS_CERTS_ISSUER_ROLEID} +MG_CERTS_VAULT_APPROLE_SECRET=${MG_VAULT_THINGS_CERTS_ISSUER_SECRET} +MG_CERTS_VAULT_THINGS_CERTS_PKI_PATH=${MG_VAULT_PKI_INT_PATH} +MG_CERTS_VAULT_THINGS_CERTS_PKI_ROLE_NAME=${MG_VAULT_PKI_INT_THINGS_CERTS_ROLE_NAME} +MG_CERTS_HTTP_HOST=certs +MG_CERTS_HTTP_PORT=9019 +MG_CERTS_HTTP_SERVER_CERT= +MG_CERTS_HTTP_SERVER_KEY= +MG_CERTS_DB_HOST=certs-db +MG_CERTS_DB_PORT=5432 +MG_CERTS_DB_USER=magistrala +MG_CERTS_DB_PASS=magistrala +MG_CERTS_DB_NAME=certs +MG_CERTS_DB_SSL_MODE= +MG_CERTS_DB_SSL_CERT= +MG_CERTS_DB_SSL_KEY= +MG_CERTS_DB_SSL_ROOT_CERT= +MG_CERTS_INSTANCE_ID= + + +### LoRa +MG_LORA_ADAPTER_LOG_LEVEL=debug +MG_LORA_ADAPTER_MESSAGES_URL=tcp://magistrala-mqtt:1883 +MG_LORA_ADAPTER_MESSAGES_TOPIC=application/+/device/+/event/up +MG_LORA_ADAPTER_MESSAGES_USER= +MG_LORA_ADAPTER_MESSAGES_PASS= +MG_LORA_ADAPTER_MESSAGES_TIMEOUT=30s +MG_LORA_ADAPTER_EVENT_CONSUMER=lora-adapter +MG_LORA_ADAPTER_HTTP_HOST=lora-adapter +MG_LORA_ADAPTER_HTTP_PORT=9017 +MG_LORA_ADAPTER_HTTP_SERVER_CERT= +MG_LORA_ADAPTER_HTTP_SERVER_KEY= +MG_LORA_ADAPTER_ROUTE_MAP_URL=redis://lora-redis:${MG_REDIS_TCP_PORT}/0 +MG_LORA_ADAPTER_INSTANCE_ID= + +### OPC-UA +MG_OPCUA_ADAPTER_LOG_LEVEL=debug +MG_OPCUA_ADAPTER_EVENT_CONSUMER=opcua-adapter +MG_OPCUA_ADAPTER_HTTP_HOST=opcua-adapter +MG_OPCUA_ADAPTER_HTTP_PORT=8188 +MG_OPCUA_ADAPTER_HTTP_SERVER_CERT= +MG_OPCUA_ADAPTER_HTTP_SERVER_KEY= +MG_OPCUA_ADAPTER_ROUTE_MAP_URL=redis://opcua-redis:${MG_REDIS_TCP_PORT}/0 +MG_OPCUA_ADAPTER_INSTANCE_ID= + +### Cassandra +MG_CASSANDRA_CLUSTER=magistrala-cassandra +MG_CASSANDRA_KEYSPACE=magistrala +MG_CASSANDRA_USER=magistrala +MG_CASSANDRA_PASS=magistrala +MG_CASSANDRA_PORT=9042 + +### Cassandra Writer +MG_CASSANDRA_WRITER_LOG_LEVEL=debug +MG_CASSANDRA_WRITER_CONFIG_PATH=/config.toml +MG_CASSANDRA_WRITER_HTTP_HOST=cassandra-writer +MG_CASSANDRA_WRITER_HTTP_PORT=9004 +MG_CASSANDRA_WRITER_HTTP_SERVER_CERT= +MG_CASSANDRA_WRITER_HTTP_SERVER_KEY= +MG_CASSANDRA_WRITER_INSTANCE_ID= + +### Cassandra Reader +MG_CASSANDRA_READER_LOG_LEVEL=debug +MG_CASSANDRA_READER_HTTP_HOST=cassandra-reader +MG_CASSANDRA_READER_HTTP_PORT=9003 +MG_CASSANDRA_READER_HTTP_SERVER_CERT= +MG_CASSANDRA_READER_HTTP_SERVER_KEY= +MG_CASSANDRA_READER_INSTANCE_ID= + +### InfluxDB +MG_INFLUXDB_PROTOCOL=http +MG_INFLUXDB_HOST=magistrala-influxdb +MG_INFLUXDB_PORT=8086 +MG_INFLUXDB_ADMIN_USER=magistrala +MG_INFLUXDB_ADMIN_PASSWORD=magistrala +MG_INFLUXDB_NAME=magistrala +MG_INFLUXDB_BUCKET=magistrala-bucket +MG_INFLUXDB_ORG=magistrala +MG_INFLUXDB_TOKEN=magistrala-token +MG_INFLUXDB_DBURL= +MG_INFLUXDB_USER_AGENT=InfluxDBClient +MG_INFLUXDB_TIMEOUT=1s +MG_INFLUXDB_INSECURE_SKIP_VERIFY=false +MG_INFLUXDB_INIT_MODE=setup +MG_INFLUXDB_ADMIN_URL=http://${MG_INFLUXDB_HOST}:${MG_INFLUXDB_PORT} +MG_INFLUXDB_HTTP_ENABLED=true + +### InfluxDB Writer +MG_INFLUX_WRITER_LOG_LEVEL=debug +MG_INFLUX_WRITER_CONFIG_PATH=/config.toml +MG_INFLUX_WRITER_HTTP_HOST=influxdb-writer +MG_INFLUX_WRITER_HTTP_PORT=9006 +MG_INFLUX_WRITER_HTTP_SERVER_CERT= +MG_INFLUX_WRITER_HTTP_SERVER_KEY= 
+MG_INFLUX_WRITER_INSTANCE_ID= + +### InfluxDB Reader +MG_INFLUX_READER_LOG_LEVEL=debug +MG_INFLUX_READER_HTTP_HOST=influxdb-reader +MG_INFLUX_READER_HTTP_PORT=9005 +MG_INFLUX_READER_HTTP_SERVER_CERT= +MG_INFLUX_READER_HTTP_SERVER_KEY= +MG_INFLUX_READER_INSTANCE_ID= + +### MongoDB +MG_MONGO_HOST=magistrala-mongodb +MG_MONGO_PORT=27017 +MG_MONGO_NAME=magistrala + +### MongoDB Writer +MG_MONGO_WRITER_LOG_LEVEL=debug +MG_MONGO_WRITER_CONFIG_PATH=/config.toml +MG_MONGO_WRITER_HTTP_HOST=mongodb-writer +MG_MONGO_WRITER_HTTP_PORT=9008 +MG_MONGO_WRITER_HTTP_SERVER_CERT= +MG_MONGO_WRITER_HTTP_SERVER_KEY= +MG_MONGO_WRITER_INSTANCE_ID= + +### MongoDB Reader +MG_MONGO_READER_LOG_LEVEL=debug +MG_MONGO_READER_HTTP_HOST=mongodb-reader +MG_MONGO_READER_HTTP_PORT=9007 +MG_MONGO_READER_HTTP_SERVER_CERT= +MG_MONGO_READER_HTTP_SERVER_KEY= +MG_MONGO_READER_INSTANCE_ID= + +### Postgres +MG_POSTGRES_HOST=magistrala-postgres +MG_POSTGRES_PORT=5432 +MG_POSTGRES_USER=magistrala +MG_POSTGRES_PASS=magistrala +MG_POSTGRES_NAME=messages +MG_POSTGRES_SSL_MODE=disable +MG_POSTGRES_SSL_CERT= +MG_POSTGRES_SSL_KEY= +MG_POSTGRES_SSL_ROOT_CERT= + +### Postgres Writer +MG_POSTGRES_WRITER_LOG_LEVEL=debug +MG_POSTGRES_WRITER_CONFIG_PATH=/config.toml +MG_POSTGRES_WRITER_HTTP_HOST=postgres-writer +MG_POSTGRES_WRITER_HTTP_PORT=9010 +MG_POSTGRES_WRITER_HTTP_SERVER_CERT= +MG_POSTGRES_WRITER_HTTP_SERVER_KEY= +MG_POSTGRES_WRITER_INSTANCE_ID= + +### Postgres Reader +MG_POSTGRES_READER_LOG_LEVEL=debug +MG_POSTGRES_READER_HTTP_HOST=postgres-reader +MG_POSTGRES_READER_HTTP_PORT=9009 +MG_POSTGRES_READER_HTTP_SERVER_CERT= +MG_POSTGRES_READER_HTTP_SERVER_KEY= +MG_POSTGRES_READER_INSTANCE_ID= + +### Timescale +MG_TIMESCALE_HOST=magistrala-timescale +MG_TIMESCALE_PORT=5432 +MG_TIMESCALE_USER=magistrala +MG_TIMESCALE_PASS=magistrala +MG_TIMESCALE_NAME=magistrala +MG_TIMESCALE_SSL_MODE=disable +MG_TIMESCALE_SSL_CERT= +MG_TIMESCALE_SSL_KEY= +MG_TIMESCALE_SSL_ROOT_CERT= + +### Timescale Writer +MG_TIMESCALE_WRITER_LOG_LEVEL=debug +MG_TIMESCALE_WRITER_CONFIG_PATH=/config.toml +MG_TIMESCALE_WRITER_HTTP_HOST=timescale-writer +MG_TIMESCALE_WRITER_HTTP_PORT=9012 +MG_TIMESCALE_WRITER_HTTP_SERVER_CERT= +MG_TIMESCALE_WRITER_HTTP_SERVER_KEY= +MG_TIMESCALE_WRITER_INSTANCE_ID= + +### Timescale Reader +MG_TIMESCALE_READER_LOG_LEVEL=debug +MG_TIMESCALE_READER_HTTP_HOST=timescale-reader +MG_TIMESCALE_READER_HTTP_PORT=9011 +MG_TIMESCALE_READER_HTTP_SERVER_CERT= +MG_TIMESCALE_READER_HTTP_SERVER_KEY= +MG_TIMESCALE_READER_INSTANCE_ID= + +### Twins +MG_TWINS_LOG_LEVEL=debug +MG_TWINS_STANDALONE_ID= +MG_TWINS_STANDALONE_TOKEN= +MG_TWINS_CHANNEL_ID= +MG_TWINS_HTTP_HOST=twins +MG_TWINS_HTTP_PORT=9018 +MG_TWINS_HTTP_SERVER_CERT= +MG_TWINS_HTTP_SERVER_KEY= +MG_TWINS_CACHE_URL=redis://twins-redis:${MG_REDIS_TCP_PORT}/0 +MG_TWINS_DB_HOST=twins-db +MG_TWINS_DB_PORT=27018 +MG_TWINS_DB_NAME=twins +MG_TWINS_INSTANCE_ID= + +### SMTP Notifier +MG_SMTP_NOTIFIER_LOG_LEVEL=debug +MG_SMTP_NOTIFIER_CONFIG_PATH=/config.toml +MG_SMTP_NOTIFIER_FROM_ADDR= +MG_SMTP_NOTIFIER_HTTP_HOST=smtp-notifier +MG_SMTP_NOTIFIER_HTTP_PORT=9015 +MG_SMTP_NOTIFIER_HTTP_SERVER_CERT= +MG_SMTP_NOTIFIER_HTTP_SERVER_KEY= +MG_SMTP_NOTIFIER_DB_HOST=smtp-notifier-db +MG_SMTP_NOTIFIER_DB_PORT=5432 +MG_SMTP_NOTIFIER_DB_USER=magistrala +MG_SMTP_NOTIFIER_DB_PASS=magistrala +MG_SMTP_NOTIFIER_DB_NAME=subscriptions +MG_SMTP_NOTIFIER_DB_SSL_MODE=disable +MG_SMTP_NOTIFIER_DB_SSL_CERT= +MG_SMTP_NOTIFIER_DB_SSL_KEY= +MG_SMTP_NOTIFIER_DB_SSL_ROOT_CERT= +MG_SMTP_NOTIFIER_EMAIL_TEMPLATE=smtp-notifier.tmpl 
+MG_SMTP_NOTIFIER_INSTANCE_ID=
+
+### SMPP Notifier
+MG_SMPP_NOTIFIER_LOG_LEVEL=debug
+MG_SMPP_NOTIFIER_FROM_ADDR=
+MG_SMPP_NOTIFIER_CONFIG_PATH=/config.toml
+MG_SMPP_NOTIFIER_HTTP_HOST=smpp-notifier
+MG_SMPP_NOTIFIER_HTTP_PORT=9014
+MG_SMPP_NOTIFIER_HTTP_SERVER_CERT=
+MG_SMPP_NOTIFIER_HTTP_SERVER_KEY=
+MG_SMPP_NOTIFIER_DB_HOST=smpp-notifier-db
+MG_SMPP_NOTIFIER_DB_PORT=5432
+MG_SMPP_NOTIFIER_DB_USER=magistrala
+MG_SMPP_NOTIFIER_DB_PASS=magistrala
+MG_SMPP_NOTIFIER_DB_NAME=subscriptions
+MG_SMPP_NOTIFIER_DB_SSL_MODE=disable
+MG_SMPP_NOTIFIER_DB_SSL_CERT=
+MG_SMPP_NOTIFIER_DB_SSL_KEY=
+MG_SMPP_NOTIFIER_DB_SSL_ROOT_CERT=
+MG_SMPP_ADDRESS=localhost:2775
+MG_SMPP_USERNAME=
+MG_SMPP_PASSWORD=
+MG_SMPP_SYSTEM_TYPE=
+MG_SMPP_SRC_ADDR_TON=5
+MG_SMPP_DST_ADDR_TON=1
+MG_SMPP_SRC_ADDR_NPI=0
+MG_SMPP_DST_ADDR_NPI=1
+MG_SMPP_NOTIFIER_INSTANCE_ID=
+
+### Journal
+MG_JOURNAL_LOG_LEVEL=info
+MG_JOURNAL_HTTP_HOST=journal
+MG_JOURNAL_HTTP_PORT=9021
+MG_JOURNAL_HTTP_SERVER_CERT=
+MG_JOURNAL_HTTP_SERVER_KEY=
+MG_JOURNAL_HOST=magistrala-journal-db
+MG_JOURNAL_PORT=5432
+MG_JOURNAL_USER=magistrala
+MG_JOURNAL_PASS=magistrala
+MG_JOURNAL_NAME=journal
+MG_JOURNAL_SSL_MODE=disable
+MG_JOURNAL_SSL_CERT=
+MG_JOURNAL_SSL_KEY=
+MG_JOURNAL_SSL_ROOT_CERT=
+MG_JOURNAL_INSTANCE_ID=
+
+### GRAFANA and PROMETHEUS
+MG_PROMETHEUS_PORT=9090
+MG_GRAFANA_PORT=3000
+MG_GRAFANA_ADMIN_USER=magistrala
+MG_GRAFANA_ADMIN_PASSWORD=magistrala
+
+# Docker image tag
+MG_RELEASE_TAG=latest
diff --git a/docker/Dockerfile b/docker/Dockerfile
new file mode 100644
index 0000000..9557117
--- /dev/null
+++ b/docker/Dockerfile
@@ -0,0 +1,23 @@
+# Copyright (c) Abstract Machines
+# SPDX-License-Identifier: Apache-2.0
+
+FROM golang:1.22-alpine AS builder
+ARG SVC
+ARG GOARCH
+ARG GOARM
+ARG VERSION
+ARG COMMIT
+ARG TIME
+
+WORKDIR /go/src/github.com/absmach/magistrala
+COPY . .
+RUN apk update \
+    && apk add make \
+    && make $SVC \
+    && mv build/$SVC /exe
+
+FROM scratch
+# Certificates are needed so that mailing util can work.
+COPY --from=builder /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/ca-certificates.crt
+COPY --from=builder /exe /
+ENTRYPOINT ["/exe"]
diff --git a/docker/Dockerfile.dev b/docker/Dockerfile.dev
new file mode 100644
index 0000000..7d55569
--- /dev/null
+++ b/docker/Dockerfile.dev
@@ -0,0 +1,8 @@
+# Copyright (c) Abstract Machines
+# SPDX-License-Identifier: Apache-2.0
+
+FROM scratch
+ARG SVC
+COPY $SVC /exe
+COPY --from=alpine:latest /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/ca-certificates.crt
+ENTRYPOINT ["/exe"]
diff --git a/docker/README.md b/docker/README.md
new file mode 100644
index 0000000..c21e20d
--- /dev/null
+++ b/docker/README.md
@@ -0,0 +1,134 @@
+# Docker Composition
+
+Configure environment variables and run Magistrala Docker Composition.
+
+**Note**: `docker-compose` uses the `.env` file to set all environment variables. Ensure that you run the commands from the same location as the `.env` file.
+
+## Installation
+
+Follow the [official documentation](https://docs.docker.com/compose/install/).
+
+## Usage
+
+Run the following commands from the project root directory.
+
+```bash
+docker compose -f docker/docker-compose.yml up
+```
+
+```bash
+docker compose -f docker/addons/<path>/docker-compose.yml up
+```
+
+To pull docker images from a specific release, change the value of `MG_RELEASE_TAG` in `.env` before running these commands.
+
+## Broker Configuration
+
+Magistrala supports a configurable MQTT broker and a configurable message broker, the latter of which also acts as an events store.
Magistrala uses two types of brokers:
+
+1. MQTT_BROKER: handles MQTT communication between the MQTT adapters and the message broker. This can be either 'VerneMQ' or 'NATS'.
+2. MESSAGE_BROKER: manages message exchange between Magistrala core, optional, and external services, and is used to store messages for distributed processing. This can be either 'NATS' or 'RabbitMQ'.
+
+Events store: this is used by Magistrala services to store events for distributed processing. Magistrala uses a single service as both message broker and events store, so the events store is the same as MESSAGE_BROKER and can be 'NATS', 'RabbitMQ' or 'Redis'. If Redis is used as the events store, RabbitMQ or NATS must be deployed alongside it as the message broker for message exchange.
+
+The current deployment strategy for Magistrala in `docker/docker-compose.yml` is to use VerneMQ as the MQTT_BROKER and NATS as the MESSAGE_BROKER and EVENTS_STORE.
+
+Therefore, the following combinations are possible:
+
+- MQTT_BROKER: VerneMQ, MESSAGE_BROKER: NATS, EVENTS_STORE: NATS
+- MQTT_BROKER: VerneMQ, MESSAGE_BROKER: NATS, EVENTS_STORE: Redis
+- MQTT_BROKER: VerneMQ, MESSAGE_BROKER: RabbitMQ, EVENTS_STORE: RabbitMQ
+- MQTT_BROKER: VerneMQ, MESSAGE_BROKER: RabbitMQ, EVENTS_STORE: Redis
+- MQTT_BROKER: NATS, MESSAGE_BROKER: RabbitMQ, EVENTS_STORE: RabbitMQ
+- MQTT_BROKER: NATS, MESSAGE_BROKER: RabbitMQ, EVENTS_STORE: Redis
+- MQTT_BROKER: NATS, MESSAGE_BROKER: NATS, EVENTS_STORE: NATS
+- MQTT_BROKER: NATS, MESSAGE_BROKER: NATS, EVENTS_STORE: Redis
+
+For message brokers other than NATS, you need to build the docker images with RabbitMQ as the build tag and change `docker/.env`. For example, to use RabbitMQ as the message broker:
+
+```bash
+MG_MESSAGE_BROKER_TYPE=rabbitmq make dockers
+```
+
+```env
+MG_MESSAGE_BROKER_TYPE=rabbitmq
+MG_MESSAGE_BROKER_URL=${MG_RABBITMQ_URL}
+```
+
+For Redis as an events store, you need to run RabbitMQ or NATS as the message broker. For example, to use Redis as an events store with RabbitMQ as the message broker:
+
+```bash
+MG_ES_TYPE=redis MG_MESSAGE_BROKER_TYPE=rabbitmq make dockers
+```
+
+```env
+MG_MESSAGE_BROKER_TYPE=rabbitmq
+MG_MESSAGE_BROKER_URL=${MG_RABBITMQ_URL}
+MG_ES_TYPE=redis
+MG_ES_URL=${MG_REDIS_URL}
+```
+
+For an MQTT broker other than VerneMQ, you need to change `docker/.env`.
For example, to use NATS as an MQTT broker:
+
+```env
+MG_MQTT_BROKER_TYPE=nats
+MG_MQTT_BROKER_HEALTH_CHECK=${MG_NATS_HEALTH_CHECK}
+MG_MQTT_ADAPTER_MQTT_QOS=${MG_NATS_MQTT_QOS}
+MG_MQTT_ADAPTER_MQTT_TARGET_HOST=${MG_MQTT_BROKER_TYPE}
+MG_MQTT_ADAPTER_MQTT_TARGET_PORT=1883
+MG_MQTT_ADAPTER_MQTT_TARGET_HEALTH_CHECK=${MG_MQTT_BROKER_HEALTH_CHECK}
+MG_MQTT_ADAPTER_WS_TARGET_HOST=${MG_MQTT_BROKER_TYPE}
+MG_MQTT_ADAPTER_WS_TARGET_PORT=8080
+MG_MQTT_ADAPTER_WS_TARGET_PATH=${MG_NATS_WS_TARGET_PATH}
+```
+
+### RabbitMQ configuration
+
+```yaml
+services:
+  rabbitmq:
+    image: rabbitmq:3.12.12-management-alpine
+    container_name: magistrala-rabbitmq
+    restart: on-failure
+    environment:
+      RABBITMQ_ERLANG_COOKIE: ${MG_RABBITMQ_COOKIE}
+      RABBITMQ_DEFAULT_USER: ${MG_RABBITMQ_USER}
+      RABBITMQ_DEFAULT_PASS: ${MG_RABBITMQ_PASS}
+      RABBITMQ_DEFAULT_VHOST: ${MG_RABBITMQ_VHOST}
+    ports:
+      - ${MG_RABBITMQ_PORT}:${MG_RABBITMQ_PORT}
+      - ${MG_RABBITMQ_HTTP_PORT}:${MG_RABBITMQ_HTTP_PORT}
+    networks:
+      - magistrala-base-net
+```
+
+### Redis configuration
+
+```yaml
+services:
+  redis:
+    image: redis:7.2.4-alpine
+    container_name: magistrala-es-redis
+    restart: on-failure
+    networks:
+      - magistrala-base-net
+    volumes:
+      - magistrala-broker-volume:/data
+```
+
+## Nginx Configuration
+
+Nginx is the entry point for all traffic to Magistrala.
+Using the environment variables file at `docker/.env`, you can modify the Nginx directives given below.
+
+The `MG_NGINX_SERVER_NAME` environment variable is used to configure the nginx directive `server_name`. If `MG_NGINX_SERVER_NAME` is empty, the default value `localhost` is used as `server_name`.
+
+The `MG_NGINX_SERVER_CERT` environment variable is used to configure the nginx directive `ssl_certificate`. If `MG_NGINX_SERVER_CERT` is empty, the server certificate at `docker/ssl/certs/magistrala-server.crt` is used by default.
+
+The `MG_NGINX_SERVER_KEY` environment variable is used to configure the nginx directive `ssl_certificate_key`. If `MG_NGINX_SERVER_KEY` is empty, the server certificate key at `docker/ssl/certs/magistrala-server.key` is used by default.
+
+The `MG_NGINX_SERVER_CLIENT_CA` environment variable is used to configure the nginx directive `ssl_client_certificate`. If `MG_NGINX_SERVER_CLIENT_CA` is empty, the certificate at `docker/ssl/certs/ca.crt` is used by default.
+
+The `MG_NGINX_SERVER_DHPARAM` environment variable is used to configure the nginx directive `ssl_dhparam`. If `MG_NGINX_SERVER_DHPARAM` is empty, the file at `docker/ssl/dhparam.pem` is used by default.
diff --git a/docker/addons/bootstrap/docker-compose.yml b/docker/addons/bootstrap/docker-compose.yml
new file mode 100644
index 0000000..9792972
--- /dev/null
+++ b/docker/addons/bootstrap/docker-compose.yml
@@ -0,0 +1,83 @@
+# Copyright (c) Abstract Machines
+# SPDX-License-Identifier: Apache-2.0
+
+# This docker-compose file contains optional bootstrap services. Since they are optional, this file is
+# dependent on the docker-compose file from <project_root>/docker. To run these services, execute the following command:
+# docker compose -f docker/docker-compose.yml -f docker/addons/bootstrap/docker-compose.yml up
+# from the project root.
+ +networks: + magistrala-base-net: + +volumes: + magistrala-bootstrap-db-volume: + + +services: + bootstrap-db: + image: postgres:16.2-alpine + container_name: magistrala-bootstrap-db + restart: on-failure + environment: + POSTGRES_USER: ${MG_BOOTSTRAP_DB_USER} + POSTGRES_PASSWORD: ${MG_BOOTSTRAP_DB_PASS} + POSTGRES_DB: ${MG_BOOTSTRAP_DB_NAME} + networks: + - magistrala-base-net + volumes: + - magistrala-bootstrap-db-volume:/var/lib/postgresql/data + + bootstrap: + image: magistrala/bootstrap:${MG_RELEASE_TAG} + container_name: magistrala-bootstrap + depends_on: + - bootstrap-db + restart: on-failure + ports: + - ${MG_BOOTSTRAP_HTTP_PORT}:${MG_BOOTSTRAP_HTTP_PORT} + environment: + MG_BOOTSTRAP_LOG_LEVEL: ${MG_BOOTSTRAP_LOG_LEVEL} + MG_BOOTSTRAP_ENCRYPT_KEY: ${MG_BOOTSTRAP_ENCRYPT_KEY} + MG_BOOTSTRAP_EVENT_CONSUMER: ${MG_BOOTSTRAP_EVENT_CONSUMER} + MG_ES_URL: ${MG_ES_URL} + MG_BOOTSTRAP_HTTP_HOST: ${MG_BOOTSTRAP_HTTP_HOST} + MG_BOOTSTRAP_HTTP_PORT: ${MG_BOOTSTRAP_HTTP_PORT} + MG_BOOTSTRAP_HTTP_SERVER_CERT: ${MG_BOOTSTRAP_HTTP_SERVER_CERT} + MG_BOOTSTRAP_HTTP_SERVER_KEY: ${MG_BOOTSTRAP_HTTP_SERVER_KEY} + MG_BOOTSTRAP_DB_HOST: ${MG_BOOTSTRAP_DB_HOST} + MG_BOOTSTRAP_DB_PORT: ${MG_BOOTSTRAP_DB_PORT} + MG_BOOTSTRAP_DB_USER: ${MG_BOOTSTRAP_DB_USER} + MG_BOOTSTRAP_DB_PASS: ${MG_BOOTSTRAP_DB_PASS} + MG_BOOTSTRAP_DB_NAME: ${MG_BOOTSTRAP_DB_NAME} + MG_BOOTSTRAP_DB_SSL_MODE: ${MG_BOOTSTRAP_DB_SSL_MODE} + MG_BOOTSTRAP_DB_SSL_CERT: ${MG_BOOTSTRAP_DB_SSL_CERT} + MG_BOOTSTRAP_DB_SSL_KEY: ${MG_BOOTSTRAP_DB_SSL_KEY} + MG_BOOTSTRAP_DB_SSL_ROOT_CERT: ${MG_BOOTSTRAP_DB_SSL_ROOT_CERT} + MG_AUTH_GRPC_URL: ${MG_AUTH_GRPC_URL} + MG_AUTH_GRPC_TIMEOUT: ${MG_AUTH_GRPC_TIMEOUT} + MG_AUTH_GRPC_CLIENT_CERT: ${MG_AUTH_GRPC_CLIENT_CERT:+/auth-grpc-client.crt} + MG_AUTH_GRPC_CLIENT_KEY: ${MG_AUTH_GRPC_CLIENT_KEY:+/auth-grpc-client.key} + MG_AUTH_GRPC_SERVER_CA_CERTS: ${MG_AUTH_GRPC_SERVER_CA_CERTS:+/auth-grpc-server-ca.crt} + MG_THINGS_URL: ${MG_THINGS_URL} + MG_JAEGER_URL: ${MG_JAEGER_URL} + MG_JAEGER_TRACE_RATIO: ${MG_JAEGER_TRACE_RATIO} + MG_SEND_TELEMETRY: ${MG_SEND_TELEMETRY} + MG_BOOTSTRAP_INSTANCE_ID: ${MG_BOOTSTRAP_INSTANCE_ID} + networks: + - magistrala-base-net + volumes: + - type: bind + source: ${MG_ADDONS_CERTS_PATH_PREFIX}${MG_AUTH_GRPC_CLIENT_CERT:-./ssl/certs/dummy/client_cert} + target: /auth-grpc-client${MG_AUTH_GRPC_CLIENT_CERT:+.crt} + bind: + create_host_path: true + - type: bind + source: ${MG_ADDONS_CERTS_PATH_PREFIX}${MG_AUTH_GRPC_CLIENT_KEY:-./ssl/certs/dummy/client_key} + target: /auth-grpc-client${MG_AUTH_GRPC_CLIENT_KEY:+.key} + bind: + create_host_path: true + - type: bind + source: ${MG_ADDONS_CERTS_PATH_PREFIX}${MG_AUTH_GRPC_SERVER_CA_CERTS:-./ssl/certs/dummy/server_ca} + target: /auth-grpc-server-ca${MG_AUTH_GRPC_SERVER_CA_CERTS:+.crt} + bind: + create_host_path: true diff --git a/docker/addons/cassandra-reader/docker-compose.yml b/docker/addons/cassandra-reader/docker-compose.yml new file mode 100644 index 0000000..fc381ea --- /dev/null +++ b/docker/addons/cassandra-reader/docker-compose.yml @@ -0,0 +1,77 @@ +# Copyright (c) Abstract Machines +# SPDX-License-Identifier: Apache-2.0 + +# This docker-compose file contains optional cassandra-reader. Since it's optional, this file is +# dependent of docker-compose file from <project_root>/docker. In order to run this service, execute command: +# docker compose -f docker/docker-compose.yml -f docker/addons/cassandra-reader/docker-compose.yml up +# from project root. 
+ +networks: + magistrala-base-net: + +services: + cassandra-reader: + image: magistrala/cassandra-reader:${MG_RELEASE_TAG} + container_name: magistrala-cassandra-reader + restart: on-failure + environment: + MG_CASSANDRA_READER_LOG_LEVEL: ${MG_CASSANDRA_READER_LOG_LEVEL} + MG_CASSANDRA_READER_HTTP_HOST: ${MG_CASSANDRA_READER_HTTP_HOST} + MG_CASSANDRA_READER_HTTP_PORT: ${MG_CASSANDRA_READER_HTTP_PORT} + MG_CASSANDRA_READER_HTTP_SERVER_CERT: ${MG_CASSANDRA_READER_HTTP_SERVER_CERT} + MG_CASSANDRA_READER_HTTP_SERVER_KEY: ${MG_CASSANDRA_READER_HTTP_SERVER_KEY} + MG_CASSANDRA_PORT: ${MG_CASSANDRA_PORT} + MG_CASSANDRA_CLUSTER: ${MG_CASSANDRA_CLUSTER} + MG_CASSANDRA_KEYSPACE: ${MG_CASSANDRA_KEYSPACE} + MG_CASSANDRA_USER: ${MG_CASSANDRA_USER} + MG_CASSANDRA_PASS: ${MG_CASSANDRA_PASS} + MG_THINGS_AUTH_GRPC_URL: ${MG_THINGS_AUTH_GRPC_URL} + MG_THINGS_AUTH_GRPC_TIMEOUT: ${MG_THINGS_AUTH_GRPC_TIMEOUT} + MG_THINGS_AUTH_GRPC_CLIENT_CERT: ${MG_THINGS_AUTH_GRPC_CLIENT_CERT:+/things-grpc-client.crt} + MG_THINGS_AUTH_GRPC_CLIENT_KEY: ${MG_THINGS_AUTH_GRPC_CLIENT_KEY:+/things-grpc-client.key} + MG_THINGS_AUTH_GRPC_SERVER_CA_CERTS: ${MG_THINGS_AUTH_GRPC_SERVER_CA_CERTS:+/things-grpc-server-ca.crt} + MG_AUTH_GRPC_URL: ${MG_AUTH_GRPC_URL} + MG_AUTH_GRPC_TIMEOUT: ${MG_AUTH_GRPC_TIMEOUT} + MG_AUTH_GRPC_CLIENT_CERT: ${MG_AUTH_GRPC_CLIENT_CERT:+/auth-grpc-client.crt} + MG_AUTH_GRPC_CLIENT_KEY: ${MG_AUTH_GRPC_CLIENT_KEY:+/auth-grpc-client.key} + MG_AUTH_GRPC_SERVER_CA_CERTS: ${MG_AUTH_GRPC_SERVER_CA_CERTS:+/auth-grpc-server-ca.crt} + MG_SEND_TELEMETRY: ${MG_SEND_TELEMETRY} + MG_CASSANDRA_READER_INSTANCE_ID: ${MG_CASSANDRA_READER_INSTANCE_ID} + ports: + - ${MG_CASSANDRA_READER_HTTP_PORT}:${MG_CASSANDRA_READER_HTTP_PORT} + networks: + - magistrala-base-net + volumes: + - ../../ssl/certs:/etc/ssl/certs + # Auth gRPC client certificates + - type: bind + source: ${MG_ADDONS_CERTS_PATH_PREFIX}${MG_AUTH_GRPC_CLIENT_CERT:-./ssl/certs/dummy/client_cert} + target: /auth-grpc-client${MG_AUTH_GRPC_CLIENT_CERT:+.crt} + bind: + create_host_path: true + - type: bind + source: ${MG_ADDONS_CERTS_PATH_PREFIX}${MG_AUTH_GRPC_CLIENT_KEY:-./ssl/certs/dummy/client_key} + target: /auth-grpc-client${MG_AUTH_GRPC_CLIENT_KEY:+.key} + bind: + create_host_path: true + - type: bind + source: ${MG_ADDONS_CERTS_PATH_PREFIX}${MG_AUTH_GRPC_SERVER_CA_CERTS:-./ssl/certs/dummy/server_ca} + target: /auth-grpc-server-ca${MG_AUTH_GRPC_SERVER_CA_CERTS:+.crt} + bind: + create_host_path: true + # Things gRPC mTLS client certificates + - type: bind + source: ${MG_ADDONS_CERTS_PATH_PREFIX}${MG_THINGS_AUTH_GRPC_CLIENT_CERT:-ssl/certs/dummy/client_cert} + target: /things-grpc-client${MG_THINGS_AUTH_GRPC_CLIENT_CERT:+.crt} + bind: + create_host_path: true + - type: bind + source: ${MG_ADDONS_CERTS_PATH_PREFIX}${MG_THINGS_AUTH_GRPC_CLIENT_KEY:-ssl/certs/dummy/client_key} + target: /things-grpc-client${MG_THINGS_AUTH_GRPC_CLIENT_KEY:+.key} + bind: + create_host_path: true + - type: bind + source: ${MG_ADDONS_CERTS_PATH_PREFIX}${MG_THINGS_AUTH_GRPC_SERVER_CA_CERTS:-ssl/certs/dummy/server_ca} + target: /things-grpc-server-ca${MG_THINGS_AUTH_GRPC_SERVER_CA_CERTS:+.crt} + bind: + create_host_path: true diff --git a/docker/addons/cassandra-writer/config.toml b/docker/addons/cassandra-writer/config.toml new file mode 100644 index 0000000..b04ce56 --- /dev/null +++ b/docker/addons/cassandra-writer/config.toml @@ -0,0 +1,19 @@ +# Copyright (c) Abstract Machines +# SPDX-License-Identifier: Apache-2.0 + +# To listen all messsage broker subjects use default value 
"channels.>". +# To subscribe to specific subjects use values starting by "channels." and +# followed by a subtopic (e.g ["channels.<channel_id>.sub.topic.x", ...]). +[subscriber] +subjects = ["channels.>"] + +[transformer] +# SenML or JSON +format = "senml" +# Used if format is SenML +content_type = "application/senml+json" +# Used as timestamp fields if format is JSON +time_fields = [{ field_name = "seconds_key", field_format = "unix", location = "UTC"}, + { field_name = "millis_key", field_format = "unix_ms", location = "UTC"}, + { field_name = "micros_key", field_format = "unix_us", location = "UTC"}, + { field_name = "nanos_key", field_format = "unix_ns", location = "UTC"}] diff --git a/docker/addons/cassandra-writer/docker-compose.yml b/docker/addons/cassandra-writer/docker-compose.yml new file mode 100644 index 0000000..c7edec8 --- /dev/null +++ b/docker/addons/cassandra-writer/docker-compose.yml @@ -0,0 +1,66 @@ +# Copyright (c) Abstract Machines +# SPDX-License-Identifier: Apache-2.0 + +# This docker-compose file contains optional Cassandra and cassandra-writer. Since these are optional, this file is +# dependent of docker-compose file from <project_root>/docker. In order to run these services, execute command: +# docker compose -f docker/docker-compose.yml -f docker/addons/cassandra-writer/docker-compose.yml up +# from project root. + +networks: + magistrala-base-net: + +volumes: + magistrala-cassandra-volume: + +services: + cassandra: + image: cassandra:3.11.16 + container_name: magistrala-cassandra + restart: on-failure + networks: + - magistrala-base-net + ports: + - ${MG_CASSANDRA_PORT}:${MG_CASSANDRA_PORT} + volumes: + - magistrala-cassandra-volume:/var/lib/cassandra + + cassandra-init-keyspace: + image: cassandra:3.11.16 + depends_on: + - cassandra + restart: on-failure + volumes: + - ./init.sh:/init.sh + entrypoint: ["/init.sh"] + networks: + - magistrala-base-net + + cassandra-writer: + image: magistrala/cassandra-writer:${MG_RELEASE_TAG} + container_name: magistrala-cassandra-writer + depends_on: + - cassandra + restart: on-failure + environment: + MG_CASSANDRA_WRITER_LOG_LEVEL: ${MG_CASSANDRA_WRITER_LOG_LEVEL} + MG_CASSANDRA_WRITER_CONFIG_PATH: ${MG_CASSANDRA_WRITER_CONFIG_PATH} + MG_CASSANDRA_WRITER_HTTP_HOST: ${MG_CASSANDRA_WRITER_HTTP_HOST} + MG_CASSANDRA_WRITER_HTTP_PORT: ${MG_CASSANDRA_WRITER_HTTP_PORT} + MG_CASSANDRA_WRITER_HTTP_SERVER_CERT: ${MG_CASSANDRA_WRITER_HTTP_SERVER_CERT} + MG_CASSANDRA_WRITER_HTTP_SERVER_KEY: ${MG_CASSANDRA_WRITER_HTTP_SERVER_KEY} + MG_CASSANDRA_PORT: ${MG_CASSANDRA_PORT} + MG_CASSANDRA_CLUSTER: ${MG_CASSANDRA_CLUSTER} + MG_CASSANDRA_KEYSPACE: ${MG_CASSANDRA_KEYSPACE} + MG_CASSANDRA_USER: ${MG_CASSANDRA_USER} + MG_CASSANDRA_PASS: ${MG_CASSANDRA_PASS} + MG_MESSAGE_BROKER_URL: ${MG_MESSAGE_BROKER_URL} + MG_JAEGER_URL: ${MG_JAEGER_URL} + MG_JAEGER_TRACE_RATIO: ${MG_JAEGER_TRACE_RATIO} + MG_SEND_TELEMETRY: ${MG_SEND_TELEMETRY} + MG_CASSANDRA_WRITER_INSTANCE_ID: ${MG_CASSANDRA_WRITER_INSTANCE_ID} + ports: + - ${MG_CASSANDRA_WRITER_HTTP_PORT}:${MG_CASSANDRA_WRITER_HTTP_PORT} + networks: + - magistrala-base-net + volumes: + - ./config.toml:/config.toml diff --git a/docker/addons/cassandra-writer/init.sh b/docker/addons/cassandra-writer/init.sh new file mode 100755 index 0000000..cc59d54 --- /dev/null +++ b/docker/addons/cassandra-writer/init.sh @@ -0,0 +1,11 @@ +#!/usr/bin/env bash +# Copyright (c) Abstract Machines +# SPDX-License-Identifier: Apache-2.0 + +until printf "" 2>>/dev/null >>/dev/tcp/magistrala-cassandra/9042; do + sleep 5; + echo 
"Waiting for cassandra..."; +done + +echo "Creating keyspace and table..." +cqlsh magistrala-cassandra -e "CREATE KEYSPACE IF NOT EXISTS magistrala WITH replication = {'class':'SimpleStrategy','replication_factor':'1'};" diff --git a/docker/addons/certs/docker-compose.yml b/docker/addons/certs/docker-compose.yml new file mode 100644 index 0000000..69cc9e0 --- /dev/null +++ b/docker/addons/certs/docker-compose.yml @@ -0,0 +1,90 @@ +# Copyright (c) Abstract Machines +# SPDX-License-Identifier: Apache-2.0 + +# This docker-compose file contains optional certs services. Since it's optional, this file is +# dependent of docker-compose file from <project_root>/docker. In order to run this services, execute command: +# docker compose -f docker/docker-compose.yml -f docker/addons/certs/docker-compose.yml up +# from project root. + +networks: + magistrala-base-net: + +volumes: + magistrala-certs-db-volume: + + +services: + certs-db: + image: postgres:16.2-alpine + container_name: magistrala-certs-db + restart: on-failure + environment: + POSTGRES_USER: ${MG_CERTS_DB_USER} + POSTGRES_PASSWORD: ${MG_CERTS_DB_PASS} + POSTGRES_DB: ${MG_CERTS_DB_NAME} + networks: + - magistrala-base-net + volumes: + - magistrala-certs-db-volume:/var/lib/postgresql/data + + certs: + image: magistrala/certs:${MG_RELEASE_TAG} + container_name: magistrala-certs + depends_on: + - certs-db + restart: on-failure + networks: + - magistrala-base-net + ports: + - ${MG_CERTS_HTTP_PORT}:${MG_CERTS_HTTP_PORT} + environment: + MG_CERTS_LOG_LEVEL: ${MG_CERTS_LOG_LEVEL} + MG_CERTS_SIGN_CA_PATH: ${MG_CERTS_SIGN_CA_PATH} + MG_CERTS_SIGN_CA_KEY_PATH: ${MG_CERTS_SIGN_CA_KEY_PATH} + MG_CERTS_VAULT_HOST: ${MG_CERTS_VAULT_HOST} + MG_CERTS_VAULT_NAMESPACE: ${MG_CERTS_VAULT_NAMESPACE} + MG_CERTS_VAULT_APPROLE_ROLEID: ${MG_CERTS_VAULT_APPROLE_ROLEID} + MG_CERTS_VAULT_APPROLE_SECRET: ${MG_CERTS_VAULT_APPROLE_SECRET} + MG_CERTS_VAULT_THINGS_CERTS_PKI_PATH: ${MG_CERTS_VAULT_THINGS_CERTS_PKI_PATH} + MG_CERTS_VAULT_THINGS_CERTS_PKI_ROLE_NAME: ${MG_CERTS_VAULT_THINGS_CERTS_PKI_ROLE_NAME} + MG_CERTS_HTTP_HOST: ${MG_CERTS_HTTP_HOST} + MG_CERTS_HTTP_PORT: ${MG_CERTS_HTTP_PORT} + MG_CERTS_HTTP_SERVER_CERT: ${MG_CERTS_HTTP_SERVER_CERT} + MG_CERTS_HTTP_SERVER_KEY: ${MG_CERTS_HTTP_SERVER_KEY} + MG_CERTS_DB_HOST: ${MG_CERTS_DB_HOST} + MG_CERTS_DB_PORT: ${MG_CERTS_DB_PORT} + MG_CERTS_DB_PASS: ${MG_CERTS_DB_PASS} + MG_CERTS_DB_USER: ${MG_CERTS_DB_USER} + MG_CERTS_DB_NAME: ${MG_CERTS_DB_NAME} + MG_CERTS_DB_SSL_MODE: ${MG_CERTS_DB_SSL_MODE} + MG_CERTS_DB_SSL_CERT: ${MG_CERTS_DB_SSL_CERT} + MG_CERTS_DB_SSL_KEY: ${MG_CERTS_DB_SSL_KEY} + MG_CERTS_DB_SSL_ROOT_CERT: ${MG_CERTS_DB_SSL_ROOT_CERT} + MG_AUTH_GRPC_URL: ${MG_AUTH_GRPC_URL} + MG_AUTH_GRPC_TIMEOUT: ${MG_AUTH_GRPC_TIMEOUT} + MG_AUTH_GRPC_CLIENT_CERT: ${MG_AUTH_GRPC_CLIENT_CERT:+/auth-grpc-client.crt} + MG_AUTH_GRPC_CLIENT_KEY: ${MG_AUTH_GRPC_CLIENT_KEY:+/auth-grpc-client.key} + MG_AUTH_GRPC_SERVER_CA_CERTS: ${MG_AUTH_GRPC_SERVER_CA_CERTS:+/auth-grpc-server-ca.crt} + MG_THINGS_URL: ${MG_THINGS_URL} + MG_JAEGER_URL: ${MG_JAEGER_URL} + MG_JAEGER_TRACE_RATIO: ${MG_JAEGER_TRACE_RATIO} + MG_SEND_TELEMETRY: ${MG_SEND_TELEMETRY} + MG_CERTS_INSTANCE_ID: ${MG_CERTS_INSTANCE_ID} + volumes: + - ../../ssl/certs/ca.key:/etc/ssl/certs/ca.key + - ../../ssl/certs/ca.crt:/etc/ssl/certs/ca.crt + - type: bind + source: ${MG_ADDONS_CERTS_PATH_PREFIX}${MG_AUTH_GRPC_CLIENT_CERT:-./ssl/certs/dummy/client_cert} + target: /auth-grpc-client${MG_AUTH_GRPC_CLIENT_CERT:+.crt} + bind: + create_host_path: true + - type: bind + source: 
${MG_ADDONS_CERTS_PATH_PREFIX}${MG_AUTH_GRPC_CLIENT_KEY:-./ssl/certs/dummy/client_key} + target: /auth-grpc-client${MG_AUTH_GRPC_CLIENT_KEY:+.key} + bind: + create_host_path: true + - type: bind + source: ${MG_ADDONS_CERTS_PATH_PREFIX}${MG_AUTH_GRPC_SERVER_CA_CERTS:-./ssl/certs/dummy/server_ca} + target: /auth-grpc-server-ca${MG_AUTH_GRPC_SERVER_CA_CERTS:+.crt} + bind: + create_host_path: true diff --git a/docker/addons/influxdb-reader/docker-compose.yml b/docker/addons/influxdb-reader/docker-compose.yml new file mode 100644 index 0000000..464844d --- /dev/null +++ b/docker/addons/influxdb-reader/docker-compose.yml @@ -0,0 +1,87 @@ +# Copyright (c) Abstract Machines +# SPDX-License-Identifier: Apache-2.0 + +### +# This docker-compose file contains optional InfluxDB-reader service for the Magistrala +# platform. Since this service is optional, this file is dependent on the docker-compose.yml +# file from <project_root>/docker/. In order to run this service, core services, +# as well as the network from the core composition, should be already running. +### + +networks: + magistrala-base-net: + +services: + influxdb-reader: + image: magistrala/influxdb-reader:${MG_RELEASE_TAG} + container_name: magistrala-influxdb-reader + restart: on-failure + environment: + MG_INFLUX_READER_LOG_LEVEL: ${MG_INFLUX_READER_LOG_LEVEL} + MG_INFLUX_READER_HTTP_HOST: ${MG_INFLUX_READER_HTTP_HOST} + MG_INFLUX_READER_HTTP_PORT: ${MG_INFLUX_READER_HTTP_PORT} + MG_INFLUX_READER_HTTP_SERVER_CERT: ${MG_INFLUX_READER_HTTP_SERVER_CERT} + MG_INFLUX_READER_HTTP_SERVER_KEY: ${MG_INFLUX_READER_HTTP_SERVER_KEY} + MG_INFLUXDB_PROTOCOL: ${MG_INFLUXDB_PROTOCOL} + MG_INFLUXDB_HOST: ${MG_INFLUXDB_HOST} + MG_INFLUXDB_PORT: ${MG_INFLUXDB_PORT} + MG_INFLUXDB_ADMIN_USER: ${MG_INFLUXDB_ADMIN_USER} + MG_INFLUXDB_ADMIN_PASSWORD: ${MG_INFLUXDB_ADMIN_PASSWORD} + MG_INFLUXDB_NAME: ${MG_INFLUXDB_NAME} + MG_INFLUXDB_BUCKET: ${MG_INFLUXDB_BUCKET} + MG_INFLUXDB_ORG: ${MG_INFLUXDB_ORG} + MG_INFLUXDB_TOKEN: ${MG_INFLUXDB_TOKEN} + MG_INFLUXDB_DBURL: ${MG_INFLUXDB_DBURL} + MG_INFLUXDB_USER_AGENT: ${MG_INFLUXDB_USER_AGENT} + MG_INFLUXDB_TIMEOUT: ${MG_INFLUXDB_TIMEOUT} + MG_INFLUXDB_INSECURE_SKIP_VERIFY: ${MG_INFLUXDB_INSECURE_SKIP_VERIFY} + MG_THINGS_AUTH_GRPC_URL: ${MG_THINGS_AUTH_GRPC_URL} + MG_THINGS_AUTH_GRPC_TIMEOUT: ${MG_THINGS_AUTH_GRPC_TIMEOUT} + MG_THINGS_AUTH_GRPC_CLIENT_CERT: ${MG_THINGS_AUTH_GRPC_CLIENT_CERT:+/things-grpc-client.crt} + MG_THINGS_AUTH_GRPC_CLIENT_KEY: ${MG_THINGS_AUTH_GRPC_CLIENT_KEY:+/things-grpc-client.key} + MG_THINGS_AUTH_GRPC_SERVER_CA_CERTS: ${MG_THINGS_AUTH_GRPC_SERVER_CA_CERTS:+/things-grpc-server-ca.crt} + MG_AUTH_GRPC_URL: ${MG_AUTH_GRPC_URL} + MG_AUTH_GRPC_TIMEOUT: ${MG_AUTH_GRPC_TIMEOUT} + MG_AUTH_GRPC_CLIENT_CERT: ${MG_AUTH_GRPC_CLIENT_CERT:+/auth-grpc-client.crt} + MG_AUTH_GRPC_CLIENT_KEY: ${MG_AUTH_GRPC_CLIENT_KEY:+/auth-grpc-client.key} + MG_AUTH_GRPC_SERVER_CA_CERTS: ${MG_AUTH_GRPC_SERVER_CA_CERTS:+/auth-grpc-server-ca.crt} + MG_SEND_TELEMETRY: ${MG_SEND_TELEMETRY} + MG_INFLUX_READER_INSTANCE_ID: ${MG_INFLUX_READER_INSTANCE_ID} + ports: + - ${MG_INFLUX_READER_HTTP_PORT}:${MG_INFLUX_READER_HTTP_PORT} + networks: + - magistrala-base-net + volumes: + - ../../ssl/certs:/etc/ssl/certs + # Auth gRPC client certificates + - type: bind + source: ${MG_ADDONS_CERTS_PATH_PREFIX}${MG_AUTH_GRPC_CLIENT_CERT:-./ssl/certs/dummy/client_cert} + target: /auth-grpc-client${MG_AUTH_GRPC_CLIENT_CERT:+.crt} + bind: + create_host_path: true + - type: bind + source: 
${MG_ADDONS_CERTS_PATH_PREFIX}${MG_AUTH_GRPC_CLIENT_KEY:-./ssl/certs/dummy/client_key} + target: /auth-grpc-client${MG_AUTH_GRPC_CLIENT_KEY:+.key} + bind: + create_host_path: true + - type: bind + source: ${MG_ADDONS_CERTS_PATH_PREFIX}${MG_AUTH_GRPC_SERVER_CA_CERTS:-./ssl/certs/dummy/server_ca} + target: /auth-grpc-server-ca${MG_AUTH_GRPC_SERVER_CA_CERTS:+.crt} + bind: + create_host_path: true + # Things gRPC mTLS client certificates + - type: bind + source: ${MG_ADDONS_CERTS_PATH_PREFIX}${MG_THINGS_AUTH_GRPC_CLIENT_CERT:-ssl/certs/dummy/client_cert} + target: /things-grpc-client${MG_THINGS_AUTH_GRPC_CLIENT_CERT:+.crt} + bind: + create_host_path: true + - type: bind + source: ${MG_ADDONS_CERTS_PATH_PREFIX}${MG_THINGS_AUTH_GRPC_CLIENT_KEY:-ssl/certs/dummy/client_key} + target: /things-grpc-client${MG_THINGS_AUTH_GRPC_CLIENT_KEY:+.key} + bind: + create_host_path: true + - type: bind + source: ${MG_ADDONS_CERTS_PATH_PREFIX}${MG_THINGS_AUTH_GRPC_SERVER_CA_CERTS:-ssl/certs/dummy/server_ca} + target: /things-grpc-server-ca${MG_THINGS_AUTH_GRPC_SERVER_CA_CERTS:+.crt} + bind: + create_host_path: true diff --git a/docker/addons/influxdb-writer/config.toml b/docker/addons/influxdb-writer/config.toml new file mode 100644 index 0000000..b04ce56 --- /dev/null +++ b/docker/addons/influxdb-writer/config.toml @@ -0,0 +1,19 @@ +# Copyright (c) Abstract Machines +# SPDX-License-Identifier: Apache-2.0 + +# To listen all messsage broker subjects use default value "channels.>". +# To subscribe to specific subjects use values starting by "channels." and +# followed by a subtopic (e.g ["channels.<channel_id>.sub.topic.x", ...]). +[subscriber] +subjects = ["channels.>"] + +[transformer] +# SenML or JSON +format = "senml" +# Used if format is SenML +content_type = "application/senml+json" +# Used as timestamp fields if format is JSON +time_fields = [{ field_name = "seconds_key", field_format = "unix", location = "UTC"}, + { field_name = "millis_key", field_format = "unix_ms", location = "UTC"}, + { field_name = "micros_key", field_format = "unix_us", location = "UTC"}, + { field_name = "nanos_key", field_format = "unix_ns", location = "UTC"}] diff --git a/docker/addons/influxdb-writer/docker-compose.yml b/docker/addons/influxdb-writer/docker-compose.yml new file mode 100644 index 0000000..0655aa5 --- /dev/null +++ b/docker/addons/influxdb-writer/docker-compose.yml @@ -0,0 +1,72 @@ +# Copyright (c) Abstract Machines +# SPDX-License-Identifier: Apache-2.0 + +# This docker-compose file contains optional InfluxDB and InfluxDB-writer services +# for the Magistrala platform. Since this services are optional, this file is dependent on the +# docker-compose.yml file from <project_root>/docker/. In order to run these services, +# core services, as well as the network from the core composition, should be already running. 
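+#
+# The writer subscribes to the message broker subjects listed in ./config.toml
+# (mounted at /config.toml below) and persists matching messages to the InfluxDB
+# bucket configured through the MG_INFLUXDB_* variables.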
+
+networks:
+  magistrala-base-net:
+
+volumes:
+  magistrala-influxdb-volume:
+
+services:
+  influxdb:
+    image: influxdb:2.7.5-alpine
+    container_name: magistrala-influxdb
+    restart: on-failure
+    environment:
+      DOCKER_INFLUXDB_INIT_MODE: ${MG_INFLUXDB_INIT_MODE}
+      DOCKER_INFLUXDB_INIT_USERNAME: ${MG_INFLUXDB_ADMIN_USER}
+      DOCKER_INFLUXDB_INIT_PASSWORD: ${MG_INFLUXDB_ADMIN_PASSWORD}
+      DOCKER_INFLUXDB_ADMIN_URL: ${MG_INFLUXDB_ADMIN_URL}
+      DOCKER_INFLUXDB_INIT_ORG: ${MG_INFLUXDB_ORG}
+      DOCKER_INFLUXDB_INIT_BUCKET: ${MG_INFLUXDB_BUCKET}
+      DOCKER_INFLUXDB_INIT_ADMIN_TOKEN: ${MG_INFLUXDB_TOKEN}
+      INFLUXDB_HTTP_FLUX_ENABLED: ${MG_INFLUXDB_HTTP_ENABLED}
+    networks:
+      - magistrala-base-net
+    ports:
+      - ${MG_INFLUXDB_PORT}:${MG_INFLUXDB_PORT}
+    volumes:
+      - magistrala-influxdb-volume:/var/lib/influxdb2
+
+  influxdb-writer:
+    image: magistrala/influxdb-writer:${MG_RELEASE_TAG}
+    container_name: magistrala-influxdb-writer
+    depends_on:
+      - influxdb
+    restart: on-failure
+    environment:
+      MG_INFLUX_WRITER_LOG_LEVEL: ${MG_INFLUX_WRITER_LOG_LEVEL}
+      MG_INFLUX_WRITER_CONFIG_PATH: ${MG_INFLUX_WRITER_CONFIG_PATH}
+      MG_INFLUX_WRITER_HTTP_HOST: ${MG_INFLUX_WRITER_HTTP_HOST}
+      MG_INFLUX_WRITER_HTTP_PORT: ${MG_INFLUX_WRITER_HTTP_PORT}
+      MG_INFLUX_WRITER_HTTP_SERVER_CERT: ${MG_INFLUX_WRITER_HTTP_SERVER_CERT}
+      MG_INFLUX_WRITER_HTTP_SERVER_KEY: ${MG_INFLUX_WRITER_HTTP_SERVER_KEY}
+      MG_INFLUXDB_PROTOCOL: ${MG_INFLUXDB_PROTOCOL}
+      MG_INFLUXDB_HOST: ${MG_INFLUXDB_HOST}
+      MG_INFLUXDB_PORT: ${MG_INFLUXDB_PORT}
+      MG_INFLUXDB_ADMIN_USER: ${MG_INFLUXDB_ADMIN_USER}
+      MG_INFLUXDB_ADMIN_PASSWORD: ${MG_INFLUXDB_ADMIN_PASSWORD}
+      MG_INFLUXDB_NAME: ${MG_INFLUXDB_NAME}
+      MG_INFLUXDB_BUCKET: ${MG_INFLUXDB_BUCKET}
+      MG_INFLUXDB_ORG: ${MG_INFLUXDB_ORG}
+      MG_INFLUXDB_TOKEN: ${MG_INFLUXDB_TOKEN}
+      MG_INFLUXDB_DBURL: ${MG_INFLUXDB_DBURL}
+      MG_INFLUXDB_USER_AGENT: ${MG_INFLUXDB_USER_AGENT}
+      MG_INFLUXDB_TIMEOUT: ${MG_INFLUXDB_TIMEOUT}
+      MG_INFLUXDB_INSECURE_SKIP_VERIFY: ${MG_INFLUXDB_INSECURE_SKIP_VERIFY}
+      MG_MESSAGE_BROKER_URL: ${MG_MESSAGE_BROKER_URL}
+      MG_JAEGER_URL: ${MG_JAEGER_URL}
+      MG_JAEGER_TRACE_RATIO: ${MG_JAEGER_TRACE_RATIO}
+      MG_SEND_TELEMETRY: ${MG_SEND_TELEMETRY}
+      MG_INFLUX_WRITER_INSTANCE_ID: ${MG_INFLUX_WRITER_INSTANCE_ID}
+    ports:
+      - ${MG_INFLUX_WRITER_HTTP_PORT}:${MG_INFLUX_WRITER_HTTP_PORT}
+    networks:
+      - magistrala-base-net
+    volumes:
+      - ./config.toml:/config.toml
diff --git a/docker/addons/journal/docker-compose.yml b/docker/addons/journal/docker-compose.yml
new file mode 100644
index 0000000..fa51df0
--- /dev/null
+++ b/docker/addons/journal/docker-compose.yml
@@ -0,0 +1,67 @@
+# Copyright (c) Abstract Machines
+# SPDX-License-Identifier: Apache-2.0
+
+# This docker-compose file contains optional Postgres and journal services
+# for the Magistrala platform. Since these are optional, this file is dependent on the docker-compose file
+# from <project_root>/docker. In order to run these services, execute the command:
+# docker compose -f docker/docker-compose.yml -f docker/addons/journal/docker-compose.yml up
+# from the project root. Note that journal-db is not exposed outside the Docker network by default.
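+#
+# The journal service consumes platform events from the events store (MG_ES_URL)
+# and persists them in journal-db, exposing them over its HTTP API on
+# MG_JOURNAL_HTTP_PORT.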
+ +networks: + magistrala-base-net: + +volumes: + magistrala-journal-volume: + +services: + journal-db: + image: postgres:16.2-alpine + container_name: magistrala-journal-db + restart: on-failure + command: postgres -c "max_connections=${MG_POSTGRES_MAX_CONNECTIONS}" + environment: + POSTGRES_USER: ${MG_JOURNAL_USER} + POSTGRES_PASSWORD: ${MG_JOURNAL_PASS} + POSTGRES_DB: ${MG_JOURNAL_NAME} + MG_POSTGRES_MAX_CONNECTIONS: ${MG_POSTGRES_MAX_CONNECTIONS} + networks: + - magistrala-base-net + volumes: + - magistrala-journal-volume:/var/lib/postgresql/data + + journal: + image: magistrala/journal:${MG_RELEASE_TAG} + container_name: magistrala-journal + depends_on: + - journal-db + restart: on-failure + environment: + MG_JOURNAL_LOG_LEVEL: ${MG_JOURNAL_LOG_LEVEL} + MG_JOURNAL_HTTP_HOST: ${MG_JOURNAL_HTTP_HOST} + MG_JOURNAL_HTTP_PORT: ${MG_JOURNAL_HTTP_PORT} + MG_JOURNAL_HTTP_SERVER_CERT: ${MG_JOURNAL_HTTP_SERVER_CERT} + MG_JOURNAL_HTTP_SERVER_KEY: ${MG_JOURNAL_HTTP_SERVER_KEY} + MG_JOURNAL_HOST: ${MG_JOURNAL_HOST} + MG_JOURNAL_PORT: ${MG_JOURNAL_PORT} + MG_JOURNAL_USER: ${MG_JOURNAL_USER} + MG_JOURNAL_PASS: ${MG_JOURNAL_PASS} + MG_JOURNAL_NAME: ${MG_JOURNAL_NAME} + MG_JOURNAL_SSL_MODE: ${MG_JOURNAL_SSL_MODE} + MG_JOURNAL_SSL_CERT: ${MG_JOURNAL_SSL_CERT} + MG_JOURNAL_SSL_KEY: ${MG_JOURNAL_SSL_KEY} + MG_JOURNAL_SSL_ROOT_CERT: ${MG_JOURNAL_SSL_ROOT_CERT} + MG_AUTH_GRPC_URL: ${MG_AUTH_GRPC_URL} + MG_AUTH_GRPC_TIMEOUT: ${MG_AUTH_GRPC_TIMEOUT} + MG_AUTH_GRPC_CLIENT_CERT: ${MG_AUTH_GRPC_CLIENT_CERT:+/auth-grpc-client.crt} + MG_AUTH_GRPC_CLIENT_KEY: ${MG_AUTH_GRPC_CLIENT_KEY:+/auth-grpc-client.key} + MG_AUTH_GRPC_SERVER_CA_CERTS: ${MG_AUTH_GRPC_SERVER_CA_CERTS:+/auth-grpc-server-ca.crt} + MG_ES_URL: ${MG_ES_URL} + MG_JAEGER_URL: ${MG_JAEGER_URL} + MG_JAEGER_TRACE_RATIO: ${MG_JAEGER_TRACE_RATIO} + MG_SEND_TELEMETRY: ${MG_SEND_TELEMETRY} + MG_JOURNAL_INSTANCE_ID: ${MG_JOURNAL_INSTANCE_ID} + ports: + - ${MG_JOURNAL_HTTP_PORT}:${MG_JOURNAL_HTTP_PORT} + networks: + - magistrala-base-net diff --git a/docker/addons/lora-adapter/docker-compose.yml b/docker/addons/lora-adapter/docker-compose.yml new file mode 100644 index 0000000..39e9c57 --- /dev/null +++ b/docker/addons/lora-adapter/docker-compose.yml @@ -0,0 +1,46 @@ +# Copyright (c) Abstract Machines +# SPDX-License-Identifier: Apache-2.0 + +# This docker-compose file contains optional lora-adapter and lora-redis services +# for the Magistrala platform. Since this services are optional, this file is dependent on the +# docker-compose.yml file from <project_root>/docker/. In order to run these services, +# core services, as well as the network from the core composition, should be already running. 
+ +networks: + magistrala-base-net: + +services: + lora-redis: + image: redis:7.2.4-alpine + container_name: magistrala-lora-redis + restart: on-failure + networks: + - magistrala-base-net + + lora-adapter: + image: magistrala/lora:${MG_RELEASE_TAG} + container_name: magistrala-lora + restart: on-failure + environment: + MG_LORA_ADAPTER_LOG_LEVEL: ${MG_LORA_ADAPTER_LOG_LEVEL} + MG_LORA_ADAPTER_MESSAGES_URL: ${MG_LORA_ADAPTER_MESSAGES_URL} + MG_LORA_ADAPTER_MESSAGES_TOPIC: ${MG_LORA_ADAPTER_MESSAGES_TOPIC} + MG_LORA_ADAPTER_MESSAGES_USER: ${MG_LORA_ADAPTER_MESSAGES_USER} + MG_LORA_ADAPTER_MESSAGES_PASS: ${MG_LORA_ADAPTER_MESSAGES_PASS} + MG_LORA_ADAPTER_MESSAGES_TIMEOUT: ${MG_LORA_ADAPTER_MESSAGES_TIMEOUT} + MG_LORA_ADAPTER_EVENT_CONSUMER: ${MG_LORA_ADAPTER_EVENT_CONSUMER} + MG_LORA_ADAPTER_HTTP_HOST: ${MG_LORA_ADAPTER_HTTP_HOST} + MG_LORA_ADAPTER_HTTP_PORT: ${MG_LORA_ADAPTER_HTTP_PORT} + MG_LORA_ADAPTER_HTTP_SERVER_CERT: ${MG_LORA_ADAPTER_HTTP_SERVER_CERT} + MG_LORA_ADAPTER_HTTP_SERVER_KEY: ${MG_LORA_ADAPTER_HTTP_SERVER_KEY} + MG_LORA_ADAPTER_ROUTE_MAP_URL: ${MG_LORA_ADAPTER_ROUTE_MAP_URL} + MG_ES_URL: ${MG_ES_URL} + MG_MESSAGE_BROKER_URL: ${MG_MESSAGE_BROKER_URL} + MG_JAEGER_URL: ${MG_JAEGER_URL} + MG_JAEGER_TRACE_RATIO: ${MG_JAEGER_TRACE_RATIO} + MG_SEND_TELEMETRY: ${MG_SEND_TELEMETRY} + MG_LORA_ADAPTER_INSTANCE_ID: ${MG_LORA_ADAPTER_INSTANCE_ID} + ports: + - ${MG_LORA_ADAPTER_HTTP_PORT}:${MG_LORA_ADAPTER_HTTP_PORT} + networks: + - magistrala-base-net diff --git a/docker/addons/mongodb-reader/docker-compose.yml b/docker/addons/mongodb-reader/docker-compose.yml new file mode 100644 index 0000000..5b39e46 --- /dev/null +++ b/docker/addons/mongodb-reader/docker-compose.yml @@ -0,0 +1,76 @@ +# Copyright (c) Abstract Machines +# SPDX-License-Identifier: Apache-2.0 + +# This docker-compose file contains optional MongoDB-reader service +# for Magistrala platform. Since these are optional, this file is dependent of docker-compose file +# from <project_root>/docker. In order to run this service, execute command: +# docker compose -f docker/docker-compose.yml -f docker/addons/mongodb-reader/docker-compose.yml up +# from project root. MongoDB service is defined in docker/addons/mongodb-writer/docker-compose.yml. 
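+#
+# Note: MongoDB itself is not defined here; bring up the mongodb-writer composition
+# (or at least its mongodb service) first so that MG_MONGO_HOST is reachable on the
+# shared network.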
+ +networks: + magistrala-base-net: + +services: + mongodb-reader: + image: magistrala/mongodb-reader:${MG_RELEASE_TAG} + container_name: magistrala-mongodb-reader + restart: on-failure + environment: + MG_MONGO_READER_LOG_LEVEL: ${MG_MONGO_READER_LOG_LEVEL} + MG_MONGO_READER_HTTP_HOST: ${MG_MONGO_READER_HTTP_HOST} + MG_MONGO_READER_HTTP_PORT: ${MG_MONGO_READER_HTTP_PORT} + MG_MONGO_READER_HTTP_SERVER_CERT: ${MG_MONGO_READER_HTTP_SERVER_CERT} + MG_MONGO_READER_HTTP_SERVER_KEY: ${MG_MONGO_READER_HTTP_SERVER_KEY} + MG_MONGO_HOST: ${MG_MONGO_HOST} + MG_MONGO_PORT: ${MG_MONGO_PORT} + MG_MONGO_NAME: ${MG_MONGO_NAME} + MG_THINGS_AUTH_GRPC_URL: ${MG_THINGS_AUTH_GRPC_URL} + MG_THINGS_AUTH_GRPC_TIMEOUT: ${MG_THINGS_AUTH_GRPC_TIMEOUT} + MG_THINGS_AUTH_GRPC_CLIENT_CERT: ${MG_THINGS_AUTH_GRPC_CLIENT_CERT:+/things-grpc-client.crt} + MG_THINGS_AUTH_GRPC_CLIENT_KEY: ${MG_THINGS_AUTH_GRPC_CLIENT_KEY:+/things-grpc-client.key} + MG_THINGS_AUTH_GRPC_SERVER_CA_CERTS: ${MG_THINGS_AUTH_GRPC_SERVER_CA_CERTS:+/things-grpc-server-ca.crt} + MG_AUTH_GRPC_URL: ${MG_AUTH_GRPC_URL} + MG_AUTH_GRPC_TIMEOUT: ${MG_AUTH_GRPC_TIMEOUT} + MG_AUTH_GRPC_CLIENT_CERT: ${MG_AUTH_GRPC_CLIENT_CERT:+/auth-grpc-client.crt} + MG_AUTH_GRPC_CLIENT_KEY: ${MG_AUTH_GRPC_CLIENT_KEY:+/auth-grpc-client.key} + MG_AUTH_GRPC_SERVER_CA_CERTS: ${MG_AUTH_GRPC_SERVER_CA_CERTS:+/auth-grpc-server-ca.crt} + MG_SEND_TELEMETRY: ${MG_SEND_TELEMETRY} + MG_MONGO_READER_INSTANCE_ID: ${MG_MONGO_READER_INSTANCE_ID} + ports: + - ${MG_MONGO_READER_HTTP_PORT}:${MG_MONGO_READER_HTTP_PORT} + networks: + - magistrala-base-net + volumes: + - ../../ssl/certs:/etc/ssl/certs + # Auth gRPC client certificates + - type: bind + source: ${MG_ADDONS_CERTS_PATH_PREFIX}${MG_AUTH_GRPC_CLIENT_CERT:-./ssl/certs/dummy/client_cert} + target: /auth-grpc-client${MG_AUTH_GRPC_CLIENT_CERT:+.crt} + bind: + create_host_path: true + - type: bind + source: ${MG_ADDONS_CERTS_PATH_PREFIX}${MG_AUTH_GRPC_CLIENT_KEY:-./ssl/certs/dummy/client_key} + target: /auth-grpc-client${MG_AUTH_GRPC_CLIENT_KEY:+.key} + bind: + create_host_path: true + - type: bind + source: ${MG_ADDONS_CERTS_PATH_PREFIX}${MG_AUTH_GRPC_SERVER_CA_CERTS:-./ssl/certs/dummy/server_ca} + target: /auth-grpc-server-ca${MG_AUTH_GRPC_SERVER_CA_CERTS:+.crt} + bind: + create_host_path: true + # Things gRPC mTLS client certificates + - type: bind + source: ${MG_ADDONS_CERTS_PATH_PREFIX}${MG_THINGS_AUTH_GRPC_CLIENT_CERT:-ssl/certs/dummy/client_cert} + target: /things-grpc-client${MG_THINGS_AUTH_GRPC_CLIENT_CERT:+.crt} + bind: + create_host_path: true + - type: bind + source: ${MG_ADDONS_CERTS_PATH_PREFIX}${MG_THINGS_AUTH_GRPC_CLIENT_KEY:-ssl/certs/dummy/client_key} + target: /things-grpc-client${MG_THINGS_AUTH_GRPC_CLIENT_KEY:+.key} + bind: + create_host_path: true + - type: bind + source: ${MG_ADDONS_CERTS_PATH_PREFIX}${MG_THINGS_AUTH_GRPC_SERVER_CA_CERTS:-ssl/certs/dummy/server_ca} + target: /things-grpc-server-ca${MG_THINGS_AUTH_GRPC_SERVER_CA_CERTS:+.crt} + bind: + create_host_path: true diff --git a/docker/addons/mongodb-writer/config.toml b/docker/addons/mongodb-writer/config.toml new file mode 100644 index 0000000..b04ce56 --- /dev/null +++ b/docker/addons/mongodb-writer/config.toml @@ -0,0 +1,19 @@ +# Copyright (c) Abstract Machines +# SPDX-License-Identifier: Apache-2.0 + +# To listen all messsage broker subjects use default value "channels.>". +# To subscribe to specific subjects use values starting by "channels." and +# followed by a subtopic (e.g ["channels.<channel_id>.sub.topic.x", ...]). 
+[subscriber] +subjects = ["channels.>"] + +[transformer] +# SenML or JSON +format = "senml" +# Used if format is SenML +content_type = "application/senml+json" +# Used as timestamp fields if format is JSON +time_fields = [{ field_name = "seconds_key", field_format = "unix", location = "UTC"}, + { field_name = "millis_key", field_format = "unix_ms", location = "UTC"}, + { field_name = "micros_key", field_format = "unix_us", location = "UTC"}, + { field_name = "nanos_key", field_format = "unix_ns", location = "UTC"}] diff --git a/docker/addons/mongodb-writer/docker-compose.yml b/docker/addons/mongodb-writer/docker-compose.yml new file mode 100644 index 0000000..7a18cdd --- /dev/null +++ b/docker/addons/mongodb-writer/docker-compose.yml @@ -0,0 +1,59 @@ +# Copyright (c) Abstract Machines +# SPDX-License-Identifier: Apache-2.0 + +# This docker-compose file contains optional MongoDB and MongoDB-writer services +# for Magistrala platform. Since these are optional, this file is dependent of docker-compose file +# from <project_root>/docker. In order to run these services, execute command: +# docker compose -f docker/docker-compose.yml -f docker/addons/mongodb-writer/docker-compose.yml up +# from project root. MongoDB default port (27017) is exposed, so you can use various tools for database +# inspection and data visualization. + +networks: + magistrala-base-net: + +volumes: + magistrala-mongodb-db-volume: + magistrala-mongodb-configdb-volume: + +services: + mongodb: + image: mongo:7.0.8 + container_name: magistrala-mongodb + restart: on-failure + environment: + MONGO_INITDB_DATABASE: ${MG_MONGO_NAME} + ports: + - ${MG_MONGO_PORT}:${MG_MONGO_PORT} + networks: + - magistrala-base-net + volumes: + - magistrala-mongodb-db-volume:/data/db + - magistrala-mongodb-configdb-volume:/data/configdb + + mongodb-writer: + image: magistrala/mongodb-writer:${MG_RELEASE_TAG} + container_name: magistrala-mongodb-writer + depends_on: + - mongodb + restart: on-failure + environment: + MG_MONGO_WRITER_LOG_LEVEL: ${MG_MONGO_WRITER_LOG_LEVEL} + MG_MONGO_WRITER_CONFIG_PATH: ${MG_MONGO_WRITER_CONFIG_PATH} + MG_MONGO_WRITER_HTTP_HOST: ${MG_MONGO_WRITER_HTTP_HOST} + MG_MONGO_WRITER_HTTP_PORT: ${MG_MONGO_WRITER_HTTP_PORT} + MG_MONGO_WRITER_HTTP_SERVER_CERT: ${MG_MONGO_WRITER_HTTP_SERVER_CERT} + MG_MONGO_WRITER_HTTP_SERVER_KEY: ${MG_MONGO_WRITER_HTTP_SERVER_KEY} + MG_MONGO_HOST: ${MG_MONGO_HOST} + MG_MONGO_PORT: ${MG_MONGO_PORT} + MG_MONGO_NAME: ${MG_MONGO_NAME} + MG_MESSAGE_BROKER_URL: ${MG_MESSAGE_BROKER_URL} + MG_JAEGER_URL: ${MG_JAEGER_URL} + MG_JAEGER_TRACE_RATIO: ${MG_JAEGER_TRACE_RATIO} + MG_SEND_TELEMETRY: ${MG_SEND_TELEMETRY} + MG_MONGO_WRITER_INSTANCE_ID: ${MG_MONGO_WRITER_INSTANCE_ID} + ports: + - ${MG_MONGO_WRITER_HTTP_PORT}:${MG_MONGO_WRITER_HTTP_PORT} + networks: + - magistrala-base-net + volumes: + - ./config.toml:/config.toml diff --git a/docker/addons/opcua-adapter/docker-compose.yml b/docker/addons/opcua-adapter/docker-compose.yml new file mode 100644 index 0000000..2c67227 --- /dev/null +++ b/docker/addons/opcua-adapter/docker-compose.yml @@ -0,0 +1,49 @@ +# Copyright (c) Abstract Machines +# SPDX-License-Identifier: Apache-2.0 + +# This docker-compose file contains optional opcua-adapter and opcua-redis services +# for the Magistrala platform. Since this services are optional, this file is dependent on the +# docker-compose.yml file from <project_root>/docker/. In order to run these services, +# core services, as well as the network from the core composition, should be already running. 
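+#
+# The opcua-redis instance backs the adapter's OPC-UA route map
+# (MG_OPCUA_ADAPTER_ROUTE_MAP_URL below); its volume persists the map across
+# restarts.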
+ +networks: + magistrala-base-net: + +volumes: + magistrala-opcua-adapter-volume: + magistrala-opcua-redis-volume: + +services: + opcua-redis: + image: redis:7.2.4-alpine + container_name: magistrala-opcua-redis + restart: on-failure + networks: + - magistrala-base-net + volumes: + - magistrala-opcua-redis-volume:/data + + opcua-adapter: + image: magistrala/opcua:${MG_RELEASE_TAG} + container_name: magistrala-opcua + restart: on-failure + environment: + MG_OPCUA_ADAPTER_LOG_LEVEL: ${MG_OPCUA_ADAPTER_LOG_LEVEL} + MG_OPCUA_ADAPTER_EVENT_CONSUMER: ${MG_OPCUA_ADAPTER_EVENT_CONSUMER} + MG_OPCUA_ADAPTER_HTTP_HOST: ${MG_OPCUA_ADAPTER_HTTP_HOST} + MG_OPCUA_ADAPTER_HTTP_PORT: ${MG_OPCUA_ADAPTER_HTTP_PORT} + MG_OPCUA_ADAPTER_HTTP_SERVER_CERT: ${MG_OPCUA_ADAPTER_HTTP_SERVER_CERT} + MG_OPCUA_ADAPTER_HTTP_SERVER_KEY: ${MG_OPCUA_ADAPTER_HTTP_SERVER_KEY} + MG_ES_URL: ${MG_ES_URL} + MG_OPCUA_ADAPTER_ROUTE_MAP_URL: ${MG_OPCUA_ADAPTER_ROUTE_MAP_URL} + MG_MESSAGE_BROKER_URL: ${MG_MESSAGE_BROKER_URL} + MG_JAEGER_URL: ${MG_JAEGER_URL} + MG_JAEGER_TRACE_RATIO: ${MG_JAEGER_TRACE_RATIO} + MG_SEND_TELEMETRY: ${MG_SEND_TELEMETRY} + MG_OPCUA_ADAPTER_INSTANCE_ID: ${MG_OPCUA_ADAPTER_INSTANCE_ID} + ports: + - ${MG_OPCUA_ADAPTER_HTTP_PORT}:${MG_OPCUA_ADAPTER_HTTP_PORT} + networks: + - magistrala-base-net + volumes: + - magistrala-opcua-adapter-volume:/store diff --git a/docker/addons/postgres-reader/docker-compose.yml b/docker/addons/postgres-reader/docker-compose.yml new file mode 100644 index 0000000..3b84d6c --- /dev/null +++ b/docker/addons/postgres-reader/docker-compose.yml @@ -0,0 +1,80 @@ +# Copyright (c) Abstract Machines +# SPDX-License-Identifier: Apache-2.0 + +# This docker-compose file contains optional Postgres-reader service for Magistrala platform. +# Since this service is optional, this file is dependent of docker-compose.yml file +# from <project_root>/docker. In order to run this service, execute command: +# docker compose -f docker/docker-compose.yml -f docker/addons/postgres-reader/docker-compose.yml up +# from project root. 
+ +networks: + magistrala-base-net: + +services: + postgres-reader: + image: magistrala/postgres-reader:${MG_RELEASE_TAG} + container_name: magistrala-postgres-reader + restart: on-failure + environment: + MG_POSTGRES_READER_LOG_LEVEL: ${MG_POSTGRES_READER_LOG_LEVEL} + MG_POSTGRES_READER_HTTP_HOST: ${MG_POSTGRES_READER_HTTP_HOST} + MG_POSTGRES_READER_HTTP_PORT: ${MG_POSTGRES_READER_HTTP_PORT} + MG_POSTGRES_READER_HTTP_SERVER_CERT: ${MG_POSTGRES_READER_HTTP_SERVER_CERT} + MG_POSTGRES_READER_HTTP_SERVER_KEY: ${MG_POSTGRES_READER_HTTP_SERVER_KEY} + MG_POSTGRES_HOST: ${MG_POSTGRES_HOST} + MG_POSTGRES_PORT: ${MG_POSTGRES_PORT} + MG_POSTGRES_USER: ${MG_POSTGRES_USER} + MG_POSTGRES_PASS: ${MG_POSTGRES_PASS} + MG_POSTGRES_NAME: ${MG_POSTGRES_NAME} + MG_POSTGRES_SSL_MODE: ${MG_POSTGRES_SSL_MODE} + MG_POSTGRES_SSL_CERT: ${MG_POSTGRES_SSL_CERT} + MG_POSTGRES_SSL_KEY: ${MG_POSTGRES_SSL_KEY} + MG_POSTGRES_SSL_ROOT_CERT: ${MG_POSTGRES_SSL_ROOT_CERT} + MG_THINGS_AUTH_GRPC_URL: ${MG_THINGS_AUTH_GRPC_URL} + MG_THINGS_AUTH_GRPC_TIMEOUT: ${MG_THINGS_AUTH_GRPC_TIMEOUT} + MG_THINGS_AUTH_GRPC_CLIENT_CERT: ${MG_THINGS_AUTH_GRPC_CLIENT_CERT:+/things-grpc-client.crt} + MG_THINGS_AUTH_GRPC_CLIENT_KEY: ${MG_THINGS_AUTH_GRPC_CLIENT_KEY:+/things-grpc-client.key} + MG_THINGS_AUTH_GRPC_SERVER_CA_CERTS: ${MG_THINGS_AUTH_GRPC_SERVER_CA_CERTS:+/things-grpc-server-ca.crt} + MG_AUTH_GRPC_URL: ${MG_AUTH_GRPC_URL} + MG_AUTH_GRPC_TIMEOUT: ${MG_AUTH_GRPC_TIMEOUT} + MG_AUTH_GRPC_CLIENT_CERT: ${MG_AUTH_GRPC_CLIENT_CERT:+/auth-grpc-client.crt} + MG_AUTH_GRPC_CLIENT_KEY: ${MG_AUTH_GRPC_CLIENT_KEY:+/auth-grpc-client.key} + MG_AUTH_GRPC_SERVER_CA_CERTS: ${MG_AUTH_GRPC_SERVER_CA_CERTS:+/auth-grpc-server-ca.crt} + MG_SEND_TELEMETRY: ${MG_SEND_TELEMETRY} + MG_POSTGRES_READER_INSTANCE_ID: ${MG_POSTGRES_READER_INSTANCE_ID} + ports: + - ${MG_POSTGRES_READER_HTTP_PORT}:${MG_POSTGRES_READER_HTTP_PORT} + networks: + - magistrala-base-net + volumes: + - type: bind + source: ${MG_ADDONS_CERTS_PATH_PREFIX}${MG_AUTH_GRPC_CLIENT_CERT:-./ssl/certs/dummy/client_cert} + target: /auth-grpc-client${MG_AUTH_GRPC_CLIENT_CERT:+.crt} + bind: + create_host_path: true + - type: bind + source: ${MG_ADDONS_CERTS_PATH_PREFIX}${MG_AUTH_GRPC_CLIENT_KEY:-./ssl/certs/dummy/client_key} + target: /auth-grpc-client${MG_AUTH_GRPC_CLIENT_KEY:+.key} + bind: + create_host_path: true + - type: bind + source: ${MG_ADDONS_CERTS_PATH_PREFIX}${MG_AUTH_GRPC_SERVER_CA_CERTS:-./ssl/certs/dummy/server_ca} + target: /auth-grpc-server-ca${MG_AUTH_GRPC_SERVER_CA_CERTS:+.crt} + bind: + create_host_path: true + # Things gRPC mTLS client certificates + - type: bind + source: ${MG_ADDONS_CERTS_PATH_PREFIX}${MG_THINGS_AUTH_GRPC_CLIENT_CERT:-ssl/certs/dummy/client_cert} + target: /things-grpc-client${MG_THINGS_AUTH_GRPC_CLIENT_CERT:+.crt} + bind: + create_host_path: true + - type: bind + source: ${MG_ADDONS_CERTS_PATH_PREFIX}${MG_THINGS_AUTH_GRPC_CLIENT_KEY:-ssl/certs/dummy/client_key} + target: /things-grpc-client${MG_THINGS_AUTH_GRPC_CLIENT_KEY:+.key} + bind: + create_host_path: true + - type: bind + source: ${MG_ADDONS_CERTS_PATH_PREFIX}${MG_THINGS_AUTH_GRPC_SERVER_CA_CERTS:-ssl/certs/dummy/server_ca} + target: /things-grpc-server-ca${MG_THINGS_AUTH_GRPC_SERVER_CA_CERTS:+.crt} + bind: + create_host_path: true diff --git a/docker/addons/postgres-writer/config.toml b/docker/addons/postgres-writer/config.toml new file mode 100644 index 0000000..b04ce56 --- /dev/null +++ b/docker/addons/postgres-writer/config.toml @@ -0,0 +1,19 @@ +# Copyright (c) Abstract Machines +# 
SPDX-License-Identifier: Apache-2.0 + +# To listen all messsage broker subjects use default value "channels.>". +# To subscribe to specific subjects use values starting by "channels." and +# followed by a subtopic (e.g ["channels.<channel_id>.sub.topic.x", ...]). +[subscriber] +subjects = ["channels.>"] + +[transformer] +# SenML or JSON +format = "senml" +# Used if format is SenML +content_type = "application/senml+json" +# Used as timestamp fields if format is JSON +time_fields = [{ field_name = "seconds_key", field_format = "unix", location = "UTC"}, + { field_name = "millis_key", field_format = "unix_ms", location = "UTC"}, + { field_name = "micros_key", field_format = "unix_us", location = "UTC"}, + { field_name = "nanos_key", field_format = "unix_ns", location = "UTC"}] diff --git a/docker/addons/postgres-writer/docker-compose.yml b/docker/addons/postgres-writer/docker-compose.yml new file mode 100644 index 0000000..c5e1964 --- /dev/null +++ b/docker/addons/postgres-writer/docker-compose.yml @@ -0,0 +1,63 @@ +# Copyright (c) Abstract Machines +# SPDX-License-Identifier: Apache-2.0 + +# This docker-compose file contains optional Postgres and Postgres-writer services +# for Magistrala platform. Since these are optional, this file is dependent of docker-compose file +# from <project_root>/docker. In order to run these services, execute command: +# docker compose -f docker/docker-compose.yml -f docker/addons/postgres-writer/docker-compose.yml up +# from project root. PostgreSQL default port (5432) is exposed, so you can use various tools for database +# inspection and data visualization. + +networks: + magistrala-base-net: + +volumes: + magistrala-postgres-writer-volume: + +services: + postgres: + image: postgres:16.2-alpine + container_name: magistrala-postgres + restart: on-failure + environment: + POSTGRES_USER: ${MG_POSTGRES_USER} + POSTGRES_PASSWORD: ${MG_POSTGRES_PASS} + POSTGRES_DB: ${MG_POSTGRES_NAME} + networks: + - magistrala-base-net + volumes: + - magistrala-postgres-writer-volume:/var/lib/postgresql/data + + postgres-writer: + image: magistrala/postgres-writer:${MG_RELEASE_TAG} + container_name: magistrala-postgres-writer + depends_on: + - postgres + restart: on-failure + environment: + MG_POSTGRES_WRITER_LOG_LEVEL: ${MG_POSTGRES_WRITER_LOG_LEVEL} + MG_POSTGRES_WRITER_CONFIG_PATH: ${MG_POSTGRES_WRITER_CONFIG_PATH} + MG_POSTGRES_WRITER_HTTP_HOST: ${MG_POSTGRES_WRITER_HTTP_HOST} + MG_POSTGRES_WRITER_HTTP_PORT: ${MG_POSTGRES_WRITER_HTTP_PORT} + MG_POSTGRES_WRITER_HTTP_SERVER_CERT: ${MG_POSTGRES_WRITER_HTTP_SERVER_CERT} + MG_POSTGRES_WRITER_HTTP_SERVER_KEY: ${MG_POSTGRES_WRITER_HTTP_SERVER_KEY} + MG_POSTGRES_HOST: ${MG_POSTGRES_HOST} + MG_POSTGRES_PORT: ${MG_POSTGRES_PORT} + MG_POSTGRES_USER: ${MG_POSTGRES_USER} + MG_POSTGRES_PASS: ${MG_POSTGRES_PASS} + MG_POSTGRES_NAME: ${MG_POSTGRES_NAME} + MG_POSTGRES_SSL_MODE: ${MG_POSTGRES_SSL_MODE} + MG_POSTGRES_SSL_CERT: ${MG_POSTGRES_SSL_CERT} + MG_POSTGRES_SSL_KEY: ${MG_POSTGRES_SSL_KEY} + MG_POSTGRES_SSL_ROOT_CERT: ${MG_POSTGRES_SSL_ROOT_CERT} + MG_MESSAGE_BROKER_URL: ${MG_MESSAGE_BROKER_URL} + MG_JAEGER_URL: ${MG_JAEGER_URL} + MG_JAEGER_TRACE_RATIO: ${MG_JAEGER_TRACE_RATIO} + MG_SEND_TELEMETRY: ${MG_SEND_TELEMETRY} + MG_POSTGRES_WRITER_INSTANCE_ID: ${MG_POSTGRES_WRITER_INSTANCE_ID} + ports: + - ${MG_POSTGRES_WRITER_HTTP_PORT}:${MG_POSTGRES_WRITER_HTTP_PORT} + networks: + - magistrala-base-net + volumes: + - ./config.toml:/config.toml diff --git a/docker/addons/prometheus/docker-compose.yml b/docker/addons/prometheus/docker-compose.yml 
new file mode 100644
index 0000000..100319b
--- /dev/null
+++ b/docker/addons/prometheus/docker-compose.yml
@@ -0,0 +1,53 @@
+# Copyright (c) Abstract Machines
+# SPDX-License-Identifier: Apache-2.0
+
+# This docker-compose file contains optional Prometheus and Grafana services for the Magistrala platform.
+# Since these services are optional, this file is dependent on the docker-compose.yml file
+# from <project_root>/docker. In order to run these services, execute the command:
+# docker compose -f docker/addons/prometheus/docker-compose.yml up
+# from the project root.
+
+networks:
+  magistrala-base-net:
+
+volumes:
+  magistrala-prometheus-volume:
+
+services:
+  prometheus:
+    image: prom/prometheus:v2.49.1
+    container_name: magistrala-prometheus
+    restart: on-failure
+    ports:
+      - ${MG_PROMETHEUS_PORT}:${MG_PROMETHEUS_PORT}
+    networks:
+      - magistrala-base-net
+    volumes:
+      - type: bind
+        source: ./metrics/prometheus.yml
+        target: /etc/prometheus/prometheus.yml
+      - magistrala-prometheus-volume:/prometheus
+
+  grafana:
+    image: grafana/grafana:10.2.3
+    container_name: magistrala-grafana
+    depends_on:
+      - prometheus
+    restart: on-failure
+    ports:
+      - ${MG_GRAFANA_PORT}:${MG_GRAFANA_PORT}
+    environment:
+      - GF_SECURITY_ADMIN_USER=${MG_GRAFANA_ADMIN_USER}
+      - GF_SECURITY_ADMIN_PASSWORD=${MG_GRAFANA_ADMIN_PASSWORD}
+    networks:
+      - magistrala-base-net
+    volumes:
+      - type: bind
+        source: ./grafana/datasource.yml
+        target: /etc/grafana/provisioning/datasources/datasource.yml
+      - type: bind
+        source: ./grafana/dashboard.yml
+        target: /etc/grafana/provisioning/dashboards/main.yaml
+      - type: bind
+        source: ./grafana/example-dashboard.json
+        target: /var/lib/grafana/dashboards/example-dashboard.json
diff --git a/docker/addons/prometheus/grafana/dashboard.yml b/docker/addons/prometheus/grafana/dashboard.yml
new file mode 100644
index 0000000..91f95f3
--- /dev/null
+++ b/docker/addons/prometheus/grafana/dashboard.yml
@@ -0,0 +1,15 @@
+# Copyright (c) Abstract Machines
+# SPDX-License-Identifier: Apache-2.0
+
+apiVersion: 1
+
+providers:
+  - name: "Dashboard provider"
+    orgId: 1
+    type: file
+    disableDeletion: false
+    updateIntervalSeconds: 10
+    allowUiUpdates: false
+    options:
+      path: /var/lib/grafana/dashboards
+      foldersFromFilesStructure: true
diff --git a/docker/addons/prometheus/grafana/datasource.yml b/docker/addons/prometheus/grafana/datasource.yml
new file mode 100644
index 0000000..4db83aa
--- /dev/null
+++ b/docker/addons/prometheus/grafana/datasource.yml
@@ -0,0 +1,12 @@
+# Copyright (c) Abstract Machines
+# SPDX-License-Identifier: Apache-2.0
+
+apiVersion: 1
+
+datasources:
+- name: Prometheus
+  type: prometheus
+  url: http://magistrala-prometheus:9090
+  isDefault: true
+  access: proxy
+  editable: true
diff --git a/docker/addons/prometheus/grafana/example-dashboard.json b/docker/addons/prometheus/grafana/example-dashboard.json
new file mode 100644
index 0000000..5604103
--- /dev/null
+++ b/docker/addons/prometheus/grafana/example-dashboard.json
@@ -0,0 +1,1317 @@
+{ + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": { + "type": "datasource", + "uid": "grafana" + }, + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "target": { + "limit": 100, + "matchAny": false, + "tags": [], + "type": "dashboard" + }, + "type": "dashboard" + } + ] + }, + "editable": true, + "fiscalYearStartMonth": 0, + "graphTooltip": 0, + "id": 1, + "links": [], + "liveNow": false, + "panels": [ + { + "collapsed": false, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, +
"y": 0 + }, + "id": 39, + "panels": [], + "title": "General", + "type": "row" + }, + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [ + { + "options": { + "0": { + "color": "red", + "index": 1, + "text": "down" + }, + "1": { + "color": "green", + "index": 0, + "text": "up" + } + }, + "type": "value" + } + ], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "red", + "value": null + }, + { + "color": "green", + "value": 1 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 4, + "w": 12, + "x": 0, + "y": 1 + }, + "id": 14, + "options": { + "colorMode": "value", + "graphMode": "none", + "justifyMode": "auto", + "orientation": "vertical", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "text": {}, + "textMode": "auto" + }, + "pluginVersion": "9.4.7", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "exemplar": false, + "expr": "up{}", + "interval": "", + "intervalFactor": 2, + "legendFormat": "{{instance}}", + "refId": "A" + } + ], + "title": "State", + "type": "stat" + }, + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "bytes" + }, + "overrides": [] + }, + "gridPos": { + "h": 4, + "w": 12, + "x": 12, + "y": 1 + }, + "id": 8, + "interval": "30s", + "options": { + "colorMode": "none", + "graphMode": "none", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "last" + ], + "fields": "", + "values": false + }, + "text": {}, + "textMode": "auto" + }, + "pluginVersion": "9.4.7", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "exemplar": true, + "expr": "go_memstats_alloc_bytes{}", + "format": "time_series", + "instant": false, + "interval": "", + "intervalFactor": 10, + "legendFormat": "{{instance}}", + "refId": "A" + } + ], + "title": "Allocated Bytes", + "type": "stat" + }, + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 22, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 9, + "w": 24, + "x": 0, + "y": 5 + }, + "id": 4, + "interval": "15s", + "options": { + "legend": { + "calcs": [ + "mean", + "sum", + "lastNotNull" + ], + "displayMode": "table", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "multi", + "sort": "none" + } + }, + "targets": 
[ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "exemplar": true, + "expr": "promhttp_metric_handler_requests_total{}", + "hide": false, + "interval": "", + "intervalFactor": 2, + "legendFormat": "{{instance}} - Code {{code}}", + "refId": "A" + } + ], + "title": "Total HTTP Requests", + "transformations": [], + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 24, + "x": 0, + "y": 14 + }, + "id": 2, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "exemplar": true, + "expr": "go_goroutines{}", + "interval": "", + "legendFormat": "{{instance}}", + "refId": "A" + } + ], + "title": "Goroutines instaces", + "type": "timeseries" + }, + { + "collapsed": false, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 22 + }, + "id": 35, + "panels": [], + "title": "Things-Service", + "type": "row" + }, + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 10, + "w": 10, + "x": 0, + "y": 23 + }, + "id": 10, + "options": { + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "showThresholdLabels": false, + "showThresholdMarkers": true, + "text": {} + }, + "pluginVersion": "9.4.7", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "exemplar": true, + "expr": "things_api_request_count{}", + "instant": false, + "interval": "", + "legendFormat": "{{method}}", + "refId": "A" + } + ], + "title": "Things Request Count", + "type": "gauge" + }, + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 35, + "gradientMode": "hue", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "smooth", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + 
"group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [ + { + "options": { + "NaN": { + "index": 0, + "text": "0" + } + }, + "type": "value" + } + ], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + }, + "unit": "Āµs" + }, + "overrides": [] + }, + "gridPos": { + "h": 10, + "w": 14, + "x": 10, + "y": 23 + }, + "id": 42, + "interval": "30", + "options": { + "legend": { + "calcs": [ + "min", + "max", + "mean" + ], + "displayMode": "table", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "editorMode": "code", + "exemplar": false, + "expr": "label_replace(label_replace(label_replace(things_api_request_latency_microseconds, \"quantile\", \"50th percentile\", \"quantile\", \"0.5\"), \"quantile\", \"90th percentile\", \"quantile\", \"0.9\"), \"quantile\", \"99th percentile\", \"quantile\", \"0.99\")", + "format": "time_series", + "instant": false, + "interval": "", + "key": "Q-cc5a9d33-5437-4862-abd9-60afd75f3f39-0", + "legendFormat": "{{method}} - {{quantile}}", + "range": true, + "refId": "A" + } + ], + "title": "Things Latency Quantiles", + "type": "timeseries" + }, + { + "collapsed": false, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 33 + }, + "id": 33, + "panels": [], + "title": "Users-Service", + "type": "row" + }, + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 10, + "w": 10, + "x": 0, + "y": 34 + }, + "id": 22, + "options": { + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "showThresholdLabels": false, + "showThresholdMarkers": true, + "text": {} + }, + "pluginVersion": "9.4.7", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "exemplar": true, + "expr": "users_api_request_count{}", + "interval": "", + "legendFormat": "{{method}}", + "refId": "A" + } + ], + "title": "Users Request Count", + "type": "gauge" + }, + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 35, + "gradientMode": "hue", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "smooth", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [ + { + "options": { + "NaN": { + "index": 0, + "text": "0" + } + }, + "type": "value" + } + ], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + }, + "unit": "Āµs" + }, + "overrides": [] + }, + "gridPos": { + "h": 10, + "w": 14, + "x": 10, + "y": 34 + }, + "id": 41, + "interval": "30", + "options": { + "legend": { + "calcs": [ + "min", + "max", 
+ "mean" + ], + "displayMode": "table", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "editorMode": "code", + "exemplar": false, + "expr": "label_replace(label_replace(label_replace(users_api_request_latency_microseconds, \"quantile\", \"50th percentile\", \"quantile\", \"0.5\"), \"quantile\", \"90th percentile\", \"quantile\", \"0.9\"), \"quantile\", \"99th percentile\", \"quantile\", \"0.99\")", + "format": "time_series", + "instant": false, + "interval": "", + "key": "Q-cc5a9d33-5437-4862-abd9-60afd75f3f39-0", + "legendFormat": "{{method}} - {{quantile}}", + "range": true, + "refId": "A" + } + ], + "title": "Users Latency Quantiles", + "type": "timeseries" + }, + { + "collapsed": false, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 44 + }, + "id": 31, + "panels": [], + "title": "CoAP-Adapter", + "type": "row" + }, + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 10, + "w": 10, + "x": 0, + "y": 45 + }, + "id": 18, + "options": { + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "showThresholdLabels": false, + "showThresholdMarkers": true, + "text": {} + }, + "pluginVersion": "9.4.7", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "exemplar": true, + "expr": "coap_adapter_api_request_count{}", + "interval": "", + "legendFormat": "{{method}}", + "refId": "A" + } + ], + "title": "Coap Adapter Request Count", + "type": "gauge" + }, + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 35, + "gradientMode": "hue", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "smooth", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [ + { + "options": { + "NaN": { + "index": 0, + "text": "0" + } + }, + "type": "value" + } + ], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + }, + "unit": "Āµs" + }, + "overrides": [] + }, + "gridPos": { + "h": 10, + "w": 14, + "x": 10, + "y": 45 + }, + "id": 44, + "interval": "30", + "options": { + "legend": { + "calcs": [ + "min", + "max", + "mean" + ], + "displayMode": "table", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "editorMode": "code", + "exemplar": false, + "expr": "label_replace(label_replace(label_replace(coap_adapter_api_request_latency_microseconds, \"quantile\", \"50th percentile\", \"quantile\", \"0.5\"), \"quantile\", \"90th percentile\", 
\"quantile\", \"0.9\"), \"quantile\", \"99th percentile\", \"quantile\", \"0.99\")", + "format": "time_series", + "instant": false, + "interval": "", + "key": "Q-cc5a9d33-5437-4862-abd9-60afd75f3f39-0", + "legendFormat": "{{method}} - {{quantile}}", + "range": true, + "refId": "A" + } + ], + "title": "CoAP Latency Quantiles", + "type": "timeseries" + }, + { + "collapsed": false, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 55 + }, + "id": 29, + "panels": [], + "title": "Web Sockets-Adapter", + "type": "row" + }, + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 10, + "w": 10, + "x": 0, + "y": 56 + }, + "id": 20, + "options": { + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "showThresholdLabels": false, + "showThresholdMarkers": true, + "text": {} + }, + "pluginVersion": "9.4.7", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "exemplar": true, + "expr": "ws_adapter_api_request_count{}", + "interval": "", + "legendFormat": "{{method}}", + "refId": "A" + } + ], + "title": "Web Sockets Request Count", + "type": "gauge" + }, + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 35, + "gradientMode": "hue", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "smooth", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [ + { + "options": { + "NaN": { + "index": 0, + "text": "0" + } + }, + "type": "value" + } + ], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + }, + "unit": "Āµs" + }, + "overrides": [] + }, + "gridPos": { + "h": 10, + "w": 14, + "x": 10, + "y": 56 + }, + "id": 23, + "options": { + "legend": { + "calcs": [ + "min", + "max", + "mean" + ], + "displayMode": "table", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "editorMode": "code", + "exemplar": false, + "expr": "label_replace(label_replace(label_replace(ws_adapter_api_request_latency_microseconds, \"quantile\", \"50th percentile\", \"quantile\", \"0.5\"), \"quantile\", \"90th percentile\", \"quantile\", \"0.9\"), \"quantile\", \"99th percentile\", \"quantile\", \"0.99\")", + "format": "time_series", + "instant": false, + "interval": "", + "key": "Q-cc5a9d33-5437-4862-abd9-60afd75f3f39-0", + "legendFormat": "{{method}} - {{quantile}}", + "range": true, + "refId": "A" + } + ], + "title": "WS Latency Quantiles", + "type": "timeseries" + }, + { + "collapsed": false, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 66 + }, + "id": 27, + "panels": [], + "title": "HTTP-Adapter", + 
"type": "row" + }, + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 10, + "w": 10, + "x": 0, + "y": 67 + }, + "id": 6, + "options": { + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "showThresholdLabels": false, + "showThresholdMarkers": true, + "text": {} + }, + "pluginVersion": "9.4.7", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "exemplar": true, + "expr": "http_adapter_api_request_count{}", + "format": "time_series", + "instant": false, + "interval": "", + "legendFormat": "{{method}}", + "refId": "A" + } + ], + "title": "HTTP Adapter Request Count", + "type": "gauge" + }, + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 35, + "gradientMode": "hue", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "smooth", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + }, + "unit": "Āµs" + }, + "overrides": [] + }, + "gridPos": { + "h": 10, + "w": 14, + "x": 10, + "y": 67 + }, + "id": 40, + "options": { + "legend": { + "calcs": [ + "min", + "max", + "mean" + ], + "displayMode": "table", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "9.4.7", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "editorMode": "code", + "exemplar": false, + "expr": "label_replace(label_replace(label_replace(http_adapter_api_request_latency_microseconds, \"quantile\", \"50th percentile\", \"quantile\", \"0.5\"), \"quantile\", \"90th percentile\", \"quantile\", \"0.9\"), \"quantile\", \"99th percentile\", \"quantile\", \"0.99\")", + "format": "time_series", + "instant": false, + "interval": "", + "key": "Q-cc5a9d33-5437-4862-abd9-60afd75f3f39-0", + "legendFormat": "{{method}} - {{quantile}}", + "range": true, + "refId": "A" + } + ], + "title": "HTTP Latency Quantiles", + "type": "timeseries" + } + ], + "refresh": "5s", + "revision": 1, + "schemaVersion": 38, + "style": "dark", + "tags": [], + "templating": { + "list": [] + }, + "time": { + "from": "now-15m", + "to": "now" + }, + "timepicker": {}, + "timezone": "", + "title": "magistrala", + "uid": "sgKwOwY4k", + "version": 1, + "weekStart": "" +} diff --git a/docker/addons/prometheus/metrics/prometheus.yml b/docker/addons/prometheus/metrics/prometheus.yml new file mode 100644 index 0000000..ecac123 --- /dev/null +++ b/docker/addons/prometheus/metrics/prometheus.yml @@ -0,0 +1,22 @@ +# Copyright (c) Abstract Machines +# SPDX-License-Identifier: Apache-2.0 + +global: + scrape_interval: 15s + 
evaluation_interval: 15s + +scrape_configs: + - job_name: 'magistrala' + honor_timestamps: true + scrape_interval: 15s + scrape_timeout: 10s + metrics_path: /metrics + follow_redirects: true + enable_http2: true + static_configs: + - targets: + - magistrala-things:9000 + - magistrala-users:9002 + - magistrala-http:8008 + - magistrala-ws:8186 + - magistrala-coap:5683 diff --git a/docker/addons/provision/configs/config.toml b/docker/addons/provision/configs/config.toml new file mode 100644 index 0000000..ec1ee38 --- /dev/null +++ b/docker/addons/provision/configs/config.toml @@ -0,0 +1,74 @@ +# Copyright (c) Abstract Machines +# SPDX-License-Identifier: Apache-2.0 + +[bootstrap] + [bootstrap.content] + [bootstrap.content.agent.edgex] + url = "http://localhost:48090/api/v1/" + + [bootstrap.content.agent.log] + level = "info" + + [bootstrap.content.agent.mqtt] + mtls = false + qos = 0 + retain = false + skip_tls_ver = true + url = "localhost:1883" + + [bootstrap.content.agent.server] + nats_url = "localhost:4222" + port = "9000" + + [bootstrap.content.agent.heartbeat] + interval = "30s" + + [bootstrap.content.agent.terminal] + session_timeout = "30s" + + + [bootstrap.content.export.exp] + log_level = "debug" + nats = "nats://localhost:4222" + port = "8172" + cache_url = "localhost:6379" + cache_pass = "" + cache_db = "0" + + [bootstrap.content.export.mqtt] + ca_path = "ca.crt" + cert_path = "thing.crt" + channel = "" + host = "tcp://localhost:1883" + mtls = false + password = "" + priv_key_path = "thing.key" + qos = 0 + retain = false + skip_tls_ver = false + username = "" + + [[bootstrap.content.export.routes]] + mqtt_topic = "" + nats_topic = ">" + subtopic = "" + type = "plain" + workers = 10 + +[[things]] + name = "thing" + + [things.metadata] + external_id = "xxxxxx" + +[[channels]] + name = "control-channel" + + [channels.metadata] + type = "control" + +[[channels]] + name = "data-channel" + + [channels.metadata] + type = "data" diff --git a/docker/addons/provision/docker-compose.yml b/docker/addons/provision/docker-compose.yml new file mode 100644 index 0000000..7709f40 --- /dev/null +++ b/docker/addons/provision/docker-compose.yml @@ -0,0 +1,45 @@ +# Copyright (c) Abstract Machines +# SPDX-License-Identifier: Apache-2.0 + +# This docker-compose file contains optional provision services. Since it's optional, this file is +# dependent of docker-compose file from <project_root>/docker. In order to run this services, execute command: +# docker compose -f docker/docker-compose.yml -f docker/addons/provision/docker-compose.yml up +# from project root. 
+
+networks:
+  magistrala-base-net:
+
+services:
+  provision:
+    image: magistrala/provision:${MG_RELEASE_TAG}
+    container_name: magistrala-provision
+    restart: on-failure
+    networks:
+      - magistrala-base-net
+    ports:
+      - ${MG_PROVISION_HTTP_PORT}:${MG_PROVISION_HTTP_PORT}
+    environment:
+      MG_PROVISION_LOG_LEVEL: ${MG_PROVISION_LOG_LEVEL}
+      MG_PROVISION_HTTP_PORT: ${MG_PROVISION_HTTP_PORT}
+      MG_PROVISION_CONFIG_FILE: ${MG_PROVISION_CONFIG_FILE}
+      MG_PROVISION_ENV_CLIENTS_TLS: ${MG_PROVISION_ENV_CLIENTS_TLS}
+      MG_PROVISION_SERVER_CERT: ${MG_PROVISION_SERVER_CERT}
+      MG_PROVISION_SERVER_KEY: ${MG_PROVISION_SERVER_KEY}
+      MG_PROVISION_USERS_LOCATION: ${MG_PROVISION_USERS_LOCATION}
+      MG_PROVISION_THINGS_LOCATION: ${MG_PROVISION_THINGS_LOCATION}
+      MG_PROVISION_USER: ${MG_PROVISION_USER}
+      MG_PROVISION_PASS: ${MG_PROVISION_PASS}
+      MG_PROVISION_API_KEY: ${MG_PROVISION_API_KEY}
+      MG_PROVISION_CERTS_SVC_URL: ${MG_PROVISION_CERTS_SVC_URL}
+      MG_PROVISION_X509_PROVISIONING: ${MG_PROVISION_X509_PROVISIONING}
+      MG_PROVISION_BS_SVC_URL: ${MG_PROVISION_BS_SVC_URL}
+      MG_PROVISION_BS_CONFIG_PROVISIONING: ${MG_PROVISION_BS_CONFIG_PROVISIONING}
+      MG_PROVISION_BS_AUTO_WHITELIST: ${MG_PROVISION_BS_AUTO_WHITELIST}
+      MG_PROVISION_BS_CONTENT: ${MG_PROVISION_BS_CONTENT}
+      MG_PROVISION_CERTS_HOURS_VALID: ${MG_PROVISION_CERTS_HOURS_VALID}
+      MG_SEND_TELEMETRY: ${MG_SEND_TELEMETRY}
+      MG_PROVISION_INSTANCE_ID: ${MG_PROVISION_INSTANCE_ID}
+    volumes:
+      - ./configs:/configs
+      - ../../ssl/certs/ca.key:/etc/ssl/certs/ca.key
+      - ../../ssl/certs/ca.crt:/etc/ssl/certs/ca.crt
diff --git a/docker/addons/smpp-notifier/config.toml b/docker/addons/smpp-notifier/config.toml
new file mode 100644
index 0000000..cd080af
--- /dev/null
+++ b/docker/addons/smpp-notifier/config.toml
@@ -0,0 +1,8 @@
+# Copyright (c) Abstract Machines
+# SPDX-License-Identifier: Apache-2.0
+
+# To listen to all message broker subjects, use the default value "channels.>".
+# To subscribe to specific subjects, use values starting with "channels." and
+# followed by a subtopic (e.g. ["channels.<channel_id>.sub.topic.x", ...]).
+[subscriber]
+subjects = ["channels.>"]
diff --git a/docker/addons/smpp-notifier/docker-compose.yml b/docker/addons/smpp-notifier/docker-compose.yml
new file mode 100644
index 0000000..213eb1a
--- /dev/null
+++ b/docker/addons/smpp-notifier/docker-compose.yml
@@ -0,0 +1,91 @@
+# Copyright (c) Abstract Machines
+# SPDX-License-Identifier: Apache-2.0
+
+# This docker-compose file contains the optional SMPP-Notifier and its database services
+# for the Magistrala platform. Since these services are optional, this file is dependent on the
+# docker-compose.yml file from <project_root>/docker/. In order to run these services,
+# core services, as well as the network from the core composition, should already be running.
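+# For example, from the project root (a sketch; it assumes the core composition is already up):
+#   docker compose -f docker/docker-compose.yml -f docker/addons/smpp-notifier/docker-compose.yml up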
+
+networks:
+  magistrala-base-net:
+
+volumes:
+  magistrala-smpp-notifier-volume:
+
+services:
+  smpp-notifier-db:
+    image: postgres:16.2-alpine
+    container_name: magistrala-smpp-notifier-db
+    restart: on-failure
+    environment:
+      POSTGRES_USER: ${MG_SMPP_NOTIFIER_DB_USER}
+      POSTGRES_PASSWORD: ${MG_SMPP_NOTIFIER_DB_PASS}
+      POSTGRES_DB: ${MG_SMPP_NOTIFIER_DB_NAME}
+    networks:
+      - magistrala-base-net
+    volumes:
+      - magistrala-smpp-notifier-volume:/var/lib/postgresql/data
+
+  smpp-notifier:
+    image: magistrala/smpp-notifier:latest
+    container_name: magistrala-smpp-notifier
+    depends_on:
+      - smpp-notifier-db
+    restart: on-failure
+    environment:
+      MG_SMPP_NOTIFIER_LOG_LEVEL: ${MG_SMPP_NOTIFIER_LOG_LEVEL}
+      MG_SMPP_NOTIFIER_FROM_ADDR: ${MG_SMPP_NOTIFIER_FROM_ADDR}
+      MG_SMPP_NOTIFIER_CONFIG_PATH: ${MG_SMPP_NOTIFIER_CONFIG_PATH}
+      MG_SMPP_NOTIFIER_HTTP_HOST: ${MG_SMPP_NOTIFIER_HTTP_HOST}
+      MG_SMPP_NOTIFIER_HTTP_PORT: ${MG_SMPP_NOTIFIER_HTTP_PORT}
+      MG_SMPP_NOTIFIER_HTTP_SERVER_CERT: ${MG_SMPP_NOTIFIER_HTTP_SERVER_CERT}
+      MG_SMPP_NOTIFIER_HTTP_SERVER_KEY: ${MG_SMPP_NOTIFIER_HTTP_SERVER_KEY}
+      MG_SMPP_NOTIFIER_DB_HOST: ${MG_SMPP_NOTIFIER_DB_HOST}
+      MG_SMPP_NOTIFIER_DB_PORT: ${MG_SMPP_NOTIFIER_DB_PORT}
+      MG_SMPP_NOTIFIER_DB_USER: ${MG_SMPP_NOTIFIER_DB_USER}
+      MG_SMPP_NOTIFIER_DB_PASS: ${MG_SMPP_NOTIFIER_DB_PASS}
+      MG_SMPP_NOTIFIER_DB_NAME: ${MG_SMPP_NOTIFIER_DB_NAME}
+      MG_SMPP_NOTIFIER_DB_SSL_MODE: ${MG_SMPP_NOTIFIER_DB_SSL_MODE}
+      MG_SMPP_NOTIFIER_DB_SSL_CERT: ${MG_SMPP_NOTIFIER_DB_SSL_CERT}
+      MG_SMPP_NOTIFIER_DB_SSL_KEY: ${MG_SMPP_NOTIFIER_DB_SSL_KEY}
+      MG_SMPP_NOTIFIER_DB_SSL_ROOT_CERT: ${MG_SMPP_NOTIFIER_DB_SSL_ROOT_CERT}
+      MG_SMPP_ADDRESS: ${MG_SMPP_ADDRESS}
+      MG_SMPP_USERNAME: ${MG_SMPP_USERNAME}
+      MG_SMPP_PASSWORD: ${MG_SMPP_PASSWORD}
+      MG_SMPP_SYSTEM_TYPE: ${MG_SMPP_SYSTEM_TYPE}
+      MG_SMPP_SRC_ADDR_TON: ${MG_SMPP_SRC_ADDR_TON}
+      MG_SMPP_SRC_ADDR_NPI: ${MG_SMPP_SRC_ADDR_NPI}
+      MG_SMPP_DST_ADDR_TON: ${MG_SMPP_DST_ADDR_TON}
+      MG_SMPP_DST_ADDR_NPI: ${MG_SMPP_DST_ADDR_NPI}
+      MG_AUTH_GRPC_URL: ${MG_AUTH_GRPC_URL}
+      MG_AUTH_GRPC_TIMEOUT: ${MG_AUTH_GRPC_TIMEOUT}
+      MG_AUTH_GRPC_CLIENT_CERT: ${MG_AUTH_GRPC_CLIENT_CERT:+/auth-grpc-client.crt}
+      MG_AUTH_GRPC_CLIENT_KEY: ${MG_AUTH_GRPC_CLIENT_KEY:+/auth-grpc-client.key}
+      MG_AUTH_GRPC_SERVER_CA_CERTS: ${MG_AUTH_GRPC_SERVER_CA_CERTS:+/auth-grpc-server-ca.crt}
+      MG_MESSAGE_BROKER_URL: ${MG_MESSAGE_BROKER_URL}
+      MG_JAEGER_URL: ${MG_JAEGER_URL}
+      MG_JAEGER_TRACE_RATIO: ${MG_JAEGER_TRACE_RATIO}
+      MG_SEND_TELEMETRY: ${MG_SEND_TELEMETRY}
+      MG_SMPP_NOTIFIER_INSTANCE_ID: ${MG_SMPP_NOTIFIER_INSTANCE_ID}
+    ports:
+      - ${MG_SMPP_NOTIFIER_HTTP_PORT}:${MG_SMPP_NOTIFIER_HTTP_PORT}
+    networks:
+      - magistrala-base-net
+    volumes:
+      - ./config.toml:/config.toml
+      # Auth gRPC client certificates
+      - type: bind
+        source: ${MG_ADDONS_CERTS_PATH_PREFIX}${MG_AUTH_GRPC_CLIENT_CERT:-./ssl/certs/dummy/client_cert}
+        target: /auth-grpc-client${MG_AUTH_GRPC_CLIENT_CERT:+.crt}
+        bind:
+          create_host_path: true
+      - type: bind
+        source: ${MG_ADDONS_CERTS_PATH_PREFIX}${MG_AUTH_GRPC_CLIENT_KEY:-./ssl/certs/dummy/client_key}
+        target: /auth-grpc-client${MG_AUTH_GRPC_CLIENT_KEY:+.key}
+        bind:
+          create_host_path: true
+      - type: bind
+        source: ${MG_ADDONS_CERTS_PATH_PREFIX}${MG_AUTH_GRPC_SERVER_CA_CERTS:-./ssl/certs/dummy/server_ca}
+        target: /auth-grpc-server-ca${MG_AUTH_GRPC_SERVER_CA_CERTS:+.crt}
+        bind:
+          create_host_path: true
diff --git a/docker/addons/smtp-notifier/config.toml b/docker/addons/smtp-notifier/config.toml
new file mode 100644
index 0000000..cd080af
--- /dev/null
+++ b/docker/addons/smtp-notifier/config.toml
@@ -0,0 +1,8 @@
+# Copyright (c) Abstract Machines
+# SPDX-License-Identifier: Apache-2.0
+
+# To listen to all message broker subjects, use the default value "channels.>".
+# To subscribe to specific subjects, use values starting with "channels." and
+# followed by a subtopic (e.g. ["channels.<channel_id>.sub.topic.x", ...]).
+[subscriber]
+subjects = ["channels.>"]
diff --git a/docker/addons/smtp-notifier/docker-compose.yml b/docker/addons/smtp-notifier/docker-compose.yml
new file mode 100644
index 0000000..8279cd5
--- /dev/null
+++ b/docker/addons/smtp-notifier/docker-compose.yml
@@ -0,0 +1,90 @@
+# Copyright (c) Abstract Machines
+# SPDX-License-Identifier: Apache-2.0
+
+# This docker-compose file contains the optional SMTP-Notifier and its database services
+# for the Magistrala platform. Since these services are optional, this file is dependent on the
+# docker-compose.yml file from <project_root>/docker/. In order to run these services,
+# core services, as well as the network from the core composition, should already be running.
+
+networks:
+  magistrala-base-net:
+
+volumes:
+  magistrala-smtp-notifier-volume:
+
+services:
+  smtp-notifier-db:
+    image: postgres:16.2-alpine
+    container_name: magistrala-smtp-notifier-db
+    restart: on-failure
+    environment:
+      POSTGRES_USER: ${MG_SMTP_NOTIFIER_DB_USER}
+      POSTGRES_PASSWORD: ${MG_SMTP_NOTIFIER_DB_PASS}
+      POSTGRES_DB: ${MG_SMTP_NOTIFIER_DB_NAME}
+    networks:
+      - magistrala-base-net
+    volumes:
+      - magistrala-smtp-notifier-volume:/var/lib/postgresql/data
+
+  smtp-notifier:
+    image: magistrala/smtp-notifier:latest
+    container_name: magistrala-smtp-notifier
+    depends_on:
+      - smtp-notifier-db
+    restart: on-failure
+    environment:
+      MG_SMTP_NOTIFIER_LOG_LEVEL: ${MG_SMTP_NOTIFIER_LOG_LEVEL}
+      MG_SMTP_NOTIFIER_FROM_ADDR: ${MG_SMTP_NOTIFIER_FROM_ADDR}
+      MG_SMTP_NOTIFIER_CONFIG_PATH: ${MG_SMTP_NOTIFIER_CONFIG_PATH}
+      MG_SMTP_NOTIFIER_HTTP_HOST: ${MG_SMTP_NOTIFIER_HTTP_HOST}
+      MG_SMTP_NOTIFIER_HTTP_PORT: ${MG_SMTP_NOTIFIER_HTTP_PORT}
+      MG_SMTP_NOTIFIER_HTTP_SERVER_CERT: ${MG_SMTP_NOTIFIER_HTTP_SERVER_CERT}
+      MG_SMTP_NOTIFIER_HTTP_SERVER_KEY: ${MG_SMTP_NOTIFIER_HTTP_SERVER_KEY}
+      MG_SMTP_NOTIFIER_DB_HOST: ${MG_SMTP_NOTIFIER_DB_HOST}
+      MG_SMTP_NOTIFIER_DB_PORT: ${MG_SMTP_NOTIFIER_DB_PORT}
+      MG_SMTP_NOTIFIER_DB_USER: ${MG_SMTP_NOTIFIER_DB_USER}
+      MG_SMTP_NOTIFIER_DB_PASS: ${MG_SMTP_NOTIFIER_DB_PASS}
+      MG_SMTP_NOTIFIER_DB_NAME: ${MG_SMTP_NOTIFIER_DB_NAME}
+      MG_SMTP_NOTIFIER_DB_SSL_MODE: ${MG_SMTP_NOTIFIER_DB_SSL_MODE}
+      MG_SMTP_NOTIFIER_DB_SSL_CERT: ${MG_SMTP_NOTIFIER_DB_SSL_CERT}
+      MG_SMTP_NOTIFIER_DB_SSL_KEY: ${MG_SMTP_NOTIFIER_DB_SSL_KEY}
+      MG_SMTP_NOTIFIER_DB_SSL_ROOT_CERT: ${MG_SMTP_NOTIFIER_DB_SSL_ROOT_CERT}
+      MG_AUTH_GRPC_URL: ${MG_AUTH_GRPC_URL}
+      MG_AUTH_GRPC_TIMEOUT: ${MG_AUTH_GRPC_TIMEOUT}
+      MG_AUTH_GRPC_CLIENT_CERT: ${MG_AUTH_GRPC_CLIENT_CERT:+/auth-grpc-client.crt}
+      MG_AUTH_GRPC_CLIENT_KEY: ${MG_AUTH_GRPC_CLIENT_KEY:+/auth-grpc-client.key}
+      MG_AUTH_GRPC_SERVER_CA_CERTS: ${MG_AUTH_GRPC_SERVER_CA_CERTS:+/auth-grpc-server-ca.crt}
+      MG_EMAIL_USERNAME: ${MG_EMAIL_USERNAME}
+      MG_EMAIL_PASSWORD: ${MG_EMAIL_PASSWORD}
+      MG_EMAIL_HOST: ${MG_EMAIL_HOST}
+      MG_EMAIL_PORT: ${MG_EMAIL_PORT}
+      MG_EMAIL_FROM_ADDRESS: ${MG_EMAIL_FROM_ADDRESS}
+      MG_EMAIL_FROM_NAME: ${MG_EMAIL_FROM_NAME}
+      MG_EMAIL_TEMPLATE: ${MG_SMTP_NOTIFIER_EMAIL_TEMPLATE}
+      MG_MESSAGE_BROKER_URL: ${MG_MESSAGE_BROKER_URL}
+      MG_JAEGER_URL: ${MG_JAEGER_URL}
+      MG_JAEGER_TRACE_RATIO: ${MG_JAEGER_TRACE_RATIO}
+      MG_SEND_TELEMETRY: ${MG_SEND_TELEMETRY}
+      MG_SMTP_NOTIFIER_INSTANCE_ID: ${MG_SMTP_NOTIFIER_INSTANCE_ID}
+    ports:
+      - ${MG_SMTP_NOTIFIER_HTTP_PORT}:${MG_SMTP_NOTIFIER_HTTP_PORT}
+    networks:
+      - magistrala-base-net
+    volumes:
+      - ./config.toml:/config.toml
+      - ../../templates/${MG_SMTP_NOTIFIER_EMAIL_TEMPLATE}:/${MG_SMTP_NOTIFIER_EMAIL_TEMPLATE}
+      - type: bind
+        source: ${MG_ADDONS_CERTS_PATH_PREFIX}${MG_AUTH_GRPC_CLIENT_CERT:-./ssl/certs/dummy/client_cert}
+        target: /auth-grpc-client${MG_AUTH_GRPC_CLIENT_CERT:+.crt}
+        bind:
+          create_host_path: true
+      - type: bind
+        source: ${MG_ADDONS_CERTS_PATH_PREFIX}${MG_AUTH_GRPC_CLIENT_KEY:-./ssl/certs/dummy/client_key}
+        target: /auth-grpc-client${MG_AUTH_GRPC_CLIENT_KEY:+.key}
+        bind:
+          create_host_path: true
+      - type: bind
+        source: ${MG_ADDONS_CERTS_PATH_PREFIX}${MG_AUTH_GRPC_SERVER_CA_CERTS:-./ssl/certs/dummy/server_ca}
+        target: /auth-grpc-server-ca${MG_AUTH_GRPC_SERVER_CA_CERTS:+.crt}
+        bind:
+          create_host_path: true
diff --git a/docker/addons/timescale-reader/docker-compose.yml b/docker/addons/timescale-reader/docker-compose.yml
new file mode 100644
index 0000000..269e1c6
--- /dev/null
+++ b/docker/addons/timescale-reader/docker-compose.yml
@@ -0,0 +1,80 @@
+# Copyright (c) Abstract Machines
+# SPDX-License-Identifier: Apache-2.0
+
+# This docker-compose file contains the optional Timescale-reader service for the Magistrala platform.
+# Since this service is optional, this file is dependent on the docker-compose.yml file
+# from <project_root>/docker. In order to run this service, execute command:
+# docker compose -f docker/docker-compose.yml -f docker/addons/timescale-reader/docker-compose.yml up
+# from project root.
+
+networks:
+  magistrala-base-net:
+
+services:
+  timescale-reader:
+    image: magistrala/timescale-reader:${MG_RELEASE_TAG}
+    container_name: magistrala-timescale-reader
+    restart: on-failure
+    environment:
+      MG_TIMESCALE_READER_LOG_LEVEL: ${MG_TIMESCALE_READER_LOG_LEVEL}
+      MG_TIMESCALE_READER_HTTP_HOST: ${MG_TIMESCALE_READER_HTTP_HOST}
+      MG_TIMESCALE_READER_HTTP_PORT: ${MG_TIMESCALE_READER_HTTP_PORT}
+      MG_TIMESCALE_READER_HTTP_SERVER_CERT: ${MG_TIMESCALE_READER_HTTP_SERVER_CERT}
+      MG_TIMESCALE_READER_HTTP_SERVER_KEY: ${MG_TIMESCALE_READER_HTTP_SERVER_KEY}
+      MG_TIMESCALE_HOST: ${MG_TIMESCALE_HOST}
+      MG_TIMESCALE_PORT: ${MG_TIMESCALE_PORT}
+      MG_TIMESCALE_USER: ${MG_TIMESCALE_USER}
+      MG_TIMESCALE_PASS: ${MG_TIMESCALE_PASS}
+      MG_TIMESCALE_NAME: ${MG_TIMESCALE_NAME}
+      MG_TIMESCALE_SSL_MODE: ${MG_TIMESCALE_SSL_MODE}
+      MG_TIMESCALE_SSL_CERT: ${MG_TIMESCALE_SSL_CERT}
+      MG_TIMESCALE_SSL_KEY: ${MG_TIMESCALE_SSL_KEY}
+      MG_TIMESCALE_SSL_ROOT_CERT: ${MG_TIMESCALE_SSL_ROOT_CERT}
+      MG_THINGS_AUTH_GRPC_URL: ${MG_THINGS_AUTH_GRPC_URL}
+      MG_THINGS_AUTH_GRPC_TIMEOUT: ${MG_THINGS_AUTH_GRPC_TIMEOUT}
+      MG_THINGS_AUTH_GRPC_CLIENT_CERT: ${MG_THINGS_AUTH_GRPC_CLIENT_CERT:+/things-grpc-client.crt}
+      MG_THINGS_AUTH_GRPC_CLIENT_KEY: ${MG_THINGS_AUTH_GRPC_CLIENT_KEY:+/things-grpc-client.key}
+      MG_THINGS_AUTH_GRPC_SERVER_CA_CERTS: ${MG_THINGS_AUTH_GRPC_SERVER_CA_CERTS:+/things-grpc-server-ca.crt}
+      MG_AUTH_GRPC_URL: ${MG_AUTH_GRPC_URL}
+      MG_AUTH_GRPC_TIMEOUT: ${MG_AUTH_GRPC_TIMEOUT}
+      MG_AUTH_GRPC_CLIENT_CERT: ${MG_AUTH_GRPC_CLIENT_CERT:+/auth-grpc-client.crt}
+      MG_AUTH_GRPC_CLIENT_KEY: ${MG_AUTH_GRPC_CLIENT_KEY:+/auth-grpc-client.key}
+      MG_AUTH_GRPC_SERVER_CA_CERTS: ${MG_AUTH_GRPC_SERVER_CA_CERTS:+/auth-grpc-server-ca.crt}
+      MG_SEND_TELEMETRY: ${MG_SEND_TELEMETRY}
+      MG_TIMESCALE_READER_INSTANCE_ID: ${MG_TIMESCALE_READER_INSTANCE_ID}
+    ports:
+      - ${MG_TIMESCALE_READER_HTTP_PORT}:${MG_TIMESCALE_READER_HTTP_PORT}
+    networks:
+      - magistrala-base-net
+    volumes:
+      - type: bind
+        source: ${MG_ADDONS_CERTS_PATH_PREFIX}${MG_AUTH_GRPC_CLIENT_CERT:-./ssl/certs/dummy/client_cert}
+        target: /auth-grpc-client${MG_AUTH_GRPC_CLIENT_CERT:+.crt}
+        bind:
+          create_host_path: true
+      - type: bind
+        source: ${MG_ADDONS_CERTS_PATH_PREFIX}${MG_AUTH_GRPC_CLIENT_KEY:-./ssl/certs/dummy/client_key}
+        target: /auth-grpc-client${MG_AUTH_GRPC_CLIENT_KEY:+.key}
+        bind:
+          create_host_path: true
+      - type: bind
+        source: ${MG_ADDONS_CERTS_PATH_PREFIX}${MG_AUTH_GRPC_SERVER_CA_CERTS:-./ssl/certs/dummy/server_ca}
+        target: /auth-grpc-server-ca${MG_AUTH_GRPC_SERVER_CA_CERTS:+.crt}
+        bind:
+          create_host_path: true
+      # Things gRPC mTLS client certificates
+      - type: bind
+        source: ${MG_ADDONS_CERTS_PATH_PREFIX}${MG_THINGS_AUTH_GRPC_CLIENT_CERT:-ssl/certs/dummy/client_cert}
+        target: /things-grpc-client${MG_THINGS_AUTH_GRPC_CLIENT_CERT:+.crt}
+        bind:
+          create_host_path: true
+      - type: bind
+        source: ${MG_ADDONS_CERTS_PATH_PREFIX}${MG_THINGS_AUTH_GRPC_CLIENT_KEY:-ssl/certs/dummy/client_key}
+        target: /things-grpc-client${MG_THINGS_AUTH_GRPC_CLIENT_KEY:+.key}
+        bind:
+          create_host_path: true
+      - type: bind
+        source: ${MG_ADDONS_CERTS_PATH_PREFIX}${MG_THINGS_AUTH_GRPC_SERVER_CA_CERTS:-ssl/certs/dummy/server_ca}
+        target: /things-grpc-server-ca${MG_THINGS_AUTH_GRPC_SERVER_CA_CERTS:+.crt}
+        bind:
+          create_host_path: true
diff --git a/docker/addons/timescale-writer/config.toml b/docker/addons/timescale-writer/config.toml
new file mode 100644
index 0000000..f3ad91d
--- /dev/null
+++ b/docker/addons/timescale-writer/config.toml
@@ -0,0 +1,8 @@
+# Copyright (c) Abstract Machines
+# SPDX-License-Identifier: Apache-2.0
+
+# To listen to all message broker subjects, use the default value "channels.>".
+# To subscribe to specific subjects, use values starting with "channels." and
+# followed by a subtopic (e.g. ["channels.<channel_id>.sub.topic.x", ...]).
+[subjects]
+filter = ["channels.>"]
diff --git a/docker/addons/timescale-writer/docker-compose.yml b/docker/addons/timescale-writer/docker-compose.yml
new file mode 100644
index 0000000..125315a
--- /dev/null
+++ b/docker/addons/timescale-writer/docker-compose.yml
@@ -0,0 +1,65 @@
+# Copyright (c) Abstract Machines
+# SPDX-License-Identifier: Apache-2.0
+
+# This docker-compose file contains optional Timescale and Timescale-writer services
+# for the Magistrala platform. Since these are optional, this file is dependent on the docker-compose file
+# from <project_root>/docker. In order to run these services, execute command:
+# docker compose -f docker/docker-compose.yml -f docker/addons/timescale-writer/docker-compose.yml up
+# from project root. The container's PostgreSQL port (5432) is published on host port 5433, so you can
+# use various tools for database inspection and data visualization.
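+# For example, once the composition is up, you can connect with psql
+# (a sketch; user, password, and database name come from docker/.env):
+#   psql -h localhost -p 5433 -U "$MG_TIMESCALE_USER" "$MG_TIMESCALE_NAME"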
+
+networks:
+  magistrala-base-net:
+
+volumes:
+  magistrala-timescale-writer-volume:
+
+services:
+  timescale:
+    image: timescale/timescaledb:2.13.1-pg16
+    container_name: magistrala-timescale
+    restart: on-failure
+    environment:
+      POSTGRES_PASSWORD: ${MG_TIMESCALE_PASS}
+      POSTGRES_USER: ${MG_TIMESCALE_USER}
+      POSTGRES_DB: ${MG_TIMESCALE_NAME}
+    ports:
+      - 5433:5432
+    networks:
+      - magistrala-base-net
+    volumes:
+      - magistrala-timescale-writer-volume:/var/lib/postgresql/data
+
+  timescale-writer:
+    image: magistrala/timescale-writer:${MG_RELEASE_TAG}
+    container_name: magistrala-timescale-writer
+    depends_on:
+      - timescale
+    restart: on-failure
+    environment:
+      MG_TIMESCALE_WRITER_LOG_LEVEL: ${MG_TIMESCALE_WRITER_LOG_LEVEL}
+      MG_TIMESCALE_WRITER_CONFIG_PATH: ${MG_TIMESCALE_WRITER_CONFIG_PATH}
+      MG_TIMESCALE_WRITER_HTTP_HOST: ${MG_TIMESCALE_WRITER_HTTP_HOST}
+      MG_TIMESCALE_WRITER_HTTP_PORT: ${MG_TIMESCALE_WRITER_HTTP_PORT}
+      MG_TIMESCALE_WRITER_HTTP_SERVER_CERT: ${MG_TIMESCALE_WRITER_HTTP_SERVER_CERT}
+      MG_TIMESCALE_WRITER_HTTP_SERVER_KEY: ${MG_TIMESCALE_WRITER_HTTP_SERVER_KEY}
+      MG_TIMESCALE_HOST: ${MG_TIMESCALE_HOST}
+      MG_TIMESCALE_PORT: ${MG_TIMESCALE_PORT}
+      MG_TIMESCALE_USER: ${MG_TIMESCALE_USER}
+      MG_TIMESCALE_PASS: ${MG_TIMESCALE_PASS}
+      MG_TIMESCALE_NAME: ${MG_TIMESCALE_NAME}
+      MG_TIMESCALE_SSL_MODE: ${MG_TIMESCALE_SSL_MODE}
+      MG_TIMESCALE_SSL_CERT: ${MG_TIMESCALE_SSL_CERT}
+      MG_TIMESCALE_SSL_KEY: ${MG_TIMESCALE_SSL_KEY}
+      MG_TIMESCALE_SSL_ROOT_CERT: ${MG_TIMESCALE_SSL_ROOT_CERT}
+      MG_MESSAGE_BROKER_URL: ${MG_MESSAGE_BROKER_URL}
+      MG_JAEGER_URL: ${MG_JAEGER_URL}
+      MG_JAEGER_TRACE_RATIO: ${MG_JAEGER_TRACE_RATIO}
+      MG_SEND_TELEMETRY: ${MG_SEND_TELEMETRY}
+      MG_TIMESCALE_WRITER_INSTANCE_ID: ${MG_TIMESCALE_WRITER_INSTANCE_ID}
+    ports:
+      - ${MG_TIMESCALE_WRITER_HTTP_PORT}:${MG_TIMESCALE_WRITER_HTTP_PORT}
+    networks:
+      - magistrala-base-net
+    volumes:
+      - ./config.toml:/config.toml
diff --git a/docker/addons/twins/docker-compose.yml b/docker/addons/twins/docker-compose.yml
new file mode 100644
index 0000000..6f78a29
--- /dev/null
+++ b/docker/addons/twins/docker-compose.yml
@@ -0,0 +1,91 @@
+# Copyright (c) Abstract Machines
+# SPDX-License-Identifier: Apache-2.0
+
+# This docker-compose file contains the optional Twins service with its Redis and MongoDB dependencies
+# for the Magistrala platform. Since these services are optional, this file is dependent on the
+# docker-compose.yml file from <project_root>/docker/. In order to run these services,
+# core services, as well as the network from the core composition, should already be running.
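+# For example, from the project root (a sketch; it assumes the core composition is already up):
+#   docker compose -f docker/docker-compose.yml -f docker/addons/twins/docker-compose.yml up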
+ +networks: + magistrala-base-net: + +volumes: + magistrala-twins-db-volume: + magistrala-twins-db-configdb-volume: + +services: + twins-redis: + image: redis:7.2.4-alpine + container_name: magistrala-twins-redis + restart: on-failure + networks: + - magistrala-base-net + + twins-db: + image: mongo:bionic + command: mongod --port ${MG_TWINS_DB_PORT} + container_name: magistrala-twins-db + restart: on-failure + environment: + MONGO_INITDB_DATABASE: ${MG_TWINS_DB_NAME} + ports: + - ${MG_TWINS_DB_PORT}:${MG_TWINS_DB_PORT} + networks: + magistrala-base-net: + volumes: + - magistrala-twins-db-volume:/data/db + - magistrala-twins-db-configdb-volume:/data/configdb + + twins: + image: magistrala/twins:${MG_RELEASE_TAG} + container_name: magistrala-twins + restart: on-failure + environment: + MG_TWINS_LOG_LEVEL: ${MG_TWINS_LOG_LEVEL} + MG_TWINS_STANDALONE_ID: ${MG_TWINS_STANDALONE_ID} + MG_TWINS_STANDALONE_TOKEN: ${MG_TWINS_STANDALONE_TOKEN} + MG_TWINS_CHANNEL_ID: ${MG_TWINS_CHANNEL_ID} + MG_TWINS_HTTP_HOST: ${MG_TWINS_HTTP_HOST} + MG_TWINS_HTTP_PORT: ${MG_TWINS_HTTP_PORT} + MG_TWINS_HTTP_SERVER_CERT: ${MG_TWINS_HTTP_SERVER_CERT} + MG_TWINS_HTTP_SERVER_KEY: ${MG_TWINS_HTTP_SERVER_KEY} + MG_TWINS_CACHE_URL: ${MG_TWINS_CACHE_URL} + MG_ES_URL: ${MG_ES_URL} + MG_THINGS_STANDALONE_ID: ${MG_THINGS_STANDALONE_ID} + MG_THINGS_STANDALONE_TOKEN: ${MG_THINGS_STANDALONE_TOKEN} + MG_TWINS_DB_HOST: ${MG_TWINS_DB_HOST} + MG_TWINS_DB_PORT: ${MG_TWINS_DB_PORT} + MG_TWINS_DB_NAME: ${MG_TWINS_DB_NAME} + MG_AUTH_GRPC_URL: ${MG_AUTH_GRPC_URL} + MG_AUTH_GRPC_TIMEOUT: ${MG_AUTH_GRPC_TIMEOUT} + MG_AUTH_GRPC_CLIENT_CERT: ${MG_AUTH_GRPC_CLIENT_CERT:+/auth-grpc-client.crt} + MG_AUTH_GRPC_CLIENT_KEY: ${MG_AUTH_GRPC_CLIENT_KEY:+/auth-grpc-client.key} + MG_AUTH_GRPC_SERVER_CA_CERTS: ${MG_AUTH_GRPC_SERVER_CA_CERTS:+/auth-grpc-server-ca.crt} + MG_MESSAGE_BROKER_URL: ${MG_MESSAGE_BROKER_URL} + MG_JAEGER_URL: ${MG_JAEGER_URL} + MG_JAEGER_TRACE_RATIO: ${MG_JAEGER_TRACE_RATIO} + MG_SEND_TELEMETRY: ${MG_SEND_TELEMETRY} + MG_TWINS_INSTANCE_ID: ${MG_TWINS_INSTANCE_ID} + ports: + - ${MG_TWINS_HTTP_PORT}:${MG_TWINS_HTTP_PORT} + networks: + magistrala-base-net: + depends_on: + - twins-db + - twins-redis + volumes: + - type: bind + source: ${MG_ADDONS_CERTS_PATH_PREFIX}${MG_AUTH_GRPC_CLIENT_CERT:-./ssl/certs/dummy/client_cert} + target: /auth-grpc-client${MG_AUTH_GRPC_CLIENT_CERT:+.crt} + bind: + create_host_path: true + - type: bind + source: ${MG_ADDONS_CERTS_PATH_PREFIX}${MG_AUTH_GRPC_CLIENT_KEY:-./ssl/certs/dummy/client_key} + target: /auth-grpc-client${MG_AUTH_GRPC_CLIENT_KEY:+.key} + bind: + create_host_path: true + - type: bind + source: ${MG_ADDONS_CERTS_PATH_PREFIX}${MG_AUTH_GRPC_SERVER_CA_CERTS:-./ssl/certs/dummy/server_ca} + target: /auth-grpc-server-ca${MG_AUTH_GRPC_SERVER_CA_CERTS:+.crt} + bind: + create_host_path: true diff --git a/docker/addons/vault/.gitignore b/docker/addons/vault/.gitignore new file mode 100644 index 0000000..4f14d39 --- /dev/null +++ b/docker/addons/vault/.gitignore @@ -0,0 +1,5 @@ +# Copyright (c) Abstract Machines +# SPDX-License-Identifier: Apache-2.0 + +data +magistrala_things_certs_issue.hcl diff --git a/docker/addons/vault/README.md b/docker/addons/vault/README.md new file mode 100644 index 0000000..1ac1136 --- /dev/null +++ b/docker/addons/vault/README.md @@ -0,0 +1,170 @@ +# Vault + +This is Vault service deployment to be used with Magistrala. + +When the Vault service is started, some initialization steps need to be done to set things up. 
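+
+In short, the expected flow is (a sketch; each script is described in the Setup section below):
+
+```bash
+./vault_init.sh            # initialize Vault and write unseal keys to data/secrets
+./vault_copy_env.sh        # copy unseal keys and root token into docker/.env
+./vault_unseal.sh          # unseal Vault without restarting the composition
+./vault_set_pki.sh         # generate root and intermediate CAs and the server certificate
+./vault_create_approle.sh  # enable AppRole auth for the Certs service
+./vault_copy_certs.sh      # copy the generated certificates to docker/ssl/certs
+```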
+
+## Configuration
+
+| Variable | Description | Default |
+| :--- | --- | --- |
+| MG_VAULT_ADDR | Vault address | http://vault:8200 |
+| MG_VAULT_UNSEAL_KEY_1 | Vault unseal key | "" |
+| MG_VAULT_UNSEAL_KEY_2 | Vault unseal key | "" |
+| MG_VAULT_UNSEAL_KEY_3 | Vault unseal key | "" |
+| MG_VAULT_TOKEN | Vault CLI access token | "" |
+| MG_VAULT_PKI_PATH | Vault secrets engine path for the Root CA | pki |
+| MG_VAULT_PKI_ROLE_NAME | Vault Root CA role name to issue the intermediate CA | magistrala_int_ca |
+| MG_VAULT_PKI_FILE_NAME | Root CA certificates name used by `vault_set_pki.sh` | mg_root |
+| MG_VAULT_PKI_CA_CN | Common name used for Root CA creation by `vault_set_pki.sh` | Magistrala Root Certificate Authority |
+| MG_VAULT_PKI_CA_OU | Organization unit used for Root CA creation by `vault_set_pki.sh` | Magistrala |
+| MG_VAULT_PKI_CA_O | Organization used for Root CA creation by `vault_set_pki.sh` | Magistrala |
+| MG_VAULT_PKI_CA_C | Country used for Root CA creation by `vault_set_pki.sh` | FRANCE |
+| MG_VAULT_PKI_CA_L | Location used for Root CA creation by `vault_set_pki.sh` | PARIS |
+| MG_VAULT_PKI_CA_ST | State or province used for Root CA creation by `vault_set_pki.sh` | PARIS |
+| MG_VAULT_PKI_CA_ADDR | Address used for Root CA creation by `vault_set_pki.sh` | 5 Av. Anatole |
+| MG_VAULT_PKI_CA_PO | Postal code used for Root CA creation by `vault_set_pki.sh` | 75007 |
+| MG_VAULT_PKI_CLUSTER_PATH | Vault Root CA cluster path | http://localhost |
+| MG_VAULT_PKI_CLUSTER_AIA_PATH | Vault Root CA cluster AIA path | http://localhost |
+| MG_VAULT_PKI_INT_PATH | Vault secrets engine path for the Intermediate CA | pki_int |
+| MG_VAULT_PKI_INT_SERVER_CERTS_ROLE_NAME | Vault Intermediate CA role name to issue server certificates | magistrala_server_certs |
+| MG_VAULT_PKI_INT_THINGS_CERTS_ROLE_NAME | Vault Intermediate CA role name to issue Things certificates | magistrala_things_certs |
+| MG_VAULT_PKI_INT_FILE_NAME | Intermediate CA certificates name used by `vault_set_pki.sh` | mg_root |
+| MG_VAULT_PKI_INT_CA_CN | Common name used for Intermediate CA creation by `vault_set_pki.sh` | Magistrala Root Certificate Authority |
+| MG_VAULT_PKI_INT_CA_OU | Organization unit used for Intermediate CA creation by `vault_set_pki.sh` | Magistrala |
+| MG_VAULT_PKI_INT_CA_O | Organization used for Intermediate CA creation by `vault_set_pki.sh` | Magistrala |
+| MG_VAULT_PKI_INT_CA_C | Country used for Intermediate CA creation by `vault_set_pki.sh` | FRANCE |
+| MG_VAULT_PKI_INT_CA_L | Location used for Intermediate CA creation by `vault_set_pki.sh` | PARIS |
+| MG_VAULT_PKI_INT_CA_ST | State or province used for Intermediate CA creation by `vault_set_pki.sh` | PARIS |
+| MG_VAULT_PKI_INT_CA_ADDR | Address used for Intermediate CA creation by `vault_set_pki.sh` | 5 Av. Anatole |
+| MG_VAULT_PKI_INT_CA_PO | Postal code used for Intermediate CA creation by `vault_set_pki.sh` | 75007 |
+| MG_VAULT_PKI_INT_CLUSTER_PATH | Vault Intermediate CA cluster path | http://localhost |
+| MG_VAULT_PKI_INT_CLUSTER_AIA_PATH | Vault Intermediate CA cluster AIA path | http://localhost |
+| MG_VAULT_THINGS_CERTS_ISSUER_ROLEID | Vault Intermediate CA Things certificate issuer AppRole authentication RoleID | magistrala |
+| MG_VAULT_THINGS_CERTS_ISSUER_SECRET | Vault Intermediate CA Things certificate issuer AppRole authentication secret | magistrala |
+
+## Setup
+
+The following scripts are provided, which work on the running Vault service in Docker.
+
+### 1. `vault_init.sh`
+
+Calls `vault operator init` to perform the initial Vault initialization and generates a `docker/addons/vault/data/secrets` file which contains the Vault unseal keys and root token.
+
+Example contents for `data/secrets`:
+
+```bash
+Unseal Key 1: Ay0YZecYJ2HVtNtXfPootXK5LtF+JZoDmBb7IbbYdLBI
+Unseal Key 2: P6hb7x2cglv0p61jdLyNE3+d44cJUOFaDt9jHFDfr8Df
+Unseal Key 3: zSBfDHzUiWoOzXKY1pnnBqKO8UD2MDLuy8DNTxNtEBFy
+Unseal Key 4: 5oJuDDuMI0I8snaw/n4VLNpvndvvKi6JlkgOxuWXqMSz
+Unseal Key 5: ZhsUkk2tXBYEcWgz4WUCHH9rocoW6qZoiARWlkE5Epi5
+
+Initial Root Token: s.V2hdd00P4bHtUQnoWZK2hSaS
+
+Vault initialized with 5 key shares and a key threshold of 3. Please securely
+distribute the key shares printed above. When the Vault is re-sealed,
+restarted, or stopped, you must supply at least 3 of these keys to unseal it
+before it can start servicing requests.
+
+Vault does not store the generated master key. Without at least 3 keys to
+reconstruct the master key, Vault will remain permanently sealed!
+
+It is possible to generate new unseal keys, provided you have a quorum of
+existing unseal keys shares. See "vault operator rekey" for more information.
+```
+
+Use 3 of the 5 keys presented, put them into the `.env` file, and then start the composition again; Vault should then be in an unsealed state. (Note that this is not recommended in terms of security; this deployment is meant for development. A real production deployment can use Vault auto-unseal mode, where Vault gets its unseal keys from a third-party KMS, e.g. on AWS.)
+
+### 2. `vault_copy_env.sh`
+
+After the first step, the corresponding Vault environment variables (`MG_VAULT_TOKEN`, `MG_VAULT_UNSEAL_KEY_1`, `MG_VAULT_UNSEAL_KEY_2`, `MG_VAULT_UNSEAL_KEY_3`) should be updated in the `.env` file.
+
+The `vault_copy_env.sh` script copies the values from the `docker/addons/vault/data/secrets` file and updates the environment variables `MG_VAULT_TOKEN`, `MG_VAULT_UNSEAL_KEY_1`, `MG_VAULT_UNSEAL_KEY_2`, `MG_VAULT_UNSEAL_KEY_3` in the `.env` file.
+
+### 3. `vault_unseal.sh`
+
+This can be run after the initialization to unseal Vault, which is necessary for it to be used to store and/or get secrets.
+
+This can be used if you don't want to restart the service.
+
+The unseal environment variables need to be set in `.env` for the script to work (`MG_VAULT_TOKEN`, `MG_VAULT_UNSEAL_KEY_1`, `MG_VAULT_UNSEAL_KEY_2`, `MG_VAULT_UNSEAL_KEY_3`).
+
+This script should not be necessary to run after the initial setup, since the Vault service unseals itself when starting the container.
+
+### 4. `vault_set_pki.sh`
+
+This script is used to generate the root certificate, intermediate certificate and HTTPS server certificate.
+All certificates, keys and CSRs generated by `vault_set_pki.sh` will be present in `docker/addons/vault/data`.
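+
+A typical invocation (a sketch; run from `docker/addons/vault`, assuming Vault is already initialized and unsealed and `docker/.env` is populated):
+
+```bash
+./vault_set_pki.sh
+```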
+
+The parameters required for generating the certificates are obtained from environment variables, which are loaded from `docker/.env`.
+
+Environment variables starting with `MG_VAULT_PKI` in the `docker/.env` file are used by `vault_set_pki.sh` to generate the root CA.
+Environment variables starting with `MG_VAULT_PKI_INT` in the `docker/.env` file are used by `vault_set_pki.sh` to generate the intermediate CA.
+
+Passing the command-line argument `--skip-server-cert` to `vault_set_pki.sh` will skip the server certificate role and the generation of the server certificate and key.
+
+### 5. `vault_create_approle.sh`
+
+This script is used to enable AppRole authorization in Vault. The Certs service uses the AppRole credentials to issue and revoke Things certificates from the Vault intermediate CA.
+
+By default, `vault_create_approle.sh` tries to enable the AppRole auth method.
+If AppRole is already enabled in Vault, skip that step by running `vault_create_approle.sh --skip-enable-approle`.
+
+### 6. `vault_copy_certs.sh`
+
+This script copies the necessary certificates and keys from `docker/addons/vault/data` to the `docker/ssl/certs` folder.
+
+## HashiCorp Cloud Platform (HCP) Vault
+
+To do the same PKI setup in HashiCorp Cloud Platform (HCP) Vault, follow the steps below.
+Requirement: [Vault CLI](https://developer.hashicorp.com/vault/tutorials/getting-started/getting-started-install)
+
+- Replace the environment variable `MG_VAULT_ADDR` in `docker/.env` with the HCP Vault address.
+- Replace the environment variable `MG_VAULT_TOKEN` in `docker/.env` with the HCP Vault admin token.
+- Run the scripts `vault_set_pki.sh` and `vault_create_approle.sh`.
+- Optionally, run the script `vault_copy_certs.sh` to copy the certificates to the Magistrala default path.
+
+## Vault CLI
+
+It can also be useful to run the Vault CLI for inspection and administration work.
+
+```bash
+Usage: vault <command> [args]
+
+Common commands:
+    read        Read data and retrieves secrets
+    write       Write data, configuration, and secrets
+    delete      Delete secrets and configuration
+    list        List data or secrets
+    login       Authenticate locally
+    agent       Start a Vault agent
+    server      Start a Vault server
+    status      Print seal and HA status
+    unwrap      Unwrap a wrapped secret
+
+Other commands:
+    audit          Interact with audit devices
+    auth           Interact with auth methods
+    debug          Runs the debug command
+    kv             Interact with Vault's Key-Value storage
+    lease          Interact with leases
+    monitor        Stream log messages from a Vault server
+    namespace      Interact with namespaces
+    operator       Perform operator-specific tasks
+    path-help      Retrieve API help for paths
+    plugin         Interact with Vault plugins and catalog
+    policy         Interact with policies
+    print          Prints runtime configurations
+    secrets        Interact with secrets engines
+    ssh            Initiate an SSH session
+    token          Interact with tokens
+```
+
+If Vault is set up through `docker/addons/vault`, then the Vault CLI can be run directly using the Vault image in Docker: `docker run -it magistrala/vault:latest vault`
+
+## Vault Web UI
+
+If Vault is set up through `docker/addons/vault`, then the Vault Web UI is accessible by default at `http://localhost:8200/ui`.
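+
+For example, to quickly check the seal status of the dockerized Vault (a sketch; it assumes the composition from this folder is running):
+
+```bash
+docker exec -it magistrala-vault vault status
+```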
diff --git a/docker/addons/vault/config.hcl b/docker/addons/vault/config.hcl new file mode 100644 index 0000000..192dd5a --- /dev/null +++ b/docker/addons/vault/config.hcl @@ -0,0 +1,10 @@ +storage "file" { + path = "/vault/file" +} + +listener "tcp" { + address = "0.0.0.0:8200" + tls_disable = 1 +} + +ui = true diff --git a/docker/addons/vault/docker-compose.yml b/docker/addons/vault/docker-compose.yml new file mode 100644 index 0000000..8f380b4 --- /dev/null +++ b/docker/addons/vault/docker-compose.yml @@ -0,0 +1,39 @@ +# Copyright (c) Abstract Machines +# SPDX-License-Identifier: Apache-2.0 + +# This docker-compose file contains optional Vault service for Magistrala platform. +# Since this is optional, this file is dependent of docker-compose file +# from <project_root>/docker. In order to run these services, execute command: +# docker compose -f docker/docker-compose.yml -f docker/addons/vault/docker-compose.yml up +# from project root. Vault default port (8200) is exposed, so you can use Vault CLI tool for +# vault inspection and administration, as well as access the UI. + +networks: + magistrala-base-net: + +volumes: + magistrala-vault-volume: + +services: + vault: + image: hashicorp/vault:1.15.4 + container_name: magistrala-vault + ports: + - ${MG_VAULT_PORT}:8200 + networks: + - magistrala-base-net + volumes: + - magistrala-vault-volume:/vault/file + - magistrala-vault-volume:/vault/logs + - ./config.hcl:/vault/config/config.hcl + - ./entrypoint.sh:/entrypoint.sh + environment: + VAULT_ADDR: http://127.0.0.1:${MG_VAULT_PORT} + MG_VAULT_PORT: ${MG_VAULT_PORT} + MG_VAULT_UNSEAL_KEY_1: ${MG_VAULT_UNSEAL_KEY_1} + MG_VAULT_UNSEAL_KEY_2: ${MG_VAULT_UNSEAL_KEY_2} + MG_VAULT_UNSEAL_KEY_3: ${MG_VAULT_UNSEAL_KEY_3} + entrypoint: /bin/sh + command: /entrypoint.sh + cap_add: + - IPC_LOCK diff --git a/docker/addons/vault/entrypoint.sh b/docker/addons/vault/entrypoint.sh new file mode 100644 index 0000000..efc6f5a --- /dev/null +++ b/docker/addons/vault/entrypoint.sh @@ -0,0 +1,25 @@ +#!/usr/bin/dumb-init /bin/sh +# Copyright (c) Abstract Machines +# SPDX-License-Identifier: Apache-2.0 + +VAULT_CONFIG_DIR=/vault/config + +docker-entrypoint.sh server & +VAULT_PID=$! + +sleep 2 + +echo $MG_VAULT_UNSEAL_KEY_1 +echo $MG_VAULT_UNSEAL_KEY_2 +echo $MG_VAULT_UNSEAL_KEY_3 + +if [[ ! -z "${MG_VAULT_UNSEAL_KEY_1}" ]] && + [[ ! -z "${MG_VAULT_UNSEAL_KEY_2}" ]] && + [[ ! 
-z "${MG_VAULT_UNSEAL_KEY_3}" ]]; then + echo "Unsealing Vault" + vault operator unseal ${MG_VAULT_UNSEAL_KEY_1} + vault operator unseal ${MG_VAULT_UNSEAL_KEY_2} + vault operator unseal ${MG_VAULT_UNSEAL_KEY_3} +fi + +wait $VAULT_PID \ No newline at end of file diff --git a/docker/addons/vault/magistrala_things_certs_issue.template.hcl b/docker/addons/vault/magistrala_things_certs_issue.template.hcl new file mode 100644 index 0000000..1b13f6d --- /dev/null +++ b/docker/addons/vault/magistrala_things_certs_issue.template.hcl @@ -0,0 +1,32 @@ + +# Allow issue certificate with role with default issuer from Intermediate PKI +path "${MG_VAULT_PKI_INT_PATH}/issue/${MG_VAULT_PKI_INT_THINGS_CERTS_ROLE_NAME}" { + capabilities = ["create", "update"] +} + +## Revole certificate from Intermediate PKI +path "${MG_VAULT_PKI_INT_PATH}/revoke" { + capabilities = ["create", "update"] +} + +## List Revoked Certificates from Intermediate PKI +path "${MG_VAULT_PKI_INT_PATH}/certs/revoked" { + capabilities = ["list"] +} + + +## List Certificates from Intermediate PKI +path "${MG_VAULT_PKI_INT_PATH}/certs" { + capabilities = ["list"] +} + +## Read Certificate from Intermediate PKI +path "${MG_VAULT_PKI_INT_PATH}/cert/+" { + capabilities = ["read"] +} +path "${MG_VAULT_PKI_INT_PATH}/cert/+/raw" { + capabilities = ["read"] +} +path "${MG_VAULT_PKI_INT_PATH}/cert/+/raw/pem" { + capabilities = ["read"] +} diff --git a/docker/addons/vault/vault_cmd.sh b/docker/addons/vault/vault_cmd.sh new file mode 100644 index 0000000..97a8cc9 --- /dev/null +++ b/docker/addons/vault/vault_cmd.sh @@ -0,0 +1,24 @@ +#!/usr/bin/bash +# Copyright (c) Abstract Machines +# SPDX-License-Identifier: Apache-2.0 + +vault() { + if is_container_running "magistrala-vault"; then + docker exec -it magistrala-vault vault "$@" + else + if which vault &> /dev/null; then + $(which vault) "$@" + else + echo "magistrala-vault container or vault command not found. 
Please refer to the documentation: https://github.com/absmach/magistrala/blob/main/docker/addons/vault/README.md" + fi + fi +} + +is_container_running() { + local container_name="$1" + if [ "$(docker inspect --format '{{.State.Running}}' "$container_name" 2>/dev/null)" = "true" ]; then + return 0 + else + return 1 + fi +} diff --git a/docker/addons/vault/vault_copy_certs.sh b/docker/addons/vault/vault_copy_certs.sh new file mode 100755 index 0000000..c4656df --- /dev/null +++ b/docker/addons/vault/vault_copy_certs.sh @@ -0,0 +1,53 @@ +#!/usr/bin/bash +# Copyright (c) Abstract Machines +# SPDX-License-Identifier: Apache-2.0 + +set -euo pipefail + +scriptdir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" +export MAGISTRALA_DIR=$scriptdir/../../../ + +cd $scriptdir + +readDotEnv() { + set -o allexport + source $MAGISTRALA_DIR/docker/.env + set +o allexport +} + +readDotEnv + +server_name="localhost" + +# Check if MG_NGINX_SERVER_NAME is set or not empty +if [ -n "${MG_NGINX_SERVER_NAME:-}" ]; then + server_name="$MG_NGINX_SERVER_NAME" +fi + +echo "Copying certificate files" + +if [ -e "data/${server_name}.crt" ]; then + cp -v data/${server_name}.crt ${MAGISTRALA_DIR}/docker/ssl/certs/magistrala-server.crt +else + echo "${server_name}.crt file not available" +fi + +if [ -e "data/${server_name}.key" ]; then + cp -v data/${server_name}.key ${MAGISTRALA_DIR}/docker/ssl/certs/magistrala-server.key +else + echo "${server_name}.key file not available" +fi + +if [ -e "data/${MG_VAULT_PKI_INT_FILE_NAME}.key" ]; then + cp -v data/${MG_VAULT_PKI_INT_FILE_NAME}.key ${MAGISTRALA_DIR}/docker/ssl/certs/ca.key +else + echo "data/${MG_VAULT_PKI_INT_FILE_NAME}.key file not available" +fi + +if [ -e "data/${MG_VAULT_PKI_INT_FILE_NAME}_bundle.crt" ]; then + cp -v data/${MG_VAULT_PKI_INT_FILE_NAME}_bundle.crt ${MAGISTRALA_DIR}/docker/ssl/certs/ca.crt +else + echo "data/${MG_VAULT_PKI_INT_FILE_NAME}_bundle.crt file not available" +fi + +exit 0 diff --git a/docker/addons/vault/vault_copy_env.sh b/docker/addons/vault/vault_copy_env.sh new file mode 100755 index 0000000..dbb5fe4 --- /dev/null +++ b/docker/addons/vault/vault_copy_env.sh @@ -0,0 +1,24 @@ +#!/usr/bin/bash +# Copyright (c) Abstract Machines +# SPDX-License-Identifier: Apache-2.0 + +set -euo pipefail + +scriptdir="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)" +export MAGISTRALA_DIR=$scriptdir/../../../ + +cd $scriptdir + +write_env() { + if [ -e "data/secrets" ]; then + sed -i "s,MG_VAULT_UNSEAL_KEY_1=.*,MG_VAULT_UNSEAL_KEY_1=$(awk -F ": " '$1 == "Unseal Key 1" {print $2}' data/secrets)," $MAGISTRALA_DIR/docker/.env + sed -i "s,MG_VAULT_UNSEAL_KEY_2=.*,MG_VAULT_UNSEAL_KEY_2=$(awk -F ": " '$1 == "Unseal Key 2" {print $2}' data/secrets)," $MAGISTRALA_DIR/docker/.env + sed -i "s,MG_VAULT_UNSEAL_KEY_3=.*,MG_VAULT_UNSEAL_KEY_3=$(awk -F ": " '$1 == "Unseal Key 3" {print $2}' data/secrets)," $MAGISTRALA_DIR/docker/.env + sed -i "s,MG_VAULT_TOKEN=.*,MG_VAULT_TOKEN=$(awk -F ": " '$1 == "Initial Root Token" {print $2}' data/secrets)," $MAGISTRALA_DIR/docker/.env + echo "Vault environment varaibles are set successfully in docker/.env" + else + echo "Error: Source file 'data/secrets' not found." 
+ fi +} + +write_env diff --git a/docker/addons/vault/vault_create_approle.sh b/docker/addons/vault/vault_create_approle.sh new file mode 100755 index 0000000..614f8dc --- /dev/null +++ b/docker/addons/vault/vault_create_approle.sh @@ -0,0 +1,97 @@ +#!/usr/bin/bash +# Copyright (c) Abstract Machines +# SPDX-License-Identifier: Apache-2.0 + +set -euo pipefail + +scriptdir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" +export MAGISTRALA_DIR=$scriptdir/../../../ + +cd $scriptdir + +SKIP_ENABLE_APP_ROLE=${1:-} + +readDotEnv() { + set -o allexport + source $MAGISTRALA_DIR/docker/.env + set +o allexport +} + +source vault_cmd.sh + +vaultCreatePolicyFile() { + envsubst ' + ${MG_VAULT_PKI_INT_PATH} + ${MG_VAULT_PKI_INT_THINGS_CERTS_ROLE_NAME} + ' < magistrala_things_certs_issue.template.hcl > magistrala_things_certs_issue.hcl +} +vaultCreatePolicy() { + echo "Creating new policy for AppRole" + if is_container_running "magistrala-vault"; then + docker cp magistrala_things_certs_issue.hcl magistrala-vault:/vault/magistrala_things_certs_issue.hcl + vault policy write -namespace=${MG_VAULT_NAMESPACE} -address=${MG_VAULT_ADDR} magistrala_things_certs_issue /vault/magistrala_things_certs_issue.hcl + else + vault policy write -namespace=${MG_VAULT_NAMESPACE} -address=${MG_VAULT_ADDR} magistrala_things_certs_issue magistrala_things_certs_issue.hcl + fi +} + +vaultEnableAppRole() { + if [ "$SKIP_ENABLE_APP_ROLE" == "--skip-enable-approle" ]; then + echo "Skipping Enable AppRole" + else + echo "Enabling AppRole" + vault auth enable -namespace=${MG_VAULT_NAMESPACE} -address=${MG_VAULT_ADDR} approle + fi +} + +vaultDeleteRole() { + echo "Deleteing old AppRole" + vault delete -namespace=${MG_VAULT_NAMESPACE} -address=${MG_VAULT_ADDR} auth/approle/role/magistrala_things_certs_issuer +} + +vaultCreateRole() { + echo "Creating new AppRole" + vault write -namespace=${MG_VAULT_NAMESPACE} -address=${MG_VAULT_ADDR} auth/approle/role/magistrala_things_certs_issuer \ + token_policies=magistrala_things_certs_issue secret_id_num_uses=0 \ + secret_id_ttl=0 token_ttl=1h token_max_ttl=3h token_num_uses=0 +} + +vaultWriteCustomRoleID(){ + echo "Writing custom role id" + vault read -namespace=${MG_VAULT_NAMESPACE} -address=${MG_VAULT_ADDR} auth/approle/role/magistrala_things_certs_issuer/role-id + vault write -namespace=${MG_VAULT_NAMESPACE} -address=${MG_VAULT_ADDR} auth/approle/role/magistrala_things_certs_issuer/role-id role_id=${MG_VAULT_THINGS_CERTS_ISSUER_ROLEID} +} + +vaultWriteCustomSecret() { + echo "Writing custom secret" + vault write -namespace=${MG_VAULT_NAMESPACE} -address=${MG_VAULT_ADDR} -f auth/approle/role/magistrala_things_certs_issuer/secret-id + vault write -namespace=${MG_VAULT_NAMESPACE} -address=${MG_VAULT_ADDR} auth/approle/role/magistrala_things_certs_issuer/custom-secret-id secret_id=${MG_VAULT_THINGS_CERTS_ISSUER_SECRET} num_uses=0 ttl=0 +} + +vaultTestRoleLogin() { + echo "Testing custom roleid secret by logging in" + vault write -namespace=${MG_VAULT_NAMESPACE} -address=${MG_VAULT_ADDR} auth/approle/login \ + role_id=${MG_VAULT_THINGS_CERTS_ISSUER_ROLEID} \ + secret_id=${MG_VAULT_THINGS_CERTS_ISSUER_SECRET} + +} +if ! command -v jq &> /dev/null +then + echo "jq command could not be found, please install it and try again." 
+ exit +fi + +readDotEnv + +vault login -namespace=${MG_VAULT_NAMESPACE} -address=${MG_VAULT_ADDR} ${MG_VAULT_TOKEN} + +vaultCreatePolicyFile +vaultCreatePolicy +vaultEnableAppRole +vaultDeleteRole +vaultCreateRole +vaultWriteCustomRoleID +vaultWriteCustomSecret +vaultTestRoleLogin + +exit 0 diff --git a/docker/addons/vault/vault_init.sh b/docker/addons/vault/vault_init.sh new file mode 100755 index 0000000..bd1e05f --- /dev/null +++ b/docker/addons/vault/vault_init.sh @@ -0,0 +1,24 @@ +#!/usr/bin/bash +# Copyright (c) Abstract Machines +# SPDX-License-Identifier: Apache-2.0 + +set -euo pipefail + +scriptdir="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)" +export MAGISTRALA_DIR=$scriptdir/../../../ + +cd $scriptdir + +readDotEnv() { + set -o allexport + source $MAGISTRALA_DIR/docker/.env + set +o allexport +} + +source vault_cmd.sh + +readDotEnv + +mkdir -p data + +vault operator init -address=$MG_VAULT_ADDR 2>&1 | tee >(sed -r 's/\x1b\[[0-9;]*m//g' > data/secrets) diff --git a/docker/addons/vault/vault_set_pki.sh b/docker/addons/vault/vault_set_pki.sh new file mode 100755 index 0000000..6f8ebdc --- /dev/null +++ b/docker/addons/vault/vault_set_pki.sh @@ -0,0 +1,229 @@ +#!/usr/bin/bash +# Copyright (c) Abstract Machines +# SPDX-License-Identifier: Apache-2.0 + +set -euo pipefail + +scriptdir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" +export MAGISTRALA_DIR=$scriptdir/../../../ + +SKIP_SERVER_CERT=${1:-} + +cd $scriptdir + +readDotEnv() { + set -o allexport + source $MAGISTRALA_DIR/docker/.env + set +o allexport +} + +server_name="localhost" + +# Check if MG_NGINX_SERVER_NAME is set or not empty +if [ -n "${MG_NGINX_SERVER_NAME:-}" ]; then + server_name="$MG_NGINX_SERVER_NAME" +fi + +source vault_cmd.sh + +vaultEnablePKI() { + vault secrets enable -namespace=${MG_VAULT_NAMESPACE} -address=${MG_VAULT_ADDR} -path ${MG_VAULT_PKI_PATH} pki + vault secrets tune -namespace=${MG_VAULT_NAMESPACE} -address=${MG_VAULT_ADDR} -max-lease-ttl=87600h ${MG_VAULT_PKI_PATH} +} + +vaultConfigPKIClusterPath() { + vault write -namespace=${MG_VAULT_NAMESPACE} -address=${MG_VAULT_ADDR} ${MG_VAULT_PKI_PATH}/config/cluster aia_path=${MG_VAULT_PKI_CLUSTER_AIA_PATH} path=${MG_VAULT_PKI_CLUSTER_PATH} +} + +vaultConfigPKICrl() { + vault write -namespace=${MG_VAULT_NAMESPACE} -address=${MG_VAULT_ADDR} ${MG_VAULT_PKI_PATH}/config/crl expiry="5m" ocsp_disable=false ocsp_expiry=0 auto_rebuild=true auto_rebuild_grace_period="2m" enable_delta=true delta_rebuild_interval="1m" +} + +vaultAddRoleToSecret() { + vault write -namespace=${MG_VAULT_NAMESPACE} -address=${MG_VAULT_ADDR} ${MG_VAULT_PKI_PATH}/roles/${MG_VAULT_PKI_ROLE_NAME} \ + allow_any_name=true \ + max_ttl="8760h" \ + default_ttl="8760h" \ + generate_lease=true +} + +vaultGenerateRootCACertificate() { + echo "Generate root CA certificate" + vault write -namespace=${MG_VAULT_NAMESPACE} -address=${MG_VAULT_ADDR} -format=json ${MG_VAULT_PKI_PATH}/root/generate/exported \ + common_name="\"$MG_VAULT_PKI_CA_CN\"" \ + ou="\"$MG_VAULT_PKI_CA_OU\"" \ + organization="\"$MG_VAULT_PKI_CA_O\"" \ + country="\"$MG_VAULT_PKI_CA_C\"" \ + locality="\"$MG_VAULT_PKI_CA_L\"" \ + province="\"$MG_VAULT_PKI_CA_ST\"" \ + street_address="\"$MG_VAULT_PKI_CA_ADDR\"" \ + postal_code="\"$MG_VAULT_PKI_CA_PO\"" \ + ttl=87600h | tee >(jq -r .data.certificate >data/${MG_VAULT_PKI_FILE_NAME}_ca.crt) \ + >(jq -r .data.issuing_ca >data/${MG_VAULT_PKI_FILE_NAME}_issuing_ca.crt) \ + >(jq -r .data.private_key >data/${MG_VAULT_PKI_FILE_NAME}_ca.key) +} + 
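+# Note: the `tee >(...)` pattern above fans the JSON response out to several jq
+# filters at once, writing the certificate, issuing CA, and private key to
+# separate files under data/ in a single pass.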
+vaultSetupRootCAIssuingURLs() { + echo "Setup URLs for CRL and issuing" + vault write -namespace=${MG_VAULT_NAMESPACE} -address=${MG_VAULT_ADDR} ${MG_VAULT_PKI_PATH}/config/urls \ + issuing_certificates="{{cluster_aia_path}}/v1/${MG_VAULT_PKI_PATH}/ca" \ + crl_distribution_points="{{cluster_aia_path}}/v1/${MG_VAULT_PKI_PATH}/crl" \ + ocsp_servers="{{cluster_aia_path}}/v1/${MG_VAULT_PKI_PATH}/ocsp" \ + enable_templating=true +} + +vaultGenerateIntermediateCAPKI() { + echo "Generate Intermediate CA PKI" + vault secrets enable -namespace=${MG_VAULT_NAMESPACE} -address=${MG_VAULT_ADDR} -path=${MG_VAULT_PKI_INT_PATH} pki + vault secrets tune -namespace=${MG_VAULT_NAMESPACE} -address=${MG_VAULT_ADDR} -max-lease-ttl=43800h ${MG_VAULT_PKI_INT_PATH} +} + +vaultConfigIntermediatePKIClusterPath() { + vault write -namespace=${MG_VAULT_NAMESPACE} -address=${MG_VAULT_ADDR} ${MG_VAULT_PKI_INT_PATH}/config/cluster aia_path=${MG_VAULT_PKI_INT_CLUSTER_AIA_PATH} path=${MG_VAULT_PKI_INT_CLUSTER_PATH} +} + +vaultConfigIntermediatePKICrl() { + vault write -namespace=${MG_VAULT_NAMESPACE} -address=${MG_VAULT_ADDR} ${MG_VAULT_PKI_INT_PATH}/config/crl expiry="5m" ocsp_disable=false ocsp_expiry=0 auto_rebuild=true auto_rebuild_grace_period="2m" enable_delta=true delta_rebuild_interval="1m" +} + +vaultGenerateIntermediateCSR() { + echo "Generate intermediate CSR" + vault write -namespace=${MG_VAULT_NAMESPACE} -address=${MG_VAULT_ADDR} -format=json ${MG_VAULT_PKI_INT_PATH}/intermediate/generate/exported \ + common_name="\"$MG_VAULT_PKI_INT_CA_CN\"" \ + ou="\"$MG_VAULT_PKI_INT_CA_OU\""\ + organization="\"$MG_VAULT_PKI_INT_CA_O\"" \ + country="\"$MG_VAULT_PKI_INT_CA_C\"" \ + locality="\"$MG_VAULT_PKI_INT_CA_L\"" \ + province="\"$MG_VAULT_PKI_INT_CA_ST\"" \ + street_address="\"$MG_VAULT_PKI_INT_CA_ADDR\"" \ + postal_code="\"$MG_VAULT_PKI_INT_CA_PO\"" \ + | tee >(jq -r .data.csr >data/${MG_VAULT_PKI_INT_FILE_NAME}.csr) \ + >(jq -r .data.private_key >data/${MG_VAULT_PKI_INT_FILE_NAME}.key) +} + +vaultSignIntermediateCSR() { + echo "Sign intermediate CSR" + if is_container_running "magistrala-vault"; then + docker cp data/${MG_VAULT_PKI_INT_FILE_NAME}.csr magistrala-vault:/vault/${MG_VAULT_PKI_INT_FILE_NAME}.csr + vault write -namespace=${MG_VAULT_NAMESPACE} -address=${MG_VAULT_ADDR} -format=json ${MG_VAULT_PKI_PATH}/root/sign-intermediate \ + csr=@/vault/${MG_VAULT_PKI_INT_FILE_NAME}.csr ttl="8760h" \ + ou="\"$MG_VAULT_PKI_INT_CA_OU\""\ + organization="\"$MG_VAULT_PKI_INT_CA_O\"" \ + country="\"$MG_VAULT_PKI_INT_CA_C\"" \ + locality="\"$MG_VAULT_PKI_INT_CA_L\"" \ + province="\"$MG_VAULT_PKI_INT_CA_ST\"" \ + street_address="\"$MG_VAULT_PKI_INT_CA_ADDR\"" \ + postal_code="\"$MG_VAULT_PKI_INT_CA_PO\"" \ + | tee >(jq -r .data.certificate >data/${MG_VAULT_PKI_INT_FILE_NAME}.crt) \ + >(jq -r .data.issuing_ca >data/${MG_VAULT_PKI_INT_FILE_NAME}_issuing_ca.crt) + else + vault write -namespace=${MG_VAULT_NAMESPACE} -address=${MG_VAULT_ADDR} -format=json ${MG_VAULT_PKI_PATH}/root/sign-intermediate \ + csr=@data/${MG_VAULT_PKI_INT_FILE_NAME}.csr ttl="8760h" \ + ou="\"$MG_VAULT_PKI_INT_CA_OU\""\ + organization="\"$MG_VAULT_PKI_INT_CA_O\"" \ + country="\"$MG_VAULT_PKI_INT_CA_C\"" \ + locality="\"$MG_VAULT_PKI_INT_CA_L\"" \ + province="\"$MG_VAULT_PKI_INT_CA_ST\"" \ + street_address="\"$MG_VAULT_PKI_INT_CA_ADDR\"" \ + postal_code="\"$MG_VAULT_PKI_INT_CA_PO\"" \ + | tee >(jq -r .data.certificate >data/${MG_VAULT_PKI_INT_FILE_NAME}.crt) \ + >(jq -r .data.issuing_ca >data/${MG_VAULT_PKI_INT_FILE_NAME}_issuing_ca.crt) + fi + +} + 
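+# NOTE: helpers that hand local files to Vault (vaultSignIntermediateCSR above,
+# vaultInjectIntermediateCertificate below) branch on is_container_running from
+# vault_cmd.sh: when Vault runs inside the magistrala-vault container, the file
+# is first copied in with `docker cp` so the @file argument resolves inside the
+# container's filesystem. A minimal sketch of the pattern, with a hypothetical
+# example.csr:
+#
+#   if is_container_running "magistrala-vault"; then
+#     docker cp data/example.csr magistrala-vault:/vault/example.csr
+#     vault write ... csr=@/vault/example.csr
+#   else
+#     vault write ... csr=@data/example.csr
+#   fi
+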
+vaultInjectIntermediateCertificate() {
+  echo "Inject Intermediate Certificate"
+  if is_container_running "magistrala-vault"; then
+    docker cp data/${MG_VAULT_PKI_INT_FILE_NAME}.crt magistrala-vault:/vault/${MG_VAULT_PKI_INT_FILE_NAME}.crt
+    vault write -namespace=${MG_VAULT_NAMESPACE} -address=${MG_VAULT_ADDR} ${MG_VAULT_PKI_INT_PATH}/intermediate/set-signed certificate=@/vault/${MG_VAULT_PKI_INT_FILE_NAME}.crt
+  else
+    vault write -namespace=${MG_VAULT_NAMESPACE} -address=${MG_VAULT_ADDR} ${MG_VAULT_PKI_INT_PATH}/intermediate/set-signed certificate=@data/${MG_VAULT_PKI_INT_FILE_NAME}.crt
+  fi
+}
+
+vaultGenerateIntermediateCertificateBundle() {
+  echo "Generate intermediate certificate bundle"
+  cat data/${MG_VAULT_PKI_INT_FILE_NAME}.crt data/${MG_VAULT_PKI_FILE_NAME}_ca.crt \
+    > data/${MG_VAULT_PKI_INT_FILE_NAME}_bundle.crt
+}
+
+vaultSetupIntermediateIssuingURLs() {
+  echo "Setup URLs for CRL and issuing"
+  vault write -namespace=${MG_VAULT_NAMESPACE} -address=${MG_VAULT_ADDR} ${MG_VAULT_PKI_INT_PATH}/config/urls \
+    issuing_certificates="{{cluster_aia_path}}/v1/${MG_VAULT_PKI_INT_PATH}/ca" \
+    crl_distribution_points="{{cluster_aia_path}}/v1/${MG_VAULT_PKI_INT_PATH}/crl" \
+    ocsp_servers="{{cluster_aia_path}}/v1/${MG_VAULT_PKI_INT_PATH}/ocsp" \
+    enable_templating=true
+}
+
+vaultSetupServerCertsRole() {
+  if [ "$SKIP_SERVER_CERT" == "--skip-server-cert" ]; then
+    echo "Skipping server certificate role"
+  else
+    echo "Setup Server certificate role"
+    vault write -namespace=${MG_VAULT_NAMESPACE} -address=${MG_VAULT_ADDR} ${MG_VAULT_PKI_INT_PATH}/roles/${MG_VAULT_PKI_INT_SERVER_CERTS_ROLE_NAME} \
+      allow_subdomains=true \
+      max_ttl="4320h"
+  fi
+}
+
+vaultGenerateServerCertificate() {
+  if [ "$SKIP_SERVER_CERT" == "--skip-server-cert" ]; then
+    echo "Skipping server certificate generation"
+  else
+    echo "Generate server certificate"
+    vault write -namespace=${MG_VAULT_NAMESPACE} -address=${MG_VAULT_ADDR} -format=json ${MG_VAULT_PKI_INT_PATH}/issue/${MG_VAULT_PKI_INT_SERVER_CERTS_ROLE_NAME} \
+      common_name="$server_name" ttl="4320h" \
+      | tee >(jq -r .data.certificate >data/${server_name}.crt) \
+      >(jq -r .data.private_key >data/${server_name}.key)
+  fi
+}
+
+vaultSetupThingCertsRole() {
+  echo "Setup Thing Certs role"
+  vault write -namespace=${MG_VAULT_NAMESPACE} -address=${MG_VAULT_ADDR} ${MG_VAULT_PKI_INT_PATH}/roles/${MG_VAULT_PKI_INT_THINGS_CERTS_ROLE_NAME} \
+    allow_subdomains=true \
+    allow_any_name=true \
+    max_ttl="2160h"
+}
+
+vaultCleanupFiles() {
+  if is_container_running "magistrala-vault"; then
+    # Brace expansion is a bashism; the container's POSIX sh treats {crt,csr}
+    # literally, so the globs are spelled out explicitly.
+    docker exec magistrala-vault sh -c 'rm -rf /vault/*.crt /vault/*.csr'
+  fi
+}
+
+if ! command -v jq &> /dev/null
+then
+  echo "jq command could not be found, please install it and try again."
+  exit 1
+fi
+
+readDotEnv
+
+mkdir -p data
+
+vault login -namespace=${MG_VAULT_NAMESPACE} -address=${MG_VAULT_ADDR} ${MG_VAULT_TOKEN}
+
+vaultEnablePKI
+vaultConfigPKIClusterPath
+vaultConfigPKICrl
+vaultAddRoleToSecret
+vaultGenerateRootCACertificate
+vaultSetupRootCAIssuingURLs
+vaultGenerateIntermediateCAPKI
+vaultConfigIntermediatePKIClusterPath
+vaultConfigIntermediatePKICrl
+vaultGenerateIntermediateCSR
+vaultSignIntermediateCSR
+vaultInjectIntermediateCertificate
+vaultGenerateIntermediateCertificateBundle
+vaultSetupIntermediateIssuingURLs
+vaultSetupServerCertsRole
+vaultGenerateServerCertificate
+vaultSetupThingCertsRole
+vaultCleanupFiles
+
+exit 0
diff --git a/docker/addons/vault/vault_unseal.sh b/docker/addons/vault/vault_unseal.sh
new file mode 100755
index 0000000..b80b6ee
--- /dev/null
+++ b/docker/addons/vault/vault_unseal.sh
@@ -0,0 +1,24 @@
+#!/usr/bin/bash
+# Copyright (c) Abstract Machines
+# SPDX-License-Identifier: Apache-2.0
+
+set -euo pipefail
+
+scriptdir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
+export MAGISTRALA_DIR=$scriptdir/../../../
+
+cd $scriptdir
+
+readDotEnv() {
+  set -o allexport
+  source $MAGISTRALA_DIR/docker/.env
+  set +o allexport
+}
+
+source vault_cmd.sh
+
+readDotEnv
+
+vault operator unseal -address=${MG_VAULT_ADDR} ${MG_VAULT_UNSEAL_KEY_1}
+vault operator unseal -address=${MG_VAULT_ADDR} ${MG_VAULT_UNSEAL_KEY_2}
+vault operator unseal -address=${MG_VAULT_ADDR} ${MG_VAULT_UNSEAL_KEY_3}
diff --git a/docker/docker-compose.yml b/docker/docker-compose.yml
new file mode 100644
index 0000000..c206e75
--- /dev/null
+++ b/docker/docker-compose.yml
@@ -0,0 +1,765 @@
+# Copyright (c) Abstract Machines
+# SPDX-License-Identifier: Apache-2.0
+
+name: "magistrala"
+
+networks:
+  magistrala-base-net:
+    driver: bridge
+
+volumes:
+  magistrala-users-db-volume:
+  magistrala-things-db-volume:
+  magistrala-things-redis-volume:
+  magistrala-broker-volume:
+  magistrala-mqtt-broker-volume:
+  magistrala-spicedb-db-volume:
+  magistrala-auth-db-volume:
+  magistrala-invitations-db-volume:
+  magistrala-ui-db-volume:
+
+services:
+  spicedb:
+    image: "authzed/spicedb:v1.30.0"
+    container_name: magistrala-spicedb
+    command: "serve"
+    restart: "always"
+    networks:
+      - magistrala-base-net
+    ports:
+      - "8080:8080"
+      - "9091:9090"
+      - "50051:50051"
+    environment:
+      SPICEDB_GRPC_PRESHARED_KEY: ${MG_SPICEDB_PRE_SHARED_KEY}
+      SPICEDB_DATASTORE_ENGINE: ${MG_SPICEDB_DATASTORE_ENGINE}
+      SPICEDB_DATASTORE_CONN_URI: "${MG_SPICEDB_DATASTORE_ENGINE}://${MG_SPICEDB_DB_USER}:${MG_SPICEDB_DB_PASS}@spicedb-db:${MG_SPICEDB_DB_PORT}/${MG_SPICEDB_DB_NAME}?sslmode=disable"
+    depends_on:
+      - spicedb-migrate
+
+  spicedb-migrate:
+    image: "authzed/spicedb:v1.30.0"
+    container_name: magistrala-spicedb-migrate
+    command: "migrate head"
+    restart: "on-failure"
+    networks:
+      - magistrala-base-net
+    environment:
+      SPICEDB_DATASTORE_ENGINE: ${MG_SPICEDB_DATASTORE_ENGINE}
+      SPICEDB_DATASTORE_CONN_URI: "${MG_SPICEDB_DATASTORE_ENGINE}://${MG_SPICEDB_DB_USER}:${MG_SPICEDB_DB_PASS}@spicedb-db:${MG_SPICEDB_DB_PORT}/${MG_SPICEDB_DB_NAME}?sslmode=disable"
+    depends_on:
+      - spicedb-db
+
+  spicedb-db:
+    image: "postgres:16.2-alpine"
+    container_name: magistrala-spicedb-db
+    networks:
+      - magistrala-base-net
+    ports:
+      - "6010:5432"
+    environment:
+      POSTGRES_USER: ${MG_SPICEDB_DB_USER}
+      POSTGRES_PASSWORD: ${MG_SPICEDB_DB_PASS}
+      POSTGRES_DB: ${MG_SPICEDB_DB_NAME}
+    volumes:
+      - magistrala-spicedb-db-volume:/var/lib/postgresql/data
+
+  auth-db:
+    image: postgres:16.2-alpine
+
container_name: magistrala-auth-db + restart: on-failure + ports: + - 6004:5432 + environment: + POSTGRES_USER: ${MG_AUTH_DB_USER} + POSTGRES_PASSWORD: ${MG_AUTH_DB_PASS} + POSTGRES_DB: ${MG_AUTH_DB_NAME} + networks: + - magistrala-base-net + volumes: + - magistrala-auth-db-volume:/var/lib/postgresql/data + + auth: + image: magistrala/auth:${MG_RELEASE_TAG} + container_name: magistrala-auth + depends_on: + - auth-db + - spicedb + expose: + - ${MG_AUTH_GRPC_PORT} + restart: on-failure + environment: + MG_AUTH_LOG_LEVEL: ${MG_AUTH_LOG_LEVEL} + MG_SPICEDB_SCHEMA_FILE: ${MG_SPICEDB_SCHEMA_FILE} + MG_SPICEDB_PRE_SHARED_KEY: ${MG_SPICEDB_PRE_SHARED_KEY} + MG_SPICEDB_HOST: ${MG_SPICEDB_HOST} + MG_SPICEDB_PORT: ${MG_SPICEDB_PORT} + MG_AUTH_ACCESS_TOKEN_DURATION: ${MG_AUTH_ACCESS_TOKEN_DURATION} + MG_AUTH_REFRESH_TOKEN_DURATION: ${MG_AUTH_REFRESH_TOKEN_DURATION} + MG_AUTH_INVITATION_DURATION: ${MG_AUTH_INVITATION_DURATION} + MG_AUTH_SECRET_KEY: ${MG_AUTH_SECRET_KEY} + MG_AUTH_HTTP_HOST: ${MG_AUTH_HTTP_HOST} + MG_AUTH_HTTP_PORT: ${MG_AUTH_HTTP_PORT} + MG_AUTH_HTTP_SERVER_CERT: ${MG_AUTH_HTTP_SERVER_CERT} + MG_AUTH_HTTP_SERVER_KEY: ${MG_AUTH_HTTP_SERVER_KEY} + MG_AUTH_GRPC_HOST: ${MG_AUTH_GRPC_HOST} + MG_AUTH_GRPC_PORT: ${MG_AUTH_GRPC_PORT} + ## Compose supports parameter expansion in environment, + ## Eg: ${VAR:+replacement} or ${VAR+replacement} -> replacement if VAR is set and non-empty, otherwise empty + ## Eg :${VAR:-default} or ${VAR-default} -> value of VAR if set and non-empty, otherwise default + MG_AUTH_GRPC_SERVER_CERT: ${MG_AUTH_GRPC_SERVER_CERT:+/auth-grpc-server.crt} + MG_AUTH_GRPC_SERVER_KEY: ${MG_AUTH_GRPC_SERVER_KEY:+/auth-grpc-server.key} + MG_AUTH_GRPC_SERVER_CA_CERTS: ${MG_AUTH_GRPC_SERVER_CA_CERTS:+/auth-grpc-server-ca.crt} + MG_AUTH_GRPC_CLIENT_CA_CERTS: ${MG_AUTH_GRPC_CLIENT_CA_CERTS:+/auth-grpc-client-ca.crt} + MG_AUTH_DB_HOST: ${MG_AUTH_DB_HOST} + MG_AUTH_DB_PORT: ${MG_AUTH_DB_PORT} + MG_AUTH_DB_USER: ${MG_AUTH_DB_USER} + MG_AUTH_DB_PASS: ${MG_AUTH_DB_PASS} + MG_AUTH_DB_NAME: ${MG_AUTH_DB_NAME} + MG_AUTH_DB_SSL_MODE: ${MG_AUTH_DB_SSL_MODE} + MG_AUTH_DB_SSL_CERT: ${MG_AUTH_DB_SSL_CERT} + MG_AUTH_DB_SSL_KEY: ${MG_AUTH_DB_SSL_KEY} + MG_AUTH_DB_SSL_ROOT_CERT: ${MG_AUTH_DB_SSL_ROOT_CERT} + MG_JAEGER_URL: ${MG_JAEGER_URL} + MG_JAEGER_TRACE_RATIO: ${MG_JAEGER_TRACE_RATIO} + MG_SEND_TELEMETRY: ${MG_SEND_TELEMETRY} + MG_AUTH_ADAPTER_INSTANCE_ID: ${MG_AUTH_ADAPTER_INSTANCE_ID} + MG_ES_URL: ${MG_ES_URL} + ports: + - ${MG_AUTH_HTTP_PORT}:${MG_AUTH_HTTP_PORT} + - ${MG_AUTH_GRPC_PORT}:${MG_AUTH_GRPC_PORT} + networks: + - magistrala-base-net + volumes: + - ./spicedb/schema.zed:${MG_SPICEDB_SCHEMA_FILE} + # Auth gRPC mTLS server certificates + - type: bind + source: ${MG_AUTH_GRPC_SERVER_CERT:-ssl/certs/dummy/server_cert} + target: /auth-grpc-server${MG_AUTH_GRPC_SERVER_CERT:+.crt} + bind: + create_host_path: true + - type: bind + source: ${MG_AUTH_GRPC_SERVER_KEY:-ssl/certs/dummy/server_key} + target: /auth-grpc-server${MG_AUTH_GRPC_SERVER_KEY:+.key} + bind: + create_host_path: true + - type: bind + source: ${MG_AUTH_GRPC_SERVER_CA_CERTS:-ssl/certs/dummy/server_ca_certs} + target: /auth-grpc-server-ca${MG_AUTH_GRPC_SERVER_CA_CERTS:+.crt} + bind: + create_host_path: true + - type: bind + source: ${MG_AUTH_GRPC_CLIENT_CA_CERTS:-ssl/certs/dummy/client_ca_certs} + target: /auth-grpc-client-ca${MG_AUTH_GRPC_CLIENT_CA_CERTS:+.crt} + bind: + create_host_path: true + + invitations-db: + image: postgres:16.2-alpine + container_name: magistrala-invitations-db + restart: on-failure + command: postgres 
-c "max_connections=${MG_POSTGRES_MAX_CONNECTIONS}" + environment: + POSTGRES_USER: ${MG_INVITATIONS_DB_USER} + POSTGRES_PASSWORD: ${MG_INVITATIONS_DB_PASS} + POSTGRES_DB: ${MG_INVITATIONS_DB_NAME} + MG_POSTGRES_MAX_CONNECTIONS: ${MG_POSTGRES_MAX_CONNECTIONS} + ports: + - 6021:5432 + networks: + - magistrala-base-net + volumes: + - magistrala-invitations-db-volume:/var/lib/postgresql/data + + invitations: + image: magistrala/invitations:${MG_RELEASE_TAG} + container_name: magistrala-invitations + restart: on-failure + depends_on: + - auth + - invitations-db + environment: + MG_INVITATIONS_LOG_LEVEL: ${MG_INVITATIONS_LOG_LEVEL} + MG_USERS_URL: ${MG_USERS_URL} + MG_DOMAINS_URL: ${MG_DOMAINS_URL} + MG_INVITATIONS_HTTP_HOST: ${MG_INVITATIONS_HTTP_HOST} + MG_INVITATIONS_HTTP_PORT: ${MG_INVITATIONS_HTTP_PORT} + MG_INVITATIONS_HTTP_SERVER_CERT: ${MG_INVITATIONS_HTTP_SERVER_CERT} + MG_INVITATIONS_HTTP_SERVER_KEY: ${MG_INVITATIONS_HTTP_SERVER_KEY} + MG_INVITATIONS_DB_HOST: ${MG_INVITATIONS_DB_HOST} + MG_INVITATIONS_DB_USER: ${MG_INVITATIONS_DB_USER} + MG_INVITATIONS_DB_PASS: ${MG_INVITATIONS_DB_PASS} + MG_INVITATIONS_DB_PORT: ${MG_INVITATIONS_DB_PORT} + MG_INVITATIONS_DB_NAME: ${MG_INVITATIONS_DB_NAME} + MG_INVITATIONS_DB_SSL_MODE: ${MG_INVITATIONS_DB_SSL_MODE} + MG_INVITATIONS_DB_SSL_CERT: ${MG_INVITATIONS_DB_SSL_CERT} + MG_INVITATIONS_DB_SSL_KEY: ${MG_INVITATIONS_DB_SSL_KEY} + MG_INVITATIONS_DB_SSL_ROOT_CERT: ${MG_INVITATIONS_DB_SSL_ROOT_CERT} + MG_AUTH_GRPC_URL: ${MG_AUTH_GRPC_URL} + MG_AUTH_GRPC_TIMEOUT: ${MG_AUTH_GRPC_TIMEOUT} + MG_AUTH_GRPC_CLIENT_CERT: ${MG_AUTH_GRPC_CLIENT_CERT:+/auth-grpc-client.crt} + MG_AUTH_GRPC_CLIENT_KEY: ${MG_AUTH_GRPC_CLIENT_KEY:+/auth-grpc-client.key} + MG_AUTH_GRPC_SERVER_CA_CERTS: ${MG_AUTH_GRPC_SERVER_CA_CERTS:+/auth-grpc-server-ca.crt} + MG_JAEGER_URL: ${MG_JAEGER_URL} + MG_JAEGER_TRACE_RATIO: ${MG_JAEGER_TRACE_RATIO} + MG_SEND_TELEMETRY: ${MG_SEND_TELEMETRY} + MG_INVITATIONS_INSTANCE_ID: ${MG_INVITATIONS_INSTANCE_ID} + ports: + - ${MG_INVITATIONS_HTTP_PORT}:${MG_INVITATIONS_HTTP_PORT} + networks: + - magistrala-base-net + volumes: + # Auth gRPC client certificates + - type: bind + source: ${MG_AUTH_GRPC_CLIENT_CERT:-ssl/certs/dummy/client_cert} + target: /auth-grpc-client${MG_AUTH_GRPC_CLIENT_CERT:+.crt} + bind: + create_host_path: true + - type: bind + source: ${MG_AUTH_GRPC_CLIENT_KEY:-ssl/certs/dummy/client_key} + target: /auth-grpc-client${MG_AUTH_GRPC_CLIENT_KEY:+.key} + bind: + create_host_path: true + - type: bind + source: ${MG_AUTH_GRPC_SERVER_CA_CERTS:-ssl/certs/dummy/server_ca} + target: /auth-grpc-server-ca${MG_AUTH_GRPC_SERVER_CA_CERTS:+.crt} + bind: + create_host_path: true + + nginx: + image: nginx:1.25.4-alpine + container_name: magistrala-nginx + restart: on-failure + volumes: + - ./nginx/nginx-${AUTH-key}.conf:/etc/nginx/nginx.conf.template + - ./nginx/entrypoint.sh:/docker-entrypoint.d/entrypoint.sh + - ./nginx/snippets:/etc/nginx/snippets + - ./ssl/authorization.js:/etc/nginx/authorization.js + - type: bind + source: ${MG_NGINX_SERVER_CERT:-./ssl/certs/magistrala-server.crt} + target: /etc/ssl/certs/magistrala-server.crt + - type: bind + source: ${MG_NGINX_SERVER_KEY:-./ssl/certs/magistrala-server.key} + target: /etc/ssl/private/magistrala-server.key + - type: bind + source: ${MG_NGINX_SERVER_CLIENT_CA:-./ssl/certs/ca.crt} + target: /etc/ssl/certs/ca.crt + - type: bind + source: ${MG_NGINX_SERVER_DHPARAM:-./ssl/dhparam.pem} + target: /etc/ssl/certs/dhparam.pem + ports: + - ${MG_NGINX_HTTP_PORT}:${MG_NGINX_HTTP_PORT} + - 
${MG_NGINX_SSL_PORT}:${MG_NGINX_SSL_PORT} + - ${MG_NGINX_MQTT_PORT}:${MG_NGINX_MQTT_PORT} + - ${MG_NGINX_MQTTS_PORT}:${MG_NGINX_MQTTS_PORT} + networks: + - magistrala-base-net + env_file: + - .env + depends_on: + - auth + - things + - users + - mqtt-adapter + - http-adapter + - ws-adapter + - coap-adapter + + things-db: + image: postgres:16.2-alpine + container_name: magistrala-things-db + restart: on-failure + command: postgres -c "max_connections=${MG_POSTGRES_MAX_CONNECTIONS}" + environment: + POSTGRES_USER: ${MG_THINGS_DB_USER} + POSTGRES_PASSWORD: ${MG_THINGS_DB_PASS} + POSTGRES_DB: ${MG_THINGS_DB_NAME} + MG_POSTGRES_MAX_CONNECTIONS: ${MG_POSTGRES_MAX_CONNECTIONS} + networks: + - magistrala-base-net + ports: + - 6006:5432 + volumes: + - magistrala-things-db-volume:/var/lib/postgresql/data + + things-redis: + image: redis:7.2.4-alpine + container_name: magistrala-things-redis + restart: on-failure + networks: + - magistrala-base-net + volumes: + - magistrala-things-redis-volume:/data + + things: + image: magistrala/things:${MG_RELEASE_TAG} + container_name: magistrala-things + depends_on: + - things-db + - users + - auth + - nats + restart: on-failure + environment: + MG_THINGS_LOG_LEVEL: ${MG_THINGS_LOG_LEVEL} + MG_THINGS_STANDALONE_ID: ${MG_THINGS_STANDALONE_ID} + MG_THINGS_STANDALONE_TOKEN: ${MG_THINGS_STANDALONE_TOKEN} + MG_THINGS_CACHE_KEY_DURATION: ${MG_THINGS_CACHE_KEY_DURATION} + MG_THINGS_HTTP_HOST: ${MG_THINGS_HTTP_HOST} + MG_THINGS_HTTP_PORT: ${MG_THINGS_HTTP_PORT} + MG_THINGS_AUTH_GRPC_HOST: ${MG_THINGS_AUTH_GRPC_HOST} + MG_THINGS_AUTH_GRPC_PORT: ${MG_THINGS_AUTH_GRPC_PORT} + ## Compose supports parameter expansion in environment, + ## Eg: ${VAR:+replacement} or ${VAR+replacement} -> replacement if VAR is set and non-empty, otherwise empty + ## Eg :${VAR:-default} or ${VAR-default} -> value of VAR if set and non-empty, otherwise default + MG_THINGS_AUTH_GRPC_SERVER_CERT: ${MG_THINGS_AUTH_GRPC_SERVER_CERT:+/things-grpc-server.crt} + MG_THINGS_AUTH_GRPC_SERVER_KEY: ${MG_THINGS_AUTH_GRPC_SERVER_KEY:+/things-grpc-server.key} + MG_THINGS_AUTH_GRPC_SERVER_CA_CERTS: ${MG_THINGS_AUTH_GRPC_SERVER_CA_CERTS:+/things-grpc-server-ca.crt} + MG_THINGS_AUTH_GRPC_CLIENT_CA_CERTS: ${MG_THINGS_AUTH_GRPC_CLIENT_CA_CERTS:+/things-grpc-client-ca.crt} + MG_ES_URL: ${MG_ES_URL} + MG_THINGS_CACHE_URL: ${MG_THINGS_CACHE_URL} + MG_THINGS_DB_HOST: ${MG_THINGS_DB_HOST} + MG_THINGS_DB_PORT: ${MG_THINGS_DB_PORT} + MG_THINGS_DB_USER: ${MG_THINGS_DB_USER} + MG_THINGS_DB_PASS: ${MG_THINGS_DB_PASS} + MG_THINGS_DB_NAME: ${MG_THINGS_DB_NAME} + MG_THINGS_DB_SSL_MODE: ${MG_THINGS_DB_SSL_MODE} + MG_THINGS_DB_SSL_CERT: ${MG_THINGS_DB_SSL_CERT} + MG_THINGS_DB_SSL_KEY: ${MG_THINGS_DB_SSL_KEY} + MG_THINGS_DB_SSL_ROOT_CERT: ${MG_THINGS_DB_SSL_ROOT_CERT} + MG_AUTH_GRPC_URL: ${MG_AUTH_GRPC_URL} + MG_AUTH_GRPC_TIMEOUT: ${MG_AUTH_GRPC_TIMEOUT} + MG_AUTH_GRPC_CLIENT_CERT: ${MG_AUTH_GRPC_CLIENT_CERT:+/auth-grpc-client.crt} + MG_AUTH_GRPC_CLIENT_KEY: ${MG_AUTH_GRPC_CLIENT_KEY:+/auth-grpc-client.key} + MG_AUTH_GRPC_SERVER_CA_CERTS: ${MG_AUTH_GRPC_SERVER_CA_CERTS:+/auth-grpc-server-ca.crt} + MG_JAEGER_URL: ${MG_JAEGER_URL} + MG_JAEGER_TRACE_RATIO: ${MG_JAEGER_TRACE_RATIO} + MG_SEND_TELEMETRY: ${MG_SEND_TELEMETRY} + ports: + - ${MG_THINGS_HTTP_PORT}:${MG_THINGS_HTTP_PORT} + - ${MG_THINGS_AUTH_GRPC_PORT}:${MG_THINGS_AUTH_GRPC_PORT} + networks: + - magistrala-base-net + volumes: + # Things gRPC server certificates + - type: bind + source: ${MG_THINGS_AUTH_GRPC_SERVER_CERT:-ssl/certs/dummy/server_cert} + target: 
/things-grpc-server${MG_THINGS_AUTH_GRPC_SERVER_CERT:+.crt} + bind: + create_host_path: true + - type: bind + source: ${MG_THINGS_AUTH_GRPC_SERVER_KEY:-ssl/certs/dummy/server_key} + target: /things-grpc-server${MG_THINGS_AUTH_GRPC_SERVER_KEY:+.key} + bind: + create_host_path: true + - type: bind + source: ${MG_THINGS_AUTH_GRPC_SERVER_CA_CERTS:-ssl/certs/dummy/server_ca_certs} + target: /things-grpc-server-ca${MG_THINGS_AUTH_GRPC_SERVER_CA_CERTS:+.crt} + bind: + create_host_path: true + - type: bind + source: ${MG_THINGS_AUTH_GRPC_CLIENT_CA_CERTS:-ssl/certs/dummy/client_ca_certs} + target: /things-grpc-client-ca${MG_THINGS_AUTH_GRPC_CLIENT_CA_CERTS:+.crt} + bind: + create_host_path: true + # Auth gRPC client certificates + - type: bind + source: ${MG_AUTH_GRPC_CLIENT_CERT:-ssl/certs/dummy/client_cert} + target: /auth-grpc-client${MG_AUTH_GRPC_CLIENT_CERT:+.crt} + bind: + create_host_path: true + - type: bind + source: ${MG_AUTH_GRPC_CLIENT_KEY:-ssl/certs/dummy/client_key} + target: /auth-grpc-client${MG_AUTH_GRPC_CLIENT_KEY:+.key} + bind: + create_host_path: true + - type: bind + source: ${MG_AUTH_GRPC_SERVER_CA_CERTS:-ssl/certs/dummy/server_ca} + target: /auth-grpc-server-ca${MG_AUTH_GRPC_SERVER_CA_CERTS:+.crt} + bind: + create_host_path: true + + users-db: + image: postgres:16.2-alpine + container_name: magistrala-users-db + restart: on-failure + command: postgres -c "max_connections=${MG_POSTGRES_MAX_CONNECTIONS}" + environment: + POSTGRES_USER: ${MG_USERS_DB_USER} + POSTGRES_PASSWORD: ${MG_USERS_DB_PASS} + POSTGRES_DB: ${MG_USERS_DB_NAME} + MG_POSTGRES_MAX_CONNECTIONS: ${MG_POSTGRES_MAX_CONNECTIONS} + ports: + - 6000:5432 + networks: + - magistrala-base-net + volumes: + - magistrala-users-db-volume:/var/lib/postgresql/data + + users: + image: magistrala/users:${MG_RELEASE_TAG} + container_name: magistrala-users + depends_on: + - users-db + - auth + - nats + restart: on-failure + environment: + MG_USERS_LOG_LEVEL: ${MG_USERS_LOG_LEVEL} + MG_USERS_SECRET_KEY: ${MG_USERS_SECRET_KEY} + MG_USERS_ADMIN_EMAIL: ${MG_USERS_ADMIN_EMAIL} + MG_USERS_ADMIN_PASSWORD: ${MG_USERS_ADMIN_PASSWORD} + MG_USERS_PASS_REGEX: ${MG_USERS_PASS_REGEX} + MG_USERS_ACCESS_TOKEN_DURATION: ${MG_USERS_ACCESS_TOKEN_DURATION} + MG_USERS_REFRESH_TOKEN_DURATION: ${MG_USERS_REFRESH_TOKEN_DURATION} + MG_TOKEN_RESET_ENDPOINT: ${MG_TOKEN_RESET_ENDPOINT} + MG_USERS_HTTP_HOST: ${MG_USERS_HTTP_HOST} + MG_USERS_HTTP_PORT: ${MG_USERS_HTTP_PORT} + MG_USERS_HTTP_SERVER_CERT: ${MG_USERS_HTTP_SERVER_CERT} + MG_USERS_HTTP_SERVER_KEY: ${MG_USERS_HTTP_SERVER_KEY} + MG_USERS_DB_HOST: ${MG_USERS_DB_HOST} + MG_USERS_DB_PORT: ${MG_USERS_DB_PORT} + MG_USERS_DB_USER: ${MG_USERS_DB_USER} + MG_USERS_DB_PASS: ${MG_USERS_DB_PASS} + MG_USERS_DB_NAME: ${MG_USERS_DB_NAME} + MG_USERS_DB_SSL_MODE: ${MG_USERS_DB_SSL_MODE} + MG_USERS_DB_SSL_CERT: ${MG_USERS_DB_SSL_CERT} + MG_USERS_DB_SSL_KEY: ${MG_USERS_DB_SSL_KEY} + MG_USERS_DB_SSL_ROOT_CERT: ${MG_USERS_DB_SSL_ROOT_CERT} + MG_USERS_ALLOW_SELF_REGISTER: ${MG_USERS_ALLOW_SELF_REGISTER} + MG_EMAIL_HOST: ${MG_EMAIL_HOST} + MG_EMAIL_PORT: ${MG_EMAIL_PORT} + MG_EMAIL_USERNAME: ${MG_EMAIL_USERNAME} + MG_EMAIL_PASSWORD: ${MG_EMAIL_PASSWORD} + MG_EMAIL_FROM_ADDRESS: ${MG_EMAIL_FROM_ADDRESS} + MG_EMAIL_FROM_NAME: ${MG_EMAIL_FROM_NAME} + MG_EMAIL_TEMPLATE: ${MG_EMAIL_TEMPLATE} + MG_ES_URL: ${MG_ES_URL} + MG_JAEGER_URL: ${MG_JAEGER_URL} + MG_JAEGER_TRACE_RATIO: ${MG_JAEGER_TRACE_RATIO} + MG_SEND_TELEMETRY: ${MG_SEND_TELEMETRY} + MG_AUTH_GRPC_URL: ${MG_AUTH_GRPC_URL} + MG_AUTH_GRPC_TIMEOUT: ${MG_AUTH_GRPC_TIMEOUT} + 
MG_AUTH_GRPC_CLIENT_CERT: ${MG_AUTH_GRPC_CLIENT_CERT:+/auth-grpc-client.crt} + MG_AUTH_GRPC_CLIENT_KEY: ${MG_AUTH_GRPC_CLIENT_KEY:+/auth-grpc-client.key} + MG_AUTH_GRPC_SERVER_CA_CERTS: ${MG_AUTH_GRPC_SERVER_CA_CERTS:+/auth-grpc-server-ca.crt} + MG_GOOGLE_CLIENT_ID: ${MG_GOOGLE_CLIENT_ID} + MG_GOOGLE_CLIENT_SECRET: ${MG_GOOGLE_CLIENT_SECRET} + MG_GOOGLE_REDIRECT_URL: ${MG_GOOGLE_REDIRECT_URL} + MG_GOOGLE_STATE: ${MG_GOOGLE_STATE} + MG_OAUTH_UI_REDIRECT_URL: ${MG_OAUTH_UI_REDIRECT_URL} + MG_OAUTH_UI_ERROR_URL: ${MG_OAUTH_UI_ERROR_URL} + MG_USERS_DELETE_INTERVAL: ${MG_USERS_DELETE_INTERVAL} + MG_USERS_DELETE_AFTER: ${MG_USERS_DELETE_AFTER} + ports: + - ${MG_USERS_HTTP_PORT}:${MG_USERS_HTTP_PORT} + networks: + - magistrala-base-net + volumes: + - ./templates/${MG_USERS_RESET_PWD_TEMPLATE}:/email.tmpl + # Auth gRPC client certificates + - type: bind + source: ${MG_AUTH_GRPC_CLIENT_CERT:-ssl/certs/dummy/client_cert} + target: /auth-grpc-client${MG_AUTH_GRPC_CLIENT_CERT:+.crt} + bind: + create_host_path: true + - type: bind + source: ${MG_AUTH_GRPC_CLIENT_KEY:-ssl/certs/dummy/client_key} + target: /auth-grpc-client${MG_AUTH_GRPC_CLIENT_KEY:+.key} + bind: + create_host_path: true + - type: bind + source: ${MG_AUTH_GRPC_SERVER_CA_CERTS:-ssl/certs/dummy/server_ca} + target: /auth-grpc-server-ca${MG_AUTH_GRPC_SERVER_CA_CERTS:+.crt} + bind: + create_host_path: true + + jaeger: + image: jaegertracing/all-in-one:1.53.0 + container_name: magistrala-jaeger + environment: + COLLECTOR_OTLP_ENABLED: ${MG_JAEGER_COLLECTOR_OTLP_ENABLED} + command: --memory.max-traces ${MG_JAEGER_MEMORY_MAX_TRACES} + ports: + - ${MG_JAEGER_FRONTEND}:${MG_JAEGER_FRONTEND} + - ${MG_JAEGER_OLTP_HTTP}:${MG_JAEGER_OLTP_HTTP} + networks: + - magistrala-base-net + + mqtt-adapter: + image: magistrala/mqtt:${MG_RELEASE_TAG} + container_name: magistrala-mqtt + depends_on: + - things + - vernemq + - nats + restart: on-failure + environment: + MG_MQTT_ADAPTER_LOG_LEVEL: ${MG_MQTT_ADAPTER_LOG_LEVEL} + MG_MQTT_ADAPTER_MQTT_PORT: ${MG_MQTT_ADAPTER_MQTT_PORT} + MG_MQTT_ADAPTER_MQTT_TARGET_HOST: ${MG_MQTT_ADAPTER_MQTT_TARGET_HOST} + MG_MQTT_ADAPTER_MQTT_TARGET_PORT: ${MG_MQTT_ADAPTER_MQTT_TARGET_PORT} + MG_MQTT_ADAPTER_FORWARDER_TIMEOUT: ${MG_MQTT_ADAPTER_FORWARDER_TIMEOUT} + MG_MQTT_ADAPTER_MQTT_TARGET_HEALTH_CHECK: ${MG_MQTT_ADAPTER_MQTT_TARGET_HEALTH_CHECK} + MG_MQTT_ADAPTER_MQTT_QOS: ${MG_MQTT_ADAPTER_MQTT_QOS} + MG_MQTT_ADAPTER_WS_PORT: ${MG_MQTT_ADAPTER_WS_PORT} + MG_MQTT_ADAPTER_INSTANCE_ID: ${MG_MQTT_ADAPTER_INSTANCE_ID} + MG_MQTT_ADAPTER_WS_TARGET_HOST: ${MG_MQTT_ADAPTER_WS_TARGET_HOST} + MG_MQTT_ADAPTER_WS_TARGET_PORT: ${MG_MQTT_ADAPTER_WS_TARGET_PORT} + MG_MQTT_ADAPTER_WS_TARGET_PATH: ${MG_MQTT_ADAPTER_WS_TARGET_PATH} + MG_MQTT_ADAPTER_INSTANCE: ${MG_MQTT_ADAPTER_INSTANCE} + MG_ES_URL: ${MG_ES_URL} + MG_THINGS_AUTH_GRPC_URL: ${MG_THINGS_AUTH_GRPC_URL} + MG_THINGS_AUTH_GRPC_TIMEOUT: ${MG_THINGS_AUTH_GRPC_TIMEOUT} + MG_THINGS_AUTH_GRPC_CLIENT_CERT: ${MG_THINGS_AUTH_GRPC_CLIENT_CERT:+/things-grpc-client.crt} + MG_THINGS_AUTH_GRPC_CLIENT_KEY: ${MG_THINGS_AUTH_GRPC_CLIENT_KEY:+/things-grpc-client.key} + MG_THINGS_AUTH_GRPC_SERVER_CA_CERTS: ${MG_THINGS_AUTH_GRPC_SERVER_CA_CERTS:+/things-grpc-server-ca.crt} + MG_JAEGER_URL: ${MG_JAEGER_URL} + MG_MESSAGE_BROKER_URL: ${MG_MESSAGE_BROKER_URL} + MG_JAEGER_TRACE_RATIO: ${MG_JAEGER_TRACE_RATIO} + MG_SEND_TELEMETRY: ${MG_SEND_TELEMETRY} + networks: + - magistrala-base-net + volumes: + # Things gRPC mTLS client certificates + - type: bind + source: 
${MG_THINGS_AUTH_GRPC_CLIENT_CERT:-ssl/certs/dummy/client_cert} + target: /things-grpc-client${MG_THINGS_AUTH_GRPC_CLIENT_CERT:+.crt} + bind: + create_host_path: true + - type: bind + source: ${MG_THINGS_AUTH_GRPC_CLIENT_KEY:-ssl/certs/dummy/client_key} + target: /things-grpc-client${MG_THINGS_AUTH_GRPC_CLIENT_KEY:+.key} + bind: + create_host_path: true + - type: bind + source: ${MG_THINGS_AUTH_GRPC_SERVER_CA_CERTS:-ssl/certs/dummy/server_ca} + target: /things-grpc-server-ca${MG_THINGS_AUTH_GRPC_SERVER_CA_CERTS:+.crt} + bind: + create_host_path: true + + http-adapter: + image: magistrala/http:${MG_RELEASE_TAG} + container_name: magistrala-http + depends_on: + - things + - nats + restart: on-failure + environment: + MG_HTTP_ADAPTER_LOG_LEVEL: ${MG_HTTP_ADAPTER_LOG_LEVEL} + MG_HTTP_ADAPTER_HOST: ${MG_HTTP_ADAPTER_HOST} + MG_HTTP_ADAPTER_PORT: ${MG_HTTP_ADAPTER_PORT} + MG_HTTP_ADAPTER_SERVER_CERT: ${MG_HTTP_ADAPTER_SERVER_CERT} + MG_HTTP_ADAPTER_SERVER_KEY: ${MG_HTTP_ADAPTER_SERVER_KEY} + MG_THINGS_AUTH_GRPC_URL: ${MG_THINGS_AUTH_GRPC_URL} + MG_THINGS_AUTH_GRPC_TIMEOUT: ${MG_THINGS_AUTH_GRPC_TIMEOUT} + MG_THINGS_AUTH_GRPC_CLIENT_CERT: ${MG_THINGS_AUTH_GRPC_CLIENT_CERT:+/things-grpc-client.crt} + MG_THINGS_AUTH_GRPC_CLIENT_KEY: ${MG_THINGS_AUTH_GRPC_CLIENT_KEY:+/things-grpc-client.key} + MG_THINGS_AUTH_GRPC_SERVER_CA_CERTS: ${MG_THINGS_AUTH_GRPC_SERVER_CA_CERTS:+/things-grpc-server-ca.crt} + MG_MESSAGE_BROKER_URL: ${MG_MESSAGE_BROKER_URL} + MG_JAEGER_URL: ${MG_JAEGER_URL} + MG_JAEGER_TRACE_RATIO: ${MG_JAEGER_TRACE_RATIO} + MG_SEND_TELEMETRY: ${MG_SEND_TELEMETRY} + MG_HTTP_ADAPTER_INSTANCE_ID: ${MG_HTTP_ADAPTER_INSTANCE_ID} + ports: + - ${MG_HTTP_ADAPTER_PORT}:${MG_HTTP_ADAPTER_PORT} + networks: + - magistrala-base-net + volumes: + # Things gRPC mTLS client certificates + - type: bind + source: ${MG_THINGS_AUTH_GRPC_CLIENT_CERT:-ssl/certs/dummy/client_cert} + target: /things-grpc-client${MG_THINGS_AUTH_GRPC_CLIENT_CERT:+.crt} + bind: + create_host_path: true + - type: bind + source: ${MG_THINGS_AUTH_GRPC_CLIENT_KEY:-ssl/certs/dummy/client_key} + target: /things-grpc-client${MG_THINGS_AUTH_GRPC_CLIENT_KEY:+.key} + bind: + create_host_path: true + - type: bind + source: ${MG_THINGS_AUTH_GRPC_SERVER_CA_CERTS:-ssl/certs/dummy/server_ca} + target: /things-grpc-server-ca${MG_THINGS_AUTH_GRPC_SERVER_CA_CERTS:+.crt} + bind: + create_host_path: true + + coap-adapter: + image: magistrala/coap:${MG_RELEASE_TAG} + container_name: magistrala-coap + depends_on: + - things + - nats + restart: on-failure + environment: + MG_COAP_ADAPTER_LOG_LEVEL: ${MG_COAP_ADAPTER_LOG_LEVEL} + MG_COAP_ADAPTER_HOST: ${MG_COAP_ADAPTER_HOST} + MG_COAP_ADAPTER_PORT: ${MG_COAP_ADAPTER_PORT} + MG_COAP_ADAPTER_SERVER_CERT: ${MG_COAP_ADAPTER_SERVER_CERT} + MG_COAP_ADAPTER_SERVER_KEY: ${MG_COAP_ADAPTER_SERVER_KEY} + MG_COAP_ADAPTER_HTTP_HOST: ${MG_COAP_ADAPTER_HTTP_HOST} + MG_COAP_ADAPTER_HTTP_PORT: ${MG_COAP_ADAPTER_HTTP_PORT} + MG_COAP_ADAPTER_HTTP_SERVER_CERT: ${MG_COAP_ADAPTER_HTTP_SERVER_CERT} + MG_COAP_ADAPTER_HTTP_SERVER_KEY: ${MG_COAP_ADAPTER_HTTP_SERVER_KEY} + MG_THINGS_AUTH_GRPC_URL: ${MG_THINGS_AUTH_GRPC_URL} + MG_THINGS_AUTH_GRPC_TIMEOUT: ${MG_THINGS_AUTH_GRPC_TIMEOUT} + MG_THINGS_AUTH_GRPC_CLIENT_CERT: ${MG_THINGS_AUTH_GRPC_CLIENT_CERT:+/things-grpc-client.crt} + MG_THINGS_AUTH_GRPC_CLIENT_KEY: ${MG_THINGS_AUTH_GRPC_CLIENT_KEY:+/things-grpc-client.key} + MG_THINGS_AUTH_GRPC_SERVER_CA_CERTS: ${MG_THINGS_AUTH_GRPC_SERVER_CA_CERTS:+/things-grpc-server-ca.crt} + MG_MESSAGE_BROKER_URL: ${MG_MESSAGE_BROKER_URL} + MG_JAEGER_URL: 
${MG_JAEGER_URL} + MG_JAEGER_TRACE_RATIO: ${MG_JAEGER_TRACE_RATIO} + MG_SEND_TELEMETRY: ${MG_SEND_TELEMETRY} + MG_COAP_ADAPTER_INSTANCE_ID: ${MG_COAP_ADAPTER_INSTANCE_ID} + ports: + - ${MG_COAP_ADAPTER_PORT}:${MG_COAP_ADAPTER_PORT}/udp + - ${MG_COAP_ADAPTER_HTTP_PORT}:${MG_COAP_ADAPTER_HTTP_PORT}/tcp + networks: + - magistrala-base-net + volumes: + # Things gRPC mTLS client certificates + - type: bind + source: ${MG_THINGS_AUTH_GRPC_CLIENT_CERT:-ssl/certs/dummy/client_cert} + target: /things-grpc-client${MG_THINGS_AUTH_GRPC_CLIENT_CERT:+.crt} + bind: + create_host_path: true + - type: bind + source: ${MG_THINGS_AUTH_GRPC_CLIENT_KEY:-ssl/certs/dummy/client_key} + target: /things-grpc-client${MG_THINGS_AUTH_GRPC_CLIENT_KEY:+.key} + bind: + create_host_path: true + - type: bind + source: ${MG_THINGS_AUTH_GRPC_SERVER_CA_CERTS:-ssl/certs/dummy/server_ca} + target: /things-grpc-server-ca${MG_THINGS_AUTH_GRPC_SERVER_CA_CERTS:+.crt} + bind: + create_host_path: true + + ws-adapter: + image: magistrala/ws:${MG_RELEASE_TAG} + container_name: magistrala-ws + depends_on: + - things + - nats + restart: on-failure + environment: + MG_WS_ADAPTER_LOG_LEVEL: ${MG_WS_ADAPTER_LOG_LEVEL} + MG_WS_ADAPTER_HTTP_HOST: ${MG_WS_ADAPTER_HTTP_HOST} + MG_WS_ADAPTER_HTTP_PORT: ${MG_WS_ADAPTER_HTTP_PORT} + MG_WS_ADAPTER_HTTP_SERVER_CERT: ${MG_WS_ADAPTER_HTTP_SERVER_CERT} + MG_WS_ADAPTER_HTTP_SERVER_KEY: ${MG_WS_ADAPTER_HTTP_SERVER_KEY} + MG_THINGS_AUTH_GRPC_URL: ${MG_THINGS_AUTH_GRPC_URL} + MG_THINGS_AUTH_GRPC_TIMEOUT: ${MG_THINGS_AUTH_GRPC_TIMEOUT} + MG_THINGS_AUTH_GRPC_CLIENT_CERT: ${MG_THINGS_AUTH_GRPC_CLIENT_CERT:+/things-grpc-client.crt} + MG_THINGS_AUTH_GRPC_CLIENT_KEY: ${MG_THINGS_AUTH_GRPC_CLIENT_KEY:+/things-grpc-client.key} + MG_THINGS_AUTH_GRPC_SERVER_CA_CERTS: ${MG_THINGS_AUTH_GRPC_SERVER_CA_CERTS:+/things-grpc-server-ca.crt} + MG_MESSAGE_BROKER_URL: ${MG_MESSAGE_BROKER_URL} + MG_JAEGER_URL: ${MG_JAEGER_URL} + MG_JAEGER_TRACE_RATIO: ${MG_JAEGER_TRACE_RATIO} + MG_SEND_TELEMETRY: ${MG_SEND_TELEMETRY} + MG_WS_ADAPTER_INSTANCE_ID: ${MG_WS_ADAPTER_INSTANCE_ID} + ports: + - ${MG_WS_ADAPTER_HTTP_PORT}:${MG_WS_ADAPTER_HTTP_PORT} + networks: + - magistrala-base-net + volumes: + # Things gRPC mTLS client certificates + - type: bind + source: ${MG_THINGS_AUTH_GRPC_CLIENT_CERT:-ssl/certs/dummy/client_cert} + target: /things-grpc-client${MG_THINGS_AUTH_GRPC_CLIENT_CERT:+.crt} + bind: + create_host_path: true + - type: bind + source: ${MG_THINGS_AUTH_GRPC_CLIENT_KEY:-ssl/certs/dummy/client_key} + target: /things-grpc-client${MG_THINGS_AUTH_GRPC_CLIENT_KEY:+.key} + bind: + create_host_path: true + - type: bind + source: ${MG_THINGS_AUTH_GRPC_SERVER_CA_CERTS:-ssl/certs/dummy/server_ca} + target: /things-grpc-server-ca${MG_THINGS_AUTH_GRPC_SERVER_CA_CERTS:+.crt} + bind: + create_host_path: true + + vernemq: + image: magistrala/vernemq:${MG_RELEASE_TAG} + container_name: magistrala-vernemq + restart: on-failure + environment: + DOCKER_VERNEMQ_ALLOW_ANONYMOUS: ${MG_DOCKER_VERNEMQ_ALLOW_ANONYMOUS} + DOCKER_VERNEMQ_LOG__CONSOLE__LEVEL: ${MG_DOCKER_VERNEMQ_LOG__CONSOLE__LEVEL} + networks: + - magistrala-base-net + volumes: + - magistrala-mqtt-broker-volume:/var/lib/vernemq + + nats: + image: nats:2.10.9-alpine + container_name: magistrala-nats + restart: on-failure + command: "--config=/etc/nats/nats.conf" + environment: + - MG_NATS_PORT=${MG_NATS_PORT} + - MG_NATS_HTTP_PORT=${MG_NATS_HTTP_PORT} + - MG_NATS_JETSTREAM_KEY=${MG_NATS_JETSTREAM_KEY} + ports: + - ${MG_NATS_PORT}:${MG_NATS_PORT} + - 
${MG_NATS_HTTP_PORT}:${MG_NATS_HTTP_PORT} + volumes: + - magistrala-broker-volume:/data + - ./nats:/etc/nats + networks: + - magistrala-base-net + + ui: + image: magistrala/ui:${MG_RELEASE_TAG} + container_name: magistrala-ui + restart: on-failure + environment: + MG_UI_LOG_LEVEL: ${MG_UI_LOG_LEVEL} + MG_UI_PORT: ${MG_UI_PORT} + MG_HTTP_ADAPTER_URL: ${MG_HTTP_ADAPTER_URL} + MG_READER_URL: ${MG_READER_URL} + MG_THINGS_URL: ${MG_THINGS_URL} + MG_USERS_URL: ${MG_USERS_URL} + MG_INVITATIONS_URL: ${MG_INVITATIONS_URL} + MG_DOMAINS_URL: ${MG_DOMAINS_URL} + MG_BOOTSTRAP_URL: ${MG_BOOTSTRAP_URL} + MG_UI_HOST_URL: ${MG_UI_HOST_URL} + MG_UI_VERIFICATION_TLS: ${MG_UI_VERIFICATION_TLS} + MG_UI_CONTENT_TYPE: ${MG_UI_CONTENT_TYPE} + MG_UI_INSTANCE_ID: ${MG_UI_INSTANCE_ID} + MG_UI_DB_HOST: ${MG_UI_DB_HOST} + MG_UI_DB_PORT: ${MG_UI_DB_PORT} + MG_UI_DB_USER: ${MG_UI_DB_USER} + MG_UI_DB_PASS: ${MG_UI_DB_PASS} + MG_UI_DB_NAME: ${MG_UI_DB_NAME} + MG_UI_DB_SSL_MODE: ${MG_UI_DB_SSL_MODE} + MG_UI_DB_SSL_CERT: ${MG_UI_DB_SSL_CERT} + MG_UI_DB_SSL_KEY: ${MG_UI_DB_SSL_KEY} + MG_UI_DB_SSL_ROOT_CERT: ${MG_UI_DB_SSL_ROOT_CERT} + MG_GOOGLE_CLIENT_ID: ${MG_GOOGLE_CLIENT_ID} + MG_GOOGLE_CLIENT_SECRET: ${MG_GOOGLE_CLIENT_SECRET} + MG_GOOGLE_REDIRECT_URL: ${MG_GOOGLE_REDIRECT_URL} + MG_GOOGLE_STATE: ${MG_GOOGLE_STATE} + MG_UI_HASH_KEY: ${MG_UI_HASH_KEY} + MG_UI_BLOCK_KEY: ${MG_UI_BLOCK_KEY} + MG_UI_PATH_PREFIX: ${MG_UI_PATH_PREFIX} + ports: + - ${MG_UI_PORT}:${MG_UI_PORT} + networks: + - magistrala-base-net + + ui-db: + image: postgres:16.2-alpine + container_name: magistrala-ui-db + restart: on-failure + command: postgres -c "max_connections=${MG_POSTGRES_MAX_CONNECTIONS}" + environment: + POSTGRES_USER: ${MG_UI_DB_USER} + POSTGRES_PASSWORD: ${MG_UI_DB_PASS} + POSTGRES_DB: ${MG_UI_DB_NAME} + MG_POSTGRES_MAX_CONNECTIONS: ${MG_POSTGRES_MAX_CONNECTIONS} + ports: + - 6007:5432 + networks: + - magistrala-base-net + volumes: + - magistrala-ui-db-volume:/var/lib/postgresql/data diff --git a/docker/nats/nats.conf b/docker/nats/nats.conf new file mode 100644 index 0000000..688a58d --- /dev/null +++ b/docker/nats/nats.conf @@ -0,0 +1,27 @@ +# Copyright (c) Abstract Machines +# SPDX-License-Identifier: Apache-2.0 + +server_name: "nats_internal_broker" +max_payload: 1MB +max_connections: 1M +port: $MG_NATS_PORT +http_port: $MG_NATS_HTTP_PORT +trace: true + +jetstream { + store_dir: "/data" + cipher: "aes" + key: $MG_NATS_JETSTREAM_KEY + max_mem: 1G +} + +mqtt { + port: 1883 + max_ack_pending: 1 +} + +websocket { + port: 8080 + + no_tls: true +} diff --git a/docker/nginx/.gitignore b/docker/nginx/.gitignore new file mode 100644 index 0000000..9453269 --- /dev/null +++ b/docker/nginx/.gitignore @@ -0,0 +1,5 @@ +# Copyright (c) Abstract Machines +# SPDX-License-Identifier: Apache-2.0 + +snippets/mqtt-upstream.conf +snippets/mqtt-ws-upstream.conf \ No newline at end of file diff --git a/docker/nginx/entrypoint.sh b/docker/nginx/entrypoint.sh new file mode 100755 index 0000000..6b90377 --- /dev/null +++ b/docker/nginx/entrypoint.sh @@ -0,0 +1,26 @@ +#!/bin/ash +# Copyright (c) Abstract Machines +# SPDX-License-Identifier: Apache-2.0 + +if [ -z "$MG_MQTT_CLUSTER" ] +then + envsubst '${MG_MQTT_ADAPTER_MQTT_PORT}' < /etc/nginx/snippets/mqtt-upstream-single.conf > /etc/nginx/snippets/mqtt-upstream.conf + envsubst '${MG_MQTT_ADAPTER_WS_PORT}' < /etc/nginx/snippets/mqtt-ws-upstream-single.conf > /etc/nginx/snippets/mqtt-ws-upstream.conf +else + envsubst '${MG_MQTT_ADAPTER_MQTT_PORT}' < /etc/nginx/snippets/mqtt-upstream-cluster.conf > 
/etc/nginx/snippets/mqtt-upstream.conf + envsubst '${MG_MQTT_ADAPTER_WS_PORT}' < /etc/nginx/snippets/mqtt-ws-upstream-cluster.conf > /etc/nginx/snippets/mqtt-ws-upstream.conf +fi + +envsubst ' + ${MG_NGINX_SERVER_NAME} + ${MG_AUTH_HTTP_PORT} + ${MG_USERS_HTTP_PORT} + ${MG_THINGS_HTTP_PORT} + ${MG_THINGS_AUTH_HTTP_PORT} + ${MG_HTTP_ADAPTER_PORT} + ${MG_NGINX_MQTT_PORT} + ${MG_NGINX_MQTTS_PORT} + ${MG_INVITATIONS_HTTP_PORT} + ${MG_WS_ADAPTER_HTTP_PORT}' < /etc/nginx/nginx.conf.template > /etc/nginx/nginx.conf + +exec nginx -g "daemon off;" diff --git a/docker/nginx/nginx-key.conf b/docker/nginx/nginx-key.conf new file mode 100644 index 0000000..153a7b7 --- /dev/null +++ b/docker/nginx/nginx-key.conf @@ -0,0 +1,211 @@ +# Copyright (c) Abstract Machines +# SPDX-License-Identifier: Apache-2.0 + +# This is the default Magistrala NGINX configuration. + +user nginx; +worker_processes auto; +worker_cpu_affinity auto; +pid /run/nginx.pid; +include /etc/nginx/modules-enabled/*.conf; + +events { + # Explanation: https://serverfault.com/questions/787919/optimal-value-for-nginx-worker-connections + # We'll keep 10k connections per core (assuming one worker per core) + worker_connections 10000; +} + +http { + include snippets/http_access_log.conf; + + sendfile on; + tcp_nopush on; + tcp_nodelay on; + keepalive_timeout 65; + types_hash_max_size 2048; + + include /etc/nginx/mime.types; + default_type application/octet-stream; + + ssl_protocols TLSv1.2 TLSv1.3; + ssl_prefer_server_ciphers on; + + # Include single-node or multiple-node (cluster) upstream + include snippets/mqtt-ws-upstream.conf; + + server { + listen 80 default_server; + listen [::]:80 default_server; + listen 443 ssl default_server; + listen [::]:443 ssl default_server; + http2 on; + + set $dynamic_server_name "$MG_NGINX_SERVER_NAME"; + + if ($dynamic_server_name = '') { + set $dynamic_server_name "localhost"; + } + + server_name $dynamic_server_name; + + include snippets/ssl.conf; + + add_header Strict-Transport-Security "max-age=63072000; includeSubdomains"; + add_header X-Frame-Options DENY; + add_header X-Content-Type-Options nosniff; + add_header Access-Control-Allow-Origin '*'; + add_header Access-Control-Allow-Methods '*'; + add_header Access-Control-Allow-Headers '*'; + + location ~ ^/(channels)/(.+)/(things)/(.+) { + include snippets/proxy-headers.conf; + add_header Access-Control-Expose-Headers Location; + proxy_pass http://things:${MG_THINGS_HTTP_PORT}; + } + # Proxy pass to users & groups id to things service for listing of channels + # /users/{userID}/channels - Listing of channels belongs to userID + # /groups/{userGroupID}/channels - Listing of channels belongs to userGroupID + location ~ ^/(users|groups)/(.+)/(channels|things) { + include snippets/proxy-headers.conf; + add_header Access-Control-Expose-Headers Location; + if ($request_method = GET) { + proxy_pass http://things:${MG_THINGS_HTTP_PORT}; + break; + } + proxy_pass http://users:${MG_USERS_HTTP_PORT}; + } + + # Proxy pass to channel id to users service for listing of channels + # /channels/{channelID}/users - Listing of Users belongs to channelID + # /channels/{channelID}/groups - Listing of User Groups belongs to channelID + location ~ ^/(channels|things)/(.+)/(users|groups) { + include snippets/proxy-headers.conf; + add_header Access-Control-Expose-Headers Location; + if ($request_method = GET) { + proxy_pass http://users:${MG_USERS_HTTP_PORT}; + break; + } + proxy_pass http://things:${MG_THINGS_HTTP_PORT}; + } + + # Proxy pass to user id to auth service for 
listing of domains + # /users/{userID}/domains - Listing of Domains belongs to userID + location ~ ^/(users)/(.+)/(domains) { + include snippets/proxy-headers.conf; + add_header Access-Control-Expose-Headers Location; + if ($request_method = GET) { + proxy_pass http://auth:${MG_AUTH_HTTP_PORT}; + break; + } + proxy_pass http://users:${MG_USERS_HTTP_PORT}; + } + + # Proxy pass to domain id to users service for listing of users + # /domains/{domainID}/users - Listing of Users belongs to domainID + location ~ ^/(domains)/(.+)/(users) { + include snippets/proxy-headers.conf; + add_header Access-Control-Expose-Headers Location; + if ($request_method = GET) { + proxy_pass http://users:${MG_USERS_HTTP_PORT}; + break; + } + proxy_pass http://auth:${MG_AUTH_HTTP_PORT}; + } + + + # Proxy pass to auth service + location ~ ^/(domains) { + include snippets/proxy-headers.conf; + add_header Access-Control-Expose-Headers Location; + proxy_pass http://auth:${MG_AUTH_HTTP_PORT}; + } + + # Proxy pass to users service + location ~ ^/(users|groups|password|authorize|oauth/callback/[^/]+) { + include snippets/proxy-headers.conf; + add_header Access-Control-Expose-Headers Location; + proxy_pass http://users:${MG_USERS_HTTP_PORT}; + } + + location ^~ /users/policies { + include snippets/proxy-headers.conf; + add_header Access-Control-Expose-Headers Location; + proxy_pass http://users:${MG_USERS_HTTP_PORT}/policies; + } + + # Proxy pass to things service + location ~ ^/(things|channels|connect|disconnect|identify) { + include snippets/proxy-headers.conf; + add_header Access-Control-Expose-Headers Location; + proxy_pass http://things:${MG_THINGS_HTTP_PORT}; + } + + location ^~ /things/policies { + include snippets/proxy-headers.conf; + add_header Access-Control-Expose-Headers Location; + proxy_pass http://things:${MG_THINGS_HTTP_PORT}/policies; + } + + # Proxy pass to invitations service + location ~ ^/(invitations) { + include snippets/proxy-headers.conf; + add_header Access-Control-Expose-Headers Location; + proxy_pass http://invitations:${MG_INVITATIONS_HTTP_PORT}; + } + + location /health { + include snippets/proxy-headers.conf; + proxy_pass http://things:${MG_THINGS_HTTP_PORT}; + } + + location /metrics { + include snippets/proxy-headers.conf; + proxy_pass http://things:${MG_THINGS_HTTP_PORT}; + } + + # Proxy pass to magistrala-http-adapter + location /http/ { + include snippets/proxy-headers.conf; + + # Trailing `/` is mandatory. 
Refer to http://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_pass
+            # If the proxy_pass directive is specified with a URI, then when a request is passed to the server,
+            # the part of a normalized request URI matching the location is replaced by a URI specified in the directive
+            proxy_pass http://http-adapter:${MG_HTTP_ADAPTER_PORT}/;
+        }
+
+        # Proxy pass to magistrala-mqtt-adapter over WS
+        location /mqtt {
+            include snippets/proxy-headers.conf;
+            include snippets/ws-upgrade.conf;
+            proxy_pass http://mqtt_ws_cluster;
+        }
+
+        # Proxy pass to magistrala-ws-adapter
+        location /ws/ {
+            include snippets/proxy-headers.conf;
+            include snippets/ws-upgrade.conf;
+            proxy_pass http://ws-adapter:${MG_WS_ADAPTER_HTTP_PORT}/;
+        }
+    }
+}
+
+# MQTT
+stream {
+    include snippets/stream_access_log.conf;
+
+    # Include single-node or multiple-node (cluster) upstream
+    include snippets/mqtt-upstream.conf;
+
+    server {
+        listen ${MG_NGINX_MQTT_PORT};
+        listen [::]:${MG_NGINX_MQTT_PORT};
+        listen ${MG_NGINX_MQTTS_PORT} ssl;
+        listen [::]:${MG_NGINX_MQTTS_PORT} ssl;
+
+        include snippets/ssl.conf;
+
+        proxy_pass mqtt_cluster;
+    }
+}
+
+error_log info.log info;
diff --git a/docker/nginx/nginx-x509.conf b/docker/nginx/nginx-x509.conf
new file mode 100644
index 0000000..1da22b0
--- /dev/null
+++ b/docker/nginx/nginx-x509.conf
@@ -0,0 +1,232 @@
+# Copyright (c) Abstract Machines
+# SPDX-License-Identifier: Apache-2.0
+
+# This is the Magistrala NGINX configuration for mutual authentication based on X.509 certificates.
+
+user nginx;
+worker_processes auto;
+worker_cpu_affinity auto;
+pid /run/nginx.pid;
+load_module /etc/nginx/modules/ngx_stream_js_module.so;
+load_module /etc/nginx/modules/ngx_http_js_module.so;
+include /etc/nginx/modules-enabled/*.conf;
+
+events {
+    # Explanation: https://serverfault.com/questions/787919/optimal-value-for-nginx-worker-connections
+    # We'll keep 10k connections per core (assuming one worker per core)
+    worker_connections 10000;
+}
+
+http {
+    include snippets/http_access_log.conf;
+
+    js_path "/etc/nginx/njs/";
+    js_import authorization from /etc/nginx/authorization.js;
+
+    js_set $auth_key authorization.setKey;
+
+    sendfile on;
+    tcp_nopush on;
+    tcp_nodelay on;
+    keepalive_timeout 65;
+    types_hash_max_size 2048;
+
+    include /etc/nginx/mime.types;
+    default_type application/octet-stream;
+
+    ssl_protocols TLSv1.2 TLSv1.3;
+    ssl_prefer_server_ciphers on;
+
+    # Include single-node or multiple-node (cluster) upstream
+    include snippets/mqtt-ws-upstream.conf;
+
+    server {
+        listen 80 default_server;
+        listen [::]:80 default_server;
+        listen 443 ssl default_server;
+        listen [::]:443 ssl default_server;
+        http2 on;
+
+        set $dynamic_server_name "$MG_NGINX_SERVER_NAME";
+
+        if ($dynamic_server_name = '') {
+            set $dynamic_server_name "localhost";
+        }
+
+        server_name $dynamic_server_name;
+
+        ssl_verify_client optional;
+        include snippets/ssl.conf;
+        include snippets/ssl-client.conf;
+
+        add_header Strict-Transport-Security "max-age=63072000; includeSubdomains";
+        add_header X-Frame-Options DENY;
+        add_header X-Content-Type-Options nosniff;
+        add_header Access-Control-Allow-Origin '*';
+        add_header Access-Control-Allow-Methods '*';
+        add_header Access-Control-Allow-Headers '*';
+
+        location ~ ^/(channels)/(.+)/(things)/(.+) {
+            include snippets/proxy-headers.conf;
+            add_header Access-Control-Expose-Headers Location;
+            proxy_pass http://things:${MG_THINGS_HTTP_PORT};
+        }
+        # Proxy pass to users & groups id to things service for listing of channels
+        #
/users/{userID}/channels - Listing of channels belongs to userID + # /groups/{userGroupID}/channels - Listing of channels belongs to userGroupID + location ~ ^/(users|groups)/(.+)/(channels|things) { + include snippets/proxy-headers.conf; + add_header Access-Control-Expose-Headers Location; + if ($request_method = GET) { + proxy_pass http://things:${MG_THINGS_HTTP_PORT}; + break; + } + proxy_pass http://users:${MG_USERS_HTTP_PORT}; + } + + # Proxy pass to channel id to users service for listing of channels + # /channels/{channelID}/users - Listing of Users belongs to channelID + # /channels/{channelID}/groups - Listing of User Groups belongs to channelID + location ~ ^/(channels|things)/(.+)/(users|groups) { + include snippets/proxy-headers.conf; + add_header Access-Control-Expose-Headers Location; + if ($request_method = GET) { + proxy_pass http://users:${MG_USERS_HTTP_PORT}; + break; + } + proxy_pass http://things:${MG_THINGS_HTTP_PORT}; + } + + # Proxy pass to user id to auth service for listing of domains + # /users/{userID}/domains - Listing of Domains belongs to userID + location ~ ^/(users)/(.+)/(domains) { + include snippets/proxy-headers.conf; + add_header Access-Control-Expose-Headers Location; + if ($request_method = GET) { + proxy_pass http://auth:${MG_AUTH_HTTP_PORT}; + break; + } + proxy_pass http://users:${MG_USERS_HTTP_PORT}; + } + + # Proxy pass to domain id to users service for listing of users + # /domains/{domainID}/users - Listing of Users belongs to domainID + location ~ ^/(domains)/(.+)/(users) { + include snippets/proxy-headers.conf; + add_header Access-Control-Expose-Headers Location; + if ($request_method = GET) { + proxy_pass http://users:${MG_USERS_HTTP_PORT}; + break; + } + proxy_pass http://auth:${MG_AUTH_HTTP_PORT}; + } + + + # Proxy pass to auth service + location ~ ^/(domains) { + include snippets/proxy-headers.conf; + add_header Access-Control-Expose-Headers Location; + proxy_pass http://auth:${MG_AUTH_HTTP_PORT}; + } + + # Proxy pass to users service + location ~ ^/(users|groups|password|authorize|oauth/callback/[^/]+) { + include snippets/proxy-headers.conf; + add_header Access-Control-Expose-Headers Location; + proxy_pass http://users:${MG_USERS_HTTP_PORT}; + } + + location ^~ /users/policies { + include snippets/proxy-headers.conf; + add_header Access-Control-Expose-Headers Location; + proxy_pass http://users:${MG_USERS_HTTP_PORT}/policies; + } + + # Proxy pass to things service + location ~ ^/(things|channels|connect|disconnect|identify) { + include snippets/proxy-headers.conf; + add_header Access-Control-Expose-Headers Location; + proxy_pass http://things:${MG_THINGS_HTTP_PORT}; + } + + location ^~ /things/policies { + include snippets/proxy-headers.conf; + add_header Access-Control-Expose-Headers Location; + proxy_pass http://things:${MG_THINGS_HTTP_PORT}/policies; + } + + # Proxy pass to invitations service + location ~ ^/(invitations) { + include snippets/proxy-headers.conf; + add_header Access-Control-Expose-Headers Location; + proxy_pass http://invitations:${MG_INVITATIONS_HTTP_PORT}; + } + + location /health { + include snippets/proxy-headers.conf; + proxy_pass http://things:${MG_THINGS_HTTP_PORT}; + } + + location /metrics { + include snippets/proxy-headers.conf; + proxy_pass http://things:${MG_THINGS_HTTP_PORT}; + } + + # Proxy pass to magistrala-http-adapter + location /http/ { + include snippets/verify-ssl-client.conf; + include snippets/proxy-headers.conf; + proxy_set_header Authorization $auth_key; + + # Trailing `/` is mandatory. 
Refer to http://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_pass
+            # If the proxy_pass directive is specified with a URI, then when a request is passed to the server,
+            # the part of a normalized request URI matching the location is replaced by a URI specified in the directive
+            proxy_pass http://http-adapter:${MG_HTTP_ADAPTER_PORT}/;
+        }
+
+        # Proxy pass to magistrala-mqtt-adapter over WS
+        location /mqtt {
+            include snippets/verify-ssl-client.conf;
+            include snippets/proxy-headers.conf;
+            include snippets/ws-upgrade.conf;
+            proxy_pass http://mqtt_ws_cluster;
+        }
+
+        # Proxy pass to magistrala-ws-adapter
+        location /ws/ {
+            include snippets/verify-ssl-client.conf;
+            include snippets/proxy-headers.conf;
+            include snippets/ws-upgrade.conf;
+            proxy_pass http://ws-adapter:${MG_WS_ADAPTER_HTTP_PORT}/;
+        }
+    }
+}
+
+# MQTT
+stream {
+    include snippets/stream_access_log.conf;
+
+    # Include JS script for mTLS
+    js_path "/etc/nginx/njs/";
+
+    js_import authorization from /etc/nginx/authorization.js;
+
+    # Include single-node or multiple-node (cluster) upstream
+    include snippets/mqtt-upstream.conf;
+    ssl_verify_client on;
+    include snippets/ssl-client.conf;
+
+    server {
+        listen ${MG_NGINX_MQTT_PORT};
+        listen [::]:${MG_NGINX_MQTT_PORT};
+        listen ${MG_NGINX_MQTTS_PORT} ssl;
+        listen [::]:${MG_NGINX_MQTTS_PORT} ssl;
+
+        include snippets/ssl.conf;
+        js_preread authorization.authenticate;
+
+        proxy_pass mqtt_cluster;
+    }
+}
+
+error_log info.log info;
diff --git a/docker/nginx/snippets/http_access_log.conf b/docker/nginx/snippets/http_access_log.conf
new file mode 100644
index 0000000..d9adfa1
--- /dev/null
+++ b/docker/nginx/snippets/http_access_log.conf
@@ -0,0 +1,8 @@
+# Copyright (c) Abstract Machines
+# SPDX-License-Identifier: Apache-2.0
+
+log_format access_log_format 'HTTP/WS '
+    '$remote_addr: '
+    '"$request" $status; '
+    'request time=$request_time upstream connect time=$upstream_connect_time upstream response time=$upstream_response_time';
+access_log access.log access_log_format;
diff --git a/docker/nginx/snippets/mqtt-upstream-cluster.conf b/docker/nginx/snippets/mqtt-upstream-cluster.conf
new file mode 100644
index 0000000..72db846
--- /dev/null
+++ b/docker/nginx/snippets/mqtt-upstream-cluster.conf
@@ -0,0 +1,9 @@
+# Copyright (c) Abstract Machines
+# SPDX-License-Identifier: Apache-2.0
+
+upstream mqtt_cluster {
+    least_conn;
+    server mqtt-adapter-1:${MG_MQTT_ADAPTER_MQTT_PORT};
+    server mqtt-adapter-2:${MG_MQTT_ADAPTER_MQTT_PORT};
+    server mqtt-adapter-3:${MG_MQTT_ADAPTER_MQTT_PORT};
+}
\ No newline at end of file
diff --git a/docker/nginx/snippets/mqtt-upstream-single.conf b/docker/nginx/snippets/mqtt-upstream-single.conf
new file mode 100644
index 0000000..1613dc7
--- /dev/null
+++ b/docker/nginx/snippets/mqtt-upstream-single.conf
@@ -0,0 +1,6 @@
+# Copyright (c) Abstract Machines
+# SPDX-License-Identifier: Apache-2.0
+
+upstream mqtt_cluster {
+    server mqtt-adapter:${MG_MQTT_ADAPTER_MQTT_PORT};
+}
\ No newline at end of file
diff --git a/docker/nginx/snippets/mqtt-ws-upstream-cluster.conf b/docker/nginx/snippets/mqtt-ws-upstream-cluster.conf
new file mode 100644
index 0000000..1103c8f
--- /dev/null
+++ b/docker/nginx/snippets/mqtt-ws-upstream-cluster.conf
@@ -0,0 +1,9 @@
+# Copyright (c) Abstract Machines
+# SPDX-License-Identifier: Apache-2.0
+
+upstream mqtt_ws_cluster {
+    least_conn;
+    server mqtt-adapter-1:${MG_MQTT_ADAPTER_WS_PORT};
+    server mqtt-adapter-2:${MG_MQTT_ADAPTER_WS_PORT};
+    server mqtt-adapter-3:${MG_MQTT_ADAPTER_WS_PORT};
+}
\ No newline at end of file
diff --git a/docker/nginx/snippets/mqtt-ws-upstream-single.conf b/docker/nginx/snippets/mqtt-ws-upstream-single.conf
new file mode 100644
index 0000000..637a953
--- /dev/null
+++ b/docker/nginx/snippets/mqtt-ws-upstream-single.conf
@@ -0,0 +1,6 @@
+# Copyright (c) Abstract Machines
+# SPDX-License-Identifier: Apache-2.0
+
+upstream mqtt_ws_cluster {
+    server mqtt-adapter:${MG_MQTT_ADAPTER_WS_PORT};
+}
\ No newline at end of file
diff --git a/docker/nginx/snippets/proxy-headers.conf b/docker/nginx/snippets/proxy-headers.conf
new file mode 100644
index 0000000..0890578
--- /dev/null
+++ b/docker/nginx/snippets/proxy-headers.conf
@@ -0,0 +1,15 @@
+# Copyright (c) Abstract Machines
+# SPDX-License-Identifier: Apache-2.0
+
+proxy_redirect off;
+proxy_set_header Host $host;
+proxy_set_header X-Real-IP $remote_addr;
+proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+proxy_set_header X-Forwarded-Proto $scheme;
+
+# Allow the OPTIONS method for CORS
+if ($request_method = OPTIONS) {
+    add_header Content-Length 0;
+    add_header Content-Type text/plain;
+    return 200;
+}
\ No newline at end of file
diff --git a/docker/nginx/snippets/ssl-client.conf b/docker/nginx/snippets/ssl-client.conf
new file mode 100644
index 0000000..712d46a
--- /dev/null
+++ b/docker/nginx/snippets/ssl-client.conf
@@ -0,0 +1,5 @@
+# Copyright (c) Abstract Machines
+# SPDX-License-Identifier: Apache-2.0
+
+ssl_client_certificate /etc/ssl/certs/ca.crt;
+ssl_verify_depth 2;
diff --git a/docker/nginx/snippets/ssl.conf b/docker/nginx/snippets/ssl.conf
new file mode 100644
index 0000000..9650f1f
--- /dev/null
+++ b/docker/nginx/snippets/ssl.conf
@@ -0,0 +1,16 @@
+# Copyright (c) Abstract Machines
+# SPDX-License-Identifier: Apache-2.0
+
+# These paths are set to their default values, matching
+# the volumes mounted in the docker/docker-compose.yml file.
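+#
+# For example, docker/docker-compose.yml binds the server certificate into the
+# nginx container as:
+#
+#   - type: bind
+#     source: ${MG_NGINX_SERVER_CERT:-./ssl/certs/magistrala-server.crt}
+#     target: /etc/ssl/certs/magistrala-server.crt
+#
+# so setting MG_NGINX_SERVER_CERT swaps in a different certificate without
+# editing this file.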
+ssl_certificate /etc/ssl/certs/magistrala-server.crt; +ssl_certificate_key /etc/ssl/private/magistrala-server.key; +ssl_dhparam /etc/ssl/certs/dhparam.pem; + +ssl_protocols TLSv1.2 TLSv1.3; +ssl_prefer_server_ciphers on; +ssl_ciphers "EECDH+AESGCM:EDH+AESGCM:AES256+EECDH:AES256+EDH"; +ssl_ecdh_curve secp384r1; +ssl_session_tickets off; +resolver 8.8.8.8 8.8.4.4 valid=300s; +resolver_timeout 5s; diff --git a/docker/nginx/snippets/stream_access_log.conf b/docker/nginx/snippets/stream_access_log.conf new file mode 100644 index 0000000..7e06612 --- /dev/null +++ b/docker/nginx/snippets/stream_access_log.conf @@ -0,0 +1,7 @@ +# Copyright (c) Abstract Machines +# SPDX-License-Identifier: Apache-2.0 + +log_format access_log_format '$protocol ' + '$remote_addr: ' + 'status=$status; upstream connect time=$upstream_connect_time'; +access_log access.log access_log_format; diff --git a/docker/nginx/snippets/verify-ssl-client.conf b/docker/nginx/snippets/verify-ssl-client.conf new file mode 100644 index 0000000..991e1fb --- /dev/null +++ b/docker/nginx/snippets/verify-ssl-client.conf @@ -0,0 +1,9 @@ +# Copyright (c) Abstract Machines +# SPDX-License-Identifier: Apache-2.0 + +if ($ssl_client_verify != SUCCESS) { + return 403; +} +if ($auth_key = '') { + return 403; +} \ No newline at end of file diff --git a/docker/nginx/snippets/ws-upgrade.conf b/docker/nginx/snippets/ws-upgrade.conf new file mode 100644 index 0000000..a2be04e --- /dev/null +++ b/docker/nginx/snippets/ws-upgrade.conf @@ -0,0 +1,9 @@ +# Copyright (c) Abstract Machines +# SPDX-License-Identifier: Apache-2.0 + +proxy_http_version 1.1; +proxy_set_header Upgrade $http_upgrade; +proxy_set_header Connection "Upgrade"; +proxy_connect_timeout 7d; +proxy_send_timeout 7d; +proxy_read_timeout 7d; \ No newline at end of file diff --git a/docker/spicedb/schema.zed b/docker/spicedb/schema.zed new file mode 100644 index 0000000..215797a --- /dev/null +++ b/docker/spicedb/schema.zed @@ -0,0 +1,78 @@ +definition user {} + +definition thing { + relation administrator: user + relation group: group + relation domain: domain + + permission admin = administrator + group->admin + domain->admin + permission delete = admin + permission edit = admin + group->edit + domain->edit + permission view = edit + group->view + domain->view + permission share = edit + permission publish = group + permission subscribe = group + + // These permission are made for only list purpose. It helps to list users have only particular permission excluding other higher and lower permission. + permission admin_only = admin + permission edit_only = edit - admin + permission view_only = view + + // These permission are made for only list purpose. 
+    permission ext_admin = admin - administrator // Lists external admins: users with no direct relation to the thing, whose admin permission is inherited through the group or domain.
+}
+
+definition group {
+    relation administrator: user
+    relation editor: user
+    relation contributor: user
+    relation member: user
+    relation guest: user
+
+    relation parent_group: group
+    relation domain: domain
+
+    permission admin = administrator + parent_group->admin + domain->admin
+    permission delete = admin
+    permission edit = admin + editor + parent_group->edit + domain->edit
+    permission share = edit
+    permission view = contributor + edit + parent_group->view + domain->view + guest
+    permission membership = view + member
+    permission create = membership - guest
+
+    // These permissions exist for listing purposes only. They enable listing
+    // users who hold only a particular permission, excluding users with
+    // higher-level permissions.
+    permission admin_only = admin
+    permission edit_only = edit - admin
+    permission view_only = view
+    permission membership_only = membership - view
+
+    // These permissions exist for listing purposes only. They enable listing
+    // users whose permission comes solely from the parent group, excluding
+    // higher-level permissions.
+    permission ext_admin = admin - administrator // Lists external admins: no direct relation with the group, only an indirect relation through the parent group.
+    permission ext_edit = edit - editor // Lists external editors: no direct relation with the group, only an indirect relation through the parent group.
+    permission ext_view = view - contributor // Lists external viewers: no direct relation with the group, only an indirect relation through the parent group.
+}
+
+definition domain {
+    relation administrator: user // combination domain + user id
+    relation editor: user
+    relation contributor: user
+    relation member: user
+    relation guest: user
+
+    relation platform: platform
+
+    permission admin = administrator + platform->admin
+    permission edit = admin + editor
+    permission share = edit
+    permission view = edit + contributor + guest
+    permission membership = view + member
+    permission create = membership - guest
+}
+
+definition platform {
+    relation administrator: user
+    relation member: user
+
+    permission admin = administrator
+    permission membership = administrator + member
+}
diff --git a/docker/ssl/.gitignore b/docker/ssl/.gitignore
new file mode 100644
index 0000000..9ea7050
--- /dev/null
+++ b/docker/ssl/.gitignore
@@ -0,0 +1,7 @@
+# Copyright (c) Abstract Machines
+# SPDX-License-Identifier: Apache-2.0
+
+*grpc-server*
+*grpc-client*
+*srl
+*conf
diff --git a/docker/ssl/Makefile b/docker/ssl/Makefile
new file mode 100644
index 0000000..f0561b8
--- /dev/null
+++ b/docker/ssl/Makefile
@@ -0,0 +1,170 @@
+# Copyright (c) Abstract Machines
+# SPDX-License-Identifier: Apache-2.0
+
+CRT_LOCATION = certs
+O = Magistrala
+OU_CA = magistrala_ca
+OU_CRT = magistrala_crt
+EA = info@magistrala.com
+CN_CA = Magistrala_Self_Signed_CA
+CN_SRV = localhost
+THING_SECRET = <THING_SECRET> # e.g. 8f65ed04-0770-4ce4-a291-6d1bf2000f4d
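+# Usage sketch (illustrative; the secret below is only an example value):
+#   make ca
+#   make thing_cert THING_SECRET=8f65ed04-0770-4ce4-a291-6d1bf2000f4d CRT_FILE_NAME=thing
+# This issues a client certificate whose CN is the thing secret, signed by the
+# CA produced by the `ca` target.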
+CRT_FILE_NAME = thing
+THINGS_GRPC_SERVER_CONF_FILE_NAME=thing-grpc-server.conf
+THINGS_GRPC_CLIENT_CONF_FILE_NAME=thing-grpc-client.conf
+THINGS_GRPC_SERVER_CN=things
+THINGS_GRPC_CLIENT_CN=things-client
+THINGS_GRPC_SERVER_CRT_FILE_NAME=things-grpc-server
+THINGS_GRPC_CLIENT_CRT_FILE_NAME=things-grpc-client
+AUTH_GRPC_SERVER_CONF_FILE_NAME=auth-grpc-server.conf
+AUTH_GRPC_CLIENT_CONF_FILE_NAME=auth-grpc-client.conf
+AUTH_GRPC_SERVER_CN=auth
+AUTH_GRPC_CLIENT_CN=auth-client
+AUTH_GRPC_SERVER_CRT_FILE_NAME=auth-grpc-server
+AUTH_GRPC_CLIENT_CRT_FILE_NAME=auth-grpc-client
+
+define GRPC_CERT_CONFIG
+[req]
+req_extensions = v3_req
+distinguished_name = dn
+prompt = no
+
+[dn]
+CN = mg.svc
+C = RS
+ST = RS
+L = BELGRADE
+O = MAGISTRALA
+OU = MAGISTRALA
+
+[v3_req]
+subjectAltName = @alt_names
+
+[alt_names]
+DNS.1 = <<SERVICE_NAME>>
+endef
+
+define ANNOUNCE_BODY
+Version $(VERSION) of $(PACKAGE_NAME) has been released.
+
+It can be downloaded from $(DOWNLOAD_URL).
+
+etc, etc.
+endef
+all: clean_certs ca server_cert things_grpc_certs auth_grpc_certs
+
+# CA certificate and key are named "ca".
+ca:
+	openssl req -newkey rsa:2048 -x509 -nodes -sha512 -days 1095 \
+		-keyout $(CRT_LOCATION)/ca.key -out $(CRT_LOCATION)/ca.crt -subj "/CN=$(CN_CA)/O=$(O)/OU=$(OU_CA)/emailAddress=$(EA)"
+
+# Server cert and key are named "magistrala-server".
+server_cert:
+	# Create magistrala server key and CSR.
+	openssl req -new -sha256 -newkey rsa:4096 -nodes -keyout $(CRT_LOCATION)/magistrala-server.key \
+		-out $(CRT_LOCATION)/magistrala-server.csr -subj "/CN=$(CN_SRV)/O=$(O)/OU=$(OU_CRT)/emailAddress=$(EA)"
+
+	# Sign server CSR.
+	openssl x509 -req -days 1000 -in $(CRT_LOCATION)/magistrala-server.csr -CA $(CRT_LOCATION)/ca.crt -CAkey $(CRT_LOCATION)/ca.key -CAcreateserial -out $(CRT_LOCATION)/magistrala-server.crt
+
+	# Remove CSR.
+	rm $(CRT_LOCATION)/magistrala-server.csr
+
+thing_cert:
+	# Create magistrala thing key and CSR.
+	openssl req -new -sha256 -newkey rsa:4096 -nodes -keyout $(CRT_LOCATION)/$(CRT_FILE_NAME).key \
+		-out $(CRT_LOCATION)/$(CRT_FILE_NAME).csr -subj "/CN=$(THING_SECRET)/O=$(O)/OU=$(OU_CRT)/emailAddress=$(EA)"
+
+	# Sign client CSR.
+	openssl x509 -req -days 730 -in $(CRT_LOCATION)/$(CRT_FILE_NAME).csr -CA $(CRT_LOCATION)/ca.crt -CAkey $(CRT_LOCATION)/ca.key -CAcreateserial -out $(CRT_LOCATION)/$(CRT_FILE_NAME).crt
+
+	# Remove CSR.
+ rm $(CRT_LOCATION)/$(CRT_FILE_NAME).csr + +things_grpc_certs: + # Things server grpc certificates + $(file > $(CRT_LOCATION)/$(THINGS_GRPC_SERVER_CRT_FILE_NAME).conf,$(subst <<SERVICE_NAME>>,$(THINGS_GRPC_SERVER_CN),$(GRPC_CERT_CONFIG)) ) + + openssl req -new -sha256 -newkey rsa:4096 -nodes \ + -keyout $(CRT_LOCATION)/$(THINGS_GRPC_SERVER_CRT_FILE_NAME).key \ + -out $(CRT_LOCATION)/$(THINGS_GRPC_SERVER_CRT_FILE_NAME).csr \ + -config $(CRT_LOCATION)/$(THINGS_GRPC_SERVER_CRT_FILE_NAME).conf \ + -extensions v3_req + + openssl x509 -req -sha256 \ + -in $(CRT_LOCATION)/$(THINGS_GRPC_SERVER_CRT_FILE_NAME).csr \ + -CA $(CRT_LOCATION)/ca.crt \ + -CAkey $(CRT_LOCATION)/ca.key \ + -CAcreateserial \ + -out $(CRT_LOCATION)/$(THINGS_GRPC_SERVER_CRT_FILE_NAME).crt \ + -days 365 \ + -extfile $(CRT_LOCATION)/$(THINGS_GRPC_SERVER_CRT_FILE_NAME).conf \ + -extensions v3_req + + rm -rf $(CRT_LOCATION)/$(THINGS_GRPC_SERVER_CRT_FILE_NAME).csr $(CRT_LOCATION)/$(THINGS_GRPC_SERVER_CRT_FILE_NAME).conf + # Things client grpc certificates + $(file > $(CRT_LOCATION)/$(THINGS_GRPC_CLIENT_CRT_FILE_NAME).conf,$(subst <<SERVICE_NAME>>,$(THINGS_GRPC_CLIENT_CN),$(GRPC_CERT_CONFIG)) ) + + openssl req -new -sha256 -newkey rsa:4096 -nodes \ + -keyout $(CRT_LOCATION)/$(THINGS_GRPC_CLIENT_CRT_FILE_NAME).key \ + -out $(CRT_LOCATION)/$(THINGS_GRPC_CLIENT_CRT_FILE_NAME).csr \ + -config $(CRT_LOCATION)/$(THINGS_GRPC_CLIENT_CRT_FILE_NAME).conf \ + -extensions v3_req + + openssl x509 -req -sha256 \ + -in $(CRT_LOCATION)/$(THINGS_GRPC_CLIENT_CRT_FILE_NAME).csr \ + -CA $(CRT_LOCATION)/ca.crt \ + -CAkey $(CRT_LOCATION)/ca.key \ + -CAcreateserial \ + -out $(CRT_LOCATION)/$(THINGS_GRPC_CLIENT_CRT_FILE_NAME).crt \ + -days 365 \ + -extfile $(CRT_LOCATION)/$(THINGS_GRPC_CLIENT_CRT_FILE_NAME).conf \ + -extensions v3_req + + rm -rf $(CRT_LOCATION)/$(THINGS_GRPC_CLIENT_CRT_FILE_NAME).csr $(CRT_LOCATION)/$(THINGS_GRPC_CLIENT_CRT_FILE_NAME).conf + +auth_grpc_certs: + # Auth gRPC server certificate + $(file > $(CRT_LOCATION)/$(AUTH_GRPC_SERVER_CRT_FILE_NAME).conf,$(subst <<SERVICE_NAME>>,$(AUTH_GRPC_SERVER_CN),$(GRPC_CERT_CONFIG)) ) + + openssl req -new -sha256 -newkey rsa:4096 -nodes \ + -keyout $(CRT_LOCATION)/$(AUTH_GRPC_SERVER_CRT_FILE_NAME).key \ + -out $(CRT_LOCATION)/$(AUTH_GRPC_SERVER_CRT_FILE_NAME).csr \ + -config $(CRT_LOCATION)/$(AUTH_GRPC_SERVER_CRT_FILE_NAME).conf \ + -extensions v3_req + + openssl x509 -req -sha256 \ + -in $(CRT_LOCATION)/$(AUTH_GRPC_SERVER_CRT_FILE_NAME).csr \ + -CA $(CRT_LOCATION)/ca.crt \ + -CAkey $(CRT_LOCATION)/ca.key \ + -CAcreateserial \ + -out $(CRT_LOCATION)/$(AUTH_GRPC_SERVER_CRT_FILE_NAME).crt \ + -days 365 \ + -extfile $(CRT_LOCATION)/$(AUTH_GRPC_SERVER_CRT_FILE_NAME).conf \ + -extensions v3_req + + rm -rf $(CRT_LOCATION)/$(AUTH_GRPC_SERVER_CRT_FILE_NAME).csr $(CRT_LOCATION)/$(AUTH_GRPC_SERVER_CRT_FILE_NAME).conf + # Auth gRPC client certificate + $(file > $(CRT_LOCATION)/$(AUTH_GRPC_CLIENT_CRT_FILE_NAME).conf,$(subst <<SERVICE_NAME>>,$(AUTH_GRPC_CLIENT_CN),$(GRPC_CERT_CONFIG)) ) + + openssl req -new -sha256 -newkey rsa:4096 -nodes \ + -keyout $(CRT_LOCATION)/$(AUTH_GRPC_CLIENT_CRT_FILE_NAME).key \ + -out $(CRT_LOCATION)/$(AUTH_GRPC_CLIENT_CRT_FILE_NAME).csr \ + -config $(CRT_LOCATION)/$(AUTH_GRPC_CLIENT_CRT_FILE_NAME).conf \ + -extensions v3_req + + openssl x509 -req -sha256 \ + -in $(CRT_LOCATION)/$(AUTH_GRPC_CLIENT_CRT_FILE_NAME).csr \ + -CA $(CRT_LOCATION)/ca.crt \ + -CAkey $(CRT_LOCATION)/ca.key \ + -CAcreateserial \ + -out $(CRT_LOCATION)/$(AUTH_GRPC_CLIENT_CRT_FILE_NAME).crt \ + -days 365 \ + 
-extfile $(CRT_LOCATION)/$(AUTH_GRPC_CLIENT_CRT_FILE_NAME).conf \
+		-extensions v3_req
+
+	rm -rf $(CRT_LOCATION)/$(AUTH_GRPC_CLIENT_CRT_FILE_NAME).csr $(CRT_LOCATION)/$(AUTH_GRPC_CLIENT_CRT_FILE_NAME).conf
+
+clean_certs:
+	rm -r $(CRT_LOCATION)/*.crt
+	rm -r $(CRT_LOCATION)/*.key
diff --git a/docker/ssl/authorization.js b/docker/ssl/authorization.js
new file mode 100644
index 0000000..5bfedbe
--- /dev/null
+++ b/docker/ssl/authorization.js
@@ -0,0 +1,181 @@
+// Copyright (c) Abstract Machines
+// SPDX-License-Identifier: Apache-2.0
+
+var clientKey = '';
+
+// Check certificate MQTTS.
+function authenticate(s) {
+    if (!s.variables.ssl_client_s_dn || !s.variables.ssl_client_s_dn.length ||
+        !s.variables.ssl_client_verify || s.variables.ssl_client_verify != "SUCCESS") {
+        s.deny();
+        return;
+    }
+
+    s.on('upload', function (data) {
+        if (data == '') {
+            return;
+        }
+
+        var packet_type_flags_byte = data.codePointAt(0);
+        // The first MQTT packet contains the message type and flags. The CONNECT
+        // message type is encoded as 0001, and we're not interested in the flags,
+        // so only values 0001xxxx (i.e. 16 up to, but not including, 32) should be checked.
+        if (packet_type_flags_byte < 16 || packet_type_flags_byte >= 32) {
+            s.off('upload');
+            s.allow();
+            return;
+        }
+
+        if (clientKey === '') {
+            clientKey = parseCert(s.variables.ssl_client_s_dn, 'CN');
+        }
+
+        var pass = parsePackage(s, data);
+
+        if (!clientKey.length || !clientKey.endsWith(pass)) {
+            s.error('Cert CN (' + clientKey + ') does not contain client password');
+            s.off('upload');
+            s.deny();
+            return;
+        }
+
+        s.off('upload');
+        s.allow();
+    });
+}
+
+function parsePackage(s, data) {
+    // An explanation of MQTT packet structure can be found here:
+    // https://public.dhe.ibm.com/software/dw/webservices/ws-mqtt/mqtt-v3r1.html#msg-format.
+
+    // CONNECT message is explained here:
+    // https://public.dhe.ibm.com/software/dw/webservices/ws-mqtt/mqtt-v3r1.html#connect.
+
+    /*
+           0               1               2               3
+    7 6 5 4 3 2 1 0 7 6 5 4 3 2 1 0 7 6 5 4 3 2 1 0 7 6 5 4 3 2 1 0
+    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+    |  TYPE | RSRVD | REMAINING LEN |      PROTOCOL NAME LEN      |
+    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+    |                        PROTOCOL NAME                        |
+    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-|
+    |    VERSION    |     FLAGS     |          KEEP ALIVE         |
+    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-|
+    |                     Payload (if any) ...                    |
+    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+
+    First byte with remaining length represents the fixed header.
+    Remaining Length is the length of the variable header (10 bytes) plus the length of the Payload.
+    It is encoded in the manner described here:
+    http://docs.oasis-open.org/mqtt/mqtt/v3.1.1/errata01/os/mqtt-v3.1.1-errata01-os-complete.html#_Toc442180836.
+
+    Connect flags byte looks like this:
+    |       7       |       6       |      5      |   4  3   |     2     |       1       |    0     |
+    | Username Flag | Password Flag | Will Retain | Will QoS | Will Flag | Clean Session | Reserved |
+
+    The payload is determined by the flags and comes in this order:
+    1. Client ID (2 bytes length + ID value)
+    2. Will Topic (2 bytes length + Will Topic value) if Will Flag is 1.
+    3. Will Message (2 bytes length + Will Message value) if Will Flag is 1.
+    4. User Name (2 bytes length + User Name value) if User Name Flag is 1.
+    5. Password (2 bytes length + Password value) if Password Flag is 1.
+
+    This method extracts the Password field.
+    */
+
+    // Extract the variable-length Remaining Length header. It's 1-4 bytes. As long
+    // as the continuation bit is set, there are more bytes in this header. This
+    // algorithm is explained here:
+    // http://docs.oasis-open.org/mqtt/mqtt/v3.1.1/errata01/os/mqtt-v3.1.1-errata01-os-complete.html#_Toc442180836
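+    // Worked example of this encoding (illustrative, not used by the handlers
+    // below): each length byte carries 7 value bits and uses the high bit as a
+    // continuation flag, so 321 is encoded as 0xC1 0x02, because
+    // 0xC1 = 65 + 128 (65, continue) and 65 + 2*128 = 321. A standalone decoder
+    // sketch for data laid out as in this file (byte 0 is type/flags):
+    //
+    //     function decodeRemainingLength(data) {
+    //         var value = 0, multiplier = 1, i = 1, b;
+    //         do {
+    //             b = data.codePointAt(i++);
+    //             value += (b & 127) * multiplier;
+    //             multiplier *= 128;
+    //         } while (b & 128);
+    //         return value;
+    //     }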
+    var len_size = 1;
+    for (var remaining_len = 1; remaining_len < 5; remaining_len++) {
+        // The continuation bit (0x80) marks another length byte to follow.
+        if (data.codePointAt(remaining_len) >= 128) {
+            len_size += 1;
+            continue;
+        }
+        break;
+    }
+
+    // CONTROL(1) + MSG_LEN(1-4) + PROTO_NAME_LEN(2) + PROTO_NAME(4) + PROTO_VERSION(1)
+    var flags_pos = 1 + len_size + 2 + 4 + 1;
+    var flags = data.codePointAt(flags_pos);
+
+    // If the Username and Password flags are not both set (11xxxxxx, i.e. flags >= 192), return.
+    if (flags < 192) {
+        s.error('MQTT username or password not provided');
+        return '';
+    }
+
+    // FLAGS(1) + KEEP_ALIVE(2)
+    var shift = flags_pos + 1 + 2;
+
+    // Number of bytes used to encode each field length.
+    var len_bytes_num = 2;
+
+    // If the Will Flag is also set (bit 2, so flags >= 196), Will Topic and Will Message need to be skipped as well.
+    var shift_flags = 196 <= flags ? 5 : 3;
+    var len_msb, len_lsb, len;
+
+    for (var i = 0; i < shift_flags; i++) {
+        len_msb = data.codePointAt(shift).toString(16);
+        len_lsb = data.codePointAt(shift + 1).toString(16);
+        len = calcLen(len_msb, len_lsb);
+        shift += len_bytes_num;
+        if (i != shift_flags - 1) {
+            shift += len;
+        }
+    }
+
+    var password = data.substring(shift, shift + len);
+    return password;
+}
+
+// Check certificate HTTPS and WSS.
+function setKey(r) {
+    if (clientKey === '') {
+        clientKey = parseCert(r.variables.ssl_client_s_dn, 'CN');
+    }
+
+    var auth = r.headersIn['Authorization'];
+    if (auth && auth.length && auth != clientKey) {
+        r.error('Authorization header does not match certificate');
+        return '';
+    }
+
+    if (r.uri.startsWith('/ws') && (!auth || !auth.length)) {
+        var a;
+        for (a in r.args) {
+            if (a == 'authorization' && r.args[a] === clientKey) {
+                return clientKey;
+            }
+        }
+
+        r.error('Authorization param does not match certificate');
+        return '';
+    }
+
+    return clientKey;
+}
+
+function calcLen(msb, lsb) {
+    // Pad a single-digit hex LSB with a leading zero so that string
+    // concatenation yields the correct 16-bit value.
+    if (lsb.length < 2) {
+        lsb = '0' + lsb;
+    }
+
+    return parseInt(msb + lsb, 16);
+}
+
+function parseCert(cert, key) {
+    if (cert.length) {
+        var pairs = cert.split(',');
+        for (var i = 0; i < pairs.length; i++) {
+            var pair = pairs[i].split('=');
+            if (pair[0].toUpperCase() == key) {
+                return "Thing " + pair[1].replace("\\", "").trim();
+            }
+        }
+    }
+
+    return '';
+}
+
+export default {setKey, authenticate};
diff --git a/docker/ssl/certs/ca.crt b/docker/ssl/certs/ca.crt
new file mode 100644
index 0000000..34f0728
--- /dev/null
+++ b/docker/ssl/certs/ca.crt
@@ -0,0 +1,23 @@
+-----BEGIN CERTIFICATE-----
+MIIDyzCCArOgAwIBAgIUDIJg63dQVzoD9nmWi9YPscQwTgIwDQYJKoZIhvcNAQEN
+BQAwdTEiMCAGA1UEAwwZTWFnaXN0cmFsYV9TZWxmX1NpZ25lZF9DQTETMBEGA1UE
+CgwKTWFnaXN0cmFsYTEWMBQGA1UECwwNbWFnaXN0cmFsYV9jYTEiMCAGCSqGSIb3
+DQEJARYTaW5mb0BtYWdpc3RyYWxhLmNvbTAeFw0yMzEwMzAwODE5MDFaFw0yNjEw
+MjkwODE5MDFaMHUxIjAgBgNVBAMMGU1hZ2lzdHJhbGFfU2VsZl9TaWduZWRfQ0Ex
+EzARBgNVBAoMCk1hZ2lzdHJhbGExFjAUBgNVBAsMDW1hZ2lzdHJhbGFfY2ExIjAg
+BgkqhkiG9w0BCQEWE2luZm9AbWFnaXN0cmFsYS5jb20wggEiMA0GCSqGSIb3DQEB
+AQUAA4IBDwAwggEKAoIBAQCWNIeGfo/SePOvviJE6UHJhBzWcPfNVbzSF6A42WgB
+DEgI3KFr+/rgWMEaCOD4QzCl3Lqa89EgCA7xCgxcqFwEo33SyhAivwoHL2pRVHXn
+oee3z9U757T63YLE0qrXQY2cbyChX/OU99rZxyd5l5jUGN7MCu+RYurfTIiYN+Uv
+NZdl8a3X84g7fa70EOYas7cTunWUt9x64/jYDoYmn+XPXET1yEU1dQTnKY4cRjhv
+HS1u2QsadHKi1hgeILyLbB4u1T5N+WfxFknhFHTu8PVPxfowrVv/xzmxOe0zSZFd
+SbhtrmwT4S1wJ4PfUa3+tYZVtjEKKbyObsAW91WzOLS9AgMBAAGjUzBRMB0GA1Ud
+DgQWBBQkE4koZctEZpTz9pq6a6s6xg+myTAfBgNVHSMEGDAWgBQkE4koZctEZpTz
+9pq6a6s6xg+myTAPBgNVHRMBAf8EBTADAQH/MA0GCSqGSIb3DQEBDQUAA4IBAQA7
+w/oh5U9loJsigf3X3T3jQM8PVmhsUfNMJ3kc1Yumr72S4sGKjdWwuU0vk+B3eQzh +zXAj65BHhs1pXcukeoLR7YcHABEsEMg6lar/E4A+MgAZfZFVSvPpsByIK8I5ARk+ +K1V/lWso+GJJM/lImPPnpvUWBdbntqC5WtjoMMGL9uyV3kVS6yT/kJ2ercnPzhPh +uBkL1ZH3ivDn/0JDY+T8Sfeq08vNWaTcoC7qpPwqXhuT0ytY7oaBS5wmPcvvzpZg +6zZYPZfhjhdEFYY1hDrrPYNYO72jncUnwQVp3X0DQpSvbxp681hVkcEtwHB2B8l0 +tBGhgoH+TqZs0AUjoXM0 +-----END CERTIFICATE----- diff --git a/docker/ssl/certs/ca.key b/docker/ssl/certs/ca.key new file mode 100644 index 0000000..0ba786b --- /dev/null +++ b/docker/ssl/certs/ca.key @@ -0,0 +1,28 @@ +-----BEGIN PRIVATE KEY----- +MIIEvAIBADANBgkqhkiG9w0BAQEFAASCBKYwggSiAgEAAoIBAQCWNIeGfo/SePOv +viJE6UHJhBzWcPfNVbzSF6A42WgBDEgI3KFr+/rgWMEaCOD4QzCl3Lqa89EgCA7x +CgxcqFwEo33SyhAivwoHL2pRVHXnoee3z9U757T63YLE0qrXQY2cbyChX/OU99rZ +xyd5l5jUGN7MCu+RYurfTIiYN+UvNZdl8a3X84g7fa70EOYas7cTunWUt9x64/jY +DoYmn+XPXET1yEU1dQTnKY4cRjhvHS1u2QsadHKi1hgeILyLbB4u1T5N+WfxFknh +FHTu8PVPxfowrVv/xzmxOe0zSZFdSbhtrmwT4S1wJ4PfUa3+tYZVtjEKKbyObsAW +91WzOLS9AgMBAAECggEAEOxEq6jFO/WgIPgHROPR42ok1J1AMgx7nGEIjnciImIX +mJYBAtlOM+oUAYKoFBh/2eQTSyN2t4jo5AvZhjP6wBQKeE4HQN7supADRrwBF7KU +WI+MKvZpW81KrzG8CUoLsikMEFpu52UAbYJkZmznzVeq/GqsAKGYLEXjauD7S5Tu +GeGVKO4novus6t3AHnBvfalIQ1JUuJFvcd5ZDhPljlzPbbWdM4WpRPaFZIKmfXft +G7Izt58yPCYwhxohjrunRudyX3oKvmCBUOBXC8HdHzND/dLxwlrVu7OjmXprmC6P +8ggNpjAPeO8Y6+EKGne1fETNsKgODY/lXGOwECY4eQKBgQDSGi3WuoT/+DecVeSF +GfmavdGCQKOD0kdl7qCeQYAL+SPVz4157AtxZs3idapvlbrc7wvw4Ev1XT7ZmWUj +Lc4/UAITR8EkkFRVbxt2PvV86AiQtmXFguTNEX5vTszRwZ2+eqijZga5niBkqyAi +SRuTwR8WrDZau4mRNnF8bUl8dQKBgQC3BKYifRp4hHqBycHe9rSMZ8Xz+ZOy+IFA +vYap1Az+e8KuqlmD9Kfpp2Mjba1+HL5WKeTJGpFE7bhvb/xMPJgbMgtQ/cw4uDJ/ +fwv4m6arf76ebOhaZtkT1vD4NyiyB+z6xP0TRgQRr2Or98XBSvGAYDXIn5vL7fUg +KrDF0ePuKQKBgDfaOcFRiDW7uJzYwI0ZoJ8gQufLYyyR4+UXEJ/BbdbA/mPCbyuw +MkKNP8Ip4YsUVL6S1avNFKQ/i4uxGY/Gh4ORM1wIwTGFJMYpaTV/+yafUFeYBWoC +J+zT77aLTiucuuB+HwKBBtylSps4WqyCntAikK8oTLLGFAYEYRrgup5ZAoGAbQ8j +JNghxwFCs0aT9ZZTfnt0NW9auUJmWzrVHSxUVe1P1J+EWiKXUJ/DbuAzizv7nAK4 +57GiMU3rItS7pn5RMZt/rNKgOIhi5yDA9HNkPTwRTfyd9QjmgHEMBQ1xfa1FZSWv +nSWS1SsLnPU37XgIMzShuByMTVhOQs3NqwPo7AkCgYAf8AzQNjFCoTwU3SJezJ4H +9j1jvMO232hAl8UDNtqvJ1APn87tOtnfX48OMoRrP9kKI0oygE3pq7rFxu1qmTns +Zir0+KLeWGg58fSZkUEAp6kbO5CKwoeVAY9EMgd7BYBqlXLqUNfdH0L+KUOFKHha +7e82VxpgBeskzAqN1e7YRA== +-----END PRIVATE KEY----- diff --git a/docker/ssl/certs/magistrala-server.crt b/docker/ssl/certs/magistrala-server.crt new file mode 100644 index 0000000..4e893c1 --- /dev/null +++ b/docker/ssl/certs/magistrala-server.crt @@ -0,0 +1,26 @@ +-----BEGIN CERTIFICATE----- +MIIEYjCCA0oCFGXr7rfGAynaa4KMTG1+23EEF0lYMA0GCSqGSIb3DQEBCwUAMHUx +IjAgBgNVBAMMGU1hZ2lzdHJhbGFfU2VsZl9TaWduZWRfQ0ExEzARBgNVBAoMCk1h +Z2lzdHJhbGExFjAUBgNVBAsMDW1hZ2lzdHJhbGFfY2ExIjAgBgkqhkiG9w0BCQEW +E2luZm9AbWFnaXN0cmFsYS5jb20wHhcNMjMxMDMwMDgxOTA4WhcNMjYwNzI2MDgx +OTA4WjBmMRIwEAYDVQQDDAlsb2NhbGhvc3QxEzARBgNVBAoMCk1hZ2lzdHJhbGEx +FzAVBgNVBAsMDm1hZ2lzdHJhbGFfY3J0MSIwIAYJKoZIhvcNAQkBFhNpbmZvQG1h +Z2lzdHJhbGEuY29tMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAojas +t6M294uS5q8oFmYM6DULVQ1lY3K659VusJshjGvn8bi50vhKo8PpxL6ygVpjWcHG ++/gclQnTaYZumC1TUohibpBnrFx1PZUvGiryAPudFY2nC5af5BQnYGi845FcVWx5 +FNLq+IsedgSZf7FuGcZruXiukBCWVyWJRJh+8FDakc65BPeG9FpCxbeLZ1nrDpnQ +bhHbwEQrwwHk0FHZ/3cuVFJAjwqJSivJ9598eU0YWAsqsLM3uYyvOMd8alMs5vCZ +9tMCpO2v6xTdJ6kr68SwQQAiefRy6gsD5J5A4ySyCz7KX9fHCrqx1kdcDJ/CXZmh +mXxrCFKSjqjuSn2qtm+gxvAc26Zbt5z5eihpdISDUKrjW11+yapNZLATGBX8ktek +gW467V9DQYOsbA3fNkWgd5UcV5HIViUpqFMFvi1NpWc2INi/PTDWuAIBLUiVNk0W +qMtG7/HqFRPn6MrNGpvFpglgxXGNfjsggkK/3INtFnAou2rN9+ieeuzO7Zjrtwsq 
+sP64GVw/vLv3tgT6TIZmDnCDCqtEGEVutt7ldu3M0/fLm4qOUsZqFGrIOO1cfI4x +7FRnHwaTsTB1Og+I7lEujb4efHV+uRjKyrGh6L6hDt94IkGm6ZEj5z/iEmq16jRX +dUbYsu4f1KlfTYdHWGHp+6kAmDn0jGCwz2BBrnsCAwEAATANBgkqhkiG9w0BAQsF +AAOCAQEAKyg5kvDk+TQ6ZDCK7qxKY+uN9setYvvsLfde+Uy51a3zj8RIHRgkOT2C +LuuTtTYKu3XmfCKId0oTXynGuP+yDAIuVwuZz3S0VmA8ijoZ87LJXzsLjjTjQSzZ +ar6RmlRDH+8Bm4AOrT4TDupqifag4J0msHkNPo0jVK6fnuniqJoSlhIbbHrJTHhv +jKNXrThjr/irgg1MZ7slojieOS0QoZHRE9eunIR5enDJwB5pWUJSmZWlisI7+Ibi +06+j8wZegU0nqeWp4wFSZxKnrzz5B5Qu9SrALwlHWirzBpyr0gAcF2v7nzbWviZ/ +0VMyY4FGEbkp6trMxwJs5hGYhAiyXg== +-----END CERTIFICATE----- diff --git a/docker/ssl/certs/magistrala-server.key b/docker/ssl/certs/magistrala-server.key new file mode 100644 index 0000000..f2b56f4 --- /dev/null +++ b/docker/ssl/certs/magistrala-server.key @@ -0,0 +1,52 @@ +-----BEGIN PRIVATE KEY----- +MIIJQQIBADANBgkqhkiG9w0BAQEFAASCCSswggknAgEAAoICAQCiNqy3ozb3i5Lm +rygWZgzoNQtVDWVjcrrn1W6wmyGMa+fxuLnS+Eqjw+nEvrKBWmNZwcb7+ByVCdNp +hm6YLVNSiGJukGesXHU9lS8aKvIA+50VjacLlp/kFCdgaLzjkVxVbHkU0ur4ix52 +BJl/sW4Zxmu5eK6QEJZXJYlEmH7wUNqRzrkE94b0WkLFt4tnWesOmdBuEdvARCvD +AeTQUdn/dy5UUkCPColKK8n3n3x5TRhYCyqwsze5jK84x3xqUyzm8Jn20wKk7a/r +FN0nqSvrxLBBACJ59HLqCwPknkDjJLILPspf18cKurHWR1wMn8JdmaGZfGsIUpKO +qO5Kfaq2b6DG8Bzbplu3nPl6KGl0hINQquNbXX7Jqk1ksBMYFfyS16SBbjrtX0NB +g6xsDd82RaB3lRxXkchWJSmoUwW+LU2lZzYg2L89MNa4AgEtSJU2TRaoy0bv8eoV +E+foys0am8WmCWDFcY1+OyCCQr/cg20WcCi7as336J567M7tmOu3Cyqw/rgZXD+8 +u/e2BPpMhmYOcIMKq0QYRW623uV27czT98ubio5SxmoUasg47Vx8jjHsVGcfBpOx +MHU6D4juUS6Nvh58dX65GMrKsaHovqEO33giQabpkSPnP+ISarXqNFd1Rtiy7h/U +qV9Nh0dYYen7qQCYOfSMYLDPYEGuewIDAQABAoICACvgzTyJTkOMwipbQ+U3KpOf +UZbqnjvV23/9iEkGVX9V6vJETSOnnQ0KYBAjo0aBLDGpzIj41sZr13+KaR0J2amQ +EcwljJ2fjukfExQpfLfOV/HuFLr6Pfrkhrg57KpD9i13P5Nl8EBV5WH4IYtcc9NO +DHKpldKLYhdlpGllNKUNwenB+ONCj4NGbRxtZyyIMqCK88nqU76A0jOYLgw5r9W+ +J86QRz1KFNP231V3kyR+ubCLKLuOZuruhrE9qMZcBF/dwk/1SRhS4QyeYqopRSOr +2x9iCXFisbjkTOPI+PVYRj7rd7OQOxuIX7V+LQSPLHTEK2XItW0VZOZpBLgqoQP1 +Eu19LOOs77DI5FBia1qhSpjjVGOE6koQmCki8KSFZM+CzuflTPkWNVvTNzjKrhUj +Rbezx40VVFt+q38bsTjWJbimMSo1jChianwjtotGnGpC6pD0KnHsBmfceWaL7+eC +n9KtSeAbnXlFN/rHdK7ZeP/PTSjHa+6i1awGZxhwdVsERJy/2xwZzh3uMLS2ZhXM +Tuh1D5GzlUlkMP8K23rfaXnaOXkwYxHFGi23NmxHGSqzA3TVVreWLqRSZJd/Ar67 +9Pl4S9p9f+Xkvq8tQANfoaTbjc//dpK8rjCKnwdWA3cL7eekq9sm4+lTmik9Bn2v +Bo+3/89Fr1FvlkuQvktJAoIBAQDNuc2r/9sthHZg1hOCFd5XmnMX/mXNPs+SDPRW +/VZBHjxGApz+CoZS7qk0q7f/vzYFTB6N3778f7RsgwrZYSD4I4jumvSFNFsxsHCY +K3O4kkd2YaFaZPwUYbbAcBr6nVnW/9b1aagEfWIMQ18FHLaQ6u2OfUOcNDGZEqwj +YqJmZr8plhWLeKP2c673j6g/ztnL0w77y3LnIuLjFGex17l1lQzbUgOPSKyoQj03 +d5eRoJv2aQTaOXaBzGrDtBDDd3BpXrriJEMqSZbZFRLM28jD+VuHjfHOZRUMy1hw +vZCifRrBYA6Frko7ZweRxIkcOwQsQjV/tkzVkg9FHrVhMKQTAoIBAQDJ2r+lR73d +va1JjWoXKe5qAWtprRyI8DpJM/G2/V/V3+RVOGgBeRlu6WDiMpMd9hFB6bAmX+1y +S17svw1f4DQskkTKi9EWBsWRnh2Pnd4q91TjKFsBuci8/EtAXb7C0KV5nEtasEUJ +klMmO1evAXMhn7VzmE3Ic/ttcQHxQZ+TC4G5dGsYcideJ5zOeEIATtFypDNG/0Bw +rvmBbIIylY2KwUAx3UexRgH1hRSecTzkokT39WJbefUg952h7yZXrrhb71AfWLTC +A5MJeArqPK6z/RMxDyvnk7xW326dtBBgqYyTOIHCANRB1kAG0xEyia/WI94uyNfH +YfIHglDFGIj5AoIBAEVVNEqeXPi3Jso1+7cgtaFijR1uAFMusvfu474ZfSNPFFMn ++E7pryFuC5qTsNxBTex1HesEmDIyu9TCSTq/sEPQfgqkMHpgDcfuRdQS+NogenMc +Livv0sDvuY6beYwy0Z9S89gbtqNkulGVtwVbCvBGLK+T6eBP+tMy5s66JC9Mu2pB +iZtKmj+p9zK5uKNgjChURj138I6TRFHxg4z9PiSxifa0ajy06nN+d3ElHfDXZxih +hiAhs53FDcpM+kVWEI2CfotOW1B6IpugrYhbHgtmE4HYxcCgcnqwYWsFiCQq84Ru +YhaNibkBXRy0Vt0rypk76xnSj4x+wCS0V76cjP8CggEAHXdoaJlLdzY8OLODHDSL +0D+6zWdu9fKTn6IMlBjyx4byjxo33JcwBkfdU8fsQABuzn9trnxsbjXgepD9Q9S3 +6RXFIwg8EooUh0hcql1yVDVc1/hJKLxVOHlgBtpogYnxzgnp2ihHO7l3l+orx6lf 
+hDYLR/+gwzVjK7vGe9CHmfChFFCRXbU0WANSWbWmdOMMoj6kGaYjYw+37pPHgdjh +G7NQSrcxwwgkOxIdS2/eYsXpaYURwabRCOn8wenmYABqe0k5GgpaAMSCz2wNs9n9 +6tpz1cKQNzMS2F+vhygFCAdYNRmXn5l9YssC97wSE52T5J/BzHSXQ0ziBwSYA92s +CQKCAQAFPujh1HhOBtn3FOT3I2jNSTv9OJsmAeiFrhVfIw+Ij8XzzUf0aV04Et/R +/EetirP6WjNQuJ5/YYVUFWj07vSl20YP7NtDGFUlvWugJUvQByidHt5DkmehBWax +cfp5LWwZ4W/wm4F/DtPkgEXgEwY/TMXHvhvN6+JaQPO7iemWL7qsRAPea0oDLkMm +0phT3hKgcnbyewH6GU53KQgr2hUzhgGOKibAo+4ud9lY6M/X1axCepetKMl78Cz9 +rK2MgJOhDr6Nu/K2bKL8Q3zSB1n1WRNaTVnH6wY4j/FpeQvVv+qTAbZhJm7cRT5m ++C7JCqJGg66liqIMq6YyYXK//Ddl +-----END PRIVATE KEY----- diff --git a/docker/ssl/dhparam.pem b/docker/ssl/dhparam.pem new file mode 100644 index 0000000..e0f2ebb --- /dev/null +++ b/docker/ssl/dhparam.pem @@ -0,0 +1,8 @@ +-----BEGIN DH PARAMETERS----- +MIIBCAKCAQEAquN8NRcSdLOM9RiumqWH8Jw3CGVR/eQQeq+jvT3zpxlUQPAMExQb +MRCspm1oRgDWGvch3Z4zfMmBZyzKJA4BDTh4USzcE5zvnx8aUcUPZPQpwSicKgzb +QGnl0Xf/75GAWrwhxn8GNyMP29wrpcd1Qg8fEQ3HAW1fCd9girKMKY9aBaHli/h2 +R9Rd/KTbeqN88aoMjUvZHooIIZXu0A+kyulOajYQO4k3Sp6CBqv0FFcoLQnYNH13 +kMUE5qJ68U732HybTw8sofTCOxKcCfM2kVP7dVoF3prlGjUw3z3l3STY8vuTdq0B +R7PslkoQHNmqcL+2gouoWP3GI+IeRzGSSwIBAg== +-----END DH PARAMETERS----- diff --git a/docker/templates/smtp-notifier.tmpl b/docker/templates/smtp-notifier.tmpl new file mode 100644 index 0000000..64caa94 --- /dev/null +++ b/docker/templates/smtp-notifier.tmpl @@ -0,0 +1,8 @@ +To: {{range $index, $v := .To}}{{if $index}},{{end}}{{$v}}{{end}} +From: {{.From}} +Subject: {{.Subject}} +{{.Header}} +You have a new message: +{{.Content}} +{{.Footer}} + diff --git a/docker/templates/users.tmpl b/docker/templates/users.tmpl new file mode 100644 index 0000000..642dae7 --- /dev/null +++ b/docker/templates/users.tmpl @@ -0,0 +1,13 @@ +Dear {{.User}}, + +We have received a request to reset your password for your account on {{.Host}}. To proceed with resetting your password, please click on the link below: + +{{.Content}} + +If you did not initiate this request, please disregard this message and your password will remain unchanged. + +Thank you for using {{.Host}}. 
+ +Best regards, + +{{.Footer}} diff --git a/docker/vernemq/Dockerfile b/docker/vernemq/Dockerfile new file mode 100644 index 0000000..76152b1 --- /dev/null +++ b/docker/vernemq/Dockerfile @@ -0,0 +1,56 @@ +# Copyright (c) Abstract Machines +# SPDX-License-Identifier: Apache-2.0 + +# Builder +FROM erlang:25.3.2.8-alpine AS builder +RUN apk add --update git build-base bsd-compat-headers openssl-dev snappy-dev curl \ + && git clone -b 1.13.0 https://github.com/vernemq/vernemq \ + && cd vernemq \ + && make -j 16 rel + +# Executor +FROM alpine:3.19 + +COPY --from=builder /vernemq/_build/default/rel / + +RUN apk --no-cache --update --available upgrade && \ + apk add --no-cache ncurses-libs openssl libstdc++ jq curl bash snappy-dev && \ + addgroup --gid 10000 vernemq && \ + adduser --uid 10000 -H -D -G vernemq -h /vernemq vernemq && \ + install -d -o vernemq -g vernemq /vernemq + +# Defaults +ENV DOCKER_VERNEMQ_KUBERNETES_LABEL_SELECTOR="app=vernemq" \ + DOCKER_VERNEMQ_LOG__CONSOLE=console \ + PATH="/vernemq/bin:$PATH" \ + VERNEMQ_VERSION="1.13.0" + +WORKDIR /vernemq + +COPY --chown=10000:10000 bin/vernemq.sh /usr/sbin/start_vernemq +COPY --chown=10000:10000 files/vm.args /vernemq/etc/vm.args + +RUN chown -R 10000:10000 /vernemq && \ + ln -s /vernemq/etc /etc/vernemq && \ + ln -s /vernemq/data /var/lib/vernemq && \ + ln -s /vernemq/log /var/log/vernemq + +# Ports +# 1883 MQTT +# 8883 MQTT/SSL +# 8080 MQTT WebSockets +# 44053 VerneMQ Message Distribution +# 4369 EPMD - Erlang Port Mapper Daemon +# 8888 Health, API, Prometheus Metrics +# 9100 9101 9102 9103 9104 9105 9106 9107 9108 9109 Specific Distributed Erlang Port Range + +EXPOSE 1883 8883 8080 44053 4369 8888 \ + 9100 9101 9102 9103 9104 9105 9106 9107 9108 9109 + + +VOLUME ["/vernemq/log", "/vernemq/data", "/vernemq/etc"] + +HEALTHCHECK CMD vernemq ping | grep -q pong + +USER vernemq +CMD ["start_vernemq"] \ No newline at end of file diff --git a/docker/vernemq/bin/vernemq.sh b/docker/vernemq/bin/vernemq.sh new file mode 100755 index 0000000..4c990da --- /dev/null +++ b/docker/vernemq/bin/vernemq.sh @@ -0,0 +1,352 @@ +#!/usr/bin/env sh + +NET_INTERFACE=$(route | grep '^default' | grep -o '[^ ]*$') +NET_INTERFACE=${DOCKER_NET_INTERFACE:-${NET_INTERFACE}} +IP_ADDRESS=$(ip -4 addr show ${NET_INTERFACE} | grep -oE '[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}' | sed -e "s/^[[:space:]]*//" | head -n 1) +IP_ADDRESS=${DOCKER_IP_ADDRESS:-${IP_ADDRESS}} + +VERNEMQ_ETC_DIR="/vernemq/etc" +VERNEMQ_VM_ARGS_FILE="${VERNEMQ_ETC_DIR}/vm.args" +VERNEMQ_CONF_FILE="${VERNEMQ_ETC_DIR}/vernemq.conf" +VERNEMQ_CONF_LOCAL_FILE="${VERNEMQ_ETC_DIR}/vernemq.conf.local" + +SECRETS_KUBERNETES_DIR="/var/run/secrets/kubernetes.io/serviceaccount" + +# Function to check istio readiness +istio_health() { + cmd=$(curl -s http://localhost:15021/healthz/ready > /dev/null) + status=$? + return $status +} + +# Ensure we have all files and needed directory write permissions +if [ ! -d ${VERNEMQ_ETC_DIR} ]; then + echo "Configuration directory at ${VERNEMQ_ETC_DIR} does not exist, exiting" >&2 + exit 1 +fi +if [ ! -f ${VERNEMQ_VM_ARGS_FILE} ]; then + echo "ls -l ${VERNEMQ_ETC_DIR}" + ls -l ${VERNEMQ_ETC_DIR} + echo "###" >&2 + echo "### Configuration file ${VERNEMQ_VM_ARGS_FILE} does not exist, exiting" >&2 + echo "###" >&2 + exit 1 +fi +if [ ! 
-w ${VERNEMQ_VM_ARGS_FILE} ]; then + echo "# whoami" + whoami + echo "# ls -l ${VERNEMQ_ETC_DIR}" + ls -l ${VERNEMQ_ETC_DIR} + echo "###" >&2 + echo "### Configuration file ${VERNEMQ_VM_ARGS_FILE} exists, but there are no write permissions! Exiting." >&2 + echo "###" >&2 + exit 1 +fi +if [ ! -s ${VERNEMQ_VM_ARGS_FILE} ]; then + echo "ls -l ${VERNEMQ_ETC_DIR}" + ls -l ${VERNEMQ_ETC_DIR} + echo "###" >&2 + echo "### Configuration file ${VERNEMQ_VM_ARGS_FILE} is empty! This will not work." >&2 + echo "### Exiting now." >&2 + echo "###" >&2 + exit 1 +fi + +# Ensure the Erlang node name is set correctly +if env | grep "DOCKER_VERNEMQ_NODENAME" -q; then + sed -i.bak -r "s/-name VerneMQ@.+/-name VerneMQ@${DOCKER_VERNEMQ_NODENAME}/" ${VERNEMQ_VM_ARGS_FILE} +else + if [ -n "$DOCKER_VERNEMQ_SWARM" ]; then + NODENAME=$(hostname -i) + sed -i.bak -r "s/VerneMQ@.+/VerneMQ@${NODENAME}/" ${VERNEMQ_VM_ARGS_FILE} + else + sed -i.bak -r "s/-name VerneMQ@.+/-name VerneMQ@${IP_ADDRESS}/" ${VERNEMQ_VM_ARGS_FILE} + fi +fi + +if env | grep "DOCKER_VERNEMQ_DISCOVERY_NODE" -q; then + discovery_node=$DOCKER_VERNEMQ_DISCOVERY_NODE + if [ -n "$DOCKER_VERNEMQ_SWARM" ]; then + tmp='' + while [[ -z "$tmp" ]]; do + tmp=$(getent hosts tasks.$discovery_node | awk '{print $1}' | head -n 1) + sleep 1 + done + discovery_node=$tmp + fi + if [ -n "$DOCKER_VERNEMQ_COMPOSE" ]; then + tmp='' + while [[ -z "$tmp" ]]; do + tmp=$(getent hosts $discovery_node | awk '{print $1}' | head -n 1) + sleep 1 + done + discovery_node=$tmp + fi + + sed -i.bak -r "/-eval.+/d" ${VERNEMQ_VM_ARGS_FILE} + echo "-eval \"vmq_server_cmd:node_join('VerneMQ@$discovery_node')\"" >> ${VERNEMQ_VM_ARGS_FILE} +fi + +# If you encounter "SSL certification error (subject name does not match the host name)", you may try to set DOCKER_VERNEMQ_KUBERNETES_INSECURE to "1". +insecure="" +if env | grep "DOCKER_VERNEMQ_KUBERNETES_INSECURE" -q; then + echo "Using curl with \"--insecure\" argument to access kubernetes API without matching SSL certificate" + insecure="--insecure" +fi + +if env | grep "DOCKER_VERNEMQ_KUBERNETES_ISTIO_ENABLED" -q; then + istio_health + while [ $status != 0 ]; do + istio_health + sleep 1 + done + echo "Istio ready" +fi + +# Function to call a HTTP GET request on the given URL Path, using the hostname +# of the current k8s cluster name. 
Usage: "k8sCurlGet /my/path" +function k8sCurlGet () { + local urlPath=$1 + + local hostname="kubernetes.default.svc.${DOCKER_VERNEMQ_KUBERNETES_CLUSTER_NAME}" + local certsFile="${SECRETS_KUBERNETES_DIR}/ca.crt" + local token=$(cat ${SECRETS_KUBERNETES_DIR}/token) + local header="Authorization: Bearer ${token}" + local url="https://${hostname}/${urlPath}" + + curl -sS ${insecure} --cacert ${certsFile} -H "${header}" ${url} \ + || ( echo "### Error on accessing URL ${url}" ) +} + +DOCKER_VERNEMQ_KUBERNETES_CLUSTER_NAME=${DOCKER_VERNEMQ_KUBERNETES_CLUSTER_NAME:-cluster.local} +if [ -d "${SECRETS_KUBERNETES_DIR}" ] ; then + # Let's get the namespace if it isn't set + DOCKER_VERNEMQ_KUBERNETES_NAMESPACE=${DOCKER_VERNEMQ_KUBERNETES_NAMESPACE:-$(cat "${SECRETS_KUBERNETES_DIR}/namespace")} + + # Check the API access that will be needed in the TERM signal handler + podResponse=$(k8sCurlGet api/v1/namespaces/${DOCKER_VERNEMQ_KUBERNETES_NAMESPACE}/pods/$(hostname) ) + statefulSetName=$(echo ${podResponse} | jq -r '.metadata.ownerReferences[0].name') + statefulSetPath="apis/apps/v1/namespaces/${DOCKER_VERNEMQ_KUBERNETES_NAMESPACE}/statefulsets/${statefulSetName}" + statefulSetResponse=$(k8sCurlGet ${statefulSetPath} ) + isCodeForbidden=$(echo ${statefulSetResponse} | jq '.code == 403') + if [[ ${isCodeForbidden} == "true" ]]; then + echo "Permission error: Cannot access URL ${statefulSetPath}: $(echo ${statefulSetResponse} | jq '.reason,.code,.message')" + exit 1 + else + numReplicas=$(echo ${statefulSetResponse} | jq '.status.replicas') + echo "Permissions ok: Our pod $(hostname) belongs to StatefulSet ${statefulSetName} with ${numReplicas} replicas" + fi +fi + +# Set up kubernetes node discovery +start_join_cluster=0 +if env | grep "DOCKER_VERNEMQ_DISCOVERY_KUBERNETES" -q; then + # Let's set our nodename correctly + # https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.19/#list-pod-v1-core + podList=$(k8sCurlGet "api/v1/namespaces/${DOCKER_VERNEMQ_KUBERNETES_NAMESPACE}/pods?labelSelector=${DOCKER_VERNEMQ_KUBERNETES_LABEL_SELECTOR}") + VERNEMQ_KUBERNETES_SUBDOMAIN=${DOCKER_VERNEMQ_KUBERNETES_SUBDOMAIN:-$(echo ${podList} | jq '.items[0].spec.subdomain' | tr '\n' '"' | sed 's/"//g')} + if [[ $VERNEMQ_KUBERNETES_SUBDOMAIN == "null" ]]; then + VERNEMQ_KUBERNETES_HOSTNAME=${MY_POD_NAME}.${DOCKER_VERNEMQ_KUBERNETES_NAMESPACE}.svc.${DOCKER_VERNEMQ_KUBERNETES_CLUSTER_NAME} + else + VERNEMQ_KUBERNETES_HOSTNAME=${MY_POD_NAME}.${VERNEMQ_KUBERNETES_SUBDOMAIN}.${DOCKER_VERNEMQ_KUBERNETES_NAMESPACE}.svc.${DOCKER_VERNEMQ_KUBERNETES_CLUSTER_NAME} + fi + + sed -i.bak -r "s/VerneMQ@.+/VerneMQ@${VERNEMQ_KUBERNETES_HOSTNAME}/" ${VERNEMQ_VM_ARGS_FILE} + # Hack into K8S DNS resolution (temporarily) + kube_pod_names=$(echo ${podList} | jq '.items[].spec.hostname' | sed 's/"//g' | tr '\n' ' ' | sed 's/ *$//') + + for kube_pod_name in $kube_pod_names; do + if [[ $kube_pod_name == "null" ]]; then + echo "Kubernetes discovery selected, but no pods found. Maybe we're the first?" + echo "Anyway, we won't attempt to join any cluster." + break + fi + if [[ $kube_pod_name != $MY_POD_NAME ]]; then + discoveryHostname="${kube_pod_name}.${VERNEMQ_KUBERNETES_SUBDOMAIN}.${DOCKER_VERNEMQ_KUBERNETES_NAMESPACE}.svc.${DOCKER_VERNEMQ_KUBERNETES_CLUSTER_NAME}" + start_join_cluster=1 + echo "Will join an existing Kubernetes cluster with discovery node at ${discoveryHostname}" + echo "-eval \"vmq_server_cmd:node_join('VerneMQ@${discoveryHostname}')\"" >> ${VERNEMQ_VM_ARGS_FILE} + echo "Did I previously leave the cluster? 
If so, purging old state."
+            curl -fsSL http://${discoveryHostname}:8888/status.json >/dev/null 2>&1 ||
+                (echo "Can't download status.json, better to exit now" && exit 1)
+            curl -fsSL http://${discoveryHostname}:8888/status.json | grep -q ${VERNEMQ_KUBERNETES_HOSTNAME} ||
+                (echo "Cluster doesn't know about me, this means I've left previously. Purging old state..." && rm -rf /vernemq/data/*)
+            break
+        fi
+    done
+fi

+if [ -f "${VERNEMQ_CONF_LOCAL_FILE}" ]; then
+    cp "${VERNEMQ_CONF_LOCAL_FILE}" ${VERNEMQ_CONF_FILE}
+    sed -i -r "s/###IPADDRESS###/${IP_ADDRESS}/" ${VERNEMQ_CONF_FILE}
+else
+    sed -i '/########## Start ##########/,/########## End ##########/d' ${VERNEMQ_CONF_FILE}
+
+    echo "########## Start ##########" >> ${VERNEMQ_CONF_FILE}
+
+    # Translate DOCKER_VERNEMQ_* environment variables into vernemq.conf entries:
+    # strip the prefix, lowercase the key and replace '__' with '.', so e.g.
+    # DOCKER_VERNEMQ_LISTENER__TCP__DEFAULT becomes listener.tcp.default.
+    env | grep DOCKER_VERNEMQ | grep -v 'DISCOVERY_NODE\|KUBERNETES\|SWARM\|COMPOSE\|DOCKER_VERNEMQ_USER' | cut -c 16- | awk '{match($0,/^[A-Z0-9_]*/)}{print tolower(substr($0,RSTART,RLENGTH)) substr($0,RLENGTH+1)}' | sed 's/__/./g' >> ${VERNEMQ_CONF_FILE}
+
+    users_are_set=$(env | grep DOCKER_VERNEMQ_USER)
+    if [ ! -z "$users_are_set" ]; then
+        echo "vmq_passwd.password_file = /vernemq/etc/vmq.passwd" >> ${VERNEMQ_CONF_FILE}
+        touch /vernemq/etc/vmq.passwd
+    fi
+
+    for vernemq_user in $(env | grep DOCKER_VERNEMQ_USER); do
+        username=$(echo $vernemq_user | awk -F '=' '{ print $1 }' | sed 's/DOCKER_VERNEMQ_USER_//g' | tr '[:upper:]' '[:lower:]')
+        password=$(echo $vernemq_user | awk -F '=' '{ print $2 }')
+        /vernemq/bin/vmq-passwd /vernemq/etc/vmq.passwd $username <<EOF
+$password
+$password
+EOF
+    done
+
+    if [ -z "$DOCKER_VERNEMQ_ERLANG__DISTRIBUTION__PORT_RANGE__MINIMUM" ]; then
+        echo "erlang.distribution.port_range.minimum = 9100" >> ${VERNEMQ_CONF_FILE}
+    fi
+
+    if [ -z "$DOCKER_VERNEMQ_ERLANG__DISTRIBUTION__PORT_RANGE__MAXIMUM" ]; then
+        echo "erlang.distribution.port_range.maximum = 9109" >> ${VERNEMQ_CONF_FILE}
+    fi
+
+    if [ -z "$DOCKER_VERNEMQ_LISTENER__TCP__DEFAULT" ]; then
+        echo "listener.tcp.default = ${IP_ADDRESS}:1883" >> ${VERNEMQ_CONF_FILE}
+    fi
+
+    if [ -z "$DOCKER_VERNEMQ_LISTENER__WS__DEFAULT" ]; then
+        echo "listener.ws.default = ${IP_ADDRESS}:8080" >> ${VERNEMQ_CONF_FILE}
+    fi
+
+    if [ -z "$DOCKER_VERNEMQ_LISTENER__VMQ__CLUSTERING" ]; then
+        echo "listener.vmq.clustering = ${IP_ADDRESS}:44053" >> ${VERNEMQ_CONF_FILE}
+    fi
+
+    if [ -z "$DOCKER_VERNEMQ_LISTENER__HTTP__METRICS" ]; then
+        echo "listener.http.metrics = ${IP_ADDRESS}:8888" >> ${VERNEMQ_CONF_FILE}
+    fi
+
+    echo "########## End ##########" >> ${VERNEMQ_CONF_FILE}
+fi

+if [ ! -z "$DOCKER_VERNEMQ_ERLANG__MAX_PORTS" ]; then
+    sed -i.bak -r "s/\+Q.+/\+Q ${DOCKER_VERNEMQ_ERLANG__MAX_PORTS}/" ${VERNEMQ_VM_ARGS_FILE}
+fi

+if [ ! -z "$DOCKER_VERNEMQ_ERLANG__PROCESS_LIMIT" ]; then
+    sed -i.bak -r "s/\+P.+/\+P ${DOCKER_VERNEMQ_ERLANG__PROCESS_LIMIT}/" ${VERNEMQ_VM_ARGS_FILE}
+fi

+if [ ! -z "$DOCKER_VERNEMQ_ERLANG__MAX_ETS_TABLES" ]; then
+    sed -i.bak -r "s/\+e.+/\+e ${DOCKER_VERNEMQ_ERLANG__MAX_ETS_TABLES}/" ${VERNEMQ_VM_ARGS_FILE}
+fi

+if [ ! -z "$DOCKER_VERNEMQ_ERLANG__DISTRIBUTION_BUFFER_SIZE" ]; then
+    sed -i.bak -r "s/\+zdbbl.+/\+zdbbl ${DOCKER_VERNEMQ_ERLANG__DISTRIBUTION_BUFFER_SIZE}/" ${VERNEMQ_VM_ARGS_FILE}
+fi

+# Check the generated configuration file; grep exits with 1 when it finds no
+# "error" lines, so any other status indicates a configuration problem.
+/vernemq/bin/vernemq config generate 2>&1 > /dev/null | tee /tmp/config.out | grep error

+if [ $? -ne 1 ]; then
+    echo "configuration error, exit"
+    cat /tmp/config.out
+    exit 1
+fi

+pid=0

+# SIGUSR1-handler
+siguser1_handler() {
+    echo "stopped"
+}

+# SIGTERM-handler
+sigterm_handler() {
+    if [ $pid -ne 0 ]; then
+        if [ -d "${SECRETS_KUBERNETES_DIR}" ] ; then
+            # This will stop the VerneMQ process, but first drain the node from all existing client sessions (-k).
+            if [ -n "$VERNEMQ_KUBERNETES_HOSTNAME" ]; then
+                terminating_node_name=VerneMQ@$VERNEMQ_KUBERNETES_HOSTNAME
+            else
+                terminating_node_name=VerneMQ@$IP_ADDRESS
+            fi
+            podList=$(k8sCurlGet "api/v1/namespaces/${DOCKER_VERNEMQ_KUBERNETES_NAMESPACE}/pods?labelSelector=${DOCKER_VERNEMQ_KUBERNETES_LABEL_SELECTOR}")
+            kube_pod_names=$(echo ${podList} | jq '.items[].spec.hostname' | sed 's/"//g' | tr '\n' ' ' | sed 's/ *$//')
+            if [ "$kube_pod_names" = "$MY_POD_NAME" ]; then
+                echo "I'm the only pod remaining. Not performing leave and/or state purge."
+                /vernemq/bin/vmq-admin node stop >/dev/null
+            else
+                # https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.19/#read-pod-v1-core
+                podResponse=$(k8sCurlGet api/v1/namespaces/${DOCKER_VERNEMQ_KUBERNETES_NAMESPACE}/pods/$(hostname) )
+                statefulSetName=$(echo ${podResponse} | jq -r '.metadata.ownerReferences[0].name')

+                # https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.19/#-strong-read-operations-statefulset-v1-apps-strong-
+                statefulSetResponse=$(k8sCurlGet "apis/apps/v1/namespaces/${DOCKER_VERNEMQ_KUBERNETES_NAMESPACE}/statefulsets/${statefulSetName}" )

+                isCodeForbidden=$(echo ${statefulSetResponse} | jq '.code == 403')
+                if [[ ${isCodeForbidden} == "true" ]]; then
+                    echo "Permission error: Cannot access URL ${statefulSetPath}: $(echo ${statefulSetResponse} | jq '.reason,.code,.message')"
+                fi

+                reschedule=$(echo ${statefulSetResponse} | jq '.status.replicas == .status.readyReplicas')
+                scaled_down=$(echo ${statefulSetResponse} | jq '.status.currentReplicas == .status.updatedReplicas')

+                if [[ $reschedule == "true" ]]; then
+                    # Perhaps this is a scale-down?
+                    if [[ $scaled_down == "true" ]]; then
+                        echo "This seems to be a scale-down scenario. Leaving the cluster."
+                        /vernemq/bin/vmq-admin cluster leave node=${terminating_node_name} -k && rm -rf /vernemq/data/*
+                    else
+                        echo "Reschedule is true. Not leaving the cluster."
+                        /vernemq/bin/vmq-admin node stop >/dev/null
+                    fi
+                else
+                    echo "Reschedule is false. Leaving the cluster."
+                    /vernemq/bin/vmq-admin cluster leave node=${terminating_node_name} -k && rm -rf /vernemq/data/*
+                fi
+            fi
+        else
+            if [ -n "$DOCKER_VERNEMQ_SWARM" ]; then
+                terminating_node_name=VerneMQ@$(hostname -i)
+                # For Swarm we keep the old "cluster leave" approach for now.
+                echo "Swarm node is leaving the cluster."
+                /vernemq/bin/vmq-admin cluster leave node=${terminating_node_name} -k && rm -rf /vernemq/data/*
+            else
+                # In non-k8s mode: stop the VerneMQ node gracefully.
+                /vernemq/bin/vmq-admin node stop >/dev/null
+            fi
+        fi
+        kill -s TERM ${pid}
+        WAITFOR_PID=${pid}
+        pid=0
+        wait ${WAITFOR_PID}
+    fi
+    exit 143; # 128 + 15 -- SIGTERM
+}

+if [ ! -s ${VERNEMQ_VM_ARGS_FILE} ]; then
+    echo "ls -l ${VERNEMQ_ETC_DIR}"
+    ls -l ${VERNEMQ_ETC_DIR}
+    echo "###" >&2
+    echo "### Configuration file ${VERNEMQ_VM_ARGS_FILE} is empty! This will not work." >&2
+    echo "### Exiting now." >&2
+    echo "###" >&2
+    exit 1
+fi

+# Setup OS signal handlers
+trap 'siguser1_handler' SIGUSR1
+trap 'sigterm_handler' SIGTERM

+# Start VerneMQ
+/vernemq/bin/vernemq console -noshell -noinput $@ &
+pid=$!
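+# Note on the startup pattern above (illustrative): the broker runs in the
+# background while this script remains PID 1 and blocks in `wait` below, so a
+# SIGTERM from the container runtime is delivered to sigterm_handler, which can
+# deregister the node from the cluster before the Erlang VM stops. A minimal
+# sketch of the same trap-and-wait pattern:
+#
+#     trap 'kill -s TERM "$pid"; wait "$pid"' TERM
+#     long_running_command &
+#     pid=$!
+#     wait "$pid"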
+if [ $start_join_cluster -eq 1 ]; then + mkdir -p /var/log/vernemq/log + join_cluster > /var/log/vernemq/log/join_cluster.log & +fi +if [ -n "$API_KEY" ]; then + sleep 10 && echo "Adding API_KEY..." && /vernemq/bin/vmq-admin api-key add key="${API_KEY:-DEFAULT}" + vmq-admin api-key show +fi +wait $pid diff --git a/docker/vernemq/files/vm.args b/docker/vernemq/files/vm.args new file mode 100644 index 0000000..afb3c02 --- /dev/null +++ b/docker/vernemq/files/vm.args @@ -0,0 +1,15 @@ ++P 512000 ++e 256000 +-env ERL_CRASH_DUMP /erl_crash.dump +-env ERL_FULLSWEEP_AFTER 0 ++Q 512000 ++A 64 +-setcookie vmq +-name VerneMQ@127.0.0.1 ++K true ++W w ++sbwt none ++sbwtdcpu none ++sbwtdio none +-smp enable ++zdbbl 32768 diff --git a/go.mod b/go.mod new file mode 100644 index 0000000..e9784bc --- /dev/null +++ b/go.mod @@ -0,0 +1,141 @@ +module github.com/absmach/mg-contrib + +go 1.22.5 + +require ( + github.com/0x6flab/namegenerator v1.4.0 + github.com/absmach/callhome v0.14.0 + github.com/absmach/magistrala v0.14.1-0.20240704203022-083e655bde82 + github.com/absmach/senml v1.0.5 + github.com/caarlos0/env/v10 v10.0.0 + github.com/eclipse/paho.mqtt.golang v1.4.3 + github.com/fiorix/go-smpp v0.0.0-20210403173735-2894b96e70ba + github.com/go-chi/chi/v5 v5.1.0 + github.com/go-kit/kit v0.13.0 + github.com/go-redis/redis/v8 v8.11.5 + github.com/gocql/gocql v1.6.0 + github.com/gofrs/uuid v4.4.0+incompatible + github.com/gopcua/opcua v0.1.6 + github.com/hashicorp/vault/api v1.14.0 + github.com/hashicorp/vault/api/auth/approle v0.7.0 + github.com/influxdata/influxdb-client-go/v2 v2.13.0 + github.com/jackc/pgerrcode v0.0.0-20240316143900-6e2875d9b438 + github.com/jackc/pgx/v5 v5.6.0 + github.com/jmoiron/sqlx v1.4.0 + github.com/mitchellh/mapstructure v1.5.0 + github.com/ory/dockertest/v3 v3.10.0 + github.com/pelletier/go-toml v1.9.5 + github.com/prometheus/client_golang v1.19.1 + github.com/rubenv/sql-migrate v1.6.1 + github.com/stretchr/testify v1.9.0 + go.mongodb.org/mongo-driver v1.15.0 + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.52.0 + go.opentelemetry.io/otel v1.27.0 + go.opentelemetry.io/otel/trace v1.27.0 + golang.org/x/sync v0.7.0 + google.golang.org/grpc v1.64.0 + gopkg.in/gomail.v2 v2.0.0-20160411212932-81ebce5c23df +) + +require ( + dario.cat/mergo v1.0.0 // indirect + github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 // indirect + github.com/Microsoft/go-winio v0.6.1 // indirect + github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5 // indirect + github.com/apapsch/go-jsonmerge/v2 v2.0.0 // indirect + github.com/beorn7/perks v1.0.1 // indirect + github.com/cenkalti/backoff/v3 v3.2.2 // indirect + github.com/cenkalti/backoff/v4 v4.3.0 // indirect + github.com/cespare/xxhash/v2 v2.3.0 // indirect + github.com/containerd/continuity v0.4.3 // indirect + github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect + github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect + github.com/docker/cli v26.0.0+incompatible // indirect + github.com/docker/docker v26.0.2+incompatible // indirect + github.com/docker/go-connections v0.5.0 // indirect + github.com/docker/go-units v0.5.0 // indirect + github.com/felixge/httpsnoop v1.0.4 // indirect + github.com/fxamacker/cbor/v2 v2.6.0 // indirect + github.com/go-gorp/gorp/v3 v3.1.0 // indirect + github.com/go-jose/go-jose/v4 v4.0.1 // indirect + github.com/go-kit/log v0.2.1 // indirect + github.com/go-logfmt/logfmt v0.6.0 // indirect + github.com/go-logr/logr v1.4.1 // indirect + 
github.com/go-logr/stdr v1.2.2 // indirect + github.com/gogo/protobuf v1.3.2 // indirect + github.com/golang/snappy v0.0.4 // indirect + github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect + github.com/google/uuid v1.6.0 // indirect + github.com/gopherjs/gopherjs v1.17.2 // indirect + github.com/gorilla/websocket v1.5.3 // indirect + github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 // indirect + github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed // indirect + github.com/hashicorp/errwrap v1.1.0 // indirect + github.com/hashicorp/go-cleanhttp v0.5.2 // indirect + github.com/hashicorp/go-multierror v1.1.1 // indirect + github.com/hashicorp/go-retryablehttp v0.7.7 // indirect + github.com/hashicorp/go-rootcerts v1.0.2 // indirect + github.com/hashicorp/go-secure-stdlib/parseutil v0.1.8 // indirect + github.com/hashicorp/go-secure-stdlib/strutil v0.1.2 // indirect + github.com/hashicorp/go-sockaddr v1.0.6 // indirect + github.com/hashicorp/hcl v1.0.0 // indirect + github.com/influxdata/line-protocol v0.0.0-20210922203350-b1ad95c89adf // indirect + github.com/jackc/pgio v1.0.0 // indirect + github.com/jackc/pgpassfile v1.0.0 // indirect + github.com/jackc/pgservicefile v0.0.0-20231201235250-de7065d80cb9 // indirect + github.com/jackc/pgtype v1.14.3 // indirect + github.com/jackc/puddle/v2 v2.2.1 // indirect + github.com/jtolds/gls v4.20.0+incompatible // indirect + github.com/klauspost/compress v1.17.8 // indirect + github.com/mitchellh/go-homedir v1.1.0 // indirect + github.com/moby/docker-image-spec v1.3.1 // indirect + github.com/moby/term v0.5.0 // indirect + github.com/montanaflynn/stats v0.7.1 // indirect + github.com/nats-io/nats.go v1.36.0 // indirect + github.com/nats-io/nkeys v0.4.7 // indirect + github.com/nats-io/nuid v1.0.1 // indirect + github.com/oapi-codegen/runtime v1.1.1 // indirect + github.com/oklog/ulid/v2 v2.1.0 // indirect + github.com/opencontainers/go-digest v1.0.0 // indirect + github.com/opencontainers/image-spec v1.1.0 // indirect + github.com/opencontainers/runc v1.1.12 // indirect + github.com/pkg/errors v0.9.1 // indirect + github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect + github.com/prometheus/client_model v0.6.1 // indirect + github.com/prometheus/common v0.52.2 // indirect + github.com/prometheus/procfs v0.13.0 // indirect + github.com/rabbitmq/amqp091-go v1.10.0 // indirect + github.com/ryanuber/go-glob v1.0.0 // indirect + github.com/sirupsen/logrus v1.9.3 // indirect + github.com/smarty/assertions v1.16.0 // indirect + github.com/stretchr/objx v0.5.2 // indirect + github.com/x448/float16 v0.8.4 // indirect + github.com/xdg-go/pbkdf2 v1.0.0 // indirect + github.com/xdg-go/scram v1.1.2 // indirect + github.com/xdg-go/stringprep v1.0.4 // indirect + github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect + github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect + github.com/xeipuuv/gojsonschema v1.2.0 // indirect + github.com/youmark/pkcs8 v0.0.0-20201027041543-1326539a0a0a // indirect + go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.52.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.27.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.27.0 // indirect + go.opentelemetry.io/otel/metric v1.27.0 // indirect + go.opentelemetry.io/otel/sdk v1.27.0 // indirect + go.opentelemetry.io/proto/otlp v1.2.0 // indirect + golang.org/x/crypto v0.24.0 // indirect + 
golang.org/x/mod v0.18.0 // indirect + golang.org/x/net v0.26.0 // indirect + golang.org/x/sys v0.21.0 // indirect + golang.org/x/text v0.16.0 // indirect + golang.org/x/time v0.5.0 // indirect + golang.org/x/tools v0.22.0 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20240520151616-dc85e6b867a5 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240520151616-dc85e6b867a5 // indirect + google.golang.org/protobuf v1.34.2 // indirect + gopkg.in/alexcesaro/quotedprintable.v3 v3.0.0-20150716171945-2caba252f4dc // indirect + gopkg.in/inf.v0 v0.9.1 // indirect + gopkg.in/yaml.v2 v2.4.0 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect + moul.io/http2curl v1.0.0 // indirect +) diff --git a/go.sum b/go.sum new file mode 100644 index 0000000..96cb44a --- /dev/null +++ b/go.sum @@ -0,0 +1,571 @@ +dario.cat/mergo v1.0.0 h1:AGCNq9Evsj31mOgNPcLyXc+4PNABt905YmuqPYYpBWk= +dario.cat/mergo v1.0.0/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk= +filippo.io/edwards25519 v1.1.0 h1:FNf4tywRC1HmFuKW5xopWpigGjJKiJSV0Cqo0cJWDaA= +filippo.io/edwards25519 v1.1.0/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4= +github.com/0x6flab/namegenerator v1.4.0 h1:QnkI813SZsI/hYnKD9pg3mkIlcYzCx0N4hnzb0YYME4= +github.com/0x6flab/namegenerator v1.4.0/go.mod h1:2sQzXuS6dX/KEwWtB6GJU729O3m4gBdD5oAU8hd0SyY= +github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 h1:L/gRVlceqvL25UVaW/CKtUDjefjrs0SPonmDGUVOYP0= +github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/Masterminds/semver/v3 v3.1.1/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs= +github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow= +github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM= +github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5 h1:TngWCqHvy9oXAN6lEVMRuU21PR1EtLVZJmdB18Gu3Rw= +github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5/go.mod h1:lmUJ/7eu/Q8D7ML55dXQrVaamCz2vxCfdQBasLZfHKk= +github.com/RaveNoX/go-jsoncommentstrip v1.0.0/go.mod h1:78ihd09MekBnJnxpICcwzCMzGrKSKYe4AqU6PDYYpjk= +github.com/VividCortex/gohistogram v1.0.0 h1:6+hBz+qvs0JOrrNhhmR7lFxo5sINxBCGXrdtl/UvroE= +github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g= +github.com/absmach/callhome v0.14.0 h1:zB4tIZJ1YUmZ1VGHFPfMA/Lo6/Mv19y2dvoOiXj2BWs= +github.com/absmach/callhome v0.14.0/go.mod h1:l12UJOfibK4Muvg/AbupHuquNV9qSz/ROdTEPg7f2Vk= +github.com/absmach/magistrala v0.14.1-0.20240704203022-083e655bde82 h1:8EQqSe0oj88smyZcHwndRw1Ye4v6xQs35Nkvs5Mi9lI= +github.com/absmach/magistrala v0.14.1-0.20240704203022-083e655bde82/go.mod h1:Nk3rTAEyI8S83eibXnGEfnRrTT5dyb+v2nJgXfeMGLU= +github.com/absmach/mproxy v0.4.3-0.20240430090627-27dad4c91c6c h1:wGtfVk3knDUsrUoyOxfyDPK3lJB6Yc6BMePf62UaTOo= +github.com/absmach/mproxy v0.4.3-0.20240430090627-27dad4c91c6c/go.mod h1:Nevip6o8u5Zx7l3LTtN8BwlCI5h5KpsnI9YnAxF5RT8= +github.com/absmach/senml v1.0.5 h1:zNPRYpGr2Wsb8brAusz8DIfFqemy1a2dNbmMnegY3GE= +github.com/absmach/senml v1.0.5/go.mod h1:NDEjk3O4V4YYu9Bs2/+t/AZ/F+0wu05ikgecp+/FsSU= +github.com/apapsch/go-jsonmerge/v2 v2.0.0 h1:axGnT1gRIfimI7gJifB699GoE/oq+F2MU7Dml6nw9rQ= +github.com/apapsch/go-jsonmerge/v2 v2.0.0/go.mod h1:lvDnEdqiQrp0O42VQGgmlKpxL1AP2+08jFMw88y4klk= +github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= +github.com/beorn7/perks 
v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/bitly/go-hostpool v0.0.0-20171023180738-a3a6125de932/go.mod h1:NOuUCSz6Q9T7+igc/hlvDOUdtWKryOrtFyIVABv/p7k= +github.com/bitly/go-hostpool v0.1.0 h1:XKmsF6k5el6xHG3WPJ8U0Ku/ye7njX7W81Ng7O2ioR0= +github.com/bitly/go-hostpool v0.1.0/go.mod h1:4gOCgp6+NZnVqlKyZ/iBZFTAJKembaVENUpMkpg42fw= +github.com/bmatcuk/doublestar v1.1.1/go.mod h1:UD6OnuiIn0yFxxA2le/rnRU1G4RaI4UvFv1sNto9p6w= +github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869 h1:DDGfHa7BWjL4YnC6+E63dPcxHo2sUxDIu8g3QgEJdRY= +github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4= +github.com/caarlos0/env/v10 v10.0.0 h1:yIHUBZGsyqCnpTkbjk8asUlx6RFhhEs+h7TOBdgdzXA= +github.com/caarlos0/env/v10 v10.0.0/go.mod h1:ZfulV76NvVPw3tm591U4SwL3Xx9ldzBP9aGxzeN7G18= +github.com/caarlos0/env/v11 v11.0.0 h1:ZIlkOjuL3xoZS0kmUJlF74j2Qj8GMOq3CDLX/Viak8Q= +github.com/caarlos0/env/v11 v11.0.0/go.mod h1:2RC3HQu8BQqtEK3V4iHPxj0jOdWdbPpWJ6pOueeU1xM= +github.com/cenkalti/backoff/v3 v3.2.2 h1:cfUAAO3yvKMYKPrvhDuHSwQnhZNk/RMHKdZqKTxfm6M= +github.com/cenkalti/backoff/v3 v3.2.2/go.mod h1:cIeZDE3IrqwwJl6VUwCN6trj1oXrTS4rc0ij+ULvLYs= +github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8= +github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= +github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= +github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ= +github.com/containerd/continuity v0.4.3 h1:6HVkalIp+2u1ZLH1J/pYX2oBVXlJZvh1X1A7bEZ9Su8= +github.com/containerd/continuity v0.4.3/go.mod h1:F6PTNCKepoxEaXLQp3wDAjygEnImnZ/7o4JzpodfroQ= +github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= +github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= +github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY= +github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/decred/dcrd/dcrec/secp256k1/v4 v4.3.0 h1:rpfIENRNNilwHwZeG5+P150SMrnNEcHYvcCuK6dPZSg= +github.com/decred/dcrd/dcrec/secp256k1/v4 v4.3.0/go.mod h1:v57UDF4pDQJcEfFUCRop3lJL149eHGSe9Jvczhzjo/0= +github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78= +github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc= +github.com/docker/cli v26.0.0+incompatible h1:90BKrx1a1HKYpSnnBFR6AgDq/FqkHxwlUyzJVPxD30I= +github.com/docker/cli v26.0.0+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= +github.com/docker/docker 
v26.0.2+incompatible h1:yGVmKUFGgcxA6PXWAokO0sQL22BrQ67cgVjko8tGdXE= +github.com/docker/docker v26.0.2+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c= +github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc= +github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= +github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/eclipse/paho.mqtt.golang v1.4.3 h1:2kwcUGn8seMUfWndX0hGbvH8r7crgcJguQNCyp70xik= +github.com/eclipse/paho.mqtt.golang v1.4.3/go.mod h1:CSYvoAlsMkhYOXh/oKyxa8EcBci6dVkLCbo5tTC1RIE= +github.com/fatih/color v1.17.0 h1:GlRw1BRJxkpqUCBKzKOw098ed57fEsKeNjpTe3cSjK4= +github.com/fatih/color v1.17.0/go.mod h1:YZ7TlrGPkiz6ku9fK3TLD/pl3CpsiFyu8N92HLgmosI= +github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= +github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= +github.com/fiorix/go-smpp v0.0.0-20210403173735-2894b96e70ba h1:vBqABUa2HUSc6tj22Tw+ZMVGHuBzKtljM38kbRanmrM= +github.com/fiorix/go-smpp v0.0.0-20210403173735-2894b96e70ba/go.mod h1:VfKFK7fGeCP81xEhbrOqUEh45n73Yy6jaPWwTVbxprI= +github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= +github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= +github.com/fxamacker/cbor/v2 v2.6.0 h1:sU6J2usfADwWlYDAFhZBQ6TnLFBHxgesMrQfQgk1tWA= +github.com/fxamacker/cbor/v2 v2.6.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ= +github.com/go-chi/chi/v5 v5.1.0 h1:acVI1TYaD+hhedDJ3r54HyA6sExp3HfXq7QWEEY/xMw= +github.com/go-chi/chi/v5 v5.1.0/go.mod h1:DslCQbL2OYiznFReuXYUmQ2hGd1aDpCnlMNITLSKoi8= +github.com/go-gorp/gorp/v3 v3.1.0 h1:ItKF/Vbuj31dmV4jxA1qblpSwkl9g1typ24xoe70IGs= +github.com/go-gorp/gorp/v3 v3.1.0/go.mod h1:dLEjIyyRNiXvNZ8PSmzpt1GsWAUK8kjVhEpjH8TixEw= +github.com/go-jose/go-jose/v4 v4.0.1 h1:QVEPDE3OluqXBQZDcnNvQrInro2h0e4eqNbnZSWqS6U= +github.com/go-jose/go-jose/v4 v4.0.1/go.mod h1:WVf9LFMHh/QVrmqrOfqun0C45tMe3RoiKJMPvgWwLfY= +github.com/go-kit/kit v0.13.0 h1:OoneCcHKHQ03LfBpoQCUfCluwd2Vt3ohz+kvbJneZAU= +github.com/go-kit/kit v0.13.0/go.mod h1:phqEHMMUbyrCFCTgH48JueqrM3md2HcAZ8N3XE4FKDg= +github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= +github.com/go-kit/log v0.2.1 h1:MRVx0/zhvdseW+Gza6N9rVzU/IVzaeE1SFI4raAhmBU= +github.com/go-kit/log v0.2.1/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0= +github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= +github.com/go-logfmt/logfmt v0.6.0 h1:wGYYu3uicYdqXVgoYbvnkrPVXkuLM1p1ifugDMEdRi4= +github.com/go-logfmt/logfmt v0.6.0/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= +github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ= +github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= +github.com/go-redis/redis/v8 v8.11.5 h1:AcZZR7igkdvfVmQTPnu9WE37LRrO/YrBH5zWyjDC0oI= +github.com/go-redis/redis/v8 v8.11.5/go.mod h1:gREzHqY1hg6oD9ngVRbLStwAWKhA0FEgq8Jd4h5lpwo= +github.com/go-sql-driver/mysql v1.8.1 h1:LedoTUt/eveggdHS9qUFC1EFSa8bU2+1pZjSRpvNJ1Y= 
+github.com/go-sql-driver/mysql v1.8.1/go.mod h1:wEBSXgmK//2ZFJyE+qWnIsVGmvmEKlqwuVSjsCm7DZg= +github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/go-test/deep v1.0.2 h1:onZX1rnHT3Wv6cqNgYyFOOlgVKJrksuCMCRvJStbMYw= +github.com/go-test/deep v1.0.2/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA= +github.com/goccy/go-json v0.10.2 h1:CrxCmQqYDkv1z7lO7Wbh2HN93uovUHgrECaO5ZrCXAU= +github.com/goccy/go-json v0.10.2/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I= +github.com/gocql/gocql v1.6.0 h1:IdFdOTbnpbd0pDhl4REKQDM+Q0SzKXQ1Yh+YZZ8T/qU= +github.com/gocql/gocql v1.6.0/go.mod h1:3gM2c4D3AnkISwBxGnMMsS8Oy4y2lhbPRsH4xnJrHG8= +github.com/gofrs/uuid v4.0.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= +github.com/gofrs/uuid v4.4.0+incompatible h1:3qXRTX8/NbyulANqlc0lchS1gqAVxRgsuW1YrTJupqA= +github.com/gofrs/uuid v4.4.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= +github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= +github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= +github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= +github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4= +github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/gopcua/opcua v0.1.6 h1:B9SVRKQGzcWcwP2QPYN93Uku32+3wL+v5cgzBxE6V5I= +github.com/gopcua/opcua v0.1.6/go.mod h1:INwnDoRxmNWAt7+tzqxuGqQkSF2c1C69VAL0c2q6AcY= +github.com/gopherjs/gopherjs v1.17.2 h1:fQnZVsXk8uxXIStYb0N4bGk7jeyTalG/wsZjQ25dO0g= +github.com/gopherjs/gopherjs v1.17.2/go.mod h1:pRRIvn/QzFLrKfvEz3qUuEhtE/zLCWfreZ6J5gM2i+k= +github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aNNg= +github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 h1:bkypFPDjIYGfCYD5mRBvpqxfYX1YCS1PXdKYWi8FsN0= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0/go.mod h1:P+Lt/0by1T8bfcF3z737NnSbmxQAppXMRziHUxPOC8k= +github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed h1:5upAirOpQc1Q53c0bnx2ufif5kANL7bfZWcc6VJWJd8= +github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed/go.mod h1:tMWxXQ9wFIaZeTI9F+hmhFiGpFmhOHzyShyFUhRm0H4= +github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= +github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ= +github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= +github.com/hashicorp/go-hclog v1.6.3 h1:Qr2kF+eVWjTiYmU7Y31tYlP1h0q/X3Nl3tPGdaB11/k= 
+github.com/hashicorp/go-hclog v1.6.3/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= +github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= +github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= +github.com/hashicorp/go-retryablehttp v0.7.7 h1:C8hUCYzor8PIfXHa4UrZkU4VvK8o9ISHxT2Q8+VepXU= +github.com/hashicorp/go-retryablehttp v0.7.7/go.mod h1:pkQpWZeYWskR+D1tR2O5OcBFOxfA7DoAO6xtkuQnHTk= +github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc= +github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8= +github.com/hashicorp/go-secure-stdlib/parseutil v0.1.8 h1:iBt4Ew4XEGLfh6/bPk4rSYmuZJGizr6/x/AEizP0CQc= +github.com/hashicorp/go-secure-stdlib/parseutil v0.1.8/go.mod h1:aiJI+PIApBRQG7FZTEBx5GiiX+HbOHilUdNxUZi4eV0= +github.com/hashicorp/go-secure-stdlib/strutil v0.1.2 h1:kes8mmyCpxJsI7FTwtzRqEy9CdjCtrXrXGuOpxEA7Ts= +github.com/hashicorp/go-secure-stdlib/strutil v0.1.2/go.mod h1:Gou2R9+il93BqX25LAKCLuM+y9U2T4hlwvT1yprcna4= +github.com/hashicorp/go-sockaddr v1.0.6 h1:RSG8rKU28VTUTvEKghe5gIhIQpv8evvNpnDEyqO4u9I= +github.com/hashicorp/go-sockaddr v1.0.6/go.mod h1:uoUUmtwU7n9Dv3O4SNLeFvg0SxQ3lyjsj6+CCykpaxI= +github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= +github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= +github.com/hashicorp/vault/api v1.14.0 h1:Ah3CFLixD5jmjusOgm8grfN9M0d+Y8fVR2SW0K6pJLU= +github.com/hashicorp/vault/api v1.14.0/go.mod h1:pV9YLxBGSz+cItFDd8Ii4G17waWOQ32zVjMWHe/cOqk= +github.com/hashicorp/vault/api/auth/approle v0.7.0 h1:R5IRVuFA5JSdG3UdGVcGysi0StrL1lPmyJnrawiV0Ss= +github.com/hashicorp/vault/api/auth/approle v0.7.0/go.mod h1:B+WaC6VR+aSXiUxykpaPUoFiiZAhic53tDLbGjWZmRA= +github.com/influxdata/influxdb-client-go/v2 v2.13.0 h1:ioBbLmR5NMbAjP4UVA5r9b5xGjpABD7j65pI8kFphDM= +github.com/influxdata/influxdb-client-go/v2 v2.13.0/go.mod h1:k+spCbt9hcvqvUiz0sr5D8LolXHqAAOfPw9v/RIRHl4= +github.com/influxdata/line-protocol v0.0.0-20210922203350-b1ad95c89adf h1:7JTmneyiNEwVBOHSjoMxiWAqB992atOeepeFYegn5RU= +github.com/influxdata/line-protocol v0.0.0-20210922203350-b1ad95c89adf/go.mod h1:xaLFMmpvUxqXtVkUJfg9QmT88cDaCJ3ZKgdZ78oO8Qo= +github.com/jackc/chunkreader v1.0.0 h1:4s39bBR8ByfqH+DKm8rQA3E1LHZWB9XWcrz8fqaZbe0= +github.com/jackc/chunkreader v1.0.0/go.mod h1:RT6O25fNZIuasFJRyZ4R/Y2BbhasbmZXF9QQ7T3kePo= +github.com/jackc/chunkreader/v2 v2.0.0/go.mod h1:odVSm741yZoC3dpHEUXIqA9tQRhFrgOHwnPIn9lDKlk= +github.com/jackc/chunkreader/v2 v2.0.1 h1:i+RDz65UE+mmpjTfyz0MoVTnzeYxroil2G82ki7MGG8= +github.com/jackc/chunkreader/v2 v2.0.1/go.mod h1:odVSm741yZoC3dpHEUXIqA9tQRhFrgOHwnPIn9lDKlk= +github.com/jackc/pgconn v0.0.0-20190420214824-7e0022ef6ba3/go.mod h1:jkELnwuX+w9qN5YIfX0fl88Ehu4XC3keFuOJJk9pcnA= +github.com/jackc/pgconn v0.0.0-20190824142844-760dd75542eb/go.mod h1:lLjNuW/+OfW9/pnVKPazfWOgNfH2aPem8YQ7ilXGvJE= +github.com/jackc/pgconn v0.0.0-20190831204454-2fabfa3c18b7/go.mod h1:ZJKsE/KZfsUgOEh9hBm+xYTstcNHg7UPMVJqRfQxq4s= +github.com/jackc/pgconn v1.8.0/go.mod h1:1C2Pb36bGIP9QHGBYCjnyhqu7Rv3sGshaQUvmfGIB/o= +github.com/jackc/pgconn v1.9.0/go.mod h1:YctiPyvzfU11JFxoXokUOOKQXQmDMoJL9vJzHH8/2JY= +github.com/jackc/pgconn v1.9.1-0.20210724152538-d89c8390a530/go.mod h1:4z2w8XhRbP1hYxkpTuBjTS3ne3J48K83+u0zoyvg2pI= +github.com/jackc/pgconn v1.14.3 h1:bVoTr12EGANZz66nZPkMInAV/KHD2TxH9npjXXgiB3w= +github.com/jackc/pgconn v1.14.3/go.mod h1:RZbme4uasqzybK2RK5c65VsHxoyaml09lx3tXOcO/VM= 
+github.com/jackc/pgerrcode v0.0.0-20240316143900-6e2875d9b438 h1:Dj0L5fhJ9F82ZJyVOmBx6msDp/kfd1t9GRfny/mfJA0= +github.com/jackc/pgerrcode v0.0.0-20240316143900-6e2875d9b438/go.mod h1:a/s9Lp5W7n/DD0VrVoyJ00FbP2ytTPDVOivvn2bMlds= +github.com/jackc/pgio v1.0.0 h1:g12B9UwVnzGhueNavwioyEEpAmqMe1E/BN9ES+8ovkE= +github.com/jackc/pgio v1.0.0/go.mod h1:oP+2QK2wFfUWgr+gxjoBH9KGBb31Eio69xUb0w5bYf8= +github.com/jackc/pgmock v0.0.0-20190831213851-13a1b77aafa2/go.mod h1:fGZlG77KXmcq05nJLRkk0+p82V8B8Dw8KN2/V9c/OAE= +github.com/jackc/pgmock v0.0.0-20201204152224-4fe30f7445fd/go.mod h1:hrBW0Enj2AZTNpt/7Y5rr2xe/9Mn757Wtb2xeBzPv2c= +github.com/jackc/pgmock v0.0.0-20210724152146-4ad1a8207f65/go.mod h1:5R2h2EEX+qri8jOWMbJCtaPWkrrNc7OHwsp2TCqp7ak= +github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM= +github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg= +github.com/jackc/pgproto3 v1.1.0 h1:FYYE4yRw+AgI8wXIinMlNjBbp/UitDJwfj5LqqewP1A= +github.com/jackc/pgproto3 v1.1.0/go.mod h1:eR5FA3leWg7p9aeAqi37XOTgTIbkABlvcPB3E5rlc78= +github.com/jackc/pgproto3/v2 v2.0.0-alpha1.0.20190420180111-c116219b62db/go.mod h1:bhq50y+xrl9n5mRYyCBFKkpRVTLYJVWeCc+mEAI3yXA= +github.com/jackc/pgproto3/v2 v2.0.0-alpha1.0.20190609003834-432c2951c711/go.mod h1:uH0AWtUmuShn0bcesswc4aBTWGvw0cAxIJp+6OB//Wg= +github.com/jackc/pgproto3/v2 v2.0.0-rc3/go.mod h1:ryONWYqW6dqSg1Lw6vXNMXoBJhpzvWKnT95C46ckYeM= +github.com/jackc/pgproto3/v2 v2.0.0-rc3.0.20190831210041-4c03ce451f29/go.mod h1:ryONWYqW6dqSg1Lw6vXNMXoBJhpzvWKnT95C46ckYeM= +github.com/jackc/pgproto3/v2 v2.0.6/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA= +github.com/jackc/pgproto3/v2 v2.1.1/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA= +github.com/jackc/pgproto3/v2 v2.3.3 h1:1HLSx5H+tXR9pW3in3zaztoEwQYRC9SQaYUHjTSUOag= +github.com/jackc/pgproto3/v2 v2.3.3/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA= +github.com/jackc/pgservicefile v0.0.0-20200714003250-2b9c44734f2b/go.mod h1:vsD4gTJCa9TptPL8sPkXrLZ+hDuNrZCnj29CQpr4X1E= +github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM= +github.com/jackc/pgservicefile v0.0.0-20231201235250-de7065d80cb9 h1:L0QtFUgDarD7Fpv9jeVMgy/+Ec0mtnmYuImjTz6dtDA= +github.com/jackc/pgservicefile v0.0.0-20231201235250-de7065d80cb9/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM= +github.com/jackc/pgtype v0.0.0-20190421001408-4ed0de4755e0/go.mod h1:hdSHsc1V01CGwFsrv11mJRHWJ6aifDLfdV3aVjFF0zg= +github.com/jackc/pgtype v0.0.0-20190824184912-ab885b375b90/go.mod h1:KcahbBH1nCMSo2DXpzsoWOAfFkdEtEJpPbVLq8eE+mc= +github.com/jackc/pgtype v0.0.0-20190828014616-a8802b16cc59/go.mod h1:MWlu30kVJrUS8lot6TQqcg7mtthZ9T0EoIBFiJcmcyw= +github.com/jackc/pgtype v1.8.1-0.20210724151600-32e20a603178/go.mod h1:C516IlIV9NKqfsMCXTdChteoXmwgUceqaLfjg2e3NlM= +github.com/jackc/pgtype v1.14.0/go.mod h1:LUMuVrfsFfdKGLw+AFFVv6KtHOFMwRgDDzBt76IqCA4= +github.com/jackc/pgtype v1.14.3 h1:h6W9cPuHsRWQFTWUZMAKMgG5jSwQI0Zurzdvlx3Plus= +github.com/jackc/pgtype v1.14.3/go.mod h1:aKeozOde08iifGosdJpz9MBZonJOUJxqNpPBcMJTlVA= +github.com/jackc/pgx/v4 v4.0.0-20190420224344-cc3461e65d96/go.mod h1:mdxmSJJuR08CZQyj1PVQBHy9XOp5p8/SHH6a0psbY9Y= +github.com/jackc/pgx/v4 v4.0.0-20190421002000-1b8f0016e912/go.mod h1:no/Y67Jkk/9WuGR0JG/JseM9irFbnEPbuWV2EELPNuM= +github.com/jackc/pgx/v4 v4.0.0-pre1.0.20190824185557-6972a5742186/go.mod h1:X+GQnOEnf1dqHGpw7JmHqHc1NxDoalibchSk9/RWuDc= +github.com/jackc/pgx/v4 v4.12.1-0.20210724153913-640aa07df17c/go.mod 
h1:1QD0+tgSXP7iUjYm9C1NxKhny7lq6ee99u/z+IHFcgs= +github.com/jackc/pgx/v4 v4.18.2 h1:xVpYkNR5pk5bMCZGfClbO962UIqVABcAGt7ha1s/FeU= +github.com/jackc/pgx/v4 v4.18.2/go.mod h1:Ey4Oru5tH5sB6tV7hDmfWFahwF15Eb7DNXlRKx2CkVw= +github.com/jackc/pgx/v5 v5.6.0 h1:SWJzexBzPL5jb0GEsrPMLIsi/3jOo7RHlzTjcAeDrPY= +github.com/jackc/pgx/v5 v5.6.0/go.mod h1:DNZ/vlrUnhWCoFGxHAG8U2ljioxukquj7utPDgtQdTw= +github.com/jackc/puddle v0.0.0-20190413234325-e4ced69a3a2b/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= +github.com/jackc/puddle v0.0.0-20190608224051-11cab39313c9/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= +github.com/jackc/puddle v1.1.3/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= +github.com/jackc/puddle v1.3.0/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= +github.com/jackc/puddle/v2 v2.2.1 h1:RhxXJtFG022u4ibrCSMSiu5aOq1i77R3OHKNJj77OAk= +github.com/jackc/puddle/v2 v2.2.1/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4= +github.com/jmoiron/sqlx v1.4.0 h1:1PLqN7S1UYp5t4SrVVnt4nUVNemrDAtxlulVe+Qgm3o= +github.com/jmoiron/sqlx v1.4.0/go.mod h1:ZrZ7UsYB/weZdl2Bxg6jCRO9c3YHl8r3ahlKmRT4JLY= +github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= +github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= +github.com/juju/gnuflag v0.0.0-20171113085948-2ce1bb71843d/go.mod h1:2PavIy+JPciBPrBUjwbNvtwB6RQlve+hkpll6QSNmOE= +github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/klauspost/compress v1.17.8 h1:YcnTYrq7MikUT7k0Yb5eceMmALQPYBW/Xltxn0NAMnU= +github.com/klauspost/compress v1.17.8/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/pty v1.1.8/go.mod h1:O1sed60cT9XZ5uDucP5qwvh+TE3NnUj51EiZO/lmSfw= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/lestrrat-go/blackmagic v1.0.2 h1:Cg2gVSc9h7sz9NOByczrbUvLopQmXrfFx//N+AkAr5k= +github.com/lestrrat-go/blackmagic v1.0.2/go.mod h1:UrEqBzIR2U6CnzVyUtfM6oZNMt/7O7Vohk2J0OGSAtU= +github.com/lestrrat-go/httpcc v1.0.1 h1:ydWCStUeJLkpYyjLDHihupbn2tYmZ7m22BGkcvZZrIE= +github.com/lestrrat-go/httpcc v1.0.1/go.mod h1:qiltp3Mt56+55GPVCbTdM9MlqhvzyuL6W/NMDA8vA5E= +github.com/lestrrat-go/httprc v1.0.5 h1:bsTfiH8xaKOJPrg1R+E3iE/AWZr/x0Phj9PBTG/OLUk= +github.com/lestrrat-go/httprc v1.0.5/go.mod h1:mwwz3JMTPBjHUkkDv/IGJ39aALInZLrhBp0X7KGUZlo= +github.com/lestrrat-go/iter v1.0.2 h1:gMXo1q4c2pHmC3dn8LzRhJfP1ceCbgSiT9lUydIzltI= +github.com/lestrrat-go/iter v1.0.2/go.mod h1:Momfcq3AnRlRjI5b5O8/G5/BvpzrhoFTZcn06fEOPt4= +github.com/lestrrat-go/jwx/v2 v2.0.21 h1:jAPKupy4uHgrHFEdjVjNkUgoBKtVDgrQPB/h55FHrR0= +github.com/lestrrat-go/jwx/v2 v2.0.21/go.mod 
h1:09mLW8zto6bWL9GbwnqAli+ArLf+5M33QLQPDggkUWM= +github.com/lestrrat-go/option v1.0.1 h1:oAzP2fvZGQKWkvHa1/SAcFolBEca1oN+mQ7eooNBEYU= +github.com/lestrrat-go/option v1.0.1/go.mod h1:5ZHFbivi4xwXxhxY9XHDe2FHo6/Z7WWmtT7T5nBBp3I= +github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= +github.com/lib/pq v1.1.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= +github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= +github.com/lib/pq v1.10.2/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= +github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw= +github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= +github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ= +github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= +github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= +github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mattn/go-isatty v0.0.7/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= +github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= +github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= +github.com/mattn/go-sqlite3 v1.14.22 h1:2gZY6PC6kBnID23Tichd1K+Z0oS6nE/XwU+Vz/5o4kU= +github.com/mattn/go-sqlite3 v1.14.22/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y= +github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= +github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= +github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0= +github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo= +github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0= +github.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y= +github.com/montanaflynn/stats v0.7.1 h1:etflOAAHORrCC44V+aR6Ftzort912ZU+YLiSTuV8eaE= +github.com/montanaflynn/stats v0.7.1/go.mod h1:etXPPgVO6n31NxCd9KQUMvCM+ve0ruNzt6R8Bnaayow= +github.com/nats-io/nats.go v1.36.0 h1:suEUPuWzTSse/XhESwqLxXGuj8vGRuPRoG7MoRN/qyU= +github.com/nats-io/nats.go v1.36.0/go.mod h1:Ubdu4Nh9exXdSz0RVWRFBbRfrbSxOYd26oF0wkWclB8= +github.com/nats-io/nkeys v0.4.7 h1:RwNJbbIdYCoClSDNY7QVKZlyb/wfT6ugvFCiKy6vDvI= +github.com/nats-io/nkeys v0.4.7/go.mod h1:kqXRgRDPlGy7nGaEDMuYzmiJCIAAWDK0IMBtDmGD0nc= +github.com/nats-io/nuid v1.0.1 h1:5iA8DT8V7q8WK2EScv2padNa/rTESc1KdnPw4TC2paw= +github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c= +github.com/nxadm/tail v1.4.11 h1:8feyoE3OzPrcshW5/MJ4sGESc5cqmGkGCWlco4l0bqY= +github.com/nxadm/tail v1.4.11/go.mod h1:OTaG3NK980DZzxbRq6lEuzgU+mug70nY11sMd4JXXHc= +github.com/oapi-codegen/runtime v1.1.1 h1:EXLHh0DXIJnWhdRPN2w4MXAzFyE4CskzhNLUmtpMYro= +github.com/oapi-codegen/runtime v1.1.1/go.mod h1:SK9X900oXmPWilYR5/WKPzt3Kqxn/uS/+lbpREv+eCg= +github.com/oklog/ulid/v2 v2.1.0 h1:+9lhoxAP56we25tyYETBBY1YLA2SaoLvUFgrP2miPJU= 
+github.com/oklog/ulid/v2 v2.1.0/go.mod h1:rcEKHmBBKfef9DhnvX7y1HZBYxjXb0cP5ExxNsTT1QQ= +github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= +github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= +github.com/onsi/gomega v1.33.1 h1:dsYjIxxSR755MDmKVsaFQTE22ChNBcuuTWgkUDSubOk= +github.com/onsi/gomega v1.33.1/go.mod h1:U4R44UsT+9eLIaYRB2a5qajjtQYn0hauxvRm16AVYg0= +github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= +github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= +github.com/opencontainers/image-spec v1.1.0 h1:8SG7/vwALn54lVB/0yZ/MMwhFrPYtpEHQb2IpWsCzug= +github.com/opencontainers/image-spec v1.1.0/go.mod h1:W4s4sFTMaBeK1BQLXbG4AdM2szdn85PY75RI83NrTrM= +github.com/opencontainers/runc v1.1.12 h1:BOIssBaW1La0/qbNZHXOOa71dZfZEQOzW7dqQf3phss= +github.com/opencontainers/runc v1.1.12/go.mod h1:S+lQwSfncpBha7XTy/5lBwWgm5+y5Ma/O44Ekby9FK8= +github.com/ory/dockertest/v3 v3.10.0 h1:4K3z2VMe8Woe++invjaTB7VRyQXQy5UY+loujO4aNE4= +github.com/ory/dockertest/v3 v3.10.0/go.mod h1:nr57ZbRWMqfsdGdFNLHz5jjNdDb7VVFnzAeW1n5N1Lg= +github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY= +github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= +github.com/pborman/getopt v0.0.0-20170112200414-7148bc3a4c30/go.mod h1:85jBQOZwpVEaDAr341tbn15RS4fCAsIst0qp7i8ex1o= +github.com/pelletier/go-toml v1.9.5 h1:4yBQzkHv+7BHq2PQUZF3Mx0IYxG7LsP222s7Agd3ve8= +github.com/pelletier/go-toml v1.9.5/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/poy/onpar v1.1.2 h1:QaNrNiZx0+Nar5dLgTVp5mXkyoVFIbepjyEoGSnhbAY= +github.com/poy/onpar v1.1.2/go.mod h1:6X8FLNoxyr9kkmnlqpK6LSoiOtrO6MICtWwEuWkLjzg= +github.com/prometheus/client_golang v1.19.1 h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE= +github.com/prometheus/client_golang v1.19.1/go.mod h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho= +github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= +github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= +github.com/prometheus/common v0.52.2 h1:LW8Vk7BccEdONfrJBDffQGRtpSzi5CQaRZGtboOO2ck= +github.com/prometheus/common v0.52.2/go.mod h1:lrWtQx+iDfn2mbH5GUzlH9TSHyfZpHkSiG1W7y3sF2Q= +github.com/prometheus/procfs v0.13.0 h1:GqzLlQyfsPbaEHaQkO7tbDlriv/4o5Hudv6OXHGKX7o= +github.com/prometheus/procfs v0.13.0/go.mod h1:cd4PFCR54QLnGKPaKGA6l+cfuNXtht43ZKY6tow0Y1g= +github.com/rabbitmq/amqp091-go v1.10.0 h1:STpn5XsHlHGcecLmMFCtg7mqq0RnD+zFr4uzukfVhBw= +github.com/rabbitmq/amqp091-go v1.10.0/go.mod h1:Hy4jKW5kQART1u+JkDTF9YYOQUHXqMuhrgxOEeS7G4o= +github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= +github.com/rogpeppe/go-internal v1.12.0/go.mod 
h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= +github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ= +github.com/rs/zerolog v1.13.0/go.mod h1:YbFCdg8HfsridGWAh22vktObvhZbQsZXe4/zB0OKkWU= +github.com/rs/zerolog v1.15.0/go.mod h1:xYTKnLHcpfU2225ny5qZjxnj9NvkumZYjJHlAThCjNc= +github.com/rubenv/sql-migrate v1.6.1 h1:bo6/sjsan9HaXAsNxYP/jCEDUGibHp8JmOBw7NTGRos= +github.com/rubenv/sql-migrate v1.6.1/go.mod h1:tPzespupJS0jacLfhbwto/UjSX+8h2FdWB7ar+QlHa0= +github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/ryanuber/go-glob v1.0.0 h1:iQh3xXAumdQ+4Ufa5b25cRpC5TYKlno6hsv6Cb3pkBk= +github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= +github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= +github.com/segmentio/asm v1.2.0 h1:9BQrFxC+YOHJlTlHGkTrFWf59nbL3XnCoFLTwDCI7ys= +github.com/segmentio/asm v1.2.0/go.mod h1:BqMnlJP91P8d+4ibuonYZw9mfnzI9HfxselHZr5aAcs= +github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9NzErvs504Cn4c5DxATwIqPbtswREoFCre64PpcG4= +github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= +github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= +github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= +github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= +github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= +github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= +github.com/smarty/assertions v1.16.0 h1:EvHNkdRA4QHMrn75NZSoUQ/mAUXAYWfatfB01yTCzfY= +github.com/smarty/assertions v1.16.0/go.mod h1:duaaFdCS0K9dnoM50iyek/eYINOZ64gbh1Xlf6LG7AI= +github.com/smartystreets/goconvey v1.8.1 h1:qGjIddxOk4grTu9JPOU31tVfq3cNdBlNa5sSznIX1xY= +github.com/smartystreets/goconvey v1.8.1/go.mod h1:+/u4qLyY6x1jReYOp7GOM2FSt8aP9CzCZL03bI28W60= +github.com/spkg/bom v0.0.0-20160624110644-59b7046e48ad/go.mod h1:qLr4V1qq6nMqFKkMo8ZTx3f+BZEkzsRUY10Xsm2mwU0= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= +github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.9.0 
h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= +github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/urfave/cli v1.22.5/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= +github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= +github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= +github.com/xdg-go/pbkdf2 v1.0.0 h1:Su7DPu48wXMwC3bs7MCNG+z4FhcyEuz5dlvchbq0B0c= +github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI= +github.com/xdg-go/scram v1.1.2 h1:FHX5I5B4i4hKRVRBCFRxq1iQRej7WO3hhBuJf+UUySY= +github.com/xdg-go/scram v1.1.2/go.mod h1:RT/sEzTbU5y00aCK8UOx6R7YryM0iF1N2MOmC3kKLN4= +github.com/xdg-go/stringprep v1.0.4 h1:XLI/Ng3O1Atzq0oBs3TWm+5ZVgkq2aqdlvP9JtoZ6c8= +github.com/xdg-go/stringprep v1.0.4/go.mod h1:mPGuuIYwz7CmR2bT9j4GbQqutWS1zV24gijq1dTyGkM= +github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= +github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb h1:zGWFAtiMcyryUHoUjUJX0/lt1H2+i2Ka2n+D3DImSNo= +github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= +github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 h1:EzJWgHovont7NscjpAxXsDA8S8BMYve8Y5+7cuRE7R0= +github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= +github.com/xeipuuv/gojsonschema v1.2.0 h1:LhYJRs+L4fBtjZUfuSZIKGeVu0QRy8e5Xi7D17UxZ74= +github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y= +github.com/youmark/pkcs8 v0.0.0-20201027041543-1326539a0a0a h1:fZHgsYlfvtyqToslyjUt3VOPF4J7aK/3MPcK7xp3PDk= +github.com/youmark/pkcs8 v0.0.0-20201027041543-1326539a0a0a/go.mod h1:ul22v+Nro/R083muKhosV54bj5niojjWZvU8xrevuH4= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= +github.com/zenazn/goji v0.9.0/go.mod h1:7S9M489iMyHBNxwZnk9/EHS098H4/F6TATF2mIxtB1Q= +go.mongodb.org/mongo-driver v1.15.0 h1:rJCKC8eEliewXjZGf0ddURtl7tTVy1TK3bfl0gkUSLc= +go.mongodb.org/mongo-driver v1.15.0/go.mod h1:Vzb0Mk/pa7e6cWw85R4F/endUC3u0U9jGcNU603k65c= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.52.0 h1:vS1Ao/R55RNV4O7TA2Qopok8yN+X0LIP6RVWLFkprck= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.52.0/go.mod h1:BMsdeOxN04K0L5FNUBfjFdvwWGNe/rkmSwH4Aelu/X0= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.52.0 h1:9l89oX4ba9kHbBol3Xin3leYJ+252h0zszDtBwyKe2A= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.52.0/go.mod h1:XLZfZboOJWHNKUv7eH0inh0E9VV6eWDFB/9yJyTLPp0= +go.opentelemetry.io/otel v1.27.0 h1:9BZoF3yMK/O1AafMiQTVu0YDj5Ea4hPhxCs7sGva+cg= +go.opentelemetry.io/otel v1.27.0/go.mod h1:DMpAK8fzYRzs+bi3rS5REupisuqTheUlSZJ1WnZaPAQ= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.27.0 h1:R9DE4kQ4k+YtfLI2ULwX82VtNQ2J8yZmA7ZIF/D+7Mc= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.27.0/go.mod h1:OQFyQVrDlbe+R7xrEyDr/2Wr67Ol0hRUgsfA+V5A95s= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.27.0 h1:QY7/0NeRPKlzusf40ZE4t1VlMKbqSNT7cJRYzWuja0s= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp 
v1.27.0/go.mod h1:HVkSiDhTM9BoUJU8qE6j2eSWLLXvi1USXjyd2BXT8PY= +go.opentelemetry.io/otel/metric v1.27.0 h1:hvj3vdEKyeCi4YaYfNjv2NUje8FqKqUY8IlF0FxV/ik= +go.opentelemetry.io/otel/metric v1.27.0/go.mod h1:mVFgmRlhljgBiuk/MP/oKylr4hs85GZAylncepAX/ak= +go.opentelemetry.io/otel/sdk v1.27.0 h1:mlk+/Y1gLPLn84U4tI8d3GNJmGT/eXe3ZuOXN9kTWmI= +go.opentelemetry.io/otel/sdk v1.27.0/go.mod h1:Ha9vbLwJE6W86YstIywK2xFfPjbWlCuwPtMkKdz/Y4A= +go.opentelemetry.io/otel/trace v1.27.0 h1:IqYb813p7cmbHk0a5y6pD5JPakbVfftRXABGt5/Rscw= +go.opentelemetry.io/otel/trace v1.27.0/go.mod h1:6RiD1hkAprV4/q+yd2ln1HG9GoPx39SuvvstaLBl+l4= +go.opentelemetry.io/proto/otlp v1.2.0 h1:pVeZGk7nXDC9O2hncA6nHldxEjm6LByfA2aN8IOkz94= +go.opentelemetry.io/proto/otlp v1.2.0/go.mod h1:gGpR8txAl5M03pDhMC79G6SdqNV26naRm/KDsgaHD8A= +go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= +go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= +go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= +go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= +go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= +go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= +go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU= +go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= +go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190411191339-88737f569e3a/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE= +golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20201203163018-be400aefbc4c/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= +golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= +golang.org/x/crypto v0.20.0/go.mod h1:Xwo95rrVNIoSMx9wa1JroENMToLWn3RNVrTBpLHgZPQ= +golang.org/x/crypto v0.24.0 h1:mnl8DM0o513X8fdIkmyFE/5hTYxbwYOjDS/+rK6qpRI= +golang.org/x/crypto v0.24.0/go.mod h1:Z1PMYSOR5nyMcyAVAIQSKCDwalqy85Aqn1x3Ws4L5DM= 
+golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= +golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.18.0 h1:5+9lSbEzPSdWkH32vYPBwEpX8KwDbM52Ud9xBUvNlb0= +golang.org/x/mod v0.18.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= +golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= +golang.org/x/net v0.26.0 h1:soB7SVo0PWrY4vPW/+ay0jKDNScG2X9wFeYlXIvJsOQ= +golang.org/x/net v0.26.0/go.mod h1:5YKkiSynbBIh3p6iOc/vibscux0x38BZDkn8sCUPxHE= +golang.org/x/oauth2 v0.21.0 h1:tsimM75w1tF/uws5rbeHzIWxEqElMehnc+iW793zsZs= +golang.org/x/oauth2 v0.21.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M= +golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190602015325-4c4f7f33c9ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.21.0 h1:rF+pYz3DAGSQAxAu1CbC7catZg4ebC4UIeIhKxBZvws= +golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= +golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= +golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= +golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4= +golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI= +golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= +golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= 
+golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190425163242-31fd60d6bfdc/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190823170909-c4a336ef6a2f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20200103221440-774c71fcf114/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= +golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= +golang.org/x/tools v0.22.0 h1:gqSGLZqv+AI9lIQzniJ0nZDRG5GBPsSi+DRNHWNz6yA= +golang.org/x/tools v0.22.0/go.mod h1:aCwcsjqvq7Yqt6TNyX7QMU2enbQ/Gt0bo6krSeEri+c= +golang.org/x/xerrors v0.0.0-20190410155217-1f06c39b4373/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20190513163551-3ee3066db522/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/genproto/googleapis/api v0.0.0-20240520151616-dc85e6b867a5 h1:P8OJ/WCl/Xo4E4zoe4/bifHpSmmKwARqyqE4nW6J2GQ= +google.golang.org/genproto/googleapis/api v0.0.0-20240520151616-dc85e6b867a5/go.mod h1:RGnPtTG7r4i8sPlNyDeikXF99hMM+hN6QMm4ooG9g2g= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240520151616-dc85e6b867a5 h1:Q2RxlXqh1cgzzUgV261vBO2jI5R/3DD1J2pM0nI4NhU= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240520151616-dc85e6b867a5/go.mod h1:EfXuqaE1J41VCDicxHzUDm+8rk+7ZdXzHV0IhO/I6s0= +google.golang.org/grpc v1.64.0 h1:KH3VH9y/MgNQg1dE7b3XfVK0GsPSIzJwdF617gUSbvY= +google.golang.org/grpc v1.64.0/go.mod h1:oxjF8E3FBnjp+/gVFYdWacaLDx9na1aqy9oovLpxQYg= +google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg= +google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw= +gopkg.in/alexcesaro/quotedprintable.v3 v3.0.0-20150716171945-2caba252f4dc h1:2gGKlE2+asNV9m7xrywl36YYNnBG5ZQ0r/BOOxqPpmk= +gopkg.in/alexcesaro/quotedprintable.v3 v3.0.0-20150716171945-2caba252f4dc/go.mod h1:m7x9LTH6d71AHyAX77c9yqWCCa3UKHcVEj9y7hAtKDk= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= 
+gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
+gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
+gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
+gopkg.in/gomail.v2 v2.0.0-20160411212932-81ebce5c23df h1:n7WqCuqOuCbNr617RXOY0AWRXxgwEyPp2z+p0+hgMuE=
+gopkg.in/gomail.v2 v2.0.0-20160411212932-81ebce5c23df/go.mod h1:LRQQ+SO6ZHR7tOkpBDuZnXENFzX8qRjMDMyPD6BRkCw=
+gopkg.in/inconshreveable/log15.v2 v2.0.0-20180818164646-67afb5ed74ec/go.mod h1:aPpfJ7XW+gOuirDoZ8gHhLh3kZ1B08FtV2bbmy7Jv3s=
+gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
+gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
+gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
+gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
+gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
+gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
+gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
+gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+gotest.tools/v3 v3.3.0 h1:MfDY1b1/0xN1CyMlQDac0ziEy9zJQd9CXBRRDHw2jJo=
+gotest.tools/v3 v3.3.0/go.mod h1:Mcr9QNxkg0uMvy/YElmo4SpXgJKWgQvYrT7Kw5RzJ1A=
+honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
+moul.io/http2curl v1.0.0 h1:6XwpyZOYsgZJrU8exnG87ncVkU1FVCcTRpwzOkTDUi8=
+moul.io/http2curl v1.0.0/go.mod h1:f6cULg+e4Md/oW1cYmwW4IWQOVl2lGbmCNGOHvzX2kE=
diff --git a/lora/README.md b/lora/README.md
new file mode 100644
index 0000000..4f61994
--- /dev/null
+++ b/lora/README.md
@@ -0,0 +1,87 @@
+# LoRa Adapter
+
+Adapter between the Magistrala IoT system and [LoRa Server](https://github.com/brocaar/chirpstack-network-server).
+
+This adapter sits between Magistrala and LoRa Server and forwards messages from one system to the other over MQTT, using the appropriate MQTT topics and message formats (JSON and SenML), i.e. respecting the APIs of both systems.
+
+LoRa Server provides the connectivity layer; data is pushed through this adapter service to Magistrala, where it is persisted and routed to other protocols via the Magistrala multi-protocol message broker. On top of that, Magistrala adds user accounts, application management and security, yielding a complete end-to-end LoRa solution.
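+
+The adapter subscribes to LoRa Server uplink events (default topic pattern `application/+/device/+/event/up`). As an illustrative sketch, such an event can be simulated with `mosquitto_pub`; the JSON field names follow the ChirpStack uplink event format (an assumption — only the fields the adapter actually reads are shown), and the application ID, device EUI and payload placeholder are hypothetical:
+
+```bash
+# simulate an uplink event on the topic the adapter subscribes to
+mosquitto_pub -h localhost -p 1883 \
+  -t "application/1/device/0004a30b001fbb81/event/up" \
+  -m '{"applicationID":"1","devEUI":"0004a30b001fbb81","data":"<base64-encoded SenML payload>"}'
+```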
+
+## Configuration
+
+The service is configured using the environment variables presented in the following table. Note that any unset variables will be replaced with their default values.
+
+| Variable                         | Description                                                | Default                             |
+| -------------------------------- | ---------------------------------------------------------- | ----------------------------------- |
+| MG_LORA_ADAPTER_LOG_LEVEL        | Log level for the LoRa Adapter (debug, info, warn, error)  | info                                |
+| MG_LORA_ADAPTER_HTTP_HOST        | LoRa adapter HTTP host                                     | ""                                  |
+| MG_LORA_ADAPTER_HTTP_PORT        | LoRa adapter HTTP port                                     | 9017                                |
+| MG_LORA_ADAPTER_HTTP_SERVER_CERT | Path to the PEM encoded server certificate file            | ""                                  |
+| MG_LORA_ADAPTER_HTTP_SERVER_KEY  | Path to the PEM encoded server key file                    | ""                                  |
+| MG_LORA_ADAPTER_MESSAGES_URL     | LoRa adapter MQTT broker URL                               | tcp://localhost:1883                |
+| MG_LORA_ADAPTER_MESSAGES_TOPIC   | LoRa adapter MQTT subscriber topic                         | application/+/device/+/event/up     |
+| MG_LORA_ADAPTER_MESSAGES_USER    | LoRa adapter MQTT subscriber username                      | ""                                  |
+| MG_LORA_ADAPTER_MESSAGES_PASS    | LoRa adapter MQTT subscriber password                      | ""                                  |
+| MG_LORA_ADAPTER_MESSAGES_TIMEOUT | LoRa adapter MQTT subscriber timeout                       | 30s                                 |
+| MG_LORA_ADAPTER_ROUTE_MAP_URL    | Route-map database URL                                     | redis://localhost:6379              |
+| MG_ES_URL                        | Event source URL                                           | <nats://localhost:4222>             |
+| MG_LORA_ADAPTER_EVENT_CONSUMER   | Service event consumer name                                | lora-adapter                        |
+| MG_MESSAGE_BROKER_URL            | Message broker instance URL                                | <nats://localhost:4222>             |
+| MG_JAEGER_URL                    | Jaeger server URL                                          | <http://localhost:14268/api/traces> |
+| MG_JAEGER_TRACE_RATIO            | Jaeger sampling ratio                                      | 1.0                                 |
+| MG_SEND_TELEMETRY                | Send telemetry to the Magistrala call-home server          | true                                |
+| MG_LORA_ADAPTER_INSTANCE_ID      | Service instance ID                                        | ""                                  |
+
+## Deployment
+
+The service itself is distributed as a Docker container. Check the [`lora-adapter`](https://github.com/absmach/magistrala/blob/main/docker/addons/lora-adapter/docker-compose.yml) service section in the docker-compose file to see how the service is deployed.
+
+Running this service outside of a container requires a working instance of the message broker service, the LoRa Server, the things service and the Jaeger server.
+To start the service outside of the container, execute the following shell script:
+
+```bash
+# download the latest version of the service
+git clone https://github.com/absmach/magistrala
+
+cd magistrala
+
+# compile the lora adapter
+make lora
+
+# copy binary to bin
+make install
+
+# set the environment variables and run the service
+MG_LORA_ADAPTER_LOG_LEVEL=info \
+MG_LORA_ADAPTER_HTTP_HOST=localhost \
+MG_LORA_ADAPTER_HTTP_PORT=9017 \
+MG_LORA_ADAPTER_HTTP_SERVER_CERT="" \
+MG_LORA_ADAPTER_HTTP_SERVER_KEY="" \
+MG_LORA_ADAPTER_MESSAGES_URL=tcp://localhost:1883 \
+MG_LORA_ADAPTER_MESSAGES_TOPIC=application/+/device/+/event/up \
+MG_LORA_ADAPTER_MESSAGES_USER="" \
+MG_LORA_ADAPTER_MESSAGES_PASS="" \
+MG_LORA_ADAPTER_MESSAGES_TIMEOUT=30s \
+MG_LORA_ADAPTER_ROUTE_MAP_URL=redis://localhost:6379 \
+MG_ES_URL=nats://localhost:4222 \
+MG_LORA_ADAPTER_EVENT_CONSUMER=lora-adapter \
+MG_MESSAGE_BROKER_URL=nats://localhost:4222 \
+MG_JAEGER_URL=http://localhost:14268/api/traces \
+MG_JAEGER_TRACE_RATIO=1.0 \
+MG_SEND_TELEMETRY=true \
+MG_LORA_ADAPTER_INSTANCE_ID="" \
+$GOBIN/magistrala-lora
+```
+
+Setting `MG_LORA_ADAPTER_HTTP_SERVER_CERT` and `MG_LORA_ADAPTER_HTTP_SERVER_KEY` enables TLS for the service. The service expects PEM-encoded files for both the certificate and the key.
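+
+For local testing, a self-signed pair can be generated with `openssl` (an illustrative sketch; the file names and subject are arbitrary):
+
+```bash
+# generate a throwaway self-signed certificate and key (testing only)
+openssl req -x509 -newkey rsa:4096 -nodes -days 365 \
+  -keyout server.key -out server.crt -subj "/CN=localhost"
+
+# point the adapter at them
+MG_LORA_ADAPTER_HTTP_SERVER_CERT=server.crt \
+MG_LORA_ADAPTER_HTTP_SERVER_KEY=server.key \
+$GOBIN/magistrala-lora
+```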
+
+### Using docker-compose
+
+This service can be deployed using Docker containers. A docker-compose file is available at `<project_root>/docker/addons/lora-adapter/docker-compose.yml`. To run the Magistrala lora-adapter, execute the following command:
+
+```bash
+docker compose -f docker/addons/lora-adapter/docker-compose.yml up -d
+```
+
+## Usage
+
+For more information about the service capabilities and usage, please check out the [Magistrala documentation](https://docs.magistrala.abstractmachines.fr/lora).
diff --git a/lora/adapter.go b/lora/adapter.go
new file mode 100644
index 0000000..038ffda
--- /dev/null
+++ b/lora/adapter.go
@@ -0,0 +1,179 @@
+// Copyright (c) Abstract Machines
+// SPDX-License-Identifier: Apache-2.0
+
+package lora
+
+import (
+	"context"
+	"encoding/base64"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"time"
+
+	"github.com/absmach/magistrala/pkg/messaging"
+)
+
+const protocol = "lora"
+
+var (
+	// ErrMalformedMessage indicates a malformed LoRa message.
+	ErrMalformedMessage = errors.New("malformed message received")
+
+	// ErrNotFoundDev indicates a non-existent route map for a device EUI.
+	ErrNotFoundDev = errors.New("route map not found for this device EUI")
+
+	// ErrNotFoundApp indicates a non-existent route map for an application ID.
+	ErrNotFoundApp = errors.New("route map not found for this application ID")
+
+	// ErrNotConnected indicates a non-existent route map for a connection.
+	ErrNotConnected = errors.New("route map not found for this connection")
+)
+
+// Service specifies an API that must be fulfilled by the domain service
+// implementation, and all of its decorators (e.g. logging & metrics).
+type Service interface {
+	// CreateThing creates a thingID:devEUI route-map.
+	CreateThing(ctx context.Context, thingID, devEUI string) error
+
+	// UpdateThing updates a thingID:devEUI route-map.
+	UpdateThing(ctx context.Context, thingID, devEUI string) error
+
+	// RemoveThing removes a thingID:devEUI route-map.
+	RemoveThing(ctx context.Context, thingID string) error
+
+	// CreateChannel creates a channelID:appID route-map.
+	CreateChannel(ctx context.Context, chanID, appID string) error
+
+	// UpdateChannel updates a channelID:appID route-map.
+	UpdateChannel(ctx context.Context, chanID, appID string) error
+
+	// RemoveChannel removes a channelID:appID route-map.
+	RemoveChannel(ctx context.Context, chanID string) error
+
+	// ConnectThing creates a thingID:channelID route-map.
+	ConnectThing(ctx context.Context, chanID, thingID string) error
+
+	// DisconnectThing removes a thingID:channelID route-map.
+	DisconnectThing(ctx context.Context, chanID, thingID string) error
+
+	// Publish forwards messages from the LoRa MQTT broker to the Magistrala message broker.
+	Publish(ctx context.Context, msg *Message) error
+}
+
+var _ Service = (*adapterService)(nil)
+
+type adapterService struct {
+	publisher  messaging.Publisher
+	thingsRM   RouteMapRepository
+	channelsRM RouteMapRepository
+	connectRM  RouteMapRepository
+}
+
+// New instantiates the LoRa adapter implementation.
+func New(publisher messaging.Publisher, thingsRM, channelsRM, connectRM RouteMapRepository) Service {
+	return &adapterService{
+		publisher:  publisher,
+		thingsRM:   thingsRM,
+		channelsRM: channelsRM,
+		connectRM:  connectRM,
+	}
+}
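+
+// Illustrative usage sketch (documentation only, not part of the service API):
+// the caller supplies a concrete messaging.Publisher and three
+// RouteMapRepository implementations (e.g. Redis-backed ones, as wired in
+// cmd/lora/main.go), then registers the route-maps before any publishing:
+//
+//	svc := lora.New(publisher, thingsRM, channelsRM, connsRM)
+//	_ = svc.CreateThing(ctx, "thingID-1", "devEUI-1")  // thing <-> device EUI
+//	_ = svc.CreateChannel(ctx, "chanID-1", "appID-1")  // channel <-> application ID
+//	_ = svc.ConnectThing(ctx, "chanID-1", "thingID-1") // allow the pair to publish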
+
+// Publish forwards messages from the LoRa MQTT broker to the Magistrala message broker.
+func (as *adapterService) Publish(ctx context.Context, m *Message) error {
+	// Get the route-map of the LoRa device.
+	thingID, err := as.thingsRM.Get(ctx, m.DevEUI)
+	if err != nil {
+		return ErrNotFoundDev
+	}
+
+	// Get the route-map of the LoRa application.
+	chanID, err := as.channelsRM.Get(ctx, m.ApplicationID)
+	if err != nil {
+		return ErrNotFoundApp
+	}
+
+	c := fmt.Sprintf("%s:%s", chanID, thingID)
+	if _, err := as.connectRM.Get(ctx, c); err != nil {
+		return ErrNotConnected
+	}
+
+	// Use the SenML message decoded by the LoRa Server application if
+	// the Object field isn't empty. Otherwise, decode the standard Data field.
+	var payload []byte
+	switch m.Object {
+	case nil:
+		payload, err = base64.StdEncoding.DecodeString(m.Data)
+		if err != nil {
+			return ErrMalformedMessage
+		}
+	default:
+		jo, err := json.Marshal(m.Object)
+		if err != nil {
+			return err
+		}
+		payload = jo
+	}
+
+	// Publish on the Magistrala message broker.
+	msg := messaging.Message{
+		Publisher: thingID,
+		Protocol:  protocol,
+		Channel:   chanID,
+		Payload:   payload,
+		Created:   time.Now().UnixNano(),
+	}
+
+	return as.publisher.Publish(ctx, msg.Channel, &msg)
+}
+
+func (as *adapterService) CreateThing(ctx context.Context, thingID, devEUI string) error {
+	return as.thingsRM.Save(ctx, thingID, devEUI)
+}
+
+func (as *adapterService) UpdateThing(ctx context.Context, thingID, devEUI string) error {
+	return as.thingsRM.Save(ctx, thingID, devEUI)
+}
+
+func (as *adapterService) RemoveThing(ctx context.Context, thingID string) error {
+	return as.thingsRM.Remove(ctx, thingID)
+}
+
+func (as *adapterService) CreateChannel(ctx context.Context, chanID, appID string) error {
+	return as.channelsRM.Save(ctx, chanID, appID)
+}
+
+func (as *adapterService) UpdateChannel(ctx context.Context, chanID, appID string) error {
+	return as.channelsRM.Save(ctx, chanID, appID)
+}
+
+func (as *adapterService) RemoveChannel(ctx context.Context, chanID string) error {
+	return as.channelsRM.Remove(ctx, chanID)
+}
+
+func (as *adapterService) ConnectThing(ctx context.Context, chanID, thingID string) error {
+	if _, err := as.channelsRM.Get(ctx, chanID); err != nil {
+		return ErrNotFoundApp
+	}
+
+	if _, err := as.thingsRM.Get(ctx, thingID); err != nil {
+		return ErrNotFoundDev
+	}
+
+	c := fmt.Sprintf("%s:%s", chanID, thingID)
+	return as.connectRM.Save(ctx, c, c)
+}
+
+func (as *adapterService) DisconnectThing(ctx context.Context, chanID, thingID string) error {
+	if _, err := as.channelsRM.Get(ctx, chanID); err != nil {
+		return ErrNotFoundApp
+	}
+
+	if _, err := as.thingsRM.Get(ctx, thingID); err != nil {
+		return ErrNotFoundDev
+	}
+
+	c := fmt.Sprintf("%s:%s", chanID, thingID)
+	return as.connectRM.Remove(ctx, c)
+}
diff --git a/lora/adapter_test.go b/lora/adapter_test.go new file mode 100644 index 0000000..d0bc9a7 --- /dev/null +++ b/lora/adapter_test.go @@ -0,0 +1,478 @@ +// Copyright (c) Abstract Machines +// SPDX-License-Identifier: Apache-2.0 + +package lora_test + +import ( + "context" + "encoding/base64" + "fmt" + "testing" + + "github.com/absmach/magistrala/lora" + "github.com/absmach/magistrala/lora/mocks" + "github.com/absmach/magistrala/pkg/errors" + pubmocks "github.com/absmach/magistrala/pkg/messaging/mocks" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" +) + +const ( + thingID = "thingID-1" + chanID = "chanID-1" + devEUI = "devEUI-1" + appID = "appID-1" + thingID2 = "thingID-2" + chanID2 = "chanID-2" + devEUI2 = "devEUI-2" + appID2 = "appID-2" + msg = `[{"bn":"msg-base-name","n":"temperature","v": 
17},{"n":"humidity","v": 56}]` + invalid = "wrong" +) + +var ( + pub *pubmocks.PubSub + thingsRM, channelsRM, connsRM *mocks.RouteMapRepository +) + +func newService() lora.Service { + pub = new(pubmocks.PubSub) + thingsRM = new(mocks.RouteMapRepository) + channelsRM = new(mocks.RouteMapRepository) + connsRM = new(mocks.RouteMapRepository) + + return lora.New(pub, thingsRM, channelsRM, connsRM) +} + +func TestPublish(t *testing.T) { + svc := newService() + + msgBase64 := base64.StdEncoding.EncodeToString([]byte(msg)) + + cases := []struct { + desc string + err error + msg lora.Message + getThingErr error + getChannelErr error + connectionsErr error + publishErr error + }{ + { + desc: "publish message with existing route-map and valid Data", + err: nil, + msg: lora.Message{ + ApplicationID: appID, + DevEUI: devEUI, + Data: msgBase64, + }, + getThingErr: nil, + getChannelErr: nil, + connectionsErr: nil, + publishErr: nil, + }, + { + desc: "publish message with existing route-map and invalid Data", + err: lora.ErrMalformedMessage, + msg: lora.Message{ + ApplicationID: appID, + DevEUI: devEUI, + Data: "wrong", + }, + getThingErr: nil, + getChannelErr: nil, + connectionsErr: nil, + publishErr: errors.New("Failed publishing"), + }, + { + desc: "publish message with non existing appID route-map", + err: lora.ErrNotFoundApp, + msg: lora.Message{ + ApplicationID: "wrong", + DevEUI: devEUI, + }, + getChannelErr: lora.ErrNotFoundApp, + }, + { + desc: "publish message with non existing devEUI route-map", + err: lora.ErrNotFoundDev, + msg: lora.Message{ + ApplicationID: appID, + DevEUI: "wrong", + }, + getThingErr: lora.ErrNotFoundDev, + }, + { + desc: "publish message with non existing connection route-map", + err: lora.ErrNotConnected, + msg: lora.Message{ + ApplicationID: appID2, + DevEUI: devEUI2, + }, + connectionsErr: lora.ErrNotConnected, + }, + { + desc: "publish message with wrong Object", + err: errors.New("json: unsupported type: chan int"), + msg: lora.Message{ + ApplicationID: appID2, + DevEUI: devEUI2, + Object: make(chan int), + }, + }, + { + desc: "publish message with valid Object", + err: nil, + msg: lora.Message{ + ApplicationID: appID2, + DevEUI: devEUI2, + Object: map[string]interface{}{"key": "value"}, + }, + }, + } + + for _, tc := range cases { + repoCall := thingsRM.On("Get", context.Background(), tc.msg.DevEUI).Return(tc.msg.DevEUI, tc.getThingErr) + repoCall1 := channelsRM.On("Get", context.Background(), tc.msg.ApplicationID).Return(tc.msg.ApplicationID, tc.getChannelErr) + repoCall2 := connsRM.On("Get", context.Background(), mock.Anything).Return("", tc.connectionsErr) + repoCall3 := pub.On("Publish", context.Background(), tc.msg.ApplicationID, mock.Anything).Return(tc.publishErr) + err := svc.Publish(context.Background(), &tc.msg) + assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("%s: expected %s got %s\n", tc.desc, tc.err, err)) + repoCall.Unset() + repoCall1.Unset() + repoCall2.Unset() + repoCall3.Unset() + } +} + +func TestCreateChannel(t *testing.T) { + svc := newService() + + cases := []struct { + desc string + err error + ChanID string + AppID string + }{ + { + desc: "create channel with valid data", + err: nil, + ChanID: chanID, + AppID: appID, + }, + { + desc: "create channel with empty chanID", + err: lora.ErrNotFoundApp, + ChanID: "", + AppID: appID, + }, + { + desc: "create channel with empty appID", + err: lora.ErrNotFoundApp, + ChanID: chanID, + AppID: "", + }, + } + + for _, tc := range cases { + repoCall := channelsRM.On("Save", context.Background(), 
tc.ChanID, tc.AppID).Return(tc.err) + err := svc.CreateChannel(context.Background(), tc.ChanID, tc.AppID) + assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("%s: expected %s got %s\n", tc.desc, tc.err, err)) + repoCall.Unset() + } +} + +func TestCreateThing(t *testing.T) { + svc := newService() + + cases := []struct { + desc string + err error + ThingID string + DevEUI string + }{ + { + desc: "create thing with valid data", + err: nil, + ThingID: thingID, + DevEUI: devEUI, + }, + { + desc: "create thing with empty thingID", + err: lora.ErrNotFoundDev, + ThingID: "", + DevEUI: devEUI, + }, + { + desc: "create thing with empty devEUI", + err: lora.ErrNotFoundDev, + ThingID: thingID, + DevEUI: "", + }, + } + + for _, tc := range cases { + repoCall := thingsRM.On("Save", context.Background(), tc.ThingID, tc.DevEUI).Return(tc.err) + err := svc.CreateThing(context.Background(), tc.ThingID, tc.DevEUI) + assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("%s: expected %s got %s\n", tc.desc, tc.err, err)) + repoCall.Unset() + } +} + +func TestConnectThing(t *testing.T) { + svc := newService() + + cases := []struct { + desc string + err error + channelID string + thingID string + getThingErr error + getChannelErr error + }{ + { + desc: "connect thing with valid data", + err: nil, + channelID: chanID, + thingID: thingID, + getThingErr: nil, + getChannelErr: nil, + }, + { + desc: "connect thing with non existing thing", + err: lora.ErrNotFoundDev, + channelID: chanID, + thingID: invalid, + getThingErr: lora.ErrNotFoundDev, + }, + { + desc: "connect thing with non existing channel", + err: lora.ErrNotFoundApp, + channelID: invalid, + thingID: thingID, + getChannelErr: lora.ErrNotFoundApp, + }, + } + + for _, tc := range cases { + repoCall := thingsRM.On("Get", context.Background(), tc.thingID).Return(devEUI, tc.getThingErr) + repoCall1 := channelsRM.On("Get", context.Background(), tc.channelID).Return(appID, tc.getChannelErr) + repoCall2 := connsRM.On("Save", context.Background(), mock.Anything, mock.Anything).Return(tc.err) + err := svc.ConnectThing(context.Background(), tc.channelID, tc.thingID) + assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("%s: expected %s got %s\n", tc.desc, tc.err, err)) + repoCall.Unset() + repoCall1.Unset() + repoCall2.Unset() + } +} + +func TestDisconnectThing(t *testing.T) { + svc := newService() + + cases := []struct { + desc string + err error + channelID string + thingID string + getThingErr error + getChannelErr error + }{ + { + desc: "disconnect thing with valid data", + err: nil, + channelID: chanID, + thingID: thingID, + getThingErr: nil, + getChannelErr: nil, + }, + { + desc: "disconnect thing with non existing thing ID", + err: lora.ErrNotFoundDev, + channelID: chanID, + thingID: invalid, + getThingErr: lora.ErrNotFoundDev, + }, + { + desc: "disconnect thing with non existing channel", + err: lora.ErrNotFoundApp, + channelID: invalid, + thingID: thingID, + getChannelErr: lora.ErrNotFoundApp, + }, + } + + for _, tc := range cases { + repoCall := thingsRM.On("Get", context.Background(), tc.thingID).Return(devEUI, tc.getThingErr) + repoCall1 := channelsRM.On("Get", context.Background(), tc.channelID).Return(appID, tc.getChannelErr) + repoCall2 := connsRM.On("Remove", context.Background(), mock.Anything).Return(tc.err) + err := svc.DisconnectThing(context.Background(), tc.channelID, tc.thingID) + assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("%s: expected %s got %s\n", tc.desc, tc.err, err)) + repoCall.Unset() + repoCall1.Unset() + 
repoCall2.Unset() + } +} + +func TestRemoveChannel(t *testing.T) { + svc := newService() + + cases := []struct { + desc string + err error + ChanID string + }{ + { + desc: "remove channel with valid data", + err: nil, + ChanID: chanID, + }, + { + desc: "remove channel with non existing channel", + err: lora.ErrNotFoundApp, + ChanID: invalid, + }, + { + desc: "remove channel with empty channelID", + err: lora.ErrNotFoundApp, + ChanID: "", + }, + } + + for _, tc := range cases { + repoCall := channelsRM.On("Remove", context.Background(), tc.ChanID).Return(tc.err) + err := svc.RemoveChannel(context.Background(), tc.ChanID) + assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("%s: expected %s got %s\n", tc.desc, tc.err, err)) + repoCall.Unset() + } +} + +func TestRemoveThing(t *testing.T) { + svc := newService() + + cases := []struct { + desc string + err error + ThingID string + }{ + { + desc: "remove thing with valid data", + err: nil, + ThingID: thingID, + }, + { + desc: "remove thing with non existing thing", + err: lora.ErrNotFoundDev, + ThingID: invalid, + }, + { + desc: "remove thing with empty thingID", + err: lora.ErrNotFoundDev, + ThingID: "", + }, + } + + for _, tc := range cases { + repoCall := thingsRM.On("Remove", context.Background(), tc.ThingID).Return(tc.err) + err := svc.RemoveThing(context.Background(), tc.ThingID) + assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("%s: expected %s got %s\n", tc.desc, tc.err, err)) + repoCall.Unset() + } +} + +func TestUpdateChannel(t *testing.T) { + svc := newService() + + cases := []struct { + desc string + err error + ChanID string + AppID string + }{ + { + desc: "update channel with valid data", + err: nil, + ChanID: chanID, + AppID: appID, + }, + { + desc: "update channel with non existing channel", + err: lora.ErrNotFoundApp, + ChanID: invalid, + AppID: appID, + }, + { + desc: "update channel with empty channelID", + err: lora.ErrNotFoundApp, + ChanID: "", + AppID: appID, + }, + { + desc: "update channel with empty appID", + err: lora.ErrNotFoundApp, + ChanID: chanID, + AppID: "", + }, + { + desc: "update channel with non existing appID", + err: lora.ErrNotFoundApp, + ChanID: chanID, + AppID: invalid, + }, + } + + for _, tc := range cases { + repoCall := channelsRM.On("Save", context.Background(), tc.ChanID, tc.AppID).Return(tc.err) + err := svc.UpdateChannel(context.Background(), tc.ChanID, tc.AppID) + assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("%s: expected %s got %s\n", tc.desc, tc.err, err)) + repoCall.Unset() + } +} + +func TestUpdateThing(t *testing.T) { + svc := newService() + + cases := []struct { + desc string + err error + ThingID string + DevEUI string + }{ + { + desc: "update thing with valid data", + err: nil, + ThingID: thingID, + DevEUI: devEUI, + }, + { + desc: "update thing with non existing thing", + err: lora.ErrNotFoundDev, + ThingID: invalid, + DevEUI: devEUI, + }, + { + desc: "update thing with empty thingID", + err: lora.ErrNotFoundDev, + ThingID: "", + DevEUI: devEUI, + }, + { + desc: "update thing with empty devEUI", + err: lora.ErrNotFoundDev, + ThingID: thingID, + DevEUI: "", + }, + { + desc: "update thing with non existing devEUI", + err: lora.ErrNotFoundDev, + ThingID: thingID, + DevEUI: invalid, + }, + } + + for _, tc := range cases { + repoCall := thingsRM.On("Save", context.Background(), tc.ThingID, tc.DevEUI).Return(tc.err) + err := svc.UpdateThing(context.Background(), tc.ThingID, tc.DevEUI) + assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("%s: expected %s got 
%s\n", tc.desc, tc.err, err)) + repoCall.Unset() + } +} diff --git a/lora/api/api.go b/lora/api/api.go new file mode 100644 index 0000000..06cdf1b --- /dev/null +++ b/lora/api/api.go @@ -0,0 +1,21 @@ +// Copyright (c) Abstract Machines +// SPDX-License-Identifier: Apache-2.0 + +package api + +import ( + "net/http" + + "github.com/absmach/magistrala" + "github.com/go-chi/chi/v5" + "github.com/prometheus/client_golang/prometheus/promhttp" +) + +// MakeHandler returns a HTTP handler for API endpoints. +func MakeHandler(instanceID string) http.Handler { + r := chi.NewRouter() + r.Get("/health", magistrala.Health("lora-adapter", instanceID)) + r.Handle("/metrics", promhttp.Handler()) + + return r +} diff --git a/lora/api/doc.go b/lora/api/doc.go new file mode 100644 index 0000000..2424852 --- /dev/null +++ b/lora/api/doc.go @@ -0,0 +1,6 @@ +// Copyright (c) Abstract Machines +// SPDX-License-Identifier: Apache-2.0 + +// Package api contains API-related concerns: endpoint definitions, middlewares +// and all resource representations. +package api diff --git a/lora/api/logging.go b/lora/api/logging.go new file mode 100644 index 0000000..82f9a0b --- /dev/null +++ b/lora/api/logging.go @@ -0,0 +1,189 @@ +// Copyright (c) Abstract Machines +// SPDX-License-Identifier: Apache-2.0 + +//go:build !test + +package api + +import ( + "context" + "log/slog" + "time" + + "github.com/absmach/magistrala/lora" +) + +var _ lora.Service = (*loggingMiddleware)(nil) + +type loggingMiddleware struct { + logger *slog.Logger + svc lora.Service +} + +// LoggingMiddleware adds logging facilities to the core service. +func LoggingMiddleware(svc lora.Service, logger *slog.Logger) lora.Service { + return &loggingMiddleware{ + logger: logger, + svc: svc, + } +} + +func (lm loggingMiddleware) CreateThing(ctx context.Context, thingID, loraDevEUI string) (err error) { + defer func(begin time.Time) { + args := []any{ + slog.String("duration", time.Since(begin).String()), + slog.String("thing_id", thingID), + slog.String("dev_eui", loraDevEUI), + } + if err != nil { + args = append(args, slog.Any("error", err)) + lm.logger.Warn("Create thing route-map failed", args...) + return + } + lm.logger.Info("Create thing route-map completed successfully", args...) + }(time.Now()) + + return lm.svc.CreateThing(ctx, thingID, loraDevEUI) +} + +func (lm loggingMiddleware) UpdateThing(ctx context.Context, thingID, loraDevEUI string) (err error) { + defer func(begin time.Time) { + args := []any{ + slog.String("duration", time.Since(begin).String()), + slog.String("thing_id", thingID), + slog.String("dev_eui", loraDevEUI), + } + if err != nil { + args = append(args, slog.Any("error", err)) + lm.logger.Warn("Update thing route-map failed", args...) + return + } + lm.logger.Info("Update thing route-map completed successfully", args...) + }(time.Now()) + + return lm.svc.UpdateThing(ctx, thingID, loraDevEUI) +} + +func (lm loggingMiddleware) RemoveThing(ctx context.Context, thingID string) (err error) { + defer func(begin time.Time) { + args := []any{ + slog.String("duration", time.Since(begin).String()), + slog.String("thing_id", thingID), + } + if err != nil { + args = append(args, slog.Any("error", err)) + lm.logger.Warn("Remove thing route-map failed", args...) + return + } + lm.logger.Info("Remove thing route-map completed successfully", args...) 
+	}(time.Now())
+
+	return lm.svc.RemoveThing(ctx, thingID)
+}
+
+func (lm loggingMiddleware) CreateChannel(ctx context.Context, chanID, loraApp string) (err error) {
+	defer func(begin time.Time) {
+		args := []any{
+			slog.String("duration", time.Since(begin).String()),
+			slog.String("channel_id", chanID),
+			slog.String("lora_app", loraApp),
+		}
+		if err != nil {
+			args = append(args, slog.Any("error", err))
+			lm.logger.Warn("Create channel route-map failed", args...)
+			return
+		}
+		lm.logger.Info("Create channel route-map completed successfully", args...)
+	}(time.Now())
+
+	return lm.svc.CreateChannel(ctx, chanID, loraApp)
+}
+
+func (lm loggingMiddleware) UpdateChannel(ctx context.Context, chanID, loraApp string) (err error) {
+	defer func(begin time.Time) {
+		args := []any{
+			slog.String("duration", time.Since(begin).String()),
+			slog.String("channel_id", chanID),
+			slog.String("lora_app", loraApp),
+		}
+		if err != nil {
+			args = append(args, slog.Any("error", err))
+			lm.logger.Warn("Update channel route-map failed", args...)
+			return
+		}
+		lm.logger.Info("Update channel route-map completed successfully", args...)
+	}(time.Now())
+
+	return lm.svc.UpdateChannel(ctx, chanID, loraApp)
+}
+
+func (lm loggingMiddleware) RemoveChannel(ctx context.Context, chanID string) (err error) {
+	defer func(begin time.Time) {
+		args := []any{
+			slog.String("duration", time.Since(begin).String()),
+			slog.String("channel_id", chanID),
+		}
+		if err != nil {
+			args = append(args, slog.Any("error", err))
+			lm.logger.Warn("Remove channel route-map failed", args...)
+			return
+		}
+		lm.logger.Info("Remove channel route-map completed successfully", args...)
+	}(time.Now())
+
+	return lm.svc.RemoveChannel(ctx, chanID)
+}
+
+func (lm loggingMiddleware) ConnectThing(ctx context.Context, chanID, thingID string) (err error) {
+	defer func(begin time.Time) {
+		args := []any{
+			slog.String("duration", time.Since(begin).String()),
+			slog.String("channel_id", chanID),
+			slog.String("thing_id", thingID),
+		}
+		if err != nil {
+			args = append(args, slog.Any("error", err))
+			lm.logger.Warn("Connect thing to channel failed", args...)
+			return
+		}
+		lm.logger.Info("Connect thing to channel completed successfully", args...)
+	}(time.Now())
+
+	return lm.svc.ConnectThing(ctx, chanID, thingID)
+}
+
+func (lm loggingMiddleware) DisconnectThing(ctx context.Context, chanID, thingID string) (err error) {
+	defer func(begin time.Time) {
+		args := []any{
+			slog.String("duration", time.Since(begin).String()),
+			slog.String("channel_id", chanID),
+			slog.String("thing_id", thingID),
+		}
+		if err != nil {
+			args = append(args, slog.Any("error", err))
+			lm.logger.Warn("Disconnect thing from channel failed", args...)
+			return
+		}
+		lm.logger.Info("Disconnect thing from channel completed successfully", args...)
+	}(time.Now())
+
+	return lm.svc.DisconnectThing(ctx, chanID, thingID)
+}
+
+func (lm loggingMiddleware) Publish(ctx context.Context, msg *lora.Message) (err error) {
+	defer func(begin time.Time) {
+		args := []any{
+			slog.String("duration", time.Since(begin).String()),
+			slog.Group("message",
+				slog.String("application_id", msg.ApplicationID),
+				slog.String("device_eui", msg.DevEUI),
+			),
+		}
+		if err != nil {
+			args = append(args, slog.Any("error", err))
+			lm.logger.Warn("Publish failed", args...)
+			return
+		}
+		lm.logger.Info("Publish completed successfully", args...)
+ }(time.Now()) + + return lm.svc.Publish(ctx, msg) +} diff --git a/lora/api/metrics.go b/lora/api/metrics.go new file mode 100644 index 0000000..f9dacc2 --- /dev/null +++ b/lora/api/metrics.go @@ -0,0 +1,112 @@ +// Copyright (c) Abstract Machines +// SPDX-License-Identifier: Apache-2.0 + +//go:build !test + +package api + +import ( + "context" + "time" + + "github.com/absmach/magistrala/lora" + "github.com/go-kit/kit/metrics" +) + +var _ lora.Service = (*metricsMiddleware)(nil) + +type metricsMiddleware struct { + counter metrics.Counter + latency metrics.Histogram + svc lora.Service +} + +// MetricsMiddleware instruments core service by tracking request count and latency. +func MetricsMiddleware(svc lora.Service, counter metrics.Counter, latency metrics.Histogram) lora.Service { + return &metricsMiddleware{ + counter: counter, + latency: latency, + svc: svc, + } +} + +func (mm *metricsMiddleware) CreateThing(ctx context.Context, thingID, loraDevEUI string) error { + defer func(begin time.Time) { + mm.counter.With("method", "create_thing").Add(1) + mm.latency.With("method", "create_thing").Observe(time.Since(begin).Seconds()) + }(time.Now()) + + return mm.svc.CreateThing(ctx, thingID, loraDevEUI) +} + +func (mm *metricsMiddleware) UpdateThing(ctx context.Context, thingID, loraDevEUI string) error { + defer func(begin time.Time) { + mm.counter.With("method", "update_thing").Add(1) + mm.latency.With("method", "update_thing").Observe(time.Since(begin).Seconds()) + }(time.Now()) + + return mm.svc.UpdateThing(ctx, thingID, loraDevEUI) +} + +func (mm *metricsMiddleware) RemoveThing(ctx context.Context, thingID string) error { + defer func(begin time.Time) { + mm.counter.With("method", "remove_thing").Add(1) + mm.latency.With("method", "remove_thing").Observe(time.Since(begin).Seconds()) + }(time.Now()) + + return mm.svc.RemoveThing(ctx, thingID) +} + +func (mm *metricsMiddleware) CreateChannel(ctx context.Context, chanID, loraApp string) error { + defer func(begin time.Time) { + mm.counter.With("method", "create_channel").Add(1) + mm.latency.With("method", "create_channel").Observe(time.Since(begin).Seconds()) + }(time.Now()) + + return mm.svc.CreateChannel(ctx, chanID, loraApp) +} + +func (mm *metricsMiddleware) UpdateChannel(ctx context.Context, chanID, loraApp string) error { + defer func(begin time.Time) { + mm.counter.With("method", "update_channel").Add(1) + mm.latency.With("method", "update_channel").Observe(time.Since(begin).Seconds()) + }(time.Now()) + + return mm.svc.UpdateChannel(ctx, chanID, loraApp) +} + +func (mm *metricsMiddleware) RemoveChannel(ctx context.Context, chanID string) error { + defer func(begin time.Time) { + mm.counter.With("method", "remove_channel").Add(1) + mm.latency.With("method", "remove_channel").Observe(time.Since(begin).Seconds()) + }(time.Now()) + + return mm.svc.RemoveChannel(ctx, chanID) +} + +func (mm *metricsMiddleware) ConnectThing(ctx context.Context, chanID, thingID string) error { + defer func(begin time.Time) { + mm.counter.With("method", "connect_thing").Add(1) + mm.latency.With("method", "connect_thing").Observe(time.Since(begin).Seconds()) + }(time.Now()) + + return mm.svc.ConnectThing(ctx, chanID, thingID) +} + +func (mm *metricsMiddleware) DisconnectThing(ctx context.Context, chanID, thingID string) error { + defer func(begin time.Time) { + mm.counter.With("method", "disconnect_thing").Add(1) + mm.latency.With("method", "disconnect_thing").Observe(time.Since(begin).Seconds()) + }(time.Now()) + + return mm.svc.DisconnectThing(ctx, chanID, 
thingID)
+}
+
+func (mm *metricsMiddleware) Publish(ctx context.Context, msg *lora.Message) error {
+	defer func(begin time.Time) {
+		mm.counter.With("method", "publish").Add(1)
+		mm.latency.With("method", "publish").Observe(time.Since(begin).Seconds())
+	}(time.Now())
+
+	return mm.svc.Publish(ctx, msg)
+}
diff --git a/lora/doc.go b/lora/doc.go
new file mode 100644
index 0000000..466c278
--- /dev/null
+++ b/lora/doc.go
@@ -0,0 +1,6 @@
+// Copyright (c) Abstract Machines
+// SPDX-License-Identifier: Apache-2.0
+
+// Package lora contains the domain concept definitions needed to support
+// Magistrala LoRa service functionality.
+package lora
diff --git a/lora/events/doc.go b/lora/events/doc.go
new file mode 100644
index 0000000..08b70a5
--- /dev/null
+++ b/lora/events/doc.go
@@ -0,0 +1,6 @@
+// Copyright (c) Abstract Machines
+// SPDX-License-Identifier: Apache-2.0
+
+// Package events provides the domain concept definitions needed to support
+// lora events functionality.
+package events
diff --git a/lora/events/events.go b/lora/events/events.go
new file mode 100644
index 0000000..a0d0be2
--- /dev/null
+++ b/lora/events/events.go
@@ -0,0 +1,27 @@
+// Copyright (c) Abstract Machines
+// SPDX-License-Identifier: Apache-2.0
+
+package events
+
+type createThingEvent struct {
+	id         string
+	loraDevEUI string
+}
+
+type removeThingEvent struct {
+	id string
+}
+
+type createChannelEvent struct {
+	id        string
+	loraAppID string
+}
+
+type removeChannelEvent struct {
+	id string
+}
+
+type connectionThingEvent struct {
+	chanID   string
+	thingIDs []string
+}
diff --git a/lora/events/routemap.go b/lora/events/routemap.go
new file mode 100644
index 0000000..38884ea
--- /dev/null
+++ b/lora/events/routemap.go
@@ -0,0 +1,58 @@
+// Copyright (c) Abstract Machines
+// SPDX-License-Identifier: Apache-2.0
+
+package events
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/absmach/magistrala/lora"
+	"github.com/go-redis/redis/v8"
+)
+
+var _ lora.RouteMapRepository = (*routerMap)(nil)
+
+type routerMap struct {
+	client *redis.Client
+	prefix string
+}
+
+// NewRouteMapRepository returns a Redis route-map repository implementation.
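+// The mapping is stored in both directions: Save(ctx, mgxID, loraID) writes
+// "<prefix>:<mgxID>" -> loraID and "<prefix>:<loraID>" -> mgxID, so either
+// identifier can be resolved with a single Redis GET (see Save, Get and
+// Remove below).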
+func NewRouteMapRepository(client *redis.Client, prefix string) lora.RouteMapRepository {
+	return &routerMap{
+		client: client,
+		prefix: prefix,
+	}
+}
+
+func (mr *routerMap) Save(ctx context.Context, mgxID, loraID string) error {
+	tkey := fmt.Sprintf("%s:%s", mr.prefix, mgxID)
+	if err := mr.client.Set(ctx, tkey, loraID, 0).Err(); err != nil {
+		return err
+	}
+
+	lkey := fmt.Sprintf("%s:%s", mr.prefix, loraID)
+	return mr.client.Set(ctx, lkey, mgxID, 0).Err()
+}
+
+func (mr *routerMap) Get(ctx context.Context, id string) (string, error) {
+	lKey := fmt.Sprintf("%s:%s", mr.prefix, id)
+	mval, err := mr.client.Get(ctx, lKey).Result()
+	if err != nil {
+		return "", err
+	}
+
+	return mval, nil
+}
+
+func (mr *routerMap) Remove(ctx context.Context, mgxID string) error {
+	mkey := fmt.Sprintf("%s:%s", mr.prefix, mgxID)
+	lval, err := mr.client.Get(ctx, mkey).Result()
+	if err != nil {
+		return err
+	}
+
+	lkey := fmt.Sprintf("%s:%s", mr.prefix, lval)
+	return mr.client.Del(ctx, mkey, lkey).Err()
+}
diff --git a/lora/events/streams.go b/lora/events/streams.go
new file mode 100644
index 0000000..a4aa4b1
--- /dev/null
+++ b/lora/events/streams.go
@@ -0,0 +1,175 @@
+// Copyright (c) Abstract Machines
+// SPDX-License-Identifier: Apache-2.0
+
+package events
+
+import (
+	"context"
+	"errors"
+
+	"github.com/absmach/magistrala/lora"
+	"github.com/absmach/magistrala/pkg/events"
+)
+
+const (
+	keyType   = "lora"
+	keyDevEUI = "dev_eui"
+	keyAppID  = "app_id"
+
+	thingPrefix     = "thing."
+	thingCreate     = thingPrefix + "create"
+	thingUpdate     = thingPrefix + "update"
+	thingRemove     = thingPrefix + "remove"
+	thingConnect    = thingPrefix + "connect"
+	thingDisconnect = thingPrefix + "disconnect"
+
+	channelPrefix = "group."
+	channelCreate = channelPrefix + "create"
+	channelUpdate = channelPrefix + "update"
+	channelRemove = channelPrefix + "remove"
+)
+
+var (
+	errMetadataType = errors.New("field lora is missing in the metadata")
+
+	errMetadataFormat = errors.New("malformed metadata")
+
+	errMetadataAppID = errors.New("application ID not found in channel metadata")
+
+	errMetadataDevEUI = errors.New("device EUI not found in thing metadata")
+)
+
+type eventHandler struct {
+	svc lora.Service
+}
+
+// NewEventHandler returns a new event store handler.
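+// The handler decodes thing and channel lifecycle events (create, update,
+// remove, connect, disconnect) and keeps the corresponding LoRa route maps
+// in sync by delegating to the lora service.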
+func NewEventHandler(svc lora.Service) events.EventHandler { + return &eventHandler{ + svc: svc, + } +} + +func (es *eventHandler) Handle(ctx context.Context, event events.Event) error { + msg, err := event.Encode() + if err != nil { + return err + } + + switch msg["operation"] { + case thingCreate, thingUpdate: + cte, derr := decodeCreateThing(msg) + if derr != nil { + err = derr + break + } + err = es.svc.CreateThing(ctx, cte.id, cte.loraDevEUI) + case channelCreate, channelUpdate: + cce, derr := decodeCreateChannel(msg) + if derr != nil { + err = derr + break + } + err = es.svc.CreateChannel(ctx, cce.id, cce.loraAppID) + case thingRemove: + rte := decodeRemoveThing(msg) + err = es.svc.RemoveThing(ctx, rte.id) + case channelRemove: + rce := decodeRemoveChannel(msg) + err = es.svc.RemoveChannel(ctx, rce.id) + case thingConnect: + tce := decodeConnectionThing(msg) + + for _, thingID := range tce.thingIDs { + err = es.svc.ConnectThing(ctx, tce.chanID, thingID) + if err != nil { + return err + } + } + case thingDisconnect: + tde := decodeConnectionThing(msg) + + for _, thingID := range tde.thingIDs { + err = es.svc.DisconnectThing(ctx, tde.chanID, thingID) + if err != nil { + return err + } + } + } + if err != nil && err != errMetadataType { + return err + } + + return nil +} + +func decodeCreateThing(event map[string]interface{}) (createThingEvent, error) { + metadata := events.Read(event, "metadata", map[string]interface{}{}) + + cte := createThingEvent{ + id: events.Read(event, "id", ""), + } + + m, ok := metadata[keyType] + if !ok { + return createThingEvent{}, errMetadataType + } + + lm, ok := m.(map[string]interface{}) + if !ok { + return createThingEvent{}, errMetadataFormat + } + + val, ok := lm[keyDevEUI].(string) + if !ok { + return createThingEvent{}, errMetadataDevEUI + } + + cte.loraDevEUI = val + return cte, nil +} + +func decodeRemoveThing(event map[string]interface{}) removeThingEvent { + return removeThingEvent{ + id: events.Read(event, "id", ""), + } +} + +func decodeCreateChannel(event map[string]interface{}) (createChannelEvent, error) { + metadata := events.Read(event, "metadata", map[string]interface{}{}) + + cce := createChannelEvent{ + id: events.Read(event, "id", ""), + } + + m, ok := metadata[keyType] + if !ok { + return createChannelEvent{}, errMetadataType + } + + lm, ok := m.(map[string]interface{}) + if !ok { + return createChannelEvent{}, errMetadataFormat + } + + val, ok := lm[keyAppID].(string) + if !ok { + return createChannelEvent{}, errMetadataAppID + } + + cce.loraAppID = val + return cce, nil +} + +func decodeConnectionThing(event map[string]interface{}) connectionThingEvent { + return connectionThingEvent{ + chanID: events.Read(event, "group_id", ""), + thingIDs: events.ReadStringSlice(event, "member_ids"), + } +} + +func decodeRemoveChannel(event map[string]interface{}) removeChannelEvent { + return removeChannelEvent{ + id: events.Read(event, "id", ""), + } +} diff --git a/lora/message.go b/lora/message.go new file mode 100644 index 0000000..662bbb0 --- /dev/null +++ b/lora/message.go @@ -0,0 +1,47 @@ +// Copyright (c) Abstract Machines +// SPDX-License-Identifier: Apache-2.0 + +package lora + +// RxInfo receiver parameters. +type RxInfo []struct { + Mac string `json:"mac"` + Name string `json:"name"` + Latitude float64 `json:"latitude"` + Longitude float64 `json:"longitude"` + Altitude float64 `json:"altitude"` + Time string `json:"time"` + Rssi float64 `json:"rssi"` + LoRaSNR float64 `json:"loRaSNR"` +} + +// DataRate lora data rate. 
+type DataRate struct {
+	Modulation   string  `json:"modulation"`
+	Bandwidth    float64 `json:"bandwidth"`
+	SpreadFactor int64   `json:"spreadFactor"`
+}
+
+// TxInfo transmitter parameters.
+type TxInfo struct {
+	Frequency float64  `json:"frequency"`
+	DataRate  DataRate `json:"dataRate"`
+	Adr       bool     `json:"adr"`
+	CodeRate  string   `json:"codeRate"`
+}
+
+// Message represents a LoRa message (https://www.chirpstack.io/application-server/integrations/events).
+type Message struct {
+	ApplicationID       string      `json:"applicationID"`
+	ApplicationName     string      `json:"applicationName"`
+	DeviceName          string      `json:"deviceName"`
+	DevEUI              string      `json:"devEUI"`
+	DeviceStatusBattery string      `json:"deviceStatusBattery"`
+	DeviceStatusMargin  string      `json:"deviceStatusMargin"`
+	RxInfo              RxInfo      `json:"rxInfo"`
+	TxInfo              TxInfo      `json:"txInfo"`
+	FCnt                int         `json:"fCnt"`
+	FPort               int         `json:"fPort"`
+	Data                string      `json:"data"`
+	Object              interface{} `json:"object"`
+}
diff --git a/lora/mocks/doc.go b/lora/mocks/doc.go
new file mode 100644
index 0000000..16ed198
--- /dev/null
+++ b/lora/mocks/doc.go
@@ -0,0 +1,5 @@
+// Copyright (c) Abstract Machines
+// SPDX-License-Identifier: Apache-2.0
+
+// Package mocks contains mocks for testing purposes.
+package mocks
diff --git a/lora/mocks/routes.go b/lora/mocks/routes.go
new file mode 100644
index 0000000..f8542d1
--- /dev/null
+++ b/lora/mocks/routes.go
@@ -0,0 +1,94 @@
+// Code generated by mockery v2.43.2. DO NOT EDIT.
+
+// Copyright (c) Abstract Machines
+
+package mocks
+
+import (
+	context "context"
+
+	mock "github.com/stretchr/testify/mock"
+)
+
+// RouteMapRepository is an autogenerated mock type for the RouteMapRepository type
+type RouteMapRepository struct {
+	mock.Mock
+}
+
+// Get provides a mock function with given fields: _a0, _a1
+func (_m *RouteMapRepository) Get(_a0 context.Context, _a1 string) (string, error) {
+	ret := _m.Called(_a0, _a1)
+
+	if len(ret) == 0 {
+		panic("no return value specified for Get")
+	}
+
+	var r0 string
+	var r1 error
+	if rf, ok := ret.Get(0).(func(context.Context, string) (string, error)); ok {
+		return rf(_a0, _a1)
+	}
+	if rf, ok := ret.Get(0).(func(context.Context, string) string); ok {
+		r0 = rf(_a0, _a1)
+	} else {
+		r0 = ret.Get(0).(string)
+	}
+
+	if rf, ok := ret.Get(1).(func(context.Context, string) error); ok {
+		r1 = rf(_a0, _a1)
+	} else {
+		r1 = ret.Error(1)
+	}
+
+	return r0, r1
+}
+
+// Remove provides a mock function with given fields: _a0, _a1
+func (_m *RouteMapRepository) Remove(_a0 context.Context, _a1 string) error {
+	ret := _m.Called(_a0, _a1)
+
+	if len(ret) == 0 {
+		panic("no return value specified for Remove")
+	}
+
+	var r0 error
+	if rf, ok := ret.Get(0).(func(context.Context, string) error); ok {
+		r0 = rf(_a0, _a1)
+	} else {
+		r0 = ret.Error(0)
+	}
+
+	return r0
+}
+
+// Save provides a mock function with given fields: _a0, _a1, _a2
+func (_m *RouteMapRepository) Save(_a0 context.Context, _a1 string, _a2 string) error {
+	ret := _m.Called(_a0, _a1, _a2)
+
+	if len(ret) == 0 {
+		panic("no return value specified for Save")
+	}
+
+	var r0 error
+	if rf, ok := ret.Get(0).(func(context.Context, string, string) error); ok {
+		r0 = rf(_a0, _a1, _a2)
+	} else {
+		r0 = ret.Error(0)
+	}
+
+	return r0
+}
+
+// NewRouteMapRepository creates a new instance of RouteMapRepository. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
+// The first argument is typically a *testing.T value.
+func NewRouteMapRepository(t interface {
+	mock.TestingT
+	Cleanup(func())
+}) *RouteMapRepository {
+	mock := &RouteMapRepository{}
+	mock.Mock.Test(t)
+
+	t.Cleanup(func() { mock.AssertExpectations(t) })
+
+	return mock
+}
diff --git a/lora/mqtt/doc.go b/lora/mqtt/doc.go
new file mode 100644
index 0000000..efd6681
--- /dev/null
+++ b/lora/mqtt/doc.go
@@ -0,0 +1,6 @@
+// Copyright (c) Abstract Machines
+// SPDX-License-Identifier: Apache-2.0
+
+// Package mqtt contains the domain concept definitions needed to
+// support Magistrala MQTT adapter functionality.
+package mqtt
diff --git a/lora/mqtt/sub.go b/lora/mqtt/sub.go
new file mode 100644
index 0000000..e883ed7
--- /dev/null
+++ b/lora/mqtt/sub.go
@@ -0,0 +1,62 @@
+// Copyright (c) Abstract Machines
+// SPDX-License-Identifier: Apache-2.0
+
+package mqtt
+
+// This file implements subscribing to LoRa server messages over MQTT.
+import (
+	"context"
+	"encoding/json"
+	"fmt"
+	"log/slog"
+	"time"
+
+	"github.com/absmach/magistrala/lora"
+	mqtt "github.com/eclipse/paho.mqtt.golang"
+)
+
+// Subscriber represents the MQTT broker.
+type Subscriber interface {
+	// Subscribe subscribes to the given subject and receives events.
+	Subscribe(string) error
+}
+
+type broker struct {
+	svc     lora.Service
+	client  mqtt.Client
+	logger  *slog.Logger
+	timeout time.Duration
+}
+
+// NewBroker returns a new MQTT broker instance.
+func NewBroker(svc lora.Service, client mqtt.Client, t time.Duration, log *slog.Logger) Subscriber {
+	return broker{
+		svc:     svc,
+		client:  client,
+		logger:  log,
+		timeout: t,
+	}
+}
+
+// Subscribe subscribes to the Lora MQTT message broker.
+func (b broker) Subscribe(subject string) error {
+	s := b.client.Subscribe(subject, 0, b.handleMsg)
+	if err := s.Error(); s.WaitTimeout(b.timeout) && err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// handleMsg is triggered when a new message is received on the Lora MQTT broker.
+func (b broker) handleMsg(c mqtt.Client, msg mqtt.Message) {
+	m := lora.Message{}
+	if err := json.Unmarshal(msg.Payload(), &m); err != nil {
+		b.logger.Warn(fmt.Sprintf("Failed to unmarshal message: %s", err.Error()))
+		return
+	}
+
+	if err := b.svc.Publish(context.Background(), &m); err != nil {
+		b.logger.Error(fmt.Sprintf("got error while publishing messages: %s", err))
+	}
+}
diff --git a/lora/routemap.go b/lora/routemap.go
new file mode 100644
index 0000000..c20e234
--- /dev/null
+++ b/lora/routemap.go
@@ -0,0 +1,20 @@
+// Copyright (c) Abstract Machines
+// SPDX-License-Identifier: Apache-2.0
+
+package lora
+
+import "context"
+
+// RouteMapRepository stores the route map between the LoRa App Server and Magistrala.
+//
+//go:generate mockery --name RouteMapRepository --output=./mocks --filename routes.go --quiet --note "Copyright (c) Abstract Machines"
+type RouteMapRepository interface {
+	// Save stores the route between a LoRa application topic and a Magistrala channel.
+	Save(context.Context, string, string) error
+
+	// Get returns the Magistrala channel for a given LoRa application.
+	Get(context.Context, string) (string, error)
+
+	// Remove removes the mapping from the cache.
+	Remove(context.Context, string) error
+}
diff --git a/opcua/README.md b/opcua/README.md
new file mode 100644
index 0000000..3856bf1
--- /dev/null
+++ b/opcua/README.md
@@ -0,0 +1,77 @@
+# OPC-UA Adapter
+
+Adapter between the Magistrala IoT system and an OPC-UA server.
+
+This adapter sits between Magistrala and an OPC-UA server and forwards messages from one system to the other.
+
+The OPC-UA server provides the connectivity layer, and the data is pushed via this adapter service to Magistrala, where it is persisted and routed to other protocols via the Magistrala multi-protocol message broker. Magistrala adds user accounts, application management, and security to deliver the overall end-to-end OPC-UA solution.
+
+## Configuration
+
+The service is configured using the environment variables presented in the following table. Note that any unset variables will be replaced with their default values.
+
+| Variable                          | Description                                                  | Default                             |
+| --------------------------------- | ------------------------------------------------------------ | ----------------------------------- |
+| MG_OPCUA_ADAPTER_LOG_LEVEL        | Log level for the OPC-UA Adapter (debug, info, warn, error)  | info                                |
+| MG_OPCUA_ADAPTER_HTTP_HOST        | Service HTTP host                                             | ""                                  |
+| MG_OPCUA_ADAPTER_HTTP_PORT        | Service HTTP port                                             | 8180                                |
+| MG_OPCUA_ADAPTER_HTTP_SERVER_CERT | Path to the PEM encoded server certificate file               | ""                                  |
+| MG_OPCUA_ADAPTER_HTTP_SERVER_KEY  | Path to the PEM encoded server key file                       | ""                                  |
+| MG_OPCUA_ADAPTER_ROUTE_MAP_URL    | Route-map database URL                                        | <redis://localhost:6379/0>          |
+| MG_ES_URL                         | Event source URL                                              | <nats://localhost:4222>             |
+| MG_OPCUA_ADAPTER_EVENT_CONSUMER   | Service event consumer name                                   | opcua-adapter                       |
+| MG_MESSAGE_BROKER_URL             | Message broker instance URL                                   | <nats://localhost:4222>             |
+| MG_JAEGER_URL                     | Jaeger server URL                                             | <http://localhost:14268/api/traces> |
+| MG_JAEGER_TRACE_RATIO             | Jaeger sampling ratio                                         | 1.0                                 |
+| MG_SEND_TELEMETRY                 | Send telemetry to magistrala call home server                 | true                                |
+| MG_OPCUA_ADAPTER_INSTANCE_ID      | Service instance ID                                           | ""                                  |
+
+## Deployment
+
+The service itself is distributed as a Docker container. Check the [`opcua-adapter`](https://github.com/absmach/magistrala/blob/main/docker/addons/opcua-adapter/docker-compose.yml) service section in the docker-compose file to see how the service is deployed.
+
+Running this service outside of a container requires a working instance of the message broker service, a Redis route-map server, and a Jaeger server.
+To start the service outside of the container, execute the following shell script:
+
+```bash
+# download the latest version of the service
+git clone https://github.com/absmach/magistrala
+
+cd magistrala
+
+# compile the opcua-adapter
+make opcua
+
+# copy binary to bin
+make install
+
+# set the environment variables and run the service
+MG_OPCUA_ADAPTER_LOG_LEVEL=info \
+MG_OPCUA_ADAPTER_HTTP_HOST=localhost \
+MG_OPCUA_ADAPTER_HTTP_PORT=8180 \
+MG_OPCUA_ADAPTER_HTTP_SERVER_CERT="" \
+MG_OPCUA_ADAPTER_HTTP_SERVER_KEY="" \
+MG_OPCUA_ADAPTER_ROUTE_MAP_URL=redis://localhost:6379/0 \
+MG_ES_URL=nats://localhost:4222 \
+MG_OPCUA_ADAPTER_EVENT_CONSUMER=opcua-adapter \
+MG_MESSAGE_BROKER_URL=nats://localhost:4222 \
+MG_JAEGER_URL=http://localhost:14268/api/traces \
+MG_JAEGER_TRACE_RATIO=1.0 \
+MG_SEND_TELEMETRY=true \
+MG_OPCUA_ADAPTER_INSTANCE_ID="" \
+$GOBIN/magistrala-opcua
+```
+
+Setting `MG_OPCUA_ADAPTER_HTTP_SERVER_CERT` and `MG_OPCUA_ADAPTER_HTTP_SERVER_KEY` will enable TLS against the service. The service expects a file in PEM format for both the certificate and the key.
+
+### Using docker-compose
+
+This service can be deployed using Docker containers. The Docker Compose file is available in `<project_root>/docker/addons/opcua-adapter/docker-compose.yml`.
+To run the Magistrala opcua-adapter, execute the following command:
+
+```bash
+docker compose -f docker/addons/opcua-adapter/docker-compose.yml up -d
+```
+
+## Usage
+
+For more information about service capabilities and its usage, please check out the [Magistrala documentation](https://docs.magistrala.abstractmachines.fr/opcua).
diff --git a/opcua/adapter.go b/opcua/adapter.go
new file mode 100644
index 0000000..488907e
--- /dev/null
+++ b/opcua/adapter.go
@@ -0,0 +1,200 @@
+// Copyright (c) Abstract Machines
+// SPDX-License-Identifier: Apache-2.0
+
+package opcua
+
+import (
+	"context"
+	"encoding/base64"
+	"fmt"
+	"log/slog"
+	"regexp"
+	"strconv"
+
+	"github.com/absmach/mg-contrib/opcua/db"
+)
+
+// Service specifies an API that must be fulfilled by the domain service
+// implementation, and all of its decorators (e.g. logging & metrics).
+type Service interface {
+	// CreateThing creates thingID:OPC-UA-nodeID route-map
+	CreateThing(ctx context.Context, thingID, nodeID string) error
+
+	// UpdateThing updates thingID:OPC-UA-nodeID route-map
+	UpdateThing(ctx context.Context, thingID, nodeID string) error
+
+	// RemoveThing removes thingID:OPC-UA-nodeID route-map
+	RemoveThing(ctx context.Context, thingID string) error
+
+	// CreateChannel creates channelID:OPC-UA-serverURI route-map
+	CreateChannel(ctx context.Context, chanID, serverURI string) error
+
+	// UpdateChannel updates channelID:OPC-UA-serverURI route-map
+	UpdateChannel(ctx context.Context, chanID, serverURI string) error
+
+	// RemoveChannel removes channelID:OPC-UA-serverURI route-map
+	RemoveChannel(ctx context.Context, chanID string) error
+
+	// ConnectThing creates thingID:channelID route-map
+	ConnectThing(ctx context.Context, chanID string, thingIDs []string) error
+
+	// DisconnectThing removes thingID:channelID route-map
+	DisconnectThing(ctx context.Context, chanID string, thingIDs []string) error
+
+	// Browse browses available nodes for a given OPC-UA Server URI and NodeID
+	Browse(ctx context.Context, serverURI, namespace, identifier, identifierType string) ([]BrowsedNode, error)
+}
+
+// Config holds the OPC-UA server configuration.
+type Config struct {
+	ServerURI string
+	NodeID    string
+	Interval  string `env:"MG_OPCUA_ADAPTER_INTERVAL_MS" envDefault:"1000"`
+	Policy    string `env:"MG_OPCUA_ADAPTER_POLICY" envDefault:""`
+	Mode      string `env:"MG_OPCUA_ADAPTER_MODE" envDefault:""`
+	CertFile  string `env:"MG_OPCUA_ADAPTER_CERT_FILE" envDefault:""`
+	KeyFile   string `env:"MG_OPCUA_ADAPTER_KEY_FILE" envDefault:""`
+}
+
+var (
+	_         Service = (*adapterService)(nil)
+	guidRegex         = regexp.MustCompile(`^\{?[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[1-5][0-9a-fA-F]{3}-[89abAB][0-9a-fA-F]{3}-[0-9a-fA-F]{12}\}?$`)
+)
+
+type adapterService struct {
+	subscriber Subscriber
+	browser    Browser
+	thingsRM   RouteMapRepository
+	channelsRM RouteMapRepository
+	connectRM  RouteMapRepository
+	cfg        Config
+	logger     *slog.Logger
+}
+
+// New instantiates the OPC-UA adapter implementation.
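+// The thingsRM, channelsRM and connectRM route maps hold the
+// thingID:nodeID, channelID:serverURI and channelID:thingID mappings,
+// respectively, that the adapter maintains.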
+func New(sub Subscriber, brow Browser, thingsRM, channelsRM, connectRM RouteMapRepository, cfg Config, log *slog.Logger) Service { + return &adapterService{ + subscriber: sub, + browser: brow, + thingsRM: thingsRM, + channelsRM: channelsRM, + connectRM: connectRM, + cfg: cfg, + logger: log, + } +} + +func (as *adapterService) CreateThing(ctx context.Context, thingID, nodeID string) error { + return as.thingsRM.Save(ctx, thingID, nodeID) +} + +func (as *adapterService) UpdateThing(ctx context.Context, thingID, nodeID string) error { + return as.thingsRM.Save(ctx, thingID, nodeID) +} + +func (as *adapterService) RemoveThing(ctx context.Context, thingID string) error { + return as.thingsRM.Remove(ctx, thingID) +} + +func (as *adapterService) CreateChannel(ctx context.Context, chanID, serverURI string) error { + return as.channelsRM.Save(ctx, chanID, serverURI) +} + +func (as *adapterService) UpdateChannel(ctx context.Context, chanID, serverURI string) error { + return as.channelsRM.Save(ctx, chanID, serverURI) +} + +func (as *adapterService) RemoveChannel(ctx context.Context, chanID string) error { + return as.channelsRM.Remove(ctx, chanID) +} + +func (as *adapterService) ConnectThing(ctx context.Context, chanID string, thingIDs []string) error { + serverURI, err := as.channelsRM.Get(ctx, chanID) + if err != nil { + return err + } + + for _, thingID := range thingIDs { + nodeID, err := as.thingsRM.Get(ctx, thingID) + if err != nil { + return err + } + + as.cfg.NodeID = nodeID + as.cfg.ServerURI = serverURI + + c := fmt.Sprintf("%s:%s", chanID, thingID) + if err := as.connectRM.Save(ctx, c, c); err != nil { + return err + } + + go func() { + if err := as.subscriber.Subscribe(ctx, as.cfg); err != nil { + as.logger.Warn("subscription failed", slog.Any("error", err)) + } + }() + + // Store subscription details + if err := db.Save(serverURI, nodeID); err != nil { + return err + } + } + + return nil +} + +func (as *adapterService) Browse(ctx context.Context, serverURI, namespace, identifier, identifierType string) ([]BrowsedNode, error) { + idFormat := "s" + switch identifierType { + case "string": + break + case "numeric": + if _, err := strconv.Atoi(identifier); err != nil { + args := []any{ + slog.String("namespace", namespace), + slog.String("identifier", identifier), + slog.Any("error", err), + } + as.logger.Warn("failed to parse numeric identifier", args...) + break + } + idFormat = "i" + case "guid": + if !guidRegex.MatchString(identifier) { + args := []any{ + slog.String("namespace", namespace), + slog.String("identifier", identifier), + } + as.logger.Warn("GUID identifier has invalid format", args...) + break + } + idFormat = "g" + case "opaque": + if _, err := base64.StdEncoding.DecodeString(identifier); err != nil { + args := []any{ + slog.String("namespace", namespace), + slog.String("identifier", identifier), + slog.Any("error", err), + } + as.logger.Warn("opaque identifier has invalid base64 format", args...) 
+ break + } + idFormat = "b" + } + nodeID := fmt.Sprintf("ns=%s;%s=%s", namespace, idFormat, identifier) + nodes, err := as.browser.Browse(serverURI, nodeID) + if err != nil { + return nil, err + } + return nodes, nil +} + +func (as *adapterService) DisconnectThing(ctx context.Context, chanID string, thingIDs []string) error { + for _, thingID := range thingIDs { + c := fmt.Sprintf("%s:%s", chanID, thingID) + if err := as.connectRM.Remove(ctx, c); err != nil { + return err + } + } + return nil +} diff --git a/opcua/api/doc.go b/opcua/api/doc.go new file mode 100644 index 0000000..2424852 --- /dev/null +++ b/opcua/api/doc.go @@ -0,0 +1,6 @@ +// Copyright (c) Abstract Machines +// SPDX-License-Identifier: Apache-2.0 + +// Package api contains API-related concerns: endpoint definitions, middlewares +// and all resource representations. +package api diff --git a/opcua/api/endpoint.go b/opcua/api/endpoint.go new file mode 100644 index 0000000..15dd647 --- /dev/null +++ b/opcua/api/endpoint.go @@ -0,0 +1,34 @@ +// Copyright (c) Abstract Machines +// SPDX-License-Identifier: Apache-2.0 + +package api + +import ( + "context" + + "github.com/absmach/magistrala/pkg/apiutil" + "github.com/absmach/magistrala/pkg/errors" + "github.com/absmach/mg-contrib/opcua" + "github.com/go-kit/kit/endpoint" +) + +func browseEndpoint(svc opcua.Service) endpoint.Endpoint { + return func(ctx context.Context, request interface{}) (interface{}, error) { + req := request.(browseReq) + + if err := req.validate(); err != nil { + return nil, errors.Wrap(apiutil.ErrValidation, err) + } + + nodes, err := svc.Browse(ctx, req.ServerURI, req.Namespace, req.Identifier, req.IdentifierType) + if err != nil { + return nil, err + } + + res := browseRes{ + Nodes: nodes, + } + + return res, nil + } +} diff --git a/opcua/api/logging.go b/opcua/api/logging.go new file mode 100644 index 0000000..554cb93 --- /dev/null +++ b/opcua/api/logging.go @@ -0,0 +1,191 @@ +// Copyright (c) Abstract Machines +// SPDX-License-Identifier: Apache-2.0 + +//go:build !test + +package api + +import ( + "context" + "log/slog" + "time" + + "github.com/absmach/mg-contrib/opcua" +) + +var _ opcua.Service = (*loggingMiddleware)(nil) + +type loggingMiddleware struct { + logger *slog.Logger + svc opcua.Service +} + +// LoggingMiddleware adds logging facilities to the core service. +func LoggingMiddleware(svc opcua.Service, logger *slog.Logger) opcua.Service { + return &loggingMiddleware{ + logger: logger, + svc: svc, + } +} + +func (lm loggingMiddleware) CreateThing(ctx context.Context, mgxThing, opcuaNodeID string) (err error) { + defer func(begin time.Time) { + args := []any{ + slog.String("duration", time.Since(begin).String()), + slog.String("thing_id", mgxThing), + slog.String("node_id", opcuaNodeID), + } + if err != nil { + args = append(args, slog.Any("error", err)) + lm.logger.Warn("Create thing route-map failed", args...) + return + } + lm.logger.Info("Create thing route-map completed successfully", args...) + }(time.Now()) + + return lm.svc.CreateThing(ctx, mgxThing, opcuaNodeID) +} + +func (lm loggingMiddleware) UpdateThing(ctx context.Context, mgxThing, opcuaNodeID string) (err error) { + defer func(begin time.Time) { + args := []any{ + slog.String("duration", time.Since(begin).String()), + slog.String("thing_id", mgxThing), + slog.String("node_id", opcuaNodeID), + } + if err != nil { + args = append(args, slog.Any("error", err)) + lm.logger.Warn("Update thing route-map failed", args...) 
+ return + } + lm.logger.Info("Update thing route-map completed successfully", args...) + }(time.Now()) + + return lm.svc.UpdateThing(ctx, mgxThing, opcuaNodeID) +} + +func (lm loggingMiddleware) RemoveThing(ctx context.Context, mgxThing string) (err error) { + defer func(begin time.Time) { + args := []any{ + slog.String("duration", time.Since(begin).String()), + slog.String("thing_id", mgxThing), + } + if err != nil { + args = append(args, slog.Any("error", err)) + lm.logger.Warn("Remove thing route-map failed", args...) + return + } + lm.logger.Info("Remove thing route-map completed successfully", args...) + }(time.Now()) + + return lm.svc.RemoveThing(ctx, mgxThing) +} + +func (lm loggingMiddleware) CreateChannel(ctx context.Context, mgxChan, opcuaServerURI string) (err error) { + defer func(begin time.Time) { + args := []any{ + slog.String("duration", time.Since(begin).String()), + slog.String("channel_id", mgxChan), + slog.String("server_uri", opcuaServerURI), + } + if err != nil { + args = append(args, slog.Any("error", err)) + lm.logger.Warn("Create channel route-map failed", args...) + return + } + lm.logger.Info("Create channel route-map completed successfully", args...) + }(time.Now()) + + return lm.svc.CreateChannel(ctx, mgxChan, opcuaServerURI) +} + +func (lm loggingMiddleware) UpdateChannel(ctx context.Context, mgxChanID, opcuaServerURI string) (err error) { + defer func(begin time.Time) { + args := []any{ + slog.String("duration", time.Since(begin).String()), + slog.String("channel_id", mgxChanID), + slog.String("server_uri", opcuaServerURI), + } + if err != nil { + args = append(args, slog.Any("error", err)) + lm.logger.Warn("Update channel route-map failed", args...) + return + } + lm.logger.Info("Update channel route-map completed successfully", args...) + }(time.Now()) + + return lm.svc.UpdateChannel(ctx, mgxChanID, opcuaServerURI) +} + +func (lm loggingMiddleware) RemoveChannel(ctx context.Context, mgxChanID string) (err error) { + defer func(begin time.Time) { + args := []any{ + slog.String("duration", time.Since(begin).String()), + slog.String("channel_id", mgxChanID), + } + if err != nil { + args = append(args, slog.Any("error", err)) + lm.logger.Warn("Remove channel route-map failed", args...) + return + } + lm.logger.Info("Remove channel route-map completed successfully", args...) + }(time.Now()) + + return lm.svc.RemoveChannel(ctx, mgxChanID) +} + +func (lm loggingMiddleware) ConnectThing(ctx context.Context, mgxChanID string, mgxThingIDs []string) (err error) { + defer func(begin time.Time) { + args := []any{ + slog.String("duration", time.Since(begin).String()), + slog.String("channel_id", mgxChanID), + slog.Any("thing_ids", mgxThingIDs), + } + if err != nil { + args = append(args, slog.Any("error", err)) + lm.logger.Warn("Connect thing to channel failed", args...) + return + } + lm.logger.Info("Connect thing to channel completed successfully", args...) + }(time.Now()) + + return lm.svc.ConnectThing(ctx, mgxChanID, mgxThingIDs) +} + +func (lm loggingMiddleware) DisconnectThing(ctx context.Context, mgxChanID string, mgxThingIDs []string) (err error) { + defer func(begin time.Time) { + args := []any{ + slog.String("duration", time.Since(begin).String()), + slog.String("channel_id", mgxChanID), + slog.Any("thing_ids", mgxThingIDs), + } + if err != nil { + args = append(args, slog.Any("error", err)) + lm.logger.Warn("Disconnect thing from channel failed", args...) + return + } + lm.logger.Info("Disconnect thing from channel completed successfully", args...) 
+ }(time.Now()) + + return lm.svc.DisconnectThing(ctx, mgxChanID, mgxThingIDs) +} + +func (lm loggingMiddleware) Browse(ctx context.Context, serverURI, namespace, identifier, identifierType string) (nodes []opcua.BrowsedNode, err error) { + defer func(begin time.Time) { + args := []any{ + slog.String("duration", time.Since(begin).String()), + slog.String("server_uri", serverURI), + slog.String("namespace", namespace), + slog.String("identifier", identifier), + slog.String("identifier_type", identifierType), + } + if err != nil { + args = append(args, slog.Any("error", err)) + lm.logger.Warn("Browse available nodes failed", args...) + return + } + lm.logger.Info("Browse available nodes completed successfully", args...) + }(time.Now()) + + return lm.svc.Browse(ctx, serverURI, namespace, identifier, identifierType) +} diff --git a/opcua/api/metrics.go b/opcua/api/metrics.go new file mode 100644 index 0000000..c6bc04e --- /dev/null +++ b/opcua/api/metrics.go @@ -0,0 +1,112 @@ +// Copyright (c) Abstract Machines +// SPDX-License-Identifier: Apache-2.0 + +//go:build !test + +package api + +import ( + "context" + "time" + + "github.com/absmach/mg-contrib/opcua" + "github.com/go-kit/kit/metrics" +) + +var _ opcua.Service = (*metricsMiddleware)(nil) + +type metricsMiddleware struct { + counter metrics.Counter + latency metrics.Histogram + svc opcua.Service +} + +// MetricsMiddleware instruments core service by tracking request count and latency. +func MetricsMiddleware(svc opcua.Service, counter metrics.Counter, latency metrics.Histogram) opcua.Service { + return &metricsMiddleware{ + counter: counter, + latency: latency, + svc: svc, + } +} + +func (mm *metricsMiddleware) CreateThing(ctx context.Context, mgxDevID, opcuaNodeID string) error { + defer func(begin time.Time) { + mm.counter.With("method", "create_thing").Add(1) + mm.latency.With("method", "create_thing").Observe(time.Since(begin).Seconds()) + }(time.Now()) + + return mm.svc.CreateThing(ctx, mgxDevID, opcuaNodeID) +} + +func (mm *metricsMiddleware) UpdateThing(ctx context.Context, mgxDevID, opcuaNodeID string) error { + defer func(begin time.Time) { + mm.counter.With("method", "update_thing").Add(1) + mm.latency.With("method", "update_thing").Observe(time.Since(begin).Seconds()) + }(time.Now()) + + return mm.svc.UpdateThing(ctx, mgxDevID, opcuaNodeID) +} + +func (mm *metricsMiddleware) RemoveThing(ctx context.Context, mgxDevID string) error { + defer func(begin time.Time) { + mm.counter.With("method", "remove_thing").Add(1) + mm.latency.With("method", "remove_thing").Observe(time.Since(begin).Seconds()) + }(time.Now()) + + return mm.svc.RemoveThing(ctx, mgxDevID) +} + +func (mm *metricsMiddleware) CreateChannel(ctx context.Context, mgxChanID, opcuaServerURI string) error { + defer func(begin time.Time) { + mm.counter.With("method", "create_channel").Add(1) + mm.latency.With("method", "create_channel").Observe(time.Since(begin).Seconds()) + }(time.Now()) + + return mm.svc.CreateChannel(ctx, mgxChanID, opcuaServerURI) +} + +func (mm *metricsMiddleware) UpdateChannel(ctx context.Context, mgxChanID, opcuaServerURI string) error { + defer func(begin time.Time) { + mm.counter.With("method", "update_channel").Add(1) + mm.latency.With("method", "update_channel").Observe(time.Since(begin).Seconds()) + }(time.Now()) + + return mm.svc.UpdateChannel(ctx, mgxChanID, opcuaServerURI) +} + +func (mm *metricsMiddleware) RemoveChannel(ctx context.Context, mgxChanID string) error { + defer func(begin time.Time) { + mm.counter.With("method", 
"remove_channel").Add(1) + mm.latency.With("method", "remove_channel").Observe(time.Since(begin).Seconds()) + }(time.Now()) + + return mm.svc.RemoveChannel(ctx, mgxChanID) +} + +func (mm *metricsMiddleware) ConnectThing(ctx context.Context, mgxChanID string, mgxThingIDs []string) error { + defer func(begin time.Time) { + mm.counter.With("method", "connect_thing").Add(1) + mm.latency.With("method", "connect_thing").Observe(time.Since(begin).Seconds()) + }(time.Now()) + + return mm.svc.ConnectThing(ctx, mgxChanID, mgxThingIDs) +} + +func (mm *metricsMiddleware) DisconnectThing(ctx context.Context, mgxChanID string, mgxThingIDs []string) error { + defer func(begin time.Time) { + mm.counter.With("method", "disconnect_thing").Add(1) + mm.latency.With("method", "disconnect_thing").Observe(time.Since(begin).Seconds()) + }(time.Now()) + + return mm.svc.DisconnectThing(ctx, mgxChanID, mgxThingIDs) +} + +func (mm *metricsMiddleware) Browse(ctx context.Context, serverURI, namespace, identifier, identifierType string) ([]opcua.BrowsedNode, error) { + defer func(begin time.Time) { + mm.counter.With("method", "browse").Add(1) + mm.latency.With("method", "browse").Observe(time.Since(begin).Seconds()) + }(time.Now()) + + return mm.svc.Browse(ctx, serverURI, namespace, identifier, identifierType) +} diff --git a/opcua/api/requests.go b/opcua/api/requests.go new file mode 100644 index 0000000..8121b63 --- /dev/null +++ b/opcua/api/requests.go @@ -0,0 +1,21 @@ +// Copyright (c) Abstract Machines +// SPDX-License-Identifier: Apache-2.0 + +package api + +import "github.com/absmach/magistrala/pkg/apiutil" + +type browseReq struct { + ServerURI string + Namespace string + Identifier string + IdentifierType string +} + +func (req *browseReq) validate() error { + if req.ServerURI == "" { + return apiutil.ErrMissingID + } + + return nil +} diff --git a/opcua/api/responses.go b/opcua/api/responses.go new file mode 100644 index 0000000..102bc98 --- /dev/null +++ b/opcua/api/responses.go @@ -0,0 +1,29 @@ +// Copyright (c) Abstract Machines +// SPDX-License-Identifier: Apache-2.0 + +package api + +import ( + "net/http" + + "github.com/absmach/magistrala" + "github.com/absmach/mg-contrib/opcua" +) + +var _ magistrala.Response = (*browseRes)(nil) + +type browseRes struct { + Nodes []opcua.BrowsedNode `json:"nodes"` +} + +func (res browseRes) Code() int { + return http.StatusOK +} + +func (res browseRes) Headers() map[string]string { + return map[string]string{} +} + +func (res browseRes) Empty() bool { + return false +} diff --git a/opcua/api/transport.go b/opcua/api/transport.go new file mode 100644 index 0000000..da7d2d6 --- /dev/null +++ b/opcua/api/transport.go @@ -0,0 +1,132 @@ +// Copyright (c) Abstract Machines +// SPDX-License-Identifier: Apache-2.0 + +package api + +import ( + "context" + "encoding/json" + "log/slog" + "net/http" + + "github.com/absmach/magistrala" + "github.com/absmach/magistrala/pkg/apiutil" + "github.com/absmach/magistrala/pkg/errors" + "github.com/absmach/mg-contrib/opcua" + "github.com/go-chi/chi/v5" + kithttp "github.com/go-kit/kit/transport/http" + "github.com/prometheus/client_golang/prometheus/promhttp" +) + +const ( + contentType = "application/json" + serverParam = "server" + namespaceParam = "namespace" + identifierParam = "identifier" + identifierTypeParam = "identifierType" + defNamespace = "ns=0" // Standard root namespace + defIdentifier = "i=84" // Standard root identifier +) + +// MakeHandler returns a HTTP handler for API endpoints. 
+func MakeHandler(svc opcua.Service, logger *slog.Logger, instanceID string) http.Handler {
+	opts := []kithttp.ServerOption{
+		kithttp.ServerErrorEncoder(apiutil.LoggingErrorEncoder(logger, encodeError)),
+	}
+
+	r := chi.NewRouter()
+
+	r.Get("/browse", kithttp.NewServer(
+		browseEndpoint(svc),
+		decodeBrowse,
+		encodeResponse,
+		opts...,
+	).ServeHTTP)
+
+	r.Get("/health", magistrala.Health("opcua-adapter", instanceID))
+	r.Handle("/metrics", promhttp.Handler())
+
+	return r
+}
+
+func decodeBrowse(_ context.Context, r *http.Request) (interface{}, error) {
+	s, err := apiutil.ReadStringQuery(r, serverParam, "")
+	if err != nil {
+		return nil, errors.Wrap(apiutil.ErrValidation, err)
+	}
+
+	n, err := apiutil.ReadStringQuery(r, namespaceParam, "")
+	if err != nil {
+		return nil, errors.Wrap(apiutil.ErrValidation, err)
+	}
+
+	i, err := apiutil.ReadStringQuery(r, identifierParam, "")
+	if err != nil {
+		return nil, errors.Wrap(apiutil.ErrValidation, err)
+	}
+
+	iType, err := apiutil.ReadStringQuery(r, identifierTypeParam, "")
+	if err != nil {
+		return nil, errors.Wrap(apiutil.ErrValidation, err)
+	}
+
+	if n == "" || i == "" {
+		n = defNamespace
+		i = defIdentifier
+	}
+
+	req := browseReq{
+		ServerURI:      s,
+		Namespace:      n,
+		Identifier:     i,
+		IdentifierType: iType,
+	}
+
+	return req, nil
+}
+
+func encodeResponse(_ context.Context, w http.ResponseWriter, response interface{}) error {
+	w.Header().Set("Content-Type", contentType)
+
+	if ar, ok := response.(magistrala.Response); ok {
+		for k, v := range ar.Headers() {
+			w.Header().Set(k, v)
+		}
+
+		w.WriteHeader(ar.Code())
+
+		if ar.Empty() {
+			return nil
+		}
+	}
+
+	return json.NewEncoder(w).Encode(response)
+}
+
+func encodeError(_ context.Context, err error, w http.ResponseWriter) {
+	var wrapper error
+	if errors.Contains(err, apiutil.ErrValidation) {
+		wrapper, err = errors.Unwrap(err)
+	}
+
+	switch {
+	case errors.Contains(err, apiutil.ErrInvalidQueryParams),
+		errors.Contains(err, errors.ErrMalformedEntity),
+		err == apiutil.ErrMissingID:
+		w.WriteHeader(http.StatusBadRequest)
+
+	default:
+		w.WriteHeader(http.StatusInternalServerError)
+	}
+
+	if wrapper != nil {
+		err = errors.Wrap(wrapper, err)
+	}
+
+	if errorVal, ok := err.(errors.Error); ok {
+		w.Header().Set("Content-Type", contentType)
+		if err := json.NewEncoder(w).Encode(errorVal); err != nil {
+			w.WriteHeader(http.StatusInternalServerError)
+		}
+	}
+}
diff --git a/opcua/browser.go b/opcua/browser.go
new file mode 100644
index 0000000..a9687f6
--- /dev/null
+++ b/opcua/browser.go
@@ -0,0 +1,20 @@
+// Copyright (c) Abstract Machines
+// SPDX-License-Identifier: Apache-2.0
+
+package opcua
+
+// BrowsedNode represents the details of a browsed OPC-UA node.
+type BrowsedNode struct {
+	NodeID      string
+	DataType    string
+	Description string
+	Unit        string
+	Scale       string
+	BrowseName  string
+}
+
+// Browser represents the OPC-UA Server Nodes browser.
+type Browser interface {
+	// Browse available Nodes for a given URI.
+	Browse(string, string) ([]BrowsedNode, error)
+}
diff --git a/opcua/db/doc.go b/opcua/db/doc.go
new file mode 100644
index 0000000..72850ec
--- /dev/null
+++ b/opcua/db/doc.go
@@ -0,0 +1,5 @@
+// Copyright (c) Abstract Machines
+// SPDX-License-Identifier: Apache-2.0
+
+// Package db contains the database implementation of the opcua repository layer.
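+// Subscriptions are persisted as serverURI,nodeID pairs appended to a CSV
+// file (see Save and ReadAll in subs.go).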
+package db diff --git a/opcua/db/subs.go b/opcua/db/subs.go new file mode 100644 index 0000000..c6bbdd0 --- /dev/null +++ b/opcua/db/subs.go @@ -0,0 +1,81 @@ +// Copyright (c) Abstract Machines +// SPDX-License-Identifier: Apache-2.0 + +package db + +import ( + "encoding/csv" + "io" + "os" + + "github.com/absmach/magistrala/pkg/errors" +) + +const ( + columns = 2 + path = "/store/nodes.csv" +) + +var ( + errNotFound = errors.New("file not found") + errWriteFile = errors.New("failed to write file") + errOpenFile = errors.New("failed to open file") + errReadFile = errors.New("failed to read file") + errEmptyLine = errors.New("empty or incomplete line found in file") +) + +// Node represents an OPC-UA node. +type Node struct { + ServerURI string + NodeID string +} + +// Save stores a successful subscription. +func Save(serverURI, nodeID string) error { + file, err := os.OpenFile(path, os.O_CREATE|os.O_APPEND|os.O_WRONLY, os.ModePerm) + if err != nil { + return errors.Wrap(errWriteFile, err) + } + defer file.Close() + csvWriter := csv.NewWriter(file) + err = csvWriter.Write([]string{serverURI, nodeID}) + csvWriter.Flush() + if err != nil { + return errors.Wrap(errWriteFile, err) + } + + return nil +} + +// ReadAll returns all stored subscriptions. +func ReadAll() ([]Node, error) { + if _, err := os.Stat(path); os.IsNotExist(err) { + return nil, errors.Wrap(errNotFound, err) + } + + file, err := os.OpenFile(path, os.O_RDONLY, os.ModePerm) + if err != nil { + return nil, errors.Wrap(errOpenFile, err) + } + defer file.Close() + + reader := csv.NewReader(file) + nodes := []Node{} + for { + l, err := reader.Read() + if err == io.EOF { + break + } + if err != nil { + return nil, errors.Wrap(errReadFile, err) + } + + if len(l) < columns { + return nil, errEmptyLine + } + + nodes = append(nodes, Node{l[0], l[1]}) + } + + return nodes, nil +} diff --git a/opcua/doc.go b/opcua/doc.go new file mode 100644 index 0000000..5044c32 --- /dev/null +++ b/opcua/doc.go @@ -0,0 +1,5 @@ +// Copyright (c) Abstract Machines +// SPDX-License-Identifier: Apache-2.0 + +// Package opcua contains OPC-UA server implementation. +package opcua diff --git a/opcua/events/doc.go b/opcua/events/doc.go new file mode 100644 index 0000000..1d141f2 --- /dev/null +++ b/opcua/events/doc.go @@ -0,0 +1,6 @@ +// Copyright (c) Abstract Machines +// SPDX-License-Identifier: Apache-2.0 + +// Package events provides the domain concept definitions needed to support +// opcua events functionality.
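+//
+// The package consumes thing and channel lifecycle events from the
+// Magistrala event store and mirrors them into the OPC-UA route maps.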
+package events diff --git a/opcua/events/events.go b/opcua/events/events.go new file mode 100644 index 0000000..d778420 --- /dev/null +++ b/opcua/events/events.go @@ -0,0 +1,27 @@ +// Copyright (c) Abstract Machines +// SPDX-License-Identifier: Apache-2.0 + +package events + +type createThingEvent struct { + id string + opcuaNodeID string +} + +type removeThingEvent struct { + id string +} + +type connectThingEvent struct { + chanID string + thingIDs []string +} + +type createChannelEvent struct { + id string + opcuaServerURI string +} + +type removeChannelEvent struct { + id string +} diff --git a/opcua/events/routemap.go b/opcua/events/routemap.go new file mode 100644 index 0000000..44f4ed1 --- /dev/null +++ b/opcua/events/routemap.go @@ -0,0 +1,62 @@ +// Copyright (c) Abstract Machines +// SPDX-License-Identifier: Apache-2.0 + +package events + +import ( + "context" + "fmt" + + "github.com/absmach/mg-contrib/opcua" + "github.com/go-redis/redis/v8" +) + +var _ opcua.RouteMapRepository = (*routerMap)(nil) + +type routerMap struct { + client *redis.Client + prefix string +} + +// NewRouteMapRepository returns a Redis route map repository implementation. +func NewRouteMapRepository(client *redis.Client, prefix string) opcua.RouteMapRepository { + return &routerMap{ + client: client, + prefix: prefix, + } +} + +func (mr *routerMap) Save(ctx context.Context, mgxID, opcuaID string) error { + tkey := fmt.Sprintf("%s:%s", mr.prefix, mgxID) + if err := mr.client.Set(ctx, tkey, opcuaID, 0).Err(); err != nil { + return err + } + + lkey := fmt.Sprintf("%s:%s", mr.prefix, opcuaID) + if err := mr.client.Set(ctx, lkey, mgxID, 0).Err(); err != nil { + return err + } + + return nil +} + +func (mr *routerMap) Get(ctx context.Context, opcuaID string) (string, error) { + lKey := fmt.Sprintf("%s:%s", mr.prefix, opcuaID) + mval, err := mr.client.Get(ctx, lKey).Result() + if err != nil { + return "", err + } + + return mval, nil +} + +func (mr *routerMap) Remove(ctx context.Context, mgxID string) error { + mkey := fmt.Sprintf("%s:%s", mr.prefix, mgxID) + lval, err := mr.client.Get(ctx, mkey).Result() + if err != nil { + return err + } + + lkey := fmt.Sprintf("%s:%s", mr.prefix, lval) + return mr.client.Del(ctx, mkey, lkey).Err() +} diff --git a/opcua/events/streams.go b/opcua/events/streams.go new file mode 100644 index 0000000..ddb405e --- /dev/null +++ b/opcua/events/streams.go @@ -0,0 +1,184 @@ +// Copyright (c) Abstract Machines +// SPDX-License-Identifier: Apache-2.0 + +package events + +import ( + "context" + "errors" + + "github.com/absmach/magistrala/pkg/events" + "github.com/absmach/mg-contrib/opcua" +) + +const ( + keyType = "opcua" + keyNodeID = "node_id" + keyServerURI = "server_uri" + + thingPrefix = "thing." + thingCreate = thingPrefix + "create" + thingUpdate = thingPrefix + "update" + thingRemove = thingPrefix + "remove" + + channelPrefix = "channel." + channelCreate = channelPrefix + "create" + channelUpdate = channelPrefix + "update" + channelRemove = channelPrefix + "remove" + channelConnect = channelPrefix + "assign" + channelDisconnect = channelPrefix + "unassign" +) + +var ( + errMetadataType = errors.New("metadata is not of type opcua") + + errMetadataFormat = errors.New("malformed metadata") + + errMetadataServerURI = errors.New("ServerURI not found in channel metadata") + + errMetadataNodeID = errors.New("NodeID not found in thing metadata") +) + +type eventHandler struct { + svc opcua.Service +} + +// NewEventHandler returns a new event store handler.
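+// The handler dispatches on the event "operation" field: thing and channel
+// create/update events refresh the route maps, remove events delete them,
+// and assign/unassign events maintain the channel-thing connections.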
+func NewEventHandler(svc opcua.Service) events.EventHandler { + return &eventHandler{ + svc: svc, + } +} + +func (es *eventHandler) Handle(ctx context.Context, event events.Event) error { + msg, err := event.Encode() + if err != nil { + return err + } + + switch msg["operation"] { + case thingCreate: + cte, e := decodeCreateThing(msg) + if e != nil { + err = e + break + } + err = es.svc.CreateThing(ctx, cte.id, cte.opcuaNodeID) + case thingUpdate: + ute, e := decodeCreateThing(msg) + if e != nil { + err = e + break + } + err = es.svc.CreateThing(ctx, ute.id, ute.opcuaNodeID) + case thingRemove: + rte := decodeRemoveThing(msg) + err = es.svc.RemoveThing(ctx, rte.id) + case channelCreate: + cce, e := decodeCreateChannel(msg) + if e != nil { + err = e + break + } + err = es.svc.CreateChannel(ctx, cce.id, cce.opcuaServerURI) + case channelUpdate: + uce, e := decodeCreateChannel(msg) + if e != nil { + err = e + break + } + err = es.svc.CreateChannel(ctx, uce.id, uce.opcuaServerURI) + case channelRemove: + rce := decodeRemoveChannel(msg) + err = es.svc.RemoveChannel(ctx, rce.id) + case channelConnect: + rce := decodeConnectThing(msg) + err = es.svc.ConnectThing(ctx, rce.chanID, rce.thingIDs) + case channelDisconnect: + rce := decodeDisconnectThing(msg) + err = es.svc.DisconnectThing(ctx, rce.chanID, rce.thingIDs) + } + if err != nil && err != errMetadataType { + return err + } + + return nil +} + +func decodeCreateThing(event map[string]interface{}) (createThingEvent, error) { + metadata := events.Read(event, "metadata", map[string]interface{}{}) + + cte := createThingEvent{ + id: events.Read(event, "id", ""), + } + + metadataOpcua, ok := metadata[keyType] + if !ok { + return createThingEvent{}, errMetadataType + } + + metadataVal, ok := metadataOpcua.(map[string]interface{}) + if !ok { + return createThingEvent{}, errMetadataFormat + } + + val, ok := metadataVal[keyNodeID].(string) + if !ok || val == "" { + return createThingEvent{}, errMetadataNodeID + } + + cte.opcuaNodeID = val + return cte, nil +} + +func decodeRemoveThing(event map[string]interface{}) removeThingEvent { + return removeThingEvent{ + id: events.Read(event, "id", ""), + } +} + +func decodeCreateChannel(event map[string]interface{}) (createChannelEvent, error) { + metadata := events.Read(event, "metadata", map[string]interface{}{}) + + cce := createChannelEvent{ + id: events.Read(event, "id", ""), + } + + metadataOpcua, ok := metadata[keyType] + if !ok { + return createChannelEvent{}, errMetadataType + } + + metadataVal, ok := metadataOpcua.(map[string]interface{}) + if !ok { + return createChannelEvent{}, errMetadataFormat + } + + val, ok := metadataVal[keyServerURI].(string) + if !ok || val == "" { + return createChannelEvent{}, errMetadataServerURI + } + + cce.opcuaServerURI = val + return cce, nil +} + +func decodeRemoveChannel(event map[string]interface{}) removeChannelEvent { + return removeChannelEvent{ + id: events.Read(event, "id", ""), + } +} + +func decodeConnectThing(event map[string]interface{}) connectThingEvent { + return connectThingEvent{ + chanID: events.Read(event, "group_id", ""), + thingIDs: events.ReadStringSlice(event, "member_ids"), + } +} + +func decodeDisconnectThing(event map[string]interface{}) connectThingEvent { + return connectThingEvent{ + chanID: events.Read(event, "group_id", ""), + thingIDs: events.ReadStringSlice(event, "member_ids"), + } +} diff --git a/opcua/gopcua/browser.go b/opcua/gopcua/browser.go new file mode 100644 index 0000000..63ecdca --- /dev/null +++ b/opcua/gopcua/browser.go 
@@ -0,0 +1,228 @@ +// Copyright (c) Abstract Machines +// SPDX-License-Identifier: Apache-2.0 + +package gopcua + +import ( + "context" + "log/slog" + + "github.com/absmach/magistrala/pkg/errors" + "github.com/absmach/mg-contrib/opcua" + opcuagocpua "github.com/gopcua/opcua" + "github.com/gopcua/opcua/id" + uagocpua "github.com/gopcua/opcua/ua" +) + +const maxChildrens = 4 // maximum browsing depth for node children + +// NodeDef represents the node browser response. +type NodeDef struct { + NodeID *uagocpua.NodeID + NodeClass uagocpua.NodeClass + BrowseName string + Description string + AccessLevel uagocpua.AccessLevelType + Path string + DataType string + Writable bool + Unit string + Scale string + Min string + Max string +} + +var _ opcua.Browser = (*browser)(nil) + +type browser struct { + ctx context.Context + logger *slog.Logger +} + +// NewBrowser returns a new OPC-UA browser instance. +func NewBrowser(ctx context.Context, log *slog.Logger) opcua.Browser { + return browser{ + ctx: ctx, + logger: log, + } +} + +func (c browser) Browse(serverURI, nodeID string) ([]opcua.BrowsedNode, error) { + opts := []opcuagocpua.Option{ + opcuagocpua.SecurityMode(uagocpua.MessageSecurityModeNone), + } + + oc := opcuagocpua.NewClient(serverURI, opts...) + if err := oc.Connect(c.ctx); err != nil { + return nil, errors.Wrap(errFailedConn, err) + } + defer oc.Close() + + nodeList, err := browse(oc, nodeID, "", 0) + if err != nil { + return nil, err + } + + nodes := []opcua.BrowsedNode{} + for _, s := range nodeList { + node := opcua.BrowsedNode{ + NodeID: s.NodeID.String(), + DataType: s.DataType, + Description: s.Description, + Unit: s.Unit, + Scale: s.Scale, + BrowseName: s.BrowseName, + } + nodes = append(nodes, node) + } + + return nodes, nil +} + +func browse(oc *opcuagocpua.Client, nodeID, path string, level int) ([]NodeDef, error) { + if level > maxChildrens { + return nil, nil + } + + nid, err := uagocpua.ParseNodeID(nodeID) + if err != nil { + return []NodeDef{}, err + } + n := oc.Node(nid) + + attrs, err := n.Attributes( + uagocpua.AttributeIDNodeClass, + uagocpua.AttributeIDBrowseName, + uagocpua.AttributeIDDescription, + uagocpua.AttributeIDAccessLevel, + uagocpua.AttributeIDDataType, + ) + if err != nil { + return nil, err + } + + def := NodeDef{ + NodeID: n.ID, + } + + switch err := attrs[0].Status; err { + case uagocpua.StatusOK: + def.NodeClass = uagocpua.NodeClass(attrs[0].Value.Int()) + default: + return nil, err + } + + switch err := attrs[1].Status; err { + case uagocpua.StatusOK: + def.BrowseName = attrs[1].Value.String() + default: + return nil, err + } + + switch err := attrs[2].Status; err { + case uagocpua.StatusOK: + def.Description = attrs[2].Value.String() + case uagocpua.StatusBadAttributeIDInvalid: + // ignore + default: + return nil, err + } + + switch err := attrs[3].Status; err { + case uagocpua.StatusOK: + def.AccessLevel = uagocpua.AccessLevelType(attrs[3].Value.Int()) + def.Writable = def.AccessLevel&uagocpua.AccessLevelTypeCurrentWrite == uagocpua.AccessLevelTypeCurrentWrite + case uagocpua.StatusBadAttributeIDInvalid: + // ignore + default: + return nil, err + } + + switch err := attrs[4].Status; err { + case uagocpua.StatusOK: + switch v := attrs[4].Value.NodeID().IntID(); v { + case id.DateTime: + def.DataType = "time.Time" + case id.Boolean: + def.DataType = "bool" + case id.SByte: + def.DataType = "int8" + case id.Int16: + def.DataType = "int16" + case id.Int32: + def.DataType = "int32" + case id.Byte: + def.DataType = "byte" + case id.UInt16: + def.DataType = "uint16"
+ case id.UInt32: + def.DataType = "uint32" + case id.UtcTime: + def.DataType = "time.Time" + case id.String: + def.DataType = "string" + case id.Float: + def.DataType = "float32" + case id.Double: + def.DataType = "float64" + default: + def.DataType = attrs[4].Value.NodeID().String() + } + case uagocpua.StatusBadAttributeIDInvalid: + // ignore + default: + return nil, err + } + + def.Path = join(path, def.BrowseName) + + var nodes []NodeDef + if def.NodeClass == uagocpua.NodeClassVariable { + nodes = append(nodes, def) + } + + bc, err := browseChildren(oc, n, def.Path, level, id.HasComponent) + if err != nil { + return nil, err + } + nodes = append(nodes, bc...) + + bc, err = browseChildren(oc, n, def.Path, level, id.Organizes) + if err != nil { + return nil, err + } + nodes = append(nodes, bc...) + + bc, err = browseChildren(oc, n, def.Path, level, id.HasProperty) + if err != nil { + return nil, err + } + nodes = append(nodes, bc...) + + return nodes, nil +} + +func browseChildren(c *opcuagocpua.Client, n *opcuagocpua.Node, path string, level int, typeDef uint32) ([]NodeDef, error) { + nodes := []NodeDef{} + refs, err := n.ReferencedNodes(typeDef, uagocpua.BrowseDirectionForward, uagocpua.NodeClassAll, true) + if err != nil { + return []NodeDef{}, err + } + + for _, ref := range refs { + children, err := browse(c, ref.ID.String(), path, level+1) + if err != nil { + return []NodeDef{}, err + } + nodes = append(nodes, children...) + } + + return nodes, nil +} + +func join(a, b string) string { + if a == "" { + return b + } + return a + "." + b +} diff --git a/opcua/gopcua/doc.go b/opcua/gopcua/doc.go new file mode 100644 index 0000000..e6a60bf --- /dev/null +++ b/opcua/gopcua/doc.go @@ -0,0 +1,5 @@ +// Copyright (c) Abstract Machines +// SPDX-License-Identifier: Apache-2.0 + +// Package gopcua contains the OPC-UA client implementation. 
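+//
+// It wraps the github.com/gopcua/opcua client library to implement the
+// opcua.Browser and opcua.Subscriber interfaces.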
+package gopcua diff --git a/opcua/gopcua/subscribe.go b/opcua/gopcua/subscribe.go new file mode 100644 index 0000000..53ddf29 --- /dev/null +++ b/opcua/gopcua/subscribe.go @@ -0,0 +1,251 @@ +// Copyright (c) Abstract Machines +// SPDX-License-Identifier: Apache-2.0 + +package gopcua + +import ( + "context" + "fmt" + "log/slog" + "strconv" + "time" + + "github.com/absmach/magistrala/pkg/errors" + "github.com/absmach/magistrala/pkg/messaging" + "github.com/absmach/mg-contrib/opcua" + opcuagopcua "github.com/gopcua/opcua" + uagopcua "github.com/gopcua/opcua/ua" +) + +const ( + protocol = "opcua" + token = "" +) + +var ( + errNotFoundServerURI = errors.New("route map not found for Server URI") + errNotFoundNodeID = errors.New("route map not found for Node ID") + errNotFoundConn = errors.New("connection not found") + + errFailedConn = errors.New("failed to connect") + errFailedParseInterval = errors.New("failed to parse subscription interval") + errFailedSub = errors.New("failed to subscribe") + errFailedFindEndpoint = errors.New("failed to find suitable endpoint") + errFailedFetchEndpoint = errors.New("failed to fetch OPC-UA server endpoints") + errFailedParseNodeID = errors.New("failed to parse NodeID") + errFailedCreateReq = errors.New("failed to create request") + errResponseStatus = errors.New("response status not OK") +) + +var _ opcua.Subscriber = (*client)(nil) + +type client struct { + ctx context.Context + publisher messaging.Publisher + thingsRM opcua.RouteMapRepository + channelsRM opcua.RouteMapRepository + connectRM opcua.RouteMapRepository + logger *slog.Logger +} + +type message struct { + ServerURI string + NodeID string + Type string + Time int64 + DataKey string + Data interface{} +} + +// NewSubscriber returns a new OPC-UA client instance. +func NewSubscriber(ctx context.Context, publisher messaging.Publisher, thingsRM, channelsRM, connectRM opcua.RouteMapRepository, log *slog.Logger) opcua.Subscriber { + return client{ + ctx: ctx, + publisher: publisher, + thingsRM: thingsRM, + channelsRM: channelsRM, + connectRM: connectRM, + logger: log, + } +} + +// Subscribe subscribes to the OPC-UA Server. +func (c client) Subscribe(ctx context.Context, cfg opcua.Config) error { + opts := []opcuagopcua.Option{ + opcuagopcua.SecurityMode(uagopcua.MessageSecurityModeNone), + } + + if cfg.Mode != "" { + endpoints, err := opcuagopcua.GetEndpoints(cfg.ServerURI) + if err != nil { + return errors.Wrap(errFailedFetchEndpoint, err) + } + + ep := opcuagopcua.SelectEndpoint(endpoints, cfg.Policy, uagopcua.MessageSecurityModeFromString(cfg.Mode)) + if ep == nil { + return errFailedFindEndpoint + } + + opts = []opcuagopcua.Option{ + opcuagopcua.SecurityPolicy(cfg.Policy), + opcuagopcua.SecurityModeString(cfg.Mode), + opcuagopcua.CertificateFile(cfg.CertFile), + opcuagopcua.PrivateKeyFile(cfg.KeyFile), + opcuagopcua.AuthAnonymous(), + opcuagopcua.SecurityFromEndpoint(ep, uagopcua.UserTokenTypeAnonymous), + } + } + + oc := opcuagopcua.NewClient(cfg.ServerURI, opts...)
+ if err := oc.Connect(ctx); err != nil { + return errors.Wrap(errFailedConn, err) + } + defer oc.Close() + + i, err := strconv.Atoi(cfg.Interval) + if err != nil { + return errors.Wrap(errFailedParseInterval, err) + } + + sub, err := oc.Subscribe(&opcuagopcua.SubscriptionParameters{ + Interval: time.Duration(i) * time.Millisecond, + }) + if err != nil { + return errors.Wrap(errFailedSub, err) + } + defer func() { + if err = sub.Cancel(); err != nil { + c.logger.Error(fmt.Sprintf("subscription could not be cancelled: %s", err)) + } + }() + + if err := c.runHandler(ctx, sub, cfg.ServerURI, cfg.NodeID); err != nil { + c.logger.Warn(fmt.Sprintf("Unsubscribed from OPC-UA node %s.%s: %s", cfg.ServerURI, cfg.NodeID, err)) + } + + return nil +} + +func (c client) runHandler(ctx context.Context, sub *opcuagopcua.Subscription, uri, node string) error { + nodeID, err := uagopcua.ParseNodeID(node) + if err != nil { + return errors.Wrap(errFailedParseNodeID, err) + } + + // arbitrary client handle for the monitoring item + handle := uint32(42) + miCreateRequest := opcuagopcua.NewMonitoredItemCreateRequestWithDefaults(nodeID, uagopcua.AttributeIDValue, handle) + res, err := sub.Monitor(uagopcua.TimestampsToReturnBoth, miCreateRequest) + if err != nil { + return errors.Wrap(errFailedCreateReq, err) + } + if res.Results[0].StatusCode != uagopcua.StatusOK { + return errResponseStatus + } + + go sub.Run(ctx) + + c.logger.Info(fmt.Sprintf("subscribed to server %s and node_id %s", uri, node)) + + for { + select { + case <-c.ctx.Done(): + return nil + case res := <-sub.Notifs: + if res.Error != nil { + c.logger.Error(res.Error.Error()) + continue + } + + switch x := res.Value.(type) { + case *uagopcua.DataChangeNotification: + for _, item := range x.MonitoredItems { + msg := message{ + ServerURI: uri, + NodeID: node, + Type: item.Value.Value.Type().String(), + Time: item.Value.SourceTimestamp.Unix(), + DataKey: "v", + } + + switch item.Value.Value.Type() { + case uagopcua.TypeIDBoolean: + msg.DataKey = "vb" + msg.Data = item.Value.Value.Bool() + case uagopcua.TypeIDString, uagopcua.TypeIDByteString: + msg.DataKey = "vs" + msg.Data = item.Value.Value.String() + case uagopcua.TypeIDDataValue: + msg.DataKey = "vd" + msg.Data = item.Value.Value.String() + case uagopcua.TypeIDInt64, uagopcua.TypeIDInt32, uagopcua.TypeIDInt16: + msg.Data = float64(item.Value.Value.Int()) + case uagopcua.TypeIDUint64, uagopcua.TypeIDUint32, uagopcua.TypeIDUint16: + msg.Data = float64(item.Value.Value.Uint()) + case uagopcua.TypeIDFloat, uagopcua.TypeIDDouble: + msg.Data = item.Value.Value.Float() + case uagopcua.TypeIDByte: + msg.Data = float64(item.Value.Value.Uint()) + case uagopcua.TypeIDDateTime: + msg.Data = item.Value.Value.Time().Unix() + default: + msg.Data = 0 + } + + // errors.Contains is used here because publish wraps the + // connection error with the channel and thing details. + if err := c.publish(ctx, token, msg); err != nil { + switch { + case errors.Contains(err, errNotFoundServerURI), errors.Contains(err, errNotFoundNodeID), errors.Contains(err, errNotFoundConn): + return err + default: + c.logger.Error(fmt.Sprintf("Failed to publish: %s", err)) + } + } + } + + default: + c.logger.Info(fmt.Sprintf("unknown publish result: %T", res.Value)) + } + } + } +} + +// publish forwards messages from the OPC-UA Server to the Magistrala message broker.
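+// Each data change is rendered as a single-record SenML array and published
+// on the channel resolved from the route maps, with the NodeID as subtopic.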
+func (c client) publish(ctx context.Context, token string, m message) error { + // Get route-map of the OPC-UA ServerURI + chanID, err := c.channelsRM.Get(ctx, m.ServerURI) + if err != nil { + return errNotFoundServerURI + } + + // Get route-map of the OPC-UA NodeID + thingID, err := c.thingsRM.Get(ctx, m.NodeID) + if err != nil { + return errNotFoundNodeID + } + + // Check connection between ServerURI and NodeID + cKey := fmt.Sprintf("%s:%s", chanID, thingID) + if _, err := c.connectRM.Get(ctx, cKey); err != nil { + return errors.Wrap(errNotFoundConn, fmt.Errorf("no connection between channel %s and thing %s", chanID, thingID)) + } + + // Publish to the Magistrala message broker + senML := fmt.Sprintf(`[{"n":"%s", "t": %d, "%s":%v}]`, m.Type, m.Time, m.DataKey, m.Data) + payload := []byte(senML) + + msg := messaging.Message{ + Publisher: thingID, + Protocol: protocol, + Channel: chanID, + Payload: payload, + Subtopic: m.NodeID, + Created: time.Now().UnixNano(), + } + + if err := c.publisher.Publish(ctx, msg.GetChannel(), &msg); err != nil { + return err + } + + c.logger.Info(fmt.Sprintf("publish from server %s and node_id %s with value %v", m.ServerURI, m.NodeID, m.Data)) + return nil +} diff --git a/opcua/routemap.go b/opcua/routemap.go new file mode 100644 index 0000000..a0ea41a --- /dev/null +++ b/opcua/routemap.go @@ -0,0 +1,18 @@ +// Copyright (c) Abstract Machines +// SPDX-License-Identifier: Apache-2.0 + +package opcua + +import "context" + +// RouteMapRepository stores the route map between the OPC-UA Server and Magistrala. +type RouteMapRepository interface { + // Save stores a route-map pair for the OPC-UA Server and Magistrala. + Save(context.Context, string, string) error + + // Get returns the stored route-map entry for a given OPC-UA ID. + Get(context.Context, string) (string, error) + + // Remove removes the route map from the cache. + Remove(context.Context, string) error +} diff --git a/opcua/subscriber.go b/opcua/subscriber.go new file mode 100644 index 0000000..91e7d37 --- /dev/null +++ b/opcua/subscriber.go @@ -0,0 +1,12 @@ +// Copyright (c) Abstract Machines +// SPDX-License-Identifier: Apache-2.0 + +package opcua + +import "context" + +// Subscriber represents the OPC-UA Server client. +type Subscriber interface { + // Subscribe subscribes to the NodeID from the given Config and receives events.
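+ // It blocks, draining notifications until the subscriber context is
+ // cancelled or a route-map lookup fails.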
+ Subscribe(context.Context, Config) error +} diff --git a/pkg/api/common.go b/pkg/api/common.go new file mode 100644 index 0000000..68388e0 --- /dev/null +++ b/pkg/api/common.go @@ -0,0 +1,209 @@ +// Copyright (c) Abstract Machines +// SPDX-License-Identifier: Apache-2.0 + +package api + +import ( + "context" + "encoding/json" + "net/http" + + "github.com/absmach/magistrala" + "github.com/absmach/magistrala/bootstrap" + "github.com/absmach/magistrala/pkg/apiutil" + mgclients "github.com/absmach/magistrala/pkg/clients" + "github.com/absmach/magistrala/pkg/errors" + svcerr "github.com/absmach/magistrala/pkg/errors/service" + "github.com/gofrs/uuid" +) + +const ( + MemberKindKey = "member_kind" + PermissionKey = "permission" + RelationKey = "relation" + StatusKey = "status" + OffsetKey = "offset" + OrderKey = "order" + LimitKey = "limit" + MetadataKey = "metadata" + ParentKey = "parent_id" + OwnerKey = "owner_id" + ClientKey = "client" + IdentityKey = "identity" + GroupKey = "group" + ActionKey = "action" + TagKey = "tag" + NameKey = "name" + TotalKey = "total" + SubjectKey = "subject" + ObjectKey = "object" + LevelKey = "level" + TreeKey = "tree" + DirKey = "dir" + ListPerms = "list_perms" + VisibilityKey = "visibility" + SharedByKey = "shared_by" + TokenKey = "token" + DefPermission = "view" + DefTotal = uint64(100) + DefOffset = 0 + DefOrder = "updated_at" + DefDir = "asc" + DefLimit = 10 + DefLevel = 0 + DefStatus = "enabled" + DefClientStatus = mgclients.Enabled + DefGroupStatus = mgclients.Enabled + DefListPerms = false + SharedVisibility = "shared" + MyVisibility = "mine" + AllVisibility = "all" + // ContentType represents JSON content type. + ContentType = "application/json" + + // MaxNameSize limits name size to prevent names from becoming too complex. + MaxLimitSize = 100 + MaxNameSize = 1024 + NameOrder = "name" + IDOrder = "id" + AscDir = "asc" + DescDir = "desc" +) + +// ValidateUUID validates UUID format. +func ValidateUUID(extID string) (err error) { + id, err := uuid.FromString(extID) + if id.String() != extID || err != nil { + return apiutil.ErrInvalidIDFormat + } + + return nil +} + +// EncodeResponse encodes a successful response. +func EncodeResponse(_ context.Context, w http.ResponseWriter, response interface{}) error { + if ar, ok := response.(magistrala.Response); ok { + for k, v := range ar.Headers() { + w.Header().Set(k, v) + } + w.Header().Set("Content-Type", ContentType) + w.WriteHeader(ar.Code()) + + if ar.Empty() { + return nil + } + } + + return json.NewEncoder(w).Encode(response) +} + +// EncodeError encodes an error response.
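+// Validation errors are unwrapped first so the JSON body can report the
+// wrapper message alongside the underlying cause.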
+func EncodeError(_ context.Context, err error, w http.ResponseWriter) { + var wrapper error + if errors.Contains(err, apiutil.ErrValidation) { + wrapper, err = errors.Unwrap(err) + } + + w.Header().Set("Content-Type", ContentType) + switch { + case errors.Contains(err, svcerr.ErrAuthorization), + errors.Contains(err, svcerr.ErrDomainAuthorization), + errors.Contains(err, bootstrap.ErrExternalKey), + errors.Contains(err, bootstrap.ErrExternalKeySecure): + err = unwrap(err) + w.WriteHeader(http.StatusForbidden) + + case errors.Contains(err, svcerr.ErrAuthentication), + errors.Contains(err, apiutil.ErrBearerToken), + errors.Contains(err, svcerr.ErrLogin): + err = unwrap(err) + w.WriteHeader(http.StatusUnauthorized) + case errors.Contains(err, svcerr.ErrMalformedEntity), + errors.Contains(err, apiutil.ErrMalformedPolicy), + errors.Contains(err, apiutil.ErrMissingSecret), + errors.Contains(err, errors.ErrMalformedEntity), + errors.Contains(err, apiutil.ErrMissingID), + errors.Contains(err, apiutil.ErrMissingName), + errors.Contains(err, apiutil.ErrMissingAlias), + errors.Contains(err, apiutil.ErrMissingEmail), + errors.Contains(err, apiutil.ErrMissingHost), + errors.Contains(err, apiutil.ErrInvalidResetPass), + errors.Contains(err, apiutil.ErrEmptyList), + errors.Contains(err, apiutil.ErrMissingMemberKind), + errors.Contains(err, apiutil.ErrMissingMemberType), + errors.Contains(err, apiutil.ErrLimitSize), + errors.Contains(err, apiutil.ErrBearerKey), + errors.Contains(err, svcerr.ErrInvalidStatus), + errors.Contains(err, apiutil.ErrNameSize), + errors.Contains(err, apiutil.ErrInvalidIDFormat), + errors.Contains(err, apiutil.ErrInvalidQueryParams), + errors.Contains(err, apiutil.ErrMissingRelation), + errors.Contains(err, apiutil.ErrValidation), + errors.Contains(err, apiutil.ErrMissingIdentity), + errors.Contains(err, apiutil.ErrMissingPass), + errors.Contains(err, apiutil.ErrMissingConfPass), + errors.Contains(err, apiutil.ErrPasswordFormat), + errors.Contains(err, svcerr.ErrInvalidRole), + errors.Contains(err, svcerr.ErrInvalidPolicy), + errors.Contains(err, apiutil.ErrInvitationState), + errors.Contains(err, apiutil.ErrInvalidAPIKey), + errors.Contains(err, svcerr.ErrViewEntity), + errors.Contains(err, apiutil.ErrBootstrapState), + errors.Contains(err, apiutil.ErrMissingCertData), + errors.Contains(err, apiutil.ErrInvalidContact), + errors.Contains(err, apiutil.ErrInvalidTopic), + errors.Contains(err, bootstrap.ErrAddBootstrap), + errors.Contains(err, apiutil.ErrInvalidCertData), + errors.Contains(err, apiutil.ErrEmptyMessage), + errors.Contains(err, apiutil.ErrInvalidLevel), + errors.Contains(err, apiutil.ErrInvalidDirection), + errors.Contains(err, apiutil.ErrInvalidEntityType), + errors.Contains(err, apiutil.ErrMissingEntityType), + errors.Contains(err, apiutil.ErrInvalidTimeFormat), + errors.Contains(err, svcerr.ErrSearch): + err = unwrap(err) + w.WriteHeader(http.StatusBadRequest) + + case errors.Contains(err, svcerr.ErrCreateEntity), + errors.Contains(err, svcerr.ErrUpdateEntity), + errors.Contains(err, svcerr.ErrRemoveEntity), + errors.Contains(err, svcerr.ErrEnableClient): + err = unwrap(err) + w.WriteHeader(http.StatusUnprocessableEntity) + + case errors.Contains(err, svcerr.ErrNotFound), + errors.Contains(err, bootstrap.ErrBootstrap): + err = unwrap(err) + w.WriteHeader(http.StatusNotFound) + + case errors.Contains(err, errors.ErrStatusAlreadyAssigned), + errors.Contains(err, svcerr.ErrConflict): + err = unwrap(err) + w.WriteHeader(http.StatusConflict) + + case 
errors.Contains(err, apiutil.ErrUnsupportedContentType): + err = unwrap(err) + w.WriteHeader(http.StatusUnsupportedMediaType) + + default: + w.WriteHeader(http.StatusInternalServerError) + } + + if wrapper != nil { + err = errors.Wrap(wrapper, err) + } + + if errorVal, ok := err.(errors.Error); ok { + if err := json.NewEncoder(w).Encode(errorVal); err != nil { + w.WriteHeader(http.StatusInternalServerError) + } + } +} + +func unwrap(err error) error { + wrapper, err := errors.Unwrap(err) + if wrapper != nil { + return wrapper + } + return err +} diff --git a/pkg/api/common_test.go b/pkg/api/common_test.go new file mode 100644 index 0000000..b854aff --- /dev/null +++ b/pkg/api/common_test.go @@ -0,0 +1,338 @@ +// Copyright (c) Abstract Machines +// SPDX-License-Identifier: Apache-2.0 + +package api_test + +import ( + "context" + "encoding/json" + "net/http" + "testing" + "time" + + "github.com/absmach/magistrala" + "github.com/absmach/magistrala/pkg/apiutil" + "github.com/absmach/magistrala/pkg/errors" + svcerr "github.com/absmach/magistrala/pkg/errors/service" + "github.com/absmach/mg-contrib/pkg/api" + "github.com/absmach/mg-contrib/pkg/testsutil" + "github.com/stretchr/testify/assert" +) + +var _ magistrala.Response = (*response)(nil) + +var validUUID = testsutil.GenerateUUID(&testing.T{}) + +type responseWriter struct { + body []byte + statusCode int + header http.Header +} + +func newResponseWriter() *responseWriter { + return &responseWriter{ + header: http.Header{}, + } +} + +func (w *responseWriter) Header() http.Header { + return w.header +} + +func (w *responseWriter) Write(b []byte) (int, error) { + w.body = b + return 0, nil +} + +func (w *responseWriter) WriteHeader(statusCode int) { + w.statusCode = statusCode +} + +func (w *responseWriter) StatusCode() int { + return w.statusCode +} + +func (w *responseWriter) Body() []byte { + return w.body +} + +type response struct { + code int + headers map[string]string + empty bool + + ID string `json:"id"` + Name string `json:"name"` + CreatedAt time.Time `json:"created_at"` +} + +func (res response) Code() int { + return res.code +} + +func (res response) Headers() map[string]string { + return res.headers +} + +func (res response) Empty() bool { + return res.empty +} + +type body struct { + Error string `json:"error,omitempty"` + Message string `json:"message"` +} + +func TestValidateUUID(t *testing.T) { + cases := []struct { + desc string + uuid string + err error + }{ + { + desc: "valid uuid", + uuid: validUUID, + err: nil, + }, + { + desc: "invalid uuid", + uuid: "invalid", + err: apiutil.ErrInvalidIDFormat, + }, + } + + for _, c := range cases { + t.Run(c.desc, func(t *testing.T) { + err := api.ValidateUUID(c.uuid) + assert.Equal(t, c.err, err) + }) + } +} + +func TestEncodeResponse(t *testing.T) { + now := time.Now() + validBody := []byte(`{"id":"` + validUUID + `","name":"test","created_at":"` + now.Format(time.RFC3339Nano) + `"}` + "\n" + ``) + + cases := []struct { + desc string + resp interface{} + header http.Header + code int + body []byte + err error + }{ + { + desc: "valid response", + resp: response{ + code: http.StatusOK, + headers: map[string]string{ + "Location": "/groups/" + validUUID, + }, + ID: validUUID, + Name: "test", + CreatedAt: now, + }, + header: http.Header{ + "Content-Type": []string{"application/json"}, + "Location": []string{"/groups/" + validUUID}, + }, + code: http.StatusOK, + body: validBody, + err: nil, + }, + { + desc: "valid response with no headers", + resp: response{ + code: http.StatusOK, + ID: 
validUUID, + Name: "test", + CreatedAt: now, + }, + header: http.Header{ + "Content-Type": []string{"application/json"}, + }, + code: http.StatusOK, + body: validBody, + err: nil, + }, + { + desc: "valid response with many headers", + resp: response{ + code: http.StatusOK, + headers: map[string]string{ + "X-Test": "test", + "X-Test2": "test2", + }, + ID: validUUID, + Name: "test", + CreatedAt: now, + }, + header: http.Header{ + "Content-Type": []string{"application/json"}, + "X-Test": []string{"test"}, + "X-Test2": []string{"test2"}, + }, + code: http.StatusOK, + body: validBody, + err: nil, + }, + { + desc: "valid response with empty body", + resp: response{ + code: http.StatusOK, + empty: true, + ID: validUUID, + }, + header: http.Header{ + "Content-Type": []string{"application/json"}, + }, + code: http.StatusOK, + body: []byte(``), + err: nil, + }, + { + desc: "invalid response", + resp: struct { + ID string `json:"id"` + }{ + ID: validUUID, + }, + header: http.Header{}, + code: 0, + body: []byte(`{"id":"` + validUUID + `"}` + "\n" + ``), + err: nil, + }, + } + + for _, c := range cases { + t.Run(c.desc, func(t *testing.T) { + responseWriter := newResponseWriter() + err := api.EncodeResponse(context.Background(), responseWriter, c.resp) + assert.Equal(t, c.err, err) + assert.Equal(t, c.header, responseWriter.Header()) + assert.Equal(t, c.code, responseWriter.StatusCode()) + assert.Equal(t, string(c.body), string(responseWriter.Body())) + }) + } +} + +func TestEncodeError(t *testing.T) { + cases := []struct { + desc string + errs []error + code int + }{ + { + desc: "BadRequest", + errs: []error{ + apiutil.ErrMissingSecret, + svcerr.ErrMalformedEntity, + errors.ErrMalformedEntity, + apiutil.ErrMissingID, + apiutil.ErrEmptyList, + apiutil.ErrMissingMemberType, + apiutil.ErrMissingMemberKind, + apiutil.ErrLimitSize, + apiutil.ErrNameSize, + svcerr.ErrViewEntity, + }, + code: http.StatusBadRequest, + }, + { + desc: "BadRequest with validation error", + errs: []error{ + errors.Wrap(apiutil.ErrValidation, apiutil.ErrMissingSecret), + errors.Wrap(apiutil.ErrValidation, svcerr.ErrMalformedEntity), + errors.Wrap(apiutil.ErrValidation, errors.ErrMalformedEntity), + errors.Wrap(apiutil.ErrValidation, apiutil.ErrMissingID), + errors.Wrap(apiutil.ErrValidation, apiutil.ErrEmptyList), + errors.Wrap(apiutil.ErrValidation, apiutil.ErrMissingMemberType), + errors.Wrap(apiutil.ErrValidation, apiutil.ErrMissingMemberKind), + errors.Wrap(apiutil.ErrValidation, apiutil.ErrLimitSize), + errors.Wrap(apiutil.ErrValidation, apiutil.ErrNameSize), + }, + code: http.StatusBadRequest, + }, + { + desc: "Unauthorized", + errs: []error{ + svcerr.ErrAuthentication, + svcerr.ErrAuthentication, + apiutil.ErrBearerToken, + }, + code: http.StatusUnauthorized, + }, + + { + desc: "NotFound", + errs: []error{ + svcerr.ErrNotFound, + }, + code: http.StatusNotFound, + }, + { + desc: "Conflict", + errs: []error{ + svcerr.ErrConflict, + svcerr.ErrConflict, + }, + code: http.StatusConflict, + }, + { + desc: "Forbidden", + errs: []error{ + svcerr.ErrAuthorization, + svcerr.ErrAuthorization, + svcerr.ErrDomainAuthorization, + }, + code: http.StatusForbidden, + }, + { + desc: "UnsupportedMediaType", + errs: []error{ + apiutil.ErrUnsupportedContentType, + }, + code: http.StatusUnsupportedMediaType, + }, + { + desc: "StatusUnprocessableEntity", + errs: []error{ + svcerr.ErrCreateEntity, + svcerr.ErrUpdateEntity, + svcerr.ErrRemoveEntity, + }, + code: http.StatusUnprocessableEntity, + }, + { + desc: "InternalServerError", + errs: 
[]error{ + errors.New("test"), + }, + code: http.StatusInternalServerError, + }, + } + + for _, c := range cases { + t.Run(c.desc, func(t *testing.T) { + responseWriter := newResponseWriter() + for _, err := range c.errs { + api.EncodeError(context.Background(), err, responseWriter) + assert.Equal(t, c.code, responseWriter.StatusCode()) + + message := body{} + jerr := json.Unmarshal(responseWriter.Body(), &message) + assert.NoError(t, jerr) + + var wrapper error + switch errors.Contains(err, apiutil.ErrValidation) { + case true: + wrapper, err = errors.Unwrap(err) + assert.Equal(t, err.Error(), message.Error) + assert.Equal(t, wrapper.Error(), message.Message) + case false: + assert.Equal(t, err.Error(), message.Message) + } + } + }) + } +} diff --git a/pkg/api/doc.go b/pkg/api/doc.go new file mode 100644 index 0000000..6bffadc --- /dev/null +++ b/pkg/api/doc.go @@ -0,0 +1,6 @@ +// Copyright (c) Abstract Machines +// SPDX-License-Identifier: Apache-2.0 + +// Package api contains commonly used constants and functions +// for the HTTP endpoints. +package api diff --git a/pkg/clients/cassandra/cassandra.go b/pkg/clients/cassandra/cassandra.go new file mode 100644 index 0000000..a809164 --- /dev/null +++ b/pkg/clients/cassandra/cassandra.go @@ -0,0 +1,72 @@ +// Copyright (c) Abstract Machines +// SPDX-License-Identifier: Apache-2.0 + +package cassandra + +import ( + "github.com/absmach/magistrala/pkg/errors" + "github.com/caarlos0/env/v10" + "github.com/gocql/gocql" +) + +var ( + errConfig = errors.New("failed to load Cassandra configuration") + errConnect = errors.New("failed to connect to Cassandra database") + errInit = errors.New("failed to execute initialization query in Cassandra") +) + +// Config contains Cassandra DB specific parameters. +type Config struct { + Hosts []string `env:"CLUSTER" envDefault:"127.0.0.1" envSeparator:","` + Keyspace string `env:"KEYSPACE" envDefault:"magistrala"` + User string `env:"USER" envDefault:""` + Pass string `env:"PASS" envDefault:""` + Port int `env:"PORT" envDefault:"9042"` +} + +// Setup loads configuration from the environment and creates a new Cassandra connection. +func Setup(envPrefix string) (*gocql.Session, error) { + return SetupDB(envPrefix, "") +} + +// SetupDB loads configuration from the environment, +// creates a new Cassandra connection, and executes +// the initial query in the database. +func SetupDB(envPrefix, initQuery string) (*gocql.Session, error) { + cfg := Config{} + if err := env.ParseWithOptions(&cfg, env.Options{Prefix: envPrefix}); err != nil { + return nil, errors.Wrap(errConfig, err) + } + cs, err := Connect(cfg) + if err != nil { + return nil, err + } + if initQuery != "" { + if err := InitDB(cs, initQuery); err != nil { + return nil, errors.Wrap(errInit, err) + } + } + return cs, nil +} + +// Connect establishes a connection to the Cassandra cluster. +func Connect(cfg Config) (*gocql.Session, error) { + cluster := gocql.NewCluster(cfg.Hosts...)
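+ // Sessions use quorum consistency and password authentication from the
+ // loaded configuration.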
+ cluster.Keyspace = cfg.Keyspace + cluster.Consistency = gocql.Quorum + cluster.Authenticator = gocql.PasswordAuthenticator{ + Username: cfg.User, + Password: cfg.Pass, + } + cluster.Port = cfg.Port + + cassSess, err := cluster.CreateSession() + if err != nil { + return nil, errors.Wrap(errConnect, err) + } + return cassSess, nil +} + +// InitDB executes the given initialization query on the session. +func InitDB(cs *gocql.Session, query string) error { + return cs.Query(query).Exec() +} diff --git a/pkg/clients/cassandra/doc.go b/pkg/clients/cassandra/doc.go new file mode 100644 index 0000000..b65ed3f --- /dev/null +++ b/pkg/clients/cassandra/doc.go @@ -0,0 +1,9 @@ +// Copyright (c) Abstract Machines +// SPDX-License-Identifier: Apache-2.0 + +// Package cassandra contains the domain concept definitions needed to support +// Magistrala Cassandra database functionality. +// +// It provides the abstraction of the Cassandra database service, which is used +// to configure, setup and connect to the Cassandra database. +package cassandra diff --git a/pkg/clients/doc.go b/pkg/clients/doc.go new file mode 100644 index 0000000..ad1239b --- /dev/null +++ b/pkg/clients/doc.go @@ -0,0 +1,6 @@ +// Copyright (c) Abstract Machines +// SPDX-License-Identifier: Apache-2.0 + +// Package clients contains the domain concept definitions needed to support +// Magistrala clients functionality for example: postgres, redis, grpc, jaeger. +package clients diff --git a/pkg/clients/influxdb/doc.go b/pkg/clients/influxdb/doc.go new file mode 100644 index 0000000..203c349 --- /dev/null +++ b/pkg/clients/influxdb/doc.go @@ -0,0 +1,9 @@ +// Copyright (c) Abstract Machines +// SPDX-License-Identifier: Apache-2.0 + +// Package influxdb contains the domain concept definitions needed to support +// Magistrala InfluxDB database functionality. +// +// It provides the abstraction of the InfluxDB database service, which is used +// to configure, setup and connect to the InfluxDB database. +package influxdb diff --git a/pkg/clients/influxdb/influxdb.go b/pkg/clients/influxdb/influxdb.go new file mode 100644 index 0000000..652d3bc --- /dev/null +++ b/pkg/clients/influxdb/influxdb.go @@ -0,0 +1,57 @@ +// Copyright (c) Abstract Machines +// SPDX-License-Identifier: Apache-2.0 + +package influxdb + +import ( + "context" + "time" + + "github.com/absmach/magistrala/pkg/errors" + "github.com/caarlos0/env/v10" + influxdb2 "github.com/influxdata/influxdb-client-go/v2" +) + +var ( + errConnect = errors.New("failed to create InfluxDB client") + errConfig = errors.New("failed to load InfluxDB client configuration from environment variable") +) + +type Config struct { + Protocol string `env:"PROTOCOL" envDefault:"http"` + Host string `env:"HOST" envDefault:"localhost"` + Port string `env:"PORT" envDefault:"8086"` + Username string `env:"ADMIN_USER" envDefault:"magistrala"` + Password string `env:"ADMIN_PASSWORD" envDefault:"magistrala"` + DBName string `env:"NAME" envDefault:"magistrala"` + Bucket string `env:"BUCKET" envDefault:"magistrala-bucket"` + Org string `env:"ORG" envDefault:"magistrala"` + Token string `env:"TOKEN" envDefault:"magistrala-token"` + DBUrl string `env:"DBURL" envDefault:""` + UserAgent string `env:"USER_AGENT" envDefault:"InfluxDBClient"` + Timeout time.Duration `env:"TIMEOUT"` // The InfluxDB client configuration has no timeout duration by default, so this field has no fallback default timeout duration.
Reference: https://pkg.go.dev/github.com/influxdata/influxdb@v1.10.0/client/v2#HTTPConfig + InsecureSkipVerify bool `env:"INSECURE_SKIP_VERIFY" envDefault:"false"` +} + +// Setup loads configuration from environment variables, creates an InfluxDB client, and connects to the InfluxDB server. +func Setup(ctx context.Context, envPrefix string) (influxdb2.Client, error) { + cfg := Config{} + if err := env.ParseWithOptions(&cfg, env.Options{Prefix: envPrefix}); err != nil { + return nil, errors.Wrap(errConfig, err) + } + return Connect(ctx, cfg) +} + +// Connect creates an InfluxDB client and connects to the InfluxDB server. +func Connect(ctx context.Context, config Config) (influxdb2.Client, error) { + client := influxdb2.NewClientWithOptions(config.DBUrl, config.Token, + influxdb2.DefaultOptions(). + SetUseGZip(true). + SetFlushInterval(100)) + ctx, cancel := context.WithTimeout(ctx, config.Timeout) + defer cancel() + if _, err := client.Ready(ctx); err != nil { + return nil, errors.Wrap(errConnect, err) + } + return client, nil +} diff --git a/pkg/clients/mongo/doc.go b/pkg/clients/mongo/doc.go new file mode 100644 index 0000000..78d3dbd --- /dev/null +++ b/pkg/clients/mongo/doc.go @@ -0,0 +1,9 @@ +// Copyright (c) Abstract Machines +// SPDX-License-Identifier: Apache-2.0 + +// Package mongodb contains the domain concept definitions needed to support +// Magistrala Mongo database functionality. +// +// It provides the abstraction of the Mongo database service, which is used +// to configure, setup and connect to the Mongo database. +package mongodb diff --git a/pkg/clients/mongo/mongo.go b/pkg/clients/mongo/mongo.go new file mode 100644 index 0000000..5c635ee --- /dev/null +++ b/pkg/clients/mongo/mongo.go @@ -0,0 +1,51 @@ +// Copyright (c) Abstract Machines +// SPDX-License-Identifier: Apache-2.0 + +package mongodb + +import ( + "context" + "fmt" + + "github.com/absmach/magistrala/pkg/errors" + "github.com/caarlos0/env/v10" + "go.mongodb.org/mongo-driver/mongo" + "go.mongodb.org/mongo-driver/mongo/options" +) + +var ( + errConfig = errors.New("failed to load mongodb configuration") + errConnect = errors.New("failed to connect to mongodb server") +) + +// Config defines the options that are used when connecting to a MongoDB instance. +type Config struct { + Host string `env:"HOST" envDefault:"localhost"` + Port string `env:"PORT" envDefault:"27017"` + Name string `env:"NAME" envDefault:"messages"` +} + +// Connect creates a connection to the MongoDB instance. +func Connect(cfg Config) (*mongo.Database, error) { + addr := fmt.Sprintf("mongodb://%s:%s", cfg.Host, cfg.Port) + client, err := mongo.Connect(context.Background(), options.Client().ApplyURI(addr)) + if err != nil { + return nil, errors.Wrap(errConnect, err) + } + + db := client.Database(cfg.Name) + return db, nil +} + +// Setup loads configuration from the environment, creates a new MongoDB client, and connects to the MongoDB server.
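+// For example, with a hypothetical prefix such as "MG_MONGO_", the client
+// reads MG_MONGO_HOST, MG_MONGO_PORT, and MG_MONGO_NAME:
+//
+//	db, err := mongodb.Setup("MG_MONGO_")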
+func Setup(envPrefix string) (*mongo.Database, error) { + cfg := Config{} + if err := env.ParseWithOptions(&cfg, env.Options{Prefix: envPrefix}); err != nil { + return nil, errors.Wrap(errConfig, err) + } + db, err := Connect(cfg) + if err != nil { + return nil, err + } + return db, nil +} diff --git a/pkg/clients/redis/doc.go b/pkg/clients/redis/doc.go new file mode 100644 index 0000000..8496ce3 --- /dev/null +++ b/pkg/clients/redis/doc.go @@ -0,0 +1,9 @@ +// Copyright (c) Abstract Machines +// SPDX-License-Identifier: Apache-2.0 + +// Package redis contains the domain concept definitions needed to support +// Magistrala redis cache functionality. +// +// It provides the abstraction of the redis cache service, which is used +// to configure, setup and connect to the redis cache. +package redis diff --git a/pkg/clients/redis/redis.go b/pkg/clients/redis/redis.go new file mode 100644 index 0000000..4386bf8 --- /dev/null +++ b/pkg/clients/redis/redis.go @@ -0,0 +1,16 @@ +// Copyright (c) Abstract Machines +// SPDX-License-Identifier: Apache-2.0 + +package redis + +import "github.com/go-redis/redis/v8" + +// Connect creates a new Redis client and connects to the Redis server. +func Connect(url string) (*redis.Client, error) { + opts, err := redis.ParseURL(url) + if err != nil { + return nil, err + } + + return redis.NewClient(opts), nil +} diff --git a/pkg/email/README.md b/pkg/email/README.md new file mode 100644 index 0000000..a152d68 --- /dev/null +++ b/pkg/email/README.md @@ -0,0 +1,21 @@ +# Magistrala Email Agent + +Magistrala Email Agent is used for sending emails. It wraps basic SMTP features and +provides a simple API that Magistrala services can use to send email notifications. + +## Configuration + +Magistrala Email Agent is configured using the following configuration parameters: + +| Parameter | Description | +| ----------------------------------- | ----------------------------------------------------------------------- | +| MG_EMAIL_HOST | Mail server host | +| MG_EMAIL_PORT | Mail server port | +| MG_EMAIL_USERNAME | Mail server username | +| MG_EMAIL_PASSWORD | Mail server password | +| MG_EMAIL_FROM_ADDRESS | Email "from" address | +| MG_EMAIL_FROM_NAME | Email "from" name | +| MG_EMAIL_TEMPLATE | Email template for sending notification emails | + +There are two authentication methods supported: Basic Auth and CRAM-MD5. +If `MG_EMAIL_USERNAME` is empty, no authentication will be used. diff --git a/pkg/email/doc.go b/pkg/email/doc.go new file mode 100644 index 0000000..f5d4a0b --- /dev/null +++ b/pkg/email/doc.go @@ -0,0 +1,6 @@ +// Copyright (c) Abstract Machines +// SPDX-License-Identifier: Apache-2.0 + +// Package email contains the domain concept definitions needed to support +// Magistrala email functionality. +package email diff --git a/pkg/email/email.go b/pkg/email/email.go new file mode 100644 index 0000000..8925c38 --- /dev/null +++ b/pkg/email/email.go @@ -0,0 +1,110 @@ +// Copyright (c) Abstract Machines +// SPDX-License-Identifier: Apache-2.0 + +package email + +import ( + "bytes" + "net/mail" + "strconv" + "strings" + "text/template" + + "github.com/absmach/magistrala/pkg/errors" + "gopkg.in/gomail.v2" +) + +var ( + // errMissingEmailTemplate indicates a missing e-mail template file.
+ errMissingEmailTemplate = errors.New("missing e-mail template file") + errParseTemplate = errors.New("failed to parse e-mail template") + errExecTemplate = errors.New("failed to execute e-mail template") + errSendMail = errors.New("failed to send e-mail") +) + +type email struct { + To []string + From string + Subject string + Header string + User string + Content string + Host string + Footer string +} + +// Config holds the email agent configuration. +type Config struct { + Host string `env:"MG_EMAIL_HOST" envDefault:"localhost"` + Port string `env:"MG_EMAIL_PORT" envDefault:"25"` + Username string `env:"MG_EMAIL_USERNAME" envDefault:"root"` + Password string `env:"MG_EMAIL_PASSWORD" envDefault:""` + FromAddress string `env:"MG_EMAIL_FROM_ADDRESS" envDefault:""` + FromName string `env:"MG_EMAIL_FROM_NAME" envDefault:""` + Template string `env:"MG_EMAIL_TEMPLATE" envDefault:"email.tmpl"` +} + +// Agent for mailing. +type Agent struct { + conf *Config + tmpl *template.Template + dial *gomail.Dialer +} + +// New creates a new email agent. +func New(c *Config) (*Agent, error) { + a := &Agent{} + a.conf = c + port, err := strconv.Atoi(c.Port) + if err != nil { + return a, err + } + d := gomail.NewDialer(c.Host, port, c.Username, c.Password) + a.dial = d + + tmpl, err := template.ParseFiles(c.Template) + if err != nil { + return a, errors.Wrap(errParseTemplate, err) + } + a.tmpl = tmpl + return a, nil +} + +// Send sends e-mail. +func (a *Agent) Send(to []string, from, subject, header, user, content, footer string) error { + if a.tmpl == nil { + return errMissingEmailTemplate + } + + buff := new(bytes.Buffer) + e := email{ + To: to, + From: from, + Subject: subject, + Header: header, + User: user, + Content: content, + Host: strings.Split(content, "?")[0], + Footer: footer, + } + if from == "" { + from := mail.Address{Name: a.conf.FromName, Address: a.conf.FromAddress} + e.From = from.String() + } + + if err := a.tmpl.Execute(buff, e); err != nil { + return errors.Wrap(errExecTemplate, err) + } + + m := gomail.NewMessage() + m.SetHeader("From", e.From) + m.SetHeader("To", to...)
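+ // The subject header and the template-rendered body are set below.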
+ m.SetHeader("Subject", subject) + m.SetBody("text/plain", buff.String()) + + if err := a.dial.DialAndSend(m); err != nil { + return errors.Wrap(errSendMail, err) + } + + return nil +} diff --git a/pkg/groups/api/decode.go b/pkg/groups/api/decode.go new file mode 100644 index 0000000..cbd0679 --- /dev/null +++ b/pkg/groups/api/decode.go @@ -0,0 +1,288 @@ +// Copyright (c) Abstract Machines +// SPDX-License-Identifier: Apache-2.0 + +package api + +import ( + "context" + "encoding/json" + "net/http" + "strings" + + "github.com/absmach/magistrala/pkg/apiutil" + mgclients "github.com/absmach/magistrala/pkg/clients" + "github.com/absmach/magistrala/pkg/errors" + mggroups "github.com/absmach/magistrala/pkg/groups" + "github.com/absmach/mg-contrib/pkg/api" + "github.com/go-chi/chi/v5" +) + +func DecodeListGroupsRequest(_ context.Context, r *http.Request) (interface{}, error) { + pm, err := decodePageMeta(r) + if err != nil { + return nil, err + } + + level, err := apiutil.ReadNumQuery[uint64](r, api.LevelKey, api.DefLevel) + if err != nil { + return nil, errors.Wrap(apiutil.ErrValidation, err) + } + + parentID, err := apiutil.ReadStringQuery(r, api.ParentKey, "") + if err != nil { + return nil, errors.Wrap(apiutil.ErrValidation, err) + } + + tree, err := apiutil.ReadBoolQuery(r, api.TreeKey, false) + if err != nil { + return nil, errors.Wrap(apiutil.ErrValidation, err) + } + dir, err := apiutil.ReadNumQuery[int64](r, api.DirKey, -1) + if err != nil { + return nil, errors.Wrap(apiutil.ErrValidation, err) + } + + memberKind, err := apiutil.ReadStringQuery(r, api.MemberKindKey, "") + if err != nil { + return nil, errors.Wrap(apiutil.ErrValidation, err) + } + + permission, err := apiutil.ReadStringQuery(r, api.PermissionKey, api.DefPermission) + if err != nil { + return nil, errors.Wrap(apiutil.ErrValidation, err) + } + + listPerms, err := apiutil.ReadBoolQuery(r, api.ListPerms, api.DefListPerms) + if err != nil { + return nil, errors.Wrap(apiutil.ErrValidation, err) + } + req := listGroupsReq{ + token: apiutil.ExtractBearerToken(r), + tree: tree, + memberKind: memberKind, + memberID: chi.URLParam(r, "memberID"), + Page: mggroups.Page{ + Level: level, + ID: parentID, + Permission: permission, + PageMeta: pm, + Direction: dir, + ListPerms: listPerms, + }, + } + return req, nil +} + +func DecodeListParentsRequest(_ context.Context, r *http.Request) (interface{}, error) { + pm, err := decodePageMeta(r) + if err != nil { + return nil, err + } + + level, err := apiutil.ReadNumQuery[uint64](r, api.LevelKey, api.DefLevel) + if err != nil { + return nil, errors.Wrap(apiutil.ErrValidation, err) + } + + tree, err := apiutil.ReadBoolQuery(r, api.TreeKey, false) + if err != nil { + return nil, errors.Wrap(apiutil.ErrValidation, err) + } + permission, err := apiutil.ReadStringQuery(r, api.PermissionKey, api.DefPermission) + if err != nil { + return nil, errors.Wrap(apiutil.ErrValidation, err) + } + + listPerms, err := apiutil.ReadBoolQuery(r, api.ListPerms, api.DefListPerms) + if err != nil { + return nil, errors.Wrap(apiutil.ErrValidation, err) + } + req := listGroupsReq{ + token: apiutil.ExtractBearerToken(r), + tree: tree, + Page: mggroups.Page{ + Level: level, + ID: chi.URLParam(r, "groupID"), + Permission: permission, + PageMeta: pm, + Direction: +1, + ListPerms: listPerms, + }, + } + return req, nil +} + +func DecodeListChildrenRequest(_ context.Context, r *http.Request) (interface{}, error) { + pm, err := decodePageMeta(r) + if err != nil { + return nil, err + } + + level, err := 
apiutil.ReadNumQuery[uint64](r, api.LevelKey, api.DefLevel) + if err != nil { + return nil, errors.Wrap(apiutil.ErrValidation, err) + } + + tree, err := apiutil.ReadBoolQuery(r, api.TreeKey, false) + if err != nil { + return nil, errors.Wrap(apiutil.ErrValidation, err) + } + permission, err := apiutil.ReadStringQuery(r, api.PermissionKey, api.DefPermission) + if err != nil { + return nil, errors.Wrap(apiutil.ErrValidation, err) + } + + listPerms, err := apiutil.ReadBoolQuery(r, api.ListPerms, api.DefListPerms) + if err != nil { + return nil, errors.Wrap(apiutil.ErrValidation, err) + } + req := listGroupsReq{ + token: apiutil.ExtractBearerToken(r), + tree: tree, + Page: mggroups.Page{ + Level: level, + ID: chi.URLParam(r, "groupID"), + Permission: permission, + PageMeta: pm, + Direction: -1, + ListPerms: listPerms, + }, + } + return req, nil +} + +func DecodeGroupCreate(_ context.Context, r *http.Request) (interface{}, error) { + if !strings.Contains(r.Header.Get("Content-Type"), api.ContentType) { + return nil, errors.Wrap(apiutil.ErrValidation, apiutil.ErrUnsupportedContentType) + } + var g mggroups.Group + if err := json.NewDecoder(r.Body).Decode(&g); err != nil { + return nil, errors.Wrap(apiutil.ErrValidation, errors.Wrap(err, errors.ErrMalformedEntity)) + } + req := createGroupReq{ + Group: g, + token: apiutil.ExtractBearerToken(r), + } + + return req, nil +} + +func DecodeGroupUpdate(_ context.Context, r *http.Request) (interface{}, error) { + if !strings.Contains(r.Header.Get("Content-Type"), api.ContentType) { + return nil, errors.Wrap(apiutil.ErrValidation, apiutil.ErrUnsupportedContentType) + } + req := updateGroupReq{ + id: chi.URLParam(r, "groupID"), + token: apiutil.ExtractBearerToken(r), + } + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + return nil, errors.Wrap(apiutil.ErrValidation, errors.Wrap(err, errors.ErrMalformedEntity)) + } + return req, nil +} + +func DecodeGroupRequest(_ context.Context, r *http.Request) (interface{}, error) { + req := groupReq{ + token: apiutil.ExtractBearerToken(r), + id: chi.URLParam(r, "groupID"), + } + return req, nil +} + +func DecodeGroupPermsRequest(_ context.Context, r *http.Request) (interface{}, error) { + req := groupPermsReq{ + token: apiutil.ExtractBearerToken(r), + id: chi.URLParam(r, "groupID"), + } + return req, nil +} + +func DecodeChangeGroupStatus(_ context.Context, r *http.Request) (interface{}, error) { + req := changeGroupStatusReq{ + token: apiutil.ExtractBearerToken(r), + id: chi.URLParam(r, "groupID"), + } + return req, nil +} + +func DecodeAssignMembersRequest(_ context.Context, r *http.Request) (interface{}, error) { + if !strings.Contains(r.Header.Get("Content-Type"), api.ContentType) { + return nil, errors.Wrap(apiutil.ErrValidation, apiutil.ErrUnsupportedContentType) + } + req := assignReq{ + token: apiutil.ExtractBearerToken(r), + groupID: chi.URLParam(r, "groupID"), + } + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + return nil, errors.Wrap(apiutil.ErrValidation, errors.Wrap(err, errors.ErrMalformedEntity)) + } + return req, nil +} + +func DecodeUnassignMembersRequest(_ context.Context, r *http.Request) (interface{}, error) { + if !strings.Contains(r.Header.Get("Content-Type"), api.ContentType) { + return nil, errors.Wrap(apiutil.ErrValidation, apiutil.ErrUnsupportedContentType) + } + req := unassignReq{ + token: apiutil.ExtractBearerToken(r), + groupID: chi.URLParam(r, "groupID"), + } + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + return nil, 
errors.Wrap(apiutil.ErrValidation, errors.Wrap(err, errors.ErrMalformedEntity)) + } + return req, nil +} + +func DecodeListMembersRequest(_ context.Context, r *http.Request) (interface{}, error) { + memberKind, err := apiutil.ReadStringQuery(r, api.MemberKindKey, "") + if err != nil { + return nil, errors.Wrap(apiutil.ErrValidation, err) + } + permission, err := apiutil.ReadStringQuery(r, api.PermissionKey, api.DefPermission) + if err != nil { + return nil, errors.Wrap(apiutil.ErrValidation, err) + } + req := listMembersReq{ + token: apiutil.ExtractBearerToken(r), + groupID: chi.URLParam(r, "groupID"), + permission: permission, + memberKind: memberKind, + } + return req, nil +} + +func decodePageMeta(r *http.Request) (mggroups.PageMeta, error) { + s, err := apiutil.ReadStringQuery(r, api.StatusKey, api.DefGroupStatus) + if err != nil { + return mggroups.PageMeta{}, errors.Wrap(apiutil.ErrValidation, err) + } + st, err := mgclients.ToStatus(s) + if err != nil { + return mggroups.PageMeta{}, errors.Wrap(apiutil.ErrValidation, err) + } + offset, err := apiutil.ReadNumQuery[uint64](r, api.OffsetKey, api.DefOffset) + if err != nil { + return mggroups.PageMeta{}, errors.Wrap(apiutil.ErrValidation, err) + } + limit, err := apiutil.ReadNumQuery[uint64](r, api.LimitKey, api.DefLimit) + if err != nil { + return mggroups.PageMeta{}, errors.Wrap(apiutil.ErrValidation, err) + } + name, err := apiutil.ReadStringQuery(r, api.NameKey, "") + if err != nil { + return mggroups.PageMeta{}, errors.Wrap(apiutil.ErrValidation, err) + } + meta, err := apiutil.ReadMetadataQuery(r, api.MetadataKey, nil) + if err != nil { + return mggroups.PageMeta{}, errors.Wrap(apiutil.ErrValidation, err) + } + + ret := mggroups.PageMeta{ + Offset: offset, + Limit: limit, + Name: name, + Metadata: meta, + Status: st, + } + return ret, nil +} diff --git a/pkg/groups/api/decode_test.go b/pkg/groups/api/decode_test.go new file mode 100644 index 0000000..1a32894 --- /dev/null +++ b/pkg/groups/api/decode_test.go @@ -0,0 +1,784 @@ +// Copyright (c) Abstract Machines +// SPDX-License-Identifier: Apache-2.0 + +package api + +import ( + "context" + "fmt" + "net/http" + "net/url" + "strings" + "testing" + + "github.com/absmach/magistrala/pkg/apiutil" + "github.com/absmach/magistrala/pkg/clients" + "github.com/absmach/magistrala/pkg/errors" + "github.com/absmach/magistrala/pkg/groups" + "github.com/absmach/mg-contrib/pkg/api" + "github.com/stretchr/testify/assert" +) + +func TestDecodeListGroupsRequest(t *testing.T) { + cases := []struct { + desc string + url string + header map[string][]string + resp interface{} + err error + }{ + { + desc: "valid request with no parameters", + url: "http://localhost:8080", + header: map[string][]string{}, + resp: listGroupsReq{ + Page: groups.Page{ + PageMeta: groups.PageMeta{ + Limit: 10, + }, + Permission: api.DefPermission, + Direction: -1, + }, + }, + err: nil, + }, + { + desc: "valid request with all parameters", + url: "http://localhost:8080?status=enabled&offset=10&limit=10&name=random&metadata={\"test\":\"test\"}&level=2&parent_id=random&tree=true&dir=-1&member_kind=random&permission=random&list_perms=true", + header: map[string][]string{ + "Authorization": {"Bearer 123"}, + }, + resp: listGroupsReq{ + Page: groups.Page{ + PageMeta: groups.PageMeta{ + Status: clients.EnabledStatus, + Offset: 10, + Limit: 10, + Name: "random", + Metadata: clients.Metadata{ + "test": "test", + }, + }, + Level: 2, + ID: "random", + Permission: "random", + Direction: -1, + ListPerms: true, + }, + token: "123", + tree: 
true, + memberKind: "random", + }, + err: nil, + }, + { + desc: "valid request with invalid page metadata", + url: "http://localhost:8080?metadata=random", + resp: nil, + err: apiutil.ErrValidation, + }, + { + desc: "valid request with invalid level", + url: "http://localhost:8080?level=random", + resp: nil, + err: apiutil.ErrValidation, + }, + { + desc: "valid request with invalid parent", + url: "http://localhost:8080?parent_id=random&parent_id=random", + resp: nil, + err: apiutil.ErrValidation, + }, + { + desc: "valid request with invalid tree", + url: "http://localhost:8080?tree=random", + resp: nil, + err: apiutil.ErrValidation, + }, + { + desc: "valid request with invalid dir", + url: "http://localhost:8080?dir=random", + resp: nil, + err: apiutil.ErrValidation, + }, + { + desc: "valid request with invalid member kind", + url: "http://localhost:8080?member_kind=random&member_kind=random", + resp: nil, + err: apiutil.ErrValidation, + }, + { + desc: "valid request with invalid permission", + url: "http://localhost:8080?permission=random&permission=random", + resp: nil, + err: apiutil.ErrValidation, + }, + { + desc: "valid request with invalid list permission", + url: "http://localhost:8080?&list_perms=random", + resp: nil, + err: apiutil.ErrValidation, + }, + } + + for _, tc := range cases { + parsedURL, err := url.Parse(tc.url) + assert.NoError(t, err) + + req := &http.Request{ + URL: parsedURL, + Header: tc.header, + } + resp, err := DecodeListGroupsRequest(context.Background(), req) + assert.Equal(t, tc.resp, resp, fmt.Sprintf("%s: expected %s got %s\n", tc.desc, tc.resp, resp)) + assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("expected error %v to contain %v", err, tc.err)) + } +} + +func TestDecodeListParentsRequest(t *testing.T) { + cases := []struct { + desc string + url string + header map[string][]string + resp interface{} + err error + }{ + { + desc: "valid request with no parameters", + url: "http://localhost:8080", + header: map[string][]string{}, + resp: listGroupsReq{ + Page: groups.Page{ + PageMeta: groups.PageMeta{ + Limit: 10, + }, + Permission: api.DefPermission, + Direction: +1, + }, + }, + err: nil, + }, + { + desc: "valid request with all parameters", + url: "http://localhost:8080?status=enabled&offset=10&limit=10&name=random&metadata={\"test\":\"test\"}&level=2&parent_id=random&tree=true&dir=-1&member_kind=random&permission=random&list_perms=true", + header: map[string][]string{ + "Authorization": {"Bearer 123"}, + }, + resp: listGroupsReq{ + Page: groups.Page{ + PageMeta: groups.PageMeta{ + Status: clients.EnabledStatus, + Offset: 10, + Limit: 10, + Name: "random", + Metadata: clients.Metadata{ + "test": "test", + }, + }, + Level: 2, + Permission: "random", + Direction: +1, + ListPerms: true, + }, + token: "123", + tree: true, + }, + err: nil, + }, + { + desc: "valid request with invalid page metadata", + url: "http://localhost:8080?metadata=random", + resp: nil, + err: apiutil.ErrValidation, + }, + { + desc: "valid request with invalid level", + url: "http://localhost:8080?level=random", + resp: nil, + err: apiutil.ErrValidation, + }, + { + desc: "valid request with invalid tree", + url: "http://localhost:8080?tree=random", + resp: nil, + err: apiutil.ErrValidation, + }, + { + desc: "valid request with invalid permission", + url: "http://localhost:8080?permission=random&permission=random", + resp: nil, + err: apiutil.ErrValidation, + }, + { + desc: "valid request with invalid list permission", + url: "http://localhost:8080?&list_perms=random", + resp: 
nil, + err: apiutil.ErrValidation, + }, + } + + for _, tc := range cases { + parsedURL, err := url.Parse(tc.url) + assert.NoError(t, err) + + req := &http.Request{ + URL: parsedURL, + Header: tc.header, + } + resp, err := DecodeListParentsRequest(context.Background(), req) + assert.Equal(t, tc.resp, resp, fmt.Sprintf("%s: expected %s got %s\n", tc.desc, tc.resp, resp)) + assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("expected error %v to contain %v", err, tc.err)) + } +} + +func TestDecodeListChildrenRequest(t *testing.T) { + cases := []struct { + desc string + url string + header map[string][]string + resp interface{} + err error + }{ + { + desc: "valid request with no parameters", + url: "http://localhost:8080", + header: map[string][]string{}, + resp: listGroupsReq{ + Page: groups.Page{ + PageMeta: groups.PageMeta{ + Limit: 10, + }, + Permission: api.DefPermission, + Direction: -1, + }, + }, + err: nil, + }, + { + desc: "valid request with all parameters", + url: "http://localhost:8080?status=enabled&offset=10&limit=10&name=random&metadata={\"test\":\"test\"}&level=2&parent_id=random&tree=true&dir=-1&member_kind=random&permission=random&list_perms=true", + header: map[string][]string{ + "Authorization": {"Bearer 123"}, + }, + resp: listGroupsReq{ + Page: groups.Page{ + PageMeta: groups.PageMeta{ + Status: clients.EnabledStatus, + Offset: 10, + Limit: 10, + Name: "random", + Metadata: clients.Metadata{ + "test": "test", + }, + }, + Level: 2, + Permission: "random", + Direction: -1, + ListPerms: true, + }, + token: "123", + tree: true, + }, + err: nil, + }, + { + desc: "valid request with invalid page metadata", + url: "http://localhost:8080?metadata=random", + resp: nil, + err: apiutil.ErrValidation, + }, + { + desc: "valid request with invalid level", + url: "http://localhost:8080?level=random", + resp: nil, + err: apiutil.ErrValidation, + }, + { + desc: "valid request with invalid tree", + url: "http://localhost:8080?tree=random", + resp: nil, + err: apiutil.ErrValidation, + }, + { + desc: "valid request with invalid permission", + url: "http://localhost:8080?permission=random&permission=random", + resp: nil, + err: apiutil.ErrValidation, + }, + { + desc: "valid request with invalid list permission", + url: "http://localhost:8080?&list_perms=random", + resp: nil, + err: apiutil.ErrValidation, + }, + } + + for _, tc := range cases { + parsedURL, err := url.Parse(tc.url) + assert.NoError(t, err) + + req := &http.Request{ + URL: parsedURL, + Header: tc.header, + } + resp, err := DecodeListChildrenRequest(context.Background(), req) + assert.Equal(t, tc.resp, resp, fmt.Sprintf("%s: expected %s got %s\n", tc.desc, tc.resp, resp)) + assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("expected error %v to contain %v", err, tc.err)) + } +} + +func TestDecodeListMembersRequest(t *testing.T) { + cases := []struct { + desc string + url string + header map[string][]string + resp interface{} + err error + }{ + { + desc: "valid request with no parameters", + url: "http://localhost:8080", + header: map[string][]string{}, + resp: listMembersReq{ + permission: api.DefPermission, + }, + err: nil, + }, + { + desc: "valid request with all parameters", + url: "http://localhost:8080?member_kind=random&permission=random", + header: map[string][]string{ + "Authorization": {"Bearer 123"}, + }, + resp: listMembersReq{ + token: "123", + memberKind: "random", + permission: "random", + }, + err: nil, + }, + { + desc: "valid request with invalid permission", + url: 
"http://localhost:8080?permission=random&permission=random", + resp: nil, + err: apiutil.ErrValidation, + }, + { + desc: "valid request with invalid member kind", + url: "http://localhost:8080?member_kind=random&member_kind=random", + resp: nil, + err: apiutil.ErrValidation, + }, + } + + for _, tc := range cases { + parsedURL, err := url.Parse(tc.url) + assert.NoError(t, err) + + req := &http.Request{ + URL: parsedURL, + Header: tc.header, + } + resp, err := DecodeListMembersRequest(context.Background(), req) + assert.Equal(t, tc.resp, resp, fmt.Sprintf("%s: expected %s got %s\n", tc.desc, tc.resp, resp)) + assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("expected error %v to contain %v", err, tc.err)) + } +} + +func TestDecodePageMeta(t *testing.T) { + cases := []struct { + desc string + url string + resp groups.PageMeta + err error + }{ + { + desc: "valid request with no parameters", + url: "http://localhost:8080", + resp: groups.PageMeta{ + Limit: 10, + }, + err: nil, + }, + { + desc: "valid request with all parameters", + url: "http://localhost:8080?status=enabled&offset=10&limit=10&name=random&metadata={\"test\":\"test\"}", + resp: groups.PageMeta{ + Status: clients.EnabledStatus, + Offset: 10, + Limit: 10, + Name: "random", + Metadata: clients.Metadata{ + "test": "test", + }, + }, + err: nil, + }, + { + desc: "valid request with invalid status", + url: "http://localhost:8080?status=random", + resp: groups.PageMeta{}, + err: apiutil.ErrValidation, + }, + { + desc: "valid request with invalid status duplicated", + url: "http://localhost:8080?status=random&status=random", + resp: groups.PageMeta{}, + err: apiutil.ErrValidation, + }, + { + desc: "valid request with invalid offset", + url: "http://localhost:8080?offset=random", + resp: groups.PageMeta{}, + err: apiutil.ErrValidation, + }, + { + desc: "valid request with invalid limit", + url: "http://localhost:8080?limit=random", + resp: groups.PageMeta{}, + err: apiutil.ErrValidation, + }, + { + desc: "valid request with invalid name", + url: "http://localhost:8080?name=random&name=random", + resp: groups.PageMeta{}, + err: apiutil.ErrValidation, + }, + { + desc: "valid request with invalid page metadata", + url: "http://localhost:8080?metadata=random", + resp: groups.PageMeta{}, + err: apiutil.ErrValidation, + }, + } + + for _, tc := range cases { + parsedURL, err := url.Parse(tc.url) + assert.NoError(t, err) + + req := &http.Request{URL: parsedURL} + resp, err := decodePageMeta(req) + assert.Equal(t, tc.resp, resp, fmt.Sprintf("%s: expected %v got %v\n", tc.desc, tc.resp, resp)) + assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("expected error %v to contain %v", err, tc.err)) + } +} + +func TestDecodeGroupCreate(t *testing.T) { + cases := []struct { + desc string + body string + header map[string][]string + resp interface{} + err error + }{ + { + desc: "valid request", + body: `{"name": "random", "description": "random"}`, + header: map[string][]string{ + "Authorization": {"Bearer 123"}, + "Content-Type": {api.ContentType}, + }, + resp: createGroupReq{ + Group: groups.Group{ + Name: "random", + Description: "random", + }, + token: "123", + }, + err: nil, + }, + { + desc: "invalid content type", + body: `{"name": "random", "description": "random"}`, + header: map[string][]string{ + "Authorization": {"Bearer 123"}, + "Content-Type": {"text/plain"}, + }, + resp: nil, + err: apiutil.ErrUnsupportedContentType, + }, + { + desc: "invalid request body", + body: `data`, + header: map[string][]string{ + "Authorization": 
{"Bearer 123"}, + "Content-Type": {api.ContentType}, + }, + resp: nil, + err: errors.ErrMalformedEntity, + }, + } + + for _, tc := range cases { + req, err := http.NewRequest(http.MethodPost, "http://localhost:8080", strings.NewReader(tc.body)) + assert.NoError(t, err) + req.Header = tc.header + resp, err := DecodeGroupCreate(context.Background(), req) + assert.Equal(t, tc.resp, resp, fmt.Sprintf("%s: expected %s got %s\n", tc.desc, tc.resp, resp)) + assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("expected error %v to contain %v", err, tc.err)) + } +} + +func TestDecodeGroupUpdate(t *testing.T) { + cases := []struct { + desc string + body string + header map[string][]string + resp interface{} + err error + }{ + { + desc: "valid request", + body: `{"name": "random", "description": "random"}`, + header: map[string][]string{ + "Authorization": {"Bearer 123"}, + "Content-Type": {api.ContentType}, + }, + resp: updateGroupReq{ + Name: "random", + Description: "random", + token: "123", + }, + err: nil, + }, + { + desc: "invalid content type", + body: `{"name": "random", "description": "random"}`, + header: map[string][]string{ + "Authorization": {"Bearer 123"}, + "Content-Type": {"text/plain"}, + }, + resp: nil, + err: apiutil.ErrUnsupportedContentType, + }, + { + desc: "invalid request body", + body: `data`, + header: map[string][]string{ + "Authorization": {"Bearer 123"}, + "Content-Type": {api.ContentType}, + }, + resp: nil, + err: errors.ErrMalformedEntity, + }, + } + + for _, tc := range cases { + req, err := http.NewRequest(http.MethodPut, "http://localhost:8080", strings.NewReader(tc.body)) + assert.NoError(t, err) + req.Header = tc.header + resp, err := DecodeGroupUpdate(context.Background(), req) + assert.Equal(t, tc.resp, resp, fmt.Sprintf("%s: expected %s got %s\n", tc.desc, tc.resp, resp)) + assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("expected error %v to contain %v", err, tc.err)) + } +} + +func TestDecodeGroupRequest(t *testing.T) { + cases := []struct { + desc string + header map[string][]string + resp interface{} + err error + }{ + { + desc: "valid request", + header: map[string][]string{ + "Authorization": {"Bearer 123"}, + }, + resp: groupReq{ + token: "123", + }, + err: nil, + }, + { + desc: "empty token", + resp: groupReq{}, + err: nil, + }, + } + + for _, tc := range cases { + req, err := http.NewRequest(http.MethodGet, "http://localhost:8080", http.NoBody) + assert.NoError(t, err) + req.Header = tc.header + resp, err := DecodeGroupRequest(context.Background(), req) + assert.Equal(t, tc.resp, resp, fmt.Sprintf("%s: expected %s got %s\n", tc.desc, tc.resp, resp)) + assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("expected error %v to contain %v", err, tc.err)) + } +} + +func TestDecodeGroupPermsRequest(t *testing.T) { + cases := []struct { + desc string + header map[string][]string + resp interface{} + err error + }{ + { + desc: "valid request", + header: map[string][]string{ + "Authorization": {"Bearer 123"}, + }, + resp: groupPermsReq{ + token: "123", + }, + err: nil, + }, + { + desc: "empty token", + resp: groupPermsReq{}, + err: nil, + }, + } + + for _, tc := range cases { + req, err := http.NewRequest(http.MethodGet, "http://localhost:8080", http.NoBody) + assert.NoError(t, err) + req.Header = tc.header + resp, err := DecodeGroupPermsRequest(context.Background(), req) + assert.Equal(t, tc.resp, resp, fmt.Sprintf("%s: expected %s got %s\n", tc.desc, tc.resp, resp)) + assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("expected error 
%v to contain %v", err, tc.err)) + } +} + +func TestDecodeChangeGroupStatus(t *testing.T) { + cases := []struct { + desc string + header map[string][]string + resp interface{} + err error + }{ + { + desc: "valid request", + header: map[string][]string{ + "Authorization": {"Bearer 123"}, + }, + resp: changeGroupStatusReq{ + token: "123", + }, + err: nil, + }, + { + desc: "empty token", + resp: changeGroupStatusReq{}, + err: nil, + }, + } + + for _, tc := range cases { + req, err := http.NewRequest(http.MethodGet, "http://localhost:8080", http.NoBody) + assert.NoError(t, err) + req.Header = tc.header + resp, err := DecodeChangeGroupStatus(context.Background(), req) + assert.Equal(t, tc.resp, resp, fmt.Sprintf("%s: expected %s got %s\n", tc.desc, tc.resp, resp)) + assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("expected error %v to contain %v", err, tc.err)) + } +} + +func TestDecodeAssignMembersRequest(t *testing.T) { + cases := []struct { + desc string + body string + header map[string][]string + resp interface{} + err error + }{ + { + desc: "valid request", + body: `{"member_kind": "random", "members": ["random"]}`, + header: map[string][]string{ + "Authorization": {"Bearer 123"}, + "Content-Type": {api.ContentType}, + }, + resp: assignReq{ + MemberKind: "random", + Members: []string{"random"}, + token: "123", + }, + err: nil, + }, + { + desc: "invalid content type", + body: `{"member_kind": "random", "members": ["random"]}`, + header: map[string][]string{ + "Authorization": {"Bearer 123"}, + "Content-Type": {"text/plain"}, + }, + resp: nil, + err: apiutil.ErrUnsupportedContentType, + }, + { + desc: "invalid request body", + body: `data`, + header: map[string][]string{ + "Authorization": {"Bearer 123"}, + "Content-Type": {api.ContentType}, + }, + resp: nil, + err: errors.ErrMalformedEntity, + }, + } + + for _, tc := range cases { + req, err := http.NewRequest(http.MethodPost, "http://localhost:8080", strings.NewReader(tc.body)) + assert.NoError(t, err) + req.Header = tc.header + resp, err := DecodeAssignMembersRequest(context.Background(), req) + assert.Equal(t, tc.resp, resp, fmt.Sprintf("%s: expected %s got %s\n", tc.desc, tc.resp, resp)) + assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("expected error %v to contain %v", err, tc.err)) + } +} + +func TestDecodeUnassignMembersRequest(t *testing.T) { + cases := []struct { + desc string + body string + header map[string][]string + resp interface{} + err error + }{ + { + desc: "valid request", + body: `{"member_kind": "random", "members": ["random"]}`, + header: map[string][]string{ + "Authorization": {"Bearer 123"}, + "Content-Type": {api.ContentType}, + }, + resp: unassignReq{ + MemberKind: "random", + Members: []string{"random"}, + token: "123", + }, + err: nil, + }, + { + desc: "invalid content type", + body: `{"member_kind": "random", "members": ["random"]}`, + header: map[string][]string{ + "Authorization": {"Bearer 123"}, + "Content-Type": {"text/plain"}, + }, + resp: nil, + err: apiutil.ErrUnsupportedContentType, + }, + { + desc: "invalid request body", + body: `data`, + header: map[string][]string{ + "Authorization": {"Bearer 123"}, + "Content-Type": {api.ContentType}, + }, + resp: nil, + err: errors.ErrMalformedEntity, + }, + } + + for _, tc := range cases { + req, err := http.NewRequest(http.MethodPost, "http://localhost:8080", strings.NewReader(tc.body)) + assert.NoError(t, err) + req.Header = tc.header + resp, err := DecodeUnassignMembersRequest(context.Background(), req) + assert.Equal(t, tc.resp, resp, 
fmt.Sprintf("%s: expected %s got %s\n", tc.desc, tc.resp, resp)) + assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("expected error %v to contain %v", err, tc.err)) + } +} diff --git a/pkg/groups/api/doc.go b/pkg/groups/api/doc.go new file mode 100644 index 0000000..2424852 --- /dev/null +++ b/pkg/groups/api/doc.go @@ -0,0 +1,6 @@ +// Copyright (c) Abstract Machines +// SPDX-License-Identifier: Apache-2.0 + +// Package api contains API-related concerns: endpoint definitions, middlewares +// and all resource representations. +package api diff --git a/pkg/groups/api/endpoint_test.go b/pkg/groups/api/endpoint_test.go new file mode 100644 index 0000000..bac2c55 --- /dev/null +++ b/pkg/groups/api/endpoint_test.go @@ -0,0 +1,1016 @@ +// Copyright (c) Abstract Machines +// SPDX-License-Identifier: Apache-2.0 + +package api + +import ( + "context" + "fmt" + "net/http" + "testing" + "time" + + "github.com/absmach/magistrala/auth" + "github.com/absmach/magistrala/pkg/apiutil" + "github.com/absmach/magistrala/pkg/clients" + "github.com/absmach/magistrala/pkg/errors" + svcerr "github.com/absmach/magistrala/pkg/errors/service" + "github.com/absmach/magistrala/pkg/groups" + "github.com/absmach/magistrala/pkg/groups/mocks" + "github.com/absmach/mg-contrib/pkg/testsutil" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" +) + +var validGroupResp = groups.Group{ + ID: testsutil.GenerateUUID(&testing.T{}), + Name: valid, + Description: valid, + Domain: testsutil.GenerateUUID(&testing.T{}), + Parent: testsutil.GenerateUUID(&testing.T{}), + Metadata: clients.Metadata{ + "name": "test", + }, + Children: []*groups.Group{}, + CreatedAt: time.Now().Add(-1 * time.Second), + UpdatedAt: time.Now(), + UpdatedBy: testsutil.GenerateUUID(&testing.T{}), + Status: clients.EnabledStatus, +} + +func TestCreateGroupEndpoint(t *testing.T) { + svc := new(mocks.Service) + cases := []struct { + desc string + kind string + req createGroupReq + svcResp groups.Group + svcErr error + resp createGroupRes + err error + }{ + { + desc: "successfully with groups kind", + kind: auth.NewGroupKind, + req: createGroupReq{ + token: valid, + Group: groups.Group{ + Name: valid, + }, + }, + svcResp: validGroupResp, + svcErr: nil, + resp: createGroupRes{created: true, Group: validGroupResp}, + err: nil, + }, + { + desc: "successfully with channels kind", + kind: auth.NewChannelKind, + req: createGroupReq{ + token: valid, + Group: groups.Group{ + Name: valid, + }, + }, + svcResp: validGroupResp, + svcErr: nil, + resp: createGroupRes{created: true, Group: validGroupResp}, + err: nil, + }, + { + desc: "unsuccessfully with invalid request", + kind: auth.NewGroupKind, + req: createGroupReq{ + Group: groups.Group{ + Name: valid, + }, + }, + resp: createGroupRes{created: false}, + err: apiutil.ErrValidation, + }, + { + desc: "unsuccessfully with repo error", + kind: auth.NewGroupKind, + req: createGroupReq{ + token: valid, + Group: groups.Group{ + Name: valid, + }, + }, + svcResp: groups.Group{}, + svcErr: svcerr.ErrAuthorization, + resp: createGroupRes{created: false}, + err: svcerr.ErrAuthorization, + }, + } + + for _, tc := range cases { + repoCall := svc.On("CreateGroup", context.Background(), tc.req.token, tc.kind, tc.req.Group).Return(tc.svcResp, tc.svcErr) + resp, err := CreateGroupEndpoint(svc, tc.kind)(context.Background(), tc.req) + assert.Equal(t, tc.resp, resp, fmt.Sprintf("%s: expected %v got %v\n", tc.desc, tc.resp, resp)) + assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("expected error %v to 
contain %v", err, tc.err)) + response := resp.(createGroupRes) + switch err { + case nil: + assert.Equal(t, response.Code(), http.StatusCreated) + assert.Equal(t, response.Headers()["Location"], fmt.Sprintf("/groups/%s", response.ID)) + default: + assert.Equal(t, response.Code(), http.StatusOK) + assert.Empty(t, response.Headers()) + } + assert.False(t, response.Empty()) + repoCall.Unset() + } +} + +func TestViewGroupEndpoint(t *testing.T) { + svc := new(mocks.Service) + cases := []struct { + desc string + req groupReq + svcResp groups.Group + svcErr error + resp viewGroupRes + err error + }{ + { + desc: "successfully", + req: groupReq{ + token: valid, + id: testsutil.GenerateUUID(t), + }, + svcResp: validGroupResp, + svcErr: nil, + resp: viewGroupRes{Group: validGroupResp}, + err: nil, + }, + { + desc: "unsuccessfully with invalid request", + req: groupReq{ + id: testsutil.GenerateUUID(t), + }, + resp: viewGroupRes{}, + err: apiutil.ErrValidation, + }, + { + desc: "unsuccessfully with repo error", + req: groupReq{ + token: valid, + id: testsutil.GenerateUUID(t), + }, + svcResp: groups.Group{}, + svcErr: svcerr.ErrAuthorization, + resp: viewGroupRes{}, + err: svcerr.ErrAuthorization, + }, + } + + for _, tc := range cases { + repoCall := svc.On("ViewGroup", context.Background(), tc.req.token, tc.req.id).Return(tc.svcResp, tc.svcErr) + resp, err := ViewGroupEndpoint(svc)(context.Background(), tc.req) + assert.Equal(t, tc.resp, resp, fmt.Sprintf("%s: expected %v got %v\n", tc.desc, tc.resp, resp)) + assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("expected error %v to contain %v", err, tc.err)) + response := resp.(viewGroupRes) + assert.Equal(t, response.Code(), http.StatusOK) + assert.Empty(t, response.Headers()) + assert.False(t, response.Empty()) + repoCall.Unset() + } +} + +func TestViewGroupPermsEndpoint(t *testing.T) { + svc := new(mocks.Service) + cases := []struct { + desc string + req groupPermsReq + svcResp []string + svcErr error + resp viewGroupPermsRes + err error + }{ + { + desc: "successfully", + req: groupPermsReq{ + token: valid, + id: testsutil.GenerateUUID(t), + }, + svcResp: []string{ + valid, + }, + svcErr: nil, + resp: viewGroupPermsRes{Permissions: []string{valid}}, + err: nil, + }, + { + desc: "unsuccessfully with invalid request", + req: groupPermsReq{ + id: testsutil.GenerateUUID(t), + }, + resp: viewGroupPermsRes{}, + err: apiutil.ErrValidation, + }, + { + desc: "unsuccessfully with repo error", + req: groupPermsReq{ + token: valid, + id: testsutil.GenerateUUID(t), + }, + svcResp: []string{}, + svcErr: svcerr.ErrAuthorization, + resp: viewGroupPermsRes{}, + err: svcerr.ErrAuthorization, + }, + } + + for _, tc := range cases { + repoCall := svc.On("ViewGroupPerms", context.Background(), tc.req.token, tc.req.id).Return(tc.svcResp, tc.svcErr) + resp, err := ViewGroupPermsEndpoint(svc)(context.Background(), tc.req) + assert.Equal(t, tc.resp, resp, fmt.Sprintf("%s: expected %v got %v\n", tc.desc, tc.resp, resp)) + assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("expected error %v to contain %v", err, tc.err)) + response := resp.(viewGroupPermsRes) + assert.Equal(t, response.Code(), http.StatusOK) + assert.Empty(t, response.Headers()) + assert.False(t, response.Empty()) + repoCall.Unset() + } +} + +func TestEnableGroupEndpoint(t *testing.T) { + svc := new(mocks.Service) + cases := []struct { + desc string + req changeGroupStatusReq + svcResp groups.Group + svcErr error + resp changeStatusRes + err error + }{ + { + desc: "successfully", + req: 
changeGroupStatusReq{ + token: valid, + id: testsutil.GenerateUUID(t), + }, + svcResp: validGroupResp, + svcErr: nil, + resp: changeStatusRes{Group: validGroupResp}, + err: nil, + }, + { + desc: "unsuccessfully with invalid request", + req: changeGroupStatusReq{ + id: testsutil.GenerateUUID(t), + }, + resp: changeStatusRes{}, + err: apiutil.ErrValidation, + }, + { + desc: "unsuccessfully with repo error", + req: changeGroupStatusReq{ + token: valid, + id: testsutil.GenerateUUID(t), + }, + svcResp: groups.Group{}, + svcErr: svcerr.ErrAuthorization, + resp: changeStatusRes{}, + err: svcerr.ErrAuthorization, + }, + } + + for _, tc := range cases { + repoCall := svc.On("EnableGroup", context.Background(), tc.req.token, tc.req.id).Return(tc.svcResp, tc.svcErr) + resp, err := EnableGroupEndpoint(svc)(context.Background(), tc.req) + assert.Equal(t, tc.resp, resp, fmt.Sprintf("%s: expected %v got %v\n", tc.desc, tc.resp, resp)) + assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("expected error %v to contain %v", err, tc.err)) + response := resp.(changeStatusRes) + assert.Equal(t, response.Code(), http.StatusOK) + assert.Empty(t, response.Headers()) + assert.False(t, response.Empty()) + repoCall.Unset() + } +} + +func TestDisableGroupEndpoint(t *testing.T) { + svc := new(mocks.Service) + cases := []struct { + desc string + req changeGroupStatusReq + svcResp groups.Group + svcErr error + resp changeStatusRes + err error + }{ + { + desc: "successfully", + req: changeGroupStatusReq{ + token: valid, + id: testsutil.GenerateUUID(t), + }, + svcResp: validGroupResp, + svcErr: nil, + resp: changeStatusRes{Group: validGroupResp}, + err: nil, + }, + { + desc: "unsuccessfully with invalid request", + req: changeGroupStatusReq{ + id: testsutil.GenerateUUID(t), + }, + resp: changeStatusRes{}, + err: apiutil.ErrValidation, + }, + { + desc: "unsuccessfully with repo error", + req: changeGroupStatusReq{ + token: valid, + id: testsutil.GenerateUUID(t), + }, + svcResp: groups.Group{}, + svcErr: svcerr.ErrAuthorization, + resp: changeStatusRes{}, + err: svcerr.ErrAuthorization, + }, + } + + for _, tc := range cases { + repoCall := svc.On("DisableGroup", context.Background(), tc.req.token, tc.req.id).Return(tc.svcResp, tc.svcErr) + resp, err := DisableGroupEndpoint(svc)(context.Background(), tc.req) + assert.Equal(t, tc.resp, resp, fmt.Sprintf("%s: expected %v got %v\n", tc.desc, tc.resp, resp)) + assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("expected error %v to contain %v", err, tc.err)) + response := resp.(changeStatusRes) + assert.Equal(t, response.Code(), http.StatusOK) + assert.Empty(t, response.Headers()) + assert.False(t, response.Empty()) + repoCall.Unset() + } +} + +func TestDeleteGroupEndpoint(t *testing.T) { + svc := new(mocks.Service) + cases := []struct { + desc string + req groupReq + svcErr error + resp deleteGroupRes + err error + }{ + { + desc: "successfully", + req: groupReq{ + token: valid, + id: testsutil.GenerateUUID(t), + }, + svcErr: nil, + resp: deleteGroupRes{deleted: true}, + err: nil, + }, + { + desc: "unsuccessfully with invalid request", + req: groupReq{ + id: testsutil.GenerateUUID(t), + }, + resp: deleteGroupRes{}, + err: apiutil.ErrValidation, + }, + { + desc: "unsuccessfully with repo error", + req: groupReq{ + token: valid, + id: testsutil.GenerateUUID(t), + }, + svcErr: svcerr.ErrAuthorization, + resp: deleteGroupRes{}, + err: svcerr.ErrAuthorization, + }, + } + + for _, tc := range cases { + repoCall := svc.On("DeleteGroup", context.Background(), tc.req.token, 
tc.req.id).Return(tc.svcErr) + resp, err := DeleteGroupEndpoint(svc)(context.Background(), tc.req) + assert.Equal(t, tc.resp, resp, fmt.Sprintf("%s: expected %v got %v\n", tc.desc, tc.resp, resp)) + assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("expected error %v to contain %v", err, tc.err)) + response := resp.(deleteGroupRes) + switch err { + case nil: + assert.Equal(t, response.Code(), http.StatusNoContent) + default: + assert.Equal(t, response.Code(), http.StatusBadRequest) + } + assert.Empty(t, response.Headers()) + assert.True(t, response.Empty()) + repoCall.Unset() + } +} + +func TestUpdateGroupEndpoint(t *testing.T) { + svc := new(mocks.Service) + cases := []struct { + desc string + req updateGroupReq + svcResp groups.Group + svcErr error + resp updateGroupRes + err error + }{ + { + desc: "successfully", + req: updateGroupReq{ + token: valid, + id: testsutil.GenerateUUID(t), + Name: valid, + }, + svcResp: validGroupResp, + svcErr: nil, + resp: updateGroupRes{Group: validGroupResp}, + err: nil, + }, + { + desc: "unsuccessfully with invalid request", + req: updateGroupReq{ + id: testsutil.GenerateUUID(t), + Name: valid, + }, + resp: updateGroupRes{}, + err: apiutil.ErrValidation, + }, + { + desc: "unsuccessfully with repo error", + req: updateGroupReq{ + token: valid, + id: testsutil.GenerateUUID(t), + Name: valid, + }, + svcResp: groups.Group{}, + svcErr: svcerr.ErrAuthorization, + resp: updateGroupRes{}, + err: svcerr.ErrAuthorization, + }, + } + + for _, tc := range cases { + group := groups.Group{ + ID: tc.req.id, + Name: tc.req.Name, + Description: tc.req.Description, + Metadata: tc.req.Metadata, + } + repoCall := svc.On("UpdateGroup", context.Background(), tc.req.token, group).Return(tc.svcResp, tc.svcErr) + resp, err := UpdateGroupEndpoint(svc)(context.Background(), tc.req) + assert.Equal(t, tc.resp, resp, fmt.Sprintf("%s: expected %v got %v\n", tc.desc, tc.resp, resp)) + assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("expected error %v to contain %v", err, tc.err)) + response := resp.(updateGroupRes) + assert.Equal(t, response.Code(), http.StatusOK) + assert.Empty(t, response.Headers()) + assert.False(t, response.Empty()) + repoCall.Unset() + } +} + +func TestListGroupsEndpoint(t *testing.T) { + svc := new(mocks.Service) + childGroup := groups.Group{ + ID: testsutil.GenerateUUID(t), + Name: valid, + Description: valid, + Domain: testsutil.GenerateUUID(t), + Parent: validGroupResp.ID, + Metadata: clients.Metadata{ + "name": "test", + }, + Level: -1, + Children: []*groups.Group{}, + CreatedAt: time.Now().Add(-1 * time.Second), + UpdatedAt: time.Now(), + UpdatedBy: testsutil.GenerateUUID(t), + Status: clients.EnabledStatus, + } + parentGroup := groups.Group{ + ID: testsutil.GenerateUUID(t), + Name: valid, + Description: valid, + Domain: testsutil.GenerateUUID(t), + Metadata: clients.Metadata{ + "name": "test", + }, + Level: 1, + Children: []*groups.Group{}, + CreatedAt: time.Now().Add(-1 * time.Second), + UpdatedAt: time.Now(), + UpdatedBy: testsutil.GenerateUUID(t), + Status: clients.EnabledStatus, + } + + validGroupResp.Children = append(validGroupResp.Children, &childGroup) + parentGroup.Children = append(parentGroup.Children, &validGroupResp) + + cases := []struct { + desc string + memberKind string + req listGroupsReq + svcResp groups.Page + svcErr error + resp groupPageRes + err error + }{ + { + desc: "successfully", + memberKind: auth.ThingsKind, + req: listGroupsReq{ + Page: groups.Page{ + PageMeta: groups.PageMeta{ + Limit: 10, + }, + }, + token: 
valid, + memberKind: auth.ThingsKind, + memberID: testsutil.GenerateUUID(t), + }, + svcResp: groups.Page{ + Groups: []groups.Group{validGroupResp}, + }, + svcErr: nil, + resp: groupPageRes{ + Groups: []viewGroupRes{ + { + Group: validGroupResp, + }, + }, + }, + err: nil, + }, + { + desc: "successfully with empty member kind", + req: listGroupsReq{ + Page: groups.Page{ + PageMeta: groups.PageMeta{ + Limit: 10, + }, + }, + token: valid, + memberKind: auth.ThingsKind, + memberID: testsutil.GenerateUUID(t), + }, + svcResp: groups.Page{ + Groups: []groups.Group{validGroupResp}, + }, + svcErr: nil, + resp: groupPageRes{ + Groups: []viewGroupRes{ + { + Group: validGroupResp, + }, + }, + }, + err: nil, + }, + { + desc: "successfully with tree", + memberKind: auth.ThingsKind, + req: listGroupsReq{ + Page: groups.Page{ + PageMeta: groups.PageMeta{ + Limit: 10, + }, + }, + tree: true, + token: valid, + memberKind: auth.ThingsKind, + memberID: testsutil.GenerateUUID(t), + }, + svcResp: groups.Page{ + Groups: []groups.Group{validGroupResp, childGroup}, + }, + svcErr: nil, + resp: groupPageRes{ + Groups: []viewGroupRes{ + { + Group: validGroupResp, + }, + }, + }, + err: nil, + }, + { + desc: "list children groups successfully without tree", + memberKind: auth.UsersKind, + req: listGroupsReq{ + Page: groups.Page{ + PageMeta: groups.PageMeta{ + Limit: 10, + }, + ID: validGroupResp.ID, + Direction: -1, + }, + tree: false, + token: valid, + memberKind: auth.UsersKind, + memberID: testsutil.GenerateUUID(t), + }, + svcResp: groups.Page{ + Groups: []groups.Group{validGroupResp, childGroup}, + }, + svcErr: nil, + resp: groupPageRes{ + Groups: []viewGroupRes{ + { + Group: childGroup, + }, + }, + }, + err: nil, + }, + { + desc: "list parent group successfully without tree", + memberKind: auth.UsersKind, + req: listGroupsReq{ + Page: groups.Page{ + PageMeta: groups.PageMeta{ + Limit: 10, + }, + ID: validGroupResp.ID, + Direction: 1, + }, + tree: false, + token: valid, + memberKind: auth.UsersKind, + memberID: testsutil.GenerateUUID(t), + }, + svcResp: groups.Page{ + Groups: []groups.Group{parentGroup, validGroupResp}, + }, + svcErr: nil, + resp: groupPageRes{ + Groups: []viewGroupRes{ + { + Group: parentGroup, + }, + }, + }, + err: nil, + }, + { + desc: "unsuccessfully with invalid request", + memberKind: auth.ThingsKind, + req: listGroupsReq{}, + resp: groupPageRes{}, + err: apiutil.ErrValidation, + }, + { + desc: "unsuccessfully with repo error", + memberKind: auth.ThingsKind, + req: listGroupsReq{ + Page: groups.Page{ + PageMeta: groups.PageMeta{ + Limit: 10, + }, + }, + token: valid, + memberKind: auth.ThingsKind, + memberID: testsutil.GenerateUUID(t), + }, + svcResp: groups.Page{}, + svcErr: svcerr.ErrAuthorization, + resp: groupPageRes{}, + err: svcerr.ErrAuthorization, + }, + } + + for _, tc := range cases { + if tc.memberKind != "" { + tc.req.memberKind = tc.memberKind + } + repoCall := svc.On("ListGroups", context.Background(), tc.req.token, tc.req.memberKind, tc.req.memberID, tc.req.Page).Return(tc.svcResp, tc.svcErr) + resp, err := ListGroupsEndpoint(svc, mock.Anything, tc.memberKind)(context.Background(), tc.req) + assert.Equal(t, tc.resp, resp, fmt.Sprintf("%s: expected %v got %v\n", tc.desc, tc.resp, resp)) + assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("expected error %v to contain %v", err, tc.err)) + response := resp.(groupPageRes) + assert.Equal(t, response.Code(), http.StatusOK) + assert.Empty(t, response.Headers()) + assert.False(t, response.Empty()) + repoCall.Unset() + } +} + 
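+// Editorial note (a sketch, not part of the original patch): every test in
+// this file follows the same table-driven pattern around the generated
+// mocks.Service: register a single expectation, invoke the endpoint, assert
+// on the response and error, then release the expectation so the next case
+// can register its own for the same method. Schematically:
+//
+//	repoCall := svc.On("ListGroups", context.Background(), tc.req.token, tc.req.memberKind, tc.req.memberID, tc.req.Page).Return(tc.svcResp, tc.svcErr)
+//	resp, err := ListGroupsEndpoint(svc, mock.Anything, tc.memberKind)(context.Background(), tc.req)
+//	assert.True(t, errors.Contains(err, tc.err))
+//	repoCall.Unset() // drop the expectation before the next iteration
+//
+// Without Unset, a stale expectation from an earlier case could satisfy (or
+// conflict with) a later call that happens to use the same arguments.
+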
+func TestListMembersEndpoint(t *testing.T) { + svc := new(mocks.Service) + cases := []struct { + desc string + memberKind string + req listMembersReq + svcResp groups.MembersPage + svcErr error + resp listMembersRes + err error + }{ + { + desc: "successfully", + memberKind: auth.ThingsKind, + req: listMembersReq{ + token: valid, + memberKind: auth.ThingsKind, + groupID: testsutil.GenerateUUID(t), + }, + svcResp: groups.MembersPage{ + Members: []groups.Member{ + { + ID: valid, + Type: valid, + }, + }, + }, + svcErr: nil, + resp: listMembersRes{ + Members: []groups.Member{ + { + ID: valid, + Type: valid, + }, + }, + }, + err: nil, + }, + { + desc: "successfully with empty member kind", + req: listMembersReq{ + token: valid, + memberKind: auth.ThingsKind, + groupID: testsutil.GenerateUUID(t), + }, + svcResp: groups.MembersPage{ + Members: []groups.Member{ + { + ID: valid, + Type: valid, + }, + }, + }, + svcErr: nil, + resp: listMembersRes{ + Members: []groups.Member{ + { + ID: valid, + Type: valid, + }, + }, + }, + err: nil, + }, + { + desc: "unsuccessfully with invalid request", + memberKind: auth.ThingsKind, + req: listMembersReq{}, + resp: listMembersRes{}, + err: apiutil.ErrValidation, + }, + { + desc: "unsuccessfully with repo error", + memberKind: auth.ThingsKind, + req: listMembersReq{ + token: valid, + memberKind: auth.ThingsKind, + groupID: testsutil.GenerateUUID(t), + }, + svcResp: groups.MembersPage{}, + svcErr: svcerr.ErrAuthorization, + resp: listMembersRes{}, + err: svcerr.ErrAuthorization, + }, + } + + for _, tc := range cases { + if tc.memberKind != "" { + tc.req.memberKind = tc.memberKind + } + repoCall := svc.On("ListMembers", context.Background(), tc.req.token, tc.req.groupID, tc.req.permission, tc.req.memberKind).Return(tc.svcResp, tc.svcErr) + resp, err := ListMembersEndpoint(svc, tc.memberKind)(context.Background(), tc.req) + assert.Equal(t, tc.resp, resp, fmt.Sprintf("%s: expected %v got %v\n", tc.desc, tc.resp, resp)) + assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("expected error %v to contain %v", err, tc.err)) + response := resp.(listMembersRes) + assert.Equal(t, response.Code(), http.StatusOK) + assert.Empty(t, response.Headers()) + assert.False(t, response.Empty()) + repoCall.Unset() + } +} + +func TestAssignMembersEndpoint(t *testing.T) { + svc := new(mocks.Service) + cases := []struct { + desc string + relation string + memberKind string + req assignReq + svcErr error + resp assignRes + err error + }{ + { + desc: "successfully", + relation: auth.ContributorRelation, + memberKind: auth.ThingsKind, + req: assignReq{ + token: valid, + MemberKind: auth.ThingsKind, + groupID: testsutil.GenerateUUID(t), + Members: []string{ + testsutil.GenerateUUID(t), + testsutil.GenerateUUID(t), + }, + }, + svcErr: nil, + resp: assignRes{assigned: true}, + err: nil, + }, + { + desc: "successfully with empty member kind", + relation: auth.ContributorRelation, + req: assignReq{ + token: valid, + groupID: testsutil.GenerateUUID(t), + MemberKind: auth.ThingsKind, + Members: []string{ + testsutil.GenerateUUID(t), + testsutil.GenerateUUID(t), + }, + }, + svcErr: nil, + resp: assignRes{assigned: true}, + err: nil, + }, + { + desc: "successfully with empty relation", + memberKind: auth.ThingsKind, + req: assignReq{ + token: valid, + MemberKind: auth.ThingsKind, + groupID: testsutil.GenerateUUID(t), + Members: []string{ + testsutil.GenerateUUID(t), + testsutil.GenerateUUID(t), + }, + }, + svcErr: nil, + resp: assignRes{assigned: true}, + err: nil, + }, + { + desc: "unsuccessfully 
with invalid request", + relation: auth.ContributorRelation, + memberKind: auth.ThingsKind, + req: assignReq{}, + resp: assignRes{}, + err: apiutil.ErrValidation, + }, + { + desc: "unsuccessfully with repo error", + relation: auth.ContributorRelation, + memberKind: auth.ThingsKind, + req: assignReq{ + token: valid, + MemberKind: auth.ThingsKind, + groupID: testsutil.GenerateUUID(t), + Members: []string{ + testsutil.GenerateUUID(t), + testsutil.GenerateUUID(t), + }, + }, + svcErr: svcerr.ErrAuthorization, + resp: assignRes{}, + err: svcerr.ErrAuthorization, + }, + } + + for _, tc := range cases { + if tc.memberKind != "" { + tc.req.MemberKind = tc.memberKind + } + if tc.relation != "" { + tc.req.Relation = tc.relation + } + repoCall := svc.On("Assign", context.Background(), tc.req.token, tc.req.groupID, tc.req.Relation, tc.req.MemberKind, tc.req.Members).Return(tc.svcErr) + resp, err := AssignMembersEndpoint(svc, tc.relation, tc.memberKind)(context.Background(), tc.req) + assert.Equal(t, tc.resp, resp, fmt.Sprintf("%s: expected %v got %v\n", tc.desc, tc.resp, resp)) + assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("expected error %v to contain %v", err, tc.err)) + response := resp.(assignRes) + switch err { + case nil: + assert.Equal(t, response.Code(), http.StatusCreated) + default: + assert.Equal(t, response.Code(), http.StatusBadRequest) + } + assert.Empty(t, response.Headers()) + assert.True(t, response.Empty()) + repoCall.Unset() + } +} + +func TestUnassignMembersEndpoint(t *testing.T) { + svc := new(mocks.Service) + cases := []struct { + desc string + relation string + memberKind string + req unassignReq + svcErr error + resp unassignRes + err error + }{ + { + desc: "successfully", + relation: auth.ContributorRelation, + memberKind: auth.ThingsKind, + req: unassignReq{ + token: valid, + MemberKind: auth.ThingsKind, + groupID: testsutil.GenerateUUID(t), + Members: []string{ + testsutil.GenerateUUID(t), + testsutil.GenerateUUID(t), + }, + }, + svcErr: nil, + resp: unassignRes{unassigned: true}, + err: nil, + }, + { + desc: "successfully with empty member kind", + relation: auth.ContributorRelation, + req: unassignReq{ + token: valid, + groupID: testsutil.GenerateUUID(t), + MemberKind: auth.ThingsKind, + Members: []string{ + testsutil.GenerateUUID(t), + testsutil.GenerateUUID(t), + }, + }, + svcErr: nil, + resp: unassignRes{unassigned: true}, + err: nil, + }, + { + desc: "successfully with empty relation", + memberKind: auth.ThingsKind, + req: unassignReq{ + token: valid, + MemberKind: auth.ThingsKind, + groupID: testsutil.GenerateUUID(t), + Members: []string{ + testsutil.GenerateUUID(t), + testsutil.GenerateUUID(t), + }, + }, + svcErr: nil, + resp: unassignRes{unassigned: true}, + err: nil, + }, + { + desc: "unsuccessfully with invalid request", + relation: auth.ContributorRelation, + memberKind: auth.ThingsKind, + req: unassignReq{}, + resp: unassignRes{}, + err: apiutil.ErrValidation, + }, + { + desc: "unsuccessfully with repo error", + relation: auth.ContributorRelation, + memberKind: auth.ThingsKind, + req: unassignReq{ + token: valid, + MemberKind: auth.ThingsKind, + groupID: testsutil.GenerateUUID(t), + Members: []string{ + testsutil.GenerateUUID(t), + testsutil.GenerateUUID(t), + }, + }, + svcErr: svcerr.ErrAuthorization, + resp: unassignRes{}, + err: svcerr.ErrAuthorization, + }, + } + + for _, tc := range cases { + if tc.memberKind != "" { + tc.req.MemberKind = tc.memberKind + } + if tc.relation != "" { + tc.req.Relation = tc.relation + } + repoCall := 
svc.On("Unassign", context.Background(), tc.req.token, tc.req.groupID, tc.req.Relation, tc.req.MemberKind, tc.req.Members).Return(tc.svcErr) + resp, err := UnassignMembersEndpoint(svc, tc.relation, tc.memberKind)(context.Background(), tc.req) + assert.Equal(t, tc.resp, resp, fmt.Sprintf("%s: expected %v got %v\n", tc.desc, tc.resp, resp)) + assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("expected error %v to contain %v", err, tc.err)) + response := resp.(unassignRes) + switch err { + case nil: + assert.Equal(t, response.Code(), http.StatusCreated) + default: + assert.Equal(t, response.Code(), http.StatusBadRequest) + } + assert.Empty(t, response.Headers()) + assert.True(t, response.Empty()) + repoCall.Unset() + } +} diff --git a/pkg/groups/api/endpoints.go b/pkg/groups/api/endpoints.go new file mode 100644 index 0000000..64ff9cc --- /dev/null +++ b/pkg/groups/api/endpoints.go @@ -0,0 +1,319 @@ +// Copyright (c) Abstract Machines +// SPDX-License-Identifier: Apache-2.0 + +package api + +import ( + "context" + + "github.com/absmach/magistrala/pkg/apiutil" + "github.com/absmach/magistrala/pkg/errors" + "github.com/absmach/magistrala/pkg/groups" + "github.com/go-kit/kit/endpoint" +) + +const groupTypeChannels = "channels" + +func CreateGroupEndpoint(svc groups.Service, kind string) endpoint.Endpoint { + return func(ctx context.Context, request interface{}) (interface{}, error) { + req := request.(createGroupReq) + if err := req.validate(); err != nil { + return createGroupRes{created: false}, errors.Wrap(apiutil.ErrValidation, err) + } + + group, err := svc.CreateGroup(ctx, req.token, kind, req.Group) + if err != nil { + return createGroupRes{created: false}, err + } + + return createGroupRes{created: true, Group: group}, nil + } +} + +func ViewGroupEndpoint(svc groups.Service) endpoint.Endpoint { + return func(ctx context.Context, request interface{}) (interface{}, error) { + req := request.(groupReq) + if err := req.validate(); err != nil { + return viewGroupRes{}, errors.Wrap(apiutil.ErrValidation, err) + } + + group, err := svc.ViewGroup(ctx, req.token, req.id) + if err != nil { + return viewGroupRes{}, err + } + + return viewGroupRes{Group: group}, nil + } +} + +func ViewGroupPermsEndpoint(svc groups.Service) endpoint.Endpoint { + return func(ctx context.Context, request interface{}) (interface{}, error) { + req := request.(groupPermsReq) + if err := req.validate(); err != nil { + return viewGroupPermsRes{}, errors.Wrap(apiutil.ErrValidation, err) + } + + p, err := svc.ViewGroupPerms(ctx, req.token, req.id) + if err != nil { + return viewGroupPermsRes{}, err + } + + return viewGroupPermsRes{Permissions: p}, nil + } +} + +func UpdateGroupEndpoint(svc groups.Service) endpoint.Endpoint { + return func(ctx context.Context, request interface{}) (interface{}, error) { + req := request.(updateGroupReq) + if err := req.validate(); err != nil { + return updateGroupRes{}, errors.Wrap(apiutil.ErrValidation, err) + } + + group := groups.Group{ + ID: req.id, + Name: req.Name, + Description: req.Description, + Metadata: req.Metadata, + } + + group, err := svc.UpdateGroup(ctx, req.token, group) + if err != nil { + return updateGroupRes{}, err + } + + return updateGroupRes{Group: group}, nil + } +} + +func EnableGroupEndpoint(svc groups.Service) endpoint.Endpoint { + return func(ctx context.Context, request interface{}) (interface{}, error) { + req := request.(changeGroupStatusReq) + if err := req.validate(); err != nil { + return changeStatusRes{}, errors.Wrap(apiutil.ErrValidation, err) + } + 
group, err := svc.EnableGroup(ctx, req.token, req.id) + if err != nil { + return changeStatusRes{}, err + } + return changeStatusRes{Group: group}, nil + } +} + +func DisableGroupEndpoint(svc groups.Service) endpoint.Endpoint { + return func(ctx context.Context, request interface{}) (interface{}, error) { + req := request.(changeGroupStatusReq) + if err := req.validate(); err != nil { + return changeStatusRes{}, errors.Wrap(apiutil.ErrValidation, err) + } + group, err := svc.DisableGroup(ctx, req.token, req.id) + if err != nil { + return changeStatusRes{}, err + } + return changeStatusRes{Group: group}, nil + } +} + +func ListGroupsEndpoint(svc groups.Service, groupType, memberKind string) endpoint.Endpoint { + return func(ctx context.Context, request interface{}) (interface{}, error) { + req := request.(listGroupsReq) + if memberKind != "" { + req.memberKind = memberKind + } + if err := req.validate(); err != nil { + if groupType == groupTypeChannels { + return channelPageRes{}, errors.Wrap(apiutil.ErrValidation, err) + } + return groupPageRes{}, errors.Wrap(apiutil.ErrValidation, err) + } + page, err := svc.ListGroups(ctx, req.token, req.memberKind, req.memberID, req.Page) + if err != nil { + if groupType == groupTypeChannels { + return channelPageRes{}, err + } + return groupPageRes{}, err + } + + if req.tree { + return buildGroupsResponseTree(page), nil + } + filterByID := req.Page.ID != "" + + if groupType == groupTypeChannels { + return buildChannelsResponse(page, filterByID), nil + } + return buildGroupsResponse(page, filterByID), nil + } +} + +func ListMembersEndpoint(svc groups.Service, memberKind string) endpoint.Endpoint { + return func(ctx context.Context, request interface{}) (interface{}, error) { + req := request.(listMembersReq) + if memberKind != "" { + req.memberKind = memberKind + } + if err := req.validate(); err != nil { + return listMembersRes{}, errors.Wrap(apiutil.ErrValidation, err) + } + + page, err := svc.ListMembers(ctx, req.token, req.groupID, req.permission, req.memberKind) + if err != nil { + return listMembersRes{}, err + } + + return listMembersRes{ + pageRes: pageRes{ + Limit: page.Limit, + Offset: page.Offset, + Total: page.Total, + }, + Members: page.Members, + }, nil + } +} + +func AssignMembersEndpoint(svc groups.Service, relation, memberKind string) endpoint.Endpoint { + return func(ctx context.Context, request interface{}) (interface{}, error) { + req := request.(assignReq) + if relation != "" { + req.Relation = relation + } + if memberKind != "" { + req.MemberKind = memberKind + } + if err := req.validate(); err != nil { + return assignRes{}, errors.Wrap(apiutil.ErrValidation, err) + } + if err := svc.Assign(ctx, req.token, req.groupID, req.Relation, req.MemberKind, req.Members...); err != nil { + return assignRes{}, err + } + return assignRes{assigned: true}, nil + } +} + +func UnassignMembersEndpoint(svc groups.Service, relation, memberKind string) endpoint.Endpoint { + return func(ctx context.Context, request interface{}) (interface{}, error) { + req := request.(unassignReq) + if relation != "" { + req.Relation = relation + } + if memberKind != "" { + req.MemberKind = memberKind + } + if err := req.validate(); err != nil { + return unassignRes{}, errors.Wrap(apiutil.ErrValidation, err) + } + + if err := svc.Unassign(ctx, req.token, req.groupID, req.Relation, req.MemberKind, req.Members...); err != nil { + return unassignRes{}, err + } + return unassignRes{unassigned: true}, nil + } +} + +func DeleteGroupEndpoint(svc groups.Service) 
endpoint.Endpoint { + return func(ctx context.Context, request interface{}) (interface{}, error) { + req := request.(groupReq) + if err := req.validate(); err != nil { + return deleteGroupRes{}, errors.Wrap(apiutil.ErrValidation, err) + } + if err := svc.DeleteGroup(ctx, req.token, req.id); err != nil { + return deleteGroupRes{}, err + } + return deleteGroupRes{deleted: true}, nil + } +} + +func buildGroupsResponseTree(page groups.Page) groupPageRes { + groupsMap := map[string]*groups.Group{} + // parentsMap maps each parent ID to the list of its children. + parentsMap := map[string][]*groups.Group{} + for i := range page.Groups { + if _, ok := groupsMap[page.Groups[i].ID]; !ok { + groupsMap[page.Groups[i].ID] = &page.Groups[i] + parentsMap[page.Groups[i].ID] = make([]*groups.Group, 0) + } + } + + for _, group := range groupsMap { + if children, ok := parentsMap[group.Parent]; ok { + children = append(children, group) + parentsMap[group.Parent] = children + } + } + + res := groupPageRes{ + pageRes: pageRes{ + Limit: page.Limit, + Offset: page.Offset, + Total: page.Total, + Level: page.Level, + }, + Groups: []viewGroupRes{}, + } + + for _, group := range groupsMap { + if children, ok := parentsMap[group.ID]; ok { + group.Children = children + } + } + + for _, group := range groupsMap { + view := toViewGroupRes(*group) + if children, ok := parentsMap[group.Parent]; len(children) == 0 || !ok { + res.Groups = append(res.Groups, view) + } + } + + return res +} + +func toViewGroupRes(group groups.Group) viewGroupRes { + view := viewGroupRes{ + Group: group, + } + return view +} + +func buildGroupsResponse(gp groups.Page, filterByID bool) groupPageRes { + res := groupPageRes{ + pageRes: pageRes{ + Total: gp.Total, + Level: gp.Level, + }, + Groups: []viewGroupRes{}, + } + + for _, group := range gp.Groups { + view := viewGroupRes{ + Group: group, + } + if filterByID && group.Level == 0 { + continue + } + res.Groups = append(res.Groups, view) + } + + return res +} + +func buildChannelsResponse(cp groups.Page, filterByID bool) channelPageRes { + res := channelPageRes{ + pageRes: pageRes{ + Total: cp.Total, + Level: cp.Level, + }, + Channels: []viewGroupRes{}, + } + + for _, channel := range cp.Groups { + if filterByID && channel.Level == 0 { + continue + } + view := viewGroupRes{ + Group: channel, + } + res.Channels = append(res.Channels, view) + } + + return res +} diff --git a/pkg/groups/api/logging.go b/pkg/groups/api/logging.go new file mode 100644 index 0000000..b8a0909 --- /dev/null +++ b/pkg/groups/api/logging.go @@ -0,0 +1,250 @@ +// Copyright (c) Abstract Machines +// SPDX-License-Identifier: Apache-2.0 + +package api + +import ( + "context" + "log/slog" + "time" + + "github.com/absmach/magistrala/pkg/groups" +) + +var _ groups.Service = (*loggingMiddleware)(nil) + +type loggingMiddleware struct { + logger *slog.Logger + svc groups.Service +} + +// LoggingMiddleware adds logging facilities to the groups service. +func LoggingMiddleware(svc groups.Service, logger *slog.Logger) groups.Service { + return &loggingMiddleware{logger, svc} +} + +// CreateGroup logs the create_group request. It logs the group name, id and the time it took to complete the request. +// If the request fails, it logs the error.
+func (lm *loggingMiddleware) CreateGroup(ctx context.Context, token, kind string, group groups.Group) (g groups.Group, err error) { + defer func(begin time.Time) { + args := []any{ + slog.String("duration", time.Since(begin).String()), + slog.Group("group", + slog.String("id", g.ID), + slog.String("name", g.Name), + ), + } + if err != nil { + args = append(args, slog.Any("error", err)) + lm.logger.Warn("Create group failed", args...) + return + } + lm.logger.Info("Create group completed successfully", args...) + }(time.Now()) + return lm.svc.CreateGroup(ctx, token, kind, group) +} + +// UpdateGroup logs the update_group request. It logs the group name, id and the time it took to complete the request. +// If the request fails, it logs the error. +func (lm *loggingMiddleware) UpdateGroup(ctx context.Context, token string, group groups.Group) (g groups.Group, err error) { + defer func(begin time.Time) { + args := []any{ + slog.String("duration", time.Since(begin).String()), + slog.Group("group", + slog.String("id", group.ID), + slog.String("name", group.Name), + slog.Any("metadata", group.Metadata), + ), + } + if err != nil { + args = append(args, slog.Any("error", err)) + lm.logger.Warn("Update group failed", args...) + return + } + lm.logger.Info("Update group completed successfully", args...) + }(time.Now()) + return lm.svc.UpdateGroup(ctx, token, group) +} + +// ViewGroup logs the view_group request. It logs the group name, id and the time it took to complete the request. +// If the request fails, it logs the error. +func (lm *loggingMiddleware) ViewGroup(ctx context.Context, token, id string) (g groups.Group, err error) { + defer func(begin time.Time) { + args := []any{ + slog.String("duration", time.Since(begin).String()), + slog.Group("group", + slog.String("id", g.ID), + slog.String("name", g.Name), + ), + } + if err != nil { + args = append(args, slog.Any("error", err)) + lm.logger.Warn("View group failed", args...) + return + } + lm.logger.Info("View group completed successfully", args...) + }(time.Now()) + return lm.svc.ViewGroup(ctx, token, id) +} + +// ViewGroupPerms logs the view_group_perms request. It logs the group id and the time it took to complete the request. +// If the request fails, it logs the error. +func (lm *loggingMiddleware) ViewGroupPerms(ctx context.Context, token, id string) (p []string, err error) { + defer func(begin time.Time) { + args := []any{ + slog.String("duration", time.Since(begin).String()), + slog.String("group_id", id), + } + if err != nil { + args = append(args, slog.Any("error", err)) + lm.logger.Warn("View group permissions failed", args...) + return + } + lm.logger.Info("View group permissions completed successfully", args...) + }(time.Now()) + return lm.svc.ViewGroupPerms(ctx, token, id) +} + +// ListGroups logs the list_groups request. It logs the page metadata and the time it took to complete the request. +// If the request fails, it logs the error. +func (lm *loggingMiddleware) ListGroups(ctx context.Context, token, memberKind, memberID string, gp groups.Page) (cg groups.Page, err error) { + defer func(begin time.Time) { + args := []any{ + slog.String("duration", time.Since(begin).String()), + slog.Group("member", + slog.String("id", memberID), + slog.String("kind", memberKind), + ), + slog.Group("page", + slog.Uint64("limit", gp.Limit), + slog.Uint64("offset", gp.Offset), + slog.Uint64("total", cg.Total), + ), + } + if err != nil { + args = append(args, slog.Any("error", err)) + lm.logger.Warn("List groups failed", args...)
+ return + } + lm.logger.Info("List groups completed successfully", args...) + }(time.Now()) + return lm.svc.ListGroups(ctx, token, memberKind, memberID, gp) +} + +// EnableGroup logs the enable_group request. It logs the group name, id and the time it took to complete the request. +// If the request fails, it logs the error. +func (lm *loggingMiddleware) EnableGroup(ctx context.Context, token, id string) (g groups.Group, err error) { + defer func(begin time.Time) { + args := []any{ + slog.String("duration", time.Since(begin).String()), + slog.Group("group", + slog.String("id", id), + slog.String("name", g.Name), + ), + } + if err != nil { + args = append(args, slog.Any("error", err)) + lm.logger.Warn("Enable group failed", args...) + return + } + lm.logger.Info("Enable group completed successfully", args...) + }(time.Now()) + return lm.svc.EnableGroup(ctx, token, id) +} + +// DisableGroup logs the disable_group request. It logs the group name, id and the time it took to complete the request. +// If the request fails, it logs the error. +func (lm *loggingMiddleware) DisableGroup(ctx context.Context, token, id string) (g groups.Group, err error) { + defer func(begin time.Time) { + args := []any{ + slog.String("duration", time.Since(begin).String()), + slog.Group("group", + slog.String("id", id), + slog.String("name", g.Name), + ), + } + if err != nil { + args = append(args, slog.Any("error", err)) + lm.logger.Warn("Disable group failed", args...) + return + } + lm.logger.Info("Disable group completed successfully", args...) + }(time.Now()) + return lm.svc.DisableGroup(ctx, token, id) +} + +// ListMembers logs the list_members request. It logs the group id, permission, member kind and the time it took to complete the request. +// If the request fails, it logs the error. +func (lm *loggingMiddleware) ListMembers(ctx context.Context, token, groupID, permission, memberKind string) (mp groups.MembersPage, err error) { + defer func(begin time.Time) { + args := []any{ + slog.String("duration", time.Since(begin).String()), + slog.String("group_id", groupID), + slog.String("permission", permission), + slog.String("member_kind", memberKind), + } + if err != nil { + args = append(args, slog.Any("error", err)) + lm.logger.Warn("List members failed", args...) + return + } + lm.logger.Info("List members completed successfully", args...) + }(time.Now()) + return lm.svc.ListMembers(ctx, token, groupID, permission, memberKind) +} + +// Assign logs the assign request. It logs the group id, relation, member kind, member ids and the time it took to complete the request. +// If the request fails, it logs the error. +func (lm *loggingMiddleware) Assign(ctx context.Context, token, groupID, relation, memberKind string, memberIDs ...string) (err error) { + defer func(begin time.Time) { + args := []any{ + slog.String("duration", time.Since(begin).String()), + slog.String("group_id", groupID), + slog.String("relation", relation), + slog.String("member_kind", memberKind), + slog.Any("member_ids", memberIDs), + } + if err != nil { + args = append(args, slog.Any("error", err)) + lm.logger.Warn("Assign member to group failed", args...) + return + } + lm.logger.Info("Assign member to group completed successfully", args...) + }(time.Now()) + + return lm.svc.Assign(ctx, token, groupID, relation, memberKind, memberIDs...)
+} + +// Unassign logs the unassign request. It logs the group id, relation, member kind, member ids and the time it took to complete the request. +// If the request fails, it logs the error. +func (lm *loggingMiddleware) Unassign(ctx context.Context, token, groupID, relation, memberKind string, memberIDs ...string) (err error) { + defer func(begin time.Time) { + args := []any{ + slog.String("duration", time.Since(begin).String()), + slog.String("group_id", groupID), + slog.String("relation", relation), + slog.String("member_kind", memberKind), + slog.Any("member_ids", memberIDs), + } + if err != nil { + args = append(args, slog.Any("error", err)) + lm.logger.Warn("Unassign member from group failed", args...) + return + } + lm.logger.Info("Unassign member from group completed successfully", args...) + }(time.Now()) + + return lm.svc.Unassign(ctx, token, groupID, relation, memberKind, memberIDs...) +} + +// DeleteGroup logs the delete_group request. It logs the group id and the time it took to complete the request. +// If the request fails, it logs the error. +func (lm *loggingMiddleware) DeleteGroup(ctx context.Context, token, id string) (err error) { + defer func(begin time.Time) { + args := []any{ + slog.String("duration", time.Since(begin).String()), + slog.String("group_id", id), + } + if err != nil { + args = append(args, slog.Any("error", err)) + lm.logger.Warn("Delete group failed", args...) + return + } + lm.logger.Info("Delete group completed successfully", args...) + }(time.Now()) + return lm.svc.DeleteGroup(ctx, token, id) +} diff --git a/pkg/groups/api/metrics.go b/pkg/groups/api/metrics.go new file mode 100644 index 0000000..234bcb2 --- /dev/null +++ b/pkg/groups/api/metrics.go @@ -0,0 +1,129 @@ +// Copyright (c) Abstract Machines +// SPDX-License-Identifier: Apache-2.0 + +package api + +import ( + "context" + "time" + + "github.com/absmach/magistrala/pkg/groups" + "github.com/go-kit/kit/metrics" +) + +var _ groups.Service = (*metricsMiddleware)(nil) + +type metricsMiddleware struct { + counter metrics.Counter + latency metrics.Histogram + svc groups.Service +} + +// MetricsMiddleware instruments the groups service by tracking request count and latency. +func MetricsMiddleware(svc groups.Service, counter metrics.Counter, latency metrics.Histogram) groups.Service { + return &metricsMiddleware{ + counter: counter, + latency: latency, + svc: svc, + } +} + +// CreateGroup instruments CreateGroup method with metrics. +func (ms *metricsMiddleware) CreateGroup(ctx context.Context, token, kind string, g groups.Group) (groups.Group, error) { + defer func(begin time.Time) { + ms.counter.With("method", "create_group").Add(1) + ms.latency.With("method", "create_group").Observe(time.Since(begin).Seconds()) + }(time.Now()) + return ms.svc.CreateGroup(ctx, token, kind, g) +} + +// UpdateGroup instruments UpdateGroup method with metrics. +func (ms *metricsMiddleware) UpdateGroup(ctx context.Context, token string, group groups.Group) (rGroup groups.Group, err error) { + defer func(begin time.Time) { + ms.counter.With("method", "update_group").Add(1) + ms.latency.With("method", "update_group").Observe(time.Since(begin).Seconds()) + }(time.Now()) + return ms.svc.UpdateGroup(ctx, token, group) +} + +// ViewGroup instruments ViewGroup method with metrics. +func (ms *metricsMiddleware) ViewGroup(ctx context.Context, token, id string) (g groups.Group, err error) { + defer func(begin time.Time) { + ms.counter.With("method", "view_group").Add(1) + ms.latency.With("method", "view_group").Observe(time.Since(begin).Seconds()) + }(time.Now()) + return ms.svc.ViewGroup(ctx, token, id) +} + +// ViewGroupPerms instruments ViewGroupPerms method with metrics.
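+// +// All methods are instrumented the same way. A typical construction-time wiring, shown as an illustrative sketch assuming the go-kit Prometheus helpers (the kitprometheus and stdprometheus import names are not part of this package): +// +// counter := kitprometheus.NewCounterFrom(stdprometheus.CounterOpts{Name: "request_count"}, []string{"method"}) +// latency := kitprometheus.NewSummaryFrom(stdprometheus.SummaryOpts{Name: "request_latency_microseconds"}, []string{"method"}) +// svc = MetricsMiddleware(svc, counter, latency)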
+func (ms *metricsMiddleware) ViewGroupPerms(ctx context.Context, token, id string) (p []string, err error) { + defer func(begin time.Time) { + ms.counter.With("method", "view_group_perms").Add(1) + ms.latency.With("method", "view_group_perms").Observe(time.Since(begin).Seconds()) + }(time.Now()) + return ms.svc.ViewGroupPerms(ctx, token, id) +} + +// ListGroups instruments ListGroups method with metrics. +func (ms *metricsMiddleware) ListGroups(ctx context.Context, token, memberKind, memberID string, gp groups.Page) (cg groups.Page, err error) { + defer func(begin time.Time) { + ms.counter.With("method", "list_groups").Add(1) + ms.latency.With("method", "list_groups").Observe(time.Since(begin).Seconds()) + }(time.Now()) + return ms.svc.ListGroups(ctx, token, memberKind, memberID, gp) +} + +// EnableGroup instruments EnableGroup method with metrics. +func (ms *metricsMiddleware) EnableGroup(ctx context.Context, token, id string) (g groups.Group, err error) { + defer func(begin time.Time) { + ms.counter.With("method", "enable_group").Add(1) + ms.latency.With("method", "enable_group").Observe(time.Since(begin).Seconds()) + }(time.Now()) + return ms.svc.EnableGroup(ctx, token, id) +} + +// DisableGroup instruments DisableGroup method with metrics. +func (ms *metricsMiddleware) DisableGroup(ctx context.Context, token, id string) (g groups.Group, err error) { + defer func(begin time.Time) { + ms.counter.With("method", "disable_group").Add(1) + ms.latency.With("method", "disable_group").Observe(time.Since(begin).Seconds()) + }(time.Now()) + return ms.svc.DisableGroup(ctx, token, id) +} + +// ListMembers instruments ListMembers method with metrics. +func (ms *metricsMiddleware) ListMembers(ctx context.Context, token, groupID, permission, memberKind string) (mp groups.MembersPage, err error) { + defer func(begin time.Time) { + ms.counter.With("method", "list_memberships").Add(1) + ms.latency.With("method", "list_memberships").Observe(time.Since(begin).Seconds()) + }(time.Now()) + return ms.svc.ListMembers(ctx, token, groupID, permission, memberKind) +} + +// Assign instruments Assign method with metrics. +func (ms *metricsMiddleware) Assign(ctx context.Context, token, groupID, relation, memberKind string, memberIDs ...string) (err error) { + defer func(begin time.Time) { + ms.counter.With("method", "assign").Add(1) + ms.latency.With("method", "assign").Observe(time.Since(begin).Seconds()) + }(time.Now()) + + return ms.svc.Assign(ctx, token, groupID, relation, memberKind, memberIDs...) +} + +// Unassign instruments Unassign method with metrics. +func (ms *metricsMiddleware) Unassign(ctx context.Context, token, groupID, relation, memberKind string, memberIDs ...string) (err error) { + defer func(begin time.Time) { + ms.counter.With("method", "unassign").Add(1) + ms.latency.With("method", "unassign").Observe(time.Since(begin).Seconds()) + }(time.Now()) + + return ms.svc.Unassign(ctx, token, groupID, relation, memberKind, memberIDs...) 
+} + +func (ms *metricsMiddleware) DeleteGroup(ctx context.Context, token, id string) (err error) { + defer func(begin time.Time) { + ms.counter.With("method", "delete_group").Add(1) + ms.latency.With("method", "delete_group").Observe(time.Since(begin).Seconds()) + }(time.Now()) + return ms.svc.DeleteGroup(ctx, token, id) +} diff --git a/pkg/groups/api/requests.go b/pkg/groups/api/requests.go new file mode 100644 index 0000000..ff552a1 --- /dev/null +++ b/pkg/groups/api/requests.go @@ -0,0 +1,203 @@ +// Copyright (c) Abstract Machines +// SPDX-License-Identifier: Apache-2.0 + +package api + +import ( + "github.com/absmach/magistrala/auth" + "github.com/absmach/magistrala/pkg/apiutil" + mggroups "github.com/absmach/magistrala/pkg/groups" + "github.com/absmach/mg-contrib/pkg/api" +) + +type createGroupReq struct { + mggroups.Group + token string +} + +func (req createGroupReq) validate() error { + if req.token == "" { + return apiutil.ErrBearerToken + } + if len(req.Name) > api.MaxNameSize || req.Name == "" { + return apiutil.ErrNameSize + } + + return nil +} + +type updateGroupReq struct { + token string + id string + Name string `json:"name,omitempty"` + Description string `json:"description,omitempty"` + Metadata map[string]interface{} `json:"metadata,omitempty"` +} + +func (req updateGroupReq) validate() error { + if req.token == "" { + return apiutil.ErrBearerToken + } + if req.id == "" { + return apiutil.ErrMissingID + } + if len(req.Name) > api.MaxNameSize { + return apiutil.ErrNameSize + } + return nil +} + +type listGroupsReq struct { + mggroups.Page + token string + memberKind string + memberID string + // - `true` - result is JSON tree representing groups hierarchy, + // - `false` - result is JSON array of groups. + tree bool +} + +func (req listGroupsReq) validate() error { + if req.token == "" { + return apiutil.ErrBearerToken + } + if req.memberKind == "" { + return apiutil.ErrMissingMemberKind + } + if req.memberKind == auth.ThingsKind && req.memberID == "" { + return apiutil.ErrMissingID + } + if req.Level > mggroups.MaxLevel { + return apiutil.ErrInvalidLevel + } + if req.Limit > api.MaxLimitSize || req.Limit < 1 { + return apiutil.ErrLimitSize + } + + return nil +} + +type groupReq struct { + token string + id string +} + +func (req groupReq) validate() error { + if req.token == "" { + return apiutil.ErrBearerToken + } + if req.id == "" { + return apiutil.ErrMissingID + } + + return nil +} + +type groupPermsReq struct { + token string + id string +} + +func (req groupPermsReq) validate() error { + if req.token == "" { + return apiutil.ErrBearerToken + } + if req.id == "" { + return apiutil.ErrMissingID + } + + return nil +} + +type changeGroupStatusReq struct { + token string + id string +} + +func (req changeGroupStatusReq) validate() error { + if req.token == "" { + return apiutil.ErrBearerToken + } + if req.id == "" { + return apiutil.ErrMissingID + } + return nil +} + +type assignReq struct { + token string + groupID string + Relation string `json:"relation,omitempty"` + MemberKind string `json:"member_kind,omitempty"` + Members []string `json:"members"` +} + +func (req assignReq) validate() error { + if req.token == "" { + return apiutil.ErrBearerToken + } + + if req.MemberKind == "" { + return apiutil.ErrMissingMemberKind + } + + if req.groupID == "" { + return apiutil.ErrMissingID + } + + if len(req.Members) == 0 { + return apiutil.ErrEmptyList + } + + return nil +} + +type unassignReq struct { + token string + groupID string + Relation string 
`json:"relation,omitempty"` + MemberKind string `json:"member_kind,omitempty"` + Members []string `json:"members"` +} + +func (req unassignReq) validate() error { + if req.token == "" { + return apiutil.ErrBearerToken + } + + if req.MemberKind == "" { + return apiutil.ErrMissingMemberKind + } + + if req.groupID == "" { + return apiutil.ErrMissingID + } + + if len(req.Members) == 0 { + return apiutil.ErrEmptyList + } + + return nil +} + +type listMembersReq struct { + token string + groupID string + permission string + memberKind string +} + +func (req listMembersReq) validate() error { + if req.token == "" { + return apiutil.ErrBearerToken + } + + if req.memberKind == "" { + return apiutil.ErrMissingMemberKind + } + + if req.groupID == "" { + return apiutil.ErrMissingID + } + return nil +} diff --git a/pkg/groups/api/requests_test.go b/pkg/groups/api/requests_test.go new file mode 100644 index 0000000..42ad9d4 --- /dev/null +++ b/pkg/groups/api/requests_test.go @@ -0,0 +1,516 @@ +// Copyright (c) Abstract Machines +// SPDX-License-Identifier: Apache-2.0 + +package api + +import ( + "fmt" + "strings" + "testing" + + "github.com/absmach/magistrala/auth" + "github.com/absmach/magistrala/pkg/apiutil" + "github.com/absmach/magistrala/pkg/groups" + "github.com/absmach/mg-contrib/pkg/api" + "github.com/stretchr/testify/assert" +) + +var valid = "valid" + +func TestCreateGroupReqValidation(t *testing.T) { + cases := []struct { + desc string + req createGroupReq + err error + }{ + { + desc: "valid request", + req: createGroupReq{ + token: valid, + Group: groups.Group{ + Name: valid, + }, + }, + err: nil, + }, + { + desc: "empty token", + req: createGroupReq{ + Group: groups.Group{ + Name: valid, + }, + }, + err: apiutil.ErrBearerToken, + }, + { + desc: "long name", + req: createGroupReq{ + token: valid, + Group: groups.Group{ + Name: strings.Repeat("a", api.MaxNameSize+1), + }, + }, + err: apiutil.ErrNameSize, + }, + { + desc: "empty name", + req: createGroupReq{ + token: valid, + Group: groups.Group{}, + }, + err: apiutil.ErrNameSize, + }, + } + + for _, tc := range cases { + err := tc.req.validate() + assert.Equal(t, tc.err, err, fmt.Sprintf("%s: expected %s got %s\n", tc.desc, tc.err, err)) + } +} + +func TestUpdateGroupReqValidation(t *testing.T) { + cases := []struct { + desc string + req updateGroupReq + err error + }{ + { + desc: "valid request", + req: updateGroupReq{ + token: valid, + id: valid, + Name: valid, + }, + err: nil, + }, + { + desc: "empty token", + req: updateGroupReq{ + id: valid, + Name: valid, + }, + err: apiutil.ErrBearerToken, + }, + { + desc: "long name", + req: updateGroupReq{ + token: valid, + id: valid, + Name: strings.Repeat("a", api.MaxNameSize+1), + }, + err: apiutil.ErrNameSize, + }, + { + desc: "empty id", + req: updateGroupReq{ + token: valid, + Name: valid, + }, + err: apiutil.ErrMissingID, + }, + } + + for _, tc := range cases { + err := tc.req.validate() + assert.Equal(t, tc.err, err, fmt.Sprintf("%s: expected %s got %s\n", tc.desc, tc.err, err)) + } +} + +func TestListGroupReqValidation(t *testing.T) { + cases := []struct { + desc string + req listGroupsReq + err error + }{ + { + desc: "valid request", + req: listGroupsReq{ + token: valid, + memberKind: auth.ThingsKind, + memberID: valid, + Page: groups.Page{ + PageMeta: groups.PageMeta{ + Limit: 10, + }, + }, + }, + err: nil, + }, + { + desc: "empty token", + req: listGroupsReq{ + memberKind: auth.ThingsKind, + memberID: valid, + Page: groups.Page{ + PageMeta: groups.PageMeta{ + Limit: 10, + }, + }, + }, + 
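+ // The token is deliberately omitted: validation must fail with ErrBearerToken before any other check runs.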
err: apiutil.ErrBearerToken, + }, + { + desc: "empty memberkind", + req: listGroupsReq{ + token: valid, + memberID: valid, + Page: groups.Page{ + PageMeta: groups.PageMeta{ + Limit: 10, + }, + }, + }, + err: apiutil.ErrMissingMemberKind, + }, + { + desc: "empty member id", + req: listGroupsReq{ + token: valid, + memberKind: auth.ThingsKind, + Page: groups.Page{ + PageMeta: groups.PageMeta{ + Limit: 10, + }, + }, + }, + err: apiutil.ErrMissingID, + }, + { + desc: "invalid upper level", + req: listGroupsReq{ + token: valid, + memberKind: auth.ThingsKind, + memberID: valid, + Page: groups.Page{ + PageMeta: groups.PageMeta{ + Limit: 10, + }, + Level: groups.MaxLevel + 1, + }, + }, + err: apiutil.ErrInvalidLevel, + }, + { + desc: "invalid lower limit", + req: listGroupsReq{ + token: valid, + memberKind: auth.ThingsKind, + memberID: valid, + Page: groups.Page{ + PageMeta: groups.PageMeta{ + Limit: 0, + }, + }, + }, + err: apiutil.ErrLimitSize, + }, + { + desc: "invalid upper limit", + req: listGroupsReq{ + token: valid, + memberKind: auth.ThingsKind, + memberID: valid, + Page: groups.Page{ + PageMeta: groups.PageMeta{ + Limit: api.MaxLimitSize + 1, + }, + }, + }, + err: apiutil.ErrLimitSize, + }, + } + + for _, tc := range cases { + err := tc.req.validate() + assert.Equal(t, tc.err, err, fmt.Sprintf("%s: expected %s got %s\n", tc.desc, tc.err, err)) + } +} + +func TestGroupReqValidation(t *testing.T) { + cases := []struct { + desc string + req groupReq + err error + }{ + { + desc: "valid request", + req: groupReq{ + token: valid, + id: valid, + }, + err: nil, + }, + { + desc: "empty token", + req: groupReq{ + id: valid, + }, + err: apiutil.ErrBearerToken, + }, + { + desc: "empty id", + req: groupReq{ + token: valid, + }, + err: apiutil.ErrMissingID, + }, + } + + for _, tc := range cases { + err := tc.req.validate() + assert.Equal(t, tc.err, err, fmt.Sprintf("%s: expected %s got %s\n", tc.desc, tc.err, err)) + } +} + +func TestGroupPermsReqValidation(t *testing.T) { + cases := []struct { + desc string + req groupPermsReq + err error + }{ + { + desc: "valid request", + req: groupPermsReq{ + token: valid, + id: valid, + }, + err: nil, + }, + { + desc: "empty token", + req: groupPermsReq{ + id: valid, + }, + err: apiutil.ErrBearerToken, + }, + { + desc: "empty id", + req: groupPermsReq{ + token: valid, + }, + err: apiutil.ErrMissingID, + }, + } + + for _, tc := range cases { + err := tc.req.validate() + assert.Equal(t, tc.err, err, fmt.Sprintf("%s: expected %s got %s\n", tc.desc, tc.err, err)) + } +} + +func TestChangeGroupStatusReqValidation(t *testing.T) { + cases := []struct { + desc string + req changeGroupStatusReq + err error + }{ + { + desc: "valid request", + req: changeGroupStatusReq{ + token: valid, + id: valid, + }, + err: nil, + }, + { + desc: "empty token", + req: changeGroupStatusReq{ + id: valid, + }, + err: apiutil.ErrBearerToken, + }, + { + desc: "empty id", + req: changeGroupStatusReq{ + token: valid, + }, + err: apiutil.ErrMissingID, + }, + } + + for _, tc := range cases { + err := tc.req.validate() + assert.Equal(t, tc.err, err, fmt.Sprintf("%s: expected %s got %s\n", tc.desc, tc.err, err)) + } +} + +func TestAssignReqValidation(t *testing.T) { + cases := []struct { + desc string + req assignReq + err error + }{ + { + desc: "valid request", + req: assignReq{ + token: valid, + groupID: valid, + Relation: auth.ContributorRelation, + MemberKind: auth.ThingsKind, + Members: []string{valid}, + }, + err: nil, + }, + { + desc: "empty token", + req: assignReq{ + groupID: valid, + 
Relation: auth.ContributorRelation, + MemberKind: auth.ThingsKind, + Members: []string{valid}, + }, + err: apiutil.ErrBearerToken, + }, + { + desc: "empty member kind", + req: assignReq{ + token: valid, + groupID: valid, + Relation: auth.ContributorRelation, + Members: []string{valid}, + }, + err: apiutil.ErrMissingMemberKind, + }, + { + desc: "empty groupID", + req: assignReq{ + token: valid, + Relation: auth.ContributorRelation, + MemberKind: auth.ThingsKind, + Members: []string{valid}, + }, + err: apiutil.ErrMissingID, + }, + { + desc: "empty Members", + req: assignReq{ + token: valid, + groupID: valid, + Relation: auth.ContributorRelation, + MemberKind: auth.ThingsKind, + }, + err: apiutil.ErrEmptyList, + }, + } + + for _, tc := range cases { + err := tc.req.validate() + assert.Equal(t, tc.err, err, fmt.Sprintf("%s: expected %s got %s\n", tc.desc, tc.err, err)) + } +} + +func TestUnAssignReqValidation(t *testing.T) { + cases := []struct { + desc string + req unassignReq + err error + }{ + { + desc: "valid request", + req: unassignReq{ + token: valid, + groupID: valid, + Relation: auth.ContributorRelation, + MemberKind: auth.ThingsKind, + Members: []string{valid}, + }, + err: nil, + }, + { + desc: "empty token", + req: unassignReq{ + groupID: valid, + Relation: auth.ContributorRelation, + MemberKind: auth.ThingsKind, + Members: []string{valid}, + }, + err: apiutil.ErrBearerToken, + }, + { + desc: "empty member kind", + req: unassignReq{ + token: valid, + groupID: valid, + Relation: auth.ContributorRelation, + Members: []string{valid}, + }, + err: apiutil.ErrMissingMemberKind, + }, + { + desc: "empty groupID", + req: unassignReq{ + token: valid, + Relation: auth.ContributorRelation, + MemberKind: auth.ThingsKind, + Members: []string{valid}, + }, + err: apiutil.ErrMissingID, + }, + { + desc: "empty Members", + req: unassignReq{ + token: valid, + groupID: valid, + Relation: auth.ContributorRelation, + MemberKind: auth.ThingsKind, + }, + err: apiutil.ErrEmptyList, + }, + } + + for _, tc := range cases { + err := tc.req.validate() + assert.Equal(t, tc.err, err, fmt.Sprintf("%s: expected %s got %s\n", tc.desc, tc.err, err)) + } +} + +func TestListMembersReqValidation(t *testing.T) { + cases := []struct { + desc string + req listMembersReq + err error + }{ + { + desc: "valid request", + req: listMembersReq{ + token: valid, + groupID: valid, + permission: auth.ViewPermission, + memberKind: auth.ThingsKind, + }, + err: nil, + }, + { + desc: "empty token", + req: listMembersReq{ + groupID: valid, + permission: auth.ViewPermission, + memberKind: auth.ThingsKind, + }, + err: apiutil.ErrBearerToken, + }, + { + desc: "empty member kind", + req: listMembersReq{ + token: valid, + groupID: valid, + permission: auth.ViewPermission, + }, + err: apiutil.ErrMissingMemberKind, + }, + { + desc: "empty groupID", + req: listMembersReq{ + token: valid, + permission: auth.ViewPermission, + memberKind: auth.ThingsKind, + }, + err: apiutil.ErrMissingID, + }, + } + + for _, tc := range cases { + err := tc.req.validate() + assert.Equal(t, tc.err, err, fmt.Sprintf("%s: expected %s got %s\n", tc.desc, tc.err, err)) + } +} diff --git a/pkg/groups/api/responses.go b/pkg/groups/api/responses.go new file mode 100644 index 0000000..a2c3079 --- /dev/null +++ b/pkg/groups/api/responses.go @@ -0,0 +1,231 @@ +// Copyright (c) Abstract Machines +// SPDX-License-Identifier: Apache-2.0 + +package api + +import ( + "fmt" + "net/http" + + "github.com/absmach/magistrala" + "github.com/absmach/magistrala/pkg/groups" +) + +var ( + _ 
magistrala.Response = (*createGroupRes)(nil) + _ magistrala.Response = (*groupPageRes)(nil) + _ magistrala.Response = (*changeStatusRes)(nil) + _ magistrala.Response = (*viewGroupRes)(nil) + _ magistrala.Response = (*updateGroupRes)(nil) + _ magistrala.Response = (*assignRes)(nil) + _ magistrala.Response = (*unassignRes)(nil) +) + +type viewGroupRes struct { + groups.Group `json:",inline"` +} + +func (res viewGroupRes) Code() int { + return http.StatusOK +} + +func (res viewGroupRes) Headers() map[string]string { + return map[string]string{} +} + +func (res viewGroupRes) Empty() bool { + return false +} + +type viewGroupPermsRes struct { + Permissions []string `json:"permissions"` +} + +func (res viewGroupPermsRes) Code() int { + return http.StatusOK +} + +func (res viewGroupPermsRes) Headers() map[string]string { + return map[string]string{} +} + +func (res viewGroupPermsRes) Empty() bool { + return false +} + +type createGroupRes struct { + groups.Group `json:",inline"` + created bool +} + +func (res createGroupRes) Code() int { + if res.created { + return http.StatusCreated + } + + return http.StatusOK +} + +func (res createGroupRes) Headers() map[string]string { + if res.created { + return map[string]string{ + "Location": fmt.Sprintf("/groups/%s", res.ID), + } + } + + return map[string]string{} +} + +func (res createGroupRes) Empty() bool { + return false +} + +type groupPageRes struct { + pageRes + Groups []viewGroupRes `json:"groups"` +} + +type pageRes struct { + Limit uint64 `json:"limit,omitempty"` + Offset uint64 `json:"offset"` + Total uint64 `json:"total"` + Level uint64 `json:"level,omitempty"` +} + +func (res groupPageRes) Code() int { + return http.StatusOK +} + +func (res groupPageRes) Headers() map[string]string { + return map[string]string{} +} + +func (res groupPageRes) Empty() bool { + return false +} + +type channelPageRes struct { + pageRes + Channels []viewGroupRes `json:"channels"` +} + +func (res channelPageRes) Code() int { + return http.StatusOK +} + +func (res channelPageRes) Headers() map[string]string { + return map[string]string{} +} + +func (res channelPageRes) Empty() bool { + return false +} + +type updateGroupRes struct { + groups.Group `json:",inline"` +} + +func (res updateGroupRes) Code() int { + return http.StatusOK +} + +func (res updateGroupRes) Headers() map[string]string { + return map[string]string{} +} + +func (res updateGroupRes) Empty() bool { + return false +} + +type changeStatusRes struct { + groups.Group `json:",inline"` +} + +func (res changeStatusRes) Code() int { + return http.StatusOK +} + +func (res changeStatusRes) Headers() map[string]string { + return map[string]string{} +} + +func (res changeStatusRes) Empty() bool { + return false +} + +type assignRes struct { + assigned bool +} + +func (res assignRes) Code() int { + if res.assigned { + return http.StatusCreated + } + + return http.StatusBadRequest +} + +func (res assignRes) Headers() map[string]string { + return map[string]string{} +} + +func (res assignRes) Empty() bool { + return true +} + +type unassignRes struct { + unassigned bool +} + +func (res unassignRes) Code() int { + if res.unassigned { + return http.StatusCreated + } + + return http.StatusBadRequest +} + +func (res unassignRes) Headers() map[string]string { + return map[string]string{} +} + +func (res unassignRes) Empty() bool { + return true +} + +type listMembersRes struct { + pageRes + Members []groups.Member `json:"members"` +} + +func (res listMembersRes) Code() int { + return http.StatusOK +} + +func (res 
listMembersRes) Headers() map[string]string { + return map[string]string{} +} + +func (res listMembersRes) Empty() bool { + return false +} + +type deleteGroupRes struct { + deleted bool +} + +func (res deleteGroupRes) Code() int { + if res.deleted { + return http.StatusNoContent + } + + return http.StatusBadRequest +} + +func (res deleteGroupRes) Headers() map[string]string { + return map[string]string{} +} + +func (res deleteGroupRes) Empty() bool { + return true +} diff --git a/pkg/groups/events/doc.go b/pkg/groups/events/doc.go new file mode 100644 index 0000000..f1cd64c --- /dev/null +++ b/pkg/groups/events/doc.go @@ -0,0 +1,5 @@ +// Copyright (c) Abstract Machines +// SPDX-License-Identifier: Apache-2.0 + +// Package events contains event source Redis client implementation. +package events diff --git a/pkg/groups/events/events.go b/pkg/groups/events/events.go new file mode 100644 index 0000000..eb65fd4 --- /dev/null +++ b/pkg/groups/events/events.go @@ -0,0 +1,271 @@ +// Copyright (c) Abstract Machines +// SPDX-License-Identifier: Apache-2.0 + +package events + +import ( + "time" + + "github.com/absmach/magistrala/pkg/events" + groups "github.com/absmach/magistrala/pkg/groups" +) + +var ( + groupPrefix = "group." + groupCreate = groupPrefix + "create" + groupUpdate = groupPrefix + "update" + groupChangeStatus = groupPrefix + "change_status" + groupView = groupPrefix + "view" + groupViewPerms = groupPrefix + "view_perms" + groupList = groupPrefix + "list" + groupListMemberships = groupPrefix + "list_by_user" + groupRemove = groupPrefix + "remove" + groupAssign = groupPrefix + "assign" + groupUnassign = groupPrefix + "unassign" +) + +var ( + _ events.Event = (*assignEvent)(nil) + _ events.Event = (*unassignEvent)(nil) + _ events.Event = (*createGroupEvent)(nil) + _ events.Event = (*updateGroupEvent)(nil) + _ events.Event = (*changeStatusGroupEvent)(nil) + _ events.Event = (*viewGroupEvent)(nil) + _ events.Event = (*deleteGroupEvent)(nil) + _ events.Event = (*viewGroupEvent)(nil) + _ events.Event = (*listGroupEvent)(nil) + _ events.Event = (*listGroupMembershipEvent)(nil) +) + +type assignEvent struct { + memberIDs []string + relation string + memberKind string + groupID string +} + +func (cge assignEvent) Encode() (map[string]interface{}, error) { + return map[string]interface{}{ + "operation": groupAssign, + "member_ids": cge.memberIDs, + "relation": cge.relation, + "memberKind": cge.memberKind, + "group_id": cge.groupID, + }, nil +} + +type unassignEvent struct { + memberIDs []string + relation string + memberKind string + groupID string +} + +func (cge unassignEvent) Encode() (map[string]interface{}, error) { + return map[string]interface{}{ + "operation": groupUnassign, + "member_ids": cge.memberIDs, + "relation": cge.relation, + "memberKind": cge.memberKind, + "group_id": cge.groupID, + }, nil +} + +type createGroupEvent struct { + groups.Group +} + +func (cge createGroupEvent) Encode() (map[string]interface{}, error) { + val := map[string]interface{}{ + "operation": groupCreate, + "id": cge.ID, + "status": cge.Status.String(), + "created_at": cge.CreatedAt, + } + + if cge.Domain != "" { + val["domain"] = cge.Domain + } + if cge.Parent != "" { + val["parent"] = cge.Parent + } + if cge.Name != "" { + val["name"] = cge.Name + } + if cge.Description != "" { + val["description"] = cge.Description + } + if cge.Metadata != nil { + val["metadata"] = cge.Metadata + } + if cge.Status.String() != "" { + val["status"] = cge.Status.String() + } + + return val, nil +} + +type updateGroupEvent 
struct { + groups.Group +} + +func (uge updateGroupEvent) Encode() (map[string]interface{}, error) { + val := map[string]interface{}{ + "operation": groupUpdate, + "updated_at": uge.UpdatedAt, + "updated_by": uge.UpdatedBy, + } + + if uge.ID != "" { + val["id"] = uge.ID + } + if uge.Domain != "" { + val["domain"] = uge.Domain + } + if uge.Parent != "" { + val["parent"] = uge.Parent + } + if uge.Name != "" { + val["name"] = uge.Name + } + if uge.Description != "" { + val["description"] = uge.Description + } + if uge.Metadata != nil { + val["metadata"] = uge.Metadata + } + if !uge.CreatedAt.IsZero() { + val["created_at"] = uge.CreatedAt + } + if uge.Status.String() != "" { + val["status"] = uge.Status.String() + } + + return val, nil +} + +type changeStatusGroupEvent struct { + id string + status string + updatedAt time.Time + updatedBy string +} + +func (rge changeStatusGroupEvent) Encode() (map[string]interface{}, error) { + return map[string]interface{}{ + "operation": groupChangeStatus, + "id": rge.id, + "status": rge.status, + "updated_at": rge.updatedAt, + "updated_by": rge.updatedBy, + }, nil +} + +type viewGroupEvent struct { + groups.Group +} + +func (vge viewGroupEvent) Encode() (map[string]interface{}, error) { + val := map[string]interface{}{ + "operation": groupView, + "id": vge.ID, + } + + if vge.Domain != "" { + val["domain"] = vge.Domain + } + if vge.Parent != "" { + val["parent"] = vge.Parent + } + if vge.Name != "" { + val["name"] = vge.Name + } + if vge.Description != "" { + val["description"] = vge.Description + } + if vge.Metadata != nil { + val["metadata"] = vge.Metadata + } + if !vge.CreatedAt.IsZero() { + val["created_at"] = vge.CreatedAt + } + if !vge.UpdatedAt.IsZero() { + val["updated_at"] = vge.UpdatedAt + } + if vge.UpdatedBy != "" { + val["updated_by"] = vge.UpdatedBy + } + if vge.Status.String() != "" { + val["status"] = vge.Status.String() + } + + return val, nil +} + +type viewGroupPermsEvent struct { + permissions []string +} + +func (vgpe viewGroupPermsEvent) Encode() (map[string]interface{}, error) { + return map[string]interface{}{ + "operation": groupViewPerms, + "permissions": vgpe.permissions, + }, nil +} + +type listGroupEvent struct { + groups.Page +} + +func (lge listGroupEvent) Encode() (map[string]interface{}, error) { + val := map[string]interface{}{ + "operation": groupList, + "total": lge.Total, + "offset": lge.Offset, + "limit": lge.Limit, + } + + if lge.Name != "" { + val["name"] = lge.Name + } + if lge.DomainID != "" { + val["domain_id"] = lge.DomainID + } + if lge.Tag != "" { + val["tag"] = lge.Tag + } + if lge.Metadata != nil { + val["metadata"] = lge.Metadata + } + if lge.Status.String() != "" { + val["status"] = lge.Status.String() + } + + return val, nil +} + +type listGroupMembershipEvent struct { + groupID string + permission string + memberKind string +} + +func (lgme listGroupMembershipEvent) Encode() (map[string]interface{}, error) { + return map[string]interface{}{ + "operation": groupListMemberships, + "id": lgme.groupID, + "permission": lgme.permission, + "member_kind": lgme.memberKind, + }, nil +} + +type deleteGroupEvent struct { + id string +} + +func (rge deleteGroupEvent) Encode() (map[string]interface{}, error) { + return map[string]interface{}{ + "operation": groupRemove, + "id": rge.id, + }, nil +} diff --git a/pkg/groups/events/streams.go b/pkg/groups/events/streams.go new file mode 100644 index 0000000..9fa5fed --- /dev/null +++ b/pkg/groups/events/streams.go @@ -0,0 +1,211 @@ +// Copyright (c) Abstract Machines +// 
SPDX-License-Identifier: Apache-2.0 + +package events + +import ( + "context" + + "github.com/absmach/magistrala/pkg/events" + "github.com/absmach/magistrala/pkg/events/store" + "github.com/absmach/magistrala/pkg/groups" +) + +var _ groups.Service = (*eventStore)(nil) + +type eventStore struct { + events.Publisher + svc groups.Service +} + +// NewEventStoreMiddleware returns a wrapper around the groups service that sends +// events to the event store. +func NewEventStoreMiddleware(ctx context.Context, svc groups.Service, url, streamID string) (groups.Service, error) { + publisher, err := store.NewPublisher(ctx, url, streamID) + if err != nil { + return nil, err + } + + return &eventStore{ + svc: svc, + Publisher: publisher, + }, nil +} + +func (es eventStore) CreateGroup(ctx context.Context, token, kind string, group groups.Group) (groups.Group, error) { + group, err := es.svc.CreateGroup(ctx, token, kind, group) + if err != nil { + return group, err + } + + event := createGroupEvent{ + group, + } + + if err := es.Publish(ctx, event); err != nil { + return group, err + } + + return group, nil +} + +func (es eventStore) UpdateGroup(ctx context.Context, token string, group groups.Group) (groups.Group, error) { + group, err := es.svc.UpdateGroup(ctx, token, group) + if err != nil { + return group, err + } + + event := updateGroupEvent{ + group, + } + + if err := es.Publish(ctx, event); err != nil { + return group, err + } + + return group, nil +} + +func (es eventStore) ViewGroup(ctx context.Context, token, id string) (groups.Group, error) { + group, err := es.svc.ViewGroup(ctx, token, id) + if err != nil { + return group, err + } + event := viewGroupEvent{ + group, + } + + if err := es.Publish(ctx, event); err != nil { + return group, err + } + + return group, nil +} + +func (es eventStore) ViewGroupPerms(ctx context.Context, token, id string) ([]string, error) { + permissions, err := es.svc.ViewGroupPerms(ctx, token, id) + if err != nil { + return permissions, err + } + event := viewGroupPermsEvent{ + permissions, + } + + if err := es.Publish(ctx, event); err != nil { + return permissions, err + } + + return permissions, nil +} + +func (es eventStore) ListGroups(ctx context.Context, token, memberKind, memberID string, pm groups.Page) (groups.Page, error) { + gp, err := es.svc.ListGroups(ctx, token, memberKind, memberID, pm) + if err != nil { + return gp, err + } + event := listGroupEvent{ + pm, + } + + if err := es.Publish(ctx, event); err != nil { + return gp, err + } + + return gp, nil +} + +func (es eventStore) ListMembers(ctx context.Context, token, groupID, permission, memberKind string) (groups.MembersPage, error) { + mp, err := es.svc.ListMembers(ctx, token, groupID, permission, memberKind) + if err != nil { + return mp, err + } + event := listGroupMembershipEvent{ + groupID, permission, memberKind, + } + + if err := es.Publish(ctx, event); err != nil { + return mp, err + } + + return mp, nil +} + +func (es eventStore) EnableGroup(ctx context.Context, token, id string) (groups.Group, error) { + group, err := es.svc.EnableGroup(ctx, token, id) + if err != nil { + return group, err + } + + return es.changeStatus(ctx, group) +} + +func (es eventStore) Assign(ctx context.Context, token, groupID, relation, memberKind string, memberIDs ...string) error { + if err := es.svc.Assign(ctx, token, groupID, relation, memberKind, memberIDs...); err != nil { + return err + } + + event := assignEvent{ + groupID: groupID, + relation: relation, + memberKind: memberKind, + memberIDs: memberIDs, + } + + if err :=
es.Publish(ctx, event); err != nil { + return err + } + + return nil +} + +func (es eventStore) Unassign(ctx context.Context, token, groupID, relation, memberKind string, memberIDs ...string) error { + if err := es.svc.Unassign(ctx, token, groupID, relation, memberKind, memberIDs...); err != nil { + return err + } + + event := unassignEvent{ + groupID: groupID, + relation: relation, + memberKind: memberKind, + memberIDs: memberIDs, + } + + if err := es.Publish(ctx, event); err != nil { + return err + } + + return nil +} + +func (es eventStore) DisableGroup(ctx context.Context, token, id string) (groups.Group, error) { + group, err := es.svc.DisableGroup(ctx, token, id) + if err != nil { + return group, err + } + + return es.changeStatus(ctx, group) +} + +func (es eventStore) changeStatus(ctx context.Context, group groups.Group) (groups.Group, error) { + event := changeStatusGroupEvent{ + id: group.ID, + updatedAt: group.UpdatedAt, + updatedBy: group.UpdatedBy, + status: group.Status.String(), + } + + if err := es.Publish(ctx, event); err != nil { + return group, err + } + + return group, nil +} + +func (es eventStore) DeleteGroup(ctx context.Context, token, id string) error { + if err := es.svc.DeleteGroup(ctx, token, id); err != nil { + return err + } + if err := es.Publish(ctx, deleteGroupEvent{id}); err != nil { + return err + } + return nil +} diff --git a/pkg/groups/postgres/doc.go b/pkg/groups/postgres/doc.go new file mode 100644 index 0000000..96fe211 --- /dev/null +++ b/pkg/groups/postgres/doc.go @@ -0,0 +1,5 @@ +// Copyright (c) Abstract Machines +// SPDX-License-Identifier: Apache-2.0 + +// Package postgres contains the database implementation of the groups repository layer. +package postgres diff --git a/pkg/groups/postgres/groups.go b/pkg/groups/postgres/groups.go new file mode 100644 index 0000000..5830883 --- /dev/null +++ b/pkg/groups/postgres/groups.go @@ -0,0 +1,499 @@ +// Copyright (c) Abstract Machines +// SPDX-License-Identifier: Apache-2.0 + +package postgres + +import ( + "context" + "database/sql" + "encoding/json" + "fmt" + "strings" + "time" + + mgclients "github.com/absmach/magistrala/pkg/clients" + "github.com/absmach/magistrala/pkg/errors" + repoerr "github.com/absmach/magistrala/pkg/errors/repository" + mggroups "github.com/absmach/magistrala/pkg/groups" + "github.com/absmach/magistrala/pkg/postgres" + "github.com/jmoiron/sqlx" +) + +var _ mggroups.Repository = (*groupRepository)(nil) + +type groupRepository struct { + db postgres.Database +} + +// New instantiates a PostgreSQL implementation of group +// repository. 
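+// +// A minimal usage sketch (illustrative; it assumes db is an already connected postgres.Database and group a populated mggroups.Group): +// +// repo := New(db) +// g, err := repo.Save(ctx, group)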
+func New(db postgres.Database) mggroups.Repository { + return &groupRepository{ + db: db, + } +} + +func (repo groupRepository) Save(ctx context.Context, g mggroups.Group) (mggroups.Group, error) { + q := `INSERT INTO groups (name, description, id, domain_id, parent_id, metadata, created_at, status) + VALUES (:name, :description, :id, :domain_id, :parent_id, :metadata, :created_at, :status) + RETURNING id, name, description, domain_id, COALESCE(parent_id, '') AS parent_id, metadata, created_at, status;` + dbg, err := toDBGroup(g) + if err != nil { + return mggroups.Group{}, err + } + row, err := repo.db.NamedQueryContext(ctx, q, dbg) + if err != nil { + return mggroups.Group{}, postgres.HandleError(repoerr.ErrCreateEntity, err) + } + + defer row.Close() + row.Next() + dbg = dbGroup{} + if err := row.StructScan(&dbg); err != nil { + return mggroups.Group{}, err + } + + return toGroup(dbg) +} + +func (repo groupRepository) Update(ctx context.Context, g mggroups.Group) (mggroups.Group, error) { + var query []string + var upq string + if g.Name != "" { + query = append(query, "name = :name,") + } + if g.Description != "" { + query = append(query, "description = :description,") + } + if g.Metadata != nil { + query = append(query, "metadata = :metadata,") + } + if len(query) > 0 { + upq = strings.Join(query, " ") + } + g.Status = mgclients.EnabledStatus + q := fmt.Sprintf(`UPDATE groups SET %s updated_at = :updated_at, updated_by = :updated_by + WHERE id = :id AND status = :status + RETURNING id, name, description, domain_id, COALESCE(parent_id, '') AS parent_id, metadata, created_at, updated_at, updated_by, status`, upq) + + dbu, err := toDBGroup(g) + if err != nil { + return mggroups.Group{}, errors.Wrap(repoerr.ErrUpdateEntity, err) + } + + row, err := repo.db.NamedQueryContext(ctx, q, dbu) + if err != nil { + return mggroups.Group{}, postgres.HandleError(repoerr.ErrUpdateEntity, err) + } + + defer row.Close() + if ok := row.Next(); !ok { + return mggroups.Group{}, errors.Wrap(repoerr.ErrNotFound, row.Err()) + } + dbu = dbGroup{} + if err := row.StructScan(&dbu); err != nil { + return mggroups.Group{}, errors.Wrap(err, repoerr.ErrUpdateEntity) + } + return toGroup(dbu) +} + +func (repo groupRepository) ChangeStatus(ctx context.Context, group mggroups.Group) (mggroups.Group, error) { + qc := `UPDATE groups SET status = :status, updated_at = :updated_at, updated_by = :updated_by WHERE id = :id + RETURNING id, name, description, domain_id, COALESCE(parent_id, '') AS parent_id, metadata, created_at, updated_at, updated_by, status` + + dbg, err := toDBGroup(group) + if err != nil { + return mggroups.Group{}, errors.Wrap(repoerr.ErrUpdateEntity, err) + } + row, err := repo.db.NamedQueryContext(ctx, qc, dbg) + if err != nil { + return mggroups.Group{}, postgres.HandleError(repoerr.ErrUpdateEntity, err) + } + defer row.Close() + if ok := row.Next(); !ok { + return mggroups.Group{}, errors.Wrap(repoerr.ErrNotFound, row.Err()) + } + dbg = dbGroup{} + if err := row.StructScan(&dbg); err != nil { + return mggroups.Group{}, errors.Wrap(err, repoerr.ErrUpdateEntity) + } + + return toGroup(dbg) +} + +func (repo groupRepository) RetrieveByID(ctx context.Context, id string) (mggroups.Group, error) { + q := `SELECT id, name, domain_id, COALESCE(parent_id, '') AS parent_id, description, metadata, created_at, updated_at, updated_by, status FROM groups + WHERE id = :id` + + dbg := dbGroup{ + ID: id, + } + + row, err := repo.db.NamedQueryContext(ctx, q, dbg) + if err != nil { + return mggroups.Group{}, 
errors.Wrap(repoerr.ErrViewEntity, err) + } + defer row.Close() + + dbg = dbGroup{} + if row.Next() { + if err := row.StructScan(&dbg); err != nil { + return mggroups.Group{}, errors.Wrap(repoerr.ErrNotFound, err) + } + return toGroup(dbg) + } + + // No row matched the given ID. + return mggroups.Group{}, repoerr.ErrNotFound +} + +func (repo groupRepository) RetrieveAll(ctx context.Context, gm mggroups.Page) (mggroups.Page, error) { + var q string + query := buildQuery(gm) + + if gm.ID != "" { + q = buildHierarchy(gm) + } + if gm.ID == "" { + q = `SELECT DISTINCT g.id, g.domain_id, COALESCE(g.parent_id, '') AS parent_id, g.name, g.description, + g.metadata, g.created_at, g.updated_at, g.updated_by, g.status FROM groups g` + } + q = fmt.Sprintf("%s %s ORDER BY g.created_at LIMIT :limit OFFSET :offset;", q, query) + + dbPage, err := toDBGroupPage(gm) + if err != nil { + return mggroups.Page{}, errors.Wrap(repoerr.ErrFailedToRetrieveAllGroups, err) + } + rows, err := repo.db.NamedQueryContext(ctx, q, dbPage) + if err != nil { + return mggroups.Page{}, errors.Wrap(repoerr.ErrFailedToRetrieveAllGroups, err) + } + defer rows.Close() + + items, err := repo.processRows(rows) + if err != nil { + return mggroups.Page{}, errors.Wrap(repoerr.ErrFailedToRetrieveAllGroups, err) + } + + cq := "SELECT COUNT(*) FROM groups g" + if query != "" { + cq = fmt.Sprintf(" %s %s", cq, query) + } + + total, err := postgres.Total(ctx, repo.db, cq, dbPage) + if err != nil { + return mggroups.Page{}, errors.Wrap(repoerr.ErrFailedToRetrieveAllGroups, err) + } + + page := gm + page.Groups = items + page.Total = total + + return page, nil +} + +func (repo groupRepository) RetrieveByIDs(ctx context.Context, gm mggroups.Page, ids ...string) (mggroups.Page, error) { + var q string + if (len(ids) == 0) && (gm.PageMeta.DomainID == "") { + return mggroups.Page{PageMeta: mggroups.PageMeta{Offset: gm.Offset, Limit: gm.Limit}}, nil + } + query := buildQuery(gm, ids...)
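+ // As in RetrieveAll: an explicit group ID switches to the recursive hierarchy CTE below; otherwise a flat SELECT over groups is filtered by the query built above.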
+ + if gm.ID != "" { + q = buildHierarchy(gm) + } + if gm.ID == "" { + q = `SELECT DISTINCT g.id, g.domain_id, COALESCE(g.parent_id, '') AS parent_id, g.name, g.description, + g.metadata, g.created_at, g.updated_at, g.updated_by, g.status FROM groups g` + } + q = fmt.Sprintf("%s %s ORDER BY g.created_at LIMIT :limit OFFSET :offset;", q, query) + + dbPage, err := toDBGroupPage(gm) + if err != nil { + return mggroups.Page{}, errors.Wrap(repoerr.ErrFailedToRetrieveAllGroups, err) + } + rows, err := repo.db.NamedQueryContext(ctx, q, dbPage) + if err != nil { + return mggroups.Page{}, errors.Wrap(repoerr.ErrFailedToRetrieveAllGroups, err) + } + defer rows.Close() + + items, err := repo.processRows(rows) + if err != nil { + return mggroups.Page{}, errors.Wrap(repoerr.ErrFailedToRetrieveAllGroups, err) + } + + cq := "SELECT COUNT(*) FROM groups g" + if query != "" { + cq = fmt.Sprintf(" %s %s", cq, query) + } + + total, err := postgres.Total(ctx, repo.db, cq, dbPage) + if err != nil { + return mggroups.Page{}, errors.Wrap(repoerr.ErrFailedToRetrieveAllGroups, err) + } + + page := gm + page.Groups = items + page.Total = total + + return page, nil +} + +func (repo groupRepository) AssignParentGroup(ctx context.Context, parentGroupID string, groupIDs ...string) error { + if len(groupIDs) == 0 { + return nil + } + var updateColumns []string + for _, groupID := range groupIDs { + updateColumns = append(updateColumns, fmt.Sprintf("('%s', '%s') ", groupID, parentGroupID)) + } + uc := strings.Join(updateColumns, ",") + query := fmt.Sprintf(` + UPDATE groups AS g SET + parent_id = u.parent_group_id + FROM (VALUES + %s + ) AS u(id, parent_group_id) + WHERE g.id = u.id; + `, uc) + + row, err := repo.db.QueryContext(ctx, query) + if err != nil { + return postgres.HandleError(repoerr.ErrUpdateEntity, err) + } + defer row.Close() + + return nil +} + +func (repo groupRepository) UnassignParentGroup(ctx context.Context, parentGroupID string, groupIDs ...string) error { + if len(groupIDs) == 0 { + return nil + } + var updateColumns []string + for _, groupID := range groupIDs { + updateColumns = append(updateColumns, fmt.Sprintf("('%s', '%s') ", groupID, parentGroupID)) + } + uc := strings.Join(updateColumns, ",") + query := fmt.Sprintf(` + UPDATE groups AS g SET + parent_id = NULL + FROM (VALUES + %s + ) AS u(id, parent_group_id) + WHERE g.id = u.id; + `, uc) + + row, err := repo.db.QueryContext(ctx, query) + if err != nil { + return postgres.HandleError(repoerr.ErrUpdateEntity, err) + } + defer row.Close() + + return nil +} + +func (repo groupRepository) Delete(ctx context.Context, groupID string) error { + q := "DELETE FROM groups AS g WHERE g.id = $1;" + + result, err := repo.db.ExecContext(ctx, q, groupID) + if err != nil { + return postgres.HandleError(repoerr.ErrRemoveEntity, err) + } + if rows, _ := result.RowsAffected(); rows == 0 { + return repoerr.ErrNotFound + } + return nil +} + +// buildHierarchy returns a recursive CTE selecting either the ancestors (Direction >= 0) or the descendants (Direction < 0) of the group identified by :id. +func buildHierarchy(gm mggroups.Page) string { + query := "" + switch { + case gm.Direction >= 0: // ancestors + query = `WITH RECURSIVE groups_cte as ( + SELECT id, COALESCE(parent_id, '') AS parent_id, domain_id, name, description, metadata, created_at, updated_at, updated_by, status, 0 as level from groups WHERE id = :id + UNION SELECT x.id, COALESCE(x.parent_id, '') AS parent_id, x.domain_id, x.name, x.description, x.metadata, x.created_at, x.updated_at, x.updated_by, x.status, level - 1 from groups x + INNER JOIN groups_cte a ON a.parent_id = x.id + ) SELECT * FROM groups_cte g` + + case gm.Direction < 0: // descendants + 
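+ // Walk downward from :id: level grows by one per generation and the dot-separated materialized path is extended with each child ID.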
query = `WITH RECURSIVE groups_cte as ( + SELECT id, COALESCE(parent_id, '') AS parent_id, domain_id, name, description, metadata, created_at, updated_at, updated_by, status, 0 as level, CONCAT('', '', id) as path from groups WHERE id = :id + UNION SELECT x.id, COALESCE(x.parent_id, '') AS parent_id, x.domain_id, x.name, x.description, x.metadata, x.created_at, x.updated_at, x.updated_by, x.status, level + 1, CONCAT(path, '.', x.id) as path from groups x + INNER JOIN groups_cte d ON d.id = x.parent_id + ) SELECT * FROM groups_cte g` + } + return query +} + +func buildQuery(gm mggroups.Page, ids ...string) string { + queries := []string{} + + if len(ids) > 0 { + queries = append(queries, fmt.Sprintf(" id in ('%s') ", strings.Join(ids, "', '"))) + } + if gm.Name != "" { + queries = append(queries, "g.name = :name") + } + if gm.Status != mgclients.AllStatus { + queries = append(queries, "g.status = :status") + } + if gm.DomainID != "" { + queries = append(queries, "g.domain_id = :domain_id") + } + if len(gm.Metadata) > 0 { + queries = append(queries, "g.metadata @> :metadata") + } + if len(queries) > 0 { + return fmt.Sprintf("WHERE %s", strings.Join(queries, " AND ")) + } + + return "" +} + +type dbGroup struct { + ID string `db:"id"` + ParentID *string `db:"parent_id,omitempty"` + DomainID string `db:"domain_id,omitempty"` + Name string `db:"name"` + Description string `db:"description,omitempty"` + Level int `db:"level"` + Path string `db:"path,omitempty"` + Metadata []byte `db:"metadata,omitempty"` + CreatedAt time.Time `db:"created_at"` + UpdatedAt sql.NullTime `db:"updated_at,omitempty"` + UpdatedBy *string `db:"updated_by,omitempty"` + Status mgclients.Status `db:"status"` +} + +func toDBGroup(g mggroups.Group) (dbGroup, error) { + data := []byte("{}") + if len(g.Metadata) > 0 { + b, err := json.Marshal(g.Metadata) + if err != nil { + return dbGroup{}, errors.Wrap(errors.ErrMalformedEntity, err) + } + data = b + } + var parentID *string + if g.Parent != "" { + parentID = &g.Parent + } + var updatedAt sql.NullTime + if !g.UpdatedAt.IsZero() { + updatedAt = sql.NullTime{Time: g.UpdatedAt, Valid: true} + } + var updatedBy *string + if g.UpdatedBy != "" { + updatedBy = &g.UpdatedBy + } + return dbGroup{ + ID: g.ID, + Name: g.Name, + ParentID: parentID, + DomainID: g.Domain, + Description: g.Description, + Metadata: data, + Path: g.Path, + CreatedAt: g.CreatedAt, + UpdatedAt: updatedAt, + UpdatedBy: updatedBy, + Status: g.Status, + }, nil +} + +func toGroup(g dbGroup) (mggroups.Group, error) { + var metadata mgclients.Metadata + if g.Metadata != nil { + if err := json.Unmarshal(g.Metadata, &metadata); err != nil { + return mggroups.Group{}, errors.Wrap(repoerr.ErrMalformedEntity, err) + } + } + var parentID string + if g.ParentID != nil { + parentID = *g.ParentID + } + var updatedAt time.Time + if g.UpdatedAt.Valid { + updatedAt = g.UpdatedAt.Time + } + var updatedBy string + if g.UpdatedBy != nil { + updatedBy = *g.UpdatedBy + } + + return mggroups.Group{ + ID: g.ID, + Name: g.Name, + Parent: parentID, + Domain: g.DomainID, + Description: g.Description, + Metadata: metadata, + Level: g.Level, + Path: g.Path, + UpdatedAt: updatedAt, + UpdatedBy: updatedBy, + CreatedAt: g.CreatedAt, + Status: g.Status, + }, nil +} + +func toDBGroupPage(pm mggroups.Page) (dbGroupPage, error) { + level := mggroups.MaxLevel + if pm.Level < mggroups.MaxLevel { + level = pm.Level + } + data := []byte("{}") + if len(pm.Metadata) > 0 { + b, err := json.Marshal(pm.Metadata) + if err != nil { + return dbGroupPage{}, 
errors.Wrap(errors.ErrMalformedEntity, err) + } + data = b + } + return dbGroupPage{ + ID: pm.ID, + Name: pm.Name, + Metadata: data, + Path: pm.Path, + Level: level, + Total: pm.Total, + Offset: pm.Offset, + Limit: pm.Limit, + ParentID: pm.ID, + DomainID: pm.DomainID, + Status: pm.Status, + }, nil +} + +type dbGroupPage struct { + ClientID string `db:"client_id"` + ID string `db:"id"` + Name string `db:"name"` + ParentID string `db:"parent_id"` + DomainID string `db:"domain_id"` + Metadata []byte `db:"metadata"` + Path string `db:"path"` + Level uint64 `db:"level"` + Total uint64 `db:"total"` + Limit uint64 `db:"limit"` + Offset uint64 `db:"offset"` + Subject string `db:"subject"` + Action string `db:"action"` + Status mgclients.Status `db:"status"` +} + +func (repo groupRepository) processRows(rows *sqlx.Rows) ([]mggroups.Group, error) { + var items []mggroups.Group + for rows.Next() { + dbg := dbGroup{} + if err := rows.StructScan(&dbg); err != nil { + return items, err + } + group, err := toGroup(dbg) + if err != nil { + return items, err + } + items = append(items, group) + } + return items, nil +} diff --git a/pkg/groups/postgres/groups_test.go b/pkg/groups/postgres/groups_test.go new file mode 100644 index 0000000..0bf9d97 --- /dev/null +++ b/pkg/groups/postgres/groups_test.go @@ -0,0 +1,1213 @@ +// Copyright (c) Abstract Machines +// SPDX-License-Identifier: Apache-2.0 + +package postgres_test + +import ( + "context" + "fmt" + "strings" + "testing" + "time" + + "github.com/0x6flab/namegenerator" + "github.com/absmach/magistrala/pkg/clients" + "github.com/absmach/magistrala/pkg/errors" + repoerr "github.com/absmach/magistrala/pkg/errors/repository" + mggroups "github.com/absmach/magistrala/pkg/groups" + "github.com/absmach/mg-contrib/pkg/groups/postgres" + "github.com/absmach/mg-contrib/pkg/testsutil" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +var ( + namegen = namegenerator.NewGenerator() + invalidID = strings.Repeat("a", 37) + validGroup = mggroups.Group{ + ID: testsutil.GenerateUUID(&testing.T{}), + Domain: testsutil.GenerateUUID(&testing.T{}), + Name: namegen.Generate(), + Description: strings.Repeat("a", 64), + Metadata: map[string]interface{}{"key": "value"}, + CreatedAt: time.Now().UTC().Truncate(time.Microsecond), + Status: clients.EnabledStatus, + } +) + +func TestSave(t *testing.T) { + t.Cleanup(func() { + _, err := db.Exec("DELETE FROM groups") + require.Nil(t, err, fmt.Sprintf("clean groups unexpected error: %s", err)) + }) + + repo := postgres.New(database) + + cases := []struct { + desc string + group mggroups.Group + err error + }{ + { + desc: "add new group successfully", + group: validGroup, + err: nil, + }, + { + desc: "add duplicate group", + group: validGroup, + err: repoerr.ErrConflict, + }, + { + desc: "add group with invalid ID", + group: mggroups.Group{ + ID: invalidID, + Domain: testsutil.GenerateUUID(t), + Name: namegen.Generate(), + Description: strings.Repeat("a", 64), + Metadata: map[string]interface{}{"key": "value"}, + CreatedAt: time.Now().UTC().Truncate(time.Microsecond), + Status: clients.EnabledStatus, + }, + err: repoerr.ErrMalformedEntity, + }, + { + desc: "add group with invalid domain", + group: mggroups.Group{ + ID: testsutil.GenerateUUID(t), + Domain: invalidID, + Name: namegen.Generate(), + Description: strings.Repeat("a", 64), + Metadata: map[string]interface{}{"key": "value"}, + CreatedAt: time.Now().UTC().Truncate(time.Microsecond), + Status: clients.EnabledStatus, + }, + err: 
repoerr.ErrMalformedEntity, + }, + { + desc: "add group with invalid parent", + group: mggroups.Group{ + ID: testsutil.GenerateUUID(t), + Parent: invalidID, + Name: namegen.Generate(), + Description: strings.Repeat("a", 64), + Metadata: map[string]interface{}{"key": "value"}, + CreatedAt: time.Now().UTC().Truncate(time.Microsecond), + Status: clients.EnabledStatus, + }, + err: repoerr.ErrMalformedEntity, + }, + { + desc: "add group with invalid name", + group: mggroups.Group{ + ID: testsutil.GenerateUUID(t), + Domain: testsutil.GenerateUUID(t), + Name: strings.Repeat("a", 1025), + Description: strings.Repeat("a", 64), + Metadata: map[string]interface{}{"key": "value"}, + CreatedAt: time.Now().UTC().Truncate(time.Microsecond), + Status: clients.EnabledStatus, + }, + err: repoerr.ErrMalformedEntity, + }, + { + desc: "add group with invalid description", + group: mggroups.Group{ + ID: testsutil.GenerateUUID(t), + Domain: testsutil.GenerateUUID(t), + Name: namegen.Generate(), + Description: strings.Repeat("a", 1025), + Metadata: map[string]interface{}{"key": "value"}, + CreatedAt: time.Now().UTC().Truncate(time.Microsecond), + Status: clients.EnabledStatus, + }, + err: repoerr.ErrMalformedEntity, + }, + { + desc: "add group with invalid metadata", + group: mggroups.Group{ + ID: testsutil.GenerateUUID(t), + Domain: testsutil.GenerateUUID(t), + Name: namegen.Generate(), + Description: strings.Repeat("a", 64), + Metadata: map[string]interface{}{ + "key": make(chan int), + }, + CreatedAt: time.Now().UTC().Truncate(time.Microsecond), + Status: clients.EnabledStatus, + }, + err: repoerr.ErrMalformedEntity, + }, + { + desc: "add group with empty domain", + group: mggroups.Group{ + ID: testsutil.GenerateUUID(t), + Name: namegen.Generate(), + Description: strings.Repeat("a", 64), + Metadata: map[string]interface{}{"key": "value"}, + CreatedAt: time.Now().UTC().Truncate(time.Microsecond), + Status: clients.EnabledStatus, + }, + err: repoerr.ErrMalformedEntity, + }, + { + desc: "add group with empty name", + group: mggroups.Group{ + ID: testsutil.GenerateUUID(t), + Domain: testsutil.GenerateUUID(t), + Description: strings.Repeat("a", 64), + Metadata: map[string]interface{}{"key": "value"}, + CreatedAt: time.Now().UTC().Truncate(time.Microsecond), + Status: clients.EnabledStatus, + }, + err: repoerr.ErrMalformedEntity, + }, + } + + for _, tc := range cases { + switch group, err := repo.Save(context.Background(), tc.group); { + case err == nil: + assert.Nil(t, err, fmt.Sprintf("%s: expected %s got %s\n", tc.desc, tc.err, err)) + assert.Equal(t, tc.group, group, fmt.Sprintf("%s: expected %v got %v\n", tc.desc, tc.group, group)) + default: + assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("%s: expected %s got %s\n", tc.desc, tc.err, err)) + } + } +} + +func TestUpdate(t *testing.T) { + t.Cleanup(func() { + _, err := db.Exec("DELETE FROM groups") + require.Nil(t, err, fmt.Sprintf("clean groups unexpected error: %s", err)) + }) + + repo := postgres.New(database) + + group, err := repo.Save(context.Background(), validGroup) + require.Nil(t, err, fmt.Sprintf("save group unexpected error: %s", err)) + + cases := []struct { + desc string + group mggroups.Group + err error + }{ + { + desc: "update group successfully", + group: mggroups.Group{ + ID: group.ID, + Name: namegen.Generate(), + Description: strings.Repeat("a", 64), + Metadata: map[string]interface{}{"key": "value"}, + UpdatedAt: time.Now().UTC().Truncate(time.Microsecond), + UpdatedBy: testsutil.GenerateUUID(t), + }, + err: nil, + }, + { + desc: 
"update group name", + group: mggroups.Group{ + ID: group.ID, + Name: namegen.Generate(), + UpdatedAt: time.Now().UTC().Truncate(time.Microsecond), + UpdatedBy: testsutil.GenerateUUID(t), + }, + err: nil, + }, + { + desc: "update group description", + group: mggroups.Group{ + ID: group.ID, + Description: strings.Repeat("a", 64), + UpdatedAt: time.Now().UTC().Truncate(time.Microsecond), + UpdatedBy: testsutil.GenerateUUID(t), + }, + err: nil, + }, + { + desc: "update group metadata", + group: mggroups.Group{ + ID: group.ID, + Metadata: map[string]interface{}{"key": "value"}, + UpdatedAt: time.Now().UTC().Truncate(time.Microsecond), + UpdatedBy: testsutil.GenerateUUID(t), + }, + err: nil, + }, + { + desc: "update group with invalid ID", + group: mggroups.Group{ + ID: testsutil.GenerateUUID(t), + Name: namegen.Generate(), + Description: strings.Repeat("a", 64), + Metadata: map[string]interface{}{"key": "value"}, + UpdatedAt: time.Now().UTC().Truncate(time.Microsecond), + UpdatedBy: testsutil.GenerateUUID(t), + }, + err: repoerr.ErrNotFound, + }, + { + desc: "update group with empty ID", + group: mggroups.Group{ + Name: namegen.Generate(), + Description: strings.Repeat("a", 64), + Metadata: map[string]interface{}{"key": "value"}, + UpdatedAt: time.Now().UTC().Truncate(time.Microsecond), + UpdatedBy: testsutil.GenerateUUID(t), + }, + err: repoerr.ErrNotFound, + }, + } + + for _, tc := range cases { + switch group, err := repo.Update(context.Background(), tc.group); { + case err == nil: + assert.Nil(t, err, fmt.Sprintf("%s: expected %s got %s\n", tc.desc, tc.err, err)) + assert.Equal(t, tc.group.ID, group.ID, fmt.Sprintf("%s: expected %v got %v\n", tc.desc, tc.group.ID, group.ID)) + assert.Equal(t, tc.group.UpdatedAt, group.UpdatedAt, fmt.Sprintf("%s: expected %v got %v\n", tc.desc, tc.group.UpdatedAt, group.UpdatedAt)) + assert.Equal(t, tc.group.UpdatedBy, group.UpdatedBy, fmt.Sprintf("%s: expected %v got %v\n", tc.desc, tc.group.UpdatedBy, group.UpdatedBy)) + default: + assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("%s: expected %s got %s\n", tc.desc, tc.err, err)) + } + } +} + +func TestChangeStatus(t *testing.T) { + t.Cleanup(func() { + _, err := db.Exec("DELETE FROM groups") + require.Nil(t, err, fmt.Sprintf("clean groups unexpected error: %s", err)) + }) + + repo := postgres.New(database) + + group, err := repo.Save(context.Background(), validGroup) + require.Nil(t, err, fmt.Sprintf("save group unexpected error: %s", err)) + + cases := []struct { + desc string + group mggroups.Group + err error + }{ + { + desc: "change status group successfully", + group: mggroups.Group{ + ID: group.ID, + Status: clients.DisabledStatus, + UpdatedAt: time.Now().UTC().Truncate(time.Microsecond), + UpdatedBy: testsutil.GenerateUUID(t), + }, + err: nil, + }, + { + desc: "change status group with invalid ID", + group: mggroups.Group{ + ID: testsutil.GenerateUUID(t), + Status: clients.DisabledStatus, + UpdatedAt: time.Now().UTC().Truncate(time.Microsecond), + UpdatedBy: testsutil.GenerateUUID(t), + }, + err: repoerr.ErrNotFound, + }, + { + desc: "change status group with empty ID", + group: mggroups.Group{ + Status: clients.DisabledStatus, + UpdatedAt: time.Now().UTC().Truncate(time.Microsecond), + UpdatedBy: testsutil.GenerateUUID(t), + }, + err: repoerr.ErrNotFound, + }, + } + + for _, tc := range cases { + switch group, err := repo.ChangeStatus(context.Background(), tc.group); { + case err == nil: + assert.Nil(t, err, fmt.Sprintf("%s: expected %s got %s\n", tc.desc, tc.err, err)) + assert.Equal(t, 
tc.group.ID, group.ID, fmt.Sprintf("%s: expected %v got %v\n", tc.desc, tc.group.ID, group.ID)) + assert.Equal(t, tc.group.UpdatedAt, group.UpdatedAt, fmt.Sprintf("%s: expected %v got %v\n", tc.desc, tc.group.UpdatedAt, group.UpdatedAt)) + assert.Equal(t, tc.group.UpdatedBy, group.UpdatedBy, fmt.Sprintf("%s: expected %v got %v\n", tc.desc, tc.group.UpdatedBy, group.UpdatedBy)) + default: + assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("%s: expected %s got %s\n", tc.desc, tc.err, err)) + } + } +} + +func TestRetrieveByID(t *testing.T) { + t.Cleanup(func() { + _, err := db.Exec("DELETE FROM groups") + require.Nil(t, err, fmt.Sprintf("clean groups unexpected error: %s", err)) + }) + + repo := postgres.New(database) + + group, err := repo.Save(context.Background(), validGroup) + require.Nil(t, err, fmt.Sprintf("save group unexpected error: %s", err)) + + cases := []struct { + desc string + id string + group mggroups.Group + err error + }{ + { + desc: "retrieve group by id successfully", + id: group.ID, + group: validGroup, + err: nil, + }, + { + desc: "retrieve group by id with invalid ID", + id: invalidID, + group: mggroups.Group{}, + err: repoerr.ErrNotFound, + }, + { + desc: "retrieve group by id with empty ID", + id: "", + group: mggroups.Group{}, + err: repoerr.ErrNotFound, + }, + } + + for _, tc := range cases { + switch group, err := repo.RetrieveByID(context.Background(), tc.id); { + case err == nil: + assert.Nil(t, err, fmt.Sprintf("%s: expected %s got %s\n", tc.desc, tc.err, err)) + assert.Equal(t, tc.group, group, fmt.Sprintf("%s: expected %v got %v\n", tc.desc, tc.group, group)) + default: + assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("%s: expected %s got %s\n", tc.desc, tc.err, err)) + } + } +} + +func TestRetrieveAll(t *testing.T) { + t.Cleanup(func() { + _, err := db.Exec("DELETE FROM groups") + require.Nil(t, err, fmt.Sprintf("clean groups unexpected error: %s", err)) + }) + + repo := postgres.New(database) + num := 200 + + var items []mggroups.Group + parentID := "" + for i := 0; i < num; i++ { + name := namegen.Generate() + group := mggroups.Group{ + ID: testsutil.GenerateUUID(t), + Domain: testsutil.GenerateUUID(t), + Parent: parentID, + Name: name, + Description: strings.Repeat("a", 64), + Metadata: map[string]interface{}{"name": name}, + CreatedAt: time.Now().UTC().Truncate(time.Microsecond), + Status: clients.EnabledStatus, + } + _, err := repo.Save(context.Background(), group) + require.Nil(t, err, fmt.Sprintf("create group unexpected error: %s", err)) + items = append(items, group) + parentID = group.ID + } + + cases := []struct { + desc string + page mggroups.Page + response mggroups.Page + err error + }{ + { + desc: "retrieve groups successfully", + page: mggroups.Page{ + PageMeta: mggroups.PageMeta{ + Offset: 0, + Limit: 10, + }, + }, + response: mggroups.Page{ + PageMeta: mggroups.PageMeta{ + Total: uint64(num), + Offset: 0, + Limit: 10, + }, + Groups: items[:10], + }, + err: nil, + }, + { + desc: "retrieve groups with offset", + page: mggroups.Page{ + PageMeta: mggroups.PageMeta{ + Offset: 10, + Limit: 10, + }, + }, + response: mggroups.Page{ + PageMeta: mggroups.PageMeta{ + Total: uint64(num), + Offset: 10, + Limit: 10, + }, + Groups: items[10:20], + }, + err: nil, + }, + { + desc: "retrieve groups with limit", + page: mggroups.Page{ + PageMeta: mggroups.PageMeta{ + Offset: 0, + Limit: 50, + }, + }, + response: mggroups.Page{ + PageMeta: mggroups.PageMeta{ + Total: uint64(num), + Offset: 0, + Limit: 50, + }, + Groups: items[:50], + },
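+ // with 200 seeded groups, a limit of 50 is expected to return only the first 50 items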
+ err: nil, + }, + { + desc: "retrieve groups with offset and limit", + page: mggroups.Page{ + PageMeta: mggroups.PageMeta{ + Offset: 50, + Limit: 50, + }, + }, + response: mggroups.Page{ + PageMeta: mggroups.PageMeta{ + Total: uint64(num), + Offset: 50, + Limit: 50, + }, + Groups: items[50:100], + }, + err: nil, + }, + { + desc: "retrieve groups with offset out of range", + page: mggroups.Page{ + PageMeta: mggroups.PageMeta{ + Offset: 1000, + Limit: 50, + }, + }, + response: mggroups.Page{ + PageMeta: mggroups.PageMeta{ + Total: uint64(num), + Offset: 1000, + Limit: 50, + }, + Groups: []mggroups.Group(nil), + }, + err: nil, + }, + { + desc: "retrieve groups with offset and limit out of range", + page: mggroups.Page{ + PageMeta: mggroups.PageMeta{ + Offset: 170, + Limit: 50, + }, + }, + response: mggroups.Page{ + PageMeta: mggroups.PageMeta{ + Total: uint64(num), + Offset: 170, + Limit: 50, + }, + Groups: items[170:200], + }, + err: nil, + }, + { + desc: "retrieve groups with limit out of range", + page: mggroups.Page{ + PageMeta: mggroups.PageMeta{ + Offset: 0, + Limit: 1000, + }, + }, + response: mggroups.Page{ + PageMeta: mggroups.PageMeta{ + Total: uint64(num), + Offset: 0, + Limit: 1000, + }, + Groups: items, + }, + err: nil, + }, + { + desc: "retrieve groups with empty page", + page: mggroups.Page{}, + response: mggroups.Page{ + PageMeta: mggroups.PageMeta{ + Total: uint64(num), + Offset: 0, + Limit: 0, + }, + Groups: []mggroups.Group(nil), + }, + err: nil, + }, + { + desc: "retrieve groups with name", + page: mggroups.Page{ + PageMeta: mggroups.PageMeta{ + Offset: 0, + Limit: 10, + Name: items[0].Name, + }, + }, + response: mggroups.Page{ + PageMeta: mggroups.PageMeta{ + Total: 1, + Offset: 0, + Limit: 10, + }, + Groups: []mggroups.Group{items[0]}, + }, + err: nil, + }, + { + desc: "retrieve groups with domain", + page: mggroups.Page{ + PageMeta: mggroups.PageMeta{ + Offset: 0, + Limit: 10, + DomainID: items[0].Domain, + }, + }, + response: mggroups.Page{ + PageMeta: mggroups.PageMeta{ + Total: 1, + Offset: 0, + Limit: 10, + }, + Groups: []mggroups.Group{items[0]}, + }, + err: nil, + }, + { + desc: "retrieve groups with metadata", + page: mggroups.Page{ + PageMeta: mggroups.PageMeta{ + Offset: 0, + Limit: 10, + Metadata: items[0].Metadata, + }, + }, + response: mggroups.Page{ + PageMeta: mggroups.PageMeta{ + Total: 1, + Offset: 0, + Limit: 10, + }, + Groups: []mggroups.Group{items[0]}, + }, + err: nil, + }, + { + desc: "retrieve groups with invalid metadata", + page: mggroups.Page{ + PageMeta: mggroups.PageMeta{ + Offset: 0, + Limit: 10, + Metadata: map[string]interface{}{ + "key": make(chan int), + }, + }, + }, + response: mggroups.Page{ + PageMeta: mggroups.PageMeta{ + Total: 0, + Offset: 0, + Limit: 10, + }, + Groups: []mggroups.Group(nil), + }, + err: errors.ErrMalformedEntity, + }, + { + desc: "retrieve parent groups", + page: mggroups.Page{ + PageMeta: mggroups.PageMeta{ + Offset: 0, + Limit: uint64(num), + }, + ID: items[5].ID, + Direction: 1, + }, + response: mggroups.Page{ + PageMeta: mggroups.PageMeta{ + Total: uint64(num), + Offset: 0, + Limit: uint64(num), + }, + Groups: items[:6], + }, + err: nil, + }, + { + desc: "retrieve children groups", + page: mggroups.Page{ + PageMeta: mggroups.PageMeta{ + Offset: 0, + Limit: uint64(num), + }, + ID: items[150].ID, + Direction: -1, + }, + response: mggroups.Page{ + PageMeta: mggroups.PageMeta{ + Total: uint64(num), + Offset: 0, + Limit: uint64(num), + }, + Groups: items[150:], + }, + err: nil, + }, + } + + for _, tc := range cases 
{ + switch groups, err := repo.RetrieveAll(context.Background(), tc.page); { + case err == nil: + assert.Nil(t, err, fmt.Sprintf("%s: expected %s got %s\n", tc.desc, tc.err, err)) + assert.Equal(t, tc.response.Total, groups.Total, fmt.Sprintf("%s: expected %d got %d\n", tc.desc, tc.response.Total, groups.Total)) + assert.Equal(t, tc.response.Limit, groups.Limit, fmt.Sprintf("%s: expected %d got %d\n", tc.desc, tc.response.Limit, groups.Limit)) + assert.Equal(t, tc.response.Offset, groups.Offset, fmt.Sprintf("%s: expected %d got %d\n", tc.desc, tc.response.Offset, groups.Offset)) + for i := range tc.response.Groups { + tc.response.Groups[i].Level = groups.Groups[i].Level + tc.response.Groups[i].Path = groups.Groups[i].Path + } + assert.ElementsMatch(t, groups.Groups, tc.response.Groups, fmt.Sprintf("%s: expected %+v got %+v\n", tc.desc, tc.response.Groups, groups.Groups)) + default: + assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("%s: expected %s got %s\n", tc.desc, tc.err, err)) + } + } +} + +func TestRetrieveByIDs(t *testing.T) { + t.Cleanup(func() { + _, err := db.Exec("DELETE FROM groups") + require.Nil(t, err, fmt.Sprintf("clean groups unexpected error: %s", err)) + }) + + repo := postgres.New(database) + num := 200 + + var items []mggroups.Group + parentID := "" + for i := 0; i < num; i++ { + name := namegen.Generate() + group := mggroups.Group{ + ID: testsutil.GenerateUUID(t), + Domain: testsutil.GenerateUUID(t), + Parent: parentID, + Name: name, + Description: strings.Repeat("a", 64), + Metadata: map[string]interface{}{"name": name}, + CreatedAt: time.Now().UTC().Truncate(time.Microsecond), + Status: clients.EnabledStatus, + } + _, err := repo.Save(context.Background(), group) + require.Nil(t, err, fmt.Sprintf("create group unexpected error: %s", err)) + items = append(items, group) + parentID = group.ID + } + + cases := []struct { + desc string + page mggroups.Page + ids []string + response mggroups.Page + err error + }{ + { + desc: "retrieve groups successfully", + page: mggroups.Page{ + PageMeta: mggroups.PageMeta{ + Offset: 0, + Limit: 10, + }, + }, + ids: getIDs(items[0:3]), + response: mggroups.Page{ + PageMeta: mggroups.PageMeta{ + Total: 3, + Offset: 0, + Limit: 10, + }, + Groups: items[0:3], + }, + err: nil, + }, + { + desc: "retrieve groups with empty ids", + page: mggroups.Page{ + PageMeta: mggroups.PageMeta{ + Offset: 0, + Limit: 10, + }, + }, + ids: []string{}, + response: mggroups.Page{ + PageMeta: mggroups.PageMeta{ + Offset: 0, + Limit: 10, + }, + Groups: []mggroups.Group(nil), + }, + err: nil, + }, + { + desc: "retrieve groups with empty ids but with domain", + page: mggroups.Page{ + PageMeta: mggroups.PageMeta{ + Offset: 0, + Limit: 10, + DomainID: items[0].Domain, + }, + }, + ids: []string{}, + response: mggroups.Page{ + PageMeta: mggroups.PageMeta{ + Total: 1, + Offset: 0, + Limit: 10, + }, + Groups: []mggroups.Group{items[0]}, + }, + err: nil, + }, + { + desc: "retrieve groups with offset", + page: mggroups.Page{ + PageMeta: mggroups.PageMeta{ + Offset: 10, + Limit: 10, + }, + }, + ids: getIDs(items[0:20]), + response: mggroups.Page{ + PageMeta: mggroups.PageMeta{ + Total: 20, + Offset: 10, + Limit: 10, + }, + Groups: items[10:20], + }, + err: nil, + }, + { + desc: "retrieve groups with offset out of range", + page: mggroups.Page{ + PageMeta: mggroups.PageMeta{ + Offset: 1000, + Limit: 50, + }, + }, + ids: getIDs(items[0:20]), + response: mggroups.Page{ + PageMeta: mggroups.PageMeta{ + Total: 20, + Offset: 1000, + Limit: 50, + }, + Groups: 
[]mggroups.Group(nil), + }, + err: nil, + }, + { + desc: "retrieve groups with offset and limit out of range", + page: mggroups.Page{ + PageMeta: mggroups.PageMeta{ + Offset: 15, + Limit: 10, + }, + }, + ids: getIDs(items[0:20]), + response: mggroups.Page{ + PageMeta: mggroups.PageMeta{ + Total: 20, + Offset: 15, + Limit: 10, + }, + Groups: items[15:20], + }, + err: nil, + }, + { + desc: "retrieve groups with limit out of range", + page: mggroups.Page{ + PageMeta: mggroups.PageMeta{ + Offset: 0, + Limit: 1000, + }, + }, + ids: getIDs(items[0:20]), + response: mggroups.Page{ + PageMeta: mggroups.PageMeta{ + Total: 20, + Offset: 0, + Limit: 1000, + }, + Groups: items[:20], + }, + err: nil, + }, + { + desc: "retrieve groups with name", + page: mggroups.Page{ + PageMeta: mggroups.PageMeta{ + Offset: 0, + Limit: 10, + Name: items[0].Name, + }, + }, + ids: getIDs(items[0:20]), + response: mggroups.Page{ + PageMeta: mggroups.PageMeta{ + Total: 1, + Offset: 0, + Limit: 10, + }, + Groups: []mggroups.Group{items[0]}, + }, + err: nil, + }, + { + desc: "retrieve groups with domain", + page: mggroups.Page{ + PageMeta: mggroups.PageMeta{ + Offset: 0, + Limit: 10, + DomainID: items[0].Domain, + }, + }, + ids: getIDs(items[0:20]), + response: mggroups.Page{ + PageMeta: mggroups.PageMeta{ + Total: 1, + Offset: 0, + Limit: 10, + }, + Groups: []mggroups.Group{items[0]}, + }, + err: nil, + }, + { + desc: "retrieve groups with metadata", + page: mggroups.Page{ + PageMeta: mggroups.PageMeta{ + Offset: 0, + Limit: 10, + Metadata: items[0].Metadata, + }, + }, + ids: getIDs(items[0:20]), + response: mggroups.Page{ + PageMeta: mggroups.PageMeta{ + Total: 1, + Offset: 0, + Limit: 10, + }, + Groups: []mggroups.Group{items[0]}, + }, + err: nil, + }, + { + desc: "retrieve groups with invalid metadata", + page: mggroups.Page{ + PageMeta: mggroups.PageMeta{ + Offset: 0, + Limit: 10, + Metadata: map[string]interface{}{ + "key": make(chan int), + }, + }, + }, + ids: getIDs(items[0:20]), + response: mggroups.Page{ + PageMeta: mggroups.PageMeta{ + Total: 0, + Offset: 0, + Limit: 10, + }, + Groups: []mggroups.Group(nil), + }, + err: errors.ErrMalformedEntity, + }, + { + desc: "retrieve parent groups", + page: mggroups.Page{ + PageMeta: mggroups.PageMeta{ + Offset: 0, + Limit: uint64(num), + }, + ID: items[5].ID, + Direction: 1, + }, + ids: getIDs(items[0:20]), + response: mggroups.Page{ + PageMeta: mggroups.PageMeta{ + Total: 20, + Offset: 0, + Limit: uint64(num), + }, + Groups: items[:6], + }, + err: nil, + }, + { + desc: "retrieve children groups", + page: mggroups.Page{ + PageMeta: mggroups.PageMeta{ + Offset: 0, + Limit: uint64(num), + }, + ID: items[15].ID, + Direction: -1, + }, + ids: getIDs(items[0:20]), + response: mggroups.Page{ + PageMeta: mggroups.PageMeta{ + Total: 20, + Offset: 0, + Limit: uint64(num), + }, + Groups: items[15:20], + }, + err: nil, + }, + } + + for _, tc := range cases { + switch groups, err := repo.RetrieveByIDs(context.Background(), tc.page, tc.ids...); { + case err == nil: + assert.Nil(t, err, fmt.Sprintf("%s: expected %s got %s\n", tc.desc, tc.err, err)) + assert.Equal(t, tc.response.Total, groups.Total, fmt.Sprintf("%s: expected %d got %d\n", tc.desc, tc.response.Total, groups.Total)) + assert.Equal(t, tc.response.Limit, groups.Limit, fmt.Sprintf("%s: expected %d got %d\n", tc.desc, tc.response.Limit, groups.Limit)) + assert.Equal(t, tc.response.Offset, groups.Offset, fmt.Sprintf("%s: expected %d got %d\n", tc.desc, tc.response.Offset, groups.Offset)) + for i := range tc.response.Groups { + 
tc.response.Groups[i].Level = groups.Groups[i].Level + tc.response.Groups[i].Path = groups.Groups[i].Path + } + assert.ElementsMatch(t, groups.Groups, tc.response.Groups, fmt.Sprintf("%s: expected %+v got %+v\n", tc.desc, tc.response.Groups, groups.Groups)) + default: + assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("%s: expected %s got %s\n", tc.desc, tc.err, err)) + } + } +} + +func TestDelete(t *testing.T) { + t.Cleanup(func() { + _, err := db.Exec("DELETE FROM groups") + require.Nil(t, err, fmt.Sprintf("clean groups unexpected error: %s", err)) + }) + + repo := postgres.New(database) + + group, err := repo.Save(context.Background(), validGroup) + require.Nil(t, err, fmt.Sprintf("save group unexpected error: %s", err)) + + cases := []struct { + desc string + id string + err error + }{ + { + desc: "delete group successfully", + id: group.ID, + err: nil, + }, + { + desc: "delete group with invalid ID", + id: invalidID, + err: repoerr.ErrNotFound, + }, + { + desc: "delete group with empty ID", + id: "", + err: repoerr.ErrNotFound, + }, + } + + for _, tc := range cases { + switch err := repo.Delete(context.Background(), tc.id); { + case err == nil: + assert.Nil(t, err, fmt.Sprintf("%s: expected %s got %s\n", tc.desc, tc.err, err)) + default: + assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("%s: expected %s got %s\n", tc.desc, tc.err, err)) + } + } +} + +func TestAssignParentGroup(t *testing.T) { + t.Cleanup(func() { + _, err := db.Exec("DELETE FROM groups") + require.Nil(t, err, fmt.Sprintf("clean groups unexpected error: %s", err)) + }) + + repo := postgres.New(database) + + num := 10 + + var items []mggroups.Group + parentID := "" + for i := 0; i < num; i++ { + name := namegen.Generate() + group := mggroups.Group{ + ID: testsutil.GenerateUUID(t), + Domain: testsutil.GenerateUUID(t), + Parent: parentID, + Name: name, + Description: strings.Repeat("a", 64), + Metadata: map[string]interface{}{"name": name}, + CreatedAt: time.Now().UTC().Truncate(time.Microsecond), + Status: clients.EnabledStatus, + } + _, err := repo.Save(context.Background(), group) + require.Nil(t, err, fmt.Sprintf("create group unexpected error: %s", err)) + items = append(items, group) + parentID = group.ID + } + + cases := []struct { + desc string + id string + ids []string + err error + }{ + { + desc: "assign parent group successfully", + id: items[0].ID, + ids: []string{items[1].ID, items[2].ID, items[3].ID, items[4].ID, items[5].ID}, + err: nil, + }, + { + desc: "assign parent group with invalid ID", + id: testsutil.GenerateUUID(t), + ids: []string{items[1].ID, items[2].ID, items[3].ID, items[4].ID, items[5].ID}, + err: repoerr.ErrCreateEntity, + }, + { + desc: "assign parent group with empty ID", + id: "", + ids: []string{items[1].ID, items[2].ID, items[3].ID, items[4].ID, items[5].ID}, + err: repoerr.ErrCreateEntity, + }, + { + desc: "assign parent group with invalid group IDs", + id: items[0].ID, + ids: []string{testsutil.GenerateUUID(t), testsutil.GenerateUUID(t), testsutil.GenerateUUID(t), testsutil.GenerateUUID(t), testsutil.GenerateUUID(t)}, + err: nil, + }, + { + desc: "assign parent group with empty group IDs", + id: items[0].ID, + ids: []string{}, + err: nil, + }, + } + + for _, tc := range cases { + switch err := repo.AssignParentGroup(context.Background(), tc.id, tc.ids...); { + case err == nil: + assert.Nil(t, err, fmt.Sprintf("%s: expected %s got %s\n", tc.desc, tc.err, err)) + default: + assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("%s: expected %s got %s\n", 
tc.desc, tc.err, err)) + } + } +} + +func TestUnassignParentGroup(t *testing.T) { + t.Cleanup(func() { + _, err := db.Exec("DELETE FROM groups") + require.Nil(t, err, fmt.Sprintf("clean groups unexpected error: %s", err)) + }) + + repo := postgres.New(database) + + num := 10 + + var items []mggroups.Group + parentID := "" + for i := 0; i < num; i++ { + name := namegen.Generate() + group := mggroups.Group{ + ID: testsutil.GenerateUUID(t), + Domain: testsutil.GenerateUUID(t), + Parent: parentID, + Name: name, + Description: strings.Repeat("a", 64), + Metadata: map[string]interface{}{"name": name}, + CreatedAt: time.Now().UTC().Truncate(time.Microsecond), + Status: clients.EnabledStatus, + } + _, err := repo.Save(context.Background(), group) + require.Nil(t, err, fmt.Sprintf("create group unexpected error: %s", err)) + items = append(items, group) + parentID = group.ID + } + + cases := []struct { + desc string + id string + ids []string + err error + }{ + { + desc: "unassign parent group successfully", + id: items[0].ID, + ids: []string{items[1].ID, items[2].ID, items[3].ID, items[4].ID, items[5].ID}, + err: nil, + }, + { + desc: "unassign parent group with invalid ID", + id: testsutil.GenerateUUID(t), + ids: []string{items[1].ID, items[2].ID, items[3].ID, items[4].ID, items[5].ID}, + err: repoerr.ErrCreateEntity, + }, + { + desc: "unassign parent group with empty ID", + id: "", + ids: []string{items[1].ID, items[2].ID, items[3].ID, items[4].ID, items[5].ID}, + err: repoerr.ErrCreateEntity, + }, + { + desc: "unassign parent group with invalid group IDs", + id: items[0].ID, + ids: []string{testsutil.GenerateUUID(t), testsutil.GenerateUUID(t), testsutil.GenerateUUID(t), testsutil.GenerateUUID(t), testsutil.GenerateUUID(t)}, + err: nil, + }, + { + desc: "unassign parent group with empty group IDs", + id: items[0].ID, + ids: []string{}, + err: nil, + }, + } + + for _, tc := range cases { + switch err := repo.UnassignParentGroup(context.Background(), tc.id, tc.ids...); { + case err == nil: + assert.Nil(t, err, fmt.Sprintf("%s: expected %s got %s\n", tc.desc, tc.err, err)) + default: + assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("%s: expected %s got %s\n", tc.desc, tc.err, err)) + } + } +} + +func getIDs(groups []mggroups.Group) []string { + var ids []string + for _, group := range groups { + ids = append(ids, group.ID) + } + + return ids +} diff --git a/pkg/groups/postgres/init.go b/pkg/groups/postgres/init.go new file mode 100644 index 0000000..0b799c4 --- /dev/null +++ b/pkg/groups/postgres/init.go @@ -0,0 +1,38 @@ +// Copyright (c) Abstract Machines +// SPDX-License-Identifier: Apache-2.0 + +package postgres + +import ( + _ "github.com/jackc/pgx/v5/stdlib" // required for SQL access + migrate "github.com/rubenv/sql-migrate" +) + +func Migration() *migrate.MemoryMigrationSource { + return &migrate.MemoryMigrationSource{ + Migrations: []*migrate.Migration{ + { + Id: "groups_01", + Up: []string{ + `CREATE TABLE IF NOT EXISTS groups ( + id VARCHAR(36) PRIMARY KEY, + parent_id VARCHAR(36), + domain_id VARCHAR(36) NOT NULL, + name VARCHAR(1024) NOT NULL, + description VARCHAR(1024), + metadata JSONB, + created_at TIMESTAMP, + updated_at TIMESTAMP, + updated_by VARCHAR(254), + status SMALLINT NOT NULL DEFAULT 0 CHECK (status >= 0), + UNIQUE (domain_id, name), + FOREIGN KEY (parent_id) REFERENCES groups (id) ON DELETE SET NULL + )`, + }, + Down: []string{ + `DROP TABLE IF EXISTS groups`, + }, + }, + }, + } +} diff --git a/pkg/groups/postgres/setup_test.go 
b/pkg/groups/postgres/setup_test.go new file mode 100644 index 0000000..e1fd411 --- /dev/null +++ b/pkg/groups/postgres/setup_test.go @@ -0,0 +1,94 @@ +// Copyright (c) Abstract Machines +// SPDX-License-Identifier: Apache-2.0 + +package postgres_test + +import ( + "database/sql" + "fmt" + "log" + "os" + "testing" + "time" + + "github.com/absmach/magistrala/pkg/postgres" + pgclient "github.com/absmach/magistrala/pkg/postgres" + gpostgres "github.com/absmach/mg-contrib/pkg/groups/postgres" + "github.com/jmoiron/sqlx" + "github.com/ory/dockertest/v3" + "github.com/ory/dockertest/v3/docker" + "go.opentelemetry.io/otel" +) + +var ( + db *sqlx.DB + database postgres.Database + tracer = otel.Tracer("repo_tests") +) + +func TestMain(m *testing.M) { + pool, err := dockertest.NewPool("") + if err != nil { + log.Fatalf("Could not connect to docker: %s", err) + } + + container, err := pool.RunWithOptions(&dockertest.RunOptions{ + Repository: "postgres", + Tag: "16.2-alpine", + Env: []string{ + "POSTGRES_USER=test", + "POSTGRES_PASSWORD=test", + "POSTGRES_DB=test", + "listen_addresses = '*'", + }, + }, func(config *docker.HostConfig) { + config.AutoRemove = true + config.RestartPolicy = docker.RestartPolicy{Name: "no"} + }) + if err != nil { + log.Fatalf("Could not start container: %s", err) + } + + port := container.GetPort("5432/tcp") + + // exponential backoff-retry, because the application in the container might not be ready to accept connections yet + pool.MaxWait = 120 * time.Second + if err := pool.Retry(func() error { + url := fmt.Sprintf("host=localhost port=%s user=test dbname=test password=test sslmode=disable", port) + db, err := sql.Open("pgx", url) + if err != nil { + return err + } + return db.Ping() + }); err != nil { + log.Fatalf("Could not connect to docker: %s", err) + } + + dbConfig := pgclient.Config{ + Host: "localhost", + Port: port, + User: "test", + Pass: "test", + Name: "test", + SSLMode: "disable", + SSLCert: "", + SSLKey: "", + SSLRootCert: "", + } + + if db, err = pgclient.Setup(dbConfig, *gpostgres.Migration()); err != nil { + log.Fatalf("Could not setup test DB connection: %s", err) + } + + database = postgres.NewDatabase(db, dbConfig, tracer) + + code := m.Run() + + // Defers will not be run when using os.Exit + db.Close() + if err := pool.Purge(container); err != nil { + log.Fatalf("Could not purge container: %s", err) + } + + os.Exit(code) +} diff --git a/pkg/groups/service.go b/pkg/groups/service.go new file mode 100644 index 0000000..77ad6e2 --- /dev/null +++ b/pkg/groups/service.go @@ -0,0 +1,780 @@ +// Copyright (c) Abstract Machines +// SPDX-License-Identifier: Apache-2.0 + +package groups + +import ( + "context" + "fmt" + "time" + + "github.com/absmach/magistrala" + "github.com/absmach/magistrala/auth" + "github.com/absmach/magistrala/pkg/apiutil" + mgclients "github.com/absmach/magistrala/pkg/clients" + "github.com/absmach/magistrala/pkg/errors" + svcerr "github.com/absmach/magistrala/pkg/errors/service" + "github.com/absmach/magistrala/pkg/groups" + "golang.org/x/sync/errgroup" +) + +var ( + errParentUnAuthz = errors.New("failed to authorize parent group") + errMemberKind = errors.New("invalid member kind") + errGroupIDs = errors.New("invalid group ids") +) + +type service struct { + groups groups.Repository + auth magistrala.AuthServiceClient + idProvider magistrala.IDProvider +} + +// NewService returns a new groups service implementation. 
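+// The repository handles persistence, the ID provider generates group identifiers, and the auth client authorizes every operation.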
+func NewService(g groups.Repository, idp magistrala.IDProvider, authClient magistrala.AuthServiceClient) groups.Service { + return service{ + groups: g, + idProvider: idp, + auth: authClient, + } +} + +func (svc service) CreateGroup(ctx context.Context, token, kind string, g groups.Group) (gr groups.Group, err error) { + res, err := svc.identify(ctx, token) + if err != nil { + return groups.Group{}, err + } + // If the domain is disabled, this authorization will fail for all non-admin domain users + if _, err := svc.authorizeKind(ctx, "", auth.UserType, auth.UsersKind, res.GetId(), auth.CreatePermission, auth.DomainType, res.GetDomainId()); err != nil { + return groups.Group{}, err + } + groupID, err := svc.idProvider.ID() + if err != nil { + return groups.Group{}, err + } + if g.Status != mgclients.EnabledStatus && g.Status != mgclients.DisabledStatus { + return groups.Group{}, svcerr.ErrInvalidStatus + } + + g.ID = groupID + g.CreatedAt = time.Now() + g.Domain = res.GetDomainId() + if g.Parent != "" { + _, err := svc.authorizeToken(ctx, auth.UserType, token, auth.EditPermission, auth.GroupType, g.Parent) + if err != nil { + return groups.Group{}, errors.Wrap(errParentUnAuthz, err) + } + } + + if err := svc.addGroupPolicy(ctx, res.GetId(), res.GetDomainId(), g.ID, g.Parent, kind); err != nil { + return groups.Group{}, err + } + defer func() { + if err != nil { + if errRollback := svc.addGroupPolicyRollback(ctx, res.GetId(), res.GetDomainId(), g.ID, g.Parent, kind); errRollback != nil { + err = errors.Wrap(errors.Wrap(errors.ErrRollbackTx, errRollback), err) + } + } + }() + + saved, err := svc.groups.Save(ctx, g) + if err != nil { + return groups.Group{}, errors.Wrap(svcerr.ErrCreateEntity, err) + } + + return saved, nil +} + +func (svc service) ViewGroup(ctx context.Context, token, id string) (groups.Group, error) { + _, err := svc.authorizeToken(ctx, auth.UserType, token, auth.ViewPermission, auth.GroupType, id) + if err != nil { + return groups.Group{}, err + } + + group, err := svc.groups.RetrieveByID(ctx, id) + if err != nil { + return groups.Group{}, errors.Wrap(svcerr.ErrViewEntity, err) + } + + return group, nil +} + +func (svc service) ViewGroupPerms(ctx context.Context, token, id string) ([]string, error) { + res, err := svc.identify(ctx, token) + if err != nil { + return nil, err + } + + return svc.listUserGroupPermission(ctx, res.GetId(), id) +} + +func (svc service) ListGroups(ctx context.Context, token, memberKind, memberID string, gm groups.Page) (groups.Page, error) { + var ids []string + res, err := svc.identify(ctx, token) + if err != nil { + return groups.Page{}, err + } + switch memberKind { + case auth.ThingsKind: + if _, err := svc.authorizeKind(ctx, res.GetDomainId(), auth.UserType, auth.UsersKind, res.GetId(), auth.ViewPermission, auth.ThingType, memberID); err != nil { + return groups.Page{}, err + } + cids, err := svc.auth.ListAllSubjects(ctx, &magistrala.ListSubjectsReq{ + SubjectType: auth.GroupType, + Permission: auth.GroupRelation, + ObjectType: auth.ThingType, + Object: memberID, + }) + if err != nil { + return groups.Page{}, err + } + ids, err = svc.filterAllowedGroupIDsOfUserID(ctx, res.GetId(), gm.Permission, cids.Policies) + if err != nil { + return groups.Page{}, err + } + case auth.GroupsKind: + if _, err := svc.authorizeKind(ctx, res.GetDomainId(), auth.UserType, auth.UsersKind, res.GetId(), gm.Permission, auth.GroupType, memberID); err != nil { + return groups.Page{}, err + } + + gids, err := svc.auth.ListAllObjects(ctx, &magistrala.ListObjectsReq{ 
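+ // List every group object connected to the member group through the parent-group relation.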
+ SubjectType: auth.GroupType, + Subject: memberID, + Permission: auth.ParentGroupRelation, + ObjectType: auth.GroupType, + }) + if err != nil { + return groups.Page{}, err + } + ids, err = svc.filterAllowedGroupIDsOfUserID(ctx, res.GetId(), gm.Permission, gids.Policies) + if err != nil { + return groups.Page{}, err + } + case auth.ChannelsKind: + if _, err := svc.authorizeKind(ctx, res.GetDomainId(), auth.UserType, auth.UsersKind, res.GetId(), auth.ViewPermission, auth.GroupType, memberID); err != nil { + return groups.Page{}, err + } + gids, err := svc.auth.ListAllSubjects(ctx, &magistrala.ListSubjectsReq{ + SubjectType: auth.GroupType, + Permission: auth.ParentGroupRelation, + ObjectType: auth.GroupType, + Object: memberID, + }) + if err != nil { + return groups.Page{}, err + } + + ids, err = svc.filterAllowedGroupIDsOfUserID(ctx, res.GetId(), gm.Permission, gids.Policies) + if err != nil { + return groups.Page{}, err + } + case auth.UsersKind: + switch { + case memberID != "" && res.GetUserId() != memberID: + if _, err := svc.authorizeKind(ctx, res.GetDomainId(), auth.UserType, auth.UsersKind, res.GetId(), auth.AdminPermission, auth.DomainType, res.GetDomainId()); err != nil { + return groups.Page{}, err + } + gids, err := svc.auth.ListAllObjects(ctx, &magistrala.ListObjectsReq{ + SubjectType: auth.UserType, + Subject: auth.EncodeDomainUserID(res.GetDomainId(), memberID), + Permission: gm.Permission, + ObjectType: auth.GroupType, + }) + if err != nil { + return groups.Page{}, err + } + ids, err = svc.filterAllowedGroupIDsOfUserID(ctx, res.GetId(), gm.Permission, gids.Policies) + if err != nil { + return groups.Page{}, err + } + default: + switch svc.checkSuperAdmin(ctx, res.GetUserId()) { + case nil: + gm.PageMeta.DomainID = res.GetDomainId() + default: + // If the domain is disabled, this authorization will fail for all non-admin domain users + if _, err := svc.authorizeKind(ctx, "", auth.UserType, auth.UsersKind, res.GetId(), auth.MembershipPermission, auth.DomainType, res.GetDomainId()); err != nil { + return groups.Page{}, err + } + ids, err = svc.listAllGroupsOfUserID(ctx, res.GetId(), gm.Permission) + if err != nil { + return groups.Page{}, err + } + } + } + default: + return groups.Page{}, errMemberKind + } + + gp, err := svc.groups.RetrieveByIDs(ctx, gm, ids...) + if err != nil { + return groups.Page{}, errors.Wrap(svcerr.ErrViewEntity, err) + } + + if gm.ListPerms && len(gp.Groups) > 0 { + g, ctx := errgroup.WithContext(ctx) + + for i := range gp.Groups { + // Copying loop variable "i" to avoid "loop variable captured by func literal" + iter := i + g.Go(func() error { + return svc.retrievePermissions(ctx, res.GetId(), &gp.Groups[iter]) + }) + } + + if err := g.Wait(); err != nil { + return groups.Page{}, err + } + } + return gp, nil +} + +// Experimental functions used for async calling of svc.listUserGroupPermission. This might be helpful when listing a large number of entities. 
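+// retrievePermissions is fanned out via errgroup in ListGroups so that per-group permission lookups run concurrently.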
+func (svc service) retrievePermissions(ctx context.Context, userID string, group *groups.Group) error { + permissions, err := svc.listUserGroupPermission(ctx, userID, group.ID) + if err != nil { + return err + } + group.Permissions = permissions + return nil +} + +func (svc service) listUserGroupPermission(ctx context.Context, userID, groupID string) ([]string, error) { + lp, err := svc.auth.ListPermissions(ctx, &magistrala.ListPermissionsReq{ + SubjectType: auth.UserType, + Subject: userID, + Object: groupID, + ObjectType: auth.GroupType, + }) + if err != nil { + return []string{}, err + } + if len(lp.GetPermissions()) == 0 { + return []string{}, svcerr.ErrAuthorization + } + return lp.GetPermissions(), nil +} + +func (svc service) checkSuperAdmin(ctx context.Context, userID string) error { + res, err := svc.auth.Authorize(ctx, &magistrala.AuthorizeReq{ + SubjectType: auth.UserType, + Subject: userID, + Permission: auth.AdminPermission, + ObjectType: auth.PlatformType, + Object: auth.MagistralaObject, + }) + if err != nil { + return errors.Wrap(svcerr.ErrAuthorization, err) + } + if !res.Authorized { + return svcerr.ErrAuthorization + } + return nil +} + +// IMPROVEMENT NOTE: remove this function and all its related auxiliary functions; ListMembers has been moved to the respective service. +func (svc service) ListMembers(ctx context.Context, token, groupID, permission, memberKind string) (groups.MembersPage, error) { + _, err := svc.authorizeToken(ctx, auth.UserType, token, auth.ViewPermission, auth.GroupType, groupID) + if err != nil { + return groups.MembersPage{}, err + } + switch memberKind { + case auth.ThingsKind: + tids, err := svc.auth.ListAllObjects(ctx, &magistrala.ListObjectsReq{ + SubjectType: auth.GroupType, + Subject: groupID, + Relation: auth.GroupRelation, + ObjectType: auth.ThingType, + }) + if err != nil { + return groups.MembersPage{}, err + } + + members := []groups.Member{} + + for _, id := range tids.Policies { + members = append(members, groups.Member{ + ID: id, + Type: auth.ThingType, + }) + } + return groups.MembersPage{ + Total: uint64(len(members)), + Offset: 0, + Limit: uint64(len(members)), + Members: members, + }, nil + case auth.UsersKind: + uids, err := svc.auth.ListAllSubjects(ctx, &magistrala.ListSubjectsReq{ + SubjectType: auth.UserType, + Permission: permission, + Object: groupID, + ObjectType: auth.GroupType, + }) + if err != nil { + return groups.MembersPage{}, err + } + + members := []groups.Member{} + + for _, id := range uids.Policies { + members = append(members, groups.Member{ + ID: id, + Type: auth.UserType, + }) + } + return groups.MembersPage{ + Total: uint64(len(members)), + Offset: 0, + Limit: uint64(len(members)), + Members: members, + }, nil + default: + return groups.MembersPage{}, errMemberKind + } +} + +func (svc service) UpdateGroup(ctx context.Context, token string, g groups.Group) (groups.Group, error) { + id, err := svc.authorizeToken(ctx, auth.UserType, token, auth.EditPermission, auth.GroupType, g.ID) + if err != nil { + return groups.Group{}, err + } + + g.UpdatedAt = time.Now() + g.UpdatedBy = id + + return svc.groups.Update(ctx, g) +} + +func (svc service) EnableGroup(ctx context.Context, token, id string) (groups.Group, error) { + group := groups.Group{ + ID: id, + Status: mgclients.EnabledStatus, + UpdatedAt: time.Now(), + } + group, err := svc.changeGroupStatus(ctx, token, group) + if err != nil { + return groups.Group{}, err + } + return group, nil +} + +func (svc service) DisableGroup(ctx context.Context, token, id string) 
(groups.Group, error) { + group := groups.Group{ + ID: id, + Status: mgclients.DisabledStatus, + UpdatedAt: time.Now(), + } + group, err := svc.changeGroupStatus(ctx, token, group) + if err != nil { + return groups.Group{}, err + } + return group, nil +} + +func (svc service) Assign(ctx context.Context, token, groupID, relation, memberKind string, memberIDs ...string) error { + res, err := svc.identify(ctx, token) + if err != nil { + return err + } + if _, err := svc.authorizeKind(ctx, res.GetDomainId(), auth.UserType, auth.UsersKind, res.GetId(), auth.EditPermission, auth.GroupType, groupID); err != nil { + return err + } + + policies := magistrala.AddPoliciesReq{} + switch memberKind { + case auth.ThingsKind: + for _, memberID := range memberIDs { + policies.AddPoliciesReq = append(policies.AddPoliciesReq, &magistrala.AddPolicyReq{ + Domain: res.GetDomainId(), + SubjectType: auth.GroupType, + SubjectKind: auth.ChannelsKind, + Subject: groupID, + Relation: relation, + ObjectType: auth.ThingType, + Object: memberID, + }) + } + case auth.ChannelsKind: + for _, memberID := range memberIDs { + policies.AddPoliciesReq = append(policies.AddPoliciesReq, &magistrala.AddPolicyReq{ + Domain: res.GetDomainId(), + SubjectType: auth.GroupType, + Subject: memberID, + Relation: relation, + ObjectType: auth.GroupType, + Object: groupID, + }) + } + case auth.GroupsKind: + return svc.assignParentGroup(ctx, res.GetDomainId(), groupID, memberIDs) + + case auth.UsersKind: + for _, memberID := range memberIDs { + policies.AddPoliciesReq = append(policies.AddPoliciesReq, &magistrala.AddPolicyReq{ + Domain: res.GetDomainId(), + SubjectType: auth.UserType, + Subject: auth.EncodeDomainUserID(res.GetDomainId(), memberID), + Relation: relation, + ObjectType: auth.GroupType, + Object: groupID, + }) + } + default: + return errMemberKind + } + + if _, err := svc.auth.AddPolicies(ctx, &policies); err != nil { + return errors.Wrap(svcerr.ErrAddPolicies, err) + } + + return nil +} + +func (svc service) assignParentGroup(ctx context.Context, domain, parentGroupID string, groupIDs []string) (err error) { + groupsPage, err := svc.groups.RetrieveByIDs(ctx, groups.Page{PageMeta: groups.PageMeta{Limit: 1<<63 - 1}}, groupIDs...) 
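+ // Load the candidate child groups first so that groups that already have a parent are rejected before any policy is written.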
+ if err != nil { + return errors.Wrap(svcerr.ErrViewEntity, err) + } + if len(groupsPage.Groups) == 0 { + return errGroupIDs + } + var addPolicies magistrala.AddPoliciesReq + var deletePolicies magistrala.DeletePoliciesReq + for _, group := range groupsPage.Groups { + if group.Parent != "" { + return errors.Wrap(svcerr.ErrConflict, fmt.Errorf("%s group already has a parent", group.ID)) + } + addPolicies.AddPoliciesReq = append(addPolicies.AddPoliciesReq, &magistrala.AddPolicyReq{ + Domain: domain, + SubjectType: auth.GroupType, + Subject: parentGroupID, + Relation: auth.ParentGroupRelation, + ObjectType: auth.GroupType, + Object: group.ID, + }) + deletePolicies.DeletePoliciesReq = append(deletePolicies.DeletePoliciesReq, &magistrala.DeletePolicyReq{ + Domain: domain, + SubjectType: auth.GroupType, + Subject: parentGroupID, + Relation: auth.ParentGroupRelation, + ObjectType: auth.GroupType, + Object: group.ID, + }) + } + + if _, err := svc.auth.AddPolicies(ctx, &addPolicies); err != nil { + return errors.Wrap(svcerr.ErrAddPolicies, err) + } + defer func() { + if err != nil { + if _, errRollback := svc.auth.DeletePolicies(ctx, &deletePolicies); errRollback != nil { + err = errors.Wrap(err, errors.Wrap(apiutil.ErrRollbackTx, errRollback)) + } + } + }() + + return svc.groups.AssignParentGroup(ctx, parentGroupID, groupIDs...) +} + +func (svc service) unassignParentGroup(ctx context.Context, domain, parentGroupID string, groupIDs []string) (err error) { + groupsPage, err := svc.groups.RetrieveByIDs(ctx, groups.Page{PageMeta: groups.PageMeta{Limit: 1<<63 - 1}}, groupIDs...) + if err != nil { + return errors.Wrap(svcerr.ErrViewEntity, err) + } + if len(groupsPage.Groups) == 0 { + return errGroupIDs + } + var addPolicies magistrala.AddPoliciesReq + var deletePolicies magistrala.DeletePoliciesReq + for _, group := range groupsPage.Groups { + if group.Parent != "" && group.Parent != parentGroupID { + return errors.Wrap(svcerr.ErrConflict, fmt.Errorf("%s group does not have the same parent", group.ID)) + } + addPolicies.AddPoliciesReq = append(addPolicies.AddPoliciesReq, &magistrala.AddPolicyReq{ + Domain: domain, + SubjectType: auth.GroupType, + Subject: parentGroupID, + Relation: auth.ParentGroupRelation, + ObjectType: auth.GroupType, + Object: group.ID, + }) + deletePolicies.DeletePoliciesReq = append(deletePolicies.DeletePoliciesReq, &magistrala.DeletePolicyReq{ + Domain: domain, + SubjectType: auth.GroupType, + Subject: parentGroupID, + Relation: auth.ParentGroupRelation, + ObjectType: auth.GroupType, + Object: group.ID, + }) + } + + if _, err := svc.auth.DeletePolicies(ctx, &deletePolicies); err != nil { + return errors.Wrap(svcerr.ErrDeletePolicies, err) + } + defer func() { + if err != nil { + if _, errRollback := svc.auth.AddPolicies(ctx, &addPolicies); errRollback != nil { + err = errors.Wrap(err, errors.Wrap(apiutil.ErrRollbackTx, errRollback)) + } + } + }() + + return svc.groups.UnassignParentGroup(ctx, parentGroupID, groupIDs...) 
+} + +func (svc service) Unassign(ctx context.Context, token, groupID, relation, memberKind string, memberIDs ...string) error { + res, err := svc.identify(ctx, token) + if err != nil { + return err + } + if _, err := svc.authorizeKind(ctx, res.GetDomainId(), auth.UserType, auth.UsersKind, res.GetId(), auth.EditPermission, auth.GroupType, groupID); err != nil { + return err + } + + policies := magistrala.DeletePoliciesReq{} + + switch memberKind { + case auth.ThingsKind: + for _, memberID := range memberIDs { + policies.DeletePoliciesReq = append(policies.DeletePoliciesReq, &magistrala.DeletePolicyReq{ + Domain: res.GetDomainId(), + SubjectType: auth.GroupType, + SubjectKind: auth.ChannelsKind, + Subject: groupID, + Relation: relation, + ObjectType: auth.ThingType, + Object: memberID, + }) + } + case auth.ChannelsKind: + for _, memberID := range memberIDs { + policies.DeletePoliciesReq = append(policies.DeletePoliciesReq, &magistrala.DeletePolicyReq{ + Domain: res.GetDomainId(), + SubjectType: auth.GroupType, + Subject: memberID, + Relation: relation, + ObjectType: auth.GroupType, + Object: groupID, + }) + } + case auth.GroupsKind: + return svc.unassignParentGroup(ctx, res.GetDomainId(), groupID, memberIDs) + case auth.UsersKind: + for _, memberID := range memberIDs { + policies.DeletePoliciesReq = append(policies.DeletePoliciesReq, &magistrala.DeletePolicyReq{ + Domain: res.GetDomainId(), + SubjectType: auth.UserType, + Subject: auth.EncodeDomainUserID(res.GetDomainId(), memberID), + Relation: relation, + ObjectType: auth.GroupType, + Object: groupID, + }) + } + default: + return errMemberKind + } + + if _, err := svc.auth.DeletePolicies(ctx, &policies); err != nil { + return errors.Wrap(svcerr.ErrDeletePolicies, err) + } + return nil +} + +func (svc service) DeleteGroup(ctx context.Context, token, id string) error { + res, err := svc.identify(ctx, token) + if err != nil { + return err + } + if _, err := svc.authorizeKind(ctx, res.GetDomainId(), auth.UserType, auth.UsersKind, res.GetId(), auth.DeletePermission, auth.GroupType, id); err != nil { + return err + } + + deleteRes, err := svc.auth.DeleteEntityPolicies(ctx, &magistrala.DeleteEntityPoliciesReq{ + EntityType: auth.GroupType, + Id: id, + }) + if err != nil { + return errors.Wrap(svcerr.ErrDeletePolicies, err) + } + if !deleteRes.Deleted { + return svcerr.ErrAuthorization + } + + if err := svc.groups.Delete(ctx, id); err != nil { + return err + } + + return nil +} + +func (svc service) filterAllowedGroupIDsOfUserID(ctx context.Context, userID, permission string, groupIDs []string) ([]string, error) { + var ids []string + allowedIDs, err := svc.listAllGroupsOfUserID(ctx, userID, permission) + if err != nil { + return []string{}, err + } + + for _, gid := range groupIDs { + for _, id := range allowedIDs { + if id == gid { + ids = append(ids, id) + } + } + } + return ids, nil +} + +func (svc service) listAllGroupsOfUserID(ctx context.Context, userID, permission string) ([]string, error) { + allowedIDs, err := svc.auth.ListAllObjects(ctx, &magistrala.ListObjectsReq{ + SubjectType: auth.UserType, + Subject: userID, + Permission: permission, + ObjectType: auth.GroupType, + }) + if err != nil { + return []string{}, err + } + return allowedIDs.Policies, nil +} + +func (svc service) changeGroupStatus(ctx context.Context, token string, group groups.Group) (groups.Group, error) { + id, err := svc.authorizeToken(ctx, auth.UserType, token, auth.EditPermission, auth.GroupType, group.ID) + if err != nil { + return groups.Group{}, err + } + 
dbGroup, err := svc.groups.RetrieveByID(ctx, group.ID) + if err != nil { + return groups.Group{}, errors.Wrap(svcerr.ErrViewEntity, err) + } + if dbGroup.Status == group.Status { + return groups.Group{}, errors.ErrStatusAlreadyAssigned + } + + group.UpdatedBy = id + return svc.groups.ChangeStatus(ctx, group) +} + +func (svc service) identify(ctx context.Context, token string) (*magistrala.IdentityRes, error) { + res, err := svc.auth.Identify(ctx, &magistrala.IdentityReq{Token: token}) + if err != nil { + return nil, errors.Wrap(svcerr.ErrAuthentication, err) + } + if res.GetId() == "" || res.GetDomainId() == "" { + return nil, svcerr.ErrDomainAuthorization + } + return res, nil +} + +func (svc service) authorizeToken(ctx context.Context, subjectType, subject, permission, objectType, object string) (string, error) { + req := &magistrala.AuthorizeReq{ + SubjectType: subjectType, + SubjectKind: auth.TokenKind, + Subject: subject, + Permission: permission, + Object: object, + ObjectType: objectType, + } + res, err := svc.auth.Authorize(ctx, req) + if err != nil { + return "", errors.Wrap(svcerr.ErrAuthorization, err) + } + if !res.GetAuthorized() { + return "", svcerr.ErrAuthorization + } + return res.GetId(), nil +} + +func (svc service) authorizeKind(ctx context.Context, domainID, subjectType, subjectKind, subject, permission, objectType, object string) (string, error) { + req := &magistrala.AuthorizeReq{ + Domain: domainID, + SubjectType: subjectType, + SubjectKind: subjectKind, + Subject: subject, + Permission: permission, + Object: object, + ObjectType: objectType, + } + res, err := svc.auth.Authorize(ctx, req) + if err != nil { + return "", errors.Wrap(svcerr.ErrAuthorization, err) + } + if !res.GetAuthorized() { + return "", svcerr.ErrAuthorization + } + return res.GetId(), nil +} + +func (svc service) addGroupPolicy(ctx context.Context, userID, domainID, id, parentID, kind string) error { + policies := magistrala.AddPoliciesReq{} + policies.AddPoliciesReq = append(policies.AddPoliciesReq, &magistrala.AddPolicyReq{ + Domain: domainID, + SubjectType: auth.UserType, + Subject: userID, + Relation: auth.AdministratorRelation, + ObjectKind: kind, + ObjectType: auth.GroupType, + Object: id, + }) + policies.AddPoliciesReq = append(policies.AddPoliciesReq, &magistrala.AddPolicyReq{ + Domain: domainID, + SubjectType: auth.DomainType, + Subject: domainID, + Relation: auth.DomainRelation, + ObjectType: auth.GroupType, + Object: id, + }) + if parentID != "" { + policies.AddPoliciesReq = append(policies.AddPoliciesReq, &magistrala.AddPolicyReq{ + Domain: domainID, + SubjectType: auth.GroupType, + Subject: parentID, + Relation: auth.ParentGroupRelation, + ObjectKind: kind, + ObjectType: auth.GroupType, + Object: id, + }) + } + if _, err := svc.auth.AddPolicies(ctx, &policies); err != nil { + return errors.Wrap(svcerr.ErrAddPolicies, err) + } + + return nil +} + +func (svc service) addGroupPolicyRollback(ctx context.Context, userID, domainID, id, parentID, kind string) error { + policies := magistrala.DeletePoliciesReq{} + policies.DeletePoliciesReq = append(policies.DeletePoliciesReq, &magistrala.DeletePolicyReq{ + Domain: domainID, + SubjectType: auth.UserType, + Subject: userID, + Relation: auth.AdministratorRelation, + ObjectKind: kind, + ObjectType: auth.GroupType, + Object: id, + }) + policies.DeletePoliciesReq = append(policies.DeletePoliciesReq, &magistrala.DeletePolicyReq{ + Domain: domainID, + SubjectType: auth.DomainType, + Subject: domainID, + Relation: auth.DomainRelation, + ObjectType: 
auth.GroupType, + Object: id, + }) + if parentID != "" { + policies.DeletePoliciesReq = append(policies.DeletePoliciesReq, &magistrala.DeletePolicyReq{ + Domain: domainID, + SubjectType: auth.GroupType, + Subject: parentID, + Relation: auth.ParentGroupRelation, + ObjectKind: kind, + ObjectType: auth.GroupType, + Object: id, + }) + } + if _, err := svc.auth.DeletePolicies(ctx, &policies); err != nil { + return errors.Wrap(svcerr.ErrDeletePolicies, err) + } + + return nil +} diff --git a/pkg/groups/service_test.go b/pkg/groups/service_test.go new file mode 100644 index 0000000..4c883cd --- /dev/null +++ b/pkg/groups/service_test.go @@ -0,0 +1,2583 @@ +// Copyright (c) Abstract Machines +// SPDX-License-Identifier: Apache-2.0 + +package groups_test + +import ( + "context" + "fmt" + "testing" + "time" + + "github.com/0x6flab/namegenerator" + "github.com/absmach/magistrala" + "github.com/absmach/magistrala/auth" + authmocks "github.com/absmach/magistrala/auth/mocks" + "github.com/absmach/magistrala/pkg/apiutil" + "github.com/absmach/magistrala/pkg/clients" + "github.com/absmach/magistrala/pkg/errors" + repoerr "github.com/absmach/magistrala/pkg/errors/repository" + svcerr "github.com/absmach/magistrala/pkg/errors/service" + mggroups "github.com/absmach/magistrala/pkg/groups" + "github.com/absmach/magistrala/pkg/groups/mocks" + "github.com/absmach/magistrala/pkg/uuid" + "github.com/absmach/mg-contrib/pkg/groups" + "github.com/absmach/mg-contrib/pkg/testsutil" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" +) + +var ( + idProvider = uuid.New() + token = "token" + namegen = namegenerator.NewGenerator() + validGroup = mggroups.Group{ + Name: namegen.Generate(), + Description: namegen.Generate(), + Metadata: map[string]interface{}{ + "key": "value", + }, + Status: clients.Status(groups.EnabledStatus), + } + allowedIDs = []string{ + testsutil.GenerateUUID(&testing.T{}), + testsutil.GenerateUUID(&testing.T{}), + testsutil.GenerateUUID(&testing.T{}), + } +) + +func TestCreateGroup(t *testing.T) { + repo := new(mocks.Repository) + authsvc := new(authmocks.AuthClient) + svc := groups.NewService(repo, idProvider, authsvc) + + cases := []struct { + desc string + token string + kind string + group mggroups.Group + idResp *magistrala.IdentityRes + idErr error + authzResp *magistrala.AuthorizeRes + authzErr error + authzTknResp *magistrala.AuthorizeRes + authzTknErr error + repoResp mggroups.Group + repoErr error + addPolResp *magistrala.AddPoliciesRes + addPolErr error + deletePolResp *magistrala.DeletePolicyRes + deletePolErr error + err error + }{ + { + desc: "successfully", + token: token, + kind: auth.NewGroupKind, + group: validGroup, + idResp: &magistrala.IdentityRes{ + Id: testsutil.GenerateUUID(t), + DomainId: testsutil.GenerateUUID(t), + }, + authzResp: &magistrala.AuthorizeRes{ + Authorized: true, + }, + authzTknResp: &magistrala.AuthorizeRes{ + Authorized: true, + }, + repoResp: mggroups.Group{ + ID: testsutil.GenerateUUID(t), + CreatedAt: time.Now(), + Domain: testsutil.GenerateUUID(t), + }, + addPolResp: &magistrala.AddPoliciesRes{ + Added: true, + }, + }, + { + desc: "with invalid token", + token: token, + kind: auth.NewGroupKind, + group: validGroup, + idResp: &magistrala.IdentityRes{}, + idErr: svcerr.ErrAuthentication, + err: svcerr.ErrAuthentication, + }, + { + desc: "with empty id or domain id but with no grpc error", + token: token, + kind: auth.NewGroupKind, + group: validGroup, + idResp: &magistrala.IdentityRes{}, + idErr: nil, + err: 
svcerr.ErrDomainAuthorization, + }, + { + desc: "with failed to authorize domain membership", + token: token, + kind: auth.NewGroupKind, + group: validGroup, + idResp: &magistrala.IdentityRes{ + Id: testsutil.GenerateUUID(t), + DomainId: testsutil.GenerateUUID(t), + }, + authzResp: &magistrala.AuthorizeRes{ + Authorized: false, + }, + err: svcerr.ErrAuthorization, + }, + { + desc: "with failed to authorize domain membership with grpc error", + token: token, + kind: auth.NewGroupKind, + group: validGroup, + idResp: &magistrala.IdentityRes{ + Id: testsutil.GenerateUUID(t), + DomainId: testsutil.GenerateUUID(t), + }, + authzResp: &magistrala.AuthorizeRes{ + Authorized: false, + }, + authzErr: svcerr.ErrAuthorization, + err: svcerr.ErrAuthorization, + }, + { + desc: "with invalid status", + token: token, + kind: auth.NewGroupKind, + group: mggroups.Group{ + Name: namegen.Generate(), + Description: namegen.Generate(), + Status: clients.Status(100), + }, + idResp: &magistrala.IdentityRes{ + Id: testsutil.GenerateUUID(t), + DomainId: testsutil.GenerateUUID(t), + }, + authzResp: &magistrala.AuthorizeRes{ + Authorized: true, + }, + err: svcerr.ErrInvalidStatus, + }, + { + desc: "successfully with parent", + token: token, + kind: auth.NewGroupKind, + group: mggroups.Group{ + Name: namegen.Generate(), + Description: namegen.Generate(), + Status: clients.Status(groups.EnabledStatus), + Parent: testsutil.GenerateUUID(t), + }, + idResp: &magistrala.IdentityRes{ + Id: testsutil.GenerateUUID(t), + DomainId: testsutil.GenerateUUID(t), + }, + authzResp: &magistrala.AuthorizeRes{ + Authorized: true, + }, + authzTknResp: &magistrala.AuthorizeRes{ + Authorized: true, + }, + repoResp: mggroups.Group{ + ID: testsutil.GenerateUUID(t), + CreatedAt: time.Now(), + Domain: testsutil.GenerateUUID(t), + Parent: testsutil.GenerateUUID(t), + }, + addPolResp: &magistrala.AddPoliciesRes{ + Added: true, + }, + }, + { + desc: "unsuccessfully with parent due to authorization error", + token: token, + kind: auth.NewGroupKind, + group: mggroups.Group{ + Name: namegen.Generate(), + Description: namegen.Generate(), + Status: clients.Status(groups.EnabledStatus), + Parent: testsutil.GenerateUUID(t), + }, + idResp: &magistrala.IdentityRes{ + Id: testsutil.GenerateUUID(t), + DomainId: testsutil.GenerateUUID(t), + }, + authzResp: &magistrala.AuthorizeRes{ + Authorized: true, + }, + authzTknResp: &magistrala.AuthorizeRes{}, + authzTknErr: svcerr.ErrAuthorization, + repoResp: mggroups.Group{ + ID: testsutil.GenerateUUID(t), + Parent: testsutil.GenerateUUID(t), + }, + addPolResp: &magistrala.AddPoliciesRes{ + Added: true, + }, + err: svcerr.ErrAuthorization, + }, + { + desc: "with repo error", + token: token, + kind: auth.NewGroupKind, + group: validGroup, + idResp: &magistrala.IdentityRes{ + Id: testsutil.GenerateUUID(t), + DomainId: testsutil.GenerateUUID(t), + }, + authzResp: &magistrala.AuthorizeRes{ + Authorized: true, + }, + authzTknResp: &magistrala.AuthorizeRes{ + Authorized: true, + }, + repoResp: mggroups.Group{}, + repoErr: errors.ErrMalformedEntity, + err: errors.ErrMalformedEntity, + }, + { + desc: "with failed to add policies", + token: token, + kind: auth.NewGroupKind, + group: validGroup, + idResp: &magistrala.IdentityRes{ + Id: testsutil.GenerateUUID(t), + DomainId: testsutil.GenerateUUID(t), + }, + authzResp: &magistrala.AuthorizeRes{ + Authorized: true, + }, + authzTknResp: &magistrala.AuthorizeRes{ + Authorized: true, + }, + repoResp: mggroups.Group{ + ID: testsutil.GenerateUUID(t), + }, + addPolResp: 
&magistrala.AddPoliciesRes{}, + addPolErr: svcerr.ErrAuthorization, + err: svcerr.ErrAuthorization, + }, + { + desc: "with failed to delete policies response", + token: token, + kind: auth.NewGroupKind, + group: mggroups.Group{ + Name: namegen.Generate(), + Description: namegen.Generate(), + Status: clients.Status(groups.EnabledStatus), + Parent: testsutil.GenerateUUID(t), + }, + idResp: &magistrala.IdentityRes{ + Id: testsutil.GenerateUUID(t), + DomainId: testsutil.GenerateUUID(t), + }, + authzResp: &magistrala.AuthorizeRes{ + Authorized: true, + }, + authzTknResp: &magistrala.AuthorizeRes{ + Authorized: true, + }, + repoErr: errors.ErrMalformedEntity, + addPolResp: &magistrala.AddPoliciesRes{Added: true}, + deletePolErr: svcerr.ErrAuthorization, + err: errors.ErrMalformedEntity, + }, + } + + for _, tc := range cases { + t.Run(tc.desc, func(t *testing.T) { + authcall := authsvc.On("Identify", context.Background(), &magistrala.IdentityReq{Token: tc.token}).Return(tc.idResp, tc.idErr) + authcall1 := authsvc.On("Authorize", context.Background(), &magistrala.AuthorizeReq{ + SubjectType: auth.UserType, + SubjectKind: auth.UsersKind, + Subject: tc.idResp.GetId(), + Permission: auth.CreatePermission, + Object: tc.idResp.GetDomainId(), + ObjectType: auth.DomainType, + }).Return(tc.authzResp, tc.authzErr) + authcall2 := authsvc.On("Authorize", context.Background(), &magistrala.AuthorizeReq{ + SubjectType: auth.UserType, + SubjectKind: auth.TokenKind, + Subject: tc.token, + Permission: auth.EditPermission, + Object: tc.group.Parent, + ObjectType: auth.GroupType, + }).Return(tc.authzTknResp, tc.authzTknErr) + repocall := repo.On("Save", context.Background(), mock.Anything).Return(tc.repoResp, tc.repoErr) + authcall3 := authsvc.On("AddPolicies", context.Background(), mock.Anything).Return(tc.addPolResp, tc.addPolErr) + authCall4 := authsvc.On("DeletePolicies", mock.Anything, mock.Anything).Return(tc.deletePolResp, tc.deletePolErr) + got, err := svc.CreateGroup(context.Background(), tc.token, tc.kind, tc.group) + assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("expected error %v to contain %v", err, tc.err)) + if err == nil { + assert.NotEmpty(t, got.ID) + assert.NotEmpty(t, got.CreatedAt) + assert.NotEmpty(t, got.Domain) + assert.WithinDuration(t, time.Now(), got.CreatedAt, 2*time.Second) + ok := repocall.Parent.AssertCalled(t, "Save", context.Background(), mock.Anything) + assert.True(t, ok, fmt.Sprintf("Save was not called on %s", tc.desc)) + } + authcall.Unset() + authcall1.Unset() + authcall2.Unset() + repocall.Unset() + authcall3.Unset() + authCall4.Unset() + }) + } +} + +func TestViewGroup(t *testing.T) { + repo := new(mocks.Repository) + authsvc := new(authmocks.AuthClient) + svc := groups.NewService(repo, idProvider, authsvc) + + cases := []struct { + desc string + token string + id string + authzResp *magistrala.AuthorizeRes + authzErr error + repoResp mggroups.Group + repoErr error + err error + }{ + { + desc: "successfully", + token: token, + id: testsutil.GenerateUUID(t), + authzResp: &magistrala.AuthorizeRes{ + Authorized: true, + }, + repoResp: validGroup, + }, + { + desc: "with invalid token", + token: token, + id: testsutil.GenerateUUID(t), + authzResp: &magistrala.AuthorizeRes{ + Authorized: false, + }, + authzErr: svcerr.ErrAuthorization, + err: svcerr.ErrAuthorization, + }, + { + desc: "with failed to authorize", + token: token, + id: testsutil.GenerateUUID(t), + authzResp: &magistrala.AuthorizeRes{ + Authorized: false, + }, + authzErr: nil, + err: svcerr.ErrAuthorization, 
+ }, + } + + for _, tc := range cases { + t.Run(tc.desc, func(t *testing.T) { + authsvc.On("Authorize", context.Background(), &magistrala.AuthorizeReq{ + SubjectType: auth.UserType, + SubjectKind: auth.TokenKind, + Subject: tc.token, + Permission: auth.ViewPermission, + Object: tc.id, + ObjectType: auth.GroupType, + }).Return(tc.authzResp, tc.authzErr) + repo.On("RetrieveByID", context.Background(), tc.id).Return(tc.repoResp, tc.repoErr) + got, err := svc.ViewGroup(context.Background(), tc.token, tc.id) + assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("expected error %v to contain %v", err, tc.err)) + if err == nil { + assert.Equal(t, tc.repoResp, got) + ok := repo.AssertCalled(t, "RetrieveByID", context.Background(), tc.id) + assert.True(t, ok, fmt.Sprintf("RetrieveByID was not called on %s", tc.desc)) + } + }) + } +} + +func TestViewGroupPerms(t *testing.T) { + repo := new(mocks.Repository) + authsvc := new(authmocks.AuthClient) + svc := groups.NewService(repo, idProvider, authsvc) + + cases := []struct { + desc string + token string + id string + idResp *magistrala.IdentityRes + idErr error + listResp *magistrala.ListPermissionsRes + listErr error + err error + }{ + { + desc: "successfully", + token: token, + id: testsutil.GenerateUUID(t), + idResp: &magistrala.IdentityRes{ + Id: testsutil.GenerateUUID(t), + DomainId: testsutil.GenerateUUID(t), + }, + listResp: &magistrala.ListPermissionsRes{ + Permissions: []string{ + auth.ViewPermission, + auth.EditPermission, + }, + }, + }, + { + desc: "with invalid token", + token: token, + id: testsutil.GenerateUUID(t), + idResp: &magistrala.IdentityRes{}, + idErr: svcerr.ErrAuthentication, + err: svcerr.ErrAuthentication, + }, + { + desc: "with failed to list permissions", + token: token, + id: testsutil.GenerateUUID(t), + idResp: &magistrala.IdentityRes{ + Id: testsutil.GenerateUUID(t), + DomainId: testsutil.GenerateUUID(t), + }, + listErr: svcerr.ErrAuthorization, + err: svcerr.ErrAuthorization, + }, + { + desc: "with empty permissions", + token: token, + id: testsutil.GenerateUUID(t), + idResp: &magistrala.IdentityRes{ + Id: testsutil.GenerateUUID(t), + DomainId: testsutil.GenerateUUID(t), + }, + listResp: &magistrala.ListPermissionsRes{ + Permissions: []string{}, + }, + err: svcerr.ErrAuthorization, + }, + } + + for _, tc := range cases { + t.Run(tc.desc, func(t *testing.T) { + authcall := authsvc.On("Identify", context.Background(), &magistrala.IdentityReq{Token: tc.token}).Return(tc.idResp, tc.idErr) + authcall1 := authsvc.On("ListPermissions", context.Background(), &magistrala.ListPermissionsReq{ + SubjectType: auth.UserType, + Subject: tc.idResp.GetId(), + Object: tc.id, + ObjectType: auth.GroupType, + }).Return(tc.listResp, tc.listErr) + got, err := svc.ViewGroupPerms(context.Background(), tc.token, tc.id) + assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("expected error %v to contain %v", err, tc.err)) + if err == nil { + assert.Equal(t, tc.listResp.Permissions, got) + } + authcall.Unset() + authcall1.Unset() + }) + } +} + +func TestUpdateGroup(t *testing.T) { + repo := new(mocks.Repository) + authsvc := new(authmocks.AuthClient) + svc := groups.NewService(repo, idProvider, authsvc) + + cases := []struct { + desc string + token string + group mggroups.Group + authzResp *magistrala.AuthorizeRes + authzErr error + repoResp mggroups.Group + repoErr error + err error + }{ + { + desc: "successfully", + token: token, + group: mggroups.Group{ + ID: testsutil.GenerateUUID(t), + Name: namegen.Generate(), + }, + authzResp: 
&magistrala.AuthorizeRes{ + Authorized: true, + }, + repoResp: validGroup, + }, + { + desc: "with invalid token", + token: token, + group: mggroups.Group{ + ID: testsutil.GenerateUUID(t), + Name: namegen.Generate(), + }, + authzResp: &magistrala.AuthorizeRes{ + Authorized: false, + }, + authzErr: svcerr.ErrAuthorization, + err: svcerr.ErrAuthorization, + }, + { + desc: "with failed to authorize", + token: token, + group: mggroups.Group{ + ID: testsutil.GenerateUUID(t), + Name: namegen.Generate(), + }, + authzResp: &magistrala.AuthorizeRes{ + Authorized: false, + }, + authzErr: nil, + err: svcerr.ErrAuthorization, + }, + } + + for _, tc := range cases { + t.Run(tc.desc, func(t *testing.T) { + authsvc.On("Authorize", context.Background(), &magistrala.AuthorizeReq{ + SubjectType: auth.UserType, + SubjectKind: auth.TokenKind, + Subject: tc.token, + Permission: auth.EditPermission, + Object: tc.group.ID, + ObjectType: auth.GroupType, + }).Return(tc.authzResp, tc.authzErr) + repo.On("Update", context.Background(), mock.Anything).Return(tc.repoResp, tc.repoErr) + got, err := svc.UpdateGroup(context.Background(), tc.token, tc.group) + assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("expected error %v to contain %v", err, tc.err)) + if err == nil { + assert.Equal(t, tc.repoResp, got) + ok := repo.AssertCalled(t, "Update", context.Background(), mock.Anything) + assert.True(t, ok, fmt.Sprintf("Update was not called on %s", tc.desc)) + } + }) + } +} + +func TestEnableGroup(t *testing.T) { + repo := new(mocks.Repository) + authsvc := new(authmocks.AuthClient) + svc := groups.NewService(repo, idProvider, authsvc) + + cases := []struct { + desc string + token string + id string + authzResp *magistrala.AuthorizeRes + authzErr error + retrieveResp mggroups.Group + retrieveErr error + changeResp mggroups.Group + changeErr error + err error + }{ + { + desc: "successfully", + token: token, + id: testsutil.GenerateUUID(t), + authzResp: &magistrala.AuthorizeRes{ + Authorized: true, + }, + retrieveResp: mggroups.Group{ + Status: clients.Status(groups.DisabledStatus), + }, + changeResp: validGroup, + }, + { + desc: "with invalid token", + token: token, + id: testsutil.GenerateUUID(t), + authzResp: &magistrala.AuthorizeRes{ + Authorized: false, + }, + authzErr: svcerr.ErrAuthorization, + err: svcerr.ErrAuthorization, + }, + { + desc: "with failed to authorize", + token: token, + id: testsutil.GenerateUUID(t), + authzResp: &magistrala.AuthorizeRes{ + Authorized: false, + }, + authzErr: nil, + err: svcerr.ErrAuthorization, + }, + { + desc: "with enabled group", + token: token, + id: testsutil.GenerateUUID(t), + authzResp: &magistrala.AuthorizeRes{ + Authorized: true, + }, + retrieveResp: mggroups.Group{ + Status: clients.Status(groups.EnabledStatus), + }, + err: errors.ErrStatusAlreadyAssigned, + }, + { + desc: "with retrieve error", + token: token, + id: testsutil.GenerateUUID(t), + authzResp: &magistrala.AuthorizeRes{ + Authorized: true, + }, + retrieveResp: mggroups.Group{}, + retrieveErr: repoerr.ErrNotFound, + err: repoerr.ErrNotFound, + }, + } + + for _, tc := range cases { + t.Run(tc.desc, func(t *testing.T) { + authcall := authsvc.On("Authorize", context.Background(), &magistrala.AuthorizeReq{ + SubjectType: auth.UserType, + SubjectKind: auth.TokenKind, + Subject: tc.token, + Permission: auth.EditPermission, + Object: tc.id, + ObjectType: auth.GroupType, + }).Return(tc.authzResp, tc.authzErr) + repocall := repo.On("RetrieveByID", context.Background(), tc.id).Return(tc.retrieveResp, tc.retrieveErr) + 
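// ChangeStatus is stubbed unconditionally; the service only reaches it once authorization and retrieval succeed. +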
repocall1 := repo.On("ChangeStatus", context.Background(), mock.Anything).Return(tc.changeResp, tc.changeErr) + got, err := svc.EnableGroup(context.Background(), tc.token, tc.id) + assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("expected error %v to contain %v", err, tc.err)) + if err == nil { + assert.Equal(t, tc.changeResp, got) + ok := repo.AssertCalled(t, "RetrieveByID", context.Background(), tc.id) + assert.True(t, ok, fmt.Sprintf("RetrieveByID was not called on %s", tc.desc)) + } + authcall.Unset() + repocall.Unset() + repocall1.Unset() + }) + } +} + +func TestDisableGroup(t *testing.T) { + repo := new(mocks.Repository) + authsvc := new(authmocks.AuthClient) + svc := groups.NewService(repo, idProvider, authsvc) + + cases := []struct { + desc string + token string + id string + authzResp *magistrala.AuthorizeRes + authzErr error + retrieveResp mggroups.Group + retrieveErr error + changeResp mggroups.Group + changeErr error + err error + }{ + { + desc: "successfully", + token: token, + id: testsutil.GenerateUUID(t), + authzResp: &magistrala.AuthorizeRes{ + Authorized: true, + }, + retrieveResp: mggroups.Group{ + Status: clients.Status(groups.EnabledStatus), + }, + changeResp: validGroup, + }, + { + desc: "with invalid token", + token: token, + id: testsutil.GenerateUUID(t), + authzResp: &magistrala.AuthorizeRes{ + Authorized: false, + }, + authzErr: svcerr.ErrAuthorization, + err: svcerr.ErrAuthorization, + }, + { + desc: "with failed to authorize", + token: token, + id: testsutil.GenerateUUID(t), + authzResp: &magistrala.AuthorizeRes{ + Authorized: false, + }, + authzErr: nil, + err: svcerr.ErrAuthorization, + }, + { + desc: "with enabled group", + token: token, + id: testsutil.GenerateUUID(t), + authzResp: &magistrala.AuthorizeRes{ + Authorized: true, + }, + retrieveResp: mggroups.Group{ + Status: clients.Status(groups.DisabledStatus), + }, + err: errors.ErrStatusAlreadyAssigned, + }, + { + desc: "with retrieve error", + token: token, + id: testsutil.GenerateUUID(t), + authzResp: &magistrala.AuthorizeRes{ + Authorized: true, + }, + retrieveResp: mggroups.Group{}, + retrieveErr: repoerr.ErrNotFound, + err: repoerr.ErrNotFound, + }, + } + + for _, tc := range cases { + t.Run(tc.desc, func(t *testing.T) { + authcall := authsvc.On("Authorize", context.Background(), &magistrala.AuthorizeReq{ + SubjectType: auth.UserType, + SubjectKind: auth.TokenKind, + Subject: tc.token, + Permission: auth.EditPermission, + Object: tc.id, + ObjectType: auth.GroupType, + }).Return(tc.authzResp, tc.authzErr) + repocall := repo.On("RetrieveByID", context.Background(), tc.id).Return(tc.retrieveResp, tc.retrieveErr) + repocall1 := repo.On("ChangeStatus", context.Background(), mock.Anything).Return(tc.changeResp, tc.changeErr) + got, err := svc.DisableGroup(context.Background(), tc.token, tc.id) + assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("expected error %v to contain %v", err, tc.err)) + if err == nil { + assert.Equal(t, tc.changeResp, got) + ok := repo.AssertCalled(t, "RetrieveByID", context.Background(), tc.id) + assert.True(t, ok, fmt.Sprintf("RetrieveByID was not called on %s", tc.desc)) + } + authcall.Unset() + repocall.Unset() + repocall1.Unset() + }) + } +} + +func TestListMembers(t *testing.T) { + repo := new(mocks.Repository) + authsvc := new(authmocks.AuthClient) + svc := groups.NewService(repo, idProvider, authsvc) + + cases := []struct { + desc string + token string + groupID string + permission string + memberKind string + authzResp *magistrala.AuthorizeRes + authzErr 
error + listSubjectResp *magistrala.ListSubjectsRes + listSubjectErr error + listObjectResp *magistrala.ListObjectsRes + listObjectErr error + err error + }{ + { + desc: "successfully with things kind", + token: token, + groupID: testsutil.GenerateUUID(t), + memberKind: auth.ThingsKind, + authzResp: &magistrala.AuthorizeRes{ + Authorized: true, + }, + listObjectResp: &magistrala.ListObjectsRes{ + Policies: []string{ + testsutil.GenerateUUID(t), + testsutil.GenerateUUID(t), + testsutil.GenerateUUID(t), + }, + }, + }, + { + desc: "successfully with users kind", + token: token, + groupID: testsutil.GenerateUUID(t), + memberKind: auth.UsersKind, + permission: auth.ViewPermission, + authzResp: &magistrala.AuthorizeRes{ + Authorized: true, + }, + listSubjectResp: &magistrala.ListSubjectsRes{ + Policies: []string{ + testsutil.GenerateUUID(t), + testsutil.GenerateUUID(t), + testsutil.GenerateUUID(t), + }, + }, + }, + { + desc: "with invalid kind", + token: token, + groupID: testsutil.GenerateUUID(t), + memberKind: auth.GroupsKind, + permission: auth.ViewPermission, + authzResp: &magistrala.AuthorizeRes{ + Authorized: true, + }, + err: errors.New("invalid member kind"), + }, + { + desc: "with invalid token", + token: token, + authzResp: &magistrala.AuthorizeRes{ + Authorized: false, + }, + err: svcerr.ErrAuthorization, + }, + { + desc: "failed to list objects with things kind", + token: token, + groupID: testsutil.GenerateUUID(t), + memberKind: auth.ThingsKind, + authzResp: &magistrala.AuthorizeRes{ + Authorized: true, + }, + listObjectResp: &magistrala.ListObjectsRes{ + Policies: []string{}, + }, + listObjectErr: svcerr.ErrAuthorization, + err: svcerr.ErrAuthorization, + }, + { + desc: "failed to list subjects with users kind", + token: token, + groupID: testsutil.GenerateUUID(t), + memberKind: auth.UsersKind, + permission: auth.ViewPermission, + authzResp: &magistrala.AuthorizeRes{ + Authorized: true, + }, + listSubjectResp: &magistrala.ListSubjectsRes{ + Policies: []string{}, + }, + listSubjectErr: svcerr.ErrAuthorization, + err: svcerr.ErrAuthorization, + }, + } + + for _, tc := range cases { + t.Run(tc.desc, func(t *testing.T) { + authcall := authsvc.On("Authorize", context.Background(), &magistrala.AuthorizeReq{ + SubjectType: auth.UserType, + SubjectKind: auth.TokenKind, + Subject: tc.token, + Permission: auth.ViewPermission, + Object: tc.groupID, + ObjectType: auth.GroupType, + }).Return(tc.authzResp, tc.authzErr) + authcall1 := authsvc.On("ListAllObjects", context.Background(), &magistrala.ListObjectsReq{ + SubjectType: auth.GroupType, + Subject: tc.groupID, + Relation: auth.GroupRelation, + ObjectType: auth.ThingType, + }).Return(tc.listObjectResp, tc.listObjectErr) + authcall2 := authsvc.On("ListAllSubjects", context.Background(), &magistrala.ListSubjectsReq{ + SubjectType: auth.UserType, + Permission: tc.permission, + Object: tc.groupID, + ObjectType: auth.GroupType, + }).Return(tc.listSubjectResp, tc.listSubjectErr) + got, err := svc.ListMembers(context.Background(), tc.token, tc.groupID, tc.permission, tc.memberKind) + assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("expected error %v to contain %v", err, tc.err)) + if err == nil { + assert.NotEmpty(t, got) + } + authcall.Unset() + authcall1.Unset() + authcall2.Unset() + }) + } +} + +func TestListGroups(t *testing.T) { + repo := new(mocks.Repository) + authsvc := new(authmocks.AuthClient) + svc := groups.NewService(repo, idProvider, authsvc) + + cases := []struct { + desc string + token string + memberKind string + memberID 
string + page mggroups.Page + idResp *magistrala.IdentityRes + idErr error + authzResp *magistrala.AuthorizeRes + authzErr error + listSubjectResp *magistrala.ListSubjectsRes + listSubjectErr error + listObjectResp *magistrala.ListObjectsRes + listObjectErr error + listObjectFilterResp *magistrala.ListObjectsRes + listObjectFilterErr error + authSuperAdminResp *magistrala.AuthorizeRes + authSuperAdminErr error + repoResp mggroups.Page + repoErr error + listPermResp *magistrala.ListPermissionsRes + listPermErr error + err error + }{ + { + desc: "successfully with things kind", + token: token, + memberID: testsutil.GenerateUUID(t), + memberKind: auth.ThingsKind, + page: mggroups.Page{ + Permission: auth.ViewPermission, + ListPerms: true, + }, + idResp: &magistrala.IdentityRes{ + Id: testsutil.GenerateUUID(t), + DomainId: testsutil.GenerateUUID(t), + }, + authzResp: &magistrala.AuthorizeRes{ + Authorized: true, + }, + listSubjectResp: &magistrala.ListSubjectsRes{ + Policies: allowedIDs, + }, + listObjectFilterResp: &magistrala.ListObjectsRes{ + Policies: allowedIDs, + }, + repoResp: mggroups.Page{ + Groups: []mggroups.Group{ + validGroup, + validGroup, + validGroup, + }, + }, + listPermResp: &magistrala.ListPermissionsRes{ + Permissions: []string{ + auth.ViewPermission, + auth.EditPermission, + }, + }, + }, + { + desc: "successfully with groups kind", + token: token, + memberID: testsutil.GenerateUUID(t), + memberKind: auth.GroupsKind, + page: mggroups.Page{ + Permission: auth.ViewPermission, + ListPerms: true, + }, + idResp: &magistrala.IdentityRes{ + Id: testsutil.GenerateUUID(t), + DomainId: testsutil.GenerateUUID(t), + }, + authzResp: &magistrala.AuthorizeRes{ + Authorized: true, + }, + listObjectResp: &magistrala.ListObjectsRes{ + Policies: allowedIDs, + }, + listObjectFilterResp: &magistrala.ListObjectsRes{ + Policies: allowedIDs, + }, + repoResp: mggroups.Page{ + Groups: []mggroups.Group{ + validGroup, + validGroup, + validGroup, + }, + }, + listPermResp: &magistrala.ListPermissionsRes{ + Permissions: []string{ + auth.ViewPermission, + auth.EditPermission, + }, + }, + }, + { + desc: "successfully with channels kind", + token: token, + memberID: testsutil.GenerateUUID(t), + memberKind: auth.ChannelsKind, + page: mggroups.Page{ + Permission: auth.ViewPermission, + ListPerms: true, + }, + idResp: &magistrala.IdentityRes{ + Id: testsutil.GenerateUUID(t), + DomainId: testsutil.GenerateUUID(t), + }, + authzResp: &magistrala.AuthorizeRes{ + Authorized: true, + }, + listSubjectResp: &magistrala.ListSubjectsRes{ + Policies: allowedIDs, + }, + listObjectFilterResp: &magistrala.ListObjectsRes{ + Policies: allowedIDs, + }, + repoResp: mggroups.Page{ + Groups: []mggroups.Group{ + validGroup, + validGroup, + validGroup, + }, + }, + listPermResp: &magistrala.ListPermissionsRes{ + Permissions: []string{ + auth.ViewPermission, + auth.EditPermission, + }, + }, + }, + { + desc: "successfully with users kind non admin", + token: token, + memberID: testsutil.GenerateUUID(t), + memberKind: auth.UsersKind, + page: mggroups.Page{ + Permission: auth.ViewPermission, + ListPerms: true, + }, + idResp: &magistrala.IdentityRes{ + Id: testsutil.GenerateUUID(t), + DomainId: testsutil.GenerateUUID(t), + }, + authzResp: &magistrala.AuthorizeRes{ + Authorized: true, + }, + listObjectResp: &magistrala.ListObjectsRes{ + Policies: allowedIDs, + }, + listObjectFilterResp: &magistrala.ListObjectsRes{ + Policies: allowedIDs, + }, + repoResp: mggroups.Page{ + Groups: []mggroups.Group{ + validGroup, + validGroup, + validGroup, 
+ }, + }, + listPermResp: &magistrala.ListPermissionsRes{ + Permissions: []string{ + auth.ViewPermission, + auth.EditPermission, + }, + }, + }, + { + desc: "successfully with users kind admin", + token: token, + memberKind: auth.UsersKind, + page: mggroups.Page{ + Permission: auth.ViewPermission, + ListPerms: true, + }, + idResp: &magistrala.IdentityRes{ + Id: testsutil.GenerateUUID(t), + UserId: testsutil.GenerateUUID(t), + DomainId: testsutil.GenerateUUID(t), + }, + authzResp: &magistrala.AuthorizeRes{ + Authorized: true, + }, + listObjectResp: &magistrala.ListObjectsRes{ + Policies: allowedIDs, + }, + listObjectFilterResp: &magistrala.ListObjectsRes{ + Policies: allowedIDs, + }, + repoResp: mggroups.Page{ + Groups: []mggroups.Group{ + validGroup, + validGroup, + validGroup, + }, + }, + listPermResp: &magistrala.ListPermissionsRes{ + Permissions: []string{ + auth.ViewPermission, + auth.EditPermission, + }, + }, + }, + { + desc: "unsuccessfully with users kind admin", + token: token, + memberKind: auth.UsersKind, + page: mggroups.Page{ + Permission: auth.ViewPermission, + ListPerms: true, + }, + idResp: &magistrala.IdentityRes{ + Id: testsutil.GenerateUUID(t), + UserId: testsutil.GenerateUUID(t), + DomainId: testsutil.GenerateUUID(t), + }, + authzResp: &magistrala.AuthorizeRes{ + Authorized: false, + }, + authzErr: svcerr.ErrAuthorization, + err: svcerr.ErrAuthorization, + }, + { + desc: "unsuccessfully with users kind admin with nil error", + token: token, + memberKind: auth.UsersKind, + page: mggroups.Page{ + Permission: auth.ViewPermission, + ListPerms: true, + }, + idResp: &magistrala.IdentityRes{ + Id: testsutil.GenerateUUID(t), + UserId: testsutil.GenerateUUID(t), + DomainId: testsutil.GenerateUUID(t), + }, + authzResp: &magistrala.AuthorizeRes{ + Authorized: false, + }, + err: svcerr.ErrAuthorization, + }, + { + desc: "unsuccessfully with things kind due to failed to authorize", + token: token, + memberID: testsutil.GenerateUUID(t), + memberKind: auth.ThingsKind, + page: mggroups.Page{ + Permission: auth.ViewPermission, + ListPerms: true, + }, + idResp: &magistrala.IdentityRes{ + Id: testsutil.GenerateUUID(t), + DomainId: testsutil.GenerateUUID(t), + }, + authzResp: &magistrala.AuthorizeRes{ + Authorized: false, + }, + authzErr: svcerr.ErrAuthorization, + err: svcerr.ErrAuthorization, + }, + { + desc: "unsuccessfully with things kind due to failed to list subjects", + token: token, + memberID: testsutil.GenerateUUID(t), + memberKind: auth.ThingsKind, + page: mggroups.Page{ + Permission: auth.ViewPermission, + ListPerms: true, + }, + idResp: &magistrala.IdentityRes{ + Id: testsutil.GenerateUUID(t), + DomainId: testsutil.GenerateUUID(t), + }, + authzResp: &magistrala.AuthorizeRes{ + Authorized: true, + }, + listSubjectResp: &magistrala.ListSubjectsRes{}, + listSubjectErr: svcerr.ErrAuthorization, + err: svcerr.ErrAuthorization, + }, + { + desc: "unsuccessfully with things kind due to failed to list filtered objects", + token: token, + memberID: testsutil.GenerateUUID(t), + memberKind: auth.ThingsKind, + page: mggroups.Page{ + Permission: auth.ViewPermission, + ListPerms: true, + }, + idResp: &magistrala.IdentityRes{ + Id: testsutil.GenerateUUID(t), + DomainId: testsutil.GenerateUUID(t), + }, + authzResp: &magistrala.AuthorizeRes{ + Authorized: true, + }, + listSubjectResp: &magistrala.ListSubjectsRes{ + Policies: allowedIDs, + }, + listObjectFilterResp: &magistrala.ListObjectsRes{}, + listObjectFilterErr: svcerr.ErrAuthorization, + err: svcerr.ErrAuthorization, + }, + { + desc: 
"unsuccessfully with groups kind due to failed to authorize", + token: token, + memberID: testsutil.GenerateUUID(t), + memberKind: auth.GroupsKind, + page: mggroups.Page{ + Permission: auth.ViewPermission, + ListPerms: true, + }, + idResp: &magistrala.IdentityRes{ + Id: testsutil.GenerateUUID(t), + DomainId: testsutil.GenerateUUID(t), + }, + authzResp: &magistrala.AuthorizeRes{ + Authorized: false, + }, + authzErr: svcerr.ErrAuthorization, + err: svcerr.ErrAuthorization, + }, + { + desc: "unsuccessfully with groups kind due to failed to list subjects", + token: token, + memberID: testsutil.GenerateUUID(t), + memberKind: auth.GroupsKind, + page: mggroups.Page{ + Permission: auth.ViewPermission, + ListPerms: true, + }, + idResp: &magistrala.IdentityRes{ + Id: testsutil.GenerateUUID(t), + DomainId: testsutil.GenerateUUID(t), + }, + authzResp: &magistrala.AuthorizeRes{ + Authorized: true, + }, + listObjectResp: &magistrala.ListObjectsRes{}, + listObjectErr: svcerr.ErrAuthorization, + err: svcerr.ErrAuthorization, + }, + { + desc: "unsuccessfully with groups kind due to failed to list filtered objects", + token: token, + memberID: testsutil.GenerateUUID(t), + memberKind: auth.GroupsKind, + page: mggroups.Page{ + Permission: auth.ViewPermission, + ListPerms: true, + }, + idResp: &magistrala.IdentityRes{ + Id: testsutil.GenerateUUID(t), + DomainId: testsutil.GenerateUUID(t), + }, + authzResp: &magistrala.AuthorizeRes{ + Authorized: true, + }, + listObjectResp: &magistrala.ListObjectsRes{ + Policies: allowedIDs, + }, + listObjectFilterResp: &magistrala.ListObjectsRes{}, + listObjectFilterErr: svcerr.ErrAuthorization, + err: svcerr.ErrAuthorization, + }, + { + desc: "unsuccessfully with channels kind due to failed to authorize", + token: token, + memberID: testsutil.GenerateUUID(t), + memberKind: auth.ChannelsKind, + page: mggroups.Page{ + Permission: auth.ViewPermission, + ListPerms: true, + }, + idResp: &magistrala.IdentityRes{ + Id: testsutil.GenerateUUID(t), + DomainId: testsutil.GenerateUUID(t), + }, + authzResp: &magistrala.AuthorizeRes{ + Authorized: false, + }, + authzErr: svcerr.ErrAuthorization, + err: svcerr.ErrAuthorization, + }, + { + desc: "unsuccessfully with channels kind due to failed to list subjects", + token: token, + memberID: testsutil.GenerateUUID(t), + memberKind: auth.ChannelsKind, + page: mggroups.Page{ + Permission: auth.ViewPermission, + ListPerms: true, + }, + idResp: &magistrala.IdentityRes{ + Id: testsutil.GenerateUUID(t), + DomainId: testsutil.GenerateUUID(t), + }, + authzResp: &magistrala.AuthorizeRes{ + Authorized: true, + }, + listSubjectResp: &magistrala.ListSubjectsRes{}, + listSubjectErr: svcerr.ErrAuthorization, + err: svcerr.ErrAuthorization, + }, + { + desc: "unsuccessfully with channels kind due to failed to list filtered objects", + token: token, + memberID: testsutil.GenerateUUID(t), + memberKind: auth.ChannelsKind, + page: mggroups.Page{ + Permission: auth.ViewPermission, + ListPerms: true, + }, + idResp: &magistrala.IdentityRes{ + Id: testsutil.GenerateUUID(t), + DomainId: testsutil.GenerateUUID(t), + }, + authzResp: &magistrala.AuthorizeRes{ + Authorized: true, + }, + listSubjectResp: &magistrala.ListSubjectsRes{ + Policies: allowedIDs, + }, + listObjectFilterResp: &magistrala.ListObjectsRes{}, + listObjectFilterErr: svcerr.ErrAuthorization, + err: svcerr.ErrAuthorization, + }, + { + desc: "unsuccessfully with users kind due to failed to authorize", + token: token, + memberID: testsutil.GenerateUUID(t), + memberKind: auth.UsersKind, + page: 
mggroups.Page{ + Permission: auth.ViewPermission, + ListPerms: true, + }, + idResp: &magistrala.IdentityRes{ + Id: testsutil.GenerateUUID(t), + DomainId: testsutil.GenerateUUID(t), + }, + authzResp: &magistrala.AuthorizeRes{ + Authorized: false, + }, + authzErr: svcerr.ErrAuthorization, + err: svcerr.ErrAuthorization, + }, + { + desc: "unsuccessfully with users kind due to failed to list objects", + token: token, + memberID: testsutil.GenerateUUID(t), + memberKind: auth.UsersKind, + page: mggroups.Page{ + Permission: auth.ViewPermission, + ListPerms: true, + }, + idResp: &magistrala.IdentityRes{ + Id: testsutil.GenerateUUID(t), + DomainId: testsutil.GenerateUUID(t), + }, + authzResp: &magistrala.AuthorizeRes{ + Authorized: true, + }, + listObjectResp: &magistrala.ListObjectsRes{}, + listObjectErr: svcerr.ErrAuthorization, + err: svcerr.ErrAuthorization, + }, + { + desc: "unsuccessfully with users kind due to failed to list filtered objects", + token: token, + memberID: testsutil.GenerateUUID(t), + memberKind: auth.UsersKind, + page: mggroups.Page{ + Permission: auth.ViewPermission, + ListPerms: true, + }, + idResp: &magistrala.IdentityRes{ + Id: testsutil.GenerateUUID(t), + DomainId: testsutil.GenerateUUID(t), + }, + authzResp: &magistrala.AuthorizeRes{ + Authorized: true, + }, + listObjectResp: &magistrala.ListObjectsRes{ + Policies: allowedIDs, + }, + listObjectFilterResp: &magistrala.ListObjectsRes{}, + listObjectFilterErr: svcerr.ErrAuthorization, + err: svcerr.ErrAuthorization, + }, + { + desc: "unsuccessfully with invalid kind", + token: token, + memberID: testsutil.GenerateUUID(t), + memberKind: "invalid", + page: mggroups.Page{ + Permission: auth.ViewPermission, + ListPerms: true, + }, + idResp: &magistrala.IdentityRes{ + Id: testsutil.GenerateUUID(t), + DomainId: testsutil.GenerateUUID(t), + }, + err: errors.New("invalid member kind"), + }, + { + desc: "unsuccessfully with things kind due to repo error", + token: token, + memberID: testsutil.GenerateUUID(t), + memberKind: auth.ThingsKind, + page: mggroups.Page{ + Permission: auth.ViewPermission, + ListPerms: true, + }, + idResp: &magistrala.IdentityRes{ + Id: testsutil.GenerateUUID(t), + DomainId: testsutil.GenerateUUID(t), + }, + authzResp: &magistrala.AuthorizeRes{ + Authorized: true, + }, + listSubjectResp: &magistrala.ListSubjectsRes{ + Policies: allowedIDs, + }, + listObjectFilterResp: &magistrala.ListObjectsRes{ + Policies: allowedIDs, + }, + repoResp: mggroups.Page{}, + repoErr: repoerr.ErrViewEntity, + err: repoerr.ErrViewEntity, + }, + { + desc: "unsuccessfully with things kind due to failed to list permissions", + token: token, + memberID: testsutil.GenerateUUID(t), + memberKind: auth.ThingsKind, + page: mggroups.Page{ + Permission:
auth.ViewPermission, + ListPerms: true, + }, + idResp: &magistrala.IdentityRes{ + Id: testsutil.GenerateUUID(t), + DomainId: testsutil.GenerateUUID(t), + }, + authzResp: &magistrala.AuthorizeRes{ + Authorized: true, + }, + listSubjectResp: &magistrala.ListSubjectsRes{ + Policies: allowedIDs, + }, + listObjectFilterResp: &magistrala.ListObjectsRes{ + Policies: allowedIDs, + }, + repoResp: mggroups.Page{ + Groups: []mggroups.Group{ + validGroup, + validGroup, + validGroup, + }, + }, + listPermResp: &magistrala.ListPermissionsRes{}, + listPermErr: svcerr.ErrAuthorization, + err: svcerr.ErrAuthorization, + }, + { + desc: "unsuccessfully with invalid token", + token: token, + memberID: testsutil.GenerateUUID(t), + memberKind: auth.ThingsKind, + page: mggroups.Page{ + Permission: auth.ViewPermission, + ListPerms: true, + }, + idResp: &magistrala.IdentityRes{}, + idErr: svcerr.ErrAuthentication, + err: svcerr.ErrAuthentication, + }, + } + + for _, tc := range cases { + t.Run(tc.desc, func(t *testing.T) { + authcall := authsvc.On("Identify", context.Background(), &magistrala.IdentityReq{Token: tc.token}).Return(tc.idResp, tc.idErr) + authcall1 := &mock.Call{} + authcall2 := &mock.Call{} + authcall3 := &mock.Call{} + adminCheck := &mock.Call{} + switch tc.memberKind { + case auth.ThingsKind: + authcall1 = authsvc.On("Authorize", context.Background(), &magistrala.AuthorizeReq{ + Domain: tc.idResp.GetDomainId(), + SubjectType: auth.UserType, + SubjectKind: auth.UsersKind, + Subject: tc.idResp.GetId(), + Permission: auth.ViewPermission, + Object: tc.memberID, + ObjectType: auth.ThingType, + }).Return(tc.authzResp, tc.authzErr) + authcall2 = authsvc.On("ListAllSubjects", context.Background(), &magistrala.ListSubjectsReq{ + SubjectType: auth.GroupType, + Permission: auth.GroupRelation, + ObjectType: auth.ThingType, + Object: tc.memberID, + }).Return(tc.listSubjectResp, tc.listSubjectErr) + authcall3 = authsvc.On("ListAllObjects", context.Background(), &magistrala.ListObjectsReq{ + SubjectType: auth.UserType, + Subject: tc.idResp.GetId(), + Permission: tc.page.Permission, + ObjectType: auth.GroupType, + }).Return(tc.listObjectFilterResp, tc.listObjectFilterErr) + case auth.GroupsKind: + authcall1 = authsvc.On("Authorize", context.Background(), &magistrala.AuthorizeReq{ + Domain: tc.idResp.GetDomainId(), + SubjectType: auth.UserType, + SubjectKind: auth.UsersKind, + Subject: tc.idResp.GetId(), + Permission: tc.page.Permission, + Object: tc.memberID, + ObjectType: auth.GroupType, + }).Return(tc.authzResp, tc.authzErr) + authcall2 = authsvc.On("ListAllObjects", context.Background(), &magistrala.ListObjectsReq{ + SubjectType: auth.GroupType, + Subject: tc.memberID, + Permission: auth.ParentGroupRelation, + ObjectType: auth.GroupType, + }).Return(tc.listObjectResp, tc.listObjectErr) + authcall3 = authsvc.On("ListAllObjects", context.Background(), &magistrala.ListObjectsReq{ + SubjectType: auth.UserType, + Subject: tc.idResp.GetId(), + Permission: tc.page.Permission, + ObjectType: auth.GroupType, + }).Return(tc.listObjectFilterResp, tc.listObjectFilterErr) + case auth.ChannelsKind: + authcall1 = authsvc.On("Authorize", context.Background(), &magistrala.AuthorizeReq{ + Domain: tc.idResp.GetDomainId(), + SubjectType: auth.UserType, + SubjectKind: auth.UsersKind, + Subject: tc.idResp.GetId(), + Permission: auth.ViewPermission, + Object: tc.memberID, + ObjectType: auth.GroupType, + }).Return(tc.authzResp, tc.authzErr) + authcall2 = authsvc.On("ListAllSubjects", context.Background(), &magistrala.ListSubjectsReq{ + 
SubjectType: auth.GroupType, + Permission: auth.ParentGroupRelation, + ObjectType: auth.GroupType, + Object: tc.memberID, + }).Return(tc.listSubjectResp, tc.listSubjectErr) + authcall3 = authsvc.On("ListAllObjects", context.Background(), &magistrala.ListObjectsReq{ + SubjectType: auth.UserType, + Subject: tc.idResp.GetId(), + Permission: tc.page.Permission, + ObjectType: auth.GroupType, + }).Return(tc.listObjectFilterResp, tc.listObjectFilterErr) + case auth.UsersKind: + adminCheckReq := &magistrala.AuthorizeReq{ + SubjectType: auth.UserType, + Subject: tc.idResp.GetUserId(), + Permission: auth.AdminPermission, + Object: auth.MagistralaObject, + ObjectType: auth.PlatformType, + } + adminCheck = authsvc.On("Authorize", context.Background(), adminCheckReq).Return(tc.authzResp, tc.authzErr) + authReq := &magistrala.AuthorizeReq{ + Domain: tc.idResp.GetDomainId(), + SubjectType: auth.UserType, + SubjectKind: auth.UsersKind, + Subject: tc.idResp.GetId(), + Permission: auth.AdminPermission, + Object: tc.idResp.GetDomainId(), + ObjectType: auth.DomainType, + } + if tc.memberID == "" { + authReq.Domain = "" + authReq.Permission = auth.MembershipPermission + } + authcall1 = authsvc.On("Authorize", context.Background(), authReq).Return(tc.authzResp, tc.authzErr) + authcall2 = authsvc.On("ListAllObjects", context.Background(), &magistrala.ListObjectsReq{ + SubjectType: auth.UserType, + Subject: auth.EncodeDomainUserID(tc.idResp.GetDomainId(), tc.memberID), + Permission: tc.page.Permission, + ObjectType: auth.GroupType, + }).Return(tc.listObjectResp, tc.listObjectErr) + authcall3 = authsvc.On("ListAllObjects", context.Background(), &magistrala.ListObjectsReq{ + SubjectType: auth.UserType, + Subject: tc.idResp.GetId(), + Permission: tc.page.Permission, + ObjectType: auth.GroupType, + }).Return(tc.listObjectFilterResp, tc.listObjectFilterErr) + } + repocall := repo.On("RetrieveByIDs", context.Background(), mock.Anything, mock.Anything).Return(tc.repoResp, tc.repoErr) + authcall4 := authsvc.On("ListPermissions", mock.Anything, mock.Anything).Return(tc.listPermResp, tc.listPermErr) + got, err := svc.ListGroups(context.Background(), tc.token, tc.memberKind, tc.memberID, tc.page) + assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("expected error %v to contain %v", err, tc.err)) + if err == nil { + assert.NotEmpty(t, got) + } + authcall.Unset() + repocall.Unset() + switch tc.memberKind { + case auth.ThingsKind, auth.GroupsKind, auth.ChannelsKind, auth.UsersKind: + authcall1.Unset() + authcall2.Unset() + authcall3.Unset() + authcall4.Unset() + if tc.memberID == "" { + adminCheck.Unset() + } + } + }) + } +} + +func TestAssign(t *testing.T) { + repo := new(mocks.Repository) + authsvc := new(authmocks.AuthClient) + svc := groups.NewService(repo, idProvider, authsvc) + + cases := []struct { + desc string + token string + groupID string + relation string + memberKind string + memberIDs []string + idResp *magistrala.IdentityRes + idErr error + authzResp *magistrala.AuthorizeRes + authzErr error + addPoliciesRes *magistrala.AddPoliciesRes + addPoliciesErr error + repoResp mggroups.Page + repoErr error + addParentPoliciesRes *magistrala.AddPoliciesRes + addParentPoliciesErr error + deleteParentPoliciesRes *magistrala.DeletePolicyRes + deleteParentPoliciesErr error + repoParentGroupErr error + err error + }{ + { + desc: "successfully with things kind", + token: token, + groupID: testsutil.GenerateUUID(t), + relation: auth.ContributorRelation, + memberKind: auth.ThingsKind, + memberIDs: allowedIDs, + idResp: 
&magistrala.IdentityRes{ + Id: testsutil.GenerateUUID(t), + DomainId: testsutil.GenerateUUID(t), + }, + authzResp: &magistrala.AuthorizeRes{ + Authorized: true, + }, + addPoliciesRes: &magistrala.AddPoliciesRes{ + Added: true, + }, + }, + { + desc: "successfully with channels kind", + token: token, + groupID: testsutil.GenerateUUID(t), + relation: auth.ContributorRelation, + memberKind: auth.ChannelsKind, + memberIDs: allowedIDs, + idResp: &magistrala.IdentityRes{ + Id: testsutil.GenerateUUID(t), + DomainId: testsutil.GenerateUUID(t), + }, + authzResp: &magistrala.AuthorizeRes{ + Authorized: true, + }, + addPoliciesRes: &magistrala.AddPoliciesRes{ + Added: true, + }, + }, + { + desc: "successfully with groups kind", + token: token, + groupID: testsutil.GenerateUUID(t), + relation: auth.ContributorRelation, + memberKind: auth.GroupsKind, + memberIDs: allowedIDs, + idResp: &magistrala.IdentityRes{ + Id: testsutil.GenerateUUID(t), + DomainId: testsutil.GenerateUUID(t), + }, + authzResp: &magistrala.AuthorizeRes{ + Authorized: true, + }, + repoResp: mggroups.Page{ + Groups: []mggroups.Group{ + validGroup, + validGroup, + validGroup, + }, + }, + addPoliciesRes: &magistrala.AddPoliciesRes{ + Added: true, + }, + repoParentGroupErr: nil, + }, + { + desc: "successfully with users kind", + token: token, + groupID: testsutil.GenerateUUID(t), + relation: auth.ContributorRelation, + memberKind: auth.UsersKind, + memberIDs: allowedIDs, + idResp: &magistrala.IdentityRes{ + Id: testsutil.GenerateUUID(t), + DomainId: testsutil.GenerateUUID(t), + }, + authzResp: &magistrala.AuthorizeRes{ + Authorized: true, + }, + addPoliciesRes: &magistrala.AddPoliciesRes{ + Added: true, + }, + }, + { + desc: "unsuccessfully with groups kind due to repo err", + token: token, + groupID: testsutil.GenerateUUID(t), + relation: auth.ContributorRelation, + memberKind: auth.GroupsKind, + memberIDs: allowedIDs, + idResp: &magistrala.IdentityRes{ + Id: testsutil.GenerateUUID(t), + DomainId: testsutil.GenerateUUID(t), + }, + authzResp: &magistrala.AuthorizeRes{ + Authorized: true, + }, + repoResp: mggroups.Page{}, + repoErr: repoerr.ErrViewEntity, + err: repoerr.ErrViewEntity, + }, + { + desc: "unsuccessfully with groups kind due to empty page", + token: token, + groupID: testsutil.GenerateUUID(t), + relation: auth.ContributorRelation, + memberKind: auth.GroupsKind, + memberIDs: allowedIDs, + idResp: &magistrala.IdentityRes{ + Id: testsutil.GenerateUUID(t), + DomainId: testsutil.GenerateUUID(t), + }, + authzResp: &magistrala.AuthorizeRes{ + Authorized: true, + }, + repoResp: mggroups.Page{ + Groups: []mggroups.Group{}, + }, + err: errors.New("invalid group ids"), + }, + { + desc: "unsuccessfully with groups kind due to non empty parent", + token: token, + groupID: testsutil.GenerateUUID(t), + relation: auth.ContributorRelation, + memberKind: auth.GroupsKind, + memberIDs: allowedIDs, + idResp: &magistrala.IdentityRes{ + Id: testsutil.GenerateUUID(t), + DomainId: testsutil.GenerateUUID(t), + }, + authzResp: &magistrala.AuthorizeRes{ + Authorized: true, + }, + repoResp: mggroups.Page{ + Groups: []mggroups.Group{ + { + ID: testsutil.GenerateUUID(t), + Parent: testsutil.GenerateUUID(t), + }, + }, + }, + err: repoerr.ErrConflict, + }, + { + desc: "unsuccessfully with groups kind due to failed to add policies", + token: token, + groupID: testsutil.GenerateUUID(t), + relation: auth.ContributorRelation, + memberKind: auth.GroupsKind, + memberIDs: allowedIDs, + idResp: &magistrala.IdentityRes{ + Id: testsutil.GenerateUUID(t), + DomainId: 
testsutil.GenerateUUID(t), + }, + authzResp: &magistrala.AuthorizeRes{ + Authorized: true, + }, + repoResp: mggroups.Page{ + Groups: []mggroups.Group{ + validGroup, + validGroup, + validGroup, + }, + }, + addPoliciesRes: &magistrala.AddPoliciesRes{ + Added: false, + }, + addPoliciesErr: svcerr.ErrAuthorization, + err: svcerr.ErrAuthorization, + }, + { + desc: "unsuccessfully with groups kind due to failed to assign parent", + token: token, + groupID: testsutil.GenerateUUID(t), + relation: auth.ContributorRelation, + memberKind: auth.GroupsKind, + memberIDs: allowedIDs, + idResp: &magistrala.IdentityRes{ + Id: testsutil.GenerateUUID(t), + DomainId: testsutil.GenerateUUID(t), + }, + authzResp: &magistrala.AuthorizeRes{ + Authorized: true, + }, + repoResp: mggroups.Page{ + Groups: []mggroups.Group{ + validGroup, + validGroup, + validGroup, + }, + }, + addPoliciesRes: &magistrala.AddPoliciesRes{ + Added: true, + }, + repoParentGroupErr: repoerr.ErrConflict, + err: repoerr.ErrConflict, + }, + { + desc: "unsuccessfully with groups kind due to failed to assign parent and delete policy", + token: token, + groupID: testsutil.GenerateUUID(t), + relation: auth.ContributorRelation, + memberKind: auth.GroupsKind, + memberIDs: allowedIDs, + idResp: &magistrala.IdentityRes{ + Id: testsutil.GenerateUUID(t), + DomainId: testsutil.GenerateUUID(t), + }, + authzResp: &magistrala.AuthorizeRes{ + Authorized: true, + }, + repoResp: mggroups.Page{ + Groups: []mggroups.Group{ + validGroup, + validGroup, + validGroup, + }, + }, + addPoliciesRes: &magistrala.AddPoliciesRes{ + Added: true, + }, + deleteParentPoliciesRes: &magistrala.DeletePolicyRes{ + Deleted: false, + }, + deleteParentPoliciesErr: svcerr.ErrAuthorization, + repoParentGroupErr: repoerr.ErrConflict, + err: apiutil.ErrRollbackTx, + }, + { + desc: "unsuccessfully with invalid kind", + token: token, + groupID: testsutil.GenerateUUID(t), + relation: auth.ContributorRelation, + memberKind: "invalid", + memberIDs: allowedIDs, + idResp: &magistrala.IdentityRes{ + Id: testsutil.GenerateUUID(t), + DomainId: testsutil.GenerateUUID(t), + }, + authzResp: &magistrala.AuthorizeRes{ + Authorized: true, + }, + err: errors.New("invalid member kind"), + }, + { + desc: "unsuccessfully with invalid token", + token: token, + groupID: testsutil.GenerateUUID(t), + relation: auth.ContributorRelation, + memberKind: auth.UsersKind, + memberIDs: allowedIDs, + idResp: &magistrala.IdentityRes{}, + idErr: svcerr.ErrAuthentication, + err: svcerr.ErrAuthentication, + }, + { + desc: "unsuccessfully with failed to authorize", + token: token, + groupID: testsutil.GenerateUUID(t), + relation: auth.ContributorRelation, + memberKind: auth.ThingsKind, + memberIDs: allowedIDs, + idResp: &magistrala.IdentityRes{ + Id: testsutil.GenerateUUID(t), + DomainId: testsutil.GenerateUUID(t), + }, + authzResp: &magistrala.AuthorizeRes{ + Authorized: false, + }, + authzErr: svcerr.ErrAuthorization, + err: svcerr.ErrAuthorization, + }, + { + desc: "unsuccessfully with failed to add policies", + token: token, + groupID: testsutil.GenerateUUID(t), + relation: auth.ContributorRelation, + memberKind: auth.ThingsKind, + memberIDs: allowedIDs, + idResp: &magistrala.IdentityRes{ + Id: testsutil.GenerateUUID(t), + DomainId: testsutil.GenerateUUID(t), + }, + authzResp: &magistrala.AuthorizeRes{ + Authorized: true, + }, + addPoliciesRes: &magistrala.AddPoliciesRes{ + Added: false, + }, + addPoliciesErr: svcerr.ErrAuthorization, + err: svcerr.ErrAuthorization, + }, + } + + for _, tc := range cases { + 
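// Each case runs as a subtest; the switch below registers the policy requests expected for the member kind under test. +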
t.Run(tc.desc, func(t *testing.T) { + authcall := authsvc.On("Identify", context.Background(), &magistrala.IdentityReq{Token: tc.token}).Return(tc.idResp, tc.idErr) + authcall1 := authsvc.On("Authorize", context.Background(), &magistrala.AuthorizeReq{ + Domain: tc.idResp.GetDomainId(), + SubjectType: auth.UserType, + SubjectKind: auth.UsersKind, + Subject: tc.idResp.GetId(), + Permission: auth.EditPermission, + Object: tc.groupID, + ObjectType: auth.GroupType, + }).Return(tc.authzResp, tc.authzErr) + retrieveByIDsCall := &mock.Call{} + deletePoliciesCall := &mock.Call{} + assignParentCall := &mock.Call{} + policies := magistrala.AddPoliciesReq{} + switch tc.memberKind { + case auth.ThingsKind: + for _, memberID := range tc.memberIDs { + policies.AddPoliciesReq = append(policies.AddPoliciesReq, &magistrala.AddPolicyReq{ + Domain: tc.idResp.GetDomainId(), + SubjectType: auth.GroupType, + SubjectKind: auth.ChannelsKind, + Subject: tc.groupID, + Relation: tc.relation, + ObjectType: auth.ThingType, + Object: memberID, + }) + } + case auth.GroupsKind: + retrieveByIDsCall = repo.On("RetrieveByIDs", context.Background(), mggroups.Page{PageMeta: mggroups.PageMeta{Limit: 1<<63 - 1}}, mock.Anything).Return(tc.repoResp, tc.repoErr) + var deletePolicies magistrala.DeletePoliciesReq + for _, group := range tc.repoResp.Groups { + policies.AddPoliciesReq = append(policies.AddPoliciesReq, &magistrala.AddPolicyReq{ + Domain: tc.idResp.GetDomainId(), + SubjectType: auth.GroupType, + Subject: tc.groupID, + Relation: auth.ParentGroupRelation, + ObjectType: auth.GroupType, + Object: group.ID, + }) + deletePolicies.DeletePoliciesReq = append(deletePolicies.DeletePoliciesReq, &magistrala.DeletePolicyReq{ + Domain: tc.idResp.GetDomainId(), + SubjectType: auth.GroupType, + Subject: tc.groupID, + Relation: auth.ParentGroupRelation, + ObjectType: auth.GroupType, + Object: group.ID, + }) + } + deletePoliciesCall = authsvc.On("DeletePolicies", context.Background(), &deletePolicies).Return(tc.deleteParentPoliciesRes, tc.deleteParentPoliciesErr) + assignParentCall = repo.On("AssignParentGroup", context.Background(), tc.groupID, tc.memberIDs).Return(tc.repoParentGroupErr) + case auth.ChannelsKind: + for _, memberID := range tc.memberIDs { + policies.AddPoliciesReq = append(policies.AddPoliciesReq, &magistrala.AddPolicyReq{ + Domain: tc.idResp.GetDomainId(), + SubjectType: auth.GroupType, + Subject: memberID, + Relation: tc.relation, + ObjectType: auth.GroupType, + Object: tc.groupID, + }) + } + case auth.UsersKind: + for _, memberID := range tc.memberIDs { + policies.AddPoliciesReq = append(policies.AddPoliciesReq, &magistrala.AddPolicyReq{ + Domain: tc.idResp.GetDomainId(), + SubjectType: auth.UserType, + Subject: auth.EncodeDomainUserID(tc.idResp.GetDomainId(), memberID), + Relation: tc.relation, + ObjectType: auth.GroupType, + Object: tc.groupID, + }) + } + } + authcall2 := authsvc.On("AddPolicies", context.Background(), &policies).Return(tc.addPoliciesRes, tc.addPoliciesErr) + err := svc.Assign(context.Background(), tc.token, tc.groupID, tc.relation, tc.memberKind, tc.memberIDs...) 
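+ // Assign wraps lower-level failures, so the assertion checks error containment rather than strict equality.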
+ assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("expected error %v to contain %v", err, tc.err)) + authcall.Unset() + authcall1.Unset() + authcall2.Unset() + if tc.memberKind == auth.GroupsKind { + retrieveByIDsCall.Unset() + deletePoliciesCall.Unset() + assignParentCall.Unset() + } + }) + } +} + +func TestUnassign(t *testing.T) { + repo := new(mocks.Repository) + authsvc := new(authmocks.AuthClient) + svc := groups.NewService(repo, idProvider, authsvc) + + cases := []struct { + desc string + token string + groupID string + relation string + memberKind string + memberIDs []string + idResp *magistrala.IdentityRes + idErr error + authzResp *magistrala.AuthorizeRes + authzErr error + deletePoliciesRes *magistrala.DeletePolicyRes + deletePoliciesErr error + repoResp mggroups.Page + repoErr error + addParentPoliciesRes *magistrala.AddPoliciesRes + addParentPoliciesErr error + deleteParentPoliciesRes *magistrala.DeletePolicyRes + deleteParentPoliciesErr error + repoParentGroupErr error + err error + }{ + { + desc: "successfully with things kind", + token: token, + groupID: testsutil.GenerateUUID(t), + relation: auth.ContributorRelation, + memberKind: auth.ThingsKind, + memberIDs: allowedIDs, + idResp: &magistrala.IdentityRes{ + Id: testsutil.GenerateUUID(t), + DomainId: testsutil.GenerateUUID(t), + }, + authzResp: &magistrala.AuthorizeRes{ + Authorized: true, + }, + deletePoliciesRes: &magistrala.DeletePolicyRes{ + Deleted: true, + }, + }, + { + desc: "successfully with channels kind", + token: token, + groupID: testsutil.GenerateUUID(t), + relation: auth.ContributorRelation, + memberKind: auth.ChannelsKind, + memberIDs: allowedIDs, + idResp: &magistrala.IdentityRes{ + Id: testsutil.GenerateUUID(t), + DomainId: testsutil.GenerateUUID(t), + }, + authzResp: &magistrala.AuthorizeRes{ + Authorized: true, + }, + deletePoliciesRes: &magistrala.DeletePolicyRes{ + Deleted: true, + }, + }, + { + desc: "successfully with groups kind", + token: token, + groupID: testsutil.GenerateUUID(t), + relation: auth.ContributorRelation, + memberKind: auth.GroupsKind, + memberIDs: allowedIDs, + idResp: &magistrala.IdentityRes{ + Id: testsutil.GenerateUUID(t), + DomainId: testsutil.GenerateUUID(t), + }, + authzResp: &magistrala.AuthorizeRes{ + Authorized: true, + }, + repoResp: mggroups.Page{ + Groups: []mggroups.Group{ + validGroup, + validGroup, + validGroup, + }, + }, + deletePoliciesRes: &magistrala.DeletePolicyRes{ + Deleted: true, + }, + repoParentGroupErr: nil, + }, + { + desc: "successfully with users kind", + token: token, + groupID: testsutil.GenerateUUID(t), + relation: auth.ContributorRelation, + memberKind: auth.UsersKind, + memberIDs: allowedIDs, + idResp: &magistrala.IdentityRes{ + Id: testsutil.GenerateUUID(t), + DomainId: testsutil.GenerateUUID(t), + }, + authzResp: &magistrala.AuthorizeRes{ + Authorized: true, + }, + deletePoliciesRes: &magistrala.DeletePolicyRes{ + Deleted: true, + }, + }, + { + desc: "unsuccessfully with groups kind due to repo err", + token: token, + groupID: testsutil.GenerateUUID(t), + relation: auth.ContributorRelation, + memberKind: auth.GroupsKind, + memberIDs: allowedIDs, + idResp: &magistrala.IdentityRes{ + Id: testsutil.GenerateUUID(t), + DomainId: testsutil.GenerateUUID(t), + }, + authzResp: &magistrala.AuthorizeRes{ + Authorized: true, + }, + repoResp: mggroups.Page{}, + repoErr: repoerr.ErrViewEntity, + err: repoerr.ErrViewEntity, + }, + { + desc: "unsuccessfully with groups kind due to empty page", + token: token, + groupID: testsutil.GenerateUUID(t), + 
relation: auth.ContributorRelation, + memberKind: auth.GroupsKind, + memberIDs: allowedIDs, + idResp: &magistrala.IdentityRes{ + Id: testsutil.GenerateUUID(t), + DomainId: testsutil.GenerateUUID(t), + }, + authzResp: &magistrala.AuthorizeRes{ + Authorized: true, + }, + repoResp: mggroups.Page{ + Groups: []mggroups.Group{}, + }, + err: errors.New("invalid group ids"), + }, + { + desc: "unsuccessfully with groups kind due to non empty parent", + token: token, + groupID: testsutil.GenerateUUID(t), + relation: auth.ContributorRelation, + memberKind: auth.GroupsKind, + memberIDs: allowedIDs, + idResp: &magistrala.IdentityRes{ + Id: testsutil.GenerateUUID(t), + DomainId: testsutil.GenerateUUID(t), + }, + authzResp: &magistrala.AuthorizeRes{ + Authorized: true, + }, + repoResp: mggroups.Page{ + Groups: []mggroups.Group{ + { + ID: testsutil.GenerateUUID(t), + Parent: testsutil.GenerateUUID(t), + }, + }, + }, + err: repoerr.ErrConflict, + }, + { + desc: "unsuccessfully with groups kind due to failed to delete policies", + token: token, + groupID: testsutil.GenerateUUID(t), + relation: auth.ContributorRelation, + memberKind: auth.GroupsKind, + memberIDs: allowedIDs, + idResp: &magistrala.IdentityRes{ + Id: testsutil.GenerateUUID(t), + DomainId: testsutil.GenerateUUID(t), + }, + authzResp: &magistrala.AuthorizeRes{ + Authorized: true, + }, + repoResp: mggroups.Page{ + Groups: []mggroups.Group{ + validGroup, + validGroup, + validGroup, + }, + }, + deletePoliciesRes: &magistrala.DeletePolicyRes{ + Deleted: false, + }, + deletePoliciesErr: svcerr.ErrAuthorization, + err: svcerr.ErrAuthorization, + }, + { + desc: "unsuccessfully with groups kind due to failed to unassign parent", + token: token, + groupID: testsutil.GenerateUUID(t), + relation: auth.ContributorRelation, + memberKind: auth.GroupsKind, + memberIDs: allowedIDs, + idResp: &magistrala.IdentityRes{ + Id: testsutil.GenerateUUID(t), + DomainId: testsutil.GenerateUUID(t), + }, + authzResp: &magistrala.AuthorizeRes{ + Authorized: true, + }, + repoResp: mggroups.Page{ + Groups: []mggroups.Group{ + validGroup, + validGroup, + validGroup, + }, + }, + deletePoliciesRes: &magistrala.DeletePolicyRes{ + Deleted: true, + }, + repoParentGroupErr: repoerr.ErrConflict, + err: repoerr.ErrConflict, + }, + { + desc: "unsuccessfully with groups kind due to failed to unassign parent and add policy", + token: token, + groupID: testsutil.GenerateUUID(t), + relation: auth.ContributorRelation, + memberKind: auth.GroupsKind, + memberIDs: allowedIDs, + idResp: &magistrala.IdentityRes{ + Id: testsutil.GenerateUUID(t), + DomainId: testsutil.GenerateUUID(t), + }, + authzResp: &magistrala.AuthorizeRes{ + Authorized: true, + }, + repoResp: mggroups.Page{ + Groups: []mggroups.Group{ + validGroup, + validGroup, + validGroup, + }, + }, + deletePoliciesRes: &magistrala.DeletePolicyRes{ + Deleted: true, + }, + repoParentGroupErr: repoerr.ErrConflict, + addParentPoliciesRes: &magistrala.AddPoliciesRes{ + Added: false, + }, + addParentPoliciesErr: svcerr.ErrAuthorization, + err: repoerr.ErrConflict, + }, + { + desc: "unsuccessfully with invalid kind", + token: token, + groupID: testsutil.GenerateUUID(t), + relation: auth.ContributorRelation, + memberKind: "invalid", + memberIDs: allowedIDs, + idResp: &magistrala.IdentityRes{ + Id: testsutil.GenerateUUID(t), + DomainId: testsutil.GenerateUUID(t), + }, + authzResp: &magistrala.AuthorizeRes{ + Authorized: true, + }, + err: errors.New("invalid member kind"), + }, + { + desc: "unsuccessfully with invalid token", + token: token, + groupID:
testsutil.GenerateUUID(t), + relation: auth.ContributorRelation, + memberKind: auth.UsersKind, + memberIDs: allowedIDs, + idResp: &magistrala.IdentityRes{}, + idErr: svcerr.ErrAuthentication, + err: svcerr.ErrAuthentication, + }, + { + desc: "unsuccessfully with failed to authorize", + token: token, + groupID: testsutil.GenerateUUID(t), + relation: auth.ContributorRelation, + memberKind: auth.ThingsKind, + memberIDs: allowedIDs, + idResp: &magistrala.IdentityRes{ + Id: testsutil.GenerateUUID(t), + DomainId: testsutil.GenerateUUID(t), + }, + authzResp: &magistrala.AuthorizeRes{ + Authorized: false, + }, + authzErr: svcerr.ErrAuthorization, + err: svcerr.ErrAuthorization, + }, + { + desc: "unsuccessfully with failed to delete policies", + token: token, + groupID: testsutil.GenerateUUID(t), + relation: auth.ContributorRelation, + memberKind: auth.ThingsKind, + memberIDs: allowedIDs, + idResp: &magistrala.IdentityRes{ + Id: testsutil.GenerateUUID(t), + DomainId: testsutil.GenerateUUID(t), + }, + authzResp: &magistrala.AuthorizeRes{ + Authorized: true, + }, + deletePoliciesRes: &magistrala.DeletePolicyRes{ + Deleted: false, + }, + deletePoliciesErr: svcerr.ErrAuthorization, + err: svcerr.ErrAuthorization, + }, + } + + for _, tc := range cases { + t.Run(tc.desc, func(t *testing.T) { + authcall := authsvc.On("Identify", context.Background(), &magistrala.IdentityReq{Token: tc.token}).Return(tc.idResp, tc.idErr) + authcall1 := authsvc.On("Authorize", context.Background(), &magistrala.AuthorizeReq{ + Domain: tc.idResp.GetDomainId(), + SubjectType: auth.UserType, + SubjectKind: auth.UsersKind, + Subject: tc.idResp.GetId(), + Permission: auth.EditPermission, + Object: tc.groupID, + ObjectType: auth.GroupType, + }).Return(tc.authzResp, tc.authzErr) + retrieveByIDsCall := &mock.Call{} + addPoliciesCall := &mock.Call{} + assignParentCall := &mock.Call{} + policies := magistrala.DeletePoliciesReq{} + switch tc.memberKind { + case auth.ThingsKind: + for _, memberID := range tc.memberIDs { + policies.DeletePoliciesReq = append(policies.DeletePoliciesReq, &magistrala.DeletePolicyReq{ + Domain: tc.idResp.GetDomainId(), + SubjectType: auth.GroupType, + SubjectKind: auth.ChannelsKind, + Subject: tc.groupID, + Relation: tc.relation, + ObjectType: auth.ThingType, + Object: memberID, + }) + } + case auth.GroupsKind: + retrieveByIDsCall = repo.On("RetrieveByIDs", context.Background(), mggroups.Page{PageMeta: mggroups.PageMeta{Limit: 1<<63 - 1}}, mock.Anything).Return(tc.repoResp, tc.repoErr) + var addPolicies magistrala.AddPoliciesReq + for _, group := range tc.repoResp.Groups { + policies.DeletePoliciesReq = append(policies.DeletePoliciesReq, &magistrala.DeletePolicyReq{ + Domain: tc.idResp.GetDomainId(), + SubjectType: auth.GroupType, + Subject: tc.groupID, + Relation: auth.ParentGroupRelation, + ObjectType: auth.GroupType, + Object: group.ID, + }) + addPolicies.AddPoliciesReq = append(addPolicies.AddPoliciesReq, &magistrala.AddPolicyReq{ + Domain: tc.idResp.GetDomainId(), + SubjectType: auth.GroupType, + Subject: tc.groupID, + Relation: auth.ParentGroupRelation, + ObjectType: auth.GroupType, + Object: group.ID, + }) + } + addPoliciesCall = authsvc.On("AddPolicies", context.Background(), &addPolicies).Return(tc.addParentPoliciesRes, tc.addParentPoliciesErr) + assignParentCall = repo.On("UnassignParentGroup", context.Background(), tc.groupID, tc.memberIDs).Return(tc.repoParentGroupErr) + case auth.ChannelsKind: + for _, memberID := range tc.memberIDs { + policies.DeletePoliciesReq = append(policies.DeletePoliciesReq,
&magistrala.DeletePolicyReq{ + Domain: tc.idResp.GetDomainId(), + SubjectType: auth.GroupType, + Subject: memberID, + Relation: tc.relation, + ObjectType: auth.GroupType, + Object: tc.groupID, + }) + } + case auth.UsersKind: + for _, memberID := range tc.memberIDs { + policies.DeletePoliciesReq = append(policies.DeletePoliciesReq, &magistrala.DeletePolicyReq{ + Domain: tc.idResp.GetDomainId(), + SubjectType: auth.UserType, + Subject: auth.EncodeDomainUserID(tc.idResp.GetDomainId(), memberID), + Relation: tc.relation, + ObjectType: auth.GroupType, + Object: tc.groupID, + }) + } + } + authcall2 := authsvc.On("DeletePolicies", context.Background(), &policies).Return(tc.deletePoliciesRes, tc.deletePoliciesErr) + err := svc.Unassign(context.Background(), tc.token, tc.groupID, tc.relation, tc.memberKind, tc.memberIDs...) + assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("expected error %v to contain %v", err, tc.err)) + authcall.Unset() + authcall1.Unset() + authcall2.Unset() + if tc.memberKind == auth.GroupsKind { + retrieveByIDsCall.Unset() + addPoliciesCall.Unset() + assignParentCall.Unset() + } + }) + } +} + +func TestDeleteGroup(t *testing.T) { + repo := new(mocks.Repository) + authsvc := new(authmocks.AuthClient) + svc := groups.NewService(repo, idProvider, authsvc) + + cases := []struct { + desc string + token string + groupID string + idResp *magistrala.IdentityRes + idErr error + authzResp *magistrala.AuthorizeRes + authzErr error + deletePoliciesRes *magistrala.DeletePolicyRes + deletePoliciesErr error + repoErr error + err error + }{ + { + desc: "successfully", + token: token, + groupID: testsutil.GenerateUUID(t), + idResp: &magistrala.IdentityRes{ + Id: testsutil.GenerateUUID(t), + DomainId: testsutil.GenerateUUID(t), + }, + authzResp: &magistrala.AuthorizeRes{ + Authorized: true, + }, + deletePoliciesRes: &magistrala.DeletePolicyRes{ + Deleted: true, + }, + }, + { + desc: "unsuccessfully with invalid token", + token: token, + groupID: testsutil.GenerateUUID(t), + idResp: &magistrala.IdentityRes{}, + deletePoliciesRes: &magistrala.DeletePolicyRes{}, + idErr: svcerr.ErrAuthentication, + err: svcerr.ErrAuthentication, + }, + { + desc: "unsuccessfully with authorization error", + token: token, + groupID: testsutil.GenerateUUID(t), + idResp: &magistrala.IdentityRes{ + Id: testsutil.GenerateUUID(t), + DomainId: testsutil.GenerateUUID(t), + }, + authzResp: &magistrala.AuthorizeRes{ + Authorized: false, + }, + deletePoliciesRes: &magistrala.DeletePolicyRes{}, + authzErr: svcerr.ErrAuthorization, + err: svcerr.ErrAuthorization, + }, + { + desc: "unsuccessfully with failed to remove policy", + token: token, + groupID: testsutil.GenerateUUID(t), + idResp: &magistrala.IdentityRes{ + Id: testsutil.GenerateUUID(t), + DomainId: testsutil.GenerateUUID(t), + }, + authzResp: &magistrala.AuthorizeRes{ + Authorized: true, + }, + deletePoliciesRes: &magistrala.DeletePolicyRes{ + Deleted: false, + }, + deletePoliciesErr: svcerr.ErrAuthorization, + err: svcerr.ErrAuthorization, + }, + { + desc: "unsuccessfully with repo err", + token: token, + groupID: testsutil.GenerateUUID(t), + idResp: &magistrala.IdentityRes{ + Id: testsutil.GenerateUUID(t), + DomainId: testsutil.GenerateUUID(t), + }, + authzResp: &magistrala.AuthorizeRes{ + Authorized: true, + }, + deletePoliciesRes: &magistrala.DeletePolicyRes{ + Deleted: true, + }, + repoErr: repoerr.ErrNotFound, + err: repoerr.ErrNotFound, + }, + } + + for _, tc := range cases { + t.Run(tc.desc, func(t *testing.T) { + authcall := authsvc.On("Identify", 
context.Background(), &magistrala.IdentityReq{Token: tc.token}).Return(tc.idResp, tc.idErr) + authcall1 := authsvc.On("Authorize", context.Background(), &magistrala.AuthorizeReq{ + Domain: tc.idResp.GetDomainId(), + SubjectType: auth.UserType, + SubjectKind: auth.UsersKind, + Subject: tc.idResp.GetId(), + Permission: auth.DeletePermission, + Object: tc.groupID, + ObjectType: auth.GroupType, + }).Return(tc.authzResp, tc.authzErr) + authcall2 := authsvc.On("DeleteEntityPolicies", context.Background(), &magistrala.DeleteEntityPoliciesReq{ + EntityType: auth.GroupType, + Id: tc.groupID, + }).Return(tc.deletePoliciesRes, tc.deletePoliciesErr) + repocall := repo.On("Delete", context.Background(), tc.groupID).Return(tc.repoErr) + err := svc.DeleteGroup(context.Background(), tc.token, tc.groupID) + assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("expected error %v to contain %v", err, tc.err)) + authcall.Unset() + authcall1.Unset() + authcall2.Unset() + repocall.Unset() + }) + } +} diff --git a/pkg/groups/status.go b/pkg/groups/status.go new file mode 100644 index 0000000..d967dbc --- /dev/null +++ b/pkg/groups/status.go @@ -0,0 +1,58 @@ +// Copyright (c) Abstract Machines +// SPDX-License-Identifier: Apache-2.0 + +package groups + +import svcerr "github.com/absmach/magistrala/pkg/errors/service" + +// Status represents Group status. +type Status uint8 + +// Possible Group status values. +const ( + // EnabledStatus represents enabled Group. + EnabledStatus Status = iota + // DisabledStatus represents disabled Group. + DisabledStatus + + // AllStatus is used for querying purposes to list groups irrespective + // of their status - both active and inactive. It is never stored in the + // database as the actual Group status and should always be the largest + // value in this enumeration. + AllStatus +) + +// String representation of the possible status values. +const ( + Disabled = "disabled" + Enabled = "enabled" + All = "all" + Unknown = "unknown" +) + +// String converts group status to string literal. +func (s Status) String() string { + switch s { + case DisabledStatus: + return Disabled + case EnabledStatus: + return Enabled + case AllStatus: + return All + default: + return Unknown + } +} + +// ToStatus converts string value to a valid Group status. 
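+// +// For example (illustrative use of the names defined above): +// +//	s, _ := ToStatus(Enabled) // s == EnabledStatus +//	_ = s.String()            // "enabled"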
+func ToStatus(status string) (Status, error) { + switch status { + case Disabled: + return DisabledStatus, nil + case Enabled: + return EnabledStatus, nil + case All: + return AllStatus, nil + } + return Status(0), svcerr.ErrInvalidStatus +} diff --git a/pkg/groups/status_test.go b/pkg/groups/status_test.go new file mode 100644 index 0000000..7bc33a7 --- /dev/null +++ b/pkg/groups/status_test.go @@ -0,0 +1,50 @@ +// Copyright (c) Abstract Machines +// SPDX-License-Identifier: Apache-2.0 + +package groups_test + +import ( + "testing" + + svcerr "github.com/absmach/magistrala/pkg/errors/service" + "github.com/absmach/mg-contrib/pkg/groups" + "github.com/stretchr/testify/assert" +) + +func TestStatus_String(t *testing.T) { + cases := []struct { + name string + status groups.Status + expected string + }{ + {"Enabled", groups.EnabledStatus, "enabled"}, + {"Disabled", groups.DisabledStatus, "disabled"}, + {"All", groups.AllStatus, "all"}, + {"Unknown", groups.Status(100), "unknown"}, + } + + for _, tc := range cases { + got := tc.status.String() + assert.Equal(t, tc.expected, got, "Status.String() = %v, expected %v", got, tc.expected) + } +} + +func TestToStatus(t *testing.T) { + cases := []struct { + name string + status string + gstatus groups.Status + err error + }{ + {"Enabled", "enabled", groups.EnabledStatus, nil}, + {"Disabled", "disabled", groups.DisabledStatus, nil}, + {"All", "all", groups.AllStatus, nil}, + {"Unknown", "unknown", groups.Status(0), svcerr.ErrInvalidStatus}, + } + + for _, tc := range cases { + got, err := groups.ToStatus(tc.status) + assert.Equal(t, tc.err, err, "ToStatus() error = %v, expected %v", err, tc.err) + assert.Equal(t, tc.gstatus, got, "ToStatus() = %v, expected %v", got, tc.gstatus) + } +} diff --git a/pkg/groups/tracing/doc.go b/pkg/groups/tracing/doc.go new file mode 100644 index 0000000..6a419f3 --- /dev/null +++ b/pkg/groups/tracing/doc.go @@ -0,0 +1,12 @@ +// Copyright (c) Abstract Machines +// SPDX-License-Identifier: Apache-2.0 + +// Package tracing provides tracing instrumentation for Magistrala Users Groups service. +// +// This package provides tracing middleware for Magistrala Users Groups service. +// It can be used to trace incoming requests and add tracing capabilities to +// Magistrala Users Groups service. +// +// For more details about tracing instrumentation for Magistrala messaging refer +// to the documentation at https://docs.magistrala.abstractmachines.fr/tracing/. +package tracing diff --git a/pkg/groups/tracing/tracing.go b/pkg/groups/tracing/tracing.go new file mode 100644 index 0000000..afe5ea4 --- /dev/null +++ b/pkg/groups/tracing/tracing.go @@ -0,0 +1,112 @@ +// Copyright (c) Abstract Machines +// SPDX-License-Identifier: Apache-2.0 + +package tracing + +import ( + "context" + + "github.com/absmach/magistrala/pkg/groups" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/trace" +) + +var _ groups.Service = (*tracingMiddleware)(nil) + +type tracingMiddleware struct { + tracer trace.Tracer + gsvc groups.Service +} + +// New returns a new group service with tracing capabilities. +func New(gsvc groups.Service, tracer trace.Tracer) groups.Service { + return &tracingMiddleware{tracer, gsvc} +} + +// CreateGroup traces the "CreateGroup" operation of the wrapped groups.Service. 
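+// +// Every method below follows the same pattern: start a span named after the +// operation (with the entity ID as an attribute where available), defer its +// end, and delegate the call to the wrapped service.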
+func (tm *tracingMiddleware) CreateGroup(ctx context.Context, token, kind string, g groups.Group) (groups.Group, error) { + ctx, span := tm.tracer.Start(ctx, "svc_create_group") + defer span.End() + + return tm.gsvc.CreateGroup(ctx, token, kind, g) +} + +// ViewGroup traces the "ViewGroup" operation of the wrapped groups.Service. +func (tm *tracingMiddleware) ViewGroup(ctx context.Context, token, id string) (groups.Group, error) { + ctx, span := tm.tracer.Start(ctx, "svc_view_group", trace.WithAttributes(attribute.String("id", id))) + defer span.End() + + return tm.gsvc.ViewGroup(ctx, token, id) +} + +// ViewGroupPerms traces the "ViewGroupPerms" operation of the wrapped groups.Service. +func (tm *tracingMiddleware) ViewGroupPerms(ctx context.Context, token, id string) ([]string, error) { + ctx, span := tm.tracer.Start(ctx, "svc_view_group_perms", trace.WithAttributes(attribute.String("id", id))) + defer span.End() + + return tm.gsvc.ViewGroupPerms(ctx, token, id) +} + +// ListGroups traces the "ListGroups" operation of the wrapped groups.Service. +func (tm *tracingMiddleware) ListGroups(ctx context.Context, token, memberKind, memberID string, gm groups.Page) (groups.Page, error) { + ctx, span := tm.tracer.Start(ctx, "svc_list_groups") + defer span.End() + + return tm.gsvc.ListGroups(ctx, token, memberKind, memberID, gm) +} + +// ListMembers traces the "ListMembers" operation of the wrapped groups.Service. +func (tm *tracingMiddleware) ListMembers(ctx context.Context, token, groupID, permission, memberKind string) (groups.MembersPage, error) { + ctx, span := tm.tracer.Start(ctx, "svc_list_members", trace.WithAttributes(attribute.String("groupID", groupID))) + defer span.End() + + return tm.gsvc.ListMembers(ctx, token, groupID, permission, memberKind) +} + +// UpdateGroup traces the "UpdateGroup" operation of the wrapped groups.Service. +func (tm *tracingMiddleware) UpdateGroup(ctx context.Context, token string, g groups.Group) (groups.Group, error) { + ctx, span := tm.tracer.Start(ctx, "svc_update_group") + defer span.End() + + return tm.gsvc.UpdateGroup(ctx, token, g) +} + +// EnableGroup traces the "EnableGroup" operation of the wrapped groups.Service. +func (tm *tracingMiddleware) EnableGroup(ctx context.Context, token, id string) (groups.Group, error) { + ctx, span := tm.tracer.Start(ctx, "svc_enable_group", trace.WithAttributes(attribute.String("id", id))) + defer span.End() + + return tm.gsvc.EnableGroup(ctx, token, id) +} + +// DisableGroup traces the "DisableGroup" operation of the wrapped groups.Service. +func (tm *tracingMiddleware) DisableGroup(ctx context.Context, token, id string) (groups.Group, error) { + ctx, span := tm.tracer.Start(ctx, "svc_disable_group", trace.WithAttributes(attribute.String("id", id))) + defer span.End() + + return tm.gsvc.DisableGroup(ctx, token, id) +} + +// Assign traces the "Assign" operation of the wrapped groups.Service. +func (tm *tracingMiddleware) Assign(ctx context.Context, token, groupID, relation, memberKind string, memberIDs ...string) error { + ctx, span := tm.tracer.Start(ctx, "svc_assign", trace.WithAttributes(attribute.String("id", groupID))) + defer span.End() + + return tm.gsvc.Assign(ctx, token, groupID, relation, memberKind, memberIDs...) +} + +// Unassign traces the "Unassign" operation of the wrapped groups.Service.
+func (tm *tracingMiddleware) Unassign(ctx context.Context, token, groupID, relation, memberKind string, memberIDs ...string) error { + ctx, span := tm.tracer.Start(ctx, "svc_unassign", trace.WithAttributes(attribute.String("id", groupID))) + defer span.End() + + return tm.gsvc.Unassign(ctx, token, groupID, relation, memberKind, memberIDs...) +} + +// DeleteGroup traces the "DeleteGroup" operation of the wrapped groups.Service. +func (tm *tracingMiddleware) DeleteGroup(ctx context.Context, token, id string) error { + ctx, span := tm.tracer.Start(ctx, "svc_delete_group", trace.WithAttributes(attribute.String("id", id))) + defer span.End() + + return tm.gsvc.DeleteGroup(ctx, token, id) +} diff --git a/pkg/testsutil/common.go b/pkg/testsutil/common.go new file mode 100644 index 0000000..f6048a8 --- /dev/null +++ b/pkg/testsutil/common.go @@ -0,0 +1,19 @@ +// Copyright (c) Abstract Machines +// SPDX-License-Identifier: Apache-2.0 + +package testsutil + +import ( + "fmt" + "testing" + + "github.com/absmach/magistrala/pkg/uuid" + "github.com/stretchr/testify/require" +) + +func GenerateUUID(t *testing.T) string { + idProvider := uuid.New() + ulid, err := idProvider.ID() + require.Nil(t, err, fmt.Sprintf("unexpected error: %s", err)) + return ulid +} diff --git a/provision/README.md b/provision/README.md new file mode 100644 index 0000000..73f6c86 --- /dev/null +++ b/provision/README.md @@ -0,0 +1,194 @@ +# Provision service + +The Provision service provides an HTTP API to interact with [Magistrala][magistrala]. It is used to set up the initial application configuration, i.e. the things, channels, connections and certificates required for a specific use case, and is especially useful for gateway provisioning. + +For gateways to communicate with [Magistrala][magistrala], configuration is required (MQTT host, thing, channels, certificates...). To fetch this configuration, the gateway sends a request to the [Bootstrap][bootstrap] service, providing `<external_id>` and `<external_key>` in the request. To make such a request, you can use the [Agent][agent] service on the gateway. + +To create a bootstrap configuration, you can use either the [Bootstrap][bootstrap] or the `Provision` service. The [Magistrala UI][mgxui] uses the [Bootstrap][bootstrap] service for creating gateway configurations. The `Provision` service provides an easy way of provisioning your gateways, i.e. creating a bootstrap configuration along with as many things and channels as your setup requires. + +You can also use the Provision service to create certificates for each thing. Each service running on a gateway may require more than one thing and channel for communication. For example, if you are running the [Agent][agent] and [Export][export] services on a gateway, you will need two channels for `Agent` (`data` and `control`), one channel for `Export`, and one thing. Additionally, if you enable mTLS, each service will need its own thing and certificate to access [Magistrala][magistrala]. Your setup can require any number of things and channels; this kind of setup is called a `provision layout`. + +The Provision service provides a way of specifying this `provision layout` and creating a setup according to it by serving requests on the `/mapping` endpoint. The provision layout is configured in [config.toml](configs/config.toml). + +## Configuration + +The service is configured using the environment variables presented in the following table. Note that any unset variables will be replaced with their default values.
+ +| Variable | Description | Default | +| ----------------------------------- | ------------------------------------------------- | ------------------------------------ | +| MG_PROVISION_LOG_LEVEL | Service log level | debug | +| MG_PROVISION_USER | User (email) for accessing Magistrala | <user@example.com> | +| MG_PROVISION_PASS | Magistrala password | user123 | +| MG_PROVISION_API_KEY | Magistrala authentication token | | +| MG_PROVISION_CONFIG_FILE | Provision config file | config.toml | +| MG_PROVISION_HTTP_PORT | Provision service listening port | 9016 | +| MG_PROVISION_ENV_CLIENTS_TLS | Magistrala SDK TLS verification | false | +| MG_PROVISION_SERVER_CERT | Magistrala gRPC secure server cert | | +| MG_PROVISION_SERVER_KEY | Magistrala gRPC secure server key | | +| MG_PROVISION_USERS_LOCATION | Users service URL | <http://users:9002> | +| MG_PROVISION_THINGS_LOCATION | Things service URL | <http://things:9000> | +| MG_PROVISION_BS_SVC_URL | Magistrala Bootstrap service URL | <http://bootstrap:9013> | +| MG_PROVISION_CERTS_SVC_URL | Certificates service URL | <http://certs:9019> | +| MG_PROVISION_X509_PROVISIONING | Should X509 client cert be provisioned | false | +| MG_PROVISION_BS_CONFIG_PROVISIONING | Should thing config be saved in Bootstrap service | true | +| MG_PROVISION_BS_AUTO_WHITELIST | Should thing be auto whitelisted | true | +| MG_PROVISION_BS_CONTENT | Bootstrap service configs content, JSON format | {} | +| MG_PROVISION_CERTS_RSA_BITS | Certificate RSA bits parameter | 4096 | +| MG_PROVISION_CERTS_HOURS_VALID | Number of hours that certificate is valid | "2400h" | +| MG_SEND_TELEMETRY | Send telemetry to magistrala call home server | true | + +By default, a call to the `/mapping` endpoint will create one thing and two channels (`control` and `data`) and connect them. If a different provision layout is required, the [config](docker/configs/config.toml) file can be used in addition to environment variables. + +For running Provision as an add-on in the Docker composition, environment variables are more suitable. Environment variables are set in [.env](.env). + +Configuration can also be specified in [config.toml](configs/config.toml). The config file can specify all the settings that environment variables can configure; in addition, the `/mapping` endpoint provision layout can be configured there. + +In `config.toml` we can list the things and channels that we want to create, and the connections to be made between them; this is the provision layout. + +Metadata can be whatever suits your needs, except that at least one thing needs to have `external_id` (which is populated with the value from the [request](#example)). The thing that has `external_id` will be used for creating the bootstrap configuration, which can be fetched with [Agent][agent]. For channel metadata, `type` is reserved for `control` and `data`, which are used with [Agent][agent]. + +An example of a provision layout is shown below: + +```toml +[[things]] + name = "thing" + + [things.metadata] + external_id = "xxxxxx" + + +[[channels]] + name = "control-channel" + + [channels.metadata] + type = "control" + +[[channels]] + name = "data-channel" + + [channels.metadata] + type = "data" + +[[channels]] + name = "export-channel" + + [channels.metadata] + type = "data" +``` + +## Authentication + +In order to create the necessary entities, the Provision service needs to authenticate against Magistrala.
Authentication credentials can be passed to the service in an environment variable or in the config file, either as a Magistrala user and password or as an API token that can be issued on `/users/tokens/issue`. + +Additionally, a user token or API token can be passed in the `Authorization` header; this authentication takes precedence over the others. + +- `username`, `password` - (`MG_PROVISION_USER`, `MG_PROVISION_PASS` in [.env](../.env), `mg_user`, `mg_pass` in [config.toml](../docker/addons/provision/configs/config.toml)) +- API Key - (`MG_PROVISION_API_KEY` in [.env](../.env) or [config.toml](../docker/addons/provision/configs/config.toml)) +- `Authorization: Bearer Token` - request authorization header containing either a user token or an API key. + +## Running + +The Provision service can be run standalone or as an add-on to the core Docker composition. + +Standalone: + +```bash +MG_PROVISION_BS_SVC_URL=http://localhost:9013 \ +MG_PROVISION_THINGS_LOCATION=http://localhost:9000 \ +MG_PROVISION_USERS_LOCATION=http://localhost:9002 \ +MG_PROVISION_CONFIG_FILE=docker/addons/provision/configs/config.toml \ +build/magistrala-provision +``` + +Docker composition: + +```bash +docker compose -f docker/addons/provision/docker-compose.yml up +``` + +If credentials or an API token are passed in the configuration file or environment variables, a call to the `/mapping` endpoint doesn't require the `Authorization` header: + +```bash +curl -s -S -X POST http://localhost:<MG_PROVISION_HTTP_PORT>/mapping -H 'Content-Type: application/json' -d '{"external_id": "33:52:77:99:43", "external_key": "223334fw2"}' +``` + +If the Provision service is not deployed with credentials or an API key, or you want to use a user other than the one set in the environment (or config file): + +```bash +curl -s -S -X POST http://localhost:<MG_PROVISION_HTTP_PORT>/mapping -H "Authorization: Bearer <token|api_key>" -H 'Content-Type: application/json' -d '{"external_id": "<external_id>", "external_key": "<external_key>"}' +``` + +If you want to specify a thing name different from the one in `config.toml`, you can specify the post data as: + +```json +{ + "name": "<name>", + "external_id": "<external_id>", + "external_key": "<external_key>" +} +``` + +The response contains the created things, channels and certificates, if any: + +```json +{ + "things": [ + { + "id": "c22b0c0f-8c03-40da-a06b-37ed3a72c8d1", + "name": "thing", + "key": "007cce56-e0eb-40d6-b2b9-ed348a97d1eb", + "metadata": { + "external_id": "33:52:79:C3:43" + } + } + ], + "channels": [ + { + "id": "064c680e-181b-4b58-975e-6983313a5170", + "name": "control-channel", + "metadata": { + "type": "control" + } + }, + { + "id": "579da92d-6078-4801-a18a-dd1cfa2aa44f", + "name": "data-channel", + "metadata": { + "type": "data" + } + } + ], + "whitelisted": { + "c22b0c0f-8c03-40da-a06b-37ed3a72c8d1": true + } +} +``` + +## Certificates + +The Provision service has a `/certs` endpoint that can be used to generate certificates for things when mTLS is required: + +- `users_token` - user authentication token or API token +- `thing_id` - ID of the thing for which the certificate is going to be generated + +```bash +curl -s -X POST http://localhost:8190/certs -H "Authorization: Bearer <users_token>" -H 'Content-Type: application/json' -d '{"thing_id": "<thing_id>", "ttl":"2400h" }' +``` + +```json +{ + "thing_cert": "-----BEGIN 
CERTIFICATE-----\nMIIEmDCCA4CgAwIBAgIQCZ0NOq2oKLo+XftbAu0TfzANBgkqhkiG9w0BAQsFADBX\nMRIwEAYDVQQDDAlsb2NhbGhvc3QxETAPBgNVBAoMCE1haW5mbHV4MQwwCgYDVQQL\nDANJb1QxIDAeBgkqhkiG9w0BCQEWEWluZm9AbWFpbmZsdXguY29tMB4XDTIwMDYw\nNTEyMzc1M1oXDTIwMDkxMzEyMzc1M1owVTERMA8GA1UEChMITWFpbmZsdXgxETAP\nBgNVBAsTCG1haW5mbHV4MS0wKwYDVQQDEyQyYmZlYmZmMC05ODZhLTQ3ZTAtOGQ3\nYS00YTRiN2UyYjU3OGUwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCn\nWvTuOIdhqOLEREcEJqfQAtDoYu3rUDijOffXuWFZgNqfZTGmoD5ZqJXxwbZ4tCST\npdSteHtyr7JXnPJQN1dsslU+q3haKjFoZRc39/7u4/8XCTwlqbMl9YVcwqS+FLkM\niLSyyqzryP7Y8H8cidTKg56p5JALaEKfzZS6Km3G+CCinR6hNNW9ckWsy29a0/9E\nMAUtM+Lsk5OjsHzOnWruuqHsCx4ODI5aJQaMC1qntkbXkht0WDiwAt9SDQ3uLWru\nAoSJDK9a6EgR3a0Jf7ZiVPiwlZNjrB/I5OQyFDGqcmSAl2rdJqPkmaDXKKFyL1cG\nMIyHv62QzJoMdRoXu20lxyGxAvEjQNVHux4LA3dbf/85nEVTI2uP8crMf2Jnzbg5\n9zF+iTMJGpUlatCyK2RJS/mvHbbUIf5Ro3VbcPHbgFroJ7qMFz0Fc5kYY8IdwXjG\nlyG9MobKEO2CfBGRjPmCuTQq2HcuOy7F6KfQf3HToI8MmC5hBtCmTNbV8I3GIjWA\n/xJQLm2pVZ41QhrnNGtuqAYoe3Zt6OldxGRcoAj7KlIpYcPZ55PJ6mWcV6dB9Fnl\n5mYOwQL8jtfybbGWvqJldhTxUqm7/EbAaF0Qjmh4oOHMl2xADrmYzJHvf0llwr6g\noRQuzqxPi0aW3tkFNsm63NX1Ab5BXFQhMSj5+82blwIDAQABo2IwYDAOBgNVHQ8B\nAf8EBAMCB4AwHQYDVR0lBBYwFAYIKwYBBQUHAwIGCCsGAQUFBwMBMA4GA1UdDgQH\nBAUBAgMEBjAfBgNVHSMEGDAWgBRs4xR91qEjNRGmw391xS7x6Tc+8jANBgkqhkiG\n9w0BAQsFAAOCAQEAphLT8PjawRRWswU1B5oWnnqeTllnvGB88sjDPLAG0UiBlDLX\nwoPiBVPWuYV+MMJuaREgheYF1Ahx4Jrfy9stFDU7B99ON1T58oM1aKEq4rKc+/Ke\nyxrAFTonclC0LNaaOvpZZjsPFWr2muTQO8XHiS8icw3BLxEzoF+5aJ8ihtxRtfKL\nUvtHDqC6IPAbSUcvqyjrFh3RrTUAyGOzW12IEWSXP9DLwoiLPwJ6kCVoXdG/asjz\nUpk/jj7AUn9oJNF8nUbyhdOnmeJ2z0x1ylgYrIAxvGzm8zs+NEVN67CrBYKwstlN\nvw7DRQsCvGJjZzWj28VV3FGLtXFgu52bFZNBww==\n-----END CERTIFICATE-----\n", + "thing_cert_key": "-----BEGIN RSA PRIVATE KEY-----\nMIIJJwIBAAKCAgEAp1r07jiHYajixERHBCan0ALQ6GLt61A4ozn317lhWYDan2Ux\npqA+WaiV8cG2eLQkk6XUrXh7cq+yV5zyUDdXbLJVPqt4WioxaGUXN/f+7uP/Fwk8\nJamzJfWFXMKkvhS5DIi0ssqs68j+2PB/HInUyoOeqeSQC2hCn82Uuiptxvggop0e\noTTVvXJFrMtvWtP/RDAFLTPi7JOTo7B8zp1q7rqh7AseDgyOWiUGjAtap7ZG15Ib\ndFg4sALfUg0N7i1q7gKEiQyvWuhIEd2tCX+2YlT4sJWTY6wfyOTkMhQxqnJkgJdq\n3Saj5Jmg1yihci9XBjCMh7+tkMyaDHUaF7ttJcchsQLxI0DVR7seCwN3W3//OZxF\nUyNrj/HKzH9iZ824OfcxfokzCRqVJWrQsitkSUv5rx221CH+UaN1W3Dx24Ba6Ce6\njBc9BXOZGGPCHcF4xpchvTKGyhDtgnwRkYz5grk0Kth3Ljsuxein0H9x06CPDJgu\nYQbQpkzW1fCNxiI1gP8SUC5tqVWeNUIa5zRrbqgGKHt2bejpXcRkXKAI+ypSKWHD\n2eeTyeplnFenQfRZ5eZmDsEC/I7X8m2xlr6iZXYU8VKpu/xGwGhdEI5oeKDhzJds\nQA65mMyR739JZcK+oKEULs6sT4tGlt7ZBTbJutzV9QG+QVxUITEo+fvNm5cCAwEA\nAQKCAgAmCIfNc89gpG8Ux6eUC+zrWxh7F7CWX97fSZdH0XuMSbplqyvDgHtrCOM6\n1BlSCS6e13skCVOU1tUjECoJjOoza7vvyCxL4XblEMRcFeI8DFi2tYST0qNCJzAt\nypaCFFeRv6fBUkpGM6GnT9Czfad8drkiRy1tSj6J7sC0JlxYcZ+JFUgWvtksesHW\n6UzfSXqj1n32reoOdeOBueRDWIcqxgNyj3w/GR9o4S1BunrZzpT+/Nd8c2g+qAh0\nrz7ROEUq3iucseNQN6XZWZWvqPScGE+EYhni9wUqNMqfjvNSlzi7+K1yoQtyMm/Z\nNgSq3JNcdsAZQbiCRd1ko2BQsGm3ZBnbsAJ1Dxcn+i9nF5DT/ddWjUWin6LYWuUM\n/0Bqfv3etlrFuP6yxc8bPEMX0ucJg4yVxdkDrm1tYlJ+ANEQoOlZqhngvjz0f8uO\nOtEcDLmiG5VG6Yl72UtWIw+ALnKc5U7ib43Qve0bDAKR5zlHODcRetN9BCMvpekY\nOA4hohkllTP25xmMzLokBqY9n38zEt74kJOp67VKMvhoF7QkrLOfKWCRJjFL7/9I\nHDa6jb31INA9Wu+p/2LIa6I1SUYnMvCUqISgF2hBG9Q9S9TZvKnYUvfurhFS9jZv\n18sxW7IFYWmQyioo+gsAmfKLolJtLl9hCmTfYi7oqCh/EtZdIQKCAQEA0Umkp0Uu\nimVilLjgYGTWLcg8T3NWaELQzb2HYRXSzEq/M8GOtEr7TR7noJBm8fcgl55HEnPl\ni4cEJrr+VprzGbdMtXjHbCD+I945GA6vv3khg7mbqS9a1Uw6gjrQEZgZQU+/IVCu\n9Pbvx8Af32xaBWuN2cFzC7Z6iB815LPc2O5qyZ3+3nEUPah+Z+a9WEeTR6M0hy5c\nkkaRqhehugHDgqMRWGt8GfsFOmaR13kvfFfKadPRPkaGkftCSKBMWjrU4uX7aulm\nD7k4VDbnXIBMhI039+0znSkhZdcV1zk6qwBYn9TtZ11PTlspFPjtPxqS5M6IGflw\nsXkZGv4rZ5CkiQKCAQEAzLVdw2qw/8rWGsCV39EKp7hXL
vp7+FuodPvX1L55lWB0\nvmSOldGcNvb2ZsK3RNvgteb8VfKRgaY6waeN5Qm1UXazsOX4F+GThPGHstdNuzkt\nJofRQQHQVR3npZbCngSkSZdahQ9SjiLIDKn8baPN8I8HfpJ4oHLUvkayavbch1kJ\nYWUfGtVKxHGX5m/nnxLdgbJEx9Q+3Qa7DDHuxTqsEqhkk0R0Ganred34HjpDNMs6\nV95HFNolW3yKfuHETKA1bLhej+XdMa11Ts5hBVGCMnnT07WcGhxtyK2dSa656SyT\ngT9+Hd1VWZ/KPpAkQmH9boOr2ihE+oAXiZ4D1t53HwKCAQAD0cA7fTu4Mtl1tVoC\n6FQwSbMwD/7HsFB3MLpDv041hDexDhs4lxW29pVrjLcUO1pQ6gaKA6twvGoK+uah\nVfqRwZKYzTd2dbOtm+SW183FRMSjzsNUdxTFR7rZnZEmgQwU8Quf5AUNW2RM1Oi/\n/w41gxz3mFwtHotl6IvnPJEPNGqme0enb5Da/zQvWTqjXcsGR6gxv1rZIIiP/hZp\nepbCz48FehCtuLMDudN3hzKipkd/Xuo2pLrX9ynigWpjSyePbHsGHHRMXSj2AHqA\naab71EftMlr6x0FgxmgToWu8qyjy4cPjWwSTfX5mb5SEzktX+ZzqPG8eDgOzRmgs\nX6thAoIBADL3kQG/hZQaL1Z3zpjsFggOKH7E1KrQP0/pCCKqzeC4JDjnFm0MxCUX\nNd/96N1XFUqU2QyZGUs7VPO0QOrekOtYb4LCrxNbEXyPGicX3f2YTbqDJEFYL0OR\n74PV1ly7cR/1dA8e8oH6/O3SQMwXdYXIRqhn1Wq1TGyXc4KYNe3o6CH8qFLo+fWR\nBq3T/MopS0coWGGcYY5sR5PQts8aPY9jp67W40UkfkFYV5dHEEaLttn7uJzjd1ug\n1Waj1VjypnqMKNcQ9xKQSl21mohVc+IXXPsgA16o51iIiVm4DAeXFp6ebUsIOWDY\nHOWYw75XYV7rn5TwY8Qusi2MTw5nUycCggEAB/45U0LW7ZGpks/aF/BeGaSWiLIG\nodBWUjRQ4w+Le/pTC8Ci9fiidxuCDH6TQbsUTGKOk7GsfncWHTQJogaMyO26IJ1N\nmYGgK2JJvs7PKyIkocPDVD/Yh0gIzQIE92ZdyXUT21pIYKDUB9e3p0fy/+E0pyeI\nsmsV8oaLr4tZRY1cMogI+pvtUUferbLQmZHhFd9X3m3RslR43Dl1qpYQyzE3x/a3\nWA2NJZbJhh+LiAKzqk7swXOqrTrmXuzLcjMG+T/3lizrbLLuKjQrf+eehlpw0db0\nHVVvkMLOP5ZH/ImkmvOZJY7xxup89VV7LD7TfMKwXafOrjMDdvTAYPtgxw==\n-----END RSA PRIVATE KEY-----\n" +} +``` + +[magistrala]: https://github.com/absmach/magistrala +[bootstrap]: https://github.com/absmach/magistrala/tree/master/bootstrap +[export]: https://github.com/absmach/export +[agent]: https://github.com/absmach/agent +[mgxui]: https://github.com/absmach/magistrala/ui diff --git a/provision/api/doc.go b/provision/api/doc.go new file mode 100644 index 0000000..2424852 --- /dev/null +++ b/provision/api/doc.go @@ -0,0 +1,6 @@ +// Copyright (c) Abstract Machines +// SPDX-License-Identifier: Apache-2.0 + +// Package api contains API-related concerns: endpoint definitions, middlewares +// and all resource representations. 
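+// +// A minimal wiring sketch (illustrative; assumes a configured provision.Config +// cfg, a Magistrala SDK instance mgsdk, a *slog.Logger logger, and an +// instanceID string): +// +//	svc := provision.New(cfg, mgsdk, logger) +//	handler := api.MakeHandler(svc, logger, instanceID) +//	err := http.ListenAndServe(":9016", handler)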
+package api diff --git a/provision/api/endpoint.go b/provision/api/endpoint.go new file mode 100644 index 0000000..db9ac6d --- /dev/null +++ b/provision/api/endpoint.go @@ -0,0 +1,54 @@ +// Copyright (c) Abstract Machines +// SPDX-License-Identifier: Apache-2.0 + +package api + +import ( + "context" + + "github.com/absmach/magistrala/pkg/apiutil" + "github.com/absmach/magistrala/pkg/errors" + "github.com/absmach/mg-contrib/provision" + "github.com/go-kit/kit/endpoint" +) + +func doProvision(svc provision.Service) endpoint.Endpoint { + return func(_ context.Context, request interface{}) (interface{}, error) { + req := request.(provisionReq) + if err := req.validate(); err != nil { + return nil, errors.Wrap(apiutil.ErrValidation, err) + } + + res, err := svc.Provision(req.token, req.Name, req.ExternalID, req.ExternalKey) + if err != nil { + return nil, err + } + + provisionResponse := provisionRes{ + Things: res.Things, + Channels: res.Channels, + ClientCert: res.ClientCert, + ClientKey: res.ClientKey, + CACert: res.CACert, + Whitelisted: res.Whitelisted, + } + + return provisionResponse, nil + } +} + +func getMapping(svc provision.Service) endpoint.Endpoint { + return func(_ context.Context, request interface{}) (interface{}, error) { + req := request.(mappingReq) + if err := req.validate(); err != nil { + return nil, errors.Wrap(apiutil.ErrValidation, err) + } + + res, err := svc.Mapping(req.token) + if err != nil { + return nil, err + } + + return mappingRes{Data: res}, nil + } +} diff --git a/provision/api/endpoint_test.go b/provision/api/endpoint_test.go new file mode 100644 index 0000000..4bcf310 --- /dev/null +++ b/provision/api/endpoint_test.go @@ -0,0 +1,210 @@ +// Copyright (c) Abstract Machines +// SPDX-License-Identifier: Apache-2.0 + +package api_test + +import ( + "fmt" + "io" + "net/http" + "net/http/httptest" + "strings" + "testing" + + mglog "github.com/absmach/magistrala/logger" + "github.com/absmach/magistrala/pkg/apiutil" + svcerr "github.com/absmach/magistrala/pkg/errors/service" + "github.com/absmach/mg-contrib/pkg/testsutil" + "github.com/absmach/mg-contrib/provision" + "github.com/absmach/mg-contrib/provision/api" + "github.com/absmach/mg-contrib/provision/mocks" + "github.com/stretchr/testify/assert" +) + +var ( + validToken = "valid" + validContenType = "application/json" + validID = testsutil.GenerateUUID(&testing.T{}) +) + +type testRequest struct { + client *http.Client + method string + url string + token string + contentType string + body io.Reader +} + +func (tr testRequest) make() (*http.Response, error) { + req, err := http.NewRequest(tr.method, tr.url, tr.body) + if err != nil { + return nil, err + } + + if tr.token != "" { + req.Header.Set("Authorization", apiutil.BearerPrefix+tr.token) + } + + if tr.contentType != "" { + req.Header.Set("Content-Type", tr.contentType) + } + + return tr.client.Do(req) +} + +func newProvisionServer() (*httptest.Server, *mocks.Service) { + svc := new(mocks.Service) + + logger := mglog.NewMock() + mux := api.MakeHandler(svc, logger, "test") + return httptest.NewServer(mux), svc +} + +func TestProvision(t *testing.T) { + is, svc := newProvisionServer() + + cases := []struct { + desc string + token string + data string + contentType string + status int + svcErr error + }{ + { + desc: "valid request", + token: validToken, + data: fmt.Sprintf(`{"name": "test", "external_id": "%s", "external_key": "%s"}`, validID, validID), + status: http.StatusCreated, + contentType: validContenType, + svcErr: nil, + }, + { + desc: "request with 
empty external id", + token: validToken, + data: fmt.Sprintf(`{"name": "test", "external_key": "%s"}`, validID), + status: http.StatusBadRequest, + contentType: validContenType, + svcErr: nil, + }, + { + desc: "request with empty external key", + token: validToken, + data: fmt.Sprintf(`{"name": "test", "external_id": "%s"}`, validID), + status: http.StatusBadRequest, + contentType: validContenType, + svcErr: nil, + }, + { + desc: "empty token", + token: "", + data: fmt.Sprintf(`{"name": "test", "external_id": "%s", "external_key": "%s"}`, validID, validID), + status: http.StatusCreated, + contentType: validContenType, + svcErr: nil, + }, + { + desc: "invalid content type", + token: validToken, + data: fmt.Sprintf(`{"name": "test", "external_id": "%s", "external_key": "%s"}`, validID, validID), + status: http.StatusUnsupportedMediaType, + contentType: "text/plain", + svcErr: nil, + }, + { + desc: "invalid request", + token: validToken, + data: `data`, + status: http.StatusBadRequest, + contentType: validContenType, + svcErr: nil, + }, + { + desc: "service error", + token: validToken, + data: fmt.Sprintf(`{"name": "test", "external_id": "%s", "external_key": "%s"}`, validID, validID), + status: http.StatusForbidden, + contentType: validContenType, + svcErr: svcerr.ErrAuthorization, + }, + } + + for _, tc := range cases { + t.Run(tc.desc, func(t *testing.T) { + repocall := svc.On("Provision", tc.token, "test", validID, validID).Return(provision.Result{}, tc.svcErr) + req := testRequest{ + client: is.Client(), + method: http.MethodPost, + url: is.URL + "/mapping", + token: tc.token, + contentType: tc.contentType, + body: strings.NewReader(tc.data), + } + + resp, err := req.make() + assert.Nil(t, err, tc.desc) + assert.Equal(t, tc.status, resp.StatusCode, tc.desc) + repocall.Unset() + }) + } +} + +func TestMapping(t *testing.T) { + is, svc := newProvisionServer() + + cases := []struct { + desc string + token string + contentType string + status int + svcErr error + }{ + { + desc: "valid request", + token: validToken, + status: http.StatusOK, + contentType: validContenType, + svcErr: nil, + }, + { + desc: "empty token", + token: "", + status: http.StatusUnauthorized, + contentType: validContenType, + svcErr: nil, + }, + { + desc: "invalid content type", + token: validToken, + status: http.StatusUnsupportedMediaType, + contentType: "text/plain", + svcErr: nil, + }, + { + desc: "service error", + token: validToken, + status: http.StatusForbidden, + contentType: validContenType, + svcErr: svcerr.ErrAuthorization, + }, + } + + for _, tc := range cases { + t.Run(tc.desc, func(t *testing.T) { + repocall := svc.On("Mapping", tc.token).Return(map[string]interface{}{}, tc.svcErr) + req := testRequest{ + client: is.Client(), + method: http.MethodGet, + url: is.URL + "/mapping", + token: tc.token, + contentType: tc.contentType, + } + + resp, err := req.make() + assert.Nil(t, err, tc.desc) + assert.Equal(t, tc.status, resp.StatusCode, tc.desc) + repocall.Unset() + }) + } +} diff --git a/provision/api/logging.go b/provision/api/logging.go new file mode 100644 index 0000000..eea62a9 --- /dev/null +++ b/provision/api/logging.go @@ -0,0 +1,77 @@ +// Copyright (c) Abstract Machines +// SPDX-License-Identifier: Apache-2.0 + +//go:build !test + +package api + +import ( + "log/slog" + "time" + + "github.com/absmach/mg-contrib/provision" +) + +var _ provision.Service = (*loggingMiddleware)(nil) + +type loggingMiddleware struct { + logger *slog.Logger + svc provision.Service +} + +// NewLoggingMiddleware adds logging 
facilities to the core service. +func NewLoggingMiddleware(svc provision.Service, logger *slog.Logger) provision.Service { + return &loggingMiddleware{logger, svc} +} + +func (lm *loggingMiddleware) Provision(token, name, externalID, externalKey string) (res provision.Result, err error) { + defer func(begin time.Time) { + args := []any{ + slog.String("duration", time.Since(begin).String()), + slog.String("name", name), + slog.String("external_id", externalID), + } + if err != nil { + args = append(args, slog.Any("error", err)) + lm.logger.Warn("Provision failed", args...) + return + } + lm.logger.Info("Provision completed successfully", args...) + }(time.Now()) + + return lm.svc.Provision(token, name, externalID, externalKey) +} + +func (lm *loggingMiddleware) Cert(token, thingID, duration string) (cert, key string, err error) { + defer func(begin time.Time) { + args := []any{ + slog.String("duration", time.Since(begin).String()), + slog.String("thing_id", thingID), + slog.String("ttl", duration), + } + if err != nil { + args = append(args, slog.Any("error", err)) + lm.logger.Warn("Thing certificate failed to create", args...) + return + } + lm.logger.Info("Thing certificate created successfully", args...) + }(time.Now()) + + return lm.svc.Cert(token, thingID, duration) +} + +func (lm *loggingMiddleware) Mapping(token string) (res map[string]interface{}, err error) { + defer func(begin time.Time) { + args := []any{ + slog.String("duration", time.Since(begin).String()), + } + if err != nil { + args = append(args, slog.Any("error", err)) + lm.logger.Warn("Mapping failed", args...) + return + } + lm.logger.Info("Mapping completed successfully", args...) + }(time.Now()) + + return lm.svc.Mapping(token) +} diff --git a/provision/api/requests.go b/provision/api/requests.go new file mode 100644 index 0000000..323b98e --- /dev/null +++ b/provision/api/requests.go @@ -0,0 +1,40 @@ +// Copyright (c) Abstract Machines +// SPDX-License-Identifier: Apache-2.0 + +package api + +import "github.com/absmach/magistrala/pkg/apiutil" + +type provisionReq struct { + token string + Name string `json:"name"` + ExternalID string `json:"external_id"` + ExternalKey string `json:"external_key"` +} + +func (req provisionReq) validate() error { + if req.ExternalID == "" { + return apiutil.ErrMissingID + } + + if req.ExternalKey == "" { + return apiutil.ErrBearerKey + } + + if req.Name == "" { + return apiutil.ErrMissingName + } + + return nil +} + +type mappingReq struct { + token string +} + +func (req mappingReq) validate() error { + if req.token == "" { + return apiutil.ErrBearerToken + } + return nil +} diff --git a/provision/api/requests_test.go b/provision/api/requests_test.go new file mode 100644 index 0000000..c7f12c9 --- /dev/null +++ b/provision/api/requests_test.go @@ -0,0 +1,86 @@ +// Copyright (c) Abstract Machines +// SPDX-License-Identifier: Apache-2.0 + +package api + +import ( + "fmt" + "testing" + + "github.com/absmach/magistrala/pkg/apiutil" + "github.com/absmach/magistrala/pkg/errors" + "github.com/absmach/mg-contrib/pkg/testsutil" + "github.com/stretchr/testify/assert" +) + +func TestProvisionReq(t *testing.T) { + cases := []struct { + desc string + req provisionReq + err error + }{ + { + desc: "valid request", + req: provisionReq{ + token: "token", + Name: "name", + ExternalID: testsutil.GenerateUUID(t), + ExternalKey: testsutil.GenerateUUID(t), + }, + err: nil, + }, + { + desc: "empty external id", + req: provisionReq{ + token: "token", + Name: "name", + ExternalID: "",
ExternalKey: testsutil.GenerateUUID(t), + }, + err: apiutil.ErrMissingID, + }, + { + desc: "empty external key", + req: provisionReq{ + token: "token", + Name: "name", + ExternalID: testsutil.GenerateUUID(t), + ExternalKey: "", + }, + err: apiutil.ErrBearerKey, + }, + } + + for _, tc := range cases { + err := tc.req.validate() + assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("%s: expected `%v` got `%v`", tc.desc, tc.err, err)) + } +} + +func TestMappingReq(t *testing.T) { + cases := []struct { + desc string + req mappingReq + err error + }{ + { + desc: "valid request", + req: mappingReq{ + token: "token", + }, + err: nil, + }, + { + desc: "empty token", + req: mappingReq{ + token: "", + }, + err: apiutil.ErrBearerToken, + }, + } + + for _, tc := range cases { + err := tc.req.validate() + assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("%s: expected `%v` got `%v`", tc.desc, tc.err, err)) + } +} diff --git a/provision/api/responses.go b/provision/api/responses.go new file mode 100644 index 0000000..87c1052 --- /dev/null +++ b/provision/api/responses.go @@ -0,0 +1,55 @@ +// Copyright (c) Abstract Machines +// SPDX-License-Identifier: Apache-2.0 + +package api + +import ( + "encoding/json" + "net/http" + + "github.com/absmach/magistrala" + sdk "github.com/absmach/magistrala/pkg/sdk/go" +) + +var _ magistrala.Response = (*provisionRes)(nil) + +type provisionRes struct { + Things []sdk.Thing `json:"things"` + Channels []sdk.Channel `json:"channels"` + ClientCert map[string]string `json:"client_cert,omitempty"` + ClientKey map[string]string `json:"client_key,omitempty"` + CACert string `json:"ca_cert,omitempty"` + Whitelisted map[string]bool `json:"whitelisted,omitempty"` +} + +func (res provisionRes) Code() int { + return http.StatusCreated +} + +func (res provisionRes) Headers() map[string]string { + return map[string]string{} +} + +func (res provisionRes) Empty() bool { + return false +} + +type mappingRes struct { + Data interface{} +} + +func (res mappingRes) Code() int { + return http.StatusOK +} + +func (res mappingRes) Headers() map[string]string { + return map[string]string{} +} + +func (res mappingRes) Empty() bool { + return false +} + +func (res mappingRes) MarshalJSON() ([]byte, error) { + return json.Marshal(res.Data) +} diff --git a/provision/api/transport.go b/provision/api/transport.go new file mode 100644 index 0000000..4485d87 --- /dev/null +++ b/provision/api/transport.go @@ -0,0 +1,76 @@ +// Copyright (c) Abstract Machines +// SPDX-License-Identifier: Apache-2.0 + +package api + +import ( + "context" + "encoding/json" + "log/slog" + "net/http" + + "github.com/absmach/magistrala" + "github.com/absmach/magistrala/pkg/apiutil" + "github.com/absmach/magistrala/pkg/errors" + "github.com/absmach/mg-contrib/pkg/api" + "github.com/absmach/mg-contrib/provision" + "github.com/go-chi/chi/v5" + kithttp "github.com/go-kit/kit/transport/http" + "github.com/prometheus/client_golang/prometheus/promhttp" +) + +const ( + contentType = "application/json" +) + +// MakeHandler returns a HTTP handler for API endpoints. 
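+// +// The returned handler serves: +// - POST /mapping - provision a gateway according to the configured layout +// - GET /mapping - retrieve the current provision layout +// - /metrics - Prometheus metrics +// - GET /health - service health check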
+func MakeHandler(svc provision.Service, logger *slog.Logger, instanceID string) http.Handler { + opts := []kithttp.ServerOption{ + kithttp.ServerErrorEncoder(apiutil.LoggingErrorEncoder(logger, api.EncodeError)), + } + + r := chi.NewRouter() + + r.Route("/mapping", func(r chi.Router) { + r.Post("/", kithttp.NewServer( + doProvision(svc), + decodeProvisionRequest, + api.EncodeResponse, + opts..., + ).ServeHTTP) + r.Get("/", kithttp.NewServer( + getMapping(svc), + decodeMappingRequest, + api.EncodeResponse, + opts..., + ).ServeHTTP) + }) + + r.Handle("/metrics", promhttp.Handler()) + r.Get("/health", magistrala.Health("provision", instanceID)) + + return r +} + +func decodeProvisionRequest(_ context.Context, r *http.Request) (interface{}, error) { + if r.Header.Get("Content-Type") != contentType { + return nil, errors.Wrap(apiutil.ErrValidation, apiutil.ErrUnsupportedContentType) + } + + req := provisionReq{token: apiutil.ExtractBearerToken(r)} + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + return nil, errors.Wrap(apiutil.ErrValidation, errors.Wrap(err, errors.ErrMalformedEntity)) + } + + return req, nil +} + +func decodeMappingRequest(_ context.Context, r *http.Request) (interface{}, error) { + if r.Header.Get("Content-Type") != contentType { + return nil, errors.Wrap(apiutil.ErrValidation, apiutil.ErrUnsupportedContentType) + } + + req := mappingReq{token: apiutil.ExtractBearerToken(r)} + + return req, nil +} diff --git a/provision/config.go b/provision/config.go new file mode 100644 index 0000000..d0f6683 --- /dev/null +++ b/provision/config.go @@ -0,0 +1,103 @@ +// Copyright (c) Abstract Machines +// SPDX-License-Identifier: Apache-2.0 + +package provision + +import ( + "fmt" + "os" + + mgclients "github.com/absmach/magistrala/pkg/clients" + "github.com/absmach/magistrala/pkg/errors" + "github.com/absmach/magistrala/pkg/groups" + "github.com/pelletier/go-toml" +) + +var errFailedToReadConfig = errors.New("failed to read config file") + +// ServiceConf represents service config. +type ServiceConf struct { + Port string `toml:"port" env:"MG_PROVISION_HTTP_PORT" envDefault:"9016"` + LogLevel string `toml:"log_level" env:"MG_PROVISION_LOG_LEVEL" envDefault:"info"` + TLS bool `toml:"tls" env:"MG_PROVISION_ENV_CLIENTS_TLS" envDefault:"false"` + ServerCert string `toml:"server_cert" env:"MG_PROVISION_SERVER_CERT" envDefault:""` + ServerKey string `toml:"server_key" env:"MG_PROVISION_SERVER_KEY" envDefault:""` + ThingsURL string `toml:"things_url" env:"MG_PROVISION_THINGS_LOCATION" envDefault:"http://localhost"` + UsersURL string `toml:"users_url" env:"MG_PROVISION_USERS_LOCATION" envDefault:"http://localhost"` + HTTPPort string `toml:"http_port" env:"MG_PROVISION_HTTP_PORT" envDefault:"9016"` + MgUser string `toml:"mg_user" env:"MG_PROVISION_USER" envDefault:"test@example.com"` + MgPass string `toml:"mg_pass" env:"MG_PROVISION_PASS" envDefault:"test"` + MgDomainID string `toml:"mg_domain_id" env:"MG_PROVISION_DOMAIN_ID" envDefault:""` + MgAPIKey string `toml:"mg_api_key" env:"MG_PROVISION_API_KEY" envDefault:""` + MgBSURL string `toml:"mg_bs_url" env:"MG_PROVISION_BS_SVC_URL" envDefault:"http://localhost:9000"` + MgCertsURL string `toml:"mg_certs_url" env:"MG_PROVISION_CERTS_SVC_URL" envDefault:"http://localhost:9019"` +} + +// Bootstrap represents the Bootstrap config. +type Bootstrap struct { + X509Provision bool `toml:"x509_provision" env:"MG_PROVISION_X509_PROVISIONING" envDefault:"false"` + Provision bool `toml:"provision" env:"MG_PROVISION_BS_CONFIG_PROVISIONING" envDefault:"true"` + AutoWhiteList bool `toml:"autowhite_list" env:"MG_PROVISION_BS_AUTO_WHITELIST" envDefault:"true"` + Content map[string]interface{} `toml:"content"` +} + +// Gateway represents the Gateway config. +type Gateway struct { + Type string `toml:"type" json:"type"` + ExternalID string `toml:"external_id" json:"external_id"` + ExternalKey string `toml:"external_key" json:"external_key"` + CtrlChannelID string `toml:"ctrl_channel_id" json:"ctrl_channel_id"` + DataChannelID string `toml:"data_channel_id" json:"data_channel_id"` + ExportChannelID string `toml:"export_channel_id" json:"export_channel_id"` + CfgID string `toml:"cfg_id" json:"cfg_id"` +} + +// Cert represents the certificate config. +type Cert struct { + TTL string `json:"ttl" toml:"ttl" env:"MG_PROVISION_CERTS_HOURS_VALID" envDefault:"2400h"` +} + +// Config struct of Provision. +type Config struct { + File string `toml:"file" env:"MG_PROVISION_CONFIG_FILE" envDefault:"config.toml"` + Server ServiceConf `toml:"server" mapstructure:"server"` + Bootstrap Bootstrap `toml:"bootstrap" mapstructure:"bootstrap"` + Things []mgclients.Client `toml:"things" mapstructure:"things"` + Channels []groups.Group `toml:"channels" mapstructure:"channels"` + Cert Cert `toml:"cert" mapstructure:"cert"` + BSContent string `env:"MG_PROVISION_BS_CONTENT" envDefault:""` + SendTelemetry bool `env:"MG_SEND_TELEMETRY" envDefault:"true"` + InstanceID string `env:"MG_MQTT_ADAPTER_INSTANCE_ID" envDefault:""` +} + +// Save - store config in a file. +func Save(c Config, file string) error { + if file == "" { + return errors.ErrEmptyPath + } + + b, err := toml.Marshal(c) + if err != nil { + return errors.Wrap(errFailedToReadConfig, err) + } + if err := os.WriteFile(file, b, 0o644); err != nil { + return fmt.Errorf("error writing toml: %w", err) + } + + return nil +} + +// Read - retrieve config from a file.
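+// +// Illustrative round trip with Save (error handling elided): +// +//	_ = Save(cfg, "config.toml") +//	cfg, _ = Read("config.toml")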
+func Read(file string) (Config, error) { + data, err := os.ReadFile(file) + if err != nil { + return Config{}, errors.Wrap(errFailedToReadConfig, err) + } + + var c Config + if err := toml.Unmarshal(data, &c); err != nil { + return Config{}, fmt.Errorf("Error unmarshaling toml: %w", err) + } + + return c, nil +} diff --git a/provision/config_test.go b/provision/config_test.go new file mode 100644 index 0000000..515fe01 --- /dev/null +++ b/provision/config_test.go @@ -0,0 +1,222 @@ +// Copyright (c) Abstract Machines +// SPDX-License-Identifier: Apache-2.0 + +package provision_test + +import ( + "fmt" + "os" + "testing" + + mgclients "github.com/absmach/magistrala/pkg/clients" + "github.com/absmach/magistrala/pkg/errors" + "github.com/absmach/magistrala/pkg/groups" + "github.com/absmach/mg-contrib/provision" + "github.com/pelletier/go-toml" + "github.com/stretchr/testify/assert" +) + +var ( + validConfig = provision.Config{ + Server: provision.ServiceConf{ + Port: "9016", + LogLevel: "info", + TLS: false, + }, + Bootstrap: provision.Bootstrap{ + X509Provision: true, + Provision: true, + AutoWhiteList: true, + Content: map[string]interface{}{ + "test": "test", + }, + }, + Things: []mgclients.Client{ + { + ID: "1234567890", + Name: "test", + Tags: []string{"test"}, + Metadata: map[string]interface{}{ + "test": "test", + }, + Permissions: []string{"test"}, + }, + }, + Channels: []groups.Group{ + { + ID: "1234567890", + Name: "test", + Metadata: map[string]interface{}{ + "test": "test", + }, + Permissions: []string{"test"}, + }, + }, + Cert: provision.Cert{}, + SendTelemetry: true, + InstanceID: "1234567890", + } + validConfigFile = "./config.toml" + invalidConfig = provision.Config{ + Bootstrap: provision.Bootstrap{ + Content: map[string]interface{}{ + "invalid": make(chan int), + }, + }, + } + invalidConfigFile = "./invalid.toml" +) + +func createInvalidConfigFile() error { + config := map[string]interface{}{ + "invalid": "invalid", + } + b, err := toml.Marshal(config) + if err != nil { + return err + } + + f, err := os.Create(invalidConfigFile) + if err != nil { + return err + } + + if _, err = f.Write(b); err != nil { + return err + } + + return nil +} + +func createValidConfigFile() error { + b, err := toml.Marshal(validConfig) + if err != nil { + return err + } + + f, err := os.Create(validConfigFile) + if err != nil { + return err + } + + if _, err = f.Write(b); err != nil { + return err + } + + return nil +} + +func TestSave(t *testing.T) { + cases := []struct { + desc string + cfg provision.Config + file string + err error + }{ + { + desc: "save valid config", + cfg: validConfig, + file: validConfigFile, + err: nil, + }, + { + desc: "save valid config with empty file name", + cfg: validConfig, + file: "", + err: errors.ErrEmptyPath, + }, + { + desc: "save empty config with valid config file", + cfg: provision.Config{}, + file: validConfigFile, + err: nil, + }, + { + desc: "save empty config with empty file name", + cfg: provision.Config{}, + file: "", + err: errors.ErrEmptyPath, + }, + { + desc: "save invalid config", + cfg: invalidConfig, + file: invalidConfigFile, + err: errors.New("failed to read config file"), + }, + } + + for _, c := range cases { + t.Run(c.desc, func(t *testing.T) { + err := provision.Save(c.cfg, c.file) + assert.True(t, errors.Contains(err, c.err), fmt.Sprintf("expected: %v, got: %v", c.err, err)) + + if err == nil { + defer func() { + if c.file != "" { + err := os.Remove(c.file) + assert.NoError(t, err) + } + }() + + cfg, err := provision.Read(c.file) + if 
c.cfg.Bootstrap.Content == nil { + c.cfg.Bootstrap.Content = map[string]interface{}{} + } + assert.Equal(t, c.err, err) + assert.Equal(t, c.cfg, cfg) + } + }) + } +} + +func TestRead(t *testing.T) { + err := createInvalidConfigFile() + assert.NoError(t, err) + + err = createValidConfigFile() + assert.NoError(t, err) + + t.Cleanup(func() { + err := os.Remove(invalidConfigFile) + assert.NoError(t, err) + err = os.Remove(validConfigFile) + assert.NoError(t, err) + }) + + cases := []struct { + desc string + file string + cfg provision.Config + err error + }{ + { + desc: "read valid config", + file: validConfigFile, + cfg: validConfig, + err: nil, + }, + { + desc: "read invalid config", + file: invalidConfigFile, + cfg: invalidConfig, + err: nil, + }, + { + desc: "read empty config", + file: "", + cfg: provision.Config{}, + err: errors.New("failed to read config file"), + }, + } + + for _, c := range cases { + t.Run(c.desc, func(t *testing.T) { + cfg, err := provision.Read(c.file) + if c.desc == "read invalid config" { + c.cfg.Bootstrap.Content = nil + } + assert.True(t, errors.Contains(err, c.err), fmt.Sprintf("expected: %v, got: %v", c.err, err)) + assert.Equal(t, c.cfg, cfg) + }) + } +} diff --git a/provision/configs/config.toml b/provision/configs/config.toml new file mode 100644 index 0000000..38455eb --- /dev/null +++ b/provision/configs/config.toml @@ -0,0 +1,47 @@ +# Copyright (c) Abstract Machines +# SPDX-License-Identifier: Apache-2.0 + +file = "config.toml" + +[bootstrap] + autowhite_list = true + content = "" + provision = true + x509_provision = false + + +[server] + LogLevel = "info" + ca_certs = "" + http_port = "8190" + mg_api_key = "" + mg_bs_url = "http://localhost:9013" + mg_certs_url = "http://localhost:9019" + mg_pass = "" + mg_user = "" + mqtt_url = "" + port = "" + server_cert = "" + server_key = "" + things_location = "http://localhost:9000" + tls = true + users_location = "" + +[[things]] + name = "thing" + + [things.metadata] + external_id = "xxxxxx" + + +[[channels]] + name = "control-channel" + + [channels.metadata] + type = "control" + +[[channels]] + name = "data-channel" + + [channels.metadata] + type = "data" diff --git a/provision/doc.go b/provision/doc.go new file mode 100644 index 0000000..e9b8552 --- /dev/null +++ b/provision/doc.go @@ -0,0 +1,6 @@ +// Copyright (c) Abstract Machines +// SPDX-License-Identifier: Apache-2.0 + +// Package provision contains domain concept definitions needed to support +// Provision service feature, i.e. automate provision process. +package provision diff --git a/provision/mocks/service.go b/provision/mocks/service.go new file mode 100644 index 0000000..efa02eb --- /dev/null +++ b/provision/mocks/service.go @@ -0,0 +1,122 @@ +// Code generated by mockery v2.43.2. DO NOT EDIT. 
+ +// Copyright (c) Abstract Machines + +package mocks + +import ( + provision "github.com/absmach/mg-contrib/provision" + mock "github.com/stretchr/testify/mock" +) + +// Service is an autogenerated mock type for the Service type +type Service struct { + mock.Mock +} + +// Cert provides a mock function with given fields: token, thingID, duration +func (_m *Service) Cert(token string, thingID string, duration string) (string, string, error) { + ret := _m.Called(token, thingID, duration) + + if len(ret) == 0 { + panic("no return value specified for Cert") + } + + var r0 string + var r1 string + var r2 error + if rf, ok := ret.Get(0).(func(string, string, string) (string, string, error)); ok { + return rf(token, thingID, duration) + } + if rf, ok := ret.Get(0).(func(string, string, string) string); ok { + r0 = rf(token, thingID, duration) + } else { + r0 = ret.Get(0).(string) + } + + if rf, ok := ret.Get(1).(func(string, string, string) string); ok { + r1 = rf(token, thingID, duration) + } else { + r1 = ret.Get(1).(string) + } + + if rf, ok := ret.Get(2).(func(string, string, string) error); ok { + r2 = rf(token, thingID, duration) + } else { + r2 = ret.Error(2) + } + + return r0, r1, r2 +} + +// Mapping provides a mock function with given fields: token +func (_m *Service) Mapping(token string) (map[string]interface{}, error) { + ret := _m.Called(token) + + if len(ret) == 0 { + panic("no return value specified for Mapping") + } + + var r0 map[string]interface{} + var r1 error + if rf, ok := ret.Get(0).(func(string) (map[string]interface{}, error)); ok { + return rf(token) + } + if rf, ok := ret.Get(0).(func(string) map[string]interface{}); ok { + r0 = rf(token) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(map[string]interface{}) + } + } + + if rf, ok := ret.Get(1).(func(string) error); ok { + r1 = rf(token) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Provision provides a mock function with given fields: token, name, externalID, externalKey +func (_m *Service) Provision(token string, name string, externalID string, externalKey string) (provision.Result, error) { + ret := _m.Called(token, name, externalID, externalKey) + + if len(ret) == 0 { + panic("no return value specified for Provision") + } + + var r0 provision.Result + var r1 error + if rf, ok := ret.Get(0).(func(string, string, string, string) (provision.Result, error)); ok { + return rf(token, name, externalID, externalKey) + } + if rf, ok := ret.Get(0).(func(string, string, string, string) provision.Result); ok { + r0 = rf(token, name, externalID, externalKey) + } else { + r0 = ret.Get(0).(provision.Result) + } + + if rf, ok := ret.Get(1).(func(string, string, string, string) error); ok { + r1 = rf(token, name, externalID, externalKey) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// NewService creates a new instance of Service. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. 
+func NewService(t interface { + mock.TestingT + Cleanup(func()) +}) *Service { + mock := &Service{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/provision/service.go b/provision/service.go new file mode 100644 index 0000000..6e49c98 --- /dev/null +++ b/provision/service.go @@ -0,0 +1,414 @@ +// Copyright (c) Abstract Machines +// SPDX-License-Identifier: Apache-2.0 + +package provision + +import ( + "encoding/json" + "fmt" + "log/slog" + + "github.com/absmach/magistrala/pkg/errors" + sdk "github.com/absmach/magistrala/pkg/sdk/go" +) + +const ( + externalIDKey = "external_id" + gateway = "gateway" + Active = 1 + + control = "control" + data = "data" + export = "export" +) + +var ( + ErrUnauthorized = errors.New("unauthorized access") + ErrFailedToCreateToken = errors.New("failed to create access token") + ErrEmptyThingsList = errors.New("things list in configuration is empty") + ErrThingUpdate = errors.New("failed to update thing") + ErrEmptyChannelsList = errors.New("channels list in configuration is empty") + ErrFailedChannelCreation = errors.New("failed to create channel") + ErrFailedChannelRetrieval = errors.New("failed to retrieve channel") + ErrFailedThingCreation = errors.New("failed to create thing") + ErrFailedThingRetrieval = errors.New("failed to retrieve thing") + ErrMissingCredentials = errors.New("missing credentials") + ErrFailedBootstrapRetrieval = errors.New("failed to retrieve bootstrap") + ErrFailedCertCreation = errors.New("failed to create certificates") + ErrFailedBootstrap = errors.New("failed to create bootstrap config") + ErrFailedBootstrapValidate = errors.New("failed to validate bootstrap config creation") + ErrGatewayUpdate = errors.New("failed to update gateway metadata") + + limit uint = 10 + offset uint = 0 +) + +var _ Service = (*provisionService)(nil) + +// Service specifies Provision service API. + +//go:generate mockery --name Service --output=./mocks --filename service.go --quiet --note "Copyright (c) Abstract Machines" +type Service interface { + // Provision provisions a new setup. Depending on the configuration, + // the following actions can be executed: + // - create a Thing based on external_id (e.g. MAC address) + // - create multiple Channels + // - create Bootstrap configuration + // - whitelist Thing in Bootstrap configuration == connect Thing to Channels + Provision(token, name, externalID, externalKey string) (Result, error) + + // Mapping returns the current configuration used for provisioning. It is + // useful for a UI to create a configuration that matches the one created + // with the Provision method. + Mapping(token string) (map[string]interface{}, error) + + // Cert creates a certificate for things that communicate over mTLS. + // A duration string is a possibly signed sequence of decimal numbers, + // each with optional fraction and a unit suffix, such as "300ms", "-1.5h" or "2h45m". + // Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h". + Cert(token, thingID, duration string) (string, string, error) +} + +type provisionService struct { + logger *slog.Logger + sdk sdk.SDK + conf Config +} + +// Result represents what is created, with additional info.
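+// +// A hypothetical marshaled Result, with purely illustrative IDs and values +// (not produced by this patch), might look like: +// +// { +// "things": [{"id": "f3b3...", "name": "gw-1"}], +// "channels": [{"id": "a9d4...", "name": "gw-1_control-channel"}], +// "client_cert": {"f3b3...": "-----BEGIN CERTIFICATE-----..."}, +// "whitelisted": {"f3b3...": true} +// }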
+type Result struct { + Things []sdk.Thing `json:"things,omitempty"` + Channels []sdk.Channel `json:"channels,omitempty"` + ClientCert map[string]string `json:"client_cert,omitempty"` + ClientKey map[string]string `json:"client_key,omitempty"` + CACert string `json:"ca_cert,omitempty"` + Whitelisted map[string]bool `json:"whitelisted,omitempty"` + Error string `json:"error,omitempty"` +} + +// New returns a new provision service. +func New(cfg Config, mgsdk sdk.SDK, logger *slog.Logger) Service { + return &provisionService{ + logger: logger, + conf: cfg, + sdk: mgsdk, + } +} + +// Mapping retrieves the current configuration. +func (ps *provisionService) Mapping(token string) (map[string]interface{}, error) { + pm := sdk.PageMetadata{ + Offset: uint64(offset), + Limit: uint64(limit), + } + + if _, err := ps.sdk.Users(pm, token); err != nil { + return map[string]interface{}{}, errors.Wrap(ErrUnauthorized, err) + } + + return ps.conf.Bootstrap.Content, nil +} + +// Provision creates a setup according to the provision +// layout specified in config.toml. +func (ps *provisionService) Provision(token, name, externalID, externalKey string) (res Result, err error) { + var channels []sdk.Channel + var things []sdk.Thing + defer ps.recover(&err, &things, &channels, &token) + + token, err = ps.createTokenIfEmpty(token) + if err != nil { + return res, errors.Wrap(ErrFailedToCreateToken, err) + } + + if len(ps.conf.Things) == 0 { + return res, ErrEmptyThingsList + } + if len(ps.conf.Channels) == 0 { + return res, ErrEmptyChannelsList + } + for _, thing := range ps.conf.Things { + // If a thing in the config contains metadata with external_id, + // set its value from the provision request. + if _, ok := thing.Metadata[externalIDKey]; ok { + thing.Metadata[externalIDKey] = externalID + } + + th := sdk.Thing{ + Metadata: thing.Metadata, + } + if name == "" { + name = thing.Name + } + th.Name = name + th, err := ps.sdk.CreateThing(th, token) + if err != nil { + res.Error = err.Error() + return res, errors.Wrap(ErrFailedThingCreation, err) + } + + // Get newly created thing (in order to get the key).
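+ // (CreateThing is assumed not to return the generated credentials, so the + // thing is re-fetched by ID; from this point on, any failure is handled by + // the deferred ps.recover call, which removes whatever was already created.)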
+ th, err = ps.sdk.Thing(th.ID, token) + if err != nil { + e := errors.Wrap(err, fmt.Errorf("thing id: %s", th.ID)) + return res, errors.Wrap(ErrFailedThingRetrieval, e) + } + things = append(things, th) + } + + for _, channel := range ps.conf.Channels { + ch := sdk.Channel{ + Name: name + "_" + channel.Name, + Metadata: sdk.Metadata(channel.Metadata), + } + ch, err := ps.sdk.CreateChannel(ch, token) + if err != nil { + return res, errors.Wrap(ErrFailedChannelCreation, err) + } + ch, err = ps.sdk.Channel(ch.ID, token) + if err != nil { + e := errors.Wrap(err, fmt.Errorf("channel id: %s", ch.ID)) + return res, errors.Wrap(ErrFailedChannelRetrieval, e) + } + channels = append(channels, ch) + } + + res = Result{ + Things: things, + Channels: channels, + Whitelisted: map[string]bool{}, + ClientCert: map[string]string{}, + ClientKey: map[string]string{}, + } + + var cert sdk.Cert + var bsConfig sdk.BootstrapConfig + for _, thing := range things { + var chanIDs []string + + for _, ch := range channels { + chanIDs = append(chanIDs, ch.ID) + } + content, err := json.Marshal(ps.conf.Bootstrap.Content) + if err != nil { + return Result{}, errors.Wrap(ErrFailedBootstrap, err) + } + + if ps.conf.Bootstrap.Provision && needsBootstrap(thing) { + bsReq := sdk.BootstrapConfig{ + ThingID: thing.ID, + ExternalID: externalID, + ExternalKey: externalKey, + Channels: chanIDs, + CACert: res.CACert, + ClientCert: cert.ClientCert, + ClientKey: cert.ClientKey, + Content: string(content), + } + bsid, err := ps.sdk.AddBootstrap(bsReq, token) + if err != nil { + return Result{}, errors.Wrap(ErrFailedBootstrap, err) + } + + bsConfig, err = ps.sdk.ViewBootstrap(bsid, token) + if err != nil { + return Result{}, errors.Wrap(ErrFailedBootstrapValidate, err) + } + } + + if ps.conf.Bootstrap.X509Provision { + var cert sdk.Cert + + cert, err = ps.sdk.IssueCert(thing.ID, ps.conf.Cert.TTL, token) + if err != nil { + e := errors.Wrap(err, fmt.Errorf("thing id: %s", thing.ID)) + return res, errors.Wrap(ErrFailedCertCreation, e) + } + + res.ClientCert[thing.ID] = cert.ClientCert + res.ClientKey[thing.ID] = cert.ClientKey + res.CACert = "" + + if needsBootstrap(thing) { + if _, err = ps.sdk.UpdateBootstrapCerts(bsConfig.ThingID, cert.ClientCert, cert.ClientKey, "", token); err != nil { + return Result{}, errors.Wrap(ErrFailedCertCreation, err) + } + } + } + + if ps.conf.Bootstrap.AutoWhiteList { + if err := ps.sdk.Whitelist(thing.ID, Active, token); err != nil { + res.Error = err.Error() + return res, ErrThingUpdate + } + res.Whitelisted[thing.ID] = true + } + } + + if err = ps.updateGateway(token, bsConfig, channels); err != nil { + return res, err + } + return res, nil +} + +func (ps *provisionService) Cert(token, thingID, ttl string) (string, string, error) { + token, err := ps.createTokenIfEmpty(token) + if err != nil { + return "", "", errors.Wrap(ErrFailedToCreateToken, err) + } + + th, err := ps.sdk.Thing(thingID, token) + if err != nil { + return "", "", errors.Wrap(ErrUnauthorized, err) + } + cert, err := ps.sdk.IssueCert(th.ID, ps.conf.Cert.TTL, token) + return cert.ClientCert, cert.ClientKey, err +} + +func (ps *provisionService) createTokenIfEmpty(token string) (string, error) { + if token != "" { + return token, nil + } + + // If no token in request is provided + // use API key provided in config file or env + if ps.conf.Server.MgAPIKey != "" { + return ps.conf.Server.MgAPIKey, nil + } + + // If no API key use username and password provided to create access token. 
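+ // The effective precedence is thus: token from the request, then the API + // key from the config, then a token created from the configured username + // and password.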
+ if ps.conf.Server.MgUser == "" || ps.conf.Server.MgPass == "" { + return token, ErrMissingCredentials + } + + u := sdk.Login{ + Identity: ps.conf.Server.MgUser, + Secret: ps.conf.Server.MgPass, + DomainID: ps.conf.Server.MgDomainID, + } + tkn, err := ps.sdk.CreateToken(u) + if err != nil { + return token, errors.Wrap(ErrFailedToCreateToken, err) + } + + return tkn.AccessToken, nil +} + +func (ps *provisionService) updateGateway(token string, bs sdk.BootstrapConfig, channels []sdk.Channel) error { + var gw Gateway + for _, ch := range channels { + switch ch.Metadata["type"] { + case control: + gw.CtrlChannelID = ch.ID + case data: + gw.DataChannelID = ch.ID + case export: + gw.ExportChannelID = ch.ID + } + } + gw.ExternalID = bs.ExternalID + gw.ExternalKey = bs.ExternalKey + gw.CfgID = bs.ThingID + gw.Type = gateway + + th, sdkerr := ps.sdk.Thing(bs.ThingID, token) + if sdkerr != nil { + return errors.Wrap(ErrGatewayUpdate, sdkerr) + } + b, err := json.Marshal(gw) + if err != nil { + return errors.Wrap(ErrGatewayUpdate, err) + } + if err := json.Unmarshal(b, &th.Metadata); err != nil { + return errors.Wrap(ErrGatewayUpdate, err) + } + if _, err := ps.sdk.UpdateThing(th, token); err != nil { + return errors.Wrap(ErrGatewayUpdate, err) + } + return nil +} + +func (ps *provisionService) errLog(err error) { + if err != nil { + ps.logger.Error(fmt.Sprintf("Error recovering: %s", err)) + } +} + +func clean(ps *provisionService, things []sdk.Thing, channels []sdk.Channel, token string) { + for _, t := range things { + err := ps.sdk.DeleteThing(t.ID, token) + ps.errLog(err) + } + for _, c := range channels { + err := ps.sdk.DeleteChannel(c.ID, token) + ps.errLog(err) + } +} + +func (ps *provisionService) recover(e *error, ths *[]sdk.Thing, chs *[]sdk.Channel, tkn *string) { + if e == nil || *e == nil { + return + } + things, channels, token, err := *ths, *chs, *tkn, *e + + if errors.Contains(err, ErrFailedThingRetrieval) || errors.Contains(err, ErrFailedChannelCreation) { + for _, th := range things { + err := ps.sdk.DeleteThing(th.ID, token) + ps.errLog(err) + } + return + } + + if errors.Contains(err, ErrFailedBootstrap) || errors.Contains(err, ErrFailedChannelRetrieval) { + clean(ps, things, channels, token) + return + } + + if errors.Contains(err, ErrFailedBootstrapValidate) || errors.Contains(err, ErrFailedCertCreation) { + clean(ps, things, channels, token) + for _, th := range things { + if needsBootstrap(th) { + ps.errLog(ps.sdk.RemoveBootstrap(th.ID, token)) + } + } + return + } + + if errors.Contains(err, ErrThingUpdate) || errors.Contains(err, ErrGatewayUpdate) { + clean(ps, things, channels, token) + for _, th := range things { + if ps.conf.Bootstrap.X509Provision && needsBootstrap(th) { + _, err := ps.sdk.RevokeCert(th.ID, token) + ps.errLog(err) + } + if needsBootstrap(th) { + bs, err := ps.sdk.ViewBootstrap(th.ID, token) + ps.errLog(errors.Wrap(ErrFailedBootstrapRetrieval, err)) + ps.errLog(ps.sdk.RemoveBootstrap(bs.ThingID, token)) + } + } + return + } +} + +func needsBootstrap(th sdk.Thing) bool { + if th.Metadata == nil { + return false + } + + _, ok := th.Metadata[externalIDKey] + return ok +}
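For orientation, here is a minimal sketch of how a caller might wire this service together; in this repository the real wiring lives in cmd/provision/main.go. The SDK constructor fields and all endpoint, token, and ID values below are illustrative assumptions, not part of this patch:

```go
package main

import (
	"log/slog"
	"os"

	sdk "github.com/absmach/magistrala/pkg/sdk/go"
	"github.com/absmach/mg-contrib/provision"
)

func main() {
	logger := slog.New(slog.NewTextHandler(os.Stdout, nil))

	// Load the provision layout (things, channels, bootstrap settings).
	cfg, err := provision.Read("configs/config.toml")
	if err != nil {
		logger.Error("failed to read config", slog.Any("error", err))
		os.Exit(1)
	}

	// Endpoint values mirror the defaults in configs/config.toml.
	mgsdk := sdk.NewSDK(sdk.Config{
		ThingsURL:    "http://localhost:9000",
		BootstrapURL: "http://localhost:9013",
		CertsURL:     "http://localhost:9019",
	})

	svc := provision.New(cfg, mgsdk, logger)

	// Provision a gateway identified by an external ID (e.g. a MAC address).
	res, err := svc.Provision("<access-token>", "gw-1", "AA:BB:CC:DD:EE:FF", "<external-key>")
	if err != nil {
		logger.Error("provision failed", slog.Any("error", err))
		os.Exit(1)
	}
	logger.Info("provisioned",
		slog.Int("things", len(res.Things)),
		slog.Int("channels", len(res.Channels)))
}
```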
diff --git a/provision/service_test.go b/provision/service_test.go new file mode 100644 index 0000000..c903e00 --- /dev/null +++ b/provision/service_test.go @@ -0,0 +1,222 @@ +// Copyright (c) Abstract Machines +// SPDX-License-Identifier: Apache-2.0 + +package provision_test + +import ( + "fmt" + "testing" + + mglog "github.com/absmach/magistrala/logger" + "github.com/absmach/magistrala/pkg/errors" + repoerr "github.com/absmach/magistrala/pkg/errors/repository" + svcerr "github.com/absmach/magistrala/pkg/errors/service" + sdk "github.com/absmach/magistrala/pkg/sdk/go" + sdkmocks "github.com/absmach/magistrala/pkg/sdk/mocks" + "github.com/absmach/mg-contrib/pkg/testsutil" + "github.com/absmach/mg-contrib/provision" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" +) + +var validToken = "valid" + +func TestMapping(t *testing.T) { + mgsdk := new(sdkmocks.SDK) + svc := provision.New(validConfig, mgsdk, mglog.NewMock()) + + cases := []struct { + desc string + token string + content map[string]interface{} + sdkerr error + err error + }{ + { + desc: "valid token", + token: validToken, + content: validConfig.Bootstrap.Content, + sdkerr: nil, + err: nil, + }, + { + desc: "invalid token", + token: "invalid", + content: map[string]interface{}{}, + sdkerr: errors.NewSDKErrorWithStatus(svcerr.ErrAuthentication, 401), + err: provision.ErrUnauthorized, + }, + } + + for _, c := range cases { + t.Run(c.desc, func(t *testing.T) { + pm := sdk.PageMetadata{Offset: uint64(0), Limit: uint64(10)} + repocall := mgsdk.On("Users", pm, c.token).Return(sdk.UsersPage{}, c.sdkerr) + content, err := svc.Mapping(c.token) + assert.True(t, errors.Contains(err, c.err), fmt.Sprintf("expected error %v, got %v", c.err, err)) + assert.Equal(t, c.content, content) + repocall.Unset() + }) + } +} + +func TestCert(t *testing.T) { + cases := []struct { + desc string + config provision.Config + token string + thingID string + ttl string + cert string + key string + sdkThingErr error + sdkCertErr error + sdkTokenErr error + err error + }{ + { + desc: "valid", + config: validConfig, + token: validToken, + thingID: testsutil.GenerateUUID(t), + ttl: "1h", + cert: "cert", + key: "key", + sdkThingErr: nil, + sdkCertErr: nil, + sdkTokenErr: nil, + err: nil, + }, + { + desc: "empty token with config API key", + config: provision.Config{ + Server: provision.ServiceConf{MgAPIKey: "key"}, + Cert: provision.Cert{TTL: "1h"}, + }, + token: "", + thingID: testsutil.GenerateUUID(t), + ttl: "1h", + cert: "cert", + key: "key", + sdkThingErr: nil, + sdkCertErr: nil, + sdkTokenErr: nil, + err: nil, + }, + { + desc: "empty token with username and password", + config: provision.Config{ + Server: provision.ServiceConf{ + MgUser: "test@example.com", + MgPass: "12345678", + MgDomainID: testsutil.GenerateUUID(t), + }, + Cert: provision.Cert{TTL: "1h"}, + }, + token: "", + thingID: testsutil.GenerateUUID(t), + ttl: "1h", + cert: "cert", + key: "key", + sdkThingErr: nil, + sdkCertErr: nil, + sdkTokenErr: nil, + err: nil, + }, + { + desc: "empty token with username and invalid password", + config: provision.Config{ + Server: provision.ServiceConf{ + MgUser: "test@example.com", + MgPass: "12345678", + MgDomainID: testsutil.GenerateUUID(t), + }, + Cert: provision.Cert{TTL: "1h"}, + }, + token: "", + thingID: testsutil.GenerateUUID(t), + ttl: "1h", + cert: "", + key: "", + sdkThingErr: nil, + sdkCertErr: nil, + sdkTokenErr: errors.NewSDKErrorWithStatus(svcerr.ErrAuthentication, 401), + err: provision.ErrFailedToCreateToken, + }, + { + 
desc: "empty token with empty username and password", + config: provision.Config{ + Server: provision.ServiceConf{}, + Cert: provision.Cert{TTL: "1h"}, + }, + token: "", + thingID: testsutil.GenerateUUID(t), + ttl: "1h", + cert: "", + key: "", + sdkThingErr: nil, + sdkCertErr: nil, + sdkTokenErr: nil, + err: provision.ErrMissingCredentials, + }, + { + desc: "invalid thingID", + config: validConfig, + token: "invalid", + thingID: testsutil.GenerateUUID(t), + ttl: "1h", + cert: "", + key: "", + sdkThingErr: errors.NewSDKErrorWithStatus(svcerr.ErrAuthentication, 401), + sdkCertErr: nil, + sdkTokenErr: nil, + err: provision.ErrUnauthorized, + }, + { + desc: "invalid thingID", + config: validConfig, + token: validToken, + thingID: "invalid", + ttl: "1h", + cert: "", + key: "", + sdkThingErr: errors.NewSDKErrorWithStatus(repoerr.ErrNotFound, 404), + sdkCertErr: nil, + sdkTokenErr: nil, + err: provision.ErrUnauthorized, + }, + { + desc: "failed to issue cert", + config: validConfig, + token: validToken, + thingID: testsutil.GenerateUUID(t), + ttl: "1h", + cert: "", + key: "", + sdkThingErr: nil, + sdkTokenErr: nil, + sdkCertErr: errors.NewSDKError(repoerr.ErrCreateEntity), + err: repoerr.ErrCreateEntity, + }, + } + + for _, c := range cases { + t.Run(c.desc, func(t *testing.T) { + mgsdk := new(sdkmocks.SDK) + svc := provision.New(c.config, mgsdk, mglog.NewMock()) + + mgsdk.On("Thing", c.thingID, mock.Anything).Return(sdk.Thing{ID: c.thingID}, c.sdkThingErr) + mgsdk.On("IssueCert", c.thingID, c.config.Cert.TTL, mock.Anything).Return(sdk.Cert{ClientCert: c.cert, ClientKey: c.key}, c.sdkCertErr) + login := sdk.Login{ + Identity: c.config.Server.MgUser, + Secret: c.config.Server.MgPass, + DomainID: c.config.Server.MgDomainID, + } + mgsdk.On("CreateToken", login).Return(sdk.Token{AccessToken: validToken}, c.sdkTokenErr) + cert, key, err := svc.Cert(c.token, c.thingID, c.ttl) + assert.Equal(t, c.cert, cert) + assert.Equal(t, c.key, key) + assert.True(t, errors.Contains(err, c.err), fmt.Sprintf("expected error %v, got %v", c.err, err)) + }) + } +} diff --git a/readers/README.md b/readers/README.md new file mode 100644 index 0000000..4c7be59 --- /dev/null +++ b/readers/README.md @@ -0,0 +1,7 @@ +# Readers + +Readers provide implementations of various `message readers`. Message readers are services that consume normalized (in `SenML` format) Magistrala messages from data storage and expose HTTP API for message consumption. + +For an in-depth explanation of the usage of `reader`, as well as thorough understanding of Magistrala, please check out the [official documentation][doc]. + +[doc]: https://docs.magistrala.abstractmachines.fr diff --git a/readers/api/doc.go b/readers/api/doc.go new file mode 100644 index 0000000..2424852 --- /dev/null +++ b/readers/api/doc.go @@ -0,0 +1,6 @@ +// Copyright (c) Abstract Machines +// SPDX-License-Identifier: Apache-2.0 + +// Package api contains API-related concerns: endpoint definitions, middlewares +// and all resource representations. 
+package api diff --git a/readers/api/endpoint.go b/readers/api/endpoint.go new file mode 100644 index 0000000..846e81d --- /dev/null +++ b/readers/api/endpoint.go @@ -0,0 +1,39 @@ +// Copyright (c) Abstract Machines +// SPDX-License-Identifier: Apache-2.0 + +package api + +import ( + "context" + + "github.com/absmach/magistrala" + "github.com/absmach/magistrala/pkg/apiutil" + "github.com/absmach/magistrala/pkg/errors" + svcerr "github.com/absmach/magistrala/pkg/errors/service" + "github.com/absmach/magistrala/readers" + "github.com/go-kit/kit/endpoint" +) + +func listMessagesEndpoint(svc readers.MessageRepository, uauth magistrala.AuthServiceClient, taauth magistrala.AuthzServiceClient) endpoint.Endpoint { + return func(ctx context.Context, request interface{}) (interface{}, error) { + req := request.(listMessagesReq) + if err := req.validate(); err != nil { + return nil, errors.Wrap(apiutil.ErrValidation, err) + } + + if err := authorize(ctx, req, uauth, taauth); err != nil { + return nil, errors.Wrap(svcerr.ErrAuthorization, err) + } + + page, err := svc.ReadAll(req.chanID, req.pageMeta) + if err != nil { + return nil, err + } + + return pageRes{ + PageMetadata: page.PageMetadata, + Total: page.Total, + Messages: page.Messages, + }, nil + } +} diff --git a/readers/api/endpoint_test.go b/readers/api/endpoint_test.go new file mode 100644 index 0000000..6ee4c96 --- /dev/null +++ b/readers/api/endpoint_test.go @@ -0,0 +1,1020 @@ +// Copyright (c) Abstract Machines +// SPDX-License-Identifier: Apache-2.0 + +package api_test + +import ( + "context" + "encoding/json" + "fmt" + "net/http" + "net/http/httptest" + "testing" + "time" + + "github.com/absmach/magistrala" + authmocks "github.com/absmach/magistrala/auth/mocks" + "github.com/absmach/magistrala/pkg/apiutil" + svcerr "github.com/absmach/magistrala/pkg/errors/service" + "github.com/absmach/magistrala/pkg/transformers/senml" + "github.com/absmach/magistrala/readers" + "github.com/absmach/magistrala/readers/api" + "github.com/absmach/magistrala/readers/mocks" + thmocks "github.com/absmach/magistrala/things/mocks" + "github.com/absmach/mg-contrib/pkg/testsutil" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" +) + +const ( + svcName = "test-service" + thingToken = "1" + userToken = "token" + email = "user@example.com" + invalid = "invalid" + numOfMessages = 100 + valueFields = 5 + subtopic = "topic" + mqttProt = "mqtt" + httpProt = "http" + msgName = "temperature" + instanceID = "5de9b29a-feb9-11ed-be56-0242ac120002" +) + +var ( + v float64 = 5 + vs = "value" + vb = true + vd = "dataValue" + sum float64 = 42 +) + +func newServer(repo *mocks.MessageRepository, ac *authmocks.AuthClient, tc *thmocks.ThingAuthzService) *httptest.Server { + mux := api.MakeHandler(repo, ac, tc, svcName, instanceID) + return httptest.NewServer(mux) +} + +type testRequest struct { + client *http.Client + method string + url string + token string + key string +} + +func (tr testRequest) make() (*http.Response, error) { + req, err := http.NewRequest(tr.method, tr.url, http.NoBody) + if err != nil { + return nil, err + } + if tr.token != "" { + req.Header.Set("Authorization", apiutil.BearerPrefix+tr.token) + } + if tr.key != "" { + req.Header.Set("Authorization", apiutil.ThingPrefix+tr.key) + } + + return tr.client.Do(req) +} + +func TestReadAll(t *testing.T) { + chanID := testsutil.GenerateUUID(t) + pubID := testsutil.GenerateUUID(t) + pubID2 := testsutil.GenerateUUID(t) + + now := time.Now().Unix() + + var messages []senml.Message + var 
queryMsgs []senml.Message + var valueMsgs []senml.Message + var boolMsgs []senml.Message + var stringMsgs []senml.Message + var dataMsgs []senml.Message + + for i := 0; i < numOfMessages; i++ { + // Mix possible values as well as value sum. + msg := senml.Message{ + Channel: chanID, + Publisher: pubID, + Protocol: mqttProt, + Time: float64(now - int64(i)), + Name: "name", + } + + count := i % valueFields + switch count { + case 0: + msg.Value = &v + valueMsgs = append(valueMsgs, msg) + case 1: + msg.BoolValue = &vb + boolMsgs = append(boolMsgs, msg) + case 2: + msg.StringValue = &vs + stringMsgs = append(stringMsgs, msg) + case 3: + msg.DataValue = &vd + dataMsgs = append(dataMsgs, msg) + case 4: + msg.Sum = &sum + msg.Subtopic = subtopic + msg.Protocol = httpProt + msg.Publisher = pubID2 + msg.Name = msgName + queryMsgs = append(queryMsgs, msg) + } + + messages = append(messages, msg) + } + + repo := new(mocks.MessageRepository) + auth := new(authmocks.AuthClient) + tauth := new(thmocks.ThingAuthzService) + ts := newServer(repo, auth, tauth) + defer ts.Close() + + cases := []struct { + desc string + req string + url string + token string + key string + authResponse bool + status int + res pageRes + err error + }{ + { + desc: "read page with valid offset and limit", + url: fmt.Sprintf("%s/channels/%s/messages?offset=0&limit=10", ts.URL, chanID), + token: userToken, + authResponse: true, + status: http.StatusOK, + res: pageRes{ + PageMetadata: readers.PageMetadata{Limit: 10, Format: "messages"}, + Total: uint64(len(messages)), + Messages: messages[0:10], + }, + }, + { + desc: "read page with valid offset and limit as user", + url: fmt.Sprintf("%s/channels/%s/messages?offset=0&limit=10", ts.URL, chanID), + token: userToken, + authResponse: true, + status: http.StatusOK, + res: pageRes{ + PageMetadata: readers.PageMetadata{Limit: 10}, + Total: uint64(len(messages)), + Messages: messages[0:10], + }, + }, + { + desc: "read page with negative offset as thing", + url: fmt.Sprintf("%s/channels/%s/messages?offset=-1&limit=10", ts.URL, chanID), + key: thingToken, + authResponse: true, + status: http.StatusBadRequest, + }, + { + desc: "read page with negative limit as thing", + url: fmt.Sprintf("%s/channels/%s/messages?offset=0&limit=-10", ts.URL, chanID), + key: thingToken, + authResponse: true, + status: http.StatusBadRequest, + }, + { + desc: "read page with zero limit as thing", + url: fmt.Sprintf("%s/channels/%s/messages?offset=0&limit=0", ts.URL, chanID), + key: thingToken, + authResponse: true, + status: http.StatusBadRequest, + }, + { + desc: "read page with non-integer offset as thing", + url: fmt.Sprintf("%s/channels/%s/messages?offset=abc&limit=10", ts.URL, chanID), + key: thingToken, + authResponse: true, + status: http.StatusBadRequest, + }, + { + desc: "read page with non-integer limit as thing", + url: fmt.Sprintf("%s/channels/%s/messages?offset=0&limit=abc", ts.URL, chanID), + key: thingToken, + authResponse: true, + status: http.StatusBadRequest, + }, + { + desc: "read page with invalid channel id as thing", + url: fmt.Sprintf("%s/channels//messages?offset=0&limit=10", ts.URL), + key: thingToken, + authResponse: true, + status: http.StatusBadRequest, + }, + { + desc: "read page with invalid token as thing", + url: fmt.Sprintf("%s/channels/%s/messages?offset=0&limit=10", ts.URL, chanID), + token: authmocks.InvalidValue, + authResponse: false, + status: http.StatusUnauthorized, + err: svcerr.ErrAuthorization, + }, + { + desc: "read page with multiple offset as thing", + url: 
fmt.Sprintf("%s/channels/%s/messages?offset=0&offset=1&limit=10", ts.URL, chanID), + key: thingToken, + authResponse: true, + status: http.StatusBadRequest, + }, + { + desc: "read page with multiple limit as thing", + url: fmt.Sprintf("%s/channels/%s/messages?offset=0&limit=20&limit=10", ts.URL, chanID), + key: thingToken, + authResponse: true, + status: http.StatusBadRequest, + }, + { + desc: "read page with empty token as thing", + url: fmt.Sprintf("%s/channels/%s/messages?offset=0&limit=10", ts.URL, chanID), + token: "", + authResponse: false, + status: http.StatusUnauthorized, + err: svcerr.ErrAuthorization, + }, + { + desc: "read page with default offset as thing", + url: fmt.Sprintf("%s/channels/%s/messages?limit=10", ts.URL, chanID), + key: thingToken, + authResponse: true, + status: http.StatusOK, + res: pageRes{ + PageMetadata: readers.PageMetadata{Limit: 10}, + Total: uint64(len(messages)), + Messages: messages[0:10], + }, + }, + { + desc: "read page with default limit as thing", + url: fmt.Sprintf("%s/channels/%s/messages?offset=0", ts.URL, chanID), + key: thingToken, + authResponse: true, + status: http.StatusOK, + res: pageRes{ + PageMetadata: readers.PageMetadata{}, + Total: uint64(len(messages)), + Messages: messages[0:10], + }, + }, + { + desc: "read page with senml format as thing", + url: fmt.Sprintf("%s/channels/%s/messages?format=messages", ts.URL, chanID), + key: thingToken, + authResponse: true, + status: http.StatusOK, + res: pageRes{ + PageMetadata: readers.PageMetadata{Format: "messages"}, + Total: uint64(len(messages)), + Messages: messages[0:10], + }, + }, + { + desc: "read page with subtopic as thing", + url: fmt.Sprintf("%s/channels/%s/messages?subtopic=%s&protocol=%s", ts.URL, chanID, subtopic, httpProt), + key: thingToken, + authResponse: true, + status: http.StatusOK, + res: pageRes{ + PageMetadata: readers.PageMetadata{Limit: 10, Subtopic: subtopic, Format: "messages", Protocol: httpProt}, + Total: uint64(len(queryMsgs)), + Messages: queryMsgs[0:10], + }, + }, + { + desc: "read page with subtopic and protocol as thing", + url: fmt.Sprintf("%s/channels/%s/messages?subtopic=%s&protocol=%s", ts.URL, chanID, subtopic, httpProt), + key: thingToken, + authResponse: true, + status: http.StatusOK, + res: pageRes{ + PageMetadata: readers.PageMetadata{Limit: 10, Subtopic: subtopic, Format: "messages", Protocol: httpProt}, + Total: uint64(len(queryMsgs)), + Messages: queryMsgs[0:10], + }, + }, + { + desc: "read page with publisher as thing", + url: fmt.Sprintf("%s/channels/%s/messages?publisher=%s", ts.URL, chanID, pubID2), + key: thingToken, + authResponse: true, + status: http.StatusOK, + res: pageRes{ + PageMetadata: readers.PageMetadata{Limit: 10, Format: "messages", Publisher: pubID2}, + Total: uint64(len(queryMsgs)), + Messages: queryMsgs[0:10], + }, + }, + + { + desc: "read page with protocol as thing", + url: fmt.Sprintf("%s/channels/%s/messages?protocol=http", ts.URL, chanID), + key: thingToken, + authResponse: true, + status: http.StatusOK, + res: pageRes{ + PageMetadata: readers.PageMetadata{Limit: 10, Format: "messages", Protocol: httpProt}, + Total: uint64(len(queryMsgs)), + Messages: queryMsgs[0:10], + }, + }, + { + desc: "read page with name as thing", + url: fmt.Sprintf("%s/channels/%s/messages?name=%s", ts.URL, chanID, msgName), + key: thingToken, + authResponse: true, + status: http.StatusOK, + res: pageRes{ + PageMetadata: readers.PageMetadata{Limit: 10, Format: "messages", Name: msgName}, + Total: uint64(len(queryMsgs)), + Messages: 
queryMsgs[0:10], + }, + }, + { + desc: "read page with value as thing", + url: fmt.Sprintf("%s/channels/%s/messages?v=%f", ts.URL, chanID, v), + key: thingToken, + authResponse: true, + status: http.StatusOK, + res: pageRes{ + PageMetadata: readers.PageMetadata{Limit: 10, Format: "messages", Value: v}, + Total: uint64(len(valueMsgs)), + Messages: valueMsgs[0:10], + }, + }, + { + desc: "read page with value and equal comparator as thing", + url: fmt.Sprintf("%s/channels/%s/messages?v=%f&comparator=%s", ts.URL, chanID, v, readers.EqualKey), + key: thingToken, + authResponse: true, + status: http.StatusOK, + res: pageRes{ + PageMetadata: readers.PageMetadata{Limit: 10, Format: "messages", Value: v, Comparator: readers.EqualKey}, + Total: uint64(len(valueMsgs)), + Messages: valueMsgs[0:10], + }, + }, + { + desc: "read page with value and lower-than comparator as thing", + url: fmt.Sprintf("%s/channels/%s/messages?v=%f&comparator=%s", ts.URL, chanID, v+1, readers.LowerThanKey), + key: thingToken, + authResponse: true, + status: http.StatusOK, + res: pageRes{ + PageMetadata: readers.PageMetadata{Limit: 10, Format: "messages", Value: v + 1, Comparator: readers.LowerThanKey}, + Total: uint64(len(valueMsgs)), + Messages: valueMsgs[0:10], + }, + }, + { + desc: "read page with value and lower-than-or-equal comparator as thing", + url: fmt.Sprintf("%s/channels/%s/messages?v=%f&comparator=%s", ts.URL, chanID, v+1, readers.LowerThanEqualKey), + key: thingToken, + authResponse: true, + status: http.StatusOK, + res: pageRes{ + PageMetadata: readers.PageMetadata{Limit: 10, Format: "messages", Value: v + 1, Comparator: readers.LowerThanEqualKey}, + Total: uint64(len(valueMsgs)), + Messages: valueMsgs[0:10], + }, + }, + { + desc: "read page with value and greater-than comparator as thing", + url: fmt.Sprintf("%s/channels/%s/messages?v=%f&comparator=%s", ts.URL, chanID, v-1, readers.GreaterThanKey), + key: thingToken, + authResponse: true, + status: http.StatusOK, + res: pageRes{ + PageMetadata: readers.PageMetadata{Limit: 10, Format: "messages", Value: v - 1, Comparator: readers.GreaterThanKey}, + Total: uint64(len(valueMsgs)), + Messages: valueMsgs[0:10], + }, + }, + { + desc: "read page with value and greater-than-or-equal comparator as thing", + url: fmt.Sprintf("%s/channels/%s/messages?v=%f&comparator=%s", ts.URL, chanID, v-1, readers.GreaterThanEqualKey), + key: thingToken, + authResponse: true, + status: http.StatusOK, + res: pageRes{ + PageMetadata: readers.PageMetadata{Limit: 10, Format: "messages", Value: v - 1, Comparator: readers.GreaterThanEqualKey}, + Total: uint64(len(valueMsgs)), + Messages: valueMsgs[0:10], + }, + }, + { + desc: "read page with non-float value as thing", + url: fmt.Sprintf("%s/channels/%s/messages?v=ab01", ts.URL, chanID), + key: thingToken, + authResponse: true, + status: http.StatusBadRequest, + }, + { + desc: "read page with value and wrong comparator as thing", + url: fmt.Sprintf("%s/channels/%s/messages?v=%f&comparator=wrong", ts.URL, chanID, v-1), + key: thingToken, + authResponse: true, + status: http.StatusBadRequest, + }, + { + desc: "read page with boolean value as thing", + url: fmt.Sprintf("%s/channels/%s/messages?vb=true", ts.URL, chanID), + key: thingToken, + authResponse: true, + status: http.StatusOK, + res: pageRes{ + PageMetadata: readers.PageMetadata{Limit: 10, Format: "messages", BoolValue: true}, + Total: uint64(len(boolMsgs)), + Messages: boolMsgs[0:10], + }, + }, + { + desc: "read page with non-boolean value as thing", + url: 
fmt.Sprintf("%s/channels/%s/messages?vb=yes", ts.URL, chanID), + key: thingToken, + authResponse: true, + status: http.StatusBadRequest, + }, + { + desc: "read page with string value as thing", + url: fmt.Sprintf("%s/channels/%s/messages?vs=%s", ts.URL, chanID, vs), + key: thingToken, + authResponse: true, + status: http.StatusOK, + res: pageRes{ + PageMetadata: readers.PageMetadata{Limit: 10, Format: "messages", StringValue: vs}, + Total: uint64(len(stringMsgs)), + Messages: stringMsgs[0:10], + }, + }, + { + desc: "read page with data value as thing", + url: fmt.Sprintf("%s/channels/%s/messages?vd=%s", ts.URL, chanID, vd), + key: thingToken, + authResponse: true, + status: http.StatusOK, + res: pageRes{ + PageMetadata: readers.PageMetadata{Limit: 10, Format: "messages", DataValue: vd}, + Total: uint64(len(dataMsgs)), + Messages: dataMsgs[0:10], + }, + }, + { + desc: "read page with non-float from as thing", + url: fmt.Sprintf("%s/channels/%s/messages?from=ABCD", ts.URL, chanID), + key: thingToken, + authResponse: true, + status: http.StatusBadRequest, + }, + { + desc: "read page with non-float to as thing", + url: fmt.Sprintf("%s/channels/%s/messages?to=ABCD", ts.URL, chanID), + key: thingToken, + authResponse: true, + status: http.StatusBadRequest, + }, + { + desc: "read page with from/to as thing", + url: fmt.Sprintf("%s/channels/%s/messages?from=%f&to=%f", ts.URL, chanID, messages[19].Time, messages[4].Time), + key: thingToken, + authResponse: true, + status: http.StatusOK, + res: pageRes{ + PageMetadata: readers.PageMetadata{Limit: 10, Format: "messages", From: messages[19].Time, To: messages[4].Time}, + Total: uint64(len(messages[5:20])), + Messages: messages[5:15], + }, + }, + { + desc: "read page with aggregation as thing", + url: fmt.Sprintf("%s/channels/%s/messages?aggregation=MAX", ts.URL, chanID), + key: thingToken, + authResponse: true, + status: http.StatusBadRequest, + }, + { + desc: "read page with interval as thing", + url: fmt.Sprintf("%s/channels/%s/messages?interval=10h", ts.URL, chanID), + key: thingToken, + authResponse: true, + status: http.StatusOK, + res: pageRes{ + PageMetadata: readers.PageMetadata{Limit: 10, Format: "messages", Interval: "10h"}, + Total: uint64(len(messages)), + Messages: messages[0:10], + }, + }, + { + desc: "read page with aggregation and interval as thing", + url: fmt.Sprintf("%s/channels/%s/messages?aggregation=MAX&interval=10h", ts.URL, chanID), + key: thingToken, + authResponse: true, + status: http.StatusBadRequest, + }, + { + desc: "read page with aggregation, interval, to and from as thing", + url: fmt.Sprintf("%s/channels/%s/messages?aggregation=MAX&interval=10h&from=%f&to=%f", ts.URL, chanID, messages[19].Time, messages[4].Time), + key: thingToken, + authResponse: true, + status: http.StatusOK, + res: pageRes{ + PageMetadata: readers.PageMetadata{Limit: 10, Format: "messages", Aggregation: "MAX", Interval: "10h", From: messages[19].Time, To: messages[4].Time}, + Total: uint64(len(messages[5:20])), + Messages: messages[5:15], + }, + }, + { + desc: "read page with invalid aggregation and valid interval, to and from as thing", + url: fmt.Sprintf("%s/channels/%s/messages?aggregation=invalid&interval=10h&from=%f&to=%f", ts.URL, chanID, messages[19].Time, messages[4].Time), + key: thingToken, + authResponse: true, + status: http.StatusBadRequest, + }, + { + desc: "read page with invalid interval and valid aggregation, to and from as thing", + url: fmt.Sprintf("%s/channels/%s/messages?aggregation=MAX&interval=10hrs&from=%f&to=%f", ts.URL, 
chanID, messages[19].Time, messages[4].Time), + key: thingToken, + authResponse: true, + status: http.StatusBadRequest, + }, + { + desc: "read page with aggregation, interval and to with missing from as thing", + url: fmt.Sprintf("%s/channels/%s/messages?aggregation=MAX&interval=10h&to=%f", ts.URL, chanID, messages[4].Time), + key: thingToken, + authResponse: true, + status: http.StatusBadRequest, + }, + { + desc: "read page with aggregation, interval and to with invalid from as thing", + url: fmt.Sprintf("%s/channels/%s/messages?aggregation=MAX&interval=10h&to=ABCD&from=%f", ts.URL, chanID, messages[4].Time), + key: thingToken, + authResponse: true, + status: http.StatusBadRequest, + }, + { + desc: "read page with aggregation, interval and to with invalid to as thing", + url: fmt.Sprintf("%s/channels/%s/messages?aggregation=MAX&interval=10h&from=%f&to=ABCD", ts.URL, chanID, messages[4].Time), + key: thingToken, + authResponse: true, + status: http.StatusBadRequest, + }, + { + desc: "read page with valid offset and limit as user", + url: fmt.Sprintf("%s/channels/%s/messages?offset=0&limit=10", ts.URL, chanID), + token: userToken, + authResponse: true, + status: http.StatusOK, + res: pageRes{ + PageMetadata: readers.PageMetadata{Limit: 10}, + Total: uint64(len(messages)), + Messages: messages[0:10], + }, + }, + { + desc: "read page with negative offset as user", + url: fmt.Sprintf("%s/channels/%s/messages?offset=-1&limit=10", ts.URL, chanID), + token: userToken, + authResponse: true, + status: http.StatusBadRequest, + }, + { + desc: "read page with negative limit as user", + url: fmt.Sprintf("%s/channels/%s/messages?offset=0&limit=-10", ts.URL, chanID), + token: userToken, + authResponse: true, + status: http.StatusBadRequest, + }, + { + desc: "read page with zero limit as user", + url: fmt.Sprintf("%s/channels/%s/messages?offset=0&limit=0", ts.URL, chanID), + token: userToken, + authResponse: true, + status: http.StatusBadRequest, + }, + { + desc: "read page with non-integer offset as user", + url: fmt.Sprintf("%s/channels/%s/messages?offset=abc&limit=10", ts.URL, chanID), + token: userToken, + authResponse: true, + status: http.StatusBadRequest, + }, + { + desc: "read page with non-integer limit as user", + url: fmt.Sprintf("%s/channels/%s/messages?offset=0&limit=abc", ts.URL, chanID), + token: userToken, + authResponse: true, + status: http.StatusBadRequest, + }, + { + desc: "read page with invalid channel id as user", + url: fmt.Sprintf("%s/channels//messages?offset=0&limit=10", ts.URL), + token: userToken, + authResponse: true, + status: http.StatusBadRequest, + }, + { + desc: "read page with invalid token as user", + url: fmt.Sprintf("%s/channels/%s/messages?offset=0&limit=10", ts.URL, chanID), + token: authmocks.InvalidValue, + authResponse: false, + status: http.StatusUnauthorized, + err: svcerr.ErrAuthorization, + }, + { + desc: "read page with multiple offset as user", + url: fmt.Sprintf("%s/channels/%s/messages?offset=0&offset=1&limit=10", ts.URL, chanID), + token: userToken, + authResponse: true, + status: http.StatusBadRequest, + }, + { + desc: "read page with multiple limit as user", + url: fmt.Sprintf("%s/channels/%s/messages?offset=0&limit=20&limit=10", ts.URL, chanID), + token: userToken, + authResponse: true, + status: http.StatusBadRequest, + }, + { + desc: "read page with empty token as user", + url: fmt.Sprintf("%s/channels/%s/messages?offset=0&limit=10", ts.URL, chanID), + token: "", + authResponse: false, + status: http.StatusUnauthorized, + err: 
svcerr.ErrAuthorization, + }, + { + desc: "read page with default offset as user", + url: fmt.Sprintf("%s/channels/%s/messages?limit=10", ts.URL, chanID), + token: userToken, + authResponse: true, + status: http.StatusOK, + res: pageRes{ + PageMetadata: readers.PageMetadata{Limit: 10}, + Total: uint64(len(messages)), + Messages: messages[0:10], + }, + }, + { + desc: "read page with default limit as user", + url: fmt.Sprintf("%s/channels/%s/messages?offset=0", ts.URL, chanID), + token: userToken, + authResponse: true, + status: http.StatusOK, + res: pageRes{ + PageMetadata: readers.PageMetadata{}, + Total: uint64(len(messages)), + Messages: messages[0:10], + }, + }, + { + desc: "read page with senml format as user", + url: fmt.Sprintf("%s/channels/%s/messages?format=messages", ts.URL, chanID), + token: userToken, + authResponse: true, + status: http.StatusOK, + res: pageRes{ + PageMetadata: readers.PageMetadata{Format: "messages"}, + Total: uint64(len(messages)), + Messages: messages[0:10], + }, + }, + { + desc: "read page with subtopic as user", + url: fmt.Sprintf("%s/channels/%s/messages?subtopic=%s&protocol=%s", ts.URL, chanID, subtopic, httpProt), + token: userToken, + authResponse: true, + status: http.StatusOK, + res: pageRes{ + PageMetadata: readers.PageMetadata{Subtopic: subtopic, Protocol: httpProt}, + Total: uint64(len(queryMsgs)), + Messages: queryMsgs[0:10], + }, + }, + { + desc: "read page with subtopic and protocol as user", + url: fmt.Sprintf("%s/channels/%s/messages?subtopic=%s&protocol=%s", ts.URL, chanID, subtopic, httpProt), + token: userToken, + authResponse: true, + status: http.StatusOK, + res: pageRes{ + PageMetadata: readers.PageMetadata{Limit: 10, Format: "messages", Subtopic: subtopic, Protocol: httpProt}, + Total: uint64(len(queryMsgs)), + Messages: queryMsgs[0:10], + }, + }, + { + desc: "read page with publisher as user", + url: fmt.Sprintf("%s/channels/%s/messages?publisher=%s", ts.URL, chanID, pubID2), + token: userToken, + authResponse: true, + status: http.StatusOK, + res: pageRes{ + PageMetadata: readers.PageMetadata{Limit: 10, Format: "messages", Publisher: pubID2}, + Total: uint64(len(queryMsgs)), + Messages: queryMsgs[0:10], + }, + }, + { + desc: "read page with protocol as user", + url: fmt.Sprintf("%s/channels/%s/messages?protocol=http", ts.URL, chanID), + token: userToken, + authResponse: true, + status: http.StatusOK, + res: pageRes{ + PageMetadata: readers.PageMetadata{Limit: 10, Format: "messages", Protocol: httpProt}, + Total: uint64(len(queryMsgs)), + Messages: queryMsgs[0:10], + }, + }, + { + desc: "read page with name as user", + url: fmt.Sprintf("%s/channels/%s/messages?name=%s", ts.URL, chanID, msgName), + token: userToken, + authResponse: true, + status: http.StatusOK, + res: pageRes{ + PageMetadata: readers.PageMetadata{Limit: 10, Format: "messages", Name: msgName}, + Total: uint64(len(queryMsgs)), + Messages: queryMsgs[0:10], + }, + }, + { + desc: "read page with value as user", + url: fmt.Sprintf("%s/channels/%s/messages?v=%f", ts.URL, chanID, v), + token: userToken, + authResponse: true, + status: http.StatusOK, + res: pageRes{ + PageMetadata: readers.PageMetadata{Limit: 10, Format: "messages", Value: v}, + Total: uint64(len(valueMsgs)), + Messages: valueMsgs[0:10], + }, + }, + { + desc: "read page with value and equal comparator as user", + url: fmt.Sprintf("%s/channels/%s/messages?v=%f&comparator=%s", ts.URL, chanID, v, readers.EqualKey), + token: userToken, + authResponse: true, + status: http.StatusOK, + res: pageRes{ + PageMetadata: 
readers.PageMetadata{Limit: 10, Format: "messages", Value: v, Comparator: readers.EqualKey}, + Total: uint64(len(valueMsgs)), + Messages: valueMsgs[0:10], + }, + }, + { + desc: "read page with value and lower-than comparator as user", + url: fmt.Sprintf("%s/channels/%s/messages?v=%f&comparator=%s", ts.URL, chanID, v+1, readers.LowerThanKey), + token: userToken, + authResponse: true, + status: http.StatusOK, + res: pageRes{ + PageMetadata: readers.PageMetadata{Limit: 10, Format: "messages", Value: v + 1, Comparator: readers.LowerThanKey}, + Total: uint64(len(valueMsgs)), + Messages: valueMsgs[0:10], + }, + }, + { + desc: "read page with value and lower-than-or-equal comparator as user", + url: fmt.Sprintf("%s/channels/%s/messages?v=%f&comparator=%s", ts.URL, chanID, v+1, readers.LowerThanEqualKey), + token: userToken, + authResponse: true, + status: http.StatusOK, + res: pageRes{ + PageMetadata: readers.PageMetadata{Limit: 10, Format: "messages", Value: v + 1, Comparator: readers.LowerThanEqualKey}, + Total: uint64(len(valueMsgs)), + Messages: valueMsgs[0:10], + }, + }, + { + desc: "read page with value and greater-than comparator as user", + url: fmt.Sprintf("%s/channels/%s/messages?v=%f&comparator=%s", ts.URL, chanID, v-1, readers.GreaterThanKey), + token: userToken, + status: http.StatusOK, + authResponse: true, + res: pageRes{ + PageMetadata: readers.PageMetadata{Limit: 10, Format: "messages", Value: v - 1, Comparator: readers.GreaterThanKey}, + Total: uint64(len(valueMsgs)), + Messages: valueMsgs[0:10], + }, + }, + { + desc: "read page with value and greater-than-or-equal comparator as user", + url: fmt.Sprintf("%s/channels/%s/messages?v=%f&comparator=%s", ts.URL, chanID, v-1, readers.GreaterThanEqualKey), + token: userToken, + authResponse: true, + status: http.StatusOK, + res: pageRes{ + PageMetadata: readers.PageMetadata{Limit: 10, Format: "messages", Value: v - 1, Comparator: readers.GreaterThanEqualKey}, + Total: uint64(len(valueMsgs)), + Messages: valueMsgs[0:10], + }, + }, + { + desc: "read page with non-float value as user", + url: fmt.Sprintf("%s/channels/%s/messages?v=ab01", ts.URL, chanID), + token: userToken, + authResponse: true, + status: http.StatusBadRequest, + }, + { + desc: "read page with value and wrong comparator as user", + url: fmt.Sprintf("%s/channels/%s/messages?v=%f&comparator=wrong", ts.URL, chanID, v-1), + token: userToken, + authResponse: true, + status: http.StatusBadRequest, + }, + { + desc: "read page with boolean value as user", + url: fmt.Sprintf("%s/channels/%s/messages?vb=true", ts.URL, chanID), + token: userToken, + authResponse: true, + status: http.StatusOK, + res: pageRes{ + PageMetadata: readers.PageMetadata{Limit: 10, Format: "messages", BoolValue: true}, + Total: uint64(len(boolMsgs)), + Messages: boolMsgs[0:10], + }, + }, + { + desc: "read page with non-boolean value as user", + url: fmt.Sprintf("%s/channels/%s/messages?vb=yes", ts.URL, chanID), + token: userToken, + authResponse: true, + status: http.StatusBadRequest, + }, + { + desc: "read page with string value as user", + url: fmt.Sprintf("%s/channels/%s/messages?vs=%s", ts.URL, chanID, vs), + token: userToken, + authResponse: true, + status: http.StatusOK, + res: pageRes{ + PageMetadata: readers.PageMetadata{Limit: 10, Format: "messages", StringValue: vs}, + Total: uint64(len(stringMsgs)), + Messages: stringMsgs[0:10], + }, + }, + { + desc: "read page with data value as user", + url: fmt.Sprintf("%s/channels/%s/messages?vd=%s", ts.URL, chanID, vd), + token: userToken, + authResponse: 
true, + status: http.StatusOK, + res: pageRes{ + PageMetadata: readers.PageMetadata{Limit: 10, Format: "messages", DataValue: vd}, + Total: uint64(len(dataMsgs)), + Messages: dataMsgs[0:10], + }, + }, + { + desc: "read page with non-float from as user", + url: fmt.Sprintf("%s/channels/%s/messages?from=ABCD", ts.URL, chanID), + token: userToken, + authResponse: true, + status: http.StatusBadRequest, + }, + { + desc: "read page with non-float to as user", + url: fmt.Sprintf("%s/channels/%s/messages?to=ABCD", ts.URL, chanID), + token: userToken, + authResponse: true, + status: http.StatusBadRequest, + }, + { + desc: "read page with from/to as user", + url: fmt.Sprintf("%s/channels/%s/messages?from=%f&to=%f", ts.URL, chanID, messages[19].Time, messages[4].Time), + token: userToken, + authResponse: true, + status: http.StatusOK, + res: pageRes{ + PageMetadata: readers.PageMetadata{Limit: 10, Format: "messages", From: messages[19].Time, To: messages[4].Time}, + Total: uint64(len(messages[5:20])), + Messages: messages[5:15], + }, + }, + { + desc: "read page with aggregation as user", + url: fmt.Sprintf("%s/channels/%s/messages?aggregation=MAX", ts.URL, chanID), + key: userToken, + authResponse: true, + status: http.StatusBadRequest, + }, + { + desc: "read page with interval as user", + url: fmt.Sprintf("%s/channels/%s/messages?interval=10h", ts.URL, chanID), + key: userToken, + authResponse: true, + status: http.StatusOK, + res: pageRes{ + PageMetadata: readers.PageMetadata{Limit: 10, Format: "messages", Interval: "10h"}, + Total: uint64(len(messages)), + Messages: messages[0:10], + }, + }, + { + desc: "read page with aggregation and interval as user", + url: fmt.Sprintf("%s/channels/%s/messages?aggregation=MAX&interval=10h", ts.URL, chanID), + key: userToken, + authResponse: true, + status: http.StatusBadRequest, + }, + { + desc: "read page with aggregation, interval, to and from as user", + url: fmt.Sprintf("%s/channels/%s/messages?aggregation=MAX&interval=10h&from=%f&to=%f", ts.URL, chanID, messages[19].Time, messages[4].Time), + key: userToken, + authResponse: true, + status: http.StatusOK, + res: pageRes{ + PageMetadata: readers.PageMetadata{Limit: 10, Format: "messages", Aggregation: "MAX", Interval: "10h", From: messages[19].Time, To: messages[4].Time}, + Total: uint64(len(messages[5:20])), + Messages: messages[5:15], + }, + }, + { + desc: "read page with invalid aggregation and valid interval, to and from as user", + url: fmt.Sprintf("%s/channels/%s/messages?aggregation=invalid&interval=10h&from=%f&to=%f", ts.URL, chanID, messages[19].Time, messages[4].Time), + key: userToken, + authResponse: true, + status: http.StatusBadRequest, + }, + { + desc: "read page with invalid interval and valid aggregation, to and from as user", + url: fmt.Sprintf("%s/channels/%s/messages?aggregation=MAX&interval=10hrs&from=%f&to=%f", ts.URL, chanID, messages[19].Time, messages[4].Time), + key: userToken, + authResponse: true, + status: http.StatusBadRequest, + }, + { + desc: "read page with aggregation, interval and to with missing from as user", + url: fmt.Sprintf("%s/channels/%s/messages?aggregation=MAX&interval=10h&to=%f", ts.URL, chanID, messages[4].Time), + key: userToken, + authResponse: true, + status: http.StatusBadRequest, + }, + { + desc: "read page with aggregation, interval and to with invalid from as user", + url: fmt.Sprintf("%s/channels/%s/messages?aggregation=MAX&interval=10h&to=ABCD&from=%f", ts.URL, chanID, messages[4].Time), + key: userToken, + authResponse: true, + status: 
http.StatusBadRequest, + }, + { + desc: "read page with aggregation, interval and to with invalid to as user", + url: fmt.Sprintf("%s/channels/%s/messages?aggregation=MAX&interval=10h&from=%f&to=ABCD", ts.URL, chanID, messages[4].Time), + key: userToken, + authResponse: true, + status: http.StatusBadRequest, + }, + } + + for _, tc := range cases { + repoCall := auth.On("Identify", context.Background(), mock.Anything).Return(&magistrala.IdentityRes{Id: testsutil.GenerateUUID(t)}, nil) + authCall := auth.On("Authorize", mock.Anything, mock.Anything).Return(&magistrala.AuthorizeRes{Authorized: tc.authResponse}, tc.err) + repo.On("ReadAll", chanID, tc.res.PageMetadata).Return(readers.MessagesPage{Total: tc.res.Total, Messages: fromSenml(tc.res.Messages)}, nil) + if tc.key != "" { + repoCall = tauth.On("Authorize", mock.Anything, mock.Anything).Return(&magistrala.AuthorizeRes{Authorized: tc.authResponse}, tc.err) + } + req := testRequest{ + client: ts.Client(), + method: http.MethodGet, + url: tc.url, + token: tc.token, + key: tc.key, + } + res, err := req.make() + assert.Nil(t, err, fmt.Sprintf("%s: unexpected error %s", tc.desc, err)) + + var page pageRes + err = json.NewDecoder(res.Body).Decode(&page) + assert.Nil(t, err, fmt.Sprintf("%s: unexpected error while decoding response body: %s", tc.desc, err)) + + assert.Equal(t, tc.status, res.StatusCode, fmt.Sprintf("%s: expected %d got %d", tc.desc, tc.status, res.StatusCode)) + assert.Equal(t, tc.res.Total, page.Total, fmt.Sprintf("%s: expected %d got %d", tc.desc, tc.res.Total, page.Total)) + assert.ElementsMatch(t, tc.res.Messages, page.Messages, fmt.Sprintf("%s: got incorrect body from response", tc.desc)) + repoCall.Unset() + authCall.Unset() + } +} + +type pageRes struct { + readers.PageMetadata + Total uint64 `json:"total"` + Messages []senml.Message `json:"messages,omitempty"` +} + +func fromSenml(in []senml.Message) []readers.Message { + var ret []readers.Message + for _, m := range in { + ret = append(ret, m) + } + return ret +} diff --git a/readers/api/logging.go b/readers/api/logging.go new file mode 100644 index 0000000..49eedcb --- /dev/null +++ b/readers/api/logging.go @@ -0,0 +1,56 @@ +// Copyright (c) Abstract Machines +// SPDX-License-Identifier: Apache-2.0 + +//go:build !test + +package api + +import ( + "log/slog" + "time" + + "github.com/absmach/magistrala/readers" +) + +var _ readers.MessageRepository = (*loggingMiddleware)(nil) + +type loggingMiddleware struct { + logger *slog.Logger + svc readers.MessageRepository +} + +// LoggingMiddleware adds logging facilities to the core service. +func LoggingMiddleware(svc readers.MessageRepository, logger *slog.Logger) readers.MessageRepository { + return &loggingMiddleware{ + logger: logger, + svc: svc, + } +} + +func (lm *loggingMiddleware) ReadAll(chanID string, rpm readers.PageMetadata) (page readers.MessagesPage, err error) { + defer func(begin time.Time) { + args := []any{ + slog.String("duration", time.Since(begin).String()), + slog.String("channel_id", chanID), + slog.Group("page", + slog.Uint64("offset", rpm.Offset), + slog.Uint64("limit", rpm.Limit), + slog.Uint64("total", page.Total), + ), + } + if rpm.Subtopic != "" { + args = append(args, slog.String("subtopic", rpm.Subtopic)) + } + if rpm.Publisher != "" { + args = append(args, slog.String("publisher", rpm.Publisher)) + } + if err != nil { + args = append(args, slog.Any("error", err)) + lm.logger.Warn("Read all failed", args...)
+ return + } + lm.logger.Info("Read all completed successfully", args...) + }(time.Now()) + + return lm.svc.ReadAll(chanID, rpm) +} diff --git a/readers/api/metrics.go b/readers/api/metrics.go new file mode 100644 index 0000000..026f3f4 --- /dev/null +++ b/readers/api/metrics.go @@ -0,0 +1,39 @@ +// Copyright (c) Abstract Machines +// SPDX-License-Identifier: Apache-2.0 + +//go:build !test + +package api + +import ( + "time" + + "github.com/absmach/magistrala/readers" + "github.com/go-kit/kit/metrics" +) + +var _ readers.MessageRepository = (*metricsMiddleware)(nil) + +type metricsMiddleware struct { + counter metrics.Counter + latency metrics.Histogram + svc readers.MessageRepository +} + +// MetricsMiddleware instruments core service by tracking request count and latency. +func MetricsMiddleware(svc readers.MessageRepository, counter metrics.Counter, latency metrics.Histogram) readers.MessageRepository { + return &metricsMiddleware{ + counter: counter, + latency: latency, + svc: svc, + } +} + +func (mm *metricsMiddleware) ReadAll(chanID string, rpm readers.PageMetadata) (readers.MessagesPage, error) { + defer func(begin time.Time) { + mm.counter.With("method", "read_all").Add(1) + mm.latency.With("method", "read_all").Observe(time.Since(begin).Seconds()) + }(time.Now()) + + return mm.svc.ReadAll(chanID, rpm) +} diff --git a/readers/api/requests.go b/readers/api/requests.go new file mode 100644 index 0000000..c32be45 --- /dev/null +++ b/readers/api/requests.go @@ -0,0 +1,67 @@ +// Copyright (c) Abstract Machines +// SPDX-License-Identifier: Apache-2.0 + +package api + +import ( + "slices" + "strings" + "time" + + "github.com/absmach/magistrala/pkg/apiutil" + "github.com/absmach/magistrala/readers" +) + +const maxLimitSize = 1000 + +var validAggregations = []string{"MAX", "MIN", "AVG", "SUM", "COUNT"} + +type listMessagesReq struct { + chanID string + token string + key string + pageMeta readers.PageMetadata +} + +func (req listMessagesReq) validate() error { + if req.token == "" && req.key == "" { + return apiutil.ErrBearerToken + } + + if req.chanID == "" { + return apiutil.ErrMissingID + } + + if req.pageMeta.Limit < 1 || req.pageMeta.Limit > maxLimitSize { + return apiutil.ErrLimitSize + } + + if req.pageMeta.Comparator != "" && + req.pageMeta.Comparator != readers.EqualKey && + req.pageMeta.Comparator != readers.LowerThanKey && + req.pageMeta.Comparator != readers.LowerThanEqualKey && + req.pageMeta.Comparator != readers.GreaterThanKey && + req.pageMeta.Comparator != readers.GreaterThanEqualKey { + return apiutil.ErrInvalidComparator + } + + if req.pageMeta.Aggregation != "" { + if req.pageMeta.From == 0 { + return apiutil.ErrMissingFrom + } + + if req.pageMeta.To == 0 { + return apiutil.ErrMissingTo + } + + if !slices.Contains(validAggregations, strings.ToUpper(req.pageMeta.Aggregation)) { + return apiutil.ErrInvalidAggregation + } + + if _, err := time.ParseDuration(req.pageMeta.Interval); err != nil { + return apiutil.ErrInvalidInterval + } + } + + return nil +} diff --git a/readers/api/responses.go b/readers/api/responses.go new file mode 100644 index 0000000..980f234 --- /dev/null +++ b/readers/api/responses.go @@ -0,0 +1,31 @@ +// Copyright (c) Abstract Machines +// SPDX-License-Identifier: Apache-2.0 + +package api + +import ( + "net/http" + + "github.com/absmach/magistrala" + "github.com/absmach/magistrala/readers" +) + +var _ magistrala.Response = (*pageRes)(nil) + +type pageRes struct { + readers.PageMetadata + Total uint64 `json:"total"` + Messages []readers.Message 
`json:"messages,omitempty"` +} + +func (res pageRes) Headers() map[string]string { + return map[string]string{} +} + +func (res pageRes) Code() int { + return http.StatusOK +} + +func (res pageRes) Empty() bool { + return false +} diff --git a/readers/api/transport.go b/readers/api/transport.go new file mode 100644 index 0000000..c604e33 --- /dev/null +++ b/readers/api/transport.go @@ -0,0 +1,280 @@ +// Copyright (c) Abstract Machines +// SPDX-License-Identifier: Apache-2.0 + +package api + +import ( + "context" + "encoding/json" + "net/http" + + "github.com/absmach/magistrala" + "github.com/absmach/magistrala/pkg/apiutil" + "github.com/absmach/magistrala/pkg/errors" + svcerr "github.com/absmach/magistrala/pkg/errors/service" + "github.com/absmach/magistrala/readers" + "github.com/go-chi/chi/v5" + kithttp "github.com/go-kit/kit/transport/http" + "github.com/prometheus/client_golang/prometheus/promhttp" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +const ( + contentType = "application/json" + offsetKey = "offset" + limitKey = "limit" + formatKey = "format" + subtopicKey = "subtopic" + publisherKey = "publisher" + protocolKey = "protocol" + nameKey = "name" + valueKey = "v" + stringValueKey = "vs" + dataValueKey = "vd" + boolValueKey = "vb" + comparatorKey = "comparator" + fromKey = "from" + toKey = "to" + aggregationKey = "aggregation" + intervalKey = "interval" + defInterval = "1s" + defLimit = 10 + defOffset = 0 + defFormat = "messages" + + tokenKind = "token" + thingType = "thing" + userType = "user" + subscribePermission = "subscribe" + viewPermission = "view" + groupType = "group" +) + +var errUserAccess = errors.New("user has no permission") + +// MakeHandler returns a HTTP handler for API endpoints. +func MakeHandler(svc readers.MessageRepository, uauth magistrala.AuthServiceClient, taauth magistrala.AuthzServiceClient, svcName, instanceID string) http.Handler { + opts := []kithttp.ServerOption{ + kithttp.ServerErrorEncoder(encodeError), + } + + mux := chi.NewRouter() + mux.Get("/channels/{chanID}/messages", kithttp.NewServer( + listMessagesEndpoint(svc, uauth, taauth), + decodeList, + encodeResponse, + opts..., + ).ServeHTTP) + + mux.Get("/health", magistrala.Health(svcName, instanceID)) + mux.Handle("/metrics", promhttp.Handler()) + + return mux +} + +func decodeList(_ context.Context, r *http.Request) (interface{}, error) { + offset, err := apiutil.ReadNumQuery[uint64](r, offsetKey, defOffset) + if err != nil { + return nil, errors.Wrap(apiutil.ErrValidation, err) + } + + limit, err := apiutil.ReadNumQuery[uint64](r, limitKey, defLimit) + if err != nil { + return nil, errors.Wrap(apiutil.ErrValidation, err) + } + + format, err := apiutil.ReadStringQuery(r, formatKey, defFormat) + if err != nil { + return nil, errors.Wrap(apiutil.ErrValidation, err) + } + + subtopic, err := apiutil.ReadStringQuery(r, subtopicKey, "") + if err != nil { + return nil, errors.Wrap(apiutil.ErrValidation, err) + } + + publisher, err := apiutil.ReadStringQuery(r, publisherKey, "") + if err != nil { + return nil, errors.Wrap(apiutil.ErrValidation, err) + } + + protocol, err := apiutil.ReadStringQuery(r, protocolKey, "") + if err != nil { + return nil, errors.Wrap(apiutil.ErrValidation, err) + } + + name, err := apiutil.ReadStringQuery(r, nameKey, "") + if err != nil { + return nil, errors.Wrap(apiutil.ErrValidation, err) + } + + v, err := apiutil.ReadNumQuery[float64](r, valueKey, 0) + if err != nil { + return nil, errors.Wrap(apiutil.ErrValidation, err) + } + + comparator, err 
:= apiutil.ReadStringQuery(r, comparatorKey, "") + if err != nil { + return nil, errors.Wrap(apiutil.ErrValidation, err) + } + + vs, err := apiutil.ReadStringQuery(r, stringValueKey, "") + if err != nil { + return nil, errors.Wrap(apiutil.ErrValidation, err) + } + + vd, err := apiutil.ReadStringQuery(r, dataValueKey, "") + if err != nil { + return nil, errors.Wrap(apiutil.ErrValidation, err) + } + + vb, err := apiutil.ReadBoolQuery(r, boolValueKey, false) + if err != nil && err != apiutil.ErrNotFoundParam { + return nil, err + } + + from, err := apiutil.ReadNumQuery[float64](r, fromKey, 0) + if err != nil { + return nil, errors.Wrap(apiutil.ErrValidation, err) + } + + to, err := apiutil.ReadNumQuery[float64](r, toKey, 0) + if err != nil { + return nil, errors.Wrap(apiutil.ErrValidation, err) + } + + aggregation, err := apiutil.ReadStringQuery(r, aggregationKey, "") + if err != nil { + return nil, errors.Wrap(apiutil.ErrValidation, err) + } + + var interval string + if aggregation != "" { + interval, err = apiutil.ReadStringQuery(r, intervalKey, defInterval) + if err != nil { + return nil, errors.Wrap(apiutil.ErrValidation, err) + } + } + + req := listMessagesReq{ + chanID: chi.URLParam(r, "chanID"), + token: apiutil.ExtractBearerToken(r), + key: apiutil.ExtractThingKey(r), + pageMeta: readers.PageMetadata{ + Offset: offset, + Limit: limit, + Format: format, + Subtopic: subtopic, + Publisher: publisher, + Protocol: protocol, + Name: name, + Value: v, + Comparator: comparator, + StringValue: vs, + DataValue: vd, + BoolValue: vb, + From: from, + To: to, + Aggregation: aggregation, + Interval: interval, + }, + } + return req, nil +} + +func encodeResponse(_ context.Context, w http.ResponseWriter, response interface{}) error { + w.Header().Set("Content-Type", contentType) + + if ar, ok := response.(magistrala.Response); ok { + for k, v := range ar.Headers() { + w.Header().Set(k, v) + } + + w.WriteHeader(ar.Code()) + + if ar.Empty() { + return nil + } + } + + return json.NewEncoder(w).Encode(response) +} + +func encodeError(_ context.Context, err error, w http.ResponseWriter) { + var wrapper error + if errors.Contains(err, apiutil.ErrValidation) { + wrapper, err = errors.Unwrap(err) + } + + switch { + case errors.Contains(err, nil): + case errors.Contains(err, apiutil.ErrInvalidQueryParams), + errors.Contains(err, svcerr.ErrMalformedEntity), + errors.Contains(err, apiutil.ErrMissingID), + errors.Contains(err, apiutil.ErrLimitSize), + errors.Contains(err, apiutil.ErrOffsetSize), + errors.Contains(err, apiutil.ErrInvalidComparator), + errors.Contains(err, apiutil.ErrInvalidAggregation), + errors.Contains(err, apiutil.ErrInvalidInterval), + errors.Contains(err, apiutil.ErrMissingFrom), + errors.Contains(err, apiutil.ErrMissingTo): + w.WriteHeader(http.StatusBadRequest) + case errors.Contains(err, svcerr.ErrAuthentication), + errors.Contains(err, svcerr.ErrAuthorization), + errors.Contains(err, apiutil.ErrBearerToken): + w.WriteHeader(http.StatusUnauthorized) + case errors.Contains(err, readers.ErrReadMessages): + w.WriteHeader(http.StatusInternalServerError) + default: + w.WriteHeader(http.StatusInternalServerError) + } + + if wrapper != nil { + err = errors.Wrap(wrapper, err) + } + if errorVal, ok := err.(errors.Error); ok { + w.Header().Set("Content-Type", contentType) + if err := json.NewEncoder(w).Encode(errorVal); err != nil { + w.WriteHeader(http.StatusInternalServerError) + } + } +} + +func authorize(ctx context.Context, req listMessagesReq, uauth magistrala.AuthServiceClient, taauth 
magistrala.AuthzServiceClient) (err error) {
+	switch {
+	case req.token != "":
+		if _, err = uauth.Authorize(ctx, &magistrala.AuthorizeReq{
+			SubjectType: userType,
+			SubjectKind: tokenKind,
+			Subject:     req.token,
+			Permission:  viewPermission,
+			ObjectType:  groupType,
+			Object:      req.chanID,
+		}); err != nil {
+			e, ok := status.FromError(err)
+			if ok && e.Code() == codes.PermissionDenied {
+				return errors.Wrap(errUserAccess, err)
+			}
+			return err
+		}
+		return nil
+	case req.key != "":
+		if _, err = taauth.Authorize(ctx, &magistrala.AuthorizeReq{
+			SubjectType: groupType,
+			Subject:     req.key,
+			ObjectType:  thingType,
+			Object:      req.chanID,
+			Permission:  subscribePermission,
+		}); err != nil {
+			e, ok := status.FromError(err)
+			if ok && e.Code() == codes.PermissionDenied {
+				return errors.Wrap(errUserAccess, err)
+			}
+			return err
+		}
+		return nil
+	default:
+		return svcerr.ErrAuthorization
+	}
+}
diff --git a/readers/cassandra/README.md b/readers/cassandra/README.md
new file mode 100644
index 0000000..b38ed90
--- /dev/null
+++ b/readers/cassandra/README.md
@@ -0,0 +1,100 @@
+# Cassandra reader
+
+The Cassandra reader provides a message repository implementation for Cassandra.
+
+## Configuration
+
+The service is configured using the environment variables presented in the
+following table. Note that any unset variables will be replaced with their
+default values.
+
+| Variable | Description | Default |
+| -------- | ----------- | ------- |
+| MG_CASSANDRA_READER_LOG_LEVEL | Cassandra service log level | debug |
+| MG_CASSANDRA_READER_HTTP_HOST | Cassandra service HTTP host | localhost |
+| MG_CASSANDRA_READER_HTTP_PORT | Cassandra service HTTP port | 9003 |
+| MG_CASSANDRA_READER_HTTP_SERVER_CERT | Cassandra service HTTP server cert | "" |
+| MG_CASSANDRA_READER_HTTP_SERVER_KEY | Cassandra service HTTP server key | "" |
+| MG_CASSANDRA_CLUSTER | Cassandra cluster comma separated addresses | localhost |
+| MG_CASSANDRA_USER | Cassandra DB username | magistrala |
+| MG_CASSANDRA_PASS | Cassandra DB password | magistrala |
+| MG_CASSANDRA_KEYSPACE | Cassandra keyspace name | messages |
+| MG_CASSANDRA_PORT | Cassandra DB port | 9042 |
+| MG_THINGS_AUTH_GRPC_URL | Things service Auth gRPC URL | localhost:7000 |
+| MG_THINGS_AUTH_GRPC_TIMEOUT | Things service Auth gRPC request timeout in seconds | 1 |
+| MG_THINGS_AUTH_GRPC_CLIENT_TLS | Things service Auth gRPC TLS enabled | false |
+| MG_THINGS_AUTH_GRPC_CA_CERTS | Things service Auth gRPC CA certificates | "" |
+| MG_AUTH_GRPC_URL | Auth service gRPC URL | localhost:7001 |
+| MG_AUTH_GRPC_TIMEOUT | Auth service gRPC request timeout in seconds | 1s |
+| MG_AUTH_GRPC_CLIENT_TLS | Auth service gRPC TLS enabled | false |
+| MG_AUTH_GRPC_CA_CERT | Auth service gRPC CA certificates | "" |
+| MG_JAEGER_URL | Jaeger server URL | http://jaeger:14268/api/traces |
+| MG_SEND_TELEMETRY | Send telemetry to magistrala call home server | true |
+| MG_CASSANDRA_READER_INSTANCE_ID | Cassandra Reader instance ID | "" |
+
+## Deployment
+
+The service itself is distributed as a Docker container. Check the [`cassandra-reader`](https://github.com/absmach/magistrala/blob/main/docker/addons/cassandra-reader/docker-compose.yml#L15-L35) service section in
+the docker-compose file to see how the service is deployed.
+
+To start the service, execute the following shell script:
+
+```bash
+# download the latest version of the service
+git clone https://github.com/absmach/magistrala
+
+cd magistrala
+
+# compile the cassandra-reader
+make cassandra-reader
+
+# copy binary to bin
+make install
+
+# Set the environment variables and run the service
+MG_CASSANDRA_READER_LOG_LEVEL=[Cassandra Service log level] \
+MG_CASSANDRA_READER_HTTP_HOST=[Cassandra Service HTTP host] \
+MG_CASSANDRA_READER_HTTP_PORT=[Cassandra Service HTTP port] \
+MG_CASSANDRA_READER_HTTP_SERVER_CERT=[Cassandra Service HTTP server cert] \
+MG_CASSANDRA_READER_HTTP_SERVER_KEY=[Cassandra Service HTTP server key] \
+MG_CASSANDRA_CLUSTER=[Cassandra cluster comma separated addresses] \
+MG_CASSANDRA_KEYSPACE=[Cassandra keyspace name] \
+MG_CASSANDRA_USER=[Cassandra DB username] \
+MG_CASSANDRA_PASS=[Cassandra DB password] \
+MG_CASSANDRA_PORT=[Cassandra DB port] \
+MG_THINGS_AUTH_GRPC_URL=[Things service Auth gRPC URL] \
+MG_THINGS_AUTH_GRPC_TIMEOUT=[Things service Auth gRPC request timeout in seconds] \
+MG_THINGS_AUTH_GRPC_CLIENT_TLS=[Things service Auth gRPC TLS enabled] \
+MG_THINGS_AUTH_GRPC_CA_CERTS=[Things service Auth gRPC CA certificates] \
+MG_AUTH_GRPC_URL=[Auth service gRPC URL] \
+MG_AUTH_GRPC_TIMEOUT=[Auth service gRPC request timeout in seconds] \
+MG_AUTH_GRPC_CLIENT_TLS=[Auth service gRPC TLS enabled] \
+MG_AUTH_GRPC_CA_CERT=[Auth service gRPC CA certificates] \
+MG_JAEGER_URL=[Jaeger server URL] \
+MG_SEND_TELEMETRY=[Send telemetry to magistrala call home server] \
+MG_CASSANDRA_READER_INSTANCE_ID=[Cassandra Reader instance ID] \
+$GOBIN/magistrala-cassandra-reader
+```
+
+### Using docker-compose
+
+This service can be deployed using docker containers. The docker-compose file is
+available in `<project_root>/docker/addons/cassandra-reader/docker-compose.yml`.
+In order to run all Magistrala core services, as well as the mentioned optional ones,
+execute the following command:
+
+```bash
+docker compose -f docker/docker-compose.yml up -d
+./docker/addons/cassandra-writer/init.sh
+docker compose -f docker/addons/cassandra-reader/docker-compose.yml up -d
+```
+
+## Usage
+
+The service exposes an [HTTP API](https://docs.api.magistrala.abstractmachines.fr/?urls.primaryName=readers-openapi.yml) for fetching messages.
+
+```
+Note: Cassandra Reader doesn't support searching for substrings of string_value, because the current data model makes this type of query inefficient.
+```
+
+[doc]: https://docs.magistrala.abstractmachines.fr
diff --git a/readers/cassandra/doc.go b/readers/cassandra/doc.go
new file mode 100644
index 0000000..5280fb2
--- /dev/null
+++ b/readers/cassandra/doc.go
@@ -0,0 +1,5 @@
+// Copyright (c) Abstract Machines
+// SPDX-License-Identifier: Apache-2.0
+
+// Package cassandra contains a Cassandra-specific reader implementation.
+package cassandra
diff --git a/readers/cassandra/messages.go b/readers/cassandra/messages.go
new file mode 100644
index 0000000..e25ab6d
--- /dev/null
+++ b/readers/cassandra/messages.go
@@ -0,0 +1,195 @@
+// Copyright (c) Abstract Machines
+// SPDX-License-Identifier: Apache-2.0
+
+package cassandra
+
+import (
+	"encoding/json"
+	"fmt"
+
+	"github.com/absmach/magistrala/pkg/errors"
+	"github.com/absmach/magistrala/pkg/transformers/senml"
+	"github.com/absmach/magistrala/readers"
+	"github.com/gocql/gocql"
+)
+
+const (
+	// Table for SenML messages.
+	defTable = "messages"
+
+	// Error code for Undefined table error.
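+	// ReadAll maps this code to an empty result page instead of an error.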
+ undefinedTableCode = 8704 +) + +var _ readers.MessageRepository = (*cassandraRepository)(nil) + +type cassandraRepository struct { + session *gocql.Session +} + +// New instantiates Cassandra message repository. +func New(session *gocql.Session) readers.MessageRepository { + return cassandraRepository{ + session: session, + } +} + +func (cr cassandraRepository) ReadAll(chanID string, rpm readers.PageMetadata) (readers.MessagesPage, error) { + format := defTable + if rpm.Format != "" { + format = rpm.Format + } + + q, vals := buildQuery(chanID, rpm) + + selectCQL := fmt.Sprintf(`SELECT channel, subtopic, publisher, protocol, name, unit, + value, string_value, bool_value, data_value, sum, time, + update_time FROM messages WHERE channel = ? %s LIMIT ? + ALLOW FILTERING`, q) + countCQL := fmt.Sprintf(`SELECT COUNT(*) FROM %s WHERE channel = ? %s ALLOW FILTERING`, format, q) + + if format != defTable { + selectCQL = fmt.Sprintf(`SELECT channel, subtopic, publisher, protocol, created, payload FROM %s WHERE channel = ? %s LIMIT ? + ALLOW FILTERING`, format, q) + countCQL = fmt.Sprintf(`SELECT COUNT(*) FROM %s WHERE channel = ? %s ALLOW FILTERING`, format, q) + } + + iter := cr.session.Query(selectCQL, vals...).Iter() + defer iter.Close() + scanner := iter.Scanner() + + // skip first OFFSET rows + for i := uint64(0); i < rpm.Offset; i++ { + if !scanner.Next() { + break + } + } + + page := readers.MessagesPage{ + PageMetadata: rpm, + Messages: []readers.Message{}, + } + + switch format { + case defTable: + for scanner.Next() { + var msg senml.Message + if err := scanner.Scan(&msg.Channel, &msg.Subtopic, &msg.Publisher, &msg.Protocol, + &msg.Name, &msg.Unit, &msg.Value, &msg.StringValue, &msg.BoolValue, + &msg.DataValue, &msg.Sum, &msg.Time, &msg.UpdateTime); err != nil { + if e, ok := err.(gocql.RequestError); ok { + if e.Code() == undefinedTableCode { + return readers.MessagesPage{}, nil + } + } + return readers.MessagesPage{}, errors.Wrap(readers.ErrReadMessages, err) + } + page.Messages = append(page.Messages, msg) + } + default: + for scanner.Next() { + var msg jsonMessage + if err := scanner.Scan(&msg.Channel, &msg.Subtopic, &msg.Publisher, &msg.Protocol, &msg.Created, &msg.Payload); err != nil { + if e, ok := err.(gocql.RequestError); ok { + if e.Code() == undefinedTableCode { + return readers.MessagesPage{}, nil + } + } + return readers.MessagesPage{}, errors.Wrap(readers.ErrReadMessages, err) + } + m, err := msg.toMap() + if err != nil { + return readers.MessagesPage{}, errors.Wrap(readers.ErrReadMessages, err) + } + page.Messages = append(page.Messages, m) + } + } + + if err := cr.session.Query(countCQL, vals[:len(vals)-1]...).Scan(&page.Total); err != nil { + if e, ok := err.(gocql.RequestError); ok { + if e.Code() == undefinedTableCode { + return readers.MessagesPage{}, nil + } + } + return readers.MessagesPage{}, errors.Wrap(readers.ErrReadMessages, err) + } + + return page, nil +} + +func buildQuery(chanID string, rpm readers.PageMetadata) (string, []interface{}) { + var condCQL string + vals := []interface{}{chanID} + + var query map[string]interface{} + meta, err := json.Marshal(rpm) + if err != nil { + return condCQL, vals + } + if err := json.Unmarshal(meta, &query); err != nil { + return condCQL, vals + } + + for name, val := range query { + switch name { + case + "channel", + "subtopic", + "publisher", + "name", + "protocol": + vals = append(vals, val) + condCQL = fmt.Sprintf(`%s AND %s = ?`, condCQL, name) + case "v": + vals = append(vals, val) + comparator := 
readers.ParseValueComparator(query) + condCQL = fmt.Sprintf(`%s AND value %s ?`, condCQL, comparator) + case "vb": + vals = append(vals, val) + condCQL = fmt.Sprintf(`%s AND bool_value = ?`, condCQL) + case "vs": + vals = append(vals, val) + condCQL = fmt.Sprintf(`%s AND string_value = ?`, condCQL) + case "vd": + vals = append(vals, val) + condCQL = fmt.Sprintf(`%s AND data_value = ?`, condCQL) + case "from": + vals = append(vals, val) + condCQL = fmt.Sprintf(`%s AND time >= ?`, condCQL) + case "to": + vals = append(vals, val) + condCQL = fmt.Sprintf(`%s AND time < ?`, condCQL) + } + } + vals = append(vals, rpm.Offset+rpm.Limit) + + return condCQL, vals +} + +type jsonMessage struct { + ID string + Channel string + Created int64 + Subtopic string + Publisher string + Protocol string + Payload string +} + +func (msg jsonMessage) toMap() (map[string]interface{}, error) { + ret := map[string]interface{}{ + "id": msg.ID, + "channel": msg.Channel, + "created": msg.Created, + "subtopic": msg.Subtopic, + "publisher": msg.Publisher, + "protocol": msg.Protocol, + "payload": map[string]interface{}{}, + } + pld := make(map[string]interface{}) + if err := json.Unmarshal([]byte(msg.Payload), &pld); err != nil { + return nil, err + } + ret["payload"] = pld + return ret, nil +} diff --git a/readers/cassandra/messages_test.go b/readers/cassandra/messages_test.go new file mode 100644 index 0000000..323bc67 --- /dev/null +++ b/readers/cassandra/messages_test.go @@ -0,0 +1,591 @@ +// Copyright (c) Abstract Machines +// SPDX-License-Identifier: Apache-2.0 + +package cassandra_test + +import ( + "context" + "fmt" + "testing" + "time" + + "github.com/absmach/magistrala/pkg/transformers/json" + "github.com/absmach/magistrala/pkg/transformers/senml" + "github.com/absmach/magistrala/readers" + cwriter "github.com/absmach/mg-contrib/consumers/writers/cassandra" + casclient "github.com/absmach/mg-contrib/pkg/clients/cassandra" + "github.com/absmach/mg-contrib/pkg/testsutil" + creader "github.com/absmach/mg-contrib/readers/cassandra" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +const ( + keyspace = "magistrala" + subtopic = "subtopic" + msgsNum = 100 + limit = 10 + valueFields = 5 + mqttProt = "mqtt" + httpProt = "http" + msgName = "temperature" + + format1 = "format_1" + format2 = "format_2" + wrongID = "0" +) + +var ( + addr = "localhost" + v float64 = 5 + vs = "stringValue" + vb = true + vd = "dataValue" + sum float64 = 42 +) + +func TestReadSenml(t *testing.T) { + session, err := casclient.Connect(casclient.Config{ + Hosts: []string{addr}, + Keyspace: keyspace, + }) + require.Nil(t, err, fmt.Sprintf("failed to connect to Cassandra: %s", err)) + defer session.Close() + + err = casclient.InitDB(session, cwriter.Table) + require.Nil(t, err, fmt.Sprintf("failed to initialize to Cassandra: %s", err)) + writer := cwriter.New(session) + + chanID := testsutil.GenerateUUID(t) + pubID := testsutil.GenerateUUID(t) + pubID2 := testsutil.GenerateUUID(t) + wrongID := testsutil.GenerateUUID(t) + + m := senml.Message{ + Channel: chanID, + Publisher: pubID, + Protocol: mqttProt, + } + + messages := []senml.Message{} + valueMsgs := []senml.Message{} + boolMsgs := []senml.Message{} + stringMsgs := []senml.Message{} + dataMsgs := []senml.Message{} + queryMsgs := []senml.Message{} + now := float64(time.Now().Unix()) + + for i := 0; i < msgsNum; i++ { + // Mix possible values as well as value sum. 
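+		// Every fifth message also gets a sum, name, subtopic, and a different
+		// publisher and protocol, so the query-filter cases below can target it.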
+ msg := m + msg.Time = now - float64(i) + + count := i % valueFields + switch count { + case 0: + msg.Value = &v + valueMsgs = append(valueMsgs, msg) + case 1: + msg.BoolValue = &vb + boolMsgs = append(boolMsgs, msg) + case 2: + msg.StringValue = &vs + stringMsgs = append(stringMsgs, msg) + case 3: + msg.DataValue = &vd + dataMsgs = append(dataMsgs, msg) + case 4: + msg.Sum = &sum + msg.Subtopic = subtopic + msg.Protocol = httpProt + msg.Publisher = pubID2 + msg.Name = msgName + queryMsgs = append(queryMsgs, msg) + } + + messages = append(messages, msg) + } + + err = writer.ConsumeBlocking(context.TODO(), messages) + require.Nil(t, err, fmt.Sprintf("failed to store message to Cassandra: %s", err)) + + reader := creader.New(session) + + // Since messages are not saved in natural order, + // cases that return subset of messages are only + // checking data result set size, but not content. + cases := []struct { + desc string + chanID string + pageMeta readers.PageMetadata + page readers.MessagesPage + }{ + { + desc: "read message page for existing channel", + chanID: chanID, + pageMeta: readers.PageMetadata{ + Offset: 0, + Limit: msgsNum, + }, + page: readers.MessagesPage{ + Total: msgsNum, + Messages: fromSenml(messages), + }, + }, + { + desc: "read message page for non-existent channel", + chanID: wrongID, + pageMeta: readers.PageMetadata{ + Offset: 0, + Limit: msgsNum, + }, + page: readers.MessagesPage{ + Messages: []readers.Message{}, + }, + }, + { + desc: "read message last page", + chanID: chanID, + pageMeta: readers.PageMetadata{ + Offset: msgsNum - 20, + Limit: msgsNum, + }, + page: readers.MessagesPage{ + Total: msgsNum, + Messages: fromSenml(messages[msgsNum-20 : msgsNum]), + }, + }, + { + desc: "read message with non-existent subtopic", + chanID: chanID, + pageMeta: readers.PageMetadata{ + Offset: 0, + Limit: msgsNum, + Subtopic: "not-present", + }, + page: readers.MessagesPage{ + Messages: []readers.Message{}, + }, + }, + { + desc: "read message with subtopic", + chanID: chanID, + pageMeta: readers.PageMetadata{ + Offset: 0, + Limit: uint64(len(queryMsgs)), + Subtopic: subtopic, + }, + page: readers.MessagesPage{ + Total: uint64(len(queryMsgs)), + Messages: fromSenml(queryMsgs), + }, + }, + { + desc: "read message with publisher", + chanID: chanID, + pageMeta: readers.PageMetadata{ + Offset: 0, + Limit: uint64(len(queryMsgs)), + Publisher: pubID2, + }, + page: readers.MessagesPage{ + Total: uint64(len(queryMsgs)), + Messages: fromSenml(queryMsgs), + }, + }, + { + desc: "read message with wrong format", + chanID: chanID, + pageMeta: readers.PageMetadata{ + Format: "messagess", + Offset: 0, + Limit: uint64(len(queryMsgs)), + Publisher: pubID2, + }, + page: readers.MessagesPage{ + Total: 0, + Messages: []readers.Message{}, + }, + }, + { + desc: "read message with protocol", + chanID: chanID, + pageMeta: readers.PageMetadata{ + Offset: 0, + Limit: uint64(len(queryMsgs)), + Protocol: httpProt, + }, + page: readers.MessagesPage{ + Total: uint64(len(queryMsgs)), + Messages: fromSenml(queryMsgs), + }, + }, + { + desc: "read message with name", + chanID: chanID, + pageMeta: readers.PageMetadata{ + Offset: 0, + Limit: limit, + Name: msgName, + }, + page: readers.MessagesPage{ + Total: uint64(len(queryMsgs)), + Messages: fromSenml(queryMsgs[0:limit]), + }, + }, + { + desc: "read message with value", + chanID: chanID, + pageMeta: readers.PageMetadata{ + Offset: 0, + Limit: limit, + Value: v, + }, + page: readers.MessagesPage{ + Total: uint64(len(valueMsgs)), + Messages: 
fromSenml(valueMsgs[0:limit]), + }, + }, + { + desc: "read message with value and equal comparator", + chanID: chanID, + pageMeta: readers.PageMetadata{ + Offset: 0, + Limit: limit, + Value: v, + Comparator: readers.EqualKey, + }, + page: readers.MessagesPage{ + Total: uint64(len(valueMsgs)), + Messages: fromSenml(valueMsgs[0:limit]), + }, + }, + { + desc: "read message with value and lower-than comparator", + chanID: chanID, + pageMeta: readers.PageMetadata{ + Offset: 0, + Limit: limit, + Value: v + 1, + Comparator: readers.LowerThanKey, + }, + page: readers.MessagesPage{ + Total: uint64(len(valueMsgs)), + Messages: fromSenml(valueMsgs[0:limit]), + }, + }, + { + desc: "read message with value and lower-than-or-equal comparator", + chanID: chanID, + pageMeta: readers.PageMetadata{ + Offset: 0, + Limit: limit, + Value: v + 1, + Comparator: readers.LowerThanEqualKey, + }, + page: readers.MessagesPage{ + Total: uint64(len(valueMsgs)), + Messages: fromSenml(valueMsgs[0:limit]), + }, + }, + { + desc: "read message with value and greater-than comparator", + chanID: chanID, + pageMeta: readers.PageMetadata{ + Offset: 0, + Limit: limit, + Value: v - 1, + Comparator: readers.GreaterThanKey, + }, + page: readers.MessagesPage{ + Total: uint64(len(valueMsgs)), + Messages: fromSenml(valueMsgs[0:limit]), + }, + }, + { + desc: "read message with value and greater-than-or-equal comparator", + chanID: chanID, + pageMeta: readers.PageMetadata{ + Offset: 0, + Limit: limit, + Value: v - 1, + Comparator: readers.GreaterThanEqualKey, + }, + page: readers.MessagesPage{ + Total: uint64(len(valueMsgs)), + Messages: fromSenml(valueMsgs[0:limit]), + }, + }, + { + desc: "read message with boolean value", + chanID: chanID, + pageMeta: readers.PageMetadata{ + Offset: 0, + Limit: limit, + BoolValue: vb, + }, + page: readers.MessagesPage{ + Total: uint64(len(boolMsgs)), + Messages: fromSenml(boolMsgs[0:limit]), + }, + }, + { + desc: "read message with string value", + chanID: chanID, + pageMeta: readers.PageMetadata{ + Offset: 0, + Limit: limit, + StringValue: vs, + }, + page: readers.MessagesPage{ + Total: uint64(len(stringMsgs)), + Messages: fromSenml(stringMsgs[0:limit]), + }, + }, + { + desc: "read message with data value", + chanID: chanID, + pageMeta: readers.PageMetadata{ + Offset: 0, + Limit: limit, + DataValue: vd, + }, + page: readers.MessagesPage{ + Total: uint64(len(dataMsgs)), + Messages: fromSenml(dataMsgs[0:limit]), + }, + }, + { + desc: "read message with from", + chanID: chanID, + pageMeta: readers.PageMetadata{ + Offset: 0, + Limit: uint64(len(messages[0:21])), + From: messages[20].Time, + }, + page: readers.MessagesPage{ + Total: uint64(len(messages[0:21])), + Messages: fromSenml(messages[0:21]), + }, + }, + { + desc: "read message with to", + chanID: chanID, + pageMeta: readers.PageMetadata{ + Offset: 0, + Limit: uint64(len(messages[21:])), + To: messages[20].Time, + }, + page: readers.MessagesPage{ + Total: uint64(len(messages[21:])), + Messages: fromSenml(messages[21:]), + }, + }, + { + desc: "read message with from/to", + chanID: chanID, + pageMeta: readers.PageMetadata{ + Offset: 0, + Limit: limit, + From: messages[5].Time, + To: messages[0].Time, + }, + page: readers.MessagesPage{ + Total: 5, + Messages: fromSenml(messages[1:6]), + }, + }, + } + + for _, tc := range cases { + result, err := reader.ReadAll(tc.chanID, tc.pageMeta) + assert.Nil(t, err, fmt.Sprintf("%s: expected no error got %s", tc.desc, err)) + if tc.pageMeta.Offset == 0 { + assert.ElementsMatch(t, tc.page.Messages, 
result.Messages, fmt.Sprintf("%s: got incorrect list of senml Messages from ReadAll()", tc.desc)) + } + assert.Equal(t, tc.page.Total, result.Total, fmt.Sprintf("%s: expected %v got %v", tc.desc, tc.page.Total, result.Total)) + } +} + +func TestReadJSON(t *testing.T) { + session, err := casclient.Connect(casclient.Config{ + Hosts: []string{addr}, + Keyspace: keyspace, + }) + require.Nil(t, err, fmt.Sprintf("failed to connect to Cassandra: %s", err)) + defer session.Close() + writer := cwriter.New(session) + + id1 := testsutil.GenerateUUID(t) + m := json.Message{ + Channel: id1, + Publisher: id1, + Subtopic: "subtopic/format/some_json", + Protocol: "coap", + Payload: map[string]interface{}{ + "field_2": "value", + "field_3": false, + "field_4": 12.344, + "field_5": map[string]interface{}{ + "field_1": "value", + "field_2": 42.0, + }, + }, + } + messages1 := json.Messages{ + Format: format1, + } + msgs1 := []map[string]interface{}{} + now := time.Now().Unix() + for i := 0; i < msgsNum; i++ { + msg := m + msg.Created = now - int64(i) + messages1.Data = append(messages1.Data, msg) + m := toMap(msg) + msgs1 = append(msgs1, m) + } + + err = writer.ConsumeBlocking(context.TODO(), messages1) + require.Nil(t, err, fmt.Sprintf("expected no error got %s\n", err)) + + id2 := testsutil.GenerateUUID(t) + m = json.Message{ + Channel: id2, + Publisher: id2, + Subtopic: "subtopic/other_format/some_other_json", + Protocol: "udp", + Payload: map[string]interface{}{ + "field_pi": 3.14159265, + "false_value": false, + "field_map": map[string]interface{}{ + "field_1": "wrong_value", + "field_2": 24.5, + }, + }, + } + messages2 := json.Messages{ + Format: format2, + } + msgs2 := []map[string]interface{}{} + now = time.Now().Unix() + for i := 0; i < msgsNum; i++ { + msg := m + msg.Created = now - int64(i) + if i%2 == 0 { + msg.Protocol = httpProt + } + messages2.Data = append(messages2.Data, msg) + m := toMap(msg) + msgs2 = append(msgs2, m) + } + + err = writer.ConsumeBlocking(context.TODO(), messages2) + require.Nil(t, err, fmt.Sprintf("expected no error got %s\n", err)) + + httpMsgs := []map[string]interface{}{} + for i := 0; i < msgsNum; i += 2 { + httpMsgs = append(httpMsgs, msgs2[i]) + } + + reader := creader.New(session) + + cases := []struct { + desc string + chanID string + pageMeta readers.PageMetadata + page readers.MessagesPage + }{ + { + desc: "read message page for existing channel", + chanID: id1, + pageMeta: readers.PageMetadata{ + Format: messages1.Format, + Offset: 0, + Limit: 10, + }, + page: readers.MessagesPage{ + Total: 100, + Messages: fromJSON(msgs1[:10]), + }, + }, + { + desc: "read message page for non-existent channel", + chanID: wrongID, + pageMeta: readers.PageMetadata{ + Format: messages1.Format, + Offset: 0, + Limit: 10, + }, + page: readers.MessagesPage{ + Messages: []readers.Message{}, + }, + }, + { + desc: "read message last page", + chanID: id2, + pageMeta: readers.PageMetadata{ + Format: messages2.Format, + Offset: msgsNum - 20, + Limit: msgsNum, + }, + page: readers.MessagesPage{ + Total: msgsNum, + Messages: fromJSON(msgs2[msgsNum-20 : msgsNum]), + }, + }, + { + desc: "read message with protocol", + chanID: id2, + pageMeta: readers.PageMetadata{ + Format: messages2.Format, + Offset: 0, + Limit: uint64(msgsNum / 2), + Protocol: httpProt, + }, + page: readers.MessagesPage{ + Total: uint64(msgsNum / 2), + Messages: fromJSON(httpMsgs), + }, + }, + } + + for _, tc := range cases { + result, err := reader.ReadAll(tc.chanID, tc.pageMeta) + for i := 0; i < len(result.Messages); i++ { 
+ m := result.Messages[i] + // Remove id as it is not sent by the client. + delete(m.(map[string]interface{}), "id") + result.Messages[i] = m + } + assert.Nil(t, err, fmt.Sprintf("%s: expected no error got %s", tc.desc, err)) + assert.ElementsMatch(t, tc.page.Messages, result.Messages, fmt.Sprintf("%s: got incorrect list of json Messages from ReadAll()", tc.desc)) + assert.Equal(t, tc.page.Total, result.Total, fmt.Sprintf("%s: expected %v got %v", tc.desc, tc.page.Total, result.Total)) + } +} + +func fromSenml(in []senml.Message) []readers.Message { + var ret []readers.Message + for _, m := range in { + ret = append(ret, m) + } + return ret +} + +func fromJSON(msg []map[string]interface{}) []readers.Message { + var ret []readers.Message + for _, m := range msg { + ret = append(ret, m) + } + return ret +} + +func toMap(msg json.Message) map[string]interface{} { + return map[string]interface{}{ + "channel": msg.Channel, + "created": msg.Created, + "subtopic": msg.Subtopic, + "publisher": msg.Publisher, + "protocol": msg.Protocol, + "payload": map[string]interface{}(msg.Payload), + } +} diff --git a/readers/cassandra/setup_test.go b/readers/cassandra/setup_test.go new file mode 100644 index 0000000..28b5505 --- /dev/null +++ b/readers/cassandra/setup_test.go @@ -0,0 +1,83 @@ +// Copyright (c) Abstract Machines +// SPDX-License-Identifier: Apache-2.0 + +package cassandra_test + +import ( + "fmt" + "log" + "os" + "testing" + + mglog "github.com/absmach/magistrala/logger" + casclient "github.com/absmach/mg-contrib/pkg/clients/cassandra" + "github.com/gocql/gocql" + "github.com/ory/dockertest/v3" + "github.com/ory/dockertest/v3/docker" +) + +var logger, _ = mglog.New(os.Stdout, "info") + +func TestMain(m *testing.M) { + pool, err := dockertest.NewPool("") + if err != nil { + logger.Error(fmt.Sprintf("Could not connect to docker: %s", err)) + } + + container, err := pool.RunWithOptions(&dockertest.RunOptions{ + Repository: "cassandra", + Tag: "3.11.16", + }, func(config *docker.HostConfig) { + config.AutoRemove = true + config.RestartPolicy = docker.RestartPolicy{Name: "no"} + }) + if err != nil { + log.Fatalf("Could not start container: %s", err) + } + + port := container.GetPort("9042/tcp") + addr = fmt.Sprintf("%s:%s", addr, port) + + if err = pool.Retry(func() error { + if err := createKeyspace([]string{addr}); err != nil { + return err + } + + session, err := casclient.Connect(casclient.Config{ + Hosts: []string{addr}, + Keyspace: keyspace, + }) + if err != nil { + return err + } + defer session.Close() + + return nil + }); err != nil { + logger.Error(fmt.Sprintf("Could not connect to docker: %s", err)) + } + + code := m.Run() + + if err := pool.Purge(container); err != nil { + logger.Error(fmt.Sprintf("Could not purge container: %s", err)) + } + + os.Exit(code) +} + +func createKeyspace(hosts []string) error { + cluster := gocql.NewCluster(hosts...) + cluster.Consistency = gocql.Quorum + + session, err := cluster.CreateSession() + if err != nil { + return err + } + defer session.Close() + + keyspaceCQL := fmt.Sprintf(`CREATE KEYSPACE IF NOT EXISTS %s WITH replication = + {'class':'SimpleStrategy','replication_factor':'1'}`, keyspace) + + return session.Query(keyspaceCQL).Exec() +} diff --git a/readers/doc.go b/readers/doc.go new file mode 100644 index 0000000..e02d432 --- /dev/null +++ b/readers/doc.go @@ -0,0 +1,5 @@ +// Copyright (c) Abstract Machines +// SPDX-License-Identifier: Apache-2.0 + +// Package readers provides a set of readers for various formats. 
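+//
+// Each reader implements the MessageRepository interface, whose single
+// ReadAll method pages through the messages stored for a channel, e.g.:
+//
+//	page, err := repo.ReadAll(chanID, PageMetadata{Offset: 0, Limit: 10})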
+package readers
diff --git a/readers/influxdb/README.md b/readers/influxdb/README.md
new file mode 100644
index 0000000..ea515fc
--- /dev/null
+++ b/readers/influxdb/README.md
@@ -0,0 +1,126 @@
+# InfluxDB reader
+
+The InfluxDB reader provides a message repository implementation for InfluxDB.
+
+## Configuration
+
+The service is configured using the environment variables presented in the
+following table. Note that any unset variables will be replaced with their
+default values.
+
+| Variable | Description | Default |
+| -------- | ----------- | ------- |
+| MG_INFLUX_READER_LOG_LEVEL | Service log level | info |
+| MG_INFLUX_READER_HTTP_HOST | Service HTTP host | localhost |
+| MG_INFLUX_READER_HTTP_PORT | Service HTTP port | 9005 |
+| MG_INFLUX_READER_SERVER_CERT | Service HTTP server cert | "" |
+| MG_INFLUX_READER_SERVER_KEY | Service HTTP server key | "" |
+| MG_INFLUXDB_PROTOCOL | InfluxDB protocol | http |
+| MG_INFLUXDB_HOST | InfluxDB host name | localhost |
+| MG_INFLUXDB_PORT | Default port of InfluxDB database | 8086 |
+| MG_INFLUXDB_ADMIN_USER | Default user of InfluxDB database | magistrala |
+| MG_INFLUXDB_ADMIN_PASSWORD | Default password of InfluxDB user | magistrala |
+| MG_INFLUXDB_NAME | InfluxDB database name | magistrala |
+| MG_INFLUXDB_BUCKET | InfluxDB bucket name | magistrala-bucket |
+| MG_INFLUXDB_ORG | InfluxDB organization name | magistrala |
+| MG_INFLUXDB_TOKEN | InfluxDB API token | magistrala-token |
+| MG_INFLUXDB_DBURL | InfluxDB database URL | "" |
+| MG_INFLUXDB_USER_AGENT | InfluxDB user agent | "" |
+| MG_INFLUXDB_TIMEOUT | InfluxDB client connection readiness timeout | 1s |
+| MG_INFLUXDB_INSECURE_SKIP_VERIFY | InfluxDB insecure skip verify | false |
+| MG_THINGS_AUTH_GRPC_URL | Things service Auth gRPC URL | localhost:7000 |
+| MG_THINGS_AUTH_GRPC_TIMEOUT | Things service Auth gRPC request timeout in seconds | 1s |
+| MG_THINGS_AUTH_GRPC_CLIENT_TLS | Flag that indicates if TLS should be turned on | false |
+| MG_THINGS_AUTH_GRPC_CA_CERTS | Path to trusted CAs in PEM format | "" |
+| MG_AUTH_GRPC_URL | Auth service gRPC URL | localhost:7001 |
+| MG_AUTH_GRPC_TIMEOUT | Auth service gRPC request timeout in seconds | 1s |
+| MG_AUTH_GRPC_CLIENT_TLS | Flag that indicates if TLS should be turned on | false |
+| MG_AUTH_GRPC_CA_CERTS | Path to trusted CAs in PEM format | "" |
+| MG_JAEGER_URL | Jaeger server URL | http://jaeger:14268/api/traces |
+| MG_SEND_TELEMETRY | Send telemetry to magistrala call home server | true |
+| MG_INFLUX_READER_INSTANCE_ID | InfluxDB reader instance ID | |
+
+## Deployment
+
+The service itself is distributed as a Docker container. Check the [`influxdb-reader`](https://github.com/absmach/magistrala/blob/main/docker/addons/influxdb-reader/docker-compose.yml#L17-L40) service section in the docker-compose file to see how the service is deployed.
+
+To start the service, execute the following shell script:
+
+```bash
+# download the latest version of the service
+git clone https://github.com/absmach/magistrala
+
+cd magistrala
+
+# compile the influxdb-reader
+make influxdb-reader
+
+# copy binary to bin
+make install
+
+# Set the environment variables and run the service
+MG_INFLUX_READER_LOG_LEVEL=[Service log level] \
+MG_INFLUX_READER_HTTP_HOST=[Service HTTP host] \
+MG_INFLUX_READER_HTTP_PORT=[Service HTTP port] \
+MG_INFLUX_READER_HTTP_SERVER_CERT=[Service HTTP server certificate] \
+MG_INFLUX_READER_HTTP_SERVER_KEY=[Service HTTP server key] \
+MG_INFLUXDB_PROTOCOL=[InfluxDB protocol] \
+MG_INFLUXDB_HOST=[InfluxDB database host] \
+MG_INFLUXDB_PORT=[InfluxDB database port] \
+MG_INFLUXDB_ADMIN_USER=[InfluxDB admin user] \
+MG_INFLUXDB_ADMIN_PASSWORD=[InfluxDB admin password] \
+MG_INFLUXDB_NAME=[InfluxDB database name] \
+MG_INFLUXDB_BUCKET=[InfluxDB bucket] \
+MG_INFLUXDB_ORG=[InfluxDB org] \
+MG_INFLUXDB_TOKEN=[InfluxDB token] \
+MG_INFLUXDB_DBURL=[InfluxDB database URL] \
+MG_INFLUXDB_USER_AGENT=[InfluxDB user agent] \
+MG_INFLUXDB_TIMEOUT=[InfluxDB timeout] \
+MG_INFLUXDB_INSECURE_SKIP_VERIFY=[InfluxDB insecure skip verify] \
+MG_THINGS_AUTH_GRPC_URL=[Things service Auth gRPC URL] \
+MG_THINGS_AUTH_GRPC_TIMEOUT=[Things service Auth gRPC request timeout in seconds] \
+MG_THINGS_AUTH_GRPC_CLIENT_TLS=[Flag that indicates if TLS should be turned on] \
+MG_THINGS_AUTH_GRPC_CA_CERTS=[Path to trusted CAs in PEM format] \
+MG_AUTH_GRPC_URL=[Auth service gRPC URL] \
+MG_AUTH_GRPC_TIMEOUT=[Auth service gRPC request timeout in seconds] \
+MG_AUTH_GRPC_CLIENT_TLS=[Flag that indicates if TLS should be turned on] \
+MG_AUTH_GRPC_CA_CERTS=[Path to trusted CAs in PEM format] \
+MG_JAEGER_URL=[Jaeger server URL] \
+MG_SEND_TELEMETRY=[Send telemetry to magistrala call home server] \
+MG_INFLUX_READER_INSTANCE_ID=[InfluxDB reader instance ID] \
+$GOBIN/magistrala-influxdb-reader
+```
+
+### Using docker-compose
+
+This service can be deployed using docker containers. The docker-compose file is
+available in `<project_root>/docker/addons/influxdb-reader/docker-compose.yml`.
+In order to run all Magistrala core services, as well as the mentioned optional ones,
+execute the following command:
+
+```bash
+docker compose -f docker/docker-compose.yml up -d
+docker compose -f docker/addons/influxdb-reader/docker-compose.yml up -d
+```
+
+And, to use the default .env file, execute the following command:
+
+```bash
+docker compose -f docker/addons/influxdb-reader/docker-compose.yml --env-file docker/.env up -d
+```
+
+## Usage
+
+The service exposes an [HTTP API](https://docs.api.magistrala.abstractmachines.fr/?urls.primaryName=readers-openapi.yml) for fetching messages.
+
+Comparator Usage Guide:
+
+| Comparator | Usage | Example |
+| ---------- | ----- | ------- |
+| eq | Return values that are equal to the query | eq["active"] -> "active" |
+| ge | Return values that contain the query, including the query itself | ge["tiv"] -> "active" and "tiv" |
+| gt | Return values that contain the query and are not equal to it | gt["tiv"] -> "active" |
+| le | Return values that are substrings of the query, including the query itself | le["active"] -> "active" and "tiv" |
+| lt | Return values that are substrings of the query and not equal to it | lt["active"] -> "tiv" |
+
+Official docs can be found [here](https://docs.magistrala.abstractmachines.fr).
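+
+For reference, a read request against a locally running reader might look as
+follows; the channel ID and user token are placeholders, and the port assumes
+the default `MG_INFLUX_READER_HTTP_PORT` value:
+
+```bash
+# fetch the first 10 messages stored for the given channel
+curl -s -H "Authorization: Bearer <user_token>" \
+  "http://localhost:9005/channels/<channel_id>/messages?offset=0&limit=10"
+```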
diff --git a/readers/influxdb/doc.go b/readers/influxdb/doc.go new file mode 100644 index 0000000..b990892 --- /dev/null +++ b/readers/influxdb/doc.go @@ -0,0 +1,6 @@ +// Copyright (c) Abstract Machines +// SPDX-License-Identifier: Apache-2.0 + +// Package influxdb contains the domain concept definitions needed to +// support Magistrala InfluxDB reader service functionality. +package influxdb diff --git a/readers/influxdb/messages.go b/readers/influxdb/messages.go new file mode 100644 index 0000000..bd11c9b --- /dev/null +++ b/readers/influxdb/messages.go @@ -0,0 +1,312 @@ +// Copyright (c) Abstract Machines +// SPDX-License-Identifier: Apache-2.0 + +package influxdb + +import ( + "context" + "encoding/json" + "fmt" + "strings" + "time" + "unicode" + + "github.com/absmach/magistrala/pkg/errors" + jsont "github.com/absmach/magistrala/pkg/transformers/json" + "github.com/absmach/magistrala/pkg/transformers/senml" + "github.com/absmach/magistrala/readers" + influxdb2 "github.com/influxdata/influxdb-client-go/v2" +) + +const ( + // Measurement for SenML messages. + defMeasurement = "messages" +) + +var _ readers.MessageRepository = (*influxRepository)(nil) + +var errResultTime = errors.New("invalid result time") + +type RepoConfig struct { + Bucket string + Org string +} +type influxRepository struct { + cfg RepoConfig + client influxdb2.Client +} + +// New returns new InfluxDB reader. +func New(client influxdb2.Client, repoCfg RepoConfig) readers.MessageRepository { + return &influxRepository{ + repoCfg, + client, + } +} + +func (repo *influxRepository) ReadAll(chanID string, rpm readers.PageMetadata) (readers.MessagesPage, error) { + format := defMeasurement + if rpm.Format != "" { + format = rpm.Format + } + + queryAPI := repo.client.QueryAPI(repo.cfg.Org) + condition, timeRange := fmtCondition(chanID, rpm) + + query := fmt.Sprintf(` + import "influxdata/influxdb/v1" + import "strings" + from(bucket: "%s") + %s + |> v1.fieldsAsCols() + |> group() + |> filter(fn: (r) => r._measurement == "%s") + %s + |> sort(columns: ["_time"], desc: true) + |> limit(n:%d,offset:%d) + |> yield(name: "sort")`, + repo.cfg.Bucket, + timeRange, + format, + condition, + rpm.Limit, rpm.Offset, + ) + + resp, err := queryAPI.Query(context.Background(), query) + if err != nil { + return readers.MessagesPage{}, errors.Wrap(readers.ErrReadMessages, err) + } + + var messages []readers.Message + var valueMap map[string]interface{} + for resp.Next() { + valueMap = resp.Record().Values() + msg, err := parseMessage(format, valueMap) + if err != nil { + return readers.MessagesPage{}, err + } + messages = append(messages, msg) + } + if resp.Err() != nil { + return readers.MessagesPage{}, errors.Wrap(readers.ErrReadMessages, resp.Err()) + } + + total, err := repo.count(format, condition, timeRange) + if err != nil { + return readers.MessagesPage{}, errors.Wrap(readers.ErrReadMessages, err) + } + + page := readers.MessagesPage{ + PageMetadata: rpm, + Total: total, + Messages: messages, + } + + return page, nil +} + +func (repo *influxRepository) count(measurement, condition, timeRange string) (uint64, error) { + cmd := fmt.Sprintf(` + import "influxdata/influxdb/v1" + import "strings" + from(bucket: "%s") + %s + |> v1.fieldsAsCols() + |> filter(fn: (r) => r._measurement == "%s") + %s + |> group() + |> count(column:"_measurement") + |> yield(name: "count") + `, + repo.cfg.Bucket, + timeRange, + measurement, + condition) + queryAPI := repo.client.QueryAPI(repo.cfg.Org) + resp, err := queryAPI.Query(context.Background(), cmd) + if 
err != nil { + return 0, err + } + + switch resp.Next() { + case true: + valueMap := resp.Record().Values() + + val, ok := valueMap["_measurement"].(int64) + if !ok { + return 0, nil + } + return uint64(val), nil + + default: + // same as no rows. + return 0, nil + } +} + +func fmtCondition(chanID string, rpm readers.PageMetadata) (string, string) { + var timeRange string + var sb strings.Builder + sb.WriteString(fmt.Sprintf(`|> filter(fn: (r) => r["channel"] == "%s" )`, chanID)) + + var query map[string]interface{} + meta, err := json.Marshal(rpm) + if err != nil { + return sb.String(), timeRange + } + + if err := json.Unmarshal(meta, &query); err != nil { + return sb.String(), timeRange + } + + // range(start:...) is a must for FluxQL syntax. + from := `start: time(v:0)` + if value, ok := query["from"]; ok { + fromValue := int64(value.(float64)*1e9) - 1 + from = fmt.Sprintf(`start: time(v: %d )`, fromValue) + } + // range(...,stop:) is an option for FluxQL syntax. + to := "" + if value, ok := query["to"]; ok { + toValue := int64(value.(float64) * 1e9) + to = fmt.Sprintf(`, stop: time(v: %d )`, toValue) + } + // timeRange returned separately because + // in FluxQL time range must be at the + // beginning of the query. + timeRange = fmt.Sprintf(`|> range(%s %s)`, from, to) + + for name, value := range query { + switch name { + case + "channel", + "subtopic", + "publisher", + "name", + "protocol": + sb.WriteString(fmt.Sprintf(`|> filter(fn: (r) => r.%s == "%s" )`, name, value)) + case "v": + comparator := readers.ParseValueComparator(query) + // flux eq comparator is different + if comparator == "=" { + comparator = "==" + } + sb.WriteString(`|> filter(fn: (r) => exists r.value)`) + sb.WriteString(fmt.Sprintf(`|> filter(fn: (r) => r.value %s %v)`, comparator, value)) + case "vb": + sb.WriteString(`|> filter(fn: (r) => exists r.boolValue)`) + sb.WriteString(fmt.Sprintf(`|> filter(fn: (r) => r.boolValue == %v)`, value)) + case "vs": + comparator := readers.ParseValueComparator(query) + sb.WriteString(`|> filter(fn: (r) => exists r.stringValue)`) + switch comparator { + case "=": + sb.WriteString(fmt.Sprintf(`|> filter(fn: (r) => r.stringValue == "%s")`, value)) + case "<": + sb.WriteString(fmt.Sprintf(`|> filter(fn: (r) => strings.containsStr(v: "%s", substr: r.stringValue) == true)`, value)) + sb.WriteString(fmt.Sprintf(`|> filter(fn: (r) => r.stringValue !="%s")`, value)) + case "<=": + sb.WriteString(fmt.Sprintf(`|> filter(fn: (r) => strings.containsStr(v: "%s", substr: r.stringValue) == true)`, value)) + case ">": + sb.WriteString(fmt.Sprintf(`|> filter(fn: (r) => strings.containsStr(v: r.stringValue, substr: "%s") == true)`, value)) + sb.WriteString(fmt.Sprintf(`|> filter(fn: (r) => r.stringValue != "%s")`, value)) + case ">=": + sb.WriteString(fmt.Sprintf(`|> filter(fn: (r) => strings.containsStr(v: r.stringValue, substr: "%s") == true)`, value)) + } + case "vd": + comparator := readers.ParseValueComparator(query) + if comparator == "=" { + comparator = "==" + } + sb.WriteString(`|> filter(fn: (r) => exists r.dataValue)`) + sb.WriteString(fmt.Sprintf(`|> filter(fn: (r) => r.dataValue%s"%s")`, comparator, value)) + } + } + + return sb.String(), timeRange +} + +func parseMessage(measurement string, valueMap map[string]interface{}) (interface{}, error) { + switch measurement { + case defMeasurement: + return parseSenml(valueMap) + default: + return parseJSON(valueMap) + } +} + +func underscore(name string) string { + var buff []rune + idx := 0 + for i, c := range name { + if 
unicode.IsUpper(c) { + buff = append(buff, []rune(name[idx:i])...) + buff = append(buff, []rune{'_', unicode.ToLower(c)}...) + idx = i + 1 + continue + } + } + buff = append(buff, []rune(name[idx:])...) + return string(buff) +} + +func parseSenml(valueMap map[string]interface{}) (interface{}, error) { + msg := make(map[string]interface{}) + + for k, v := range valueMap { + k = underscore(k) + if k == "_time" { + k = "time" + t, ok := v.(time.Time) + if !ok { + return nil, errResultTime + } + v := float64(t.UnixNano()) / 1e9 + msg[k] = v + continue + } + msg[k] = v + } + data, err := json.Marshal(msg) + if err != nil { + return nil, err + } + senmlMsg := senml.Message{} + if err := json.Unmarshal(data, &senmlMsg); err != nil { + return nil, err + } + return senmlMsg, nil +} + +func parseJSON(valueMap map[string]interface{}) (interface{}, error) { + ret := make(map[string]interface{}) + pld := make(map[string]interface{}) + for name, field := range valueMap { + switch name { + case "channel", "created", "subtopic", "publisher", "protocol": + ret[name] = field + case "_time": + name = "time" + t, ok := field.(time.Time) + if !ok { + return nil, errResultTime + } + v := float64(t.UnixNano()) / 1e9 + ret[name] = v + continue + case "table", "_start", "_stop", "result", "_measurement": + default: + v := field + if val, ok := v.(json.Number); ok { + var err error + v, err = val.Float64() + if err != nil { + return nil, err + } + } + pld[name] = v + } + } + ret["payload"] = jsont.ParseFlat(pld) + return ret, nil +} diff --git a/readers/influxdb/messages_test.go b/readers/influxdb/messages_test.go new file mode 100644 index 0000000..579c01e --- /dev/null +++ b/readers/influxdb/messages_test.go @@ -0,0 +1,726 @@ +// Copyright (c) Abstract Machines +// SPDX-License-Identifier: Apache-2.0 + +package influxdb_test + +import ( + "context" + "fmt" + "testing" + "time" + + "github.com/absmach/magistrala/pkg/transformers/json" + "github.com/absmach/magistrala/pkg/transformers/senml" + "github.com/absmach/magistrala/readers" + iwriter "github.com/absmach/mg-contrib/consumers/writers/influxdb" + "github.com/absmach/mg-contrib/pkg/testsutil" + ireader "github.com/absmach/mg-contrib/readers/influxdb" + influxdb2 "github.com/influxdata/influxdb-client-go/v2" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +const ( + subtopic = "topic" + msgsNum = 100 + limit = 10 + valueFields = 5 + mqttProt = "mqtt" + httpProt = "http" + msgName = "temperature" + offset = 21 + format1 = "format1" + format2 = "format2" + wrongID = "wrong_id" +) + +var ( + v float64 = 5 + vs string = "stringValue" + vb bool = true + vd string = "dataValue" + sum float64 = 42 + + client influxdb2.Client + repoCfg = struct { + Bucket string + Org string + }{ + Bucket: dbBucket, + Org: dbOrg, + } +) + +func TestReadSenml(t *testing.T) { + asyncWriter := iwriter.NewAsync(client, repoCfg) + + chanID := testsutil.GenerateUUID(t) + pubID := testsutil.GenerateUUID(t) + pubID2 := testsutil.GenerateUUID(t) + wrongID := testsutil.GenerateUUID(t) + + m := senml.Message{ + Channel: chanID, + Publisher: pubID, + Protocol: mqttProt, + Name: "name", + Unit: "U", + UpdateTime: 1234, + } + + messages := []senml.Message{} + valueMsgs := []senml.Message{} + boolMsgs := []senml.Message{} + stringMsgs := []senml.Message{} + dataMsgs := []senml.Message{} + queryMsgs := []senml.Message{} + now := float64(time.Now().Unix()) + + for i := 0; i < msgsNum; i++ { + // Mix possible values as well as value sum. 
+ msg := m + msg.Time = now - float64(i) + + count := i % valueFields + switch count { + case 0: + msg.Value = &v + valueMsgs = append(valueMsgs, msg) + case 1: + msg.BoolValue = &vb + boolMsgs = append(boolMsgs, msg) + case 2: + msg.StringValue = &vs + stringMsgs = append(stringMsgs, msg) + case 3: + msg.DataValue = &vd + dataMsgs = append(dataMsgs, msg) + case 4: + msg.Sum = &sum + msg.Subtopic = subtopic + msg.Protocol = httpProt + msg.Publisher = pubID2 + msg.Name = msgName + queryMsgs = append(queryMsgs, msg) + } + messages = append(messages, msg) + } + + errs := asyncWriter.Errors() + asyncWriter.ConsumeAsync(context.TODO(), messages) + err := <-errs + assert.Nil(t, err, fmt.Sprintf("Save operation expected to succeed: %s.\n", err)) + + reader := ireader.New(client, repoCfg) + + cases := []struct { + desc string + chanID string + pageMeta readers.PageMetadata + page readers.MessagesPage + }{ + { + desc: "read message page for existing channel", + chanID: chanID, + pageMeta: readers.PageMetadata{ + Offset: 0, + Limit: msgsNum, + }, + page: readers.MessagesPage{ + Total: msgsNum, + Messages: fromSenml(messages), + }, + }, + { + desc: "read message page for non-existent channel", + chanID: wrongID, + pageMeta: readers.PageMetadata{ + Offset: 0, + Limit: msgsNum, + }, + page: readers.MessagesPage{ + Messages: []readers.Message{}, + }, + }, + { + desc: "read message last page", + chanID: chanID, + pageMeta: readers.PageMetadata{ + Offset: msgsNum - 20, + Limit: msgsNum, + }, + page: readers.MessagesPage{ + Total: msgsNum, + Messages: fromSenml(messages[msgsNum-20 : msgsNum]), + }, + }, + { + desc: "read message with non-existent subtopic", + chanID: chanID, + pageMeta: readers.PageMetadata{ + Offset: 0, + Limit: msgsNum, + Subtopic: "not-present", + }, + page: readers.MessagesPage{ + Messages: []readers.Message{}, + }, + }, + { + desc: "read message with subtopic", + chanID: chanID, + pageMeta: readers.PageMetadata{ + Offset: 0, + Limit: uint64(len(queryMsgs)), + Subtopic: subtopic, + }, + page: readers.MessagesPage{ + Total: uint64(len(queryMsgs)), + Messages: fromSenml(queryMsgs), + }, + }, + { + desc: "read message with publisher", + chanID: chanID, + pageMeta: readers.PageMetadata{ + Offset: 0, + Limit: uint64(len(queryMsgs)), + Publisher: pubID2, + }, + page: readers.MessagesPage{ + Total: uint64(len(queryMsgs)), + Messages: fromSenml(queryMsgs), + }, + }, + { + desc: "read message with wrong format", + chanID: chanID, + pageMeta: readers.PageMetadata{ + Format: "messagess", + Offset: 0, + Limit: uint64(len(queryMsgs)), + Publisher: pubID2, + }, + page: readers.MessagesPage{ + Total: 0, + Messages: []readers.Message{}, + }, + }, + { + desc: "read message with protocol", + chanID: chanID, + pageMeta: readers.PageMetadata{ + Offset: 0, + Limit: uint64(len(queryMsgs)), + Protocol: httpProt, + }, + page: readers.MessagesPage{ + Total: uint64(len(queryMsgs)), + Messages: fromSenml(queryMsgs), + }, + }, + { + desc: "read message with name", + chanID: chanID, + pageMeta: readers.PageMetadata{ + Offset: 0, + Limit: limit, + Name: msgName, + }, + page: readers.MessagesPage{ + Total: uint64(len(queryMsgs)), + Messages: fromSenml(queryMsgs[0:limit]), + }, + }, + { + desc: "read message with value", + chanID: chanID, + pageMeta: readers.PageMetadata{ + Offset: 0, + Limit: limit, + Value: v, + }, + page: readers.MessagesPage{ + Total: uint64(len(valueMsgs)), + Messages: fromSenml(valueMsgs[0:limit]), + }, + }, + { + desc: "read message with value and equal comparator", + chanID: chanID, + 
pageMeta: readers.PageMetadata{ + Offset: 0, + Limit: limit, + Value: v, + Comparator: readers.EqualKey, + }, + page: readers.MessagesPage{ + Total: uint64(len(valueMsgs)), + Messages: fromSenml(valueMsgs[0:limit]), + }, + }, + { + desc: "read message with value and lower-than comparator", + chanID: chanID, + pageMeta: readers.PageMetadata{ + Offset: 0, + Limit: limit, + Value: v + 1, + Comparator: readers.LowerThanKey, + }, + page: readers.MessagesPage{ + Total: uint64(len(valueMsgs)), + Messages: fromSenml(valueMsgs[0:limit]), + }, + }, + { + desc: "read message with value and lower-than-or-equal comparator", + chanID: chanID, + pageMeta: readers.PageMetadata{ + Offset: 0, + Limit: limit, + Value: v + 1, + Comparator: readers.LowerThanEqualKey, + }, + page: readers.MessagesPage{ + Total: uint64(len(valueMsgs)), + Messages: fromSenml(valueMsgs[0:limit]), + }, + }, + { + desc: "read message with value and greater-than comparator", + chanID: chanID, + pageMeta: readers.PageMetadata{ + Offset: 0, + Limit: limit, + Value: v - 1, + Comparator: readers.GreaterThanKey, + }, + page: readers.MessagesPage{ + Total: uint64(len(valueMsgs)), + Messages: fromSenml(valueMsgs[0:limit]), + }, + }, + { + desc: "read message with value and greater-than-or-equal comparator", + chanID: chanID, + pageMeta: readers.PageMetadata{ + Offset: 0, + Limit: limit, + Value: v - 1, + Comparator: readers.GreaterThanEqualKey, + }, + page: readers.MessagesPage{ + Total: uint64(len(valueMsgs)), + Messages: fromSenml(valueMsgs[0:limit]), + }, + }, + { + desc: "read message with boolean value", + chanID: chanID, + pageMeta: readers.PageMetadata{ + Offset: 0, + Limit: limit, + BoolValue: vb, + }, + page: readers.MessagesPage{ + Total: uint64(len(boolMsgs)), + Messages: fromSenml(boolMsgs[0:limit]), + }, + }, + { + desc: "read message with string value", + chanID: chanID, + pageMeta: readers.PageMetadata{ + Offset: 0, + Limit: limit, + StringValue: vs, + }, + page: readers.MessagesPage{ + Total: uint64(len(stringMsgs)), + Messages: fromSenml(stringMsgs[0:limit]), + }, + }, + { + desc: "read message with string value and equal comparator", + chanID: chanID, + pageMeta: readers.PageMetadata{ + Offset: 0, + Limit: limit, + StringValue: vs, + Comparator: readers.EqualKey, + }, + page: readers.MessagesPage{ + Total: uint64(len(stringMsgs)), + Messages: fromSenml(stringMsgs[0:limit]), + }, + }, + { + desc: "read message with string value and lower-than comparator", + chanID: chanID, + pageMeta: readers.PageMetadata{ + Offset: 0, + Limit: limit, + StringValue: "a stringValues b", + Comparator: readers.LowerThanKey, + }, + page: readers.MessagesPage{ + Total: uint64(len(stringMsgs)), + Messages: fromSenml(stringMsgs[0:limit]), + }, + }, + { + desc: "read message with string value and lower-than-or-equal comparator", + chanID: chanID, + pageMeta: readers.PageMetadata{ + Offset: 0, + Limit: limit, + StringValue: vs, + Comparator: readers.LowerThanEqualKey, + }, + page: readers.MessagesPage{ + Total: uint64(len(stringMsgs)), + Messages: fromSenml(stringMsgs[0:limit]), + }, + }, + { + desc: "read message with string value and greater-than comparator", + chanID: chanID, + pageMeta: readers.PageMetadata{ + Offset: 0, + Limit: limit, + StringValue: "alu", + Comparator: readers.GreaterThanKey, + }, + page: readers.MessagesPage{ + Total: uint64(len(stringMsgs)), + Messages: fromSenml(stringMsgs[0:limit]), + }, + }, + { + desc: "read message with string value and greater-than-or-equal comparator", + chanID: chanID, + pageMeta: 
readers.PageMetadata{ + Offset: 0, + Limit: limit, + StringValue: vs, + Comparator: readers.GreaterThanEqualKey, + }, + page: readers.MessagesPage{ + Total: uint64(len(stringMsgs)), + Messages: fromSenml(stringMsgs[0:limit]), + }, + }, + { + desc: "read message with data value", + chanID: chanID, + pageMeta: readers.PageMetadata{ + Offset: 0, + Limit: limit, + DataValue: vd, + }, + page: readers.MessagesPage{ + Total: uint64(len(dataMsgs)), + Messages: fromSenml(dataMsgs[0:limit]), + }, + }, + { + desc: "read message with data value and lower-than comparator", + chanID: chanID, + pageMeta: readers.PageMetadata{ + Offset: 0, + Limit: limit, + DataValue: vd + string(rune(1)), + Comparator: readers.LowerThanKey, + }, + page: readers.MessagesPage{ + Total: uint64(len(dataMsgs)), + Messages: fromSenml(dataMsgs[0:limit]), + }, + }, + { + desc: "read message with data value and lower-than-or-equal comparator", + chanID: chanID, + pageMeta: readers.PageMetadata{ + Offset: 0, + Limit: limit, + DataValue: vd + string(rune(1)), + Comparator: readers.LowerThanEqualKey, + }, + page: readers.MessagesPage{ + Total: uint64(len(dataMsgs)), + Messages: fromSenml(dataMsgs[0:limit]), + }, + }, + { + desc: "read message with data value and greater-than comparator", + chanID: chanID, + pageMeta: readers.PageMetadata{ + Offset: 0, + Limit: limit, + DataValue: vd[:len(vd)-1] + string(rune(1)), + Comparator: readers.GreaterThanKey, + }, + page: readers.MessagesPage{ + Total: uint64(len(dataMsgs)), + Messages: fromSenml(dataMsgs[0:limit]), + }, + }, + { + desc: "read message with data value and greater-than-or-equal comparator", + chanID: chanID, + pageMeta: readers.PageMetadata{ + Offset: 0, + Limit: limit, + DataValue: vd[:len(vd)-1] + string(rune(1)), + Comparator: readers.GreaterThanEqualKey, + }, + page: readers.MessagesPage{ + Total: uint64(len(dataMsgs)), + Messages: fromSenml(dataMsgs[0:limit]), + }, + }, + { + desc: "read message with from", + chanID: chanID, + pageMeta: readers.PageMetadata{ + Offset: 0, + Limit: uint64(len(messages[0:21])), + From: messages[20].Time, + }, + page: readers.MessagesPage{ + Total: uint64(len(messages[0:21])), + Messages: fromSenml(messages[0:21]), + }, + }, + { + desc: "read message with to", + chanID: chanID, + pageMeta: readers.PageMetadata{ + Offset: 0, + Limit: uint64(len(messages[21:])), + To: messages[20].Time, + }, + page: readers.MessagesPage{ + Total: uint64(len(messages[21:])), + Messages: fromSenml(messages[21:]), + }, + }, + { + desc: "failing test case : read message with from", + chanID: chanID, + pageMeta: readers.PageMetadata{ + Offset: 0, + Limit: uint64(len(messages[0 : offset+1])), + From: messages[offset].Time, + }, + page: readers.MessagesPage{ + Total: uint64(len(messages[0 : offset+1])), + Messages: fromSenml(messages[0 : offset+1]), + }, + }, + { + desc: "failing test case : read message with to", + chanID: chanID, + pageMeta: readers.PageMetadata{ + Offset: 0, + Limit: uint64(len(messages[offset-1:])), + To: messages[offset-1].Time, + }, + page: readers.MessagesPage{ + Total: uint64(len(messages[offset:])), + Messages: fromSenml(messages[offset:]), + }, + }, + { + desc: "read message with from/to", + chanID: chanID, + pageMeta: readers.PageMetadata{ + Offset: 0, + Limit: limit, + From: messages[5].Time, + To: messages[0].Time, + }, + page: readers.MessagesPage{ + Total: uint64(len(messages[0+1 : 5+1])), + Messages: fromSenml(messages[0+1 : 5+1]), + }, + }, + } + + for _, tc := range cases { + result, err := reader.ReadAll(tc.chanID, tc.pageMeta) + 
assert.Nil(t, err, fmt.Sprintf("%s: got unexpected error: %s\n", tc.desc, err)) + assert.ElementsMatch(t, tc.page.Messages, result.Messages, fmt.Sprintf("%s: expected: %v, got: %v\n", tc.desc, tc.page.Messages, result.Messages)) + assert.Equal(t, tc.page.Total, result.Total, fmt.Sprintf("%s: expected %d got %d\n", tc.desc, tc.page.Total, result.Total)) + } +} + +func TestReadJSON(t *testing.T) { + asyncWriter := iwriter.NewAsync(client, repoCfg) + + id1 := testsutil.GenerateUUID(t) + m := json.Message{ + Channel: id1, + Publisher: id1, + Created: time.Now().Unix() * 1e9, + Subtopic: "subtopic/format/some_json", + Protocol: "coap", + Payload: map[string]interface{}{ + "field_1": 123.0, + "field_2": "value", + "field_3": false, + }, + } + messages1 := json.Messages{ + Format: format1, + } + msgs1 := []map[string]interface{}{} + for i := 0; i < msgsNum; i++ { + messages1.Data = append(messages1.Data, m) + m := toMap(m) + msgs1 = append(msgs1, m) + } + + errs := asyncWriter.Errors() + asyncWriter.ConsumeAsync(context.TODO(), messages1) + err := <-errs + require.Nil(t, err, fmt.Sprintf("Save operation expected to succeed: %s.\n", err)) + + id2 := testsutil.GenerateUUID(t) + m = json.Message{ + Channel: id2, + Publisher: id2, + Created: time.Now().Unix()*1e9 + msgsNum, + Subtopic: "subtopic/other_format/some_other_json", + Protocol: "udp", + Payload: map[string]interface{}{ + "field_pi": 3.14159265, + }, + } + messages2 := json.Messages{ + Format: format2, + } + msgs2 := []map[string]interface{}{} + for i := 0; i < msgsNum; i++ { + msg := m + if i%2 == 0 { + msg.Protocol = httpProt + } + messages2.Data = append(messages2.Data, msg) + m := toMap(msg) + msgs2 = append(msgs2, m) + } + + // Test async + asyncWriter.ConsumeAsync(context.TODO(), messages2) + err = <-errs + assert.Nil(t, err, fmt.Sprintf("Save operation expected to succeed: %s.\n", err)) + + httpMsgs := []map[string]interface{}{} + for i := 0; i < msgsNum; i += 2 { + httpMsgs = append(httpMsgs, msgs2[i]) + } + reader := ireader.New(client, repoCfg) + + cases := []struct { + desc string + chanID string + pageMeta readers.PageMetadata + page readers.MessagesPage + }{ + { + desc: "read message page for existing channel", + chanID: id1, + pageMeta: readers.PageMetadata{ + Format: messages1.Format, + Offset: 0, + Limit: 1, + }, + page: readers.MessagesPage{ + Total: msgsNum, + Messages: fromJSON(msgs1[:1]), + }, + }, + { + desc: "read message page for non-existent channel", + chanID: wrongID, + pageMeta: readers.PageMetadata{ + Format: messages1.Format, + Offset: 0, + Limit: 10, + }, + page: readers.MessagesPage{ + Messages: []readers.Message{}, + }, + }, + { + desc: "read message last page", + chanID: id2, + pageMeta: readers.PageMetadata{ + Format: messages2.Format, + Offset: msgsNum - 20, + Limit: msgsNum, + }, + page: readers.MessagesPage{ + Total: msgsNum, + Messages: fromJSON(msgs2[msgsNum-20 : msgsNum]), + }, + }, + { + desc: "read message with protocol", + chanID: id2, + pageMeta: readers.PageMetadata{ + Format: messages2.Format, + Offset: 0, + Limit: uint64(msgsNum / 2), + Protocol: httpProt, + }, + page: readers.MessagesPage{ + Total: uint64(msgsNum / 2), + Messages: fromJSON(httpMsgs), + }, + }, + } + + for _, tc := range cases { + result, err := reader.ReadAll(tc.chanID, tc.pageMeta) + assert.Nil(t, err, fmt.Sprintf("%s: got unexpected error: %s", tc.desc, err)) + + for i := 0; i < len(result.Messages); i++ { + m := result.Messages[i] + // Remove time as it is not sent by the client. 
+ delete(m.(map[string]interface{}), "time") + + result.Messages[i] = m + } + assert.ElementsMatch(t, tc.page.Messages, result.Messages, fmt.Sprintf("%s: expected \n%v got \n%v", tc.desc, tc.page.Messages, result.Messages)) + assert.Equal(t, tc.page.Total, result.Total, fmt.Sprintf("%s: got incorrect list of json Messages from ReadAll()", tc.desc)) + } +} + +func fromSenml(in []senml.Message) []readers.Message { + var ret []readers.Message + for _, m := range in { + ret = append(ret, m) + } + return ret +} + +func fromJSON(msg []map[string]interface{}) []readers.Message { + var ret []readers.Message + for _, m := range msg { + ret = append(ret, m) + } + return ret +} + +func toMap(msg json.Message) map[string]interface{} { + return map[string]interface{}{ + "channel": msg.Channel, + "subtopic": msg.Subtopic, + "publisher": msg.Publisher, + "protocol": msg.Protocol, + "payload": map[string]interface{}(msg.Payload), + } +} diff --git a/readers/influxdb/setup_test.go b/readers/influxdb/setup_test.go new file mode 100644 index 0000000..c8b1c98 --- /dev/null +++ b/readers/influxdb/setup_test.go @@ -0,0 +1,100 @@ +// Copyright (c) Abstract Machines +// SPDX-License-Identifier: Apache-2.0 + +package influxdb_test + +import ( + "context" + "fmt" + "log" + "os" + "os/signal" + "syscall" + "testing" + "time" + + mglog "github.com/absmach/magistrala/logger" + influxdata "github.com/influxdata/influxdb-client-go/v2" + "github.com/ory/dockertest/v3" + "github.com/ory/dockertest/v3/docker" +) + +var ( + testLog, _ = mglog.New(os.Stdout, "info") + address string +) + +const ( + dbToken = "test-token" + dbOrg = "test-org" + dbAdmin = "test-admin" + dbPass = "test-password" + dbBucket = "test-bucket" + dbInitMode = "setup" + dbFluxEnabled = "true" + dbBindAddress = ":8088" + port = "8086/tcp" + db = "influxdb" + dbVersion = "2.7-alpine" + poolMaxWait = 120 * time.Second +) + +func TestMain(m *testing.M) { + pool, err := dockertest.NewPool("") + if err != nil { + testLog.Error(fmt.Sprintf("Could not connect to docker: %s", err)) + } + + container, err := pool.RunWithOptions(&dockertest.RunOptions{ + Repository: db, + Tag: dbVersion, + Env: []string{ + fmt.Sprintf("DOCKER_INFLUXDB_INIT_MODE=%s", dbInitMode), + fmt.Sprintf("DOCKER_INFLUXDB_INIT_USERNAME=%s", dbAdmin), + fmt.Sprintf("DOCKER_INFLUXDB_INIT_PASSWORD=%s", dbPass), + fmt.Sprintf("DOCKER_INFLUXDB_INIT_ORG=%s", dbOrg), + fmt.Sprintf("DOCKER_INFLUXDB_INIT_BUCKET=%s", dbBucket), + fmt.Sprintf("DOCKER_INFLUXDB_INIT_ADMIN_TOKEN=%s", dbToken), + fmt.Sprintf("INFLUXDB_HTTP_FLUX_ENABLED=%s", dbFluxEnabled), + fmt.Sprintf("INFLUXDB_BIND_ADDRESS=%s", dbBindAddress), + }, + }, func(config *docker.HostConfig) { + config.AutoRemove = true + config.RestartPolicy = docker.RestartPolicy{Name: "no"} + }) + if err != nil { + log.Fatalf("Could not start container: %s", err) + } + + handleInterrupt(m, pool, container) + + address = fmt.Sprintf("%s:%s", "http://localhost", container.GetPort(port)) + pool.MaxWait = poolMaxWait + + if err := pool.Retry(func() error { + client = influxdata.NewClient(address, dbToken) + _, err = client.Ready(context.Background()) + return err + }); err != nil { + testLog.Error(fmt.Sprintf("Could not connect to docker: %s", err)) + } + code := m.Run() + + if err := pool.Purge(container); err != nil { + testLog.Error(fmt.Sprintf("Could not purge container: %s", err)) + } + + os.Exit(code) +} + +func handleInterrupt(m *testing.M, pool *dockertest.Pool, container *dockertest.Resource) { + c := make(chan os.Signal, 2) + signal.Notify(c, 
os.Interrupt, syscall.SIGTERM)
+    go func() {
+        <-c
+        if err := pool.Purge(container); err != nil {
+            log.Fatalf("Could not purge container: %s", err)
+        }
+        os.Exit(0)
+    }()
+}
diff --git a/readers/messages.go b/readers/messages.go
new file mode 100644
index 0000000..19ce1c0
--- /dev/null
+++ b/readers/messages.go
@@ -0,0 +1,84 @@
+// Copyright (c) Abstract Machines
+// SPDX-License-Identifier: Apache-2.0
+
+package readers
+
+import "errors"
+
+const (
+    // EqualKey represents the equal comparison operator key.
+    EqualKey = "eq"
+    // LowerThanKey represents the lower-than comparison operator key.
+    LowerThanKey = "lt"
+    // LowerThanEqualKey represents the lower-than-or-equal comparison operator key.
+    LowerThanEqualKey = "le"
+    // GreaterThanKey represents the greater-than comparison operator key.
+    GreaterThanKey = "gt"
+    // GreaterThanEqualKey represents the greater-than-or-equal comparison operator key.
+    GreaterThanEqualKey = "ge"
+)
+
+// ErrReadMessages indicates that a failure occurred while reading messages from the database.
+var ErrReadMessages = errors.New("failed to read messages from database")
+
+// MessageRepository specifies the message reader API.
+
+//go:generate mockery --name MessageRepository --output=./mocks --filename messages.go --quiet --note "Copyright (c) Abstract Machines"
+type MessageRepository interface {
+    // ReadAll skips the given number of messages for the given channel and returns
+    // the next limited number of messages.
+    ReadAll(chanID string, pm PageMetadata) (MessagesPage, error)
+}
+
+// Message represents any message format.
+type Message interface{}
+
+// MessagesPage contains page-related metadata as well as the list of messages that
+// belong to this page.
+type MessagesPage struct {
+    PageMetadata
+    Total    uint64
+    Messages []Message
+}
+
+// PageMetadata represents the parameters used to create database queries.
+type PageMetadata struct {
+    Offset      uint64  `json:"offset"`
+    Limit       uint64  `json:"limit"`
+    Subtopic    string  `json:"subtopic,omitempty"`
+    Publisher   string  `json:"publisher,omitempty"`
+    Protocol    string  `json:"protocol,omitempty"`
+    Name        string  `json:"name,omitempty"`
+    Value       float64 `json:"v,omitempty"`
+    Comparator  string  `json:"comparator,omitempty"`
+    BoolValue   bool    `json:"vb,omitempty"`
+    StringValue string  `json:"vs,omitempty"`
+    DataValue   string  `json:"vd,omitempty"`
+    From        float64 `json:"from,omitempty"`
+    To          float64 `json:"to,omitempty"`
+    Format      string  `json:"format,omitempty"`
+    Aggregation string  `json:"aggregation,omitempty"`
+    Interval    string  `json:"interval,omitempty"`
+}
+
+// ParseValueComparator converts a comparison operator key into its mathematical notation.
+func ParseValueComparator(query map[string]interface{}) string {
+    comparator := "="
+    val, ok := query["comparator"]
+    if ok {
+        switch val.(string) {
+        case EqualKey:
+            comparator = "="
+        case LowerThanKey:
+            comparator = "<"
+        case LowerThanEqualKey:
+            comparator = "<="
+        case GreaterThanKey:
+            comparator = ">"
+        case GreaterThanEqualKey:
+            comparator = ">="
+        }
+    }
+
+    return comparator
+}
diff --git a/readers/mocks/doc.go b/readers/mocks/doc.go
new file mode 100644
index 0000000..16ed198
--- /dev/null
+++ b/readers/mocks/doc.go
@@ -0,0 +1,5 @@
+// Copyright (c) Abstract Machines
+// SPDX-License-Identifier: Apache-2.0
+
+// Package mocks contains mocks for testing purposes.
+package mocks diff --git a/readers/mocks/messages.go b/readers/mocks/messages.go new file mode 100644 index 0000000..3968840 --- /dev/null +++ b/readers/mocks/messages.go @@ -0,0 +1,57 @@ +// Code generated by mockery v2.43.2. DO NOT EDIT. + +// Copyright (c) Abstract Machines + +package mocks + +import ( + readers "github.com/absmach/magistrala/readers" + mock "github.com/stretchr/testify/mock" +) + +// MessageRepository is an autogenerated mock type for the MessageRepository type +type MessageRepository struct { + mock.Mock +} + +// ReadAll provides a mock function with given fields: chanID, pm +func (_m *MessageRepository) ReadAll(chanID string, pm readers.PageMetadata) (readers.MessagesPage, error) { + ret := _m.Called(chanID, pm) + + if len(ret) == 0 { + panic("no return value specified for ReadAll") + } + + var r0 readers.MessagesPage + var r1 error + if rf, ok := ret.Get(0).(func(string, readers.PageMetadata) (readers.MessagesPage, error)); ok { + return rf(chanID, pm) + } + if rf, ok := ret.Get(0).(func(string, readers.PageMetadata) readers.MessagesPage); ok { + r0 = rf(chanID, pm) + } else { + r0 = ret.Get(0).(readers.MessagesPage) + } + + if rf, ok := ret.Get(1).(func(string, readers.PageMetadata) error); ok { + r1 = rf(chanID, pm) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// NewMessageRepository creates a new instance of MessageRepository. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewMessageRepository(t interface { + mock.TestingT + Cleanup(func()) +}) *MessageRepository { + mock := &MessageRepository{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/readers/mongodb/README.md b/readers/mongodb/README.md new file mode 100644 index 0000000..3f5d31f --- /dev/null +++ b/readers/mongodb/README.md @@ -0,0 +1,96 @@ +# MongoDB reader + +MongoDB reader provides message repository implementation for MongoDB. + +## Configuration + +The service is configured using the environment variables presented in the +following table. Note that any unset variables will be replaced with their +default values. 
+
+| Variable                         | Description                                          | Default                        |
+| -------------------------------- | ---------------------------------------------------- | ------------------------------ |
+| MG_MONGO_READER_LOG_LEVEL        | Service log level                                    | info                           |
+| MG_MONGO_READER_HTTP_HOST        | Service HTTP host                                    | localhost                      |
+| MG_MONGO_READER_HTTP_PORT        | Service HTTP port                                    | 9007                           |
+| MG_MONGO_READER_HTTP_SERVER_CERT | Service HTTP server cert                             | ""                             |
+| MG_MONGO_READER_HTTP_SERVER_KEY  | Service HTTP server key                              | ""                             |
+| MG_MONGO_NAME                    | MongoDB database name                                | messages                       |
+| MG_MONGO_HOST                    | MongoDB database host                                | localhost                      |
+| MG_MONGO_PORT                    | MongoDB database port                                | 27017                          |
+| MG_THINGS_AUTH_GRPC_URL          | Things service Auth gRPC URL                         | localhost:7000                 |
+| MG_THINGS_AUTH_GRPC_TIMEOUT      | Things service Auth gRPC request timeout in seconds  | 1s                             |
+| MG_THINGS_AUTH_GRPC_CLIENT_TLS   | Flag that indicates if TLS should be turned on       | false                          |
+| MG_THINGS_AUTH_GRPC_CA_CERTS     | Path to trusted CAs in PEM format                    | ""                             |
+| MG_AUTH_GRPC_URL                 | Auth service gRPC URL                                | localhost:7001                 |
+| MG_AUTH_GRPC_TIMEOUT             | Auth service gRPC request timeout in seconds         | 1s                             |
+| MG_AUTH_GRPC_CLIENT_TLS          | Flag that indicates if TLS should be turned on       | false                          |
+| MG_AUTH_GRPC_CA_CERT             | Path to trusted CAs in PEM format                    | ""                             |
+| MG_JAEGER_URL                    | Jaeger server URL                                    | http://jaeger:14268/api/traces |
+| MG_SEND_TELEMETRY                | Send telemetry to magistrala call home server        | true                           |
+| MG_MONGO_READER_INSTANCE_ID      | Service instance ID                                  | ""                             |
+
+## Deployment
+
+The service itself is distributed as a Docker container. Check the [`mongodb-reader`](https://github.com/absmach/magistrala/blob/main/docker/addons/mongodb-reader/docker-compose.yml#L16-L37) service section in
+the docker-compose file to see how the service is deployed.
+
+To start the service, execute the following shell script:
+
+```bash
+# download the latest version of the service
+git clone https://github.com/absmach/magistrala
+
+cd magistrala
+
+# compile the mongodb reader
+make mongodb-reader
+
+# copy binary to bin
+make install
+
+# Set the environment variables and run the service
+MG_MONGO_READER_LOG_LEVEL=[Service log level] \
+MG_MONGO_READER_HTTP_HOST=[Service HTTP host] \
+MG_MONGO_READER_HTTP_PORT=[Service HTTP port] \
+MG_MONGO_READER_HTTP_SERVER_CERT=[Path to server pem certificate file] \
+MG_MONGO_READER_HTTP_SERVER_KEY=[Path to server pem key file] \
+MG_MONGO_NAME=[MongoDB database name] \
+MG_MONGO_HOST=[MongoDB database host] \
+MG_MONGO_PORT=[MongoDB database port] \
+MG_THINGS_AUTH_GRPC_URL=[Things service Auth gRPC URL] \
+MG_THINGS_AUTH_GRPC_TIMEOUT=[Things service Auth gRPC request timeout in seconds] \
+MG_THINGS_AUTH_GRPC_CLIENT_TLS=[Flag that indicates if TLS should be turned on] \
+MG_THINGS_AUTH_GRPC_CA_CERTS=[Path to trusted CAs in PEM format] \
+MG_AUTH_GRPC_URL=[Auth service gRPC URL] \
+MG_AUTH_GRPC_TIMEOUT=[Auth service gRPC request timeout in seconds] \
+MG_AUTH_GRPC_CLIENT_TLS=[Flag that indicates if TLS should be turned on] \
+MG_AUTH_GRPC_CA_CERT=[Path to trusted CAs in PEM format] \
+MG_JAEGER_URL=[Jaeger server URL] \
+MG_SEND_TELEMETRY=[Send telemetry to magistrala call home server] \
+MG_MONGO_READER_INSTANCE_ID=[Service instance ID] \
+$GOBIN/magistrala-mongodb-reader
+
+```
+
+### Using docker-compose
+
+This service can be deployed using docker containers. The Docker compose file is
+available in `<project_root>/docker/addons/mongodb-reader/docker-compose.yml`.
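Aside from running it as a standalone service, the reader package can also be embedded as a Go library. A minimal sketch, assuming the import paths used in this repository's tests; the Mongo URI, database name, and channel ID are placeholders:

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/absmach/magistrala/readers"
	mreader "github.com/absmach/mg-contrib/readers/mongodb"
	"go.mongodb.org/mongo-driver/mongo"
	"go.mongodb.org/mongo-driver/mongo/options"
)

func main() {
	// Placeholder URI; in practice, build it from the MG_MONGO_* variables above.
	client, err := mongo.Connect(context.Background(), options.Client().ApplyURI("mongodb://localhost:27017"))
	if err != nil {
		log.Fatalf("Could not connect to MongoDB: %s", err)
	}

	// The reader operates on the database the writer populates ("messages" by default).
	reader := mreader.New(client.Database("messages"))

	// Fetch the first 10 SenML messages published to the given channel.
	page, err := reader.ReadAll("<channel-id>", readers.PageMetadata{Offset: 0, Limit: 10})
	if err != nil {
		log.Fatalf("Could not read messages: %s", err)
	}
	fmt.Printf("total: %d\n", page.Total)
}
```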
+To run all Magistrala core services, as well as the mentioned optional ones,
+execute the following command:
+
+```bash
+docker compose -f docker/docker-compose.yml up -d
+docker compose -f docker/addons/mongodb-reader/docker-compose.yml up -d
+```
+
+## Usage
+
+The service exposes an [HTTP API](https://docs.api.magistrala.abstractmachines.fr/?urls.primaryName=readers-openapi.yml) for fetching messages.
+
+```
+Note: the MongoDB reader doesn't support substring search on string_value, because the current data model makes this type of query inefficient.
+```
+
+[doc]: https://docs.magistrala.abstractmachines.fr
diff --git a/readers/mongodb/doc.go b/readers/mongodb/doc.go
new file mode 100644
index 0000000..34bae9f
--- /dev/null
+++ b/readers/mongodb/doc.go
@@ -0,0 +1,6 @@
+// Copyright (c) Abstract Machines
+// SPDX-License-Identifier: Apache-2.0
+
+// Package mongodb contains the domain concept definitions needed to
+// support Magistrala MongoDB reader service functionality.
+package mongodb
diff --git a/readers/mongodb/messages.go b/readers/mongodb/messages.go
new file mode 100644
index 0000000..5e27645
--- /dev/null
+++ b/readers/mongodb/messages.go
@@ -0,0 +1,149 @@
+// Copyright (c) Abstract Machines
+// SPDX-License-Identifier: Apache-2.0
+
+package mongodb
+
+import (
+    "context"
+    "encoding/json"
+
+    "github.com/absmach/magistrala/pkg/errors"
+    "github.com/absmach/magistrala/pkg/transformers/senml"
+    "github.com/absmach/magistrala/readers"
+    "go.mongodb.org/mongo-driver/bson"
+    "go.mongodb.org/mongo-driver/mongo"
+    "go.mongodb.org/mongo-driver/mongo/options"
+)
+
+// Collection for SenML messages.
+const defCollection = "messages"
+
+var _ readers.MessageRepository = (*mongoRepository)(nil)
+
+type mongoRepository struct {
+    db *mongo.Database
+}
+
+// New returns a new MongoDB reader.
+func New(db *mongo.Database) readers.MessageRepository {
+    return mongoRepository{
+        db: db,
+    }
+}
+
+func (repo mongoRepository) ReadAll(chanID string, rpm readers.PageMetadata) (readers.MessagesPage, error) {
+    format := defCollection
+    order := "time"
+    if rpm.Format != "" && rpm.Format != defCollection {
+        order = "created"
+        format = rpm.Format
+    }
+
+    col := repo.db.Collection(format)
+
+    sortMap := map[string]interface{}{
+        order: -1,
+    }
+    // Build the query filter from the page metadata; the format field only selects the collection.
+ filter := fmtCondition(chanID, rpm) + cursor, err := col.Find(context.Background(), filter, options.Find().SetSort(sortMap).SetLimit(int64(rpm.Limit)).SetSkip(int64(rpm.Offset))) + if err != nil { + return readers.MessagesPage{}, errors.Wrap(readers.ErrReadMessages, err) + } + defer cursor.Close(context.Background()) + + var messages []readers.Message + switch format { + case defCollection: + for cursor.Next(context.Background()) { + var m senml.Message + if err := cursor.Decode(&m); err != nil { + return readers.MessagesPage{}, errors.Wrap(readers.ErrReadMessages, err) + } + + messages = append(messages, m) + } + default: + for cursor.Next(context.Background()) { + var m map[string]interface{} + if err := cursor.Decode(&m); err != nil { + return readers.MessagesPage{}, errors.Wrap(readers.ErrReadMessages, err) + } + + messages = append(messages, m) + } + } + + total, err := col.CountDocuments(context.Background(), filter) + if err != nil { + return readers.MessagesPage{}, errors.Wrap(readers.ErrReadMessages, err) + } + + mp := readers.MessagesPage{ + PageMetadata: rpm, + Total: uint64(total), + Messages: messages, + } + + return mp, nil +} + +func fmtCondition(chanID string, rpm readers.PageMetadata) bson.D { + filter := bson.D{ + bson.E{ + Key: "channel", + Value: chanID, + }, + } + + var query map[string]interface{} + meta, err := json.Marshal(rpm) + if err != nil { + return filter + } + if err := json.Unmarshal(meta, &query); err != nil { + return filter + } + + for name, value := range query { + switch name { + case + "channel", + "subtopic", + "publisher", + "name", + "protocol": + filter = append(filter, bson.E{Key: name, Value: value}) + case "v": + bsonFilter := value + val, ok := query["comparator"] + if ok { + switch val.(string) { + case readers.EqualKey: + bsonFilter = value + case readers.LowerThanKey: + bsonFilter = bson.M{"$lt": value} + case readers.LowerThanEqualKey: + bsonFilter = bson.M{"$lte": value} + case readers.GreaterThanKey: + bsonFilter = bson.M{"$gt": value} + case readers.GreaterThanEqualKey: + bsonFilter = bson.M{"$gte": value} + } + } + filter = append(filter, bson.E{Key: "value", Value: bsonFilter}) + case "vb": + filter = append(filter, bson.E{Key: "bool_value", Value: value}) + case "vs": + filter = append(filter, bson.E{Key: "string_value", Value: value}) + case "vd": + filter = append(filter, bson.E{Key: "data_value", Value: value}) + case "from": + filter = append(filter, bson.E{Key: "time", Value: bson.M{"$gte": value}}) + case "to": + filter = append(filter, bson.E{Key: "time", Value: bson.M{"$lt": value}}) + } + } + + return filter +} diff --git a/readers/mongodb/messages_test.go b/readers/mongodb/messages_test.go new file mode 100644 index 0000000..b86e7e0 --- /dev/null +++ b/readers/mongodb/messages_test.go @@ -0,0 +1,549 @@ +// Copyright (c) Abstract Machines +// SPDX-License-Identifier: Apache-2.0 + +package mongodb_test + +import ( + "context" + "fmt" + "testing" + "time" + + "github.com/absmach/magistrala/pkg/transformers/json" + "github.com/absmach/magistrala/pkg/transformers/senml" + "github.com/absmach/magistrala/readers" + mwriter "github.com/absmach/mg-contrib/consumers/writers/mongodb" + "github.com/absmach/mg-contrib/pkg/testsutil" + mreader "github.com/absmach/mg-contrib/readers/mongodb" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.mongodb.org/mongo-driver/mongo" + "go.mongodb.org/mongo-driver/mongo/options" +) + +const ( + testDB = "test" + subtopic = "subtopic" + msgsNum = 100 + limit = 10 + 
valueFields = 5 + mqttProt = "mqtt" + httpProt = "http" + msgName = "temperature" + wrongID = "wrong-id" + + format1 = "format_1" + format2 = "format_2" +) + +var ( + port string + addr string + + v float64 = 5 + vs = "stringValue" + vb = true + vd = "dataValue" + sum float64 = 42 +) + +func TestReadSenml(t *testing.T) { + client, err := mongo.Connect(context.Background(), options.Client().ApplyURI(addr)) + require.Nil(t, err, fmt.Sprintf("Creating new MongoDB client expected to succeed: %s.\n", err)) + + db := client.Database(testDB) + writer := mwriter.New(db) + + chanID := testsutil.GenerateUUID(t) + pubID := testsutil.GenerateUUID(t) + pubID2 := testsutil.GenerateUUID(t) + wrongID := testsutil.GenerateUUID(t) + + m := senml.Message{ + Channel: chanID, + Publisher: pubID, + Protocol: mqttProt, + } + + messages := []senml.Message{} + valueMsgs := []senml.Message{} + boolMsgs := []senml.Message{} + stringMsgs := []senml.Message{} + dataMsgs := []senml.Message{} + queryMsgs := []senml.Message{} + now := time.Now().Unix() + + for i := 0; i < msgsNum; i++ { + // Mix possible values as well as value sum. + msg := m + msg.Time = float64(now - int64(i)) + + count := i % valueFields + switch count { + case 0: + msg.Value = &v + valueMsgs = append(valueMsgs, msg) + case 1: + msg.BoolValue = &vb + boolMsgs = append(boolMsgs, msg) + case 2: + msg.StringValue = &vs + stringMsgs = append(stringMsgs, msg) + case 3: + msg.DataValue = &vd + dataMsgs = append(dataMsgs, msg) + case 4: + msg.Sum = &sum + msg.Subtopic = subtopic + msg.Protocol = httpProt + msg.Publisher = pubID2 + msg.Name = msgName + queryMsgs = append(queryMsgs, msg) + } + messages = append(messages, msg) + } + err = writer.ConsumeBlocking(context.TODO(), messages) + require.Nil(t, err, fmt.Sprintf("failed to store message to MongoDB: %s", err)) + reader := mreader.New(db) + + cases := map[string]struct { + chanID string + pageMeta readers.PageMetadata + page readers.MessagesPage + }{ + "read message page for existing channel": { + chanID: chanID, + pageMeta: readers.PageMetadata{ + Offset: 0, + Limit: msgsNum, + }, + page: readers.MessagesPage{ + Total: msgsNum, + Messages: fromSenml(messages), + }, + }, + "read message page for non-existent channel": { + chanID: wrongID, + pageMeta: readers.PageMetadata{ + Offset: 0, + Limit: msgsNum, + }, + page: readers.MessagesPage{ + Messages: []readers.Message{}, + }, + }, + "read message last page": { + chanID: chanID, + pageMeta: readers.PageMetadata{ + Offset: msgsNum - 20, + Limit: msgsNum, + }, + page: readers.MessagesPage{ + Total: msgsNum, + Messages: fromSenml(messages[msgsNum-20 : msgsNum]), + }, + }, + "read message with non-existent subtopic": { + chanID: chanID, + pageMeta: readers.PageMetadata{ + Offset: 0, + Limit: msgsNum, + Subtopic: "not-present", + }, + page: readers.MessagesPage{ + Messages: []readers.Message{}, + }, + }, + "read message with subtopic": { + chanID: chanID, + pageMeta: readers.PageMetadata{ + Offset: 0, + Limit: uint64(len(queryMsgs)), + Subtopic: subtopic, + }, + page: readers.MessagesPage{ + Total: uint64(len(queryMsgs)), + Messages: fromSenml(queryMsgs), + }, + }, + "read message with publisher": { + chanID: chanID, + pageMeta: readers.PageMetadata{ + Offset: 0, + Limit: uint64(len(queryMsgs)), + Publisher: pubID2, + }, + page: readers.MessagesPage{ + Total: uint64(len(queryMsgs)), + Messages: fromSenml(queryMsgs), + }, + }, + "read message with invalid format": { + chanID: chanID, + pageMeta: readers.PageMetadata{ + Format: "messagess", + Offset: 0, + Limit: 
uint64(len(queryMsgs)), + }, + page: readers.MessagesPage{ + Total: 0, + Messages: []readers.Message{}, + }, + }, + "read message with protocol": { + chanID: chanID, + pageMeta: readers.PageMetadata{ + Offset: 0, + Limit: uint64(len(queryMsgs)), + Protocol: httpProt, + }, + page: readers.MessagesPage{ + Total: uint64(len(queryMsgs)), + Messages: fromSenml(queryMsgs), + }, + }, + "read message with name": { + chanID: chanID, + pageMeta: readers.PageMetadata{ + Offset: 0, + Limit: limit, + Name: msgName, + }, + page: readers.MessagesPage{ + Total: uint64(len(queryMsgs)), + Messages: fromSenml(queryMsgs[0:limit]), + }, + }, + "read message with value": { + chanID: chanID, + pageMeta: readers.PageMetadata{ + Offset: 0, + Limit: limit, + Value: v, + }, + page: readers.MessagesPage{ + Total: uint64(len(queryMsgs)), + Messages: fromSenml(valueMsgs[0:limit]), + }, + }, + "read message with value and equal comparator": { + chanID: chanID, + pageMeta: readers.PageMetadata{ + Offset: 0, + Limit: limit, + Value: v, + Comparator: readers.EqualKey, + }, + page: readers.MessagesPage{ + Total: uint64(len(valueMsgs)), + Messages: fromSenml(valueMsgs[0:limit]), + }, + }, + "read message with value and lower-than comparator": { + chanID: chanID, + pageMeta: readers.PageMetadata{ + Offset: 0, + Limit: limit, + Value: v + 1, + Comparator: readers.LowerThanKey, + }, + page: readers.MessagesPage{ + Total: uint64(len(valueMsgs)), + Messages: fromSenml(valueMsgs[0:limit]), + }, + }, + "read message with value and lower-than-or-equal comparator": { + chanID: chanID, + pageMeta: readers.PageMetadata{ + Offset: 0, + Limit: limit, + Value: v + 1, + Comparator: readers.LowerThanEqualKey, + }, + page: readers.MessagesPage{ + Total: uint64(len(valueMsgs)), + Messages: fromSenml(valueMsgs[0:limit]), + }, + }, + "read message with value and greater-than comparator": { + chanID: chanID, + pageMeta: readers.PageMetadata{ + Offset: 0, + Limit: limit, + Value: v - 1, + Comparator: readers.GreaterThanKey, + }, + page: readers.MessagesPage{ + Total: uint64(len(valueMsgs)), + Messages: fromSenml(valueMsgs[0:limit]), + }, + }, + "read message with value and greater-than-or-equal comparator": { + chanID: chanID, + pageMeta: readers.PageMetadata{ + Offset: 0, + Limit: limit, + Value: v - 1, + Comparator: readers.GreaterThanEqualKey, + }, + page: readers.MessagesPage{ + Total: uint64(len(valueMsgs)), + Messages: fromSenml(valueMsgs[0:limit]), + }, + }, + "read message with boolean value": { + chanID: chanID, + pageMeta: readers.PageMetadata{ + Offset: 0, + Limit: limit, + BoolValue: vb, + }, + page: readers.MessagesPage{ + Total: uint64(len(boolMsgs)), + Messages: fromSenml(boolMsgs[0:limit]), + }, + }, + "read message with string value": { + chanID: chanID, + pageMeta: readers.PageMetadata{ + Offset: 0, + Limit: limit, + StringValue: vs, + }, + page: readers.MessagesPage{ + Total: uint64(len(stringMsgs)), + Messages: fromSenml(stringMsgs[0:limit]), + }, + }, + "read message with data value": { + chanID: chanID, + pageMeta: readers.PageMetadata{ + Offset: 0, + Limit: limit, + DataValue: vd, + }, + page: readers.MessagesPage{ + Total: uint64(len(dataMsgs)), + Messages: fromSenml(dataMsgs[0:limit]), + }, + }, + "read message with from": { + chanID: chanID, + pageMeta: readers.PageMetadata{ + Offset: 0, + Limit: uint64(len(messages[0:21])), + From: messages[20].Time, + }, + page: readers.MessagesPage{ + Total: uint64(len(messages[0:21])), + Messages: fromSenml(messages[0:21]), + }, + }, + "read message with to": { + chanID: chanID, + 
pageMeta: readers.PageMetadata{ + Offset: 0, + Limit: uint64(len(messages[21:])), + To: messages[20].Time, + }, + page: readers.MessagesPage{ + Total: uint64(len(messages[21:])), + Messages: fromSenml(messages[21:]), + }, + }, + "read message with from/to": { + chanID: chanID, + pageMeta: readers.PageMetadata{ + Offset: 0, + Limit: limit, + From: messages[5].Time, + To: messages[0].Time, + }, + page: readers.MessagesPage{ + Total: 5, + Messages: fromSenml(messages[1:6]), + }, + }, + } + + for desc, tc := range cases { + result, err := reader.ReadAll(tc.chanID, tc.pageMeta) + assert.Nil(t, err, fmt.Sprintf("%s: expected no error got %s", desc, err)) + assert.ElementsMatch(t, tc.page.Messages, result.Messages, fmt.Sprintf("%s: got incorrect list of senml Messages from ReadAll()", desc)) + assert.Equal(t, tc.page.Total, result.Total, fmt.Sprintf("%s: expected %v got %v", desc, tc.page.Total, result.Total)) + } +} + +func TestReadJSON(t *testing.T) { + client, err := mongo.Connect(context.Background(), options.Client().ApplyURI(addr)) + require.Nil(t, err, fmt.Sprintf("Creating new MongoDB client expected to succeed: %s.\n", err)) + + db := client.Database(testDB) + writer := mwriter.New(db) + + id1 := testsutil.GenerateUUID(t) + m := json.Message{ + Channel: id1, + Publisher: id1, + Created: time.Now().Unix(), + Subtopic: "subtopic/format/some_json", + Protocol: "coap", + Payload: map[string]interface{}{ + "field_2": "value", + "field_3": false, + "field_4": 12.344, + "field_5": map[string]interface{}{ + "field_1": "value", + "field_2": 42.0, + }, + }, + } + messages1 := json.Messages{ + Format: format1, + } + msgs1 := []map[string]interface{}{} + for i := 0; i < msgsNum; i++ { + msg := m + messages1.Data = append(messages1.Data, msg) + m := toMap(msg) + msgs1 = append(msgs1, m) + } + + err = writer.ConsumeBlocking(context.TODO(), messages1) + require.Nil(t, err, fmt.Sprintf("expected no error got %s\n", err)) + + id2 := testsutil.GenerateUUID(t) + m = json.Message{ + Channel: id2, + Publisher: id2, + Created: time.Now().Unix(), + Subtopic: "subtopic/other_format/some_other_json", + Protocol: "udp", + Payload: map[string]interface{}{ + "field_2": "other_value", + "field_3": false, + "field_5": map[string]interface{}{ + "field_1": "wrong_value", + "field_2": 24.5, + }, + }, + } + messages2 := json.Messages{ + Format: format2, + } + msgs2 := []map[string]interface{}{} + for i := 0; i < msgsNum; i++ { + msg := m + if i%2 == 0 { + msg.Protocol = httpProt + } + messages2.Data = append(messages2.Data, msg) + m := toMap(msg) + msgs2 = append(msgs2, m) + } + + err = writer.ConsumeBlocking(context.TODO(), messages2) + require.Nil(t, err, fmt.Sprintf("expected no error got %s\n", err)) + + httpMsgs := []map[string]interface{}{} + for i := 0; i < msgsNum; i += 2 { + httpMsgs = append(httpMsgs, msgs2[i]) + } + reader := mreader.New(db) + + cases := map[string]struct { + chanID string + pageMeta readers.PageMetadata + page readers.MessagesPage + }{ + "read message page for existing channel": { + chanID: id1, + pageMeta: readers.PageMetadata{ + Format: messages1.Format, + Offset: 0, + Limit: 10, + }, + page: readers.MessagesPage{ + Total: 100, + Messages: fromJSON(msgs1[:10]), + }, + }, + "read message page for non-existent channel": { + chanID: wrongID, + pageMeta: readers.PageMetadata{ + Format: messages1.Format, + Offset: 0, + Limit: 10, + }, + page: readers.MessagesPage{ + Messages: []readers.Message{}, + }, + }, + "read message last page": { + chanID: id2, + pageMeta: readers.PageMetadata{ + Format: 
messages2.Format, + Offset: msgsNum - 20, + Limit: msgsNum, + }, + page: readers.MessagesPage{ + Total: msgsNum, + Messages: fromJSON(msgs2[msgsNum-20 : msgsNum]), + }, + }, + "read message with protocol": { + chanID: id2, + pageMeta: readers.PageMetadata{ + Format: messages2.Format, + Offset: 0, + Limit: uint64(msgsNum / 2), + Protocol: httpProt, + }, + page: readers.MessagesPage{ + Total: uint64(msgsNum / 2), + Messages: fromJSON(httpMsgs), + }, + }, + } + + for desc, tc := range cases { + result, err := reader.ReadAll(tc.chanID, tc.pageMeta) + + for i := 0; i < len(result.Messages); i++ { + m := result.Messages[i] + // Remove id as it is not sent by the client. + delete(m.(map[string]interface{}), "_id") + result.Messages[i] = m + } + assert.Nil(t, err, fmt.Sprintf("%s: expected no error got %s", desc, err)) + assert.ElementsMatch(t, tc.page.Messages, result.Messages, fmt.Sprintf("%s: got incorrect list of json Messages from ReadAll()", desc)) + assert.Equal(t, tc.page.Total, result.Total, fmt.Sprintf("%s: expected %v got %v", desc, tc.page.Total, result.Total)) + } +} + +func fromSenml(in []senml.Message) []readers.Message { + var ret []readers.Message + for _, m := range in { + ret = append(ret, m) + } + return ret +} + +func fromJSON(msg []map[string]interface{}) []readers.Message { + var ret []readers.Message + for _, m := range msg { + ret = append(ret, m) + } + return ret +} + +func toMap(msg json.Message) map[string]interface{} { + return map[string]interface{}{ + "channel": msg.Channel, + "created": msg.Created, + "subtopic": msg.Subtopic, + "publisher": msg.Publisher, + "protocol": msg.Protocol, + "payload": map[string]interface{}(msg.Payload), + } +} diff --git a/readers/mongodb/setup_test.go b/readers/mongodb/setup_test.go new file mode 100644 index 0000000..21ca88c --- /dev/null +++ b/readers/mongodb/setup_test.go @@ -0,0 +1,59 @@ +// Copyright (c) Abstract Machines +// SPDX-License-Identifier: Apache-2.0 + +package mongodb_test + +import ( + "context" + "fmt" + "log" + "os" + "testing" + + mglog "github.com/absmach/magistrala/logger" + "github.com/ory/dockertest/v3" + "github.com/ory/dockertest/v3/docker" + "go.mongodb.org/mongo-driver/mongo" + "go.mongodb.org/mongo-driver/mongo/options" +) + +var testLog, _ = mglog.New(os.Stdout, "info") + +func TestMain(m *testing.M) { + pool, err := dockertest.NewPool("") + if err != nil { + testLog.Error(fmt.Sprintf("Could not connect to docker: %s", err)) + } + + container, err := pool.RunWithOptions(&dockertest.RunOptions{ + Repository: "mongo", + Tag: "7.0.5", + Env: []string{ + "MONGO_INITDB_DATABASE=test", + }, + }, func(config *docker.HostConfig) { + config.AutoRemove = true + config.RestartPolicy = docker.RestartPolicy{Name: "no"} + }) + if err != nil { + log.Fatalf("Could not start container: %s", err) + } + + port = container.GetPort("27017/tcp") + addr = fmt.Sprintf("mongodb://localhost:%s", port) + + if err := pool.Retry(func() error { + _, err := mongo.Connect(context.Background(), options.Client().ApplyURI(addr)) + return err + }); err != nil { + testLog.Error(fmt.Sprintf("Could not connect to docker: %s", err)) + } + + code := m.Run() + + if err := pool.Purge(container); err != nil { + testLog.Error(fmt.Sprintf("Could not purge container: %s", err)) + } + + os.Exit(code) +} diff --git a/readers/postgres/README.md b/readers/postgres/README.md new file mode 100644 index 0000000..5df71d1 --- /dev/null +++ b/readers/postgres/README.md @@ -0,0 +1,101 @@ +# Postgres reader + +Postgres reader provides message repository 
implementation for Postgres.
+
+## Configuration
+
+The service is configured using the environment variables presented in the
+following table. Note that any unset variables will be replaced with their
+default values.
+
+| Variable                             | Description                                    | Default                        |
+| ------------------------------------ | ---------------------------------------------- | ------------------------------ |
+| MG_POSTGRES_READER_LOG_LEVEL         | Service log level                              | info                           |
+| MG_POSTGRES_READER_HTTP_HOST         | Service HTTP host                              | localhost                      |
+| MG_POSTGRES_READER_HTTP_PORT         | Service HTTP port                              | 9009                           |
+| MG_POSTGRES_READER_HTTP_SERVER_CERT  | Service HTTP server cert                       | ""                             |
+| MG_POSTGRES_READER_HTTP_SERVER_KEY   | Service HTTP server key                        | ""                             |
+| MG_POSTGRES_HOST                     | Postgres DB host                               | localhost                      |
+| MG_POSTGRES_PORT                     | Postgres DB port                               | 5432                           |
+| MG_POSTGRES_USER                     | Postgres user                                  | magistrala                     |
+| MG_POSTGRES_PASS                     | Postgres password                              | magistrala                     |
+| MG_POSTGRES_NAME                     | Postgres database name                         | messages                       |
+| MG_POSTGRES_SSL_MODE                 | Postgres SSL mode                              | disable                        |
+| MG_POSTGRES_SSL_CERT                 | Postgres SSL certificate path                  | ""                             |
+| MG_POSTGRES_SSL_KEY                  | Postgres SSL key                               | ""                             |
+| MG_POSTGRES_SSL_ROOT_CERT            | Postgres SSL root certificate path             | ""                             |
+| MG_THINGS_AUTH_GRPC_URL              | Things service Auth gRPC URL                   | localhost:7000                 |
+| MG_THINGS_AUTH_GRPC_TIMEOUT          | Things service Auth gRPC timeout in seconds    | 1s                             |
+| MG_THINGS_AUTH_GRPC_CLIENT_TLS       | Things service Auth gRPC TLS mode flag         | false                          |
+| MG_THINGS_AUTH_GRPC_CA_CERTS         | Things service Auth gRPC CA certificates       | ""                             |
+| MG_AUTH_GRPC_URL                     | Auth service gRPC URL                          | localhost:7001                 |
+| MG_AUTH_GRPC_TIMEOUT                 | Auth service gRPC request timeout in seconds   | 1s                             |
+| MG_AUTH_GRPC_CLIENT_TLS              | Auth service gRPC TLS mode flag                | false                          |
+| MG_AUTH_GRPC_CA_CERTS                | Auth service gRPC CA certificates              | ""                             |
+| MG_JAEGER_URL                        | Jaeger server URL                              | http://jaeger:14268/api/traces |
+| MG_SEND_TELEMETRY                    | Send telemetry to magistrala call home server  | true                           |
+| MG_POSTGRES_READER_INSTANCE_ID       | Postgres reader instance ID                    |                                |
+
+## Deployment
+
+The service itself is distributed as a Docker container. Check the [`postgres-reader`](https://github.com/absmach/magistrala/blob/main/docker/addons/postgres-reader/docker-compose.yml#L17-L41) service section in
+the docker-compose file to see how the service is deployed.
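Besides running it as a service, the reader can be used directly as a Go library. A minimal sketch, assuming the import paths used in this repository's tests; the connection settings mirror the defaults above and the channel ID is a placeholder (note that `Connect` also applies the reader's table migrations):

```go
package main

import (
	"fmt"
	"log"

	"github.com/absmach/magistrala/readers"
	preader "github.com/absmach/mg-contrib/readers/postgres"
	_ "github.com/jackc/pgx/v5/stdlib" // registers the "pgx" database/sql driver used by Connect
)

func main() {
	// Placeholder settings; see the MG_POSTGRES_* defaults in the table above.
	db, err := preader.Connect(preader.Config{
		Host:    "localhost",
		Port:    "5432",
		User:    "magistrala",
		Pass:    "magistrala",
		Name:    "messages",
		SSLMode: "disable",
	})
	if err != nil {
		log.Fatalf("Could not connect to Postgres: %s", err)
	}

	reader := preader.New(db)

	// Read up to 10 messages with value >= 5, using the "ge" comparator key.
	page, err := reader.ReadAll("<channel-id>", readers.PageMetadata{
		Limit:      10,
		Value:      5,
		Comparator: readers.GreaterThanEqualKey,
	})
	if err != nil {
		log.Fatalf("Could not read messages: %s", err)
	}
	fmt.Printf("total: %d\n", page.Total)
}
```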
+
+To start the service, execute the following shell script:
+
+```bash
+# download the latest version of the service
+git clone https://github.com/absmach/magistrala
+
+cd magistrala
+
+# compile the postgres reader
+make postgres-reader
+
+# copy binary to bin
+make install
+
+# Set the environment variables and run the service
+MG_POSTGRES_READER_LOG_LEVEL=[Service log level] \
+MG_POSTGRES_READER_HTTP_HOST=[Service HTTP host] \
+MG_POSTGRES_READER_HTTP_PORT=[Service HTTP port] \
+MG_POSTGRES_READER_HTTP_SERVER_CERT=[Service HTTPS server certificate path] \
+MG_POSTGRES_READER_HTTP_SERVER_KEY=[Service HTTPS server key path] \
+MG_POSTGRES_HOST=[Postgres host] \
+MG_POSTGRES_PORT=[Postgres port] \
+MG_POSTGRES_USER=[Postgres user] \
+MG_POSTGRES_PASS=[Postgres password] \
+MG_POSTGRES_NAME=[Postgres database name] \
+MG_POSTGRES_SSL_MODE=[Postgres SSL mode] \
+MG_POSTGRES_SSL_CERT=[Postgres SSL cert] \
+MG_POSTGRES_SSL_KEY=[Postgres SSL key] \
+MG_POSTGRES_SSL_ROOT_CERT=[Postgres SSL Root cert] \
+MG_THINGS_AUTH_GRPC_URL=[Things service Auth GRPC URL] \
+MG_THINGS_AUTH_GRPC_TIMEOUT=[Things service Auth gRPC request timeout in seconds] \
+MG_THINGS_AUTH_GRPC_CLIENT_TLS=[Things service Auth gRPC TLS mode flag] \
+MG_THINGS_AUTH_GRPC_CA_CERTS=[Things service Auth gRPC CA certificates] \
+MG_AUTH_GRPC_URL=[Auth service gRPC URL] \
+MG_AUTH_GRPC_TIMEOUT=[Auth service gRPC request timeout in seconds] \
+MG_AUTH_GRPC_CLIENT_TLS=[Auth service gRPC TLS mode flag] \
+MG_AUTH_GRPC_CA_CERTS=[Auth service gRPC CA certificates] \
+MG_JAEGER_URL=[Jaeger server URL] \
+MG_SEND_TELEMETRY=[Send telemetry to magistrala call home server] \
+MG_POSTGRES_READER_INSTANCE_ID=[Postgres reader instance ID] \
+$GOBIN/magistrala-postgres-reader
+```
+
+## Usage
+
+The service exposes an [HTTP API](https://docs.api.magistrala.abstractmachines.fr/?urls.primaryName=readers-openapi.yml) for fetching messages.
+
+Comparator Usage Guide (string values are matched by substring):
+
+| Comparator | Usage                                                                        | Example                            |
+| ---------- | ---------------------------------------------------------------------------- | ---------------------------------- |
+| eq         | Return values that are equal to the query                                    | eq["active"] -> "active"           |
+| ge         | Return values that contain the query as a substring                          | ge["tiv"] -> "active" and "tiv"    |
+| gt         | Return values that contain the query as a substring and are not equal to it  | gt["tiv"] -> "active"              |
+| le         | Return values that are substrings of the query                               | le["active"] -> "active" and "tiv" |
+| lt         | Return values that are substrings of the query and not equal to the query    | lt["active"] -> "tiv"              |
+
+Official docs can be found [here](https://docs.magistrala.abstractmachines.fr).
diff --git a/readers/postgres/doc.go b/readers/postgres/doc.go
new file mode 100644
index 0000000..a92d4f9
--- /dev/null
+++ b/readers/postgres/doc.go
@@ -0,0 +1,6 @@
+// Copyright (c) Abstract Machines
+// SPDX-License-Identifier: Apache-2.0
+
+// Package postgres contains repository implementations using Postgres as
+// the underlying database.
+package postgres
diff --git a/readers/postgres/init.go b/readers/postgres/init.go
new file mode 100644
index 0000000..10bc5f1
--- /dev/null
+++ b/readers/postgres/init.go
@@ -0,0 +1,80 @@
+// Copyright (c) Abstract Machines
+// SPDX-License-Identifier: Apache-2.0
+
+package postgres
+
+import (
+    "fmt"
+
+    "github.com/jmoiron/sqlx"
+    migrate "github.com/rubenv/sql-migrate"
+)
+
+// Table for SenML messages.
+const defTable = "messages"
+
+// Config defines the options that are used when connecting to a PostgreSQL instance.
+type Config struct {
+    Host        string
+    Port        string
+    User        string
+    Pass        string
+    Name        string
+    SSLMode     string
+    SSLCert     string
+    SSLKey      string
+    SSLRootCert string
+}
+
+// Connect creates a connection to the PostgreSQL instance and applies any
+// unapplied database migrations. A non-nil error is returned to indicate
+// failure.
+func Connect(cfg Config) (*sqlx.DB, error) {
+    url := fmt.Sprintf("host=%s port=%s user=%s dbname=%s password=%s sslmode=%s sslcert=%s sslkey=%s sslrootcert=%s", cfg.Host, cfg.Port, cfg.User, cfg.Name, cfg.Pass, cfg.SSLMode, cfg.SSLCert, cfg.SSLKey, cfg.SSLRootCert)
+
+    db, err := sqlx.Open("pgx", url)
+    if err != nil {
+        return nil, err
+    }
+
+    if err := migrateDB(db); err != nil {
+        return nil, err
+    }
+
+    return db, nil
+}
+
+func migrateDB(db *sqlx.DB) error {
+    migrations := &migrate.MemoryMigrationSource{
+        Migrations: []*migrate.Migration{
+            {
+                Id: "messages_1",
+                Up: []string{
+                    `CREATE TABLE IF NOT EXISTS messages (
+                        id UUID,
+                        channel UUID,
+                        subtopic VARCHAR(254),
+                        publisher UUID,
+                        protocol TEXT,
+                        name TEXT,
+                        unit TEXT,
+                        value FLOAT,
+                        string_value TEXT,
+                        bool_value BOOL,
+                        data_value TEXT,
+                        sum FLOAT,
+                        time FLOAT,
+                        update_time FLOAT,
+                        PRIMARY KEY (id)
+                    )`,
+                },
+                Down: []string{
+                    "DROP TABLE messages",
+                },
+            },
+        },
+    }
+
+    _, err := migrate.Exec(db.DB, "postgres", migrations, migrate.Up)
+    return err
+}
diff --git a/readers/postgres/messages.go b/readers/postgres/messages.go
new file mode 100644
index 0000000..4037b5b
--- /dev/null
+++ b/readers/postgres/messages.go
@@ -0,0 +1,199 @@
+// Copyright (c) Abstract Machines
+// SPDX-License-Identifier: Apache-2.0
+
+package postgres
+
+import (
+    "encoding/json"
+    "fmt"
+
+    "github.com/absmach/magistrala/pkg/errors"
+    "github.com/absmach/magistrala/pkg/transformers/senml"
+    "github.com/absmach/magistrala/readers"
+    "github.com/jackc/pgerrcode"
+    "github.com/jackc/pgx/v5/pgconn"
+    "github.com/jmoiron/sqlx"
+)
+
+var _ readers.MessageRepository = (*postgresRepository)(nil)
+
+type postgresRepository struct {
+    db *sqlx.DB
+}
+
+// New returns a new PostgreSQL reader.
+func New(db *sqlx.DB) readers.MessageRepository { + return &postgresRepository{ + db: db, + } +} + +func (tr postgresRepository) ReadAll(chanID string, rpm readers.PageMetadata) (readers.MessagesPage, error) { + order := "time" + format := defTable + + if rpm.Format != "" && rpm.Format != defTable { + order = "created" + format = rpm.Format + } + cond := fmtCondition(chanID, rpm) + + q := fmt.Sprintf(`SELECT * FROM %s + WHERE %s ORDER BY %s DESC + LIMIT :limit OFFSET :offset;`, format, cond, order) + + params := map[string]interface{}{ + "channel": chanID, + "limit": rpm.Limit, + "offset": rpm.Offset, + "subtopic": rpm.Subtopic, + "publisher": rpm.Publisher, + "name": rpm.Name, + "protocol": rpm.Protocol, + "value": rpm.Value, + "bool_value": rpm.BoolValue, + "string_value": rpm.StringValue, + "data_value": rpm.DataValue, + "from": rpm.From, + "to": rpm.To, + } + rows, err := tr.db.NamedQuery(q, params) + if err != nil { + if pgErr, ok := err.(*pgconn.PgError); ok { + if pgErr.Code == pgerrcode.UndefinedTable { + return readers.MessagesPage{}, nil + } + } + return readers.MessagesPage{}, errors.Wrap(readers.ErrReadMessages, err) + } + defer rows.Close() + + page := readers.MessagesPage{ + PageMetadata: rpm, + Messages: []readers.Message{}, + } + switch format { + case defTable: + for rows.Next() { + msg := senmlMessage{Message: senml.Message{}} + if err := rows.StructScan(&msg); err != nil { + return readers.MessagesPage{}, errors.Wrap(readers.ErrReadMessages, err) + } + + page.Messages = append(page.Messages, msg.Message) + } + default: + for rows.Next() { + msg := jsonMessage{} + if err := rows.StructScan(&msg); err != nil { + return readers.MessagesPage{}, errors.Wrap(readers.ErrReadMessages, err) + } + m, err := msg.toMap() + if err != nil { + return readers.MessagesPage{}, errors.Wrap(readers.ErrReadMessages, err) + } + page.Messages = append(page.Messages, m) + } + } + + q = fmt.Sprintf(`SELECT COUNT(*) FROM %s WHERE %s;`, format, cond) + rows, err = tr.db.NamedQuery(q, params) + if err != nil { + return readers.MessagesPage{}, errors.Wrap(readers.ErrReadMessages, err) + } + defer rows.Close() + + total := uint64(0) + if rows.Next() { + if err := rows.Scan(&total); err != nil { + return page, err + } + } + page.Total = total + + return page, nil +} + +func fmtCondition(chanID string, rpm readers.PageMetadata) string { + condition := `channel = :channel` + + var query map[string]interface{} + meta, err := json.Marshal(rpm) + if err != nil { + return condition + } + if err := json.Unmarshal(meta, &query); err != nil { + return condition + } + + for name := range query { + switch name { + case + "subtopic", + "publisher", + "name", + "protocol": + condition = fmt.Sprintf(`%s AND %s = :%s`, condition, name, name) + case "v": + comparator := readers.ParseValueComparator(query) + condition = fmt.Sprintf(`%s AND value %s :value`, condition, comparator) + case "vb": + condition = fmt.Sprintf(`%s AND bool_value = :bool_value`, condition) + case "vs": + comparator := readers.ParseValueComparator(query) + switch comparator { + case "=": + condition = fmt.Sprintf("%s AND string_value = :string_value ", condition) + case ">": + condition = fmt.Sprintf("%s AND string_value LIKE '%%' || :string_value || '%%' AND string_value <> :string_value", condition) + case ">=": + condition = fmt.Sprintf("%s AND string_value LIKE '%%' || :string_value || '%%'", condition) + case "<=": + condition = fmt.Sprintf("%s AND :string_value LIKE '%%' || string_value || '%%'", condition) + case "<": + condition = 
fmt.Sprintf("%s AND :string_value LIKE '%%' || string_value || '%%' AND string_value <> :string_value", condition) + } + case "vd": + comparator := readers.ParseValueComparator(query) + condition = fmt.Sprintf(`%s AND data_value %s :data_value`, condition, comparator) + case "from": + condition = fmt.Sprintf(`%s AND time >= :from`, condition) + case "to": + condition = fmt.Sprintf(`%s AND time < :to`, condition) + } + } + return condition +} + +type senmlMessage struct { + ID string `db:"id"` + senml.Message +} + +type jsonMessage struct { + ID string `db:"id"` + Channel string `db:"channel"` + Created int64 `db:"created"` + Subtopic string `db:"subtopic"` + Publisher string `db:"publisher"` + Protocol string `db:"protocol"` + Payload []byte `db:"payload"` +} + +func (msg jsonMessage) toMap() (map[string]interface{}, error) { + ret := map[string]interface{}{ + "id": msg.ID, + "channel": msg.Channel, + "created": msg.Created, + "subtopic": msg.Subtopic, + "publisher": msg.Publisher, + "protocol": msg.Protocol, + "payload": map[string]interface{}{}, + } + pld := make(map[string]interface{}) + if err := json.Unmarshal(msg.Payload, &pld); err != nil { + return nil, err + } + ret["payload"] = pld + return ret, nil +} diff --git a/readers/postgres/messages_test.go b/readers/postgres/messages_test.go new file mode 100644 index 0000000..0c2fc67 --- /dev/null +++ b/readers/postgres/messages_test.go @@ -0,0 +1,687 @@ +// Copyright (c) Abstract Machines +// SPDX-License-Identifier: Apache-2.0 + +package postgres_test + +import ( + "context" + "fmt" + "testing" + "time" + + "github.com/absmach/magistrala/pkg/transformers/json" + "github.com/absmach/magistrala/pkg/transformers/senml" + "github.com/absmach/magistrala/readers" + pwriter "github.com/absmach/mg-contrib/consumers/writers/postgres" + "github.com/absmach/mg-contrib/pkg/testsutil" + preader "github.com/absmach/mg-contrib/readers/postgres" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +const ( + subtopic = "subtopic" + msgsNum = 100 + limit = 10 + valueFields = 5 + mqttProt = "mqtt" + httpProt = "http" + msgName = "temperature" + format1 = "format1" + format2 = "format2" + wrongID = "0" +) + +var ( + v float64 = 5 + vs = "stringValue" + vb = true + vd = "dataValue" + sum float64 = 42 +) + +func TestReadSenml(t *testing.T) { + writer := pwriter.New(db) + + chanID := testsutil.GenerateUUID(t) + pubID := testsutil.GenerateUUID(t) + pubID2 := testsutil.GenerateUUID(t) + wrongID := testsutil.GenerateUUID(t) + + m := senml.Message{ + Channel: chanID, + Publisher: pubID, + Protocol: mqttProt, + } + + messages := []senml.Message{} + valueMsgs := []senml.Message{} + boolMsgs := []senml.Message{} + stringMsgs := []senml.Message{} + dataMsgs := []senml.Message{} + queryMsgs := []senml.Message{} + + now := float64(time.Now().Unix()) + for i := 0; i < msgsNum; i++ { + // Mix possible values as well as value sum. 
+ msg := m + msg.Time = now - float64(i) + + count := i % valueFields + switch count { + case 0: + msg.Value = &v + valueMsgs = append(valueMsgs, msg) + case 1: + msg.BoolValue = &vb + boolMsgs = append(boolMsgs, msg) + case 2: + msg.StringValue = &vs + stringMsgs = append(stringMsgs, msg) + case 3: + msg.DataValue = &vd + dataMsgs = append(dataMsgs, msg) + case 4: + msg.Sum = &sum + msg.Subtopic = subtopic + msg.Protocol = httpProt + msg.Publisher = pubID2 + msg.Name = msgName + queryMsgs = append(queryMsgs, msg) + } + + messages = append(messages, msg) + } + + err := writer.ConsumeBlocking(context.TODO(), messages) + require.Nil(t, err, fmt.Sprintf("expected no error got %s\n", err)) + + reader := preader.New(db) + + // Since messages are not saved in natural order, + // cases that return subset of messages are only + // checking data result set size, but not content. + cases := []struct { + desc string + chanID string + pageMeta readers.PageMetadata + page readers.MessagesPage + }{ + { + desc: "read message page for existing channel", + chanID: chanID, + pageMeta: readers.PageMetadata{ + Offset: 0, + Limit: msgsNum, + }, + page: readers.MessagesPage{ + Total: msgsNum, + Messages: fromSenml(messages), + }, + }, + { + desc: "read message page for non-existent channel", + chanID: wrongID, + pageMeta: readers.PageMetadata{ + Offset: 0, + Limit: msgsNum, + }, + page: readers.MessagesPage{ + Messages: []readers.Message{}, + }, + }, + { + desc: "read message last page", + chanID: chanID, + pageMeta: readers.PageMetadata{ + Offset: msgsNum - 20, + Limit: msgsNum, + }, + page: readers.MessagesPage{ + Total: msgsNum, + Messages: fromSenml(messages[msgsNum-20 : msgsNum]), + }, + }, + { + desc: "read message with non-existent subtopic", + chanID: chanID, + pageMeta: readers.PageMetadata{ + Offset: 0, + Limit: msgsNum, + Subtopic: "not-present", + }, + page: readers.MessagesPage{ + Messages: []readers.Message{}, + }, + }, + { + desc: "read message with subtopic", + chanID: chanID, + pageMeta: readers.PageMetadata{ + Offset: 0, + Limit: uint64(len(queryMsgs)), + Subtopic: subtopic, + }, + page: readers.MessagesPage{ + Total: uint64(len(queryMsgs)), + Messages: fromSenml(queryMsgs), + }, + }, + { + desc: "read message with publisher", + chanID: chanID, + pageMeta: readers.PageMetadata{ + Offset: 0, + Limit: uint64(len(queryMsgs)), + Publisher: pubID2, + }, + page: readers.MessagesPage{ + Total: uint64(len(queryMsgs)), + Messages: fromSenml(queryMsgs), + }, + }, + { + desc: "read message with wrong format", + chanID: chanID, + pageMeta: readers.PageMetadata{ + Format: "messagess", + Offset: 0, + Limit: uint64(len(queryMsgs)), + Publisher: pubID2, + }, + page: readers.MessagesPage{ + Total: 0, + Messages: []readers.Message{}, + }, + }, + { + desc: "read message with protocol", + chanID: chanID, + pageMeta: readers.PageMetadata{ + Offset: 0, + Limit: uint64(len(queryMsgs)), + Protocol: httpProt, + }, + page: readers.MessagesPage{ + Total: uint64(len(queryMsgs)), + Messages: fromSenml(queryMsgs), + }, + }, + { + desc: "read message with name", + chanID: chanID, + pageMeta: readers.PageMetadata{ + Offset: 0, + Limit: limit, + Name: msgName, + }, + page: readers.MessagesPage{ + Total: uint64(len(queryMsgs)), + Messages: fromSenml(queryMsgs[0:limit]), + }, + }, + { + desc: "read message with value", + chanID: chanID, + pageMeta: readers.PageMetadata{ + Offset: 0, + Limit: limit, + Value: v, + }, + page: readers.MessagesPage{ + Total: uint64(len(valueMsgs)), + Messages: fromSenml(valueMsgs[0:limit]), + }, + 
}, + { + desc: "read message with value and equal comparator", + chanID: chanID, + pageMeta: readers.PageMetadata{ + Offset: 0, + Limit: limit, + Value: v, + Comparator: readers.EqualKey, + }, + page: readers.MessagesPage{ + Total: uint64(len(valueMsgs)), + Messages: fromSenml(valueMsgs[0:limit]), + }, + }, + { + desc: "read message with value and lower-than comparator", + chanID: chanID, + pageMeta: readers.PageMetadata{ + Offset: 0, + Limit: limit, + Value: v + 1, + Comparator: readers.LowerThanKey, + }, + page: readers.MessagesPage{ + Total: uint64(len(valueMsgs)), + Messages: fromSenml(valueMsgs[0:limit]), + }, + }, + { + desc: "read message with value and lower-than-or-equal comparator", + chanID: chanID, + pageMeta: readers.PageMetadata{ + Offset: 0, + Limit: limit, + Value: v + 1, + Comparator: readers.LowerThanEqualKey, + }, + page: readers.MessagesPage{ + Total: uint64(len(valueMsgs)), + Messages: fromSenml(valueMsgs[0:limit]), + }, + }, + { + desc: "read message with value and greater-than comparator", + chanID: chanID, + pageMeta: readers.PageMetadata{ + Offset: 0, + Limit: limit, + Value: v - 1, + Comparator: readers.GreaterThanKey, + }, + page: readers.MessagesPage{ + Total: uint64(len(valueMsgs)), + Messages: fromSenml(valueMsgs[0:limit]), + }, + }, + { + desc: "read message with value and greater-than-or-equal comparator", + chanID: chanID, + pageMeta: readers.PageMetadata{ + Offset: 0, + Limit: limit, + Value: v - 1, + Comparator: readers.GreaterThanEqualKey, + }, + page: readers.MessagesPage{ + Total: uint64(len(valueMsgs)), + Messages: fromSenml(valueMsgs[0:limit]), + }, + }, + { + desc: "read message with boolean value", + chanID: chanID, + pageMeta: readers.PageMetadata{ + Offset: 0, + Limit: limit, + BoolValue: vb, + }, + page: readers.MessagesPage{ + Total: uint64(len(boolMsgs)), + Messages: fromSenml(boolMsgs[0:limit]), + }, + }, + { + desc: "read message with string value", + chanID: chanID, + pageMeta: readers.PageMetadata{ + Offset: 0, + Limit: limit, + StringValue: vs, + }, + page: readers.MessagesPage{ + Total: uint64(len(stringMsgs)), + Messages: fromSenml(stringMsgs[0:limit]), + }, + }, + { + desc: "read message with string value and equal comparator", + chanID: chanID, + pageMeta: readers.PageMetadata{ + Offset: 0, + Limit: limit, + StringValue: vs, + Comparator: readers.EqualKey, + }, + page: readers.MessagesPage{ + Total: uint64(len(stringMsgs)), + Messages: fromSenml(stringMsgs[0:limit]), + }, + }, + { + desc: "read message with string value and lower-than comparator", + chanID: chanID, + pageMeta: readers.PageMetadata{ + Offset: 0, + Limit: limit, + StringValue: "a stringValues b", + Comparator: readers.LowerThanKey, + }, + page: readers.MessagesPage{ + Total: uint64(len(stringMsgs)), + Messages: fromSenml(stringMsgs[0:limit]), + }, + }, + { + desc: "read message with string value and lower-than-or-equal comparator", + chanID: chanID, + pageMeta: readers.PageMetadata{ + Offset: 0, + Limit: limit, + StringValue: vs, + Comparator: readers.LowerThanEqualKey, + }, + page: readers.MessagesPage{ + Total: uint64(len(stringMsgs)), + Messages: fromSenml(stringMsgs[0:limit]), + }, + }, + { + desc: "read message with string value and greater-than comparator", + chanID: chanID, + pageMeta: readers.PageMetadata{ + Offset: 0, + Limit: limit, + StringValue: "alu", + Comparator: readers.GreaterThanKey, + }, + page: readers.MessagesPage{ + Total: uint64(len(stringMsgs)), + Messages: fromSenml(stringMsgs[0:limit]), + }, + }, + { + desc: "read message with string value and 
greater-than-or-equal comparator", + chanID: chanID, + pageMeta: readers.PageMetadata{ + Offset: 0, + Limit: limit, + StringValue: vs, + Comparator: readers.GreaterThanEqualKey, + }, + page: readers.MessagesPage{ + Total: uint64(len(stringMsgs)), + Messages: fromSenml(stringMsgs[0:limit]), + }, + }, + { + desc: "read message with data value", + chanID: chanID, + pageMeta: readers.PageMetadata{ + Offset: 0, + Limit: limit, + DataValue: vd, + }, + page: readers.MessagesPage{ + Total: uint64(len(dataMsgs)), + Messages: fromSenml(dataMsgs[0:limit]), + }, + }, + { + desc: "read message with data value and lower-than comparator", + chanID: chanID, + pageMeta: readers.PageMetadata{ + Offset: 0, + Limit: limit, + DataValue: vd + string(rune(1)), + Comparator: readers.LowerThanKey, + }, + page: readers.MessagesPage{ + Total: uint64(len(dataMsgs)), + Messages: fromSenml(dataMsgs[0:limit]), + }, + }, + { + desc: "read message with data value and lower-than-or-equal comparator", + chanID: chanID, + pageMeta: readers.PageMetadata{ + Offset: 0, + Limit: limit, + DataValue: vd + string(rune(1)), + Comparator: readers.LowerThanEqualKey, + }, + page: readers.MessagesPage{ + Total: uint64(len(dataMsgs)), + Messages: fromSenml(dataMsgs[0:limit]), + }, + }, + { + desc: "read message with data value and greater-than comparator", + chanID: chanID, + pageMeta: readers.PageMetadata{ + Offset: 0, + Limit: limit, + DataValue: vd[:len(vd)-1], + Comparator: readers.GreaterThanKey, + }, + page: readers.MessagesPage{ + Total: uint64(len(dataMsgs)), + Messages: fromSenml(dataMsgs[0:limit]), + }, + }, + { + desc: "read message with data value and greater-than-or-equal comparator", + chanID: chanID, + pageMeta: readers.PageMetadata{ + Offset: 0, + Limit: limit, + DataValue: vd[:len(vd)-1], + Comparator: readers.GreaterThanEqualKey, + }, + page: readers.MessagesPage{ + Total: uint64(len(dataMsgs)), + Messages: fromSenml(dataMsgs[0:limit]), + }, + }, + { + desc: "read message with from", + chanID: chanID, + pageMeta: readers.PageMetadata{ + Offset: 0, + Limit: uint64(len(messages[0:21])), + From: messages[20].Time, + }, + page: readers.MessagesPage{ + Total: uint64(len(messages[0:21])), + Messages: fromSenml(messages[0:21]), + }, + }, + { + desc: "read message with to", + chanID: chanID, + pageMeta: readers.PageMetadata{ + Offset: 0, + Limit: uint64(len(messages[21:])), + To: messages[20].Time, + }, + page: readers.MessagesPage{ + Total: uint64(len(messages[21:])), + Messages: fromSenml(messages[21:]), + }, + }, + { + desc: "read message with from/to", + chanID: chanID, + pageMeta: readers.PageMetadata{ + Offset: 0, + Limit: limit, + From: messages[5].Time, + To: messages[0].Time, + }, + page: readers.MessagesPage{ + Total: 5, + Messages: fromSenml(messages[1:6]), + }, + }, + } + + for _, tc := range cases { + result, err := reader.ReadAll(tc.chanID, tc.pageMeta) + assert.Nil(t, err, fmt.Sprintf("%s: expected no error got %s", tc.desc, err)) + assert.ElementsMatch(t, tc.page.Messages, result.Messages, fmt.Sprintf("%s: got incorrect list of senml Messages from ReadAll()", tc.desc)) + assert.Equal(t, tc.page.Total, result.Total, fmt.Sprintf("%s: expected %v got %v", tc.desc, tc.page.Total, result.Total)) + } +} + +func TestReadJSON(t *testing.T) { + writer := pwriter.New(db) + + id1 := testsutil.GenerateUUID(t) + m := json.Message{ + Channel: id1, + Publisher: id1, + Created: time.Now().Unix(), + Subtopic: "subtopic/format/some_json", + Protocol: "coap", + Payload: map[string]interface{}{ + "field_1": 123.0, + "field_2": 
"value", + "field_3": false, + "field_4": 12.344, + "field_5": map[string]interface{}{ + "field_1": "value", + "field_2": 42.0, + }, + }, + } + messages1 := json.Messages{ + Format: format1, + } + msgs1 := []map[string]interface{}{} + for i := 0; i < msgsNum; i++ { + msg := m + messages1.Data = append(messages1.Data, msg) + m := toMap(msg) + msgs1 = append(msgs1, m) + } + + err := writer.ConsumeBlocking(context.TODO(), messages1) + require.Nil(t, err, fmt.Sprintf("expected no error got %s\n", err)) + + id2 := testsutil.GenerateUUID(t) + m = json.Message{ + Channel: id2, + Publisher: id2, + Created: time.Now().Unix(), + Subtopic: "subtopic/other_format/some_other_json", + Protocol: "udp", + Payload: map[string]interface{}{ + "field_1": "other_value", + "false_value": false, + "field_pi": 3.14159265, + }, + } + messages2 := json.Messages{ + Format: format2, + } + msgs2 := []map[string]interface{}{} + for i := 0; i < msgsNum; i++ { + msg := m + if i%2 == 0 { + msg.Protocol = httpProt + } + messages2.Data = append(messages2.Data, msg) + m := toMap(msg) + msgs2 = append(msgs2, m) + } + + err = writer.ConsumeBlocking(context.TODO(), messages2) + require.Nil(t, err, fmt.Sprintf("expected no error got %s\n", err)) + + httpMsgs := []map[string]interface{}{} + for i := 0; i < msgsNum; i += 2 { + httpMsgs = append(httpMsgs, msgs2[i]) + } + + reader := preader.New(db) + + cases := map[string]struct { + chanID string + pageMeta readers.PageMetadata + page readers.MessagesPage + }{ + "read message page for existing channel": { + chanID: id1, + pageMeta: readers.PageMetadata{ + Format: messages1.Format, + Offset: 0, + Limit: 10, + }, + page: readers.MessagesPage{ + Total: 100, + Messages: fromJSON(msgs1[:10]), + }, + }, + "read message page for non-existent channel": { + chanID: wrongID, + pageMeta: readers.PageMetadata{ + Format: messages1.Format, + Offset: 0, + Limit: 10, + }, + page: readers.MessagesPage{ + Messages: []readers.Message{}, + }, + }, + "read message last page": { + chanID: id2, + pageMeta: readers.PageMetadata{ + Format: messages2.Format, + Offset: msgsNum - 20, + Limit: msgsNum, + }, + page: readers.MessagesPage{ + Total: msgsNum, + Messages: fromJSON(msgs2[msgsNum-20 : msgsNum]), + }, + }, + "read message with protocol": { + chanID: id2, + pageMeta: readers.PageMetadata{ + Format: messages2.Format, + Offset: 0, + Limit: uint64(msgsNum / 2), + Protocol: httpProt, + }, + page: readers.MessagesPage{ + Total: uint64(msgsNum / 2), + Messages: fromJSON(httpMsgs), + }, + }, + } + + for desc, tc := range cases { + result, err := reader.ReadAll(tc.chanID, tc.pageMeta) + for i := 0; i < len(result.Messages); i++ { + m := result.Messages[i] + // Remove id as it is not sent by the client. 
+ delete(m.(map[string]interface{}), "id") + result.Messages[i] = m + } + assert.Nil(t, err, fmt.Sprintf("%s: expected no error got %s", desc, err)) + assert.ElementsMatch(t, tc.page.Messages, result.Messages, fmt.Sprintf("%s: got incorrect list of json Messages from ReadAll()", desc)) + assert.Equal(t, tc.page.Total, result.Total, fmt.Sprintf("%s: expected %v got %v", desc, tc.page.Total, result.Total)) + } +} + +func fromSenml(msg []senml.Message) []readers.Message { + var ret []readers.Message + for _, m := range msg { + ret = append(ret, m) + } + return ret +} + +func fromJSON(msg []map[string]interface{}) []readers.Message { + var ret []readers.Message + for _, m := range msg { + ret = append(ret, m) + } + return ret +} + +func toMap(msg json.Message) map[string]interface{} { + return map[string]interface{}{ + "channel": msg.Channel, + "created": msg.Created, + "subtopic": msg.Subtopic, + "publisher": msg.Publisher, + "protocol": msg.Protocol, + "payload": map[string]interface{}(msg.Payload), + } +} diff --git a/readers/postgres/setup_test.go b/readers/postgres/setup_test.go new file mode 100644 index 0000000..6792c52 --- /dev/null +++ b/readers/postgres/setup_test.go @@ -0,0 +1,83 @@ +// Copyright (c) Abstract Machines +// SPDX-License-Identifier: Apache-2.0 + +// Package postgres_test contains tests for PostgreSQL repository +// implementations. +package postgres_test + +import ( + "fmt" + "log" + "os" + "testing" + + "github.com/absmach/mg-contrib/readers/postgres" + _ "github.com/jackc/pgx/v5/stdlib" // required for SQL access + "github.com/jmoiron/sqlx" + "github.com/ory/dockertest/v3" + "github.com/ory/dockertest/v3/docker" +) + +var db *sqlx.DB + +func TestMain(m *testing.M) { + pool, err := dockertest.NewPool("") + if err != nil { + log.Fatalf("Could not connect to docker: %s", err) + } + container, err := pool.RunWithOptions(&dockertest.RunOptions{ + Repository: "postgres", + Tag: "16.2-alpine", + Env: []string{ + "POSTGRES_USER=test", + "POSTGRES_PASSWORD=test", + "POSTGRES_DB=test", + "listen_addresses = '*'", + }, + }, func(config *docker.HostConfig) { + config.AutoRemove = true + config.RestartPolicy = docker.RestartPolicy{Name: "no"} + }) + if err != nil { + log.Fatalf("Could not start container: %s", err) + } + + port := container.GetPort("5432/tcp") + url := fmt.Sprintf("host=localhost port=%s user=test dbname=test password=test sslmode=disable", port) + + if err = pool.Retry(func() error { + db, err = sqlx.Open("pgx", url) + if err != nil { + return err + } + return db.Ping() + }); err != nil { + log.Fatalf("Could not connect to docker: %s", err) + } + + dbConfig := postgres.Config{ + Host: "localhost", + Port: port, + User: "test", + Pass: "test", + Name: "test", + SSLMode: "disable", + SSLCert: "", + SSLKey: "", + SSLRootCert: "", + } + + if db, err = postgres.Connect(dbConfig); err != nil { + log.Fatalf("Could not setup test DB connection: %s", err) + } + + code := m.Run() + + // Defers will not be run when using os.Exit + db.Close() + if err = pool.Purge(container); err != nil { + log.Fatalf("Could not purge container: %s", err) + } + + os.Exit(code) +} diff --git a/readers/timescale/README.md b/readers/timescale/README.md new file mode 100644 index 0000000..807939e --- /dev/null +++ b/readers/timescale/README.md @@ -0,0 +1,99 @@ +# Timescale reader + +Timescale reader provides message repository implementation for Timescale. + +## Configuration + +The service is configured using the environment variables presented in the +following table. 
Note that any unset variables will be replaced with their
+default values.
+
+| Variable                             | Description                                   | Default                        |
+| ------------------------------------ | --------------------------------------------- | ------------------------------ |
+| MG_TIMESCALE_READER_LOG_LEVEL        | Service log level                             | info                           |
+| MG_TIMESCALE_READER_HTTP_HOST        | Service HTTP host                             | localhost                      |
+| MG_TIMESCALE_READER_HTTP_PORT        | Service HTTP port                             | 8180                           |
+| MG_TIMESCALE_READER_HTTP_SERVER_CERT | Service HTTP server certificate path          | ""                             |
+| MG_TIMESCALE_READER_HTTP_SERVER_KEY  | Service HTTP server key path                  | ""                             |
+| MG_TIMESCALE_HOST                    | Timescale DB host                             | localhost                      |
+| MG_TIMESCALE_PORT                    | Timescale DB port                             | 5432                           |
+| MG_TIMESCALE_USER                    | Timescale user                                | magistrala                     |
+| MG_TIMESCALE_PASS                    | Timescale password                            | magistrala                     |
+| MG_TIMESCALE_NAME                    | Timescale database name                       | messages                       |
+| MG_TIMESCALE_SSL_MODE                | Timescale SSL mode                            | disable                        |
+| MG_TIMESCALE_SSL_CERT                | Timescale SSL certificate path                | ""                             |
+| MG_TIMESCALE_SSL_KEY                 | Timescale SSL key                             | ""                             |
+| MG_TIMESCALE_SSL_ROOT_CERT           | Timescale SSL root certificate path           | ""                             |
+| MG_THINGS_AUTH_GRPC_URL              | Things service Auth gRPC URL                  | localhost:7000                 |
+| MG_THINGS_AUTH_GRPC_TIMEOUT          | Things service Auth gRPC timeout in seconds   | 1s                             |
+| MG_THINGS_AUTH_GRPC_CLIENT_TLS       | Things service Auth gRPC TLS enabled flag     | false                          |
+| MG_THINGS_AUTH_GRPC_CA_CERTS         | Things service Auth gRPC CA certificates      | ""                             |
+| MG_AUTH_GRPC_URL                     | Auth service gRPC URL                         | localhost:7001                 |
+| MG_AUTH_GRPC_TIMEOUT                 | Auth service gRPC timeout in seconds          | 1s                             |
+| MG_AUTH_GRPC_CLIENT_TLS              | Auth service gRPC TLS enabled flag            | false                          |
+| MG_AUTH_GRPC_CA_CERT                 | Auth service gRPC CA certificate              | ""                             |
+| MG_JAEGER_URL                        | Jaeger server URL                             | http://jaeger:14268/api/traces |
+| MG_SEND_TELEMETRY                    | Send telemetry to magistrala call home server | true                           |
+| MG_TIMESCALE_READER_INSTANCE_ID      | Timescale reader instance ID                  | ""                             |
+
+## Deployment
+
+The service itself is distributed as a Docker container. Check the [`timescale-reader`](https://github.com/absmach/magistrala/blob/main/docker/addons/timescale-reader/docker-compose.yml#L17-L41) service section in the docker-compose file to see how the service is deployed.
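+
+If you prefer running the service from the published compose setup, the addon
+compose file can be used on its own. This is a sketch: the file path follows
+the repository layout above, and depending on your setup you may also need the
+core compose file and `docker/.env`:
+
+```bash
+# from the magistrala repository root: bring up the timescale-reader addon
+docker compose -f docker/addons/timescale-reader/docker-compose.yml up -d
+```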
+
+To start the service, execute the following shell script:
+
+```bash
+# download the latest version of the service
+git clone https://github.com/absmach/magistrala
+
+cd magistrala
+
+# compile the timescale reader
+make timescale-reader
+
+# copy binary to bin
+make install
+
+# Set the environment variables and run the service
+MG_TIMESCALE_READER_LOG_LEVEL=[Service log level] \
+MG_TIMESCALE_READER_HTTP_HOST=[Service HTTP host] \
+MG_TIMESCALE_READER_HTTP_PORT=[Service HTTP port] \
+MG_TIMESCALE_READER_HTTP_SERVER_CERT=[Service HTTP server cert] \
+MG_TIMESCALE_READER_HTTP_SERVER_KEY=[Service HTTP server key] \
+MG_TIMESCALE_HOST=[Timescale host] \
+MG_TIMESCALE_PORT=[Timescale port] \
+MG_TIMESCALE_USER=[Timescale user] \
+MG_TIMESCALE_PASS=[Timescale password] \
+MG_TIMESCALE_NAME=[Timescale database name] \
+MG_TIMESCALE_SSL_MODE=[Timescale SSL mode] \
+MG_TIMESCALE_SSL_CERT=[Timescale SSL cert] \
+MG_TIMESCALE_SSL_KEY=[Timescale SSL key] \
+MG_TIMESCALE_SSL_ROOT_CERT=[Timescale SSL root cert] \
+MG_THINGS_AUTH_GRPC_URL=[Things service Auth gRPC URL] \
+MG_THINGS_AUTH_GRPC_TIMEOUT=[Things service Auth gRPC request timeout in seconds] \
+MG_THINGS_AUTH_GRPC_CLIENT_TLS=[Things service Auth gRPC TLS enabled flag] \
+MG_THINGS_AUTH_GRPC_CA_CERTS=[Things service Auth gRPC CA certificates] \
+MG_AUTH_GRPC_URL=[Auth service gRPC URL] \
+MG_AUTH_GRPC_TIMEOUT=[Auth service gRPC request timeout in seconds] \
+MG_AUTH_GRPC_CLIENT_TLS=[Auth service gRPC TLS enabled flag] \
+MG_AUTH_GRPC_CA_CERT=[Auth service gRPC CA certificate] \
+MG_JAEGER_URL=[Jaeger server URL] \
+MG_SEND_TELEMETRY=[Send telemetry to magistrala call home server] \
+MG_TIMESCALE_READER_INSTANCE_ID=[Timescale reader instance ID] \
+$GOBIN/magistrala-timescale-reader
+```
+
+## Usage
+
+Starting the service will expose an HTTP API for reading messages stored in normalized SenML format.
+
+Comparator Usage Guide:
+| Comparator | Usage                                                              | Example                            |
+| ---------- | ------------------------------------------------------------------ | ---------------------------------- |
+| eq         | Return values that are equal to the query                          | eq["active"] -> "active"           |
+| ge         | Return values that contain the query (superstrings of it)          | ge["tiv"] -> "active" and "tiv"    |
+| gt         | Return values that contain the query and are not equal to it       | gt["tiv"] -> "active"              |
+| le         | Return values that are contained in the query (substrings of it)   | le["active"] -> "active" and "tiv" |
+| lt         | Return values that are contained in the query and not equal to it  | lt["active"] -> "tiv"              |
+
+Official docs can be found [here](https://docs.magistrala.abstractmachines.fr).
diff --git a/readers/timescale/doc.go b/readers/timescale/doc.go
new file mode 100644
index 0000000..302be6e
--- /dev/null
+++ b/readers/timescale/doc.go
@@ -0,0 +1,6 @@
+// Copyright (c) Abstract Machines
+// SPDX-License-Identifier: Apache-2.0
+
+// Package timescale contains repository implementations using Timescale as
+// the underlying database.
+package timescale
diff --git a/readers/timescale/init.go b/readers/timescale/init.go
new file mode 100644
index 0000000..9513df1
--- /dev/null
+++ b/readers/timescale/init.go
@@ -0,0 +1,80 @@
+// Copyright (c) Abstract Machines
+// SPDX-License-Identifier: Apache-2.0
+
+package timescale
+
+import (
+	"fmt"
+
+	"github.com/jmoiron/sqlx"
+	migrate "github.com/rubenv/sql-migrate"
+)
+
+// Table for SenML messages.
+const defTable = "messages"
+
+// Config defines the options that are used when connecting to the Timescale instance.
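+// Its fields map one-to-one onto libpq connection-string parameters
+// (host, port, user, dbname, password, sslmode, sslcert, sslkey,
+// sslrootcert), as assembled in Connect below.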
+type Config struct {
+	Host        string
+	Port        string
+	User        string
+	Pass        string
+	Name        string
+	SSLMode     string
+	SSLCert     string
+	SSLKey      string
+	SSLRootCert string
+}
+
+// Connect creates a connection to the Timescale instance and applies any
+// unapplied database migrations. A non-nil error is returned to indicate
+// failure.
+func Connect(cfg Config) (*sqlx.DB, error) {
+	url := fmt.Sprintf("host=%s port=%s user=%s dbname=%s password=%s sslmode=%s sslcert=%s sslkey=%s sslrootcert=%s", cfg.Host, cfg.Port, cfg.User, cfg.Name, cfg.Pass, cfg.SSLMode, cfg.SSLCert, cfg.SSLKey, cfg.SSLRootCert)
+
+	db, err := sqlx.Open("pgx", url)
+	if err != nil {
+		return nil, err
+	}
+
+	if err := migrateDB(db); err != nil {
+		return nil, err
+	}
+
+	return db, nil
+}
+
+func migrateDB(db *sqlx.DB) error {
+	migrations := &migrate.MemoryMigrationSource{
+		Migrations: []*migrate.Migration{
+			{
+				Id: "messages_1",
+				Up: []string{
+					`CREATE TABLE IF NOT EXISTS messages (
+					time BIGINT NOT NULL,
+					channel UUID,
+					subtopic VARCHAR(254),
+					publisher UUID,
+					protocol TEXT,
+					name VARCHAR(254),
+					unit TEXT,
+					value FLOAT,
+					string_value TEXT,
+					bool_value BOOL,
+					data_value BYTEA,
+					sum FLOAT,
+					update_time FLOAT,
+					PRIMARY KEY (time, publisher, subtopic, name)
+					);
+					SELECT create_hypertable('messages', 'time', create_default_indexes => FALSE, chunk_time_interval => 86400000, if_not_exists => TRUE);`,
+				},
+				Down: []string{
+					"DROP TABLE messages",
+				},
+			},
+		},
+	}
+
+	_, err := migrate.Exec(db.DB, "postgres", migrations, migrate.Up)
+	return err
+}
diff --git a/readers/timescale/messages.go b/readers/timescale/messages.go
new file mode 100644
index 0000000..ebde422
--- /dev/null
+++ b/readers/timescale/messages.go
@@ -0,0 +1,201 @@
+// Copyright (c) Abstract Machines
+// SPDX-License-Identifier: Apache-2.0
+
+package timescale
+
+import (
+	"encoding/json"
+	"fmt"
+
+	"github.com/absmach/magistrala/pkg/errors"
+	"github.com/absmach/magistrala/pkg/transformers/senml"
+	"github.com/absmach/magistrala/readers"
+	"github.com/jackc/pgerrcode"
+	"github.com/jackc/pgx/v5/pgconn"
+	"github.com/jmoiron/sqlx" // required for DB access
+)
+
+var _ readers.MessageRepository = (*timescaleRepository)(nil)
+
+type timescaleRepository struct {
+	db *sqlx.DB
+}
+
+// New returns a new Timescale message repository.
+func New(db *sqlx.DB) readers.MessageRepository {
+	return &timescaleRepository{
+		db: db,
+	}
+}
+
+func (tr timescaleRepository) ReadAll(chanID string, rpm readers.PageMetadata) (readers.MessagesPage, error) {
+	order := "time"
+	format := defTable
+
+	if rpm.Format != "" && rpm.Format != defTable {
+		order = "created"
+		format = rpm.Format
+	}
+
+	q := fmt.Sprintf(`SELECT * FROM %s WHERE %s ORDER BY %s DESC LIMIT :limit OFFSET :offset;`, format, fmtCondition(rpm), order)
+	totalQuery := fmt.Sprintf(`SELECT COUNT(*) FROM %s WHERE %s;`, format, fmtCondition(rpm))
+
+	// If aggregation is provided, add time_bucket and aggregation to the query
+	if rpm.Aggregation != "" {
+		q = fmt.Sprintf(`SELECT EXTRACT(epoch FROM time_bucket('%s', to_timestamp(time/1000000))) *1000000 AS time, publisher, protocol, subtopic, name, unit, %s(value) AS value FROM %s WHERE %s GROUP BY time, publisher, protocol, subtopic, name, unit ORDER BY time DESC LIMIT :limit OFFSET :offset;`, rpm.Interval, rpm.Aggregation, format, fmtCondition(rpm))
+		totalQuery = fmt.Sprintf(`SELECT COUNT(*) FROM (SELECT EXTRACT(epoch FROM time_bucket('%s', to_timestamp(time/1000000))) AS time, %s(value) AS value FROM %s WHERE %s GROUP BY time) AS subquery;`, rpm.Interval, rpm.Aggregation, format, fmtCondition(rpm))
+	}
+
+	params := map[string]interface{}{
+		"channel":      chanID,
+		"limit":        rpm.Limit,
+		"offset":       rpm.Offset,
+		"subtopic":     rpm.Subtopic,
+		"publisher":    rpm.Publisher,
+		"name":         rpm.Name,
+		"protocol":     rpm.Protocol,
+		"value":        rpm.Value,
+		"bool_value":   rpm.BoolValue,
+		"string_value": rpm.StringValue,
+		"data_value":   rpm.DataValue,
+		"from":         rpm.From,
+		"to":           rpm.To,
+	}
+
+	rows, err := tr.db.NamedQuery(q, params)
+	if err != nil {
+		if pgErr, ok := err.(*pgconn.PgError); ok {
+			if pgErr.Code == pgerrcode.UndefinedTable {
+				return readers.MessagesPage{}, nil
+			}
+		}
+		return readers.MessagesPage{}, errors.Wrap(readers.ErrReadMessages, err)
+	}
+	defer rows.Close()
+
+	page := readers.MessagesPage{
+		PageMetadata: rpm,
+		Messages:     []readers.Message{},
+	}
+	switch format {
+	case defTable:
+		for rows.Next() {
+			msg := senmlMessage{Message: senml.Message{}}
+			if err := rows.StructScan(&msg); err != nil {
+				return readers.MessagesPage{}, errors.Wrap(readers.ErrReadMessages, err)
+			}
+
+			page.Messages = append(page.Messages, msg.Message)
+		}
+	default:
+		for rows.Next() {
+			msg := jsonMessage{}
+			if err := rows.StructScan(&msg); err != nil {
+				return readers.MessagesPage{}, errors.Wrap(readers.ErrReadMessages, err)
+			}
+			m, err := msg.toMap()
+			if err != nil {
+				return readers.MessagesPage{}, errors.Wrap(readers.ErrReadMessages, err)
+			}
+			page.Messages = append(page.Messages, m)
+		}
+	}
+
+	rows, err = tr.db.NamedQuery(totalQuery, params)
+	if err != nil {
+		return readers.MessagesPage{}, errors.Wrap(readers.ErrReadMessages, err)
+	}
+	defer rows.Close()
+
+	total := uint64(0)
+	if rows.Next() {
+		if err := rows.Scan(&total); err != nil {
+			return page, err
+		}
+	}
+	page.Total = total
+
+	return page, nil
+}
+
+func fmtCondition(rpm readers.PageMetadata) string {
+	condition := `channel = :channel`
+
+	var query map[string]interface{}
+	meta, err := json.Marshal(rpm)
+	if err != nil {
+		return condition
+	}
+	if err := json.Unmarshal(meta, &query); err != nil {
+		return condition
+	}
+
+	for name := range query {
+		switch name {
+		case
+			"subtopic",
+			"publisher",
+			"name",
+			"protocol":
+			condition = fmt.Sprintf(`%s AND %s = :%s`, condition, name, name)
+		case "v":
+			comparator := readers.ParseValueComparator(query)
+			condition
= fmt.Sprintf(`%s AND value %s :value`, condition, comparator) + case "vb": + condition = fmt.Sprintf(`%s AND bool_value = :bool_value`, condition) + case "vs": + comparator := readers.ParseValueComparator(query) + switch comparator { + case "=": + condition = fmt.Sprintf("%s AND string_value = :string_value ", condition) + case ">": + condition = fmt.Sprintf("%s AND string_value LIKE '%%' || :string_value || '%%' AND string_value <> :string_value", condition) + case ">=": + condition = fmt.Sprintf("%s AND string_value LIKE '%%' || :string_value || '%%'", condition) + case "<=": + condition = fmt.Sprintf("%s AND :string_value LIKE '%%' || string_value || '%%'", condition) + case "<": + condition = fmt.Sprintf("%s AND :string_value LIKE '%%' || string_value || '%%' AND string_value <> :string_value", condition) + } + case "vd": + comparator := readers.ParseValueComparator(query) + condition = fmt.Sprintf(`%s AND data_value %s :data_value`, condition, comparator) + case "from": + condition = fmt.Sprintf(`%s AND time >= :from`, condition) + case "to": + condition = fmt.Sprintf(`%s AND time < :to`, condition) + } + } + return condition +} + +type senmlMessage struct { + ID string `db:"id"` + senml.Message +} + +type jsonMessage struct { + Channel string `db:"channel"` + Created int64 `db:"created"` + Subtopic string `db:"subtopic"` + Publisher string `db:"publisher"` + Protocol string `db:"protocol"` + Payload []byte `db:"payload"` +} + +func (msg jsonMessage) toMap() (map[string]interface{}, error) { + ret := map[string]interface{}{ + "channel": msg.Channel, + "created": msg.Created, + "subtopic": msg.Subtopic, + "publisher": msg.Publisher, + "protocol": msg.Protocol, + "payload": map[string]interface{}{}, + } + pld := make(map[string]interface{}) + if err := json.Unmarshal(msg.Payload, &pld); err != nil { + return nil, err + } + ret["payload"] = pld + return ret, nil +} diff --git a/readers/timescale/messages_test.go b/readers/timescale/messages_test.go new file mode 100644 index 0000000..ff74183 --- /dev/null +++ b/readers/timescale/messages_test.go @@ -0,0 +1,684 @@ +// Copyright (c) Abstract Machines +// SPDX-License-Identifier: Apache-2.0 + +package timescale_test + +import ( + "context" + "fmt" + "testing" + "time" + + "github.com/absmach/magistrala/pkg/transformers/json" + "github.com/absmach/magistrala/pkg/transformers/senml" + "github.com/absmach/magistrala/readers" + twriter "github.com/absmach/mg-contrib/consumers/writers/timescale" + "github.com/absmach/mg-contrib/pkg/testsutil" + treader "github.com/absmach/mg-contrib/readers/timescale" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +const ( + subtopic = "subtopic" + msgsNum = 100 + limit = 10 + valueFields = 5 + mqttProt = "mqtt" + httpProt = "http" + msgName = "temperature" + format1 = "format1" + format2 = "format2" + wrongID = "0" +) + +var ( + v float64 = 5 + vs = "stringValue" + vb = true + vd = "dataValue" + sum float64 = 42 +) + +func TestReadSenml(t *testing.T) { + writer := twriter.New(db) + + chanID := testsutil.GenerateUUID(t) + pubID := testsutil.GenerateUUID(t) + pubID2 := testsutil.GenerateUUID(t) + wrongID := testsutil.GenerateUUID(t) + + m := senml.Message{ + Channel: chanID, + Publisher: pubID, + Protocol: mqttProt, + } + + messages := []senml.Message{} + valueMsgs := []senml.Message{} + boolMsgs := []senml.Message{} + stringMsgs := []senml.Message{} + dataMsgs := []senml.Message{} + queryMsgs := []senml.Message{} + + now := float64(time.Now().Unix()) + for i := 0; i < 
msgsNum; i++ { + // Mix possible values as well as value sum. + msg := m + msg.Time = now - float64(i) + + count := i % valueFields + switch count { + case 0: + msg.Value = &v + valueMsgs = append(valueMsgs, msg) + case 1: + msg.BoolValue = &vb + boolMsgs = append(boolMsgs, msg) + case 2: + msg.StringValue = &vs + stringMsgs = append(stringMsgs, msg) + case 3: + msg.DataValue = &vd + dataMsgs = append(dataMsgs, msg) + case 4: + msg.Sum = &sum + msg.Subtopic = subtopic + msg.Protocol = httpProt + msg.Publisher = pubID2 + msg.Name = msgName + queryMsgs = append(queryMsgs, msg) + } + + messages = append(messages, msg) + } + + err := writer.ConsumeBlocking(context.TODO(), messages) + require.Nil(t, err, fmt.Sprintf("expected no error got %s\n", err)) + + reader := treader.New(db) + + // Since messages are not saved in natural order, + // cases that return subset of messages are only + // checking data result set size, but not content. + cases := []struct { + desc string + chanID string + pageMeta readers.PageMetadata + page readers.MessagesPage + }{ + { + desc: "read message page for existing channel", + chanID: chanID, + pageMeta: readers.PageMetadata{ + Offset: 0, + Limit: msgsNum, + }, + page: readers.MessagesPage{ + Total: msgsNum, + Messages: fromSenml(messages), + }, + }, + { + desc: "read message page for non-existent channel", + chanID: wrongID, + pageMeta: readers.PageMetadata{ + Offset: 0, + Limit: msgsNum, + }, + page: readers.MessagesPage{ + Messages: []readers.Message{}, + }, + }, + { + desc: "read message last page", + chanID: chanID, + pageMeta: readers.PageMetadata{ + Offset: msgsNum - 20, + Limit: msgsNum, + }, + page: readers.MessagesPage{ + Total: msgsNum, + Messages: fromSenml(messages[msgsNum-20 : msgsNum]), + }, + }, + { + desc: "read message with non-existent subtopic", + chanID: chanID, + pageMeta: readers.PageMetadata{ + Offset: 0, + Limit: msgsNum, + Subtopic: "not-present", + }, + page: readers.MessagesPage{ + Messages: []readers.Message{}, + }, + }, + { + desc: "read message with subtopic", + chanID: chanID, + pageMeta: readers.PageMetadata{ + Offset: 0, + Limit: uint64(len(queryMsgs)), + Subtopic: subtopic, + }, + page: readers.MessagesPage{ + Total: uint64(len(queryMsgs)), + Messages: fromSenml(queryMsgs), + }, + }, + { + desc: "read message with publisher", + chanID: chanID, + pageMeta: readers.PageMetadata{ + Offset: 0, + Limit: uint64(len(queryMsgs)), + Publisher: pubID2, + }, + page: readers.MessagesPage{ + Total: uint64(len(queryMsgs)), + Messages: fromSenml(queryMsgs), + }, + }, + { + desc: "read message with wrong format", + chanID: chanID, + pageMeta: readers.PageMetadata{ + Format: "messagess", + Offset: 0, + Limit: uint64(len(queryMsgs)), + Publisher: pubID2, + }, + page: readers.MessagesPage{ + Total: 0, + Messages: []readers.Message{}, + }, + }, + { + desc: "read message with protocol", + chanID: chanID, + pageMeta: readers.PageMetadata{ + Offset: 0, + Limit: uint64(len(queryMsgs)), + Protocol: httpProt, + }, + page: readers.MessagesPage{ + Total: uint64(len(queryMsgs)), + Messages: fromSenml(queryMsgs), + }, + }, + { + desc: "read message with name", + chanID: chanID, + pageMeta: readers.PageMetadata{ + Offset: 0, + Limit: limit, + Name: msgName, + }, + page: readers.MessagesPage{ + Total: uint64(len(queryMsgs)), + Messages: fromSenml(queryMsgs[0:limit]), + }, + }, + { + desc: "read message with value", + chanID: chanID, + pageMeta: readers.PageMetadata{ + Offset: 0, + Limit: limit, + Value: v, + }, + page: readers.MessagesPage{ + Total: 
uint64(len(valueMsgs)), + Messages: fromSenml(valueMsgs[0:limit]), + }, + }, + { + desc: "read message with value and equal comparator", + chanID: chanID, + pageMeta: readers.PageMetadata{ + Offset: 0, + Limit: limit, + Value: v, + Comparator: readers.EqualKey, + }, + page: readers.MessagesPage{ + Total: uint64(len(valueMsgs)), + Messages: fromSenml(valueMsgs[0:limit]), + }, + }, + { + desc: "read message with value and lower-than comparator", + chanID: chanID, + pageMeta: readers.PageMetadata{ + Offset: 0, + Limit: limit, + Value: v + 1, + Comparator: readers.LowerThanKey, + }, + page: readers.MessagesPage{ + Total: uint64(len(valueMsgs)), + Messages: fromSenml(valueMsgs[0:limit]), + }, + }, + { + desc: "read message with value and lower-than-or-equal comparator", + chanID: chanID, + pageMeta: readers.PageMetadata{ + Offset: 0, + Limit: limit, + Value: v + 1, + Comparator: readers.LowerThanEqualKey, + }, + page: readers.MessagesPage{ + Total: uint64(len(valueMsgs)), + Messages: fromSenml(valueMsgs[0:limit]), + }, + }, + { + desc: "read message with value and greater-than comparator", + chanID: chanID, + pageMeta: readers.PageMetadata{ + Offset: 0, + Limit: limit, + Value: v - 1, + Comparator: readers.GreaterThanKey, + }, + page: readers.MessagesPage{ + Total: uint64(len(valueMsgs)), + Messages: fromSenml(valueMsgs[0:limit]), + }, + }, + { + desc: "read message with value and greater-than-or-equal comparator", + chanID: chanID, + pageMeta: readers.PageMetadata{ + Offset: 0, + Limit: limit, + Value: v - 1, + Comparator: readers.GreaterThanEqualKey, + }, + page: readers.MessagesPage{ + Total: uint64(len(valueMsgs)), + Messages: fromSenml(valueMsgs[0:limit]), + }, + }, + { + desc: "read message with boolean value", + chanID: chanID, + pageMeta: readers.PageMetadata{ + Offset: 0, + Limit: limit, + BoolValue: vb, + }, + page: readers.MessagesPage{ + Total: uint64(len(boolMsgs)), + Messages: fromSenml(boolMsgs[0:limit]), + }, + }, + { + desc: "read message with string value", + chanID: chanID, + pageMeta: readers.PageMetadata{ + Offset: 0, + Limit: limit, + StringValue: vs, + }, + page: readers.MessagesPage{ + Total: uint64(len(stringMsgs)), + Messages: fromSenml(stringMsgs[0:limit]), + }, + }, + { + desc: "read message with string value and equal comparator", + chanID: chanID, + pageMeta: readers.PageMetadata{ + Offset: 0, + Limit: limit, + StringValue: vs, + Comparator: readers.EqualKey, + }, + page: readers.MessagesPage{ + Total: uint64(len(stringMsgs)), + Messages: fromSenml(stringMsgs[0:limit]), + }, + }, + { + desc: "read message with string value and lower-than comparator", + chanID: chanID, + pageMeta: readers.PageMetadata{ + Offset: 0, + Limit: limit, + StringValue: "a stringValues b", + Comparator: readers.LowerThanKey, + }, + page: readers.MessagesPage{ + Total: uint64(len(stringMsgs)), + Messages: fromSenml(stringMsgs[0:limit]), + }, + }, + { + desc: "read message with string value and lower-than-or-equal comparator", + chanID: chanID, + pageMeta: readers.PageMetadata{ + Offset: 0, + Limit: limit, + StringValue: vs, + Comparator: readers.LowerThanEqualKey, + }, + page: readers.MessagesPage{ + Total: uint64(len(stringMsgs)), + Messages: fromSenml(stringMsgs[0:limit]), + }, + }, + { + desc: "read message with string value and greater-than comparator", + chanID: chanID, + pageMeta: readers.PageMetadata{ + Offset: 0, + Limit: limit, + StringValue: "alu", + Comparator: readers.GreaterThanKey, + }, + page: readers.MessagesPage{ + Total: uint64(len(stringMsgs)), + Messages: 
fromSenml(stringMsgs[0:limit]), + }, + }, + { + desc: "read message with string value and greater-than-or-equal comparator", + chanID: chanID, + pageMeta: readers.PageMetadata{ + Offset: 0, + Limit: limit, + StringValue: vs, + Comparator: readers.GreaterThanEqualKey, + }, + page: readers.MessagesPage{ + Total: uint64(len(stringMsgs)), + Messages: fromSenml(stringMsgs[0:limit]), + }, + }, + { + desc: "read message with data value", + chanID: chanID, + pageMeta: readers.PageMetadata{ + Offset: 0, + Limit: limit, + DataValue: vd, + }, + page: readers.MessagesPage{ + Total: uint64(len(dataMsgs)), + Messages: fromSenml(dataMsgs[0:limit]), + }, + }, + { + desc: "read message with data value and lower-than comparator", + chanID: chanID, + pageMeta: readers.PageMetadata{ + Offset: 0, + Limit: limit, + DataValue: vd + string(rune(1)), + Comparator: readers.LowerThanKey, + }, + page: readers.MessagesPage{ + Total: uint64(len(dataMsgs)), + Messages: fromSenml(dataMsgs[0:limit]), + }, + }, + { + desc: "read message with data value and lower-than-or-equal comparator", + chanID: chanID, + pageMeta: readers.PageMetadata{ + Offset: 0, + Limit: limit, + DataValue: vd + string(rune(1)), + Comparator: readers.LowerThanEqualKey, + }, + page: readers.MessagesPage{ + Total: uint64(len(dataMsgs)), + Messages: fromSenml(dataMsgs[0:limit]), + }, + }, + { + desc: "read message with data value and greater-than comparator", + chanID: chanID, + pageMeta: readers.PageMetadata{ + Offset: 0, + Limit: limit, + DataValue: vd[:len(vd)-1] + string(rune(1)), + Comparator: readers.GreaterThanKey, + }, + page: readers.MessagesPage{ + Total: uint64(len(dataMsgs)), + Messages: fromSenml(dataMsgs[0:limit]), + }, + }, + { + desc: "read message with data value and greater-than-or-equal comparator", + chanID: chanID, + pageMeta: readers.PageMetadata{ + Offset: 0, + Limit: limit, + DataValue: vd[:len(vd)-1] + string(rune(1)), + Comparator: readers.GreaterThanEqualKey, + }, + page: readers.MessagesPage{ + Total: uint64(len(dataMsgs)), + Messages: fromSenml(dataMsgs[0:limit]), + }, + }, + { + desc: "read message with from", + chanID: chanID, + pageMeta: readers.PageMetadata{ + Offset: 0, + Limit: uint64(len(messages[0:21])), + From: messages[20].Time, + }, + page: readers.MessagesPage{ + Total: uint64(len(messages[0:21])), + Messages: fromSenml(messages[0:21]), + }, + }, + { + desc: "read message with to", + chanID: chanID, + pageMeta: readers.PageMetadata{ + Offset: 0, + Limit: uint64(len(messages[21:])), + To: messages[20].Time, + }, + page: readers.MessagesPage{ + Total: uint64(len(messages[21:])), + Messages: fromSenml(messages[21:]), + }, + }, + { + desc: "read message with from/to", + chanID: chanID, + pageMeta: readers.PageMetadata{ + Offset: 0, + Limit: limit, + From: messages[5].Time, + To: messages[0].Time, + }, + page: readers.MessagesPage{ + Total: 5, + Messages: fromSenml(messages[1:6]), + }, + }, + } + + for _, tc := range cases { + result, err := reader.ReadAll(tc.chanID, tc.pageMeta) + assert.Nil(t, err, fmt.Sprintf("%s: expected no error got %s", tc.desc, err)) + assert.ElementsMatch(t, tc.page.Messages, result.Messages, fmt.Sprintf("%s: expected %v got %v", tc.desc, tc.page.Messages, result.Messages)) + assert.Equal(t, tc.page.Total, result.Total, fmt.Sprintf("%s: expected %v got %v", tc.desc, tc.page.Total, result.Total)) + } +} + +func TestReadJSON(t *testing.T) { + writer := twriter.New(db) + + id1 := testsutil.GenerateUUID(t) + messages1 := json.Messages{ + Format: format1, + } + msgs1 := []map[string]interface{}{} 
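+	// Give each message a distinct, decreasing Created timestamp so that
+	// ordering by the created column is deterministic when pages are read back.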
+ timeNow := time.Now().UnixMilli() + for i := 0; i < msgsNum; i++ { + m := json.Message{ + Channel: id1, + Publisher: id1, + Created: timeNow - int64(i), + Subtopic: "subtopic/format/some_json", + Protocol: "coap", + Payload: map[string]interface{}{ + "field_1": 123.0, + "field_2": "value", + "field_3": false, + "field_4": 12.344, + "field_5": map[string]interface{}{ + "field_1": "value", + "field_2": 42.0, + }, + }, + } + + msg := m + messages1.Data = append(messages1.Data, msg) + mapped := toMap(msg) + msgs1 = append(msgs1, mapped) + } + + err := writer.ConsumeBlocking(context.TODO(), messages1) + require.Nil(t, err, fmt.Sprintf("expected no error got %s\n", err)) + + id2 := testsutil.GenerateUUID(t) + messages2 := json.Messages{ + Format: format2, + } + msgs2 := []map[string]interface{}{} + for i := 0; i < msgsNum; i++ { + m := json.Message{ + Channel: id2, + Publisher: id2, + Created: timeNow - int64(i), + Subtopic: "subtopic/other_format/some_other_json", + Protocol: "udp", + Payload: map[string]interface{}{ + "field_1": "other_value", + "false_value": false, + "field_pi": 3.14159265, + }, + } + + msg := m + if i%2 == 0 { + msg.Protocol = httpProt + } + messages2.Data = append(messages2.Data, msg) + mapped := toMap(msg) + msgs2 = append(msgs2, mapped) + } + + err = writer.ConsumeBlocking(context.TODO(), messages2) + require.Nil(t, err, fmt.Sprintf("expected no error got %s\n", err)) + + httpMsgs := []map[string]interface{}{} + for i := 0; i < msgsNum; i += 2 { + httpMsgs = append(httpMsgs, msgs2[i]) + } + + reader := treader.New(db) + + cases := map[string]struct { + chanID string + pageMeta readers.PageMetadata + page readers.MessagesPage + }{ + "read message page for existing channel": { + chanID: id1, + pageMeta: readers.PageMetadata{ + Format: messages1.Format, + Offset: 0, + Limit: 10, + }, + page: readers.MessagesPage{ + Total: 100, + Messages: fromJSON(msgs1[:10]), + }, + }, + "read message page for non-existent channel": { + chanID: wrongID, + pageMeta: readers.PageMetadata{ + Format: messages1.Format, + Offset: 0, + Limit: 10, + }, + page: readers.MessagesPage{ + Messages: []readers.Message{}, + }, + }, + "read message last page": { + chanID: id2, + pageMeta: readers.PageMetadata{ + Format: messages2.Format, + Offset: msgsNum - 20, + Limit: msgsNum, + }, + page: readers.MessagesPage{ + Total: msgsNum, + Messages: fromJSON(msgs2[msgsNum-20 : msgsNum]), + }, + }, + "read message with protocol": { + chanID: id2, + pageMeta: readers.PageMetadata{ + Format: messages2.Format, + Offset: 0, + Limit: uint64(msgsNum / 2), + Protocol: httpProt, + }, + page: readers.MessagesPage{ + Total: uint64(msgsNum / 2), + Messages: fromJSON(httpMsgs), + }, + }, + } + + for desc, tc := range cases { + result, err := reader.ReadAll(tc.chanID, tc.pageMeta) + assert.Nil(t, err, fmt.Sprintf("%s: expected no error got %s", desc, err)) + assert.ElementsMatch(t, tc.page.Messages, result.Messages, fmt.Sprintf("%s: got incorrect list of json Messages from ReadAll()", desc)) + assert.Equal(t, tc.page.Total, result.Total, fmt.Sprintf("%s: expected %v got %v", desc, tc.page.Total, result.Total)) + } +} + +func fromSenml(msg []senml.Message) []readers.Message { + var ret []readers.Message + for _, m := range msg { + ret = append(ret, m) + } + return ret +} + +func fromJSON(msg []map[string]interface{}) []readers.Message { + var ret []readers.Message + for _, m := range msg { + ret = append(ret, m) + } + return ret +} + +func toMap(msg json.Message) map[string]interface{} { + return map[string]interface{}{ + 
"channel": msg.Channel, + "created": msg.Created, + "subtopic": msg.Subtopic, + "publisher": msg.Publisher, + "protocol": msg.Protocol, + "payload": map[string]interface{}(msg.Payload), + } +} diff --git a/readers/timescale/setup_test.go b/readers/timescale/setup_test.go new file mode 100644 index 0000000..5ce692f --- /dev/null +++ b/readers/timescale/setup_test.go @@ -0,0 +1,84 @@ +// Copyright (c) Abstract Machines +// SPDX-License-Identifier: Apache-2.0 + +// Package timescale_test contains tests for PostgreSQL repository +// implementations. +package timescale_test + +import ( + "fmt" + "log" + "os" + "testing" + + "github.com/absmach/mg-contrib/readers/timescale" + _ "github.com/jackc/pgx/v5/stdlib" // required for SQL access + "github.com/jmoiron/sqlx" + "github.com/ory/dockertest/v3" + "github.com/ory/dockertest/v3/docker" +) + +var db *sqlx.DB + +func TestMain(m *testing.M) { + pool, err := dockertest.NewPool("") + if err != nil { + log.Fatalf("Could not connect to docker: %s", err) + } + + container, err := pool.RunWithOptions(&dockertest.RunOptions{ + Repository: "timescale/timescaledb", + Tag: "2.13.1-pg16", + Env: []string{ + "POSTGRES_USER=test", + "POSTGRES_PASSWORD=test", + "POSTGRES_DB=test", + "listen_addresses = '*'", + }, + }, func(config *docker.HostConfig) { + config.AutoRemove = true + config.RestartPolicy = docker.RestartPolicy{Name: "no"} + }) + if err != nil { + log.Fatalf("Could not start container: %s", err) + } + + port := container.GetPort("5432/tcp") + url := fmt.Sprintf("host=localhost port=%s user=test dbname=test password=test sslmode=disable", port) + + if err = pool.Retry(func() error { + db, err = sqlx.Open("pgx", url) + if err != nil { + return err + } + return db.Ping() + }); err != nil { + log.Fatalf("Could not connect to docker: %s", err) + } + + dbConfig := timescale.Config{ + Host: "localhost", + Port: port, + User: "test", + Pass: "test", + Name: "test", + SSLMode: "disable", + SSLCert: "", + SSLKey: "", + SSLRootCert: "", + } + + if db, err = timescale.Connect(dbConfig); err != nil { + log.Fatalf("Could not setup test DB connection: %s", err) + } + + code := m.Run() + + // Defers will not be run when using os.Exit + db.Close() + if err = pool.Purge(container); err != nil { + log.Fatalf("Could not purge container: %s", err) + } + + os.Exit(code) +} diff --git a/twins/README.md b/twins/README.md new file mode 100644 index 0000000..e77c4eb --- /dev/null +++ b/twins/README.md @@ -0,0 +1,103 @@ +# Twins + +Service twins is used for CRUD and update of digital twins. Twin is a semantic +representation of a real world data system consisting of data producers and +consumers. It stores the sequence of attribute based definitions of a system and +refers to a time series of definition based states that store the system +historical data. + +## Configuration + +The service is configured using the environment variables presented in the +following table. Note that any unset variables will be replaced with their +default values. 
+
+| Variable                   | Description                                                          | Default                          |
+| -------------------------- | -------------------------------------------------------------------- | -------------------------------- |
+| MG_TWINS_LOG_LEVEL         | Log level for twin service (debug, info, warn, error)                | info                             |
+| MG_TWINS_HTTP_PORT         | Twins service HTTP port                                              | 9018                             |
+| MG_TWINS_SERVER_CERT       | Path to server certificate in PEM format                             |                                  |
+| MG_TWINS_SERVER_KEY        | Path to server key in PEM format                                     |                                  |
+| MG_JAEGER_URL              | Jaeger server URL                                                    | <http://jaeger:14268/api/traces> |
+| MG_TWINS_DB                | Database name                                                        | magistrala                       |
+| MG_TWINS_DB_HOST           | Database host address                                                | localhost                        |
+| MG_TWINS_DB_PORT           | Database host port                                                   | 27017                            |
+| MG_THINGS_STANDALONE_ID    | User ID for standalone mode (no gRPC communication with users)       |                                  |
+| MG_THINGS_STANDALONE_TOKEN | User token for standalone mode that should be passed in auth header  |                                  |
+| MG_TWINS_CLIENT_TLS        | Flag that indicates if TLS should be turned on                       | false                            |
+| MG_TWINS_CA_CERTS          | Path to trusted CAs in PEM format                                    |                                  |
+| MG_TWINS_CHANNEL_ID        | Message broker notifications channel ID                              |                                  |
+| MG_MESSAGE_BROKER_URL      | Magistrala Message broker URL                                        | <nats://localhost:4222>          |
+| MG_AUTH_GRPC_URL           | Auth service gRPC URL                                                | <localhost:7001>                 |
+| MG_AUTH_GRPC_TIMEOUT       | Auth service gRPC request timeout in seconds                         | 1s                               |
+| MG_TWINS_CACHE_URL         | Cache database URL                                                   | <redis://localhost:6379/0>       |
+| MG_SEND_TELEMETRY          | Send telemetry to magistrala call home server                        | true                             |
+
+## Deployment
+
+The service itself is distributed as a Docker container. Check the [`twins`](https://github.com/absmach/magistrala/blob/main/docker/addons/twins/docker-compose.yml#L35-L58) service section in
+the docker-compose file to see how the service is deployed.
+
+To start the service outside of the container, execute the following shell
+script:
+
+```bash
+# download the latest version of the service
+go get github.com/absmach/magistrala
+
+cd $GOPATH/src/github.com/absmach/magistrala
+
+# compile the twins service
+make twins
+
+# copy binary to bin
+make install
+
+# set the environment variables and run the service
+MG_TWINS_LOG_LEVEL=[Twins log level] \
+MG_TWINS_HTTP_PORT=[Service HTTP port] \
+MG_TWINS_SERVER_CERT=[String path to server cert in pem format] \
+MG_TWINS_SERVER_KEY=[String path to server key in pem format] \
+MG_JAEGER_URL=[Jaeger server URL] \
+MG_TWINS_DB=[Database name] \
+MG_TWINS_DB_HOST=[Database host address] \
+MG_TWINS_DB_PORT=[Database host port] \
+MG_THINGS_STANDALONE_ID=[User ID for standalone mode (no gRPC communication with auth)] \
+MG_THINGS_STANDALONE_TOKEN=[User token for standalone mode that should be passed in auth header] \
+MG_TWINS_CLIENT_TLS=[Flag that indicates if TLS should be turned on] \
+MG_TWINS_CA_CERTS=[Path to trusted CAs in PEM format] \
+MG_TWINS_CHANNEL_ID=[Message broker notifications channel ID] \
+MG_MESSAGE_BROKER_URL=[Magistrala Message broker URL] \
+MG_AUTH_GRPC_URL=[Auth service gRPC URL] \
+MG_AUTH_GRPC_TIMEOUT=[Auth service gRPC request timeout in seconds] \
+MG_TWINS_CACHE_URL=[Cache database URL] \
+$GOBIN/magistrala-twins
+```
+
+## Usage
+
+### Starting twins service
+
+The twins service publishes notifications on a Message broker subject of the format
+`channels.<MG_TWINS_CHANNEL_ID>.messages.<twinID>.<crudOp>`, where `crudOp`
+stands for the CRUD operation done on the twin (create, update, delete or
+retrieve) or on its state (save state).
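+
+For illustration, a consumer can watch every notification for a given channel
+by subscribing directly on the broker. The following is a minimal sketch that
+assumes the default NATS broker from the table above and the `nats.go` client;
+`<channel_id>` is a placeholder for a real channel ID:
+
+```go
+package main
+
+import (
+	"fmt"
+	"log"
+
+	"github.com/nats-io/nats.go"
+)
+
+func main() {
+	// Default Magistrala NATS URL; adjust to your deployment.
+	nc, err := nats.Connect("nats://localhost:4222")
+	if err != nil {
+		log.Fatal(err)
+	}
+	defer nc.Close()
+
+	// The trailing '>' wildcard matches every <twinID>.<crudOp> suffix.
+	if _, err := nc.Subscribe("channels.<channel_id>.messages.>", func(m *nats.Msg) {
+		fmt.Printf("%s: %s\n", m.Subject, m.Data)
+	}); err != nil {
+		log.Fatal(err)
+	}
+
+	select {} // keep the subscriber alive
+}
+```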
In order to use twin service notifications,
+one must inform it - via environment variables - about the Magistrala channel used
+for notification publishing. You must use an already existing channel, since you
+cannot know in advance or set the channel ID (Magistrala assigns it automatically).
+
+To set the environment variable, open the `.env` file and set the following
+variable:
+
+```
+MG_TWINS_CHANNEL_ID=
+```
+
+with the ID of the desired channel. If you are running
+Magistrala natively, then do the same thing in the corresponding console
+environment.
+
+For more information about service capabilities and its usage, please check out
+the [API documentation](https://docs.api.magistrala.abstractmachines.fr/?urls.primaryName=twins-openapi.yml).
+
+[doc]: https://docs.magistrala.abstractmachines.fr
diff --git a/twins/api/doc.go b/twins/api/doc.go
new file mode 100644
index 0000000..2424852
--- /dev/null
+++ b/twins/api/doc.go
@@ -0,0 +1,6 @@
+// Copyright (c) Abstract Machines
+// SPDX-License-Identifier: Apache-2.0
+
+// Package api contains API-related concerns: endpoint definitions, middlewares
+// and all resource representations.
+package api
diff --git a/twins/api/http/doc.go b/twins/api/http/doc.go
new file mode 100644
index 0000000..b1013e3
--- /dev/null
+++ b/twins/api/http/doc.go
@@ -0,0 +1,5 @@
+// Copyright (c) Abstract Machines
+// SPDX-License-Identifier: Apache-2.0
+
+// Package http contains the implementation of the kit service HTTP API.
+package http
diff --git a/twins/api/http/endpoint.go b/twins/api/http/endpoint.go
new file mode 100644
index 0000000..041178b
--- /dev/null
+++ b/twins/api/http/endpoint.go
@@ -0,0 +1,179 @@
+// Copyright (c) Abstract Machines
+// SPDX-License-Identifier: Apache-2.0
+
+package http
+
+import (
+	"context"
+
+	"github.com/absmach/magistrala/pkg/apiutil"
+	"github.com/absmach/magistrala/pkg/errors"
+	"github.com/absmach/mg-contrib/twins"
+	"github.com/go-kit/kit/endpoint"
+)
+
+func addTwinEndpoint(svc twins.Service) endpoint.Endpoint {
+	return func(ctx context.Context, request interface{}) (interface{}, error) {
+		req := request.(addTwinReq)
+		if err := req.validate(); err != nil {
+			return nil, errors.Wrap(apiutil.ErrValidation, err)
+		}
+
+		twin := twins.Twin{
+			Name:     req.Name,
+			Metadata: req.Metadata,
+		}
+		saved, err := svc.AddTwin(ctx, req.token, twin, req.Definition)
+		if err != nil {
+			return nil, err
+		}
+
+		res := twinRes{
+			id:      saved.ID,
+			created: true,
+		}
+		return res, nil
+	}
+}
+
+func updateTwinEndpoint(svc twins.Service) endpoint.Endpoint {
+	return func(ctx context.Context, request interface{}) (interface{}, error) {
+		req := request.(updateTwinReq)
+
+		if err := req.validate(); err != nil {
+			return nil, errors.Wrap(apiutil.ErrValidation, err)
+		}
+
+		twin := twins.Twin{
+			ID:       req.id,
+			Name:     req.Name,
+			Metadata: req.Metadata,
+		}
+
+		if err := svc.UpdateTwin(ctx, req.token, twin, req.Definition); err != nil {
+			return nil, err
+		}
+
+		res := twinRes{id: req.id, created: false}
+		return res, nil
+	}
+}
+
+func viewTwinEndpoint(svc twins.Service) endpoint.Endpoint {
+	return func(ctx context.Context, request interface{}) (interface{}, error) {
+		req := request.(viewTwinReq)
+
+		if err := req.validate(); err != nil {
+			return nil, errors.Wrap(apiutil.ErrValidation, err)
+		}
+
+		twin, err := svc.ViewTwin(ctx, req.token, req.id)
+		if err != nil {
+			return nil, err
+		}
+
+		res := viewTwinRes{
+			Owner:    twin.Owner,
+			ID:       twin.ID,
+			Name:     twin.Name,
+			Created:  twin.Created,
+			Updated:  twin.Updated,
+			Revision:
twin.Revision, + Definitions: twin.Definitions, + Metadata: twin.Metadata, + } + return res, nil + } +} + +func listTwinsEndpoint(svc twins.Service) endpoint.Endpoint { + return func(ctx context.Context, request interface{}) (interface{}, error) { + req := request.(listReq) + + if err := req.validate(); err != nil { + return nil, errors.Wrap(apiutil.ErrValidation, err) + } + + page, err := svc.ListTwins(ctx, req.token, req.offset, req.limit, req.name, req.metadata) + if err != nil { + return nil, err + } + + res := twinsPageRes{ + pageRes: pageRes{ + Total: page.Total, + Offset: page.Offset, + Limit: page.Limit, + }, + Twins: []viewTwinRes{}, + } + for _, twin := range page.Twins { + view := viewTwinRes{ + Owner: twin.Owner, + ID: twin.ID, + Name: twin.Name, + Created: twin.Created, + Updated: twin.Updated, + Revision: twin.Revision, + Definitions: twin.Definitions, + Metadata: twin.Metadata, + } + res.Twins = append(res.Twins, view) + } + + return res, nil + } +} + +func removeTwinEndpoint(svc twins.Service) endpoint.Endpoint { + return func(ctx context.Context, request interface{}) (interface{}, error) { + req := request.(viewTwinReq) + + err := req.validate() + if err != nil { + return nil, errors.Wrap(apiutil.ErrValidation, err) + } + + if err := svc.RemoveTwin(ctx, req.token, req.id); err != nil { + return nil, err + } + + return removeRes{}, nil + } +} + +func listStatesEndpoint(svc twins.Service) endpoint.Endpoint { + return func(ctx context.Context, request interface{}) (interface{}, error) { + req := request.(listStatesReq) + + if err := req.validate(); err != nil { + return nil, errors.Wrap(apiutil.ErrValidation, err) + } + + page, err := svc.ListStates(ctx, req.token, req.offset, req.limit, req.id) + if err != nil { + return nil, err + } + + res := statesPageRes{ + pageRes: pageRes{ + Total: page.Total, + Offset: page.Offset, + Limit: page.Limit, + }, + States: []viewStateRes{}, + } + for _, state := range page.States { + view := viewStateRes{ + TwinID: state.TwinID, + ID: state.ID, + Definition: state.Definition, + Created: state.Created, + Payload: state.Payload, + } + res.States = append(res.States, view) + } + + return res, nil + } +} diff --git a/twins/api/http/endpoint_states_test.go b/twins/api/http/endpoint_states_test.go new file mode 100644 index 0000000..36ae0ef --- /dev/null +++ b/twins/api/http/endpoint_states_test.go @@ -0,0 +1,306 @@ +// Copyright (c) Abstract Machines +// SPDX-License-Identifier: Apache-2.0 + +package http_test + +import ( + "encoding/json" + "fmt" + "net/http" + "testing" + "time" + + "github.com/absmach/magistrala" + authmocks "github.com/absmach/magistrala/auth/mocks" + mglog "github.com/absmach/magistrala/logger" + svcerr "github.com/absmach/magistrala/pkg/errors/service" + "github.com/absmach/magistrala/pkg/uuid" + "github.com/absmach/mg-contrib/pkg/testsutil" + "github.com/absmach/mg-contrib/twins" + "github.com/absmach/mg-contrib/twins/mocks" + "github.com/absmach/senml" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" +) + +const ( + numRecs = 100 + publisher = "twins" +) + +var ( + subtopics = []string{"engine", "chassis", "wheel_2"} + channels = []string{"01ec3c3e-0e66-4e69-9751-a0545b44e08f", "48061e4f-7c23-4f5c-9012-0f9b7cd9d18d", "5b2180e4-e96b-4469-9dc1-b6745078d0b6"} +) + +type stateRes struct { + TwinID string `json:"twin_id"` + ID int64 `json:"id"` + Definition int `json:"definition"` + Payload map[string]interface{} `json:"payload"` +} + +type statesPageRes struct { + pageRes + States []stateRes 
`json:"states"`
+}
+
+func NewService() (twins.Service, *authmocks.AuthClient, *mocks.TwinRepository, *mocks.TwinCache, *mocks.StateRepository) {
+	auth := new(authmocks.AuthClient)
+	twinsRepo := new(mocks.TwinRepository)
+	twinCache := new(mocks.TwinCache)
+	statesRepo := new(mocks.StateRepository)
+	idProvider := uuid.NewMock()
+	subs := map[string]string{"chanID": "chanID"}
+	broker := mocks.NewBroker(subs)
+
+	return twins.New(broker, auth, twinsRepo, twinCache, statesRepo, idProvider, "chanID", mglog.NewMock()), auth, twinsRepo, twinCache, statesRepo
+}
+
+func TestListStates(t *testing.T) {
+	svc, auth, _, _, stateRepo := NewService()
+	ts := newServer(svc)
+	defer ts.Close()
+
+	def := mocks.CreateDefinition(channels[0:2], subtopics[0:2])
+	twin := twins.Twin{
+		Owner:       email,
+		Definitions: []twins.Definition{def},
+		ID:          testsutil.GenerateUUID(t),
+		Created:     time.Now(),
+	}
+	recs := make([]senml.Record, numRecs)
+
+	var data []stateRes
+	for i := 0; i < len(recs); i++ {
+		res := createStateResponse(i, twin, recs[i])
+		data = append(data, res)
+	}
+
+	baseURL := fmt.Sprintf("%s/states/%s", ts.URL, twin.ID)
+	queryFmt := "%s?offset=%d&limit=%d"
+	cases := []struct {
+		desc        string
+		auth        string
+		status      int
+		url         string
+		res         []stateRes
+		err         error
+		page        twins.StatesPage
+		identifyErr error
+		userID      string
+	}{
+		{
+			desc:   "get a list of states",
+			auth:   token,
+			status: http.StatusOK,
+			url:    baseURL,
+			res:    data[0:10],
+			err:    nil,
+			page: twins.StatesPage{
+				States: convState(data[0:10]),
+			},
+			identifyErr: nil,
+			userID:      validID,
+		},
+		{
+			desc:   "get a list of states with valid offset and limit",
+			auth:   token,
+			status: http.StatusOK,
+			url:    fmt.Sprintf(queryFmt, baseURL, 20, 15),
+			res:    data[20:35],
+			page: twins.StatesPage{
+				States: convState(data[20:35]),
+			},
+			err:         nil,
+			identifyErr: nil,
+			userID:      validID,
+		},
+		{
+			desc:        "get a list of states with invalid token",
+			auth:        authmocks.InvalidValue,
+			status:      http.StatusUnauthorized,
+			url:         fmt.Sprintf(queryFmt, baseURL, 0, 5),
+			res:         nil,
+			err:         svcerr.ErrAuthentication,
+			identifyErr: svcerr.ErrAuthentication,
+		},
+		{
+			desc:        "get a list of states with empty token",
+			auth:        "",
+			status:      http.StatusUnauthorized,
+			url:         fmt.Sprintf(queryFmt, baseURL, 0, 5),
+			res:         nil,
+			err:         svcerr.ErrAuthentication,
+			identifyErr: svcerr.ErrAuthentication,
+		},
+		{
+			desc:   "get a list of states with offset + limit > total",
+			auth:   token,
+			status: http.StatusOK,
+			url:    fmt.Sprintf(queryFmt, baseURL, 91, 20),
+			res:    data[91:],
+			page: twins.StatesPage{
+				States: convState(data[91:]),
+			},
+			err:         nil,
+			identifyErr: nil,
+			userID:      validID,
+		},
+		{
+			desc:        "get a list of states with negative offset",
+			auth:        token,
+			status:      http.StatusBadRequest,
+			url:         fmt.Sprintf(queryFmt, baseURL, -1, 5),
+			res:         nil,
+			err:         svcerr.ErrMalformedEntity,
+			identifyErr: nil,
+			userID:      validID,
+		},
+		{
+			desc:        "get a list of states with negative limit",
+			auth:        token,
+			status:      http.StatusBadRequest,
+			url:         fmt.Sprintf(queryFmt, baseURL, 0, -5),
+			res:         nil,
+			err:         svcerr.ErrMalformedEntity,
+			identifyErr: nil,
+			userID:      validID,
+		},
+		{
+			desc:        "get a list of states with zero limit",
+			auth:        token,
+			status:      http.StatusBadRequest,
+			url:         fmt.Sprintf(queryFmt, baseURL, 0, 0),
+			res:         nil,
+			err:         svcerr.ErrMalformedEntity,
+			identifyErr: nil,
+			userID:      validID,
+		},
+		{
+			desc:   "get a list of states with limit greater than max",
+			auth:   token,
+			status: http.StatusBadRequest,
+			url:    fmt.Sprintf(queryFmt, baseURL, 0, 110),
+			res:    nil,
+			err:
svcerr.ErrMalformedEntity, + identifyErr: nil, + userID: validID, + }, + { + desc: "get a list of states with invalid offset", + auth: token, + status: http.StatusBadRequest, + url: fmt.Sprintf("%s?offset=invalid&limit=%d", baseURL, 15), + res: nil, + err: svcerr.ErrMalformedEntity, + identifyErr: nil, + userID: validID, + }, + { + desc: "get a list of states with invalid limit", + auth: token, + status: http.StatusBadRequest, + url: fmt.Sprintf("%s?offset=%d&limit=invalid", baseURL, 0), + res: nil, + err: svcerr.ErrMalformedEntity, + identifyErr: nil, + userID: validID, + }, + { + desc: "get a list of states without offset", + auth: token, + status: http.StatusOK, + url: fmt.Sprintf("%s?limit=%d", baseURL, 15), + res: data[0:15], + page: twins.StatesPage{ + States: convState(data[0:15]), + }, + err: nil, + identifyErr: nil, + userID: validID, + }, + { + desc: "get a list of states without limit", + auth: token, + status: http.StatusOK, + url: fmt.Sprintf("%s?offset=%d", baseURL, 14), + res: data[14:24], + page: twins.StatesPage{ + States: convState(data[14:24]), + }, + err: nil, + identifyErr: nil, + userID: validID, + }, + { + desc: "get a list of states with invalid number of parameters", + auth: token, + status: http.StatusBadRequest, + url: fmt.Sprintf("%s%s", baseURL, "?offset=4&limit=4&limit=5&offset=5"), + res: nil, + err: svcerr.ErrMalformedEntity, + identifyErr: nil, + userID: validID, + }, + { + desc: "get a list of states with redundant query parameters", + auth: token, + status: http.StatusOK, + url: fmt.Sprintf("%s?offset=%d&limit=%d&value=something", baseURL, 0, 5), + res: data[0:5], + page: twins.StatesPage{ + States: convState(data[0:5]), + }, + err: nil, + identifyErr: nil, + userID: validID, + }, + } + + for _, tc := range cases { + authCall := auth.On("Identify", mock.Anything, &magistrala.IdentityReq{Token: tc.auth}).Return(&magistrala.IdentityRes{Id: tc.userID}, tc.identifyErr) + repoCall := stateRepo.On("RetrieveAll", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(tc.page, tc.err) + req := testRequest{ + client: ts.Client(), + method: http.MethodGet, + url: tc.url, + token: tc.auth, + } + res, err := req.make() + assert.Nil(t, err, fmt.Sprintf("%s: unexpected error %s", tc.desc, err)) + + var resData statesPageRes + if tc.res != nil { + err = json.NewDecoder(res.Body).Decode(&resData) + assert.Nil(t, err, fmt.Sprintf("%s: unexpected error %s", tc.desc, err)) + } + + assert.Equal(t, tc.status, res.StatusCode, fmt.Sprintf("%s: expected status code %d got %d", tc.desc, tc.status, res.StatusCode)) + assert.ElementsMatch(t, tc.res, resData.States, fmt.Sprintf("%s: got incorrect body from response", tc.desc)) + authCall.Unset() + repoCall.Unset() + } +} + +func createStateResponse(id int, tw twins.Twin, rec senml.Record) stateRes { + return stateRes{ + TwinID: tw.ID, + ID: int64(id), + Definition: tw.Definitions[len(tw.Definitions)-1].ID, + Payload: map[string]interface{}{rec.BaseName: nil}, + } +} + +func convState(data []stateRes) []twins.State { + states := make([]twins.State, len(data)) + for i, d := range data { + states[i] = twins.State{ + TwinID: d.TwinID, + ID: d.ID, + Definition: d.Definition, + Payload: d.Payload, + } + } + return states +} diff --git a/twins/api/http/endpoint_twins_test.go b/twins/api/http/endpoint_twins_test.go new file mode 100644 index 0000000..59f2609 --- /dev/null +++ b/twins/api/http/endpoint_twins_test.go @@ -0,0 +1,865 @@ +// Copyright (c) Abstract Machines +// SPDX-License-Identifier: Apache-2.0 + +package http_test 
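+
+// A minimal client sketch for the states endpoint exercised above, assuming
+// the defaults wired in transport.go (offset 0, limit 10) and the maximum
+// limit of 100 from requests.go; limits outside [1, 100] produce
+// 400 Bad Request:
+//
+//	url := fmt.Sprintf("%s/states/%s?offset=0&limit=10", ts.URL, twin.ID)
+//	req, _ := http.NewRequest(http.MethodGet, url, nil)
+//	req.Header.Set("Authorization", apiutil.BearerPrefix+token)
+//	res, err := ts.Client().Do(req)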
+ +import ( + "encoding/json" + "fmt" + "io" + "net/http" + "net/http/httptest" + "strconv" + "strings" + "testing" + + "github.com/absmach/magistrala" + authmocks "github.com/absmach/magistrala/auth/mocks" + mglog "github.com/absmach/magistrala/logger" + "github.com/absmach/magistrala/pkg/apiutil" + svcerr "github.com/absmach/magistrala/pkg/errors/service" + "github.com/absmach/mg-contrib/pkg/testsutil" + "github.com/absmach/mg-contrib/twins" + httpapi "github.com/absmach/mg-contrib/twins/api/http" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" +) + +const ( + twinName = "name" + contentType = "application/json" + email = "user@example.com" + token = "token" + wrongID = 0 + maxNameSize = 1024 + instanceID = "5de9b29a-feb9-11ed-be56-0242ac120002" + retained = "saved" + validID = "123e4567-e89b-12d3-a456-426614174000" +) + +var invalidName = strings.Repeat("m", maxNameSize+1) + +type twinReq struct { + Name string `json:"name,omitempty"` + Metadata map[string]interface{} `json:"metadata,omitempty"` +} + +type twinRes struct { + Owner string `json:"owner"` + ID string `json:"id"` + Name string `json:"name,omitempty"` + Revision int `json:"revision"` + Metadata map[string]interface{} `json:"metadata,omitempty"` +} + +type pageRes struct { + Total uint64 `json:"total"` + Offset uint64 `json:"offset"` + Limit uint64 `json:"limit"` +} + +type twinsPageRes struct { + pageRes + Twins []twinRes `json:"twins"` +} + +type testRequest struct { + client *http.Client + method string + url string + contentType string + token string + body io.Reader +} + +func (tr testRequest) make() (*http.Response, error) { + req, err := http.NewRequest(tr.method, tr.url, tr.body) + if err != nil { + return nil, err + } + if tr.token != "" { + req.Header.Set("Authorization", apiutil.BearerPrefix+tr.token) + } + if tr.contentType != "" { + req.Header.Set("Content-Type", tr.contentType) + } + return tr.client.Do(req) +} + +func newServer(svc twins.Service) *httptest.Server { + logger := mglog.NewMock() + mux := httpapi.MakeHandler(svc, logger, instanceID) + return httptest.NewServer(mux) +} + +func toJSON(data interface{}) (string, error) { + jsonData, err := json.Marshal(data) + if err != nil { + return "", err + } + return string(jsonData), nil +} + +func TestAddTwin(t *testing.T) { + svc, auth, twinRepo, twinCache, _ := NewService() + ts := newServer(svc) + defer ts.Close() + + tw := twinReq{} + data, err := toJSON(tw) + assert.Nil(t, err, fmt.Sprintf("unexpected error: %s", err)) + + tw.Name = invalidName + invalidData, err := toJSON(tw) + assert.Nil(t, err, fmt.Sprintf("unexpected error: %s", err)) + + cases := []struct { + desc string + req string + contentType string + auth string + status int + location string + err error + saveErr error + identifyErr error + userID string + }{ + { + desc: "add valid twin", + req: data, + contentType: contentType, + auth: token, + status: http.StatusCreated, + location: "/twins/123e4567-e89b-12d3-a456-000000000001", + err: nil, + saveErr: nil, + identifyErr: nil, + userID: validID, + }, + { + desc: "add twin with empty JSON request", + req: "{}", + contentType: contentType, + auth: token, + status: http.StatusCreated, + location: "/twins/123e4567-e89b-12d3-a456-000000000002", + err: nil, + saveErr: nil, + identifyErr: nil, + userID: validID, + }, + { + desc: "add twin with invalid auth token", + req: data, + contentType: contentType, + auth: authmocks.InvalidValue, + status: http.StatusUnauthorized, + location: "", + err: svcerr.ErrAuthentication, + 
saveErr: svcerr.ErrCreateEntity, + identifyErr: svcerr.ErrAuthentication, + }, + { + desc: "add twin with empty auth token", + req: data, + contentType: contentType, + auth: "", + status: http.StatusUnauthorized, + location: "", + err: svcerr.ErrAuthentication, + saveErr: svcerr.ErrCreateEntity, + identifyErr: svcerr.ErrAuthentication, + }, + { + desc: "add twin with invalid request format", + req: "}", + contentType: contentType, + auth: token, + status: http.StatusBadRequest, + location: "", + err: svcerr.ErrMalformedEntity, + saveErr: svcerr.ErrCreateEntity, + identifyErr: nil, + userID: validID, + }, + { + desc: "add twin with empty request", + req: "", + contentType: contentType, + auth: token, + status: http.StatusBadRequest, + location: "", + err: svcerr.ErrMalformedEntity, + saveErr: svcerr.ErrCreateEntity, + identifyErr: nil, + userID: validID, + }, + { + desc: "add twin without content type", + req: data, + contentType: "", + auth: token, + status: http.StatusUnsupportedMediaType, + location: "", + err: apiutil.ErrUnsupportedContentType, + saveErr: svcerr.ErrCreateEntity, + identifyErr: nil, + userID: validID, + }, + { + desc: "add twin with invalid name", + req: invalidData, + contentType: contentType, + auth: token, + status: http.StatusBadRequest, + location: "", + err: svcerr.ErrMalformedEntity, + saveErr: svcerr.ErrCreateEntity, + identifyErr: nil, + userID: validID, + }, + } + + for _, tc := range cases { + authCall := auth.On("Identify", mock.Anything, &magistrala.IdentityReq{Token: tc.auth}).Return(&magistrala.IdentityRes{Id: tc.userID}, tc.identifyErr) + repoCall := twinRepo.On("Save", mock.Anything, mock.Anything).Return(retained, tc.saveErr) + cacheCall := twinCache.On("Save", mock.Anything, mock.Anything).Return(tc.err) + req := testRequest{ + client: ts.Client(), + method: http.MethodPost, + url: fmt.Sprintf("%s/twins", ts.URL), + contentType: tc.contentType, + token: tc.auth, + body: strings.NewReader(tc.req), + } + res, err := req.make() + assert.Nil(t, err, fmt.Sprintf("%s: unexpected error %s", tc.desc, err)) + + location := res.Header.Get("Location") + assert.Equal(t, tc.status, res.StatusCode, fmt.Sprintf("%s: expected status code %d got %d", tc.desc, tc.status, res.StatusCode)) + assert.Equal(t, tc.location, location, fmt.Sprintf("%s: expected location %s got %s", tc.desc, tc.location, location)) + authCall.Unset() + repoCall.Unset() + cacheCall.Unset() + } +} + +func TestUpdateTwin(t *testing.T) { + svc, auth, twinRepo, twinCache, _ := NewService() + ts := newServer(svc) + defer ts.Close() + + twin := twins.Twin{ + Owner: email, + ID: testsutil.GenerateUUID(t), + } + twin.Name = twinName + data, err := toJSON(twin) + assert.Nil(t, err, fmt.Sprintf("unexpected error: %s", err)) + + tw := twin + tw.Name = invalidName + invalidData, err := toJSON(tw) + assert.Nil(t, err, fmt.Sprintf("unexpected error: %s", err)) + + cases := []struct { + desc string + req string + id string + contentType string + auth string + status int + err error + retrieveErr error + updateErr error + identifyErr error + userID string + }{ + { + desc: "update existing twin", + req: data, + id: twin.ID, + contentType: contentType, + auth: token, + status: http.StatusOK, + err: nil, + identifyErr: nil, + userID: validID, + }, + { + desc: "update twin with empty JSON request", + req: "{}", + id: twin.ID, + contentType: contentType, + auth: token, + status: http.StatusBadRequest, + err: svcerr.ErrMalformedEntity, + retrieveErr: nil, + updateErr: svcerr.ErrUpdateEntity, + identifyErr: nil, + 
userID: validID, + }, + { + desc: "update non-existent twin", + req: data, + id: strconv.FormatUint(wrongID, 10), + contentType: contentType, + auth: token, + status: http.StatusNotFound, + err: svcerr.ErrNotFound, + retrieveErr: svcerr.ErrNotFound, + updateErr: svcerr.ErrUpdateEntity, + identifyErr: nil, + userID: validID, + }, + { + desc: "update twin with invalid token", + req: data, + id: twin.ID, + contentType: contentType, + auth: authmocks.InvalidValue, + status: http.StatusUnauthorized, + err: svcerr.ErrAuthentication, + retrieveErr: svcerr.ErrNotFound, + updateErr: svcerr.ErrUpdateEntity, + identifyErr: svcerr.ErrAuthentication, + }, + { + desc: "update twin with empty token", + req: data, + id: twin.ID, + contentType: contentType, + auth: "", + status: http.StatusUnauthorized, + err: svcerr.ErrAuthentication, + retrieveErr: svcerr.ErrNotFound, + updateErr: svcerr.ErrUpdateEntity, + identifyErr: svcerr.ErrAuthentication, + }, + { + desc: "update twin with invalid data format", + req: "{", + id: twin.ID, + contentType: contentType, + auth: token, + status: http.StatusBadRequest, + err: svcerr.ErrMalformedEntity, + identifyErr: nil, + retrieveErr: nil, + updateErr: svcerr.ErrUpdateEntity, + userID: validID, + }, + { + desc: "update twin with empty request", + req: "", + id: twin.ID, + contentType: contentType, + auth: token, + status: http.StatusBadRequest, + err: svcerr.ErrMalformedEntity, + retrieveErr: nil, + updateErr: svcerr.ErrUpdateEntity, + identifyErr: nil, + userID: validID, + }, + { + desc: "update twin without content type", + req: data, + id: twin.ID, + contentType: "", + auth: token, + status: http.StatusUnsupportedMediaType, + err: apiutil.ErrUnsupportedContentType, + retrieveErr: nil, + updateErr: svcerr.ErrUpdateEntity, + identifyErr: nil, + userID: validID, + }, + { + desc: "update twin with invalid name", + req: invalidData, + contentType: contentType, + auth: token, + status: http.StatusMethodNotAllowed, + err: svcerr.ErrMalformedEntity, + retrieveErr: svcerr.ErrNotFound, + updateErr: svcerr.ErrUpdateEntity, + identifyErr: nil, + userID: validID, + }, + } + + for _, tc := range cases { + authCall := auth.On("Identify", mock.Anything, &magistrala.IdentityReq{Token: tc.auth}).Return(&magistrala.IdentityRes{Id: tc.userID}, tc.identifyErr) + repoCall := twinRepo.On("RetrieveByID", mock.Anything, tc.id).Return(twins.Twin{}, tc.retrieveErr) + repoCall1 := twinRepo.On("Update", mock.Anything, mock.Anything).Return(tc.updateErr) + cacheCall := twinCache.On("Update", mock.Anything, mock.Anything).Return(tc.err) + req := testRequest{ + client: ts.Client(), + method: http.MethodPut, + url: fmt.Sprintf("%s/twins/%s", ts.URL, tc.id), + contentType: tc.contentType, + token: tc.auth, + body: strings.NewReader(tc.req), + } + res, err := req.make() + assert.Nil(t, err, fmt.Sprintf("%s: unexpected error %s", tc.desc, err)) + assert.Equal(t, tc.status, res.StatusCode, fmt.Sprintf("%s: expected status code %d got %d", tc.desc, tc.status, res.StatusCode)) + authCall.Unset() + repoCall.Unset() + repoCall1.Unset() + cacheCall.Unset() + } +} + +func TestViewTwin(t *testing.T) { + svc, auth, twinRepo, _, _ := NewService() + ts := newServer(svc) + defer ts.Close() + + twin := twins.Twin{ + Owner: email, + ID: testsutil.GenerateUUID(t), + Name: twinName, + Revision: 50, + } + + twres := twinRes{ + Owner: twin.Owner, + Name: twin.Name, + ID: twin.ID, + Revision: twin.Revision, + Metadata: twin.Metadata, + } + + cases := []struct { + desc string + id string + auth string + status int + res 
twinRes + err error + twin twins.Twin + identifyErr error + userID string + }{ + { + desc: "view existing twin", + id: twin.ID, + auth: token, + status: http.StatusOK, + res: twres, + err: nil, + twin: twin, + identifyErr: nil, + userID: validID, + }, + { + desc: "view non-existent twin", + id: strconv.FormatUint(wrongID, 10), + auth: token, + status: http.StatusNotFound, + res: twinRes{}, + err: svcerr.ErrNotFound, + identifyErr: nil, + userID: validID, + }, + { + desc: "view twin by passing invalid token", + id: twin.ID, + auth: authmocks.InvalidValue, + status: http.StatusForbidden, + res: twinRes{}, + err: svcerr.ErrAuthentication, + identifyErr: svcerr.ErrAuthentication, + }, + { + desc: "view twin by passing empty token", + id: twin.ID, + auth: "", + status: http.StatusUnauthorized, + res: twinRes{}, + err: svcerr.ErrAuthentication, + identifyErr: svcerr.ErrAuthentication, + }, + } + + for _, tc := range cases { + authCall := auth.On("Identify", mock.Anything, &magistrala.IdentityReq{Token: tc.auth}).Return(&magistrala.IdentityRes{Id: tc.userID}, tc.identifyErr) + repoCall := twinRepo.On("RetrieveByID", mock.Anything, tc.id).Return(tc.twin, tc.err) + req := testRequest{ + client: ts.Client(), + method: http.MethodGet, + url: fmt.Sprintf("%s/twins/%s", ts.URL, tc.id), + token: tc.auth, + } + res, err := req.make() + assert.Nil(t, err, fmt.Sprintf("%s: unexpected error %s", tc.desc, err)) + assert.Equal(t, tc.status, res.StatusCode, fmt.Sprintf("%s: expected status code %d got %d", tc.desc, tc.status, res.StatusCode)) + + var resData twinRes + err = json.NewDecoder(res.Body).Decode(&resData) + assert.Nil(t, err, fmt.Sprintf("%s: got unexpected error while decoding response body: %s\n", tc.desc, err)) + assert.Equal(t, tc.res, resData, fmt.Sprintf("%s: expected body %v got %v", tc.desc, tc.res, resData)) + authCall.Unset() + repoCall.Unset() + } +} + +func TestListTwins(t *testing.T) { + svc, auth, twinRepo, _, _ := NewService() + ts := newServer(svc) + defer ts.Close() + + var data []twinRes + for i := 0; i < 100; i++ { + name := fmt.Sprintf("%s-%d", twinName, i) + twin := twins.Twin{ + Owner: email, + Name: name, + ID: testsutil.GenerateUUID(t), + Revision: 150, + } + + twres := twinRes{ + Owner: twin.Owner, + ID: twin.ID, + Name: twin.Name, + Revision: twin.Revision, + Metadata: twin.Metadata, + } + data = append(data, twres) + } + + baseURL := fmt.Sprintf("%s/twins", ts.URL) + queryFmt := "%s?offset=%d&limit=%d" + cases := []struct { + desc string + auth string + status int + url string + res []twinRes + err error + page twins.Page + identifyErr error + userID string + }{ + { + desc: "get a list of twins", + auth: token, + status: http.StatusOK, + url: baseURL, + res: data[0:10], + err: nil, + page: twins.Page{ + Twins: convTwin(data[0:10]), + }, + identifyErr: nil, + userID: validID, + }, + { + desc: "get a list of twins with invalid token", + auth: authmocks.InvalidValue, + status: http.StatusUnauthorized, + url: fmt.Sprintf(queryFmt, baseURL, 0, 1), + res: nil, + err: svcerr.ErrAuthentication, + identifyErr: svcerr.ErrAuthentication, + }, + { + desc: "get a list of twins with empty token", + auth: "", + status: http.StatusUnauthorized, + url: fmt.Sprintf(queryFmt, baseURL, 0, 1), + res: nil, + err: svcerr.ErrAuthentication, + identifyErr: svcerr.ErrAuthentication, + }, + { + desc: "get a list of twins with valid offset and limit", + auth: token, + status: http.StatusOK, + url: fmt.Sprintf(queryFmt, baseURL, 25, 40), + res: data[25:65], + err: nil, + page: twins.Page{ + Twins: 
convTwin(data[25:65]), + }, + identifyErr: nil, + userID: validID, + }, + { + desc: "get a list of twins with offset + limit > total", + auth: token, + status: http.StatusOK, + url: fmt.Sprintf(queryFmt, baseURL, 91, 20), + res: data[91:], + err: nil, + page: twins.Page{ + Twins: convTwin(data[91:]), + }, + identifyErr: nil, + userID: validID, + }, + { + desc: "get a list of twins with negative offset", + auth: token, + status: http.StatusBadRequest, + url: fmt.Sprintf(queryFmt, baseURL, -1, 5), + res: nil, + err: svcerr.ErrMalformedEntity, + identifyErr: nil, + userID: validID, + }, + { + desc: "get a list of twins with negative limit", + auth: token, + status: http.StatusBadRequest, + url: fmt.Sprintf(queryFmt, baseURL, 1, -5), + res: nil, + err: svcerr.ErrMalformedEntity, + identifyErr: nil, + userID: validID, + }, + { + desc: "get a list of twins with zero limit", + auth: token, + status: http.StatusBadRequest, + url: fmt.Sprintf(queryFmt, baseURL, 1, 0), + res: nil, + err: svcerr.ErrMalformedEntity, + identifyErr: nil, + userID: validID, + }, + { + desc: "get a list of twins with limit greater than max", + auth: token, + status: http.StatusBadRequest, + url: fmt.Sprintf("%s?offset=%d&limit=%d", baseURL, 0, 110), + res: nil, + err: svcerr.ErrMalformedEntity, + identifyErr: nil, + userID: validID, + }, + { + desc: "get a list of twins with invalid offset", + auth: token, + status: http.StatusBadRequest, + url: fmt.Sprintf("%s%s", baseURL, "?offset=e&limit=5"), + res: nil, + err: svcerr.ErrMalformedEntity, + identifyErr: nil, + userID: validID, + }, + { + desc: "get a list of twins with invalid limit", + auth: token, + status: http.StatusBadRequest, + url: fmt.Sprintf("%s%s", baseURL, "?offset=5&limit=e"), + res: nil, + err: svcerr.ErrMalformedEntity, + identifyErr: nil, + userID: validID, + }, + { + desc: "get a list of twins without offset", + auth: token, + status: http.StatusOK, + url: fmt.Sprintf("%s?limit=%d", baseURL, 5), + res: data[0:5], + err: nil, + page: twins.Page{ + Twins: convTwin(data[0:5]), + }, + identifyErr: nil, + userID: validID, + }, + { + desc: "get a list of twins without limit", + auth: token, + status: http.StatusOK, + url: fmt.Sprintf("%s?offset=%d", baseURL, 1), + res: data[1:11], + err: nil, + page: twins.Page{ + Twins: convTwin(data[1:11]), + }, + identifyErr: nil, + userID: validID, + }, + { + desc: "get a list of twins with invalid number of parameters", + auth: token, + status: http.StatusBadRequest, + url: fmt.Sprintf("%s%s", baseURL, "?offset=4&limit=4&limit=5&offset=5"), + res: nil, + err: svcerr.ErrMalformedEntity, + identifyErr: nil, + userID: validID, + }, + { + desc: "get a list of twins with redundant query parameters", + auth: token, + status: http.StatusOK, + url: fmt.Sprintf("%s?offset=%d&limit=%d&value=something", baseURL, 0, 5), + res: data[0:5], + err: nil, + page: twins.Page{ + Twins: convTwin(data[0:5]), + }, + identifyErr: nil, + userID: validID, + }, + { + desc: "get a list of twins filtering with invalid name", + auth: token, + status: http.StatusBadRequest, + url: fmt.Sprintf("%s?offset=%d&limit=%d&name=%s", baseURL, 0, 5, invalidName), + res: nil, + err: svcerr.ErrMalformedEntity, + identifyErr: nil, + userID: validID, + }, + { + desc: "get a list of twins filtering with valid name", + auth: token, + status: http.StatusOK, + url: fmt.Sprintf("%s?offset=%d&limit=%d&name=%s", baseURL, 2, 1, twinName+"-2"), + res: data[2:3], + err: nil, + page: twins.Page{ + Twins: convTwin(data[2:3]), + }, + identifyErr: nil, + userID: validID, + }, + } 
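+
+	// Each iteration below registers its own mock expectations and unsets
+	// them before the next case, so expectations never leak between cases.
+	// The repository mock returns tc.page for any arguments, since query
+	// parameter validation is what is primarily under test here.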
+ + for _, tc := range cases { + authCall := auth.On("Identify", mock.Anything, &magistrala.IdentityReq{Token: tc.auth}).Return(&magistrala.IdentityRes{Id: tc.userID}, nil) + repoCall := twinRepo.On("RetrieveAll", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(tc.page, tc.err) + req := testRequest{ + client: ts.Client(), + method: http.MethodGet, + url: tc.url, + token: tc.auth, + } + res, err := req.make() + assert.Nil(t, err, fmt.Sprintf("%s: unexpected error %s", tc.desc, err)) + + var resData twinsPageRes + if tc.res != nil { + err = json.NewDecoder(res.Body).Decode(&resData) + assert.Nil(t, err, fmt.Sprintf("%s: unexpected error %s", tc.desc, err)) + } + + assert.Equal(t, tc.status, res.StatusCode, fmt.Sprintf("%s: expected status code %d got %d", tc.desc, tc.status, res.StatusCode)) + assert.ElementsMatch(t, tc.res, resData.Twins, fmt.Sprintf("%s: got incorrect list of twins", tc.desc)) + authCall.Unset() + repoCall.Unset() + } +} + +func TestRemoveTwin(t *testing.T) { + svc, auth, twinRepo, twinCache, _ := NewService() + ts := newServer(svc) + defer ts.Close() + + twin := twins.Twin{ + Owner: email, + ID: testsutil.GenerateUUID(t), + Name: twinName, + Revision: 50, + } + + cases := []struct { + desc string + id string + auth string + status int + err error + removeErr error + identifyErr error + userID string + }{ + { + desc: "delete existing twin", + id: twin.ID, + auth: token, + status: http.StatusNoContent, + err: nil, + removeErr: nil, + identifyErr: nil, + userID: validID, + }, + { + desc: "delete non-existent twin", + id: strconv.FormatUint(wrongID, 10), + auth: token, + status: http.StatusNoContent, + err: nil, + removeErr: nil, + identifyErr: nil, + userID: validID, + }, + { + desc: "delete twin by passing empty id", + id: "", + auth: token, + status: http.StatusMethodNotAllowed, + err: svcerr.ErrMalformedEntity, + removeErr: svcerr.ErrRemoveEntity, + identifyErr: nil, + userID: validID, + }, + { + desc: "delete twin with invalid token", + id: twin.ID, + auth: authmocks.InvalidValue, + status: http.StatusUnauthorized, + err: svcerr.ErrAuthentication, + removeErr: svcerr.ErrRemoveEntity, + identifyErr: svcerr.ErrAuthentication, + }, + { + desc: "delete twin with empty token", + id: twin.ID, + auth: "", + status: http.StatusUnauthorized, + err: svcerr.ErrAuthentication, + removeErr: svcerr.ErrRemoveEntity, + identifyErr: svcerr.ErrAuthentication, + }, + } + + for _, tc := range cases { + authCall := auth.On("Identify", mock.Anything, &magistrala.IdentityReq{Token: tc.auth}).Return(&magistrala.IdentityRes{Id: tc.userID}, tc.identifyErr) + repoCall := twinRepo.On("Remove", mock.Anything, tc.id).Return(tc.removeErr) + cacheCall2 := twinCache.On("Remove", mock.Anything, tc.id).Return(tc.err) + req := testRequest{ + client: ts.Client(), + method: http.MethodDelete, + url: fmt.Sprintf("%s/twins/%s", ts.URL, tc.id), + token: tc.auth, + } + res, err := req.make() + assert.Nil(t, err, fmt.Sprintf("%s: unexpected error %s", tc.desc, err)) + assert.Equal(t, tc.status, res.StatusCode, fmt.Sprintf("%s: expected status code %d got %d", tc.desc, tc.status, res.StatusCode)) + authCall.Unset() + repoCall.Unset() + cacheCall2.Unset() + } +} + +func convTwin(data []twinRes) []twins.Twin { + twinSlice := make([]twins.Twin, len(data)) + for i, d := range data { + twinSlice[i].ID = d.ID + twinSlice[i].Name = d.Name + twinSlice[i].Owner = d.Owner + twinSlice[i].Revision = d.Revision + twinSlice[i].Metadata = d.Metadata + } + return twinSlice +} diff 
--git a/twins/api/http/requests.go b/twins/api/http/requests.go new file mode 100644 index 0000000..2fa371c --- /dev/null +++ b/twins/api/http/requests.go @@ -0,0 +1,121 @@ +// Copyright (c) Abstract Machines +// SPDX-License-Identifier: Apache-2.0 + +package http + +import ( + "github.com/absmach/magistrala/pkg/apiutil" + "github.com/absmach/mg-contrib/twins" +) + +const ( + maxNameSize = 1024 + maxLimitSize = 100 +) + +type addTwinReq struct { + token string + Name string `json:"name,omitempty"` + Definition twins.Definition `json:"definition,omitempty"` + Metadata map[string]interface{} `json:"metadata,omitempty"` +} + +func (req addTwinReq) validate() error { + if req.token == "" { + return apiutil.ErrBearerToken + } + + if len(req.Name) > maxNameSize { + return apiutil.ErrNameSize + } + + return nil +} + +type updateTwinReq struct { + token string + id string + Name string `json:"name,omitempty"` + Definition twins.Definition `json:"definition,omitempty"` + Metadata map[string]interface{} `json:"metadata,omitempty"` +} + +func (req updateTwinReq) validate() error { + if req.token == "" { + return apiutil.ErrBearerToken + } + + if req.id == "" { + return apiutil.ErrMissingID + } + + if len(req.Name) > maxNameSize { + return apiutil.ErrNameSize + } + + return nil +} + +type viewTwinReq struct { + token string + id string +} + +func (req viewTwinReq) validate() error { + if req.token == "" { + return apiutil.ErrBearerToken + } + + if req.id == "" { + return apiutil.ErrMissingID + } + + return nil +} + +type listReq struct { + token string + offset uint64 + limit uint64 + name string + metadata map[string]interface{} +} + +func (req *listReq) validate() error { + if req.token == "" { + return apiutil.ErrBearerToken + } + + if req.limit < 1 || req.limit > maxLimitSize { + return apiutil.ErrLimitSize + } + + if len(req.name) > maxNameSize { + return apiutil.ErrNameSize + } + + return nil +} + +type listStatesReq struct { + token string + offset uint64 + limit uint64 + id string +} + +func (req *listStatesReq) validate() error { + if req.token == "" { + return apiutil.ErrBearerToken + } + + if req.id == "" { + return apiutil.ErrMissingID + } + + if req.limit == 0 || req.limit > maxLimitSize { + return apiutil.ErrLimitSize + } + + return nil +} diff --git a/twins/api/http/responses.go b/twins/api/http/responses.go new file mode 100644 index 0000000..2b7f32f --- /dev/null +++ b/twins/api/http/responses.go @@ -0,0 +1,146 @@ +// Copyright (c) Abstract Machines +// SPDX-License-Identifier: Apache-2.0 + +package http + +import ( + "fmt" + "net/http" + "time" + + "github.com/absmach/magistrala" + "github.com/absmach/mg-contrib/twins" +) + +var ( + _ magistrala.Response = (*twinRes)(nil) + _ magistrala.Response = (*viewTwinRes)(nil) + _ magistrala.Response = (*viewStateRes)(nil) + _ magistrala.Response = (*twinsPageRes)(nil) + _ magistrala.Response = (*statesPageRes)(nil) + _ magistrala.Response = (*removeRes)(nil) +) + +type twinRes struct { + id string + created bool +} + +func (res twinRes) Code() int { + if res.created { + return http.StatusCreated + } + + return http.StatusOK +} + +func (res twinRes) Headers() map[string]string { + if res.created { + return map[string]string{ + "Location": fmt.Sprintf("/twins/%s", res.id), + } + } + + return map[string]string{} +} + +func (res twinRes) Empty() bool { + return true +} + +type viewTwinRes struct { + Owner string `json:"owner,omitempty"` + ID string `json:"id"` + Name string `json:"name,omitempty"` + Revision int `json:"revision"` + Created 
time.Time `json:"created"` + Updated time.Time `json:"updated"` + Definitions []twins.Definition `json:"definitions,omitempty"` + Metadata map[string]interface{} `json:"metadata,omitempty"` +} + +func (res viewTwinRes) Code() int { + return http.StatusOK +} + +func (res viewTwinRes) Headers() map[string]string { + return map[string]string{} +} + +func (res viewTwinRes) Empty() bool { + return false +} + +type viewStateRes struct { + TwinID string `json:"twin_id"` + ID int64 `json:"id"` + Definition int `json:"definition"` + Created time.Time `json:"created"` + Payload map[string]interface{} `json:"payload"` +} + +func (res viewStateRes) Code() int { + return http.StatusOK +} + +func (res viewStateRes) Headers() map[string]string { + return map[string]string{} +} + +func (res viewStateRes) Empty() bool { + return false +} + +type pageRes struct { + Total uint64 `json:"total"` + Offset uint64 `json:"offset"` + Limit uint64 `json:"limit"` +} + +type twinsPageRes struct { + pageRes + Twins []viewTwinRes `json:"twins"` +} + +func (res twinsPageRes) Code() int { + return http.StatusOK +} + +func (res twinsPageRes) Headers() map[string]string { + return map[string]string{} +} + +func (res twinsPageRes) Empty() bool { + return false +} + +type statesPageRes struct { + pageRes + States []viewStateRes `json:"states"` +} + +func (res statesPageRes) Code() int { + return http.StatusOK +} + +func (res statesPageRes) Headers() map[string]string { + return map[string]string{} +} + +func (res statesPageRes) Empty() bool { + return false +} + +type removeRes struct{} + +func (res removeRes) Code() int { + return http.StatusNoContent +} + +func (res removeRes) Headers() map[string]string { + return map[string]string{} +} + +func (res removeRes) Empty() bool { + return true +} diff --git a/twins/api/http/transport.go b/twins/api/http/transport.go new file mode 100644 index 0000000..35bb2d9 --- /dev/null +++ b/twins/api/http/transport.go @@ -0,0 +1,176 @@ +// Copyright (c) Abstract Machines +// SPDX-License-Identifier: Apache-2.0 + +package http + +import ( + "context" + "encoding/json" + "log/slog" + "net/http" + "strings" + + "github.com/absmach/magistrala" + "github.com/absmach/magistrala/pkg/apiutil" + "github.com/absmach/magistrala/pkg/errors" + "github.com/absmach/mg-contrib/pkg/api" + "github.com/absmach/mg-contrib/twins" + "github.com/go-chi/chi/v5" + kithttp "github.com/go-kit/kit/transport/http" + "github.com/prometheus/client_golang/prometheus/promhttp" + "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp" +) + +const ( + contentType = "application/json" + offsetKey = "offset" + limitKey = "limit" + nameKey = "name" + metadataKey = "metadata" + defLimit = 10 + defOffset = 0 +) + +// MakeHandler returns a HTTP handler for API endpoints. 
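+//
+// A minimal wiring sketch, assuming a constructed service value svc and an
+// illustrative listen address:
+//
+//	handler := MakeHandler(svc, logger, "instance-1")
+//	srv := &http.Server{Addr: ":9018", Handler: handler}
+//	log.Fatal(srv.ListenAndServe())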
+func MakeHandler(svc twins.Service, logger *slog.Logger, instanceID string) http.Handler { + opts := []kithttp.ServerOption{ + kithttp.ServerErrorEncoder(apiutil.LoggingErrorEncoder(logger, api.EncodeError)), + } + + r := chi.NewRouter() + + r.Route("/twins", func(r chi.Router) { + r.Post("/", otelhttp.NewHandler(kithttp.NewServer( + addTwinEndpoint(svc), + decodeTwinCreation, + api.EncodeResponse, + opts..., + ), "add_twin").ServeHTTP) + r.Get("/", otelhttp.NewHandler(kithttp.NewServer( + listTwinsEndpoint(svc), + decodeList, + api.EncodeResponse, + opts..., + ), "list_twins").ServeHTTP) + r.Put("/{twinID}", otelhttp.NewHandler(kithttp.NewServer( + updateTwinEndpoint(svc), + decodeTwinUpdate, + api.EncodeResponse, + opts..., + ), "update_twin").ServeHTTP) + r.Get("/{twinID}", otelhttp.NewHandler(kithttp.NewServer( + viewTwinEndpoint(svc), + decodeView, + api.EncodeResponse, + opts..., + ), "view_twin").ServeHTTP) + r.Delete("/{twinID}", otelhttp.NewHandler(kithttp.NewServer( + removeTwinEndpoint(svc), + decodeView, + api.EncodeResponse, + opts..., + ), "remove_twin").ServeHTTP) + }) + r.Get("/states/{twinID}", otelhttp.NewHandler(kithttp.NewServer( + listStatesEndpoint(svc), + decodeListStates, + api.EncodeResponse, + opts..., + ), "list_states").ServeHTTP) + + r.Get("/health", magistrala.Health("twins", instanceID)) + r.Handle("/metrics", promhttp.Handler()) + + return r +} + +func decodeTwinCreation(_ context.Context, r *http.Request) (interface{}, error) { + if !strings.Contains(r.Header.Get("Content-Type"), contentType) { + return nil, errors.Wrap(apiutil.ErrValidation, apiutil.ErrUnsupportedContentType) + } + + req := addTwinReq{token: apiutil.ExtractBearerToken(r)} + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + return nil, errors.Wrap(apiutil.ErrValidation, errors.Wrap(errors.ErrMalformedEntity, err)) + } + + return req, nil +} + +func decodeTwinUpdate(_ context.Context, r *http.Request) (interface{}, error) { + if !strings.Contains(r.Header.Get("Content-Type"), contentType) { + return nil, errors.Wrap(apiutil.ErrValidation, apiutil.ErrUnsupportedContentType) + } + + req := updateTwinReq{ + token: apiutil.ExtractBearerToken(r), + id: chi.URLParam(r, "twinID"), + } + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + return nil, errors.Wrap(apiutil.ErrValidation, errors.Wrap(err, errors.ErrMalformedEntity)) + } + + return req, nil +} + +func decodeView(_ context.Context, r *http.Request) (interface{}, error) { + req := viewTwinReq{ + token: apiutil.ExtractBearerToken(r), + id: chi.URLParam(r, "twinID"), + } + + return req, nil +} + +func decodeList(_ context.Context, r *http.Request) (interface{}, error) { + l, err := apiutil.ReadNumQuery[uint64](r, limitKey, defLimit) + if err != nil { + return nil, errors.Wrap(apiutil.ErrValidation, err) + } + + o, err := apiutil.ReadNumQuery[uint64](r, offsetKey, defOffset) + if err != nil { + return nil, errors.Wrap(apiutil.ErrValidation, err) + } + + n, err := apiutil.ReadStringQuery(r, nameKey, "") + if err != nil { + return nil, errors.Wrap(apiutil.ErrValidation, err) + } + + m, err := apiutil.ReadMetadataQuery(r, metadataKey, nil) + if err != nil { + return nil, errors.Wrap(apiutil.ErrValidation, err) + } + + req := listReq{ + token: apiutil.ExtractBearerToken(r), + limit: l, + offset: o, + name: n, + metadata: m, + } + + return req, nil +} + +func decodeListStates(_ context.Context, r *http.Request) (interface{}, error) { + l, err := apiutil.ReadNumQuery[uint64](r, limitKey, defLimit) + if err != nil { + return 
nil, errors.Wrap(apiutil.ErrValidation, err) + } + + o, err := apiutil.ReadNumQuery[uint64](r, offsetKey, defOffset) + if err != nil { + return nil, errors.Wrap(apiutil.ErrValidation, err) + } + + req := listStatesReq{ + token: apiutil.ExtractBearerToken(r), + limit: l, + offset: o, + id: chi.URLParam(r, "twinID"), + } + + return req, nil +} diff --git a/twins/api/logging.go b/twins/api/logging.go new file mode 100644 index 0000000..a114f22 --- /dev/null +++ b/twins/api/logging.go @@ -0,0 +1,168 @@ +// Copyright (c) Abstract Machines +// SPDX-License-Identifier: Apache-2.0 + +//go:build !test + +package api + +import ( + "context" + "log/slog" + "time" + + "github.com/absmach/magistrala/pkg/messaging" + "github.com/absmach/mg-contrib/twins" +) + +var _ twins.Service = (*loggingMiddleware)(nil) + +type loggingMiddleware struct { + logger *slog.Logger + svc twins.Service +} + +// LoggingMiddleware adds logging facilities to the core service. +func LoggingMiddleware(svc twins.Service, logger *slog.Logger) twins.Service { + return &loggingMiddleware{logger, svc} +} + +func (lm *loggingMiddleware) AddTwin(ctx context.Context, token string, twin twins.Twin, def twins.Definition) (tw twins.Twin, err error) { + defer func(begin time.Time) { + args := []any{ + slog.String("duration", time.Since(begin).String()), + slog.Group("twin", + slog.String("id", tw.ID), + slog.String("name", tw.Name), + slog.Any("definitions", tw.Definitions), + ), + } + if err != nil { + args = append(args, slog.Any("error", err)) + lm.logger.Warn("Add twin failed", args...) + return + } + lm.logger.Info("Add twin completed successfully", args...) + }(time.Now()) + + return lm.svc.AddTwin(ctx, token, twin, def) +} + +func (lm *loggingMiddleware) UpdateTwin(ctx context.Context, token string, twin twins.Twin, def twins.Definition) (err error) { + defer func(begin time.Time) { + args := []any{ + slog.String("duration", time.Since(begin).String()), + slog.Group("twin", + slog.String("id", twin.ID), + slog.String("name", twin.Name), + slog.Any("definitions", def), + ), + } + if err != nil { + args = append(args, slog.Any("error", err)) + lm.logger.Warn("Update twin failed", args...) + return + } + lm.logger.Info("Update twin completed successfully", args...) + }(time.Now()) + + return lm.svc.UpdateTwin(ctx, token, twin, def) +} + +func (lm *loggingMiddleware) ViewTwin(ctx context.Context, token, twinID string) (tw twins.Twin, err error) { + defer func(begin time.Time) { + args := []any{ + slog.String("duration", time.Since(begin).String()), + slog.String("twin_id", twinID), + } + if err != nil { + args = append(args, slog.Any("error", err)) + lm.logger.Warn("View twin failed", args...) + return + } + lm.logger.Info("View twin completed successfully", args...) + }(time.Now()) + + return lm.svc.ViewTwin(ctx, token, twinID) +} + +func (lm *loggingMiddleware) ListTwins(ctx context.Context, token string, offset, limit uint64, name string, metadata twins.Metadata) (page twins.Page, err error) { + defer func(begin time.Time) { + args := []any{ + slog.String("duration", time.Since(begin).String()), + slog.Group("page", + slog.String("name", name), + slog.Uint64("offset", offset), + slog.Uint64("limit", limit), + slog.Uint64("total", page.Total), + ), + } + if err != nil { + args = append(args, slog.Any("error", err)) + lm.logger.Warn("List twins failed", args...) + return + } + lm.logger.Info("List twins completed successfully", args...) 
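+		// Note: this deferred closure runs only after lm.svc.ListTwins below
+		// returns, so the named results (page, err) are already populated by
+		// the time the log record is built; the begin timestamp was captured
+		// at defer time, so the logged duration covers the whole call.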
+ }(time.Now()) + + return lm.svc.ListTwins(ctx, token, offset, limit, name, metadata) +} + +func (lm *loggingMiddleware) SaveStates(ctx context.Context, msg *messaging.Message) (err error) { + defer func(begin time.Time) { + args := []any{ + slog.String("duration", time.Since(begin).String()), + slog.Group("message", + slog.String("channel", msg.GetChannel()), + slog.String("subtopic", msg.GetSubtopic()), + slog.String("publisher", msg.GetPublisher()), + ), + } + if err != nil { + args = append(args, slog.Any("error", err)) + lm.logger.Warn("Save states failed", args...) + return + } + lm.logger.Info("Save states completed successfully", args...) + }(time.Now()) + + return lm.svc.SaveStates(ctx, msg) +} + +func (lm *loggingMiddleware) ListStates(ctx context.Context, token string, offset, limit uint64, twinID string) (page twins.StatesPage, err error) { + defer func(begin time.Time) { + args := []any{ + slog.String("duration", time.Since(begin).String()), + slog.String("twin_id", twinID), + slog.Group("page", + slog.Uint64("offset", offset), + slog.Uint64("limit", limit), + slog.Uint64("total", page.Total), + ), + } + if err != nil { + args = append(args, slog.Any("error", err)) + lm.logger.Warn("List states failed", args...) + return + } + lm.logger.Info("List states completed successfully", args...) + }(time.Now()) + + return lm.svc.ListStates(ctx, token, offset, limit, twinID) +} + +func (lm *loggingMiddleware) RemoveTwin(ctx context.Context, token, twinID string) (err error) { + defer func(begin time.Time) { + args := []any{ + slog.String("duration", time.Since(begin).String()), + slog.String("twin_id", twinID), + } + if err != nil { + args = append(args, slog.Any("error", err)) + lm.logger.Warn("Remove twin failed", args...) + return + } + lm.logger.Info("Remove twin completed successfully", args...) + }(time.Now()) + + return lm.svc.RemoveTwin(ctx, token, twinID) +} diff --git a/twins/api/metrics.go b/twins/api/metrics.go new file mode 100644 index 0000000..e062e01 --- /dev/null +++ b/twins/api/metrics.go @@ -0,0 +1,95 @@ +// Copyright (c) Abstract Machines +// SPDX-License-Identifier: Apache-2.0 + +//go:build !test + +package api + +import ( + "context" + "time" + + "github.com/absmach/magistrala/pkg/messaging" + "github.com/absmach/mg-contrib/twins" + "github.com/go-kit/kit/metrics" +) + +var _ twins.Service = (*metricsMiddleware)(nil) + +type metricsMiddleware struct { + counter metrics.Counter + latency metrics.Histogram + svc twins.Service +} + +// MetricsMiddleware instruments core service by tracking request count and latency. 
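+//
+// A minimal wiring sketch using the go-kit Prometheus adapters; namespace,
+// metric names, and help texts are illustrative:
+//
+//	counter := kitprometheus.NewCounterFrom(stdprometheus.CounterOpts{
+//		Namespace: "twins",
+//		Name:      "request_count",
+//		Help:      "Number of requests received.",
+//	}, []string{"method"})
+//	latency := kitprometheus.NewSummaryFrom(stdprometheus.SummaryOpts{
+//		Namespace: "twins",
+//		Name:      "request_latency_microseconds",
+//		Help:      "Total duration of requests in microseconds.",
+//	}, []string{"method"})
+//	svc = MetricsMiddleware(svc, counter, latency)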
+func MetricsMiddleware(svc twins.Service, counter metrics.Counter, latency metrics.Histogram) twins.Service { + return &metricsMiddleware{ + counter: counter, + latency: latency, + svc: svc, + } +} + +func (ms *metricsMiddleware) AddTwin(ctx context.Context, token string, twin twins.Twin, def twins.Definition) (saved twins.Twin, err error) { + defer func(begin time.Time) { + ms.counter.With("method", "add_twin").Add(1) + ms.latency.With("method", "add_twin").Observe(time.Since(begin).Seconds()) + }(time.Now()) + + return ms.svc.AddTwin(ctx, token, twin, def) +} + +func (ms *metricsMiddleware) UpdateTwin(ctx context.Context, token string, twin twins.Twin, def twins.Definition) (err error) { + defer func(begin time.Time) { + ms.counter.With("method", "update_twin").Add(1) + ms.latency.With("method", "update_twin").Observe(time.Since(begin).Seconds()) + }(time.Now()) + + return ms.svc.UpdateTwin(ctx, token, twin, def) +} + +func (ms *metricsMiddleware) ViewTwin(ctx context.Context, token, twinID string) (tw twins.Twin, err error) { + defer func(begin time.Time) { + ms.counter.With("method", "view_twin").Add(1) + ms.latency.With("method", "view_twin").Observe(time.Since(begin).Seconds()) + }(time.Now()) + + return ms.svc.ViewTwin(ctx, token, twinID) +} + +func (ms *metricsMiddleware) ListTwins(ctx context.Context, token string, offset, limit uint64, name string, metadata twins.Metadata) (page twins.Page, err error) { + defer func(begin time.Time) { + ms.counter.With("method", "list_twins").Add(1) + ms.latency.With("method", "list_twins").Observe(time.Since(begin).Seconds()) + }(time.Now()) + + return ms.svc.ListTwins(ctx, token, offset, limit, name, metadata) +} + +func (ms *metricsMiddleware) SaveStates(ctx context.Context, msg *messaging.Message) error { + defer func(begin time.Time) { + ms.counter.With("method", "save_states").Add(1) + ms.latency.With("method", "save_states").Observe(time.Since(begin).Seconds()) + }(time.Now()) + + return ms.svc.SaveStates(ctx, msg) +} + +func (ms *metricsMiddleware) ListStates(ctx context.Context, token string, offset, limit uint64, twinID string) (st twins.StatesPage, err error) { + defer func(begin time.Time) { + ms.counter.With("method", "list_states").Add(1) + ms.latency.With("method", "list_states").Observe(time.Since(begin).Seconds()) + }(time.Now()) + + return ms.svc.ListStates(ctx, token, offset, limit, twinID) +} + +func (ms *metricsMiddleware) RemoveTwin(ctx context.Context, token, twinID string) (err error) { + defer func(begin time.Time) { + ms.counter.With("method", "remove_twin").Add(1) + ms.latency.With("method", "remove_twin").Observe(time.Since(begin).Seconds()) + }(time.Now()) + + return ms.svc.RemoveTwin(ctx, token, twinID) +} diff --git a/twins/doc.go b/twins/doc.go new file mode 100644 index 0000000..acb6704 --- /dev/null +++ b/twins/doc.go @@ -0,0 +1,9 @@ +// Copyright (c) Abstract Machines +// SPDX-License-Identifier: Apache-2.0 + +// Package twins contains the domain concept definitions needed to support +// Magistrala twins service functionality. Twin is a digital representation of a +// real world data system consisting of data producers and consumers. It stores +// the sequence of attribute based definitions of a data system and refers to a +// time series of definition based states that store the system historical data. 
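+//
+// As an illustration (not part of the API), a twin representing a vehicle
+// could keep a definition whose attributes name (channel, subtopic) sources
+// such as ("telemetry", "engine"); every message arriving on an attribute
+// with persistence enabled appends a new state to that twin's history.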
+package twins diff --git a/twins/events/doc.go b/twins/events/doc.go new file mode 100644 index 0000000..992d0bf --- /dev/null +++ b/twins/events/doc.go @@ -0,0 +1,6 @@ +// Copyright (c) Abstract Machines +// SPDX-License-Identifier: Apache-2.0 + +// Package events provides the domain concept definitions needed to support +// twins clients events functionality. +package events diff --git a/twins/events/events.go b/twins/events/events.go new file mode 100644 index 0000000..7281bcc --- /dev/null +++ b/twins/events/events.go @@ -0,0 +1,252 @@ +// Copyright (c) Abstract Machines +// SPDX-License-Identifier: Apache-2.0 + +package events + +import ( + "encoding/json" + "time" + + "github.com/absmach/magistrala/pkg/events" + "github.com/absmach/magistrala/pkg/messaging" + "github.com/absmach/mg-contrib/twins" +) + +const ( + twinPrefix = "twins." + twinAdd = twinPrefix + "add" + twinUpdate = twinPrefix + "update" + twinRemove = twinPrefix + "remove" + twinView = twinPrefix + "view" + twinList = twinPrefix + "list" + twinListStates = twinPrefix + "list_states" + twinSaveStates = twinPrefix + "save_states" +) + +var ( + _ events.Event = (*addTwinEvent)(nil) + _ events.Event = (*updateTwinEvent)(nil) + _ events.Event = (*removeTwinEvent)(nil) + _ events.Event = (*viewTwinEvent)(nil) + _ events.Event = (*listTwinsEvent)(nil) + _ events.Event = (*listStatesEvent)(nil) + _ events.Event = (*saveStatesEvent)(nil) +) + +type addTwinEvent struct { + Twin twins.Twin + Definition twins.Definition +} + +func (ate addTwinEvent) Encode() (map[string]interface{}, error) { + val := map[string]interface{}{ + "operation": twinAdd, + "id": ate.Twin.ID, + "created": ate.Twin.Created, + } + + if ate.Twin.Owner != "" { + val["owner"] = ate.Twin.Owner + } + if ate.Twin.Name != "" { + val["name"] = ate.Twin.Name + } + if ate.Twin.Revision != 0 { + val["revision"] = ate.Twin.Revision + } + if ate.Twin.Metadata != nil { + metadata, err := json.Marshal(ate.Twin.Metadata) + if err != nil { + return map[string]interface{}{}, err + } + + val["metadata"] = metadata + } + if len(ate.Twin.Definitions) > 0 { + definitions, err := json.Marshal(ate.Twin.Definitions) + if err != nil { + return map[string]interface{}{}, err + } + + val["twin_definitions"] = definitions + } + if ate.Definition.ID != 0 { + val["definition_id"] = ate.Definition.ID + } + if ate.Definition.Created != (time.Time{}) { + val["definition_created"] = ate.Definition.Created + } + if len(ate.Definition.Attributes) > 0 { + attributes, err := json.Marshal(ate.Definition.Attributes) + if err != nil { + return map[string]interface{}{}, err + } + + val["definition_attributes"] = attributes + } + if ate.Definition.Delta != 0 { + val["definition_delta"] = ate.Definition.Delta + } + + return val, nil +} + +type updateTwinEvent struct { + twin twins.Twin + definition twins.Definition +} + +func (ute updateTwinEvent) Encode() (map[string]interface{}, error) { + val := map[string]interface{}{ + "operation": twinUpdate, + "id": ute.twin.ID, + } + + if ute.twin.Owner != "" { + val["owner"] = ute.twin.Owner + } + if ute.twin.Name != "" { + val["name"] = ute.twin.Name + } + if ute.twin.Revision != 0 { + val["revision"] = ute.twin.Revision + } + if ute.twin.Metadata != nil { + metadata, err := json.Marshal(ute.twin.Metadata) + if err != nil { + return map[string]interface{}{}, err + } + + val["metadata"] = metadata + } + if len(ute.twin.Definitions) > 0 { + definitions, err := json.Marshal(ute.twin.Definitions) + if err != nil { + return map[string]interface{}{}, err + } + + 
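+		// Nested values such as definitions, metadata, and attributes are
+		// marshaled to JSON before being stored, keeping every field of the
+		// encoded event a flat scalar.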
val["twin_definitions"] = definitions + } + if ute.twin.Created != (time.Time{}) { + val["created"] = ute.twin.Created + } + if ute.definition.ID != 0 { + val["definition_id"] = ute.definition.ID + } + if ute.definition.Created != (time.Time{}) { + val["definition_created"] = ute.definition.Created + } + if len(ute.definition.Attributes) > 0 { + attributes, err := json.Marshal(ute.definition.Attributes) + if err != nil { + return map[string]interface{}{}, err + } + + val["definition_attributes"] = attributes + } + if ute.definition.Delta != 0 { + val["definition_delta"] = ute.definition.Delta + } + + return val, nil +} + +type viewTwinEvent struct { + id string +} + +func (vte viewTwinEvent) Encode() (map[string]interface{}, error) { + return map[string]interface{}{ + "operation": twinView, + "id": vte.id, + }, nil +} + +type removeTwinEvent struct { + id string +} + +func (rte removeTwinEvent) Encode() (map[string]interface{}, error) { + return map[string]interface{}{ + "operation": twinRemove, + "id": rte.id, + }, nil +} + +type listTwinsEvent struct { + offset uint64 + limit uint64 + name string + metadata twins.Metadata +} + +func (lte listTwinsEvent) Encode() (map[string]interface{}, error) { + val := map[string]interface{}{ + "operation": twinList, + } + + if lte.name != "" { + val["name"] = lte.name + } + if lte.metadata != nil { + metadata, err := json.Marshal(lte.metadata) + if err != nil { + return map[string]interface{}{}, err + } + + val["metadata"] = metadata + } + if lte.offset != 0 { + val["offset"] = lte.offset + } + if lte.limit != 0 { + val["limit"] = lte.limit + } + + return val, nil +} + +type listStatesEvent struct { + offset uint64 + limit uint64 + id string +} + +func (lsge listStatesEvent) Encode() (map[string]interface{}, error) { + val := map[string]interface{}{ + "operation": twinListStates, + } + + if lsge.offset != 0 { + val["offset"] = lsge.offset + } + if lsge.limit != 0 { + val["limit"] = lsge.limit + } + if lsge.id != "" { + val["id"] = lsge.id + } + + return val, nil +} + +type saveStatesEvent struct { + msg *messaging.Message +} + +func (ice saveStatesEvent) Encode() (map[string]interface{}, error) { + val := map[string]interface{}{ + "operation": twinSaveStates, + } + + if ice.msg != nil { + msg, err := json.Marshal(ice.msg) + if err != nil { + return map[string]interface{}{}, err + } + + val["message"] = msg + } + + return val, nil +} diff --git a/twins/events/setup_test.go b/twins/events/setup_test.go new file mode 100644 index 0000000..617ac3d --- /dev/null +++ b/twins/events/setup_test.go @@ -0,0 +1,61 @@ +// Copyright (c) Abstract Machines +// SPDX-License-Identifier: Apache-2.0 + +package events_test + +import ( + "context" + "fmt" + "log" + "os" + "testing" + + "github.com/go-redis/redis/v8" + "github.com/ory/dockertest/v3" + "github.com/ory/dockertest/v3/docker" +) + +var ( + redisClient *redis.Client + redisURL string +) + +func TestMain(m *testing.M) { + pool, err := dockertest.NewPool("") + if err != nil { + log.Fatalf("Could not connect to docker: %s", err) + } + + container, err := pool.RunWithOptions(&dockertest.RunOptions{ + Repository: "redis", + Tag: "7.2.4-alpine", + }, func(config *docker.HostConfig) { + config.AutoRemove = true + config.RestartPolicy = docker.RestartPolicy{Name: "no"} + }) + if err != nil { + log.Fatalf("Could not start container: %s", err) + } + + redisURL = fmt.Sprintf("redis://localhost:%s/0", container.GetPort("6379/tcp")) + opts, err := redis.ParseURL(redisURL) + if err != nil { + log.Fatalf("Could not parse 
redis URL: %s", err) + } + + if err := pool.Retry(func() error { + redisClient = redis.NewClient(opts) + + return redisClient.Ping(context.Background()).Err() + }); err != nil { + log.Fatalf("Could not connect to docker: %s", err) + } + + code := m.Run() + + if err := pool.Purge(container); err != nil { + log.Fatalf("Could not purge container: %s", err) + } + + os.Exit(code) +} diff --git a/twins/events/streams.go b/twins/events/streams.go new file mode 100644 index 0000000..e733585 --- /dev/null +++ b/twins/events/streams.go @@ -0,0 +1,155 @@ +// Copyright (c) Abstract Machines +// SPDX-License-Identifier: Apache-2.0 + +package events + +import ( + "context" + + "github.com/absmach/magistrala/pkg/events" + "github.com/absmach/magistrala/pkg/events/store" + "github.com/absmach/magistrala/pkg/messaging" + "github.com/absmach/mg-contrib/twins" +) + +const streamID = "magistrala.twins" + +var _ twins.Service = (*eventStore)(nil) + +type eventStore struct { + events.Publisher + svc twins.Service +} + +// NewEventStoreMiddleware returns wrapper around things service that sends +// events to event store. +func NewEventStoreMiddleware(ctx context.Context, svc twins.Service, url string) (twins.Service, error) { + publisher, err := store.NewPublisher(ctx, url, streamID) + if err != nil { + return nil, err + } + + return &eventStore{ + svc: svc, + Publisher: publisher, + }, nil +} + +func (es eventStore) AddTwin(ctx context.Context, token string, twin twins.Twin, def twins.Definition) (twins.Twin, error) { + twin, err := es.svc.AddTwin(ctx, token, twin, def) + if err != nil { + return twin, err + } + + event := addTwinEvent{ + twin, def, + } + + if err := es.Publish(ctx, event); err != nil { + return twin, err + } + + return twin, nil +} + +func (es eventStore) UpdateTwin(ctx context.Context, token string, twin twins.Twin, def twins.Definition) error { + if err := es.svc.UpdateTwin(ctx, token, twin, def); err != nil { + return err + } + + event := updateTwinEvent{ + twin, def, + } + + if err := es.Publish(ctx, event); err != nil { + return err + } + + return nil +} + +func (es eventStore) ViewTwin(ctx context.Context, token, id string) (twins.Twin, error) { + twin, err := es.svc.ViewTwin(ctx, token, id) + if err != nil { + return twin, err + } + + event := viewTwinEvent{ + id, + } + + if err := es.Publish(ctx, event); err != nil { + return twin, err + } + + return twin, nil +} + +func (es eventStore) RemoveTwin(ctx context.Context, token, id string) error { + if err := es.svc.RemoveTwin(ctx, token, id); err != nil { + return err + } + + event := removeTwinEvent{ + id, + } + + if err := es.Publish(ctx, event); err != nil { + return err + } + + return nil +} + +func (es eventStore) ListTwins(ctx context.Context, token string, offset, limit uint64, name string, metadata twins.Metadata) (twins.Page, error) { + tp, err := es.svc.ListTwins(ctx, token, offset, limit, name, metadata) + if err != nil { + return tp, err + } + event := listTwinsEvent{ + offset, + limit, + name, + metadata, + } + + if err := es.Publish(ctx, event); err != nil { + return tp, err + } + + return tp, nil +} + +func (es eventStore) ListStates(ctx context.Context, token string, offset, limit uint64, id string) (twins.StatesPage, error) { + sp, err := es.svc.ListStates(ctx, token, offset, limit, id) + if err != nil { + return sp, err + } + + event := listStatesEvent{ + offset, + limit, + id, + } + + if err := es.Publish(ctx, event); err != nil { + return sp, err + } + + return sp, nil +} + +func (es eventStore) SaveStates(ctx 
context.Context, msg *messaging.Message) error { + if err := es.svc.SaveStates(ctx, msg); err != nil { + return err + } + event := saveStatesEvent{ + msg, + } + + if err := es.Publish(ctx, event); err != nil { + return err + } + + return nil +} diff --git a/twins/events/twins.go b/twins/events/twins.go new file mode 100644 index 0000000..39c4430 --- /dev/null +++ b/twins/events/twins.go @@ -0,0 +1,128 @@ +// Copyright (c) Abstract Machines +// SPDX-License-Identifier: Apache-2.0 + +package events + +import ( + "context" + "fmt" + + "github.com/absmach/magistrala/pkg/errors" + "github.com/absmach/mg-contrib/twins" + "github.com/go-redis/redis/v8" +) + +const ( + prefix = "twin" +) + +var ( + // errRedisTwinSave indicates error while saving Twin in redis cache. + errRedisTwinSave = errors.New("failed to save twin in redis cache") + + // errRedisTwinUpdate indicates error while saving Twin in redis cache. + errRedisTwinUpdate = errors.New("failed to update twin in redis cache") + + // errRedisTwinIDs indicates error while getting Twin IDs from redis cache. + errRedisTwinIDs = errors.New("failed to get twin id from redis cache") + + // errRedisTwinRemove indicates error while removing Twin from redis cache. + errRedisTwinRemove = errors.New("failed to remove twin from redis cache") +) + +var _ twins.TwinCache = (*twinCache)(nil) + +type twinCache struct { + client *redis.Client +} + +// NewTwinCache returns redis twin cache implementation. +func NewTwinCache(client *redis.Client) twins.TwinCache { + return &twinCache{ + client: client, + } +} + +func (tc *twinCache) Save(ctx context.Context, twin twins.Twin) error { + return tc.save(ctx, twin) +} + +func (tc *twinCache) Update(ctx context.Context, twin twins.Twin) error { + if err := tc.remove(ctx, twin.ID); err != nil { + return errors.Wrap(errRedisTwinUpdate, err) + } + if err := tc.save(ctx, twin); err != nil { + return errors.Wrap(errRedisTwinUpdate, err) + } + return nil +} + +func (tc *twinCache) SaveIDs(ctx context.Context, channel, subtopic string, ids []string) error { + for _, id := range ids { + if err := tc.client.SAdd(ctx, attrKey(channel, subtopic), id).Err(); err != nil { + return errors.Wrap(errRedisTwinSave, err) + } + if err := tc.client.SAdd(ctx, twinKey(id), attrKey(channel, subtopic)).Err(); err != nil { + return errors.Wrap(errRedisTwinSave, err) + } + } + return nil +} + +func (tc *twinCache) IDs(ctx context.Context, channel, subtopic string) ([]string, error) { + ids, err := tc.client.SMembers(ctx, attrKey(channel, subtopic)).Result() + if err != nil { + return nil, errors.Wrap(errRedisTwinIDs, err) + } + idsWildcard, err := tc.client.SMembers(ctx, attrKey(channel, twins.SubtopicWildcard)).Result() + if err != nil { + return nil, errors.Wrap(errRedisTwinIDs, err) + } + ids = append(ids, idsWildcard...) 
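+	// The result is the union of twins bound to the exact (channel, subtopic)
+	// pair and twins bound to the same channel under the subtopic wildcard,
+	// so a wildcard subscription matches states on any subtopic.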
+ return ids, nil +} + +func (tc *twinCache) Remove(ctx context.Context, twinID string) error { + return tc.remove(ctx, twinID) +} + +func (tc *twinCache) save(ctx context.Context, twin twins.Twin) error { + if len(twin.Definitions) < 1 { + return nil + } + attributes := twin.Definitions[len(twin.Definitions)-1].Attributes + for _, attr := range attributes { + if err := tc.client.SAdd(ctx, attrKey(attr.Channel, attr.Subtopic), twin.ID).Err(); err != nil { + return errors.Wrap(errRedisTwinSave, err) + } + if err := tc.client.SAdd(ctx, twinKey(twin.ID), attrKey(attr.Channel, attr.Subtopic)).Err(); err != nil { + return errors.Wrap(errRedisTwinSave, err) + } + } + return nil +} + +func (tc *twinCache) remove(ctx context.Context, twinID string) error { + twinKey := twinKey(twinID) + attrKeys, err := tc.client.SMembers(ctx, twinKey).Result() + if err != nil { + return errors.Wrap(errRedisTwinRemove, err) + } + if err := tc.client.Del(ctx, twinKey).Err(); err != nil { + return errors.Wrap(errRedisTwinRemove, err) + } + for _, attrKey := range attrKeys { + if err := tc.client.SRem(ctx, attrKey, twinID).Err(); err != nil { + return errors.Wrap(errRedisTwinRemove, err) + } + } + return nil +} + +func twinKey(twinID string) string { + return fmt.Sprintf("%s:%s", prefix, twinID) +} + +func attrKey(channel, subtopic string) string { + return fmt.Sprintf("%s:%s-%s", prefix, channel, subtopic) +} diff --git a/twins/events/twins_test.go b/twins/events/twins_test.go new file mode 100644 index 0000000..383e5cf --- /dev/null +++ b/twins/events/twins_test.go @@ -0,0 +1,291 @@ +// Copyright (c) Abstract Machines +// SPDX-License-Identifier: Apache-2.0 + +package events_test + +import ( + "context" + "fmt" + "testing" + + "github.com/absmach/mg-contrib/twins" + "github.com/absmach/mg-contrib/twins/events" + "github.com/absmach/mg-contrib/twins/mocks" + "github.com/stretchr/testify/assert" +) + +var ( + subtopics = []string{"engine", "chassis", "wheel_2"} + channels = []string{"01ec3c3e-0e66-4e69-9751-a0545b44e08f", "48061e4f-7c23-4f5c-9012-0f9b7cd9d18d", "5b2180e4-e96b-4469-9dc1-b6745078d0b6"} +) + +func TestTwinSave(t *testing.T) { + redisClient.FlushAll(context.Background()) + twinCache := events.NewTwinCache(redisClient) + + twin1 := mocks.CreateTwin(channels[0:2], subtopics[0:2]) + twin2 := mocks.CreateTwin(channels[1:3], subtopics[1:3]) + + cases := []struct { + desc string + twin twins.Twin + err error + }{ + { + desc: "Save twin to cache", + twin: twin1, + err: nil, + }, + { + desc: "Save already cached twin to cache", + twin: twin1, + err: nil, + }, + { + desc: "Save another twin to cache", + twin: twin2, + err: nil, + }, + { + desc: "Save already cached twin to cache", + twin: twin2, + err: nil, + }, + } + + for _, tc := range cases { + ctx := context.Background() + err := twinCache.Save(ctx, tc.twin) + assert.Nil(t, err, fmt.Sprintf("%s: expected %s got %s", tc.desc, tc.err, err)) + + def := tc.twin.Definitions[len(tc.twin.Definitions)-1] + for _, attr := range def.Attributes { + ids, err := twinCache.IDs(ctx, attr.Channel, attr.Subtopic) + assert.Nil(t, err, fmt.Sprintf("unexpected error: %s\n", err)) + assert.Contains(t, ids, tc.twin.ID, fmt.Sprintf("%s: id %s not found in %v", tc.desc, tc.twin.ID, ids)) + } + } +} + +func TestTwinSaveIDs(t *testing.T) { + redisClient.FlushAll(context.Background()) + twinCache := events.NewTwinCache(redisClient) + + twinIDs := []string{"7956f132-0b42-488d-9bd1-0f6dd9d77f98", "a2210c42-1eaf-41ad-b8c1-813317719ed9", "6e815c79-a159-41b0-9ff0-cfa14430e07e"} + + cases 
:= []struct { + desc string + channel string + subtopic string + ids []string + err error + }{ + { + desc: "Save ids to cache", + channel: channels[0], + subtopic: subtopics[0], + ids: twinIDs, + err: nil, + }, + { + desc: "Save empty ids array to cache", + channel: channels[2], + subtopic: subtopics[2], + ids: []string{}, + err: nil, + }, + { + desc: "Save already saved ids to cache", + channel: channels[0], + subtopic: subtopics[0], + ids: twinIDs, + err: nil, + }, + { + desc: "Save ids to cache", + channel: channels[1], + subtopic: subtopics[1], + ids: twinIDs[0:2], + err: nil, + }, + } + + for _, tc := range cases { + ctx := context.Background() + err := twinCache.SaveIDs(ctx, tc.channel, tc.subtopic, tc.ids) + assert.Nil(t, err, fmt.Sprintf("%s: expected %s got %s", tc.desc, tc.err, err)) + + ids, err := twinCache.IDs(ctx, tc.channel, tc.subtopic) + assert.Nil(t, err, fmt.Sprintf("unexpected error: %s\n", err)) + assert.ElementsMatch(t, ids, tc.ids, fmt.Sprintf("%s: got incorrect ids", tc.desc)) + } +} + +func TestTwinUpdate(t *testing.T) { + redisClient.FlushAll(context.Background()) + twinCache := events.NewTwinCache(redisClient) + ctx := context.Background() + + var tws []twins.Twin + for i := range channels { + tw := mocks.CreateTwin(channels[i:i+1], subtopics[i:i+1]) + tws = append(tws, tw) + } + err := twinCache.Save(ctx, tws[0]) + assert.Nil(t, err, fmt.Sprintf("got unexpected error: %s", err)) + tws[1].ID = tws[0].ID + + cases := []struct { + desc string + twinID string + twin twins.Twin + err error + }{ + { + desc: "Update saved twin", + twinID: tws[0].ID, + twin: tws[1], + err: nil, + }, + { + desc: "Update twin with same definition", + twinID: tws[0].ID, + twin: tws[1], + err: nil, + }, + { + desc: "Update unsaved twin definition", + twinID: tws[2].ID, + twin: tws[2], + err: nil, + }, + } + + for _, tc := range cases { + err := twinCache.Update(ctx, tc.twin) + assert.Nil(t, err, fmt.Sprintf("%s: expected %s got %s", tc.desc, tc.err, err)) + + attr := tc.twin.Definitions[0].Attributes[0] + ids, err := twinCache.IDs(ctx, attr.Channel, attr.Subtopic) + assert.Nil(t, err, fmt.Sprintf("got unexpected error: %s", err)) + assert.Contains(t, ids, tc.twinID, fmt.Sprintf("%s: the list doesn't contain the correct elements", tc.desc)) + } +} + +func TestTwinIDs(t *testing.T) { + redisClient.FlushAll(context.Background()) + twinCache := events.NewTwinCache(redisClient) + ctx := context.Background() + + var tws []twins.Twin + for i := 0; i < len(channels); i++ { + tw := mocks.CreateTwin(channels[0:1], subtopics[0:1]) + err := twinCache.Save(ctx, tw) + assert.Nil(t, err, fmt.Sprintf("got unexpected error: %s", err)) + tws = append(tws, tw) + } + for i := 0; i < len(channels); i++ { + tw := mocks.CreateTwin(channels[1:2], subtopics[1:2]) + err := twinCache.Save(ctx, tw) + assert.Nil(t, err, fmt.Sprintf("got unexpected error: %s", err)) + tws = append(tws, tw) + } + twEmptySubt := mocks.CreateTwin(channels[0:1], []string{""}) + err := twinCache.Save(ctx, twEmptySubt) + assert.Nil(t, err, fmt.Sprintf("got unexpected error: %s", err)) + twSubtWild := mocks.CreateTwin(channels[0:1], []string{twins.SubtopicWildcard}) + err = twinCache.Save(ctx, twSubtWild) + assert.Nil(t, err, fmt.Sprintf("got unexpected error: %s", err)) + + nonExistAttr := twins.Attribute{ + Channel: channels[2], + Subtopic: subtopics[2], + PersistState: true, + } + + cases := []struct { + desc string + ids []string + attr twins.Attribute + err error + }{ + { + desc: "Get twin IDs from cache for empty subtopic attribute", + 
ids: []string{twEmptySubt.ID, twSubtWild.ID}, + attr: twEmptySubt.Definitions[0].Attributes[0], + err: nil, + }, + { + desc: "Get twin IDs from cache for subset of ids", + ids: []string{tws[0].ID, tws[1].ID, tws[2].ID, twSubtWild.ID}, + attr: tws[0].Definitions[0].Attributes[0], + err: nil, + }, + { + desc: "Get twin IDs from cache for subset of ids", + ids: []string{tws[3].ID, tws[4].ID, tws[5].ID}, + attr: tws[3].Definitions[0].Attributes[0], + err: nil, + }, + { + desc: "Get twin IDs from cache for non existing attribute", + ids: []string{}, + attr: nonExistAttr, + err: nil, + }, + } + + for _, tc := range cases { + ids, err := twinCache.IDs(ctx, tc.attr.Channel, tc.attr.Subtopic) + assert.Nil(t, err, fmt.Sprintf("%s: expected %s got %s", tc.desc, tc.err, err)) + assert.ElementsMatch(t, ids, tc.ids, fmt.Sprintf("%s: got unexpected list of IDs", tc.desc)) + } +} + +func TestTwinRemove(t *testing.T) { + redisClient.FlushAll(context.Background()) + twinCache := events.NewTwinCache(redisClient) + ctx := context.Background() + + var tws []twins.Twin + for i := range channels { + tw := mocks.CreateTwin(channels[i:i+1], subtopics[i:i+1]) + err := twinCache.Save(ctx, tw) + assert.Nil(t, err, fmt.Sprintf("got unexpected error: %s", err)) + tws = append(tws, tw) + } + + cases := []struct { + desc string + twin twins.Twin + err error + }{ + { + desc: "Remove twin from cache", + twin: tws[0], + err: nil, + }, + { + desc: "Remove already removed twin from cache", + twin: tws[0], + err: nil, + }, + { + desc: "Remove another twin from cache", + twin: tws[1], + err: nil, + }, + } + + for _, tc := range cases { + err := twinCache.Remove(ctx, tc.twin.ID) + assert.Nil(t, err, fmt.Sprintf("%s: expected %s got %s", tc.desc, tc.err, err)) + + def := tc.twin.Definitions[len(tc.twin.Definitions)-1] + for _, attr := range def.Attributes { + ids, err := twinCache.IDs(ctx, attr.Channel, attr.Subtopic) + assert.Nil(t, err, fmt.Sprintf("unexpected error: %s\n", err)) + assert.NotContains(t, ids, tc.twin.ID, fmt.Sprintf("%s: found unexpected ID in the list", tc.desc)) + } + } +} diff --git a/twins/mocks/cache.go b/twins/mocks/cache.go new file mode 100644 index 0000000..1057ef5 --- /dev/null +++ b/twins/mocks/cache.go @@ -0,0 +1,133 @@ +// Code generated by mockery v2.43.2. DO NOT EDIT. 
+ +// Copyright (c) Abstract Machines + +package mocks + +import ( + context "context" + + twins "github.com/absmach/mg-contrib/twins" + mock "github.com/stretchr/testify/mock" +) + +// TwinCache is an autogenerated mock type for the TwinCache type +type TwinCache struct { + mock.Mock +} + +// IDs provides a mock function with given fields: ctx, channel, subtopic +func (_m *TwinCache) IDs(ctx context.Context, channel string, subtopic string) ([]string, error) { + ret := _m.Called(ctx, channel, subtopic) + + if len(ret) == 0 { + panic("no return value specified for IDs") + } + + var r0 []string + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, string, string) ([]string, error)); ok { + return rf(ctx, channel, subtopic) + } + if rf, ok := ret.Get(0).(func(context.Context, string, string) []string); ok { + r0 = rf(ctx, channel, subtopic) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]string) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, string, string) error); ok { + r1 = rf(ctx, channel, subtopic) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Remove provides a mock function with given fields: ctx, twinID +func (_m *TwinCache) Remove(ctx context.Context, twinID string) error { + ret := _m.Called(ctx, twinID) + + if len(ret) == 0 { + panic("no return value specified for Remove") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, string) error); ok { + r0 = rf(ctx, twinID) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// Save provides a mock function with given fields: ctx, twin +func (_m *TwinCache) Save(ctx context.Context, twin twins.Twin) error { + ret := _m.Called(ctx, twin) + + if len(ret) == 0 { + panic("no return value specified for Save") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, twins.Twin) error); ok { + r0 = rf(ctx, twin) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// SaveIDs provides a mock function with given fields: ctx, channel, subtopic, twinIDs +func (_m *TwinCache) SaveIDs(ctx context.Context, channel string, subtopic string, twinIDs []string) error { + ret := _m.Called(ctx, channel, subtopic, twinIDs) + + if len(ret) == 0 { + panic("no return value specified for SaveIDs") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, string, string, []string) error); ok { + r0 = rf(ctx, channel, subtopic, twinIDs) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// Update provides a mock function with given fields: ctx, twin +func (_m *TwinCache) Update(ctx context.Context, twin twins.Twin) error { + ret := _m.Called(ctx, twin) + + if len(ret) == 0 { + panic("no return value specified for Update") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, twins.Twin) error); ok { + r0 = rf(ctx, twin) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// NewTwinCache creates a new instance of TwinCache. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. 
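+// Illustrative usage (editorial sketch, not part of the generated file; the channel, +// subtopic and twin ID values are made up): +// +// tc := mocks.NewTwinCache(t) +// tc.On("IDs", mock.Anything, "chan-1", "engine").Return([]string{"twin-1"}, nil) +// ids, err := tc.IDs(context.Background(), "chan-1", "engine") +// +// After the call, ids contains "twin-1" and err is nil, and the cleanup registered +// below asserts that every expectation was met.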
+func NewTwinCache(t interface { + mock.TestingT + Cleanup(func()) +}) *TwinCache { + mock := &TwinCache{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/twins/mocks/create.go b/twins/mocks/create.go new file mode 100644 index 0000000..7c96215 --- /dev/null +++ b/twins/mocks/create.go @@ -0,0 +1,55 @@ +// Copyright (c) Abstract Machines +// SPDX-License-Identifier: Apache-2.0 + +package mocks + +import ( + "encoding/json" + "strconv" + + "github.com/absmach/magistrala/pkg/messaging" + twins "github.com/absmach/mg-contrib/twins" + "github.com/absmach/senml" +) + +var ( + publisher = "twins" + id = 0 +) + +// CreateMessage creates Magistrala message using SenML record array. +func CreateMessage(attr twins.Attribute, recs []senml.Record) (*messaging.Message, error) { + mRecs, err := json.Marshal(recs) + if err != nil { + return nil, err + } + return &messaging.Message{ + Channel: attr.Channel, + Subtopic: attr.Subtopic, + Payload: mRecs, + Publisher: publisher, + }, nil +} + +// CreateDefinition creates twin definition. +func CreateDefinition(channels, subtopics []string) twins.Definition { + var def twins.Definition + for i := range channels { + attr := twins.Attribute{ + Channel: channels[i], + Subtopic: subtopics[i], + PersistState: true, + } + def.Attributes = append(def.Attributes, attr) + } + return def +} + +// CreateTwin creates twin. +func CreateTwin(channels, subtopics []string) twins.Twin { + id++ + return twins.Twin{ + ID: strconv.Itoa(id), + Definitions: []twins.Definition{CreateDefinition(channels, subtopics)}, + } +} diff --git a/twins/mocks/doc.go b/twins/mocks/doc.go new file mode 100644 index 0000000..16ed198 --- /dev/null +++ b/twins/mocks/doc.go @@ -0,0 +1,5 @@ +// Copyright (c) Abstract Machines +// SPDX-License-Identifier: Apache-2.0 + +// Package mocks contains mocks for testing purposes. +package mocks diff --git a/twins/mocks/messages.go b/twins/mocks/messages.go new file mode 100644 index 0000000..2e662f4 --- /dev/null +++ b/twins/mocks/messages.go @@ -0,0 +1,35 @@ +// Copyright (c) Abstract Machines +// SPDX-License-Identifier: Apache-2.0 + +package mocks + +import ( + "context" + + "github.com/absmach/magistrala/pkg/errors" + "github.com/absmach/magistrala/pkg/messaging" +) + +var _ messaging.Publisher = (*mockBroker)(nil) + +type mockBroker struct { + subscriptions map[string]string +} + +// NewBroker returns mock message publisher. +func NewBroker(sub map[string]string) messaging.Publisher { + return &mockBroker{ + subscriptions: sub, + } +} + +func (mb mockBroker) Publish(ctx context.Context, topic string, msg *messaging.Message) error { + if len(msg.GetPayload()) == 0 { + return errors.New("failed to publish") + } + return nil +} + +func (mb mockBroker) Close() error { + return nil +} diff --git a/twins/mocks/repository.go b/twins/mocks/repository.go new file mode 100644 index 0000000..18c6b85 --- /dev/null +++ b/twins/mocks/repository.go @@ -0,0 +1,181 @@ +// Code generated by mockery v2.43.2. DO NOT EDIT. 
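+// Editorial note, not part of the generated file: like the other mockery mocks in this +// package, TwinRepository is driven through testify expectations in service tests, e.g. +// +// repo := mocks.NewTwinRepository(t) +// repo.On("RetrieveByID", mock.Anything, "twin-1").Return(twins.Twin{ID: "twin-1"}, nil) +// +// where "twin-1" is a placeholder ID used only for illustration.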
+ +// Copyright (c) Abstract Machines + +package mocks + +import ( + context "context" + + twins "github.com/absmach/mg-contrib/twins" + mock "github.com/stretchr/testify/mock" +) + +// TwinRepository is an autogenerated mock type for the TwinRepository type +type TwinRepository struct { + mock.Mock +} + +// Remove provides a mock function with given fields: ctx, twinID +func (_m *TwinRepository) Remove(ctx context.Context, twinID string) error { + ret := _m.Called(ctx, twinID) + + if len(ret) == 0 { + panic("no return value specified for Remove") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, string) error); ok { + r0 = rf(ctx, twinID) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// RetrieveAll provides a mock function with given fields: ctx, owner, offset, limit, name, metadata +func (_m *TwinRepository) RetrieveAll(ctx context.Context, owner string, offset uint64, limit uint64, name string, metadata twins.Metadata) (twins.Page, error) { + ret := _m.Called(ctx, owner, offset, limit, name, metadata) + + if len(ret) == 0 { + panic("no return value specified for RetrieveAll") + } + + var r0 twins.Page + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, string, uint64, uint64, string, twins.Metadata) (twins.Page, error)); ok { + return rf(ctx, owner, offset, limit, name, metadata) + } + if rf, ok := ret.Get(0).(func(context.Context, string, uint64, uint64, string, twins.Metadata) twins.Page); ok { + r0 = rf(ctx, owner, offset, limit, name, metadata) + } else { + r0 = ret.Get(0).(twins.Page) + } + + if rf, ok := ret.Get(1).(func(context.Context, string, uint64, uint64, string, twins.Metadata) error); ok { + r1 = rf(ctx, owner, offset, limit, name, metadata) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// RetrieveByAttribute provides a mock function with given fields: ctx, channel, subtopic +func (_m *TwinRepository) RetrieveByAttribute(ctx context.Context, channel string, subtopic string) ([]string, error) { + ret := _m.Called(ctx, channel, subtopic) + + if len(ret) == 0 { + panic("no return value specified for RetrieveByAttribute") + } + + var r0 []string + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, string, string) ([]string, error)); ok { + return rf(ctx, channel, subtopic) + } + if rf, ok := ret.Get(0).(func(context.Context, string, string) []string); ok { + r0 = rf(ctx, channel, subtopic) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]string) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, string, string) error); ok { + r1 = rf(ctx, channel, subtopic) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// RetrieveByID provides a mock function with given fields: ctx, twinID +func (_m *TwinRepository) RetrieveByID(ctx context.Context, twinID string) (twins.Twin, error) { + ret := _m.Called(ctx, twinID) + + if len(ret) == 0 { + panic("no return value specified for RetrieveByID") + } + + var r0 twins.Twin + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, string) (twins.Twin, error)); ok { + return rf(ctx, twinID) + } + if rf, ok := ret.Get(0).(func(context.Context, string) twins.Twin); ok { + r0 = rf(ctx, twinID) + } else { + r0 = ret.Get(0).(twins.Twin) + } + + if rf, ok := ret.Get(1).(func(context.Context, string) error); ok { + r1 = rf(ctx, twinID) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Save provides a mock function with given fields: ctx, twin +func (_m *TwinRepository) Save(ctx context.Context, twin twins.Twin) (string, error) { + ret := 
_m.Called(ctx, twin) + + if len(ret) == 0 { + panic("no return value specified for Save") + } + + var r0 string + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, twins.Twin) (string, error)); ok { + return rf(ctx, twin) + } + if rf, ok := ret.Get(0).(func(context.Context, twins.Twin) string); ok { + r0 = rf(ctx, twin) + } else { + r0 = ret.Get(0).(string) + } + + if rf, ok := ret.Get(1).(func(context.Context, twins.Twin) error); ok { + r1 = rf(ctx, twin) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Update provides a mock function with given fields: ctx, twin +func (_m *TwinRepository) Update(ctx context.Context, twin twins.Twin) error { + ret := _m.Called(ctx, twin) + + if len(ret) == 0 { + panic("no return value specified for Update") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, twins.Twin) error); ok { + r0 = rf(ctx, twin) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// NewTwinRepository creates a new instance of TwinRepository. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewTwinRepository(t interface { + mock.TestingT + Cleanup(func()) +}) *TwinRepository { + mock := &TwinRepository{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/twins/mocks/service.go b/twins/mocks/service.go new file mode 100644 index 0000000..5007971 --- /dev/null +++ b/twins/mocks/service.go @@ -0,0 +1,199 @@ +// Code generated by mockery v2.43.2. DO NOT EDIT. + +// Copyright (c) Abstract Machines + +package mocks + +import ( + context "context" + + messaging "github.com/absmach/magistrala/pkg/messaging" + mock "github.com/stretchr/testify/mock" + + twins "github.com/absmach/mg-contrib/twins" +) + +// Service is an autogenerated mock type for the Service type +type Service struct { + mock.Mock +} + +// AddTwin provides a mock function with given fields: ctx, token, twin, def +func (_m *Service) AddTwin(ctx context.Context, token string, twin twins.Twin, def twins.Definition) (twins.Twin, error) { + ret := _m.Called(ctx, token, twin, def) + + if len(ret) == 0 { + panic("no return value specified for AddTwin") + } + + var r0 twins.Twin + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, string, twins.Twin, twins.Definition) (twins.Twin, error)); ok { + return rf(ctx, token, twin, def) + } + if rf, ok := ret.Get(0).(func(context.Context, string, twins.Twin, twins.Definition) twins.Twin); ok { + r0 = rf(ctx, token, twin, def) + } else { + r0 = ret.Get(0).(twins.Twin) + } + + if rf, ok := ret.Get(1).(func(context.Context, string, twins.Twin, twins.Definition) error); ok { + r1 = rf(ctx, token, twin, def) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ListStates provides a mock function with given fields: ctx, token, offset, limit, twinID +func (_m *Service) ListStates(ctx context.Context, token string, offset uint64, limit uint64, twinID string) (twins.StatesPage, error) { + ret := _m.Called(ctx, token, offset, limit, twinID) + + if len(ret) == 0 { + panic("no return value specified for ListStates") + } + + var r0 twins.StatesPage + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, string, uint64, uint64, string) (twins.StatesPage, error)); ok { + return rf(ctx, token, offset, limit, twinID) + } + if rf, ok := ret.Get(0).(func(context.Context, string, uint64, uint64, string) twins.StatesPage); ok { + r0 = rf(ctx, token, offset, limit, 
twinID) + } else { + r0 = ret.Get(0).(twins.StatesPage) + } + + if rf, ok := ret.Get(1).(func(context.Context, string, uint64, uint64, string) error); ok { + r1 = rf(ctx, token, offset, limit, twinID) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ListTwins provides a mock function with given fields: ctx, token, offset, limit, name, metadata +func (_m *Service) ListTwins(ctx context.Context, token string, offset uint64, limit uint64, name string, metadata twins.Metadata) (twins.Page, error) { + ret := _m.Called(ctx, token, offset, limit, name, metadata) + + if len(ret) == 0 { + panic("no return value specified for ListTwins") + } + + var r0 twins.Page + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, string, uint64, uint64, string, twins.Metadata) (twins.Page, error)); ok { + return rf(ctx, token, offset, limit, name, metadata) + } + if rf, ok := ret.Get(0).(func(context.Context, string, uint64, uint64, string, twins.Metadata) twins.Page); ok { + r0 = rf(ctx, token, offset, limit, name, metadata) + } else { + r0 = ret.Get(0).(twins.Page) + } + + if rf, ok := ret.Get(1).(func(context.Context, string, uint64, uint64, string, twins.Metadata) error); ok { + r1 = rf(ctx, token, offset, limit, name, metadata) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// RemoveTwin provides a mock function with given fields: ctx, token, twinID +func (_m *Service) RemoveTwin(ctx context.Context, token string, twinID string) error { + ret := _m.Called(ctx, token, twinID) + + if len(ret) == 0 { + panic("no return value specified for RemoveTwin") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, string, string) error); ok { + r0 = rf(ctx, token, twinID) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// SaveStates provides a mock function with given fields: ctx, msg +func (_m *Service) SaveStates(ctx context.Context, msg *messaging.Message) error { + ret := _m.Called(ctx, msg) + + if len(ret) == 0 { + panic("no return value specified for SaveStates") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, *messaging.Message) error); ok { + r0 = rf(ctx, msg) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// UpdateTwin provides a mock function with given fields: ctx, token, twin, def +func (_m *Service) UpdateTwin(ctx context.Context, token string, twin twins.Twin, def twins.Definition) error { + ret := _m.Called(ctx, token, twin, def) + + if len(ret) == 0 { + panic("no return value specified for UpdateTwin") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, string, twins.Twin, twins.Definition) error); ok { + r0 = rf(ctx, token, twin, def) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// ViewTwin provides a mock function with given fields: ctx, token, twinID +func (_m *Service) ViewTwin(ctx context.Context, token string, twinID string) (twins.Twin, error) { + ret := _m.Called(ctx, token, twinID) + + if len(ret) == 0 { + panic("no return value specified for ViewTwin") + } + + var r0 twins.Twin + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, string, string) (twins.Twin, error)); ok { + return rf(ctx, token, twinID) + } + if rf, ok := ret.Get(0).(func(context.Context, string, string) twins.Twin); ok { + r0 = rf(ctx, token, twinID) + } else { + r0 = ret.Get(0).(twins.Twin) + } + + if rf, ok := ret.Get(1).(func(context.Context, string, string) error); ok { + r1 = rf(ctx, token, twinID) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// NewService creates a new instance of 
Service. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewService(t interface { + mock.TestingT + Cleanup(func()) +}) *Service { + mock := &Service{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/twins/mocks/states.go b/twins/mocks/states.go new file mode 100644 index 0000000..2474881 --- /dev/null +++ b/twins/mocks/states.go @@ -0,0 +1,151 @@ +// Code generated by mockery v2.43.2. DO NOT EDIT. + +// Copyright (c) Abstract Machines + +package mocks + +import ( + context "context" + + twins "github.com/absmach/mg-contrib/twins" + mock "github.com/stretchr/testify/mock" +) + +// StateRepository is an autogenerated mock type for the StateRepository type +type StateRepository struct { + mock.Mock +} + +// Count provides a mock function with given fields: ctx, twin +func (_m *StateRepository) Count(ctx context.Context, twin twins.Twin) (int64, error) { + ret := _m.Called(ctx, twin) + + if len(ret) == 0 { + panic("no return value specified for Count") + } + + var r0 int64 + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, twins.Twin) (int64, error)); ok { + return rf(ctx, twin) + } + if rf, ok := ret.Get(0).(func(context.Context, twins.Twin) int64); ok { + r0 = rf(ctx, twin) + } else { + r0 = ret.Get(0).(int64) + } + + if rf, ok := ret.Get(1).(func(context.Context, twins.Twin) error); ok { + r1 = rf(ctx, twin) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// RetrieveAll provides a mock function with given fields: ctx, offset, limit, twinID +func (_m *StateRepository) RetrieveAll(ctx context.Context, offset uint64, limit uint64, twinID string) (twins.StatesPage, error) { + ret := _m.Called(ctx, offset, limit, twinID) + + if len(ret) == 0 { + panic("no return value specified for RetrieveAll") + } + + var r0 twins.StatesPage + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, uint64, uint64, string) (twins.StatesPage, error)); ok { + return rf(ctx, offset, limit, twinID) + } + if rf, ok := ret.Get(0).(func(context.Context, uint64, uint64, string) twins.StatesPage); ok { + r0 = rf(ctx, offset, limit, twinID) + } else { + r0 = ret.Get(0).(twins.StatesPage) + } + + if rf, ok := ret.Get(1).(func(context.Context, uint64, uint64, string) error); ok { + r1 = rf(ctx, offset, limit, twinID) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// RetrieveLast provides a mock function with given fields: ctx, twinID +func (_m *StateRepository) RetrieveLast(ctx context.Context, twinID string) (twins.State, error) { + ret := _m.Called(ctx, twinID) + + if len(ret) == 0 { + panic("no return value specified for RetrieveLast") + } + + var r0 twins.State + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, string) (twins.State, error)); ok { + return rf(ctx, twinID) + } + if rf, ok := ret.Get(0).(func(context.Context, string) twins.State); ok { + r0 = rf(ctx, twinID) + } else { + r0 = ret.Get(0).(twins.State) + } + + if rf, ok := ret.Get(1).(func(context.Context, string) error); ok { + r1 = rf(ctx, twinID) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Save provides a mock function with given fields: ctx, state +func (_m *StateRepository) Save(ctx context.Context, state twins.State) error { + ret := _m.Called(ctx, state) + + if len(ret) == 0 { + panic("no return value specified for Save") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, twins.State) 
error); ok { + r0 = rf(ctx, state) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// Update provides a mock function with given fields: ctx, state +func (_m *StateRepository) Update(ctx context.Context, state twins.State) error { + ret := _m.Called(ctx, state) + + if len(ret) == 0 { + panic("no return value specified for Update") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, twins.State) error); ok { + r0 = rf(ctx, state) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// NewStateRepository creates a new instance of StateRepository. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewStateRepository(t interface { + mock.TestingT + Cleanup(func()) +}) *StateRepository { + mock := &StateRepository{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/twins/mongodb/doc.go b/twins/mongodb/doc.go new file mode 100644 index 0000000..baaf76f --- /dev/null +++ b/twins/mongodb/doc.go @@ -0,0 +1,6 @@ +// Copyright (c) Abstract Machines +// SPDX-License-Identifier: Apache-2.0 + +// Package mongodb contains repository implementations using MongoDB as +// the underlying database. +package mongodb diff --git a/twins/mongodb/init.go b/twins/mongodb/init.go new file mode 100644 index 0000000..22f745e --- /dev/null +++ b/twins/mongodb/init.go @@ -0,0 +1,33 @@ +// Copyright (c) Abstract Machines +// SPDX-License-Identifier: Apache-2.0 + +package mongodb + +import ( + "context" + "fmt" + "log/slog" + + "go.mongodb.org/mongo-driver/mongo" + "go.mongodb.org/mongo-driver/mongo/options" +) + +// Config defines the options that are used when connecting to a MongoDB instance. +type Config struct { + Host string + Port string + Name string +} + +// Connect creates a connection to the MongoDB instance. 
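+// A minimal usage sketch (editorial addition; the host, port and database name are +// placeholders, and logger is any *slog.Logger): +// +// db, err := mongodb.Connect(mongodb.Config{Host: "localhost", Port: "27017", Name: "twins"}, logger) +// if err != nil { +// // handle the connection error +// }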
+func Connect(cfg Config, logger *slog.Logger) (*mongo.Database, error) { + addr := fmt.Sprintf("mongodb://%s:%s", cfg.Host, cfg.Port) + client, err := mongo.Connect(context.Background(), options.Client().ApplyURI(addr)) + if err != nil { + logger.Error(fmt.Sprintf("Failed to connect to database: %s", err)) + return nil, err + } + + db := client.Database(cfg.Name) + return db, nil +} diff --git a/twins/mongodb/setup_test.go b/twins/mongodb/setup_test.go new file mode 100644 index 0000000..0998be4 --- /dev/null +++ b/twins/mongodb/setup_test.go @@ -0,0 +1,56 @@ +// Copyright (c) Abstract Machines +// SPDX-License-Identifier: Apache-2.0 + +package mongodb_test + +import ( + "context" + "fmt" + "log" + "os" + "testing" + + "github.com/ory/dockertest/v3" + "github.com/ory/dockertest/v3/docker" + "go.mongodb.org/mongo-driver/mongo" + "go.mongodb.org/mongo-driver/mongo/options" +) + +const wrongValue = "wrong-value" + +func TestMain(m *testing.M) { + pool, err := dockertest.NewPool("") + if err != nil { + testLog.Error(fmt.Sprintf("Could not connect to docker: %s", err)) + } + + container, err := pool.RunWithOptions(&dockertest.RunOptions{ + Repository: "mongo", + Tag: "7.0.5", + Env: []string{"MONGO_INITDB_DATABASE=test"}, + }, func(config *docker.HostConfig) { + config.AutoRemove = true + config.RestartPolicy = docker.RestartPolicy{Name: "no"} + }) + if err != nil { + log.Fatalf("Could not start container: %s", err) + } + + port = container.GetPort("27017/tcp") + addr = fmt.Sprintf("mongodb://localhost:%s", port) + + if err := pool.Retry(func() error { + _, err := mongo.Connect(context.Background(), options.Client().ApplyURI(addr)) + return err + }); err != nil { + testLog.Error(fmt.Sprintf("Could not connect to docker: %s", err)) + } + + code := m.Run() + + if err := pool.Purge(container); err != nil { + testLog.Error(fmt.Sprintf("Could not purge container: %s", err)) + } + + os.Exit(code) +} diff --git a/twins/mongodb/states.go b/twins/mongodb/states.go new file mode 100644 index 0000000..bac0013 --- /dev/null +++ b/twins/mongodb/states.go @@ -0,0 +1,156 @@ +// Copyright (c) Abstract Machines +// SPDX-License-Identifier: Apache-2.0 + +package mongodb + +import ( + "context" + + "github.com/absmach/mg-contrib/twins" + "go.mongodb.org/mongo-driver/bson" + "go.mongodb.org/mongo-driver/mongo" + "go.mongodb.org/mongo-driver/mongo/options" +) + +const ( + statesCollection string = "states" + twinid string = "twinid" +) + +type stateRepository struct { + db *mongo.Database +} + +var _ twins.StateRepository = (*stateRepository)(nil) + +// NewStateRepository instantiates a MongoDB implementation of state +// repository. +func NewStateRepository(db *mongo.Database) twins.StateRepository { + return &stateRepository{ + db: db, + } +} + +// Save persists the state. +func (sr *stateRepository) Save(ctx context.Context, st twins.State) error { + coll := sr.db.Collection(statesCollection) + + if _, err := coll.InsertOne(ctx, st); err != nil { + return err + } + + return nil +} + +// Update updates the state. +func (sr *stateRepository) Update(ctx context.Context, st twins.State) error { + coll := sr.db.Collection(statesCollection) + + filter := bson.M{"id": st.ID, twinid: st.TwinID} + update := bson.M{"$set": st} + if _, err := coll.UpdateOne(ctx, filter, update); err != nil { + return err + } + + return nil +} + +// Count returns the number of states related to the twin.
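+// For example (editorial sketch, not part of the original patch): +// +// total, err := repo.Count(ctx, twins.Twin{ID: "twin-1"}) +// +// counts the documents in the states collection whose twinid field equals the +// placeholder ID "twin-1".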
+func (sr *stateRepository) Count(ctx context.Context, tw twins.Twin) (int64, error) { + coll := sr.db.Collection(statesCollection) + + filter := bson.M{twinid: tw.ID} + total, err := coll.CountDocuments(ctx, filter) + if err != nil { + return 0, err + } + + return total, nil +} + +// RetrieveAll retrieves the subset of states related to twin specified by id. +func (sr *stateRepository) RetrieveAll(ctx context.Context, offset, limit uint64, twinID string) (twins.StatesPage, error) { + coll := sr.db.Collection(statesCollection) + + findOptions := options.Find() + findOptions.SetSkip(int64(offset)) + findOptions.SetLimit(int64(limit)) + + filter := bson.M{twinid: twinID} + + cur, err := coll.Find(ctx, filter, findOptions) + if err != nil { + return twins.StatesPage{}, err + } + + results, err := decodeStates(ctx, cur) + if err != nil { + return twins.StatesPage{}, err + } + + total, err := coll.CountDocuments(ctx, filter) + if err != nil { + return twins.StatesPage{}, err + } + + return twins.StatesPage{ + States: results, + PageMetadata: twins.PageMetadata{ + Total: uint64(total), + Offset: offset, + Limit: limit, + }, + }, nil +} + +// RetrieveLast returns the last state related to twin specified by id. +func (sr *stateRepository) RetrieveLast(ctx context.Context, twinID string) (twins.State, error) { + coll := sr.db.Collection(statesCollection) + + filter := bson.M{twinid: twinID} + total, err := coll.CountDocuments(ctx, filter) + if err != nil { + return twins.State{}, err + } + + findOptions := options.Find() + var skip int64 + if total > 0 { + skip = total - 1 + } + findOptions.SetSkip(skip) + findOptions.SetLimit(1) + + cur, err := coll.Find(ctx, filter, findOptions) + if err != nil { + return twins.State{}, err + } + + results, err := decodeStates(ctx, cur) + if err != nil { + return twins.State{}, err + } + + if len(results) < 1 { + return twins.State{}, nil + } + return results[0], nil +} + +func decodeStates(ctx context.Context, cur *mongo.Cursor) ([]twins.State, error) { + defer cur.Close(ctx) + + var results []twins.State + for cur.Next(ctx) { + var elem twins.State + if err := cur.Decode(&elem); err != nil { + return []twins.State{}, err + } + results = append(results, elem) + } + + if err := cur.Err(); err != nil { + return []twins.State{}, err + } + return results, nil +} diff --git a/twins/mongodb/states_test.go b/twins/mongodb/states_test.go new file mode 100644 index 0000000..95184c3 --- /dev/null +++ b/twins/mongodb/states_test.go @@ -0,0 +1,164 @@ +// Copyright (c) Abstract Machines +// SPDX-License-Identifier: Apache-2.0 + +package mongodb_test + +import ( + "context" + "fmt" + "testing" + "time" + + "github.com/absmach/mg-contrib/twins" + "github.com/absmach/mg-contrib/twins/mongodb" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.mongodb.org/mongo-driver/bson" + "go.mongodb.org/mongo-driver/mongo" + "go.mongodb.org/mongo-driver/mongo/options" +) + +func TestStateSave(t *testing.T) { + client, err := mongo.Connect(context.Background(), options.Client().ApplyURI(addr)) + require.Nil(t, err, fmt.Sprintf("Creating new MongoDB client expected to succeed: %s.\n", err)) + + db := client.Database(testDB) + repo := mongodb.NewStateRepository(db) + + twid, err := idProvider.ID() + assert.Nil(t, err, fmt.Sprintf("got unexpected error: %s", err)) + + var id int64 + state := twins.State{ + TwinID: twid, + ID: id, + Created: time.Now(), + } + + cases := []struct { + desc string + state twins.State + err error + }{ + { + desc: "save state", + state:
state, + err: nil, + }, + } + + for _, tc := range cases { + err := repo.Save(context.Background(), tc.state) + assert.Equal(t, tc.err, err, fmt.Sprintf("%s: expected %s got %s\n", tc.desc, tc.err, err)) + } +} + +func TestStatesRetrieveAll(t *testing.T) { + client, err := mongo.Connect(context.Background(), options.Client().ApplyURI(addr)) + require.Nil(t, err, fmt.Sprintf("Creating new MongoDB client expected to succeed: %s.\n", err)) + + db := client.Database(testDB) + _, err = db.Collection("states").DeleteMany(context.Background(), bson.D{}) + require.Nil(t, err, fmt.Sprintf("unexpected error: %s\n", err)) + + repo := mongodb.NewStateRepository(db) + + twid, err := idProvider.ID() + assert.Nil(t, err, fmt.Sprintf("got unexpected error: %s", err)) + + n := uint64(10) + for i := uint64(0); i < n; i++ { + st := twins.State{ + TwinID: twid, + ID: int64(i), + Created: time.Now(), + } + + err = repo.Save(context.Background(), st) + require.Nil(t, err, fmt.Sprintf("unexpected error: %s\n", err)) + } + + cases := map[string]struct { + twid string + limit uint64 + offset uint64 + size uint64 + total uint64 + }{ + "retrieve all states with existing twin": { + twid: twid, + offset: 0, + limit: n, + size: n, + total: n, + }, + "retrieve subset of states with existing twin": { + twid: twid, + offset: 0, + limit: n / 2, + size: n / 2, + total: n, + }, + "retrieve states with non-existing twin": { + twid: wrongValue, + offset: 0, + limit: n, + size: 0, + total: 0, + }, + } + + for desc, tc := range cases { + page, err := repo.RetrieveAll(context.Background(), tc.offset, tc.limit, tc.twid) + size := uint64(len(page.States)) + assert.Equal(t, tc.size, size, fmt.Sprintf("%s: expected %d got %d\n", desc, tc.size, size)) + assert.Equal(t, tc.total, page.Total, fmt.Sprintf("%s: expected %d got %d\n", desc, tc.total, page.Total)) + assert.Nil(t, err, fmt.Sprintf("%s: expected no error got %s\n", desc, err)) + } +} + +func TestStatesRetrieveLast(t *testing.T) { + client, err := mongo.Connect(context.Background(), options.Client().ApplyURI(addr)) + require.Nil(t, err, fmt.Sprintf("Creating new MongoDB client expected to succeed: %s.\n", err)) + + db := client.Database(testDB) + _, err = db.Collection("states").DeleteMany(context.Background(), bson.D{}) + require.Nil(t, err, fmt.Sprintf("unexpected error: %s\n", err)) + + repo := mongodb.NewStateRepository(db) + + twid, err := idProvider.ID() + assert.Nil(t, err, fmt.Sprintf("got unexpected error: %s", err)) + + n := int64(10) + for i := int64(1); i <= n; i++ { + st := twins.State{ + TwinID: twid, + ID: i, + Created: time.Now(), + } + + err = repo.Save(context.Background(), st) + require.Nil(t, err, fmt.Sprintf("unexpected error: %s\n", err)) + } + + cases := map[string]struct { + twid string + id int64 + }{ + "retrieve last state with existing twin": { + twid: twid, + id: n, + }, + "retrieve last state with non-existing twin": { + twid: wrongValue, + id: 0, + }, + } + + for desc, tc := range cases { + state, err := repo.RetrieveLast(context.Background(), tc.twid) + assert.Equal(t, tc.id, state.ID, fmt.Sprintf("%s: expected %d got %d\n", desc, tc.id, state.ID)) + assert.Nil(t, err, fmt.Sprintf("%s: expected no error got %s\n", desc, err)) + } +} diff --git a/twins/mongodb/twins.go b/twins/mongodb/twins.go new file mode 100644 index 0000000..c464252 --- /dev/null +++ b/twins/mongodb/twins.go @@ -0,0 +1,210 @@ +// Copyright (c) Abstract Machines +// SPDX-License-Identifier: Apache-2.0 + +package mongodb + +import ( + "context" + +
"github.com/absmach/magistrala/pkg/errors" + repoerr "github.com/absmach/magistrala/pkg/errors/repository" + "github.com/absmach/mg-contrib/twins" + "go.mongodb.org/mongo-driver/bson" + "go.mongodb.org/mongo-driver/mongo" + "go.mongodb.org/mongo-driver/mongo/options" +) + +const ( + maxNameSize = 1024 + twinsCollection string = "twins" +) + +type twinRepository struct { + db *mongo.Database +} + +var _ twins.TwinRepository = (*twinRepository)(nil) + +// NewTwinRepository instantiates a MongoDB implementation of twin repository. +func NewTwinRepository(db *mongo.Database) twins.TwinRepository { + return &twinRepository{ + db: db, + } +} + +func (tr *twinRepository) Save(ctx context.Context, tw twins.Twin) (string, error) { + if len(tw.Name) > maxNameSize { + return "", errors.ErrMalformedEntity + } + + coll := tr.db.Collection(twinsCollection) + + if _, err := coll.InsertOne(ctx, tw); err != nil { + return "", errors.Wrap(repoerr.ErrCreateEntity, err) + } + + return tw.ID, nil +} + +func (tr *twinRepository) Update(ctx context.Context, tw twins.Twin) error { + if len(tw.Name) > maxNameSize { + return errors.ErrMalformedEntity + } + + coll := tr.db.Collection(twinsCollection) + + filter := bson.M{"id": tw.ID} + update := bson.M{"$set": tw} + res, err := coll.UpdateOne(ctx, filter, update) + if err != nil { + return err + } + + if res.ModifiedCount < 1 { + return repoerr.ErrNotFound + } + + return nil +} + +func (tr *twinRepository) RetrieveByID(ctx context.Context, twinID string) (twins.Twin, error) { + coll := tr.db.Collection(twinsCollection) + var tw twins.Twin + + filter := bson.M{"id": twinID} + if err := coll.FindOne(ctx, filter).Decode(&tw); err != nil { + return tw, repoerr.ErrNotFound + } + + return tw, nil +} + +func (tr *twinRepository) RetrieveByAttribute(ctx context.Context, channel, subtopic string) ([]string, error) { + coll := tr.db.Collection(twinsCollection) + + findOptions := options.Aggregate() + prj1 := bson.M{ + "$project": bson.M{ + "definition": bson.M{ + "$arrayElemAt": []interface{}{"$definitions.attributes", -1}, + }, + "id": true, + "_id": 0, + }, + } + match := bson.M{ + "$match": bson.M{ + "definition.channel": channel, + "$or": []interface{}{ + bson.M{"definition.subtopic": subtopic}, + bson.M{"definition.subtopic": twins.SubtopicWildcard}, + }, + }, + } + prj2 := bson.M{ + "$project": bson.M{ + "id": true, + }, + } + + cur, err := coll.Aggregate(ctx, []bson.M{prj1, match, prj2}, findOptions) + if err != nil { + return []string{}, errors.Wrap(repoerr.ErrViewEntity, err) + } + defer cur.Close(ctx) + + if err := cur.Err(); err != nil { + return []string{}, nil + } + + var ids []string + for cur.Next(ctx) { + var elem struct { + ID string `json:"id"` + } + err := cur.Decode(&elem) + if err != nil { + return ids, nil + } + ids = append(ids, elem.ID) + } + + return ids, nil +} + +func (tr *twinRepository) RetrieveAll(ctx context.Context, owner string, offset, limit uint64, name string, metadata twins.Metadata) (twins.Page, error) { + coll := tr.db.Collection(twinsCollection) + + findOptions := options.Find() + findOptions.SetSkip(int64(offset)) + findOptions.SetLimit(int64(limit)) + + filter := bson.M{} + + if owner != "" { + filter["owner"] = owner + } + if name != "" { + filter["name"] = name + } + if len(metadata) > 0 { + filter["metadata"] = metadata + } + cur, err := coll.Find(ctx, filter, findOptions) + if err != nil { + return twins.Page{}, errors.Wrap(repoerr.ErrViewEntity, err) + } + + results, err := decodeTwins(ctx, cur) + if err != nil { + return 
twins.Page{}, errors.Wrap(repoerr.ErrViewEntity, err) + } + + total, err := coll.CountDocuments(ctx, filter) + if err != nil { + return twins.Page{}, errors.Wrap(repoerr.ErrViewEntity, err) + } + + return twins.Page{ + Twins: results, + PageMetadata: twins.PageMetadata{ + Total: uint64(total), + Offset: offset, + Limit: limit, + }, + }, nil +} + +func (tr *twinRepository) Remove(ctx context.Context, twinID string) error { + coll := tr.db.Collection(twinsCollection) + + filter := bson.M{"id": twinID} + res, err := coll.DeleteOne(ctx, filter) + if err != nil { + return errors.Wrap(repoerr.ErrRemoveEntity, err) + } + + if res.DeletedCount < 1 { + return repoerr.ErrNotFound + } + + return nil +} + +func decodeTwins(ctx context.Context, cur *mongo.Cursor) ([]twins.Twin, error) { + defer cur.Close(ctx) + var results []twins.Twin + for cur.Next(ctx) { + var elem twins.Twin + err := cur.Decode(&elem) + if err != nil { + return []twins.Twin{}, err + } + results = append(results, elem) + } + + if err := cur.Err(); err != nil { + return []twins.Twin{}, err + } + return results, nil +} diff --git a/twins/mongodb/twins_test.go b/twins/mongodb/twins_test.go new file mode 100644 index 0000000..e13b958 --- /dev/null +++ b/twins/mongodb/twins_test.go @@ -0,0 +1,388 @@ +// Copyright (c) Abstract Machines +// SPDX-License-Identifier: Apache-2.0 + +package mongodb_test + +import ( + "context" + "fmt" + "os" + "strings" + "testing" + + mglog "github.com/absmach/magistrala/logger" + repoerr "github.com/absmach/magistrala/pkg/errors/repository" + "github.com/absmach/magistrala/pkg/uuid" + "github.com/absmach/mg-contrib/twins" + "github.com/absmach/mg-contrib/twins/mocks" + "github.com/absmach/mg-contrib/twins/mongodb" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.mongodb.org/mongo-driver/bson" + "go.mongodb.org/mongo-driver/mongo" + "go.mongodb.org/mongo-driver/mongo/options" +) + +const ( + maxNameSize = 1024 + testDB = "test" + collection = "twins" + email = "mgx_twin@example.com" + validName = "mgx_twin" + subtopic = "engine" +) + +var ( + port string + addr string + testLog, _ = mglog.New(os.Stdout, "info") + idProvider = uuid.New() + invalidName = strings.Repeat("m", maxNameSize+1) +) + +func TestTwinsSave(t *testing.T) { + client, err := mongo.Connect(context.Background(), options.Client().ApplyURI(addr)) + require.Nil(t, err, fmt.Sprintf("Creating new MongoDB client expected to succeed: %s.\n", err)) + + db := client.Database(testDB) + repo := mongodb.NewTwinRepository(db) + + twid, err := idProvider.ID() + assert.Nil(t, err, fmt.Sprintf("got unexpected error: %s", err)) + + nonexistentTwinID, err := idProvider.ID() + assert.Nil(t, err, fmt.Sprintf("got unexpected error: %s", err)) + + twin := twins.Twin{ + Owner: email, + ID: twid, + } + + cases := []struct { + desc string + twin twins.Twin + err error + }{ + { + desc: "create new twin", + twin: twin, + err: nil, + }, + { + desc: "create twin with invalid name", + twin: twins.Twin{ + ID: nonexistentTwinID, + Owner: email, + Name: invalidName, + }, + err: repoerr.ErrMalformedEntity, + }, + } + + for _, tc := range cases { + _, err := repo.Save(context.Background(), tc.twin) + assert.Equal(t, tc.err, err, fmt.Sprintf("%s: expected %s got %s\n", tc.desc, tc.err, err)) + } +} + +func TestTwinsUpdate(t *testing.T) { + client, err := mongo.Connect(context.Background(), options.Client().ApplyURI(addr)) + require.Nil(t, err, fmt.Sprintf("Creating new MongoDB client expected to succeed: %s.\n", err)) + + db :=
client.Database(testDB) + repo := mongodb.NewTwinRepository(db) + + twid, err := idProvider.ID() + assert.Nil(t, err, fmt.Sprintf("got unexpected error: %s", err)) + + nonexistentTwinID, err := idProvider.ID() + assert.Nil(t, err, fmt.Sprintf("got unexpected error: %s", err)) + + twin := twins.Twin{ + ID: twid, + Name: validName, + } + + if _, err := repo.Save(context.Background(), twin); err != nil { + testLog.Error(err.Error()) + } + + twin.Name = "new_name" + cases := []struct { + desc string + twin twins.Twin + err error + }{ + { + desc: "update existing twin", + twin: twin, + err: nil, + }, + { + desc: "update non-existing twin", + twin: twins.Twin{ + ID: nonexistentTwinID, + }, + err: repoerr.ErrNotFound, + }, + { + desc: "update twin with invalid name", + twin: twins.Twin{ + ID: twid, + Owner: email, + Name: invalidName, + }, + err: repoerr.ErrMalformedEntity, + }, + } + + for _, tc := range cases { + err := repo.Update(context.Background(), tc.twin) + assert.Equal(t, tc.err, err, fmt.Sprintf("%s: expected %s got %s\n", tc.desc, tc.err, err)) + } +} + +func TestTwinsRetrieveByID(t *testing.T) { + client, err := mongo.Connect(context.Background(), options.Client().ApplyURI(addr)) + require.Nil(t, err, fmt.Sprintf("Creating new MongoDB client expected to succeed: %s.\n", err)) + + db := client.Database(testDB) + repo := mongodb.NewTwinRepository(db) + + twid, err := idProvider.ID() + assert.Nil(t, err, fmt.Sprintf("got unexpected error: %s", err)) + + nonexistentTwinID, err := idProvider.ID() + assert.Nil(t, err, fmt.Sprintf("got unexpected error: %s", err)) + + twin := twins.Twin{ + ID: twid, + } + + if _, err := repo.Save(context.Background(), twin); err != nil { + testLog.Error(err.Error()) + } + + cases := []struct { + desc string + id string + err error + }{ + { + desc: "retrieve an existing twin", + id: twin.ID, + err: nil, + }, + { + desc: "retrieve a non-existing twin", + id: nonexistentTwinID, + err: repoerr.ErrNotFound, + }, + } + + for _, tc := range cases { + _, err := repo.RetrieveByID(context.Background(), tc.id) + assert.Equal(t, tc.err, err, fmt.Sprintf("%s: expected %s got %s\n", tc.desc, tc.err, err)) + } +} + +func TestTwinsRetrieveByAttribute(t *testing.T) { + client, err := mongo.Connect(context.Background(), options.Client().ApplyURI(addr)) + require.Nil(t, err, fmt.Sprintf("Creating new MongoDB client expected to succeed: %s.\n", err)) + + db := client.Database(testDB) + repo := mongodb.NewTwinRepository(db) + + chID, err := idProvider.ID() + assert.Nil(t, err, fmt.Sprintf("got unexpected error: %s", err)) + + empty := mocks.CreateTwin([]string{chID}, []string{""}) + _, err = repo.Save(context.Background(), empty) + assert.Nil(t, err, fmt.Sprintf("got unexpected error: %s", err)) + wildcard := mocks.CreateTwin([]string{chID}, []string{twins.SubtopicWildcard}) + _, err = repo.Save(context.Background(), wildcard) + assert.Nil(t, err, fmt.Sprintf("got unexpected error: %s", err)) + nonEmpty := mocks.CreateTwin([]string{chID}, []string{subtopic}) + _, err = repo.Save(context.Background(), nonEmpty) + assert.Nil(t, err, fmt.Sprintf("got unexpected error: %s", err)) + + cases := []struct { + desc string + subtopic string + ids []string + }{ + { + desc: "retrieve empty subtopic", + subtopic: "", + ids: []string{wildcard.ID, empty.ID}, + }, + { + desc: "retrieve wildcard subtopic", + subtopic: twins.SubtopicWildcard, + ids: []string{wildcard.ID}, + }, + { + desc: "retrieve non-empty subtopic", + subtopic: subtopic, + ids: []string{wildcard.ID, nonEmpty.ID}, + }, + } + + 
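+ // Editorial note, not part of the original patch: the expected ID sets encode the + // wildcard semantics of RetrieveByAttribute: a twin whose last definition uses the + // ">" subtopic matches every query on the channel, which is why wildcard.ID appears + // in both the empty-subtopic and the non-empty-subtopic cases.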
for _, tc := range cases { + ids, err := repo.RetrieveByAttribute(context.Background(), chID, tc.subtopic) + assert.Nil(t, err, fmt.Sprintf("got unexpected error: %s", err)) + assert.ElementsMatch(t, ids, tc.ids, fmt.Sprintf("%s: expected ids %v do not match received ids %v", tc.desc, tc.ids, ids)) + } +} + +func TestTwinsRetrieveAll(t *testing.T) { + email := "twin-multi-retrieval@example.com" + name := "magistrala" + metadata := twins.Metadata{ + "type": "test", + } + wrongMetadata := twins.Metadata{ + "wrong": "wrong", + } + + client, err := mongo.Connect(context.Background(), options.Client().ApplyURI(addr)) + require.Nil(t, err, fmt.Sprintf("Creating new MongoDB client expected to succeed: %s.\n", err)) + + db := client.Database(testDB) + _, err = db.Collection(collection).DeleteMany(context.Background(), bson.D{}) + require.Nil(t, err, fmt.Sprintf("unexpected error: %s\n", err)) + + twinRepo := mongodb.NewTwinRepository(db) + + n := uint64(10) + for i := uint64(0); i < n; i++ { + twid, err := idProvider.ID() + assert.Nil(t, err, fmt.Sprintf("got unexpected error: %s", err)) + + tw := twins.Twin{ + Owner: email, + ID: twid, + Metadata: metadata, + } + + // Create first two Twins with name. + if i < 2 { + tw.Name = name + } + + _, err = twinRepo.Save(context.Background(), tw) + require.Nil(t, err, fmt.Sprintf("unexpected error: %s\n", err)) + } + + cases := map[string]struct { + owner string + limit uint64 + offset uint64 + name string + size uint64 + total uint64 + metadata twins.Metadata + }{ + "retrieve all twins with existing owner": { + owner: email, + offset: 0, + limit: n, + size: n, + total: n, + }, + "retrieve subset of twins with existing owner": { + owner: email, + offset: 0, + limit: n / 2, + size: n / 2, + total: n, + }, + "retrieve twins with non-existing owner": { + owner: wrongValue, + offset: 0, + limit: n, + size: 0, + total: 0, + }, + "retrieve twins with existing name": { + offset: 0, + limit: 1, + name: name, + size: 1, + total: 2, + }, + "retrieve twins with non-existing name": { + offset: 0, + limit: n, + name: "wrong", + size: 0, + total: 0, + }, + "retrieve twins with metadata": { + offset: 0, + limit: n, + size: n, + total: n, + metadata: metadata, + }, + "retrieve twins with wrong metadata": { + offset: 0, + limit: n, + size: 0, + total: 0, + metadata: wrongMetadata, + }, + } + + for desc, tc := range cases { + page, err := twinRepo.RetrieveAll(context.Background(), tc.owner, tc.offset, tc.limit, tc.name, tc.metadata) + size := uint64(len(page.Twins)) + assert.Equal(t, tc.size, size, fmt.Sprintf("%s: expected %d got %d\n", desc, tc.size, size)) + assert.Equal(t, tc.total, page.Total, fmt.Sprintf("%s: expected %d got %d\n", desc, tc.total, page.Total)) + assert.Nil(t, err, fmt.Sprintf("%s: expected no error got %s\n", desc, err)) + } +} + +func TestTwinsRemove(t *testing.T) { + client, err := mongo.Connect(context.Background(), options.Client().ApplyURI(addr)) + require.Nil(t, err, fmt.Sprintf("Creating new MongoDB client expected to succeed: %s.\n", err)) + + db := client.Database(testDB) + repo := mongodb.NewTwinRepository(db) + + twid, err := idProvider.ID() + assert.Nil(t, err, fmt.Sprintf("got unexpected error: %s", err)) + + nonexistentTwinID, err := idProvider.ID() + assert.Nil(t, err, fmt.Sprintf("got unexpected error: %s", err)) + + twin := twins.Twin{ + ID: twid, + } + + if _, err := repo.Save(context.Background(), twin); err != nil { + testLog.Error(err.Error()) + } + + cases := []struct { + desc string + id string + err error + }{ + { + desc:
"remove an existing twin", + id: twin.ID, + err: nil, + }, + { + desc: "remove a non-existing twin", + id: nonexistentTwinID, + err: repoerr.ErrNotFound, + }, + } + + for _, tc := range cases { + err := repo.Remove(context.Background(), tc.id) + assert.Equal(t, tc.err, err, fmt.Sprintf("%s: expected %s got %s\n", tc.desc, tc.err, err)) + } +} diff --git a/twins/service.go b/twins/service.go new file mode 100644 index 0000000..25a4a49 --- /dev/null +++ b/twins/service.go @@ -0,0 +1,427 @@ +// Copyright (c) Abstract Machines +// SPDX-License-Identifier: Apache-2.0 + +package twins + +import ( + "context" + "encoding/json" + "fmt" + "log/slog" + "math" + "time" + + "github.com/absmach/magistrala" + "github.com/absmach/magistrala/pkg/errors" + svcerr "github.com/absmach/magistrala/pkg/errors/service" + "github.com/absmach/magistrala/pkg/messaging" + "github.com/absmach/senml" +) + +const publisher = "twins" + +// Service specifies an API that must be fullfiled by the domain service +// implementation, and all of its decorators (e.g. logging & metrics). +// +//go:generate mockery --name Service --output=./mocks --filename service.go --quiet --note "Copyright (c) Abstract Machines" +type Service interface { + // AddTwin adds new twin related to user identified by the provided key. + AddTwin(ctx context.Context, token string, twin Twin, def Definition) (tw Twin, err error) + + // UpdateTwin updates twin identified by the provided Twin that + // belongs to the user identified by the provided key. + UpdateTwin(ctx context.Context, token string, twin Twin, def Definition) (err error) + + // ViewTwin retrieves data about twin with the provided + // ID belonging to the user identified by the provided key. + ViewTwin(ctx context.Context, token, twinID string) (tw Twin, err error) + + // RemoveTwin removes the twin identified with the provided ID, that + // belongs to the user identified by the provided key. + RemoveTwin(ctx context.Context, token, twinID string) (err error) + + // ListTwins retrieves data about subset of twins that belongs to the + // user identified by the provided key. + ListTwins(ctx context.Context, token string, offset uint64, limit uint64, name string, metadata Metadata) (Page, error) + + // ListStates retrieves data about subset of states that belongs to the + // twin identified by the id. + ListStates(ctx context.Context, token string, offset uint64, limit uint64, twinID string) (StatesPage, error) + + // SaveStates persists states into database + SaveStates(ctx context.Context, msg *messaging.Message) error +} + +const ( + noop = iota + update + save + millisec = 1e6 + nanosec = 1e9 + SubtopicWildcard = ">" +) + +var crudOp = map[string]string{ + "createSucc": "create.success", + "createFail": "create.failure", + "updateSucc": "update.success", + "updateFail": "update.failure", + "getSucc": "get.success", + "getFail": "get.failure", + "removeSucc": "remove.success", + "removeFail": "remove.failure", + "stateSucc": "save.success", + "stateFail": "save.failure", +} + +type twinsService struct { + publisher messaging.Publisher + auth magistrala.AuthServiceClient + twins TwinRepository + states StateRepository + idProvider magistrala.IDProvider + channelID string + twinCache TwinCache + logger *slog.Logger +} + +var _ Service = (*twinsService)(nil) + +// New instantiates the twins service implementation. 
+
+// New instantiates the twins service implementation.
+func New(publisher messaging.Publisher, auth magistrala.AuthServiceClient, twins TwinRepository, tcache TwinCache, sr StateRepository, idp magistrala.IDProvider, chann string, logger *slog.Logger) Service {
+	return &twinsService{
+		publisher:  publisher,
+		auth:       auth,
+		twins:      twins,
+		twinCache:  tcache,
+		states:     sr,
+		idProvider: idp,
+		channelID:  chann,
+		logger:     logger,
+	}
+}
+
+func (ts *twinsService) AddTwin(ctx context.Context, token string, twin Twin, def Definition) (tw Twin, err error) {
+	var id string
+	var b []byte
+	defer ts.publish(ctx, &id, &err, crudOp["createSucc"], crudOp["createFail"], &b)
+	res, err := ts.auth.Identify(ctx, &magistrala.IdentityReq{Token: token})
+	if err != nil {
+		return Twin{}, errors.Wrap(svcerr.ErrAuthentication, err)
+	}
+
+	twin.ID, err = ts.idProvider.ID()
+	if err != nil {
+		return Twin{}, err
+	}
+
+	twin.Owner = res.GetId()
+
+	t := time.Now()
+	twin.Created = t
+	twin.Updated = t
+
+	if def.Attributes == nil {
+		def.Attributes = []Attribute{}
+	}
+	if def.Delta == 0 {
+		def.Delta = millisec
+	}
+
+	def.Created = time.Now()
+	def.ID = 0
+	twin.Definitions = append(twin.Definitions, def)
+
+	twin.Revision = 0
+	if _, err = ts.twins.Save(ctx, twin); err != nil {
+		return Twin{}, errors.Wrap(svcerr.ErrCreateEntity, err)
+	}
+
+	id = twin.ID
+	b, err = json.Marshal(twin)
+
+	return twin, ts.twinCache.Save(ctx, twin)
+}
+
+func (ts *twinsService) UpdateTwin(ctx context.Context, token string, twin Twin, def Definition) (err error) {
+	var b []byte
+	var id string
+	defer ts.publish(ctx, &id, &err, crudOp["updateSucc"], crudOp["updateFail"], &b)
+
+	_, err = ts.auth.Identify(ctx, &magistrala.IdentityReq{Token: token})
+	if err != nil {
+		return errors.Wrap(svcerr.ErrAuthentication, err)
+	}
+
+	tw, err := ts.twins.RetrieveByID(ctx, twin.ID)
+	if err != nil {
+		return errors.Wrap(svcerr.ErrNotFound, err)
+	}
+
+	revision := false
+
+	if twin.Name != "" {
+		revision = true
+		tw.Name = twin.Name
+	}
+
+	if len(def.Attributes) > 0 {
+		revision = true
+		def.Created = time.Now()
+		def.ID = tw.Definitions[len(tw.Definitions)-1].ID + 1
+		tw.Definitions = append(tw.Definitions, def)
+	}
+
+	if len(twin.Metadata) > 0 {
+		revision = true
+		tw.Metadata = twin.Metadata
+	}
+
+	if !revision {
+		return errors.ErrMalformedEntity
+	}
+
+	tw.Updated = time.Now()
+	tw.Revision++
+
+	if err := ts.twins.Update(ctx, tw); err != nil {
+		return errors.Wrap(svcerr.ErrUpdateEntity, err)
+	}
+
+	id = twin.ID
+	b, err = json.Marshal(tw)
+
+	return ts.twinCache.Update(ctx, twin)
+}
+
+func (ts *twinsService) ViewTwin(ctx context.Context, token, twinID string) (tw Twin, err error) {
+	var b []byte
+	defer ts.publish(ctx, &twinID, &err, crudOp["getSucc"], crudOp["getFail"], &b)
+
+	_, err = ts.auth.Identify(ctx, &magistrala.IdentityReq{Token: token})
+	if err != nil {
+		return Twin{}, errors.Wrap(svcerr.ErrAuthentication, err)
+	}
+
+	twin, err := ts.twins.RetrieveByID(ctx, twinID)
+	if err != nil {
+		return Twin{}, errors.Wrap(svcerr.ErrNotFound, err)
+	}
+
+	b, err = json.Marshal(twin)
+
+	return twin, nil
+}
+
+func (ts *twinsService) RemoveTwin(ctx context.Context, token, twinID string) (err error) {
+	var b []byte
+	defer ts.publish(ctx, &twinID, &err, crudOp["removeSucc"], crudOp["removeFail"], &b)
+
+	_, err = ts.auth.Identify(ctx, &magistrala.IdentityReq{Token: token})
+	if err != nil {
+		return errors.Wrap(svcerr.ErrAuthentication, err)
+	}
+
+	if err := ts.twins.Remove(ctx, twinID); err != nil {
+		return errors.Wrap(svcerr.ErrRemoveEntity, err)
+	}
+
+	return ts.twinCache.Remove(ctx, twinID)
+}
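Each CRUD method above wires its named error result and payload into a deferred publish call through pointers, so the deferred function observes the final values at return time. A self-contained sketch of just that idiom, with hypothetical names:

package main

import (
	"errors"
	"fmt"
)

// audit mirrors the publish defer in service.go: it dereferences its
// pointers only when the surrounding function returns, so it sees the
// final values of the named result and the payload.
func audit(err *error, succOp, failOp string, payload *[]byte) {
	op := succOp
	if *err != nil {
		op = failOp
	}
	fmt.Printf("event=%s payload=%s\n", op, *payload)
}

func do(fail bool) (err error) {
	var b []byte
	defer audit(&err, "op.success", "op.failure", &b)

	if fail {
		return errors.New("boom") // audit reports op.failure
	}
	b = []byte(`{"ok":true}`)
	return nil // audit reports op.success with the payload
}

func main() {
	_ = do(false)
	_ = do(true)
}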
+
+func (ts *twinsService) ListTwins(ctx context.Context, token string, offset, limit uint64, name string, metadata Metadata) (Page, error) {
+	res, err := ts.auth.Identify(ctx, &magistrala.IdentityReq{Token: token})
+	if err != nil {
+		return Page{}, errors.Wrap(svcerr.ErrAuthentication, err)
+	}
+
+	return ts.twins.RetrieveAll(ctx, res.GetId(), offset, limit, name, metadata)
+}
+
+func (ts *twinsService) ListStates(ctx context.Context, token string, offset, limit uint64, twinID string) (StatesPage, error) {
+	_, err := ts.auth.Identify(ctx, &magistrala.IdentityReq{Token: token})
+	if err != nil {
+		return StatesPage{}, svcerr.ErrAuthentication
+	}
+
+	return ts.states.RetrieveAll(ctx, offset, limit, twinID)
+}
+
+func (ts *twinsService) SaveStates(ctx context.Context, msg *messaging.Message) error {
+	var ids []string
+
+	channel, subtopic := msg.GetChannel(), msg.GetSubtopic()
+	ids, err := ts.twinCache.IDs(ctx, channel, subtopic)
+	if err != nil {
+		return err
+	}
+	if len(ids) < 1 {
+		ids, err = ts.twins.RetrieveByAttribute(ctx, channel, subtopic)
+		if err != nil {
+			return err
+		}
+		if len(ids) < 1 {
+			return nil
+		}
+		if err := ts.twinCache.SaveIDs(ctx, channel, subtopic, ids); err != nil {
+			return err
+		}
+	}
+
+	for _, id := range ids {
+		if err := ts.saveState(ctx, msg, id); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (ts *twinsService) saveState(ctx context.Context, msg *messaging.Message, twinID string) error {
+	var b []byte
+	var err error
+
+	defer ts.publish(ctx, &twinID, &err, crudOp["stateSucc"], crudOp["stateFail"], &b)
+
+	tw, err := ts.twins.RetrieveByID(ctx, twinID)
+	if err != nil {
+		return fmt.Errorf("retrieving twin for %s failed: %s", msg.GetPublisher(), err)
+	}
+
+	var recs []senml.Record
+	if err := json.Unmarshal(msg.GetPayload(), &recs); err != nil {
+		return fmt.Errorf("unmarshal payload for %s failed: %s", msg.GetPublisher(), err)
+	}
+
+	st, err := ts.states.RetrieveLast(ctx, tw.ID)
+	if err != nil {
+		return fmt.Errorf("retrieve last state for %s failed: %s", msg.GetPublisher(), err)
+	}
+
+	for _, rec := range recs {
+		action := ts.prepareState(&st, &tw, rec, msg)
+		switch action {
+		case noop:
+			return nil
+		case update:
+			if err := ts.states.Update(ctx, st); err != nil {
+				return fmt.Errorf("update state for %s failed: %s", msg.GetPublisher(), err)
+			}
+		case save:
+			if err := ts.states.Save(ctx, st); err != nil {
+				return fmt.Errorf("save state for %s failed: %s", msg.GetPublisher(), err)
+			}
+		}
+	}
+
+	twinID = msg.GetPublisher()
+	b = msg.GetPayload()
+
+	return nil
+}
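prepareState (below) folds a SenML record's BaseTime and Time into a wall-clock timestamp by splitting the fractional seconds with math.Modf. A runnable sketch of just that conversion, with an assumed sample value:

package main

import (
	"fmt"
	"math"
	"time"
)

func main() {
	// recSec plays the role of rec.BaseTime + rec.Time (seconds, possibly fractional).
	recSec := 1720180147.25
	// Whole seconds go to the first argument of time.Unix; the fraction
	// becomes nanoseconds, exactly as in prepareState.
	sec, dec := math.Modf(recSec)
	recTime := time.Unix(int64(sec), int64(dec*1e9))
	fmt.Println(recTime.UTC()) // prints the timestamp with .25s of sub-second precision
}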
+
+func (ts *twinsService) prepareState(st *State, tw *Twin, rec senml.Record, msg *messaging.Message) int {
+	def := tw.Definitions[len(tw.Definitions)-1]
+	st.TwinID = tw.ID
+	st.Definition = def.ID
+
+	if st.Payload == nil {
+		st.Payload = make(map[string]interface{})
+		st.ID = -1 // state is incremented on save -> zero-based index
+	} else {
+		for k := range st.Payload {
+			idx := findAttribute(k, def.Attributes)
+			if idx < 0 || !def.Attributes[idx].PersistState {
+				delete(st.Payload, k)
+			}
+		}
+	}
+
+	recSec := rec.BaseTime + rec.Time
+	recNano := recSec * nanosec
+	sec, dec := math.Modf(recSec)
+	recTime := time.Unix(int64(sec), int64(dec*nanosec))
+
+	action := noop
+	for _, attr := range def.Attributes {
+		if !attr.PersistState {
+			continue
+		}
+		if attr.Channel == msg.GetChannel() && (attr.Subtopic == SubtopicWildcard || attr.Subtopic == msg.GetSubtopic()) {
+			action = update
+			delta := math.Abs(float64(st.Created.UnixNano()) - recNano)
+			if recNano == 0 || delta > float64(def.Delta) {
+				action = save
+				st.ID++
+				st.Created = time.Now()
+				if recNano != 0 {
+					st.Created = recTime
+				}
+			}
+			val := findValue(rec)
+			st.Payload[attr.Name] = val
+
+			break
+		}
+	}
+
+	return action
+}
+
+func findValue(rec senml.Record) interface{} {
+	if rec.Value != nil {
+		return rec.Value
+	}
+	if rec.StringValue != nil {
+		return rec.StringValue
+	}
+	if rec.DataValue != nil {
+		return rec.DataValue
+	}
+	if rec.BoolValue != nil {
+		return rec.BoolValue
+	}
+	if rec.Sum != nil {
+		return rec.Sum
+	}
+	return nil
+}
+
+func findAttribute(name string, attrs []Attribute) int {
+	for i, attr := range attrs {
+		if attr.Name == name {
+			return i
+		}
+	}
+	return -1
+}
+
+func (ts *twinsService) publish(ctx context.Context, twinID *string, err *error, succOp, failOp string, payload *[]byte) {
+	if ts.channelID == "" {
+		return
+	}
+
+	op := succOp
+	if *err != nil {
+		op = failOp
+		esb := []byte((*err).Error())
+		payload = &esb
+	}
+
+	pl := *payload
+	if pl == nil {
+		pl = []byte(fmt.Sprintf("{\"deleted\":\"%s\"}", *twinID))
+	}
+
+	msg := messaging.Message{
+		Channel:   ts.channelID,
+		Subtopic:  op,
+		Payload:   pl,
+		Publisher: publisher,
+		Created:   time.Now().UnixNano(),
+	}
+
+	if err := ts.publisher.Publish(ctx, msg.GetChannel(), &msg); err != nil {
+		ts.logger.Warn(fmt.Sprintf("Failed to publish notification on Message Broker: %s", err))
+	}
+}
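The tests below stub collaborators with testify mocks: each table-driven case arms an expectation with On(...).Return(...) and disarms it with Unset() so cases stay independent. A minimal, self-contained sketch of that pattern, with hypothetical names:

package main

import (
	"fmt"

	"github.com/stretchr/testify/mock"
)

// repoMock embeds mock.Mock; Called records the invocation and returns
// whatever the active On(...).Return(...) expectation supplies.
type repoMock struct{ mock.Mock }

func (r *repoMock) Get(id string) (string, error) {
	args := r.Called(id)
	return args.String(0), args.Error(1)
}

func main() {
	r := new(repoMock)
	call := r.On("Get", "42").Return("twin", nil)
	v, err := r.Get("42")
	fmt.Println(v, err) // twin <nil>
	call.Unset() // drop the expectation so the next case can stub differently
}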
diff --git a/twins/service_test.go b/twins/service_test.go
new file mode 100644
index 0000000..36ab960
--- /dev/null
+++ b/twins/service_test.go
@@ -0,0 +1,596 @@
+// Copyright (c) Abstract Machines
+// SPDX-License-Identifier: Apache-2.0
+
+package twins_test
+
+import (
+	"context"
+	"fmt"
+	"testing"
+
+	"github.com/absmach/magistrala"
+	authmocks "github.com/absmach/magistrala/auth/mocks"
+	mglog "github.com/absmach/magistrala/logger"
+	"github.com/absmach/magistrala/pkg/errors"
+	svcerr "github.com/absmach/magistrala/pkg/errors/service"
+	"github.com/absmach/magistrala/pkg/uuid"
+	"github.com/absmach/mg-contrib/pkg/testsutil"
+	"github.com/absmach/mg-contrib/twins"
+	"github.com/absmach/mg-contrib/twins/mocks"
+	"github.com/absmach/senml"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/mock"
+)
+
+const (
+	twinName = "name"
+	wrongID  = ""
+	token    = "token"
+	email    = "user@example.com"
+	numRecs  = 100
+	retained = "saved"
+	validID  = "123e4567-e89b-12d3-a456-426614174000"
+)
+
+var (
+	subtopics = []string{"engine", "chassis", "wheel_2"}
+	channels  = []string{"01ec3c3e-0e66-4e69-9751-a0545b44e08f", "48061e4f-7c23-4f5c-9012-0f9b7cd9d18d", "5b2180e4-e96b-4469-9dc1-b6745078d0b6"}
+)
+
+func NewService() (twins.Service, *authmocks.AuthClient, *mocks.TwinRepository, *mocks.TwinCache, *mocks.StateRepository) {
+	auth := new(authmocks.AuthClient)
+	twinsRepo := new(mocks.TwinRepository)
+	twinCache := new(mocks.TwinCache)
+	statesRepo := new(mocks.StateRepository)
+	idProvider := uuid.NewMock()
+	subs := map[string]string{"chanID": "chanID"}
+	broker := mocks.NewBroker(subs)
+
+	return twins.New(broker, auth, twinsRepo, twinCache, statesRepo, idProvider, "chanID", mglog.NewMock()), auth, twinsRepo, twinCache, statesRepo
+}
+
+func TestAddTwin(t *testing.T) {
+	svc, auth, twinRepo, twinCache, _ := NewService()
+	twin := twins.Twin{}
+	def := twins.Definition{}
+
+	cases := []struct {
+		desc        string
+		twin        twins.Twin
+		token       string
+		err         error
+		saveErr     error
+		identifyErr error
+		userID      string
+	}{
+		{
+			desc:        "add new twin",
+			twin:        twin,
+			token:       token,
+			err:         nil,
+			saveErr:     nil,
+			identifyErr: nil,
+			userID:      validID,
+		},
+		{
+			desc:        "add twin with wrong credentials",
+			twin:        twin,
+			token:       authmocks.InvalidValue,
+			err:         svcerr.ErrAuthentication,
+			saveErr:     svcerr.ErrCreateEntity,
+			identifyErr: svcerr.ErrAuthentication,
+		},
+	}
+
+	for _, tc := range cases {
+		authCall := auth.On("Identify", context.Background(), &magistrala.IdentityReq{Token: tc.token}).Return(&magistrala.IdentityRes{Id: tc.userID}, tc.identifyErr)
+		repoCall := twinRepo.On("Save", context.Background(), mock.Anything).Return(retained, tc.saveErr)
+		cacheCall := twinCache.On("Save", context.Background(), mock.Anything).Return(tc.err)
+		_, err := svc.AddTwin(context.Background(), tc.token, tc.twin, def)
+		assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("%s: expected %s got %s\n", tc.desc, tc.err, err))
+		authCall.Unset()
+		repoCall.Unset()
+		cacheCall.Unset()
+	}
+}
+
+func TestUpdateTwin(t *testing.T) {
+	svc, auth, twinRepo, twinCache, _ := NewService()
+
+	other := twins.Twin{}
+	def := twins.Definition{}
+	twin := twins.Twin{
+		Owner: email,
+		ID:    testsutil.GenerateUUID(t),
+		Name:  twinName,
+	}
+
+	other.ID = wrongID
+
+	cases := []struct {
+		desc        string
+		twin        twins.Twin
+		token       string
+		err         error
+		retrieveErr error
+		updateErr   error
+		identifyErr error
+		userID      string
+	}{
+		{
+			desc:        "update existing twin",
+			twin:        twin,
+			token:       token,
+			err:         nil,
+			identifyErr: nil,
+			userID:      validID,
+		},
+		{
+			desc:        "update twin with wrong credentials",
+			twin:        twin,
+			token:       authmocks.InvalidValue,
+			err:         svcerr.ErrAuthentication,
+			retrieveErr: svcerr.ErrNotFound,
+			updateErr:   svcerr.ErrUpdateEntity,
+			identifyErr: svcerr.ErrAuthentication,
+		},
+		{
+			desc:        "update non-existing twin",
+			twin:        other,
+			token:       token,
+			err:         svcerr.ErrNotFound,
+			retrieveErr: svcerr.ErrNotFound,
+			updateErr:   svcerr.ErrUpdateEntity,
+			identifyErr: nil,
+			userID:      validID,
+		},
+	}
+
+	for _, tc := range cases {
+		authCall := auth.On("Identify", context.Background(), &magistrala.IdentityReq{Token: tc.token}).Return(&magistrala.IdentityRes{Id: tc.userID}, tc.identifyErr)
+		repoCall := twinRepo.On("RetrieveByID", context.Background(), tc.twin.ID).Return(tc.twin, tc.retrieveErr)
+		repoCall1 := twinRepo.On("Update", context.Background(), mock.Anything).Return(tc.updateErr)
+		cacheCall := twinCache.On("Update", context.Background(), mock.Anything).Return(tc.err)
+		err := svc.UpdateTwin(context.Background(), tc.token, tc.twin, def)
+		assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("%s: expected %s got %s\n", tc.desc, tc.err, err))
+		authCall.Unset()
+		repoCall.Unset()
+		repoCall1.Unset()
+		cacheCall.Unset()
+	}
+}
+
+func TestViewTwin(t *testing.T) {
+	svc, auth, twinRepo, _, _ := NewService()
+
+	twin := twins.Twin{
+		Owner: email,
+		ID:    testsutil.GenerateUUID(t),
+		Name:  twinName,
+	}
+
+	cases := []struct {
+		desc        string
+		id          string
+		token       string
+		err         error
+		identifyErr error
+		userID      string
+	}{
+		{
+			desc:        "view existing twin",
+			id:          twin.ID,
+			token:       token,
+			err:         nil,
+			identifyErr: nil,
+			userID:      validID,
+		},
+		{
+			desc:        "view twin with wrong credentials",
+			id:          twin.ID,
+			token:       authmocks.InvalidValue,
+			err:         svcerr.ErrAuthentication,
+			identifyErr: svcerr.ErrAuthentication,
+		},
+		{
+			desc:        "view non-existing twin",
+			id:          wrongID,
+			token:       token,
+			err:         svcerr.ErrNotFound,
+			identifyErr: nil,
+			userID:      validID,
+		},
+	}
+
+	for _, tc := range cases {
+		authCall := auth.On("Identify", context.Background(), &magistrala.IdentityReq{Token: tc.token}).Return(&magistrala.IdentityRes{Id: tc.userID}, tc.identifyErr)
+		repoCall := twinRepo.On("RetrieveByID", context.Background(), tc.id).Return(twins.Twin{}, tc.err)
+		_, err := svc.ViewTwin(context.Background(), tc.token, tc.id)
+		assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("%s: expected %s got %s\n", tc.desc, tc.err, err))
+		authCall.Unset()
+		repoCall.Unset()
+	}
+}
+
+func TestListTwins(t *testing.T) {
+	svc, auth, twinRepo, _, _ := NewService()
+	twin := twins.Twin{Name: twinName, Owner: email}
+	m := make(map[string]interface{})
+	m["serial"] = "123456"
+	twin.Metadata = m
+
+	n := uint64(10)
+
+	cases := []struct {
+		desc        string
+		token       string
+		offset      uint64
+		limit       uint64
+		size        uint64
+		metadata    map[string]interface{}
+		err         error
+		repoerr     error
+		identifyErr error
+		userID      string
+	}{
+		{
+			desc:        "list all twins",
+			token:       token,
+			offset:      0,
+			limit:       n,
+			size:        n,
+			err:         nil,
+			identifyErr: nil,
+			userID:      validID,
+		},
+		{
+			desc:        "list with zero limit",
+			token:       token,
+			limit:       0,
+			offset:      0,
+			size:        0,
+			err:         nil,
+			identifyErr: nil,
+			userID:      validID,
+		},
+		{
+			desc:        "list with offset and limit",
+			token:       token,
+			offset:      8,
+			limit:       5,
+			size:        2,
+			err:         nil,
+			identifyErr: nil,
+			userID:      validID,
+		},
+		{
+			desc:        "list with wrong credentials",
+			token:       authmocks.InvalidValue,
+			limit:       0,
+			offset:      n,
+			err:         svcerr.ErrAuthentication,
+			identifyErr: svcerr.ErrAuthentication,
+		},
+	}
+
+	for _, tc := range cases {
+		authCall := auth.On("Identify", context.Background(), &magistrala.IdentityReq{Token: tc.token}).Return(&magistrala.IdentityRes{Id: tc.userID}, tc.identifyErr)
+		repoCall := twinRepo.On("RetrieveAll", context.Background(), mock.Anything, tc.offset, tc.limit, twinName, mock.Anything).Return(twins.Page{}, tc.err)
+		_, err := svc.ListTwins(context.Background(), tc.token, tc.offset, tc.limit, twinName, tc.metadata)
+		assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("%s: expected %s got %s\n", tc.desc, tc.err, err))
+		authCall.Unset()
+		repoCall.Unset()
+	}
+}
+
+func TestRemoveTwin(t *testing.T) {
+	svc, auth, twinRepo, twinCache, _ := NewService()
+	twin := twins.Twin{
+		Owner: email,
+		ID:    testsutil.GenerateUUID(t),
+		Name:  twinName,
+	}
+
+	cases := []struct {
+		desc        string
+		id          string
+		token       string
+		err         error
+		removeErr   error
+		identifyErr error
+		userID      string
+	}{
+		{
+			desc:        "remove twin with wrong credentials",
+			id:          twin.ID,
+			token:       authmocks.InvalidValue,
+			err:         svcerr.ErrAuthentication,
+			removeErr:   svcerr.ErrRemoveEntity,
+			identifyErr: svcerr.ErrAuthentication,
+		},
+		{
+			desc:        "remove existing twin",
+			id:          twin.ID,
+			token:       token,
+			err:         nil,
+			removeErr:   nil,
+			identifyErr: nil,
+			userID:      validID,
+		},
+		{
+			desc:        "remove removed twin",
+			id:          twin.ID,
+			token:       token,
+			err:         nil,
+			removeErr:   nil,
+			identifyErr: nil,
+			userID:      validID,
+		},
+		{
+			desc:        "remove non-existing twin",
+			id:          wrongID,
+			token:       token,
+			err:         nil,
+			removeErr:   nil,
+			identifyErr: nil,
+			userID:      validID,
+		},
+	}
+
+	for _, tc := range cases {
+		authCall := auth.On("Identify", context.Background(), &magistrala.IdentityReq{Token: tc.token}).Return(&magistrala.IdentityRes{Id: tc.userID}, tc.identifyErr)
+		repoCall := twinRepo.On("Remove", context.Background(), tc.id).Return(tc.removeErr)
+		cacheCall := twinCache.On("Remove", context.Background(), tc.id).Return(tc.err)
+		err := svc.RemoveTwin(context.Background(), tc.token, tc.id)
+		assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("%s: expected %s got %s\n", tc.desc, tc.err, err))
+		authCall.Unset()
+		repoCall.Unset()
+		cacheCall.Unset()
+	}
+}
+
+func TestSaveStates(t *testing.T) {
+	svc, auth, twinRepo, twinCache, stateRepo := NewService()
+
+	def := mocks.CreateDefinition(channels[0:2], subtopics[0:2])
+	twin := twins.Twin{
+		Owner:       email,
+		ID:          testsutil.GenerateUUID(t),
+		Name:        twinName,
+		Definitions: []twins.Definition{def},
+	}
+
+	attr := def.Attributes[0]
+	attrSansTwin := mocks.CreateDefinition(channels[2:3], subtopics[2:3]).Attributes[0]
+
+	defWildcard := mocks.CreateDefinition(channels[0:2], []string{twins.SubtopicWildcard, twins.SubtopicWildcard})
+	twWildcard := twins.Twin{
+		Definitions: []twins.Definition{defWildcard},
+	}
+
+	recs := make([]senml.Record, numRecs)
+
+	var ttlAdded uint64
+
+	cases := []struct {
+		desc   string
+		recs   []senml.Record
+		attr   twins.Attribute
+		size   uint64
+		err    error
+		String []string
+		page   twins.StatesPage
+	}{
+		{
+			desc: "add 100 states",
+			recs: recs,
+			attr: attr,
+			size: numRecs,
+			err:  nil,
+			page: twins.StatesPage{
+				PageMetadata: twins.PageMetadata{
+					Total: numRecs,
+				},
+			},
+		},
+		{
+			desc: "add 20 states",
+			recs: recs[10:30],
+			attr: attr,
+			size: 20,
+			err:  nil,
+			page: twins.StatesPage{
+				PageMetadata: twins.PageMetadata{
+					Total: numRecs + 20,
+				},
+			},
+		},
+		{
+			desc: "add 20 states for attribute without twin",
+			recs: recs[30:50],
+			size: 0,
+			attr: attrSansTwin,
+			err:  svcerr.ErrNotFound,
+			page: twins.StatesPage{
+				PageMetadata: twins.PageMetadata{
+					Total: numRecs + 20,
+				},
+			},
+		},
+		{
+			desc: "use empty senml record",
+			recs: []senml.Record{},
+			attr: attr,
+			size: 0,
+			err:  nil,
+			page: twins.StatesPage{
+				PageMetadata: twins.PageMetadata{
+					Total: numRecs + 20,
+				},
+			},
+		},
+	}
+
+	for _, tc := range cases {
+		repoCall := auth.On("Identify", context.TODO(), &magistrala.IdentityReq{Token: token}).Return(&magistrala.IdentityRes{Id: testsutil.GenerateUUID(t)}, nil)
+		message, err := mocks.CreateMessage(tc.attr, tc.recs)
+		assert.Nil(t, err, fmt.Sprintf("unexpected error: %s", err))
+
+		repoCall1 := twinRepo.On("RetrieveByAttribute", context.Background(), mock.Anything, mock.Anything).Return(tc.String, nil)
+		repoCall2 := twinRepo.On("SaveIDs", context.Background(), mock.Anything, mock.Anything, mock.Anything).Return(tc.err)
+		repoCall3 := twinCache.On("IDs", context.Background(), mock.Anything, mock.Anything).Return(tc.String, nil)
+		err = svc.SaveStates(context.Background(), message)
+		assert.Nil(t, err, fmt.Sprintf("unexpected error: %s", err))
+
+		ttlAdded += tc.size
+		repoCall4 := stateRepo.On("RetrieveAll", context.TODO(), mock.Anything, mock.Anything, twin.ID).Return(tc.page, nil)
+		page, err := svc.ListStates(context.TODO(), token, 0, 10, twin.ID)
+		assert.Nil(t, err, fmt.Sprintf("unexpected error: %s", err))
+		assert.Equal(t, ttlAdded, page.Total, fmt.Sprintf("%s: expected %d total got %d total\n", tc.desc, ttlAdded, page.Total))
+
+		repoCall5 := stateRepo.On("RetrieveAll", context.TODO(), mock.Anything, mock.Anything, twWildcard.ID).Return(tc.page, nil)
+		page, err = svc.ListStates(context.TODO(), token, 0, 10, twWildcard.ID)
+		assert.Nil(t, err, fmt.Sprintf("unexpected error: %s", err))
+		assert.Equal(t, ttlAdded, page.Total, fmt.Sprintf("%s: expected %d total got %d total\n", tc.desc, ttlAdded, page.Total))
+		repoCall.Unset()
+		repoCall1.Unset()
+		repoCall2.Unset()
+		repoCall3.Unset()
+		repoCall4.Unset()
+		repoCall5.Unset()
+	}
+}
+
+func TestListStates(t *testing.T) {
+	svc, auth, _, _, stateRepo := NewService()
+
+	def := mocks.CreateDefinition(channels[0:2], subtopics[0:2])
+	twin := twins.Twin{
+		Owner:       email,
+		ID:          testsutil.GenerateUUID(t),
+		Name:        twinName,
+		Definitions: []twins.Definition{def},
+	}
+
+	tw2 := twins.Twin{
+		Owner:       email,
+		Definitions: []twins.Definition{mocks.CreateDefinition(channels[2:3], subtopics[2:3])},
+	}
+
+	cases := []struct {
+		desc        string
+		id          string
+		token       string
+		offset      uint64
+		limit       uint64
+		size        int
+		err         error
+		page        twins.StatesPage
+		identifyErr error
+		userID      string
+	}{
+		{
+			desc:   "get a list of first 10 states",
+			id:     twin.ID,
+			token:  token,
+			offset: 0,
+			limit:  10,
+			size:   10,
+			err:    nil,
+			page: twins.StatesPage{
+				States: genStates(10),
+			},
+			identifyErr: nil,
+			userID:      validID,
+		},
+		{
+			desc:   "get a list of last 10 states",
+			id:     twin.ID,
+			token:  token,
+			offset: numRecs - 10,
+			limit:  numRecs,
+			size:   10,
+			err:    nil,
+			page: twins.StatesPage{
+				States: genStates(10),
+			},
+			identifyErr: nil,
+			userID:      validID,
+		},
+		{
+			desc:   "get a list of last 10 states with limit > numRecs",
+			id:     twin.ID,
+			token:  token,
+			offset: numRecs - 10,
+			limit:  numRecs + 10,
+			size:   10,
+			err:    nil,
+			page: twins.StatesPage{
+				States: genStates(10),
+			},
+			identifyErr: nil,
+			userID:      validID,
+		},
+		{
+			desc:        "get a list of first 10 states with offset == numRecs",
+			id:          twin.ID,
+			token:       token,
+			offset:      numRecs,
+			limit:       numRecs + 10,
+			size:        0,
+			err:         nil,
+			identifyErr: nil,
+			userID:      validID,
+		},
+		{
+			desc:        "get a list with wrong user token",
+			id:          twin.ID,
+			token:       authmocks.InvalidValue,
+			offset:      0,
+			limit:       10,
+			size:        0,
+			err:         svcerr.ErrAuthentication,
+			identifyErr: svcerr.ErrAuthentication,
+		},
+		{
+			desc:        "get a list with id of non-existent twin",
+			id:          "1234567890",
+			token:       token,
+			offset:      0,
+			limit:       10,
+			size:        0,
+			err:         nil,
+			identifyErr: nil,
+			userID:      validID,
+		},
+		{
+			desc:        "get a list with id of existing twin without states",
+			id:          tw2.ID,
+			token:       token,
+			offset:      0,
+			limit:       10,
+			size:        0,
+			err:         nil,
+			identifyErr: nil,
+			userID:      validID,
+		},
+	}
+
+	for _, tc := range cases {
+		repoCall := auth.On("Identify", context.TODO(), &magistrala.IdentityReq{Token: tc.token}).Return(&magistrala.IdentityRes{Id: tc.userID}, tc.identifyErr)
+		repoCall1 := stateRepo.On("RetrieveAll", context.TODO(), mock.Anything, mock.Anything, tc.id).Return(tc.page, nil)
+		page, err := svc.ListStates(context.TODO(), tc.token, tc.offset, tc.limit, tc.id)
+		assert.Equal(t, tc.err, err, fmt.Sprintf("%s: expected %s got %s\n", tc.desc, tc.err, err))
+		assert.Equal(t, tc.size, len(page.States), fmt.Sprintf("%s: expected %d total got %d total\n", tc.desc, tc.size, len(page.States)))
+		repoCall.Unset()
+		repoCall1.Unset()
+	}
+}
+
+func genStates(length int) []twins.State {
+	states := make([]twins.State, length)
+	for i := range states {
+		states[i] = twins.State{}
+	}
+	return states
+}
diff --git a/twins/states.go b/twins/states.go
new file mode 100644
index 0000000..2ce0d91
--- /dev/null
+++ b/twins/states.go
@@ -0,0 +1,45 @@
+// Copyright (c) Abstract Machines
+// SPDX-License-Identifier: Apache-2.0
+
+package twins
+
+import (
+	"context"
+	"time"
+)
+
+// State stores an actual snapshot of the entity's values.
+type State struct {
+	TwinID     string
+	ID         int64
+	Definition int
+	Created    time.Time
+	Payload    map[string]interface{}
+}
+
+// StatesPage contains page related metadata as well as a list of states that
+// belong to this page.
+type StatesPage struct {
+	PageMetadata
+	States []State
+}
+
+// StateRepository specifies a state persistence API.
+//
+//go:generate mockery --name StateRepository --output=./mocks --filename states.go --quiet --note "Copyright (c) Abstract Machines"
+type StateRepository interface {
+	// Save persists the state.
+	Save(ctx context.Context, state State) error
+
+	// Update updates the state.
+	Update(ctx context.Context, state State) error
+
+	// Count returns the number of states related to the given twin.
+	Count(ctx context.Context, twin Twin) (int64, error)
+
+	// RetrieveAll retrieves the subset of states related to the twin specified by ID.
+	RetrieveAll(ctx context.Context, offset uint64, limit uint64, twinID string) (StatesPage, error)
+
+	// RetrieveLast retrieves the last saved state.
+	RetrieveLast(ctx context.Context, twinID string) (State, error)
+}
diff --git a/twins/tracing/doc.go b/twins/tracing/doc.go
new file mode 100644
index 0000000..ede7e05
--- /dev/null
+++ b/twins/tracing/doc.go
@@ -0,0 +1,6 @@
+// Copyright (c) Abstract Machines
+// SPDX-License-Identifier: Apache-2.0
+
+// Package tracing contains middlewares that will add spans
+// to existing traces.
+package tracing
diff --git a/twins/tracing/states.go b/twins/tracing/states.go
new file mode 100644
index 0000000..1148e1e
--- /dev/null
+++ b/twins/tracing/states.go
@@ -0,0 +1,69 @@
+// Copyright (c) Abstract Machines
+// SPDX-License-Identifier: Apache-2.0
+
+package tracing
+
+import (
+	"context"
+
+	"github.com/absmach/mg-contrib/twins"
+	"go.opentelemetry.io/otel/trace"
+)
+
+const (
+	saveStateOp         = "save_state"
+	updateStateOp       = "update_state"
+	countStatesOp       = "count_states"
+	retrieveAllStatesOp = "retrieve_all_states"
+)
+
+var _ twins.StateRepository = (*stateRepositoryMiddleware)(nil)
+
+type stateRepositoryMiddleware struct {
+	tracer trace.Tracer
+	repo   twins.StateRepository
+}
+
+// StateRepositoryMiddleware tracks requests and their latency, and adds spans
+// to the context.
+func StateRepositoryMiddleware(tracer trace.Tracer, repo twins.StateRepository) twins.StateRepository {
+	return stateRepositoryMiddleware{
+		tracer: tracer,
+		repo:   repo,
+	}
+}
+
+func (trm stateRepositoryMiddleware) Save(ctx context.Context, st twins.State) error {
+	ctx, span := createSpan(ctx, trm.tracer, saveStateOp)
+	defer span.End()
+
+	return trm.repo.Save(ctx, st)
+}
+
+func (trm stateRepositoryMiddleware) Update(ctx context.Context, st twins.State) error {
+	ctx, span := createSpan(ctx, trm.tracer, updateStateOp)
+	defer span.End()
+
+	return trm.repo.Update(ctx, st)
+}
+
+func (trm stateRepositoryMiddleware) Count(ctx context.Context, tw twins.Twin) (int64, error) {
+	ctx, span := createSpan(ctx, trm.tracer, countStatesOp)
+	defer span.End()
+
+	return trm.repo.Count(ctx, tw)
+}
+
+func (trm stateRepositoryMiddleware) RetrieveAll(ctx context.Context, offset, limit uint64, twinID string) (twins.StatesPage, error) {
+	ctx, span := createSpan(ctx, trm.tracer, retrieveAllStatesOp)
+	defer span.End()
+
+	return trm.repo.RetrieveAll(ctx, offset, limit, twinID)
+}
+
+func (trm stateRepositoryMiddleware) RetrieveLast(ctx context.Context, twinID string) (twins.State, error) {
+	ctx, span := createSpan(ctx, trm.tracer, retrieveAllStatesOp)
+	defer span.End()
+
+	return trm.repo.RetrieveLast(ctx, twinID)
+}
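Both tracing files follow the same decorator shape: wrap the interface, open a span, delegate with the span-carrying context. The essence, reduced to a hypothetical one-method interface (names here are illustrative, not part of the patch):

package main

import (
	"context"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/trace"
)

type Getter interface {
	Get(ctx context.Context, id string) (string, error)
}

// tracedGetter decorates any Getter with one span per call.
type tracedGetter struct {
	tracer trace.Tracer
	next   Getter
}

func (g tracedGetter) Get(ctx context.Context, id string) (string, error) {
	ctx, span := g.tracer.Start(ctx, "get")
	defer span.End()
	// Delegate with the span-carrying context so nested calls nest their spans.
	return g.next.Get(ctx, id)
}

type noopGetter struct{}

func (noopGetter) Get(_ context.Context, id string) (string, error) { return id, nil }

func main() {
	var g Getter = tracedGetter{tracer: otel.Tracer("example"), next: noopGetter{}}
	_, _ = g.Get(context.Background(), "twin-1")
}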
diff --git a/twins/tracing/twins.go b/twins/tracing/twins.go
new file mode 100644
index 0000000..a248329
--- /dev/null
+++ b/twins/tracing/twins.go
@@ -0,0 +1,130 @@
+// Copyright (c) Abstract Machines
+// SPDX-License-Identifier: Apache-2.0
+
+package tracing
+
+import (
+	"context"
+
+	"github.com/absmach/mg-contrib/twins"
+	"go.opentelemetry.io/otel/trace"
+)
+
+const (
+	saveTwinOp                 = "save_twin"
+	saveTwinsOp                = "save_twins"
+	updateTwinOp               = "update_twin"
+	retrieveTwinByIDOp         = "retrieve_twin_by_id"
+	retrieveAllTwinsOp         = "retrieve_all_twins"
+	retrieveTwinsByAttributeOp = "retrieve_twins_by_attribute"
+	removeTwinOp               = "remove_twin"
+)
+
+var _ twins.TwinRepository = (*twinRepositoryMiddleware)(nil)
+
+type twinRepositoryMiddleware struct {
+	tracer trace.Tracer
+	repo   twins.TwinRepository
+}
+
+// TwinRepositoryMiddleware tracks requests and their latency, and adds spans to the context.
+func TwinRepositoryMiddleware(tracer trace.Tracer, repo twins.TwinRepository) twins.TwinRepository {
+	return twinRepositoryMiddleware{
+		tracer: tracer,
+		repo:   repo,
+	}
+}
+
+func (trm twinRepositoryMiddleware) Save(ctx context.Context, tw twins.Twin) (string, error) {
+	ctx, span := createSpan(ctx, trm.tracer, saveTwinOp)
+	defer span.End()
+
+	return trm.repo.Save(ctx, tw)
+}
+
+func (trm twinRepositoryMiddleware) Update(ctx context.Context, tw twins.Twin) error {
+	ctx, span := createSpan(ctx, trm.tracer, updateTwinOp)
+	defer span.End()
+
+	return trm.repo.Update(ctx, tw)
+}
+
+func (trm twinRepositoryMiddleware) RetrieveByID(ctx context.Context, twinID string) (twins.Twin, error) {
+	ctx, span := createSpan(ctx, trm.tracer, retrieveTwinByIDOp)
+	defer span.End()
+
+	return trm.repo.RetrieveByID(ctx, twinID)
+}
+
+func (trm twinRepositoryMiddleware) RetrieveAll(ctx context.Context, owner string, offset, limit uint64, name string, metadata twins.Metadata) (twins.Page, error) {
+	ctx, span := createSpan(ctx, trm.tracer, retrieveAllTwinsOp)
+	defer span.End()
+
+	return trm.repo.RetrieveAll(ctx, owner, offset, limit, name, metadata)
+}
+
+func (trm twinRepositoryMiddleware) RetrieveByAttribute(ctx context.Context, channel, subtopic string) ([]string, error) {
+	ctx, span := createSpan(ctx, trm.tracer, retrieveTwinsByAttributeOp)
+	defer span.End()
+
+	return trm.repo.RetrieveByAttribute(ctx, channel, subtopic)
+}
+
+func (trm twinRepositoryMiddleware) Remove(ctx context.Context, twinID string) error {
+	ctx, span := createSpan(ctx, trm.tracer, removeTwinOp)
+	defer span.End()
+
+	return trm.repo.Remove(ctx, twinID)
+}
+
+type twinCacheMiddleware struct {
+	tracer trace.Tracer
+	cache  twins.TwinCache
+}
+
+// TwinCacheMiddleware tracks requests and their latency, and adds spans to the context.
+func TwinCacheMiddleware(tracer trace.Tracer, cache twins.TwinCache) twins.TwinCache {
+	return twinCacheMiddleware{
+		tracer: tracer,
+		cache:  cache,
+	}
+}
+
+func (tcm twinCacheMiddleware) Save(ctx context.Context, twin twins.Twin) error {
+	ctx, span := createSpan(ctx, tcm.tracer, saveTwinOp)
+	defer span.End()
+
+	return tcm.cache.Save(ctx, twin)
+}
+
+func (tcm twinCacheMiddleware) SaveIDs(ctx context.Context, channel, subtopic string, ids []string) error {
+	ctx, span := createSpan(ctx, tcm.tracer, saveTwinsOp)
+	defer span.End()
+
+	return tcm.cache.SaveIDs(ctx, channel, subtopic, ids)
+}
+
+func (tcm twinCacheMiddleware) Update(ctx context.Context, twin twins.Twin) error {
+	ctx, span := createSpan(ctx, tcm.tracer, updateTwinOp)
+	defer span.End()
+
+	return tcm.cache.Update(ctx, twin)
+}
+
+func (tcm twinCacheMiddleware) IDs(ctx context.Context, channel, subtopic string) ([]string, error) {
+	ctx, span := createSpan(ctx, tcm.tracer, retrieveTwinsByAttributeOp)
+	defer span.End()
+
+	return tcm.cache.IDs(ctx, channel, subtopic)
+}
+
+func (tcm twinCacheMiddleware) Remove(ctx context.Context, twinID string) error {
+	ctx, span := createSpan(ctx, tcm.tracer, removeTwinOp)
+	defer span.End()
+
+	return tcm.cache.Remove(ctx, twinID)
+}
+
+func createSpan(ctx context.Context, tracer trace.Tracer, opName string) (context.Context, trace.Span) {
+	return tracer.Start(ctx, opName)
+}
diff --git a/twins/twins.go b/twins/twins.go
new file mode 100644
index 0000000..49a2b89
--- /dev/null
+++ b/twins/twins.go
@@ -0,0 +1,100 @@
+// Copyright (c) Abstract Machines
+// SPDX-License-Identifier: Apache-2.0
+
+package twins
+
+import (
+	"context"
+	"time"
+)
+
+// Metadata stores arbitrary twin data.
+type Metadata map[string]interface{}
+
+// Attribute stores individual attribute data.
+type Attribute struct {
+	Name         string `json:"name"`
+	Channel      string `json:"channel"`
+	Subtopic     string `json:"subtopic"`
+	PersistState bool   `json:"persist_state"`
+}
+
+// Definition stores entity's attributes.
+type Definition struct {
+	ID         int         `json:"id"`
+	Created    time.Time   `json:"created"`
+	Attributes []Attribute `json:"attributes"`
+	Delta      int64       `json:"delta"`
+}
+
+// Twin is a Magistrala data system representation. Each twin is owned
+// by a single user and is assigned a unique identifier.
+type Twin struct {
+	Owner       string
+	ID          string
+	Name        string
+	Created     time.Time
+	Updated     time.Time
+	Revision    int
+	Definitions []Definition
+	Metadata    Metadata
+}
+
+// PageMetadata contains page metadata that helps navigation.
+type PageMetadata struct {
+	Total  uint64
+	Offset uint64
+	Limit  uint64
+}
+
+// Page contains page related metadata as well as a list of twins that
+// belong to this page.
+type Page struct {
+	PageMetadata
+	Twins []Twin
+}
+
+// TwinRepository specifies a twin persistence API.
+//
+//go:generate mockery --name TwinRepository --output=./mocks --filename repository.go --quiet --note "Copyright (c) Abstract Machines"
+type TwinRepository interface {
+	// Save persists the twin.
+	Save(ctx context.Context, twin Twin) (string, error)
+
+	// Update performs an update to the existing twin. A non-nil error is
+	// returned to indicate operation failure.
+	Update(ctx context.Context, twin Twin) error
+
+	// RetrieveByID retrieves the twin having the provided identifier.
+	RetrieveByID(ctx context.Context, twinID string) (Twin, error)
+
+	// RetrieveByAttribute retrieves the IDs of twins whose definitions contain
+	// the attribute with the given channel and subtopic.
+	RetrieveByAttribute(ctx context.Context, channel, subtopic string) ([]string, error)
+
+	// RetrieveAll retrieves the subset of twins owned by the specified user.
+	RetrieveAll(ctx context.Context, owner string, offset, limit uint64, name string, metadata Metadata) (Page, error)
+
+	// Remove removes the twin having the provided identifier.
+	Remove(ctx context.Context, twinID string) error
+}
+
+// TwinCache contains the twin caching interface.
+//
+//go:generate mockery --name TwinCache --output=./mocks --filename cache.go --quiet --note "Copyright (c) Abstract Machines"
+type TwinCache interface {
+	// Save stores the twin ID as an element of the channel-subtopic keyed set and vice versa.
+	Save(ctx context.Context, twin Twin) error
+
+	// SaveIDs stores twin IDs as elements of the channel-subtopic keyed set and vice versa.
+	SaveIDs(ctx context.Context, channel, subtopic string, twinIDs []string) error
+
+	// Update updates the twin ID and channel-subtopic attribute mapping.
+	Update(ctx context.Context, twin Twin) error
+
+	// IDs returns twin IDs for the given attribute.
+	IDs(ctx context.Context, channel, subtopic string) ([]string, error)
+
+	// Remove removes the twin from the cache based on the twin ID.
+	Remove(ctx context.Context, twinID string) error
+}
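The TwinCache contract keeps a bidirectional mapping between twins and channel-subtopic attribute keys so SaveStates can resolve twin IDs without hitting the repository. A minimal in-memory sketch of the forward direction only; the Redis-backed implementation lives elsewhere in the repository, and the key format here is an assumption:

package main

import (
	"context"
	"fmt"
)

// memTwinCache maps an assumed "channel:subtopic" key to a set of twin IDs.
type memTwinCache struct {
	ids map[string]map[string]struct{}
}

func key(channel, subtopic string) string { return channel + ":" + subtopic }

func (c *memTwinCache) SaveIDs(_ context.Context, channel, subtopic string, twinIDs []string) error {
	k := key(channel, subtopic)
	if c.ids[k] == nil {
		c.ids[k] = map[string]struct{}{}
	}
	for _, id := range twinIDs {
		c.ids[k][id] = struct{}{}
	}
	return nil
}

func (c *memTwinCache) IDs(_ context.Context, channel, subtopic string) ([]string, error) {
	var out []string
	for id := range c.ids[key(channel, subtopic)] {
		out = append(out, id)
	}
	return out, nil
}

func main() {
	c := &memTwinCache{ids: map[string]map[string]struct{}{}}
	_ = c.SaveIDs(context.Background(), "chan-1", "engine", []string{"twin-1", "twin-2"})
	ids, _ := c.IDs(context.Background(), "chan-1", "engine")
	fmt.Println(ids)
}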