From d1bc3af2f417e72ec58421fa5553bd488b53f9ef Mon Sep 17 00:00:00 2001 From: reinkrul Date: Thu, 30 Nov 2023 15:00:28 +0100 Subject: [PATCH 1/2] Discovery: SQLite-based server implementation (#2589) --- README.rst | 13 +- cmd/root.go | 5 + cmd/root_test.go | 2 +- discovery/cmd/cmd.go | 37 ++ discovery/cmd/cmd_test.go | 29 ++ discovery/config.go | 48 +++ discovery/definition.go | 71 ++++ discovery/interface.go | 52 +++ discovery/log/logger.go | 31 ++ discovery/mock.go | 107 +++++ discovery/module.go | 241 +++++++++++ discovery/module_test.go | 262 ++++++++++++ discovery/service-definition-schema.json | 28 ++ discovery/store.go | 384 ++++++++++++++++++ discovery/store_test.go | 373 +++++++++++++++++ discovery/test.go | 224 ++++++++++ discovery/test/duplicate_id/1.json | 49 +++ discovery/test/duplicate_id/2.json | 49 +++ discovery/test/duplicate_id/README.md | 1 + discovery/test/invalid_definition/1.json | 3 + discovery/test/invalid_definition/README.md | 1 + discovery/test/invalid_json/1.json | 1 + discovery/test/invalid_json/README.md | 1 + discovery/test/valid/eoverdracht.json | 49 +++ discovery/test/valid/subdir/README.md | 1 + discovery/test/valid/subdir/empty.json | 2 + docs/index.rst | 1 + docs/pages/deployment/cli-reference.rst | 6 +- docs/pages/deployment/discovery.rst | 29 ++ docs/pages/deployment/server_options.rst | 167 ++++---- go.mod | 1 + makefile | 2 + storage/cmd/cmd.go | 5 +- storage/engine.go | 7 +- storage/engine_test.go | 8 +- storage/mock.go | 28 +- .../2_discoveryservice.down.sql | 4 + .../sql_migrations/2_discoveryservice.up.sql | 53 +++ storage/test.go | 6 +- vcr/pe/schema/v2/schema.go | 16 +- vcr/pe/test/definition_mapping.json | 10 + 41 files changed, 2287 insertions(+), 120 deletions(-) create mode 100644 discovery/cmd/cmd.go create mode 100644 discovery/cmd/cmd_test.go create mode 100644 discovery/config.go create mode 100644 discovery/definition.go create mode 100644 discovery/interface.go create mode 100644 discovery/log/logger.go create mode 100644 discovery/mock.go create mode 100644 discovery/module.go create mode 100644 discovery/module_test.go create mode 100644 discovery/service-definition-schema.json create mode 100644 discovery/store.go create mode 100644 discovery/store_test.go create mode 100644 discovery/test.go create mode 100644 discovery/test/duplicate_id/1.json create mode 100644 discovery/test/duplicate_id/2.json create mode 100644 discovery/test/duplicate_id/README.md create mode 100644 discovery/test/invalid_definition/1.json create mode 100644 discovery/test/invalid_definition/README.md create mode 100644 discovery/test/invalid_json/1.json create mode 100644 discovery/test/invalid_json/README.md create mode 100644 discovery/test/valid/eoverdracht.json create mode 100644 discovery/test/valid/subdir/README.md create mode 100644 discovery/test/valid/subdir/empty.json create mode 100644 docs/pages/deployment/discovery.rst create mode 100644 storage/sql_migrations/2_discoveryservice.down.sql create mode 100644 storage/sql_migrations/2_discoveryservice.up.sql diff --git a/README.rst b/README.rst index f6316b814b..350f0ccb54 100644 --- a/README.rst +++ b/README.rst @@ -176,9 +176,9 @@ The following options can be configured on the server: :widths: 20 30 50 :class: options-table - ==================================== 
=============================================================================================================================================================================================================================================================================================================== ================================================================================================================================================================================================================================== + ==================================== =============================================================================================================================================================================================================================================================================================================== ================================================================================================================================================================================================================================================================================================================================ Key Default Description - ==================================== =============================================================================================================================================================================================================================================================================================================== ================================================================================================================================================================================================================================== + ==================================== =============================================================================================================================================================================================================================================================================================================== ================================================================================================================================================================================================================================================================================================================================ configfile nuts.yaml Nuts config file cpuprofile When set, a CPU profile is written to the given path. Ignored when strictmode is set. datadir ./data Directory where the node stores its files. @@ -207,6 +207,9 @@ The following options can be configured on the server: crypto.vault.pathprefix kv The Vault path prefix. crypto.vault.timeout 5s Timeout of client calls to Vault, in Golang time.Duration string format (e.g. 1s). crypto.vault.token The Vault token. If set it overwrites the VAULT_TOKEN env var. + **Discovery** + discovery.definitions.directory Directory to load Discovery Service Definitions from. If not set, the discovery service will be disabled. If the directory contains JSON files that can't be parsed as service definition, the node will fail to start. + discovery.server.definition_ids [] IDs of the Discovery Service Definitions for which to act as server. If an ID does not map to a loaded service definition, the node will fail to start. 
**Events** events.nats.hostname 0.0.0.0 Hostname for the NATS server events.nats.port 4222 Port where the NATS server listens on @@ -224,7 +227,7 @@ The following options can be configured on the server: http.default.auth.type Whether to enable authentication for the default interface, specify 'token_v2' for bearer token mode or 'token' for legacy bearer token mode. http.default.cors.origin [] When set, enables CORS from the specified origins on the default HTTP interface. **JSONLD** - jsonld.contexts.localmapping [https://w3c-ccg.github.io/lds-jws2020/contexts/lds-jws2020-v1.json=assets/contexts/lds-jws2020-v1.ldjson,https://schema.org=assets/contexts/schema-org-v13.ldjson,https://nuts.nl/credentials/v1=assets/contexts/nuts.ldjson,https://www.w3.org/2018/credentials/v1=assets/contexts/w3c-credentials-v1.ldjson] This setting allows mapping external URLs to local files for e.g. preventing external dependencies. These mappings have precedence over those in remoteallowlist. + jsonld.contexts.localmapping [https://nuts.nl/credentials/v1=assets/contexts/nuts.ldjson,https://www.w3.org/2018/credentials/v1=assets/contexts/w3c-credentials-v1.ldjson,https://w3c-ccg.github.io/lds-jws2020/contexts/lds-jws2020-v1.json=assets/contexts/lds-jws2020-v1.ldjson,https://schema.org=assets/contexts/schema-org-v13.ldjson] This setting allows mapping external URLs to local files for e.g. preventing external dependencies. These mappings have precedence over those in remoteallowlist. jsonld.contexts.remoteallowlist [https://schema.org,https://www.w3.org/2018/credentials/v1,https://w3c-ccg.github.io/lds-jws2020/contexts/lds-jws2020-v1.json] In strict mode, fetching external JSON-LD contexts is not allowed except for context-URLs listed here. **Network** network.bootstrapnodes [] List of bootstrap nodes (':') which the node initially connect to. @@ -252,12 +255,12 @@ The following options can be configured on the server: storage.redis.sentinel.password Password for authenticating to Redis Sentinels. storage.redis.sentinel.username Username for authenticating to Redis Sentinels. storage.redis.tls.truststorefile PEM file containing the trusted CA certificate(s) for authenticating remote Redis servers. Can only be used when connecting over TLS (use 'rediss://' as scheme in address). - storage.sql.connection Connection string for the SQL database. If not set, it defaults to a SQLite database stored inside the configured data directory + storage.sql.connection Connection string for the SQL database. If not set it, defaults to a SQLite database stored inside the configured data directory. Note: using SQLite is not recommended in production environments. If using SQLite anyways, remember to enable foreign keys ('_foreign_keys=on') and the write-ahead-log ('_journal_mode=WAL'). **VCR** vcr.openid4vci.definitionsdir Directory with the additional credential definitions the node could issue (experimental, may change without notice). vcr.openid4vci.enabled true Enable issuing and receiving credentials over OpenID4VCI. vcr.openid4vci.timeout 30s Time-out for OpenID4VCI HTTP client operations. 
- ==================================== =============================================================================================================================================================================================================================================================================================================== ================================================================================================================================================================================================================================== + ==================================== =============================================================================================================================================================================================================================================================================================================== ================================================================================================================================================================================================================================================================================================================================ This table is automatically generated using the configuration flags in the core and engines. When they're changed the options table must be regenerated using the Makefile: diff --git a/cmd/root.go b/cmd/root.go index 4c3c6b302e..a531de27d0 100644 --- a/cmd/root.go +++ b/cmd/root.go @@ -23,6 +23,7 @@ import ( "context" "errors" "fmt" + "github.com/nuts-foundation/nuts-node/discovery" "github.com/nuts-foundation/nuts-node/vdr/resolver" "github.com/nuts-foundation/nuts-node/golden_hammer" @@ -46,6 +47,7 @@ import ( "github.com/nuts-foundation/nuts-node/didman" didmanAPI "github.com/nuts-foundation/nuts-node/didman/api/v1" didmanCmd "github.com/nuts-foundation/nuts-node/didman/cmd" + discoveryCmd "github.com/nuts-foundation/nuts-node/discovery/cmd" "github.com/nuts-foundation/nuts-node/events" eventsCmd "github.com/nuts-foundation/nuts-node/events/cmd" httpEngine "github.com/nuts-foundation/nuts-node/http" @@ -192,6 +194,7 @@ func CreateSystem(shutdownCallback context.CancelFunc) *core.System { vdrInstance := vdr.NewVDR(cryptoInstance, networkInstance, didStore, eventManager) credentialInstance := vcr.NewVCRInstance(cryptoInstance, vdrInstance, networkInstance, jsonld, eventManager, storageInstance, pkiInstance) didmanInstance := didman.NewDidmanInstance(vdrInstance, credentialInstance, jsonld) + discoveryInstance := discovery.New(storageInstance, credentialInstance) authInstance := auth.NewAuthInstance(auth.DefaultConfig(), vdrInstance, credentialInstance, cryptoInstance, didmanInstance, jsonld, pkiInstance) statusEngine := status.NewStatusEngine(system) metricsEngine := core.NewMetricsEngine() @@ -233,6 +236,7 @@ func CreateSystem(shutdownCallback context.CancelFunc) *core.System { system.RegisterEngine(credentialInstance) system.RegisterEngine(networkInstance) system.RegisterEngine(authInstance) + system.RegisterEngine(discoveryInstance) system.RegisterEngine(didmanInstance) system.RegisterEngine(goldenHammer) // HTTP engine MUST be registered last, because when started it dispatches HTTP calls to the registered routes. 
@@ -333,6 +337,7 @@ func serverConfigFlags() *pflag.FlagSet { set.AddFlagSet(eventsCmd.FlagSet()) set.AddFlagSet(pki.FlagSet()) set.AddFlagSet(goldenHammerCmd.FlagSet()) + set.AddFlagSet(discoveryCmd.FlagSet()) return set } diff --git a/cmd/root_test.go b/cmd/root_test.go index 3db937fa69..9f12c828d5 100644 --- a/cmd/root_test.go +++ b/cmd/root_test.go @@ -163,7 +163,7 @@ func Test_CreateSystem(t *testing.T) { system.VisitEngines(func(engine core.Engine) { numEngines++ }) - assert.Equal(t, 15, numEngines) + assert.Equal(t, 16, numEngines) } func Test_ClientCommand_ErrorHandlers(t *testing.T) { diff --git a/discovery/cmd/cmd.go b/discovery/cmd/cmd.go new file mode 100644 index 0000000000..302f812471 --- /dev/null +++ b/discovery/cmd/cmd.go @@ -0,0 +1,37 @@ +/* + * Copyright (C) 2023 Nuts community + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + * + */ + +package cmd + +import ( + "github.com/nuts-foundation/nuts-node/discovery" + "github.com/spf13/pflag" +) + +// FlagSet contains flags relevant for the module. +func FlagSet() *pflag.FlagSet { + defs := discovery.DefaultConfig() + flagSet := pflag.NewFlagSet("discovery", pflag.ContinueOnError) + flagSet.String("discovery.definitions.directory", defs.Definitions.Directory, + "Directory to load Discovery Service Definitions from. If not set, the discovery service will be disabled. "+ + "If the directory contains JSON files that can't be parsed as service definition, the node will fail to start.") + flagSet.StringSlice("discovery.server.definition_ids", defs.Server.DefinitionIDs, + "IDs of the Discovery Service Definitions for which to act as server. "+ + "If an ID does not map to a loaded service definition, the node will fail to start.") + return flagSet +} diff --git a/discovery/cmd/cmd_test.go b/discovery/cmd/cmd_test.go new file mode 100644 index 0000000000..6f4a8c74ef --- /dev/null +++ b/discovery/cmd/cmd_test.go @@ -0,0 +1,29 @@ +/* + * Copyright (C) 2023 Nuts community + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ * + */ + +package cmd + +import ( + "github.com/stretchr/testify/assert" + "testing" +) + +func TestFlagSet(t *testing.T) { + flagset := FlagSet() + assert.NotNil(t, flagset) +} diff --git a/discovery/config.go b/discovery/config.go new file mode 100644 index 0000000000..1a432cbbf6 --- /dev/null +++ b/discovery/config.go @@ -0,0 +1,48 @@ +/* + * Copyright (C) 2023 Nuts community + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + * + */ + +package discovery + +// Config holds the config of the module +type Config struct { + Server ServerConfig `koanf:"server"` + Definitions ServiceDefinitionsConfig `koanf:"definitions"` +} + +// ServiceDefinitionsConfig holds the config for loading Service Definitions. +type ServiceDefinitionsConfig struct { + Directory string `koanf:"directory"` +} + +// ServerConfig holds the config for the server +type ServerConfig struct { + // DefinitionIDs specifies which use case lists the server serves. + DefinitionIDs []string `koanf:"definition_ids"` +} + +// DefaultConfig returns the default configuration. +func DefaultConfig() Config { + return Config{ + Server: ServerConfig{}, + } +} + +// IsServer returns true if the node act as Discovery Server. +func (c Config) IsServer() bool { + return len(c.Server.DefinitionIDs) > 0 +} diff --git a/discovery/definition.go b/discovery/definition.go new file mode 100644 index 0000000000..6315424b65 --- /dev/null +++ b/discovery/definition.go @@ -0,0 +1,71 @@ +/* + * Copyright (C) 2023 Nuts community + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + * + */ + +package discovery + +import ( + "bytes" + "embed" + "encoding/json" + "github.com/nuts-foundation/nuts-node/vcr/pe" + v2 "github.com/nuts-foundation/nuts-node/vcr/pe/schema/v2" + "github.com/santhosh-tekuri/jsonschema" +) + +//go:embed *.json +var jsonSchemaFiles embed.FS +var serviceDefinitionJsonSchema *jsonschema.Schema + +func init() { + serviceDefinitionSchemaData, err := jsonSchemaFiles.ReadFile("service-definition-schema.json") + if err != nil { + panic(err) + } + const schemaURL = "http://nuts.nl/schemas/discovery-service-v0.json" + compiler := v2.Compiler() + if err := compiler.AddResource(schemaURL, bytes.NewReader(serviceDefinitionSchemaData)); err != nil { + panic(err) + } + serviceDefinitionJsonSchema = compiler.MustCompile(schemaURL) +} + +// ServiceDefinition holds the definition of a service. 
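+//
+// For illustration, a minimal service definition document has the following shape (all values
+// are hypothetical and the presentation_definition is abridged):
+//
+//	{
+//	  "id": "urn:example:usecase:2024",
+//	  "endpoint": "https://discovery.example.com/usecase",
+//	  "presentation_max_validity": 36000,
+//	  "presentation_definition": { "id": "example", "input_descriptors": [ ... ] }
+//	}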
+type ServiceDefinition struct { + // ID is the unique identifier of the use case. + ID string `json:"id"` + // Endpoint is the endpoint where the use case list is served. + Endpoint string `json:"endpoint"` + // PresentationDefinition specifies the Presentation ServiceDefinition submissions to the list must conform to, + // according to the Presentation Exchange specification. + PresentationDefinition pe.PresentationDefinition `json:"presentation_definition"` + // PresentationMaxValidity specifies how long submitted presentations are allowed to be valid (in seconds). + PresentationMaxValidity int `json:"presentation_max_validity"` +} + +// ParseServiceDefinition validates the input against the JSON schema for service definitions. +// If the input is valid, it is parsed and returned as a ServiceDefinition. +func ParseServiceDefinition(data []byte) (*ServiceDefinition, error) { + if err := serviceDefinitionJsonSchema.Validate(bytes.NewReader(data)); err != nil { + return nil, err + } + var definition ServiceDefinition + if err := json.Unmarshal(data, &definition); err != nil { + return nil, err + } + return &definition, nil +} diff --git a/discovery/interface.go b/discovery/interface.go new file mode 100644 index 0000000000..81ffc78d19 --- /dev/null +++ b/discovery/interface.go @@ -0,0 +1,52 @@ +/* + * Copyright (C) 2023 Nuts community + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + * + */ + +package discovery + +import ( + "errors" + "github.com/nuts-foundation/go-did/vc" +) + +// Timestamp is value that references a point in the list. +// It is used by clients to request new entries since their last query. +// It's implemented as lamport timestamp (https://en.wikipedia.org/wiki/Lamport_timestamp); +// it is incremented when a new entry is added to the list. +// Pass 0 to start at the beginning of the list. +type Timestamp uint64 + +// ErrServiceNotFound is returned when a service (ID) is not found in the discovery service. +var ErrServiceNotFound = errors.New("discovery service not found") + +// ErrPresentationAlreadyExists is returned when a presentation is added to the discovery service, +// but a presentation with this ID already exists. +var ErrPresentationAlreadyExists = errors.New("presentation already exists") + +// Server defines the API for Discovery Servers. +type Server interface { + // Add registers a presentation on the given Discovery Service. + // If the presentation is not valid or it does not conform to the Service ServiceDefinition, it returns an error. + Add(serviceID string, presentation vc.VerifiablePresentation) error + // Get retrieves the presentations for the given service, starting at the given timestamp. + Get(serviceID string, startAt Timestamp) ([]vc.VerifiablePresentation, *Timestamp, error) +} + +// Client defines the API for Discovery Clients. 
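+//
+// The query passed to Search maps JSON paths of credential properties to the values they must
+// have. A rough usage sketch, given some client of type Client (service ID and query values are
+// hypothetical):
+//
+//	results, err := client.Search("urn:example:usecase:2024", map[string]string{
+//		"type":                                "NutsOrganizationCredential",
+//		"credentialSubject.organization.name": "Example Hospital",
+//	})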
+type Client interface { + Search(serviceID string, query map[string]string) ([]vc.VerifiablePresentation, error) +} diff --git a/discovery/log/logger.go b/discovery/log/logger.go new file mode 100644 index 0000000000..3a4fcf4b4a --- /dev/null +++ b/discovery/log/logger.go @@ -0,0 +1,31 @@ +/* + * Copyright (C) 2023 Nuts community + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + * + */ + +package log + +import ( + "github.com/nuts-foundation/nuts-node/core" + "github.com/sirupsen/logrus" +) + +var _logger = logrus.StandardLogger().WithField(core.LogFieldModule, "Discovery") + +// Logger returns a logger with the module field set +func Logger() *logrus.Entry { + return _logger +} diff --git a/discovery/mock.go b/discovery/mock.go new file mode 100644 index 0000000000..335c3bcd0d --- /dev/null +++ b/discovery/mock.go @@ -0,0 +1,107 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: discovery/interface.go +// +// Generated by this command: +// +// mockgen -destination=discovery/mock.go -package=discovery -source=discovery/interface.go +// +// Package discovery is a generated GoMock package. +package discovery + +import ( + reflect "reflect" + + vc "github.com/nuts-foundation/go-did/vc" + gomock "go.uber.org/mock/gomock" +) + +// MockServer is a mock of Server interface. +type MockServer struct { + ctrl *gomock.Controller + recorder *MockServerMockRecorder +} + +// MockServerMockRecorder is the mock recorder for MockServer. +type MockServerMockRecorder struct { + mock *MockServer +} + +// NewMockServer creates a new mock instance. +func NewMockServer(ctrl *gomock.Controller) *MockServer { + mock := &MockServer{ctrl: ctrl} + mock.recorder = &MockServerMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockServer) EXPECT() *MockServerMockRecorder { + return m.recorder +} + +// Add mocks base method. +func (m *MockServer) Add(serviceID string, presentation vc.VerifiablePresentation) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Add", serviceID, presentation) + ret0, _ := ret[0].(error) + return ret0 +} + +// Add indicates an expected call of Add. +func (mr *MockServerMockRecorder) Add(serviceID, presentation any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Add", reflect.TypeOf((*MockServer)(nil).Add), serviceID, presentation) +} + +// Get mocks base method. +func (m *MockServer) Get(serviceID string, startAt Timestamp) ([]vc.VerifiablePresentation, *Timestamp, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Get", serviceID, startAt) + ret0, _ := ret[0].([]vc.VerifiablePresentation) + ret1, _ := ret[1].(*Timestamp) + ret2, _ := ret[2].(error) + return ret0, ret1, ret2 +} + +// Get indicates an expected call of Get. 
+func (mr *MockServerMockRecorder) Get(serviceID, startAt any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*MockServer)(nil).Get), serviceID, startAt) +} + +// MockClient is a mock of Client interface. +type MockClient struct { + ctrl *gomock.Controller + recorder *MockClientMockRecorder +} + +// MockClientMockRecorder is the mock recorder for MockClient. +type MockClientMockRecorder struct { + mock *MockClient +} + +// NewMockClient creates a new mock instance. +func NewMockClient(ctrl *gomock.Controller) *MockClient { + mock := &MockClient{ctrl: ctrl} + mock.recorder = &MockClientMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockClient) EXPECT() *MockClientMockRecorder { + return m.recorder +} + +// Search mocks base method. +func (m *MockClient) Search(serviceID string, query map[string]string) ([]vc.VerifiablePresentation, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Search", serviceID, query) + ret0, _ := ret[0].([]vc.VerifiablePresentation) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Search indicates an expected call of Search. +func (mr *MockClientMockRecorder) Search(serviceID, query any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Search", reflect.TypeOf((*MockClient)(nil).Search), serviceID, query) +} diff --git a/discovery/module.go b/discovery/module.go new file mode 100644 index 0000000000..ce7fd89642 --- /dev/null +++ b/discovery/module.go @@ -0,0 +1,241 @@ +/* + * Copyright (C) 2023 Nuts community + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + * + */ + +package discovery + +import ( + "errors" + "fmt" + ssi "github.com/nuts-foundation/go-did" + "github.com/nuts-foundation/go-did/vc" + "github.com/nuts-foundation/nuts-node/core" + "github.com/nuts-foundation/nuts-node/storage" + "github.com/nuts-foundation/nuts-node/vcr" + "github.com/nuts-foundation/nuts-node/vcr/credential" + "os" + "path" + "strings" + "time" +) + +const ModuleName = "Discovery" + +var ErrServerModeDisabled = errors.New("node is not a discovery server for this service") + +var _ core.Injectable = &Module{} +var _ core.Runnable = &Module{} +var _ core.Configurable = &Module{} +var _ Server = &Module{} + +var retractionPresentationType = ssi.MustParseURI("RetractedVerifiablePresentation") + +// New creates a new Module. +func New(storageInstance storage.Engine, vcrInstance vcr.VCR) *Module { + return &Module{ + storageInstance: storageInstance, + vcrInstance: vcrInstance, + } +} + +// Module is the main entry point for discovery services. 
+type Module struct { + config Config + storageInstance storage.Engine + store *sqlStore + serverDefinitions map[string]ServiceDefinition + services map[string]ServiceDefinition + vcrInstance vcr.VCR +} + +func (m *Module) Configure(_ core.ServerConfig) error { + if m.config.Definitions.Directory == "" { + return nil + } + var err error + m.services, err = loadDefinitions(m.config.Definitions.Directory) + if err != nil { + return err + } + if len(m.config.Server.DefinitionIDs) > 0 { + // Get the definitions that are enabled for this server + serverDefinitions := make(map[string]ServiceDefinition) + for _, definitionID := range m.config.Server.DefinitionIDs { + if definition, exists := m.services[definitionID]; !exists { + return fmt.Errorf("service definition '%s' not found", definitionID) + } else { + serverDefinitions[definitionID] = definition + } + } + m.serverDefinitions = serverDefinitions + } + return nil +} + +func (m *Module) Start() error { + var err error + m.store, err = newSQLStore(m.storageInstance.GetSQLDatabase(), m.services) + if err != nil { + return err + } + return nil +} + +func (m *Module) Shutdown() error { + return nil +} + +func (m *Module) Name() string { + return ModuleName +} + +func (m *Module) Config() interface{} { + return &m.config +} + +func (m *Module) Add(serviceID string, presentation vc.VerifiablePresentation) error { + // First, simple sanity checks + definition, serviceExists := m.services[serviceID] + if !serviceExists { + return ErrServiceNotFound + } + if _, isMaintainer := m.serverDefinitions[serviceID]; !isMaintainer { + return ErrServerModeDisabled + } + if presentation.Format() != vc.JWTPresentationProofFormat { + return errors.New("only JWT presentations are supported") + } + if presentation.ID == nil { + return errors.New("presentation does not have an ID") + } + expiration := presentation.JWT().Expiration() + if expiration.IsZero() { + return errors.New("presentation does not have an expiration") + } + // VPs should not be valid for too long, as that would prevent the server from pruning them. + if int(expiration.Sub(time.Now()).Seconds()) > definition.PresentationMaxValidity { + return fmt.Errorf("presentation is valid for too long (max %s)", time.Duration(definition.PresentationMaxValidity)*time.Second) + } + // Check if the presentation already exists + credentialSubjectID, err := credential.PresentationSigner(presentation) + if err != nil { + return err + } + exists, err := m.store.exists(serviceID, credentialSubjectID.String(), presentation.ID.String()) + if err != nil { + return err + } + if exists { + return ErrPresentationAlreadyExists + } + // Depending on the presentation type, we need to validate different properties before storing it. 
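+	// Retractions (type "RetractedVerifiablePresentation") must contain no credentials and must
+	// reference an existing presentation of the same signer via the 'retract_jti' JWT claim.
+	// Regular registrations must not be valid longer than the credentials they contain and must
+	// match the service's Presentation Definition.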
+ if presentation.IsType(retractionPresentationType) { + err = m.validateRetraction(definition.ID, presentation) + } else { + err = m.validateRegistration(definition, presentation) + } + if err != nil { + return err + } + // Check signature of presentation and contained credential(s) + _, err = m.vcrInstance.Verifier().VerifyVP(presentation, true, true, nil) + if err != nil { + return fmt.Errorf("presentation verification failed: %w", err) + } + return m.store.add(definition.ID, presentation, nil) +} + +func (m *Module) validateRegistration(definition ServiceDefinition, presentation vc.VerifiablePresentation) error { + // VP can't be valid longer than the credentialRecord it contains + expiration := presentation.JWT().Expiration() + for _, cred := range presentation.VerifiableCredential { + if cred.ExpirationDate != nil && expiration.After(*cred.ExpirationDate) { + return fmt.Errorf("presentation is valid longer than the credential(s) it contains") + } + } + // VP must fulfill the PEX Presentation ServiceDefinition + // We don't have a PresentationSubmission, so we can't use Validate(). + creds, _, err := definition.PresentationDefinition.Match(presentation.VerifiableCredential) + if err != nil { + return err + } + if len(creds) != len(presentation.VerifiableCredential) { + return errors.New("presentation does not fulfill Presentation ServiceDefinition") + } + return nil +} + +func (m *Module) validateRetraction(serviceID string, presentation vc.VerifiablePresentation) error { + // Presentation might be a retraction (deletion of an earlier credentialRecord) must contain no credentials, and refer to the VP being retracted by ID. + // If those conditions aren't met, we don't need to register the retraction. + if len(presentation.VerifiableCredential) > 0 { + return errors.New("retraction presentation must not contain credentials") + } + // Check that the retraction refers to an existing presentation. + // If not, it might've already been removed due to expiry or superseded by a newer presentation. 
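+	// For illustration, the claim set of a retraction JWT could look like this
+	// (all values are hypothetical):
+	//
+	//	{
+	//	  "jti": "did:example:alice#retraction-1",
+	//	  "retract_jti": "did:example:alice#registration-1",
+	//	  "exp": 1700000000
+	//	}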
+ var retractJTIString string + if retractJTIRaw, ok := presentation.JWT().Get("retract_jti"); !ok { + return errors.New("retraction presentation does not contain 'retract_jti' claim") + } else { + if retractJTIString, ok = retractJTIRaw.(string); !ok { + return errors.New("retraction presentation 'retract_jti' claim is not a string") + } + } + signerDID, _ := credential.PresentationSigner(presentation) // checked before + exists, err := m.store.exists(serviceID, signerDID.String(), retractJTIString) + if err != nil { + return err + } + if !exists { + return errors.New("retraction presentation refers to a non-existing presentation") + } + return nil +} + +func (m *Module) Get(serviceID string, startAt Timestamp) ([]vc.VerifiablePresentation, *Timestamp, error) { + if _, exists := m.services[serviceID]; !exists { + return nil, nil, ErrServiceNotFound + } + return m.store.get(serviceID, startAt) +} + +func loadDefinitions(directory string) (map[string]ServiceDefinition, error) { + entries, err := os.ReadDir(directory) + if err != nil { + return nil, fmt.Errorf("unable to read definitions directory '%s': %w", directory, err) + } + result := make(map[string]ServiceDefinition) + for _, entry := range entries { + if entry.IsDir() || !strings.HasSuffix(entry.Name(), ".json") { + continue + } + filePath := path.Join(directory, entry.Name()) + data, err := os.ReadFile(filePath) + if err != nil { + return nil, fmt.Errorf("unable to read service definition file '%s': %w", filePath, err) + } + definition, err := ParseServiceDefinition(data) + if err != nil { + return nil, fmt.Errorf("unable to parse service definition file '%s': %w", filePath, err) + } + if _, exists := result[definition.ID]; exists { + return nil, fmt.Errorf("duplicate service definition ID '%s' in file '%s'", definition.ID, filePath) + } + result[definition.ID] = *definition + } + return result, nil +} diff --git a/discovery/module_test.go b/discovery/module_test.go new file mode 100644 index 0000000000..4c6d96b570 --- /dev/null +++ b/discovery/module_test.go @@ -0,0 +1,262 @@ +/* + * Copyright (C) 2023 Nuts community + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ * + */ + +package discovery + +import ( + "errors" + "github.com/nuts-foundation/go-did/vc" + "github.com/nuts-foundation/nuts-node/core" + "github.com/nuts-foundation/nuts-node/storage" + "github.com/nuts-foundation/nuts-node/vcr" + "github.com/nuts-foundation/nuts-node/vcr/verifier" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.uber.org/mock/gomock" + "testing" + "time" +) + +func TestModule_Name(t *testing.T) { + assert.Equal(t, "Discovery", (&Module{}).Name()) +} + +func TestModule_Shutdown(t *testing.T) { + assert.NoError(t, (&Module{}).Shutdown()) +} + +func Test_Module_Add(t *testing.T) { + storageEngine := storage.NewTestStorageEngine(t) + require.NoError(t, storageEngine.Start()) + + t.Run("not a maintainer", func(t *testing.T) { + m, _ := setupModule(t, storageEngine) + + err := m.Add("other", vpAlice) + require.EqualError(t, err, "node is not a discovery server for this service") + }) + t.Run("VP verification fails (e.g. invalid signature)", func(t *testing.T) { + m, presentationVerifier := setupModule(t, storageEngine) + presentationVerifier.EXPECT().VerifyVP(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(nil, errors.New("failed")) + + err := m.Add(testServiceID, vpAlice) + require.EqualError(t, err, "presentation verification failed: failed") + + _, timestamp, err := m.Get(testServiceID, 0) + require.NoError(t, err) + assert.Equal(t, Timestamp(0), *timestamp) + }) + t.Run("already exists", func(t *testing.T) { + m, presentationVerifier := setupModule(t, storageEngine) + presentationVerifier.EXPECT().VerifyVP(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()) + + err := m.Add(testServiceID, vpAlice) + assert.NoError(t, err) + err = m.Add(testServiceID, vpAlice) + assert.EqualError(t, err, "presentation already exists") + }) + t.Run("valid for too long", func(t *testing.T) { + m, _ := setupModule(t, storageEngine) + def := m.services[testServiceID] + def.PresentationMaxValidity = 1 + m.services[testServiceID] = def + + err := m.Add(testServiceID, vpAlice) + assert.EqualError(t, err, "presentation is valid for too long (max 1s)") + }) + t.Run("no expiration", func(t *testing.T) { + m, _ := setupModule(t, storageEngine) + err := m.Add(testServiceID, createPresentationCustom(aliceDID, func(claims map[string]interface{}, _ *vc.VerifiablePresentation) { + delete(claims, "exp") + })) + assert.EqualError(t, err, "presentation does not have an expiration") + }) + t.Run("presentation does not contain an ID", func(t *testing.T) { + m, _ := setupModule(t, storageEngine) + + vpWithoutID := createPresentationCustom(aliceDID, func(claims map[string]interface{}, _ *vc.VerifiablePresentation) { + delete(claims, "jti") + }, vcAlice) + err := m.Add(testServiceID, vpWithoutID) + assert.EqualError(t, err, "presentation does not have an ID") + }) + t.Run("not a JWT", func(t *testing.T) { + m, _ := setupModule(t, storageEngine) + err := m.Add(testServiceID, vc.VerifiablePresentation{}) + assert.EqualError(t, err, "only JWT presentations are supported") + }) + t.Run("service unknown", func(t *testing.T) { + m, _ := setupModule(t, storageEngine) + err := m.Add("unknown", vpAlice) + assert.ErrorIs(t, err, ErrServiceNotFound) + }) + + t.Run("registration", func(t *testing.T) { + t.Run("ok", func(t *testing.T) { + m, presentationVerifier := setupModule(t, storageEngine) + presentationVerifier.EXPECT().VerifyVP(gomock.Any(), true, true, nil) + + err := m.Add(testServiceID, vpAlice) + require.NoError(t, err) + + _, timestamp, err := 
m.Get(testServiceID, 0) + require.NoError(t, err) + assert.Equal(t, Timestamp(1), *timestamp) + }) + t.Run("valid longer than its credentials", func(t *testing.T) { + m, _ := setupModule(t, storageEngine) + + vcAlice := createCredential(authorityDID, aliceDID, nil, func(claims map[string]interface{}) { + claims["exp"] = time.Now().Add(time.Hour) + }) + vpAlice := createPresentation(aliceDID, vcAlice) + err := m.Add(testServiceID, vpAlice) + assert.EqualError(t, err, "presentation is valid longer than the credential(s) it contains") + }) + t.Run("not conform to Presentation Definition", func(t *testing.T) { + m, _ := setupModule(t, storageEngine) + + // Presentation Definition only allows did:example DIDs + otherVP := createPresentation(unsupportedDID, createCredential(unsupportedDID, unsupportedDID, nil, nil)) + err := m.Add(testServiceID, otherVP) + require.ErrorContains(t, err, "presentation does not fulfill Presentation ServiceDefinition") + + _, timestamp, _ := m.Get(testServiceID, 0) + assert.Equal(t, Timestamp(0), *timestamp) + }) + }) + t.Run("retraction", func(t *testing.T) { + vpAliceRetract := createPresentationCustom(aliceDID, func(claims map[string]interface{}, vp *vc.VerifiablePresentation) { + vp.Type = append(vp.Type, retractionPresentationType) + claims["retract_jti"] = vpAlice.ID.String() + }) + t.Run("ok", func(t *testing.T) { + m, presentationVerifier := setupModule(t, storageEngine) + presentationVerifier.EXPECT().VerifyVP(gomock.Any(), true, true, nil).Times(2) + + err := m.Add(testServiceID, vpAlice) + require.NoError(t, err) + err = m.Add(testServiceID, vpAliceRetract) + assert.NoError(t, err) + }) + t.Run("non-existent presentation", func(t *testing.T) { + m, _ := setupModule(t, storageEngine) + err := m.Add(testServiceID, vpAliceRetract) + assert.EqualError(t, err, "retraction presentation refers to a non-existing presentation") + }) + t.Run("must not contain credentials", func(t *testing.T) { + m, _ := setupModule(t, storageEngine) + vp := createPresentationCustom(aliceDID, func(claims map[string]interface{}, vp *vc.VerifiablePresentation) { + vp.Type = append(vp.Type, retractionPresentationType) + }, vcAlice) + err := m.Add(testServiceID, vp) + assert.EqualError(t, err, "retraction presentation must not contain credentials") + }) + t.Run("missing 'retract_jti' claim", func(t *testing.T) { + m, _ := setupModule(t, storageEngine) + vp := createPresentationCustom(aliceDID, func(_ map[string]interface{}, vp *vc.VerifiablePresentation) { + vp.Type = append(vp.Type, retractionPresentationType) + }) + err := m.Add(testServiceID, vp) + assert.EqualError(t, err, "retraction presentation does not contain 'retract_jti' claim") + }) + t.Run("'retract_jti' claim in not a string", func(t *testing.T) { + m, _ := setupModule(t, storageEngine) + vp := createPresentationCustom(aliceDID, func(claims map[string]interface{}, vp *vc.VerifiablePresentation) { + vp.Type = append(vp.Type, retractionPresentationType) + claims["retract_jti"] = 10 + }) + err := m.Add(testServiceID, vp) + assert.EqualError(t, err, "retraction presentation 'retract_jti' claim is not a string") + }) + }) +} + +func Test_Module_Get(t *testing.T) { + storageEngine := storage.NewTestStorageEngine(t) + require.NoError(t, storageEngine.Start()) + t.Run("ok", func(t *testing.T) { + m, _ := setupModule(t, storageEngine) + require.NoError(t, m.store.add(testServiceID, vpAlice, nil)) + presentations, timestamp, err := m.Get(testServiceID, 0) + assert.NoError(t, err) + assert.Equal(t, 
[]vc.VerifiablePresentation{vpAlice}, presentations) + assert.Equal(t, Timestamp(1), *timestamp) + }) + t.Run("service unknown", func(t *testing.T) { + m, _ := setupModule(t, storageEngine) + _, _, err := m.Get("unknown", 0) + assert.ErrorIs(t, err, ErrServiceNotFound) + }) +} + +func setupModule(t *testing.T, storageInstance storage.Engine) (*Module, *verifier.MockVerifier) { + resetStore(t, storageInstance.GetSQLDatabase()) + ctrl := gomock.NewController(t) + mockVerifier := verifier.NewMockVerifier(ctrl) + mockVCR := vcr.NewMockVCR(ctrl) + mockVCR.EXPECT().Verifier().Return(mockVerifier).AnyTimes() + m := New(storageInstance, mockVCR) + require.NoError(t, m.Configure(core.ServerConfig{})) + m.services = testDefinitions() + m.serverDefinitions = map[string]ServiceDefinition{ + testServiceID: m.services[testServiceID], + } + require.NoError(t, m.Start()) + return m, mockVerifier +} + +func TestModule_Configure(t *testing.T) { + serverConfig := core.ServerConfig{} + t.Run("duplicate ID", func(t *testing.T) { + config := Config{ + Definitions: ServiceDefinitionsConfig{ + Directory: "test/duplicate_id", + }, + } + err := (&Module{config: config}).Configure(serverConfig) + assert.EqualError(t, err, "duplicate service definition ID 'urn:nuts.nl:usecase:eOverdrachtDev2023' in file 'test/duplicate_id/2.json'") + }) + t.Run("invalid JSON", func(t *testing.T) { + config := Config{ + Definitions: ServiceDefinitionsConfig{ + Directory: "test/invalid_json", + }, + } + err := (&Module{config: config}).Configure(serverConfig) + assert.ErrorContains(t, err, "unable to parse service definition file 'test/invalid_json/1.json'") + }) + t.Run("invalid service definition", func(t *testing.T) { + config := Config{ + Definitions: ServiceDefinitionsConfig{ + Directory: "test/invalid_definition", + }, + } + err := (&Module{config: config}).Configure(serverConfig) + assert.ErrorContains(t, err, "unable to parse service definition file 'test/invalid_definition/1.json'") + }) + t.Run("non-existent directory", func(t *testing.T) { + config := Config{ + Definitions: ServiceDefinitionsConfig{ + Directory: "test/non_existent", + }, + } + err := (&Module{config: config}).Configure(serverConfig) + assert.ErrorContains(t, err, "unable to read definitions directory 'test/non_existent'") + }) +} diff --git a/discovery/service-definition-schema.json b/discovery/service-definition-schema.json new file mode 100644 index 0000000000..b1b3bc177e --- /dev/null +++ b/discovery/service-definition-schema.json @@ -0,0 +1,28 @@ +{ + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "Service Definition", + "type": "object", + "properties": { + "id": { + "type": "string", + "minLength": 1 + }, + "endpoint": { + "type": "string", + "minLength": 1 + }, + "presentation_max_validity": { + "type": "integer", + "minimum": 1 + }, + "presentation_definition": { + "$ref": "http://identity.foundation/presentation-exchange/schemas/presentation-definition.json" + } + }, + "required": [ + "id", + "endpoint", + "presentation_max_validity", + "presentation_definition" + ] +} \ No newline at end of file diff --git a/discovery/store.go b/discovery/store.go new file mode 100644 index 0000000000..c86da5f898 --- /dev/null +++ b/discovery/store.go @@ -0,0 +1,384 @@ +/* + * Copyright (C) 2023 Nuts community + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) 
any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + * + */ + +package discovery + +import ( + "errors" + "fmt" + "github.com/google/uuid" + "github.com/nuts-foundation/go-did/vc" + "github.com/nuts-foundation/nuts-node/discovery/log" + credential "github.com/nuts-foundation/nuts-node/vcr/credential" + "gorm.io/driver/sqlite" + "gorm.io/gorm" + "gorm.io/gorm/clause" + "gorm.io/gorm/schema" + "strconv" + "strings" + "sync" + "time" +) + +type serviceRecord struct { + ID string `gorm:"primaryKey"` + LamportTimestamp uint64 +} + +func (s serviceRecord) TableName() string { + return "discovery_service" +} + +var _ schema.Tabler = (*presentationRecord)(nil) + +type presentationRecord struct { + ID string `gorm:"primaryKey"` + ServiceID string + LamportTimestamp uint64 + CredentialSubjectID string + PresentationID string + PresentationRaw string + PresentationExpiration int64 + Credentials []credentialRecord `gorm:"foreignKey:PresentationID;references:ID"` +} + +func (s presentationRecord) TableName() string { + return "discovery_presentation" +} + +// credentialRecord is a Verifiable Credential, part of a presentation (entry) on a use case list. +type credentialRecord struct { + // ID is the unique identifier of the entry. + ID string `gorm:"primaryKey"` + // PresentationID corresponds to the discovery_presentation record ID (not VerifiablePresentation.ID) this credentialRecord belongs to. + PresentationID string + // CredentialID contains the 'id' property of the Verifiable Credential. + CredentialID string + // CredentialIssuer contains the 'issuer' property of the Verifiable Credential. + CredentialIssuer string + // CredentialSubjectID contains the 'credentialSubject.id' property of the Verifiable Credential. + CredentialSubjectID string + // CredentialType contains the 'type' property of the Verifiable Credential (not being 'VerifiableCredential'). + CredentialType *string + Properties []credentialPropertyRecord `gorm:"foreignKey:CredentialID;references:ID"` +} + +// TableName returns the table name for this DTO. +func (p credentialRecord) TableName() string { + return "discovery_credential" +} + +// credentialPropertyRecord is a property of a Verifiable Credential in a Verifiable Presentation in a discovery service. +type credentialPropertyRecord struct { + // CredentialID refers to the entry record in discovery_credential + CredentialID string `gorm:"primaryKey"` + // Key is JSON path of the property. + Key string `gorm:"primaryKey"` + // Value is the value of the property. + Value string +} + +// TableName returns the table name for this DTO. 
+func (l credentialPropertyRecord) TableName() string { + return "discovery_credential_prop" +} + +type sqlStore struct { + db *gorm.DB + writeLock sync.Mutex +} + +func newSQLStore(db *gorm.DB, definitions map[string]ServiceDefinition) (*sqlStore, error) { + // Creates entries in the discovery service table with initial timestamp, if they don't exist yet + for _, definition := range definitions { + currentList := serviceRecord{ + ID: definition.ID, + } + if err := db.FirstOrCreate(¤tList, "id = ?", definition.ID).Error; err != nil { + return nil, err + } + } + return &sqlStore{ + db: db, + writeLock: sync.Mutex{}, + }, nil +} + +// Add adds a presentation to the list of presentations. +// Timestamp should be passed if the presentation was received from a remote Discovery Server, then it is stored alongside the presentation. +// If the local node is the Discovery Server and thus is responsible for the timestamping, +// nil should be passed to let the store determine the right value. +func (s *sqlStore) add(serviceID string, presentation vc.VerifiablePresentation, timestamp *Timestamp) error { + credentialSubjectID, err := credential.PresentationSigner(presentation) + if err != nil { + return err + } + if _, isSQLite := s.db.Config.Dialector.(*sqlite.Dialector); isSQLite { + // SQLite does not support SELECT FOR UPDATE and allows only 1 active write transaction at any time, + // and any other attempt to acquire a write transaction will directly return an error. + // This is in contrast to most other SQL-databases, which let the 2nd thread wait for some time to acquire the lock. + // The general advice for SQLite is to retry the operation, which is just poor-man's scheduling. + // So to keep behavior consistent across databases, we'll just lock the entire store for the duration of the transaction. + // See https://github.com/nuts-foundation/nuts-node/pull/2589#discussion_r1399130608 + s.writeLock.Lock() + defer s.writeLock.Unlock() + } + if err := s.prune(); err != nil { + return err + } + return s.db.Transaction(func(tx *gorm.DB) error { + newTimestamp, err := s.updateTimestamp(tx, serviceID, timestamp) + if err != nil { + return err + } + // Delete any previous presentations of the subject + if err := tx.Delete(&presentationRecord{}, "service_id = ? AND credential_subject_id = ?", serviceID, credentialSubjectID.String()). + Error; err != nil { + return err + } + + newPresentation, err := createPresentationRecord(serviceID, newTimestamp, presentation) + if err != nil { + return err + } + + return tx.Create(&newPresentation).Error + }) +} + +// createPresentationRecord creates a presentationRecord from a VerifiablePresentation. 
+// It creates the following types: +// - presentationRecord +// - presentationRecord.Credentials with credentialRecords of the credentials in the presentation +// - presentationRecord.Credentials.Properties of the credentialSubject properties of the credential (for s +func createPresentationRecord(serviceID string, timestamp Timestamp, presentation vc.VerifiablePresentation) (*presentationRecord, error) { + credentialSubjectID, err := credential.PresentationSigner(presentation) + if err != nil { + return nil, err + } + + newPresentation := presentationRecord{ + ID: uuid.NewString(), + ServiceID: serviceID, + LamportTimestamp: uint64(timestamp), + CredentialSubjectID: credentialSubjectID.String(), + PresentationID: presentation.ID.String(), + PresentationRaw: presentation.Raw(), + PresentationExpiration: presentation.JWT().Expiration().Unix(), + } + + for _, currCred := range presentation.VerifiableCredential { + var credentialType *string + for _, currType := range currCred.Type { + if currType.String() != "VerifiableCredential" { + credentialType = new(string) + *credentialType = currType.String() + break + } + } + if len(currCred.CredentialSubject) != 1 { + return nil, errors.New("credential must contain exactly one subject") + } + + newCredential := credentialRecord{ + ID: uuid.NewString(), + PresentationID: newPresentation.ID, + CredentialID: currCred.ID.String(), + CredentialIssuer: currCred.Issuer.String(), + CredentialSubjectID: credentialSubjectID.String(), + CredentialType: credentialType, + } + // Create key-value properties of the credential subject, which is then stored in the property table for searching. + keys, values := indexJSONObject(currCred.CredentialSubject[0].(map[string]interface{}), nil, nil, "credentialSubject") + for i, key := range keys { + if key == "credentialSubject.id" { + // present as column, don't index + continue + } + newCredential.Properties = append(newCredential.Properties, credentialPropertyRecord{ + CredentialID: newCredential.ID, + Key: key, + Value: values[i], + }) + } + newPresentation.Credentials = append(newPresentation.Credentials, newCredential) + } + return &newPresentation, nil +} + +// get returns all presentations, registered on the given service, starting after the given timestamp. +// It also returns the latest timestamp of the returned presentations. +// This timestamp can then be used next time to only retrieve presentations that were added after that timestamp. +func (s *sqlStore) get(serviceID string, startAt Timestamp) ([]vc.VerifiablePresentation, *Timestamp, error) { + var rows []presentationRecord + err := s.db.Order("lamport_timestamp ASC").Find(&rows, "service_id = ? AND lamport_timestamp > ?", serviceID, int(startAt)).Error + if err != nil { + return nil, nil, fmt.Errorf("query service '%s': %w", serviceID, err) + } + timestamp := startAt + presentations := make([]vc.VerifiablePresentation, 0, len(rows)) + for _, row := range rows { + presentation, err := vc.ParseVerifiablePresentation(row.PresentationRaw) + if err != nil { + return nil, nil, fmt.Errorf("parse presentation '%s' of service '%s': %w", row.PresentationID, serviceID, err) + } + presentations = append(presentations, *presentation) + timestamp = Timestamp(row.LamportTimestamp) + } + return presentations, ×tamp, nil +} + +// search searches for presentations, registered on the given service, matching the given query. +// The query is a map of JSON paths and expected string values, matched against the presentation's credentials. 
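+// Paths "id", "issuer", "type" and "credentialSubject.id" are matched against columns of the
+// credential record; any other path is matched against the indexed key-value properties.
+// For example (hypothetical values):
+//
+//	s.search("urn:example:usecase:2024", map[string]string{
+//		"issuer":                              "did:web:example.com",
+//		"credentialSubject.organization.city": "Arn*",
+//	})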
+// Wildcard matching is supported by prefixing or suffixing the value with an asterisk (*).
+// It returns the presentations which contain credentials that match the given query.
+func (s *sqlStore) search(serviceID string, query map[string]string) ([]vc.VerifiablePresentation, error) {
+	propertyColumns := map[string]string{
+		"id":                   "cred.credential_id",
+		"issuer":               "cred.credential_issuer",
+		"type":                 "cred.credential_type",
+		"credentialSubject.id": "cred.credential_subject_id",
+	}
+
+	stmt := s.db.Model(&presentationRecord{}).
+		Where("service_id = ?", serviceID).
+		Joins("inner join discovery_credential cred ON cred.presentation_id = discovery_presentation.id")
+	numProps := 0
+	for jsonPath, value := range query {
+		if value == "*" {
+			continue
+		}
+		// Sort out wildcard mode: prefix and postfix asterisks (*) are replaced with %, which then is used in a LIKE query.
+		// Otherwise, exact match (=) is used.
+		var eq = "="
+		if strings.HasPrefix(value, "*") {
+			value = "%" + value[1:]
+			eq = "LIKE"
+		}
+		if strings.HasSuffix(value, "*") {
+			value = value[:len(value)-1] + "%"
+			eq = "LIKE"
+		}
+		if column := propertyColumns[jsonPath]; column != "" {
+			stmt = stmt.Where(column+" "+eq+" ?", value)
+		} else {
+			// This property is not present as column, but indexed as key-value property.
+			// Using multiple (inner) joins to filter on a dynamic number of properties is not pretty, but it works
+			alias := "p" + strconv.Itoa(numProps)
+			numProps++
+			stmt = stmt.Joins("inner join discovery_credential_prop "+alias+" ON "+alias+".credential_id = cred.id AND "+alias+".key = ? AND "+alias+".value "+eq+" ?", jsonPath, value)
+		}
+	}
+
+	var matches []presentationRecord
+	if err := stmt.Find(&matches).Error; err != nil {
+		return nil, err
+	}
+	var results []vc.VerifiablePresentation
+	for _, match := range matches {
+		if match.PresentationExpiration <= time.Now().Unix() {
+			continue
+		}
+		presentation, err := vc.ParseVerifiablePresentation(match.PresentationRaw)
+		if err != nil {
+			return nil, fmt.Errorf("failed to parse presentation '%s': %w", match.PresentationID, err)
+		}
+		results = append(results, *presentation)
+	}
+	return results, nil
+}
+
+// updateTimestamp updates the timestamp of the given service.
+// Clients should pass the timestamp they received from the server (which simply sets it).
+// Servers should pass nil (since they "own" the timestamp), which causes it to be incremented.
+func (s *sqlStore) updateTimestamp(tx *gorm.DB, serviceID string, newTimestamp *Timestamp) (Timestamp, error) {
+	var result serviceRecord
+	// Lock (SELECT FOR UPDATE) discovery_service row to prevent concurrent updates to the same list, which could mess up the lamport timestamp.
+	if err := tx.Clauses(clause.Locking{Strength: "UPDATE"}).
+		Where(serviceRecord{ID: serviceID}).
+		Find(&result).
+		Error; err != nil {
+		return 0, err
+	}
+	result.ID = serviceID
+	if newTimestamp == nil {
+		// Increment timestamp
+		result.LamportTimestamp++
+	} else {
+		result.LamportTimestamp = uint64(*newTimestamp)
+	}
+	if err := tx.Save(&result).Error; err != nil {
+		return 0, err
+	}
+	return Timestamp(result.LamportTimestamp), nil
+}
+
+// exists checks whether a presentation of the given subject is registered on a service.
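+// It matches on the combination of service ID, credential subject ID and presentation ID.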
+func (s *sqlStore) exists(serviceID string, credentialSubjectID string, presentationID string) (bool, error) { + var count int64 + if err := s.db.Model(presentationRecord{}).Where(presentationRecord{ + ServiceID: serviceID, + CredentialSubjectID: credentialSubjectID, + PresentationID: presentationID, + }).Count(&count).Error; err != nil { + return false, fmt.Errorf("check presentation existence: %w", err) + } + return count > 0, nil +} + +func (s *sqlStore) prune() error { + num, err := s.removeExpired() + if err != nil { + return err + } + if num > 0 { + log.Logger().Debugf("Pruned %d expired presentations", num) + } + return nil +} + +func (s *sqlStore) removeExpired() (int, error) { + result := s.db.Where("presentation_expiration < ?", time.Now().Unix()).Delete(presentationRecord{}) + if result.Error != nil { + return 0, fmt.Errorf("prune presentations: %w", result.Error) + } + return int(result.RowsAffected), nil +} + +// indexJSONObject indexes a JSON object, resulting in a slice of JSON paths and corresponding string values. +// It only traverses JSON objects and only adds string values to the result. +func indexJSONObject(target map[string]interface{}, jsonPaths []string, stringValues []string, currentPath string) ([]string, []string) { + for key, value := range target { + thisPath := currentPath + if len(thisPath) > 0 { + thisPath += "." + } + thisPath += key + + switch typedValue := value.(type) { + case string: + jsonPaths = append(jsonPaths, thisPath) + stringValues = append(stringValues, typedValue) + case map[string]interface{}: + jsonPaths, stringValues = indexJSONObject(typedValue, jsonPaths, stringValues, thisPath) + default: + // other values (arrays, booleans, numbers, null) are not indexed + } + } + return jsonPaths, stringValues +} diff --git a/discovery/store_test.go b/discovery/store_test.go new file mode 100644 index 0000000000..11b1af52c1 --- /dev/null +++ b/discovery/store_test.go @@ -0,0 +1,373 @@ +/* + * Copyright (C) 2023 Nuts community + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ * + */ + +package discovery + +import ( + "github.com/nuts-foundation/go-did/vc" + "github.com/nuts-foundation/nuts-node/storage" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "gorm.io/gorm" + "sync" + "testing" +) + +func Test_sqlStore_exists(t *testing.T) { + storageEngine := storage.NewTestStorageEngine(t) + require.NoError(t, storageEngine.Start()) + + t.Run("empty list", func(t *testing.T) { + m := setupStore(t, storageEngine.GetSQLDatabase()) + exists, err := m.exists(testServiceID, aliceDID.String(), vpAlice.ID.String()) + assert.NoError(t, err) + assert.False(t, exists) + }) + t.Run("non-empty list, no match (other subject and ID)", func(t *testing.T) { + m := setupStore(t, storageEngine.GetSQLDatabase()) + require.NoError(t, m.add(testServiceID, vpBob, nil)) + exists, err := m.exists(testServiceID, aliceDID.String(), vpAlice.ID.String()) + assert.NoError(t, err) + assert.False(t, exists) + }) + t.Run("non-empty list, no match (other list)", func(t *testing.T) { + m := setupStore(t, storageEngine.GetSQLDatabase()) + require.NoError(t, m.add(testServiceID, vpAlice, nil)) + exists, err := m.exists("other", aliceDID.String(), vpAlice.ID.String()) + assert.NoError(t, err) + assert.False(t, exists) + }) + t.Run("non-empty list, match", func(t *testing.T) { + m := setupStore(t, storageEngine.GetSQLDatabase()) + require.NoError(t, m.add(testServiceID, vpAlice, nil)) + exists, err := m.exists(testServiceID, aliceDID.String(), vpAlice.ID.String()) + assert.NoError(t, err) + assert.True(t, exists) + }) +} + +func Test_sqlStore_add(t *testing.T) { + storageEngine := storage.NewTestStorageEngine(t) + require.NoError(t, storageEngine.Start()) + + t.Run("no credentials in presentation", func(t *testing.T) { + m := setupStore(t, storageEngine.GetSQLDatabase()) + err := m.add(testServiceID, createPresentation(aliceDID), nil) + assert.NoError(t, err) + }) + t.Run("with indexable properties in credential", func(t *testing.T) { + m := setupStore(t, storageEngine.GetSQLDatabase()) + err := m.add(testServiceID, createPresentation(aliceDID, createCredential(authorityDID, aliceDID, map[string]interface{}{ + "name": "Alice", + "placeOfBirth": "Bristol", + }, nil)), nil) + assert.NoError(t, err) + + var actual []credentialPropertyRecord + assert.NoError(t, m.db.Find(&actual).Error) + require.Len(t, actual, 2) + assert.Equal(t, "Alice", sliceToMap(actual)["credentialSubject.name"]) + assert.Equal(t, "Bristol", sliceToMap(actual)["credentialSubject.placeOfBirth"]) + }) + t.Run("with non-indexable properties in credential", func(t *testing.T) { + m := setupStore(t, storageEngine.GetSQLDatabase()) + err := m.add(testServiceID, createPresentation(aliceDID, createCredential(authorityDID, aliceDID, map[string]interface{}{ + "name": "Alice", + "age": 35, + }, nil)), nil) + assert.NoError(t, err) + + var actual []credentialPropertyRecord + assert.NoError(t, m.db.Find(&actual).Error) + require.Len(t, actual, 1) + assert.Equal(t, "Alice", sliceToMap(actual)["credentialSubject.name"]) + }) + t.Run("without indexable properties in credential", func(t *testing.T) { + m := setupStore(t, storageEngine.GetSQLDatabase()) + presentation := createCredential(authorityDID, aliceDID, map[string]interface{}{}, nil) + err := m.add(testServiceID, createPresentation(aliceDID, presentation), nil) + assert.NoError(t, err) + + var actual []credentialPropertyRecord + assert.NoError(t, m.db.Find(&actual).Error) + assert.Empty(t, actual) + }) + t.Run("replaces previous presentation of same subject", 
func(t *testing.T) { + m := setupStore(t, storageEngine.GetSQLDatabase()) + + secondVP := createPresentation(aliceDID, vcAlice) + require.NoError(t, m.add(testServiceID, vpAlice, nil)) + require.NoError(t, m.add(testServiceID, secondVP, nil)) + + // First VP should not exist + exists, err := m.exists(testServiceID, aliceDID.String(), vpAlice.ID.String()) + require.NoError(t, err) + assert.False(t, exists) + + // Only second VP should exist + exists, err = m.exists(testServiceID, aliceDID.String(), secondVP.ID.String()) + require.NoError(t, err) + assert.True(t, exists) + }) +} + +func Test_sqlStore_get(t *testing.T) { + storageEngine := storage.NewTestStorageEngine(t) + require.NoError(t, storageEngine.Start()) + + t.Run("empty list, empty timestamp", func(t *testing.T) { + m := setupStore(t, storageEngine.GetSQLDatabase()) + presentations, timestamp, err := m.get(testServiceID, 0) + assert.NoError(t, err) + assert.Empty(t, presentations) + assert.Empty(t, timestamp) + }) + t.Run("1 entry, empty timestamp", func(t *testing.T) { + m := setupStore(t, storageEngine.GetSQLDatabase()) + require.NoError(t, m.add(testServiceID, vpAlice, nil)) + presentations, timestamp, err := m.get(testServiceID, 0) + assert.NoError(t, err) + assert.Equal(t, []vc.VerifiablePresentation{vpAlice}, presentations) + assert.Equal(t, Timestamp(1), *timestamp) + }) + t.Run("2 entries, empty timestamp", func(t *testing.T) { + m := setupStore(t, storageEngine.GetSQLDatabase()) + require.NoError(t, m.add(testServiceID, vpAlice, nil)) + require.NoError(t, m.add(testServiceID, vpBob, nil)) + presentations, timestamp, err := m.get(testServiceID, 0) + assert.NoError(t, err) + assert.Equal(t, []vc.VerifiablePresentation{vpAlice, vpBob}, presentations) + assert.Equal(t, Timestamp(2), *timestamp) + }) + t.Run("2 entries, start after first", func(t *testing.T) { + m := setupStore(t, storageEngine.GetSQLDatabase()) + require.NoError(t, m.add(testServiceID, vpAlice, nil)) + require.NoError(t, m.add(testServiceID, vpBob, nil)) + presentations, timestamp, err := m.get(testServiceID, 1) + assert.NoError(t, err) + assert.Equal(t, []vc.VerifiablePresentation{vpBob}, presentations) + assert.Equal(t, Timestamp(2), *timestamp) + }) + t.Run("2 entries, start after end", func(t *testing.T) { + m := setupStore(t, storageEngine.GetSQLDatabase()) + require.NoError(t, m.add(testServiceID, vpAlice, nil)) + require.NoError(t, m.add(testServiceID, vpBob, nil)) + presentations, timestamp, err := m.get(testServiceID, 2) + assert.NoError(t, err) + assert.Equal(t, []vc.VerifiablePresentation{}, presentations) + assert.Equal(t, Timestamp(2), *timestamp) + }) +} + +func Test_sqlStore_search(t *testing.T) { + storageEngine := storage.NewTestStorageEngine(t) + require.NoError(t, storageEngine.Start()) + t.Cleanup(func() { + _ = storageEngine.Shutdown() + }) + + type testCase struct { + name string + inputVPs []vc.VerifiablePresentation + query map[string]string + expectedVPs []string + } + testCases := []testCase{ + { + name: "issuer", + inputVPs: []vc.VerifiablePresentation{vpAlice}, + query: map[string]string{ + "issuer": authorityDID.String(), + }, + expectedVPs: []string{vpAlice.ID.String()}, + }, + { + name: "id", + inputVPs: []vc.VerifiablePresentation{vpAlice}, + query: map[string]string{ + "id": vcAlice.ID.String(), + }, + expectedVPs: []string{vpAlice.ID.String()}, + }, + { + name: "type", + inputVPs: []vc.VerifiablePresentation{vpAlice}, + query: map[string]string{ + "type": "TestCredential", + }, + expectedVPs: []string{vpAlice.ID.String()}, + 
		},
		{
			name:     "credentialSubject.id",
			inputVPs: []vc.VerifiablePresentation{vpAlice},
			query: map[string]string{
				"credentialSubject.id": aliceDID.String(),
			},
			expectedVPs: []string{vpAlice.ID.String()},
		},
		{
			name:     "1 property",
			inputVPs: []vc.VerifiablePresentation{vpAlice},
			query: map[string]string{
				"credentialSubject.person.givenName": "Alice",
			},
			expectedVPs: []string{vpAlice.ID.String()},
		},
		{
			name:     "2 properties",
			inputVPs: []vc.VerifiablePresentation{vpAlice},
			query: map[string]string{
				"credentialSubject.person.givenName":  "Alice",
				"credentialSubject.person.familyName": "Jones",
			},
			expectedVPs: []string{vpAlice.ID.String()},
		},
		{
			name:     "properties and base properties",
			inputVPs: []vc.VerifiablePresentation{vpAlice},
			query: map[string]string{
				"issuer":                             authorityDID.String(),
				"credentialSubject.person.givenName": "Alice",
			},
			expectedVPs: []string{vpAlice.ID.String()},
		},
		{
			name:     "wildcard postfix",
			inputVPs: []vc.VerifiablePresentation{vpAlice, vpBob},
			query: map[string]string{
				"credentialSubject.person.familyName": "Jo*",
			},
			expectedVPs: []string{vpAlice.ID.String(), vpBob.ID.String()},
		},
		{
			name:     "wildcard prefix",
			inputVPs: []vc.VerifiablePresentation{vpAlice, vpBob},
			query: map[string]string{
				"credentialSubject.person.givenName": "*ce",
			},
			expectedVPs: []string{vpAlice.ID.String()},
		},
		{
			name:     "wildcard midway (not interpreted as wildcard)",
			inputVPs: []vc.VerifiablePresentation{vpAlice, vpBob},
			query: map[string]string{
				"credentialSubject.person.givenName": "A*ce",
			},
			expectedVPs: []string{},
		},
		{
			name:     "just wildcard",
			inputVPs: []vc.VerifiablePresentation{vpAlice, vpBob},
			query: map[string]string{
				"id": "*",
			},
			expectedVPs: []string{vpAlice.ID.String(), vpBob.ID.String()},
		},
		{
			name:     "2 VPs, 1 match",
			inputVPs: []vc.VerifiablePresentation{vpAlice, vpBob},
			query: map[string]string{
				"credentialSubject.person.givenName": "Alice",
			},
			expectedVPs: []string{vpAlice.ID.String()},
		},
		{
			name:     "multiple matches",
			inputVPs: []vc.VerifiablePresentation{vpAlice, vpBob},
			query: map[string]string{
				"issuer": authorityDID.String(),
			},
			expectedVPs: []string{vpAlice.ID.String(), vpBob.ID.String()},
		},
		{
			name:     "no match",
			inputVPs: []vc.VerifiablePresentation{vpAlice},
			query: map[string]string{
				"credentialSubject.person.givenName": "Bob",
			},
			expectedVPs: []string{},
		},
		{
			name: "empty database",
			query: map[string]string{
				"credentialSubject.person.givenName": "Bob",
			},
			expectedVPs: []string{},
		},
	}

	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			c := setupStore(t, storageEngine.GetSQLDatabase())
			for _, vp := range tc.inputVPs {
				err := c.add(testServiceID, vp, nil)
				require.NoError(t, err)
			}
			actualVPs, err := c.search(testServiceID, tc.query)
			require.NoError(t, err)
			require.Len(t, actualVPs, len(tc.expectedVPs))
			for _, expectedVP := range tc.expectedVPs {
				found := false
				for _, actualVP := range actualVPs {
					if actualVP.ID.String() == expectedVP {
						found = true
						break
					}
				}
				require.True(t, found, "expected to find VP with ID %s", expectedVP)
			}
		})
	}

	t.Run("concurrency", func(t *testing.T) {
		c := setupStore(t, storageEngine.GetSQLDatabase())
		wg := &sync.WaitGroup{}
		for i := 0; i < 100; i++ {
			wg.Add(1)
			go func() {
				defer wg.Done()
				err := c.add(testServiceID, createPresentation(aliceDID, vcAlice), nil)
				require.NoError(t, err)
			}()
		}
+
wg.Wait() + }) +} + +func setupStore(t *testing.T, db *gorm.DB) *sqlStore { + resetStore(t, db) + store, err := newSQLStore(db, testDefinitions()) + require.NoError(t, err) + return store +} + +func resetStore(t *testing.T, db *gorm.DB) { + underlyingDB, err := db.DB() + require.NoError(t, err) + // related tables are emptied due to on-delete-cascade clause + _, err = underlyingDB.Exec("DELETE FROM discovery_service") + require.NoError(t, err) +} + +func sliceToMap(slice []credentialPropertyRecord) map[string]string { + var result = make(map[string]string) + for _, curr := range slice { + result[curr.Key] = curr.Value + } + return result +} diff --git a/discovery/test.go b/discovery/test.go new file mode 100644 index 0000000000..c8a2444202 --- /dev/null +++ b/discovery/test.go @@ -0,0 +1,224 @@ +/* + * Copyright (C) 2023 Nuts community + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + * + */ + +package discovery + +import ( + "context" + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rand" + "fmt" + "github.com/google/uuid" + "github.com/lestrrat-go/jwx/v2/jwa" + "github.com/lestrrat-go/jwx/v2/jwk" + "github.com/lestrrat-go/jwx/v2/jws" + "github.com/lestrrat-go/jwx/v2/jwt" + ssi "github.com/nuts-foundation/go-did" + "github.com/nuts-foundation/go-did/did" + "github.com/nuts-foundation/go-did/vc" + "github.com/nuts-foundation/nuts-node/vcr/pe" + "time" +) + +var keyPairs map[string]*ecdsa.PrivateKey +var authorityDID did.DID +var aliceDID did.DID +var vcAlice vc.VerifiableCredential +var vpAlice vc.VerifiablePresentation +var bobDID did.DID +var vcBob vc.VerifiableCredential +var vpBob vc.VerifiablePresentation +var unsupportedDID did.DID + +var testServiceID = "usecase_v1" + +func testDefinitions() map[string]ServiceDefinition { + issuerPattern := "did:example:*" + return map[string]ServiceDefinition{ + testServiceID: { + ID: testServiceID, + Endpoint: "http://example.com/usecase", + PresentationDefinition: pe.PresentationDefinition{ + InputDescriptors: []*pe.InputDescriptor{ + { + Id: "1", + Constraints: &pe.Constraints{ + Fields: []pe.Field{ + { + Path: []string{"$.issuer"}, + Filter: &pe.Filter{ + Type: "string", + Pattern: &issuerPattern, + }, + }, + }, + }, + }, + }, + }, + PresentationMaxValidity: int((24 * time.Hour).Seconds()), + }, + "other": { + ID: "other", + Endpoint: "http://example.com/other", + PresentationDefinition: pe.PresentationDefinition{ + InputDescriptors: []*pe.InputDescriptor{ + { + Constraints: &pe.Constraints{ + Fields: []pe.Field{ + { + Path: []string{"$.issuer"}, + Filter: &pe.Filter{ + Type: "string", + }, + }, + }, + }, + }, + }, + }, + PresentationMaxValidity: int((24 * time.Hour).Seconds()), + }, + } +} + +func init() { + keyPairs = make(map[string]*ecdsa.PrivateKey) + authorityDID = did.MustParseDID("did:example:authority") + keyPairs[authorityDID.String()], _ = ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + aliceDID = did.MustParseDID("did:example:alice") + 
keyPairs[aliceDID.String()], _ = ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + bobDID = did.MustParseDID("did:example:bob") + keyPairs[bobDID.String()], _ = ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + unsupportedDID = did.MustParseDID("did:web:example.com") + keyPairs[unsupportedDID.String()], _ = ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + + vcAlice = createCredential(authorityDID, aliceDID, map[string]interface{}{ + "person": map[string]interface{}{ + "givenName": "Alice", + "familyName": "Jones", + }, + }, nil) + vpAlice = createPresentation(aliceDID, vcAlice) + vcBob = createCredential(authorityDID, bobDID, map[string]interface{}{ + "person": map[string]interface{}{ + "givenName": "Bob", + "familyName": "Jomper", + }, + }, nil) + vpBob = createPresentation(bobDID, vcBob) +} + +func createCredential(issuerDID did.DID, subjectDID did.DID, credentialSubject map[string]interface{}, claimVisitor func(map[string]interface{})) vc.VerifiableCredential { + vcID := did.DIDURL{DID: issuerDID} + vcID.Fragment = uuid.NewString() + vcIDURI := vcID.URI() + expirationDate := time.Now().Add(time.Hour * 24) + if credentialSubject == nil { + credentialSubject = make(map[string]interface{}) + } + credentialSubject["id"] = subjectDID.String() + result, err := vc.CreateJWTVerifiableCredential(context.Background(), vc.VerifiableCredential{ + ID: &vcIDURI, + Type: []ssi.URI{ssi.MustParseURI("VerifiableCredential"), ssi.MustParseURI("TestCredential")}, + Issuer: issuerDID.URI(), + IssuanceDate: time.Now(), + ExpirationDate: &expirationDate, + CredentialSubject: []interface{}{credentialSubject}, + }, func(ctx context.Context, claims map[string]interface{}, headers map[string]interface{}) (string, error) { + if claimVisitor != nil { + claimVisitor(claims) + } + return signJWT(subjectDID, claims, headers) + }) + if err != nil { + panic(err) + } + return *result +} + +func createPresentation(subjectDID did.DID, credentials ...vc.VerifiableCredential) vc.VerifiablePresentation { + return createPresentationCustom(subjectDID, func(_ map[string]interface{}, _ *vc.VerifiablePresentation) { + // do nothing + }, credentials...) 
+}
+
+func createPresentationCustom(subjectDID did.DID, visitor func(claims map[string]interface{}, vp *vc.VerifiablePresentation), credentials ...vc.VerifiableCredential) vc.VerifiablePresentation {
+	headers := map[string]interface{}{
+		jws.TypeKey: "JWT",
+	}
+	innerVP := &vc.VerifiablePresentation{
+		Type:                 []ssi.URI{ssi.MustParseURI("VerifiablePresentation")},
+		VerifiableCredential: credentials,
+	}
+	claims := map[string]interface{}{
+		jwt.IssuerKey:     subjectDID.String(),
+		jwt.SubjectKey:    subjectDID.String(),
+		jwt.JwtIDKey:      subjectDID.String() + "#" + uuid.NewString(),
+		jwt.NotBeforeKey:  time.Now().Unix(),
+		jwt.ExpirationKey: time.Now().Add(time.Hour * 8),
+	}
+	visitor(claims, innerVP)
+	claims["vp"] = *innerVP
+	token, err := signJWT(subjectDID, claims, headers)
+	if err != nil {
+		panic(err)
+	}
+	presentation, err := vc.ParseVerifiablePresentation(token)
+	if err != nil {
+		panic(err)
+	}
+	return *presentation
+}
+
+func signJWT(subjectDID did.DID, claims map[string]interface{}, headers map[string]interface{}) (string, error) {
+	// Build JWK
+	signingKey := keyPairs[subjectDID.String()]
+	if signingKey == nil {
+		return "", fmt.Errorf("key not found for DID: %s", subjectDID)
+	}
+	subjectKeyJWK, err := jwk.FromRaw(signingKey)
+	if err != nil {
+		return "", err
+	}
+	keyID := did.DIDURL{DID: subjectDID}
+	keyID.Fragment = "0"
+	if err := subjectKeyJWK.Set(jwk.AlgorithmKey, jwa.ES256); err != nil {
+		return "", err
+	}
+	if err := subjectKeyJWK.Set(jwk.KeyIDKey, keyID.String()); err != nil {
+		return "", err
+	}
+
+	// Build token
+	token := jwt.New()
+	for k, v := range claims {
+		if err := token.Set(k, v); err != nil {
+			return "", err
+		}
+	}
+	hdr := jws.NewHeaders()
+	for k, v := range headers {
+		if err := hdr.Set(k, v); err != nil {
+			return "", err
+		}
+	}
+	bytes, err := jwt.Sign(token, jwt.WithKey(jwa.ES256, signingKey, jws.WithProtectedHeaders(hdr)))
+	return string(bytes), err
+}
diff --git a/discovery/test/duplicate_id/1.json b/discovery/test/duplicate_id/1.json
new file mode 100644
index 0000000000..533dc08feb
--- /dev/null
+++ b/discovery/test/duplicate_id/1.json
@@ -0,0 +1,49 @@
+{
+  "id": "urn:nuts.nl:usecase:eOverdrachtDev2023",
+  "endpoint": "https://example.com/usecase/eoverdracht_dev",
+  "presentation_max_validity": 36000,
+  "presentation_definition": {
+    "id": "pd_eoverdracht_dev_care_organization",
+    "format": {
+      "ldp_vc": {
+        "proof_type": [
+          "JsonWebSignature2020"
+        ]
+      }
+    },
+    "input_descriptors": [
+      {
+        "id": "id_nuts_care_organization_cred",
+        "constraints": {
+          "fields": [
+            {
+              "path": [
+                "$.type"
+              ],
+              "filter": {
+                "type": "string",
+                "const": "NutsOrganizationCredential"
+              }
+            },
+            {
+              "path": [
+                "$.credentialSubject.organization.name"
+              ],
+              "filter": {
+                "type": "string"
+              }
+            },
+            {
+              "path": [
+                "$.credentialSubject.organization.city"
+              ],
+              "filter": {
+                "type": "string"
+              }
+            }
+          ]
+        }
+      }
+    ]
+  }
+}
diff --git a/discovery/test/duplicate_id/2.json b/discovery/test/duplicate_id/2.json
new file mode 100644
index 0000000000..533dc08feb
--- /dev/null
+++ b/discovery/test/duplicate_id/2.json
@@ -0,0 +1,49 @@
+{
+  "id": "urn:nuts.nl:usecase:eOverdrachtDev2023",
+  "endpoint": "https://example.com/usecase/eoverdracht_dev",
+  "presentation_max_validity": 36000,
+  "presentation_definition": {
+    "id": "pd_eoverdracht_dev_care_organization",
+    "format": {
+      "ldp_vc": {
+        "proof_type": [
+          "JsonWebSignature2020"
+        ]
+      }
+    },
+    "input_descriptors": [
+      {
+        "id": "id_nuts_care_organization_cred",
+        "constraints": {
+          "fields": [
+            {
+
"path": [ + "$.type" + ], + "filter": { + "type": "string", + "const": "NutsOrganizationCredential" + } + }, + { + "path": [ + "$.credentialSubject.organization.name" + ], + "filter": { + "type": "string" + } + }, + { + "path": [ + "$.credentialSubject.organization.city" + ], + "filter": { + "type": "string" + } + } + ] + } + } + ] + } +} diff --git a/discovery/test/duplicate_id/README.md b/discovery/test/duplicate_id/README.md new file mode 100644 index 0000000000..f0fc1802ed --- /dev/null +++ b/discovery/test/duplicate_id/README.md @@ -0,0 +1 @@ +This directory contains an invalid use case definition: 2 definitions have the same ID. \ No newline at end of file diff --git a/discovery/test/invalid_definition/1.json b/discovery/test/invalid_definition/1.json new file mode 100644 index 0000000000..0db3279e44 --- /dev/null +++ b/discovery/test/invalid_definition/1.json @@ -0,0 +1,3 @@ +{ + +} diff --git a/discovery/test/invalid_definition/README.md b/discovery/test/invalid_definition/README.md new file mode 100644 index 0000000000..0d166a7da4 --- /dev/null +++ b/discovery/test/invalid_definition/README.md @@ -0,0 +1 @@ +This directory contains an invalid use case definition: it does not contain the fields that are required according to the JSON schema. \ No newline at end of file diff --git a/discovery/test/invalid_json/1.json b/discovery/test/invalid_json/1.json new file mode 100644 index 0000000000..7e31dc3cad --- /dev/null +++ b/discovery/test/invalid_json/1.json @@ -0,0 +1 @@ +this is not JSON \ No newline at end of file diff --git a/discovery/test/invalid_json/README.md b/discovery/test/invalid_json/README.md new file mode 100644 index 0000000000..30610e3784 --- /dev/null +++ b/discovery/test/invalid_json/README.md @@ -0,0 +1 @@ +This directory contains an invalid use case definition: it is not valid JSON. \ No newline at end of file diff --git a/discovery/test/valid/eoverdracht.json b/discovery/test/valid/eoverdracht.json new file mode 100644 index 0000000000..533dc08feb --- /dev/null +++ b/discovery/test/valid/eoverdracht.json @@ -0,0 +1,49 @@ +{ + "id": "urn:nuts.nl:usecase:eOverdrachtDev2023", + "endpoint": "https://example.com/usecase/eoverdracht_dev", + "presentation_max_validity": 36000, + "presentation_definition": { + "id": "pd_eoverdracht_dev_care_organization", + "format": { + "ldp_vc": { + "proof_type": [ + "JsonWebSignature2020" + ] + } + }, + "input_descriptors": [ + { + "id": "id_nuts_care_organization_cred", + "constraints": { + "fields": [ + { + "path": [ + "$.type" + ], + "filter": { + "type": "string", + "const": "NutsOrganizationCredential" + } + }, + { + "path": [ + "$.credentialSubject.organization.name" + ], + "filter": { + "type": "string" + } + }, + { + "path": [ + "$.credentialSubject.organization.city" + ], + "filter": { + "type": "string" + } + } + ] + } + } + ] + } +} diff --git a/discovery/test/valid/subdir/README.md b/discovery/test/valid/subdir/README.md new file mode 100644 index 0000000000..b1778a548c --- /dev/null +++ b/discovery/test/valid/subdir/README.md @@ -0,0 +1 @@ +This directory (with an invalid definition) is there to assert subdirectories are not traversed. 
\ No newline at end of file diff --git a/discovery/test/valid/subdir/empty.json b/discovery/test/valid/subdir/empty.json new file mode 100644 index 0000000000..2c63c08510 --- /dev/null +++ b/discovery/test/valid/subdir/empty.json @@ -0,0 +1,2 @@ +{ +} diff --git a/docs/index.rst b/docs/index.rst index 131c752e84..733a67b69f 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -37,6 +37,7 @@ Nuts documentation pages/deployment/monitoring.rst pages/deployment/administering-your-node.rst pages/deployment/cli-reference.rst + pages/deployment/discovery.rst pages/deployment/backup-restore.rst pages/deployment/key-rotation.rst pages/deployment/storage-configuration.rst diff --git a/docs/pages/deployment/cli-reference.rst b/docs/pages/deployment/cli-reference.rst index 0bf3c272ea..fea8bf69eb 100755 --- a/docs/pages/deployment/cli-reference.rst +++ b/docs/pages/deployment/cli-reference.rst @@ -30,6 +30,8 @@ The following options apply to the server commands below: --crypto.vault.timeout duration Timeout of client calls to Vault, in Golang time.Duration string format (e.g. 1s). (default 5s) --crypto.vault.token string The Vault token. If set it overwrites the VAULT_TOKEN env var. --datadir string Directory where the node stores its files. (default "./data") + --discovery.definitions.directory string Directory to load Discovery Service Definitions from. If not set, the discovery service will be disabled. If the directory contains JSON files that can't be parsed as service definition, the node will fail to start. + --discovery.server.definition_ids strings IDs of the Discovery Service Definitions for which to act as server. If an ID does not map to a loaded service definition, the node will fail to start. --events.nats.hostname string Hostname for the NATS server (default "0.0.0.0") --events.nats.port int Port where the NATS server listens on (default 4222) --events.nats.storagedir string Directory where file-backed streams are stored in the NATS server @@ -44,7 +46,7 @@ The following options apply to the server commands below: --http.default.log string What to log about HTTP requests. Options are 'nothing', 'metadata' (log request method, URI, IP and response code), and 'metadata-and-body' (log the request and response body, in addition to the metadata). (default "metadata") --http.default.tls string Whether to enable TLS for the default interface, options are 'disabled', 'server', 'server-client'. Leaving it empty is synonymous to 'disabled', --internalratelimiter When set, expensive internal calls are rate-limited to protect the network. Always enabled in strict mode. (default true) - --jsonld.contexts.localmapping stringToString This setting allows mapping external URLs to local files for e.g. preventing external dependencies. These mappings have precedence over those in remoteallowlist. (default [https://schema.org=assets/contexts/schema-org-v13.ldjson,https://nuts.nl/credentials/v1=assets/contexts/nuts.ldjson,https://www.w3.org/2018/credentials/v1=assets/contexts/w3c-credentials-v1.ldjson,https://w3c-ccg.github.io/lds-jws2020/contexts/lds-jws2020-v1.json=assets/contexts/lds-jws2020-v1.ldjson]) + --jsonld.contexts.localmapping stringToString This setting allows mapping external URLs to local files for e.g. preventing external dependencies. These mappings have precedence over those in remoteallowlist. 
(default [https://www.w3.org/2018/credentials/v1=assets/contexts/w3c-credentials-v1.ldjson,https://w3c-ccg.github.io/lds-jws2020/contexts/lds-jws2020-v1.json=assets/contexts/lds-jws2020-v1.ldjson,https://schema.org=assets/contexts/schema-org-v13.ldjson,https://nuts.nl/credentials/v1=assets/contexts/nuts.ldjson])
       --jsonld.contexts.remoteallowlist strings        In strict mode, fetching external JSON-LD contexts is not allowed except for context-URLs listed here. (default [https://schema.org,https://www.w3.org/2018/credentials/v1,https://w3c-ccg.github.io/lds-jws2020/contexts/lds-jws2020-v1.json])
       --loggerformat string                            Log format (text, json) (default "text")
       --network.bootstrapnodes strings                 List of bootstrap nodes (':') which the node initially connect to.
@@ -70,7 +72,7 @@ The following options apply to the server commands below:
       --storage.redis.sentinel.username string         Username for authenticating to Redis Sentinels.
       --storage.redis.tls.truststorefile string        PEM file containing the trusted CA certificate(s) for authenticating remote Redis servers. Can only be used when connecting over TLS (use 'rediss://' as scheme in address).
       --storage.redis.username string                  Redis database username. If set, it overrides the username in the connection URL.
-      --storage.sql.connection string                  Connection string for the SQL database. If not set, it defaults to a SQLite database stored inside the configured data directory
+      --storage.sql.connection string                  Connection string for the SQL database. If not set, it defaults to a SQLite database stored inside the configured data directory. Note: using SQLite is not recommended in production environments. If using SQLite anyway, remember to enable foreign keys ('_foreign_keys=on') and the write-ahead-log ('_journal_mode=WAL').
       --strictmode                                     When set, insecure settings are forbidden. (default true)
       --tls.certfile string                            PEM file containing the certificate for the server (also used as client certificate).
       --tls.certheader string                          Name of the HTTP header that will contain the client certificate when TLS is offloaded.
diff --git a/docs/pages/deployment/discovery.rst b/docs/pages/deployment/discovery.rst
new file mode 100644
index 0000000000..e00527fde3
--- /dev/null
+++ b/docs/pages/deployment/discovery.rst
@@ -0,0 +1,29 @@
+.. _discovery:
+
+Discovery
+#########
+
+.. warning::
+    This feature is under development and subject to change.
+
+Discovery allows parties to publish information about themselves as a Verifiable Presentation,
+so that other parties can discover them for further (data) exchange.
+
+In this Discovery Service protocol there are clients and servers: clients register their Verifiable Presentations on a server,
+which can be queried by other clients.
+Where to find the server and what is allowed in the Verifiable Presentations is defined in a Discovery Service Definition.
+These are JSON documents that are loaded by both client and server.
+
+The Nuts node always acts as client for every loaded service definition, meaning it can register itself on the server and query it.
+It only acts as server for a specific service definition if configured to do so.
+
+Configuration
+*************
+
+Service definitions are JSON files loaded from the ``discovery.definitions.directory`` directory.
+It loads all files with the ``.json`` extension in this directory. It does not load subdirectories.
+If the directory contains JSON files that are not (valid) service definitions, the node will fail to start.
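+
+A minimal example of the corresponding ``nuts.yaml`` configuration could look like this, assuming a hypothetical ``/opt/nuts/discovery`` directory and a definition with ID ``usecase_v1``
+(the ``server`` part only applies when also acting as server, as described below):
+
+.. code-block:: yaml
+
+    discovery:
+      definitions:
+        directory: /opt/nuts/discovery
+      server:
+        definition_ids:
+          - usecase_v1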
+
+To act as server for a specific discovery service definition,
+the service ID from the definition needs to be specified in ``discovery.server.definition_ids``.
+The IDs in this list must correspond to the ``id`` fields of the loaded service definitions; otherwise the node will fail to start.
\ No newline at end of file
diff --git a/docs/pages/deployment/server_options.rst b/docs/pages/deployment/server_options.rst
index 03b274bb46..cb1e7afef7 100755
--- a/docs/pages/deployment/server_options.rst
+++ b/docs/pages/deployment/server_options.rst
@@ -2,85 +2,88 @@
     :widths: 20 30 50
     :class: options-table
 
-    ==================================== =============================================================================================================================================================================================================================================================================================================== ==================================================================================================================================================================================================================================
-    Key                                  Default                       Description
-    ==================================== =============================================================================================================================================================================================================================================================================================================== ==================================================================================================================================================================================================================================
-    configfile                           nuts.yaml                     Nuts config file
-    cpuprofile                                                         When set, a CPU profile is written to the given path. Ignored when strictmode is set.
-    datadir                              ./data                        Directory where the node stores its files.
-    internalratelimiter                  true                          When set, expensive internal calls are rate-limited to protect the network. Always enabled in strict mode.
-    loggerformat                         text                          Log format (text, json)
-    strictmode                           true                          When set, insecure settings are forbidden.
-    verbosity                            info                          Log level (trace, debug, info, warn, error)
-    tls.certfile                                                       PEM file containing the certificate for the server (also used as client certificate).
-    tls.certheader                                                     Name of the HTTP header that will contain the client certificate when TLS is offloaded.
-    tls.certkeyfile                                                    PEM file containing the private key of the server certificate.
-    tls.offload                                                        Whether to enable TLS offloading for incoming connections. Enable by setting it to 'incoming'. If enabled 'tls.certheader' must be configured as well.
-    tls.truststorefile                   truststore.pem                PEM file containing the trusted CA certificates for authenticating remote servers.
-    **Auth**
-    auth.accesstokenlifespan             60                            defines how long (in seconds) an access token is valid. Uses default in strict mode.
-    auth.clockskew                       5000                          allowed JWT Clock skew in milliseconds
-    auth.contractvalidators              [irma,uzi,dummy,employeeid]   sets the different contract validators to use
-    auth.publicurl                                                     public URL which can be reached by a users IRMA client, this should include the scheme and domain: https://example.com. Additional paths should only be added if some sort of url-rewriting is done in a reverse-proxy.
-    auth.http.timeout                    30                            HTTP timeout (in seconds) used by the Auth API HTTP client
-    auth.irma.autoupdateschemas          true                          set if you want automatically update the IRMA schemas every 60 minutes.
- auth.irma.schememanager pbdf IRMA schemeManager to use for attributes. Can be either 'pbdf' or 'irma-demo'. - **Crypto** - crypto.storage fs Storage to use, 'external' for an external backend (experimental), 'fs' for file system (for development purposes), 'vaultkv' for Vault KV store (recommended, will be replaced by external backend in future). - crypto.external.address Address of the external storage service. - crypto.external.timeout 100ms Time-out when invoking the external storage backend, in Golang time.Duration string format (e.g. 1s). - crypto.vault.address The Vault address. If set it overwrites the VAULT_ADDR env var. - crypto.vault.pathprefix kv The Vault path prefix. - crypto.vault.timeout 5s Timeout of client calls to Vault, in Golang time.Duration string format (e.g. 1s). - crypto.vault.token The Vault token. If set it overwrites the VAULT_TOKEN env var. - **Events** - events.nats.hostname 0.0.0.0 Hostname for the NATS server - events.nats.port 4222 Port where the NATS server listens on - events.nats.storagedir Directory where file-backed streams are stored in the NATS server - events.nats.timeout 30 Timeout for NATS server operations - **GoldenHammer** - goldenhammer.enabled true Whether to enable automatically fixing DID documents with the required endpoints. - goldenhammer.interval 10m0s The interval in which to check for DID documents to fix. - **HTTP** - http.default.address \:1323 Address and port the server will be listening to - http.default.log metadata What to log about HTTP requests. Options are 'nothing', 'metadata' (log request method, URI, IP and response code), and 'metadata-and-body' (log the request and response body, in addition to the metadata). - http.default.tls Whether to enable TLS for the default interface, options are 'disabled', 'server', 'server-client'. Leaving it empty is synonymous to 'disabled', - http.default.auth.audience Expected audience for JWT tokens (default: hostname) - http.default.auth.authorizedkeyspath Path to an authorized_keys file for trusted JWT signers - http.default.auth.type Whether to enable authentication for the default interface, specify 'token_v2' for bearer token mode or 'token' for legacy bearer token mode. - http.default.cors.origin [] When set, enables CORS from the specified origins on the default HTTP interface. - **JSONLD** - jsonld.contexts.localmapping [https://w3c-ccg.github.io/lds-jws2020/contexts/lds-jws2020-v1.json=assets/contexts/lds-jws2020-v1.ldjson,https://schema.org=assets/contexts/schema-org-v13.ldjson,https://nuts.nl/credentials/v1=assets/contexts/nuts.ldjson,https://www.w3.org/2018/credentials/v1=assets/contexts/w3c-credentials-v1.ldjson] This setting allows mapping external URLs to local files for e.g. preventing external dependencies. These mappings have precedence over those in remoteallowlist. - jsonld.contexts.remoteallowlist [https://schema.org,https://www.w3.org/2018/credentials/v1,https://w3c-ccg.github.io/lds-jws2020/contexts/lds-jws2020-v1.json] In strict mode, fetching external JSON-LD contexts is not allowed except for context-URLs listed here. - **Network** - network.bootstrapnodes [] List of bootstrap nodes (':') which the node initially connect to. - network.connectiontimeout 5000 Timeout before an outbound connection attempt times out (in milliseconds). - network.enablediscovery true Whether to enable automatic connecting to other nodes. - network.enabletls true Whether to enable TLS for gRPC connections, which can be disabled for demo/development purposes. 
It is NOT meant for TLS offloading (see 'tls.offload'). Disabling TLS is not allowed in strict-mode. - network.grpcaddr \:5555 Local address for gRPC to listen on. If empty the gRPC server won't be started and other nodes will not be able to connect to this node (outbound connections can still be made). - network.maxbackoff 24h0m0s Maximum between outbound connections attempts to unresponsive nodes (in Golang duration format, e.g. '1h', '30m'). - network.nodedid Specifies the DID of the organization that operates this node, typically a vendor for EPD software. It is used to identify the node on the network. If the DID document does not exist of is deactivated, the node will not start. - network.protocols [] Specifies the list of network protocols to enable on the server. They are specified by version (1, 2). If not set, all protocols are enabled. - network.v2.diagnosticsinterval 5000 Interval (in milliseconds) that specifies how often the node should broadcast its diagnostic information to other nodes (specify 0 to disable). - network.v2.gossipinterval 5000 Interval (in milliseconds) that specifies how often the node should gossip its new hashes to other nodes. - **PKI** - pki.maxupdatefailhours 4 Maximum number of hours that a denylist update can fail - pki.softfail true Do not reject certificates if their revocation status cannot be established when softfail is true - **Storage** - storage.bbolt.backup.directory Target directory for BBolt database backups. - storage.bbolt.backup.interval 0s Interval, formatted as Golang duration (e.g. 10m, 1h) at which BBolt database backups will be performed. - storage.redis.address Redis database server address. This can be a simple 'host:port' or a Redis connection URL with scheme, auth and other options. - storage.redis.database Redis database name, which is used as prefix every key. Can be used to have multiple instances use the same Redis instance. - storage.redis.password Redis database password. If set, it overrides the username in the connection URL. - storage.redis.username Redis database username. If set, it overrides the username in the connection URL. - storage.redis.sentinel.master Name of the Redis Sentinel master. Setting this property enables Redis Sentinel. - storage.redis.sentinel.nodes [] Addresses of the Redis Sentinels to connect to initially. Setting this property enables Redis Sentinel. - storage.redis.sentinel.password Password for authenticating to Redis Sentinels. - storage.redis.sentinel.username Username for authenticating to Redis Sentinels. - storage.redis.tls.truststorefile PEM file containing the trusted CA certificate(s) for authenticating remote Redis servers. Can only be used when connecting over TLS (use 'rediss://' as scheme in address). - storage.sql.connection Connection string for the SQL database. If not set, it defaults to a SQLite database stored inside the configured data directory - **VCR** - vcr.openid4vci.definitionsdir Directory with the additional credential definitions the node could issue (experimental, may change without notice). - vcr.openid4vci.enabled true Enable issuing and receiving credentials over OpenID4VCI. - vcr.openid4vci.timeout 30s Time-out for OpenID4VCI HTTP client operations. 
- ==================================== =============================================================================================================================================================================================================================================================================================================== ================================================================================================================================================================================================================================== + ==================================== =============================================================================================================================================================================================================================================================================================================== ================================================================================================================================================================================================================================================================================================================================ + Key Default Description + ==================================== =============================================================================================================================================================================================================================================================================================================== ================================================================================================================================================================================================================================================================================================================================ + configfile nuts.yaml Nuts config file + cpuprofile When set, a CPU profile is written to the given path. Ignored when strictmode is set. + datadir ./data Directory where the node stores its files. + internalratelimiter true When set, expensive internal calls are rate-limited to protect the network. Always enabled in strict mode. + loggerformat text Log format (text, json) + strictmode true When set, insecure settings are forbidden. + verbosity info Log level (trace, debug, info, warn, error) + tls.certfile PEM file containing the certificate for the server (also used as client certificate). + tls.certheader Name of the HTTP header that will contain the client certificate when TLS is offloaded. + tls.certkeyfile PEM file containing the private key of the server certificate. + tls.offload Whether to enable TLS offloading for incoming connections. Enable by setting it to 'incoming'. If enabled 'tls.certheader' must be configured as well. + tls.truststorefile truststore.pem PEM file containing the trusted CA certificates for authenticating remote servers. + **Auth** + auth.accesstokenlifespan 60 defines how long (in seconds) an access token is valid. Uses default in strict mode. + auth.clockskew 5000 allowed JWT Clock skew in milliseconds + auth.contractvalidators [irma,uzi,dummy,employeeid] sets the different contract validators to use + auth.publicurl public URL which can be reached by a users IRMA client, this should include the scheme and domain: https://example.com. Additional paths should only be added if some sort of url-rewriting is done in a reverse-proxy. 
+ auth.http.timeout 30 HTTP timeout (in seconds) used by the Auth API HTTP client + auth.irma.autoupdateschemas true set if you want automatically update the IRMA schemas every 60 minutes. + auth.irma.schememanager pbdf IRMA schemeManager to use for attributes. Can be either 'pbdf' or 'irma-demo'. + **Crypto** + crypto.storage fs Storage to use, 'external' for an external backend (experimental), 'fs' for file system (for development purposes), 'vaultkv' for Vault KV store (recommended, will be replaced by external backend in future). + crypto.external.address Address of the external storage service. + crypto.external.timeout 100ms Time-out when invoking the external storage backend, in Golang time.Duration string format (e.g. 1s). + crypto.vault.address The Vault address. If set it overwrites the VAULT_ADDR env var. + crypto.vault.pathprefix kv The Vault path prefix. + crypto.vault.timeout 5s Timeout of client calls to Vault, in Golang time.Duration string format (e.g. 1s). + crypto.vault.token The Vault token. If set it overwrites the VAULT_TOKEN env var. + **Discovery** + discovery.definitions.directory Directory to load Discovery Service Definitions from. If not set, the discovery service will be disabled. If the directory contains JSON files that can't be parsed as service definition, the node will fail to start. + discovery.server.definition_ids [] IDs of the Discovery Service Definitions for which to act as server. If an ID does not map to a loaded service definition, the node will fail to start. + **Events** + events.nats.hostname 0.0.0.0 Hostname for the NATS server + events.nats.port 4222 Port where the NATS server listens on + events.nats.storagedir Directory where file-backed streams are stored in the NATS server + events.nats.timeout 30 Timeout for NATS server operations + **GoldenHammer** + goldenhammer.enabled true Whether to enable automatically fixing DID documents with the required endpoints. + goldenhammer.interval 10m0s The interval in which to check for DID documents to fix. + **HTTP** + http.default.address \:1323 Address and port the server will be listening to + http.default.log metadata What to log about HTTP requests. Options are 'nothing', 'metadata' (log request method, URI, IP and response code), and 'metadata-and-body' (log the request and response body, in addition to the metadata). + http.default.tls Whether to enable TLS for the default interface, options are 'disabled', 'server', 'server-client'. Leaving it empty is synonymous to 'disabled', + http.default.auth.audience Expected audience for JWT tokens (default: hostname) + http.default.auth.authorizedkeyspath Path to an authorized_keys file for trusted JWT signers + http.default.auth.type Whether to enable authentication for the default interface, specify 'token_v2' for bearer token mode or 'token' for legacy bearer token mode. + http.default.cors.origin [] When set, enables CORS from the specified origins on the default HTTP interface. + **JSONLD** + jsonld.contexts.localmapping [https://nuts.nl/credentials/v1=assets/contexts/nuts.ldjson,https://www.w3.org/2018/credentials/v1=assets/contexts/w3c-credentials-v1.ldjson,https://w3c-ccg.github.io/lds-jws2020/contexts/lds-jws2020-v1.json=assets/contexts/lds-jws2020-v1.ldjson,https://schema.org=assets/contexts/schema-org-v13.ldjson] This setting allows mapping external URLs to local files for e.g. preventing external dependencies. These mappings have precedence over those in remoteallowlist. 
+ jsonld.contexts.remoteallowlist [https://schema.org,https://www.w3.org/2018/credentials/v1,https://w3c-ccg.github.io/lds-jws2020/contexts/lds-jws2020-v1.json] In strict mode, fetching external JSON-LD contexts is not allowed except for context-URLs listed here. + **Network** + network.bootstrapnodes [] List of bootstrap nodes (':') which the node initially connect to. + network.connectiontimeout 5000 Timeout before an outbound connection attempt times out (in milliseconds). + network.enablediscovery true Whether to enable automatic connecting to other nodes. + network.enabletls true Whether to enable TLS for gRPC connections, which can be disabled for demo/development purposes. It is NOT meant for TLS offloading (see 'tls.offload'). Disabling TLS is not allowed in strict-mode. + network.grpcaddr \:5555 Local address for gRPC to listen on. If empty the gRPC server won't be started and other nodes will not be able to connect to this node (outbound connections can still be made). + network.maxbackoff 24h0m0s Maximum between outbound connections attempts to unresponsive nodes (in Golang duration format, e.g. '1h', '30m'). + network.nodedid Specifies the DID of the organization that operates this node, typically a vendor for EPD software. It is used to identify the node on the network. If the DID document does not exist of is deactivated, the node will not start. + network.protocols [] Specifies the list of network protocols to enable on the server. They are specified by version (1, 2). If not set, all protocols are enabled. + network.v2.diagnosticsinterval 5000 Interval (in milliseconds) that specifies how often the node should broadcast its diagnostic information to other nodes (specify 0 to disable). + network.v2.gossipinterval 5000 Interval (in milliseconds) that specifies how often the node should gossip its new hashes to other nodes. + **PKI** + pki.maxupdatefailhours 4 Maximum number of hours that a denylist update can fail + pki.softfail true Do not reject certificates if their revocation status cannot be established when softfail is true + **Storage** + storage.bbolt.backup.directory Target directory for BBolt database backups. + storage.bbolt.backup.interval 0s Interval, formatted as Golang duration (e.g. 10m, 1h) at which BBolt database backups will be performed. + storage.redis.address Redis database server address. This can be a simple 'host:port' or a Redis connection URL with scheme, auth and other options. + storage.redis.database Redis database name, which is used as prefix every key. Can be used to have multiple instances use the same Redis instance. + storage.redis.password Redis database password. If set, it overrides the username in the connection URL. + storage.redis.username Redis database username. If set, it overrides the username in the connection URL. + storage.redis.sentinel.master Name of the Redis Sentinel master. Setting this property enables Redis Sentinel. + storage.redis.sentinel.nodes [] Addresses of the Redis Sentinels to connect to initially. Setting this property enables Redis Sentinel. + storage.redis.sentinel.password Password for authenticating to Redis Sentinels. + storage.redis.sentinel.username Username for authenticating to Redis Sentinels. + storage.redis.tls.truststorefile PEM file containing the trusted CA certificate(s) for authenticating remote Redis servers. Can only be used when connecting over TLS (use 'rediss://' as scheme in address). + storage.sql.connection Connection string for the SQL database. 
If not set, it defaults to a SQLite database stored inside the configured data directory. Note: using SQLite is not recommended in production environments. If using SQLite anyway, remember to enable foreign keys ('_foreign_keys=on') and the write-ahead-log ('_journal_mode=WAL').
+    **VCR**
+    vcr.openid4vci.definitionsdir                                      Directory with the additional credential definitions the node could issue (experimental, may change without notice).
+    vcr.openid4vci.enabled               true                          Enable issuing and receiving credentials over OpenID4VCI.
+    vcr.openid4vci.timeout               30s                           Time-out for OpenID4VCI HTTP client operations.
+    ==================================== =============================================================================================================================================================================================================================================================================================================== ================================================================================================================================================================================================================================================================================================================================
diff --git a/go.mod b/go.mod
index a07a960f0c..fffd1c8fbd 100644
--- a/go.mod
+++ b/go.mod
@@ -106,6 +106,7 @@ require (
 	github.com/inconshreveable/mousetrap v1.1.0 // indirect
 	github.com/jinzhu/gorm v1.9.16 // indirect
 	github.com/jinzhu/inflection v1.0.0 // indirect
+	github.com/jinzhu/now v1.1.5 // indirect
 	github.com/josharian/intern v1.0.0 // indirect
 	github.com/klauspost/compress v1.17.2 // indirect
 	github.com/kr/text v0.2.0 // indirect
diff --git a/makefile b/makefile
index 31b5d65065..49ff29af93 100644
--- a/makefile
+++ b/makefile
@@ -19,6 +19,7 @@ gen-mocks:
 	mockgen -destination=crypto/mock.go -package=crypto -source=crypto/interface.go
 	mockgen -destination=crypto/storage/spi/mock.go -package spi -source=crypto/storage/spi/interface.go
 	mockgen -destination=didman/mock.go -package=didman -source=didman/types.go
+	mockgen -destination=discovery/mock.go -package=discovery -source=discovery/interface.go
 	mockgen -destination=events/events_mock.go -package=events -source=events/interface.go Event
 	mockgen -destination=events/mock.go -package=events -source=events/conn.go Conn ConnectionPool
 	mockgen -destination=http/echo_mock.go -package=http -source=http/echo.go -imports echo=github.com/labstack/echo/v4
@@ -55,6 +56,7 @@ gen-mocks:
 	mockgen -destination=vdr/management/management_mock.go -package=management -source=vdr/management/management.go
 	mockgen -destination=vdr/management/finder_mock.go -package=management -source=vdr/management/finder.go
+
 gen-api:
 	oapi-codegen --config codegen/configs/common_ssi_types.yaml docs/_static/common/ssi_types.yaml | gofmt > api/ssi_types.go
 	oapi-codegen --config codegen/configs/crypto_v1.yaml -package v1 docs/_static/crypto/v1.yaml | gofmt > crypto/api/v1/generated.go
diff --git a/storage/cmd/cmd.go b/storage/cmd/cmd.go
index 9cf8d59626..535046274b 100644
--- a/storage/cmd/cmd.go
+++ b/storage/cmd/cmd.go
@@ -38,6 +38,9 @@ func FlagSet() *pflag.FlagSet {
 	flagSet.StringSlice("storage.redis.sentinel.nodes", defs.Redis.Sentinel.Nodes, "Addresses of the Redis Sentinels to connect to initially.
Setting this property enables Redis Sentinel.") flagSet.String("storage.redis.sentinel.username", defs.Redis.Sentinel.Username, "Username for authenticating to Redis Sentinels.") flagSet.String("storage.redis.sentinel.password", defs.Redis.Sentinel.Password, "Password for authenticating to Redis Sentinels.") - flagSet.String("storage.sql.connection", defs.SQL.ConnectionString, "Connection string for the SQL database. If not set, it defaults to a SQLite database stored inside the configured data directory") + flagSet.String("storage.sql.connection", defs.SQL.ConnectionString, "Connection string for the SQL database. "+ + "If not set it, defaults to a SQLite database stored inside the configured data directory. "+ + "Note: using SQLite is not recommended in production environments. "+ + "If using SQLite anyways, remember to enable foreign keys ('_foreign_keys=on') and the write-ahead-log ('_journal_mode=WAL').") return flagSet } diff --git a/storage/engine.go b/storage/engine.go index fecd2db7d6..17c1197c3a 100644 --- a/storage/engine.go +++ b/storage/engine.go @@ -162,8 +162,9 @@ func (e *engine) GetSQLDatabase() *gorm.DB { func (e *engine) initSQLDatabase() error { connectionString := e.config.SQL.ConnectionString if len(connectionString) == 0 { - connectionString = "file:" + path.Join(e.datadir, "sqlite.db") + connectionString = sqliteConnectionString(e.datadir) } + var err error e.sqlDB, err = gorm.Open(sqlite.Open(connectionString), &gorm.Config{}) if err != nil { @@ -195,6 +196,10 @@ func (e *engine) initSQLDatabase() error { return err } +func sqliteConnectionString(datadir string) string { + return "file:" + path.Join(datadir, "sqlite.db?_journal_mode=WAL&_foreign_keys=on") +} + type provider struct { moduleName string engine *engine diff --git a/storage/engine_test.go b/storage/engine_test.go index 82f1af7524..befd3bae6f 100644 --- a/storage/engine_test.go +++ b/storage/engine_test.go @@ -52,7 +52,7 @@ func Test_engine_lifecycle(t *testing.T) { func Test_engine_GetProvider(t *testing.T) { sut := New() - _ = sut.Configure(*core.NewServerConfig()) + _ = sut.Configure(core.ServerConfig{Datadir: io.TestDirectory(t)}) t.Run("moduleName is empty", func(t *testing.T) { store, err := sut.GetProvider("").GetKVStore("store", VolatileStorageClass) assert.Nil(t, store) @@ -62,7 +62,7 @@ func Test_engine_GetProvider(t *testing.T) { func Test_engine_GetKVStore(t *testing.T) { sut := New() - _ = sut.Configure(*core.NewServerConfig()) + _ = sut.Configure(core.ServerConfig{Datadir: io.TestDirectory(t)}) t.Run("store is empty", func(t *testing.T) { store, err := sut.GetProvider("engine").GetKVStore("", VolatileStorageClass) assert.Nil(t, store) @@ -132,8 +132,7 @@ func Test_engine_sqlDatabase(t *testing.T) { }) t.Run("runs migrations", func(t *testing.T) { e := New().(*engine) - e.config.SQL.ConnectionString = SQLiteInMemoryConnectionString - require.NoError(t, e.Configure(*core.NewServerConfig())) + require.NoError(t, e.Configure(core.ServerConfig{Datadir: io.TestDirectory(t)})) require.NoError(t, e.Start()) t.Cleanup(func() { _ = e.Shutdown() @@ -147,5 +146,4 @@ func Test_engine_sqlDatabase(t *testing.T) { assert.NoError(t, row.Scan(&count)) assert.Equal(t, 1, count) }) - } diff --git a/storage/mock.go b/storage/mock.go index a4aa1c0bfa..ecbfce1b17 100644 --- a/storage/mock.go +++ b/storage/mock.go @@ -69,32 +69,32 @@ func (mr *MockEngineMockRecorder) GetProvider(moduleName any) *gomock.Call { return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetProvider", 
reflect.TypeOf((*MockEngine)(nil).GetProvider), moduleName) } -// GetSessionDatabase mocks base method. -func (m *MockEngine) GetSessionDatabase() SessionDatabase { +// GetSQLDatabase mocks base method. +func (m *MockEngine) GetSQLDatabase() *gorm.DB { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetSessionDatabase") - ret0, _ := ret[0].(SessionDatabase) + ret := m.ctrl.Call(m, "GetSQLDatabase") + ret0, _ := ret[0].(*gorm.DB) return ret0 } -// GetSessionDatabase indicates an expected call of GetSessionDatabase. -func (mr *MockEngineMockRecorder) GetSessionDatabase() *gomock.Call { +// GetSQLDatabase indicates an expected call of GetSQLDatabase. +func (mr *MockEngineMockRecorder) GetSQLDatabase() *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetSessionDatabase", reflect.TypeOf((*MockEngine)(nil).GetSessionDatabase)) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetSQLDatabase", reflect.TypeOf((*MockEngine)(nil).GetSQLDatabase)) } -// SQLDatabase mocks base method. -func (m *MockEngine) GetSQLDatabase() *gorm.DB { +// GetSessionDatabase mocks base method. +func (m *MockEngine) GetSessionDatabase() SessionDatabase { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetSQLDatabase") - ret0, _ := ret[0].(*gorm.DB) + ret := m.ctrl.Call(m, "GetSessionDatabase") + ret0, _ := ret[0].(SessionDatabase) return ret0 } -// SQLDatabase indicates an expected call of SQLDatabase. -func (mr *MockEngineMockRecorder) SQLDatabase() *gomock.Call { +// GetSessionDatabase indicates an expected call of GetSessionDatabase. +func (mr *MockEngineMockRecorder) GetSessionDatabase() *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetSQLDatabase", reflect.TypeOf((*MockEngine)(nil).GetSQLDatabase)) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetSessionDatabase", reflect.TypeOf((*MockEngine)(nil).GetSessionDatabase)) } // Shutdown mocks base method. diff --git a/storage/sql_migrations/2_discoveryservice.down.sql b/storage/sql_migrations/2_discoveryservice.down.sql new file mode 100644 index 0000000000..02fe661c06 --- /dev/null +++ b/storage/sql_migrations/2_discoveryservice.down.sql @@ -0,0 +1,4 @@ +drop table discovery_service; +drop table discovery_presentation; +drop table discovery_credential; +drop table discovery_credential_prop; \ No newline at end of file diff --git a/storage/sql_migrations/2_discoveryservice.up.sql b/storage/sql_migrations/2_discoveryservice.up.sql new file mode 100644 index 0000000000..d2e87102fa --- /dev/null +++ b/storage/sql_migrations/2_discoveryservice.up.sql @@ -0,0 +1,53 @@ +-- discovery contains the known discovery services and the highest timestamp +create table discovery_service +( + -- id is the unique identifier for the service. It comes from the service definition. 
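+    -- lamport_timestamp is the highest timestamp handed out to presentations of this service (the "highest timestamp" mentioned in the table comment above);
+    -- presumably it lets clients request only entries newer than the last timestamp they have already seen.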
+    id                      varchar(200) not null primary key,
+    lamport_timestamp       integer      not null
+);
+
+-- discovery_presentation contains the presentations of the discovery services
+create table discovery_presentation
+(
+    id                      varchar(36) not null primary key,
+    service_id              varchar(36) not null,
+    lamport_timestamp       integer     not null,
+    credential_subject_id   varchar     not null,
+    presentation_id         varchar     not null,
+    presentation_raw        varchar     not null,
+    presentation_expiration integer     not null,
+    unique (service_id, credential_subject_id),
+    constraint fk_discovery_presentation_service_id foreign key (service_id) references discovery_service (id) on delete cascade
+);
+-- index for the presentation_expiration column, used by prune()
+create index idx_discovery_presentation_expiration on discovery_presentation (presentation_expiration);
+
+-- discovery_credential is a credential in a presentation of the discovery service.
+-- We could do without this table, but having it lets us store credential properties that appear on every credential in normalized, indexed columns.
+-- That way we don't need rows in the properties table for them (a column is faster than a row in the properties table, which needs to be joined).
+create table discovery_credential
+(
+    id                    varchar(36) not null primary key,
+    -- presentation_id is NOT the ID of the presentation (VerifiablePresentation.ID), but refers to the presentation record in the discovery_presentation table.
+    presentation_id       varchar(36) not null,
+    credential_id         varchar     not null,
+    credential_issuer     varchar     not null,
+    credential_subject_id varchar     not null,
+    -- For now, credentials with at most 2 types are supported.
+    -- The type stored in the credential_type column is the 'other' type, i.e. not 'VerifiableCredential'.
+    -- If credentials with 3 or more types appear, we may have to use a separate table for the types.
+    credential_type       varchar,
+    constraint fk_discovery_credential_presentation foreign key (presentation_id) references discovery_presentation (id) on delete cascade
+);
+
+-- discovery_credential_prop contains the credentialSubject properties of a credential in a presentation of the discovery service.
+-- It is used by clients to search for presentations; the comment block below sketches such a query.
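+-- Illustrative sketch only, not part of the schema: the kind of search query a lookup could run against these tables.
+-- The actual query is built in Go (see discovery/store.go in this patch); the service id and the property key/value below are made-up examples.
+--
+--   select p.presentation_raw
+--   from discovery_presentation p
+--            join discovery_credential c on c.presentation_id = p.id
+--            join discovery_credential_prop cp on cp.credential_id = c.id
+--   where p.service_id = 'example_service'
+--     and cp.key = 'organization.name'
+--     and cp.value = 'Example Hospital';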
+create table discovery_credential_prop +( + credential_id varchar(36) not null, + key varchar not null, + value varchar, + PRIMARY KEY (credential_id, key), + -- cascading delete: if the presentation gets deleted, the properties get deleted as well + constraint fk_discovery_credential_id foreign key (credential_id) references discovery_credential (id) on delete cascade +); \ No newline at end of file diff --git a/storage/test.go b/storage/test.go index 34cc9a0ef7..0366208ea0 100644 --- a/storage/test.go +++ b/storage/test.go @@ -28,12 +28,10 @@ import ( "testing" ) -// SQLiteInMemoryConnectionString is a connection string for an in-memory SQLite database -const SQLiteInMemoryConnectionString = "file::memory:?cache=shared" - func NewTestStorageEngineInDir(dir string) Engine { result := New().(*engine) - result.config.SQL = SQLConfig{ConnectionString: SQLiteInMemoryConnectionString} + + result.config.SQL = SQLConfig{ConnectionString: sqliteConnectionString(dir)} _ = result.Configure(core.TestServerConfig(core.ServerConfig{Datadir: dir + "/data"})) return result } diff --git a/vcr/pe/schema/v2/schema.go b/vcr/pe/schema/v2/schema.go index 44de55fdd8..c1db1c59a5 100644 --- a/vcr/pe/schema/v2/schema.go +++ b/vcr/pe/schema/v2/schema.go @@ -51,17 +51,23 @@ var PresentationDefinition *jsonschema.Schema // PresentationSubmission is the JSON schema for a presentation submission. var PresentationSubmission *jsonschema.Schema +// Compiler returns a JSON schema compiler with the Presentation Exchange schemas loaded. +func Compiler() *jsonschema.Compiler { + compiler := jsonschema.NewCompiler() + compiler.Draft = jsonschema.Draft7 + if err := loadSchemas(schemaFiles, compiler); err != nil { + panic(err) + } + return compiler +} + func init() { // By default, it loads from filesystem, but that sounds unsafe. // Since register our schemas, we don't need to allow loading resources. loader.Load = func(url string) (io.ReadCloser, error) { return nil, fmt.Errorf("refusing to load unknown schema: %s", url) } - compiler := jsonschema.NewCompiler() - compiler.Draft = jsonschema.Draft7 - if err := loadSchemas(schemaFiles, compiler); err != nil { - panic(err) - } + compiler := Compiler() PresentationDefinition = compiler.MustCompile(presentationDefinition) PresentationSubmission = compiler.MustCompile(presentationSubmission) } diff --git a/vcr/pe/test/definition_mapping.json b/vcr/pe/test/definition_mapping.json index b543faa577..5459a3ea65 100644 --- a/vcr/pe/test/definition_mapping.json +++ b/vcr/pe/test/definition_mapping.json @@ -20,6 +20,16 @@ "const": "NutsOrganizationCredential" } }, + { + "path": ["$.issuer"], + "filter": { + "type": "string", + "filter": { + "type": "string", + "pattern": "^did:example:123456789abcdefghi$" + } + } + }, { "path": ["$.credentialSubject.organization.name"], "filter": { From 79ded218828f38088327f309bfda0096eb0d5de2 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 30 Nov 2023 15:01:07 +0100 Subject: [PATCH 2/2] Bump github.com/nuts-foundation/go-leia/v4 from 4.0.0 to 4.0.1 (#2632) Bumps [github.com/nuts-foundation/go-leia/v4](https://github.com/nuts-foundation/go-leia) from 4.0.0 to 4.0.1. - [Release notes](https://github.com/nuts-foundation/go-leia/releases) - [Commits](https://github.com/nuts-foundation/go-leia/compare/v4.0.0...v4.0.1) --- updated-dependencies: - dependency-name: github.com/nuts-foundation/go-leia/v4 dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index fffd1c8fbd..4c00e45faf 100644 --- a/go.mod +++ b/go.mod @@ -26,7 +26,7 @@ require ( github.com/nats-io/nats.go v1.31.0 github.com/nuts-foundation/crypto-ecies v0.0.0-20211207143025-5b84f9efce2b github.com/nuts-foundation/go-did v0.9.0 - github.com/nuts-foundation/go-leia/v4 v4.0.0 + github.com/nuts-foundation/go-leia/v4 v4.0.1 github.com/nuts-foundation/go-stoabs v1.9.0 // check the oapi-codegen tool version in the makefile when upgrading the runtime github.com/oapi-codegen/runtime v1.1.0 diff --git a/go.sum b/go.sum index 19168e8001..0dd076eccb 100644 --- a/go.sum +++ b/go.sum @@ -450,8 +450,8 @@ github.com/nuts-foundation/crypto-ecies v0.0.0-20211207143025-5b84f9efce2b h1:80 github.com/nuts-foundation/crypto-ecies v0.0.0-20211207143025-5b84f9efce2b/go.mod h1:6YUioYirD6/8IahZkoS4Ypc8xbeJW76Xdk1QKcziNTM= github.com/nuts-foundation/go-did v0.9.0 h1:JBz1cYaMxplKZ31QyWierrR3Yt2RIpaxZTt8KFm4Ph4= github.com/nuts-foundation/go-did v0.9.0/go.mod h1:L39mh6SBsuenqeZw2JxARx4a/bwdARwchG2x3zPMTjc= -github.com/nuts-foundation/go-leia/v4 v4.0.0 h1:/unYCk18qGG2HWcJK4ld4CaM6k7Tdr0bR1vQd1Jwfcg= -github.com/nuts-foundation/go-leia/v4 v4.0.0/go.mod h1:A246dA4nhY99OPCQpG/XbQ/iPyyfSaJchanivuPWpao= +github.com/nuts-foundation/go-leia/v4 v4.0.1 h1:+Sbk3Bew1QnRUqRXSOwomMw3nIZgncmTX425J7U5Q34= +github.com/nuts-foundation/go-leia/v4 v4.0.1/go.mod h1:eaZuWIolpU61TMvTMcen85+SOEOnHiALdg5SxqLXzz8= github.com/nuts-foundation/go-stoabs v1.9.0 h1:zK+ugfolaJYyBvGwsRuavLVdycXk4Yw/1gI+tz17lWQ= github.com/nuts-foundation/go-stoabs v1.9.0/go.mod h1:htbUqSZiaihqAvJfHwtAbQusGaJtIeWpm1pmKjBYXlM= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
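For reference: the new sqliteConnectionString helper in storage/engine.go (PATCH 1/2) is what makes the SQLite default honour the '_foreign_keys=on' and '_journal_mode=WAL' advice from the updated flag description. The minimal standalone sketch below only mirrors that string construction; the './data' datadir is an example, and the node itself opens the resulting DSN through gorm in initSQLDatabase.

    package main

    import (
        "fmt"
        "path"
    )

    // Mirrors the helper added in storage/engine.go: the default SQLite DSN
    // enables the write-ahead log and foreign key enforcement.
    func sqliteConnectionString(datadir string) string {
        return "file:" + path.Join(datadir, "sqlite.db?_journal_mode=WAL&_foreign_keys=on")
    }

    func main() {
        // "./data" is only an example datadir.
        fmt.Println(sqliteConnectionString("./data"))
        // Prints: file:data/sqlite.db?_journal_mode=WAL&_foreign_keys=on
    }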