From 5aab5944c862131a50cb62c1fd4146a49a8bf0a2 Mon Sep 17 00:00:00 2001
From: Robin Sonefors
Date: Mon, 11 Apr 2022 16:52:03 +0100
Subject: [PATCH] Make namespace cache mockable when starting server

This means we can start a server and send it a request without spinning
up a k8s API server. The resulting server isn't very useful on its own,
but it lets us test things like "are the endpoints present?".
---
 cmd/gitops-server/cmd/cmd.go                  |   8 +-
 core/cache/cachefakes/fake_container.go       | 212 +++++++++++++++
 core/cache/container.go                       |  28 +-
 core/clustersmngr/clustersmngr.go             |   1 +
 .../clustersmngrfakes/fake_clients_pool.go    | 257 ++++++++++++++++++
 core/server/server.go                         |  31 +--
 core/server/server_test.go                    |  74 +++++
 core/server/suite_test.go                     |   7 +-
 8 files changed, 590 insertions(+), 28 deletions(-)
 create mode 100644 core/cache/cachefakes/fake_container.go
 create mode 100644 core/clustersmngr/clustersmngrfakes/fake_clients_pool.go
 create mode 100644 core/server/server_test.go

diff --git a/cmd/gitops-server/cmd/cmd.go b/cmd/gitops-server/cmd/cmd.go
index f101f5b668..da8660789c 100644
--- a/cmd/gitops-server/cmd/cmd.go
+++ b/cmd/gitops-server/cmd/cmd.go
@@ -20,6 +20,7 @@ import (
     "github.com/spf13/cobra"
     "github.com/weaveworks/weave-gitops/api/v1alpha1"
     "github.com/weaveworks/weave-gitops/cmd/gitops/cmderrors"
+    corecache "github.com/weaveworks/weave-gitops/core/cache"
     "github.com/weaveworks/weave-gitops/core/logger"
     core "github.com/weaveworks/weave-gitops/core/server"
     "github.com/weaveworks/weave-gitops/pkg/helm/watcher"
@@ -193,7 +194,12 @@ func runCmd(cmd *cobra.Command, args []string) error {
         authServer = srv
     }
 
-    coreConfig := core.NewCoreConfig(log, rest, clusterName)
+    cache, err := corecache.NewContainer(context.Background(), rest, log)
+    if err != nil {
+        return fmt.Errorf("could not create cache container: %w", err)
+    }
+
+    coreConfig := core.NewCoreConfig(log, rest, cache, clusterName)
 
     appConfig, err := server.DefaultApplicationsConfig(log)
     if err != nil {
diff --git a/core/cache/cachefakes/fake_container.go b/core/cache/cachefakes/fake_container.go
new file mode 100644
index 0000000000..ae97c5143c
--- /dev/null
+++ b/core/cache/cachefakes/fake_container.go
@@ -0,0 +1,212 @@
+// Code generated by counterfeiter. DO NOT EDIT.
+package cachefakes
+
+import (
+    "context"
+    "sync"
+
+    "github.com/weaveworks/weave-gitops/core/cache"
+    v1 "k8s.io/api/core/v1"
+)
+
+type FakeContainer struct {
+    ForceRefreshStub        func(cache.StorageType)
+    forceRefreshMutex       sync.RWMutex
+    forceRefreshArgsForCall []struct {
+        arg1 cache.StorageType
+    }
+    NamespacesStub        func() map[string][]v1.Namespace
+    namespacesMutex       sync.RWMutex
+    namespacesArgsForCall []struct {
+    }
+    namespacesReturns struct {
+        result1 map[string][]v1.Namespace
+    }
+    namespacesReturnsOnCall map[int]struct {
+        result1 map[string][]v1.Namespace
+    }
+    StartStub        func(context.Context)
+    startMutex       sync.RWMutex
+    startArgsForCall []struct {
+        arg1 context.Context
+    }
+    StopStub        func()
+    stopMutex       sync.RWMutex
+    stopArgsForCall []struct {
+    }
+    invocations      map[string][][]interface{}
+    invocationsMutex sync.RWMutex
+}
+
+func (fake *FakeContainer) ForceRefresh(arg1 cache.StorageType) {
+    fake.forceRefreshMutex.Lock()
+    fake.forceRefreshArgsForCall = append(fake.forceRefreshArgsForCall, struct {
+        arg1 cache.StorageType
+    }{arg1})
+    stub := fake.ForceRefreshStub
+    fake.recordInvocation("ForceRefresh", []interface{}{arg1})
+    fake.forceRefreshMutex.Unlock()
+    if stub != nil {
+        fake.ForceRefreshStub(arg1)
+    }
+}
+
+func (fake *FakeContainer) ForceRefreshCallCount() int {
+    fake.forceRefreshMutex.RLock()
+    defer fake.forceRefreshMutex.RUnlock()
+    return len(fake.forceRefreshArgsForCall)
+}
+
+func (fake *FakeContainer) ForceRefreshCalls(stub func(cache.StorageType)) {
+    fake.forceRefreshMutex.Lock()
+    defer fake.forceRefreshMutex.Unlock()
+    fake.ForceRefreshStub = stub
+}
+
+func (fake *FakeContainer) ForceRefreshArgsForCall(i int) cache.StorageType {
+    fake.forceRefreshMutex.RLock()
+    defer fake.forceRefreshMutex.RUnlock()
+    argsForCall := fake.forceRefreshArgsForCall[i]
+    return argsForCall.arg1
+}
+
+func (fake *FakeContainer) Namespaces() map[string][]v1.Namespace {
+    fake.namespacesMutex.Lock()
+    ret, specificReturn := fake.namespacesReturnsOnCall[len(fake.namespacesArgsForCall)]
+    fake.namespacesArgsForCall = append(fake.namespacesArgsForCall, struct {
+    }{})
+    stub := fake.NamespacesStub
+    fakeReturns := fake.namespacesReturns
+    fake.recordInvocation("Namespaces", []interface{}{})
+    fake.namespacesMutex.Unlock()
+    if stub != nil {
+        return stub()
+    }
+    if specificReturn {
+        return ret.result1
+    }
+    return fakeReturns.result1
+}
+
+func (fake *FakeContainer) NamespacesCallCount() int {
+    fake.namespacesMutex.RLock()
+    defer fake.namespacesMutex.RUnlock()
+    return len(fake.namespacesArgsForCall)
+}
+
+func (fake *FakeContainer) NamespacesCalls(stub func() map[string][]v1.Namespace) {
+    fake.namespacesMutex.Lock()
+    defer fake.namespacesMutex.Unlock()
+    fake.NamespacesStub = stub
+}
+
+func (fake *FakeContainer) NamespacesReturns(result1 map[string][]v1.Namespace) {
+    fake.namespacesMutex.Lock()
+    defer fake.namespacesMutex.Unlock()
+    fake.NamespacesStub = nil
+    fake.namespacesReturns = struct {
+        result1 map[string][]v1.Namespace
+    }{result1}
+}
+
+func (fake *FakeContainer) NamespacesReturnsOnCall(i int, result1 map[string][]v1.Namespace) {
+    fake.namespacesMutex.Lock()
+    defer fake.namespacesMutex.Unlock()
+    fake.NamespacesStub = nil
+    if fake.namespacesReturnsOnCall == nil {
+        fake.namespacesReturnsOnCall = make(map[int]struct {
+            result1 map[string][]v1.Namespace
+        })
+    }
+    fake.namespacesReturnsOnCall[i] = struct {
+        result1 map[string][]v1.Namespace
+    }{result1}
+}
+
+func (fake *FakeContainer) Start(arg1 context.Context) {
+    fake.startMutex.Lock()
+    fake.startArgsForCall = append(fake.startArgsForCall, struct {
+        arg1 context.Context
+    }{arg1})
+    stub := fake.StartStub
+    fake.recordInvocation("Start", []interface{}{arg1})
+    fake.startMutex.Unlock()
+    if stub != nil {
+        fake.StartStub(arg1)
+    }
+}
+
+func (fake *FakeContainer) StartCallCount() int {
+    fake.startMutex.RLock()
+    defer fake.startMutex.RUnlock()
+    return len(fake.startArgsForCall)
+}
+
+func (fake *FakeContainer) StartCalls(stub func(context.Context)) {
+    fake.startMutex.Lock()
+    defer fake.startMutex.Unlock()
+    fake.StartStub = stub
+}
+
+func (fake *FakeContainer) StartArgsForCall(i int) context.Context {
+    fake.startMutex.RLock()
+    defer fake.startMutex.RUnlock()
+    argsForCall := fake.startArgsForCall[i]
+    return argsForCall.arg1
+}
+
+func (fake *FakeContainer) Stop() {
+    fake.stopMutex.Lock()
+    fake.stopArgsForCall = append(fake.stopArgsForCall, struct {
+    }{})
+    stub := fake.StopStub
+    fake.recordInvocation("Stop", []interface{}{})
+    fake.stopMutex.Unlock()
+    if stub != nil {
+        fake.StopStub()
+    }
+}
+
+func (fake *FakeContainer) StopCallCount() int {
+    fake.stopMutex.RLock()
+    defer fake.stopMutex.RUnlock()
+    return len(fake.stopArgsForCall)
+}
+
+func (fake *FakeContainer) StopCalls(stub func()) {
+    fake.stopMutex.Lock()
+    defer fake.stopMutex.Unlock()
+    fake.StopStub = stub
+}
+
+func (fake *FakeContainer) Invocations() map[string][][]interface{} {
+    fake.invocationsMutex.RLock()
+    defer fake.invocationsMutex.RUnlock()
+    fake.forceRefreshMutex.RLock()
+    defer fake.forceRefreshMutex.RUnlock()
+    fake.namespacesMutex.RLock()
+    defer fake.namespacesMutex.RUnlock()
+    fake.startMutex.RLock()
+    defer fake.startMutex.RUnlock()
+    fake.stopMutex.RLock()
+    defer fake.stopMutex.RUnlock()
+    copiedInvocations := map[string][][]interface{}{}
+    for key, value := range fake.invocations {
+        copiedInvocations[key] = value
+    }
+    return copiedInvocations
+}
+
+func (fake *FakeContainer) recordInvocation(key string, args []interface{}) {
+    fake.invocationsMutex.Lock()
+    defer fake.invocationsMutex.Unlock()
+    if fake.invocations == nil {
+        fake.invocations = map[string][][]interface{}{}
+    }
+    if fake.invocations[key] == nil {
+        fake.invocations[key] = [][]interface{}{}
+    }
+    fake.invocations[key] = append(fake.invocations[key], args)
+}
+
+var _ cache.Container = new(FakeContainer)
diff --git a/core/cache/container.go b/core/cache/container.go
index 262a6c6c2c..0ffda77489 100644
--- a/core/cache/container.go
+++ b/core/cache/container.go
@@ -10,20 +10,30 @@ import (
     "k8s.io/client-go/rest"
 )
 
+//go:generate go run github.com/maxbrunsfeld/counterfeiter/v6 -generate
+
 const (
     NamespaceStorage StorageType = "namespace"
 )
 
 type StorageType string
 
-type Container struct {
+//counterfeiter:generate . Container
+type Container interface {
+    Start(ctx context.Context)
+    Stop()
+    ForceRefresh(name StorageType)
+    Namespaces() map[string][]v1.Namespace
+}
+
+type container struct {
     namespace namespaceStore
     logger    logr.Logger
 }
 
-var globalCacheContainer *Container
+var globalCacheContainer Container
 
-func NewContainer(ctx context.Context, restCfg *rest.Config, logger logr.Logger) (*Container, error) {
+func NewContainer(ctx context.Context, restCfg *rest.Config, logger logr.Logger) (Container, error) {
     if globalCacheContainer != nil {
         return globalCacheContainer, nil
     }
@@ -33,7 +43,7 @@ func NewContainer(ctx context.Context, restCfg *rest.Config, logger logr.Logger)
         return nil, err
     }
 
-    globalCacheContainer = &Container{
+    globalCacheContainer = &container{
         namespace: newNamespaceStore(clusterClient, logger),
         logger:    logger,
     }
@@ -41,26 +51,26 @@ func NewContainer(ctx context.Context, restCfg *rest.Config, logger logr.Logger)
     return globalCacheContainer, nil
 }
 
-func GlobalContainer() *Container {
+func GlobalContainer() Container {
     return globalCacheContainer
 }
 
-func (c *Container) Start(ctx context.Context) {
+func (c *container) Start(ctx context.Context) {
     c.namespace.Start(ctx)
 }
 
-func (c *Container) Stop() {
+func (c *container) Stop() {
     c.namespace.Stop()
 }
 
-func (c *Container) ForceRefresh(name StorageType) {
+func (c *container) ForceRefresh(name StorageType) {
     switch name {
     case NamespaceStorage:
         c.namespace.ForceRefresh()
     }
 }
 
-func (c *Container) Namespaces() map[string][]v1.Namespace {
+func (c *container) Namespaces() map[string][]v1.Namespace {
     return c.namespace.Namespaces()
 }
diff --git a/core/clustersmngr/clustersmngr.go b/core/clustersmngr/clustersmngr.go
index 7d155db0d7..0c0fd88f73 100644
--- a/core/clustersmngr/clustersmngr.go
+++ b/core/clustersmngr/clustersmngr.go
@@ -55,6 +55,7 @@ type ClusterFetcher interface {
 }
 
 // ClientsPool stores all clients to the leaf clusters
+//counterfeiter:generate . ClientsPool
 type ClientsPool interface {
     Add(cfg ClusterClientConfig, cluster Cluster) error
     Clients() map[string]ClusterClient
diff --git a/core/clustersmngr/clustersmngrfakes/fake_clients_pool.go b/core/clustersmngr/clustersmngrfakes/fake_clients_pool.go
new file mode 100644
index 0000000000..aec549785e
--- /dev/null
+++ b/core/clustersmngr/clustersmngrfakes/fake_clients_pool.go
@@ -0,0 +1,257 @@
+// Code generated by counterfeiter. DO NOT EDIT.
+package clustersmngrfakes
+
+import (
+    "sync"
+
+    "github.com/weaveworks/weave-gitops/core/clustersmngr"
+)
+
+type FakeClientsPool struct {
+    AddStub        func(clustersmngr.ClusterClientConfig, clustersmngr.Cluster) error
+    addMutex       sync.RWMutex
+    addArgsForCall []struct {
+        arg1 clustersmngr.ClusterClientConfig
+        arg2 clustersmngr.Cluster
+    }
+    addReturns struct {
+        result1 error
+    }
+    addReturnsOnCall map[int]struct {
+        result1 error
+    }
+    ClientStub        func(string) (clustersmngr.ClusterClient, error)
+    clientMutex       sync.RWMutex
+    clientArgsForCall []struct {
+        arg1 string
+    }
+    clientReturns struct {
+        result1 clustersmngr.ClusterClient
+        result2 error
+    }
+    clientReturnsOnCall map[int]struct {
+        result1 clustersmngr.ClusterClient
+        result2 error
+    }
+    ClientsStub        func() map[string]clustersmngr.ClusterClient
+    clientsMutex       sync.RWMutex
+    clientsArgsForCall []struct {
+    }
+    clientsReturns struct {
+        result1 map[string]clustersmngr.ClusterClient
+    }
+    clientsReturnsOnCall map[int]struct {
+        result1 map[string]clustersmngr.ClusterClient
+    }
+    invocations      map[string][][]interface{}
+    invocationsMutex sync.RWMutex
+}
+
+func (fake *FakeClientsPool) Add(arg1 clustersmngr.ClusterClientConfig, arg2 clustersmngr.Cluster) error {
+    fake.addMutex.Lock()
+    ret, specificReturn := fake.addReturnsOnCall[len(fake.addArgsForCall)]
+    fake.addArgsForCall = append(fake.addArgsForCall, struct {
+        arg1 clustersmngr.ClusterClientConfig
+        arg2 clustersmngr.Cluster
+    }{arg1, arg2})
+    stub := fake.AddStub
+    fakeReturns := fake.addReturns
+    fake.recordInvocation("Add", []interface{}{arg1, arg2})
+    fake.addMutex.Unlock()
+    if stub != nil {
+        return stub(arg1, arg2)
+    }
+    if specificReturn {
+        return ret.result1
+    }
+    return fakeReturns.result1
+}
+
+func (fake *FakeClientsPool) AddCallCount() int {
+    fake.addMutex.RLock()
+    defer fake.addMutex.RUnlock()
+    return len(fake.addArgsForCall)
+}
+
+func (fake *FakeClientsPool) AddCalls(stub func(clustersmngr.ClusterClientConfig, clustersmngr.Cluster) error) {
+    fake.addMutex.Lock()
+    defer fake.addMutex.Unlock()
+    fake.AddStub = stub
+}
+
+func (fake *FakeClientsPool) AddArgsForCall(i int) (clustersmngr.ClusterClientConfig, clustersmngr.Cluster) {
+    fake.addMutex.RLock()
+    defer fake.addMutex.RUnlock()
+    argsForCall := fake.addArgsForCall[i]
+    return argsForCall.arg1, argsForCall.arg2
+}
+
+func (fake *FakeClientsPool) AddReturns(result1 error) {
+    fake.addMutex.Lock()
+    defer fake.addMutex.Unlock()
+    fake.AddStub = nil
+    fake.addReturns = struct {
+        result1 error
+    }{result1}
+}
+
+func (fake *FakeClientsPool) AddReturnsOnCall(i int, result1 error) {
+    fake.addMutex.Lock()
+    defer fake.addMutex.Unlock()
+    fake.AddStub = nil
+    if fake.addReturnsOnCall == nil {
+        fake.addReturnsOnCall = make(map[int]struct {
+            result1 error
+        })
+    }
+    fake.addReturnsOnCall[i] = struct {
+        result1 error
+    }{result1}
+}
+
+func (fake *FakeClientsPool) Client(arg1 string) (clustersmngr.ClusterClient, error) {
+    fake.clientMutex.Lock()
+    ret, specificReturn := fake.clientReturnsOnCall[len(fake.clientArgsForCall)]
+    fake.clientArgsForCall = append(fake.clientArgsForCall, struct {
+        arg1 string
+    }{arg1})
+    stub := fake.ClientStub
+    fakeReturns := fake.clientReturns
+    fake.recordInvocation("Client", []interface{}{arg1})
+    fake.clientMutex.Unlock()
+    if stub != nil {
+        return stub(arg1)
+    }
+    if specificReturn {
+        return ret.result1, ret.result2
+    }
+    return fakeReturns.result1, fakeReturns.result2
+}
+
+func (fake *FakeClientsPool) ClientCallCount() int {
+    fake.clientMutex.RLock()
+    defer fake.clientMutex.RUnlock()
+    return len(fake.clientArgsForCall)
+}
+
+func (fake *FakeClientsPool) ClientCalls(stub func(string) (clustersmngr.ClusterClient, error)) {
+    fake.clientMutex.Lock()
+    defer fake.clientMutex.Unlock()
+    fake.ClientStub = stub
+}
+
+func (fake *FakeClientsPool) ClientArgsForCall(i int) string {
+    fake.clientMutex.RLock()
+    defer fake.clientMutex.RUnlock()
+    argsForCall := fake.clientArgsForCall[i]
+    return argsForCall.arg1
+}
+
+func (fake *FakeClientsPool) ClientReturns(result1 clustersmngr.ClusterClient, result2 error) {
+    fake.clientMutex.Lock()
+    defer fake.clientMutex.Unlock()
+    fake.ClientStub = nil
+    fake.clientReturns = struct {
+        result1 clustersmngr.ClusterClient
+        result2 error
+    }{result1, result2}
+}
+
+func (fake *FakeClientsPool) ClientReturnsOnCall(i int, result1 clustersmngr.ClusterClient, result2 error) {
+    fake.clientMutex.Lock()
+    defer fake.clientMutex.Unlock()
+    fake.ClientStub = nil
+    if fake.clientReturnsOnCall == nil {
+        fake.clientReturnsOnCall = make(map[int]struct {
+            result1 clustersmngr.ClusterClient
+            result2 error
+        })
+    }
+    fake.clientReturnsOnCall[i] = struct {
+        result1 clustersmngr.ClusterClient
+        result2 error
+    }{result1, result2}
+}
+
+func (fake *FakeClientsPool) Clients() map[string]clustersmngr.ClusterClient {
+    fake.clientsMutex.Lock()
+    ret, specificReturn := fake.clientsReturnsOnCall[len(fake.clientsArgsForCall)]
+    fake.clientsArgsForCall = append(fake.clientsArgsForCall, struct {
+    }{})
+    stub := fake.ClientsStub
+    fakeReturns := fake.clientsReturns
+    fake.recordInvocation("Clients", []interface{}{})
+    fake.clientsMutex.Unlock()
+    if stub != nil {
+        return stub()
+    }
+    if specificReturn {
+        return ret.result1
+    }
+    return fakeReturns.result1
+}
+
+func (fake *FakeClientsPool) ClientsCallCount() int {
+    fake.clientsMutex.RLock()
+    defer fake.clientsMutex.RUnlock()
+    return len(fake.clientsArgsForCall)
+}
+
+func (fake *FakeClientsPool) ClientsCalls(stub func() map[string]clustersmngr.ClusterClient) {
+    fake.clientsMutex.Lock()
+    defer fake.clientsMutex.Unlock()
+    fake.ClientsStub = stub
+}
+
+func (fake *FakeClientsPool) ClientsReturns(result1 map[string]clustersmngr.ClusterClient) {
+    fake.clientsMutex.Lock()
+    defer fake.clientsMutex.Unlock()
+    fake.ClientsStub = nil
+    fake.clientsReturns = struct {
+        result1 map[string]clustersmngr.ClusterClient
+    }{result1}
+}
+
+func (fake *FakeClientsPool) ClientsReturnsOnCall(i int, result1 map[string]clustersmngr.ClusterClient) {
+    fake.clientsMutex.Lock()
+    defer fake.clientsMutex.Unlock()
+    fake.ClientsStub = nil
+    if fake.clientsReturnsOnCall == nil {
+        fake.clientsReturnsOnCall = make(map[int]struct {
+            result1 map[string]clustersmngr.ClusterClient
+        })
+    }
+    fake.clientsReturnsOnCall[i] = struct {
+        result1 map[string]clustersmngr.ClusterClient
+    }{result1}
+}
+
+func (fake *FakeClientsPool) Invocations() map[string][][]interface{} {
+    fake.invocationsMutex.RLock()
+    defer fake.invocationsMutex.RUnlock()
+    fake.addMutex.RLock()
+    defer fake.addMutex.RUnlock()
+    fake.clientMutex.RLock()
+    defer fake.clientMutex.RUnlock()
+    fake.clientsMutex.RLock()
+    defer fake.clientsMutex.RUnlock()
+    copiedInvocations := map[string][][]interface{}{}
+    for key, value := range fake.invocations {
+        copiedInvocations[key] = value
+    }
+    return copiedInvocations
+}
+
+func (fake *FakeClientsPool) recordInvocation(key string, args []interface{}) {
+    fake.invocationsMutex.Lock()
+    defer fake.invocationsMutex.Unlock()
+    if fake.invocations == nil {
+        fake.invocations = map[string][][]interface{}{}
+    }
+    if fake.invocations[key] == nil {
+        fake.invocations[key] = [][]interface{}{}
+    }
+    fake.invocations[key] = append(fake.invocations[key], args)
+}
+
+var _ clustersmngr.ClientsPool = new(FakeClientsPool)
diff --git a/core/server/server.go b/core/server/server.go
index 4368d28651..f997e08371 100644
--- a/core/server/server.go
+++ b/core/server/server.go
@@ -38,24 +38,26 @@ type coreServer struct {
     pb.UnimplementedCoreServer
 
     k8s            kube.ClientGetter
-    cacheContainer *cache.Container
+    cacheContainer cache.Container
     logger         logr.Logger
     nsChecker      nsaccess.Checker
 }
 
 type CoreServerConfig struct {
-    log         logr.Logger
-    RestCfg     *rest.Config
-    clusterName string
-    NSAccess    nsaccess.Checker
+    log            logr.Logger
+    RestCfg        *rest.Config
+    clusterName    string
+    NSAccess       nsaccess.Checker
+    cacheContainer cache.Container
 }
 
-func NewCoreConfig(log logr.Logger, cfg *rest.Config, clusterName string) CoreServerConfig {
+func NewCoreConfig(log logr.Logger, cfg *rest.Config, cacheContainer cache.Container, clusterName string) CoreServerConfig {
     return CoreServerConfig{
-        log:         log.WithName("core-server"),
-        RestCfg:     cfg,
-        clusterName: clusterName,
-        NSAccess:    nsaccess.NewChecker(nsaccess.DefautltWegoAppRules),
+        log:            log.WithName("core-server"),
+        RestCfg:        cfg,
+        clusterName:    clusterName,
+        NSAccess:       nsaccess.NewChecker(nsaccess.DefautltWegoAppRules),
+        cacheContainer: cacheContainer,
     }
 }
 
@@ -63,17 +65,12 @@ func NewCoreServer(cfg CoreServerConfig) (pb.CoreServer, error) {
     ctx := context.Background()
     cfgGetter := kube.NewImpersonatingConfigGetter(cfg.RestCfg, false)
 
-    cacheContainer, err := cache.NewContainer(ctx, cfg.RestCfg, cfg.log)
-    if err != nil {
-        return nil, err
-    }
-
-    cacheContainer.Start(ctx)
+    cfg.cacheContainer.Start(ctx)
 
     return &coreServer{
         k8s:            kube.NewDefaultClientGetter(cfgGetter, cfg.clusterName),
         logger:         cfg.log,
-        cacheContainer: cacheContainer,
+        cacheContainer: cfg.cacheContainer,
         nsChecker:      cfg.NSAccess,
     }, nil
 }
diff --git a/core/server/server_test.go b/core/server/server_test.go
new file mode 100644
index 0000000000..d893f31387
--- /dev/null
+++ b/core/server/server_test.go
@@ -0,0 +1,74 @@
+package server_test
+
+import (
+    "context"
+    "testing"
+
+    kustomizev1 "github.com/fluxcd/kustomize-controller/api/v1beta2"
+    "github.com/go-logr/logr"
+    . "github.com/onsi/gomega"
+    "github.com/weaveworks/weave-gitops/core/cache/cachefakes"
+    "github.com/weaveworks/weave-gitops/core/clustersmngr"
+    "github.com/weaveworks/weave-gitops/core/clustersmngr/clustersmngrfakes"
+    "github.com/weaveworks/weave-gitops/core/server"
+    pb "github.com/weaveworks/weave-gitops/pkg/api/core"
+    "github.com/weaveworks/weave-gitops/pkg/kube"
+    corev1 "k8s.io/api/core/v1"
+    "k8s.io/client-go/rest"
+    "sigs.k8s.io/controller-runtime/pkg/client"
+    "sigs.k8s.io/controller-runtime/pkg/client/fake"
+)
+
+// Not using a counterfeit: I want the real methods on the provided
+// `client.Client` to be invoked.
+type clientMock struct {
+    client.Client
+}
+
+func (c clientMock) RestConfig() *rest.Config {
+    return &rest.Config{}
+}
+
+func TestStartServer(t *testing.T) {
+    g := NewGomegaWithT(t)
+
+    ctx := context.Background()
+    log := logr.Discard()
+    cacheContainer := &cachefakes.FakeContainer{}
+    cfg := server.NewCoreConfig(log, &rest.Config{}, cacheContainer, "test-cluster")
+    svc, err := server.NewCoreServer(cfg)
+    g.Expect(err).NotTo(HaveOccurred())
+
+    appName := "my app"
+    ns := &corev1.Namespace{}
+    ns.Name = "test"
+    kust := &kustomizev1.Kustomization{
+        Spec: kustomizev1.KustomizationSpec{
+            SourceRef: kustomizev1.CrossNamespaceSourceReference{
+                Kind: "GitRepository",
+            },
+        },
+    }
+    kust.Name = appName
+    kust.Namespace = ns.Name
+
+    client := fake.NewClientBuilder().
+        WithScheme(kube.CreateScheme()).
+        WithRuntimeObjects(kust, ns).
+        Build()
+
+    clientsPool := clustersmngrfakes.FakeClientsPool{}
+    clientsPool.ClientsReturns(map[string]clustersmngr.ClusterClient{"default": clientMock{client}})
+    clientsPool.ClientReturns(clientMock{client}, nil)
+
+    clusterClient := clustersmngr.NewClient(&clientsPool)
+    ctx = context.WithValue(ctx, clustersmngr.ClustersClientCtxKey, clusterClient)
+
+    resp, err := svc.ListKustomizations(ctx, &pb.ListKustomizationsRequest{
+        Namespace: ns.Name,
+    })
+    g.Expect(err).NotTo(HaveOccurred())
+    g.Expect(resp.Kustomizations).To(HaveLen(1))
+    g.Expect(resp.Kustomizations[0].Namespace).To(Equal(ns.Name))
+    g.Expect(resp.Kustomizations[0].Name).To(Equal(appName))
+}
diff --git a/core/server/suite_test.go b/core/server/suite_test.go
index 4acefd8a6d..72a456f009 100644
--- a/core/server/suite_test.go
+++ b/core/server/suite_test.go
@@ -51,7 +51,12 @@ func makeGRPCServer(cfg *rest.Config, t *testing.T) pb.CoreClient {
         withClientsPoolInterceptor(cfg, principal),
     )
 
-    coreCfg := server.NewCoreConfig(logr.Discard(), cfg, "foobar")
+    container, err := cache.NewContainer(context.Background(), cfg, logr.Discard())
+    if err != nil {
+        t.Fatal(err)
+    }
+
+    coreCfg := server.NewCoreConfig(logr.Discard(), cfg, container, "foobar")
 
     nsChecker = nsaccessfakes.FakeChecker{}
     nsChecker.FilterAccessibleNamespacesStub = func(ctx context.Context, c *rest.Config, n []v1.Namespace) ([]v1.Namespace, error) {
         // Pretend the user has access to everything
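
Annotation: the patch's own server_test.go exercises the fake through a full CoreServer. For reference, here is a minimal, illustrative sketch (not part of the patch) of the seam this opens up: priming the generated FakeContainer with canned namespace data and asserting on recorded calls, with no cluster involved. The test name and the "test-cluster"/"flux-system" values are invented for the example; all methods used are generated in fake_container.go above.

package server_test

import (
    "context"
    "testing"

    . "github.com/onsi/gomega"
    "github.com/weaveworks/weave-gitops/core/cache"
    "github.com/weaveworks/weave-gitops/core/cache/cachefakes"
    corev1 "k8s.io/api/core/v1"
)

func TestFakeContainerSketch(t *testing.T) {
    g := NewGomegaWithT(t)

    fakeCache := &cachefakes.FakeContainer{}

    // Prime the fake with canned namespace data, keyed by cluster name.
    ns := corev1.Namespace{}
    ns.Name = "flux-system"
    fakeCache.NamespacesReturns(map[string][]corev1.Namespace{
        "test-cluster": {ns},
    })

    // Code under test would call these through the cache.Container
    // interface; here we drive them directly.
    fakeCache.Start(context.Background())
    fakeCache.ForceRefresh(cache.NamespaceStorage)

    // The fake records every invocation, so behaviour can be asserted
    // without spinning up a k8s API server.
    g.Expect(fakeCache.StartCallCount()).To(Equal(1))
    g.Expect(fakeCache.ForceRefreshArgsForCall(0)).To(Equal(cache.NamespaceStorage))
    g.Expect(fakeCache.Namespaces()).To(HaveKey("test-cluster"))
}

Because counterfeiter records call counts and arguments, the same pattern extends to assertions like "the server forced a namespace refresh after a write".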
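A note on regeneration: container.go now carries a package-level go:generate directive that runs counterfeiter in -generate mode, and each mocked interface is tagged with a //counterfeiter:generate comment (Container here, ClientsPool in clustersmngr). Assuming each tagged package has, or gains, a matching go:generate line (the one for the clustersmngr package is not visible in this diff), both fakes can be rebuilt in one pass with `go generate ./core/...`, keeping cachefakes/ and clustersmngrfakes/ in sync with the interfaces instead of invoking counterfeiter by hand.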