From bcc511f0759f7d75e34f7744fa35187bf3e6a5fb Mon Sep 17 00:00:00 2001
From: David Festal
Date: Wed, 29 Mar 2023 20:34:59 +0200
Subject: [PATCH] Replication: Add e2e tests for workloads-related rbac objects

Signed-off-by: David Festal
---
 test/e2e/reconciler/cache/replication_test.go | 204 +++++++++++++++++-
 1 file changed, 201 insertions(+), 3 deletions(-)

diff --git a/test/e2e/reconciler/cache/replication_test.go b/test/e2e/reconciler/cache/replication_test.go
index 98722f284d18..affa0142bf78 100644
--- a/test/e2e/reconciler/cache/replication_test.go
+++ b/test/e2e/reconciler/cache/replication_test.go
@@ -30,6 +30,7 @@ import (
 	"github.com/kcp-dev/logicalcluster/v3"
 	"github.com/stretchr/testify/require"
 
+	rbacv1 "k8s.io/api/rbac/v1"
 	apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
 	"k8s.io/apimachinery/pkg/api/errors"
 	"k8s.io/apimachinery/pkg/api/meta"
@@ -64,6 +65,10 @@ var scenarios = []testScenario{
 	{"TestReplicateAPIResourceSchemaNegative", replicateAPIResourceSchemaNegativeScenario},
 	{"TestReplicateWorkspaceType", replicateWorkspaceTypeScenario},
 	{"TestReplicateWorkspaceTypeNegative", replicateWorkspaceTypeNegativeScenario},
+	{"TestReplicateWorkloadsClusterRole", replicateWorkloadsClusterRoleScenario},
+	{"TestReplicateWorkloadsClusterRoleNegative", replicateWorkloadsClusterRoleNegativeScenario},
+	{"TestReplicateWorkloadsClusterRoleBinding", replicateWorkloadsClusterRoleBindingScenario},
+	{"TestReplicateWorkloadsClusterRoleBindingNegative", replicateWorkloadsClusterRoleBindingNegativeScenario},
 }
 
 // disruptiveScenarios contains a list of scenarios that will be run in a private environment
@@ -330,7 +335,9 @@ func replicateResource(ctx context.Context, t *testing.T,
 	kind string, /*kind for the given resource*/
 	gvr schema.GroupVersionResource, /*gvr for the given resource*/
 	res runtime.Object, /*a strongly typed resource object that will be created*/
-	resWithModifiedSpec runtime.Object /*a strongly typed resource obj with modified spec only, will be used for an update*/) {
+	resWithModifiedSpec runtime.Object, /*a strongly typed resource obj with modified spec only, will be used for an update*/
+	prepares ...func(*replicateResourceScenario), /*additional functions that allow preparing the context of the source resource before expecting replication*/
+) {
 	t.Helper()
 
 	orgPath, _ := framework.NewOrganizationFixture(t, server)
@@ -343,6 +350,10 @@ func replicateResource(ctx context.Context, t *testing.T,
 	resourceName := resMeta.GetName()
 	scenario := &replicateResourceScenario{resourceName: resourceName, kind: kind, gvr: gvr, cluster: clusterName, server: server, kcpShardClusterDynamicClient: kcpShardClusterDynamicClient, cacheKcpClusterDynamicClient: cacheKcpClusterDynamicClient}
 
+	for _, prepare := range prepares {
+		prepare(scenario)
+	}
+
 	t.Logf("Create source %s %s/%s on the root shard for replication", kind, clusterName, resourceName)
 	scenario.CreateSourceResource(ctx, t, res)
 	t.Logf("Verify that the source %s %s/%s was replicated to the cache server", kind, clusterName, resourceName)
@@ -383,7 +394,9 @@ func replicateResourceNegative(ctx context.Context, t *testing.T,
 	kind string, /*kind for the given resource*/
 	gvr schema.GroupVersionResource, /*gvr for the given resource*/
 	res runtime.Object, /*a strongly typed resource object that will be created*/
-	resWithModifiedSpec runtime.Object /*a strongly typed resource obj with modified spec only, will be used for an update*/) {
+	resWithModifiedSpec runtime.Object, /*a strongly typed resource obj with modified spec only, will be used for an update*/
+	prepares ...func(*replicateResourceScenario), /*additional functions that allow preparing the context of the source resource before expecting replication*/
+) {
 	t.Helper()
 
 	orgPath, _ := framework.NewOrganizationFixture(t, server)
@@ -396,6 +409,10 @@ func replicateResourceNegative(ctx context.Context, t *testing.T,
 	resourceName := resMeta.GetName()
 	scenario := &replicateResourceScenario{resourceName: resourceName, kind: kind, gvr: gvr, cluster: clusterName, server: server, kcpShardClusterDynamicClient: kcpShardClusterDynamicClient, cacheKcpClusterDynamicClient: cacheKcpClusterDynamicClient}
 
+	for _, prepare := range prepares {
+		prepare(scenario)
+	}
+
 	t.Logf("Create source %s %s/%s on the root shard for replication", kind, clusterName, resourceName)
 	scenario.CreateSourceResource(ctx, t, res)
 	t.Logf("Verify that the source %s %s/%s was replicated to the cache server", kind, clusterName, resourceName)
@@ -486,6 +503,14 @@ type replicateResourceScenario struct {
 	cacheKcpClusterDynamicClient kcpdynamic.ClusterInterface
 }
 
+func (b *replicateResourceScenario) CreateAdditionalResource(ctx context.Context, t *testing.T, res runtime.Object, kind string, gvr schema.GroupVersionResource) {
+	t.Helper()
+	resUnstructured, err := toUnstructured(res, kind, gvr)
+	require.NoError(t, err)
+	_, err = b.kcpShardClusterDynamicClient.Resource(gvr).Cluster(b.cluster.Path()).Create(ctx, resUnstructured, metav1.CreateOptions{})
+	require.NoError(t, err)
+}
+
 func (b *replicateResourceScenario) CreateSourceResource(ctx context.Context, t *testing.T, res runtime.Object) {
 	t.Helper()
 	resUnstructured, err := toUnstructured(res, b.kind, b.gvr)
@@ -678,6 +703,15 @@ func (b *replicateResourceScenario) verifyResourceReplicationHelper(ctx context.
 	}
 	unstructured.RemoveNestedField(originalResource.Object, "metadata", "resourceVersion")
 	unstructured.RemoveNestedField(cachedResource.Object, "metadata", "resourceVersion")
+
+	// TODO(davidfestal): find out why the generation is not equal, especially for rbacv1.
+	// Is it a characteristic of all built-in KCP resources (which are not backed by CRDs)?
+	// Issue opened: https://github.com/kcp-dev/kcp/issues/2935
+	if b.gvr.Group == rbacv1.SchemeGroupVersion.Group {
+		unstructured.RemoveNestedField(originalResource.Object, "metadata", "generation")
+		unstructured.RemoveNestedField(cachedResource.Object, "metadata", "generation")
+	}
+
 	unstructured.RemoveNestedField(cachedResource.Object, "metadata", "annotations", genericapirequest.AnnotationKey)
 	if cachedStatus, ok := cachedResource.Object["status"]; ok && cachedStatus == nil || (cachedStatus != nil && len(cachedStatus.(map[string]interface{})) == 0) {
 		// TODO: worth investigating:
@@ -684,7 +718,7 @@ func (b *replicateResourceScenario) verifyResourceReplicationHelper(ctx context.
 		unstructured.RemoveNestedField(cachedResource.Object, "status")
 	}
 	if diff := cmp.Diff(cachedResource.Object, originalResource.Object); len(diff) > 0 {
-		return false, fmt.Sprintf("replicated %s root|%s/%s is different from the original", b.gvr, cluster, cachedResourceMeta.GetName())
+		return false, fmt.Sprintf("replicated %s root|%s/%s is different from the original: %s", b.gvr, cluster, cachedResourceMeta.GetName(), diff)
 	}
 	return true, ""
 	}, wait.ForeverTestTimeout, 100*time.Millisecond)
@@ -732,3 +766,167 @@ func createCacheClientConfigForEnvironment(ctx context.Context, t *testing.T, kc
 	require.NoError(t, err)
 	return cacheServerRestConfig
 }
+
+// replicateWorkloadsClusterRoleScenario tests if a ClusterRole related to the workloads API is propagated to the cache server.
+// The test exercises creation, modification and removal of the ClusterRole object.
+func replicateWorkloadsClusterRoleScenario(ctx context.Context, t *testing.T, server framework.RunningServer, kcpShardClusterDynamicClient kcpdynamic.ClusterInterface, cacheKcpClusterDynamicClient kcpdynamic.ClusterInterface) {
+	t.Helper()
+	replicateResource(ctx,
+		t,
+		server,
+		kcpShardClusterDynamicClient,
+		cacheKcpClusterDynamicClient,
+		"",
+		"ClusterRole",
+		rbacv1.SchemeGroupVersion.WithResource("clusterroles"),
+		&rbacv1.ClusterRole{
+			ObjectMeta: metav1.ObjectMeta{
+				Name: withPseudoRandomSuffix("syncer"),
+			},
+			Rules: []rbacv1.PolicyRule{
+				{
+					Verbs:         []string{"sync"},
+					APIGroups:     []string{"workload.kcp.io"},
+					Resources:     []string{"synctargets"},
+					ResourceNames: []string{"asynctarget"},
+				},
+			},
+		},
+		nil,
+	)
+}
+
+// replicateWorkloadsClusterRoleNegativeScenario checks if a modified or even deleted cached ClusterRole (related to the workloads API) will be reconciled to match the original object.
+func replicateWorkloadsClusterRoleNegativeScenario(ctx context.Context, t *testing.T, server framework.RunningServer, kcpShardClusterDynamicClient kcpdynamic.ClusterInterface, cacheKcpClusterDynamicClient kcpdynamic.ClusterInterface) {
+	t.Helper()
+	replicateResourceNegative(
+		ctx,
+		t,
+		server,
+		kcpShardClusterDynamicClient,
+		cacheKcpClusterDynamicClient,
+		"",
+		"ClusterRole",
+		rbacv1.SchemeGroupVersion.WithResource("clusterroles"),
+		&rbacv1.ClusterRole{
+			ObjectMeta: metav1.ObjectMeta{
+				Name: withPseudoRandomSuffix("syncer"),
+			},
+			Rules: []rbacv1.PolicyRule{
+				{
+					Verbs:         []string{"sync"},
+					APIGroups:     []string{"workload.kcp.io"},
+					Resources:     []string{"synctargets"},
+					ResourceNames: []string{"asynctarget"},
+				},
+			},
+		},
+		nil,
+	)
+}
+
+// replicateWorkloadsClusterRoleBindingScenario tests if a ClusterRoleBinding related to the workloads API is propagated to the cache server.
+// The test exercises creation, modification and removal of the ClusterRoleBinding object.
+func replicateWorkloadsClusterRoleBindingScenario(ctx context.Context, t *testing.T, server framework.RunningServer, kcpShardClusterDynamicClient kcpdynamic.ClusterInterface, cacheKcpClusterDynamicClient kcpdynamic.ClusterInterface) {
+	t.Helper()
+
+	clusterRole := &rbacv1.ClusterRole{
+		ObjectMeta: metav1.ObjectMeta{
+			Name: withPseudoRandomSuffix("syncer"),
+		},
+		Rules: []rbacv1.PolicyRule{
+			{
+				Verbs:         []string{"sync"},
+				APIGroups:     []string{"workload.kcp.io"},
+				Resources:     []string{"synctargets"},
+				ResourceNames: []string{"asynctarget"},
+			},
+		},
+	}
+
+	replicateResource(ctx,
+		t,
+		server,
+		kcpShardClusterDynamicClient,
+		cacheKcpClusterDynamicClient,
+		"",
+		"ClusterRoleBinding",
+		rbacv1.SchemeGroupVersion.WithResource("clusterrolebindings"),
+		&rbacv1.ClusterRoleBinding{
+			ObjectMeta: metav1.ObjectMeta{
+				Name: withPseudoRandomSuffix("syncer"),
+			},
+			RoleRef: rbacv1.RoleRef{
+				APIGroup: rbacv1.SchemeGroupVersion.Group,
+				Kind:     "ClusterRole",
+				Name:     clusterRole.Name,
+			},
+			Subjects: []rbacv1.Subject{
+				{
+					Kind:      "ServiceAccount",
+					APIGroup:  "",
+					Name:      "kcp-syncer-0000",
+					Namespace: "kcp-syncer-namespace",
+				},
+			},
+		},
+		nil,
+		func(scenario *replicateResourceScenario) {
+			t.Logf("Create additional ClusterRole %s on the root shard for replication", clusterRole.Name)
+			scenario.CreateAdditionalResource(ctx, t, clusterRole, "ClusterRole", rbacv1.SchemeGroupVersion.WithResource("clusterroles"))
+		},
+	)
+}
+
+// replicateWorkloadsClusterRoleBindingNegativeScenario checks if a modified or even deleted cached ClusterRoleBinding (related to the workloads API) will be reconciled to match the original object.
+func replicateWorkloadsClusterRoleBindingNegativeScenario(ctx context.Context, t *testing.T, server framework.RunningServer, kcpShardClusterDynamicClient kcpdynamic.ClusterInterface, cacheKcpClusterDynamicClient kcpdynamic.ClusterInterface) {
+	t.Helper()
+
+	clusterRole := &rbacv1.ClusterRole{
+		ObjectMeta: metav1.ObjectMeta{
+			Name: withPseudoRandomSuffix("syncer"),
+		},
+		Rules: []rbacv1.PolicyRule{
+			{
+				Verbs:         []string{"sync"},
+				APIGroups:     []string{"workload.kcp.io"},
+				Resources:     []string{"synctargets"},
+				ResourceNames: []string{"asynctarget"},
+			},
+		},
+	}
+
+	replicateResourceNegative(
+		ctx,
+		t,
+		server,
+		kcpShardClusterDynamicClient,
+		cacheKcpClusterDynamicClient,
+		"",
+		"ClusterRoleBinding",
+		rbacv1.SchemeGroupVersion.WithResource("clusterrolebindings"),
+		&rbacv1.ClusterRoleBinding{
+			ObjectMeta: metav1.ObjectMeta{
+				Name: withPseudoRandomSuffix("syncer"),
+			},
+			RoleRef: rbacv1.RoleRef{
+				APIGroup: rbacv1.SchemeGroupVersion.Group,
+				Kind:     "ClusterRole",
+				Name:     clusterRole.Name,
+			},
+			Subjects: []rbacv1.Subject{
+				{
+					Kind:      "ServiceAccount",
+					APIGroup:  "",
+					Name:      "kcp-syncer-0000",
+					Namespace: "kcp-syncer-namespace",
+				},
+			},
+		},
+		nil,
+		func(scenario *replicateResourceScenario) {
+			t.Logf("Create additional ClusterRole %s on the root shard for replication", clusterRole.Name)
+			scenario.CreateAdditionalResource(ctx, t, clusterRole, "ClusterRole", rbacv1.SchemeGroupVersion.WithResource("clusterroles"))
+		},
+	)
+}
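
Note for reviewers: the new scenarios rely on the withPseudoRandomSuffix helper, which already exists elsewhere in replication_test.go and is therefore not part of this diff. For context, a minimal sketch of what such a helper can look like, assuming a simple math/rand-based suffix scheme (illustrative only; the actual helper in the test file may differ):

	// withPseudoRandomSuffix appends a pseudo-random suffix to a base name so
	// that objects created by scenarios sharing a shard do not collide on names.
	// Illustrative sketch, not the actual implementation; requires "fmt" and "math/rand".
	func withPseudoRandomSuffix(name string) string {
		return fmt.Sprintf("%s-%d", name, rand.Uint32())
	}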