-
Notifications
You must be signed in to change notification settings - Fork 4k
Commit
This commit does not belong to any branch on this repository, and may belong to a fork outside of the repository.
CA: implement a Provider for dynamicresources.Snapshot
The Provider uses DRA object listers to create a Snapshot of the DRA objects.
- Loading branch information
Showing
3 changed files
with
361 additions
and
0 deletions.
There are no files selected for viewing
44 changes: 44 additions & 0 deletions
44
cluster-autoscaler/simulator/dynamicresources/provider/listers.go
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,44 @@ | ||
/* | ||
Copyright 2024 The Kubernetes Authors. | ||
Licensed under the Apache License, Version 2.0 (the "License"); | ||
you may not use this file except in compliance with the License. | ||
You may obtain a copy of the License at | ||
http://www.apache.org/licenses/LICENSE-2.0 | ||
Unless required by applicable law or agreed to in writing, software | ||
distributed under the License is distributed on an "AS IS" BASIS, | ||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||
See the License for the specific language governing permissions and | ||
limitations under the License. | ||
*/ | ||
|
||
package provider | ||
|
||
import ( | ||
"k8s.io/apimachinery/pkg/labels" | ||
) | ||
|
||
// allObjectsLister is used by Provider to list DRA objects instead of directly using the API lister
// interfaces, so that test lister implementations don't have to implement the full API lister interfaces.
// Provider only needs to be able to list all DRA objects in the cluster, there shouldn't ever be a need to
// list a subset. The API lister interfaces, on the other hand, require implementing subset selection methods.
type allObjectsLister[O any] interface {
	// ListAll returns all objects of type O known to the lister, or an error.
	ListAll() ([]O, error)
}
|
||
// apiLister is satisfied by any API object lister. Only defined as a type constraint for allObjectsApiLister.
type apiLister[O any] interface {
	// List returns all objects matching the provided label selector.
	List(selector labels.Selector) ([]O, error)
}
|
||
// allObjectsApiLister implements allObjectsLister by wrapping an API object lister. | ||
type allObjectsApiLister[L apiLister[O], O any] struct { | ||
apiLister L | ||
} | ||
|
||
// ListAll lists all objects. | ||
func (l *allObjectsApiLister[L, O]) ListAll() ([]O, error) { | ||
return l.apiLister.List(labels.Everything()) | ||
} |
85 changes: 85 additions & 0 deletions
85
cluster-autoscaler/simulator/dynamicresources/provider/provider.go
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,85 @@ | ||
/* | ||
Copyright 2024 The Kubernetes Authors. | ||
Licensed under the Apache License, Version 2.0 (the "License"); | ||
you may not use this file except in compliance with the License. | ||
You may obtain a copy of the License at | ||
http://www.apache.org/licenses/LICENSE-2.0 | ||
Unless required by applicable law or agreed to in writing, software | ||
distributed under the License is distributed on an "AS IS" BASIS, | ||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||
See the License for the specific language governing permissions and | ||
limitations under the License. | ||
*/ | ||
|
||
package provider | ||
|
||
import ( | ||
resourceapi "k8s.io/api/resource/v1beta1" | ||
drasnapshot "k8s.io/autoscaler/cluster-autoscaler/simulator/dynamicresources/snapshot" | ||
"k8s.io/client-go/informers" | ||
resourceapilisters "k8s.io/client-go/listers/resource/v1beta1" | ||
) | ||
|
||
// Provider provides DRA-related objects.
type Provider struct {
	// resourceClaims lists all ResourceClaims in the cluster.
	resourceClaims allObjectsLister[*resourceapi.ResourceClaim]
	// resourceSlices lists all ResourceSlices in the cluster.
	resourceSlices allObjectsLister[*resourceapi.ResourceSlice]
	// deviceClasses lists all DeviceClasses in the cluster.
	deviceClasses allObjectsLister[*resourceapi.DeviceClass]
}
|
||
// NewProviderFromInformers returns a new Provider which uses InformerFactory listers to list the DRA resources. | ||
func NewProviderFromInformers(informerFactory informers.SharedInformerFactory) *Provider { | ||
claims := &allObjectsApiLister[resourceapilisters.ResourceClaimLister, *resourceapi.ResourceClaim]{apiLister: informerFactory.Resource().V1beta1().ResourceClaims().Lister()} | ||
slices := &allObjectsApiLister[resourceapilisters.ResourceSliceLister, *resourceapi.ResourceSlice]{apiLister: informerFactory.Resource().V1beta1().ResourceSlices().Lister()} | ||
devices := &allObjectsApiLister[resourceapilisters.DeviceClassLister, *resourceapi.DeviceClass]{apiLister: informerFactory.Resource().V1beta1().DeviceClasses().Lister()} | ||
return NewProvider(claims, slices, devices) | ||
} | ||
|
||
// NewProvider returns a new Provider which uses the provided listers to list the DRA resources. | ||
func NewProvider(claims allObjectsLister[*resourceapi.ResourceClaim], slices allObjectsLister[*resourceapi.ResourceSlice], classes allObjectsLister[*resourceapi.DeviceClass]) *Provider { | ||
return &Provider{ | ||
resourceClaims: claims, | ||
resourceSlices: slices, | ||
deviceClasses: classes, | ||
} | ||
} | ||
|
||
// Snapshot returns a snapshot of all DRA resources at a ~single point in time. | ||
func (p *Provider) Snapshot() (drasnapshot.Snapshot, error) { | ||
claims, err := p.resourceClaims.ListAll() | ||
if err != nil { | ||
return drasnapshot.Snapshot{}, err | ||
} | ||
claimMap := make(map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim) | ||
for _, claim := range claims { | ||
claimMap[drasnapshot.GetClaimId(claim)] = claim | ||
} | ||
|
||
slices, err := p.resourceSlices.ListAll() | ||
if err != nil { | ||
return drasnapshot.Snapshot{}, err | ||
} | ||
slicesMap := make(map[string][]*resourceapi.ResourceSlice) | ||
var nonNodeLocalSlices []*resourceapi.ResourceSlice | ||
for _, slice := range slices { | ||
if slice.Spec.NodeName == "" { | ||
nonNodeLocalSlices = append(nonNodeLocalSlices, slice) | ||
} else { | ||
slicesMap[slice.Spec.NodeName] = append(slicesMap[slice.Spec.NodeName], slice) | ||
} | ||
} | ||
|
||
classes, err := p.deviceClasses.ListAll() | ||
if err != nil { | ||
return drasnapshot.Snapshot{}, err | ||
} | ||
classMap := make(map[string]*resourceapi.DeviceClass) | ||
for _, class := range classes { | ||
classMap[class.Name] = class | ||
} | ||
|
||
return drasnapshot.NewSnapshot(claimMap, slicesMap, nonNodeLocalSlices, classMap), nil | ||
} |
232 changes: 232 additions & 0 deletions
232
cluster-autoscaler/simulator/dynamicresources/provider/provider_test.go
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,232 @@ | ||
/* | ||
Copyright 2024 The Kubernetes Authors. | ||
Licensed under the Apache License, Version 2.0 (the "License"); | ||
you may not use this file except in compliance with the License. | ||
You may obtain a copy of the License at | ||
http://www.apache.org/licenses/LICENSE-2.0 | ||
Unless required by applicable law or agreed to in writing, software | ||
distributed under the License is distributed on an "AS IS" BASIS, | ||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||
See the License for the specific language governing permissions and | ||
limitations under the License. | ||
*/ | ||
|
||
package provider | ||
|
||
import ( | ||
"context" | ||
"fmt" | ||
"testing" | ||
|
||
"github.com/google/go-cmp/cmp" | ||
"github.com/google/go-cmp/cmp/cmpopts" | ||
|
||
apiv1 "k8s.io/api/core/v1" | ||
resourceapi "k8s.io/api/resource/v1beta1" | ||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" | ||
"k8s.io/apimachinery/pkg/runtime" | ||
drasnapshot "k8s.io/autoscaler/cluster-autoscaler/simulator/dynamicresources/snapshot" | ||
"k8s.io/autoscaler/cluster-autoscaler/utils/test" | ||
"k8s.io/client-go/informers" | ||
"k8s.io/client-go/kubernetes/fake" | ||
) | ||
|
||
// Shared test fixtures: claims, node-local slices (n1/n2), non-node-local
// slices (AllNodes / NodeSelector), and device classes.
var (
	claim1 = &resourceapi.ResourceClaim{ObjectMeta: metav1.ObjectMeta{Name: "claim-1", UID: "claim-1"}}
	claim2 = &resourceapi.ResourceClaim{ObjectMeta: metav1.ObjectMeta{Name: "claim-2", UID: "claim-2"}}
	claim3 = &resourceapi.ResourceClaim{ObjectMeta: metav1.ObjectMeta{Name: "claim-3", UID: "claim-3"}}

	localSlice1 = &resourceapi.ResourceSlice{ObjectMeta: metav1.ObjectMeta{Name: "local-slice-1", UID: "local-slice-1"}, Spec: resourceapi.ResourceSliceSpec{NodeName: "n1"}}
	localSlice2 = &resourceapi.ResourceSlice{ObjectMeta: metav1.ObjectMeta{Name: "local-slice-2", UID: "local-slice-2"}, Spec: resourceapi.ResourceSliceSpec{NodeName: "n1"}}
	localSlice3 = &resourceapi.ResourceSlice{ObjectMeta: metav1.ObjectMeta{Name: "local-slice-3", UID: "local-slice-3"}, Spec: resourceapi.ResourceSliceSpec{NodeName: "n2"}}
	localSlice4 = &resourceapi.ResourceSlice{ObjectMeta: metav1.ObjectMeta{Name: "local-slice-4", UID: "local-slice-4"}, Spec: resourceapi.ResourceSliceSpec{NodeName: "n2"}}
	globalSlice1 = &resourceapi.ResourceSlice{ObjectMeta: metav1.ObjectMeta{Name: "global-slice-1", UID: "global-slice-1"}, Spec: resourceapi.ResourceSliceSpec{AllNodes: true}}
	globalSlice2 = &resourceapi.ResourceSlice{ObjectMeta: metav1.ObjectMeta{Name: "global-slice-2", UID: "global-slice-2"}, Spec: resourceapi.ResourceSliceSpec{AllNodes: true}}
	globalSlice3 = &resourceapi.ResourceSlice{ObjectMeta: metav1.ObjectMeta{Name: "global-slice-3", UID: "global-slice-3"}, Spec: resourceapi.ResourceSliceSpec{NodeSelector: &apiv1.NodeSelector{}}}

	class1 = &resourceapi.DeviceClass{ObjectMeta: metav1.ObjectMeta{Name: "class-1", UID: "class-1"}}
	class2 = &resourceapi.DeviceClass{ObjectMeta: metav1.ObjectMeta{Name: "class-2", UID: "class-2"}}
	class3 = &resourceapi.DeviceClass{ObjectMeta: metav1.ObjectMeta{Name: "class-3", UID: "class-3"}}
)
|
||
// TestProviderSnapshot verifies that Provider.Snapshot groups claims by id,
// splits slices into node-local and non-node-local, indexes classes by name,
// and propagates errors from any of the three listers.
func TestProviderSnapshot(t *testing.T) {
	for _, tc := range []struct {
		testName            string
		claims              []*resourceapi.ResourceClaim
		triggerClaimsError  bool
		slices              []*resourceapi.ResourceSlice
		triggerSlicesError  bool
		classes             []*resourceapi.DeviceClass
		triggerClassesError bool
		wantSnapshot        drasnapshot.Snapshot
		wantErr             error
	}{
		{
			testName:           "claim lister error results in an error",
			triggerClaimsError: true,
			wantErr:            cmpopts.AnyError,
		},
		{
			testName:           "slices lister error results in an error",
			triggerSlicesError: true,
			wantErr:            cmpopts.AnyError,
		},
		{
			testName:            "classes lister error results in an error",
			triggerClassesError: true,
			wantErr:             cmpopts.AnyError,
		},
		{
			testName: "claims are correctly snapshot by id",
			claims:   []*resourceapi.ResourceClaim{claim1, claim2, claim3},
			wantSnapshot: drasnapshot.NewSnapshot(
				map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim{
					drasnapshot.GetClaimId(claim1): claim1,
					drasnapshot.GetClaimId(claim2): claim2,
					drasnapshot.GetClaimId(claim3): claim3,
				}, nil, nil, nil),
		},
		{
			testName: "slices are correctly divided and snapshot",
			slices:   []*resourceapi.ResourceSlice{localSlice1, localSlice2, localSlice3, localSlice4, globalSlice1, globalSlice2, globalSlice3},
			wantSnapshot: drasnapshot.NewSnapshot(nil,
				map[string][]*resourceapi.ResourceSlice{
					"n1": {localSlice1, localSlice2},
					"n2": {localSlice3, localSlice4},
				},
				[]*resourceapi.ResourceSlice{globalSlice1, globalSlice2, globalSlice3}, nil),
		},
		{
			testName: "classes are correctly snapshot by name",
			classes:  []*resourceapi.DeviceClass{class1, class2, class3},
			wantSnapshot: drasnapshot.NewSnapshot(nil, nil, nil,
				map[string]*resourceapi.DeviceClass{"class-1": class1, "class-2": class2, "class-3": class3}),
		},
		{
			testName: "everything is correctly snapshot together",
			claims:   []*resourceapi.ResourceClaim{claim1, claim2, claim3},
			slices:   []*resourceapi.ResourceSlice{localSlice1, localSlice2, localSlice3, localSlice4, globalSlice1, globalSlice2, globalSlice3},
			classes:  []*resourceapi.DeviceClass{class1, class2, class3},
			wantSnapshot: drasnapshot.NewSnapshot(
				map[drasnapshot.ResourceClaimId]*resourceapi.ResourceClaim{
					drasnapshot.GetClaimId(claim1): claim1,
					drasnapshot.GetClaimId(claim2): claim2,
					drasnapshot.GetClaimId(claim3): claim3,
				},
				map[string][]*resourceapi.ResourceSlice{
					"n1": {localSlice1, localSlice2},
					"n2": {localSlice3, localSlice4},
				},
				[]*resourceapi.ResourceSlice{globalSlice1, globalSlice2, globalSlice3},
				map[string]*resourceapi.DeviceClass{"class-1": class1, "class-2": class2, "class-3": class3}),
		},
	} {
		t.Run(tc.testName, func(t *testing.T) {
			claimLister := &fakeLister[*resourceapi.ResourceClaim]{objects: tc.claims, triggerErr: tc.triggerClaimsError}
			sliceLister := &fakeLister[*resourceapi.ResourceSlice]{objects: tc.slices, triggerErr: tc.triggerSlicesError}
			classLister := &fakeLister[*resourceapi.DeviceClass]{objects: tc.classes, triggerErr: tc.triggerClassesError}
			provider := NewProvider(claimLister, sliceLister, classLister)
			snapshot, err := provider.Snapshot()
			if diff := cmp.Diff(tc.wantErr, err, cmpopts.EquateErrors()); diff != "" {
				t.Fatalf("Provider.Snapshot(): unexpected error (-want +got): %s", diff)
			}
			// Snapshot has only unexported fields, so the comparison needs
			// AllowUnexported; EquateEmpty lets nil and empty maps/slices match.
			if diff := cmp.Diff(tc.wantSnapshot, snapshot, cmp.AllowUnexported(drasnapshot.Snapshot{}), cmpopts.EquateEmpty()); diff != "" {
				t.Fatalf("Provider.Snapshot(): snapshot differs from expected (-want +got): %s", diff)
			}
		})
	}
}
|
||
// TestNewProviderFromInformers verifies that the interface translation listers created in NewProviderFromInformers correctly return
// all objects in the cluster.
func TestNewProviderFromInformers(t *testing.T) {
	for _, tc := range []struct {
		testName string
		claims   []*resourceapi.ResourceClaim
		slices   []*resourceapi.ResourceSlice
		classes  []*resourceapi.DeviceClass
	}{
		{
			testName: "no objects in informers",
		},
		{
			testName: "ResourceClaims present in informers",
			claims:   []*resourceapi.ResourceClaim{claim1, claim2, claim3},
		},
		{
			testName: "ResourceSlices present in informers",
			slices:   []*resourceapi.ResourceSlice{localSlice1, localSlice2, localSlice3},
		},
		{
			testName: "DeviceClasses present in informers",
			classes:  []*resourceapi.DeviceClass{class1, class2, class3},
		},
		{
			testName: "all objects present in informers together",
			claims:   []*resourceapi.ResourceClaim{claim1, claim2, claim3},
			slices:   []*resourceapi.ResourceSlice{localSlice1, localSlice2, localSlice3},
			classes:  []*resourceapi.DeviceClass{class1, class2, class3},
		},
	} {
		t.Run(tc.testName, func(t *testing.T) {
			// Seed a fake clientset with all the test objects.
			var objects []runtime.Object
			for _, claim := range tc.claims {
				objects = append(objects, claim)
			}
			for _, slice := range tc.slices {
				objects = append(objects, slice)
			}
			for _, class := range tc.classes {
				objects = append(objects, class)
			}
			client := fake.NewSimpleClientset(objects...)
			informerFactory := informers.NewSharedInformerFactory(client, 0)
			provider := NewProviderFromInformers(informerFactory)

			// The informers must be started and synced before the listers
			// can return anything.
			ctx, cancel := context.WithCancel(context.Background())
			defer cancel()
			informerFactory.Start(ctx.Done())
			informerFactory.WaitForCacheSync(ctx.Done())

			allClaims, err := provider.resourceClaims.ListAll()
			if err != nil {
				t.Fatalf("provider.resourceClaims.ListAll(): got unexpected error %v", err)
			}
			if diff := cmp.Diff(tc.claims, allClaims, test.IgnoreObjectOrder[*resourceapi.ResourceClaim]()); diff != "" {
				t.Errorf("provider.resourceClaims.ListAll(): result differs from expected (-want +got): %s", diff)
			}

			allSlices, err := provider.resourceSlices.ListAll()
			if err != nil {
				t.Fatalf("provider.resourceSlices.ListAll(): got unexpected error %v", err)
			}
			if diff := cmp.Diff(tc.slices, allSlices, test.IgnoreObjectOrder[*resourceapi.ResourceSlice]()); diff != "" {
				t.Errorf("provider.resourceSlices.ListAll(): result differs from expected (-want +got): %s", diff)
			}

			allClasses, err := provider.deviceClasses.ListAll()
			if err != nil {
				t.Fatalf("provider.deviceClasses.ListAll(): got unexpected error %v", err)
			}
			if diff := cmp.Diff(tc.classes, allClasses, test.IgnoreObjectOrder[*resourceapi.DeviceClass]()); diff != "" {
				t.Errorf("provider.deviceClasses.ListAll(): result differs from expected (-want +got): %s", diff)
			}
		})
	}
}
|
||
// fakeLister is a test implementation of allObjectsLister that serves a fixed
// set of objects, optionally failing every call with a canned error.
type fakeLister[T any] struct {
	objects    []T
	triggerErr bool
}

// ListAll returns the configured objects; when triggerErr is set, the objects
// are returned together with a fake error.
func (l *fakeLister[T]) ListAll() ([]T, error) {
	if l.triggerErr {
		return l.objects, fmt.Errorf("fake test error")
	}
	return l.objects, nil
}