From d88664a4d818a35f689f310263e509574f22db6c Mon Sep 17 00:00:00 2001 From: Ayush Rangwala Date: Mon, 4 Sep 2023 22:15:14 +0530 Subject: [PATCH 1/7] Rename comments, logs, structs, and vars from packet to equinix metal --- .../cloudprovider/builder/builder_all.go | 2 +- .../cloudprovider/builder/builder_packet.go | 2 +- .../packet/packet_cloud_provider.go | 78 +++++++-------- .../cloudprovider/packet/packet_manager.go | 16 +-- .../packet/packet_manager_rest.go | 98 +++++++++---------- .../packet/packet_manager_rest_test.go | 22 ++--- .../cloudprovider/packet/packet_node_group.go | 58 +++++------ .../packet/packet_node_group_test.go | 50 +++++----- .../packet/packet_price_model.go | 8 +- .../packet/packet_price_model_test.go | 4 +- 10 files changed, 169 insertions(+), 169 deletions(-) diff --git a/cluster-autoscaler/cloudprovider/builder/builder_all.go b/cluster-autoscaler/cloudprovider/builder/builder_all.go index a179c0f92e94..12d745c63a98 100644 --- a/cluster-autoscaler/cloudprovider/builder/builder_all.go +++ b/cluster-autoscaler/cloudprovider/builder/builder_all.go @@ -122,7 +122,7 @@ func buildCloudProvider(opts config.AutoscalingOptions, do cloudprovider.NodeGro case cloudprovider.HetznerProviderName: return hetzner.BuildHetzner(opts, do, rl) case cloudprovider.PacketProviderName: - return packet.BuildPacket(opts, do, rl) + return packet.BuildEquinixMetal(opts, do, rl) case cloudprovider.ClusterAPIProviderName: return clusterapi.BuildClusterAPI(opts, do, rl) case cloudprovider.IonoscloudProviderName: diff --git a/cluster-autoscaler/cloudprovider/builder/builder_packet.go b/cluster-autoscaler/cloudprovider/builder/builder_packet.go index 403eba21d804..bdd126d40893 100644 --- a/cluster-autoscaler/cloudprovider/builder/builder_packet.go +++ b/cluster-autoscaler/cloudprovider/builder/builder_packet.go @@ -36,7 +36,7 @@ const DefaultCloudProvider = packet.ProviderName func buildCloudProvider(opts config.AutoscalingOptions, do cloudprovider.NodeGroupDiscoveryOptions, rl *cloudprovider.ResourceLimiter) cloudprovider.CloudProvider { switch opts.CloudProviderName { case packet.ProviderName: - return packet.BuildPacket(opts, do, rl) + return packet.BuildEquinixMetal(opts, do, rl) } return nil diff --git a/cluster-autoscaler/cloudprovider/packet/packet_cloud_provider.go b/cluster-autoscaler/cloudprovider/packet/packet_cloud_provider.go index 79aa71aef186..bafa2fc65744 100644 --- a/cluster-autoscaler/cloudprovider/packet/packet_cloud_provider.go +++ b/cluster-autoscaler/cloudprovider/packet/packet_cloud_provider.go @@ -34,8 +34,8 @@ import ( ) const ( - // ProviderName is the cloud provider name for Packet - ProviderName = "packet" + // ProviderName is the cloud provider name for Equinix Metal + ProviderName = "equinix-metal" // GPULabel is the label added to nodes with GPU resource. GPULabel = "cloud.google.com/gke-accelerator" // DefaultControllerNodeLabelKey is the label added to Master/Controller to identify as @@ -51,45 +51,45 @@ var ( } ) -// packetCloudProvider implements CloudProvider interface from cluster-autoscaler/cloudprovider module. -type packetCloudProvider struct { - packetManager packetManager - resourceLimiter *cloudprovider.ResourceLimiter - nodeGroups []packetNodeGroup +// equinixMetalCloudProvider implements CloudProvider interface from cluster-autoscaler/cloudprovider module. 
+type equinixMetalCloudProvider struct { + equinixMetalManager equinixMetalManager + resourceLimiter *cloudprovider.ResourceLimiter + nodeGroups []equinixMetalNodeGroup } -func buildPacketCloudProvider(packetManager packetManager, resourceLimiter *cloudprovider.ResourceLimiter) (cloudprovider.CloudProvider, error) { - pcp := &packetCloudProvider{ - packetManager: packetManager, - resourceLimiter: resourceLimiter, - nodeGroups: []packetNodeGroup{}, +func buildEquinixMetalCloudProvider(metalManager equinixMetalManager, resourceLimiter *cloudprovider.ResourceLimiter) (cloudprovider.CloudProvider, error) { + pcp := &equinixMetalCloudProvider{ + equinixMetalManager: metalManager, + resourceLimiter: resourceLimiter, + nodeGroups: []equinixMetalNodeGroup{}, } return pcp, nil } // Name returns the name of the cloud provider. -func (pcp *packetCloudProvider) Name() string { +func (pcp *equinixMetalCloudProvider) Name() string { return ProviderName } // GPULabel returns the label added to nodes with GPU resource. -func (pcp *packetCloudProvider) GPULabel() string { +func (pcp *equinixMetalCloudProvider) GPULabel() string { return GPULabel } // GetAvailableGPUTypes return all available GPU types cloud provider supports -func (pcp *packetCloudProvider) GetAvailableGPUTypes() map[string]struct{} { +func (pcp *equinixMetalCloudProvider) GetAvailableGPUTypes() map[string]struct{} { return availableGPUTypes } // GetNodeGpuConfig returns the label, type and resource name for the GPU added to node. If node doesn't have // any GPUs, it returns nil. -func (pcp *packetCloudProvider) GetNodeGpuConfig(node *apiv1.Node) *cloudprovider.GpuConfig { +func (pcp *equinixMetalCloudProvider) GetNodeGpuConfig(node *apiv1.Node) *cloudprovider.GpuConfig { return gpu.GetNodeGPUFromCloudProvider(pcp, node) } // NodeGroups returns all node groups managed by this cloud provider. -func (pcp *packetCloudProvider) NodeGroups() []cloudprovider.NodeGroup { +func (pcp *equinixMetalCloudProvider) NodeGroups() []cloudprovider.NodeGroup { groups := make([]cloudprovider.NodeGroup, len(pcp.nodeGroups)) for i := range pcp.nodeGroups { groups[i] = &pcp.nodeGroups[i] @@ -98,14 +98,14 @@ func (pcp *packetCloudProvider) NodeGroups() []cloudprovider.NodeGroup { } // AddNodeGroup appends a node group to the list of node groups managed by this cloud provider. -func (pcp *packetCloudProvider) AddNodeGroup(group packetNodeGroup) { +func (pcp *equinixMetalCloudProvider) AddNodeGroup(group equinixMetalNodeGroup) { pcp.nodeGroups = append(pcp.nodeGroups, group) } // NodeGroupForNode returns the node group that a given node belongs to. // // Since only a single node group is currently supported, the first node group is always returned. 
-func (pcp *packetCloudProvider) NodeGroupForNode(node *apiv1.Node) (cloudprovider.NodeGroup, error) { +func (pcp *equinixMetalCloudProvider) NodeGroupForNode(node *apiv1.Node) (cloudprovider.NodeGroup, error) { controllerNodeLabel := os.Getenv(ControllerNodeIdentifierEnv) if controllerNodeLabel == "" { klog.V(3).Infof("env %s not set, using default: %s", ControllerNodeIdentifierEnv, DefaultControllerNodeLabelKey) @@ -115,7 +115,7 @@ func (pcp *packetCloudProvider) NodeGroupForNode(node *apiv1.Node) (cloudprovide if _, found := node.ObjectMeta.Labels[controllerNodeLabel]; found { return nil, nil } - nodeGroupId, err := pcp.packetManager.NodeGroupForNode(node.ObjectMeta.Labels, node.Spec.ProviderID) + nodeGroupId, err := pcp.equinixMetalManager.NodeGroupForNode(node.ObjectMeta.Labels, node.Spec.ProviderID) if err != nil { return nil, err } @@ -128,35 +128,35 @@ func (pcp *packetCloudProvider) NodeGroupForNode(node *apiv1.Node) (cloudprovide } // HasInstance returns whether a given node has a corresponding instance in this cloud provider -func (pcp *packetCloudProvider) HasInstance(node *apiv1.Node) (bool, error) { +func (pcp *equinixMetalCloudProvider) HasInstance(node *apiv1.Node) (bool, error) { return true, cloudprovider.ErrNotImplemented } // Pricing returns pricing model for this cloud provider or error if not available. -func (pcp *packetCloudProvider) Pricing() (cloudprovider.PricingModel, errors.AutoscalerError) { - return &PacketPriceModel{}, nil +func (pcp *equinixMetalCloudProvider) Pricing() (cloudprovider.PricingModel, errors.AutoscalerError) { + return &EquinixMetalPriceModel{}, nil } // GetAvailableMachineTypes is not implemented. -func (pcp *packetCloudProvider) GetAvailableMachineTypes() ([]string, error) { +func (pcp *equinixMetalCloudProvider) GetAvailableMachineTypes() ([]string, error) { return []string{}, nil } // NewNodeGroup is not implemented. -func (pcp *packetCloudProvider) NewNodeGroup(machineType string, labels map[string]string, systemLabels map[string]string, +func (pcp *equinixMetalCloudProvider) NewNodeGroup(machineType string, labels map[string]string, systemLabels map[string]string, taints []apiv1.Taint, extraResources map[string]resource.Quantity) (cloudprovider.NodeGroup, error) { return nil, cloudprovider.ErrNotImplemented } // GetResourceLimiter returns resource constraints for the cloud provider -func (pcp *packetCloudProvider) GetResourceLimiter() (*cloudprovider.ResourceLimiter, error) { +func (pcp *equinixMetalCloudProvider) GetResourceLimiter() (*cloudprovider.ResourceLimiter, error) { return pcp.resourceLimiter, nil } // Refresh is called before every autoscaler main loop. // // Currently only prints debug information. -func (pcp *packetCloudProvider) Refresh() error { +func (pcp *equinixMetalCloudProvider) Refresh() error { for _, nodegroup := range pcp.nodeGroups { klog.V(3).Info(nodegroup.Debug()) } @@ -164,15 +164,15 @@ func (pcp *packetCloudProvider) Refresh() error { } // Cleanup currently does nothing. -func (pcp *packetCloudProvider) Cleanup() error { +func (pcp *equinixMetalCloudProvider) Cleanup() error { return nil } -// BuildPacket is called by the autoscaler to build a packet cloud provider. +// BuildEquinixMetal is called by the autoscaler to build an Equinix Metal cloud provider. // -// The packetManager is created here, and the node groups are created +// The equinixMetalManager is created here, and the node groups are created // based on the specs provided via the command line parameters. 
-func BuildPacket(opts config.AutoscalingOptions, do cloudprovider.NodeGroupDiscoveryOptions, rl *cloudprovider.ResourceLimiter) cloudprovider.CloudProvider { +func BuildEquinixMetal(opts config.AutoscalingOptions, do cloudprovider.NodeGroupDiscoveryOptions, rl *cloudprovider.ResourceLimiter) cloudprovider.CloudProvider { var config io.ReadCloser if opts.CloudConfig != "" { @@ -184,14 +184,14 @@ func BuildPacket(opts config.AutoscalingOptions, do cloudprovider.NodeGroupDisco defer config.Close() } - manager, err := createPacketManager(config, do, opts) + manager, err := createEquinixMetalManager(config, do, opts) if err != nil { - klog.Fatalf("Failed to create packet manager: %v", err) + klog.Fatalf("Failed to create equinix metal manager: %v", err) } - provider, err := buildPacketCloudProvider(manager, rl) + provider, err := buildEquinixMetalCloudProvider(manager, rl) if err != nil { - klog.Fatalf("Failed to create packet cloud provider: %v", err) + klog.Fatalf("Failed to create equinix metal cloud provider: %v", err) } if len(do.NodeGroupSpecs) == 0 { @@ -212,8 +212,8 @@ func BuildPacket(opts config.AutoscalingOptions, do cloudprovider.NodeGroupDisco klog.Fatalf("Invalid nodepool name: %s\nMust be a valid kubernetes label value", spec.Name) } - ng := packetNodeGroup{ - packetManager: manager, + ng := equinixMetalNodeGroup{ + equinixMetalManager: manager, id: spec.Name, clusterUpdateMutex: &clusterUpdateLock, minSize: spec.MinSize, @@ -222,11 +222,11 @@ func BuildPacket(opts config.AutoscalingOptions, do cloudprovider.NodeGroupDisco waitTimeStep: waitForStatusTimeStep, deleteBatchingDelay: deleteNodesBatchingDelay, } - *ng.targetSize, err = ng.packetManager.nodeGroupSize(ng.id) + *ng.targetSize, err = ng.equinixMetalManager.nodeGroupSize(ng.id) if err != nil { klog.Fatalf("Could not set current nodes in node group: %v", err) } - provider.(*packetCloudProvider).AddNodeGroup(ng) + provider.(*equinixMetalCloudProvider).AddNodeGroup(ng) } return provider diff --git a/cluster-autoscaler/cloudprovider/packet/packet_manager.go b/cluster-autoscaler/cloudprovider/packet/packet_manager.go index f5b8793b3eb9..e20ee3dfafa0 100644 --- a/cluster-autoscaler/cloudprovider/packet/packet_manager.go +++ b/cluster-autoscaler/cloudprovider/packet/packet_manager.go @@ -38,8 +38,8 @@ type NodeRef struct { IPs []string } -// packetManager is an interface for the basic interactions with the cluster. -type packetManager interface { +// equinixMetalManager is an interface for the basic interactions with the cluster. +type equinixMetalManager interface { nodeGroupSize(nodegroup string) (int, error) createNodes(nodegroup string, nodes int) error getNodes(nodegroup string) ([]string, error) @@ -49,20 +49,20 @@ type packetManager interface { NodeGroupForNode(labels map[string]string, nodeId string) (string, error) } -// createPacketManager creates the desired implementation of packetManager. -// Currently reads the environment variable PACKET_MANAGER to find which to create, +// createEquinixMetalManager creates the desired implementation of equinixMetalManager. +// Currently reads the environment variable EQUINIX_METAL_MANAGER to find which to create, // and falls back to a default if the variable is not found. 
-func createPacketManager(configReader io.Reader, discoverOpts cloudprovider.NodeGroupDiscoveryOptions, opts config.AutoscalingOptions) (packetManager, error) { +func createEquinixMetalManager(configReader io.Reader, discoverOpts cloudprovider.NodeGroupDiscoveryOptions, opts config.AutoscalingOptions) (equinixMetalManager, error) { // For now get manager from env var, can consider adding flag later - manager, ok := os.LookupEnv("PACKET_MANAGER") + manager, ok := os.LookupEnv("EQUINIX_METAL_MANAGER") if !ok { manager = defaultManager } switch manager { case "rest": - return createPacketManagerRest(configReader, discoverOpts, opts) + return createEquinixMetalManagerRest(configReader, discoverOpts, opts) } - return nil, fmt.Errorf("packet manager does not exist: %s", manager) + return nil, fmt.Errorf("equinix metal manager does not exist: %s", manager) } diff --git a/cluster-autoscaler/cloudprovider/packet/packet_manager_rest.go b/cluster-autoscaler/cloudprovider/packet/packet_manager_rest.go index 138410a2685f..c18674852ca0 100644 --- a/cluster-autoscaler/cloudprovider/packet/packet_manager_rest.go +++ b/cluster-autoscaler/cloudprovider/packet/packet_manager_rest.go @@ -146,7 +146,7 @@ var InstanceTypes = map[string]*instanceType{ }, } -type packetManagerNodePool struct { +type equinixMetalManagerNodePool struct { baseURL string clusterName string projectID string @@ -160,9 +160,9 @@ type packetManagerNodePool struct { hostnamePattern string } -type packetManagerRest struct { - authToken string - packetManagerNodePools map[string]*packetManagerNodePool +type equinixMetalManagerRest struct { + authToken string + equinixMetalManagerNodePools map[string]*equinixMetalManagerNodePool } // ConfigNodepool options only include the project-id for now @@ -185,7 +185,7 @@ type ConfigFile struct { Nodegroupdef map[string]*ConfigNodepool `gcfg:"nodegroupdef"` } -// Device represents a Packet device +// Device represents an Equinix Metal device type Device struct { ID string `json:"id"` ShortID string `json:"short_id"` @@ -195,7 +195,7 @@ type Device struct { Tags []string `json:"tags"` } -// Devices represents a list of Packet devices +// Devices represents a list of an Equinix Metal devices type Devices struct { Devices []Device `json:"devices"` } @@ -206,7 +206,7 @@ type IPAddressCreateRequest struct { Public bool `json:"public"` } -// DeviceCreateRequest represents a request to create a new Packet device. Used by createNodes +// DeviceCreateRequest represents a request to create a new Equinix Metal device. Used by createNodes type DeviceCreateRequest struct { Hostname string `json:"hostname"` Plan string `json:"plan"` @@ -271,9 +271,9 @@ func Contains(a []string, x string) bool { return false } -// createPacketManagerRest sets up the client and returns -// an packetManagerRest. -func createPacketManagerRest(configReader io.Reader, discoverOpts cloudprovider.NodeGroupDiscoveryOptions, opts config.AutoscalingOptions) (*packetManagerRest, error) { +// createEquinixMetalManagerRest sets up the client and returns +// an equinixMetalManagerRest. +func createEquinixMetalManagerRest(configReader io.Reader, discoverOpts cloudprovider.NodeGroupDiscoveryOptions, opts config.AutoscalingOptions) (*equinixMetalManagerRest, error) { // Initialize ConfigFile instance cfg := ConfigFile{ DefaultNodegroupdef: ConfigNodepool{}, @@ -287,8 +287,8 @@ func createPacketManagerRest(configReader io.Reader, discoverOpts cloudprovider. 
} } - var manager packetManagerRest - manager.packetManagerNodePools = make(map[string]*packetManagerNodePool) + var manager equinixMetalManagerRest + manager.equinixMetalManagerNodePools = make(map[string]*equinixMetalManagerNodePool) if _, ok := cfg.Nodegroupdef["default"]; !ok { cfg.Nodegroupdef["default"] = &cfg.DefaultNodegroupdef @@ -298,12 +298,12 @@ func createPacketManagerRest(configReader io.Reader, discoverOpts cloudprovider. klog.Fatalf("No \"default\" or [Global] nodepool definition was found") } - packetAuthToken := os.Getenv("PACKET_AUTH_TOKEN") - if len(packetAuthToken) == 0 { + metalAuthToken := os.Getenv("PACKET_AUTH_TOKEN") + if len(metalAuthToken) == 0 { klog.Fatalf("PACKET_AUTH_TOKEN is required and missing") } - manager.authToken = packetAuthToken + manager.authToken = metalAuthToken for nodepool := range cfg.Nodegroupdef { if opts.ClusterName == "" && cfg.Nodegroupdef[nodepool].ClusterName == "" { @@ -312,8 +312,8 @@ func createPacketManagerRest(configReader io.Reader, discoverOpts cloudprovider. cfg.Nodegroupdef[nodepool].ClusterName = opts.ClusterName } - manager.packetManagerNodePools[nodepool] = &packetManagerNodePool{ - baseURL: "https://api.equinix.com/metal/v1", + manager.equinixMetalManagerNodePools[nodepool] = &equinixMetalManagerNodePool{ + baseURL: "https://api.equinix.com", clusterName: cfg.Nodegroupdef[nodepool].ClusterName, projectID: cfg.Nodegroupdef["default"].ProjectID, apiServerEndpoint: cfg.Nodegroupdef["default"].APIServerEndpoint, @@ -330,7 +330,7 @@ func createPacketManagerRest(configReader io.Reader, discoverOpts cloudprovider. return &manager, nil } -func (mgr *packetManagerRest) request(ctx context.Context, method, url string, jsonData []byte) ([]byte, error) { +func (mgr *equinixMetalManagerRest) request(ctx context.Context, method, url string, jsonData []byte) ([]byte, error) { req, err := http.NewRequestWithContext(ctx, method, url, bytes.NewBuffer(jsonData)) if err != nil { return nil, fmt.Errorf("failed to create request: %w", err) @@ -381,7 +381,7 @@ func (mgr *packetManagerRest) request(ctx context.Context, method, url string, j return nil, errorResponse } -func (mgr *packetManagerRest) listPacketDevices(ctx context.Context) (*Devices, error) { +func (mgr *equinixMetalManagerRest) listMetalDevices(ctx context.Context) (*Devices, error) { url := mgr.getNodePoolDefinition("default").baseURL + "/" + path.Join("projects", mgr.getNodePoolDefinition("default").projectID, "devices") klog.Infof("url: %v", url) @@ -398,7 +398,7 @@ func (mgr *packetManagerRest) listPacketDevices(ctx context.Context) (*Devices, return &devices, nil } -func (mgr *packetManagerRest) getPacketDevice(ctx context.Context, id string) (*Device, error) { +func (mgr *equinixMetalManagerRest) getEquinixMetalDevice(ctx context.Context, id string) (*Device, error) { url := mgr.getNodePoolDefinition("default").baseURL + "/" + path.Join("devices", id) result, err := mgr.request(ctx, "GET", url, []byte(``)) @@ -414,14 +414,14 @@ func (mgr *packetManagerRest) getPacketDevice(ctx context.Context, id string) (* return &device, nil } -func (mgr *packetManagerRest) NodeGroupForNode(labels map[string]string, nodeId string) (string, error) { +func (mgr *equinixMetalManagerRest) NodeGroupForNode(labels map[string]string, nodeId string) (string, error) { if nodegroup, ok := labels["pool"]; ok { return nodegroup, nil } trimmedNodeId := strings.TrimPrefix(nodeId, prefix) - device, err := mgr.getPacketDevice(context.TODO(), trimmedNodeId) + device, err := 
mgr.getEquinixMetalDevice(context.TODO(), trimmedNodeId) if err != nil { return "", fmt.Errorf("could not find group for node: %s %s", nodeId, err) } @@ -434,8 +434,8 @@ func (mgr *packetManagerRest) NodeGroupForNode(labels map[string]string, nodeId } // nodeGroupSize gets the current size of the nodegroup as reported by packet tags. -func (mgr *packetManagerRest) nodeGroupSize(nodegroup string) (int, error) { - devices, err := mgr.listPacketDevices(context.TODO()) +func (mgr *equinixMetalManagerRest) nodeGroupSize(nodegroup string) (int, error) { + devices, err := mgr.listMetalDevices(context.TODO()) if err != nil { return 0, fmt.Errorf("failed to list devices: %w", err) } @@ -462,7 +462,7 @@ func randString8() string { return string(b) } -func (mgr *packetManagerRest) createNode(ctx context.Context, cloudinit, nodegroup string) error { +func (mgr *equinixMetalManagerRest) createNode(ctx context.Context, cloudinit, nodegroup string) error { udvars := CloudInitTemplateData{ BootstrapTokenID: os.Getenv("BOOTSTRAP_TOKEN_ID"), BootstrapTokenSecret: os.Getenv("BOOTSTRAP_TOKEN_SECRET"), @@ -489,13 +489,13 @@ func (mgr *packetManagerRest) createNode(ctx context.Context, cloudinit, nodegro return fmt.Errorf("failed to create device %q in node group %q: %w", hn, nodegroup, err) } - klog.Infof("Created new node on Packet.") + klog.Infof("Created new node on Equinix Metal.") return nil } // createNodes provisions new nodes on packet and bootstraps them in the cluster. -func (mgr *packetManagerRest) createNodes(nodegroup string, nodes int) error { +func (mgr *equinixMetalManagerRest) createNodes(nodegroup string, nodes int) error { klog.Infof("Updating node count to %d for nodegroup %s", nodes, nodegroup) cloudinit, err := base64.StdEncoding.DecodeString(mgr.getNodePoolDefinition(nodegroup).cloudinit) @@ -513,7 +513,7 @@ func (mgr *packetManagerRest) createNodes(nodegroup string, nodes int) error { return utilerrors.NewAggregate(errList) } -func (mgr *packetManagerRest) createDevice(ctx context.Context, hostname, userData, nodegroup string) error { +func (mgr *equinixMetalManagerRest) createDevice(ctx context.Context, hostname, userData, nodegroup string) error { reservation := "" if mgr.getNodePoolDefinition(nodegroup).reservation == "require" || mgr.getNodePoolDefinition(nodegroup).reservation == "prefer" { reservation = "next-available" @@ -551,7 +551,7 @@ func isNoAvailableReservationsError(err error) bool { return strings.Contains(err.Error(), " no available hardware reservations ") } -func (mgr *packetManagerRest) createDeviceRequest(ctx context.Context, cr *DeviceCreateRequest, nodegroup string) error { +func (mgr *equinixMetalManagerRest) createDeviceRequest(ctx context.Context, cr *DeviceCreateRequest, nodegroup string) error { url := mgr.getNodePoolDefinition("default").baseURL + "/" + path.Join("projects", cr.ProjectID, "devices") jsonValue, err := json.Marshal(cr) @@ -571,9 +571,9 @@ func (mgr *packetManagerRest) createDeviceRequest(ctx context.Context, cr *Devic // getNodes should return ProviderIDs for all nodes in the node group, // used to find any nodes which are unregistered in kubernetes. 
-func (mgr *packetManagerRest) getNodes(nodegroup string) ([]string, error) { +func (mgr *equinixMetalManagerRest) getNodes(nodegroup string) ([]string, error) { // Get node ProviderIDs by getting device IDs from Packet - devices, err := mgr.listPacketDevices(context.TODO()) + devices, err := mgr.listMetalDevices(context.TODO()) if err != nil { return nil, fmt.Errorf("failed to list devices: %w", err) } @@ -591,8 +591,8 @@ func (mgr *packetManagerRest) getNodes(nodegroup string) ([]string, error) { // getNodeNames should return Names for all nodes in the node group, // used to find any nodes which are unregistered in kubernetes. -func (mgr *packetManagerRest) getNodeNames(nodegroup string) ([]string, error) { - devices, err := mgr.listPacketDevices(context.TODO()) +func (mgr *equinixMetalManagerRest) getNodeNames(nodegroup string) ([]string, error) { + devices, err := mgr.listMetalDevices(context.TODO()) if err != nil { return nil, fmt.Errorf("failed to list devices: %w", err) } @@ -608,7 +608,7 @@ func (mgr *packetManagerRest) getNodeNames(nodegroup string) ([]string, error) { return nodes, nil } -func (mgr *packetManagerRest) deleteDevice(ctx context.Context, nodegroup, id string) error { +func (mgr *equinixMetalManagerRest) deleteDevice(ctx context.Context, nodegroup, id string) error { url := mgr.getNodePoolDefinition("default").baseURL + "/" + path.Join("devices", id) result, err := mgr.request(context.TODO(), "DELETE", url, []byte("")) @@ -622,14 +622,14 @@ func (mgr *packetManagerRest) deleteDevice(ctx context.Context, nodegroup, id st } // deleteNodes deletes nodes by passing a comma separated list of names or IPs -func (mgr *packetManagerRest) deleteNodes(nodegroup string, nodes []NodeRef, updatedNodeCount int) error { +func (mgr *equinixMetalManagerRest) deleteNodes(nodegroup string, nodes []NodeRef, updatedNodeCount int) error { klog.Infof("Deleting nodes %v", nodes) ctx := context.TODO() errList := make([]error, 0, len(nodes)) - devices, err := mgr.listPacketDevices(ctx) + devices, err := mgr.listMetalDevices(ctx) if err != nil { return fmt.Errorf("failed to list devices: %w", err) } @@ -655,7 +655,7 @@ func (mgr *packetManagerRest) deleteNodes(nodegroup string, nodes []NodeRef, upd switch { case d.Hostname == n.Name: - klog.V(1).Infof("Matching Packet Device %s - %s", d.Hostname, d.ID) + klog.V(1).Infof("Matching Equinix Metal Device %s - %s", d.Hostname, d.ID) errList = append(errList, mgr.deleteDevice(ctx, nodegroup, d.ID)) case fakeNode && trimmedName == d.ID: klog.V(1).Infof("Fake Node %s", d.ID) @@ -668,7 +668,7 @@ func (mgr *packetManagerRest) deleteNodes(nodegroup string, nodes []NodeRef, upd return utilerrors.NewAggregate(errList) } -// BuildGenericLabels builds basic labels for Packet nodes +// BuildGenericLabels builds basic labels for equinix metal nodes func BuildGenericLabels(nodegroup string, instanceType string) map[string]string { result := make(map[string]string) @@ -681,9 +681,9 @@ func BuildGenericLabels(nodegroup string, instanceType string) map[string]string return result } -// templateNodeInfo returns a NodeInfo with a node template based on the packet plan +// templateNodeInfo returns a NodeInfo with a node template based on the equinix metal plan // that is used to create nodes in a given node group. 
-func (mgr *packetManagerRest) templateNodeInfo(nodegroup string) (*schedulerframework.NodeInfo, error) { +func (mgr *equinixMetalManagerRest) templateNodeInfo(nodegroup string) (*schedulerframework.NodeInfo, error) { node := apiv1.Node{} nodeName := fmt.Sprintf("%s-asg-%d", nodegroup, rand.Int63()) node.ObjectMeta = metav1.ObjectMeta{ @@ -695,14 +695,14 @@ func (mgr *packetManagerRest) templateNodeInfo(nodegroup string) (*schedulerfram Capacity: apiv1.ResourceList{}, } - packetPlan := InstanceTypes[mgr.getNodePoolDefinition(nodegroup).plan] - if packetPlan == nil { - return nil, fmt.Errorf("packet plan %q not supported", mgr.getNodePoolDefinition(nodegroup).plan) + equinixMetalPlan := InstanceTypes[mgr.getNodePoolDefinition(nodegroup).plan] + if equinixMetalPlan == nil { + return nil, fmt.Errorf("equinix metal plan %q not supported", mgr.getNodePoolDefinition(nodegroup).plan) } node.Status.Capacity[apiv1.ResourcePods] = *resource.NewQuantity(110, resource.DecimalSI) - node.Status.Capacity[apiv1.ResourceCPU] = *resource.NewQuantity(packetPlan.CPU, resource.DecimalSI) - node.Status.Capacity[gpu.ResourceNvidiaGPU] = *resource.NewQuantity(packetPlan.GPU, resource.DecimalSI) - node.Status.Capacity[apiv1.ResourceMemory] = *resource.NewQuantity(packetPlan.MemoryMb*1024*1024, resource.DecimalSI) + node.Status.Capacity[apiv1.ResourceCPU] = *resource.NewQuantity(equinixMetalPlan.CPU, resource.DecimalSI) + node.Status.Capacity[gpu.ResourceNvidiaGPU] = *resource.NewQuantity(equinixMetalPlan.GPU, resource.DecimalSI) + node.Status.Capacity[apiv1.ResourceMemory] = *resource.NewQuantity(equinixMetalPlan.MemoryMb*1024*1024, resource.DecimalSI) node.Status.Allocatable = node.Status.Capacity node.Status.Conditions = cloudprovider.BuildReadyConditions() @@ -715,10 +715,10 @@ func (mgr *packetManagerRest) templateNodeInfo(nodegroup string) (*schedulerfram return nodeInfo, nil } -func (mgr *packetManagerRest) getNodePoolDefinition(nodegroup string) *packetManagerNodePool { - NodePoolDefinition, ok := mgr.packetManagerNodePools[nodegroup] +func (mgr *equinixMetalManagerRest) getNodePoolDefinition(nodegroup string) *equinixMetalManagerNodePool { + NodePoolDefinition, ok := mgr.equinixMetalManagerNodePools[nodegroup] if !ok { - NodePoolDefinition, ok = mgr.packetManagerNodePools["default"] + NodePoolDefinition, ok = mgr.equinixMetalManagerNodePools["default"] if !ok { klog.Fatalf("No default cloud-config was found") } diff --git a/cluster-autoscaler/cloudprovider/packet/packet_manager_rest_test.go b/cluster-autoscaler/cloudprovider/packet/packet_manager_rest_test.go index 4ca55eeb4367..fe47c22792fe 100644 --- a/cluster-autoscaler/cloudprovider/packet/packet_manager_rest_test.go +++ b/cluster-autoscaler/cloudprovider/packet/packet_manager_rest_test.go @@ -28,23 +28,23 @@ import ( ) // API call responses contain only the minimum information required by the cluster-autoscaler -const listPacketDevicesResponse = ` +const listMetalDevicesResponse = ` {"devices":[{"id":"cace3b27-dff8-4930-943d-b2a63a775f03","short_id":"cace3b27","hostname":"k8s-cluster2-pool3-gndxdmmw","description":null,"state":"active","tags":["k8s-cluster-cluster2","k8s-nodepool-pool3"]},{"id":"efc985f6-ba6a-4bc3-8ef4-9643b0e950a9","short_id":"efc985f6","hostname":"k8s-cluster2-master","description":null,"state":"active","tags":["k8s-cluster-cluster2"]}]} ` -const listPacketDevicesResponseAfterIncreasePool3 = ` +const listMetalDevicesResponseAfterIncreasePool3 = ` 
{"devices":[{"id":"8fa90049-e715-4794-ba31-81c1c78cee84","short_id":"8fa90049","hostname":"k8s-cluster2-pool3-xpnrwgdf","description":null,"state":"active","tags":["k8s-cluster-cluster2","k8s-nodepool-pool3"]},{"id":"cace3b27-dff8-4930-943d-b2a63a775f03","short_id":"cace3b27","hostname":"k8s-cluster2-pool3-gndxdmmw","description":null,"state":"active","tags":["k8s-cluster-cluster2","k8s-nodepool-pool3"]},{"id":"efc985f6-ba6a-4bc3-8ef4-9643b0e950a9","short_id":"efc985f6","hostname":"k8s-cluster2-master","description":null,"state":"active","tags":["k8s-cluster-cluster2"]}]} ` -const listPacketDevicesResponseAfterIncreasePool2 = ` +const listMetalDevicesResponseAfterIncreasePool2 = ` {"devices":[{"id":"0f5609af-1c27-451b-8edd-a1283f2c9440","short_id":"0f5609af","hostname":"k8s-cluster2-pool2-jssxcyzz","description":null,"state":"active","tags":["k8s-cluster-cluster2","k8s-nodepool-pool2"]},{"id":"8fa90049-e715-4794-ba31-81c1c78cee84","short_id":"8fa90049","hostname":"k8s-cluster2-pool3-xpnrwgdf","description":null,"state":"active","tags":["k8s-cluster-cluster2","k8s-nodepool-pool3"]},{"id":"cace3b27-dff8-4930-943d-b2a63a775f03","short_id":"cace3b27","hostname":"k8s-cluster2-pool3-gndxdmmw","description":null,"state":"active","tags":["k8s-cluster-cluster2","k8s-nodepool-pool3"]},{"id":"efc985f6-ba6a-4bc3-8ef4-9643b0e950a9","short_id":"efc985f6","hostname":"k8s-cluster2-master","description":null,"state":"active","tags":["k8s-cluster-cluster2"]}]} ` const cloudinitDefault = "IyEvYmluL2Jhc2gKZXhwb3J0IERFQklBTl9GUk9OVEVORD1ub25pbnRlcmFjdGl2ZQphcHQtZ2V0IHVwZGF0ZSAmJiBhcHQtZ2V0IGluc3RhbGwgLXkgYXB0LXRyYW5zcG9ydC1odHRwcyBjYS1jZXJ0aWZpY2F0ZXMgY3VybCBzb2Z0d2FyZS1wcm9wZXJ0aWVzLWNvbW1vbgpjdXJsIC1mc1NMIGh0dHBzOi8vZG93bmxvYWQuZG9ja2VyLmNvbS9saW51eC91YnVudHUvZ3BnIHwgYXB0LWtleSBhZGQgLQpjdXJsIC1zIGh0dHBzOi8vcGFja2FnZXMuY2xvdWQuZ29vZ2xlLmNvbS9hcHQvZG9jL2FwdC1rZXkuZ3BnIHwgYXB0LWtleSBhZGQgLQpjYXQgPDxFT0YgPi9ldGMvYXB0L3NvdXJjZXMubGlzdC5kL2t1YmVybmV0ZXMubGlzdApkZWIgaHR0cHM6Ly9hcHQua3ViZXJuZXRlcy5pby8ga3ViZXJuZXRlcy14ZW5pYWwgbWFpbgpFT0YKYWRkLWFwdC1yZXBvc2l0b3J5ICAgImRlYiBbYXJjaD1hbWQ2NF0gaHR0cHM6Ly9kb3dubG9hZC5kb2NrZXIuY29tL2xpbnV4L3VidW50dSAgICQobHNiX3JlbGVhc2UgLWNzKSAgIHN0YWJsZSIKYXB0LWdldCB1cGRhdGUKYXB0LWdldCB1cGdyYWRlIC15CmFwdC1nZXQgaW5zdGFsbCAteSBrdWJlbGV0PTEuMTcuNC0wMCBrdWJlYWRtPTEuMTcuNC0wMCBrdWJlY3RsPTEuMTcuNC0wMAphcHQtbWFyayBob2xkIGt1YmVsZXQga3ViZWFkbSBrdWJlY3RsCmN1cmwgLWZzU0wgaHR0cHM6Ly9kb3dubG9hZC5kb2NrZXIuY29tL2xpbnV4L3VidW50dS9ncGcgfCBhcHQta2V5IGFkZCAtCmFkZC1hcHQtcmVwb3NpdG9yeSAiZGViIFthcmNoPWFtZDY0XSBodHRwczovL2Rvd25sb2FkLmRvY2tlci5jb20vbGludXgvdWJ1bnR1IGJpb25pYyBzdGFibGUiCmFwdCB1cGRhdGUKYXB0IGluc3RhbGwgLXkgZG9ja2VyLWNlPTE4LjA2LjJ+Y2V+My0wfnVidW50dQpjYXQgPiAvZXRjL2RvY2tlci9kYWVtb24uanNvbiA8PEVPRgp7CiAgImV4ZWMtb3B0cyI6IFsibmF0aXZlLmNncm91cGRyaXZlcj1zeXN0ZW1kIl0sCiAgImxvZy1kcml2ZXIiOiAianNvbi1maWxlIiwKICAibG9nLW9wdHMiOiB7CiAgICAibWF4LXNpemUiOiAiMTAwbSIKICB9LAogICJzdG9yYWdlLWRyaXZlciI6ICJvdmVybGF5MiIKfQpFT0YKbWtkaXIgLXAgL2V0Yy9zeXN0ZW1kL3N5c3RlbS9kb2NrZXIuc2VydmljZS5kCnN5c3RlbWN0bCBkYWVtb24tcmVsb2FkCnN5c3RlbWN0bCByZXN0YXJ0IGRvY2tlcgpzd2Fwb2ZmIC1hCm12IC9ldGMvZnN0YWIgL2V0Yy9mc3RhYi5vbGQgJiYgZ3JlcCAtdiBzd2FwIC9ldGMvZnN0YWIub2xkID4gL2V0Yy9mc3RhYgpjYXQgPDxFT0YgfCB0ZWUgL2V0Yy9kZWZhdWx0L2t1YmVsZXQKS1VCRUxFVF9FWFRSQV9BUkdTPS0tY2xvdWQtcHJvdmlkZXI9ZXh0ZXJuYWwgLS1ub2RlLWxhYmVscz1wb29sPXt7Lk5vZGVHcm91cH19CkVPRgprdWJlYWRtIGpvaW4gLS1kaXNjb3ZlcnktdG9rZW4tdW5zYWZlLXNraXAtY2EtdmVyaWZpY2F0aW9uIC0tdG9rZW4ge3suQm9vdHN0cmFwVG9rZW5JRH19Lnt7LkJvb3RzdHJhcFRva2VuU2VjcmV0fX0ge3suQVBJU2VydmVyRW5kcG9pbnR9fQo=" -func 
newTestPacketManagerRest(t *testing.T, url string) *packetManagerRest { - manager := &packetManagerRest{ - packetManagerNodePools: map[string]*packetManagerNodePool{ +func newTestPacketManagerRest(t *testing.T, url string) *equinixMetalManagerRest { + manager := &equinixMetalManagerRest{ + equinixMetalManagerNodePools: map[string]*equinixMetalManagerNodePool{ "default": { baseURL: url, clusterName: "cluster2", @@ -75,8 +75,8 @@ func newTestPacketManagerRest(t *testing.T, url string) *packetManagerRest { } return manager } -func TestListPacketDevices(t *testing.T) { - var m *packetManagerRest +func TestListMetalDevices(t *testing.T) { + var m *equinixMetalManagerRest server := NewHttpServerMock(MockFieldContentType, MockFieldResponse) defer server.Close() if len(os.Getenv("PACKET_AUTH_TOKEN")) > 0 { @@ -86,11 +86,11 @@ func TestListPacketDevices(t *testing.T) { // Set up a mock Packet API m = newTestPacketManagerRest(t, server.URL) t.Logf("server URL: %v", server.URL) - t.Logf("default packetManagerNodePool baseURL: %v", m.packetManagerNodePools["default"].baseURL) - server.On("handle", "/projects/"+m.packetManagerNodePools["default"].projectID+"/devices").Return("application/json", listPacketDevicesResponse).Times(2) + t.Logf("default equinixMetalManagerNodePool baseURL: %v", m.equinixMetalManagerNodePools["default"].baseURL) + server.On("handle", "/projects/"+m.equinixMetalManagerNodePools["default"].projectID+"/devices").Return("application/json", listMetalDevicesResponse).Times(2) } - _, err := m.listPacketDevices(context.TODO()) + _, err := m.listMetalDevices(context.TODO()) assert.NoError(t, err) c, err := m.nodeGroupSize("pool3") diff --git a/cluster-autoscaler/cloudprovider/packet/packet_node_group.go b/cluster-autoscaler/cloudprovider/packet/packet_node_group.go index 863e7d86afa9..f4f7b32f23ad 100644 --- a/cluster-autoscaler/cloudprovider/packet/packet_node_group.go +++ b/cluster-autoscaler/cloudprovider/packet/packet_node_group.go @@ -28,14 +28,14 @@ import ( schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework" ) -// packetNodeGroup implements NodeGroup interface from cluster-autoscaler/cloudprovider. +// equinixMetalNodeGroup implements NodeGroup interface from cluster-autoscaler/cloudprovider. // // Represents a homogeneous collection of nodes within a cluster, // which can be dynamically resized between a minimum and maximum // number of nodes. -type packetNodeGroup struct { - packetManager packetManager - id string +type equinixMetalNodeGroup struct { + equinixMetalManager equinixMetalManager + id string clusterUpdateMutex *sync.Mutex @@ -70,7 +70,7 @@ const ( // // Takes precautions so that the cluster is not modified while in an UPDATE_IN_PROGRESS state. // Blocks until the cluster has reached UPDATE_COMPLETE. 
-func (ng *packetNodeGroup) IncreaseSize(delta int) error { +func (ng *equinixMetalNodeGroup) IncreaseSize(delta int) error { ng.clusterUpdateMutex.Lock() defer ng.clusterUpdateMutex.Unlock() @@ -78,7 +78,7 @@ func (ng *packetNodeGroup) IncreaseSize(delta int) error { return fmt.Errorf("size increase must be positive") } - size, err := ng.packetManager.nodeGroupSize(ng.id) + size, err := ng.equinixMetalManager.nodeGroupSize(ng.id) if err != nil { return fmt.Errorf("could not check current nodegroup size: %v", err) } @@ -89,7 +89,7 @@ func (ng *packetNodeGroup) IncreaseSize(delta int) error { klog.V(0).Infof("Increasing size by %d, %d->%d", delta, *ng.targetSize, *ng.targetSize+delta) *ng.targetSize += delta - err = ng.packetManager.createNodes(ng.id, delta) + err = ng.equinixMetalManager.createNodes(ng.id, delta) if err != nil { return fmt.Errorf("could not increase cluster size: %v", err) } @@ -99,12 +99,12 @@ func (ng *packetNodeGroup) IncreaseSize(delta int) error { // deleteNodes deletes a set of nodes chosen by the autoscaler. // -// The process of deletion depends on the implementation of packetManager, +// The process of deletion depends on the implementation of equinixMetalManager, // but this function handles what should be common between all implementations: // - simultaneous but separate calls from the autoscaler are batched together // - does not allow scaling while the cluster is already in an UPDATE_IN_PROGRESS state // - after scaling down, blocks until the cluster has reached UPDATE_COMPLETE -func (ng *packetNodeGroup) DeleteNodes(nodes []*apiv1.Node) error { +func (ng *equinixMetalNodeGroup) DeleteNodes(nodes []*apiv1.Node) error { klog.V(1).Infof("Locking nodesToDeleteMutex") // Batch simultaneous deletes on individual nodes @@ -117,7 +117,7 @@ func (ng *packetNodeGroup) DeleteNodes(nodes []*apiv1.Node) error { var cachedSize int var err error if time.Since(ng.deleteNodesCachedSizeAt) > time.Second*10 { - cachedSize, err = ng.packetManager.nodeGroupSize(ng.id) + cachedSize, err = ng.equinixMetalManager.nodeGroupSize(ng.id) if err != nil { ng.nodesToDeleteMutex.Unlock() klog.V(1).Infof("UnLocking nodesToDeleteMutex") @@ -204,13 +204,13 @@ func (ng *packetNodeGroup) DeleteNodes(nodes []*apiv1.Node) error { }) } - err = ng.packetManager.deleteNodes(ng.id, nodeRefs, cachedSize-len(nodes)) + err = ng.equinixMetalManager.deleteNodes(ng.id, nodeRefs, cachedSize-len(nodes)) if err != nil { return fmt.Errorf("manager error deleting nodes: %v", err) } // Check the new node group size and store that as the new target - newSize, err := ng.packetManager.nodeGroupSize(ng.id) + newSize, err := ng.equinixMetalManager.nodeGroupSize(ng.id) if err != nil { // Set to the expected size as a fallback *ng.targetSize = cachedSize - len(nodes) @@ -221,29 +221,29 @@ func (ng *packetNodeGroup) DeleteNodes(nodes []*apiv1.Node) error { return nil } -// DecreaseTargetSize decreases the cluster node_count in packet. -func (ng *packetNodeGroup) DecreaseTargetSize(delta int) error { +// DecreaseTargetSize decreases the cluster node_count in Equinix Metal. 
+func (ng *equinixMetalNodeGroup) DecreaseTargetSize(delta int) error { if delta >= 0 { return fmt.Errorf("size decrease must be negative") } klog.V(0).Infof("Decreasing target size by %d, %d->%d", delta, *ng.targetSize, *ng.targetSize+delta) *ng.targetSize += delta - return fmt.Errorf("could not decrease target size") /*ng.packetManager.updateNodeCount(ng.id, *ng.targetSize)*/ + return fmt.Errorf("could not decrease target size") /*ng.equinixMetalManager.updateNodeCount(ng.id, *ng.targetSize)*/ } // Id returns the node group ID -func (ng *packetNodeGroup) Id() string { +func (ng *equinixMetalNodeGroup) Id() string { return ng.id } // Debug returns a string formatted with the node group's min, max and target sizes. -func (ng *packetNodeGroup) Debug() string { +func (ng *equinixMetalNodeGroup) Debug() string { return fmt.Sprintf("%s min=%d max=%d target=%d", ng.id, ng.minSize, ng.maxSize, *ng.targetSize) } // Nodes returns a list of nodes that belong to this node group. -func (ng *packetNodeGroup) Nodes() ([]cloudprovider.Instance, error) { - nodes, err := ng.packetManager.getNodes(ng.id) +func (ng *equinixMetalNodeGroup) Nodes() ([]cloudprovider.Instance, error) { + nodes, err := ng.equinixMetalManager.getNodes(ng.id) if err != nil { return nil, fmt.Errorf("could not get nodes: %v", err) } @@ -255,48 +255,48 @@ func (ng *packetNodeGroup) Nodes() ([]cloudprovider.Instance, error) { } // TemplateNodeInfo returns a node template for this node group. -func (ng *packetNodeGroup) TemplateNodeInfo() (*schedulerframework.NodeInfo, error) { - return ng.packetManager.templateNodeInfo(ng.id) +func (ng *equinixMetalNodeGroup) TemplateNodeInfo() (*schedulerframework.NodeInfo, error) { + return ng.equinixMetalManager.templateNodeInfo(ng.id) } // Exist returns if this node group exists. // Currently always returns true. -func (ng *packetNodeGroup) Exist() bool { +func (ng *equinixMetalNodeGroup) Exist() bool { return true } // Create creates the node group on the cloud provider side. -func (ng *packetNodeGroup) Create() (cloudprovider.NodeGroup, error) { +func (ng *equinixMetalNodeGroup) Create() (cloudprovider.NodeGroup, error) { return nil, cloudprovider.ErrAlreadyExist } // Delete deletes the node group on the cloud provider side. -func (ng *packetNodeGroup) Delete() error { +func (ng *equinixMetalNodeGroup) Delete() error { return cloudprovider.ErrNotImplemented } // Autoprovisioned returns if the nodegroup is autoprovisioned. -func (ng *packetNodeGroup) Autoprovisioned() bool { +func (ng *equinixMetalNodeGroup) Autoprovisioned() bool { return false } // MaxSize returns the maximum allowed size of the node group. -func (ng *packetNodeGroup) MaxSize() int { +func (ng *equinixMetalNodeGroup) MaxSize() int { return ng.maxSize } // MinSize returns the minimum allowed size of the node group. -func (ng *packetNodeGroup) MinSize() int { +func (ng *equinixMetalNodeGroup) MinSize() int { return ng.minSize } // TargetSize returns the target size of the node group. -func (ng *packetNodeGroup) TargetSize() (int, error) { +func (ng *equinixMetalNodeGroup) TargetSize() (int, error) { return *ng.targetSize, nil } // GetOptions returns NodeGroupAutoscalingOptions that should be used for this particular // NodeGroup. Returning a nil will result in using default options. 
-func (ng *packetNodeGroup) GetOptions(defaults config.NodeGroupAutoscalingOptions) (*config.NodeGroupAutoscalingOptions, error) { +func (ng *equinixMetalNodeGroup) GetOptions(defaults config.NodeGroupAutoscalingOptions) (*config.NodeGroupAutoscalingOptions, error) { return nil, cloudprovider.ErrNotImplemented } diff --git a/cluster-autoscaler/cloudprovider/packet/packet_node_group_test.go b/cluster-autoscaler/cloudprovider/packet/packet_node_group_test.go index e2aee3d12c74..50c5f5041277 100644 --- a/cluster-autoscaler/cloudprovider/packet/packet_node_group_test.go +++ b/cluster-autoscaler/cloudprovider/packet/packet_node_group_test.go @@ -29,36 +29,36 @@ import ( "github.com/stretchr/testify/mock" ) -const createPacketDeviceResponsePool2 = `` -const deletePacketDeviceResponsePool2 = `` +const createMetalDeviceResponsePool2 = `` +const deleteMetalDeviceResponsePool2 = `` -const createPacketDeviceResponsePool3 = `` -const deletePacketDeviceResponsePool3 = `` +const createMetalDeviceResponsePool3 = `` +const deleteMetalDeviceResponsePool3 = `` func TestIncreaseDecreaseSize(t *testing.T) { - var m *packetManagerRest + var m *equinixMetalManagerRest server := NewHttpServerMock(MockFieldContentType, MockFieldResponse) defer server.Close() assert.Equal(t, true, true) if len(os.Getenv("PACKET_AUTH_TOKEN")) > 0 { // If auth token set in env, hit the actual Packet API - m = newTestPacketManagerRest(t, "https://api.packet.net") + m = newTestPacketManagerRest(t, "https://api.equinix.com") } else { // Set up a mock Packet API m = newTestPacketManagerRest(t, server.URL) - server.On("handle", "/projects/"+m.packetManagerNodePools["default"].projectID+"/devices").Return("application/json", listPacketDevicesResponse).Times(3) - server.On("handle", "/projects/"+m.packetManagerNodePools["default"].projectID+"/devices").Return("application/json", createPacketDeviceResponsePool3).Times(1) - server.On("handle", "/projects/"+m.packetManagerNodePools["default"].projectID+"/devices").Return("application/json", listPacketDevicesResponseAfterIncreasePool3).Times(2) - server.On("handle", "/projects/"+m.packetManagerNodePools["default"].projectID+"/devices").Return("application/json", createPacketDeviceResponsePool2).Times(1) - server.On("handle", "/projects/"+m.packetManagerNodePools["default"].projectID+"/devices").Return("application/json", listPacketDevicesResponseAfterIncreasePool2).Times(3) - server.On("handle", "/devices/0f5609af-1c27-451b-8edd-a1283f2c9440").Return("application/json", deletePacketDeviceResponsePool2).Times(1) - server.On("handle", "/projects/"+m.packetManagerNodePools["default"].projectID+"/devices").Return("application/json", listPacketDevicesResponseAfterIncreasePool3).Times(3) - server.On("handle", "/devices/8fa90049-e715-4794-ba31-81c1c78cee84").Return("application/json", deletePacketDeviceResponsePool3).Times(1) - server.On("handle", "/projects/"+m.packetManagerNodePools["default"].projectID+"/devices").Return("application/json", listPacketDevicesResponse).Times(3) + server.On("handle", "/projects/"+m.equinixMetalManagerNodePools["default"].projectID+"/devices").Return("application/json", listMetalDevicesResponse).Times(3) + server.On("handle", "/projects/"+m.equinixMetalManagerNodePools["default"].projectID+"/devices").Return("application/json", createMetalDeviceResponsePool3).Times(1) + server.On("handle", "/projects/"+m.equinixMetalManagerNodePools["default"].projectID+"/devices").Return("application/json", listMetalDevicesResponseAfterIncreasePool3).Times(2) + server.On("handle", 
"/projects/"+m.equinixMetalManagerNodePools["default"].projectID+"/devices").Return("application/json", createMetalDeviceResponsePool2).Times(1) + server.On("handle", "/projects/"+m.equinixMetalManagerNodePools["default"].projectID+"/devices").Return("application/json", listMetalDevicesResponseAfterIncreasePool2).Times(3) + server.On("handle", "/devices/0f5609af-1c27-451b-8edd-a1283f2c9440").Return("application/json", deleteMetalDeviceResponsePool2).Times(1) + server.On("handle", "/projects/"+m.equinixMetalManagerNodePools["default"].projectID+"/devices").Return("application/json", listMetalDevicesResponseAfterIncreasePool3).Times(3) + server.On("handle", "/devices/8fa90049-e715-4794-ba31-81c1c78cee84").Return("application/json", deleteMetalDeviceResponsePool3).Times(1) + server.On("handle", "/projects/"+m.equinixMetalManagerNodePools["default"].projectID+"/devices").Return("application/json", listMetalDevicesResponse).Times(3) } clusterUpdateLock := sync.Mutex{} - ngPool2 := &packetNodeGroup{ - packetManager: m, + ngPool2 := &equinixMetalNodeGroup{ + equinixMetalManager: m, id: "pool2", clusterUpdateMutex: &clusterUpdateLock, minSize: 0, @@ -68,8 +68,8 @@ func TestIncreaseDecreaseSize(t *testing.T) { deleteBatchingDelay: 2 * time.Second, } - ngPool3 := &packetNodeGroup{ - packetManager: m, + ngPool3 := &equinixMetalNodeGroup{ + equinixMetalManager: m, id: "pool3", clusterUpdateMutex: &clusterUpdateLock, minSize: 0, @@ -79,11 +79,11 @@ func TestIncreaseDecreaseSize(t *testing.T) { deleteBatchingDelay: 2 * time.Second, } - n1Pool2, err := ngPool2.packetManager.getNodeNames(ngPool2.id) + n1Pool2, err := ngPool2.equinixMetalManager.getNodeNames(ngPool2.id) assert.NoError(t, err) assert.Equal(t, int(0), len(n1Pool2)) - n1Pool3, err := ngPool3.packetManager.getNodeNames(ngPool3.id) + n1Pool3, err := ngPool3.equinixMetalManager.getNodeNames(ngPool3.id) assert.NoError(t, err) assert.Equal(t, int(1), len(n1Pool3)) @@ -111,7 +111,7 @@ func TestIncreaseDecreaseSize(t *testing.T) { time.Sleep(420 * time.Second) } - n2Pool3, err := ngPool3.packetManager.getNodeNames(ngPool3.id) + n2Pool3, err := ngPool3.equinixMetalManager.getNodeNames(ngPool3.id) assert.NoError(t, err) // Assert that the nodepool3 size is now 2 assert.Equal(t, int(2), len(n2Pool3)) @@ -125,7 +125,7 @@ func TestIncreaseDecreaseSize(t *testing.T) { time.Sleep(420 * time.Second) } - n2Pool2, err := ngPool2.packetManager.getNodeNames(ngPool2.id) + n2Pool2, err := ngPool2.equinixMetalManager.getNodeNames(ngPool2.id) assert.NoError(t, err) // Assert that the nodepool2 size is now 1 assert.Equal(t, int(1), len(n2Pool2)) @@ -156,12 +156,12 @@ func TestIncreaseDecreaseSize(t *testing.T) { } // Make sure that there were no errors and the nodepool2 size is once again 0 - n3Pool2, err := ngPool2.packetManager.getNodeNames(ngPool2.id) + n3Pool2, err := ngPool2.equinixMetalManager.getNodeNames(ngPool2.id) assert.NoError(t, err) assert.Equal(t, int(0), len(n3Pool2)) // Make sure that there were no errors and the nodepool3 size is once again 1 - n3Pool3, err := ngPool3.packetManager.getNodeNames(ngPool3.id) + n3Pool3, err := ngPool3.equinixMetalManager.getNodeNames(ngPool3.id) assert.NoError(t, err) assert.Equal(t, int(1), len(n3Pool3)) mock.AssertExpectationsForObjects(t, server) diff --git a/cluster-autoscaler/cloudprovider/packet/packet_price_model.go b/cluster-autoscaler/cloudprovider/packet/packet_price_model.go index a1617eff768d..9cbb05b4c7d5 100644 --- a/cluster-autoscaler/cloudprovider/packet/packet_price_model.go +++ 
b/cluster-autoscaler/cloudprovider/packet/packet_price_model.go @@ -24,8 +24,8 @@ import ( "k8s.io/autoscaler/cluster-autoscaler/utils/units" ) -// PacketPriceModel implements PriceModel interface for Packet. -type PacketPriceModel struct { +// EquinixMetalPriceModel implements PriceModel interface for Equinix Metal. +type EquinixMetalPriceModel struct { } const ( @@ -52,7 +52,7 @@ var instancePrices = map[string]float64{ // NodePrice returns a price of running the given node for a given period of time. // All prices are in USD. -func (model *PacketPriceModel) NodePrice(node *apiv1.Node, startTime time.Time, endTime time.Time) (float64, error) { +func (model *EquinixMetalPriceModel) NodePrice(node *apiv1.Node, startTime time.Time, endTime time.Time) (float64, error) { price := 0.0 if node.Labels != nil { if machineType, found := node.Labels[apiv1.LabelInstanceType]; found { @@ -72,7 +72,7 @@ func getHours(startTime time.Time, endTime time.Time) float64 { // PodPrice returns a theoretical minimum price of running a pod for a given // period of time on a perfectly matching machine. -func (model *PacketPriceModel) PodPrice(pod *apiv1.Pod, startTime time.Time, endTime time.Time) (float64, error) { +func (model *EquinixMetalPriceModel) PodPrice(pod *apiv1.Pod, startTime time.Time, endTime time.Time) (float64, error) { price := 0.0 for _, container := range pod.Spec.Containers { price += getBasePrice(container.Resources.Requests, startTime, endTime) diff --git a/cluster-autoscaler/cloudprovider/packet/packet_price_model_test.go b/cluster-autoscaler/cloudprovider/packet/packet_price_model_test.go index f35c1a7e1568..9dbd1aa31e8a 100644 --- a/cluster-autoscaler/cloudprovider/packet/packet_price_model_test.go +++ b/cluster-autoscaler/cloudprovider/packet/packet_price_model_test.go @@ -34,7 +34,7 @@ func TestGetNodePrice(t *testing.T) { labelsPool2 := BuildGenericLabels("pool2", "c3.medium.x86") plan2 := InstanceTypes["c3.medium.x86"] - model := &PacketPriceModel{} + model := &EquinixMetalPriceModel{} now := time.Now() node1 := BuildTestNode("node1", plan1.CPU*1000, plan1.MemoryMb*1024*1024) @@ -55,7 +55,7 @@ func TestGetPodPrice(t *testing.T) { pod1 := BuildTestPod("pod1", 100, 500*units.MiB) pod2 := BuildTestPod("pod2", 2*100, 2*500*units.MiB) - model := &PacketPriceModel{} + model := &EquinixMetalPriceModel{} now := time.Now() price1, err := model.PodPrice(pod1, now, now.Add(time.Hour)) From 4c75ceda4c8bdc954fb6eac9ad67fb59e92d054c Mon Sep 17 00:00:00 2001 From: Ayush Rangwala Date: Mon, 18 Sep 2023 21:59:51 +0530 Subject: [PATCH 2/7] Rename types --- cluster-autoscaler/cloudprovider/builder/builder_all.go | 2 +- .../cloudprovider/builder/builder_packet.go | 2 +- .../cloudprovider/packet/packet_cloud_provider.go | 6 +++--- .../cloudprovider/packet/packet_price_model.go | 8 ++++---- .../cloudprovider/packet/packet_price_model_test.go | 4 ++-- 5 files changed, 11 insertions(+), 11 deletions(-) diff --git a/cluster-autoscaler/cloudprovider/builder/builder_all.go b/cluster-autoscaler/cloudprovider/builder/builder_all.go index 12d745c63a98..30e775c41465 100644 --- a/cluster-autoscaler/cloudprovider/builder/builder_all.go +++ b/cluster-autoscaler/cloudprovider/builder/builder_all.go @@ -122,7 +122,7 @@ func buildCloudProvider(opts config.AutoscalingOptions, do cloudprovider.NodeGro case cloudprovider.HetznerProviderName: return hetzner.BuildHetzner(opts, do, rl) case cloudprovider.PacketProviderName: - return packet.BuildEquinixMetal(opts, do, rl) + return packet.BuildCloudProvider(opts, do, rl) case 
cloudprovider.ClusterAPIProviderName: return clusterapi.BuildClusterAPI(opts, do, rl) case cloudprovider.IonoscloudProviderName: diff --git a/cluster-autoscaler/cloudprovider/builder/builder_packet.go b/cluster-autoscaler/cloudprovider/builder/builder_packet.go index bdd126d40893..9d63d4cf2cec 100644 --- a/cluster-autoscaler/cloudprovider/builder/builder_packet.go +++ b/cluster-autoscaler/cloudprovider/builder/builder_packet.go @@ -36,7 +36,7 @@ const DefaultCloudProvider = packet.ProviderName func buildCloudProvider(opts config.AutoscalingOptions, do cloudprovider.NodeGroupDiscoveryOptions, rl *cloudprovider.ResourceLimiter) cloudprovider.CloudProvider { switch opts.CloudProviderName { case packet.ProviderName: - return packet.BuildEquinixMetal(opts, do, rl) + return packet.BuildCloudProvider(opts, do, rl) } return nil diff --git a/cluster-autoscaler/cloudprovider/packet/packet_cloud_provider.go b/cluster-autoscaler/cloudprovider/packet/packet_cloud_provider.go index bafa2fc65744..d7f8940130df 100644 --- a/cluster-autoscaler/cloudprovider/packet/packet_cloud_provider.go +++ b/cluster-autoscaler/cloudprovider/packet/packet_cloud_provider.go @@ -134,7 +134,7 @@ func (pcp *equinixMetalCloudProvider) HasInstance(node *apiv1.Node) (bool, error // Pricing returns pricing model for this cloud provider or error if not available. func (pcp *equinixMetalCloudProvider) Pricing() (cloudprovider.PricingModel, errors.AutoscalerError) { - return &EquinixMetalPriceModel{}, nil + return &PriceModel{}, nil } // GetAvailableMachineTypes is not implemented. @@ -168,11 +168,11 @@ func (pcp *equinixMetalCloudProvider) Cleanup() error { return nil } -// BuildEquinixMetal is called by the autoscaler to build an Equinix Metal cloud provider. +// BuildCloudProvider is called by the autoscaler to build an Equinix Metal cloud provider. // // The equinixMetalManager is created here, and the node groups are created // based on the specs provided via the command line parameters. -func BuildEquinixMetal(opts config.AutoscalingOptions, do cloudprovider.NodeGroupDiscoveryOptions, rl *cloudprovider.ResourceLimiter) cloudprovider.CloudProvider { +func BuildCloudProvider(opts config.AutoscalingOptions, do cloudprovider.NodeGroupDiscoveryOptions, rl *cloudprovider.ResourceLimiter) cloudprovider.CloudProvider { var config io.ReadCloser if opts.CloudConfig != "" { diff --git a/cluster-autoscaler/cloudprovider/packet/packet_price_model.go b/cluster-autoscaler/cloudprovider/packet/packet_price_model.go index 9cbb05b4c7d5..7b75846e570b 100644 --- a/cluster-autoscaler/cloudprovider/packet/packet_price_model.go +++ b/cluster-autoscaler/cloudprovider/packet/packet_price_model.go @@ -24,8 +24,8 @@ import ( "k8s.io/autoscaler/cluster-autoscaler/utils/units" ) -// EquinixMetalPriceModel implements PriceModel interface for Equinix Metal. -type EquinixMetalPriceModel struct { +// PriceModel implements PriceModel interface for Equinix Metal. +type PriceModel struct { } const ( @@ -52,7 +52,7 @@ var instancePrices = map[string]float64{ // NodePrice returns a price of running the given node for a given period of time. // All prices are in USD. 
-func (model *EquinixMetalPriceModel) NodePrice(node *apiv1.Node, startTime time.Time, endTime time.Time) (float64, error) { +func (model *PriceModel) NodePrice(node *apiv1.Node, startTime time.Time, endTime time.Time) (float64, error) { price := 0.0 if node.Labels != nil { if machineType, found := node.Labels[apiv1.LabelInstanceType]; found { @@ -72,7 +72,7 @@ func getHours(startTime time.Time, endTime time.Time) float64 { // PodPrice returns a theoretical minimum price of running a pod for a given // period of time on a perfectly matching machine. -func (model *EquinixMetalPriceModel) PodPrice(pod *apiv1.Pod, startTime time.Time, endTime time.Time) (float64, error) { +func (model *PriceModel) PodPrice(pod *apiv1.Pod, startTime time.Time, endTime time.Time) (float64, error) { price := 0.0 for _, container := range pod.Spec.Containers { price += getBasePrice(container.Resources.Requests, startTime, endTime) diff --git a/cluster-autoscaler/cloudprovider/packet/packet_price_model_test.go b/cluster-autoscaler/cloudprovider/packet/packet_price_model_test.go index 9dbd1aa31e8a..9b020f5ae483 100644 --- a/cluster-autoscaler/cloudprovider/packet/packet_price_model_test.go +++ b/cluster-autoscaler/cloudprovider/packet/packet_price_model_test.go @@ -34,7 +34,7 @@ func TestGetNodePrice(t *testing.T) { labelsPool2 := BuildGenericLabels("pool2", "c3.medium.x86") plan2 := InstanceTypes["c3.medium.x86"] - model := &EquinixMetalPriceModel{} + model := &PriceModel{} now := time.Now() node1 := BuildTestNode("node1", plan1.CPU*1000, plan1.MemoryMb*1024*1024) @@ -55,7 +55,7 @@ func TestGetPodPrice(t *testing.T) { pod1 := BuildTestPod("pod1", 100, 500*units.MiB) pod2 := BuildTestPod("pod2", 2*100, 2*500*units.MiB) - model := &EquinixMetalPriceModel{} + model := &PriceModel{} now := time.Now() price1, err := model.PodPrice(pod1, now, now.Add(time.Hour)) From 1ec0667b2aabc03e219bd6459e49a63465faf339 Mon Sep 17 00:00:00 2001 From: Ayush Rangwala Date: Thu, 21 Sep 2023 20:26:33 +0530 Subject: [PATCH 3/7] fix: provider name to be used in builder to provide backward compatibility Signed-off-by: Ayush Rangwala --- cluster-autoscaler/cloudprovider/builder/builder_all.go | 3 ++- .../cloudprovider/builder/builder_packet.go | 5 ++++- cluster-autoscaler/cloudprovider/cloud_provider.go | 2 ++ .../cloudprovider/packet/packet_cloud_provider.go | 7 ++++--- .../cloudprovider/packet/packet_manager_rest.go | 2 +- .../cloudprovider/packet/packet_node_group_test.go | 2 +- .../cloudprovider/packet/packet_price_model.go | 8 ++++---- .../cloudprovider/packet/packet_price_model_test.go | 4 ++-- 8 files changed, 20 insertions(+), 13 deletions(-) diff --git a/cluster-autoscaler/cloudprovider/builder/builder_all.go b/cluster-autoscaler/cloudprovider/builder/builder_all.go index 30e775c41465..dfcd001ba55f 100644 --- a/cluster-autoscaler/cloudprovider/builder/builder_all.go +++ b/cluster-autoscaler/cloudprovider/builder/builder_all.go @@ -121,7 +121,8 @@ func buildCloudProvider(opts config.AutoscalingOptions, do cloudprovider.NodeGro return ovhcloud.BuildOVHcloud(opts, do, rl) case cloudprovider.HetznerProviderName: return hetzner.BuildHetzner(opts, do, rl) - case cloudprovider.PacketProviderName: + case cloudprovider.PacketProviderName, + cloudprovider.EquinixMetalProviderName: return packet.BuildCloudProvider(opts, do, rl) case cloudprovider.ClusterAPIProviderName: return clusterapi.BuildClusterAPI(opts, do, rl) diff --git a/cluster-autoscaler/cloudprovider/builder/builder_packet.go 
b/cluster-autoscaler/cloudprovider/builder/builder_packet.go index 9d63d4cf2cec..3b15a997e033 100644 --- a/cluster-autoscaler/cloudprovider/builder/builder_packet.go +++ b/cluster-autoscaler/cloudprovider/builder/builder_packet.go @@ -28,15 +28,18 @@ import ( // AvailableCloudProviders supported by the cloud provider builder. var AvailableCloudProviders = []string{ packet.ProviderName, + packet.EquinixMetalProviderName, } // DefaultCloudProvider for Packet-only build is Packet. -const DefaultCloudProvider = packet.ProviderName +const DefaultCloudProvider = packet.EquinixMetalProviderName func buildCloudProvider(opts config.AutoscalingOptions, do cloudprovider.NodeGroupDiscoveryOptions, rl *cloudprovider.ResourceLimiter) cloudprovider.CloudProvider { switch opts.CloudProviderName { case packet.ProviderName: return packet.BuildCloudProvider(opts, do, rl) + case packet.EquinixMetalProviderName: + return packet.BuildCloudProvider(opts, do, rl) } return nil diff --git a/cluster-autoscaler/cloudprovider/cloud_provider.go b/cluster-autoscaler/cloudprovider/cloud_provider.go index 94f0273ec365..5423dcf60d2f 100644 --- a/cluster-autoscaler/cloudprovider/cloud_provider.go +++ b/cluster-autoscaler/cloudprovider/cloud_provider.go @@ -78,6 +78,8 @@ const ( VultrProviderName = "vultr" // PacketProviderName gets the provider name of packet PacketProviderName = "packet" + // EquinixMetalProviderName gets the provider name of packet + EquinixMetalProviderName = "equinixmetal" // TencentcloudProviderName gets the provider name of tencentcloud TencentcloudProviderName = "tencentcloud" // ExternalGrpcProviderName gets the provider name of the external grpc provider diff --git a/cluster-autoscaler/cloudprovider/packet/packet_cloud_provider.go b/cluster-autoscaler/cloudprovider/packet/packet_cloud_provider.go index d7f8940130df..7e5a666a0a5c 100644 --- a/cluster-autoscaler/cloudprovider/packet/packet_cloud_provider.go +++ b/cluster-autoscaler/cloudprovider/packet/packet_cloud_provider.go @@ -35,7 +35,8 @@ import ( const ( // ProviderName is the cloud provider name for Equinix Metal - ProviderName = "equinix-metal" + ProviderName = "packet" + EquinixMetalProviderName = "packet" // GPULabel is the label added to nodes with GPU resource. GPULabel = "cloud.google.com/gke-accelerator" // DefaultControllerNodeLabelKey is the label added to Master/Controller to identify as @@ -69,7 +70,7 @@ func buildEquinixMetalCloudProvider(metalManager equinixMetalManager, resourceLi // Name returns the name of the cloud provider. func (pcp *equinixMetalCloudProvider) Name() string { - return ProviderName + return EquinixMetalProviderName } // GPULabel returns the label added to nodes with GPU resource. @@ -134,7 +135,7 @@ func (pcp *equinixMetalCloudProvider) HasInstance(node *apiv1.Node) (bool, error // Pricing returns pricing model for this cloud provider or error if not available. func (pcp *equinixMetalCloudProvider) Pricing() (cloudprovider.PricingModel, errors.AutoscalerError) { - return &PriceModel{}, nil + return &Price{}, nil } // GetAvailableMachineTypes is not implemented. 
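Note on the backward-compatibility approach: the patch above keeps the legacy "packet" name working by registering a second provider name that resolves to the same builder. The following standalone Go sketch (simplified, hypothetical names; not the autoscaler's actual builder API) illustrates that aliasing pattern.

package main

import "fmt"

const (
	packetProviderName       = "packet"       // legacy name, kept for backward compatibility
	equinixMetalProviderName = "equinixmetal" // new name
)

// buildEquinixMetal stands in for the real provider constructor in this sketch.
func buildEquinixMetal() string {
	return "equinix-metal cloud provider"
}

// buildCloudProvider resolves either provider name to the same constructor,
// mirroring the aliasing used in the builder switch above.
func buildCloudProvider(name string) (string, error) {
	switch name {
	case packetProviderName, equinixMetalProviderName:
		return buildEquinixMetal(), nil
	default:
		return "", fmt.Errorf("unknown cloud provider: %q", name)
	}
}

func main() {
	for _, name := range []string{"packet", "equinixmetal", "gce"} {
		provider, err := buildCloudProvider(name)
		fmt.Println(name, "->", provider, err)
	}
}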
diff --git a/cluster-autoscaler/cloudprovider/packet/packet_manager_rest.go b/cluster-autoscaler/cloudprovider/packet/packet_manager_rest.go index c18674852ca0..9500c309064c 100644 --- a/cluster-autoscaler/cloudprovider/packet/packet_manager_rest.go +++ b/cluster-autoscaler/cloudprovider/packet/packet_manager_rest.go @@ -313,7 +313,7 @@ func createEquinixMetalManagerRest(configReader io.Reader, discoverOpts cloudpro } manager.equinixMetalManagerNodePools[nodepool] = &equinixMetalManagerNodePool{ - baseURL: "https://api.equinix.com", + baseURL: "https://api.equinix.com/metal/v1", clusterName: cfg.Nodegroupdef[nodepool].ClusterName, projectID: cfg.Nodegroupdef["default"].ProjectID, apiServerEndpoint: cfg.Nodegroupdef["default"].APIServerEndpoint, diff --git a/cluster-autoscaler/cloudprovider/packet/packet_node_group_test.go b/cluster-autoscaler/cloudprovider/packet/packet_node_group_test.go index 50c5f5041277..a3a1ea07abbd 100644 --- a/cluster-autoscaler/cloudprovider/packet/packet_node_group_test.go +++ b/cluster-autoscaler/cloudprovider/packet/packet_node_group_test.go @@ -42,7 +42,7 @@ func TestIncreaseDecreaseSize(t *testing.T) { assert.Equal(t, true, true) if len(os.Getenv("PACKET_AUTH_TOKEN")) > 0 { // If auth token set in env, hit the actual Packet API - m = newTestPacketManagerRest(t, "https://api.equinix.com") + m = newTestPacketManagerRest(t, "https://api.equinix.com/metal/v1") } else { // Set up a mock Packet API m = newTestPacketManagerRest(t, server.URL) diff --git a/cluster-autoscaler/cloudprovider/packet/packet_price_model.go b/cluster-autoscaler/cloudprovider/packet/packet_price_model.go index 7b75846e570b..e01b45257958 100644 --- a/cluster-autoscaler/cloudprovider/packet/packet_price_model.go +++ b/cluster-autoscaler/cloudprovider/packet/packet_price_model.go @@ -24,8 +24,8 @@ import ( "k8s.io/autoscaler/cluster-autoscaler/utils/units" ) -// PriceModel implements PriceModel interface for Equinix Metal. -type PriceModel struct { +// Price implements Price interface for Equinix Metal. +type Price struct { } const ( @@ -52,7 +52,7 @@ var instancePrices = map[string]float64{ // NodePrice returns a price of running the given node for a given period of time. // All prices are in USD. -func (model *PriceModel) NodePrice(node *apiv1.Node, startTime time.Time, endTime time.Time) (float64, error) { +func (model *Price) NodePrice(node *apiv1.Node, startTime time.Time, endTime time.Time) (float64, error) { price := 0.0 if node.Labels != nil { if machineType, found := node.Labels[apiv1.LabelInstanceType]; found { @@ -72,7 +72,7 @@ func getHours(startTime time.Time, endTime time.Time) float64 { // PodPrice returns a theoretical minimum price of running a pod for a given // period of time on a perfectly matching machine. 
-func (model *PriceModel) PodPrice(pod *apiv1.Pod, startTime time.Time, endTime time.Time) (float64, error) { +func (model *Price) PodPrice(pod *apiv1.Pod, startTime time.Time, endTime time.Time) (float64, error) { price := 0.0 for _, container := range pod.Spec.Containers { price += getBasePrice(container.Resources.Requests, startTime, endTime) diff --git a/cluster-autoscaler/cloudprovider/packet/packet_price_model_test.go b/cluster-autoscaler/cloudprovider/packet/packet_price_model_test.go index 9b020f5ae483..e604db1095b2 100644 --- a/cluster-autoscaler/cloudprovider/packet/packet_price_model_test.go +++ b/cluster-autoscaler/cloudprovider/packet/packet_price_model_test.go @@ -34,7 +34,7 @@ func TestGetNodePrice(t *testing.T) { labelsPool2 := BuildGenericLabels("pool2", "c3.medium.x86") plan2 := InstanceTypes["c3.medium.x86"] - model := &PriceModel{} + model := &Price{} now := time.Now() node1 := BuildTestNode("node1", plan1.CPU*1000, plan1.MemoryMb*1024*1024) @@ -55,7 +55,7 @@ func TestGetPodPrice(t *testing.T) { pod1 := BuildTestPod("pod1", 100, 500*units.MiB) pod2 := BuildTestPod("pod2", 2*100, 2*500*units.MiB) - model := &PriceModel{} + model := &Price{} now := time.Now() price1, err := model.PodPrice(pod1, now, now.Add(time.Hour)) From e4a474740fe4e5b97f9e2637f5791c3a605d3fb9 Mon Sep 17 00:00:00 2001 From: Ayush Rangwala Date: Mon, 4 Sep 2023 22:15:14 +0530 Subject: [PATCH 4/7] Rename comments, logs, structs, and vars from packet to equinix metal --- cluster-autoscaler/cloudprovider/builder/builder_packet.go | 5 ++--- .../cloudprovider/packet/packet_cloud_provider.go | 5 ++--- .../cloudprovider/packet/packet_manager_rest_test.go | 6 +++--- .../cloudprovider/packet/packet_node_group_test.go | 4 ++-- 4 files changed, 9 insertions(+), 11 deletions(-) diff --git a/cluster-autoscaler/cloudprovider/builder/builder_packet.go b/cluster-autoscaler/cloudprovider/builder/builder_packet.go index 3b15a997e033..bd7bb3c2e542 100644 --- a/cluster-autoscaler/cloudprovider/builder/builder_packet.go +++ b/cluster-autoscaler/cloudprovider/builder/builder_packet.go @@ -36,9 +36,8 @@ const DefaultCloudProvider = packet.EquinixMetalProviderName func buildCloudProvider(opts config.AutoscalingOptions, do cloudprovider.NodeGroupDiscoveryOptions, rl *cloudprovider.ResourceLimiter) cloudprovider.CloudProvider { switch opts.CloudProviderName { - case packet.ProviderName: - return packet.BuildCloudProvider(opts, do, rl) - case packet.EquinixMetalProviderName: + case packet.ProviderName, + packet.EquinixMetalProviderName: return packet.BuildCloudProvider(opts, do, rl) } diff --git a/cluster-autoscaler/cloudprovider/packet/packet_cloud_provider.go b/cluster-autoscaler/cloudprovider/packet/packet_cloud_provider.go index 7e5a666a0a5c..8cdb4abd9b8c 100644 --- a/cluster-autoscaler/cloudprovider/packet/packet_cloud_provider.go +++ b/cluster-autoscaler/cloudprovider/packet/packet_cloud_provider.go @@ -35,8 +35,7 @@ import ( const ( // ProviderName is the cloud provider name for Equinix Metal - ProviderName = "packet" - EquinixMetalProviderName = "packet" + ProviderName = "equinix-metal" // GPULabel is the label added to nodes with GPU resource. GPULabel = "cloud.google.com/gke-accelerator" // DefaultControllerNodeLabelKey is the label added to Master/Controller to identify as @@ -70,7 +69,7 @@ func buildEquinixMetalCloudProvider(metalManager equinixMetalManager, resourceLi // Name returns the name of the cloud provider. 
func (pcp *equinixMetalCloudProvider) Name() string { - return EquinixMetalProviderName + return ProviderName } // GPULabel returns the label added to nodes with GPU resource. diff --git a/cluster-autoscaler/cloudprovider/packet/packet_manager_rest_test.go b/cluster-autoscaler/cloudprovider/packet/packet_manager_rest_test.go index fe47c22792fe..a0c3ae1c52a1 100644 --- a/cluster-autoscaler/cloudprovider/packet/packet_manager_rest_test.go +++ b/cluster-autoscaler/cloudprovider/packet/packet_manager_rest_test.go @@ -42,7 +42,7 @@ const listMetalDevicesResponseAfterIncreasePool2 = ` const cloudinitDefault = "IyEvYmluL2Jhc2gKZXhwb3J0IERFQklBTl9GUk9OVEVORD1ub25pbnRlcmFjdGl2ZQphcHQtZ2V0IHVwZGF0ZSAmJiBhcHQtZ2V0IGluc3RhbGwgLXkgYXB0LXRyYW5zcG9ydC1odHRwcyBjYS1jZXJ0aWZpY2F0ZXMgY3VybCBzb2Z0d2FyZS1wcm9wZXJ0aWVzLWNvbW1vbgpjdXJsIC1mc1NMIGh0dHBzOi8vZG93bmxvYWQuZG9ja2VyLmNvbS9saW51eC91YnVudHUvZ3BnIHwgYXB0LWtleSBhZGQgLQpjdXJsIC1zIGh0dHBzOi8vcGFja2FnZXMuY2xvdWQuZ29vZ2xlLmNvbS9hcHQvZG9jL2FwdC1rZXkuZ3BnIHwgYXB0LWtleSBhZGQgLQpjYXQgPDxFT0YgPi9ldGMvYXB0L3NvdXJjZXMubGlzdC5kL2t1YmVybmV0ZXMubGlzdApkZWIgaHR0cHM6Ly9hcHQua3ViZXJuZXRlcy5pby8ga3ViZXJuZXRlcy14ZW5pYWwgbWFpbgpFT0YKYWRkLWFwdC1yZXBvc2l0b3J5ICAgImRlYiBbYXJjaD1hbWQ2NF0gaHR0cHM6Ly9kb3dubG9hZC5kb2NrZXIuY29tL2xpbnV4L3VidW50dSAgICQobHNiX3JlbGVhc2UgLWNzKSAgIHN0YWJsZSIKYXB0LWdldCB1cGRhdGUKYXB0LWdldCB1cGdyYWRlIC15CmFwdC1nZXQgaW5zdGFsbCAteSBrdWJlbGV0PTEuMTcuNC0wMCBrdWJlYWRtPTEuMTcuNC0wMCBrdWJlY3RsPTEuMTcuNC0wMAphcHQtbWFyayBob2xkIGt1YmVsZXQga3ViZWFkbSBrdWJlY3RsCmN1cmwgLWZzU0wgaHR0cHM6Ly9kb3dubG9hZC5kb2NrZXIuY29tL2xpbnV4L3VidW50dS9ncGcgfCBhcHQta2V5IGFkZCAtCmFkZC1hcHQtcmVwb3NpdG9yeSAiZGViIFthcmNoPWFtZDY0XSBodHRwczovL2Rvd25sb2FkLmRvY2tlci5jb20vbGludXgvdWJ1bnR1IGJpb25pYyBzdGFibGUiCmFwdCB1cGRhdGUKYXB0IGluc3RhbGwgLXkgZG9ja2VyLWNlPTE4LjA2LjJ+Y2V+My0wfnVidW50dQpjYXQgPiAvZXRjL2RvY2tlci9kYWVtb24uanNvbiA8PEVPRgp7CiAgImV4ZWMtb3B0cyI6IFsibmF0aXZlLmNncm91cGRyaXZlcj1zeXN0ZW1kIl0sCiAgImxvZy1kcml2ZXIiOiAianNvbi1maWxlIiwKICAibG9nLW9wdHMiOiB7CiAgICAibWF4LXNpemUiOiAiMTAwbSIKICB9LAogICJzdG9yYWdlLWRyaXZlciI6ICJvdmVybGF5MiIKfQpFT0YKbWtkaXIgLXAgL2V0Yy9zeXN0ZW1kL3N5c3RlbS9kb2NrZXIuc2VydmljZS5kCnN5c3RlbWN0bCBkYWVtb24tcmVsb2FkCnN5c3RlbWN0bCByZXN0YXJ0IGRvY2tlcgpzd2Fwb2ZmIC1hCm12IC9ldGMvZnN0YWIgL2V0Yy9mc3RhYi5vbGQgJiYgZ3JlcCAtdiBzd2FwIC9ldGMvZnN0YWIub2xkID4gL2V0Yy9mc3RhYgpjYXQgPDxFT0YgfCB0ZWUgL2V0Yy9kZWZhdWx0L2t1YmVsZXQKS1VCRUxFVF9FWFRSQV9BUkdTPS0tY2xvdWQtcHJvdmlkZXI9ZXh0ZXJuYWwgLS1ub2RlLWxhYmVscz1wb29sPXt7Lk5vZGVHcm91cH19CkVPRgprdWJlYWRtIGpvaW4gLS1kaXNjb3ZlcnktdG9rZW4tdW5zYWZlLXNraXAtY2EtdmVyaWZpY2F0aW9uIC0tdG9rZW4ge3suQm9vdHN0cmFwVG9rZW5JRH19Lnt7LkJvb3RzdHJhcFRva2VuU2VjcmV0fX0ge3suQVBJU2VydmVyRW5kcG9pbnR9fQo=" -func newTestPacketManagerRest(t *testing.T, url string) *equinixMetalManagerRest { +func newTestMetalManagerRest(t *testing.T, url string) *equinixMetalManagerRest { manager := &equinixMetalManagerRest{ equinixMetalManagerNodePools: map[string]*equinixMetalManagerNodePool{ "default": { @@ -81,10 +81,10 @@ func TestListMetalDevices(t *testing.T) { defer server.Close() if len(os.Getenv("PACKET_AUTH_TOKEN")) > 0 { // If auth token set in env, hit the actual Packet API - m = newTestPacketManagerRest(t, "https://api.equinix.com/metal/v1/") + m = newTestMetalManagerRest(t, "https://api.equinix.com/metal/v1/") } else { // Set up a mock Packet API - m = newTestPacketManagerRest(t, server.URL) + m = newTestMetalManagerRest(t, server.URL) t.Logf("server URL: %v", server.URL) t.Logf("default equinixMetalManagerNodePool baseURL: %v", m.equinixMetalManagerNodePools["default"].baseURL) 
server.On("handle", "/projects/"+m.equinixMetalManagerNodePools["default"].projectID+"/devices").Return("application/json", listMetalDevicesResponse).Times(2) diff --git a/cluster-autoscaler/cloudprovider/packet/packet_node_group_test.go b/cluster-autoscaler/cloudprovider/packet/packet_node_group_test.go index a3a1ea07abbd..b32ba9d6f772 100644 --- a/cluster-autoscaler/cloudprovider/packet/packet_node_group_test.go +++ b/cluster-autoscaler/cloudprovider/packet/packet_node_group_test.go @@ -42,10 +42,10 @@ func TestIncreaseDecreaseSize(t *testing.T) { assert.Equal(t, true, true) if len(os.Getenv("PACKET_AUTH_TOKEN")) > 0 { // If auth token set in env, hit the actual Packet API - m = newTestPacketManagerRest(t, "https://api.equinix.com/metal/v1") + m = newTestMetalManagerRest(t, "https://api.equinix.com") } else { // Set up a mock Packet API - m = newTestPacketManagerRest(t, server.URL) + m = newTestMetalManagerRest(t, server.URL) server.On("handle", "/projects/"+m.equinixMetalManagerNodePools["default"].projectID+"/devices").Return("application/json", listMetalDevicesResponse).Times(3) server.On("handle", "/projects/"+m.equinixMetalManagerNodePools["default"].projectID+"/devices").Return("application/json", createMetalDeviceResponsePool3).Times(1) server.On("handle", "/projects/"+m.equinixMetalManagerNodePools["default"].projectID+"/devices").Return("application/json", listMetalDevicesResponseAfterIncreasePool3).Times(2) From b00ec6172cee63da3ddf5282f509fae8cd7e6613 Mon Sep 17 00:00:00 2001 From: Ayush Rangwala Date: Mon, 4 Sep 2023 22:47:11 +0530 Subject: [PATCH 5/7] Created new METAL env vars to replace the PACKET env vars while still supporting them --- .../cluster-autoscaler-deployment.yaml | 2 +- .../packet/packet_cloud_provider.go | 18 +++++++++++++----- .../packet/packet_manager_rest.go | 13 ++++++++++--- .../packet/packet_manager_rest_test.go | 2 +- .../packet/packet_node_group_test.go | 8 ++++---- 5 files changed, 29 insertions(+), 14 deletions(-) diff --git a/cluster-autoscaler/cloudprovider/packet/examples/cluster-autoscaler-deployment.yaml b/cluster-autoscaler/cloudprovider/packet/examples/cluster-autoscaler-deployment.yaml index 94d066690fc3..3b0254097720 100644 --- a/cluster-autoscaler/cloudprovider/packet/examples/cluster-autoscaler-deployment.yaml +++ b/cluster-autoscaler/cloudprovider/packet/examples/cluster-autoscaler-deployment.yaml @@ -175,7 +175,7 @@ spec: secretKeyRef: name: bootstrap-token-cluster-autoscaler-packet key: token-secret - - name: PACKET_AUTH_TOKEN + - name: METAL_AUTH_TOKEN valueFrom: secretKeyRef: name: cluster-autoscaler-packet diff --git a/cluster-autoscaler/cloudprovider/packet/packet_cloud_provider.go b/cluster-autoscaler/cloudprovider/packet/packet_cloud_provider.go index 8cdb4abd9b8c..4ac984952719 100644 --- a/cluster-autoscaler/cloudprovider/packet/packet_cloud_provider.go +++ b/cluster-autoscaler/cloudprovider/packet/packet_cloud_provider.go @@ -42,7 +42,10 @@ const ( // master/controller node. DefaultControllerNodeLabelKey = "node-role.kubernetes.io/master" // ControllerNodeIdentifierEnv is the string for the environment variable. - ControllerNodeIdentifierEnv = "PACKET_CONTROLLER_NODE_IDENTIFIER_LABEL" + // Deprecated: This env var is deprecated following Packet's acquisition by Equinix.
+ // Please use 'ControllerNodeIdentifierMetalEnv' + ControllerNodeIdentifierEnv = "PACKET_CONTROLLER_NODE_IDENTIFIER_LABEL" + ControllerNodeIdentifierMetalEnv = "METAL_CONTROLLER_NODE_IDENTIFIER_LABEL" ) var ( @@ -106,10 +109,15 @@ func (pcp *equinixMetalCloudProvider) AddNodeGroup(group equinixMetalNodeGroup) // // Since only a single node group is currently supported, the first node group is always returned. func (pcp *equinixMetalCloudProvider) NodeGroupForNode(node *apiv1.Node) (cloudprovider.NodeGroup, error) { - controllerNodeLabel := os.Getenv(ControllerNodeIdentifierEnv) - if controllerNodeLabel == "" { - klog.V(3).Infof("env %s not set, using default: %s", ControllerNodeIdentifierEnv, DefaultControllerNodeLabelKey) - controllerNodeLabel = DefaultControllerNodeLabelKey + controllerNodeLabel := DefaultControllerNodeLabelKey + value, present := os.LookupEnv(ControllerNodeIdentifierMetalEnv) + if present { + controllerNodeLabel = value + } else { + controllerNodeLabel = os.Getenv(ControllerNodeIdentifierEnv) + if controllerNodeLabel == "" { + klog.V(3).Infof("env %s not set, using default: %s", ControllerNodeIdentifierEnv, DefaultControllerNodeLabelKey) + } } if _, found := node.ObjectMeta.Labels[controllerNodeLabel]; found { diff --git a/cluster-autoscaler/cloudprovider/packet/packet_manager_rest.go b/cluster-autoscaler/cloudprovider/packet/packet_manager_rest.go index 9500c309064c..1bc1fd490c3b 100644 --- a/cluster-autoscaler/cloudprovider/packet/packet_manager_rest.go +++ b/cluster-autoscaler/cloudprovider/packet/packet_manager_rest.go @@ -49,6 +49,7 @@ const ( userAgent = "kubernetes/cluster-autoscaler/" + version.ClusterAutoscalerVersion expectedAPIContentTypePrefix = "application/json" prefix = "equinixmetal://" + metalAuthTokenEnv = "METAL_AUTH_TOKEN" ) type instanceType struct { @@ -298,9 +299,15 @@ func createEquinixMetalManagerRest(configReader io.Reader, discoverOpts cloudpro klog.Fatalf("No \"default\" or [Global] nodepool definition was found") } - metalAuthToken := os.Getenv("PACKET_AUTH_TOKEN") - if len(metalAuthToken) == 0 { - klog.Fatalf("PACKET_AUTH_TOKEN is required and missing") + var metalAuthToken string + value, present := os.LookupEnv(metalAuthTokenEnv) + if present { + metalAuthToken = value + } else { + metalAuthToken = os.Getenv("PACKET_AUTH_TOKEN") + if len(metalAuthToken) == 0 { + klog.Fatalf("%s or PACKET_AUTH_TOKEN is required and missing", metalAuthTokenEnv) + } } manager.authToken = metalAuthToken diff --git a/cluster-autoscaler/cloudprovider/packet/packet_manager_rest_test.go b/cluster-autoscaler/cloudprovider/packet/packet_manager_rest_test.go index a0c3ae1c52a1..a402f44c265d 100644 --- a/cluster-autoscaler/cloudprovider/packet/packet_manager_rest_test.go +++ b/cluster-autoscaler/cloudprovider/packet/packet_manager_rest_test.go @@ -79,7 +79,7 @@ func TestListMetalDevices(t *testing.T) { var m *equinixMetalManagerRest server := NewHttpServerMock(MockFieldContentType, MockFieldResponse) defer server.Close() - if len(os.Getenv("PACKET_AUTH_TOKEN")) > 0 { + if len(os.Getenv("PACKET_AUTH_TOKEN")) > 0 || len(os.Getenv(metalAuthTokenEnv)) > 0 { // If auth token set in env, hit the actual Packet API m = newTestMetalManagerRest(t, "https://api.equinix.com/metal/v1/") } else { diff --git a/cluster-autoscaler/cloudprovider/packet/packet_node_group_test.go b/cluster-autoscaler/cloudprovider/packet/packet_node_group_test.go index b32ba9d6f772..9f8e514047a8 100644 --- a/cluster-autoscaler/cloudprovider/packet/packet_node_group_test.go +++ 
b/cluster-autoscaler/cloudprovider/packet/packet_node_group_test.go @@ -40,7 +40,7 @@ func TestIncreaseDecreaseSize(t *testing.T) { server := NewHttpServerMock(MockFieldContentType, MockFieldResponse) defer server.Close() assert.Equal(t, true, true) - if len(os.Getenv("PACKET_AUTH_TOKEN")) > 0 { + if len(os.Getenv("PACKET_AUTH_TOKEN")) > 0 || len(os.Getenv(metalAuthTokenEnv)) > 0 { // If auth token set in env, hit the actual Packet API m = newTestMetalManagerRest(t, "https://api.equinix.com") } else { @@ -106,7 +106,7 @@ func TestIncreaseDecreaseSize(t *testing.T) { err = ngPool3.IncreaseSize(1) assert.NoError(t, err) - if len(os.Getenv("PACKET_AUTH_TOKEN")) > 0 { + if len(os.Getenv("PACKET_AUTH_TOKEN")) > 0 || len(os.Getenv(metalAuthTokenEnv)) > 0 { // If testing with actual API give it some time until the nodes bootstrap time.Sleep(420 * time.Second) } @@ -120,7 +120,7 @@ func TestIncreaseDecreaseSize(t *testing.T) { err = ngPool2.IncreaseSize(1) assert.NoError(t, err) - if len(os.Getenv("PACKET_AUTH_TOKEN")) > 0 { + if len(os.Getenv("PACKET_AUTH_TOKEN")) > 0 || len(os.Getenv(metalAuthTokenEnv)) > 0 { // If testing with actual API give it some time until the nodes bootstrap time.Sleep(420 * time.Second) } @@ -151,7 +151,7 @@ func TestIncreaseDecreaseSize(t *testing.T) { assert.NoError(t, err) // Wait a few seconds if talking to the actual Packet API - if len(os.Getenv("PACKET_AUTH_TOKEN")) > 0 { + if len(os.Getenv("PACKET_AUTH_TOKEN")) > 0 || len(os.Getenv(metalAuthTokenEnv)) > 0 { time.Sleep(10 * time.Second) } From 0d84ab69347a91cf9ccfd2a39b36533bcad019a7 Mon Sep 17 00:00:00 2001 From: Ayush Rangwala Date: Thu, 12 Oct 2023 21:50:11 +0530 Subject: [PATCH 6/7] Support backward compatibility for PACKET_MANAGER env var Signed-off-by: Ayush Rangwala --- cluster-autoscaler/cloudprovider/packet/packet_manager.go | 4 +++- .../cloudprovider/packet/packet_manager_rest.go | 3 +-- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/cluster-autoscaler/cloudprovider/packet/packet_manager.go b/cluster-autoscaler/cloudprovider/packet/packet_manager.go index e20ee3dfafa0..04751c71fedc 100644 --- a/cluster-autoscaler/cloudprovider/packet/packet_manager.go +++ b/cluster-autoscaler/cloudprovider/packet/packet_manager.go @@ -56,7 +56,9 @@ func createEquinixMetalManager(configReader io.Reader, discoverOpts cloudprovide // For now get manager from env var, can consider adding flag later manager, ok := os.LookupEnv("EQUINIX_METAL_MANAGER") if !ok { - manager = defaultManager + if manager, ok = os.LookupEnv("PACKET_MANAGER"); !ok { + manager = defaultManager + } } switch manager { diff --git a/cluster-autoscaler/cloudprovider/packet/packet_manager_rest.go b/cluster-autoscaler/cloudprovider/packet/packet_manager_rest.go index 1bc1fd490c3b..ac099c001e63 100644 --- a/cluster-autoscaler/cloudprovider/packet/packet_manager_rest.go +++ b/cluster-autoscaler/cloudprovider/packet/packet_manager_rest.go @@ -23,7 +23,6 @@ import ( "encoding/json" "fmt" "io" - "io/ioutil" "math/rand" "net/http" "os" @@ -360,7 +359,7 @@ func (mgr *equinixMetalManagerRest) request(ctx context.Context, method, url str } }() - body, err := ioutil.ReadAll(resp.Body) + body, err := io.ReadAll(resp.Body) if err != nil { return nil, fmt.Errorf("failed to read response body: %w", err) } From 1e4cb18fb2d1028b79b740ac657116fe63533279 Mon Sep 17 00:00:00 2001 From: Ayush Rangwala Date: Mon, 23 Oct 2023 17:35:07 +0530 Subject: [PATCH 7/7] fix: refactor cloud provider names Signed-off-by: Ayush Rangwala --- 
.../cloudprovider/builder/builder_all.go | 3 +-- .../cloudprovider/builder/builder_packet.go | 7 +++---- .../cloudprovider/packet/packet_cloud_provider.go | 12 +++++++----- .../cloudprovider/packet/packet_node_group_test.go | 2 +- 4 files changed, 12 insertions(+), 12 deletions(-) diff --git a/cluster-autoscaler/cloudprovider/builder/builder_all.go b/cluster-autoscaler/cloudprovider/builder/builder_all.go index dfcd001ba55f..1398b5ad95b9 100644 --- a/cluster-autoscaler/cloudprovider/builder/builder_all.go +++ b/cluster-autoscaler/cloudprovider/builder/builder_all.go @@ -121,8 +121,7 @@ func buildCloudProvider(opts config.AutoscalingOptions, do cloudprovider.NodeGro return ovhcloud.BuildOVHcloud(opts, do, rl) case cloudprovider.HetznerProviderName: return hetzner.BuildHetzner(opts, do, rl) - case cloudprovider.PacketProviderName, - cloudprovider.EquinixMetalProviderName: + case cloudprovider.PacketProviderName, cloudprovider.EquinixMetalProviderName: return packet.BuildCloudProvider(opts, do, rl) case cloudprovider.ClusterAPIProviderName: return clusterapi.BuildClusterAPI(opts, do, rl) diff --git a/cluster-autoscaler/cloudprovider/builder/builder_packet.go b/cluster-autoscaler/cloudprovider/builder/builder_packet.go index bd7bb3c2e542..e41d743dd570 100644 --- a/cluster-autoscaler/cloudprovider/builder/builder_packet.go +++ b/cluster-autoscaler/cloudprovider/builder/builder_packet.go @@ -28,16 +28,15 @@ import ( // AvailableCloudProviders supported by the cloud provider builder. var AvailableCloudProviders = []string{ packet.ProviderName, - packet.EquinixMetalProviderName, + cloudprovider.EquinixMetalProviderName, } // DefaultCloudProvider for Packet-only build is Packet. -const DefaultCloudProvider = packet.EquinixMetalProviderName +const DefaultCloudProvider = cloudprovider.EquinixMetalProviderName func buildCloudProvider(opts config.AutoscalingOptions, do cloudprovider.NodeGroupDiscoveryOptions, rl *cloudprovider.ResourceLimiter) cloudprovider.CloudProvider { switch opts.CloudProviderName { - case packet.ProviderName, - packet.EquinixMetalProviderName: + case packet.ProviderName, cloudprovider.EquinixMetalProviderName: return packet.BuildCloudProvider(opts, do, rl) } diff --git a/cluster-autoscaler/cloudprovider/packet/packet_cloud_provider.go b/cluster-autoscaler/cloudprovider/packet/packet_cloud_provider.go index 4ac984952719..fbb5a96d0c30 100644 --- a/cluster-autoscaler/cloudprovider/packet/packet_cloud_provider.go +++ b/cluster-autoscaler/cloudprovider/packet/packet_cloud_provider.go @@ -25,17 +25,18 @@ import ( apiv1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" + klog "k8s.io/klog/v2" + "k8s.io/autoscaler/cluster-autoscaler/cloudprovider" "k8s.io/autoscaler/cluster-autoscaler/config" "k8s.io/autoscaler/cluster-autoscaler/config/dynamic" "k8s.io/autoscaler/cluster-autoscaler/utils/errors" "k8s.io/autoscaler/cluster-autoscaler/utils/gpu" - klog "k8s.io/klog/v2" ) const ( - // ProviderName is the cloud provider name for Equinix Metal - ProviderName = "equinix-metal" + // ProviderName is the name of the legacy Packet cloud provider, now known as Equinix Metal + ProviderName = "packet" // GPULabel is the label added to nodes with GPU resource. GPULabel = "cloud.google.com/gke-accelerator" // DefaultControllerNodeLabelKey is the label added to Master/Controller to identify as // ControllerNodeIdentifierEnv is the string for the environment variable. // Deprecated: This env var is deprecated following Packet's acquisition by Equinix.
// Please use 'ControllerNodeIdentifierMetalEnv' - ControllerNodeIdentifierEnv = "PACKET_CONTROLLER_NODE_IDENTIFIER_LABEL" + ControllerNodeIdentifierEnv = "PACKET_CONTROLLER_NODE_IDENTIFIER_LABEL" + // ControllerNodeIdentifierMetalEnv is the string for the environment variable that holds the controller node identifier label for Equinix Metal. ControllerNodeIdentifierMetalEnv = "METAL_CONTROLLER_NODE_IDENTIFIER_LABEL" ) var ( @@ -72,7 +74,7 @@ func buildEquinixMetalCloudProvider(metalManager equinixMetalManager, resourceLi // Name returns the name of the cloud provider. func (pcp *equinixMetalCloudProvider) Name() string { - return ProviderName + return cloudprovider.EquinixMetalProviderName } // GPULabel returns the label added to nodes with GPU resource. diff --git a/cluster-autoscaler/cloudprovider/packet/packet_node_group_test.go b/cluster-autoscaler/cloudprovider/packet/packet_node_group_test.go index 9f8e514047a8..56819727ee03 100644 --- a/cluster-autoscaler/cloudprovider/packet/packet_node_group_test.go +++ b/cluster-autoscaler/cloudprovider/packet/packet_node_group_test.go @@ -42,7 +42,7 @@ func TestIncreaseDecreaseSize(t *testing.T) { assert.Equal(t, true, true) if len(os.Getenv("PACKET_AUTH_TOKEN")) > 0 || len(os.Getenv(metalAuthTokenEnv)) > 0 { // If auth token set in env, hit the actual Packet API - m = newTestMetalManagerRest(t, "https://api.equinix.com") + m = newTestMetalManagerRest(t, "https://api.equinix.com/metal/v1") } else { // Set up a mock Packet API m = newTestMetalManagerRest(t, server.URL)
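Taken together, the later patches in this series settle on one lookup order for configuration: prefer the new METAL_* environment variable, fall back to the legacy PACKET_* variable, and only then use a built-in default (the auth token has no default and is required). The standalone Go sketch below shows that order with a hypothetical helper named lookupWithFallback; it is not code from these patches.

package main

import (
	"fmt"
	"os"
)

// lookupWithFallback returns the value of metalEnv if it is set and non-empty,
// otherwise the value of packetEnv, otherwise def.
func lookupWithFallback(metalEnv, packetEnv, def string) string {
	if v, ok := os.LookupEnv(metalEnv); ok && v != "" {
		return v
	}
	if v, ok := os.LookupEnv(packetEnv); ok && v != "" {
		return v
	}
	return def
}

func main() {
	// Controller node label: METAL_* first, then PACKET_*, then the default key.
	controllerLabel := lookupWithFallback(
		"METAL_CONTROLLER_NODE_IDENTIFIER_LABEL",
		"PACKET_CONTROLLER_NODE_IDENTIFIER_LABEL",
		"node-role.kubernetes.io/master",
	)
	// Auth token: METAL_* first, then PACKET_*; empty means it was not provided.
	authToken := lookupWithFallback("METAL_AUTH_TOKEN", "PACKET_AUTH_TOKEN", "")

	fmt.Println("controller node label:", controllerLabel)
	fmt.Println("auth token provided:", authToken != "")
}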