From a698fdfa5095f3c1c06ed4c664b43f5cdd4e7d23 Mon Sep 17 00:00:00 2001 From: Khaja Omer <56000175+komer3@users.noreply.github.com> Date: Mon, 16 Dec 2024 15:51:29 -0600 Subject: [PATCH] [improvement] Change conditions pkg to use kubernetes metav1 (#590) --- api/v1alpha2/linodecluster_types.go | 22 +- api/v1alpha2/linodefirewall_types.go | 16 +- api/v1alpha2/linodemachine_types.go | 17 +- .../linodeobjectstoragebucket_types.go | 19 +- api/v1alpha2/linodeobjectstoragekey_types.go | 19 +- api/v1alpha2/linodeplacementgroup_types.go | 19 +- api/v1alpha2/linodevpc_types.go | 15 +- api/v1alpha2/zz_generated.deepcopy.go | 19 +- ...cture.cluster.x-k8s.io_linodeclusters.yaml | 52 +++-- ...ture.cluster.x-k8s.io_linodefirewalls.yaml | 52 +++-- ...cture.cluster.x-k8s.io_linodemachines.yaml | 52 +++-- ...r.x-k8s.io_linodeobjectstoragebuckets.yaml | 52 +++-- ...ster.x-k8s.io_linodeobjectstoragekeys.yaml | 52 +++-- ...luster.x-k8s.io_linodeplacementgroups.yaml | 52 +++-- ...structure.cluster.x-k8s.io_linodevpcs.yaml | 52 +++-- go.mod | 18 +- go.sum | 66 +++--- .../controller/linodecluster_controller.go | 97 ++++++-- .../linodecluster_controller_test.go | 6 +- .../controller/linodefirewall_controller.go | 30 ++- .../linodefirewall_controller_test.go | 9 +- .../controller/linodemachine_controller.go | 221 ++++++++++++++---- .../linodemachine_controller_helpers.go | 63 +++-- .../linodemachine_controller_test.go | 15 +- .../linodeobjectstoragebucket_controller.go | 19 +- ...nodeobjectstoragebucket_controller_test.go | 2 +- .../linodeobjectstoragekey_controller.go | 19 +- .../linodeobjectstoragekey_controller_test.go | 2 +- .../linodeplacementgroup_controller.go | 37 ++- internal/controller/linodevpc_controller.go | 53 ++++- .../controller/linodevpc_controller_test.go | 9 +- util/errors.go | 8 + util/reconciler/conditions.go | 22 +- 33 files changed, 820 insertions(+), 386 deletions(-) diff --git a/api/v1alpha2/linodecluster_types.go b/api/v1alpha2/linodecluster_types.go index daa2f8b79..0788dfb27 100644 --- a/api/v1alpha2/linodecluster_types.go +++ b/api/v1alpha2/linodecluster_types.go @@ -20,7 +20,6 @@ import ( corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" - "sigs.k8s.io/cluster-api/errors" ) const ( @@ -73,7 +72,7 @@ type LinodeClusterStatus struct { // reconciling the LinodeCluster and will contain a succinct value suitable // for machine interpretation. // +optional - FailureReason *errors.ClusterStatusError `json:"failureReason,omitempty"` + FailureReason *string `json:"failureReason,omitempty"` // FailureMessage will be set in the event that there is a terminal problem // reconciling the LinodeCluster and will contain a more verbose string suitable @@ -83,7 +82,7 @@ type LinodeClusterStatus struct { // Conditions defines current service state of the LinodeCluster. 
// +optional - Conditions clusterv1.Conditions `json:"conditions,omitempty"` + Conditions []metav1.Condition `json:"conditions,omitempty"` } // +kubebuilder:object:root=true @@ -103,12 +102,21 @@ type LinodeCluster struct { Status LinodeClusterStatus `json:"status,omitempty"` } -func (lm *LinodeCluster) GetConditions() clusterv1.Conditions { - return lm.Status.Conditions +func (lc *LinodeCluster) GetConditions() []metav1.Condition { + return lc.Status.Conditions } -func (lm *LinodeCluster) SetConditions(conditions clusterv1.Conditions) { - lm.Status.Conditions = conditions +func (lc *LinodeCluster) SetConditions(conditions []metav1.Condition) { + lc.Status.Conditions = conditions +} + +// We need V1Beta2Conditions helpers to be able to use the conditions package from cluster-api +func (lc *LinodeCluster) GetV1Beta2Conditions() []metav1.Condition { + return lc.GetConditions() +} + +func (lc *LinodeCluster) SetV1Beta2Conditions(conditions []metav1.Condition) { + lc.SetConditions(conditions) } // NetworkSpec encapsulates Linode networking resources. diff --git a/api/v1alpha2/linodefirewall_types.go b/api/v1alpha2/linodefirewall_types.go index 5204a17b0..a6cc79058 100644 --- a/api/v1alpha2/linodefirewall_types.go +++ b/api/v1alpha2/linodefirewall_types.go @@ -19,7 +19,6 @@ package v1alpha2 import ( corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" ) const ( @@ -118,7 +117,7 @@ type LinodeFirewallStatus struct { // Conditions defines current service state of the LinodeFirewall. // +optional - Conditions clusterv1.Conditions `json:"conditions,omitempty"` + Conditions []metav1.Condition `json:"conditions,omitempty"` } // +kubebuilder:object:root=true @@ -137,14 +136,23 @@ type LinodeFirewall struct { Status LinodeFirewallStatus `json:"status,omitempty"` } -func (lfw *LinodeFirewall) GetConditions() clusterv1.Conditions { +func (lfw *LinodeFirewall) GetConditions() []metav1.Condition { return lfw.Status.Conditions } -func (lfw *LinodeFirewall) SetConditions(conditions clusterv1.Conditions) { +func (lfw *LinodeFirewall) SetConditions(conditions []metav1.Condition) { lfw.Status.Conditions = conditions } +// We need V1Beta2Conditions helpers to be able to use the conditions package from cluster-api +func (lfw *LinodeFirewall) GetV1Beta2Conditions() []metav1.Condition { + return lfw.GetConditions() +} + +func (lfw *LinodeFirewall) SetV1Beta2Conditions(conditions []metav1.Condition) { + lfw.SetConditions(conditions) +} + //+kubebuilder:object:root=true // LinodeFirewallList contains a list of LinodeFirewall diff --git a/api/v1alpha2/linodemachine_types.go b/api/v1alpha2/linodemachine_types.go index 041f486be..9a318238f 100644 --- a/api/v1alpha2/linodemachine_types.go +++ b/api/v1alpha2/linodemachine_types.go @@ -22,7 +22,6 @@ import ( "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" - "sigs.k8s.io/cluster-api/errors" ) const ( @@ -192,7 +191,7 @@ type LinodeMachineStatus struct { // can be added as events to the Machine object and/or logged in the // controller's output. 
// +optional - FailureReason *errors.MachineStatusError `json:"failureReason,omitempty"` + FailureReason *string `json:"failureReason,omitempty"` // FailureMessage will be set in the event that there is a terminal problem // reconciling the Machine and will contain a more verbose string suitable @@ -215,7 +214,7 @@ type LinodeMachineStatus struct { // Conditions defines current service state of the LinodeMachine. // +optional - Conditions clusterv1.Conditions `json:"conditions,omitempty"` + Conditions []metav1.Condition `json:"conditions,omitempty"` } // +kubebuilder:object:root=true @@ -237,14 +236,22 @@ type LinodeMachine struct { Status LinodeMachineStatus `json:"status,omitempty"` } -func (lm *LinodeMachine) GetConditions() clusterv1.Conditions { +func (lm *LinodeMachine) GetConditions() []metav1.Condition { return lm.Status.Conditions } -func (lm *LinodeMachine) SetConditions(conditions clusterv1.Conditions) { +func (lm *LinodeMachine) SetConditions(conditions []metav1.Condition) { lm.Status.Conditions = conditions } +func (lm *LinodeMachine) GetV1Beta2Conditions() []metav1.Condition { + return lm.GetConditions() +} + +func (lm *LinodeMachine) SetV1Beta2Conditions(conditions []metav1.Condition) { + lm.SetConditions(conditions) +} + // +kubebuilder:object:root=true // LinodeMachineList contains a list of LinodeMachine diff --git a/api/v1alpha2/linodeobjectstoragebucket_types.go b/api/v1alpha2/linodeobjectstoragebucket_types.go index 9b74067ea..0554c52d1 100644 --- a/api/v1alpha2/linodeobjectstoragebucket_types.go +++ b/api/v1alpha2/linodeobjectstoragebucket_types.go @@ -19,7 +19,6 @@ package v1alpha2 import ( corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" ) type ObjectStorageACL string @@ -76,7 +75,7 @@ type LinodeObjectStorageBucketStatus struct { // Conditions specify the service state of the LinodeObjectStorageBucket. // +optional - Conditions clusterv1.Conditions `json:"conditions,omitempty"` + Conditions []metav1.Condition `json:"conditions,omitempty"` // Hostname is the address assigned to the bucket. 
// +optional @@ -105,12 +104,20 @@ type LinodeObjectStorageBucket struct { Status LinodeObjectStorageBucketStatus `json:"status,omitempty"` } -func (b *LinodeObjectStorageBucket) GetConditions() clusterv1.Conditions { - return b.Status.Conditions +func (losb *LinodeObjectStorageBucket) GetConditions() []metav1.Condition { + return losb.Status.Conditions } -func (b *LinodeObjectStorageBucket) SetConditions(conditions clusterv1.Conditions) { - b.Status.Conditions = conditions +func (losb *LinodeObjectStorageBucket) SetConditions(conditions []metav1.Condition) { + losb.Status.Conditions = conditions +} + +func (losb *LinodeObjectStorageBucket) GetV1Beta2Conditions() []metav1.Condition { + return losb.GetConditions() +} + +func (losb *LinodeObjectStorageBucket) SetV1Beta2Conditions(conditions []metav1.Condition) { + losb.SetConditions(conditions) } // +kubebuilder:object:root=true diff --git a/api/v1alpha2/linodeobjectstoragekey_types.go b/api/v1alpha2/linodeobjectstoragekey_types.go index 9bbf06d6d..53de86737 100644 --- a/api/v1alpha2/linodeobjectstoragekey_types.go +++ b/api/v1alpha2/linodeobjectstoragekey_types.go @@ -19,7 +19,6 @@ package v1alpha2 import ( corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" ) const ( @@ -101,7 +100,7 @@ type LinodeObjectStorageKeyStatus struct { // Conditions specify the service state of the LinodeObjectStorageKey. // +optional - Conditions clusterv1.Conditions `json:"conditions,omitempty"` + Conditions []metav1.Condition `json:"conditions,omitempty"` // CreationTime specifies the creation timestamp for the secret. // +optional @@ -133,12 +132,20 @@ type LinodeObjectStorageKey struct { Status LinodeObjectStorageKeyStatus `json:"status,omitempty"` } -func (b *LinodeObjectStorageKey) GetConditions() clusterv1.Conditions { - return b.Status.Conditions +func (losk *LinodeObjectStorageKey) GetConditions() []metav1.Condition { + return losk.Status.Conditions } -func (b *LinodeObjectStorageKey) SetConditions(conditions clusterv1.Conditions) { - b.Status.Conditions = conditions +func (losk *LinodeObjectStorageKey) SetConditions(conditions []metav1.Condition) { + losk.Status.Conditions = conditions +} + +func (losk *LinodeObjectStorageKey) GetV1Beta2Conditions() []metav1.Condition { + return losk.GetConditions() +} + +func (losk *LinodeObjectStorageKey) SetV1Beta2Conditions(conditions []metav1.Condition) { + losk.SetConditions(conditions) } // +kubebuilder:object:root=true diff --git a/api/v1alpha2/linodeplacementgroup_types.go b/api/v1alpha2/linodeplacementgroup_types.go index 9b3dfbe97..219bd9d45 100644 --- a/api/v1alpha2/linodeplacementgroup_types.go +++ b/api/v1alpha2/linodeplacementgroup_types.go @@ -19,7 +19,6 @@ package v1alpha2 import ( corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" ) const ( @@ -100,7 +99,7 @@ type LinodePlacementGroupStatus struct { // Conditions defines current service state of the LinodePlacementGroup. 
// +optional - Conditions clusterv1.Conditions `json:"conditions,omitempty"` + Conditions []metav1.Condition `json:"conditions,omitempty"` } // +kubebuilder:object:root=true @@ -118,12 +117,20 @@ type LinodePlacementGroup struct { Status LinodePlacementGroupStatus `json:"status,omitempty"` } -func (lm *LinodePlacementGroup) GetConditions() clusterv1.Conditions { - return lm.Status.Conditions +func (lpg *LinodePlacementGroup) GetConditions() []metav1.Condition { + return lpg.Status.Conditions } -func (lm *LinodePlacementGroup) SetConditions(conditions clusterv1.Conditions) { - lm.Status.Conditions = conditions +func (lpg *LinodePlacementGroup) SetConditions(conditions []metav1.Condition) { + lpg.Status.Conditions = conditions +} + +func (lpg *LinodePlacementGroup) GetV1Beta2Conditions() []metav1.Condition { + return lpg.GetConditions() +} + +func (lpg *LinodePlacementGroup) SetV1Beta2Conditions(conditions []metav1.Condition) { + lpg.SetConditions(conditions) } // +kubebuilder:object:root=true diff --git a/api/v1alpha2/linodevpc_types.go b/api/v1alpha2/linodevpc_types.go index c01dde9cd..7f369afd6 100644 --- a/api/v1alpha2/linodevpc_types.go +++ b/api/v1alpha2/linodevpc_types.go @@ -19,7 +19,6 @@ package v1alpha2 import ( corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" ) const ( @@ -105,7 +104,7 @@ type LinodeVPCStatus struct { // Conditions defines current service state of the LinodeVPC. // +optional - Conditions clusterv1.Conditions `json:"conditions,omitempty"` + Conditions []metav1.Condition `json:"conditions,omitempty"` } // +kubebuilder:object:root=true @@ -124,14 +123,22 @@ type LinodeVPC struct { Status LinodeVPCStatus `json:"status,omitempty"` } -func (lv *LinodeVPC) GetConditions() clusterv1.Conditions { +func (lv *LinodeVPC) GetConditions() []metav1.Condition { return lv.Status.Conditions } -func (lv *LinodeVPC) SetConditions(conditions clusterv1.Conditions) { +func (lv *LinodeVPC) SetConditions(conditions []metav1.Condition) { lv.Status.Conditions = conditions } +func (lv *LinodeVPC) GetV1Beta2Conditions() []metav1.Condition { + return lv.GetConditions() +} + +func (lv *LinodeVPC) SetV1Beta2Conditions(conditions []metav1.Condition) { + lv.SetConditions(conditions) +} + // +kubebuilder:object:root=true // LinodeVPCList contains a list of LinodeVPC diff --git a/api/v1alpha2/zz_generated.deepcopy.go b/api/v1alpha2/zz_generated.deepcopy.go index 72fc1b749..a6761806f 100644 --- a/api/v1alpha2/zz_generated.deepcopy.go +++ b/api/v1alpha2/zz_generated.deepcopy.go @@ -26,7 +26,6 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" "sigs.k8s.io/cluster-api/api/v1beta1" - "sigs.k8s.io/cluster-api/errors" ) // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
@@ -455,7 +454,7 @@ func (in *LinodeClusterStatus) DeepCopyInto(out *LinodeClusterStatus) { *out = *in if in.FailureReason != nil { in, out := &in.FailureReason, &out.FailureReason - *out = new(errors.ClusterStatusError) + *out = new(string) **out = **in } if in.FailureMessage != nil { @@ -465,7 +464,7 @@ func (in *LinodeClusterStatus) DeepCopyInto(out *LinodeClusterStatus) { } if in.Conditions != nil { in, out := &in.Conditions, &out.Conditions - *out = make(v1beta1.Conditions, len(*in)) + *out = make([]metav1.Condition, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } @@ -707,7 +706,7 @@ func (in *LinodeFirewallStatus) DeepCopyInto(out *LinodeFirewallStatus) { } if in.Conditions != nil { in, out := &in.Conditions, &out.Conditions - *out = make(v1beta1.Conditions, len(*in)) + *out = make([]metav1.Condition, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } @@ -896,7 +895,7 @@ func (in *LinodeMachineStatus) DeepCopyInto(out *LinodeMachineStatus) { } if in.FailureReason != nil { in, out := &in.FailureReason, &out.FailureReason - *out = new(errors.MachineStatusError) + *out = new(string) **out = **in } if in.FailureMessage != nil { @@ -906,7 +905,7 @@ func (in *LinodeMachineStatus) DeepCopyInto(out *LinodeMachineStatus) { } if in.Conditions != nil { in, out := &in.Conditions, &out.Conditions - *out = make(v1beta1.Conditions, len(*in)) + *out = make([]metav1.Condition, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } @@ -1122,7 +1121,7 @@ func (in *LinodeObjectStorageBucketStatus) DeepCopyInto(out *LinodeObjectStorage } if in.Conditions != nil { in, out := &in.Conditions, &out.Conditions - *out = make(v1beta1.Conditions, len(*in)) + *out = make([]metav1.Condition, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } @@ -1250,7 +1249,7 @@ func (in *LinodeObjectStorageKeyStatus) DeepCopyInto(out *LinodeObjectStorageKey } if in.Conditions != nil { in, out := &in.Conditions, &out.Conditions - *out = make(v1beta1.Conditions, len(*in)) + *out = make([]metav1.Condition, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } @@ -1380,7 +1379,7 @@ func (in *LinodePlacementGroupStatus) DeepCopyInto(out *LinodePlacementGroupStat } if in.Conditions != nil { in, out := &in.Conditions, &out.Conditions - *out = make(v1beta1.Conditions, len(*in)) + *out = make([]metav1.Condition, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } @@ -1501,7 +1500,7 @@ func (in *LinodeVPCStatus) DeepCopyInto(out *LinodeVPCStatus) { } if in.Conditions != nil { in, out := &in.Conditions, &out.Conditions - *out = make(v1beta1.Conditions, len(*in)) + *out = make([]metav1.Condition, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_linodeclusters.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_linodeclusters.yaml index 7d233d4a3..ca1632f4f 100644 --- a/config/crd/bases/infrastructure.cluster.x-k8s.io_linodeclusters.yaml +++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_linodeclusters.yaml @@ -312,44 +312,56 @@ spec: conditions: description: Conditions defines current service state of the LinodeCluster. items: - description: Condition defines an observation of a Cluster API resource - operational state. + description: Condition contains details for one aspect of the current + state of this API Resource. properties: lastTransitionTime: description: |- - Last time the condition transitioned from one status to another. 
- This should be when the underlying condition changed. If that is not known, then using the time when - the API field changed is acceptable. + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. format: date-time type: string message: description: |- - A human readable message indicating details about the transition. - This field may be empty. + message is a human readable message indicating details about the transition. + This may be an empty string. + maxLength: 32768 type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer reason: description: |- - The reason for the condition's last transition in CamelCase. - The specific API may choose whether or not this field is considered a guaranteed API. + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. This field may not be empty. - type: string - severity: - description: |- - Severity provides an explicit classification of Reason code, so the users or machines can immediately - understand the current situation and act accordingly. - The Severity field MUST be set only when Status=False. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ type: string status: - description: Status of the condition, one of True, False, Unknown. + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown type: string type: - description: |- - Type of condition in CamelCase or in foo.example.com/CamelCase. - Many .condition.type values are consistent across resources like Available, but because arbitrary conditions - can be useful (see .node.status.conditions), the ability to deconflict is important. + description: type of condition in CamelCase or in foo.example.com/CamelCase. + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ type: string required: - lastTransitionTime + - message + - reason - status - type type: object diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_linodefirewalls.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_linodefirewalls.yaml index 09b070d2a..94eee4be0 100644 --- a/config/crd/bases/infrastructure.cluster.x-k8s.io_linodefirewalls.yaml +++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_linodefirewalls.yaml @@ -377,44 +377,56 @@ spec: conditions: description: Conditions defines current service state of the LinodeFirewall. items: - description: Condition defines an observation of a Cluster API resource - operational state. + description: Condition contains details for one aspect of the current + state of this API Resource. properties: lastTransitionTime: description: |- - Last time the condition transitioned from one status to another. - This should be when the underlying condition changed. 
If that is not known, then using the time when - the API field changed is acceptable. + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. format: date-time type: string message: description: |- - A human readable message indicating details about the transition. - This field may be empty. + message is a human readable message indicating details about the transition. + This may be an empty string. + maxLength: 32768 type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer reason: description: |- - The reason for the condition's last transition in CamelCase. - The specific API may choose whether or not this field is considered a guaranteed API. + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. This field may not be empty. - type: string - severity: - description: |- - Severity provides an explicit classification of Reason code, so the users or machines can immediately - understand the current situation and act accordingly. - The Severity field MUST be set only when Status=False. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ type: string status: - description: Status of the condition, one of True, False, Unknown. + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown type: string type: - description: |- - Type of condition in CamelCase or in foo.example.com/CamelCase. - Many .condition.type values are consistent across resources like Available, but because arbitrary conditions - can be useful (see .node.status.conditions), the ability to deconflict is important. + description: type of condition in CamelCase or in foo.example.com/CamelCase. + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ type: string required: - lastTransitionTime + - message + - reason - status - type type: object diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_linodemachines.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_linodemachines.yaml index ef7f85eee..4a527a9d4 100644 --- a/config/crd/bases/infrastructure.cluster.x-k8s.io_linodemachines.yaml +++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_linodemachines.yaml @@ -455,44 +455,56 @@ spec: conditions: description: Conditions defines current service state of the LinodeMachine. items: - description: Condition defines an observation of a Cluster API resource - operational state. + description: Condition contains details for one aspect of the current + state of this API Resource. properties: lastTransitionTime: description: |- - Last time the condition transitioned from one status to another. - This should be when the underlying condition changed. 
If that is not known, then using the time when - the API field changed is acceptable. + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. format: date-time type: string message: description: |- - A human readable message indicating details about the transition. - This field may be empty. + message is a human readable message indicating details about the transition. + This may be an empty string. + maxLength: 32768 type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer reason: description: |- - The reason for the condition's last transition in CamelCase. - The specific API may choose whether or not this field is considered a guaranteed API. + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. This field may not be empty. - type: string - severity: - description: |- - Severity provides an explicit classification of Reason code, so the users or machines can immediately - understand the current situation and act accordingly. - The Severity field MUST be set only when Status=False. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ type: string status: - description: Status of the condition, one of True, False, Unknown. + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown type: string type: - description: |- - Type of condition in CamelCase or in foo.example.com/CamelCase. - Many .condition.type values are consistent across resources like Available, but because arbitrary conditions - can be useful (see .node.status.conditions), the ability to deconflict is important. + description: type of condition in CamelCase or in foo.example.com/CamelCase. + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ type: string required: - lastTransitionTime + - message + - reason - status - type type: object diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_linodeobjectstoragebuckets.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_linodeobjectstoragebuckets.yaml index 0927b7f7e..1791be346 100644 --- a/config/crd/bases/infrastructure.cluster.x-k8s.io_linodeobjectstoragebuckets.yaml +++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_linodeobjectstoragebuckets.yaml @@ -107,44 +107,56 @@ spec: conditions: description: Conditions specify the service state of the LinodeObjectStorageBucket. items: - description: Condition defines an observation of a Cluster API resource - operational state. + description: Condition contains details for one aspect of the current + state of this API Resource. properties: lastTransitionTime: description: |- - Last time the condition transitioned from one status to another. - This should be when the underlying condition changed. 
If that is not known, then using the time when - the API field changed is acceptable. + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. format: date-time type: string message: description: |- - A human readable message indicating details about the transition. - This field may be empty. + message is a human readable message indicating details about the transition. + This may be an empty string. + maxLength: 32768 type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer reason: description: |- - The reason for the condition's last transition in CamelCase. - The specific API may choose whether or not this field is considered a guaranteed API. + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. This field may not be empty. - type: string - severity: - description: |- - Severity provides an explicit classification of Reason code, so the users or machines can immediately - understand the current situation and act accordingly. - The Severity field MUST be set only when Status=False. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ type: string status: - description: Status of the condition, one of True, False, Unknown. + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown type: string type: - description: |- - Type of condition in CamelCase or in foo.example.com/CamelCase. - Many .condition.type values are consistent across resources like Available, but because arbitrary conditions - can be useful (see .node.status.conditions), the ability to deconflict is important. + description: type of condition in CamelCase or in foo.example.com/CamelCase. + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ type: string required: - lastTransitionTime + - message + - reason - status - type type: object diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_linodeobjectstoragekeys.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_linodeobjectstoragekeys.yaml index 5270a3b26..9824db0cb 100644 --- a/config/crd/bases/infrastructure.cluster.x-k8s.io_linodeobjectstoragekeys.yaml +++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_linodeobjectstoragekeys.yaml @@ -158,44 +158,56 @@ spec: conditions: description: Conditions specify the service state of the LinodeObjectStorageKey. items: - description: Condition defines an observation of a Cluster API resource - operational state. + description: Condition contains details for one aspect of the current + state of this API Resource. properties: lastTransitionTime: description: |- - Last time the condition transitioned from one status to another. - This should be when the underlying condition changed. 
If that is not known, then using the time when - the API field changed is acceptable. + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. format: date-time type: string message: description: |- - A human readable message indicating details about the transition. - This field may be empty. + message is a human readable message indicating details about the transition. + This may be an empty string. + maxLength: 32768 type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer reason: description: |- - The reason for the condition's last transition in CamelCase. - The specific API may choose whether or not this field is considered a guaranteed API. + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. This field may not be empty. - type: string - severity: - description: |- - Severity provides an explicit classification of Reason code, so the users or machines can immediately - understand the current situation and act accordingly. - The Severity field MUST be set only when Status=False. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ type: string status: - description: Status of the condition, one of True, False, Unknown. + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown type: string type: - description: |- - Type of condition in CamelCase or in foo.example.com/CamelCase. - Many .condition.type values are consistent across resources like Available, but because arbitrary conditions - can be useful (see .node.status.conditions), the ability to deconflict is important. + description: type of condition in CamelCase or in foo.example.com/CamelCase. + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ type: string required: - lastTransitionTime + - message + - reason - status - type type: object diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_linodeplacementgroups.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_linodeplacementgroups.yaml index 5ecaa96d2..df491355b 100644 --- a/config/crd/bases/infrastructure.cluster.x-k8s.io_linodeplacementgroups.yaml +++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_linodeplacementgroups.yaml @@ -100,44 +100,56 @@ spec: conditions: description: Conditions defines current service state of the LinodePlacementGroup. items: - description: Condition defines an observation of a Cluster API resource - operational state. + description: Condition contains details for one aspect of the current + state of this API Resource. properties: lastTransitionTime: description: |- - Last time the condition transitioned from one status to another. - This should be when the underlying condition changed. 
If that is not known, then using the time when - the API field changed is acceptable. + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. format: date-time type: string message: description: |- - A human readable message indicating details about the transition. - This field may be empty. + message is a human readable message indicating details about the transition. + This may be an empty string. + maxLength: 32768 type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer reason: description: |- - The reason for the condition's last transition in CamelCase. - The specific API may choose whether or not this field is considered a guaranteed API. + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. This field may not be empty. - type: string - severity: - description: |- - Severity provides an explicit classification of Reason code, so the users or machines can immediately - understand the current situation and act accordingly. - The Severity field MUST be set only when Status=False. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ type: string status: - description: Status of the condition, one of True, False, Unknown. + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown type: string type: - description: |- - Type of condition in CamelCase or in foo.example.com/CamelCase. - Many .condition.type values are consistent across resources like Available, but because arbitrary conditions - can be useful (see .node.status.conditions), the ability to deconflict is important. + description: type of condition in CamelCase or in foo.example.com/CamelCase. + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ type: string required: - lastTransitionTime + - message + - reason - status - type type: object diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_linodevpcs.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_linodevpcs.yaml index bd28d4892..f5f8de518 100644 --- a/config/crd/bases/infrastructure.cluster.x-k8s.io_linodevpcs.yaml +++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_linodevpcs.yaml @@ -98,44 +98,56 @@ spec: conditions: description: Conditions defines current service state of the LinodeVPC. items: - description: Condition defines an observation of a Cluster API resource - operational state. + description: Condition contains details for one aspect of the current + state of this API Resource. properties: lastTransitionTime: description: |- - Last time the condition transitioned from one status to another. - This should be when the underlying condition changed. If that is not known, then using the time when - the API field changed is acceptable. 
+ lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. format: date-time type: string message: description: |- - A human readable message indicating details about the transition. - This field may be empty. + message is a human readable message indicating details about the transition. + This may be an empty string. + maxLength: 32768 type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer reason: description: |- - The reason for the condition's last transition in CamelCase. - The specific API may choose whether or not this field is considered a guaranteed API. + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. This field may not be empty. - type: string - severity: - description: |- - Severity provides an explicit classification of Reason code, so the users or machines can immediately - understand the current situation and act accordingly. - The Severity field MUST be set only when Status=False. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ type: string status: - description: Status of the condition, one of True, False, Unknown. + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown type: string type: - description: |- - Type of condition in CamelCase or in foo.example.com/CamelCase. - Many .condition.type values are consistent across resources like Available, but because arbitrary conditions - can be useful (see .node.status.conditions), the ability to deconflict is important. + description: type of condition in CamelCase or in foo.example.com/CamelCase. 
+ maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ type: string required: - lastTransitionTime + - message + - reason - status - type type: object diff --git a/go.mod b/go.mod index 8def10a1e..3a0155fa5 100644 --- a/go.mod +++ b/go.mod @@ -11,6 +11,7 @@ require ( github.com/aws/aws-sdk-go-v2/service/s3 v1.70.0 github.com/aws/smithy-go v1.22.1 github.com/go-logr/logr v1.4.2 + github.com/google/go-cmp v0.6.0 github.com/google/uuid v1.6.0 github.com/linode/linodego v1.43.0 github.com/onsi/ginkgo/v2 v2.22.0 @@ -29,7 +30,7 @@ require ( k8s.io/apimachinery v0.31.3 k8s.io/client-go v0.31.3 k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 - sigs.k8s.io/cluster-api v1.8.5 + sigs.k8s.io/cluster-api v1.9.0 sigs.k8s.io/controller-runtime v0.19.3 ) @@ -52,15 +53,14 @@ require ( github.com/felixge/httpsnoop v1.0.4 // indirect github.com/fxamacker/cbor/v2 v2.7.0 // indirect github.com/google/cel-go v0.20.1 // indirect - github.com/google/go-cmp v0.6.0 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/spf13/cobra v1.8.1 // indirect github.com/stoewer/go-strcase v1.2.0 // indirect github.com/x448/float16 v0.8.4 // indirect go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 // indirect golang.org/x/sync v0.9.0 // indirect - k8s.io/apiserver v0.31.0 // indirect - k8s.io/component-base v0.31.0 // indirect + k8s.io/apiserver v0.31.3 // indirect + k8s.io/component-base v0.31.3 // indirect sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.30.3 // indirect ) @@ -83,7 +83,7 @@ require ( github.com/go-ozzo/ozzo-validation/v4 v4.3.0 // indirect github.com/go-resty/resty/v2 v2.16.2 github.com/go-task/slim-sprig/v3 v3.0.0 // indirect - github.com/gobuffalo/flect v1.0.2 // indirect + github.com/gobuffalo/flect v1.0.3 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/golang/protobuf v1.5.4 // indirect @@ -128,10 +128,10 @@ require ( go.uber.org/multierr v1.11.0 // indirect go.uber.org/ratelimit v0.2.0 // indirect go.uber.org/zap v1.27.0 // indirect - golang.org/x/net v0.30.0 // indirect - golang.org/x/oauth2 v0.23.0 // indirect + golang.org/x/net v0.31.0 // indirect + golang.org/x/oauth2 v0.24.0 // indirect golang.org/x/sys v0.27.0 // indirect - golang.org/x/term v0.25.0 // indirect + golang.org/x/term v0.26.0 // indirect golang.org/x/text v0.20.0 // indirect golang.org/x/time v0.6.0 // indirect golang.org/x/tools v0.26.0 // indirect @@ -144,7 +144,7 @@ require ( gopkg.in/ini.v1 v1.67.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect - k8s.io/apiextensions-apiserver v0.31.0 // indirect + k8s.io/apiextensions-apiserver v0.31.3 // indirect k8s.io/klog/v2 v2.130.1 // indirect k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 // indirect sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect diff --git a/go.sum b/go.sum index b8896fb18..bb7d5ab48 100644 --- a/go.sum +++ b/go.sum @@ -1,11 +1,13 @@ +dario.cat/mergo v1.0.1 h1:Ra4+bf83h2ztPIQYNP99R6m+Y7KfnARDfID+a+vLl4s= +dario.cat/mergo v1.0.1/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk= github.com/MakeNowJust/heredoc v1.0.0 h1:cXCdzVdstXyiTqTvfqk9SDHpKNjxuom+DOlyEeQ4pzQ= github.com/MakeNowJust/heredoc v1.0.0/go.mod h1:mG5amYoWBHf8vpLOuehzbGGw0EHxpZZ6lCpQ4fNJ8LE= github.com/Masterminds/goutils v1.1.1 h1:5nUrii3FMTL5diU80unEVvNevw1nH4+ZV4DSLVJLSYI= github.com/Masterminds/goutils 
v1.1.1/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU= -github.com/Masterminds/semver/v3 v3.2.0 h1:3MEsd0SM6jqZojhjLWWeBY+Kcjy9i6MQAeY7YgDP83g= -github.com/Masterminds/semver/v3 v3.2.0/go.mod h1:qvl/7zhW3nngYb5+80sSMF+FG2BjYrf8m9wsX0PNOMQ= -github.com/Masterminds/sprig/v3 v3.2.3 h1:eL2fZNezLomi0uOLqjQoN6BfsDD+fyLtgbJMAj9n6YA= -github.com/Masterminds/sprig/v3 v3.2.3/go.mod h1:rXcFaZ2zZbLRJv/xSysmlgIM1u11eBaRMhvYXJNkGuM= +github.com/Masterminds/semver/v3 v3.3.0 h1:B8LGeaivUe71a5qox1ICM/JLl0NqZSW5CHyL+hmvYS0= +github.com/Masterminds/semver/v3 v3.3.0/go.mod h1:4V+yj/TJE1HU9XfppCwVMZq3I84lprf4nC11bSS5beM= +github.com/Masterminds/sprig/v3 v3.3.0 h1:mQh0Yrg1XPo6vjYXgtf5OtijNAKJRNcTdOOGZe3tPhs= +github.com/Masterminds/sprig/v3 v3.3.0/go.mod h1:Zy1iXRYNqNLUolqCpL4uhk6SHUMAOSCzdgBfDb35Lz0= github.com/akamai/AkamaiOPEN-edgegrid-golang/v8 v8.4.0 h1:zZJimNqkV3o7qZqBnprKyHCqUOTzoEaabG4qB3z0E2g= github.com/akamai/AkamaiOPEN-edgegrid-golang/v8 v8.4.0/go.mod h1:2xRRnHx8dnw0i8IZPYOI0I7xbr1gnAN1uIYo7acMIbg= github.com/andres-erbsen/clock v0.0.0-20160526145045-9e14626cd129 h1:MzBOUgng9orim59UnfUTLRjMpd09C5uEVQ6RPGeCaVI= @@ -69,8 +71,8 @@ github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UF github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/coredns/caddy v1.1.1 h1:2eYKZT7i6yxIfGP3qLJoJ7HAsDJqYB+X68g4NYjSrE0= github.com/coredns/caddy v1.1.1/go.mod h1:A6ntJQlAWuQfFlsd9hvigKbo2WS0VUs2l1e2F+BawD4= -github.com/coredns/corefile-migration v1.0.23 h1:Fp4FETmk8sT/IRgnKX2xstC2dL7+QdcU+BL5AYIN3Jw= -github.com/coredns/corefile-migration v1.0.23/go.mod h1:8HyMhuyzx9RLZp8cRc9Uf3ECpEAafHOFxQWUPqktMQI= +github.com/coredns/corefile-migration v1.0.24 h1:NL/zRKijhJZLYlNnMr891DRv5jXgfd3Noons1M6oTpc= +github.com/coredns/corefile-migration v1.0.24/go.mod h1:56DPqONc3njpVPsdilEnfijCwNGC3/kTJLl7i7SPavY= github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -114,8 +116,8 @@ github.com/go-resty/resty/v2 v2.16.2 h1:CpRqTjIzq/rweXUt9+GxzzQdlkqMdt8Lm/fuK/CA github.com/go-resty/resty/v2 v2.16.2/go.mod h1:0fHAoK7JoBy/Ch36N8VFeMsK7xQOHhvWaC3iOktwmIU= github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= -github.com/gobuffalo/flect v1.0.2 h1:eqjPGSo2WmjgY2XlpGwo2NXgL3RucAKo4k4qQMNA5sA= -github.com/gobuffalo/flect v1.0.2/go.mod h1:A5msMlrHtLqh9umBSnvabjsMrCcCpAyzglnDvkbYKHs= +github.com/gobuffalo/flect v1.0.3 h1:xeWBM2nui+qnVvNM4S3foBhCAL2XgPU+a7FdpelbTq4= +github.com/gobuffalo/flect v1.0.3/go.mod h1:A5msMlrHtLqh9umBSnvabjsMrCcCpAyzglnDvkbYKHs= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= @@ -142,8 +144,8 @@ github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+ github.com/grpc-ecosystem/grpc-gateway/v2 v2.23.0 h1:ad0vkEBuk23VJzZR9nkLVG0YAoN9coASF1GusYX6AlU= github.com/grpc-ecosystem/grpc-gateway/v2 v2.23.0/go.mod h1:igFoXX2ELCW06bol23DWPB5BEWfZISOzSP5K2sbLea0= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= 
-github.com/huandu/xstrings v1.3.3 h1:/Gcsuc1x8JVbJ9/rlye4xZnVAbEkGauT8lbebqcQws4= -github.com/huandu/xstrings v1.3.3/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= +github.com/huandu/xstrings v1.5.0 h1:2ag3IFq9ZDANvthTwTiqSSZLjDc+BedvHPAp5tJy2TI= +github.com/huandu/xstrings v1.5.0/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= github.com/imdario/mergo v0.3.13 h1:lFzP57bqS/wsqKssCGmtLAb8A0wKjLGrve2q3PPVcBk= github.com/imdario/mergo v0.3.13/go.mod h1:4lJ1jqUDcsbIECGy0RUJAXNIhg+6ocWgb1ALK2O4oXg= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= @@ -222,13 +224,13 @@ github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= -github.com/shopspring/decimal v1.3.1 h1:2Usl1nmF/WZucqkFZhnfFYxxxu8LG21F6nPQBE5gKV8= -github.com/shopspring/decimal v1.3.1/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= +github.com/shopspring/decimal v1.4.0 h1:bxl37RwXBklmTi0C79JfXCEBD1cqqHt0bbgBAGFp81k= +github.com/shopspring/decimal v1.4.0/go.mod h1:gawqmDU56v4yIKSwfBSFip1HdCCXN8/+DMd9qYNcwME= github.com/smartystreets/assertions v1.0.0/go.mod h1:kHHU4qYBaI3q23Pp3VPrmWhuIUrLW/7eUrw0BU5VaoM= github.com/smartystreets/go-aws-auth v0.0.0-20180515143844-0c1422d1fdb9/go.mod h1:SnhjPscd9TpLiy1LpzGSKh3bXCfxxXuqd9xmQJy3slM= github.com/smartystreets/gunit v1.0.0/go.mod h1:qwPWnhz6pn0NnRBP++URONOVyNkPyr4SauJk4cUOwJs= -github.com/spf13/cast v1.6.0 h1:GEiTHELF+vaR5dhz3VqZfFSzZjYbgeKDpBxQVS4GYJ0= -github.com/spf13/cast v1.6.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= +github.com/spf13/cast v1.7.0 h1:ntdiHjuueXFgm5nzDRdOS4yfT43P5Fnud6DH50rz/7w= +github.com/spf13/cast v1.7.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM= github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= @@ -325,8 +327,8 @@ golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACk golang.org/x/crypto v0.0.0-20190426145343-a29dc8fdc734/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.28.0 h1:GBDwsMXVQi34v5CCYUm2jkJvu4cbtru2U4TN2PSyQnw= -golang.org/x/crypto v0.28.0/go.mod h1:rmgy+3RHxRZMyY0jjAJShp2zgEdOqj2AO7U0pYmeQ7U= +golang.org/x/crypto v0.29.0 h1:L5SG1JTTXupVV3n6sUqMTeWbjAyfPwoda2DLX8J8FrQ= +golang.org/x/crypto v0.29.0/go.mod h1:+F4F4N5hv6v38hfeYwTdx20oUvLLc+QfrE9Ax9HtgRg= golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 h1:2dVuKD2vS7b0QIHQbpyTISPd0LeHDbnYEryqj5Q1ug8= golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56/go.mod h1:M4RDyNAINzryxdtnbRXRL/OHtkFuWGRjvuhBJpk2IlY= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= @@ -338,10 +340,10 @@ golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod 
h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.30.0 h1:AcW1SDZMkb8IpzCdQUaIq2sP4sZ4zw+55h6ynffypl4= -golang.org/x/net v0.30.0/go.mod h1:2wGyMJ5iFasEhkwi13ChkO/t1ECNC4X4eBKkVFyYFlU= -golang.org/x/oauth2 v0.23.0 h1:PbgcYx2W7i4LvjJWEbf0ngHV6qJYr86PkAV3bXdLEbs= -golang.org/x/oauth2 v0.23.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= +golang.org/x/net v0.31.0 h1:68CPQngjLL0r2AlUKiSxtQFKvzRVbnzLwMUn5SzcLHo= +golang.org/x/net v0.31.0/go.mod h1:P4fl1q7dY2hnZFxEk4pPSkDHF+QqjitcnDjUQyMM+pM= +golang.org/x/oauth2 v0.24.0 h1:KTBBxWqUa0ykRPLtV69rRto9TLXcqYkeswu48x/gvNE= +golang.org/x/oauth2 v0.24.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -355,8 +357,8 @@ golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.27.0 h1:wBqf8DvsY9Y/2P8gAfPDEYNuS30J4lPHJxXSb/nJZ+s= golang.org/x/sys v0.27.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/term v0.25.0 h1:WtHI/ltw4NvSUig5KARz9h521QvRC8RmF/cuYqifU24= -golang.org/x/term v0.25.0/go.mod h1:RPyXicDX+6vLxogjjRxjgD2TKtmAO6NZBsBRfrOLu7M= +golang.org/x/term v0.26.0 h1:WEQa6V3Gja/BhNxg540hBip/kkaYtRg3cxg4oXSw4AU= +golang.org/x/term v0.26.0/go.mod h1:Si5m1o57C5nBNQo5z1iq+XDijt21BDBDp2bK0QI8e3E= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= @@ -408,18 +410,18 @@ gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= k8s.io/api v0.31.3 h1:umzm5o8lFbdN/hIXbrK9oRpOproJO62CV1zqxXrLgk8= k8s.io/api v0.31.3/go.mod h1:UJrkIp9pnMOI9K2nlL6vwpxRzzEX5sWgn8kGQe92kCE= -k8s.io/apiextensions-apiserver v0.31.0 h1:fZgCVhGwsclj3qCw1buVXCV6khjRzKC5eCFt24kyLSk= -k8s.io/apiextensions-apiserver v0.31.0/go.mod h1:b9aMDEYaEe5sdK+1T0KU78ApR/5ZVp4i56VacZYEHxk= +k8s.io/apiextensions-apiserver v0.31.3 h1:+GFGj2qFiU7rGCsA5o+p/rul1OQIq6oYpQw4+u+nciE= +k8s.io/apiextensions-apiserver v0.31.3/go.mod h1:2DSpFhUZZJmn/cr/RweH1cEVVbzFw9YBu4T+U3mf1e4= k8s.io/apimachinery v0.31.3 h1:6l0WhcYgasZ/wk9ktLq5vLaoXJJr5ts6lkaQzgeYPq4= k8s.io/apimachinery v0.31.3/go.mod h1:rsPdaZJfTfLsNJSQzNHQvYoTmxhoOEofxtOsF3rtsMo= -k8s.io/apiserver v0.31.0 h1:p+2dgJjy+bk+B1Csz+mc2wl5gHwvNkC9QJV+w55LVrY= -k8s.io/apiserver v0.31.0/go.mod h1:KI9ox5Yu902iBnnyMmy7ajonhKnkeZYJhTZ/YI+WEMk= +k8s.io/apiserver v0.31.3 h1:+1oHTtCB+OheqFEz375D0IlzHZ5VeQKX1KGXnx+TTuY= +k8s.io/apiserver v0.31.3/go.mod h1:PrxVbebxrxQPFhJk4powDISIROkNMKHibTg9lTRQ0Qg= k8s.io/client-go v0.31.3 h1:CAlZuM+PH2cm+86LOBemaJI/lQ5linJ6UFxKX/SoG+4= k8s.io/client-go v0.31.3/go.mod h1:2CgjPUTpv3fE5dNygAr2NcM8nhHzXvxB8KL5gYc3kJs= -k8s.io/cluster-bootstrap v0.30.3 h1:MgxyxMkpaC6mu0BKWJ8985XCOnKU+eH3Iy+biwtDXRk= -k8s.io/cluster-bootstrap v0.30.3/go.mod h1:h8BoLDfdD7XEEIXy7Bx9FcMzxHwz29jsYYi34bM5DKU= -k8s.io/component-base v0.31.0 
h1:/KIzGM5EvPNQcYgwq5NwoQBaOlVFrghoVGr8lG6vNRs= -k8s.io/component-base v0.31.0/go.mod h1:TYVuzI1QmN4L5ItVdMSXKvH7/DtvIuas5/mm8YT3rTo= +k8s.io/cluster-bootstrap v0.31.3 h1:O1Yxk1bLaxZvmQCXLaJjj5iJD+lVMfJdRUuKgbUHPlA= +k8s.io/cluster-bootstrap v0.31.3/go.mod h1:TI6TCsQQB4FfcryWgNO3SLXSKWBqHjx4DfyqSFwixj8= +k8s.io/component-base v0.31.3 h1:DMCXXVx546Rfvhj+3cOm2EUxhS+EyztH423j+8sOwhQ= +k8s.io/component-base v0.31.3/go.mod h1:xME6BHfUOafRgT0rGVBGl7TuSg8Z9/deT7qq6w7qjIU= k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 h1:BZqlfIlq5YbRMFko6/PM7FjZpUb45WallggurYhKGag= @@ -428,8 +430,8 @@ k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 h1:pUdcCO1Lk/tbT5ztQWOBi5HBgbBP1 k8s.io/utils v0.0.0-20240711033017-18e509b52bc8/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.30.3 h1:2770sDpzrjjsAtVhSeUFseziht227YAWYHLGNM8QPwY= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.30.3/go.mod h1:Ve9uj1L+deCXFrPOk1LpFXqTg7LCFzFso6PA48q/XZw= -sigs.k8s.io/cluster-api v1.8.5 h1:lNA2fPN4fkXEs+oOQlnwxT/4VwRFBpv5kkSoJG8nqBA= -sigs.k8s.io/cluster-api v1.8.5/go.mod h1:pXv5LqLxuIbhGIXykyNKiJh+KrLweSBajVHHitPLyoY= +sigs.k8s.io/cluster-api v1.9.0 h1:Iud4Zj8R/t7QX5Rvs9/V+R8HDLbf7QPVemrWfZi4g54= +sigs.k8s.io/cluster-api v1.9.0/go.mod h1:8rjpkMxLFcA87Y3P6NOi6E9RMZv2uRnN9ppOPAxrTAY= sigs.k8s.io/controller-runtime v0.19.3 h1:XO2GvC9OPftRst6xWCpTgBZO04S2cbp0Qqkj8bX1sPw= sigs.k8s.io/controller-runtime v0.19.3/go.mod h1:j4j87DqtsThvwTv5/Tc5NFRyyF/RF0ip4+62tbTSIUM= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= diff --git a/internal/controller/linodecluster_controller.go b/internal/controller/linodecluster_controller.go index e3ff98163..4b7f2ae7d 100644 --- a/internal/controller/linodecluster_controller.go +++ b/internal/controller/linodecluster_controller.go @@ -30,9 +30,8 @@ import ( utilerrors "k8s.io/apimachinery/pkg/util/errors" "k8s.io/client-go/tools/record" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" - cerrs "sigs.k8s.io/cluster-api/errors" kutil "sigs.k8s.io/cluster-api/util" - "sigs.k8s.io/cluster-api/util/conditions" + conditions "sigs.k8s.io/cluster-api/util/conditions/v1beta2" "sigs.k8s.io/cluster-api/util/predicates" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/builder" @@ -52,8 +51,8 @@ import ( const ( lbTypeDNS string = "dns" - ConditionPreflightLinodeVPCReady clusterv1.ConditionType = "PreflightLinodeVPCReady" - ConditionPreflightLinodeNBFirewallReady clusterv1.ConditionType = "PreflightLinodeNBFirewallReady" + ConditionPreflightLinodeVPCReady string = "PreflightLinodeVPCReady" + ConditionPreflightLinodeNBFirewallReady string = "PreflightLinodeNBFirewallReady" ) // LinodeClusterReconciler reconciles a LinodeCluster object @@ -155,7 +154,7 @@ func (r *LinodeClusterReconciler) reconcile( if !clusterScope.LinodeCluster.DeletionTimestamp.IsZero() { if err := r.reconcileDelete(ctx, logger, clusterScope); err != nil { if !reconciler.HasStaleCondition(clusterScope.LinodeCluster, - clusterv1.ReadyCondition, + string(clusterv1.ReadyCondition), reconciler.DefaultTimeout(r.ReconcileTimeout, reconciler.DefaultClusterControllerReconcileTimeout)) { logger.Info("re-queuing cluster/nb deletion") return ctrl.Result{RequeueAfter: reconciler.DefaultClusterControllerReconcileDelay}, nil @@ -179,7 +178,7 @@ func (r 
*LinodeClusterReconciler) reconcile( if clusterScope.LinodeCluster.Spec.ControlPlaneEndpoint.Host == "" { if err := r.reconcileCreate(ctx, logger, clusterScope); err != nil { if !reconciler.HasStaleCondition(clusterScope.LinodeCluster, - clusterv1.ReadyCondition, + string(clusterv1.ReadyCondition), reconciler.DefaultTimeout(r.ReconcileTimeout, reconciler.DefaultClusterControllerReconcileTimeout)) { logger.Info("re-queuing cluster/load-balancer creation") return ctrl.Result{RequeueAfter: reconciler.DefaultClusterControllerReconcileDelay}, nil @@ -190,7 +189,11 @@ func (r *LinodeClusterReconciler) reconcile( } clusterScope.LinodeCluster.Status.Ready = true - conditions.MarkTrue(clusterScope.LinodeCluster, clusterv1.ReadyCondition) + conditions.Set(clusterScope.LinodeCluster, metav1.Condition{ + Type: string(clusterv1.ReadyCondition), + Status: metav1.ConditionTrue, + Reason: "LoadBalancerReady", // We have to set the reason to not fail object patching + }) for _, eachMachine := range clusterScope.LinodeMachines.Items { if len(eachMachine.Status.Addresses) == 0 { @@ -211,22 +214,38 @@ func (r *LinodeClusterReconciler) performPreflightChecks(ctx context.Context, lo if !reconciler.ConditionTrue(clusterScope.LinodeCluster, ConditionPreflightLinodeVPCReady) { res, err := r.reconcilePreflightLinodeVPCCheck(ctx, logger, clusterScope) if err != nil || !res.IsZero() { - conditions.MarkFalse(clusterScope.LinodeCluster, ConditionPreflightLinodeVPCReady, "linode vpc not yet available", "", "") + conditions.Set(clusterScope.LinodeCluster, metav1.Condition{ + Type: ConditionPreflightLinodeVPCReady, + Status: metav1.ConditionFalse, + Reason: "LinodeVPCNotYetAvailable", // We have to set the reason to not fail object patching + }) return res, err } } - conditions.MarkTrue(clusterScope.LinodeCluster, ConditionPreflightLinodeVPCReady) + conditions.Set(clusterScope.LinodeCluster, metav1.Condition{ + Type: ConditionPreflightLinodeVPCReady, + Status: metav1.ConditionTrue, + Reason: "LinodeVPCReady", // We have to set the reason to not fail object patching + }) } if clusterScope.LinodeCluster.Spec.NodeBalancerFirewallRef != nil { if !reconciler.ConditionTrue(clusterScope.LinodeCluster, ConditionPreflightLinodeNBFirewallReady) { res, err := r.reconcilePreflightLinodeFirewallCheck(ctx, logger, clusterScope) if err != nil || !res.IsZero() { - conditions.MarkFalse(clusterScope.LinodeCluster, ConditionPreflightLinodeNBFirewallReady, "linode firewall not yet available", "", "") + conditions.Set(clusterScope.LinodeCluster, metav1.Condition{ + Type: ConditionPreflightLinodeNBFirewallReady, + Status: metav1.ConditionFalse, + Reason: "LinodeFirewallNotYetAvailable", // We have to set the reason to not fail object patching + }) return res, err } } - conditions.MarkTrue(clusterScope.LinodeCluster, ConditionPreflightLinodeNBFirewallReady) + conditions.Set(clusterScope.LinodeCluster, metav1.Condition{ + Type: ConditionPreflightLinodeNBFirewallReady, + Status: metav1.ConditionTrue, + Reason: "LinodeFirewallReady", // We have to set the reason to not fail object patching + }) } return ctrl.Result{}, nil @@ -252,7 +271,12 @@ func (r *LinodeClusterReconciler) reconcilePreflightLinodeFirewallCheck(ctx cont if reconciler.HasStaleCondition(clusterScope.LinodeCluster, ConditionPreflightLinodeNBFirewallReady, reconciler.DefaultTimeout(r.ReconcileTimeout, reconciler.DefaultClusterControllerReconcileTimeout)) { - conditions.MarkFalse(clusterScope.LinodeCluster, ConditionPreflightLinodeNBFirewallReady, string(cerrs.CreateClusterError), 
"", "%s", err.Error()) + conditions.Set(clusterScope.LinodeCluster, metav1.Condition{ + Type: ConditionPreflightLinodeNBFirewallReady, + Status: metav1.ConditionFalse, + Reason: util.CreateError, + Message: err.Error(), + }) return ctrl.Result{}, err } return ctrl.Result{RequeueAfter: reconciler.DefaultClusterControllerReconcileDelay}, nil @@ -284,7 +308,12 @@ func (r *LinodeClusterReconciler) reconcilePreflightLinodeVPCCheck(ctx context.C if reconciler.HasStaleCondition(clusterScope.LinodeCluster, ConditionPreflightLinodeVPCReady, reconciler.DefaultTimeout(r.ReconcileTimeout, reconciler.DefaultClusterControllerReconcileTimeout)) { - conditions.MarkFalse(clusterScope.LinodeCluster, ConditionPreflightLinodeVPCReady, string(cerrs.CreateClusterError), "", "%s", err.Error()) + conditions.Set(clusterScope.LinodeCluster, metav1.Condition{ + Type: ConditionPreflightLinodeVPCReady, + Status: metav1.ConditionFalse, + Reason: util.CreateError, + Message: err.Error(), + }) return ctrl.Result{}, err } return ctrl.Result{RequeueAfter: reconciler.DefaultClusterControllerReconcileDelay}, nil @@ -296,19 +325,24 @@ func (r *LinodeClusterReconciler) reconcilePreflightLinodeVPCCheck(ctx context.C return ctrl.Result{}, nil } -func setFailureReason(clusterScope *scope.ClusterScope, failureReason cerrs.ClusterStatusError, err error, lcr *LinodeClusterReconciler) { +func setFailureReason(clusterScope *scope.ClusterScope, failureReason string, err error, lcr *LinodeClusterReconciler) { clusterScope.LinodeCluster.Status.FailureReason = util.Pointer(failureReason) clusterScope.LinodeCluster.Status.FailureMessage = util.Pointer(err.Error()) - conditions.MarkFalse(clusterScope.LinodeCluster, clusterv1.ReadyCondition, string(failureReason), "", "%s", err.Error()) + conditions.Set(clusterScope.LinodeCluster, metav1.Condition{ + Type: string(clusterv1.ReadyCondition), + Status: metav1.ConditionFalse, + Reason: failureReason, + Message: err.Error(), + }) - lcr.Recorder.Event(clusterScope.LinodeCluster, corev1.EventTypeWarning, string(failureReason), err.Error()) + lcr.Recorder.Event(clusterScope.LinodeCluster, corev1.EventTypeWarning, failureReason, err.Error()) } func (r *LinodeClusterReconciler) reconcileCreate(ctx context.Context, logger logr.Logger, clusterScope *scope.ClusterScope) error { if err := clusterScope.AddCredentialsRefFinalizer(ctx); err != nil { logger.Error(err, "failed to update credentials finalizer") - setFailureReason(clusterScope, cerrs.CreateClusterError, err, r) + setFailureReason(clusterScope, util.CreateError, err, r) return err } @@ -329,14 +363,24 @@ func (r *LinodeClusterReconciler) reconcileDelete(ctx context.Context, logger lo switch { case clusterScope.LinodeCluster.Spec.Network.LoadBalancerType == "external": logger.Info("LoadBalacing managed externally, nothing to do.") - conditions.MarkFalse(clusterScope.LinodeCluster, clusterv1.ReadyCondition, clusterv1.DeletedReason, "", "%s", "Deletion in progress") + conditions.Set(clusterScope.LinodeCluster, metav1.Condition{ + Type: string(clusterv1.ReadyCondition), + Status: metav1.ConditionFalse, + Reason: string(clusterv1.DeletedReason), + Message: "Deletion in progress", + }) r.Recorder.Event(clusterScope.LinodeCluster, corev1.EventTypeWarning, "LoadBalacing managed externally", "LoadBalacing managed externally, nothing to do.") case clusterScope.LinodeCluster.Spec.Network.LoadBalancerType == lbTypeDNS: if err := removeMachineFromDNS(ctx, logger, clusterScope); err != nil { return fmt.Errorf("remove machine from loadbalancer: %w", err) } - 
conditions.MarkFalse(clusterScope.LinodeCluster, clusterv1.ReadyCondition, clusterv1.DeletedReason, "", "%s", "Load balancing for Type DNS deleted") + conditions.Set(clusterScope.LinodeCluster, metav1.Condition{ + Type: string(clusterv1.ReadyCondition), + Status: metav1.ConditionFalse, + Reason: string(clusterv1.DeletedReason), + Message: "Load balancing for Type DNS deleted", + }) r.Recorder.Event(clusterScope.LinodeCluster, corev1.EventTypeNormal, clusterv1.DeletedReason, "Load balancing for Type DNS deleted") case clusterScope.LinodeCluster.Spec.Network.LoadBalancerType == "NodeBalancer" && clusterScope.LinodeCluster.Spec.Network.NodeBalancerID == nil: @@ -351,11 +395,16 @@ func (r *LinodeClusterReconciler) reconcileDelete(ctx context.Context, logger lo err := clusterScope.LinodeClient.DeleteNodeBalancer(ctx, *clusterScope.LinodeCluster.Spec.Network.NodeBalancerID) if util.IgnoreLinodeAPIError(err, http.StatusNotFound) != nil { logger.Error(err, "failed to delete NodeBalancer") - setFailureReason(clusterScope, cerrs.DeleteClusterError, err, r) + setFailureReason(clusterScope, util.DeleteError, err, r) return err } - conditions.MarkFalse(clusterScope.LinodeCluster, clusterv1.ReadyCondition, clusterv1.DeletedReason, "", "%s", "Load balancer for Type NodeBalancer deleted") + conditions.Set(clusterScope.LinodeCluster, metav1.Condition{ + Type: string(clusterv1.ReadyCondition), + Status: metav1.ConditionFalse, + Reason: string(clusterv1.DeletedReason), + Message: "Load balancer for Type NodeBalancer deleted", + }) r.Recorder.Event(clusterScope.LinodeCluster, corev1.EventTypeNormal, clusterv1.DeletedReason, "Load balancer for Type NodeBalancer deleted") clusterScope.LinodeCluster.Spec.Network.NodeBalancerID = nil @@ -370,7 +419,7 @@ func (r *LinodeClusterReconciler) reconcileDelete(ctx context.Context, logger lo if err := clusterScope.RemoveCredentialsRefFinalizer(ctx); err != nil { logger.Error(err, "failed to remove credentials finalizer") - setFailureReason(clusterScope, cerrs.DeleteClusterError, err, r) + setFailureReason(clusterScope, util.DeleteError, err, r) return err } controllerutil.RemoveFinalizer(clusterScope.LinodeCluster, infrav1alpha2.ClusterFinalizer) @@ -387,13 +436,13 @@ func (r *LinodeClusterReconciler) SetupWithManager(mgr ctrl.Manager, options crc For(&infrav1alpha2.LinodeCluster{}). WithOptions(options). // we care about reconciling on metadata updates for LinodeClusters because the OwnerRef for the Cluster is needed - WithEventFilter(predicates.ResourceNotPausedAndHasFilterLabel(mgr.GetLogger(), r.WatchFilterValue)). + WithEventFilter(predicates.ResourceNotPausedAndHasFilterLabel(mgr.GetScheme(), mgr.GetLogger(), r.WatchFilterValue)). Watches( &clusterv1.Cluster{}, handler.EnqueueRequestsFromMapFunc( kutil.ClusterToInfrastructureMapFunc(context.TODO(), infrav1alpha2.GroupVersion.WithKind("LinodeCluster"), mgr.GetClient(), &infrav1alpha2.LinodeCluster{}), ), - builder.WithPredicates(predicates.ClusterUnpausedAndInfrastructureReady(mgr.GetLogger())), + builder.WithPredicates(predicates.ClusterPausedTransitionsOrInfrastructureReady(mgr.GetScheme(), mgr.GetLogger())), ). 
Watches( &infrav1alpha2.LinodeMachine{}, diff --git a/internal/controller/linodecluster_controller_test.go b/internal/controller/linodecluster_controller_test.go index e72c08de8..0534d72d0 100644 --- a/internal/controller/linodecluster_controller_test.go +++ b/internal/controller/linodecluster_controller_test.go @@ -320,7 +320,7 @@ var _ = Describe("cluster-lifecycle", Ordered, Label("cluster", "cluster-lifecyc Expect(k8sClient.Get(ctx, clusterKey, &linodeCluster)).To(Succeed()) Expect(linodeCluster.Status.Ready).To(BeTrue()) Expect(linodeCluster.Status.Conditions).To(HaveLen(3)) - Expect(linodeCluster.Status.Conditions[0].Type).To(Equal(clusterv1.ReadyCondition)) + Expect(linodeCluster.Status.Conditions[0].Type).To(Equal(string(clusterv1.ReadyCondition))) Expect(linodeCluster.Status.Conditions[1].Type).To(Equal(ConditionPreflightLinodeNBFirewallReady)) Expect(linodeCluster.Status.Conditions[2].Type).To(Equal(ConditionPreflightLinodeVPCReady)) By("checking NB id") @@ -435,7 +435,7 @@ var _ = Describe("cluster-lifecycle-dns", Ordered, Label("cluster", "cluster-lif Expect(k8sClient.Get(ctx, clusterKey, &linodeCluster)).To(Succeed()) Expect(linodeCluster.Status.Ready).To(BeTrue()) Expect(linodeCluster.Status.Conditions).To(HaveLen(1)) - Expect(linodeCluster.Status.Conditions[0].Type).To(Equal(clusterv1.ReadyCondition)) + Expect(linodeCluster.Status.Conditions[0].Type).To(Equal(string(clusterv1.ReadyCondition))) By("checking controlPlaneEndpoint/NB host and port") Expect(linodeCluster.Spec.ControlPlaneEndpoint.Host).To(Equal(controlPlaneEndpointHost)) @@ -704,7 +704,7 @@ var _ = Describe("dns-override-endpoint", Ordered, Label("cluster", "dns-overrid Expect(k8sClient.Get(ctx, clusterKey, &linodeCluster)).To(Succeed()) Expect(linodeCluster.Status.Ready).To(BeTrue()) Expect(linodeCluster.Status.Conditions).To(HaveLen(1)) - Expect(linodeCluster.Status.Conditions[0].Type).To(Equal(clusterv1.ReadyCondition)) + Expect(linodeCluster.Status.Conditions[0].Type).To(Equal(string(clusterv1.ReadyCondition))) By("checking controlPlaneEndpoint/NB host and port") Expect(linodeCluster.Spec.ControlPlaneEndpoint.Host).To(Equal(controlPlaneEndpointHost)) diff --git a/internal/controller/linodefirewall_controller.go b/internal/controller/linodefirewall_controller.go index 0207eee8b..e8805ccbb 100644 --- a/internal/controller/linodefirewall_controller.go +++ b/internal/controller/linodefirewall_controller.go @@ -26,11 +26,12 @@ import ( "github.com/go-logr/logr" corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" utilerrors "k8s.io/apimachinery/pkg/util/errors" "k8s.io/client-go/tools/record" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" kutil "sigs.k8s.io/cluster-api/util" - "sigs.k8s.io/cluster-api/util/conditions" + conditions "sigs.k8s.io/cluster-api/util/conditions/v1beta2" "sigs.k8s.io/cluster-api/util/predicates" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/builder" @@ -112,7 +113,12 @@ func (r *LinodeFirewallReconciler) reconcile( if err != nil { fwScope.LinodeFirewall.Status.FailureReason = util.Pointer(failureReason) fwScope.LinodeFirewall.Status.FailureMessage = util.Pointer(err.Error()) - conditions.MarkFalse(fwScope.LinodeFirewall, clusterv1.ReadyCondition, string(failureReason), "", "%s", err.Error()) + conditions.Set(fwScope.LinodeFirewall, metav1.Condition{ + Type: string(clusterv1.ReadyCondition), + Status: metav1.ConditionFalse, + Reason: string(failureReason), + Message: err.Error(), + }) 
r.Recorder.Event(fwScope.LinodeFirewall, corev1.EventTypeWarning, string(failureReason), err.Error()) } @@ -154,7 +160,12 @@ func (r *LinodeFirewallReconciler) reconcile( failureReason = infrav1alpha2.CreateFirewallError if err = fwScope.AddCredentialsRefFinalizer(ctx); err != nil { logger.Error(err, "failed to update credentials secret") - conditions.MarkFalse(fwScope.LinodeFirewall, clusterv1.ReadyCondition, string(failureReason), "", "%s", err.Error()) + conditions.Set(fwScope.LinodeFirewall, metav1.Condition{ + Type: string(clusterv1.ReadyCondition), + Status: metav1.ConditionFalse, + Reason: string(failureReason), + Message: err.Error(), + }) r.Recorder.Event(fwScope.LinodeFirewall, corev1.EventTypeWarning, string(failureReason), err.Error()) return ctrl.Result{}, nil @@ -162,14 +173,19 @@ func (r *LinodeFirewallReconciler) reconcile( } if err = reconcileFirewall(ctx, r.Client, fwScope, logger); err != nil { logger.Error(err, fmt.Sprintf("failed to %s Firewall", action)) - conditions.MarkFalse(fwScope.LinodeFirewall, clusterv1.ReadyCondition, string(failureReason), "", "%s", err.Error()) + conditions.Set(fwScope.LinodeFirewall, metav1.Condition{ + Type: string(clusterv1.ReadyCondition), + Status: metav1.ConditionFalse, + Reason: string(failureReason), + Message: err.Error(), + }) r.Recorder.Event(fwScope.LinodeFirewall, corev1.EventTypeWarning, string(failureReason), err.Error()) switch { case errors.Is(err, errTooManyIPs): // Cannot reconcile firewall with too many ips, wait for an update to the spec return ctrl.Result{}, nil - case util.IsRetryableError(err) && !reconciler.HasStaleCondition(fwScope.LinodeFirewall, clusterv1.ReadyCondition, + case util.IsRetryableError(err) && !reconciler.HasStaleCondition(fwScope.LinodeFirewall, string(clusterv1.ReadyCondition), reconciler.DefaultTimeout(r.ReconcileTimeout, reconciler.DefaultFWControllerReconcileTimeout)): logger.Info(fmt.Sprintf("re-queuing Firewall %s", action)) @@ -241,7 +257,7 @@ func (r *LinodeFirewallReconciler) SetupWithManager(mgr ctrl.Manager, options cr WithOptions(options). WithEventFilter( predicate.And( - predicates.ResourceNotPausedAndHasFilterLabel(mgr.GetLogger(), r.WatchFilterValue), + predicates.ResourceNotPausedAndHasFilterLabel(mgr.GetScheme(), mgr.GetLogger(), r.WatchFilterValue), predicate.GenerationChangedPredicate{}, predicate.Funcs{UpdateFunc: func(e event.UpdateEvent) bool { oldObject, okOld := e.ObjectOld.(*infrav1alpha2.LinodeFirewall) @@ -256,7 +272,7 @@ func (r *LinodeFirewallReconciler) SetupWithManager(mgr ctrl.Manager, options cr Watches( &clusterv1.Cluster{}, handler.EnqueueRequestsFromMapFunc(linodeFirewallMapper), - builder.WithPredicates(predicates.ClusterUnpausedAndInfrastructureReady(mgr.GetLogger())), + builder.WithPredicates(predicates.ClusterPausedTransitionsOrInfrastructureReady(mgr.GetScheme(), mgr.GetLogger())), ). 
Watches( &infrav1alpha2.AddressSet{}, diff --git a/internal/controller/linodefirewall_controller_test.go b/internal/controller/linodefirewall_controller_test.go index 9eeefc06d..4b870b91b 100644 --- a/internal/controller/linodefirewall_controller_test.go +++ b/internal/controller/linodefirewall_controller_test.go @@ -27,7 +27,7 @@ import ( corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" - "sigs.k8s.io/cluster-api/util/conditions" + conditions "sigs.k8s.io/cluster-api/util/conditions/v1beta2" "sigs.k8s.io/cluster-api/util/patch" "sigs.k8s.io/controller-runtime/pkg/client" @@ -209,7 +209,12 @@ var _ = Describe("lifecycle", Ordered, Label("firewalls", "lifecycle"), func() { }), OneOf( Path(Result("update requeues for update rules error", func(ctx context.Context, mck Mock) { - conditions.MarkFalse(fwScope.LinodeFirewall, clusterv1.ReadyCondition, "test", clusterv1.ConditionSeverityWarning, "%s", "test") + conditions.Set(fwScope.LinodeFirewall, metav1.Condition{ + Type: string(clusterv1.ReadyCondition), + Status: metav1.ConditionFalse, + Reason: "test", + Message: "test", + }) mck.LinodeClient.EXPECT().UpdateFirewallRules(ctx, 1, gomock.Any()).Return(nil, &linodego.Error{Code: http.StatusInternalServerError}) res, err := reconciler.reconcile(ctx, mck.Logger(), &fwScope) Expect(err).NotTo(HaveOccurred()) diff --git a/internal/controller/linodemachine_controller.go b/internal/controller/linodemachine_controller.go index 22bf04501..cc08893f9 100644 --- a/internal/controller/linodemachine_controller.go +++ b/internal/controller/linodemachine_controller.go @@ -33,9 +33,8 @@ import ( utilerrors "k8s.io/apimachinery/pkg/util/errors" "k8s.io/client-go/tools/record" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" - cerrs "sigs.k8s.io/cluster-api/errors" kutil "sigs.k8s.io/cluster-api/util" - "sigs.k8s.io/cluster-api/util/conditions" + conditions "sigs.k8s.io/cluster-api/util/conditions/v1beta2" "sigs.k8s.io/cluster-api/util/predicates" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/builder" @@ -59,16 +58,16 @@ const ( defaultDiskFilesystem = string(linodego.FilesystemExt4) // conditions for preflight instance creation - ConditionPreflightBootstrapDataSecretReady clusterv1.ConditionType = "PreflightBootstrapDataSecretReady" - ConditionPreflightLinodeFirewallReady clusterv1.ConditionType = "PreflightLinodeFirewallReady" - ConditionPreflightMetadataSupportConfigured clusterv1.ConditionType = "PreflightMetadataSupportConfigured" - ConditionPreflightCreated clusterv1.ConditionType = "PreflightCreated" - ConditionPreflightRootDiskResizing clusterv1.ConditionType = "PreflightRootDiskResizing" - ConditionPreflightRootDiskResized clusterv1.ConditionType = "PreflightRootDiskResized" - ConditionPreflightAdditionalDisksCreated clusterv1.ConditionType = "PreflightAdditionalDisksCreated" - ConditionPreflightConfigured clusterv1.ConditionType = "PreflightConfigured" - ConditionPreflightBootTriggered clusterv1.ConditionType = "PreflightBootTriggered" - ConditionPreflightReady clusterv1.ConditionType = "PreflightReady" + ConditionPreflightBootstrapDataSecretReady = "PreflightBootstrapDataSecretReady" + ConditionPreflightLinodeFirewallReady = "PreflightLinodeFirewallReady" + ConditionPreflightMetadataSupportConfigured = "PreflightMetadataSupportConfigured" + ConditionPreflightCreated = "PreflightCreated" + ConditionPreflightRootDiskResizing = "PreflightRootDiskResizing" + ConditionPreflightRootDiskResized = 
"PreflightRootDiskResized" + ConditionPreflightAdditionalDisksCreated = "PreflightAdditionalDisksCreated" + ConditionPreflightConfigured = "PreflightConfigured" + ConditionPreflightBootTriggered = "PreflightBootTriggered" + ConditionPreflightReady = "PreflightReady" // WaitingForBootstrapDataReason used when machine is waiting for bootstrap data to be ready before proceeding. WaitingForBootstrapDataReason = "WaitingForBootstrapData" @@ -171,7 +170,7 @@ func (r *LinodeMachineReconciler) Reconcile(ctx context.Context, req ctrl.Reques } func (r *LinodeMachineReconciler) reconcile(ctx context.Context, logger logr.Logger, machineScope *scope.MachineScope) (res ctrl.Result, err error) { - failureReason := cerrs.MachineStatusError("UnknownError") + failureReason := util.UnknownError //nolint:dupl // Code duplication is simplicity in this case. defer func() { if err != nil { @@ -179,11 +178,16 @@ func (r *LinodeMachineReconciler) reconcile(ctx context.Context, logger logr.Log if linodego.ErrHasStatus(err, http.StatusBadRequest) { machineScope.LinodeMachine.Status.FailureReason = util.Pointer(failureReason) machineScope.LinodeMachine.Status.FailureMessage = util.Pointer(err.Error()) - conditions.MarkFalse(machineScope.LinodeMachine, clusterv1.ReadyCondition, string(failureReason), "", "%s", err.Error()) + conditions.Set(machineScope.LinodeMachine, metav1.Condition{ + Type: string(clusterv1.ReadyCondition), + Status: metav1.ConditionFalse, + Reason: failureReason, + Message: err.Error(), + }) } // Record the event regardless of whether the error is retryable or not for visibility. - r.Recorder.Event(machineScope.LinodeMachine, corev1.EventTypeWarning, string(failureReason), err.Error()) + r.Recorder.Event(machineScope.LinodeMachine, corev1.EventTypeWarning, failureReason, err.Error()) } // Always close the scope when exiting this function so we can persist any LinodeMachine changes. @@ -214,27 +218,35 @@ func (r *LinodeMachineReconciler) reconcile(ctx context.Context, logger logr.Log // Delete if !machineScope.LinodeMachine.ObjectMeta.DeletionTimestamp.IsZero() { - failureReason = cerrs.DeleteMachineError + failureReason = util.DeleteError return r.reconcileDelete(ctx, logger, machineScope) } // Make sure bootstrap data is available and populated. 
- if !reconciler.ConditionTrue(machineScope.LinodeMachine, ConditionPreflightBootstrapDataSecretReady) && machineScope.Machine.Spec.Bootstrap.DataSecretName == nil { + if !reconciler.ConditionTrue(machineScope.LinodeMachine, string(ConditionPreflightBootstrapDataSecretReady)) && machineScope.Machine.Spec.Bootstrap.DataSecretName == nil { logger.Info("Bootstrap data secret is not yet available") - conditions.MarkFalse(machineScope.LinodeMachine, ConditionPreflightBootstrapDataSecretReady, WaitingForBootstrapDataReason, "", "%s", "") + conditions.Set(machineScope.LinodeMachine, metav1.Condition{ + Type: ConditionPreflightBootstrapDataSecretReady, + Status: metav1.ConditionFalse, + Reason: WaitingForBootstrapDataReason, + }) return ctrl.Result{}, nil - } else { - conditions.MarkTrue(machineScope.LinodeMachine, ConditionPreflightBootstrapDataSecretReady) } + conditions.Set(machineScope.LinodeMachine, metav1.Condition{ + Type: ConditionPreflightBootstrapDataSecretReady, + Status: metav1.ConditionTrue, + Reason: "BootstrapDataSecretReady", // We have to set the reason to not fail object patching + }) + // Update if machineScope.LinodeMachine.Status.InstanceState != nil { - failureReason = cerrs.UpdateMachineError + failureReason = util.UpdateError return r.reconcileUpdate(ctx, logger, machineScope) } // Create - failureReason = cerrs.CreateMachineError + failureReason = util.CreateError return r.reconcileCreate(ctx, logger, machineScope) } @@ -252,10 +264,14 @@ func (r *LinodeMachineReconciler) reconcileCreate( } if machineScope.LinodeMachine.Spec.FirewallRef != nil { - if !reconciler.ConditionTrue(machineScope.LinodeMachine, ConditionPreflightLinodeFirewallReady) && machineScope.LinodeMachine.Spec.ProviderID == nil { + if !reconciler.ConditionTrue(machineScope.LinodeMachine, string(ConditionPreflightLinodeFirewallReady)) && machineScope.LinodeMachine.Spec.ProviderID == nil { res, err := r.reconcilePreflightLinodeFirewallCheck(ctx, logger, machineScope) if err != nil || !res.IsZero() { - conditions.MarkFalse(machineScope.LinodeMachine, ConditionPreflightLinodeFirewallReady, string("linode firewall not yet available"), "", "%s", "") + conditions.Set(machineScope.LinodeMachine, metav1.Condition{ + Type: ConditionPreflightLinodeFirewallReady, + Status: metav1.ConditionFalse, + Reason: "LinodeFirewallNotYetAvailable", // We have to set the reason to not fail object patching + }) return res, err } } @@ -333,17 +349,31 @@ func (r *LinodeMachineReconciler) reconcilePreflightVPC(ctx context.Context, log if reconciler.HasStaleCondition(machineScope.LinodeMachine, ConditionPreflightLinodeVPCReady, reconciler.DefaultTimeout(r.ReconcileTimeout, reconciler.DefaultClusterControllerReconcileTimeout)) { - conditions.MarkFalse(machineScope.LinodeMachine, ConditionPreflightLinodeVPCReady, string(cerrs.CreateClusterError), "", "%s", err.Error()) + conditions.Set(machineScope.LinodeMachine, metav1.Condition{ + Type: ConditionPreflightLinodeVPCReady, + Status: metav1.ConditionFalse, + Reason: util.CreateError, + Message: err.Error(), + }) return ctrl.Result{}, err } - conditions.MarkFalse(machineScope.LinodeMachine, ConditionPreflightLinodeVPCReady, string(cerrs.CreateClusterError), "", "%s", err.Error()) + conditions.Set(machineScope.LinodeMachine, metav1.Condition{ + Type: ConditionPreflightLinodeVPCReady, + Status: metav1.ConditionFalse, + Reason: util.CreateError, + Message: err.Error(), + }) return ctrl.Result{RequeueAfter: reconciler.DefaultClusterControllerReconcileDelay}, nil } else if 
!linodeVPC.Status.Ready { logger.Info("LinodeVPC is not yet available") return ctrl.Result{RequeueAfter: reconciler.DefaultClusterControllerReconcileDelay}, nil } r.Recorder.Event(machineScope.LinodeMachine, corev1.EventTypeNormal, string(clusterv1.ReadyCondition), "LinodeVPC is now available") - conditions.MarkTrue(machineScope.LinodeMachine, ConditionPreflightLinodeVPCReady) + conditions.Set(machineScope.LinodeMachine, metav1.Condition{ + Type: ConditionPreflightLinodeVPCReady, + Status: metav1.ConditionTrue, + Reason: "LinodeVPCReady", // We have to set the reason to not fail object patching + }) return ctrl.Result{}, nil } @@ -364,16 +394,30 @@ func (r *LinodeMachineReconciler) reconcilePreflightLinodeFirewallCheck(ctx cont if reconciler.HasStaleCondition(machineScope.LinodeMachine, ConditionPreflightLinodeFirewallReady, reconciler.DefaultTimeout(r.ReconcileTimeout, reconciler.DefaultMachineControllerWaitForPreflightTimeout)) { - conditions.MarkFalse(machineScope.LinodeMachine, ConditionPreflightLinodeFirewallReady, string(cerrs.CreateMachineError), "", "%s", err.Error()) + conditions.Set(machineScope.LinodeMachine, metav1.Condition{ + Type: ConditionPreflightLinodeFirewallReady, + Status: metav1.ConditionFalse, + Reason: util.CreateError, + Message: err.Error(), + }) return ctrl.Result{}, err } - conditions.MarkFalse(machineScope.LinodeMachine, ConditionPreflightLinodeFirewallReady, string(cerrs.CreateMachineError), "", "%s", err.Error()) + conditions.Set(machineScope.LinodeMachine, metav1.Condition{ + Type: ConditionPreflightLinodeFirewallReady, + Status: metav1.ConditionFalse, + Reason: util.CreateError, + Message: err.Error(), + }) return ctrl.Result{RequeueAfter: reconciler.DefaultMachineControllerRetryDelay}, nil } else if !linodeFirewall.Status.Ready { logger.Info("Linode firewall not yet ready") return ctrl.Result{RequeueAfter: reconciler.DefaultMachineControllerRetryDelay}, nil } - conditions.MarkTrue(machineScope.LinodeMachine, ConditionPreflightLinodeFirewallReady) + conditions.Set(machineScope.LinodeMachine, metav1.Condition{ + Type: ConditionPreflightLinodeFirewallReady, + Status: metav1.ConditionTrue, + Reason: "LinodeFirewallReady", // We have to set the reason to not fail object patching + }) return ctrl.Result{}, nil } @@ -399,7 +443,11 @@ func (r *LinodeMachineReconciler) reconcilePreflightMetadataSupportConfigure(ctx logger.Info("cloud-init metadata support not available", "imageMetadataSupport", imageMetadataSupport, "regionMetadataSupport", regionMetadataSupport) machineScope.LinodeMachine.Status.CloudinitMetadataSupport = false } - conditions.MarkTrue(machineScope.LinodeMachine, ConditionPreflightMetadataSupportConfigured) + conditions.Set(machineScope.LinodeMachine, metav1.Condition{ + Type: ConditionPreflightMetadataSupportConfigured, + Status: metav1.ConditionTrue, + Reason: "LinodeMetadataSupportConfigured", // We have to set the reason to not fail object patching + }) return ctrl.Result{}, nil } @@ -421,14 +469,28 @@ func (r *LinodeMachineReconciler) reconcilePreflightCreate(ctx context.Context, if reconciler.HasStaleCondition(machineScope.LinodeMachine, ConditionPreflightCreated, reconciler.DefaultTimeout(r.ReconcileTimeout, reconciler.DefaultMachineControllerWaitForPreflightTimeout)) { - conditions.MarkFalse(machineScope.LinodeMachine, ConditionPreflightCreated, string(cerrs.CreateMachineError), "", "%s", err.Error()) + conditions.Set(machineScope.LinodeMachine, metav1.Condition{ + Type: ConditionPreflightCreated, + Status: metav1.ConditionFalse, + Reason: 
util.CreateError, + Message: err.Error(), + }) return ctrl.Result{}, err } - conditions.MarkFalse(machineScope.LinodeMachine, ConditionPreflightCreated, string(cerrs.CreateMachineError), "", "%s", err.Error()) + conditions.Set(machineScope.LinodeMachine, metav1.Condition{ + Type: ConditionPreflightCreated, + Status: metav1.ConditionFalse, + Reason: util.CreateError, + Message: err.Error(), + }) return retryIfTransient(err, logger) } - conditions.MarkTrue(machineScope.LinodeMachine, ConditionPreflightCreated) + conditions.Set(machineScope.LinodeMachine, metav1.Condition{ + Type: ConditionPreflightCreated, + Status: metav1.ConditionTrue, + Reason: "LinodeMachinePreflightCreated", // We have to set the reason to not fail object patching + }) // Set the provider ID since the instance is successfully created machineScope.LinodeMachine.Spec.ProviderID = util.Pointer(fmt.Sprintf("linode://%d", linodeInstance.ID)) return ctrl.Result{}, nil @@ -439,10 +501,20 @@ func (r *LinodeMachineReconciler) reconcilePreflightConfigure(ctx context.Contex if reconciler.HasStaleCondition(machineScope.LinodeMachine, ConditionPreflightConfigured, reconciler.DefaultTimeout(r.ReconcileTimeout, reconciler.DefaultMachineControllerWaitForPreflightTimeout)) { - conditions.MarkFalse(machineScope.LinodeMachine, ConditionPreflightConfigured, string(cerrs.CreateMachineError), "", "%s", err.Error()) + conditions.Set(machineScope.LinodeMachine, metav1.Condition{ + Type: ConditionPreflightConfigured, + Status: metav1.ConditionFalse, + Reason: util.CreateError, + Message: err.Error(), + }) return ctrl.Result{}, err } - conditions.MarkFalse(machineScope.LinodeMachine, ConditionPreflightConfigured, string(cerrs.CreateMachineError), "", "%s", err.Error()) + conditions.Set(machineScope.LinodeMachine, metav1.Condition{ + Type: ConditionPreflightConfigured, + Status: metav1.ConditionFalse, + Reason: util.CreateError, + Message: err.Error(), + }) return ctrl.Result{RequeueAfter: reconciler.DefaultMachineControllerWaitForRunningDelay}, nil } if machineScope.LinodeMachine.Spec.Configuration != nil && machineScope.LinodeMachine.Spec.Configuration.Kernel != "" { @@ -457,7 +529,11 @@ func (r *LinodeMachineReconciler) reconcilePreflightConfigure(ctx context.Contex return retryIfTransient(err, logger) } } - conditions.MarkTrue(machineScope.LinodeMachine, ConditionPreflightConfigured) + conditions.Set(machineScope.LinodeMachine, metav1.Condition{ + Type: ConditionPreflightConfigured, + Status: metav1.ConditionTrue, + Reason: "LinodeMachinePreflightConfigured", // We have to set the reason to not fail object patching + }) return ctrl.Result{}, nil } @@ -467,13 +543,27 @@ func (r *LinodeMachineReconciler) reconcilePreflightBoot(ctx context.Context, in if reconciler.HasStaleCondition(machineScope.LinodeMachine, ConditionPreflightBootTriggered, reconciler.DefaultTimeout(r.ReconcileTimeout, reconciler.DefaultMachineControllerWaitForPreflightTimeout)) { - conditions.MarkFalse(machineScope.LinodeMachine, ConditionPreflightBootTriggered, string(cerrs.CreateMachineError), "", "%s", err.Error()) + conditions.Set(machineScope.LinodeMachine, metav1.Condition{ + Type: ConditionPreflightBootTriggered, + Status: metav1.ConditionFalse, + Reason: util.CreateError, + Message: err.Error(), + }) return ctrl.Result{}, err } - conditions.MarkFalse(machineScope.LinodeMachine, ConditionPreflightBootTriggered, string(cerrs.CreateMachineError), "", "%s", err.Error()) + conditions.Set(machineScope.LinodeMachine, metav1.Condition{ + Type: ConditionPreflightBootTriggered, 
+ Status: metav1.ConditionFalse, + Reason: util.CreateError, + Message: err.Error(), + }) return ctrl.Result{RequeueAfter: reconciler.DefaultMachineControllerWaitForRunningDelay}, nil } - conditions.MarkTrue(machineScope.LinodeMachine, ConditionPreflightBootTriggered) + conditions.Set(machineScope.LinodeMachine, metav1.Condition{ + Type: ConditionPreflightBootTriggered, + Status: metav1.ConditionTrue, + Reason: "LinodeMachinePreflightBootTriggered", // We have to set the reason to not fail object patching + }) return ctrl.Result{}, nil } @@ -484,14 +574,28 @@ func (r *LinodeMachineReconciler) reconcilePreflightReady(ctx context.Context, i if reconciler.HasStaleCondition(machineScope.LinodeMachine, ConditionPreflightReady, reconciler.DefaultTimeout(r.ReconcileTimeout, reconciler.DefaultMachineControllerWaitForPreflightTimeout)) { - conditions.MarkFalse(machineScope.LinodeMachine, ConditionPreflightReady, string(cerrs.CreateMachineError), "", "%s", err.Error()) + conditions.Set(machineScope.LinodeMachine, metav1.Condition{ + Type: ConditionPreflightReady, + Status: metav1.ConditionFalse, + Reason: util.CreateError, + Message: err.Error(), + }) return ctrl.Result{}, err } - conditions.MarkFalse(machineScope.LinodeMachine, ConditionPreflightReady, string(cerrs.CreateMachineError), "", "%s", err.Error()) + conditions.Set(machineScope.LinodeMachine, metav1.Condition{ + Type: ConditionPreflightReady, + Status: metav1.ConditionFalse, + Reason: util.CreateError, + Message: err.Error(), + }) return ctrl.Result{RequeueAfter: reconciler.DefaultMachineControllerWaitForRunningDelay}, nil } machineScope.LinodeMachine.Status.Addresses = addrs - conditions.MarkTrue(machineScope.LinodeMachine, ConditionPreflightReady) + conditions.Set(machineScope.LinodeMachine, metav1.Condition{ + Type: ConditionPreflightReady, + Status: metav1.ConditionTrue, + Reason: "LinodeMachinePreflightReady", // We have to set the reason to not fail object patching + }) return ctrl.Result{}, nil } @@ -516,14 +620,28 @@ func (r *LinodeMachineReconciler) reconcileUpdate(ctx context.Context, logger lo return ctrl.Result{RequeueAfter: reconciler.DefaultMachineControllerWaitForRunningDelay}, nil } else { logger.Info("Instance not ready in time, skipping reconciliation", "status", linodeInstance.Status) - conditions.MarkFalse(machineScope.LinodeMachine, clusterv1.ReadyCondition, string(linodeInstance.Status), "", "%s", "skipped due to long running operation") + conditions.Set(machineScope.LinodeMachine, metav1.Condition{ + Type: string(clusterv1.ReadyCondition), + Status: metav1.ConditionFalse, + Reason: string(linodeInstance.Status), + Message: "skipped due to long running operation", + }) } } else if linodeInstance.Status != linodego.InstanceRunning { logger.Info("Instance has incompatible status, skipping reconciliation", "status", linodeInstance.Status) - conditions.MarkFalse(machineScope.LinodeMachine, clusterv1.ReadyCondition, string(linodeInstance.Status), "", "%s", "incompatible status") + conditions.Set(machineScope.LinodeMachine, metav1.Condition{ + Type: string(clusterv1.ReadyCondition), + Status: metav1.ConditionFalse, + Reason: string(linodeInstance.Status), + Message: "incompatible status", + }) } else { machineScope.LinodeMachine.Status.Ready = true - conditions.MarkTrue(machineScope.LinodeMachine, clusterv1.ReadyCondition) + conditions.Set(machineScope.LinodeMachine, metav1.Condition{ + Type: string(clusterv1.ReadyCondition), + Status: metav1.ConditionTrue, + Reason: "LinodeMachineReady", // We have to set the reason to 
not fail object patching + }) } // Clean up after instance creation. @@ -579,7 +697,12 @@ func (r *LinodeMachineReconciler) reconcileDelete( } } - conditions.MarkFalse(machineScope.LinodeMachine, clusterv1.ReadyCondition, clusterv1.DeletedReason, "", "%s", "instance deleted") + conditions.Set(machineScope.LinodeMachine, metav1.Condition{ + Type: string(clusterv1.ReadyCondition), + Status: metav1.ConditionFalse, + Reason: string(clusterv1.DeletedReason), + Message: "instance deleted", + }) r.Recorder.Event(machineScope.LinodeMachine, corev1.EventTypeNormal, clusterv1.DeletedReason, "instance has cleaned up") @@ -620,11 +743,11 @@ func (r *LinodeMachineReconciler) SetupWithManager(mgr ctrl.Manager, options crc Watches( &clusterv1.Cluster{}, handler.EnqueueRequestsFromMapFunc(linodeMachineMapper), - builder.WithPredicates(predicates.ClusterUnpausedAndInfrastructureReady(mgr.GetLogger())), + builder.WithPredicates(predicates.ClusterPausedTransitionsOrInfrastructureReady(mgr.GetScheme(), mgr.GetLogger())), ). // we care about reconciling on metadata updates for LinodeMachines because the OwnerRef for the Machine is needed WithEventFilter(predicate.And( - predicates.ResourceNotPausedAndHasFilterLabel(mgr.GetLogger(), r.WatchFilterValue), + predicates.ResourceNotPausedAndHasFilterLabel(mgr.GetScheme(), mgr.GetLogger(), r.WatchFilterValue), predicate.Funcs{UpdateFunc: func(e event.UpdateEvent) bool { oldObject, okOld := e.ObjectOld.(*infrav1alpha2.LinodeMachine) newObject, okNew := e.ObjectNew.(*infrav1alpha2.LinodeMachine) diff --git a/internal/controller/linodemachine_controller_helpers.go b/internal/controller/linodemachine_controller_helpers.go index 0184a2480..9d9f3e158 100644 --- a/internal/controller/linodemachine_controller_helpers.go +++ b/internal/controller/linodemachine_controller_helpers.go @@ -39,9 +39,8 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/utils/ptr" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" - cerrs "sigs.k8s.io/cluster-api/errors" kutil "sigs.k8s.io/cluster-api/util" - "sigs.k8s.io/cluster-api/util/conditions" + conditions "sigs.k8s.io/cluster-api/util/conditions/v1beta2" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/handler" @@ -673,14 +672,12 @@ func createDisks(ctx context.Context, logger logr.Logger, machineScope *scope.Ma logger.Error(err, "Failed to create disk", "DiskLabel", label) } - conditions.MarkFalse( - machineScope.LinodeMachine, - ConditionPreflightAdditionalDisksCreated, - string(cerrs.CreateMachineError), - clusterv1.ConditionSeverityWarning, - "%s", - err.Error(), - ) + conditions.Set(machineScope.LinodeMachine, metav1.Condition{ + Type: ConditionPreflightAdditionalDisksCreated, + Status: metav1.ConditionFalse, + Reason: util.CreateError, + Message: err.Error(), + }) return err } disk.DiskID = linodeDisk.ID @@ -690,7 +687,11 @@ func createDisks(ctx context.Context, logger logr.Logger, machineScope *scope.Ma if err != nil { return err } - conditions.MarkTrue(machineScope.LinodeMachine, ConditionPreflightAdditionalDisksCreated) + conditions.Set(machineScope.LinodeMachine, metav1.Condition{ + Type: ConditionPreflightAdditionalDisksCreated, + Status: metav1.ConditionTrue, + Reason: "AdditionalDisksCreated", + }) return nil } @@ -703,12 +704,22 @@ func resizeRootDisk(ctx context.Context, logger logr.Logger, machineScope *scope if err != nil { logger.Error(err, "Failed to get default instance configuration") - conditions.MarkFalse(machineScope.LinodeMachine, 
ConditionPreflightRootDiskResized, string(cerrs.CreateMachineError), clusterv1.ConditionSeverityWarning, "%s", err.Error()) + conditions.Set(machineScope.LinodeMachine, metav1.Condition{ + Type: ConditionPreflightRootDiskResized, + Status: metav1.ConditionFalse, + Reason: util.CreateError, + Message: err.Error(), + }) return err } if instanceConfig.Devices.SDA == nil { - conditions.MarkFalse(machineScope.LinodeMachine, ConditionPreflightRootDiskResized, string(cerrs.CreateMachineError), clusterv1.ConditionSeverityWarning, "root disk not yet ready") + conditions.Set(machineScope.LinodeMachine, metav1.Condition{ + Type: ConditionPreflightRootDiskResized, + Status: metav1.ConditionFalse, + Reason: util.CreateError, + Message: "root disk not yet ready", + }) return errors.New("root disk not yet ready") } @@ -721,7 +732,12 @@ func resizeRootDisk(ctx context.Context, logger logr.Logger, machineScope *scope if err != nil { logger.Error(err, "Failed to get root disk for instance") - conditions.MarkFalse(machineScope.LinodeMachine, ConditionPreflightRootDiskResizing, string(cerrs.CreateMachineError), clusterv1.ConditionSeverityWarning, "%s", err.Error()) + conditions.Set(machineScope.LinodeMachine, metav1.Condition{ + Type: ConditionPreflightRootDiskResizing, + Status: metav1.ConditionFalse, + Reason: util.CreateError, + Message: err.Error(), + }) return err } @@ -736,14 +752,27 @@ func resizeRootDisk(ctx context.Context, logger logr.Logger, machineScope *scope } if err := machineScope.LinodeClient.ResizeInstanceDisk(ctx, linodeInstanceID, rootDiskID, diskSize); err != nil { - conditions.MarkFalse(machineScope.LinodeMachine, ConditionPreflightRootDiskResizing, string(cerrs.CreateMachineError), clusterv1.ConditionSeverityWarning, "%s", err.Error()) + conditions.Set(machineScope.LinodeMachine, metav1.Condition{ + Type: ConditionPreflightRootDiskResizing, + Status: metav1.ConditionFalse, + Reason: util.CreateError, + Message: err.Error(), + }) return err } - conditions.MarkTrue(machineScope.LinodeMachine, ConditionPreflightRootDiskResizing) + conditions.Set(machineScope.LinodeMachine, metav1.Condition{ + Type: ConditionPreflightRootDiskResizing, + Status: metav1.ConditionTrue, + Reason: "RootDiskResizing", + }) } conditions.Delete(machineScope.LinodeMachine, ConditionPreflightRootDiskResizing) - conditions.MarkTrue(machineScope.LinodeMachine, ConditionPreflightRootDiskResized) + conditions.Set(machineScope.LinodeMachine, metav1.Condition{ + Type: ConditionPreflightRootDiskResized, + Status: metav1.ConditionTrue, + Reason: "RootDiskResized", + }) return nil } diff --git a/internal/controller/linodemachine_controller_test.go b/internal/controller/linodemachine_controller_test.go index 7434c19c5..ec1ae9a0a 100644 --- a/internal/controller/linodemachine_controller_test.go +++ b/internal/controller/linodemachine_controller_test.go @@ -33,8 +33,7 @@ import ( "k8s.io/client-go/tools/record" "k8s.io/utils/ptr" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" - cerrs "sigs.k8s.io/cluster-api/errors" - "sigs.k8s.io/cluster-api/util/conditions" + conditions "sigs.k8s.io/cluster-api/util/conditions/v1beta2" "sigs.k8s.io/cluster-api/util/patch" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/log/zap" @@ -632,8 +631,8 @@ var _ = Describe("create", Label("machine", "create"), func() { Expect(rutil.ConditionTrue(&linodeMachine, ConditionPreflightCreated)).To(BeFalse()) condition := conditions.Get(&linodeMachine, ConditionPreflightCreated) Expect(condition).ToNot(BeNil()) - 
Expect(condition.Status).To(Equal(corev1.ConditionFalse)) - Expect(condition.Reason).To(Equal(string(cerrs.CreateMachineError))) + Expect(condition.Status).To(Equal(metav1.ConditionFalse)) + Expect(condition.Reason).To(Equal(util.CreateError)) Expect(condition.Message).To(ContainSubstring("time is up")) }) }) @@ -1354,7 +1353,11 @@ var _ = Describe("machine-lifecycle", Ordered, Label("machine", "machine-lifecyc Namespace: namespace, } linodeMachine.Status.CloudinitMetadataSupport = true - conditions.MarkTrue(mScope.LinodeMachine, ConditionPreflightMetadataSupportConfigured) + conditions.Set(mScope.LinodeMachine, metav1.Condition{ + Type: ConditionPreflightMetadataSupportConfigured, + Status: metav1.ConditionTrue, + Reason: "LinodeMetadataSupportConfigured", // We have to set the reason to not fail object patching + }) }), OneOf( Path(Result("firewall ready condition is not set", func(ctx context.Context, mck Mock) { @@ -1673,7 +1676,7 @@ var _ = Describe("machine-update", Ordered, Label("machine", "machine-update"), res, err = reconciler.reconcile(ctx, logr.Logger{}, mScope) Expect(err).NotTo(HaveOccurred()) Expect(*linodeMachine.Status.InstanceState).To(Equal(linodego.InstanceRunning)) - Expect(rutil.ConditionTrue(linodeMachine, clusterv1.ReadyCondition)).To(BeTrue()) + Expect(rutil.ConditionTrue(linodeMachine, string(clusterv1.ReadyCondition))).To(BeTrue()) })), ), ) diff --git a/internal/controller/linodeobjectstoragebucket_controller.go b/internal/controller/linodeobjectstoragebucket_controller.go index e749e022e..58e3f2c7e 100644 --- a/internal/controller/linodeobjectstoragebucket_controller.go +++ b/internal/controller/linodeobjectstoragebucket_controller.go @@ -29,7 +29,7 @@ import ( "k8s.io/client-go/tools/record" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" kutil "sigs.k8s.io/cluster-api/util" - "sigs.k8s.io/cluster-api/util/conditions" + conditions "sigs.k8s.io/cluster-api/util/conditions/v1beta2" "sigs.k8s.io/cluster-api/util/predicates" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/builder" @@ -125,7 +125,12 @@ func (r *LinodeObjectStorageBucketReconciler) reconcile(ctx context.Context, bSc func (r *LinodeObjectStorageBucketReconciler) setFailure(bScope *scope.ObjectStorageBucketScope, err error) { bScope.Bucket.Status.FailureMessage = util.Pointer(err.Error()) r.Recorder.Event(bScope.Bucket, corev1.EventTypeWarning, "Failed", err.Error()) - conditions.MarkFalse(bScope.Bucket, clusterv1.ReadyCondition, "Failed", "", "%s", err.Error()) + conditions.Set(bScope.Bucket, metav1.Condition{ + Type: string(clusterv1.ReadyCondition), + Status: metav1.ConditionFalse, + Reason: "Failed", + Message: err.Error(), + }) } func (r *LinodeObjectStorageBucketReconciler) reconcileApply(ctx context.Context, bScope *scope.ObjectStorageBucketScope) error { @@ -147,7 +152,11 @@ func (r *LinodeObjectStorageBucketReconciler) reconcileApply(ctx context.Context r.Recorder.Event(bScope.Bucket, corev1.EventTypeNormal, "Synced", "Object storage bucket synced") bScope.Bucket.Status.Ready = true - conditions.MarkTrue(bScope.Bucket, clusterv1.ReadyCondition) + conditions.Set(bScope.Bucket, metav1.Condition{ + Type: string(clusterv1.ReadyCondition), + Status: metav1.ConditionTrue, + Reason: "ObjectStorageBucketReady", // We have to set the reason to not fail object patching + }) return nil } @@ -171,13 +180,13 @@ func (r *LinodeObjectStorageBucketReconciler) SetupWithManager(mgr ctrl.Manager, WithOptions(options). Owns(&corev1.Secret{}). 
WithEventFilter(predicate.And( - predicates.ResourceHasFilterLabel(mgr.GetLogger(), r.WatchFilterValue), + predicates.ResourceHasFilterLabel(mgr.GetScheme(), mgr.GetLogger(), r.WatchFilterValue), predicate.GenerationChangedPredicate{}, )). Watches( &clusterv1.Cluster{}, handler.EnqueueRequestsFromMapFunc(linodeObjectStorageBucketMapper), - builder.WithPredicates(predicates.ClusterUnpausedAndInfrastructureReady(mgr.GetLogger())), + builder.WithPredicates(predicates.ClusterPausedTransitionsOrInfrastructureReady(mgr.GetScheme(), mgr.GetLogger())), ).Complete(wrappedruntimereconciler.NewRuntimeReconcilerWithTracing(r, wrappedruntimereconciler.DefaultDecorator())) if err != nil { return fmt.Errorf("failed to build controller: %w", err) diff --git a/internal/controller/linodeobjectstoragebucket_controller_test.go b/internal/controller/linodeobjectstoragebucket_controller_test.go index fef9464f0..b1ce1b459 100644 --- a/internal/controller/linodeobjectstoragebucket_controller_test.go +++ b/internal/controller/linodeobjectstoragebucket_controller_test.go @@ -104,7 +104,7 @@ var _ = Describe("lifecycle", Ordered, Label("bucket", "lifecycle"), func() { Expect(obj.Status.Ready).To(BeTrue()) Expect(obj.Status.FailureMessage).To(BeNil()) Expect(obj.Status.Conditions).To(HaveLen(1)) - Expect(obj.Status.Conditions[0].Type).To(Equal(clusterv1.ReadyCondition)) + Expect(obj.Status.Conditions[0].Type).To(Equal(string(clusterv1.ReadyCondition))) Expect(*obj.Status.Hostname).To(Equal("hostname")) Expect(obj.Status.CreationTime).NotTo(BeNil()) diff --git a/internal/controller/linodeobjectstoragekey_controller.go b/internal/controller/linodeobjectstoragekey_controller.go index 4507e771b..8075fbe59 100644 --- a/internal/controller/linodeobjectstoragekey_controller.go +++ b/internal/controller/linodeobjectstoragekey_controller.go @@ -32,7 +32,7 @@ import ( "k8s.io/client-go/tools/record" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" kutil "sigs.k8s.io/cluster-api/util" - "sigs.k8s.io/cluster-api/util/conditions" + conditions "sigs.k8s.io/cluster-api/util/conditions/v1beta2" "sigs.k8s.io/cluster-api/util/predicates" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/builder" @@ -147,7 +147,12 @@ func (r *LinodeObjectStorageKeyReconciler) reconcile(ctx context.Context, keySco func (r *LinodeObjectStorageKeyReconciler) setFailure(keyScope *scope.ObjectStorageKeyScope, err error) { keyScope.Key.Status.FailureMessage = util.Pointer(err.Error()) r.Recorder.Event(keyScope.Key, corev1.EventTypeWarning, "Failed", err.Error()) - conditions.MarkFalse(keyScope.Key, clusterv1.ReadyCondition, "Failed", clusterv1.ConditionSeverityError, "%s", err.Error()) + conditions.Set(keyScope.Key, metav1.Condition{ + Type: string(clusterv1.ReadyCondition), + Status: metav1.ConditionFalse, + Reason: "Failed", + Message: err.Error(), + }) } func (r *LinodeObjectStorageKeyReconciler) reconcileApply(ctx context.Context, keyScope *scope.ObjectStorageKeyScope) error { @@ -239,7 +244,11 @@ func (r *LinodeObjectStorageKeyReconciler) reconcileApply(ctx context.Context, k keyScope.Key.Status.LastKeyGeneration = &keyScope.Key.Spec.KeyGeneration keyScope.Key.Status.Ready = true - conditions.MarkTrue(keyScope.Key, clusterv1.ReadyCondition) + conditions.Set(keyScope.Key, metav1.Condition{ + Type: string(clusterv1.ReadyCondition), + Status: metav1.ConditionTrue, + Reason: "LinodeObjectStorageKeySynced", // We have to set the reason to not fail object patching + }) r.Recorder.Event(keyScope.Key, corev1.EventTypeNormal, "Synced", 
"Object storage key synced") return nil @@ -303,13 +312,13 @@ func (r *LinodeObjectStorageKeyReconciler) SetupWithManager(mgr ctrl.Manager, op WithOptions(options). Owns(&corev1.Secret{}). WithEventFilter(predicate.And( - predicates.ResourceHasFilterLabel(mgr.GetLogger(), r.WatchFilterValue), + predicates.ResourceHasFilterLabel(mgr.GetScheme(), mgr.GetLogger(), r.WatchFilterValue), predicate.GenerationChangedPredicate{}, )). Watches( &clusterv1.Cluster{}, handler.EnqueueRequestsFromMapFunc(linodeObjectStorageKeyMapper), - builder.WithPredicates(predicates.ClusterUnpausedAndInfrastructureReady(mgr.GetLogger())), + builder.WithPredicates(predicates.ClusterPausedTransitionsOrInfrastructureReady(mgr.GetScheme(), mgr.GetLogger())), ).Complete(wrappedruntimereconciler.NewRuntimeReconcilerWithTracing(r, wrappedruntimereconciler.DefaultDecorator())) if err != nil { return fmt.Errorf("failed to build controller: %w", err) diff --git a/internal/controller/linodeobjectstoragekey_controller_test.go b/internal/controller/linodeobjectstoragekey_controller_test.go index 245c9af05..38a86d9d7 100644 --- a/internal/controller/linodeobjectstoragekey_controller_test.go +++ b/internal/controller/linodeobjectstoragekey_controller_test.go @@ -124,7 +124,7 @@ var _ = Describe("lifecycle", Ordered, Label("key", "key-lifecycle"), func() { Expect(key.Status.Ready).To(BeTrue()) Expect(key.Status.FailureMessage).To(BeNil()) Expect(key.Status.Conditions).To(HaveLen(1)) - Expect(key.Status.Conditions[0].Type).To(Equal(clusterv1.ReadyCondition)) + Expect(key.Status.Conditions[0].Type).To(Equal(string(clusterv1.ReadyCondition))) Expect(key.Status.CreationTime).NotTo(BeNil()) Expect(*key.Status.LastKeyGeneration).To(Equal(key.Spec.KeyGeneration)) Expect(*key.Status.LastKeyGeneration).To(Equal(0)) diff --git a/internal/controller/linodeplacementgroup_controller.go b/internal/controller/linodeplacementgroup_controller.go index 2bc0f0cdf..38fab0503 100644 --- a/internal/controller/linodeplacementgroup_controller.go +++ b/internal/controller/linodeplacementgroup_controller.go @@ -27,12 +27,13 @@ import ( "github.com/linode/linodego" corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" utilerrors "k8s.io/apimachinery/pkg/util/errors" "k8s.io/client-go/tools/record" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" kutil "sigs.k8s.io/cluster-api/util" - "sigs.k8s.io/cluster-api/util/conditions" + conditions "sigs.k8s.io/cluster-api/util/conditions/v1beta2" "sigs.k8s.io/cluster-api/util/predicates" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/builder" @@ -121,7 +122,12 @@ func (r *LinodePlacementGroupReconciler) reconcile( pgScope.LinodePlacementGroup.Status.FailureReason = util.Pointer(failureReason) pgScope.LinodePlacementGroup.Status.FailureMessage = util.Pointer(err.Error()) - conditions.MarkFalse(pgScope.LinodePlacementGroup, clusterv1.ReadyCondition, string(failureReason), "", "%s", err.Error()) + conditions.Set(pgScope.LinodePlacementGroup, metav1.Condition{ + Type: string(clusterv1.ReadyCondition), + Status: metav1.ConditionFalse, + Reason: string(failureReason), + Message: err.Error(), + }) r.Recorder.Event(pgScope.LinodePlacementGroup, corev1.EventTypeWarning, string(failureReason), err.Error()) } @@ -174,7 +180,7 @@ func (r *LinodePlacementGroupReconciler) reconcile( failureReason = infrav1alpha2.CreatePlacementGroupError err = r.reconcileCreate(ctx, logger, pgScope) - if err != nil && 
!reconciler.HasStaleCondition(pgScope.LinodePlacementGroup, clusterv1.ReadyCondition, reconciler.DefaultTimeout(r.ReconcileTimeout, reconciler.DefaultPGControllerReconcileTimeout)) { + if err != nil && !reconciler.HasStaleCondition(pgScope.LinodePlacementGroup, string(clusterv1.ReadyCondition), reconciler.DefaultTimeout(r.ReconcileTimeout, reconciler.DefaultPGControllerReconcileTimeout)) { logger.Info("re-queuing Placement Group creation") res = ctrl.Result{RequeueAfter: reconciler.DefaultPGControllerReconcilerDelay} @@ -190,7 +196,12 @@ func (r *LinodePlacementGroupReconciler) reconcileCreate(ctx context.Context, lo if err := pgScope.AddCredentialsRefFinalizer(ctx); err != nil { logger.Error(err, "Failed to update credentials secret") - conditions.MarkFalse(pgScope.LinodePlacementGroup, clusterv1.ReadyCondition, string(infrav1alpha2.CreatePlacementGroupError), "", "%s", err.Error()) + conditions.Set(pgScope.LinodePlacementGroup, metav1.Condition{ + Type: string(clusterv1.ReadyCondition), + Status: metav1.ConditionFalse, + Reason: string(infrav1alpha2.CreatePlacementGroupError), + Message: err.Error(), + }) r.Recorder.Event(pgScope.LinodePlacementGroup, corev1.EventTypeWarning, string(infrav1alpha2.CreatePlacementGroupError), err.Error()) return err @@ -198,7 +209,12 @@ func (r *LinodePlacementGroupReconciler) reconcileCreate(ctx context.Context, lo if err := r.reconcilePlacementGroup(ctx, pgScope, logger); err != nil { logger.Error(err, "Failed to create Placement Group") - conditions.MarkFalse(pgScope.LinodePlacementGroup, clusterv1.ReadyCondition, string(infrav1alpha2.CreatePlacementGroupError), "", "%s", err.Error()) + conditions.Set(pgScope.LinodePlacementGroup, metav1.Condition{ + Type: string(clusterv1.ReadyCondition), + Status: metav1.ConditionFalse, + Reason: string(infrav1alpha2.CreatePlacementGroupError), + Message: err.Error(), + }) r.Recorder.Event(pgScope.LinodePlacementGroup, corev1.EventTypeWarning, string(infrav1alpha2.CreatePlacementGroupError), err.Error()) return err @@ -264,7 +280,12 @@ func (r *LinodePlacementGroupReconciler) reconcileDelete(ctx context.Context, lo logger.Info("Placement Group ID is missing, nothing to do") } - conditions.MarkFalse(pgScope.LinodePlacementGroup, clusterv1.ReadyCondition, clusterv1.DeletedReason, "", "%s", "Placement Group deleted") + conditions.Set(pgScope.LinodePlacementGroup, metav1.Condition{ + Type: string(clusterv1.ReadyCondition), + Status: metav1.ConditionFalse, + Reason: string(clusterv1.DeletedReason), + Message: "Placement Group deleted", + }) r.Recorder.Event(pgScope.LinodePlacementGroup, corev1.EventTypeNormal, clusterv1.DeletedReason, "Placement Group has cleaned up") @@ -308,7 +329,7 @@ func (r *LinodePlacementGroupReconciler) SetupWithManager(mgr ctrl.Manager, opti For(&infrav1alpha2.LinodePlacementGroup{}). WithOptions(options). 
diff --git a/internal/controller/linodevpc_controller.go b/internal/controller/linodevpc_controller.go
index 925151991..589ca0f51 100644
--- a/internal/controller/linodevpc_controller.go
+++ b/internal/controller/linodevpc_controller.go
@@ -26,12 +26,13 @@ import (
 	"github.com/go-logr/logr"
 	corev1 "k8s.io/api/core/v1"
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
 	utilerrors "k8s.io/apimachinery/pkg/util/errors"
 	"k8s.io/client-go/tools/record"
 	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
 	kutil "sigs.k8s.io/cluster-api/util"
-	"sigs.k8s.io/cluster-api/util/conditions"
+	conditions "sigs.k8s.io/cluster-api/util/conditions/v1beta2"
 	"sigs.k8s.io/cluster-api/util/predicates"
 	ctrl "sigs.k8s.io/controller-runtime"
 	"sigs.k8s.io/controller-runtime/pkg/builder"
@@ -127,7 +128,12 @@ func (r *LinodeVPCReconciler) reconcile(
 			vpcScope.LinodeVPC.Status.FailureReason = util.Pointer(failureReason)
 			vpcScope.LinodeVPC.Status.FailureMessage = util.Pointer(err.Error())
 
-			conditions.MarkFalse(vpcScope.LinodeVPC, clusterv1.ReadyCondition, string(failureReason), "", "%s", err.Error())
+			conditions.Set(vpcScope.LinodeVPC, metav1.Condition{
+				Type:    string(clusterv1.ReadyCondition),
+				Status:  metav1.ConditionFalse,
+				Reason:  string(failureReason),
+				Message: err.Error(),
+			})
 
 			r.Recorder.Event(vpcScope.LinodeVPC, corev1.EventTypeWarning, string(failureReason), err.Error())
 		}
@@ -171,7 +177,7 @@ func (r *LinodeVPCReconciler) reconcile(
 		logger = logger.WithValues("vpcID", *vpcScope.LinodeVPC.Spec.VPCID)
 
 		err = r.reconcileUpdate(ctx, logger, vpcScope)
-		if err != nil && !reconciler.HasStaleCondition(vpcScope.LinodeVPC, clusterv1.ReadyCondition,
+		if err != nil && !reconciler.HasStaleCondition(vpcScope.LinodeVPC, string(clusterv1.ReadyCondition),
 			reconciler.DefaultTimeout(r.ReconcileTimeout, reconciler.DefaultVPCControllerReconcileTimeout)) {
 			logger.Info("re-queuing VPC update")
@@ -186,7 +192,7 @@ func (r *LinodeVPCReconciler) reconcile(
 		failureReason = infrav1alpha2.CreateVPCError
 
 		err = r.reconcileCreate(ctx, logger, vpcScope)
-		if err != nil && !reconciler.HasStaleCondition(vpcScope.LinodeVPC, clusterv1.ReadyCondition,
+		if err != nil && !reconciler.HasStaleCondition(vpcScope.LinodeVPC, string(clusterv1.ReadyCondition),
 			reconciler.DefaultTimeout(r.ReconcileTimeout, reconciler.DefaultVPCControllerReconcileTimeout)) {
 			logger.Info("re-queuing VPC creation")
@@ -203,7 +209,12 @@ func (r *LinodeVPCReconciler) reconcileCreate(ctx context.Context, logger logr.L
 	if err := vpcScope.AddCredentialsRefFinalizer(ctx); err != nil {
 		logger.Error(err, "Failed to update credentials secret")
 
-		conditions.MarkFalse(vpcScope.LinodeVPC, clusterv1.ReadyCondition, string(infrav1alpha2.CreateVPCError), "", "%s", err.Error())
+		conditions.Set(vpcScope.LinodeVPC, metav1.Condition{
+			Type:    string(clusterv1.ReadyCondition),
+			Status:  metav1.ConditionFalse,
+			Reason:  string(infrav1alpha2.CreateVPCError),
+			Message: err.Error(),
+		})
 
 		r.Recorder.Event(vpcScope.LinodeVPC, corev1.EventTypeWarning, string(infrav1alpha2.CreateVPCError), err.Error())
 
 		return err
@@ -211,7 +222,12 @@ func (r *LinodeVPCReconciler) reconcileCreate(ctx context.Context, logger logr.L
 	if err := reconcileVPC(ctx, vpcScope, logger); err != nil {
 		logger.Error(err, "Failed to create VPC")
 
-		conditions.MarkFalse(vpcScope.LinodeVPC, clusterv1.ReadyCondition, string(infrav1alpha2.CreateVPCError), "", "%s", err.Error())
+		conditions.Set(vpcScope.LinodeVPC, metav1.Condition{
+			Type:    string(clusterv1.ReadyCondition),
+			Status:  metav1.ConditionFalse,
+			Reason:  string(infrav1alpha2.CreateVPCError),
+			Message: err.Error(),
+		})
 
 		r.Recorder.Event(vpcScope.LinodeVPC, corev1.EventTypeWarning, string(infrav1alpha2.CreateVPCError), err.Error())
 
 		return err
@@ -230,7 +246,12 @@ func (r *LinodeVPCReconciler) reconcileUpdate(ctx context.Context, logger logr.L
 	if err := reconcileVPC(ctx, vpcScope, logger); err != nil {
 		logger.Error(err, "Failed to update VPC")
 
-		conditions.MarkFalse(vpcScope.LinodeVPC, clusterv1.ReadyCondition, string(infrav1alpha2.UpdateVPCError), "", "%s", err.Error())
+		conditions.Set(vpcScope.LinodeVPC, metav1.Condition{
+			Type:    string(clusterv1.ReadyCondition),
+			Status:  metav1.ConditionFalse,
+			Reason:  string(infrav1alpha2.UpdateVPCError),
+			Message: err.Error(),
+		})
 
 		r.Recorder.Event(vpcScope.LinodeVPC, corev1.EventTypeWarning, string(infrav1alpha2.UpdateVPCError), err.Error())
 
 		return err
@@ -272,7 +293,12 @@ func (r *LinodeVPCReconciler) reconcileDelete(ctx context.Context, logger logr.L
 			return ctrl.Result{RequeueAfter: reconciler.DefaultVPCControllerReconcileDelay}, nil
 		}
 
-		conditions.MarkFalse(vpcScope.LinodeVPC, clusterv1.ReadyCondition, clusterv1.DeletionFailedReason, "", "%s", "skipped due to node(s) attached")
+		conditions.Set(vpcScope.LinodeVPC, metav1.Condition{
+			Type:    string(clusterv1.ReadyCondition),
+			Status:  metav1.ConditionFalse,
+			Reason:  string(clusterv1.DeletionFailedReason),
+			Message: "skipped due to node(s) attached",
+		})
 
 		return ctrl.Result{}, errors.New("will not delete VPC with node(s) attached")
 	}
@@ -294,7 +320,12 @@ func (r *LinodeVPCReconciler) reconcileDelete(ctx context.Context, logger logr.L
 		logger.Info("VPC ID is missing, nothing to do")
 	}
 
-	conditions.MarkFalse(vpcScope.LinodeVPC, clusterv1.ReadyCondition, clusterv1.DeletedReason, "", "%s", "VPC deleted")
+	conditions.Set(vpcScope.LinodeVPC, metav1.Condition{
+		Type:    string(clusterv1.ReadyCondition),
+		Status:  metav1.ConditionFalse,
+		Reason:  string(clusterv1.DeletedReason),
+		Message: "VPC deleted",
+	})
 
 	r.Recorder.Event(vpcScope.LinodeVPC, corev1.EventTypeNormal, clusterv1.DeletedReason, "VPC has cleaned up")
@@ -338,7 +369,7 @@ func (r *LinodeVPCReconciler) SetupWithManager(mgr ctrl.Manager, options crcontr
 		For(&infrav1alpha2.LinodeVPC{}).
 		WithOptions(options).
 		WithEventFilter(predicate.And(
-			predicates.ResourceNotPausedAndHasFilterLabel(mgr.GetLogger(), r.WatchFilterValue),
+			predicates.ResourceNotPausedAndHasFilterLabel(mgr.GetScheme(), mgr.GetLogger(), r.WatchFilterValue),
 			predicate.GenerationChangedPredicate{},
 			predicate.Funcs{UpdateFunc: func(e event.UpdateEvent) bool {
 				oldObject, okOld := e.ObjectOld.(*infrav1alpha2.LinodeVPC)
@@ -353,7 +384,7 @@ func (r *LinodeVPCReconciler) SetupWithManager(mgr ctrl.Manager, options crcontr
 		Watches(
 			&clusterv1.Cluster{},
 			handler.EnqueueRequestsFromMapFunc(linodeVPCMapper),
-			builder.WithPredicates(predicates.ClusterUnpausedAndInfrastructureReady(mgr.GetLogger())),
+			builder.WithPredicates(predicates.ClusterPausedTransitionsOrInfrastructureReady(mgr.GetScheme(), mgr.GetLogger())),
 		).Complete(wrappedruntimereconciler.NewRuntimeReconcilerWithTracing(r, wrappedruntimereconciler.DefaultDecorator()))
 	if err != nil {
 		return fmt.Errorf("failed to build controller: %w", err)
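Because the status now stores plain metav1.Condition values, code outside the controllers can rely on the standard apimachinery helpers rather than the cluster-api getters. A rough example of checking readiness on a LinodeVPC; the module import path is assumed rather than taken from this patch:

package example

import (
	"k8s.io/apimachinery/pkg/api/meta"
	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"

	infrav1alpha2 "github.com/linode/cluster-api-provider-linode/api/v1alpha2"
)

// vpcIsReady reports whether the LinodeVPC carries a true Ready condition,
// assuming Status.Conditions is []metav1.Condition after this change.
func vpcIsReady(vpc *infrav1alpha2.LinodeVPC) bool {
	return meta.IsStatusConditionTrue(vpc.Status.Conditions, string(clusterv1.ReadyCondition))
}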
diff --git a/internal/controller/linodevpc_controller_test.go b/internal/controller/linodevpc_controller_test.go
index 0bd13c187..f07bf45b4 100644
--- a/internal/controller/linodevpc_controller_test.go
+++ b/internal/controller/linodevpc_controller_test.go
@@ -27,7 +27,7 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/utils/ptr"
 	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
-	"sigs.k8s.io/cluster-api/util/conditions"
+	conditions "sigs.k8s.io/cluster-api/util/conditions/v1beta2"
 	"sigs.k8s.io/cluster-api/util/patch"
 
 	"sigs.k8s.io/controller-runtime/pkg/client"
@@ -165,7 +165,12 @@ var _ = Describe("lifecycle", Ordered, Label("vpc", "lifecycle"), func() {
 		}),
 		OneOf(
 			Path(Result("update requeues", func(ctx context.Context, mck Mock) {
-				conditions.MarkFalse(vpcScope.LinodeVPC, clusterv1.ReadyCondition, "test", clusterv1.ConditionSeverityWarning, "%s", "test")
+				conditions.Set(vpcScope.LinodeVPC, metav1.Condition{
+					Type:    string(clusterv1.ReadyCondition),
+					Status:  metav1.ConditionFalse,
+					Reason:  "test",
+					Message: "test",
+				})
 				res, err := reconciler.reconcile(ctx, mck.Logger(), &vpcScope)
 				Expect(err).NotTo(HaveOccurred())
 				Expect(res.RequeueAfter).To(Equal(rec.DefaultVPCControllerReconcileDelay))
diff --git a/util/errors.go b/util/errors.go
index 5d76ced8f..b821092c2 100644
--- a/util/errors.go
+++ b/util/errors.go
@@ -24,3 +24,11 @@ var (
 	// ErrRateLimit indicates hitting linode API rate limits
 	ErrRateLimit = errors.New("rate-limit exceeded")
 )
+
+// List of failure reasons to use in the status fields of our resources
+var (
+	CreateError  = "CreateError"
+	DeleteError  = "DeleteError"
+	UpdateError  = "UpdateError"
+	UnknownError = "UnknownError"
+)
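The reason strings added to util/errors.go above are deliberately plain strings, so the same values can populate both the string-typed FailureReason status fields and metav1.Condition reasons. A hypothetical call site, sketched under the assumption that FailureReason is a *string as in the other status types touched by this patch; "scope" and "err" are placeholders:

// Hypothetical usage of the new reason strings; not code from this repository.
scope.LinodeVPC.Status.FailureReason = util.Pointer(util.CreateError)
conditions.Set(scope.LinodeVPC, metav1.Condition{
	Type:    string(clusterv1.ReadyCondition),
	Status:  metav1.ConditionFalse,
	Reason:  util.CreateError,
	Message: err.Error(),
})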
diff --git a/util/reconciler/conditions.go b/util/reconciler/conditions.go
index 872f9558f..78aca70d1 100644
--- a/util/reconciler/conditions.go
+++ b/util/reconciler/conditions.go
@@ -3,16 +3,15 @@ package reconciler
 import (
 	"time"
 
-	corev1 "k8s.io/api/core/v1"
-	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
-	"sigs.k8s.io/cluster-api/util/conditions"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	conditions "sigs.k8s.io/cluster-api/util/conditions/v1beta2"
 )
 
-func ConditionTrue(from conditions.Getter, typ clusterv1.ConditionType) bool {
-	return HasConditionStatus(from, typ, "True")
+func ConditionTrue(from conditions.Getter, typ string) bool {
+	return HasConditionStatus(from, typ, metav1.ConditionTrue)
 }
 
-func HasConditionStatus(from conditions.Getter, typ clusterv1.ConditionType, status corev1.ConditionStatus) bool {
+func HasConditionStatus(from conditions.Getter, typ string, status metav1.ConditionStatus) bool {
 	cond := conditions.Get(from, typ)
 	if cond == nil {
 		return false
@@ -21,16 +20,7 @@ func HasConditionStatus(from conditions.Getter, typ clusterv1.ConditionType, sta
 	return cond.Status == status
 }
 
-func HasConditionSeverity(from conditions.Getter, typ clusterv1.ConditionType, severity clusterv1.ConditionSeverity) bool {
-	cond := conditions.Get(from, typ)
-	if cond == nil {
-		return false
-	}
-
-	return cond.Severity == severity
-}
-
-func HasStaleCondition(from conditions.Getter, typ clusterv1.ConditionType, timeout time.Duration) bool {
+func HasStaleCondition(from conditions.Getter, typ string, timeout time.Duration) bool {
 	cond := conditions.Get(from, typ)
 	if cond == nil {
 		return false