From bbf481a6584e4c695481ef47c8e238088a815ca0 Mon Sep 17 00:00:00 2001 From: Kim-Norman Sahm Date: Wed, 6 Nov 2024 17:10:08 +0100 Subject: [PATCH] Added kind ScalingPolicy (#109) * add woop_policy Signed-off-by: kisahm * Fix ScalingPolicy Signed-off-by: kisahm --------- Signed-off-by: kisahm --- .../v1alpha1/zz_generated.conversion_hubs.go | 10 + .../v1alpha1/zz_generated.deepcopy.go | 919 ++++++++++++++++++ .../workload/v1alpha1/zz_generated.managed.go | 68 ++ .../v1alpha1/zz_generated.managedlist.go | 17 + .../workload/v1alpha1/zz_groupversion_info.go | 32 + .../v1alpha1/zz_scalingpolicy_terraformed.go | 129 +++ .../v1alpha1/zz_scalingpolicy_types.go | 535 ++++++++++ apis/zz_register.go | 2 + config/external_name.go | 1 + config/workloadscalingpolicy/config.go | 13 + .../workload/v1alpha1/scalingpolicy.yaml | 35 + .../workload/scalingpolicy/zz_controller.go | 87 ++ internal/controller/zz_setup.go | 2 + ...oad.castai.upbound.io_scalingpolicies.yaml | 850 ++++++++++++++++ 14 files changed, 2700 insertions(+) create mode 100755 apis/workload/v1alpha1/zz_generated.conversion_hubs.go create mode 100644 apis/workload/v1alpha1/zz_generated.deepcopy.go create mode 100644 apis/workload/v1alpha1/zz_generated.managed.go create mode 100644 apis/workload/v1alpha1/zz_generated.managedlist.go create mode 100755 apis/workload/v1alpha1/zz_groupversion_info.go create mode 100755 apis/workload/v1alpha1/zz_scalingpolicy_terraformed.go create mode 100755 apis/workload/v1alpha1/zz_scalingpolicy_types.go create mode 100644 config/workloadscalingpolicy/config.go create mode 100644 examples-generated/workload/v1alpha1/scalingpolicy.yaml create mode 100755 internal/controller/workload/scalingpolicy/zz_controller.go create mode 100644 package/crds/workload.castai.upbound.io_scalingpolicies.yaml diff --git a/apis/workload/v1alpha1/zz_generated.conversion_hubs.go b/apis/workload/v1alpha1/zz_generated.conversion_hubs.go new file mode 100755 index 0000000..ee6ed62 --- /dev/null +++ b/apis/workload/v1alpha1/zz_generated.conversion_hubs.go @@ -0,0 +1,10 @@ +/* +Copyright 2022 Upbound Inc. +*/ + +// Code generated by upjet. DO NOT EDIT. + +package v1alpha1 + +// Hub marks this type as a conversion hub. +func (tr *ScalingPolicy) Hub() {} diff --git a/apis/workload/v1alpha1/zz_generated.deepcopy.go b/apis/workload/v1alpha1/zz_generated.deepcopy.go new file mode 100644 index 0000000..878c916 --- /dev/null +++ b/apis/workload/v1alpha1/zz_generated.deepcopy.go @@ -0,0 +1,919 @@ +//go:build !ignore_autogenerated + +/* +Copyright 2022 Upbound Inc. +*/ + +// Code generated by controller-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AntiAffinityInitParameters) DeepCopyInto(out *AntiAffinityInitParameters) { + *out = *in + if in.ConsiderAntiAffinity != nil { + in, out := &in.ConsiderAntiAffinity, &out.ConsiderAntiAffinity + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AntiAffinityInitParameters. +func (in *AntiAffinityInitParameters) DeepCopy() *AntiAffinityInitParameters { + if in == nil { + return nil + } + out := new(AntiAffinityInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AntiAffinityObservation) DeepCopyInto(out *AntiAffinityObservation) { + *out = *in + if in.ConsiderAntiAffinity != nil { + in, out := &in.ConsiderAntiAffinity, &out.ConsiderAntiAffinity + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AntiAffinityObservation. +func (in *AntiAffinityObservation) DeepCopy() *AntiAffinityObservation { + if in == nil { + return nil + } + out := new(AntiAffinityObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AntiAffinityParameters) DeepCopyInto(out *AntiAffinityParameters) { + *out = *in + if in.ConsiderAntiAffinity != nil { + in, out := &in.ConsiderAntiAffinity, &out.ConsiderAntiAffinity + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AntiAffinityParameters. +func (in *AntiAffinityParameters) DeepCopy() *AntiAffinityParameters { + if in == nil { + return nil + } + out := new(AntiAffinityParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CPUInitParameters) DeepCopyInto(out *CPUInitParameters) { + *out = *in + if in.ApplyThreshold != nil { + in, out := &in.ApplyThreshold, &out.ApplyThreshold + *out = new(float64) + **out = **in + } + if in.Args != nil { + in, out := &in.Args, &out.Args + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Function != nil { + in, out := &in.Function, &out.Function + *out = new(string) + **out = **in + } + if in.LookBackPeriodSeconds != nil { + in, out := &in.LookBackPeriodSeconds, &out.LookBackPeriodSeconds + *out = new(float64) + **out = **in + } + if in.Max != nil { + in, out := &in.Max, &out.Max + *out = new(float64) + **out = **in + } + if in.Min != nil { + in, out := &in.Min, &out.Min + *out = new(float64) + **out = **in + } + if in.Overhead != nil { + in, out := &in.Overhead, &out.Overhead + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CPUInitParameters. +func (in *CPUInitParameters) DeepCopy() *CPUInitParameters { + if in == nil { + return nil + } + out := new(CPUInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CPUObservation) DeepCopyInto(out *CPUObservation) { + *out = *in + if in.ApplyThreshold != nil { + in, out := &in.ApplyThreshold, &out.ApplyThreshold + *out = new(float64) + **out = **in + } + if in.Args != nil { + in, out := &in.Args, &out.Args + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Function != nil { + in, out := &in.Function, &out.Function + *out = new(string) + **out = **in + } + if in.LookBackPeriodSeconds != nil { + in, out := &in.LookBackPeriodSeconds, &out.LookBackPeriodSeconds + *out = new(float64) + **out = **in + } + if in.Max != nil { + in, out := &in.Max, &out.Max + *out = new(float64) + **out = **in + } + if in.Min != nil { + in, out := &in.Min, &out.Min + *out = new(float64) + **out = **in + } + if in.Overhead != nil { + in, out := &in.Overhead, &out.Overhead + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CPUObservation. +func (in *CPUObservation) DeepCopy() *CPUObservation { + if in == nil { + return nil + } + out := new(CPUObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CPUParameters) DeepCopyInto(out *CPUParameters) { + *out = *in + if in.ApplyThreshold != nil { + in, out := &in.ApplyThreshold, &out.ApplyThreshold + *out = new(float64) + **out = **in + } + if in.Args != nil { + in, out := &in.Args, &out.Args + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Function != nil { + in, out := &in.Function, &out.Function + *out = new(string) + **out = **in + } + if in.LookBackPeriodSeconds != nil { + in, out := &in.LookBackPeriodSeconds, &out.LookBackPeriodSeconds + *out = new(float64) + **out = **in + } + if in.Max != nil { + in, out := &in.Max, &out.Max + *out = new(float64) + **out = **in + } + if in.Min != nil { + in, out := &in.Min, &out.Min + *out = new(float64) + **out = **in + } + if in.Overhead != nil { + in, out := &in.Overhead, &out.Overhead + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CPUParameters. +func (in *CPUParameters) DeepCopy() *CPUParameters { + if in == nil { + return nil + } + out := new(CPUParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DownscalingInitParameters) DeepCopyInto(out *DownscalingInitParameters) { + *out = *in + if in.ApplyType != nil { + in, out := &in.ApplyType, &out.ApplyType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DownscalingInitParameters. +func (in *DownscalingInitParameters) DeepCopy() *DownscalingInitParameters { + if in == nil { + return nil + } + out := new(DownscalingInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DownscalingObservation) DeepCopyInto(out *DownscalingObservation) { + *out = *in + if in.ApplyType != nil { + in, out := &in.ApplyType, &out.ApplyType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DownscalingObservation. +func (in *DownscalingObservation) DeepCopy() *DownscalingObservation { + if in == nil { + return nil + } + out := new(DownscalingObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DownscalingParameters) DeepCopyInto(out *DownscalingParameters) { + *out = *in + if in.ApplyType != nil { + in, out := &in.ApplyType, &out.ApplyType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DownscalingParameters. +func (in *DownscalingParameters) DeepCopy() *DownscalingParameters { + if in == nil { + return nil + } + out := new(DownscalingParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MemoryEventInitParameters) DeepCopyInto(out *MemoryEventInitParameters) { + *out = *in + if in.ApplyType != nil { + in, out := &in.ApplyType, &out.ApplyType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MemoryEventInitParameters. +func (in *MemoryEventInitParameters) DeepCopy() *MemoryEventInitParameters { + if in == nil { + return nil + } + out := new(MemoryEventInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MemoryEventObservation) DeepCopyInto(out *MemoryEventObservation) { + *out = *in + if in.ApplyType != nil { + in, out := &in.ApplyType, &out.ApplyType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MemoryEventObservation. +func (in *MemoryEventObservation) DeepCopy() *MemoryEventObservation { + if in == nil { + return nil + } + out := new(MemoryEventObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MemoryEventParameters) DeepCopyInto(out *MemoryEventParameters) { + *out = *in + if in.ApplyType != nil { + in, out := &in.ApplyType, &out.ApplyType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MemoryEventParameters. +func (in *MemoryEventParameters) DeepCopy() *MemoryEventParameters { + if in == nil { + return nil + } + out := new(MemoryEventParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MemoryInitParameters) DeepCopyInto(out *MemoryInitParameters) { + *out = *in + if in.ApplyThreshold != nil { + in, out := &in.ApplyThreshold, &out.ApplyThreshold + *out = new(float64) + **out = **in + } + if in.Args != nil { + in, out := &in.Args, &out.Args + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Function != nil { + in, out := &in.Function, &out.Function + *out = new(string) + **out = **in + } + if in.LookBackPeriodSeconds != nil { + in, out := &in.LookBackPeriodSeconds, &out.LookBackPeriodSeconds + *out = new(float64) + **out = **in + } + if in.Max != nil { + in, out := &in.Max, &out.Max + *out = new(float64) + **out = **in + } + if in.Min != nil { + in, out := &in.Min, &out.Min + *out = new(float64) + **out = **in + } + if in.Overhead != nil { + in, out := &in.Overhead, &out.Overhead + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MemoryInitParameters. +func (in *MemoryInitParameters) DeepCopy() *MemoryInitParameters { + if in == nil { + return nil + } + out := new(MemoryInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MemoryObservation) DeepCopyInto(out *MemoryObservation) { + *out = *in + if in.ApplyThreshold != nil { + in, out := &in.ApplyThreshold, &out.ApplyThreshold + *out = new(float64) + **out = **in + } + if in.Args != nil { + in, out := &in.Args, &out.Args + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Function != nil { + in, out := &in.Function, &out.Function + *out = new(string) + **out = **in + } + if in.LookBackPeriodSeconds != nil { + in, out := &in.LookBackPeriodSeconds, &out.LookBackPeriodSeconds + *out = new(float64) + **out = **in + } + if in.Max != nil { + in, out := &in.Max, &out.Max + *out = new(float64) + **out = **in + } + if in.Min != nil { + in, out := &in.Min, &out.Min + *out = new(float64) + **out = **in + } + if in.Overhead != nil { + in, out := &in.Overhead, &out.Overhead + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MemoryObservation. +func (in *MemoryObservation) DeepCopy() *MemoryObservation { + if in == nil { + return nil + } + out := new(MemoryObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MemoryParameters) DeepCopyInto(out *MemoryParameters) { + *out = *in + if in.ApplyThreshold != nil { + in, out := &in.ApplyThreshold, &out.ApplyThreshold + *out = new(float64) + **out = **in + } + if in.Args != nil { + in, out := &in.Args, &out.Args + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Function != nil { + in, out := &in.Function, &out.Function + *out = new(string) + **out = **in + } + if in.LookBackPeriodSeconds != nil { + in, out := &in.LookBackPeriodSeconds, &out.LookBackPeriodSeconds + *out = new(float64) + **out = **in + } + if in.Max != nil { + in, out := &in.Max, &out.Max + *out = new(float64) + **out = **in + } + if in.Min != nil { + in, out := &in.Min, &out.Min + *out = new(float64) + **out = **in + } + if in.Overhead != nil { + in, out := &in.Overhead, &out.Overhead + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MemoryParameters. +func (in *MemoryParameters) DeepCopy() *MemoryParameters { + if in == nil { + return nil + } + out := new(MemoryParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ScalingPolicy) DeepCopyInto(out *ScalingPolicy) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScalingPolicy. +func (in *ScalingPolicy) DeepCopy() *ScalingPolicy { + if in == nil { + return nil + } + out := new(ScalingPolicy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ScalingPolicy) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ScalingPolicyInitParameters) DeepCopyInto(out *ScalingPolicyInitParameters) { + *out = *in + if in.AntiAffinity != nil { + in, out := &in.AntiAffinity, &out.AntiAffinity + *out = make([]AntiAffinityInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ApplyType != nil { + in, out := &in.ApplyType, &out.ApplyType + *out = new(string) + **out = **in + } + if in.CPU != nil { + in, out := &in.CPU, &out.CPU + *out = make([]CPUInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ClusterID != nil { + in, out := &in.ClusterID, &out.ClusterID + *out = new(string) + **out = **in + } + if in.Downscaling != nil { + in, out := &in.Downscaling, &out.Downscaling + *out = make([]DownscalingInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ManagementOption != nil { + in, out := &in.ManagementOption, &out.ManagementOption + *out = new(string) + **out = **in + } + if in.Memory != nil { + in, out := &in.Memory, &out.Memory + *out = make([]MemoryInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.MemoryEvent != nil { + in, out := &in.MemoryEvent, &out.MemoryEvent + *out = make([]MemoryEventInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Startup != nil { + in, out := &in.Startup, &out.Startup + *out = make([]StartupInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScalingPolicyInitParameters. +func (in *ScalingPolicyInitParameters) DeepCopy() *ScalingPolicyInitParameters { + if in == nil { + return nil + } + out := new(ScalingPolicyInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ScalingPolicyList) DeepCopyInto(out *ScalingPolicyList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ScalingPolicy, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScalingPolicyList. +func (in *ScalingPolicyList) DeepCopy() *ScalingPolicyList { + if in == nil { + return nil + } + out := new(ScalingPolicyList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ScalingPolicyList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ScalingPolicyObservation) DeepCopyInto(out *ScalingPolicyObservation) { + *out = *in + if in.AntiAffinity != nil { + in, out := &in.AntiAffinity, &out.AntiAffinity + *out = make([]AntiAffinityObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ApplyType != nil { + in, out := &in.ApplyType, &out.ApplyType + *out = new(string) + **out = **in + } + if in.CPU != nil { + in, out := &in.CPU, &out.CPU + *out = make([]CPUObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ClusterID != nil { + in, out := &in.ClusterID, &out.ClusterID + *out = new(string) + **out = **in + } + if in.Downscaling != nil { + in, out := &in.Downscaling, &out.Downscaling + *out = make([]DownscalingObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.ManagementOption != nil { + in, out := &in.ManagementOption, &out.ManagementOption + *out = new(string) + **out = **in + } + if in.Memory != nil { + in, out := &in.Memory, &out.Memory + *out = make([]MemoryObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.MemoryEvent != nil { + in, out := &in.MemoryEvent, &out.MemoryEvent + *out = make([]MemoryEventObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Startup != nil { + in, out := &in.Startup, &out.Startup + *out = make([]StartupObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScalingPolicyObservation. +func (in *ScalingPolicyObservation) DeepCopy() *ScalingPolicyObservation { + if in == nil { + return nil + } + out := new(ScalingPolicyObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ScalingPolicyParameters) DeepCopyInto(out *ScalingPolicyParameters) { + *out = *in + if in.AntiAffinity != nil { + in, out := &in.AntiAffinity, &out.AntiAffinity + *out = make([]AntiAffinityParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ApplyType != nil { + in, out := &in.ApplyType, &out.ApplyType + *out = new(string) + **out = **in + } + if in.CPU != nil { + in, out := &in.CPU, &out.CPU + *out = make([]CPUParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ClusterID != nil { + in, out := &in.ClusterID, &out.ClusterID + *out = new(string) + **out = **in + } + if in.Downscaling != nil { + in, out := &in.Downscaling, &out.Downscaling + *out = make([]DownscalingParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ManagementOption != nil { + in, out := &in.ManagementOption, &out.ManagementOption + *out = new(string) + **out = **in + } + if in.Memory != nil { + in, out := &in.Memory, &out.Memory + *out = make([]MemoryParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.MemoryEvent != nil { + in, out := &in.MemoryEvent, &out.MemoryEvent + *out = make([]MemoryEventParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Startup != nil { + in, out := &in.Startup, &out.Startup + *out = make([]StartupParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScalingPolicyParameters. +func (in *ScalingPolicyParameters) DeepCopy() *ScalingPolicyParameters { + if in == nil { + return nil + } + out := new(ScalingPolicyParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ScalingPolicySpec) DeepCopyInto(out *ScalingPolicySpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScalingPolicySpec. +func (in *ScalingPolicySpec) DeepCopy() *ScalingPolicySpec { + if in == nil { + return nil + } + out := new(ScalingPolicySpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ScalingPolicyStatus) DeepCopyInto(out *ScalingPolicyStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScalingPolicyStatus. +func (in *ScalingPolicyStatus) DeepCopy() *ScalingPolicyStatus { + if in == nil { + return nil + } + out := new(ScalingPolicyStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *StartupInitParameters) DeepCopyInto(out *StartupInitParameters) { + *out = *in + if in.PeriodSeconds != nil { + in, out := &in.PeriodSeconds, &out.PeriodSeconds + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StartupInitParameters. +func (in *StartupInitParameters) DeepCopy() *StartupInitParameters { + if in == nil { + return nil + } + out := new(StartupInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StartupObservation) DeepCopyInto(out *StartupObservation) { + *out = *in + if in.PeriodSeconds != nil { + in, out := &in.PeriodSeconds, &out.PeriodSeconds + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StartupObservation. +func (in *StartupObservation) DeepCopy() *StartupObservation { + if in == nil { + return nil + } + out := new(StartupObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StartupParameters) DeepCopyInto(out *StartupParameters) { + *out = *in + if in.PeriodSeconds != nil { + in, out := &in.PeriodSeconds, &out.PeriodSeconds + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StartupParameters. +func (in *StartupParameters) DeepCopy() *StartupParameters { + if in == nil { + return nil + } + out := new(StartupParameters) + in.DeepCopyInto(out) + return out +} diff --git a/apis/workload/v1alpha1/zz_generated.managed.go b/apis/workload/v1alpha1/zz_generated.managed.go new file mode 100644 index 0000000..06562b9 --- /dev/null +++ b/apis/workload/v1alpha1/zz_generated.managed.go @@ -0,0 +1,68 @@ +/* +Copyright 2022 Upbound Inc. +*/ +// Code generated by angryjet. DO NOT EDIT. + +package v1alpha1 + +import xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + +// GetCondition of this ScalingPolicy. +func (mg *ScalingPolicy) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this ScalingPolicy. +func (mg *ScalingPolicy) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this ScalingPolicy. +func (mg *ScalingPolicy) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this ScalingPolicy. +func (mg *ScalingPolicy) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this ScalingPolicy. +func (mg *ScalingPolicy) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this ScalingPolicy. +func (mg *ScalingPolicy) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this ScalingPolicy. +func (mg *ScalingPolicy) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this ScalingPolicy. +func (mg *ScalingPolicy) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this ScalingPolicy. 
+func (mg *ScalingPolicy) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this ScalingPolicy. +func (mg *ScalingPolicy) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this ScalingPolicy. +func (mg *ScalingPolicy) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this ScalingPolicy. +func (mg *ScalingPolicy) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} diff --git a/apis/workload/v1alpha1/zz_generated.managedlist.go b/apis/workload/v1alpha1/zz_generated.managedlist.go new file mode 100644 index 0000000..0f19ab6 --- /dev/null +++ b/apis/workload/v1alpha1/zz_generated.managedlist.go @@ -0,0 +1,17 @@ +/* +Copyright 2022 Upbound Inc. +*/ +// Code generated by angryjet. DO NOT EDIT. + +package v1alpha1 + +import resource "github.com/crossplane/crossplane-runtime/pkg/resource" + +// GetItems of this ScalingPolicyList. +func (l *ScalingPolicyList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} diff --git a/apis/workload/v1alpha1/zz_groupversion_info.go b/apis/workload/v1alpha1/zz_groupversion_info.go new file mode 100755 index 0000000..ab1b8c5 --- /dev/null +++ b/apis/workload/v1alpha1/zz_groupversion_info.go @@ -0,0 +1,32 @@ +/* +Copyright 2022 Upbound Inc. +*/ + +// Code generated by upjet. DO NOT EDIT. + +// +kubebuilder:object:generate=true +// +groupName=workload.castai.upbound.io +// +versionName=v1alpha1 +package v1alpha1 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +// Package type metadata. +const ( + CRDGroup = "workload.castai.upbound.io" + CRDVersion = "v1alpha1" +) + +var ( + // CRDGroupVersion is the API Group Version used to register the objects + CRDGroupVersion = schema.GroupVersion{Group: CRDGroup, Version: CRDVersion} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: CRDGroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/workload/v1alpha1/zz_scalingpolicy_terraformed.go b/apis/workload/v1alpha1/zz_scalingpolicy_terraformed.go new file mode 100755 index 0000000..2ff4163 --- /dev/null +++ b/apis/workload/v1alpha1/zz_scalingpolicy_terraformed.go @@ -0,0 +1,129 @@ +/* +Copyright 2022 Upbound Inc. +*/ + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1alpha1 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this ScalingPolicy +func (mg *ScalingPolicy) GetTerraformResourceType() string { + return "castai_workload_scaling_policy" +} + +// GetConnectionDetailsMapping for this ScalingPolicy +func (tr *ScalingPolicy) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this ScalingPolicy +func (tr *ScalingPolicy) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this ScalingPolicy +func (tr *ScalingPolicy) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this ScalingPolicy +func (tr *ScalingPolicy) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this ScalingPolicy +func (tr *ScalingPolicy) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this ScalingPolicy +func (tr *ScalingPolicy) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this ScalingPolicy +func (tr *ScalingPolicy) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this ScalingPolicy +func (tr *ScalingPolicy) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this ScalingPolicy using its observed tfState. +// returns True if there are any spec changes for the resource. 
+func (tr *ScalingPolicy) LateInitialize(attrs []byte) (bool, error) { + params := &ScalingPolicyParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *ScalingPolicy) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/workload/v1alpha1/zz_scalingpolicy_types.go b/apis/workload/v1alpha1/zz_scalingpolicy_types.go new file mode 100755 index 0000000..1d09d7a --- /dev/null +++ b/apis/workload/v1alpha1/zz_scalingpolicy_types.go @@ -0,0 +1,535 @@ +/* +Copyright 2022 Upbound Inc. +*/ + +// Code generated by upjet. DO NOT EDIT. + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type AntiAffinityInitParameters struct { + + // affinity should be considered when scaling the workload. + // If enabled, requiring host ports, or having anti-affinity on hostname will force all recommendations to be deferred. + // Defines if anti-affinity should be considered when scaling the workload. + // If enabled, requiring host ports, or having anti-affinity on hostname will force all recommendations to be deferred. + ConsiderAntiAffinity *bool `json:"considerAntiAffinity,omitempty" tf:"consider_anti_affinity,omitempty"` +} + +type AntiAffinityObservation struct { + + // affinity should be considered when scaling the workload. + // If enabled, requiring host ports, or having anti-affinity on hostname will force all recommendations to be deferred. + // Defines if anti-affinity should be considered when scaling the workload. + // If enabled, requiring host ports, or having anti-affinity on hostname will force all recommendations to be deferred. + ConsiderAntiAffinity *bool `json:"considerAntiAffinity,omitempty" tf:"consider_anti_affinity,omitempty"` +} + +type AntiAffinityParameters struct { + + // affinity should be considered when scaling the workload. + // If enabled, requiring host ports, or having anti-affinity on hostname will force all recommendations to be deferred. + // Defines if anti-affinity should be considered when scaling the workload. + // If enabled, requiring host ports, or having anti-affinity on hostname will force all recommendations to be deferred. + // +kubebuilder:validation:Optional + ConsiderAntiAffinity *bool `json:"considerAntiAffinity,omitempty" tf:"consider_anti_affinity,omitempty"` +} + +type CPUInitParameters struct { + + // (Number) The threshold of when to apply the recommendation. Recommendation will be applied when diff of current requests and new recommendation is greater than set value + // The threshold of when to apply the recommendation. Recommendation will be applied when diff of current requests and new recommendation is greater than set value + ApplyThreshold *float64 `json:"applyThreshold,omitempty" tf:"apply_threshold,omitempty"` + + // i.e. for QUANTILE this should be a [0, 1] float. MAX doesn't accept any args + // The arguments for the function - i.e. for `QUANTILE` this should be a [0, 1] float. 
`MAX` doesn't accept any args + Args []*string `json:"args,omitempty" tf:"args,omitempty"` + + // (String) The function used to calculate the resource recommendation. Supported values: QUANTILE, MAX + // The function used to calculate the resource recommendation. Supported values: `QUANTILE`, `MAX` + Function *string `json:"function,omitempty" tf:"function,omitempty"` + + // (Number) The look back period in seconds for the recommendation. + // The look back period in seconds for the recommendation. + LookBackPeriodSeconds *float64 `json:"lookBackPeriodSeconds,omitempty" tf:"look_back_period_seconds,omitempty"` + + // this is in MiB, for CPU - this is in cores. + // Max values for the recommendation, applies to every container. For memory - this is in MiB, for CPU - this is in cores. + Max *float64 `json:"max,omitempty" tf:"max,omitempty"` + + // this is in MiB, for CPU - this is in cores. + // Min values for the recommendation, applies to every container. For memory - this is in MiB, for CPU - this is in cores. + Min *float64 `json:"min,omitempty" tf:"min,omitempty"` + + // (Number) Overhead for the recommendation, e.g. 0.1 will result in 10% higher recommendation + // Overhead for the recommendation, e.g. `0.1` will result in 10% higher recommendation + Overhead *float64 `json:"overhead,omitempty" tf:"overhead,omitempty"` +} + +type CPUObservation struct { + + // (Number) The threshold of when to apply the recommendation. Recommendation will be applied when diff of current requests and new recommendation is greater than set value + // The threshold of when to apply the recommendation. Recommendation will be applied when diff of current requests and new recommendation is greater than set value + ApplyThreshold *float64 `json:"applyThreshold,omitempty" tf:"apply_threshold,omitempty"` + + // i.e. for QUANTILE this should be a [0, 1] float. MAX doesn't accept any args + // The arguments for the function - i.e. for `QUANTILE` this should be a [0, 1] float. `MAX` doesn't accept any args + Args []*string `json:"args,omitempty" tf:"args,omitempty"` + + // (String) The function used to calculate the resource recommendation. Supported values: QUANTILE, MAX + // The function used to calculate the resource recommendation. Supported values: `QUANTILE`, `MAX` + Function *string `json:"function,omitempty" tf:"function,omitempty"` + + // (Number) The look back period in seconds for the recommendation. + // The look back period in seconds for the recommendation. + LookBackPeriodSeconds *float64 `json:"lookBackPeriodSeconds,omitempty" tf:"look_back_period_seconds,omitempty"` + + // this is in MiB, for CPU - this is in cores. + // Max values for the recommendation, applies to every container. For memory - this is in MiB, for CPU - this is in cores. + Max *float64 `json:"max,omitempty" tf:"max,omitempty"` + + // this is in MiB, for CPU - this is in cores. + // Min values for the recommendation, applies to every container. For memory - this is in MiB, for CPU - this is in cores. + Min *float64 `json:"min,omitempty" tf:"min,omitempty"` + + // (Number) Overhead for the recommendation, e.g. 0.1 will result in 10% higher recommendation + // Overhead for the recommendation, e.g. `0.1` will result in 10% higher recommendation + Overhead *float64 `json:"overhead,omitempty" tf:"overhead,omitempty"` +} + +type CPUParameters struct { + + // (Number) The threshold of when to apply the recommendation. 
Recommendation will be applied when diff of current requests and new recommendation is greater than set value + // The threshold of when to apply the recommendation. Recommendation will be applied when diff of current requests and new recommendation is greater than set value + // +kubebuilder:validation:Optional + ApplyThreshold *float64 `json:"applyThreshold,omitempty" tf:"apply_threshold,omitempty"` + + // i.e. for QUANTILE this should be a [0, 1] float. MAX doesn't accept any args + // The arguments for the function - i.e. for `QUANTILE` this should be a [0, 1] float. `MAX` doesn't accept any args + // +kubebuilder:validation:Optional + Args []*string `json:"args,omitempty" tf:"args,omitempty"` + + // (String) The function used to calculate the resource recommendation. Supported values: QUANTILE, MAX + // The function used to calculate the resource recommendation. Supported values: `QUANTILE`, `MAX` + // +kubebuilder:validation:Optional + Function *string `json:"function,omitempty" tf:"function,omitempty"` + + // (Number) The look back period in seconds for the recommendation. + // The look back period in seconds for the recommendation. + // +kubebuilder:validation:Optional + LookBackPeriodSeconds *float64 `json:"lookBackPeriodSeconds,omitempty" tf:"look_back_period_seconds,omitempty"` + + // this is in MiB, for CPU - this is in cores. + // Max values for the recommendation, applies to every container. For memory - this is in MiB, for CPU - this is in cores. + // +kubebuilder:validation:Optional + Max *float64 `json:"max,omitempty" tf:"max,omitempty"` + + // this is in MiB, for CPU - this is in cores. + // Min values for the recommendation, applies to every container. For memory - this is in MiB, for CPU - this is in cores. + // +kubebuilder:validation:Optional + Min *float64 `json:"min,omitempty" tf:"min,omitempty"` + + // (Number) Overhead for the recommendation, e.g. 0.1 will result in 10% higher recommendation + // Overhead for the recommendation, e.g. `0.1` will result in 10% higher recommendation + // +kubebuilder:validation:Optional + Overhead *float64 `json:"overhead,omitempty" tf:"overhead,omitempty"` +} + +type DownscalingInitParameters struct { + + // (String) Recommendation apply type. + // Defines the apply type to be used when downscaling. + // - IMMEDIATE - pods are restarted immediately when new recommendation is generated. + // - DEFERRED - pods are not restarted and recommendation values are applied during natural restarts only (new deployment, etc.) + ApplyType *string `json:"applyType,omitempty" tf:"apply_type,omitempty"` +} + +type DownscalingObservation struct { + + // (String) Recommendation apply type. + // Defines the apply type to be used when downscaling. + // - IMMEDIATE - pods are restarted immediately when new recommendation is generated. + // - DEFERRED - pods are not restarted and recommendation values are applied during natural restarts only (new deployment, etc.) + ApplyType *string `json:"applyType,omitempty" tf:"apply_type,omitempty"` +} + +type DownscalingParameters struct { + + // (String) Recommendation apply type. + // Defines the apply type to be used when downscaling. + // - IMMEDIATE - pods are restarted immediately when new recommendation is generated. + // - DEFERRED - pods are not restarted and recommendation values are applied during natural restarts only (new deployment, etc.) 
+ // +kubebuilder:validation:Optional + ApplyType *string `json:"applyType,omitempty" tf:"apply_type,omitempty"` +} + +type MemoryEventInitParameters struct { + + // (String) Recommendation apply type. + // Defines the apply type to be used when applying recommendation for memory related event. + // - IMMEDIATE - pods are restarted immediately when new recommendation is generated. + // - DEFERRED - pods are not restarted and recommendation values are applied during natural restarts only (new deployment, etc.) + ApplyType *string `json:"applyType,omitempty" tf:"apply_type,omitempty"` +} + +type MemoryEventObservation struct { + + // (String) Recommendation apply type. + // Defines the apply type to be used when applying recommendation for memory related event. + // - IMMEDIATE - pods are restarted immediately when new recommendation is generated. + // - DEFERRED - pods are not restarted and recommendation values are applied during natural restarts only (new deployment, etc.) + ApplyType *string `json:"applyType,omitempty" tf:"apply_type,omitempty"` +} + +type MemoryEventParameters struct { + + // (String) Recommendation apply type. + // Defines the apply type to be used when applying recommendation for memory related event. + // - IMMEDIATE - pods are restarted immediately when new recommendation is generated. + // - DEFERRED - pods are not restarted and recommendation values are applied during natural restarts only (new deployment, etc.) + // +kubebuilder:validation:Optional + ApplyType *string `json:"applyType,omitempty" tf:"apply_type,omitempty"` +} + +type MemoryInitParameters struct { + + // (Number) The threshold of when to apply the recommendation. Recommendation will be applied when diff of current requests and new recommendation is greater than set value + // The threshold of when to apply the recommendation. Recommendation will be applied when diff of current requests and new recommendation is greater than set value + ApplyThreshold *float64 `json:"applyThreshold,omitempty" tf:"apply_threshold,omitempty"` + + // i.e. for QUANTILE this should be a [0, 1] float. MAX doesn't accept any args + // The arguments for the function - i.e. for `QUANTILE` this should be a [0, 1] float. `MAX` doesn't accept any args + Args []*string `json:"args,omitempty" tf:"args,omitempty"` + + // (String) The function used to calculate the resource recommendation. Supported values: QUANTILE, MAX + // The function used to calculate the resource recommendation. Supported values: `QUANTILE`, `MAX` + Function *string `json:"function,omitempty" tf:"function,omitempty"` + + // (Number) The look back period in seconds for the recommendation. + // The look back period in seconds for the recommendation. + LookBackPeriodSeconds *float64 `json:"lookBackPeriodSeconds,omitempty" tf:"look_back_period_seconds,omitempty"` + + // this is in MiB, for CPU - this is in cores. + // Max values for the recommendation, applies to every container. For memory - this is in MiB, for CPU - this is in cores. + Max *float64 `json:"max,omitempty" tf:"max,omitempty"` + + // this is in MiB, for CPU - this is in cores. + // Min values for the recommendation, applies to every container. For memory - this is in MiB, for CPU - this is in cores. + Min *float64 `json:"min,omitempty" tf:"min,omitempty"` + + // (Number) Overhead for the recommendation, e.g. 0.1 will result in 10% higher recommendation + // Overhead for the recommendation, e.g. 
`0.1` will result in 10% higher recommendation + Overhead *float64 `json:"overhead,omitempty" tf:"overhead,omitempty"` +} + +type MemoryObservation struct { + + // (Number) The threshold of when to apply the recommendation. Recommendation will be applied when diff of current requests and new recommendation is greater than set value + // The threshold of when to apply the recommendation. Recommendation will be applied when diff of current requests and new recommendation is greater than set value + ApplyThreshold *float64 `json:"applyThreshold,omitempty" tf:"apply_threshold,omitempty"` + + // i.e. for QUANTILE this should be a [0, 1] float. MAX doesn't accept any args + // The arguments for the function - i.e. for `QUANTILE` this should be a [0, 1] float. `MAX` doesn't accept any args + Args []*string `json:"args,omitempty" tf:"args,omitempty"` + + // (String) The function used to calculate the resource recommendation. Supported values: QUANTILE, MAX + // The function used to calculate the resource recommendation. Supported values: `QUANTILE`, `MAX` + Function *string `json:"function,omitempty" tf:"function,omitempty"` + + // (Number) The look back period in seconds for the recommendation. + // The look back period in seconds for the recommendation. + LookBackPeriodSeconds *float64 `json:"lookBackPeriodSeconds,omitempty" tf:"look_back_period_seconds,omitempty"` + + // this is in MiB, for CPU - this is in cores. + // Max values for the recommendation, applies to every container. For memory - this is in MiB, for CPU - this is in cores. + Max *float64 `json:"max,omitempty" tf:"max,omitempty"` + + // this is in MiB, for CPU - this is in cores. + // Min values for the recommendation, applies to every container. For memory - this is in MiB, for CPU - this is in cores. + Min *float64 `json:"min,omitempty" tf:"min,omitempty"` + + // (Number) Overhead for the recommendation, e.g. 0.1 will result in 10% higher recommendation + // Overhead for the recommendation, e.g. `0.1` will result in 10% higher recommendation + Overhead *float64 `json:"overhead,omitempty" tf:"overhead,omitempty"` +} + +type MemoryParameters struct { + + // (Number) The threshold of when to apply the recommendation. Recommendation will be applied when diff of current requests and new recommendation is greater than set value + // The threshold of when to apply the recommendation. Recommendation will be applied when diff of current requests and new recommendation is greater than set value + // +kubebuilder:validation:Optional + ApplyThreshold *float64 `json:"applyThreshold,omitempty" tf:"apply_threshold,omitempty"` + + // i.e. for QUANTILE this should be a [0, 1] float. MAX doesn't accept any args + // The arguments for the function - i.e. for `QUANTILE` this should be a [0, 1] float. `MAX` doesn't accept any args + // +kubebuilder:validation:Optional + Args []*string `json:"args,omitempty" tf:"args,omitempty"` + + // (String) The function used to calculate the resource recommendation. Supported values: QUANTILE, MAX + // The function used to calculate the resource recommendation. Supported values: `QUANTILE`, `MAX` + // +kubebuilder:validation:Optional + Function *string `json:"function,omitempty" tf:"function,omitempty"` + + // (Number) The look back period in seconds for the recommendation. + // The look back period in seconds for the recommendation. 
+ // +kubebuilder:validation:Optional + LookBackPeriodSeconds *float64 `json:"lookBackPeriodSeconds,omitempty" tf:"look_back_period_seconds,omitempty"` + + // this is in MiB, for CPU - this is in cores. + // Max values for the recommendation, applies to every container. For memory - this is in MiB, for CPU - this is in cores. + // +kubebuilder:validation:Optional + Max *float64 `json:"max,omitempty" tf:"max,omitempty"` + + // this is in MiB, for CPU - this is in cores. + // Min values for the recommendation, applies to every container. For memory - this is in MiB, for CPU - this is in cores. + // +kubebuilder:validation:Optional + Min *float64 `json:"min,omitempty" tf:"min,omitempty"` + + // (Number) Overhead for the recommendation, e.g. 0.1 will result in 10% higher recommendation + // Overhead for the recommendation, e.g. `0.1` will result in 10% higher recommendation + // +kubebuilder:validation:Optional + Overhead *float64 `json:"overhead,omitempty" tf:"overhead,omitempty"` +} + +type ScalingPolicyInitParameters struct { + + // (Block List, Max: 1) (see below for nested schema) + AntiAffinity []AntiAffinityInitParameters `json:"antiAffinity,omitempty" tf:"anti_affinity,omitempty"` + + // (String) Recommendation apply type. + // Recommendation apply type. + // - IMMEDIATE - pods are restarted immediately when new recommendation is generated. + // - DEFERRED - pods are not restarted and recommendation values are applied during natural restarts only (new deployment, etc.) + ApplyType *string `json:"applyType,omitempty" tf:"apply_type,omitempty"` + + // (Block List, Min: 1, Max: 1) (see below for nested schema) + CPU []CPUInitParameters `json:"cpu,omitempty" tf:"cpu,omitempty"` + + // (String) CAST AI cluster id + // CAST AI cluster id + ClusterID *string `json:"clusterId,omitempty" tf:"cluster_id,omitempty"` + + // (Block List, Max: 1) (see below for nested schema) + Downscaling []DownscalingInitParameters `json:"downscaling,omitempty" tf:"downscaling,omitempty"` + + // (String) Defines possible options for workload management. + // Defines possible options for workload management. + // - READ_ONLY - workload watched (metrics collected), but no actions performed by CAST AI. + // - MANAGED - workload watched (metrics collected), CAST AI may perform actions on the workload. + ManagementOption *string `json:"managementOption,omitempty" tf:"management_option,omitempty"` + + // (Block List, Min: 1, Max: 1) (see below for nested schema) + Memory []MemoryInitParameters `json:"memory,omitempty" tf:"memory,omitempty"` + + // (Block List, Max: 1) (see below for nested schema) + MemoryEvent []MemoryEventInitParameters `json:"memoryEvent,omitempty" tf:"memory_event,omitempty"` + + // (String) Scaling policy name + // Scaling policy name + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // (Block List, Max: 1) (see below for nested schema) + Startup []StartupInitParameters `json:"startup,omitempty" tf:"startup,omitempty"` +} + +type ScalingPolicyObservation struct { + + // (Block List, Max: 1) (see below for nested schema) + AntiAffinity []AntiAffinityObservation `json:"antiAffinity,omitempty" tf:"anti_affinity,omitempty"` + + // (String) Recommendation apply type. + // Recommendation apply type. + // - IMMEDIATE - pods are restarted immediately when new recommendation is generated. + // - DEFERRED - pods are not restarted and recommendation values are applied during natural restarts only (new deployment, etc.) 
+ ApplyType *string `json:"applyType,omitempty" tf:"apply_type,omitempty"` + + // (Block List, Min: 1, Max: 1) (see below for nested schema) + CPU []CPUObservation `json:"cpu,omitempty" tf:"cpu,omitempty"` + + // (String) CAST AI cluster id + // CAST AI cluster id + ClusterID *string `json:"clusterId,omitempty" tf:"cluster_id,omitempty"` + + // (Block List, Max: 1) (see below for nested schema) + Downscaling []DownscalingObservation `json:"downscaling,omitempty" tf:"downscaling,omitempty"` + + // (String) The ID of this resource. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // (String) Defines possible options for workload management. + // Defines possible options for workload management. + // - READ_ONLY - workload watched (metrics collected), but no actions performed by CAST AI. + // - MANAGED - workload watched (metrics collected), CAST AI may perform actions on the workload. + ManagementOption *string `json:"managementOption,omitempty" tf:"management_option,omitempty"` + + // (Block List, Min: 1, Max: 1) (see below for nested schema) + Memory []MemoryObservation `json:"memory,omitempty" tf:"memory,omitempty"` + + // (Block List, Max: 1) (see below for nested schema) + MemoryEvent []MemoryEventObservation `json:"memoryEvent,omitempty" tf:"memory_event,omitempty"` + + // (String) Scaling policy name + // Scaling policy name + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // (Block List, Max: 1) (see below for nested schema) + Startup []StartupObservation `json:"startup,omitempty" tf:"startup,omitempty"` +} + +type ScalingPolicyParameters struct { + + // (Block List, Max: 1) (see below for nested schema) + // +kubebuilder:validation:Optional + AntiAffinity []AntiAffinityParameters `json:"antiAffinity,omitempty" tf:"anti_affinity,omitempty"` + + // (String) Recommendation apply type. + // Recommendation apply type. + // - IMMEDIATE - pods are restarted immediately when new recommendation is generated. + // - DEFERRED - pods are not restarted and recommendation values are applied during natural restarts only (new deployment, etc.) + // +kubebuilder:validation:Optional + ApplyType *string `json:"applyType,omitempty" tf:"apply_type,omitempty"` + + // (Block List, Min: 1, Max: 1) (see below for nested schema) + // +kubebuilder:validation:Optional + CPU []CPUParameters `json:"cpu,omitempty" tf:"cpu,omitempty"` + + // (String) CAST AI cluster id + // CAST AI cluster id + // +kubebuilder:validation:Optional + ClusterID *string `json:"clusterId,omitempty" tf:"cluster_id,omitempty"` + + // (Block List, Max: 1) (see below for nested schema) + // +kubebuilder:validation:Optional + Downscaling []DownscalingParameters `json:"downscaling,omitempty" tf:"downscaling,omitempty"` + + // (String) Defines possible options for workload management. + // Defines possible options for workload management. + // - READ_ONLY - workload watched (metrics collected), but no actions performed by CAST AI. + // - MANAGED - workload watched (metrics collected), CAST AI may perform actions on the workload. 
+ // +kubebuilder:validation:Optional + ManagementOption *string `json:"managementOption,omitempty" tf:"management_option,omitempty"` + + // (Block List, Min: 1, Max: 1) (see below for nested schema) + // +kubebuilder:validation:Optional + Memory []MemoryParameters `json:"memory,omitempty" tf:"memory,omitempty"` + + // (Block List, Max: 1) (see below for nested schema) + // +kubebuilder:validation:Optional + MemoryEvent []MemoryEventParameters `json:"memoryEvent,omitempty" tf:"memory_event,omitempty"` + + // (String) Scaling policy name + // Scaling policy name + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // (Block List, Max: 1) (see below for nested schema) + // +kubebuilder:validation:Optional + Startup []StartupParameters `json:"startup,omitempty" tf:"startup,omitempty"` +} + +type StartupInitParameters struct { + + // (Number) Defines the duration (in seconds) during which elevated resource usage is expected at startup. + // When set, recommendations will be adjusted to disregard resource spikes within this period. + // If not specified, the workload will receive standard recommendations without startup considerations. + // Defines the duration (in seconds) during which elevated resource usage is expected at startup. + // When set, recommendations will be adjusted to disregard resource spikes within this period. + // If not specified, the workload will receive standard recommendations without startup considerations. + PeriodSeconds *float64 `json:"periodSeconds,omitempty" tf:"period_seconds,omitempty"` +} + +type StartupObservation struct { + + // (Number) Defines the duration (in seconds) during which elevated resource usage is expected at startup. + // When set, recommendations will be adjusted to disregard resource spikes within this period. + // If not specified, the workload will receive standard recommendations without startup considerations. + // Defines the duration (in seconds) during which elevated resource usage is expected at startup. + // When set, recommendations will be adjusted to disregard resource spikes within this period. + // If not specified, the workload will receive standard recommendations without startup considerations. + PeriodSeconds *float64 `json:"periodSeconds,omitempty" tf:"period_seconds,omitempty"` +} + +type StartupParameters struct { + + // (Number) Defines the duration (in seconds) during which elevated resource usage is expected at startup. + // When set, recommendations will be adjusted to disregard resource spikes within this period. + // If not specified, the workload will receive standard recommendations without startup considerations. + // Defines the duration (in seconds) during which elevated resource usage is expected at startup. + // When set, recommendations will be adjusted to disregard resource spikes within this period. + // If not specified, the workload will receive standard recommendations without startup considerations. + // +kubebuilder:validation:Optional + PeriodSeconds *float64 `json:"periodSeconds,omitempty" tf:"period_seconds,omitempty"` +} + +// ScalingPolicySpec defines the desired state of ScalingPolicy +type ScalingPolicySpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider ScalingPolicyParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. 
The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider ScalingPolicyInitParameters `json:"initProvider,omitempty"` +} + +// ScalingPolicyStatus defines the observed state of ScalingPolicy. +type ScalingPolicyStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider ScalingPolicyObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:storageversion + +// ScalingPolicy is the Schema for the ScalingPolicys API. Manage workload scaling policy. Scaling policy reference https://docs.cast.ai/docs/woop-scaling-policies +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,castai} +type ScalingPolicy struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.applyType) || (has(self.initProvider) && has(self.initProvider.applyType))",message="spec.forProvider.applyType is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.clusterId) || (has(self.initProvider) && has(self.initProvider.clusterId))",message="spec.forProvider.clusterId is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.cpu) || (has(self.initProvider) && has(self.initProvider.cpu))",message="spec.forProvider.cpu is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.managementOption) || (has(self.initProvider) && has(self.initProvider.managementOption))",message="spec.forProvider.managementOption is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.memory) || (has(self.initProvider) && has(self.initProvider.memory))",message="spec.forProvider.memory is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.name) || (has(self.initProvider) && has(self.initProvider.name))",message="spec.forProvider.name is a required parameter" + Spec ScalingPolicySpec 
`json:"spec"` + Status ScalingPolicyStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// ScalingPolicyList contains a list of ScalingPolicys +type ScalingPolicyList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []ScalingPolicy `json:"items"` +} + +// Repository type metadata. +var ( + ScalingPolicy_Kind = "ScalingPolicy" + ScalingPolicy_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: ScalingPolicy_Kind}.String() + ScalingPolicy_KindAPIVersion = ScalingPolicy_Kind + "." + CRDGroupVersion.String() + ScalingPolicy_GroupVersionKind = CRDGroupVersion.WithKind(ScalingPolicy_Kind) +) + +func init() { + SchemeBuilder.Register(&ScalingPolicy{}, &ScalingPolicyList{}) +} diff --git a/apis/zz_register.go b/apis/zz_register.go index 740969d..17a3a7c 100755 --- a/apis/zz_register.go +++ b/apis/zz_register.go @@ -13,6 +13,7 @@ import ( v1alpha1 "github.com/crossplane-contrib/crossplane-provider-castai/apis/castai/v1alpha1" v1alpha1apis "github.com/crossplane-contrib/crossplane-provider-castai/apis/v1alpha1" v1beta1 "github.com/crossplane-contrib/crossplane-provider-castai/apis/v1beta1" + v1alpha1workload "github.com/crossplane-contrib/crossplane-provider-castai/apis/workload/v1alpha1" ) func init() { @@ -21,6 +22,7 @@ func init() { v1alpha1.SchemeBuilder.AddToScheme, v1alpha1apis.SchemeBuilder.AddToScheme, v1beta1.SchemeBuilder.AddToScheme, + v1alpha1workload.SchemeBuilder.AddToScheme, ) } diff --git a/config/external_name.go b/config/external_name.go index 92b1303..37ddb52 100644 --- a/config/external_name.go +++ b/config/external_name.go @@ -24,6 +24,7 @@ var ExternalNameConfigs = map[string]config.ExternalName{ "castai_organization_members": config.IdentifierFromProvider, "castai_reservations": config.IdentifierFromProvider, "castai_sso_connection": config.IdentifierFromProvider, + "castai_workload_scaling_policy": config.IdentifierFromProvider, } // ExternalNameConfigurations applies all external name configs listed in the diff --git a/config/workloadscalingpolicy/config.go b/config/workloadscalingpolicy/config.go new file mode 100644 index 0000000..fccde69 --- /dev/null +++ b/config/workloadscalingpolicy/config.go @@ -0,0 +1,13 @@ +package workloadscalingpolicy + +import ( + "github.com/crossplane/upjet/pkg/config" +) + +// Configure configures individual resources by adding custom ResourceConfigurators. 
+func Configure(p *config.Provider) { + p.AddResourceConfigurator("castai_workload_scaling_policy", func(r *config.Resource) { + r.ShortGroup = "" + r.Kind = "ScalingPolicy" + }) +} diff --git a/examples-generated/workload/v1alpha1/scalingpolicy.yaml b/examples-generated/workload/v1alpha1/scalingpolicy.yaml new file mode 100644 index 0000000..c2b916b --- /dev/null +++ b/examples-generated/workload/v1alpha1/scalingpolicy.yaml @@ -0,0 +1,35 @@ +apiVersion: workload.castai.upbound.io/v1alpha1 +kind: ScalingPolicy +metadata: + annotations: + meta.upbound.io/example-id: workload/v1alpha1/scalingpolicy + labels: + testing.upbound.io/example-name: services + name: services +spec: + forProvider: + antiAffinity: + - considerAntiAffinity: false + applyType: IMMEDIATE + clusterId: ${castai_gke_cluster.dev.id} + cpu: + - applyThreshold: 0.1 + args: + - "0.9" + function: QUANTILE + lookBackPeriodSeconds: 172800 + max: 1 + min: 0.1 + overhead: 0.15 + downscaling: + - applyType: DEFERRED + managementOption: MANAGED + memory: + - applyThreshold: 0.2 + function: MAX + overhead: 0.35 + memoryEvent: + - applyType: IMMEDIATE + name: services + startup: + - periodSeconds: 240 diff --git a/internal/controller/workload/scalingpolicy/zz_controller.go b/internal/controller/workload/scalingpolicy/zz_controller.go new file mode 100755 index 0000000..7159129 --- /dev/null +++ b/internal/controller/workload/scalingpolicy/zz_controller.go @@ -0,0 +1,87 @@ +/* +Copyright 2022 Upbound Inc. +*/ + +// Code generated by upjet. DO NOT EDIT. + +package scalingpolicy + +import ( + "time" + + "github.com/crossplane/crossplane-runtime/pkg/connection" + "github.com/crossplane/crossplane-runtime/pkg/event" + "github.com/crossplane/crossplane-runtime/pkg/ratelimiter" + "github.com/crossplane/crossplane-runtime/pkg/reconciler/managed" + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + "github.com/crossplane/crossplane-runtime/pkg/statemetrics" + tjcontroller "github.com/crossplane/upjet/pkg/controller" + "github.com/crossplane/upjet/pkg/controller/handler" + "github.com/crossplane/upjet/pkg/terraform" + "github.com/pkg/errors" + ctrl "sigs.k8s.io/controller-runtime" + + v1alpha1 "github.com/crossplane-contrib/crossplane-provider-castai/apis/workload/v1alpha1" + features "github.com/crossplane-contrib/crossplane-provider-castai/internal/features" +) + +// Setup adds a controller that reconciles ScalingPolicy managed resources. 
+func Setup(mgr ctrl.Manager, o tjcontroller.Options) error { + name := managed.ControllerName(v1alpha1.ScalingPolicy_GroupVersionKind.String()) + var initializers managed.InitializerChain + cps := []managed.ConnectionPublisher{managed.NewAPISecretPublisher(mgr.GetClient(), mgr.GetScheme())} + if o.SecretStoreConfigGVK != nil { + cps = append(cps, connection.NewDetailsManager(mgr.GetClient(), *o.SecretStoreConfigGVK, connection.WithTLSConfig(o.ESSOptions.TLSConfig))) + } + eventHandler := handler.NewEventHandler(handler.WithLogger(o.Logger.WithValues("gvk", v1alpha1.ScalingPolicy_GroupVersionKind))) + ac := tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1alpha1.ScalingPolicy_GroupVersionKind), tjcontroller.WithEventHandler(eventHandler)) + opts := []managed.ReconcilerOption{ + managed.WithExternalConnecter(tjcontroller.NewConnector(mgr.GetClient(), o.WorkspaceStore, o.SetupFn, o.Provider.Resources["castai_workload_scaling_policy"], tjcontroller.WithLogger(o.Logger), tjcontroller.WithConnectorEventHandler(eventHandler), + tjcontroller.WithCallbackProvider(ac), + )), + managed.WithLogger(o.Logger.WithValues("controller", name)), + managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))), + managed.WithFinalizer(terraform.NewWorkspaceFinalizer(o.WorkspaceStore, xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))), + managed.WithTimeout(3 * time.Minute), + managed.WithInitializers(initializers), + managed.WithConnectionPublishers(cps...), + managed.WithPollInterval(o.PollInterval), + } + if o.PollJitter != 0 { + opts = append(opts, managed.WithPollJitterHook(o.PollJitter)) + } + if o.Features.Enabled(features.EnableBetaManagementPolicies) { + opts = append(opts, managed.WithManagementPolicies()) + } + if o.MetricOptions != nil { + opts = append(opts, managed.WithMetricRecorder(o.MetricOptions.MRMetrics)) + } + + // register webhooks for the kind v1alpha1.ScalingPolicy + // if they're enabled. + if o.StartWebhooks { + if err := ctrl.NewWebhookManagedBy(mgr). + For(&v1alpha1.ScalingPolicy{}). + Complete(); err != nil { + return errors.Wrap(err, "cannot register webhook for the kind v1alpha1.ScalingPolicy") + } + } + + if o.MetricOptions != nil && o.MetricOptions.MRStateMetrics != nil { + stateMetricsRecorder := statemetrics.NewMRStateRecorder( + mgr.GetClient(), o.Logger, o.MetricOptions.MRStateMetrics, &v1alpha1.ScalingPolicyList{}, o.MetricOptions.PollStateMetricInterval, + ) + if err := mgr.Add(stateMetricsRecorder); err != nil { + return errors.Wrap(err, "cannot register MR state metrics recorder for kind v1alpha1.ScalingPolicyList") + } + } + + r := managed.NewReconciler(mgr, xpresource.ManagedKind(v1alpha1.ScalingPolicy_GroupVersionKind), opts...) + + return ctrl.NewControllerManagedBy(mgr). + Named(name). + WithOptions(o.ForControllerRuntime()). + WithEventFilter(xpresource.DesiredStateChanged()). + Watches(&v1alpha1.ScalingPolicy{}, eventHandler). 
+ Complete(ratelimiter.NewReconciler(name, r, o.GlobalRateLimiter)) +} diff --git a/internal/controller/zz_setup.go b/internal/controller/zz_setup.go index 37348cf..d248e2e 100755 --- a/internal/controller/zz_setup.go +++ b/internal/controller/zz_setup.go @@ -25,6 +25,7 @@ import ( reservations "github.com/crossplane-contrib/crossplane-provider-castai/internal/controller/castai/reservations" ssoconnection "github.com/crossplane-contrib/crossplane-provider-castai/internal/controller/castai/ssoconnection" providerconfig "github.com/crossplane-contrib/crossplane-provider-castai/internal/controller/providerconfig" + scalingpolicy "github.com/crossplane-contrib/crossplane-provider-castai/internal/controller/workload/scalingpolicy" ) // Setup creates all controllers with the supplied logger and adds them to @@ -47,6 +48,7 @@ func Setup(mgr ctrl.Manager, o controller.Options) error { reservations.Setup, ssoconnection.Setup, providerconfig.Setup, + scalingpolicy.Setup, } { if err := setup(mgr, o); err != nil { return err diff --git a/package/crds/workload.castai.upbound.io_scalingpolicies.yaml b/package/crds/workload.castai.upbound.io_scalingpolicies.yaml new file mode 100644 index 0000000..187983a --- /dev/null +++ b/package/crds/workload.castai.upbound.io_scalingpolicies.yaml @@ -0,0 +1,850 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.14.0 + name: scalingpolicies.workload.castai.upbound.io +spec: + group: workload.castai.upbound.io + names: + categories: + - crossplane + - managed + - castai + kind: ScalingPolicy + listKind: ScalingPolicyList + plural: scalingpolicies + singular: scalingpolicy + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: ScalingPolicy is the Schema for the ScalingPolicys API. Manage + workload scaling policy. Scaling policy reference https://docs.cast.ai/docs/woop-scaling-policies + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: ScalingPolicySpec defines the desired state of ScalingPolicy + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. 
Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + antiAffinity: + description: '(Block List, Max: 1) (see below for nested schema)' + items: + properties: + considerAntiAffinity: + description: |- + affinity should be considered when scaling the workload. + If enabled, requiring host ports, or having anti-affinity on hostname will force all recommendations to be deferred. + Defines if anti-affinity should be considered when scaling the workload. + If enabled, requiring host ports, or having anti-affinity on hostname will force all recommendations to be deferred. + type: boolean + type: object + type: array + applyType: + description: |- + (String) Recommendation apply type. + Recommendation apply type. + - IMMEDIATE - pods are restarted immediately when new recommendation is generated. + - DEFERRED - pods are not restarted and recommendation values are applied during natural restarts only (new deployment, etc.) + type: string + clusterId: + description: |- + (String) CAST AI cluster id + CAST AI cluster id + type: string + cpu: + description: '(Block List, Min: 1, Max: 1) (see below for nested + schema)' + items: + properties: + applyThreshold: + description: |- + (Number) The threshold of when to apply the recommendation. Recommendation will be applied when diff of current requests and new recommendation is greater than set value + The threshold of when to apply the recommendation. Recommendation will be applied when diff of current requests and new recommendation is greater than set value + type: number + args: + description: |- + i.e. for QUANTILE this should be a [0, 1] float. MAX doesn't accept any args + The arguments for the function - i.e. for `QUANTILE` this should be a [0, 1] float. `MAX` doesn't accept any args + items: + type: string + type: array + function: + description: |- + (String) The function used to calculate the resource recommendation. Supported values: QUANTILE, MAX + The function used to calculate the resource recommendation. Supported values: `QUANTILE`, `MAX` + type: string + lookBackPeriodSeconds: + description: |- + (Number) The look back period in seconds for the recommendation. + The look back period in seconds for the recommendation. + type: number + max: + description: |- + this is in MiB, for CPU - this is in cores. + Max values for the recommendation, applies to every container. For memory - this is in MiB, for CPU - this is in cores. + type: number + min: + description: |- + this is in MiB, for CPU - this is in cores. + Min values for the recommendation, applies to every container. For memory - this is in MiB, for CPU - this is in cores. + type: number + overhead: + description: |- + (Number) Overhead for the recommendation, e.g. 0.1 will result in 10% higher recommendation + Overhead for the recommendation, e.g. `0.1` will result in 10% higher recommendation + type: number + type: object + type: array + downscaling: + description: '(Block List, Max: 1) (see below for nested schema)' + items: + properties: + applyType: + description: |- + (String) Recommendation apply type. + Defines the apply type to be used when downscaling. + - IMMEDIATE - pods are restarted immediately when new recommendation is generated. 
+ - DEFERRED - pods are not restarted and recommendation values are applied during natural restarts only (new deployment, etc.) + type: string + type: object + type: array + managementOption: + description: |- + (String) Defines possible options for workload management. + Defines possible options for workload management. + - READ_ONLY - workload watched (metrics collected), but no actions performed by CAST AI. + - MANAGED - workload watched (metrics collected), CAST AI may perform actions on the workload. + type: string + memory: + description: '(Block List, Min: 1, Max: 1) (see below for nested + schema)' + items: + properties: + applyThreshold: + description: |- + (Number) The threshold of when to apply the recommendation. Recommendation will be applied when diff of current requests and new recommendation is greater than set value + The threshold of when to apply the recommendation. Recommendation will be applied when diff of current requests and new recommendation is greater than set value + type: number + args: + description: |- + i.e. for QUANTILE this should be a [0, 1] float. MAX doesn't accept any args + The arguments for the function - i.e. for `QUANTILE` this should be a [0, 1] float. `MAX` doesn't accept any args + items: + type: string + type: array + function: + description: |- + (String) The function used to calculate the resource recommendation. Supported values: QUANTILE, MAX + The function used to calculate the resource recommendation. Supported values: `QUANTILE`, `MAX` + type: string + lookBackPeriodSeconds: + description: |- + (Number) The look back period in seconds for the recommendation. + The look back period in seconds for the recommendation. + type: number + max: + description: |- + this is in MiB, for CPU - this is in cores. + Max values for the recommendation, applies to every container. For memory - this is in MiB, for CPU - this is in cores. + type: number + min: + description: |- + this is in MiB, for CPU - this is in cores. + Min values for the recommendation, applies to every container. For memory - this is in MiB, for CPU - this is in cores. + type: number + overhead: + description: |- + (Number) Overhead for the recommendation, e.g. 0.1 will result in 10% higher recommendation + Overhead for the recommendation, e.g. `0.1` will result in 10% higher recommendation + type: number + type: object + type: array + memoryEvent: + description: '(Block List, Max: 1) (see below for nested schema)' + items: + properties: + applyType: + description: |- + (String) Recommendation apply type. + Defines the apply type to be used when applying recommendation for memory related event. + - IMMEDIATE - pods are restarted immediately when new recommendation is generated. + - DEFERRED - pods are not restarted and recommendation values are applied during natural restarts only (new deployment, etc.) + type: string + type: object + type: array + name: + description: |- + (String) Scaling policy name + Scaling policy name + type: string + startup: + description: '(Block List, Max: 1) (see below for nested schema)' + items: + properties: + periodSeconds: + description: |- + (Number) Defines the duration (in seconds) during which elevated resource usage is expected at startup. + When set, recommendations will be adjusted to disregard resource spikes within this period. + If not specified, the workload will receive standard recommendations without startup considerations. + Defines the duration (in seconds) during which elevated resource usage is expected at startup. 
+ When set, recommendations will be adjusted to disregard resource spikes within this period. + If not specified, the workload will receive standard recommendations without startup considerations. + type: number + type: object + type: array + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + antiAffinity: + description: '(Block List, Max: 1) (see below for nested schema)' + items: + properties: + considerAntiAffinity: + description: |- + affinity should be considered when scaling the workload. + If enabled, requiring host ports, or having anti-affinity on hostname will force all recommendations to be deferred. + Defines if anti-affinity should be considered when scaling the workload. + If enabled, requiring host ports, or having anti-affinity on hostname will force all recommendations to be deferred. + type: boolean + type: object + type: array + applyType: + description: |- + (String) Recommendation apply type. + Recommendation apply type. + - IMMEDIATE - pods are restarted immediately when new recommendation is generated. + - DEFERRED - pods are not restarted and recommendation values are applied during natural restarts only (new deployment, etc.) + type: string + clusterId: + description: |- + (String) CAST AI cluster id + CAST AI cluster id + type: string + cpu: + description: '(Block List, Min: 1, Max: 1) (see below for nested + schema)' + items: + properties: + applyThreshold: + description: |- + (Number) The threshold of when to apply the recommendation. Recommendation will be applied when diff of current requests and new recommendation is greater than set value + The threshold of when to apply the recommendation. Recommendation will be applied when diff of current requests and new recommendation is greater than set value + type: number + args: + description: |- + i.e. for QUANTILE this should be a [0, 1] float. MAX doesn't accept any args + The arguments for the function - i.e. for `QUANTILE` this should be a [0, 1] float. `MAX` doesn't accept any args + items: + type: string + type: array + function: + description: |- + (String) The function used to calculate the resource recommendation. Supported values: QUANTILE, MAX + The function used to calculate the resource recommendation. Supported values: `QUANTILE`, `MAX` + type: string + lookBackPeriodSeconds: + description: |- + (Number) The look back period in seconds for the recommendation. + The look back period in seconds for the recommendation. + type: number + max: + description: |- + this is in MiB, for CPU - this is in cores. + Max values for the recommendation, applies to every container. For memory - this is in MiB, for CPU - this is in cores. + type: number + min: + description: |- + this is in MiB, for CPU - this is in cores. + Min values for the recommendation, applies to every container. For memory - this is in MiB, for CPU - this is in cores. 
+ type: number + overhead: + description: |- + (Number) Overhead for the recommendation, e.g. 0.1 will result in 10% higher recommendation + Overhead for the recommendation, e.g. `0.1` will result in 10% higher recommendation + type: number + type: object + type: array + downscaling: + description: '(Block List, Max: 1) (see below for nested schema)' + items: + properties: + applyType: + description: |- + (String) Recommendation apply type. + Defines the apply type to be used when downscaling. + - IMMEDIATE - pods are restarted immediately when new recommendation is generated. + - DEFERRED - pods are not restarted and recommendation values are applied during natural restarts only (new deployment, etc.) + type: string + type: object + type: array + managementOption: + description: |- + (String) Defines possible options for workload management. + Defines possible options for workload management. + - READ_ONLY - workload watched (metrics collected), but no actions performed by CAST AI. + - MANAGED - workload watched (metrics collected), CAST AI may perform actions on the workload. + type: string + memory: + description: '(Block List, Min: 1, Max: 1) (see below for nested + schema)' + items: + properties: + applyThreshold: + description: |- + (Number) The threshold of when to apply the recommendation. Recommendation will be applied when diff of current requests and new recommendation is greater than set value + The threshold of when to apply the recommendation. Recommendation will be applied when diff of current requests and new recommendation is greater than set value + type: number + args: + description: |- + i.e. for QUANTILE this should be a [0, 1] float. MAX doesn't accept any args + The arguments for the function - i.e. for `QUANTILE` this should be a [0, 1] float. `MAX` doesn't accept any args + items: + type: string + type: array + function: + description: |- + (String) The function used to calculate the resource recommendation. Supported values: QUANTILE, MAX + The function used to calculate the resource recommendation. Supported values: `QUANTILE`, `MAX` + type: string + lookBackPeriodSeconds: + description: |- + (Number) The look back period in seconds for the recommendation. + The look back period in seconds for the recommendation. + type: number + max: + description: |- + this is in MiB, for CPU - this is in cores. + Max values for the recommendation, applies to every container. For memory - this is in MiB, for CPU - this is in cores. + type: number + min: + description: |- + this is in MiB, for CPU - this is in cores. + Min values for the recommendation, applies to every container. For memory - this is in MiB, for CPU - this is in cores. + type: number + overhead: + description: |- + (Number) Overhead for the recommendation, e.g. 0.1 will result in 10% higher recommendation + Overhead for the recommendation, e.g. `0.1` will result in 10% higher recommendation + type: number + type: object + type: array + memoryEvent: + description: '(Block List, Max: 1) (see below for nested schema)' + items: + properties: + applyType: + description: |- + (String) Recommendation apply type. + Defines the apply type to be used when applying recommendation for memory related event. + - IMMEDIATE - pods are restarted immediately when new recommendation is generated. + - DEFERRED - pods are not restarted and recommendation values are applied during natural restarts only (new deployment, etc.) 
+ type: string + type: object + type: array + name: + description: |- + (String) Scaling policy name + Scaling policy name + type: string + startup: + description: '(Block List, Max: 1) (see below for nested schema)' + items: + properties: + periodSeconds: + description: |- + (Number) Defines the duration (in seconds) during which elevated resource usage is expected at startup. + When set, recommendations will be adjusted to disregard resource spikes within this period. + If not specified, the workload will receive standard recommendations without startup considerations. + Defines the duration (in seconds) during which elevated resource usage is expected at startup. + When set, recommendations will be adjusted to disregard resource spikes within this period. + If not specified, the workload will receive standard recommendations without startup considerations. + type: number + type: object + type: array + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. 
+ properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.applyType is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.applyType) + || (has(self.initProvider) && has(self.initProvider.applyType))' + - message: spec.forProvider.clusterId is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.clusterId) + || (has(self.initProvider) && has(self.initProvider.clusterId))' + - message: spec.forProvider.cpu is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.cpu) + || (has(self.initProvider) && has(self.initProvider.cpu))' + - message: spec.forProvider.managementOption is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.managementOption) + || (has(self.initProvider) && has(self.initProvider.managementOption))' + - message: spec.forProvider.memory is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.memory) + || (has(self.initProvider) && has(self.initProvider.memory))' + - message: spec.forProvider.name is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.name) + || (has(self.initProvider) && has(self.initProvider.name))' + status: + description: ScalingPolicyStatus defines the observed state of ScalingPolicy. + properties: + atProvider: + properties: + antiAffinity: + description: '(Block List, Max: 1) (see below for nested schema)' + items: + properties: + considerAntiAffinity: + description: |- + affinity should be considered when scaling the workload. + If enabled, requiring host ports, or having anti-affinity on hostname will force all recommendations to be deferred. + Defines if anti-affinity should be considered when scaling the workload. + If enabled, requiring host ports, or having anti-affinity on hostname will force all recommendations to be deferred. + type: boolean + type: object + type: array + applyType: + description: |- + (String) Recommendation apply type. + Recommendation apply type. + - IMMEDIATE - pods are restarted immediately when new recommendation is generated. + - DEFERRED - pods are not restarted and recommendation values are applied during natural restarts only (new deployment, etc.) + type: string + clusterId: + description: |- + (String) CAST AI cluster id + CAST AI cluster id + type: string + cpu: + description: '(Block List, Min: 1, Max: 1) (see below for nested + schema)' + items: + properties: + applyThreshold: + description: |- + (Number) The threshold of when to apply the recommendation. Recommendation will be applied when diff of current requests and new recommendation is greater than set value + The threshold of when to apply the recommendation. Recommendation will be applied when diff of current requests and new recommendation is greater than set value + type: number + args: + description: |- + i.e. 
for QUANTILE this should be a [0, 1] float. MAX doesn't accept any args + The arguments for the function - i.e. for `QUANTILE` this should be a [0, 1] float. `MAX` doesn't accept any args + items: + type: string + type: array + function: + description: |- + (String) The function used to calculate the resource recommendation. Supported values: QUANTILE, MAX + The function used to calculate the resource recommendation. Supported values: `QUANTILE`, `MAX` + type: string + lookBackPeriodSeconds: + description: |- + (Number) The look back period in seconds for the recommendation. + The look back period in seconds for the recommendation. + type: number + max: + description: |- + this is in MiB, for CPU - this is in cores. + Max values for the recommendation, applies to every container. For memory - this is in MiB, for CPU - this is in cores. + type: number + min: + description: |- + this is in MiB, for CPU - this is in cores. + Min values for the recommendation, applies to every container. For memory - this is in MiB, for CPU - this is in cores. + type: number + overhead: + description: |- + (Number) Overhead for the recommendation, e.g. 0.1 will result in 10% higher recommendation + Overhead for the recommendation, e.g. `0.1` will result in 10% higher recommendation + type: number + type: object + type: array + downscaling: + description: '(Block List, Max: 1) (see below for nested schema)' + items: + properties: + applyType: + description: |- + (String) Recommendation apply type. + Defines the apply type to be used when downscaling. + - IMMEDIATE - pods are restarted immediately when new recommendation is generated. + - DEFERRED - pods are not restarted and recommendation values are applied during natural restarts only (new deployment, etc.) + type: string + type: object + type: array + id: + description: (String) The ID of this resource. + type: string + managementOption: + description: |- + (String) Defines possible options for workload management. + Defines possible options for workload management. + - READ_ONLY - workload watched (metrics collected), but no actions performed by CAST AI. + - MANAGED - workload watched (metrics collected), CAST AI may perform actions on the workload. + type: string + memory: + description: '(Block List, Min: 1, Max: 1) (see below for nested + schema)' + items: + properties: + applyThreshold: + description: |- + (Number) The threshold of when to apply the recommendation. Recommendation will be applied when diff of current requests and new recommendation is greater than set value + The threshold of when to apply the recommendation. Recommendation will be applied when diff of current requests and new recommendation is greater than set value + type: number + args: + description: |- + i.e. for QUANTILE this should be a [0, 1] float. MAX doesn't accept any args + The arguments for the function - i.e. for `QUANTILE` this should be a [0, 1] float. `MAX` doesn't accept any args + items: + type: string + type: array + function: + description: |- + (String) The function used to calculate the resource recommendation. Supported values: QUANTILE, MAX + The function used to calculate the resource recommendation. Supported values: `QUANTILE`, `MAX` + type: string + lookBackPeriodSeconds: + description: |- + (Number) The look back period in seconds for the recommendation. + The look back period in seconds for the recommendation. + type: number + max: + description: |- + this is in MiB, for CPU - this is in cores. 
+ Max values for the recommendation, applies to every container. For memory - this is in MiB, for CPU - this is in cores. + type: number + min: + description: |- + this is in MiB, for CPU - this is in cores. + Min values for the recommendation, applies to every container. For memory - this is in MiB, for CPU - this is in cores. + type: number + overhead: + description: |- + (Number) Overhead for the recommendation, e.g. 0.1 will result in 10% higher recommendation + Overhead for the recommendation, e.g. `0.1` will result in 10% higher recommendation + type: number + type: object + type: array + memoryEvent: + description: '(Block List, Max: 1) (see below for nested schema)' + items: + properties: + applyType: + description: |- + (String) Recommendation apply type. + Defines the apply type to be used when applying recommendation for memory related event. + - IMMEDIATE - pods are restarted immediately when new recommendation is generated. + - DEFERRED - pods are not restarted and recommendation values are applied during natural restarts only (new deployment, etc.) + type: string + type: object + type: array + name: + description: |- + (String) Scaling policy name + Scaling policy name + type: string + startup: + description: '(Block List, Max: 1) (see below for nested schema)' + items: + properties: + periodSeconds: + description: |- + (Number) Defines the duration (in seconds) during which elevated resource usage is expected at startup. + When set, recommendations will be adjusted to disregard resource spikes within this period. + If not specified, the workload will receive standard recommendations without startup considerations. + Defines the duration (in seconds) during which elevated resource usage is expected at startup. + When set, recommendations will be adjusted to disregard resource spikes within this period. + If not specified, the workload will receive standard recommendations without startup considerations. + type: number + type: object + type: array + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. 
+ type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {}
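
For reference, a minimal hand-written manifest exercising the new kind could look like the sketch below. It is only illustrative: it assumes a ProviderConfig named "default" exists, the cluster id and all numeric values are placeholders, and it sets just the fields the CRD validations above mark as required (applyType, clusterId, cpu, managementOption, memory, name) plus an optional startup block.

apiVersion: workload.castai.upbound.io/v1alpha1
kind: ScalingPolicy
metadata:
  name: web-services
spec:
  providerConfigRef:
    name: default            # assumed ProviderConfig name
  forProvider:
    name: web-services
    clusterId: 11111111-2222-3333-4444-555555555555   # placeholder CAST AI cluster id
    applyType: DEFERRED       # apply recommendations on natural restarts only
    managementOption: READ_ONLY   # collect metrics without acting on the workload
    cpu:
      - function: QUANTILE
        args:
          - "0.9"             # QUANTILE takes a [0, 1] float argument
        applyThreshold: 0.1
        lookBackPeriodSeconds: 86400
        min: 0.1              # cores
        max: 2                # cores
        overhead: 0.1
    memory:
      - function: MAX         # MAX does not accept args
        applyThreshold: 0.2
        overhead: 0.25
    startup:
      - periodSeconds: 180    # ignore resource spikes during the first 3 minutes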