From 19281076114a2eb4004c7f38a94c99213b0eef3d Mon Sep 17 00:00:00 2001 From: abdurrahman-osman Date: Mon, 16 Oct 2023 23:23:33 +0300 Subject: [PATCH] typo fixes --- apis/tmc/v1alpha1/zz_cluster_group_types.go | 116 ++ .../v1alpha1/zz_cluster_node_pool_types.go | 336 ++++++ apis/tmc/v1alpha1/zz_generated.deepcopy.go | 1033 +++++++++++++++++ apis/tmc/v1alpha1/zz_generated.managed.go | 152 +++ apis/tmc/v1alpha1/zz_generated.managedlist.go | 18 + apis/tmc/v1alpha1/zz_generated_terraformed.go | 148 +++ config/external_name.go | 8 +- .../tmc/cluster_group/zz_controller.go | 55 + .../tmc/cluster_node_pool/zz_controller.go | 55 + internal/controller/zz_setup.go | 4 + .../tmc.crossplane.io_cluster_groups.yaml | 356 ++++++ .../tmc.crossplane.io_cluster_node_pools.yaml | 607 ++++++++++ 12 files changed, 2884 insertions(+), 4 deletions(-) create mode 100755 apis/tmc/v1alpha1/zz_cluster_group_types.go create mode 100755 apis/tmc/v1alpha1/zz_cluster_node_pool_types.go create mode 100755 internal/controller/tmc/cluster_group/zz_controller.go create mode 100755 internal/controller/tmc/cluster_node_pool/zz_controller.go create mode 100644 package/crds/tmc.crossplane.io_cluster_groups.yaml create mode 100644 package/crds/tmc.crossplane.io_cluster_node_pools.yaml diff --git a/apis/tmc/v1alpha1/zz_cluster_group_types.go b/apis/tmc/v1alpha1/zz_cluster_group_types.go new file mode 100755 index 0000000..4d9209e --- /dev/null +++ b/apis/tmc/v1alpha1/zz_cluster_group_types.go @@ -0,0 +1,116 @@ +/* +Copyright 2022 Upbound Inc. +*/ + +// Code generated by upjet. DO NOT EDIT. + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type Cluster_GroupMetaObservation struct { + + // Annotations for the resource + Annotations map[string]*string `json:"annotations,omitempty" tf:"annotations,omitempty"` + + // Description of the resource + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Labels for the resource + Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"` + + // Resource version of the resource + ResourceVersion *string `json:"resourceVersion,omitempty" tf:"resource_version,omitempty"` + + // UID of the resource + UID *string `json:"uid,omitempty" tf:"uid,omitempty"` +} + +type Cluster_GroupMetaParameters struct { + + // Annotations for the resource + // +kubebuilder:validation:Optional + Annotations map[string]*string `json:"annotations,omitempty" tf:"annotations,omitempty"` + + // Description of the resource + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Labels for the resource + // +kubebuilder:validation:Optional + Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"` +} + +type Cluster_GroupObservation struct { + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Metadata for the resource + Meta []Cluster_GroupMetaObservation `json:"meta,omitempty" tf:"meta,omitempty"` + + Name *string `json:"name,omitempty" tf:"name,omitempty"` +} + +type Cluster_GroupParameters struct { + + // Metadata for the resource + // +kubebuilder:validation:Optional + Meta []Cluster_GroupMetaParameters `json:"meta,omitempty" tf:"meta,omitempty"` + + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` +} + +// Cluster_GroupSpec defines the desired state of Cluster_Group +type Cluster_GroupSpec 
struct { + v1.ResourceSpec `json:",inline"` + ForProvider Cluster_GroupParameters `json:"forProvider"` +} + +// Cluster_GroupStatus defines the observed state of Cluster_Group. +type Cluster_GroupStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider Cluster_GroupObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true + +// Cluster_Group is the Schema for the Cluster_Groups API. +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:subresource:status +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,tmc} +type Cluster_Group struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="self.managementPolicy == 'ObserveOnly' || has(self.forProvider.name)",message="name is a required parameter" + Spec Cluster_GroupSpec `json:"spec"` + Status Cluster_GroupStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// Cluster_GroupList contains a list of Cluster_Groups +type Cluster_GroupList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Cluster_Group `json:"items"` +} + +// Repository type metadata. +var ( + Cluster_Group_Kind = "Cluster_Group" + Cluster_Group_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: Cluster_Group_Kind}.String() + Cluster_Group_KindAPIVersion = Cluster_Group_Kind + "." + CRDGroupVersion.String() + Cluster_Group_GroupVersionKind = CRDGroupVersion.WithKind(Cluster_Group_Kind) +) + +func init() { + SchemeBuilder.Register(&Cluster_Group{}, &Cluster_GroupList{}) +} diff --git a/apis/tmc/v1alpha1/zz_cluster_node_pool_types.go b/apis/tmc/v1alpha1/zz_cluster_node_pool_types.go new file mode 100755 index 0000000..53d68c3 --- /dev/null +++ b/apis/tmc/v1alpha1/zz_cluster_node_pool_types.go @@ -0,0 +1,336 @@ +/* +Copyright 2022 Upbound Inc. +*/ + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type Cluster_Node_PoolMetaObservation struct { + + // Annotations for the resource + Annotations map[string]*string `json:"annotations,omitempty" tf:"annotations,omitempty"` + + // Description of the resource + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Labels for the resource + Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"` + + // Resource version of the resource + ResourceVersion *string `json:"resourceVersion,omitempty" tf:"resource_version,omitempty"` + + // UID of the resource + UID *string `json:"uid,omitempty" tf:"uid,omitempty"` +} + +type Cluster_Node_PoolMetaParameters struct { + + // Annotations for the resource + // +kubebuilder:validation:Optional + Annotations map[string]*string `json:"annotations,omitempty" tf:"annotations,omitempty"` + + // Description of the resource + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Labels for the resource + // +kubebuilder:validation:Optional + Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"` +} + +type Cluster_Node_PoolObservation struct { + + // Name of the cluster + ClusterName *string `json:"clusterName,omitempty" tf:"cluster_name,omitempty"` + + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Name of the management cluster + ManagementClusterName *string `json:"managementClusterName,omitempty" tf:"management_cluster_name,omitempty"` + + // Metadata for the resource + Meta []Cluster_Node_PoolMetaObservation `json:"meta,omitempty" tf:"meta,omitempty"` + + // Name of this nodepool + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Provisioner of the cluster + ProvisionerName *string `json:"provisionerName,omitempty" tf:"provisioner_name,omitempty"` + + // Wait timeout duration until nodepool resource reaches READY state. Accepted timeout duration values like 5s, 45m, or 3h, higher than zero. + ReadyWaitTimeout *string `json:"readyWaitTimeout,omitempty" tf:"ready_wait_timeout,omitempty"` + + // Spec for the cluster nodepool + Spec []Cluster_Node_PoolSpecObservation `json:"spec,omitempty" tf:"spec,omitempty"` + + // Status of node pool resource + Status map[string]*string `json:"status,omitempty" tf:"status,omitempty"` +} + +type Cluster_Node_PoolParameters struct { + + // Name of the cluster + // +kubebuilder:validation:Optional + ClusterName *string `json:"clusterName,omitempty" tf:"cluster_name,omitempty"` + + // Name of the management cluster + // +kubebuilder:validation:Optional + ManagementClusterName *string `json:"managementClusterName,omitempty" tf:"management_cluster_name,omitempty"` + + // Metadata for the resource + // +kubebuilder:validation:Optional + Meta []Cluster_Node_PoolMetaParameters `json:"meta,omitempty" tf:"meta,omitempty"` + + // Name of this nodepool + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Provisioner of the cluster + // +kubebuilder:validation:Optional + ProvisionerName *string `json:"provisionerName,omitempty" tf:"provisioner_name,omitempty"` + + // Wait timeout duration until nodepool resource reaches READY state. Accepted timeout duration values like 5s, 45m, or 3h, higher than zero. 
+ // +kubebuilder:validation:Optional + ReadyWaitTimeout *string `json:"readyWaitTimeout,omitempty" tf:"ready_wait_timeout,omitempty"` + + // Spec for the cluster nodepool + // +kubebuilder:validation:Optional + Spec []Cluster_Node_PoolSpecParameters `json:"spec,omitempty" tf:"spec,omitempty"` +} + +type Cluster_Node_PoolSpecObservation struct { + + // Cloud labels + CloudLabels map[string]*string `json:"cloudLabels,omitempty" tf:"cloud_labels,omitempty"` + + // Node labels + NodeLabels map[string]*string `json:"nodeLabels,omitempty" tf:"node_labels,omitempty"` + + // TKGAWSNodepool is the nodepool spec for TKG AWS cluster + TkgAws []Cluster_Node_PoolSpecTkgAwsObservation `json:"tkgAws,omitempty" tf:"tkg_aws,omitempty"` + + // TKGServiceVsphereNodepool is the nodepool spec for TKG service vsphere cluster + TkgServiceVsphere []Cluster_Node_PoolSpecTkgServiceVsphereObservation `json:"tkgServiceVsphere,omitempty" tf:"tkg_service_vsphere,omitempty"` + + // TkgVsphereNodepool is the nodepool config for the TKG vsphere cluster + TkgVsphere []Cluster_Node_PoolSpecTkgVsphereObservation `json:"tkgVsphere,omitempty" tf:"tkg_vsphere,omitempty"` + + // Count is the number of nodes + WorkerNodeCount *string `json:"workerNodeCount,omitempty" tf:"worker_node_count,omitempty"` +} + +type Cluster_Node_PoolSpecParameters struct { + + // Cloud labels + // +kubebuilder:validation:Optional + CloudLabels map[string]*string `json:"cloudLabels,omitempty" tf:"cloud_labels,omitempty"` + + // Node labels + // +kubebuilder:validation:Optional + NodeLabels map[string]*string `json:"nodeLabels,omitempty" tf:"node_labels,omitempty"` + + // TKGAWSNodepool is the nodepool spec for TKG AWS cluster + // +kubebuilder:validation:Optional + TkgAws []Cluster_Node_PoolSpecTkgAwsParameters `json:"tkgAws,omitempty" tf:"tkg_aws,omitempty"` + + // TKGServiceVsphereNodepool is the nodepool spec for TKG service vsphere cluster + // +kubebuilder:validation:Optional + TkgServiceVsphere []Cluster_Node_PoolSpecTkgServiceVsphereParameters `json:"tkgServiceVsphere,omitempty" tf:"tkg_service_vsphere,omitempty"` + + // TkgVsphereNodepool is the nodepool config for the TKG vsphere cluster + // +kubebuilder:validation:Optional + TkgVsphere []Cluster_Node_PoolSpecTkgVsphereParameters `json:"tkgVsphere,omitempty" tf:"tkg_vsphere,omitempty"` + + // Count is the number of nodes + // +kubebuilder:validation:Required + WorkerNodeCount *string `json:"workerNodeCount" tf:"worker_node_count,omitempty"` +} + +type Cluster_Node_PoolSpecTkgAwsObservation struct { + + // Nodepool instance type + Class *string `json:"class,omitempty" tf:"class,omitempty"` + + // Storage Class to be used for storage of the disks which store the root filesystem of the nodes + StorageClass *string `json:"storageClass,omitempty" tf:"storage_class,omitempty"` +} + +type Cluster_Node_PoolSpecTkgAwsParameters struct { + + // Nodepool instance type + // +kubebuilder:validation:Optional + Class *string `json:"class,omitempty" tf:"class,omitempty"` + + // Storage Class to be used for storage of the disks which store the root filesystem of the nodes + // +kubebuilder:validation:Optional + StorageClass *string `json:"storageClass,omitempty" tf:"storage_class,omitempty"` +} + +type Cluster_Node_PoolSpecTkgServiceVsphereObservation struct { + + // Nodepool instance type + Class *string `json:"class,omitempty" tf:"class,omitempty"` + + // Configure the failure domain of node pool. The potential values could be found using cluster:options api. 
This parameter will be ignored by the backend if the TKG service vsphere cluster doesn't support. + FailureDomain *string `json:"failureDomain,omitempty" tf:"failure_domain,omitempty"` + + // Storage Class to be used for storage of the disks which store the root filesystem of the nodes + StorageClass *string `json:"storageClass,omitempty" tf:"storage_class,omitempty"` + + // Configurable volumes for nodepool nodes + Volumes []SpecTkgServiceVsphereVolumesObservation `json:"volumes,omitempty" tf:"volumes,omitempty"` +} + +type Cluster_Node_PoolSpecTkgServiceVsphereParameters struct { + + // Nodepool instance type + // +kubebuilder:validation:Required + Class *string `json:"class" tf:"class,omitempty"` + + // Configure the failure domain of node pool. The potential values could be found using cluster:options api. This parameter will be ignored by the backend if the TKG service vsphere cluster doesn't support. + // +kubebuilder:validation:Optional + FailureDomain *string `json:"failureDomain,omitempty" tf:"failure_domain,omitempty"` + + // Storage Class to be used for storage of the disks which store the root filesystem of the nodes + // +kubebuilder:validation:Required + StorageClass *string `json:"storageClass" tf:"storage_class,omitempty"` + + // Configurable volumes for nodepool nodes + // +kubebuilder:validation:Optional + Volumes []SpecTkgServiceVsphereVolumesParameters `json:"volumes,omitempty" tf:"volumes,omitempty"` +} + +type Cluster_Node_PoolSpecTkgVsphereObservation struct { + + // VM specific configuration + VMConfig []SpecTkgVsphereVMConfigObservation `json:"vmConfig,omitempty" tf:"vm_config,omitempty"` +} + +type Cluster_Node_PoolSpecTkgVsphereParameters struct { + + // VM specific configuration + // +kubebuilder:validation:Required + VMConfig []SpecTkgVsphereVMConfigParameters `json:"vmConfig" tf:"vm_config,omitempty"` +} + +type SpecTkgServiceVsphereVolumesObservation struct { + + // Volume capacity is in gib + Capacity *float64 `json:"capacity,omitempty" tf:"capacity,omitempty"` + + // It is the directory where the volume device is to be mounted + MountPath *string `json:"mountPath,omitempty" tf:"mount_path,omitempty"` + + // It is the volume name + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // This is the storage class for PVC which in case omitted, default storage class will be used for the disks + PvcStorageClass *string `json:"pvcStorageClass,omitempty" tf:"pvc_storage_class,omitempty"` +} + +type SpecTkgServiceVsphereVolumesParameters struct { + + // Volume capacity is in gib + // +kubebuilder:validation:Optional + Capacity *float64 `json:"capacity,omitempty" tf:"capacity,omitempty"` + + // It is the directory where the volume device is to be mounted + // +kubebuilder:validation:Optional + MountPath *string `json:"mountPath,omitempty" tf:"mount_path,omitempty"` + + // It is the volume name + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // This is the storage class for PVC which in case omitted, default storage class will be used for the disks + // +kubebuilder:validation:Optional + PvcStorageClass *string `json:"pvcStorageClass,omitempty" tf:"pvc_storage_class,omitempty"` +} + +type SpecTkgVsphereVMConfigObservation struct { + + // Number of CPUs per node + CPU *string `json:"cpu,omitempty" tf:"cpu,omitempty"` + + // Root disk size in gigabytes for the VM + DiskSize *string `json:"diskSize,omitempty" tf:"disk_size,omitempty"` + + // Memory associated with the node in megabytes + Memory *string 
`json:"memory,omitempty" tf:"memory,omitempty"` +} + +type SpecTkgVsphereVMConfigParameters struct { + + // Number of CPUs per node + // +kubebuilder:validation:Optional + CPU *string `json:"cpu,omitempty" tf:"cpu,omitempty"` + + // Root disk size in gigabytes for the VM + // +kubebuilder:validation:Optional + DiskSize *string `json:"diskSize,omitempty" tf:"disk_size,omitempty"` + + // Memory associated with the node in megabytes + // +kubebuilder:validation:Optional + Memory *string `json:"memory,omitempty" tf:"memory,omitempty"` +} + +// Cluster_Node_PoolSpec defines the desired state of Cluster_Node_Pool +type Cluster_Node_PoolSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider Cluster_Node_PoolParameters `json:"forProvider"` +} + +// Cluster_Node_PoolStatus defines the observed state of Cluster_Node_Pool. +type Cluster_Node_PoolStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider Cluster_Node_PoolObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true + +// Cluster_Node_Pool is the Schema for the Cluster_Node_Pools API. +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:subresource:status +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,tmc} +type Cluster_Node_Pool struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="self.managementPolicy == 'ObserveOnly' || has(self.forProvider.clusterName)",message="clusterName is a required parameter" + // +kubebuilder:validation:XValidation:rule="self.managementPolicy == 'ObserveOnly' || has(self.forProvider.managementClusterName)",message="managementClusterName is a required parameter" + // +kubebuilder:validation:XValidation:rule="self.managementPolicy == 'ObserveOnly' || has(self.forProvider.name)",message="name is a required parameter" + // +kubebuilder:validation:XValidation:rule="self.managementPolicy == 'ObserveOnly' || has(self.forProvider.provisionerName)",message="provisionerName is a required parameter" + Spec Cluster_Node_PoolSpec `json:"spec"` + Status Cluster_Node_PoolStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// Cluster_Node_PoolList contains a list of Cluster_Node_Pools +type Cluster_Node_PoolList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Cluster_Node_Pool `json:"items"` +} + +// Repository type metadata. +var ( + Cluster_Node_Pool_Kind = "Cluster_Node_Pool" + Cluster_Node_Pool_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: Cluster_Node_Pool_Kind}.String() + Cluster_Node_Pool_KindAPIVersion = Cluster_Node_Pool_Kind + "." 
+ CRDGroupVersion.String() + Cluster_Node_Pool_GroupVersionKind = CRDGroupVersion.WithKind(Cluster_Node_Pool_Kind) +) + +func init() { + SchemeBuilder.Register(&Cluster_Node_Pool{}, &Cluster_Node_PoolList{}) +} diff --git a/apis/tmc/v1alpha1/zz_generated.deepcopy.go b/apis/tmc/v1alpha1/zz_generated.deepcopy.go index 1bbf2d8..a00b6a0 100644 --- a/apis/tmc/v1alpha1/zz_generated.deepcopy.go +++ b/apis/tmc/v1alpha1/zz_generated.deepcopy.go @@ -1242,6 +1242,909 @@ func (in *ClusterStatus) DeepCopy() *ClusterStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Cluster_Group) DeepCopyInto(out *Cluster_Group) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Cluster_Group. +func (in *Cluster_Group) DeepCopy() *Cluster_Group { + if in == nil { + return nil + } + out := new(Cluster_Group) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Cluster_Group) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Cluster_GroupList) DeepCopyInto(out *Cluster_GroupList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Cluster_Group, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Cluster_GroupList. +func (in *Cluster_GroupList) DeepCopy() *Cluster_GroupList { + if in == nil { + return nil + } + out := new(Cluster_GroupList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Cluster_GroupList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Cluster_GroupMetaObservation) DeepCopyInto(out *Cluster_GroupMetaObservation) { + *out = *in + if in.Annotations != nil { + in, out := &in.Annotations, &out.Annotations + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.ResourceVersion != nil { + in, out := &in.ResourceVersion, &out.ResourceVersion + *out = new(string) + **out = **in + } + if in.UID != nil { + in, out := &in.UID, &out.UID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Cluster_GroupMetaObservation. +func (in *Cluster_GroupMetaObservation) DeepCopy() *Cluster_GroupMetaObservation { + if in == nil { + return nil + } + out := new(Cluster_GroupMetaObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Cluster_GroupMetaParameters) DeepCopyInto(out *Cluster_GroupMetaParameters) { + *out = *in + if in.Annotations != nil { + in, out := &in.Annotations, &out.Annotations + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Cluster_GroupMetaParameters. +func (in *Cluster_GroupMetaParameters) DeepCopy() *Cluster_GroupMetaParameters { + if in == nil { + return nil + } + out := new(Cluster_GroupMetaParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Cluster_GroupObservation) DeepCopyInto(out *Cluster_GroupObservation) { + *out = *in + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Meta != nil { + in, out := &in.Meta, &out.Meta + *out = make([]Cluster_GroupMetaObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Cluster_GroupObservation. 
+func (in *Cluster_GroupObservation) DeepCopy() *Cluster_GroupObservation { + if in == nil { + return nil + } + out := new(Cluster_GroupObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Cluster_GroupParameters) DeepCopyInto(out *Cluster_GroupParameters) { + *out = *in + if in.Meta != nil { + in, out := &in.Meta, &out.Meta + *out = make([]Cluster_GroupMetaParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Cluster_GroupParameters. +func (in *Cluster_GroupParameters) DeepCopy() *Cluster_GroupParameters { + if in == nil { + return nil + } + out := new(Cluster_GroupParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Cluster_GroupSpec) DeepCopyInto(out *Cluster_GroupSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Cluster_GroupSpec. +func (in *Cluster_GroupSpec) DeepCopy() *Cluster_GroupSpec { + if in == nil { + return nil + } + out := new(Cluster_GroupSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Cluster_GroupStatus) DeepCopyInto(out *Cluster_GroupStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Cluster_GroupStatus. +func (in *Cluster_GroupStatus) DeepCopy() *Cluster_GroupStatus { + if in == nil { + return nil + } + out := new(Cluster_GroupStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Cluster_Node_Pool) DeepCopyInto(out *Cluster_Node_Pool) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Cluster_Node_Pool. +func (in *Cluster_Node_Pool) DeepCopy() *Cluster_Node_Pool { + if in == nil { + return nil + } + out := new(Cluster_Node_Pool) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Cluster_Node_Pool) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Cluster_Node_PoolList) DeepCopyInto(out *Cluster_Node_PoolList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Cluster_Node_Pool, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Cluster_Node_PoolList. +func (in *Cluster_Node_PoolList) DeepCopy() *Cluster_Node_PoolList { + if in == nil { + return nil + } + out := new(Cluster_Node_PoolList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Cluster_Node_PoolList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Cluster_Node_PoolMetaObservation) DeepCopyInto(out *Cluster_Node_PoolMetaObservation) { + *out = *in + if in.Annotations != nil { + in, out := &in.Annotations, &out.Annotations + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.ResourceVersion != nil { + in, out := &in.ResourceVersion, &out.ResourceVersion + *out = new(string) + **out = **in + } + if in.UID != nil { + in, out := &in.UID, &out.UID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Cluster_Node_PoolMetaObservation. +func (in *Cluster_Node_PoolMetaObservation) DeepCopy() *Cluster_Node_PoolMetaObservation { + if in == nil { + return nil + } + out := new(Cluster_Node_PoolMetaObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Cluster_Node_PoolMetaParameters) DeepCopyInto(out *Cluster_Node_PoolMetaParameters) { + *out = *in + if in.Annotations != nil { + in, out := &in.Annotations, &out.Annotations + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Cluster_Node_PoolMetaParameters. 
+func (in *Cluster_Node_PoolMetaParameters) DeepCopy() *Cluster_Node_PoolMetaParameters { + if in == nil { + return nil + } + out := new(Cluster_Node_PoolMetaParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Cluster_Node_PoolObservation) DeepCopyInto(out *Cluster_Node_PoolObservation) { + *out = *in + if in.ClusterName != nil { + in, out := &in.ClusterName, &out.ClusterName + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.ManagementClusterName != nil { + in, out := &in.ManagementClusterName, &out.ManagementClusterName + *out = new(string) + **out = **in + } + if in.Meta != nil { + in, out := &in.Meta, &out.Meta + *out = make([]Cluster_Node_PoolMetaObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.ProvisionerName != nil { + in, out := &in.ProvisionerName, &out.ProvisionerName + *out = new(string) + **out = **in + } + if in.ReadyWaitTimeout != nil { + in, out := &in.ReadyWaitTimeout, &out.ReadyWaitTimeout + *out = new(string) + **out = **in + } + if in.Spec != nil { + in, out := &in.Spec, &out.Spec + *out = make([]Cluster_Node_PoolSpecObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Cluster_Node_PoolObservation. +func (in *Cluster_Node_PoolObservation) DeepCopy() *Cluster_Node_PoolObservation { + if in == nil { + return nil + } + out := new(Cluster_Node_PoolObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Cluster_Node_PoolParameters) DeepCopyInto(out *Cluster_Node_PoolParameters) { + *out = *in + if in.ClusterName != nil { + in, out := &in.ClusterName, &out.ClusterName + *out = new(string) + **out = **in + } + if in.ManagementClusterName != nil { + in, out := &in.ManagementClusterName, &out.ManagementClusterName + *out = new(string) + **out = **in + } + if in.Meta != nil { + in, out := &in.Meta, &out.Meta + *out = make([]Cluster_Node_PoolMetaParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.ProvisionerName != nil { + in, out := &in.ProvisionerName, &out.ProvisionerName + *out = new(string) + **out = **in + } + if in.ReadyWaitTimeout != nil { + in, out := &in.ReadyWaitTimeout, &out.ReadyWaitTimeout + *out = new(string) + **out = **in + } + if in.Spec != nil { + in, out := &in.Spec, &out.Spec + *out = make([]Cluster_Node_PoolSpecParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Cluster_Node_PoolParameters. 
+func (in *Cluster_Node_PoolParameters) DeepCopy() *Cluster_Node_PoolParameters { + if in == nil { + return nil + } + out := new(Cluster_Node_PoolParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Cluster_Node_PoolSpec) DeepCopyInto(out *Cluster_Node_PoolSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Cluster_Node_PoolSpec. +func (in *Cluster_Node_PoolSpec) DeepCopy() *Cluster_Node_PoolSpec { + if in == nil { + return nil + } + out := new(Cluster_Node_PoolSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Cluster_Node_PoolSpecObservation) DeepCopyInto(out *Cluster_Node_PoolSpecObservation) { + *out = *in + if in.CloudLabels != nil { + in, out := &in.CloudLabels, &out.CloudLabels + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.NodeLabels != nil { + in, out := &in.NodeLabels, &out.NodeLabels + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TkgAws != nil { + in, out := &in.TkgAws, &out.TkgAws + *out = make([]Cluster_Node_PoolSpecTkgAwsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.TkgServiceVsphere != nil { + in, out := &in.TkgServiceVsphere, &out.TkgServiceVsphere + *out = make([]Cluster_Node_PoolSpecTkgServiceVsphereObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.TkgVsphere != nil { + in, out := &in.TkgVsphere, &out.TkgVsphere + *out = make([]Cluster_Node_PoolSpecTkgVsphereObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.WorkerNodeCount != nil { + in, out := &in.WorkerNodeCount, &out.WorkerNodeCount + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Cluster_Node_PoolSpecObservation. +func (in *Cluster_Node_PoolSpecObservation) DeepCopy() *Cluster_Node_PoolSpecObservation { + if in == nil { + return nil + } + out := new(Cluster_Node_PoolSpecObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Cluster_Node_PoolSpecParameters) DeepCopyInto(out *Cluster_Node_PoolSpecParameters) { + *out = *in + if in.CloudLabels != nil { + in, out := &in.CloudLabels, &out.CloudLabels + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.NodeLabels != nil { + in, out := &in.NodeLabels, &out.NodeLabels + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TkgAws != nil { + in, out := &in.TkgAws, &out.TkgAws + *out = make([]Cluster_Node_PoolSpecTkgAwsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.TkgServiceVsphere != nil { + in, out := &in.TkgServiceVsphere, &out.TkgServiceVsphere + *out = make([]Cluster_Node_PoolSpecTkgServiceVsphereParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.TkgVsphere != nil { + in, out := &in.TkgVsphere, &out.TkgVsphere + *out = make([]Cluster_Node_PoolSpecTkgVsphereParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.WorkerNodeCount != nil { + in, out := &in.WorkerNodeCount, &out.WorkerNodeCount + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Cluster_Node_PoolSpecParameters. +func (in *Cluster_Node_PoolSpecParameters) DeepCopy() *Cluster_Node_PoolSpecParameters { + if in == nil { + return nil + } + out := new(Cluster_Node_PoolSpecParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Cluster_Node_PoolSpecTkgAwsObservation) DeepCopyInto(out *Cluster_Node_PoolSpecTkgAwsObservation) { + *out = *in + if in.Class != nil { + in, out := &in.Class, &out.Class + *out = new(string) + **out = **in + } + if in.StorageClass != nil { + in, out := &in.StorageClass, &out.StorageClass + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Cluster_Node_PoolSpecTkgAwsObservation. +func (in *Cluster_Node_PoolSpecTkgAwsObservation) DeepCopy() *Cluster_Node_PoolSpecTkgAwsObservation { + if in == nil { + return nil + } + out := new(Cluster_Node_PoolSpecTkgAwsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Cluster_Node_PoolSpecTkgAwsParameters) DeepCopyInto(out *Cluster_Node_PoolSpecTkgAwsParameters) { + *out = *in + if in.Class != nil { + in, out := &in.Class, &out.Class + *out = new(string) + **out = **in + } + if in.StorageClass != nil { + in, out := &in.StorageClass, &out.StorageClass + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Cluster_Node_PoolSpecTkgAwsParameters. 
+func (in *Cluster_Node_PoolSpecTkgAwsParameters) DeepCopy() *Cluster_Node_PoolSpecTkgAwsParameters { + if in == nil { + return nil + } + out := new(Cluster_Node_PoolSpecTkgAwsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Cluster_Node_PoolSpecTkgServiceVsphereObservation) DeepCopyInto(out *Cluster_Node_PoolSpecTkgServiceVsphereObservation) { + *out = *in + if in.Class != nil { + in, out := &in.Class, &out.Class + *out = new(string) + **out = **in + } + if in.FailureDomain != nil { + in, out := &in.FailureDomain, &out.FailureDomain + *out = new(string) + **out = **in + } + if in.StorageClass != nil { + in, out := &in.StorageClass, &out.StorageClass + *out = new(string) + **out = **in + } + if in.Volumes != nil { + in, out := &in.Volumes, &out.Volumes + *out = make([]SpecTkgServiceVsphereVolumesObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Cluster_Node_PoolSpecTkgServiceVsphereObservation. +func (in *Cluster_Node_PoolSpecTkgServiceVsphereObservation) DeepCopy() *Cluster_Node_PoolSpecTkgServiceVsphereObservation { + if in == nil { + return nil + } + out := new(Cluster_Node_PoolSpecTkgServiceVsphereObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Cluster_Node_PoolSpecTkgServiceVsphereParameters) DeepCopyInto(out *Cluster_Node_PoolSpecTkgServiceVsphereParameters) { + *out = *in + if in.Class != nil { + in, out := &in.Class, &out.Class + *out = new(string) + **out = **in + } + if in.FailureDomain != nil { + in, out := &in.FailureDomain, &out.FailureDomain + *out = new(string) + **out = **in + } + if in.StorageClass != nil { + in, out := &in.StorageClass, &out.StorageClass + *out = new(string) + **out = **in + } + if in.Volumes != nil { + in, out := &in.Volumes, &out.Volumes + *out = make([]SpecTkgServiceVsphereVolumesParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Cluster_Node_PoolSpecTkgServiceVsphereParameters. +func (in *Cluster_Node_PoolSpecTkgServiceVsphereParameters) DeepCopy() *Cluster_Node_PoolSpecTkgServiceVsphereParameters { + if in == nil { + return nil + } + out := new(Cluster_Node_PoolSpecTkgServiceVsphereParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Cluster_Node_PoolSpecTkgVsphereObservation) DeepCopyInto(out *Cluster_Node_PoolSpecTkgVsphereObservation) { + *out = *in + if in.VMConfig != nil { + in, out := &in.VMConfig, &out.VMConfig + *out = make([]SpecTkgVsphereVMConfigObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Cluster_Node_PoolSpecTkgVsphereObservation. 
+func (in *Cluster_Node_PoolSpecTkgVsphereObservation) DeepCopy() *Cluster_Node_PoolSpecTkgVsphereObservation { + if in == nil { + return nil + } + out := new(Cluster_Node_PoolSpecTkgVsphereObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Cluster_Node_PoolSpecTkgVsphereParameters) DeepCopyInto(out *Cluster_Node_PoolSpecTkgVsphereParameters) { + *out = *in + if in.VMConfig != nil { + in, out := &in.VMConfig, &out.VMConfig + *out = make([]SpecTkgVsphereVMConfigParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Cluster_Node_PoolSpecTkgVsphereParameters. +func (in *Cluster_Node_PoolSpecTkgVsphereParameters) DeepCopy() *Cluster_Node_PoolSpecTkgVsphereParameters { + if in == nil { + return nil + } + out := new(Cluster_Node_PoolSpecTkgVsphereParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Cluster_Node_PoolStatus) DeepCopyInto(out *Cluster_Node_PoolStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Cluster_Node_PoolStatus. +func (in *Cluster_Node_PoolStatus) DeepCopy() *Cluster_Node_PoolStatus { + if in == nil { + return nil + } + out := new(Cluster_Node_PoolStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ConfigObservation) DeepCopyInto(out *ConfigObservation) { *out = *in @@ -3541,6 +4444,76 @@ func (in *SpecTkgServiceVsphereParameters) DeepCopy() *SpecTkgServiceVspherePara return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SpecTkgServiceVsphereVolumesObservation) DeepCopyInto(out *SpecTkgServiceVsphereVolumesObservation) { + *out = *in + if in.Capacity != nil { + in, out := &in.Capacity, &out.Capacity + *out = new(float64) + **out = **in + } + if in.MountPath != nil { + in, out := &in.MountPath, &out.MountPath + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.PvcStorageClass != nil { + in, out := &in.PvcStorageClass, &out.PvcStorageClass + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpecTkgServiceVsphereVolumesObservation. +func (in *SpecTkgServiceVsphereVolumesObservation) DeepCopy() *SpecTkgServiceVsphereVolumesObservation { + if in == nil { + return nil + } + out := new(SpecTkgServiceVsphereVolumesObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SpecTkgServiceVsphereVolumesParameters) DeepCopyInto(out *SpecTkgServiceVsphereVolumesParameters) { + *out = *in + if in.Capacity != nil { + in, out := &in.Capacity, &out.Capacity + *out = new(float64) + **out = **in + } + if in.MountPath != nil { + in, out := &in.MountPath, &out.MountPath + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.PvcStorageClass != nil { + in, out := &in.PvcStorageClass, &out.PvcStorageClass + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpecTkgServiceVsphereVolumesParameters. +func (in *SpecTkgServiceVsphereVolumesParameters) DeepCopy() *SpecTkgServiceVsphereVolumesParameters { + if in == nil { + return nil + } + out := new(SpecTkgServiceVsphereVolumesParameters) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *SpecTkgVsphereObservation) DeepCopyInto(out *SpecTkgVsphereObservation) { *out = *in @@ -3585,6 +4558,66 @@ func (in *SpecTkgVsphereParameters) DeepCopy() *SpecTkgVsphereParameters { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SpecTkgVsphereVMConfigObservation) DeepCopyInto(out *SpecTkgVsphereVMConfigObservation) { + *out = *in + if in.CPU != nil { + in, out := &in.CPU, &out.CPU + *out = new(string) + **out = **in + } + if in.DiskSize != nil { + in, out := &in.DiskSize, &out.DiskSize + *out = new(string) + **out = **in + } + if in.Memory != nil { + in, out := &in.Memory, &out.Memory + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpecTkgVsphereVMConfigObservation. +func (in *SpecTkgVsphereVMConfigObservation) DeepCopy() *SpecTkgVsphereVMConfigObservation { + if in == nil { + return nil + } + out := new(SpecTkgVsphereVMConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SpecTkgVsphereVMConfigParameters) DeepCopyInto(out *SpecTkgVsphereVMConfigParameters) { + *out = *in + if in.CPU != nil { + in, out := &in.CPU, &out.CPU + *out = new(string) + **out = **in + } + if in.DiskSize != nil { + in, out := &in.DiskSize, &out.DiskSize + *out = new(string) + **out = **in + } + if in.Memory != nil { + in, out := &in.Memory, &out.Memory + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpecTkgVsphereVMConfigParameters. +func (in *SpecTkgVsphereVMConfigParameters) DeepCopy() *SpecTkgVsphereVMConfigParameters { + if in == nil { + return nil + } + out := new(SpecTkgVsphereVMConfigParameters) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *StorageConfigObservation) DeepCopyInto(out *StorageConfigObservation) { *out = *in diff --git a/apis/tmc/v1alpha1/zz_generated.managed.go b/apis/tmc/v1alpha1/zz_generated.managed.go index 1808c3f..d905d4f 100644 --- a/apis/tmc/v1alpha1/zz_generated.managed.go +++ b/apis/tmc/v1alpha1/zz_generated.managed.go @@ -158,3 +158,155 @@ func (mg *Cluster) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetail func (mg *Cluster) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { mg.Spec.WriteConnectionSecretToReference = r } + +// GetCondition of this Cluster_Group. +func (mg *Cluster_Group) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this Cluster_Group. +func (mg *Cluster_Group) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicy of this Cluster_Group. +func (mg *Cluster_Group) GetManagementPolicy() xpv1.ManagementPolicy { + return mg.Spec.ManagementPolicy +} + +// GetProviderConfigReference of this Cluster_Group. +func (mg *Cluster_Group) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +/* +GetProviderReference of this Cluster_Group. +Deprecated: Use GetProviderConfigReference. +*/ +func (mg *Cluster_Group) GetProviderReference() *xpv1.Reference { + return mg.Spec.ProviderReference +} + +// GetPublishConnectionDetailsTo of this Cluster_Group. +func (mg *Cluster_Group) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this Cluster_Group. +func (mg *Cluster_Group) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this Cluster_Group. +func (mg *Cluster_Group) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this Cluster_Group. +func (mg *Cluster_Group) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicy of this Cluster_Group. +func (mg *Cluster_Group) SetManagementPolicy(r xpv1.ManagementPolicy) { + mg.Spec.ManagementPolicy = r +} + +// SetProviderConfigReference of this Cluster_Group. +func (mg *Cluster_Group) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +/* +SetProviderReference of this Cluster_Group. +Deprecated: Use SetProviderConfigReference. +*/ +func (mg *Cluster_Group) SetProviderReference(r *xpv1.Reference) { + mg.Spec.ProviderReference = r +} + +// SetPublishConnectionDetailsTo of this Cluster_Group. +func (mg *Cluster_Group) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this Cluster_Group. +func (mg *Cluster_Group) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this Cluster_Node_Pool. +func (mg *Cluster_Node_Pool) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this Cluster_Node_Pool. +func (mg *Cluster_Node_Pool) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicy of this Cluster_Node_Pool. 
+func (mg *Cluster_Node_Pool) GetManagementPolicy() xpv1.ManagementPolicy { + return mg.Spec.ManagementPolicy +} + +// GetProviderConfigReference of this Cluster_Node_Pool. +func (mg *Cluster_Node_Pool) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +/* +GetProviderReference of this Cluster_Node_Pool. +Deprecated: Use GetProviderConfigReference. +*/ +func (mg *Cluster_Node_Pool) GetProviderReference() *xpv1.Reference { + return mg.Spec.ProviderReference +} + +// GetPublishConnectionDetailsTo of this Cluster_Node_Pool. +func (mg *Cluster_Node_Pool) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this Cluster_Node_Pool. +func (mg *Cluster_Node_Pool) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this Cluster_Node_Pool. +func (mg *Cluster_Node_Pool) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this Cluster_Node_Pool. +func (mg *Cluster_Node_Pool) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicy of this Cluster_Node_Pool. +func (mg *Cluster_Node_Pool) SetManagementPolicy(r xpv1.ManagementPolicy) { + mg.Spec.ManagementPolicy = r +} + +// SetProviderConfigReference of this Cluster_Node_Pool. +func (mg *Cluster_Node_Pool) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +/* +SetProviderReference of this Cluster_Node_Pool. +Deprecated: Use SetProviderConfigReference. +*/ +func (mg *Cluster_Node_Pool) SetProviderReference(r *xpv1.Reference) { + mg.Spec.ProviderReference = r +} + +// SetPublishConnectionDetailsTo of this Cluster_Node_Pool. +func (mg *Cluster_Node_Pool) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this Cluster_Node_Pool. +func (mg *Cluster_Node_Pool) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} diff --git a/apis/tmc/v1alpha1/zz_generated.managedlist.go b/apis/tmc/v1alpha1/zz_generated.managedlist.go index 04f0be3..876de2f 100644 --- a/apis/tmc/v1alpha1/zz_generated.managedlist.go +++ b/apis/tmc/v1alpha1/zz_generated.managedlist.go @@ -24,3 +24,21 @@ func (l *ClusterList) GetItems() []resource.Managed { } return items } + +// GetItems of this Cluster_GroupList. +func (l *Cluster_GroupList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this Cluster_Node_PoolList. 
+func (l *Cluster_Node_PoolList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} diff --git a/apis/tmc/v1alpha1/zz_generated_terraformed.go b/apis/tmc/v1alpha1/zz_generated_terraformed.go index fd77902..c35088d 100755 --- a/apis/tmc/v1alpha1/zz_generated_terraformed.go +++ b/apis/tmc/v1alpha1/zz_generated_terraformed.go @@ -160,3 +160,151 @@ func (tr *Cluster) LateInitialize(attrs []byte) (bool, error) { func (tr *Cluster) GetTerraformSchemaVersion() int { return 0 } + +// GetTerraformResourceType returns Terraform resource type for this Cluster_Group +func (mg *Cluster_Group) GetTerraformResourceType() string { + return "tanzu-mission-control_cluster_group" +} + +// GetConnectionDetailsMapping for this Cluster_Group +func (tr *Cluster_Group) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this Cluster_Group +func (tr *Cluster_Group) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this Cluster_Group +func (tr *Cluster_Group) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this Cluster_Group +func (tr *Cluster_Group) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this Cluster_Group +func (tr *Cluster_Group) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this Cluster_Group +func (tr *Cluster_Group) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// LateInitialize this Cluster_Group using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *Cluster_Group) LateInitialize(attrs []byte) (bool, error) { + params := &Cluster_GroupParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) 
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *Cluster_Group) GetTerraformSchemaVersion() int { + return 0 +} + +// GetTerraformResourceType returns Terraform resource type for this Cluster_Node_Pool +func (mg *Cluster_Node_Pool) GetTerraformResourceType() string { + return "tanzu-mission-control_cluster_node_pool" +} + +// GetConnectionDetailsMapping for this Cluster_Node_Pool +func (tr *Cluster_Node_Pool) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this Cluster_Node_Pool +func (tr *Cluster_Node_Pool) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this Cluster_Node_Pool +func (tr *Cluster_Node_Pool) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this Cluster_Node_Pool +func (tr *Cluster_Node_Pool) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this Cluster_Node_Pool +func (tr *Cluster_Node_Pool) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this Cluster_Node_Pool +func (tr *Cluster_Node_Pool) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// LateInitialize this Cluster_Node_Pool using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *Cluster_Node_Pool) LateInitialize(attrs []byte) (bool, error) { + params := &Cluster_Node_PoolParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *Cluster_Node_Pool) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/config/external_name.go b/config/external_name.go index 3c5561e..c597249 100644 --- a/config/external_name.go +++ b/config/external_name.go @@ -10,10 +10,10 @@ import "github.com/upbound/upjet/pkg/config" // provider. 
var ExternalNameConfigs = map[string]config.ExternalName{ // Import requires using a randomly generated ID from provider: nl-2e21sda - "tanzu-mission-control_akscluster": config.IdentifierFromProvider, - "tanzu-mission-control_cluster": config.IdentifierFromProvider, - "tanzu-mission-control_cluster_group" config.IdentifierFromProvider, - "tanzu-mission-control_cluster_node_pool" config.IdentifierFromProvider, + "tanzu-mission-control_akscluster": config.IdentifierFromProvider, + "tanzu-mission-control_cluster": config.IdentifierFromProvider, + "tanzu-mission-control_cluster_group": config.IdentifierFromProvider, + "tanzu-mission-control_cluster_node_pool": config.IdentifierFromProvider, } // ExternalNameConfigurations applies all external name configs listed in the diff --git a/internal/controller/tmc/cluster_group/zz_controller.go b/internal/controller/tmc/cluster_group/zz_controller.go new file mode 100755 index 0000000..008e3f0 --- /dev/null +++ b/internal/controller/tmc/cluster_group/zz_controller.go @@ -0,0 +1,55 @@ +/* +Copyright 2022 Upbound Inc. +*/ + +// Code generated by upjet. DO NOT EDIT. + +package cluster_group + +import ( + "time" + + "github.com/crossplane/crossplane-runtime/pkg/connection" + "github.com/crossplane/crossplane-runtime/pkg/event" + "github.com/crossplane/crossplane-runtime/pkg/ratelimiter" + "github.com/crossplane/crossplane-runtime/pkg/reconciler/managed" + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + tjcontroller "github.com/upbound/upjet/pkg/controller" + "github.com/upbound/upjet/pkg/terraform" + ctrl "sigs.k8s.io/controller-runtime" + + v1alpha1 "github.com/ankasoftco/provider-tmc/apis/tmc/v1alpha1" + features "github.com/ankasoftco/provider-tmc/internal/features" +) + +// Setup adds a controller that reconciles Cluster_Group managed resources. +func Setup(mgr ctrl.Manager, o tjcontroller.Options) error { + name := managed.ControllerName(v1alpha1.Cluster_Group_GroupVersionKind.String()) + var initializers managed.InitializerChain + cps := []managed.ConnectionPublisher{managed.NewAPISecretPublisher(mgr.GetClient(), mgr.GetScheme())} + if o.SecretStoreConfigGVK != nil { + cps = append(cps, connection.NewDetailsManager(mgr.GetClient(), *o.SecretStoreConfigGVK)) + } + opts := []managed.ReconcilerOption{ + managed.WithExternalConnecter(tjcontroller.NewConnector(mgr.GetClient(), o.WorkspaceStore, o.SetupFn, o.Provider.Resources["tanzu-mission-control_cluster_group"], tjcontroller.WithLogger(o.Logger), + tjcontroller.WithCallbackProvider(tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1alpha1.Cluster_Group_GroupVersionKind))), + )), + managed.WithLogger(o.Logger.WithValues("controller", name)), + managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))), + managed.WithFinalizer(terraform.NewWorkspaceFinalizer(o.WorkspaceStore, xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))), + managed.WithTimeout(3 * time.Minute), + managed.WithInitializers(initializers), + managed.WithConnectionPublishers(cps...), + managed.WithPollInterval(o.PollInterval), + } + if o.Features.Enabled(features.EnableAlphaManagementPolicies) { + opts = append(opts, managed.WithManagementPolicies()) + } + r := managed.NewReconciler(mgr, xpresource.ManagedKind(v1alpha1.Cluster_Group_GroupVersionKind), opts...) + + return ctrl.NewControllerManagedBy(mgr). + Named(name). + WithOptions(o.ForControllerRuntime()). + For(&v1alpha1.Cluster_Group{}). 
+ Complete(ratelimiter.NewReconciler(name, r, o.GlobalRateLimiter)) +} diff --git a/internal/controller/tmc/cluster_node_pool/zz_controller.go b/internal/controller/tmc/cluster_node_pool/zz_controller.go new file mode 100755 index 0000000..1f08f71 --- /dev/null +++ b/internal/controller/tmc/cluster_node_pool/zz_controller.go @@ -0,0 +1,55 @@ +/* +Copyright 2022 Upbound Inc. +*/ + +// Code generated by upjet. DO NOT EDIT. + +package cluster_node_pool + +import ( + "time" + + "github.com/crossplane/crossplane-runtime/pkg/connection" + "github.com/crossplane/crossplane-runtime/pkg/event" + "github.com/crossplane/crossplane-runtime/pkg/ratelimiter" + "github.com/crossplane/crossplane-runtime/pkg/reconciler/managed" + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + tjcontroller "github.com/upbound/upjet/pkg/controller" + "github.com/upbound/upjet/pkg/terraform" + ctrl "sigs.k8s.io/controller-runtime" + + v1alpha1 "github.com/ankasoftco/provider-tmc/apis/tmc/v1alpha1" + features "github.com/ankasoftco/provider-tmc/internal/features" +) + +// Setup adds a controller that reconciles Cluster_Node_Pool managed resources. +func Setup(mgr ctrl.Manager, o tjcontroller.Options) error { + name := managed.ControllerName(v1alpha1.Cluster_Node_Pool_GroupVersionKind.String()) + var initializers managed.InitializerChain + cps := []managed.ConnectionPublisher{managed.NewAPISecretPublisher(mgr.GetClient(), mgr.GetScheme())} + if o.SecretStoreConfigGVK != nil { + cps = append(cps, connection.NewDetailsManager(mgr.GetClient(), *o.SecretStoreConfigGVK)) + } + opts := []managed.ReconcilerOption{ + managed.WithExternalConnecter(tjcontroller.NewConnector(mgr.GetClient(), o.WorkspaceStore, o.SetupFn, o.Provider.Resources["tanzu-mission-control_cluster_node_pool"], tjcontroller.WithLogger(o.Logger), + tjcontroller.WithCallbackProvider(tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1alpha1.Cluster_Node_Pool_GroupVersionKind))), + )), + managed.WithLogger(o.Logger.WithValues("controller", name)), + managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))), + managed.WithFinalizer(terraform.NewWorkspaceFinalizer(o.WorkspaceStore, xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))), + managed.WithTimeout(3 * time.Minute), + managed.WithInitializers(initializers), + managed.WithConnectionPublishers(cps...), + managed.WithPollInterval(o.PollInterval), + } + if o.Features.Enabled(features.EnableAlphaManagementPolicies) { + opts = append(opts, managed.WithManagementPolicies()) + } + r := managed.NewReconciler(mgr, xpresource.ManagedKind(v1alpha1.Cluster_Node_Pool_GroupVersionKind), opts...) + + return ctrl.NewControllerManagedBy(mgr). + Named(name). + WithOptions(o.ForControllerRuntime()). + For(&v1alpha1.Cluster_Node_Pool{}). 
+ Complete(ratelimiter.NewReconciler(name, r, o.GlobalRateLimiter)) +} diff --git a/internal/controller/zz_setup.go b/internal/controller/zz_setup.go index 5f0ca4f..1489385 100755 --- a/internal/controller/zz_setup.go +++ b/internal/controller/zz_setup.go @@ -12,6 +12,8 @@ import ( providerconfig "github.com/ankasoftco/provider-tmc/internal/controller/providerconfig" akscluster "github.com/ankasoftco/provider-tmc/internal/controller/tmc/akscluster" cluster "github.com/ankasoftco/provider-tmc/internal/controller/tmc/cluster" + cluster_group "github.com/ankasoftco/provider-tmc/internal/controller/tmc/cluster_group" + cluster_node_pool "github.com/ankasoftco/provider-tmc/internal/controller/tmc/cluster_node_pool" ) // Setup creates all controllers with the supplied logger and adds them to @@ -21,6 +23,8 @@ func Setup(mgr ctrl.Manager, o controller.Options) error { providerconfig.Setup, akscluster.Setup, cluster.Setup, + cluster_group.Setup, + cluster_node_pool.Setup, } { if err := setup(mgr, o); err != nil { return err diff --git a/package/crds/tmc.crossplane.io_cluster_groups.yaml b/package/crds/tmc.crossplane.io_cluster_groups.yaml new file mode 100644 index 0000000..71ba439 --- /dev/null +++ b/package/crds/tmc.crossplane.io_cluster_groups.yaml @@ -0,0 +1,356 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.11.3 + creationTimestamp: null + name: cluster_groups.tmc.crossplane.io +spec: + group: tmc.crossplane.io + names: + categories: + - crossplane + - managed + - tmc + kind: Cluster_Group + listKind: Cluster_GroupList + plural: cluster_groups + singular: cluster_group + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: Cluster_Group is the Schema for the Cluster_Groups API. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: Cluster_GroupSpec defines the desired state of Cluster_Group + properties: + deletionPolicy: + default: Delete + description: 'DeletionPolicy specifies what will happen to the underlying + external when this managed resource is deleted - either "Delete" + or "Orphan" the external resource. This field is planned to be deprecated + in favor of the ManagementPolicy field in a future release. Currently, + both could be set independently and non-default values would be + honored if the feature flag is enabled. 
See the design doc for more + information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223' + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + meta: + description: Metadata for the resource + items: + properties: + annotations: + additionalProperties: + type: string + description: Annotations for the resource + type: object + description: + description: Description of the resource + type: string + labels: + additionalProperties: + type: string + description: Labels for the resource + type: object + type: object + type: array + name: + type: string + type: object + managementPolicy: + default: FullControl + description: 'THIS IS AN ALPHA FIELD. Do not use it in production. + It is not honored unless the relevant Crossplane feature flag is + enabled, and may be changed or removed without notice. ManagementPolicy + specifies the level of control Crossplane has over the managed external + resource. This field is planned to replace the DeletionPolicy field + in a future release. Currently, both could be set independently + and non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223' + enum: + - FullControl + - ObserveOnly + - OrphanOnDelete + type: string + providerConfigRef: + default: + name: default + description: ProviderConfigReference specifies how the provider that + will be used to create, observe, update, and delete this managed + resource should be configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: Resolution specifies whether resolution of this + reference is required. The default is 'Required', which + means the reconcile will fail if the reference cannot be + resolved. 'Optional' means this reference will be a no-op + if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: Resolve specifies when this reference should + be resolved. The default is 'IfNotPresent', which will attempt + to resolve the reference only when the corresponding field + is not present. Use 'Always' to resolve the reference on + every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + providerRef: + description: 'ProviderReference specifies the provider that will be + used to create, observe, update, and delete this managed resource. + Deprecated: Please use ProviderConfigReference, i.e. `providerConfigRef`' + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: Resolution specifies whether resolution of this + reference is required. The default is 'Required', which + means the reconcile will fail if the reference cannot be + resolved. 'Optional' means this reference will be a no-op + if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: Resolve specifies when this reference should + be resolved. The default is 'IfNotPresent', which will attempt + to resolve the reference only when the corresponding field + is not present. 
Use 'Always' to resolve the reference on + every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: PublishConnectionDetailsTo specifies the connection secret + config which contains a name, metadata and a reference to secret + store config to which any connection details for this managed resource + should be written. Connection details frequently include the endpoint, + username, and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: SecretStoreConfigRef specifies which secret store + config should be used for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: Resolution specifies whether resolution of + this reference is required. The default is 'Required', + which means the reconcile will fail if the reference + cannot be resolved. 'Optional' means this reference + will be a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: Resolve specifies when this reference should + be resolved. The default is 'IfNotPresent', which will + attempt to resolve the reference only when the corresponding + field is not present. Use 'Always' to resolve the reference + on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: Annotations are the annotations to be added to + connection secret. - For Kubernetes secrets, this will be + used as "metadata.annotations". - It is up to Secret Store + implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: Labels are the labels/tags to be added to connection + secret. - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store + types. + type: object + type: + description: Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: WriteConnectionSecretToReference specifies the namespace + and name of a Secret to which any connection details for this managed + resource should be written. Connection details frequently include + the endpoint, username, and password required to connect to the + managed resource. This field is planned to be replaced in a future + release in favor of PublishConnectionDetailsTo. Currently, both + could be set independently and connection details would be published + to both without affecting each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: name is a required parameter + rule: self.managementPolicy == 'ObserveOnly' || has(self.forProvider.name) + status: + description: Cluster_GroupStatus defines the observed state of Cluster_Group. 
+ properties: + atProvider: + properties: + id: + type: string + meta: + description: Metadata for the resource + items: + properties: + annotations: + additionalProperties: + type: string + description: Annotations for the resource + type: object + description: + description: Description of the resource + type: string + labels: + additionalProperties: + type: string + description: Labels for the resource + type: object + resourceVersion: + description: Resource version of the resource + type: string + uid: + description: UID of the resource + type: string + type: object + type: array + name: + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: LastTransitionTime is the last time this condition + transitioned from one status to another. + format: date-time + type: string + message: + description: A Message containing details about this condition's + last transition from one status to another, if any. + type: string + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: Type of this condition. At most one of each condition + type may apply to a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/package/crds/tmc.crossplane.io_cluster_node_pools.yaml b/package/crds/tmc.crossplane.io_cluster_node_pools.yaml new file mode 100644 index 0000000..5be9e6b --- /dev/null +++ b/package/crds/tmc.crossplane.io_cluster_node_pools.yaml @@ -0,0 +1,607 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.11.3 + creationTimestamp: null + name: cluster_node_pools.tmc.crossplane.io +spec: + group: tmc.crossplane.io + names: + categories: + - crossplane + - managed + - tmc + kind: Cluster_Node_Pool + listKind: Cluster_Node_PoolList + plural: cluster_node_pools + singular: cluster_node_pool + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: Cluster_Node_Pool is the Schema for the Cluster_Node_Pools API. + + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: Cluster_Node_PoolSpec defines the desired state of Cluster_Node_Pool + properties: + deletionPolicy: + default: Delete + description: 'DeletionPolicy specifies what will happen to the underlying + external when this managed resource is deleted - either "Delete" + or "Orphan" the external resource. This field is planned to be deprecated + in favor of the ManagementPolicy field in a future release. Currently, + both could be set independently and non-default values would be + honored if the feature flag is enabled. See the design doc for more + information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223' + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + clusterName: + description: Name of the cluster + type: string + managementClusterName: + description: Name of the management cluster + type: string + meta: + description: Metadata for the resource + items: + properties: + annotations: + additionalProperties: + type: string + description: Annotations for the resource + type: object + description: + description: Description of the resource + type: string + labels: + additionalProperties: + type: string + description: Labels for the resource + type: object + type: object + type: array + name: + description: Name of this nodepool + type: string + provisionerName: + description: Provisioner of the cluster + type: string + readyWaitTimeout: + description: Wait timeout duration until nodepool resource reaches + READY state. Accepted timeout duration values like 5s, 45m, + or 3h, higher than zero. + type: string + spec: + description: Spec for the cluster nodepool + items: + properties: + cloudLabels: + additionalProperties: + type: string + description: Cloud labels + type: object + nodeLabels: + additionalProperties: + type: string + description: Node labels + type: object + tkgAws: + description: TKGAWSNodepool is the nodepool spec for TKG + AWS cluster + items: + properties: + class: + description: Nodepool instance type + type: string + storageClass: + description: Storage Class to be used for storage + of the disks which store the root filesystem of + the nodes + type: string + type: object + type: array + tkgServiceVsphere: + description: TKGServiceVsphereNodepool is the nodepool spec + for TKG service vsphere cluster + items: + properties: + class: + description: Nodepool instance type + type: string + failureDomain: + description: Configure the failure domain of node + pool. The potential values could be found using + cluster:options api. This parameter will be ignored + by the backend if the TKG service vsphere cluster + doesn't support. 
+ type: string + storageClass: + description: Storage Class to be used for storage + of the disks which store the root filesystem of + the nodes + type: string + volumes: + description: Configurable volumes for nodepool nodes + items: + properties: + capacity: + description: Volume capacity is in gib + type: number + mountPath: + description: It is the directory where the volume + device is to be mounted + type: string + name: + description: It is the volume name + type: string + pvcStorageClass: + description: This is the storage class for PVC + which in case omitted, default storage class + will be used for the disks + type: string + type: object + type: array + required: + - class + - storageClass + type: object + type: array + tkgVsphere: + description: TkgVsphereNodepool is the nodepool config for + the TKG vsphere cluster + items: + properties: + vmConfig: + description: VM specific configuration + items: + properties: + cpu: + description: Number of CPUs per node + type: string + diskSize: + description: Root disk size in gigabytes for + the VM + type: string + memory: + description: Memory associated with the node + in megabytes + type: string + type: object + type: array + required: + - vmConfig + type: object + type: array + workerNodeCount: + description: Count is the number of nodes + type: string + required: + - workerNodeCount + type: object + type: array + type: object + managementPolicy: + default: FullControl + description: 'THIS IS AN ALPHA FIELD. Do not use it in production. + It is not honored unless the relevant Crossplane feature flag is + enabled, and may be changed or removed without notice. ManagementPolicy + specifies the level of control Crossplane has over the managed external + resource. This field is planned to replace the DeletionPolicy field + in a future release. Currently, both could be set independently + and non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223' + enum: + - FullControl + - ObserveOnly + - OrphanOnDelete + type: string + providerConfigRef: + default: + name: default + description: ProviderConfigReference specifies how the provider that + will be used to create, observe, update, and delete this managed + resource should be configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: Resolution specifies whether resolution of this + reference is required. The default is 'Required', which + means the reconcile will fail if the reference cannot be + resolved. 'Optional' means this reference will be a no-op + if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: Resolve specifies when this reference should + be resolved. The default is 'IfNotPresent', which will attempt + to resolve the reference only when the corresponding field + is not present. Use 'Always' to resolve the reference on + every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + providerRef: + description: 'ProviderReference specifies the provider that will be + used to create, observe, update, and delete this managed resource. + Deprecated: Please use ProviderConfigReference, i.e. 
`providerConfigRef`' + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: Resolution specifies whether resolution of this + reference is required. The default is 'Required', which + means the reconcile will fail if the reference cannot be + resolved. 'Optional' means this reference will be a no-op + if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: Resolve specifies when this reference should + be resolved. The default is 'IfNotPresent', which will attempt + to resolve the reference only when the corresponding field + is not present. Use 'Always' to resolve the reference on + every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: PublishConnectionDetailsTo specifies the connection secret + config which contains a name, metadata and a reference to secret + store config to which any connection details for this managed resource + should be written. Connection details frequently include the endpoint, + username, and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: SecretStoreConfigRef specifies which secret store + config should be used for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: Resolution specifies whether resolution of + this reference is required. The default is 'Required', + which means the reconcile will fail if the reference + cannot be resolved. 'Optional' means this reference + will be a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: Resolve specifies when this reference should + be resolved. The default is 'IfNotPresent', which will + attempt to resolve the reference only when the corresponding + field is not present. Use 'Always' to resolve the reference + on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: Annotations are the annotations to be added to + connection secret. - For Kubernetes secrets, this will be + used as "metadata.annotations". - It is up to Secret Store + implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: Labels are the labels/tags to be added to connection + secret. - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store + types. + type: object + type: + description: Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: WriteConnectionSecretToReference specifies the namespace + and name of a Secret to which any connection details for this managed + resource should be written. 
Connection details frequently include + the endpoint, username, and password required to connect to the + managed resource. This field is planned to be replaced in a future + release in favor of PublishConnectionDetailsTo. Currently, both + could be set independently and connection details would be published + to both without affecting each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: clusterName is a required parameter + rule: self.managementPolicy == 'ObserveOnly' || has(self.forProvider.clusterName) + - message: managementClusterName is a required parameter + rule: self.managementPolicy == 'ObserveOnly' || has(self.forProvider.managementClusterName) + - message: name is a required parameter + rule: self.managementPolicy == 'ObserveOnly' || has(self.forProvider.name) + - message: provisionerName is a required parameter + rule: self.managementPolicy == 'ObserveOnly' || has(self.forProvider.provisionerName) + status: + description: Cluster_Node_PoolStatus defines the observed state of Cluster_Node_Pool. + properties: + atProvider: + properties: + clusterName: + description: Name of the cluster + type: string + id: + type: string + managementClusterName: + description: Name of the management cluster + type: string + meta: + description: Metadata for the resource + items: + properties: + annotations: + additionalProperties: + type: string + description: Annotations for the resource + type: object + description: + description: Description of the resource + type: string + labels: + additionalProperties: + type: string + description: Labels for the resource + type: object + resourceVersion: + description: Resource version of the resource + type: string + uid: + description: UID of the resource + type: string + type: object + type: array + name: + description: Name of this nodepool + type: string + provisionerName: + description: Provisioner of the cluster + type: string + readyWaitTimeout: + description: Wait timeout duration until nodepool resource reaches + READY state. Accepted timeout duration values like 5s, 45m, + or 3h, higher than zero. + type: string + spec: + description: Spec for the cluster nodepool + items: + properties: + cloudLabels: + additionalProperties: + type: string + description: Cloud labels + type: object + nodeLabels: + additionalProperties: + type: string + description: Node labels + type: object + tkgAws: + description: TKGAWSNodepool is the nodepool spec for TKG + AWS cluster + items: + properties: + class: + description: Nodepool instance type + type: string + storageClass: + description: Storage Class to be used for storage + of the disks which store the root filesystem of + the nodes + type: string + type: object + type: array + tkgServiceVsphere: + description: TKGServiceVsphereNodepool is the nodepool spec + for TKG service vsphere cluster + items: + properties: + class: + description: Nodepool instance type + type: string + failureDomain: + description: Configure the failure domain of node + pool. The potential values could be found using + cluster:options api. This parameter will be ignored + by the backend if the TKG service vsphere cluster + doesn't support. 
+ type: string + storageClass: + description: Storage Class to be used for storage + of the disks which store the root filesystem of + the nodes + type: string + volumes: + description: Configurable volumes for nodepool nodes + items: + properties: + capacity: + description: Volume capacity is in gib + type: number + mountPath: + description: It is the directory where the volume + device is to be mounted + type: string + name: + description: It is the volume name + type: string + pvcStorageClass: + description: This is the storage class for PVC + which in case omitted, default storage class + will be used for the disks + type: string + type: object + type: array + type: object + type: array + tkgVsphere: + description: TkgVsphereNodepool is the nodepool config for + the TKG vsphere cluster + items: + properties: + vmConfig: + description: VM specific configuration + items: + properties: + cpu: + description: Number of CPUs per node + type: string + diskSize: + description: Root disk size in gigabytes for + the VM + type: string + memory: + description: Memory associated with the node + in megabytes + type: string + type: object + type: array + type: object + type: array + workerNodeCount: + description: Count is the number of nodes + type: string + type: object + type: array + status: + additionalProperties: + type: string + description: Status of node pool resource + type: object + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: LastTransitionTime is the last time this condition + transitioned from one status to another. + format: date-time + type: string + message: + description: A Message containing details about this condition's + last transition from one status to another, if any. + type: string + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: Type of this condition. At most one of each condition + type may apply to a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {}
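
For reviewers who want to smoke-test the two new CRDs, the sketch below shows minimal Cluster_Group and Cluster_Node_Pool manifests consistent with the schemas added in this patch. Every object name, cluster/provisioner name, label, and sizing value is a placeholder, and a ProviderConfig named "default" is assumed to exist; these manifests are illustrative only and are not part of the generated code.

---
apiVersion: tmc.crossplane.io/v1alpha1
kind: Cluster_Group
metadata:
  name: example-cluster-group            # placeholder object name
spec:
  forProvider:
    name: example-cluster-group          # TMC cluster group name (placeholder)
    meta:
      - description: Managed by Crossplane   # optional resource metadata
        labels:
          env: dev                       # placeholder label
  providerConfigRef:
    name: default                        # assumes a ProviderConfig named "default"
---
apiVersion: tmc.crossplane.io/v1alpha1
kind: Cluster_Node_Pool
metadata:
  name: example-node-pool                # placeholder object name
spec:
  forProvider:
    managementClusterName: example-mgmt-cluster   # placeholder
    provisionerName: example-provisioner          # placeholder
    clusterName: example-workload-cluster         # placeholder
    name: example-node-pool                       # nodepool name (placeholder)
    spec:
      - workerNodeCount: "3"             # the schema models the node count as a string
        tkgVsphere:
          - vmConfig:
              - cpu: "4"                 # placeholder sizing values
                diskSize: "40"
                memory: "8192"
  providerConfigRef:
    name: default                        # assumes the same ProviderConfig as above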