diff --git a/.github/workflows/helm-test-workflow.yml b/.github/workflows/helm-test-workflow.yml index 7a36b8fae..e92fdae9e 100644 --- a/.github/workflows/helm-test-workflow.yml +++ b/.github/workflows/helm-test-workflow.yml @@ -2,6 +2,7 @@ name: Helm Test WorkFlow on: push: branches: + - verify - develop - main jobs: diff --git a/.github/workflows/int-test-workflow.yml b/.github/workflows/int-test-workflow.yml index 17b34ff19..d29ee9df0 100644 --- a/.github/workflows/int-test-workflow.yml +++ b/.github/workflows/int-test-workflow.yml @@ -2,6 +2,7 @@ name: Integration Test WorkFlow on: push: branches: + - verify - develop - main - feature** diff --git a/Dockerfile b/Dockerfile index b56602ab7..ea326a619 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,5 +1,5 @@ # Build the manager binary -FROM golang:1.19 as builder +FROM golang:1.21 as builder WORKDIR /workspace # Copy the Go Modules manifests diff --git a/api/v3/clustermaster_types.go b/api/v3/clustermaster_types.go index 6d027378b..51bae0f6a 100644 --- a/api/v3/clustermaster_types.go +++ b/api/v3/clustermaster_types.go @@ -68,6 +68,9 @@ type ClusterMasterStatus struct { // Telemetry App installation flag TelAppInstalled bool `json:"telAppInstalled"` + + // Indicates if the cluster is in maintenance mode. + MaintenanceMode bool `json:"maintenance_mode"` } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object diff --git a/api/v4/clustermanager_types.go b/api/v4/clustermanager_types.go index b9a8ceaca..db2f485df 100644 --- a/api/v4/clustermanager_types.go +++ b/api/v4/clustermanager_types.go @@ -32,6 +32,9 @@ const ( // ClusterManagerPausedAnnotation is the annotation that pauses the reconciliation (triggers // an immediate requeue) ClusterManagerPausedAnnotation = "clustermanager.enterprise.splunk.com/paused" + // ClusterManagerMaintenanceAnnotation is the annotation that puts the cluster manager // into maintenance mode + ClusterManagerMaintenanceAnnotation = "clustermanager.enterprise.splunk.com/maintenance" ) // ClusterManagerSpec defines the desired state of ClusterManager @@ -67,6 +70,15 @@ type ClusterManagerStatus struct { // Telemetry App installation flag TelAppInstalled bool `json:"telAppInstalled"` + + // Indicates if the cluster is in maintenance mode. + MaintenanceMode bool `json:"maintenanceMode"` + + // Conditions represent the latest available observations of an object's state + Conditions []metav1.Condition `json:"conditions"` + + // ErrorMessage shows current error if there are any + ErrorMessage string `json:"errorMessage"` } // BundlePushInfo Indicates if bundle push required @@ -116,13 +128,13 @@ func (cmstr *ClusterManager) NewEvent(eventType, reason, message string) corev1. return corev1.Event{ ObjectMeta: metav1.ObjectMeta{ GenerateName: reason + "-", - Namespace: cmstr.ObjectMeta.Namespace, + Namespace: cmstr.Namespace, }, InvolvedObject: corev1.ObjectReference{ - Kind: "Clustermanager", - Namespace: cmstr.Namespace, - Name: cmstr.Name, - UID: cmstr.UID, + Kind: "ClusterManager", + Namespace: cmstr.GetNamespace(), + Name: cmstr.GetName(), + UID: cmstr.GetUID(), APIVersion: GroupVersion.String(), }, Reason: reason, diff --git a/api/v4/indexercluster_types.go b/api/v4/indexercluster_types.go index 60c39f46f..c9145e86c 100644 --- a/api/v4/indexercluster_types.go +++ b/api/v4/indexercluster_types.go @@ -61,6 +61,12 @@ type IndexerClusterMemberStatus struct { // Flag indicating if this peer belongs to the current committed generation and is searchable.
Searchable bool `json:"is_searchable"` + + // Conditions represent the latest available observations of an object's state + Conditions []metav1.Condition `json:"conditions"` + + // ErrorMessage shows current error if there are any + ErrorMessage string `json:"errorMessage"` } // IndexerClusterStatus defines the observed state of a Splunk Enterprise indexer cluster @@ -152,7 +158,7 @@ func (icstr *IndexerCluster) NewEvent(eventType, reason, message string) corev1. return corev1.Event{ ObjectMeta: metav1.ObjectMeta{ GenerateName: reason + "-", - Namespace: icstr.ObjectMeta.Namespace, + Namespace: icstr.Namespace, }, InvolvedObject: corev1.ObjectReference{ Kind: "IndexerCluster", diff --git a/api/v4/licensemanager_types.go b/api/v4/licensemanager_types.go index 29a0afa9a..fbd7ea1da 100644 --- a/api/v4/licensemanager_types.go +++ b/api/v4/licensemanager_types.go @@ -52,6 +52,12 @@ type LicenseManagerStatus struct { // Telemetry App installation flag TelAppInstalled bool `json:"telAppInstalled"` + + // Conditions represent the latest available observations of an object's state + Conditions []metav1.Condition `json:"conditions"` + + // ErrorMessage shows current error if there are any + ErrorMessage string `json:"errorMessage"` } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object @@ -91,7 +97,7 @@ func (lmstr *LicenseManager) NewEvent(eventType, reason, message string) corev1. return corev1.Event{ ObjectMeta: metav1.ObjectMeta{ GenerateName: reason + "-", - Namespace: lmstr.ObjectMeta.Namespace, + Namespace: lmstr.Namespace, }, InvolvedObject: corev1.ObjectReference{ Kind: "LicenseManager", diff --git a/api/v4/monitoringconsole_types.go b/api/v4/monitoringconsole_types.go index 7a4a9ad4a..fe3bc8c2d 100644 --- a/api/v4/monitoringconsole_types.go +++ b/api/v4/monitoringconsole_types.go @@ -58,6 +58,12 @@ type MonitoringConsoleStatus struct { // App Framework status AppContext AppDeploymentContext `json:"appContext,omitempty"` + + // Conditions represent the latest available observations of an object's state + Conditions []metav1.Condition `json:"conditions"` + + // ErrorMessage shows current error if there are any + ErrorMessage string `json:"errorMessage"` } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object @@ -99,13 +105,13 @@ func (mcnsl *MonitoringConsole) NewEvent(eventType, reason, message string) core return corev1.Event{ ObjectMeta: metav1.ObjectMeta{ GenerateName: reason + "-", - Namespace: mcnsl.ObjectMeta.Namespace, + Namespace: mcnsl.Namespace, }, InvolvedObject: corev1.ObjectReference{ - Kind: "MonitoringConsole", - Namespace: mcnsl.Namespace, - Name: mcnsl.Name, - UID: mcnsl.UID, + Kind: "MonitoringConsole", + Namespace: mcnsl.GetNamespace(), + Name: mcnsl.GetName(), + UID: mcnsl.GetUID(), APIVersion: GroupVersion.String(), }, Reason: reason, diff --git a/api/v4/searchheadcluster_types.go b/api/v4/searchheadcluster_types.go index db263cf92..1df55411c 100644 --- a/api/v4/searchheadcluster_types.go +++ b/api/v4/searchheadcluster_types.go @@ -44,6 +44,12 @@ type SearchHeadClusterSpec struct { // Splunk Enterprise App repository.
Specifies remote App location and scope for Splunk App management AppFrameworkConfig AppFrameworkSpec `json:"appRepo,omitempty"` + + // Conditions represent the latest available observations of an object's state + Conditions []metav1.Condition `json:"conditions"` + + // ErrorMessage shows current error if there are any + ErrorMessage string `json:"errorMessage"` } // SearchHeadClusterMemberStatus is used to track the status of each search head cluster member @@ -161,13 +167,13 @@ func (shcstr *SearchHeadCluster) NewEvent(eventType, reason, message string) cor return corev1.Event{ ObjectMeta: metav1.ObjectMeta{ GenerateName: reason + "-", - Namespace: shcstr.ObjectMeta.Namespace, + Namespace: shcstr.GetNamespace(), }, InvolvedObject: corev1.ObjectReference{ Kind: "SearchHeadCluster", - Namespace: shcstr.Namespace, - Name: shcstr.Name, - UID: shcstr.UID, + Namespace: shcstr.GetNamespace(), + Name: shcstr.GetName(), + UID: shcstr.GetUID(), APIVersion: GroupVersion.String(), }, Reason: reason, diff --git a/api/v4/standalone_types.go b/api/v4/standalone_types.go index 44bedc5fa..f213bcb11 100644 --- a/api/v4/standalone_types.go +++ b/api/v4/standalone_types.go @@ -46,6 +46,12 @@ type StandaloneSpec struct { // Splunk Enterprise App repository. Specifies remote App location and scope for Splunk App management AppFrameworkConfig AppFrameworkSpec `json:"appRepo,omitempty"` + + // Conditions represent the latest available observations of an object's state + Conditions []metav1.Condition `json:"conditions"` + + // ErrorMessage shows current error if there are any + ErrorMessage string `json:"errorMessage"` } // StandaloneStatus defines the observed state of a Splunk Enterprise standalone instances. @@ -115,13 +121,13 @@ func (standln *Standalone) NewEvent(eventType, reason, message string) corev1.Ev return corev1.Event{ ObjectMeta: metav1.ObjectMeta{ GenerateName: reason + "-", - Namespace: standln.ObjectMeta.Namespace, + Namespace: standln.Namespace, }, InvolvedObject: corev1.ObjectReference{ - Kind: "Standalone", - Namespace: standln.Namespace, - Name: standln.Name, - UID: standln.UID, + Kind: "Standalone", + Namespace: standln.GetNamespace(), + Name: standln.GetName(), + UID: standln.GetUID(), APIVersion: GroupVersion.String(), }, Reason: reason, diff --git a/api/v4/zz_generated.deepcopy.go b/api/v4/zz_generated.deepcopy.go index b8240b5a6..357313909 100644 --- a/api/v4/zz_generated.deepcopy.go +++ b/api/v4/zz_generated.deepcopy.go @@ -22,7 +22,8 @@ limitations under the License. package v4 import ( - "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" ) @@ -287,6 +288,13 @@ func (in *ClusterManagerStatus) DeepCopyInto(out *ClusterManagerStatus) { } } in.AppContext.DeepCopyInto(&out.AppContext) + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]v1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterManagerStatus.
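For context on the generated code above: the new Conditions field that DeepCopyInto now copies element-by-element is normally maintained through the standard apimachinery condition helpers. A minimal sketch of a status helper a controller might use (the "Ready" condition type is hypothetical and not defined anywhere in this patch):

package v4

import (
	"k8s.io/apimachinery/pkg/api/meta"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// setReadyCondition upserts a "Ready" condition into the slice that the
// generated DeepCopyInto copies. meta.SetStatusCondition only refreshes
// LastTransitionTime when the Status value actually changes, which keeps
// repeated status updates idempotent.
func setReadyCondition(conditions *[]metav1.Condition, ready bool, reason, message string) {
	status := metav1.ConditionFalse
	if ready {
		status = metav1.ConditionTrue
	}
	meta.SetStatusCondition(conditions, metav1.Condition{
		Type:    "Ready", // hypothetical condition type
		Status:  status,
		Reason:  reason,
		Message: message,
	})
}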
@@ -307,7 +315,7 @@ func (in *CommonSplunkSpec) DeepCopyInto(out *CommonSplunkSpec) { out.VarVolumeStorageConfig = in.VarVolumeStorageConfig if in.Volumes != nil { in, out := &in.Volumes, &out.Volumes - *out = make([]v1.Volume, len(*in)) + *out = make([]corev1.Volume, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } @@ -319,7 +327,7 @@ func (in *CommonSplunkSpec) DeepCopyInto(out *CommonSplunkSpec) { out.MonitoringConsoleRef = in.MonitoringConsoleRef if in.ExtraEnv != nil { in, out := &in.ExtraEnv, &out.ExtraEnv - *out = make([]v1.EnvVar, len(*in)) + *out = make([]corev1.EnvVar, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } @@ -341,7 +349,7 @@ func (in *CommonSplunkSpec) DeepCopyInto(out *CommonSplunkSpec) { } if in.ImagePullSecrets != nil { in, out := &in.ImagePullSecrets, &out.ImagePullSecrets - *out = make([]v1.LocalObjectReference, len(*in)) + *out = make([]corev1.LocalObjectReference, len(*in)) copy(*out, *in) } } @@ -496,6 +504,13 @@ func (in *IndexerClusterList) DeepCopyObject() runtime.Object { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *IndexerClusterMemberStatus) DeepCopyInto(out *IndexerClusterMemberStatus) { *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]v1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IndexerClusterMemberStatus. @@ -542,7 +557,9 @@ func (in *IndexerClusterStatus) DeepCopyInto(out *IndexerClusterStatus) { if in.Peers != nil { in, out := &in.Peers, &out.Peers *out = make([]IndexerClusterMemberStatus, len(*in)) - copy(*out, *in) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } } } @@ -636,6 +653,13 @@ func (in *LicenseManagerSpec) DeepCopy() *LicenseManagerSpec { func (in *LicenseManagerStatus) DeepCopyInto(out *LicenseManagerStatus) { *out = *in in.AppContext.DeepCopyInto(&out.AppContext) + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]v1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LicenseManagerStatus. @@ -736,6 +760,13 @@ func (in *MonitoringConsoleStatus) DeepCopyInto(out *MonitoringConsoleStatus) { } } in.AppContext.DeepCopyInto(&out.AppContext) + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]v1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitoringConsoleStatus. @@ -873,6 +904,13 @@ func (in *SearchHeadClusterSpec) DeepCopyInto(out *SearchHeadClusterSpec) { *out = *in in.CommonSplunkSpec.DeepCopyInto(&out.CommonSplunkSpec) in.AppFrameworkConfig.DeepCopyInto(&out.AppFrameworkConfig) + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]v1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SearchHeadClusterSpec. 
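The IndexerClusterStatus hunk above replaces copy(*out, *in) for Peers with an element-wise DeepCopyInto loop. That change is forced by the new Conditions field: copy duplicates only the slice headers, so the copy and the original would share one backing array. A self-contained sketch of the difference, with plain strings standing in for metav1.Condition:

package main

import "fmt"

type memberStatus struct {
	Name       string
	Conditions []string // stands in for []metav1.Condition
}

func main() {
	src := []memberStatus{{Name: "peer-0", Conditions: []string{"Ready"}}}

	// Shallow copy: slice headers are duplicated, the backing array is shared.
	shallow := make([]memberStatus, len(src))
	copy(shallow, src)
	shallow[0].Conditions[0] = "Degraded"
	fmt.Println(src[0].Conditions[0]) // prints "Degraded": the original was mutated

	// Element-wise deep copy, mirroring the generated loop.
	src[0].Conditions[0] = "Ready"
	deep := make([]memberStatus, len(src))
	for i := range src {
		deep[i] = src[i]
		deep[i].Conditions = append([]string(nil), src[i].Conditions...)
	}
	deep[0].Conditions[0] = "Degraded"
	fmt.Println(src[0].Conditions[0]) // prints "Ready": the original is untouched
}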
@@ -956,7 +994,7 @@ func (in *Spec) DeepCopyInto(out *Spec) { in.Affinity.DeepCopyInto(&out.Affinity) if in.Tolerations != nil { in, out := &in.Tolerations, &out.Tolerations - *out = make([]v1.Toleration, len(*in)) + *out = make([]corev1.Toleration, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } @@ -965,7 +1003,7 @@ func (in *Spec) DeepCopyInto(out *Spec) { in.ServiceTemplate.DeepCopyInto(&out.ServiceTemplate) if in.TopologySpreadConstraints != nil { in, out := &in.TopologySpreadConstraints, &out.TopologySpreadConstraints - *out = make([]v1.TopologySpreadConstraint, len(*in)) + *out = make([]corev1.TopologySpreadConstraint, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } @@ -1047,6 +1085,13 @@ func (in *StandaloneSpec) DeepCopyInto(out *StandaloneSpec) { in.CommonSplunkSpec.DeepCopyInto(&out.CommonSplunkSpec) in.SmartStore.DeepCopyInto(&out.SmartStore) in.AppFrameworkConfig.DeepCopyInto(&out.AppFrameworkConfig) + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]v1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StandaloneSpec. diff --git a/config/crd/bases/enterprise.splunk.com_clustermanagers.yaml b/config/crd/bases/enterprise.splunk.com_clustermanagers.yaml index 7d3f80b3a..23a588606 100644 --- a/config/crd/bases/enterprise.splunk.com_clustermanagers.yaml +++ b/config/crd/bases/enterprise.splunk.com_clustermanagers.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.11.3 + controller-gen.kubebuilder.io/version: v0.9.2 creationTimestamp: null name: clustermanagers.enterprise.splunk.com spec: @@ -4235,6 +4235,82 @@ spec: needToPushMasterApps: type: boolean type: object + conditions: + description: Conditions represent the latest available observations + of an object's state + items: + description: "Condition contains details for one aspect of the current + state of this API Resource. --- This struct is intended for direct + use as an array at the field path .status.conditions. For example, + \n type FooStatus struct{ // Represents the observations of a + foo's current state. // Known .status.conditions.type are: \"Available\", + \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge + // +listType=map // +listMapKey=type Conditions []metav1.Condition + `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" + protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" + properties: + lastTransitionTime: + description: lastTransitionTime is the last time the condition + transitioned from one status to another. This should be when + the underlying condition changed. If that is not known, then + using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: message is a human readable message indicating + details about the transition. This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: observedGeneration represents the .metadata.generation + that the condition was set based upon. For instance, if .metadata.generation + is currently 12, but the .status.conditions[x].observedGeneration + is 9, the condition is out of date with respect to the current + state of the instance. 
+ format: int64 + minimum: 0 + type: integer + reason: + description: reason contains a programmatic identifier indicating + the reason for the condition's last transition. Producers + of specific condition types may define expected values and + meanings for this field, and whether the values are considered + a guaranteed API. The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. + --- Many .condition.type values are consistent across resources + like Available, but because arbitrary conditions can be useful + (see .node.status.conditions), the ability to deconflict is + important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + errorMessage: + description: ErrorMessage shows current error if there are any + type: string + maintenanceMode: + description: Indicates if the cluster is in maintenance mode. + type: boolean phase: description: current phase of the cluster manager enum: diff --git a/config/crd/bases/enterprise.splunk.com_clustermasters.yaml b/config/crd/bases/enterprise.splunk.com_clustermasters.yaml index 05097a4fc..65d045548 100644 --- a/config/crd/bases/enterprise.splunk.com_clustermasters.yaml +++ b/config/crd/bases/enterprise.splunk.com_clustermasters.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.11.3 + controller-gen.kubebuilder.io/version: v0.9.2 creationTimestamp: null name: clustermasters.enterprise.splunk.com spec: @@ -4235,6 +4235,9 @@ spec: needToPushMasterApps: type: boolean type: object + maintenance_mode: + description: Indicates if the cluster is in maintenance mode. + type: boolean phase: description: current phase of the cluster manager enum: diff --git a/config/crd/bases/enterprise.splunk.com_indexerclusters.yaml b/config/crd/bases/enterprise.splunk.com_indexerclusters.yaml index 45bb80792..533537e5e 100644 --- a/config/crd/bases/enterprise.splunk.com_indexerclusters.yaml +++ b/config/crd/bases/enterprise.splunk.com_indexerclusters.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.11.3 + controller-gen.kubebuilder.io/version: v0.9.2 creationTimestamp: null name: indexerclusters.enterprise.splunk.com spec: @@ -7531,6 +7531,84 @@ spec: all indexes. format: int64 type: integer + conditions: + description: Conditions represent the latest available observations + of an object's state + items: + description: "Condition contains details for one aspect of + the current state of this API Resource. --- This struct + is intended for direct use as an array at the field path + .status.conditions. For example, \n type FooStatus struct{ + // Represents the observations of a foo's current state. 
+ // Known .status.conditions.type are: \"Available\", \"Progressing\", + and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge + // +listType=map // +listMapKey=type Conditions []metav1.Condition + `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" + protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields + }" + properties: + lastTransitionTime: + description: lastTransitionTime is the last time the condition + transitioned from one status to another. This should + be when the underlying condition changed. If that is + not known, then using the time when the API field changed + is acceptable. + format: date-time + type: string + message: + description: message is a human readable message indicating + details about the transition. This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: observedGeneration represents the .metadata.generation + that the condition was set based upon. For instance, + if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration + is 9, the condition is out of date with respect to the + current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: reason contains a programmatic identifier + indicating the reason for the condition's last transition. + Producers of specific condition types may define expected + values and meanings for this field, and whether the + values are considered a guaranteed API. The value should + be a CamelCase string. This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, + Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. + --- Many .condition.type values are consistent across + resources like Available, but because arbitrary conditions + can be useful (see .node.status.conditions), the ability + to deconflict is important. The regex it matches is + (dns1123SubdomainFmt/)?(qualifiedNameFmt) + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + errorMessage: + description: ErrorMessage shows current error if there are any + type: string guid: description: Unique identifier or GUID for the peer type: string diff --git a/config/crd/bases/enterprise.splunk.com_licensemanagers.yaml b/config/crd/bases/enterprise.splunk.com_licensemanagers.yaml index 1fedf29e1..6ec984cb0 100644 --- a/config/crd/bases/enterprise.splunk.com_licensemanagers.yaml +++ b/config/crd/bases/enterprise.splunk.com_licensemanagers.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.11.3 + controller-gen.kubebuilder.io/version: v0.9.2 creationTimestamp: null name: licensemanagers.enterprise.splunk.com spec: @@ -4098,6 +4098,79 @@ spec: description: App Framework version info for future use type: integer type: object + conditions: + description: Conditions represent the latest available observations + of an object's state + items: + description: "Condition contains details for one aspect of the current + state of this API Resource. 
--- This struct is intended for direct + use as an array at the field path .status.conditions. For example, + \n type FooStatus struct{ // Represents the observations of a + foo's current state. // Known .status.conditions.type are: \"Available\", + \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge + // +listType=map // +listMapKey=type Conditions []metav1.Condition + `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" + protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" + properties: + lastTransitionTime: + description: lastTransitionTime is the last time the condition + transitioned from one status to another. This should be when + the underlying condition changed. If that is not known, then + using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: message is a human readable message indicating + details about the transition. This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: observedGeneration represents the .metadata.generation + that the condition was set based upon. For instance, if .metadata.generation + is currently 12, but the .status.conditions[x].observedGeneration + is 9, the condition is out of date with respect to the current + state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: reason contains a programmatic identifier indicating + the reason for the condition's last transition. Producers + of specific condition types may define expected values and + meanings for this field, and whether the values are considered + a guaranteed API. The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. + --- Many .condition.type values are consistent across resources + like Available, but because arbitrary conditions can be useful + (see .node.status.conditions), the ability to deconflict is + important. 
The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + errorMessage: + description: ErrorMessage shows current error if there are any + type: string phase: description: current phase of the license manager enum: diff --git a/config/crd/bases/enterprise.splunk.com_licensemasters.yaml b/config/crd/bases/enterprise.splunk.com_licensemasters.yaml index 130bfc599..4468b2b8b 100644 --- a/config/crd/bases/enterprise.splunk.com_licensemasters.yaml +++ b/config/crd/bases/enterprise.splunk.com_licensemasters.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.11.3 + controller-gen.kubebuilder.io/version: v0.9.2 creationTimestamp: null name: licensemasters.enterprise.splunk.com spec: diff --git a/config/crd/bases/enterprise.splunk.com_monitoringconsoles.yaml b/config/crd/bases/enterprise.splunk.com_monitoringconsoles.yaml index 8330ef252..c207ae0d7 100644 --- a/config/crd/bases/enterprise.splunk.com_monitoringconsoles.yaml +++ b/config/crd/bases/enterprise.splunk.com_monitoringconsoles.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.11.3 + controller-gen.kubebuilder.io/version: v0.9.2 creationTimestamp: null name: monitoringconsoles.enterprise.splunk.com spec: @@ -8236,6 +8236,79 @@ spec: needToPushMasterApps: type: boolean type: object + conditions: + description: Conditions represent the latest available observations + of an object's state + items: + description: "Condition contains details for one aspect of the current + state of this API Resource. --- This struct is intended for direct + use as an array at the field path .status.conditions. For example, + \n type FooStatus struct{ // Represents the observations of a + foo's current state. // Known .status.conditions.type are: \"Available\", + \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge + // +listType=map // +listMapKey=type Conditions []metav1.Condition + `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" + protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" + properties: + lastTransitionTime: + description: lastTransitionTime is the last time the condition + transitioned from one status to another. This should be when + the underlying condition changed. If that is not known, then + using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: message is a human readable message indicating + details about the transition. This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: observedGeneration represents the .metadata.generation + that the condition was set based upon. For instance, if .metadata.generation + is currently 12, but the .status.conditions[x].observedGeneration + is 9, the condition is out of date with respect to the current + state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: reason contains a programmatic identifier indicating + the reason for the condition's last transition. 
Producers + of specific condition types may define expected values and + meanings for this field, and whether the values are considered + a guaranteed API. The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. + --- Many .condition.type values are consistent across resources + like Available, but because arbitrary conditions can be useful + (see .node.status.conditions), the ability to deconflict is + important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + errorMessage: + description: ErrorMessage shows current error if there are any + type: string phase: description: current phase of the monitoring console enum: diff --git a/config/crd/bases/enterprise.splunk.com_searchheadclusters.yaml b/config/crd/bases/enterprise.splunk.com_searchheadclusters.yaml index 9f0990ae6..df0eddd4d 100644 --- a/config/crd/bases/enterprise.splunk.com_searchheadclusters.yaml +++ b/config/crd/bases/enterprise.splunk.com_searchheadclusters.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.11.3 + controller-gen.kubebuilder.io/version: v0.9.2 creationTimestamp: null name: searchheadclusters.enterprise.splunk.com spec: @@ -5330,6 +5330,76 @@ spec: type: string type: object x-kubernetes-map-type: atomic + conditions: + description: Conditions represent the latest available observations + of an object's state + items: + description: "Condition contains details for one aspect of the current + state of this API Resource. --- This struct is intended for direct + use as an array at the field path .status.conditions. For example, + \n type FooStatus struct{ // Represents the observations of a + foo's current state. // Known .status.conditions.type are: \"Available\", + \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge + // +listType=map // +listMapKey=type Conditions []metav1.Condition + `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" + protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" + properties: + lastTransitionTime: + description: lastTransitionTime is the last time the condition + transitioned from one status to another. This should be when + the underlying condition changed. If that is not known, then + using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: message is a human readable message indicating + details about the transition. This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: observedGeneration represents the .metadata.generation + that the condition was set based upon. For instance, if .metadata.generation + is currently 12, but the .status.conditions[x].observedGeneration + is 9, the condition is out of date with respect to the current + state of the instance. 
+ format: int64 + minimum: 0 + type: integer + reason: + description: reason contains a programmatic identifier indicating + the reason for the condition's last transition. Producers + of specific condition types may define expected values and + meanings for this field, and whether the values are considered + a guaranteed API. The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. + --- Many .condition.type values are consistent across resources + like Available, but because arbitrary conditions can be useful + (see .node.status.conditions), the ability to deconflict is + important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array defaults: description: Inline map of default.yml overrides used to initialize the environment @@ -5344,6 +5414,9 @@ spec: be installed on the CM, standalone, search head deployer or license manager instance. type: string + errorMessage: + description: ErrorMessage shows current error if there are any + type: string etcVolumeStorageConfig: description: Storage configuration for /opt/splunk/etc volume properties: diff --git a/config/crd/bases/enterprise.splunk.com_standalones.yaml b/config/crd/bases/enterprise.splunk.com_standalones.yaml index 88a5a1956..e43344242 100644 --- a/config/crd/bases/enterprise.splunk.com_standalones.yaml +++ b/config/crd/bases/enterprise.splunk.com_standalones.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.11.3 + controller-gen.kubebuilder.io/version: v0.9.2 creationTimestamp: null name: standalones.enterprise.splunk.com spec: @@ -5482,6 +5482,76 @@ spec: type: string type: object x-kubernetes-map-type: atomic + conditions: + description: Conditions represent the latest available observations + of an object's state + items: + description: "Condition contains details for one aspect of the current + state of this API Resource. --- This struct is intended for direct + use as an array at the field path .status.conditions. For example, + \n type FooStatus struct{ // Represents the observations of a + foo's current state. // Known .status.conditions.type are: \"Available\", + \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge + // +listType=map // +listMapKey=type Conditions []metav1.Condition + `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" + protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" + properties: + lastTransitionTime: + description: lastTransitionTime is the last time the condition + transitioned from one status to another. This should be when + the underlying condition changed. If that is not known, then + using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: message is a human readable message indicating + details about the transition. This may be an empty string. 
+ maxLength: 32768 + type: string + observedGeneration: + description: observedGeneration represents the .metadata.generation + that the condition was set based upon. For instance, if .metadata.generation + is currently 12, but the .status.conditions[x].observedGeneration + is 9, the condition is out of date with respect to the current + state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: reason contains a programmatic identifier indicating + the reason for the condition's last transition. Producers + of specific condition types may define expected values and + meanings for this field, and whether the values are considered + a guaranteed API. The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. + --- Many .condition.type values are consistent across resources + like Available, but because arbitrary conditions can be useful + (see .node.status.conditions), the ability to deconflict is + important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array defaults: description: Inline map of default.yml overrides used to initialize the environment @@ -5496,6 +5566,9 @@ spec: be installed on the CM, standalone, search head deployer or license manager instance. 
type: string + errorMessage: + description: ErrorMessage shows current error if there are any + type: string etcVolumeStorageConfig: description: Storage configuration for /opt/splunk/etc volume properties: diff --git a/controllers/clustermanager_controller.go b/controllers/clustermanager_controller.go index 3f3b11dee..859474d3a 100644 --- a/controllers/clustermanager_controller.go +++ b/controllers/clustermanager_controller.go @@ -18,12 +18,15 @@ package controllers import ( "context" + //"github.com/jinzhu/copier" enterpriseApi "github.com/splunk/splunk-operator/api/v4" "time" "github.com/pkg/errors" common "github.com/splunk/splunk-operator/controllers/common" + provisioner "github.com/splunk/splunk-operator/pkg/provisioner/splunk" enterprise "github.com/splunk/splunk-operator/pkg/splunk/enterprise" + managermodel "github.com/splunk/splunk-operator/pkg/splunk/model" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" k8serrors "k8s.io/apimachinery/pkg/api/errors" @@ -41,7 +44,8 @@ import ( // ClusterManagerReconciler reconciles a ClusterManager object type ClusterManagerReconciler struct { client.Client - Scheme *runtime.Scheme + Scheme *runtime.Scheme + ProvisionerFactory provisioner.Factory } //+kubebuilder:rbac:groups=enterprise.splunk.com,resources=clustermanagers,verbs=get;list;watch;create;update;patch;delete @@ -112,7 +116,27 @@ func (r *ClusterManagerReconciler) Reconcile(ctx context.Context, req ctrl.Reque // ApplyClusterManager adding to handle unit test case var ApplyClusterManager = func(ctx context.Context, client client.Client, instance *enterpriseApi.ClusterManager) (reconcile.Result, error) { - return enterprise.ApplyClusterManager(ctx, client, instance) + // match the provisioner.EventPublisher interface + publishEvent := func(ctx context.Context, eventType, reason, message string) { + instance.NewEvent(eventType, reason, message) + } + info := &managermodel.ReconcileInfo{ + Kind: instance.Kind, + CommonSpec: instance.Spec.CommonSplunkSpec, + Client: client, + Log: log.FromContext(ctx), + Namespace: instance.GetNamespace(), + Name: instance.GetName(), + MetaObject: instance, + } + //copier.Copy(info.MetaObject, instance.ObjectMeta) + mg := enterprise.NewManagerFactory(false) + manager, err := mg.NewManager(ctx, info, publishEvent) + if err != nil { + instance.NewEvent("Warning", "ApplyClusterManager", err.Error()) + return reconcile.Result{}, err + } + return manager.ApplyClusterManager(ctx, client, instance) } // SetupWithManager sets up the controller with the Manager.
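ClusterManagerMaintenanceAnnotation is only declared in this patch; no consumer is shown. A purely hypothetical sketch of how a reconciler could gate on it, in the same spirit as the existing paused annotation (treating "true" as the trigger value is an assumed convention, not something this diff establishes):

package main

import "fmt"

// Mirrors the constant added in api/v4/clustermanager_types.go.
const ClusterManagerMaintenanceAnnotation = "clustermanager.enterprise.splunk.com/maintenance"

// inMaintenance reports whether the object's annotations request maintenance
// mode; the "true" comparison below is an assumption, not defined by this diff.
func inMaintenance(annotations map[string]string) bool {
	return annotations[ClusterManagerMaintenanceAnnotation] == "true"
}

func main() {
	annotations := map[string]string{ClusterManagerMaintenanceAnnotation: "true"}
	if inMaintenance(annotations) {
		fmt.Println("maintenance requested: skip reconcile and requeue later")
	}
}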
diff --git a/controllers/indexercluster_controller.go b/controllers/indexercluster_controller.go index ab583b0c0..80f0fd3be 100644 --- a/controllers/indexercluster_controller.go +++ b/controllers/indexercluster_controller.go @@ -18,13 +18,16 @@ package controllers import ( "context" - enterpriseApi "github.com/splunk/splunk-operator/api/v4" "time" + enterpriseApi "github.com/splunk/splunk-operator/api/v4" + "github.com/pkg/errors" enterpriseApiV3 "github.com/splunk/splunk-operator/api/v3" common "github.com/splunk/splunk-operator/controllers/common" + provisioner "github.com/splunk/splunk-operator/pkg/provisioner/splunk" enterprise "github.com/splunk/splunk-operator/pkg/splunk/enterprise" + managermodel "github.com/splunk/splunk-operator/pkg/splunk/model" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" k8serrors "k8s.io/apimachinery/pkg/api/errors" @@ -42,7 +45,8 @@ import ( // IndexerClusterReconciler reconciles a IndexerCluster object type IndexerClusterReconciler struct { client.Client - Scheme *runtime.Scheme + Scheme *runtime.Scheme + ProvisionerFactory provisioner.Factory } //+kubebuilder:rbac:groups=enterprise.splunk.com,resources=indexerclusters,verbs=get;list;watch;create;update;patch;delete @@ -113,8 +117,27 @@ func (r *IndexerClusterReconciler) Reconcile(ctx context.Context, req ctrl.Reque // ApplyIndexerCluster adding to handle unit test case var ApplyIndexerCluster = func(ctx context.Context, client client.Client, instance *enterpriseApi.IndexerCluster) (reconcile.Result, error) { // IdxCluster can be supported by two CRD types for CM + publishEvent := func(ctx context.Context, eventType, reason, message string) { + instance.NewEvent(eventType, reason, message) + } if len(instance.Spec.ClusterManagerRef.Name) > 0 { - return enterprise.ApplyIndexerClusterManager(ctx, client, instance) + info := &managermodel.ReconcileInfo{ + Kind: instance.Kind, + CommonSpec: instance.Spec.CommonSplunkSpec, + Client: client, + Log: log.FromContext(ctx), + Namespace: instance.GetNamespace(), + Name: instance.GetName(), + MetaObject: instance, + } + //copier.Copy(info.MetaObject, instance.ObjectMeta) + mg := enterprise.NewManagerFactory(false) + manager, err := mg.NewManager(ctx, info, publishEvent) + if err != nil { + instance.NewEvent("Warning", "ApplyIndexerCluster", err.Error()) + return reconcile.Result{}, err + } + return manager.ApplyIndexerClusterManager(ctx, client, instance) } return enterprise.ApplyIndexerCluster(ctx, client, instance) } diff --git a/controllers/licensemanager_controller.go b/controllers/licensemanager_controller.go index 20af31fdd..3c77ad6b0 100644 --- a/controllers/licensemanager_controller.go +++ b/controllers/licensemanager_controller.go @@ -24,6 +24,7 @@ import ( "github.com/pkg/errors" common "github.com/splunk/splunk-operator/controllers/common" enterprise "github.com/splunk/splunk-operator/pkg/splunk/enterprise" + managermodel "github.com/splunk/splunk-operator/pkg/splunk/model" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" k8serrors "k8s.io/apimachinery/pkg/api/errors" @@ -111,7 +112,27 @@ func (r *LicenseManagerReconciler) Reconcile(ctx context.Context, req ctrl.Reque // ApplyLicenseManager adding to handle unit test case var ApplyLicenseManager = func(ctx context.Context, client client.Client, instance *enterpriseApi.LicenseManager) (reconcile.Result, error) { - return enterprise.ApplyLicenseManager(ctx, client, instance) + // match the provisioner.EventPublisher interface + publishEvent := func(ctx context.Context, eventType, reason, message string) { +
instance.NewEvent(eventType, reason, message) + } + info := &managermodel.ReconcileInfo{ + Kind: instance.Kind, + CommonSpec: instance.Spec.CommonSplunkSpec, + Client: client, + Log: log.FromContext(ctx), + Namespace: instance.GetNamespace(), + Name: instance.GetName(), + MetaObject: instance, + } + //copier.Copy(info.MetaObject, instance.ObjectMeta) + mg := enterprise.NewManagerFactory(false) + manager, err := mg.NewManager(ctx, info, publishEvent) + if err != nil { + instance.NewEvent("Warning", "ApplyLicenseManager", err.Error()) + return reconcile.Result{}, err + } + return manager.ApplyLicenseManager(ctx, client, instance) } // SetupWithManager sets up the controller with the Manager. diff --git a/go.mod b/go.mod index afdd29d80..0b7903ca8 100644 --- a/go.mod +++ b/go.mod @@ -6,9 +6,10 @@ require ( github.com/aws/aws-sdk-go v1.42.16 github.com/go-logr/logr v1.2.4 github.com/google/go-cmp v0.5.9 + github.com/jinzhu/copier v0.3.5 github.com/minio/minio-go/v7 v7.0.16 - github.com/onsi/ginkgo/v2 v2.10.0 - github.com/onsi/gomega v1.27.8 + github.com/onsi/ginkgo/v2 v2.12.0 + github.com/onsi/gomega v1.27.10 github.com/pkg/errors v0.9.1 github.com/prometheus/client_golang v1.14.0 github.com/wk8/go-ordered-map/v2 v2.1.7 @@ -36,6 +37,7 @@ require ( github.com/go-openapi/jsonpointer v0.19.5 // indirect github.com/go-openapi/jsonreference v0.20.0 // indirect github.com/go-openapi/swag v0.19.14 // indirect + github.com/go-resty/resty/v2 v2.7.0 github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect @@ -45,6 +47,7 @@ require ( github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 // indirect github.com/google/uuid v1.3.0 // indirect github.com/imdario/mergo v0.3.12 // indirect + github.com/jarcoal/httpmock v1.3.0 github.com/jmespath/go-jmespath v0.4.0 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect @@ -68,13 +71,13 @@ require ( go.uber.org/atomic v1.7.0 // indirect go.uber.org/multierr v1.6.0 // indirect golang.org/x/crypto v0.1.0 // indirect - golang.org/x/net v0.10.0 // indirect + golang.org/x/net v0.14.0 // indirect golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b // indirect - golang.org/x/sys v0.8.0 // indirect + golang.org/x/sys v0.11.0 // indirect golang.org/x/term v0.5.0 // indirect - golang.org/x/text v0.9.0 // indirect + golang.org/x/text v0.12.0 // indirect golang.org/x/time v0.3.0 // indirect - golang.org/x/tools v0.9.3 // indirect + golang.org/x/tools v0.12.0 // indirect gomodules.xyz/jsonpatch/v2 v2.2.0 // indirect google.golang.org/appengine v1.6.7 // indirect google.golang.org/protobuf v1.28.1 // indirect diff --git a/go.sum b/go.sum index 9436ddb77..caf194af9 100644 --- a/go.sum +++ b/go.sum @@ -106,6 +106,8 @@ github.com/go-openapi/jsonreference v0.20.0/go.mod h1:Ag74Ico3lPc+zR+qjn4XBUmXym github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= github.com/go-openapi/swag v0.19.14 h1:gm3vOOXfiuw5i9p5N9xJvfjvuofpyvLA9Wr6QfK5Fng= github.com/go-openapi/swag v0.19.14/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= +github.com/go-resty/resty/v2 v2.7.0 h1:me+K9p3uhSmXtrBZ4k9jcEAfJmuC8IivWHwaLZwPrFY= +github.com/go-resty/resty/v2 v2.7.0/go.mod h1:9PWDzw47qPphMRFfhsyk0NnSgvluHcljSMVIq3w7q0I= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572
h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI= github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls= @@ -186,7 +188,11 @@ github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1: github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/imdario/mergo v0.3.12 h1:b6R2BslTbIEToALKP7LxUvijTsNI9TAe80pLWN2g/HU= github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= +github.com/jarcoal/httpmock v1.3.0 h1:2RJ8GP0IIaWwcC9Fp2BmVi8Kog3v2Hn7VXM3fTd+nuc= +github.com/jarcoal/httpmock v1.3.0/go.mod h1:3yb8rc4BI7TCBhFY8ng0gjuLKJNquuDNiPaZjnENuYg= github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= +github.com/jinzhu/copier v0.3.5 h1:GlvfUwHk62RokgqVNvYsku0TATCF7bAHVwEXoBh3iJg= +github.com/jinzhu/copier v0.3.5/go.mod h1:DfbEm0FYsaqBcKcFuvmOZb218JkPGtvSHsKg8S8hyyg= github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= @@ -228,6 +234,7 @@ github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJ github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/matttproud/golang_protobuf_extensions v1.0.2 h1:hAHbPm5IJGijwng3PWk09JkG9WeqChjprR5s9bBZ+OM= github.com/matttproud/golang_protobuf_extensions v1.0.2/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= +github.com/maxatome/go-testdeep v1.12.0 h1:Ql7Go8Tg0C1D/uMMX59LAoYK7LffeJQ6X2T04nTH68g= github.com/minio/md5-simd v1.1.0 h1:QPfiOqlZH+Cj9teu0t9b1nTBfPbyTl16Of5MeuShdK4= github.com/minio/md5-simd v1.1.0/go.mod h1:XpBqgZULrMYD3R+M28PcmP0CkI7PEMzB3U77ZrKZ0Gw= github.com/minio/minio-go/v7 v7.0.16 h1:GspaSBS8lOuEUCAqMe0W3UxSoyOA4b4F8PTspRVI+k4= @@ -251,10 +258,14 @@ github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRW github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= -github.com/onsi/ginkgo/v2 v2.10.0 h1:sfUl4qgLdvkChZrWCYndY2EAu9BRIw1YphNAzy1VNWs= -github.com/onsi/ginkgo/v2 v2.10.0/go.mod h1:UDQOh5wbQUlMnkLfVaIUMtQ1Vus92oM+P2JX1aulgcE= +github.com/onsi/ginkgo/v2 v2.11.0 h1:WgqUCUt/lT6yXoQ8Wef0fsNn5cAuMK7+KT9UFRz2tcU= +github.com/onsi/ginkgo/v2 v2.11.0/go.mod h1:ZhrRA5XmEE3x3rhlzamx/JJvujdZoJ2uvgI7kR0iZvM= +github.com/onsi/ginkgo/v2 v2.12.0 h1:UIVDowFPwpg6yMUpPjGkYvf06K3RAiJXUhCxEwQVHRI= +github.com/onsi/ginkgo/v2 v2.12.0/go.mod h1:ZNEzXISYlqpb8S36iN71ifqLi3vVD1rVJGvWRCJOUpQ= github.com/onsi/gomega v1.27.8 h1:gegWiwZjBsf2DgiSbf5hpokZ98JVDMcWkUiigk6/KXc= github.com/onsi/gomega v1.27.8/go.mod h1:2J8vzI/s+2shY9XHRApDkdgPo1TKT7P2u6fXeJKFnNQ= +github.com/onsi/gomega v1.27.10 h1:naR28SdDFlqrG6kScpT8VWpu1xWY5nJRCF3XaYyBjhI= +github.com/onsi/gomega v1.27.10/go.mod h1:RsS8tutOdbdgzbPtzzATp12yT7kM5I5aElG3evPbQ0M= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors 
v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= @@ -424,8 +435,12 @@ golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.8.0 h1:EBmGv8NaZBZTWvrbjNoL6HVt+IVy3QDQpJs7VRIw3tU= -golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.9.0 h1:KS/R3tvhPqvJvwcKfnBHJwwthS11LRhmM5D59eEXa0s= +golang.org/x/sys v0.9.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.10.0 h1:SqMFp9UcQJZa+pmYuAKjd9xq1f0j5rLcDIk0mj4qAsA= +golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.11.0 h1:eG7RXZHdqOJ1i+0lgLgCpSXAp6M3LYlAo6osgSi0xOM= +golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.5.0 h1:n2a8QNdAb0sZNpU9R1ALUXBbY+w51fCQDN+7EdxNBsY= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= @@ -439,6 +454,10 @@ golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.9.0 h1:2sjJmO8cDvYveuX97RDLsxlyUxLl+GHoLxBiRdHllBE= golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +golang.org/x/text v0.11.0 h1:LAntKIrcmeSKERyiOh0XMV39LXS8IE9UL2yP7+f5ij4= +golang.org/x/text v0.11.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= +golang.org/x/text v0.12.0 h1:k+n5B8goJNdU7hSvEtMUz3d1Q6D/XW4COJSJR6fN0mc= +golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -491,6 +510,8 @@ golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4f golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/tools v0.9.3 h1:Gn1I8+64MsuTb/HpH+LmQtNas23LhUVr3rYZ0eKuaMM= golang.org/x/tools v0.9.3/go.mod h1:owI94Op576fPu3cIGQeHs3joujW/2Oc6MtlxbF5dfNc= +golang.org/x/tools v0.12.0 h1:YW6HUoUmYBpwSgyaGaZq1fHjrBjX1rlpZ54T6mu2kss= +golang.org/x/tools v0.12.0/go.mod h1:Sc0INKfu04TlqNoRA1hgpFZbhYXHPr4V5DzpSBTPqQM= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= diff --git a/kuttl/tests/upgrade/c3-with-operator/c3_config.yaml b/kuttl/tests/upgrade/c3-with-operator/c3_config.yaml index fd00ad06d..eaa4b9a9f 100644 --- a/kuttl/tests/upgrade/c3-with-operator/c3_config.yaml +++ b/kuttl/tests/upgrade/c3-with-operator/c3_config.yaml @@ -16,6 +16,9 @@ sva: searchHeadClusters: - name: shc + licenseManager: + - name: lm + indexerCluster: enabled: true diff --git a/pkg/gateway/splunk/introspection/gateway.go 
b/pkg/gateway/splunk/introspection/gateway.go new file mode 100644 index 000000000..37d723d56 --- /dev/null +++ b/pkg/gateway/splunk/introspection/gateway.go @@ -0,0 +1,137 @@ +package introspection + +import ( + "context" + + splunkmodel "github.com/splunk/splunk-operator/pkg/gateway/splunk/model" +) + +// EventPublisher is a function type for publishing events associated +// with gateway functions. +type EventPublisher func(reason, message string) + +// Factory is the interface for creating new Gateway objects. +type Factory interface { + NewGateway(ctx context.Context, sad *splunkmodel.SplunkCredentials, publisher EventPublisher) (Gateway, error) +} + +// Gateway holds the state information for talking to +// splunk gateway backend. +type Gateway interface { + + // Heading: Introspect API list + + // Get information about the volume (logical drives) in use by the Splunk deployment. + // endpoint: /services/data/index-volumes + GetIndexVolumes() error + + // List the recognized indexes on the server. + // endpoint: /services/data/indexes + GetIndexes() error + + // List bucket attributes for all indexes. + // endpoint: /services/data/indexes-extended + GetIndexAllBucketInformation() error + + // Get disk usage information about all summaries in an indexer. + // endpoint: /services/data/summaries + GetDataSummaries() error + + // Shows the overall health of a distributed deployment. The health of the deployment can be red, yellow, or green. The overall health of the deployment is based on the health of all features reporting to it. + // Authentication and Authorization: + // Requires the admin role or list_health capability. + // endpoint: /services/server/health/deployment + GetServerDeploymentHealth() error + + // Shows the overall health of splunkd. The health of splunkd can be red, yellow, or green. The health of splunkd is based on the health of all features reporting to it. + // Authentication and Authorization: + // Requires the admin role or list_health capability. + // Get health status of distributed deployment features. + // endpoint: https://:/services/server/health/deployment/details + GetServerDeploymentHealthDetails() error + + // Shows the overall health of splunkd. The health of splunkd can be red, yellow, or green. The health of splunkd is based on the health of all features reporting to it. + // /services/server/health/splunkd + GetSplunkdHealth() error + + // Shows the overall health of the splunkd health status tree, as well as each feature node and its respective color. For unhealthy nodes (non-green), the output includes reasons, indicators, thresholds, messages, and so on. + // Authentication and Authorization: + // Requires the admin role or list_health capability. + // /services/server/health/splunkd/details + GetSplunkdHealthDetails() error + + // Shows the overall health of splunkd. The health of splunkd can be red, yellow, or green. The health of splunkd is based on the health of all features reporting to it. + // Authentication and Authorization + // Requires the admin role or list_health capability. + // Get the health status of splunkd + // endpoint: https://:/services/server/health/splunkd + GetServerHealthConfig() error + + // Access information about the currently running Splunk instance. + // Note: This endpoint provides information on the currently running Splunk instance. Some values returned + // in the GET response reflect server status information. 
However, this endpoint is meant to provide
+	// information on the currently running instance, not the machine where the instance is running.
+	// Server status values returned by this endpoint should be considered deprecated and might not continue
+	// to be accessible from this endpoint. Use server/sysinfo to access server status instead.
+	// endpoint: https://:/services/server/info
+	GetServerInfo() error
+
+	// Access system introspection artifacts.
+	// endpoint: https://:/services/server/introspection
+	GetServerIntrospection() error
+
+	// List server/status child resources.
+	// endpoint: https://:/services/server/status
+	GetServerStatus() error
+
+	// Access search job information.
+	// endpoint: https://:/services/server/status/dispatch-artifacts
+	GetServerDispatchArtifactsStatus() error
+
+	// Access information about the private BTree database (the fishbucket).
+	// endpoint: https://:/services/server/status/fishbucket
+	GetServerFishBucketStatus() error
+
+	// Check for system file irregularities.
+	// endpoint: https://:/services/server/status/installed-file-integrity
+	GetServerInstalledFileIntegrityStatus() error
+
+	// Access search concurrency metrics for a standalone Splunk Enterprise instance.
+	// Get search concurrency limits for a standalone Splunk Enterprise instance.
+	// endpoint: https://:/services/server/status/limits/search-concurrency
+	GetServerSearchConcurrencyLimitsStatus() error
+
+	// Access disk utilization information for filesystems that have Splunk objects, such as indexes, volumes, and logs. A filesystem can span multiple physical disk partitions.
+	// Get disk utilization information.
+	// endpoint: https://:/services/server/status/partitions-space
+	GetServerPartitionSpaceStatus() error
+
+	// Get current resource (CPU, RAM, VM, I/O, file handle) utilization for the entire host, and per Splunk-related process.
+	// endpoint: https://:/services/server/status/resource-usage
+	GetServerResourceUsageStatus() error
+
+	// Access host-level dynamic CPU utilization and paging information.
+	// endpoint: https://:/services/server/status/resource-usage/hostwide
+	GetServerHostwideResourceUsageState() error
+
+	// Access the most recent disk I/O statistics for each disk. This endpoint is currently supported for Linux, Windows, and Solaris. By default this endpoint is updated every 60 seconds.
+	// endpoint: https://:/services/server/status/resource-usage/iostats
+	GetServerIostatResourceUsageStatus() error
+
+	// Access operating system resource utilization information.
+	// endpoint: https://:/services/server/status/resource-usage/splunk-processes
+	GetSplunkProcessesResourceUsageStatus() error
+
+	// Exposes relevant information about the resources and OS settings of the machine where Splunk Enterprise is running.
+	// Usage details
+	// This endpoint provides status information for the server where the current Splunk instance is running.
+	// The GET request response includes Kernel Transparent Huge Pages (THP) and ulimit status.
+	// Note: Some properties returned by this endpoint are also returned by server/info. However,
+	// the server/info endpoint is meant to provide information on the currently running Splunk instance and not
+	// the machine where the instance is running. Server status values returned by server/info should be considered
+	// deprecated and might not continue to be accessible from that endpoint. Use the server/sysinfo endpoint for
+	// server information instead.
+	// endpoint: https://:/services/server/sysinfo
+ GetServerSysInfo() error +} diff --git a/pkg/gateway/splunk/license-manager/fixture/license.json b/pkg/gateway/splunk/license-manager/fixture/license.json new file mode 100644 index 000000000..9c907c0db --- /dev/null +++ b/pkg/gateway/splunk/license-manager/fixture/license.json @@ -0,0 +1,286 @@ +{ + "links": { + "create": "/services/licenser/licenses/_new" + }, + "origin": "https://splunk-lm-license-manager-service.test:8089/services/licenser/licenses", + "updated": "2023-08-01T23:14:56+00:00", + "generator": { + "build": "e9494146ae5c", + "version": "9.0.5" + }, + "entry": [ + { + "name": "455A1EC0719F74DB8B13CD2FDA81D930A55EF21366AC9A3D096E2545B6D5D987", + "id": "https://splunk-lm-license-manager-service.test:8089/services/licenser/licenses/455A1EC0719F74DB8B13CD2FDA81D930A55EF21366AC9A3D096E2545B6D5D987", + "updated": "1970-01-01T00:00:00+00:00", + "links": { + "alternate": "/services/licenser/licenses/455A1EC0719F74DB8B13CD2FDA81D930A55EF21366AC9A3D096E2545B6D5D987", + "list": "/services/licenser/licenses/455A1EC0719F74DB8B13CD2FDA81D930A55EF21366AC9A3D096E2545B6D5D987", + "edit": "/services/licenser/licenses/455A1EC0719F74DB8B13CD2FDA81D930A55EF21366AC9A3D096E2545B6D5D987" + }, + "author": "system", + "acl": { + "app": "", + "can_list": true, + "can_write": true, + "modifiable": false, + "owner": "system", + "perms": { + "read": [ + "admin", + "splunk-system-role" + ], + "write": [ + "admin", + "splunk-system-role" + ] + }, + "removable": false, + "sharing": "system" + }, + "content": { + "add_ons": null, + "allowedRoles": [], + "assignableRoles": [], + "creation_time": 1688108400, + "disabled_features": [], + "eai:acl": null, + "expiration_time": 1704095999, + "features": [ + "Acceleration", + "AdvancedSearchCommands", + "AdvancedXML", + "Alerting", + "AllowDuplicateKeys", + "ArchiveToHdfs", + "Auth", + "CanBeRemoteMaster", + "ConditionalLicensingEnforcement", + "CustomRoles", + "DeployClient", + "DeployServer", + "DistSearch", + "FwdData", + "GuestPass", + "KVStore", + "LDAPAuth", + "LocalSearch", + "MultifactorAuth", + "MultisiteClustering", + "NontableLookups", + "RcvData", + "RcvSearch", + "RollingWindowAlerts", + "SAMLAuth", + "ScheduledAlerts", + "ScheduledReports", + "ScheduledSearch", + "ScriptedAuth", + "SearchheadPooling", + "SigningProcessor", + "SplunkWeb", + "SyslogOutputProcessor", + "UnisiteClustering" + ], + "group_id": "Enterprise", + "guid": "2A327594-08E4-48BA-A001-3EB4C1475910", + "is_unlimited": false, + "label": "Splunk Internal License DO NOT DISTRIBUTE", + "license_hash": "455A1EC0719F74DB8B13CD2FDA81D930A55EF21366AC9A3D096E2545B6D5D987", + "max_retention_size": 0, + "max_stack_quota": 18446744073709552000, + "max_users": 0, + "max_violations": 5, + "notes": "", + "quota": 53687091200, + "relative_expiration_interval": 0, + "relative_expiration_start": 0, + "sourcetypes": [], + "stack_id": "enterprise", + "status": "VALID", + "subgroup_id": "Production", + "type": "enterprise", + "window_period": 30 + } + }, + { + "name": "FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFD", + "id": "https://splunk-lm-license-manager-service.test:8089/services/licenser/licenses/FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFD", + "updated": "1970-01-01T00:00:00+00:00", + "links": { + "alternate": "/services/licenser/licenses/FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFD", + "list": "/services/licenser/licenses/FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFD", + "edit": 
"/services/licenser/licenses/FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFD" + }, + "author": "system", + "acl": { + "app": "", + "can_list": true, + "can_write": true, + "modifiable": false, + "owner": "system", + "perms": { + "read": [ + "admin", + "splunk-system-role" + ], + "write": [ + "admin", + "splunk-system-role" + ] + }, + "removable": false, + "sharing": "system" + }, + "content": { + "add_ons": null, + "allowedRoles": [], + "assignableRoles": [], + "creation_time": 1277017200, + "disabled_features": [ + "Acceleration", + "AdvancedSearchCommands", + "AdvancedXML", + "ArchiveToHdfs", + "ConditionalLicensingEnforcement", + "CustomRoles", + "GuestPass", + "KVStore", + "LDAPAuth", + "MultifactorAuth", + "MultisiteClustering", + "NontableLookups", + "RollingWindowAlerts", + "SAMLAuth", + "ScheduledAlerts", + "ScheduledReports", + "ScriptedAuth", + "SearchheadPooling", + "UnisiteClustering" + ], + "eai:acl": null, + "expiration_time": 2147483647, + "features": [ + "Auth", + "DeployClient", + "FwdData", + "RcvData", + "SigningProcessor", + "SplunkWeb", + "SyslogOutputProcessor" + ], + "group_id": "Forwarder", + "guid": "11111111-1111-1111-1111-111111111111", + "is_unlimited": false, + "label": "Splunk Forwarder", + "license_hash": "FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFD", + "max_retention_size": 0, + "max_stack_quota": 18446744073709552000, + "max_users": 4294967295, + "max_violations": 5, + "notes": "", + "quota": 1048576, + "relative_expiration_interval": 0, + "relative_expiration_start": 0, + "sourcetypes": [], + "stack_id": "forwarder", + "status": "VALID", + "subgroup_id": "Production", + "type": "forwarder", + "window_period": 30 + } + }, + { + "name": "FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF", + "id": "https://splunk-lm-license-manager-service.test:8089/services/licenser/licenses/FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF", + "updated": "1970-01-01T00:00:00+00:00", + "links": { + "alternate": "/services/licenser/licenses/FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF", + "list": "/services/licenser/licenses/FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF", + "edit": "/services/licenser/licenses/FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF" + }, + "author": "system", + "acl": { + "app": "", + "can_list": true, + "can_write": true, + "modifiable": false, + "owner": "system", + "perms": { + "read": [ + "admin", + "splunk-system-role" + ], + "write": [ + "admin", + "splunk-system-role" + ] + }, + "removable": false, + "sharing": "system" + }, + "content": { + "add_ons": null, + "allowedRoles": [], + "assignableRoles": [], + "creation_time": 1277017200, + "disabled_features": [ + "Acceleration", + "AdvancedSearchCommands", + "AdvancedXML", + "ArchiveToHdfs", + "ConditionalLicensingEnforcement", + "CustomRoles", + "GuestPass", + "LDAPAuth", + "MultifactorAuth", + "MultisiteClustering", + "NontableLookups", + "RollingWindowAlerts", + "SAMLAuth", + "ScheduledAlerts", + "ScheduledReports", + "ScriptedAuth", + "SearchheadPooling", + "UnisiteClustering" + ], + "eai:acl": null, + "expiration_time": 2147483647, + "features": [ + "FwdData", + "KVStore", + "LocalSearch", + "RcvData", + "ScheduledSearch", + "SigningProcessor", + "SplunkWeb" + ], + "group_id": "Free", + "guid": "00000000-0000-0000-0000-000000000000", + "is_unlimited": false, + "label": "Splunk Free", + "license_hash": "FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF", 
+ "max_retention_size": 0, + "max_stack_quota": 18446744073709552000, + "max_users": 4294967295, + "max_violations": 3, + "notes": "", + "quota": 524288000, + "relative_expiration_interval": 0, + "relative_expiration_start": 0, + "sourcetypes": [], + "stack_id": "free", + "status": "VALID", + "subgroup_id": "Production", + "type": "free", + "window_period": 30 + } + } + ], + "paging": { + "total": 3, + "perPage": 10000000, + "offset": 0 + }, + "messages": [] + } diff --git a/pkg/gateway/splunk/license-manager/fixture/license_fixture.go b/pkg/gateway/splunk/license-manager/fixture/license_fixture.go new file mode 100644 index 000000000..a1b9a2940 --- /dev/null +++ b/pkg/gateway/splunk/license-manager/fixture/license_fixture.go @@ -0,0 +1,528 @@ +package fixture + +import ( + "context" + "encoding/json" + "fmt" + "os" + + "path/filepath" + + //"encoding/json" + + "net/http" + + "github.com/go-logr/logr" + "github.com/go-resty/resty/v2" + "github.com/jarcoal/httpmock" + splunkmodel "github.com/splunk/splunk-operator/pkg/gateway/splunk/model" + licensemodel "github.com/splunk/splunk-operator/pkg/gateway/splunk/model/services/license" + + gateway "github.com/splunk/splunk-operator/pkg/gateway/splunk/license-manager" + model "github.com/splunk/splunk-operator/pkg/splunk/model" + logz "sigs.k8s.io/controller-runtime/pkg/log/zap" +) + +var log = logz.New().WithName("gateway").WithName("fixture") + +// fixtureGateway implements the gateway.fixtureGateway interface +// and uses splunk to manage the host. +type fixtureGateway struct { + // client for talking to splunk + client *resty.Client + // the splunk credentials + credentials splunkmodel.SplunkCredentials + // a logger configured for this host + log logr.Logger + // an event publisher for recording significant events + publisher model.EventPublisher + // state of the splunk + state *Fixture +} + +func findFixturePath() (string, error) { + ext := ".env" + wd, err := os.Getwd() + if err != nil { + return "", err + } + for { + dir, err := os.Open(wd) + if err != nil { + fmt.Println("Error opening directory:", err) + return "", err + } + defer dir.Close() + + files, err := dir.Readdir(-1) + if err != nil { + fmt.Println("Error reading directory:", err) + return "", err + } + + for _, file := range files { + if file.Name() == ext { + wd, err = filepath.Abs(wd) + wd += "/pkg/gateway/splunk/license-manager/fixture/" + return wd, err + } + } + wd += "/.." + } +} + +// Fixture contains persistent state for a particular splunk instance +type Fixture struct { +} + +// NewGateway returns a new Fixture Gateway +func (f *Fixture) NewGateway(ctx context.Context, sad *splunkmodel.SplunkCredentials, publisher model.EventPublisher) (gateway.Gateway, error) { + p := &fixtureGateway{ + log: log.WithValues("splunk", sad.Address), + publisher: publisher, + state: f, + client: resty.New(), + } + return p, nil +} + +func (p *fixtureGateway) GetLicenseGroup(ctx context.Context) (*[]licensemodel.LicenseGroup, error) { + // Read entire file content, giving us little control but + // making it very simple. No need to close the file. 
+	relativePath, err := findFixturePath()
+	if err != nil {
+		log.Error(err, "fixture: unable to find path")
+		return nil, err
+	}
+	content, err := os.ReadFile(relativePath + "/license_group.json")
+	if err != nil {
+		log.Error(err, "fixture: error reading license_group.json")
+		return nil, err
+	}
+	httpmock.ActivateNonDefault(p.client.GetClient())
+	fixtureData := string(content)
+	responder := httpmock.NewStringResponder(200, fixtureData)
+	fakeUrl := licensemodel.GetLicenseGroupUrl
+	httpmock.RegisterResponder("GET", fakeUrl, responder)
+	// fetch the response into the envelope struct
+	splunkError := &splunkmodel.SplunkError{}
+	envelop := &licensemodel.LicenseHeader{}
+	resp, err := p.client.R().
+		SetResult(envelop).
+		SetError(&splunkError).
+		ForceContentType("application/json").
+		SetQueryParams(map[string]string{"output_mode": "json", "count": "0"}).
+		Get(fakeUrl)
+	if err != nil {
+		p.log.Error(err, "get license group failed")
+		return nil, err
+	}
+	if resp.StatusCode() != http.StatusOK {
+		p.log.Info("get license group returned non-OK status", "status", resp.StatusCode())
+	}
+	if resp.StatusCode() >= 400 {
+		if len(splunkError.Messages) > 0 {
+			p.log.Info("splunk error message", "text", splunkError.Messages[0].Text)
+		}
+		return nil, splunkError
+	}
+
+	contentList := []licensemodel.LicenseGroup{}
+	for _, entry := range envelop.Entry {
+		// decode via a JSON round-trip; a direct type assertion on the
+		// map-typed Content would panic here
+		var content licensemodel.LicenseGroup
+		s, err := json.Marshal(entry.Content)
+		if err != nil {
+			return &contentList, err
+		}
+		err = json.Unmarshal(s, &content)
+		if err != nil {
+			return &contentList, err
+		}
+		contentList = append(contentList, content)
+	}
+	return &contentList, nil
+}
+
+func (p *fixtureGateway) GetLicense(ctx context.Context) (*[]licensemodel.License, error) {
+	// Read entire file content, giving us little control but
+	// making it very simple. No need to close the file.
+	relativePath, err := findFixturePath()
+	if err != nil {
+		log.Error(err, "fixture: unable to find path")
+		return nil, err
+	}
+	content, err := os.ReadFile(relativePath + "/license.json")
+	if err != nil {
+		log.Error(err, "fixture: error reading license.json")
+		return nil, err
+	}
+	httpmock.ActivateNonDefault(p.client.GetClient())
+	fixtureData := string(content)
+	responder := httpmock.NewStringResponder(200, fixtureData)
+	fakeUrl := licensemodel.GetLicenseUrl
+	httpmock.RegisterResponder("GET", fakeUrl, responder)
+	// fetch the response into the envelope struct
+	splunkError := &splunkmodel.SplunkError{}
+	envelop := &licensemodel.LicenseHeader{}
+	resp, err := p.client.R().
+		SetResult(envelop).
+		SetError(&splunkError).
+		ForceContentType("application/json").
+		SetQueryParams(map[string]string{"output_mode": "json", "count": "0"}).
+		Get(fakeUrl)
+	if err != nil {
+		p.log.Error(err, "get license failed")
+		return nil, err
+	}
+	if resp.StatusCode() != http.StatusOK {
+		p.log.Info("get license returned non-OK status", "status", resp.StatusCode())
+	}
+	if resp.StatusCode() >= 400 {
+		if len(splunkError.Messages) > 0 {
+			p.log.Info("splunk error message", "text", splunkError.Messages[0].Text)
+		}
+		return nil, splunkError
+	}
+
+	contentList := []licensemodel.License{}
+	for _, entry := range envelop.Entry {
+		var content licensemodel.License
+		s, err := json.Marshal(entry.Content)
+		if err != nil {
+			return &contentList, err
+		}
+		err = json.Unmarshal(s, &content)
+		if err != nil {
+			return &contentList, err
+		}
+		contentList = append(contentList, content)
+	}
+	return &contentList, nil
+}
+
+func (p *fixtureGateway) GetLicenseLocalPeer(ctx context.Context) (*[]licensemodel.LicenseLocalPeer, error) {
+	// Read entire file content, giving us little control but
+	// making it very simple.
No need to close the file. + relativePath, err := findFixturePath() + if err != nil { + log.Error(err, "fixture: unable to find path") + return nil, err + } + content, err := os.ReadFile(relativePath + "/license_local_peer.json") + if err != nil { + log.Error(err, "fixture: error in get cluster config") + return nil, err + } + httpmock.ActivateNonDefault(p.client.GetClient()) + fixtureData := string(content) + responder := httpmock.NewStringResponder(200, fixtureData) + fakeUrl := licensemodel.GetLicenseLocalPeersUrl + httpmock.RegisterResponder("GET", fakeUrl, responder) + // featch the configheader into struct + splunkError := &splunkmodel.SplunkError{} + envelop := &licensemodel.LicenseHeader{} + resp, err := p.client.R(). + SetResult(envelop). + SetError(&splunkError). + ForceContentType("application/json"). + SetQueryParams(map[string]string{"output_mode": "json", "count": "0"}). + Get(fakeUrl) + if err != nil { + p.log.Error(err, "get cluster manager buckets failed") + } + if resp.StatusCode() != http.StatusOK { + p.log.Info("response failure set to", "result", err) + } + if resp.StatusCode() > 400 { + if len(splunkError.Messages) > 0 { + p.log.Info("response failure set to", "result", splunkError.Messages[0].Text) + } + return nil, splunkError + } + + contentList := []licensemodel.LicenseLocalPeer{} + for _, entry := range envelop.Entry { + var content licensemodel.LicenseLocalPeer + s, err := json.Marshal(entry.Content) + if err != nil { + return &contentList, nil + } + err = json.Unmarshal([]byte(s), &content) + if err != nil { + return &contentList, nil + } + contentList = append(contentList, content) + } + return &contentList, nil +} + +func (p *fixtureGateway) GetLicenseMessage(ctx context.Context) (*[]licensemodel.LicenseMessage, error) { + // Read entire file content, giving us little control but + // making it very simple. No need to close the file. + relativePath, err := findFixturePath() + if err != nil { + log.Error(err, "fixture: unable to find path") + return nil, err + } + content, err := os.ReadFile(relativePath + "/license_message.json") + if err != nil { + log.Error(err, "fixture: error in get cluster config") + return nil, err + } + httpmock.ActivateNonDefault(p.client.GetClient()) + fixtureData := string(content) + responder := httpmock.NewStringResponder(200, fixtureData) + fakeUrl := licensemodel.GetLicenseMessagesUrl + httpmock.RegisterResponder("GET", fakeUrl, responder) + // featch the configheader into struct + splunkError := &splunkmodel.SplunkError{} + envelop := &licensemodel.LicenseHeader{} + resp, err := p.client.R(). + SetResult(envelop). + SetError(&splunkError). + ForceContentType("application/json"). + SetQueryParams(map[string]string{"output_mode": "json", "count": "0"}). 
+ Get(fakeUrl) + if err != nil { + p.log.Error(err, "get cluster manager buckets failed") + } + if resp.StatusCode() != http.StatusOK { + p.log.Info("response failure set to", "result", err) + } + if resp.StatusCode() > 400 { + if len(splunkError.Messages) > 0 { + p.log.Info("response failure set to", "result", splunkError.Messages[0].Text) + } + return nil, splunkError + } + + contentList := []licensemodel.LicenseMessage{} + for _, entry := range envelop.Entry { + var content licensemodel.LicenseMessage + s, err := json.Marshal(entry.Content) + if err != nil { + return &contentList, nil + } + err = json.Unmarshal([]byte(s), &content) + if err != nil { + return &contentList, nil + } + contentList = append(contentList, content) + } + return &contentList, nil +} + +func (p *fixtureGateway) GetLicensePools(ctx context.Context) (*[]licensemodel.LicensePool, error) { + // Read entire file content, giving us little control but + // making it very simple. No need to close the file. + relativePath, err := findFixturePath() + if err != nil { + log.Error(err, "fixture: unable to find path") + return nil, err + } + content, err := os.ReadFile(relativePath + "/license_pools.json") + if err != nil { + log.Error(err, "fixture: error in get cluster config") + return nil, err + } + httpmock.ActivateNonDefault(p.client.GetClient()) + fixtureData := string(content) + responder := httpmock.NewStringResponder(200, fixtureData) + fakeUrl := licensemodel.GetLicensePoolsUrl + httpmock.RegisterResponder("GET", fakeUrl, responder) + // featch the configheader into struct + splunkError := &splunkmodel.SplunkError{} + envelop := &licensemodel.LicenseHeader{} + resp, err := p.client.R(). + SetResult(envelop). + SetError(&splunkError). + ForceContentType("application/json"). + SetQueryParams(map[string]string{"output_mode": "json", "count": "0"}). + Get(fakeUrl) + if err != nil { + p.log.Error(err, "get cluster manager buckets failed") + } + if resp.StatusCode() != http.StatusOK { + p.log.Info("response failure set to", "result", err) + } + if resp.StatusCode() > 400 { + if len(splunkError.Messages) > 0 { + p.log.Info("response failure set to", "result", splunkError.Messages[0].Text) + } + return nil, splunkError + } + + contentList := []licensemodel.LicensePool{} + for _, entry := range envelop.Entry { + var content licensemodel.LicensePool + s, err := json.Marshal(entry.Content) + if err != nil { + return &contentList, nil + } + err = json.Unmarshal([]byte(s), &content) + if err != nil { + return &contentList, nil + } + contentList = append(contentList, content) + } + return &contentList, nil +} + +func (p *fixtureGateway) GetLicensePeers(context context.Context) (*[]licensemodel.LicensePeer, error) { + // Read entire file content, giving us little control but + // making it very simple. No need to close the file. + relativePath, err := findFixturePath() + if err != nil { + log.Error(err, "fixture: unable to find path") + return nil, err + } + content, err := os.ReadFile(relativePath + "/license_peers.json") + if err != nil { + log.Error(err, "fixture: error in get cluster config") + return nil, err + } + httpmock.ActivateNonDefault(p.client.GetClient()) + fixtureData := string(content) + responder := httpmock.NewStringResponder(200, fixtureData) + fakeUrl := licensemodel.GetLicensePeersUrl + httpmock.RegisterResponder("GET", fakeUrl, responder) + // featch the configheader into struct + splunkError := &splunkmodel.SplunkError{} + envelop := &licensemodel.LicenseHeader{} + resp, err := p.client.R(). 
+ SetResult(envelop). + SetError(&splunkError). + ForceContentType("application/json"). + SetQueryParams(map[string]string{"output_mode": "json", "count": "0"}). + Get(fakeUrl) + if err != nil { + p.log.Error(err, "get cluster manager buckets failed") + } + if resp.StatusCode() != http.StatusOK { + p.log.Info("response failure set to", "result", err) + } + if resp.StatusCode() > 400 { + if len(splunkError.Messages) > 0 { + p.log.Info("response failure set to", "result", splunkError.Messages[0].Text) + } + return nil, splunkError + } + + contentList := []licensemodel.LicensePeer{} + for _, entry := range envelop.Entry { + var content licensemodel.LicensePeer + s, err := json.Marshal(entry.Content) + if err != nil { + return &contentList, nil + } + err = json.Unmarshal([]byte(s), &content) + if err != nil { + return &contentList, nil + } + contentList = append(contentList, content) + } + return &contentList, nil +} + +func (p *fixtureGateway) GetLicenseUsage(ctx context.Context) (*[]licensemodel.LicenseUsage, error) { + // Read entire file content, giving us little control but + // making it very simple. No need to close the file. + relativePath, err := findFixturePath() + if err != nil { + log.Error(err, "fixture: unable to find path") + return nil, err + } + content, err := os.ReadFile(relativePath + "/license_usage.json") + if err != nil { + log.Error(err, "fixture: error in get cluster config") + return nil, err + } + httpmock.ActivateNonDefault(p.client.GetClient()) + fixtureData := string(content) + responder := httpmock.NewStringResponder(200, fixtureData) + fakeUrl := licensemodel.GetLicenseUsageUrl + httpmock.RegisterResponder("GET", fakeUrl, responder) + // featch the configheader into struct + splunkError := &splunkmodel.SplunkError{} + envelop := &licensemodel.LicenseHeader{} + resp, err := p.client.R(). + SetResult(envelop). + SetError(&splunkError). + ForceContentType("application/json"). + SetQueryParams(map[string]string{"output_mode": "json", "count": "0"}). + Get(fakeUrl) + if err != nil { + p.log.Error(err, "get cluster manager buckets failed") + } + if resp.StatusCode() != http.StatusOK { + p.log.Info("response failure set to", "result", err) + } + if resp.StatusCode() > 400 { + if len(splunkError.Messages) > 0 { + p.log.Info("response failure set to", "result", splunkError.Messages[0].Text) + } + return nil, splunkError + } + + contentList := []licensemodel.LicenseUsage{} + for _, entry := range envelop.Entry { + var content licensemodel.LicenseUsage + s, err := json.Marshal(entry.Content) + if err != nil { + return &contentList, nil + } + err = json.Unmarshal([]byte(s), &content) + if err != nil { + return &contentList, nil + } + contentList = append(contentList, content) + } + return &contentList, nil +} + +func (p *fixtureGateway) GetLicenseStacks(ctx context.Context) (*[]licensemodel.LicenseStack, error) { + // Read entire file content, giving us little control but + // making it very simple. No need to close the file. 
+ relativePath, err := findFixturePath() + if err != nil { + log.Error(err, "fixture: unable to find path") + return nil, err + } + content, err := os.ReadFile(relativePath + "/license_stack.json") + if err != nil { + log.Error(err, "fixture: error in get cluster config") + return nil, err + } + httpmock.ActivateNonDefault(p.client.GetClient()) + fixtureData := string(content) + responder := httpmock.NewStringResponder(200, fixtureData) + fakeUrl := licensemodel.GetLicenseStacksUrl + httpmock.RegisterResponder("GET", fakeUrl, responder) + // featch the configheader into struct + splunkError := &splunkmodel.SplunkError{} + envelop := &licensemodel.LicenseHeader{} + resp, err := p.client.R(). + SetResult(envelop). + SetError(&splunkError). + ForceContentType("application/json"). + SetQueryParams(map[string]string{"output_mode": "json", "count": "0"}). + Get(fakeUrl) + if err != nil { + p.log.Error(err, "get cluster manager buckets failed") + } + if resp.StatusCode() != http.StatusOK { + p.log.Info("response failure set to", "result", err) + } + if resp.StatusCode() > 400 { + if len(splunkError.Messages) > 0 { + p.log.Info("response failure set to", "result", splunkError.Messages[0].Text) + } + return nil, splunkError + } + + contentList := []licensemodel.LicenseStack{} + for _, entry := range envelop.Entry { + var content licensemodel.LicenseStack + s, err := json.Marshal(entry.Content) + if err != nil { + return &contentList, nil + } + err = json.Unmarshal([]byte(s), &content) + if err != nil { + return &contentList, nil + } + contentList = append(contentList, content) + } + return &contentList, nil +} diff --git a/pkg/gateway/splunk/license-manager/fixture/license_group.json b/pkg/gateway/splunk/license-manager/fixture/license_group.json new file mode 100644 index 000000000..b341fcf81 --- /dev/null +++ b/pkg/gateway/splunk/license-manager/fixture/license_group.json @@ -0,0 +1,198 @@ +{ + "links": {}, + "origin": "https://localhost:8089/services/licenser/groups", + "updated": "2023-08-01T21:47:15+00:00", + "generator": { + "build": "e9494146ae5c", + "version": "9.0.5" + }, + "entry": [ + { + "name": "Enterprise", + "id": "https://localhost:8089/services/licenser/groups/Enterprise", + "updated": "1970-01-01T00:00:00+00:00", + "links": { + "alternate": "/services/licenser/groups/Enterprise", + "list": "/services/licenser/groups/Enterprise", + "edit": "/services/licenser/groups/Enterprise" + }, + "author": "system", + "acl": { + "app": "", + "can_list": true, + "can_write": true, + "modifiable": false, + "owner": "system", + "perms": { + "read": [ + "admin", + "splunk-system-role" + ], + "write": [ + "admin", + "splunk-system-role" + ] + }, + "removable": false, + "sharing": "system" + }, + "content": { + "eai:acl": null, + "is_active": true, + "stack_ids": [ + "enterprise" + ] + } + }, + { + "name": "Forwarder", + "id": "https://localhost:8089/services/licenser/groups/Forwarder", + "updated": "1970-01-01T00:00:00+00:00", + "links": { + "alternate": "/services/licenser/groups/Forwarder", + "list": "/services/licenser/groups/Forwarder", + "edit": "/services/licenser/groups/Forwarder" + }, + "author": "system", + "acl": { + "app": "", + "can_list": true, + "can_write": true, + "modifiable": false, + "owner": "system", + "perms": { + "read": [ + "admin", + "splunk-system-role" + ], + "write": [ + "admin", + "splunk-system-role" + ] + }, + "removable": false, + "sharing": "system" + }, + "content": { + "eai:acl": null, + "is_active": false, + "stack_ids": [ + "forwarder" + ] + } + }, + { + "name": 
"Free", + "id": "https://localhost:8089/services/licenser/groups/Free", + "updated": "1970-01-01T00:00:00+00:00", + "links": { + "alternate": "/services/licenser/groups/Free", + "list": "/services/licenser/groups/Free", + "edit": "/services/licenser/groups/Free" + }, + "author": "system", + "acl": { + "app": "", + "can_list": true, + "can_write": true, + "modifiable": false, + "owner": "system", + "perms": { + "read": [ + "admin", + "splunk-system-role" + ], + "write": [ + "admin", + "splunk-system-role" + ] + }, + "removable": false, + "sharing": "system" + }, + "content": { + "eai:acl": null, + "is_active": false, + "stack_ids": [ + "free" + ] + } + }, + { + "name": "Lite", + "id": "https://localhost:8089/services/licenser/groups/Lite", + "updated": "1970-01-01T00:00:00+00:00", + "links": { + "alternate": "/services/licenser/groups/Lite", + "list": "/services/licenser/groups/Lite", + "edit": "/services/licenser/groups/Lite" + }, + "author": "system", + "acl": { + "app": "", + "can_list": true, + "can_write": true, + "modifiable": false, + "owner": "system", + "perms": { + "read": [ + "admin", + "splunk-system-role" + ], + "write": [ + "admin", + "splunk-system-role" + ] + }, + "removable": false, + "sharing": "system" + }, + "content": { + "eai:acl": null, + "is_active": false, + "stack_ids": [] + } + }, + { + "name": "Lite_Free", + "id": "https://localhost:8089/services/licenser/groups/Lite_Free", + "updated": "1970-01-01T00:00:00+00:00", + "links": { + "alternate": "/services/licenser/groups/Lite_Free", + "list": "/services/licenser/groups/Lite_Free", + "edit": "/services/licenser/groups/Lite_Free" + }, + "author": "system", + "acl": { + "app": "", + "can_list": true, + "can_write": true, + "modifiable": false, + "owner": "system", + "perms": { + "read": [ + "admin", + "splunk-system-role" + ], + "write": [ + "admin", + "splunk-system-role" + ] + }, + "removable": false, + "sharing": "system" + }, + "content": { + "eai:acl": null, + "is_active": false, + "stack_ids": [] + } + } + ], + "paging": { + "total": 5, + "perPage": 10000000, + "offset": 0 + }, + "messages": [] +} \ No newline at end of file diff --git a/pkg/gateway/splunk/license-manager/fixture/license_local_peer.json b/pkg/gateway/splunk/license-manager/fixture/license_local_peer.json new file mode 100644 index 000000000..bffde8278 --- /dev/null +++ b/pkg/gateway/splunk/license-manager/fixture/license_local_peer.json @@ -0,0 +1,115 @@ +{ + "links": {}, + "origin": "https://splunk-lm-license-manager-service.test:8089/services/licenser/localpeer", + "updated": "2023-08-01T21:36:59+00:00", + "generator": { + "build": "e9494146ae5c", + "version": "9.0.5" + }, + "entry": [ + { + "name": "license", + "id": "https://splunk-lm-license-manager-service.test:8089/services/licenser/localpeer/license", + "updated": "1970-01-01T00:00:00+00:00", + "links": { + "alternate": "/services/licenser/localpeer/license", + "list": "/services/licenser/localpeer/license", + "edit": "/services/licenser/localpeer/license" + }, + "author": "system", + "acl": { + "app": "", + "can_list": true, + "can_write": true, + "modifiable": false, + "owner": "system", + "perms": { + "read": [ + "admin", + "splunk-system-role" + ], + "write": [ + "admin", + "splunk-system-role" + ] + }, + "removable": false, + "sharing": "system" + }, + "content": { + "add_ons": null, + "connection_timeout": 30, + "eai:acl": null, + "features": { + "AWSMarketplace": "DISABLED_DUE_TO_LICENSE", + "Acceleration": "ENABLED", + "AdvancedSearchCommands": "ENABLED", + "AdvancedXML": 
"ENABLED", + "Alerting": "ENABLED", + "AllowDuplicateKeys": "ENABLED", + "ArchiveToHdfs": "ENABLED", + "Auth": "ENABLED", + "CanBeRemoteMaster": "ENABLED", + "ConditionalLicensingEnforcement": "ENABLED", + "CustomRoles": "ENABLED", + "DeployClient": "ENABLED", + "DeployServer": "ENABLED", + "DisableQuotaEnforcement": "DISABLED_DUE_TO_LICENSE", + "DistSearch": "ENABLED", + "FwdData": "ENABLED", + "GuestPass": "ENABLED", + "HideQuotaWarnings": "DISABLED_DUE_TO_LICENSE", + "KVStore": "ENABLED", + "LDAPAuth": "ENABLED", + "LocalSearch": "ENABLED", + "MultifactorAuth": "ENABLED", + "MultisiteClustering": "ENABLED", + "NontableLookups": "ENABLED", + "RcvData": "ENABLED", + "RcvSearch": "ENABLED", + "ResetWarnings": "DISABLED_DUE_TO_LICENSE", + "RollingWindowAlerts": "ENABLED", + "SAMLAuth": "ENABLED", + "ScheduledAlerts": "ENABLED", + "ScheduledReports": "ENABLED", + "ScheduledSearch": "ENABLED", + "ScriptedAuth": "ENABLED", + "SearchheadPooling": "ENABLED", + "SigningProcessor": "ENABLED", + "SplunkWeb": "ENABLED", + "SubgroupId": "DISABLED_DUE_TO_LICENSE", + "SyslogOutputProcessor": "ENABLED", + "UnisiteClustering": "ENABLED" + }, + "guid": [ + "2A327594-08E4-48BA-A001-3EB4C1475910" + ], + "last_manager_contact_attempt_time": 1690925795, + "last_manager_contact_success_time": 1690925795, + "last_master_contact_attempt_time": 1690925795, + "last_master_contact_success_time": 1690925795, + "last_trackerdb_service_time": 0, + "license_keys": [ + "455A1EC0719F74DB8B13CD2FDA81D930A55EF21366AC9A3D096E2545B6D5D987" + ], + "manager_guid": "A7E343C4-26D4-47A8-86F4-56B3CAD86721", + "manager_uri": "self", + "master_guid": "A7E343C4-26D4-47A8-86F4-56B3CAD86721", + "master_uri": "self", + "peer_id": "A7E343C4-26D4-47A8-86F4-56B3CAD86721", + "peer_label": "splunk-lm-license-manager-0", + "receive_timeout": 30, + "send_timeout": 30, + "slave_id": "A7E343C4-26D4-47A8-86F4-56B3CAD86721", + "slave_label": "splunk-lm-license-manager-0", + "squash_threshold": 2000 + } + } + ], + "paging": { + "total": 1, + "perPage": 10000000, + "offset": 0 + }, + "messages": [] + } \ No newline at end of file diff --git a/pkg/gateway/splunk/license-manager/fixture/license_message.json b/pkg/gateway/splunk/license-manager/fixture/license_message.json new file mode 100644 index 000000000..6763913fa --- /dev/null +++ b/pkg/gateway/splunk/license-manager/fixture/license_message.json @@ -0,0 +1,16 @@ +{ + "links": {}, + "origin": "https://localhost:8089/services/licenser/messages", + "updated": "2023-08-01T21:48:06+00:00", + "generator": { + "build": "e9494146ae5c", + "version": "9.0.5" + }, + "entry": [], + "paging": { + "total": 0, + "perPage": 10000000, + "offset": 0 + }, + "messages": [] +} \ No newline at end of file diff --git a/pkg/gateway/splunk/license-manager/fixture/license_peers.json b/pkg/gateway/splunk/license-manager/fixture/license_peers.json new file mode 100644 index 000000000..a9c736894 --- /dev/null +++ b/pkg/gateway/splunk/license-manager/fixture/license_peers.json @@ -0,0 +1,65 @@ +{ + "links": {}, + "origin": "https://localhost:8089/services/licenser/peers", + "updated": "2023-08-01T21:48:40+00:00", + "generator": { + "build": "e9494146ae5c", + "version": "9.0.5" + }, + "entry": [ + { + "name": "A7E343C4-26D4-47A8-86F4-56B3CAD86721", + "id": "https://localhost:8089/servicesNS/nobody/system/licenser/peers/A7E343C4-26D4-47A8-86F4-56B3CAD86721", + "updated": "1970-01-01T00:00:00+00:00", + "links": { + "alternate": "/servicesNS/nobody/system/licenser/peers/A7E343C4-26D4-47A8-86F4-56B3CAD86721", + "list": 
"/servicesNS/nobody/system/licenser/peers/A7E343C4-26D4-47A8-86F4-56B3CAD86721" + }, + "author": "nobody", + "acl": { + "app": "system", + "can_list": true, + "can_write": true, + "modifiable": false, + "owner": "nobody", + "perms": { + "read": [ + "admin", + "splunk-system-role" + ], + "write": [ + "admin", + "splunk-system-role" + ] + }, + "removable": false, + "sharing": "system" + }, + "content": { + "active_pool_ids": [ + "auto_generated_pool_enterprise" + ], + "eai:acl": null, + "label": "splunk-lm-license-manager-0", + "pool_ids": [ + "auto_generated_pool_enterprise", + "auto_generated_pool_forwarder", + "auto_generated_pool_free" + ], + "pool_suggestion": null, + "stack_ids": [ + "enterprise", + "forwarder", + "free" + ], + "warning_count": 0 + } + } + ], + "paging": { + "total": 1, + "perPage": 0, + "offset": 0 + }, + "messages": [] +} \ No newline at end of file diff --git a/pkg/gateway/splunk/license-manager/fixture/license_pools.json b/pkg/gateway/splunk/license-manager/fixture/license_pools.json new file mode 100644 index 000000000..8f746870c --- /dev/null +++ b/pkg/gateway/splunk/license-manager/fixture/license_pools.json @@ -0,0 +1,156 @@ +{ + "links": { + "create": "/services/licenser/pools/_new", + "_reload": "/services/licenser/pools/_reload", + "_acl": "/services/licenser/pools/_acl" + }, + "origin": "https://localhost:8089/services/licenser/pools", + "updated": "2023-08-01T21:49:16+00:00", + "generator": { + "build": "e9494146ae5c", + "version": "9.0.5" + }, + "entry": [ + { + "name": "auto_generated_pool_enterprise", + "id": "https://localhost:8089/servicesNS/nobody/system/licenser/pools/auto_generated_pool_enterprise", + "updated": "1970-01-01T00:00:00+00:00", + "links": { + "alternate": "/servicesNS/nobody/system/licenser/pools/auto_generated_pool_enterprise", + "list": "/servicesNS/nobody/system/licenser/pools/auto_generated_pool_enterprise", + "_reload": "/servicesNS/nobody/system/licenser/pools/auto_generated_pool_enterprise/_reload", + "edit": "/servicesNS/nobody/system/licenser/pools/auto_generated_pool_enterprise", + "remove": "/servicesNS/nobody/system/licenser/pools/auto_generated_pool_enterprise" + }, + "author": "nobody", + "acl": { + "app": "system", + "can_list": true, + "can_write": true, + "modifiable": false, + "owner": "nobody", + "perms": { + "read": [ + "admin", + "splunk-system-role" + ], + "write": [ + "admin", + "splunk-system-role" + ] + }, + "removable": false, + "sharing": "system" + }, + "content": { + "description": "auto_generated_pool_enterprise", + "eai:acl": null, + "effective_quota": 53687091200, + "is_unlimited": false, + "peers": [], + "peers_usage_bytes": null, + "quota": "MAX", + "slaves": [], + "slaves_usage_bytes": null, + "stack_id": "enterprise", + "used_bytes": 0 + } + }, + { + "name": "auto_generated_pool_forwarder", + "id": "https://localhost:8089/servicesNS/nobody/system/licenser/pools/auto_generated_pool_forwarder", + "updated": "1970-01-01T00:00:00+00:00", + "links": { + "alternate": "/servicesNS/nobody/system/licenser/pools/auto_generated_pool_forwarder", + "list": "/servicesNS/nobody/system/licenser/pools/auto_generated_pool_forwarder", + "_reload": "/servicesNS/nobody/system/licenser/pools/auto_generated_pool_forwarder/_reload", + "edit": "/servicesNS/nobody/system/licenser/pools/auto_generated_pool_forwarder", + "remove": "/servicesNS/nobody/system/licenser/pools/auto_generated_pool_forwarder" + }, + "author": "nobody", + "acl": { + "app": "system", + "can_list": true, + "can_write": true, + "modifiable": false, + 
"owner": "nobody", + "perms": { + "read": [ + "admin", + "splunk-system-role" + ], + "write": [ + "admin", + "splunk-system-role" + ] + }, + "removable": false, + "sharing": "system" + }, + "content": { + "description": "auto_generated_pool_forwarder", + "eai:acl": null, + "effective_quota": 1048576, + "is_unlimited": false, + "peers": [], + "peers_usage_bytes": null, + "quota": "MAX", + "slaves": [], + "slaves_usage_bytes": null, + "stack_id": "forwarder", + "used_bytes": 0 + } + }, + { + "name": "auto_generated_pool_free", + "id": "https://localhost:8089/servicesNS/nobody/system/licenser/pools/auto_generated_pool_free", + "updated": "1970-01-01T00:00:00+00:00", + "links": { + "alternate": "/servicesNS/nobody/system/licenser/pools/auto_generated_pool_free", + "list": "/servicesNS/nobody/system/licenser/pools/auto_generated_pool_free", + "_reload": "/servicesNS/nobody/system/licenser/pools/auto_generated_pool_free/_reload", + "edit": "/servicesNS/nobody/system/licenser/pools/auto_generated_pool_free", + "remove": "/servicesNS/nobody/system/licenser/pools/auto_generated_pool_free" + }, + "author": "nobody", + "acl": { + "app": "system", + "can_list": true, + "can_write": true, + "modifiable": false, + "owner": "nobody", + "perms": { + "read": [ + "admin", + "splunk-system-role" + ], + "write": [ + "admin", + "splunk-system-role" + ] + }, + "removable": false, + "sharing": "system" + }, + "content": { + "description": "auto_generated_pool_free", + "eai:acl": null, + "effective_quota": 524288000, + "is_unlimited": false, + "peers": [], + "peers_usage_bytes": null, + "quota": "MAX", + "slaves": [], + "slaves_usage_bytes": null, + "stack_id": "free", + "used_bytes": 0 + } + } + ], + "paging": { + "total": 3, + "perPage": 10000000, + "offset": 0 + }, + "messages": [] +} \ No newline at end of file diff --git a/pkg/gateway/splunk/license-manager/fixture/license_stack.json b/pkg/gateway/splunk/license-manager/fixture/license_stack.json new file mode 100644 index 000000000..9d88adc99 --- /dev/null +++ b/pkg/gateway/splunk/license-manager/fixture/license_stack.json @@ -0,0 +1,137 @@ +{ + "links": {}, + "origin": "https://localhost:8089/services/licenser/stacks", + "updated": "2023-08-01T21:50:11+00:00", + "generator": { + "build": "e9494146ae5c", + "version": "9.0.5" + }, + "entry": [ + { + "name": "enterprise", + "id": "https://localhost:8089/services/licenser/stacks/enterprise", + "updated": "1970-01-01T00:00:00+00:00", + "links": { + "alternate": "/services/licenser/stacks/enterprise", + "list": "/services/licenser/stacks/enterprise" + }, + "author": "system", + "acl": { + "app": "", + "can_list": true, + "can_write": true, + "modifiable": false, + "owner": "system", + "perms": { + "read": [ + "admin", + "splunk-system-role" + ], + "write": [ + "admin", + "splunk-system-role" + ] + }, + "removable": false, + "sharing": "system" + }, + "content": { + "cle_active": 1, + "eai:acl": null, + "is_unlimited": false, + "label": "Splunk Internal License DO NOT DISTRIBUTE", + "max_retention_size": 0, + "max_violations": 45, + "quota": 53687091200, + "type": "enterprise", + "window_period": 60 + } + }, + { + "name": "forwarder", + "id": "https://localhost:8089/services/licenser/stacks/forwarder", + "updated": "1970-01-01T00:00:00+00:00", + "links": { + "alternate": "/services/licenser/stacks/forwarder", + "list": "/services/licenser/stacks/forwarder" + }, + "author": "system", + "acl": { + "app": "", + "can_list": true, + "can_write": true, + "modifiable": false, + "owner": "system", + "perms": { + "read": 
[ + "admin", + "splunk-system-role" + ], + "write": [ + "admin", + "splunk-system-role" + ] + }, + "removable": false, + "sharing": "system" + }, + "content": { + "cle_active": 0, + "eai:acl": null, + "is_unlimited": false, + "label": "Splunk Forwarder", + "max_retention_size": 0, + "max_violations": 5, + "quota": 1048576, + "type": "forwarder", + "window_period": 30 + } + }, + { + "name": "free", + "id": "https://localhost:8089/services/licenser/stacks/free", + "updated": "1970-01-01T00:00:00+00:00", + "links": { + "alternate": "/services/licenser/stacks/free", + "list": "/services/licenser/stacks/free" + }, + "author": "system", + "acl": { + "app": "", + "can_list": true, + "can_write": true, + "modifiable": false, + "owner": "system", + "perms": { + "read": [ + "admin", + "splunk-system-role" + ], + "write": [ + "admin", + "splunk-system-role" + ] + }, + "removable": false, + "sharing": "system" + }, + "content": { + "cle_active": 0, + "eai:acl": null, + "is_unlimited": false, + "label": "Splunk Free", + "max_retention_size": 0, + "max_violations": 3, + "quota": 524288000, + "type": "free", + "window_period": 30 + } + } + ], + "paging": { + "total": 3, + "perPage": 10000000, + "offset": 0 + }, + "messages": [] +} \ No newline at end of file diff --git a/pkg/gateway/splunk/license-manager/fixture/license_usage.json b/pkg/gateway/splunk/license-manager/fixture/license_usage.json new file mode 100644 index 000000000..08c64b857 --- /dev/null +++ b/pkg/gateway/splunk/license-manager/fixture/license_usage.json @@ -0,0 +1,48 @@ +{ + "links": {}, + "origin": "https://localhost:8089/services/licenser/usage", + "updated": "2023-08-01T21:50:49+00:00", + "generator": { + "build": "e9494146ae5c", + "version": "9.0.5" + }, + "entry": [ + { + "name": "license_usage", + "id": "https://localhost:8089/services/licenser/usage/license_usage", + "updated": "1970-01-01T00:00:00+00:00", + "links": { + "alternate": "/services/licenser/usage/license_usage", + "list": "/services/licenser/usage/license_usage" + }, + "author": "system", + "acl": { + "app": "", + "can_list": true, + "can_write": true, + "modifiable": false, + "owner": "system", + "perms": { + "read": [ + "*" + ], + "write": [] + }, + "removable": false, + "sharing": "system" + }, + "content": { + "eai:acl": null, + "peers_usage_bytes": 0, + "quota": 53687091200, + "slaves_usage_bytes": 0 + } + } + ], + "paging": { + "total": 1, + "perPage": 10000000, + "offset": 0 + }, + "messages": [] +} \ No newline at end of file diff --git a/pkg/gateway/splunk/license-manager/gateway.go b/pkg/gateway/splunk/license-manager/gateway.go new file mode 100644 index 000000000..713cb813f --- /dev/null +++ b/pkg/gateway/splunk/license-manager/gateway.go @@ -0,0 +1,55 @@ +package licensemanager + +import ( + "context" + + splunkmodel "github.com/splunk/splunk-operator/pkg/gateway/splunk/model" + licensemodel "github.com/splunk/splunk-operator/pkg/gateway/splunk/model/services/license" + model "github.com/splunk/splunk-operator/pkg/splunk/model" +) + +// Factory is the interface for creating new Gateway objects. +type Factory interface { + NewGateway(ctx context.Context, sad *splunkmodel.SplunkCredentials, publisher model.EventPublisher) (Gateway, error) +} + +// Gateway holds the state information for talking to +// splunk gateway backend. +type Gateway interface { + + // GetLicenseGroup Performs health checks to determine the cluster health and search impact, prior to a rolling upgrade of the indexer cluster. 
+	// Lists the license groups configured on the license manager.
+	// endpoint: https://:/services/licenser/groups
+	GetLicenseGroup(ctx context.Context) (*[]licensemodel.LicenseGroup, error)
+
+	// GetLicense Access the licenses installed on the license manager.
+	// endpoint: https://:/services/licenser/licenses
+	GetLicense(ctx context.Context) (*[]licensemodel.License, error)
+
+	// GetLicenseLocalPeer Access licenser state for the local peer node.
+	// endpoint: https://:/services/licenser/localpeer
+	GetLicenseLocalPeer(ctx context.Context) (*[]licensemodel.LicenseLocalPeer, error)
+
+	// GetLicenseMessage Access licenser alert messages.
+	// endpoint: https://:/services/licenser/messages
+	GetLicenseMessage(ctx context.Context) (*[]licensemodel.LicenseMessage, error)
+
+	// GetLicensePools List the license pools configured on the license manager.
+	// endpoint: https://:/services/licenser/pools
+	GetLicensePools(ctx context.Context) (*[]licensemodel.LicensePool, error)
+
+	// GetLicensePeers List the peers that report to this license manager.
+	// endpoint: https://:/services/licenser/peers
+	GetLicensePeers(context context.Context) (*[]licensemodel.LicensePeer, error)
+
+	// GetLicenseUsage Access usage metrics for the license stacks.
+	// endpoint: https://:/services/licenser/usage
+	GetLicenseUsage(ctx context.Context) (*[]licensemodel.LicenseUsage, error)
+
+	// GetLicenseStacks List the license stacks configured on the license manager.
+	// endpoint: https://:/services/licenser/stacks
+	GetLicenseStacks(ctx context.Context) (*[]licensemodel.LicenseStack, error)
+}
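For orientation, here is a minimal sketch of how a caller could consume this Gateway through the factory defined below. The credential values are placeholders, and the `Label`, `Status`, and `Quota` fields are assumptions inferred from the fixture JSON in this change, not confirmed model fields:

```go
package main

import (
	"context"
	"fmt"

	impl "github.com/splunk/splunk-operator/pkg/gateway/splunk/license-manager/implementation"
	splunkmodel "github.com/splunk/splunk-operator/pkg/gateway/splunk/model"
)

func main() {
	ctx := context.Background()
	// Placeholder credentials; field names follow license_test.go below.
	sad := &splunkmodel.SplunkCredentials{
		Address:                        "splunk-lm-license-manager-service",
		Port:                           8089,
		CredentialsName:                "changeme", // admin password placeholder
		DisableCertificateVerification: true,
	}
	// No-op event publisher matching the model.EventPublisher signature.
	publisher := func(ctx context.Context, eventType, reason, message string) {}

	gw, err := impl.NewGatewayFactory().NewGateway(ctx, sad, publisher)
	if err != nil {
		panic(err)
	}
	licenses, err := gw.GetLicense(ctx)
	if err != nil {
		panic(err)
	}
	for _, lic := range *licenses {
		// Label, Status, and Quota are assumed fields mirroring the fixture JSON.
		fmt.Printf("license %q status=%s quota=%d\n", lic.Label, lic.Status, lic.Quota)
	}
}
```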
diff --git a/pkg/gateway/splunk/license-manager/implementation/factory.go b/pkg/gateway/splunk/license-manager/implementation/factory.go
new file mode 100644
index 000000000..5242baa39
--- /dev/null
+++ b/pkg/gateway/splunk/license-manager/implementation/factory.go
@@ -0,0 +1,86 @@
+package impl
+
+import (
+	"context"
+	"crypto/tls"
+	"fmt"
+	"time"
+
+	"github.com/go-logr/logr"
+	"github.com/go-resty/resty/v2"
+	gateway "github.com/splunk/splunk-operator/pkg/gateway/splunk/license-manager"
+	splunkmodel "github.com/splunk/splunk-operator/pkg/gateway/splunk/model"
+	model "github.com/splunk/splunk-operator/pkg/splunk/model"
+	"sigs.k8s.io/controller-runtime/pkg/log"
+)
+
+type splunkGatewayFactory struct {
+	log logr.Logger
+	// credentials to log on to splunk
+	credentials *splunkmodel.SplunkCredentials
+	// client for talking to splunk
+	client *resty.Client
+}
+
+// NewGatewayFactory creates a new factory for license-manager gateways.
+func NewGatewayFactory() gateway.Factory {
+	factory := splunkGatewayFactory{}
+	err := factory.init()
+	if err != nil {
+		return nil // FIXME: return the error to the caller instead of swallowing it
+	}
+	return factory
+}
+
+func (f *splunkGatewayFactory) init() error {
+	return nil
+}
+
+func (f splunkGatewayFactory) splunkGateway(ctx context.Context, sad *splunkmodel.SplunkCredentials, publisher model.EventPublisher) (*splunkGateway, error) {
+	gatewayLogger := log.FromContext(ctx)
+	reqLogger := log.FromContext(ctx)
+	f.log = reqLogger.WithName("splunkGateway")
+
+	f.client = resty.New()
+	// Enable debug mode
+	f.client.SetDebug(true)
+	// Optionally skip TLS certificate verification (e.g. for self-signed certificates)
+	f.client.SetTLSClientConfig(&tls.Config{InsecureSkipVerify: sad.DisableCertificateVerification})
+	// Set the client timeout
+	f.client.SetTimeout(1 * time.Minute)
+	namespace := "default"
+	if len(sad.Namespace) > 0 {
+		namespace = sad.Namespace
+	}
+	//splunkURL := fmt.Sprintf("https://%s:%d/%s", sad.Address, sad.Port, sad.ServicesNamespace)
+	splunkURL := fmt.Sprintf("https://%s.%s:%d", sad.Address, namespace, sad.Port)
+	f.client.SetBaseURL(splunkURL)
+	f.client.SetBasicAuth("admin", sad.CredentialsName)
+	f.client.SetHeader("Content-Type", "application/json")
+	f.client.SetHeader("Accept", "application/json")
+	f.credentials = sad
+
+	gatewayLogger.Info("new splunk manager created to access rest endpoint")
+	newGateway := &splunkGateway{
+		credentials: f.credentials,
+		client:      f.client,
+		log:         f.log,
+		debugLog:    f.log,
+		publisher:   publisher,
+	}
+	f.log.Info("splunk settings",
+		"endpoint", f.credentials.Address,
+		"CACertFile", f.credentials.TrustedCAFile,
+		"ClientCertFile", f.credentials.ClientCertificateFile,
+		"ClientPrivKeyFile", f.credentials.ClientPrivateKeyFile,
+		"TLSInsecure", f.credentials.DisableCertificateVerification,
+	)
+	return newGateway, nil
+}
+
+// NewGateway returns a new Splunk Gateway using global
+// configuration for finding the Splunk services.
+func (f splunkGatewayFactory) NewGateway(ctx context.Context, sad *splunkmodel.SplunkCredentials, publisher model.EventPublisher) (gateway.Gateway, error) {
+	return f.splunkGateway(ctx, sad, publisher)
+}
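Because the fixture in pkg/gateway/splunk/license-manager/fixture satisfies the same Factory and Gateway interfaces, the gateway can be unit-tested against the canned JSON above without a live Splunk instance. A sketch; the test name is hypothetical, and it assumes findFixturePath can locate the repository root via its .env probe:

```go
package fixture_test

import (
	"context"
	"testing"

	fixture "github.com/splunk/splunk-operator/pkg/gateway/splunk/license-manager/fixture"
	splunkmodel "github.com/splunk/splunk-operator/pkg/gateway/splunk/model"
)

func TestFixtureGetLicenseUsage(t *testing.T) {
	ctx := context.TODO()
	sad := &splunkmodel.SplunkCredentials{
		Address: "splunk-lm-license-manager-service",
		Port:    8089,
	}
	publisher := func(ctx context.Context, eventType, reason, message string) {}

	f := &fixture.Fixture{}
	gw, err := f.NewGateway(ctx, sad, publisher)
	if err != nil {
		t.Fatal(err)
	}
	usage, err := gw.GetLicenseUsage(ctx)
	if err != nil {
		t.Fatal(err)
	}
	// license_usage.json above contains exactly one license_usage entry.
	if got := len(*usage); got != 1 {
		t.Errorf("expected 1 license usage entry, got %d", got)
	}
}
```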
diff --git a/pkg/gateway/splunk/license-manager/implementation/license_impl.go b/pkg/gateway/splunk/license-manager/implementation/license_impl.go
new file mode 100644
index 000000000..2018cbe56
--- /dev/null
+++ b/pkg/gateway/splunk/license-manager/implementation/license_impl.go
@@ -0,0 +1,348 @@
+package impl
+
+import (
+	"context"
+	"encoding/json"
+	"net/http"
+
+	"github.com/go-logr/logr"
+	"github.com/go-resty/resty/v2"
+	splunkmodel "github.com/splunk/splunk-operator/pkg/gateway/splunk/model"
+	licensemodel "github.com/splunk/splunk-operator/pkg/gateway/splunk/model/services/license"
+	model "github.com/splunk/splunk-operator/pkg/splunk/model"
+)
+
+// splunkGateway implements the gateway.Gateway interface
+// and talks to the Splunk REST API to manage the host.
+type splunkGateway struct {
+	// a logger configured for this host
+	log logr.Logger
+	// a debug logger configured for this host
+	debugLog logr.Logger
+	// an event publisher for recording significant events
+	publisher model.EventPublisher
+	// client for talking to splunk
+	client *resty.Client
+	// credentials
+	credentials *splunkmodel.SplunkCredentials
+}
+
+func (p *splunkGateway) GetLicenseGroup(ctx context.Context) (*[]licensemodel.LicenseGroup, error) {
+	url := licensemodel.GetLicenseGroupUrl
+
+	// fetch the response into the envelope struct
+	splunkError := &splunkmodel.SplunkError{}
+	envelop := &licensemodel.LicenseHeader{}
+	resp, err := p.client.R().
+		SetResult(envelop).
+		SetError(&splunkError).
+		ForceContentType("application/json").
+		SetQueryParams(map[string]string{"output_mode": "json", "count": "0"}).
+		Get(url)
+	if err != nil {
+		p.log.Error(err, "get license group failed")
+		return nil, err
+	}
+	if resp.StatusCode() != http.StatusOK {
+		p.log.Info("get license group returned non-OK status", "status", resp.StatusCode())
+	}
+	if resp.StatusCode() >= 400 {
+		if len(splunkError.Messages) > 0 {
+			p.log.Info("splunk error message", "text", splunkError.Messages[0].Text)
+		}
+		return nil, splunkError
+	}
+
+	contentList := []licensemodel.LicenseGroup{}
+	for _, entry := range envelop.Entry {
+		var content licensemodel.LicenseGroup
+		s, err := json.Marshal(entry.Content)
+		if err != nil {
+			return &contentList, err
+		}
+		err = json.Unmarshal(s, &content)
+		if err != nil {
+			return &contentList, err
+		}
+		contentList = append(contentList, content)
+	}
+	return &contentList, nil
+}
+
+func (p *splunkGateway) GetLicense(ctx context.Context) (*[]licensemodel.License, error) {
+	url := licensemodel.GetLicenseUrl
+
+	// fetch the response into the envelope struct
+	splunkError := &splunkmodel.SplunkError{}
+	envelop := &licensemodel.LicenseHeader{}
+	resp, err := p.client.R().
+		SetResult(envelop).
+		SetError(&splunkError).
+		ForceContentType("application/json").
+		SetQueryParams(map[string]string{"output_mode": "json", "count": "0"}).
+		Get(url)
+	if err != nil {
+		p.log.Error(err, "get license failed")
+		return nil, err
+	}
+	if resp.StatusCode() != http.StatusOK {
+		p.log.Info("get license returned non-OK status", "status", resp.StatusCode())
+	}
+	if resp.StatusCode() >= 400 {
+		if len(splunkError.Messages) > 0 {
+			p.log.Info("splunk error message", "text", splunkError.Messages[0].Text)
+		}
+		return nil, splunkError
+	}
+
+	contentList := []licensemodel.License{}
+	for _, entry := range envelop.Entry {
+		var content licensemodel.License
+		s, err := json.Marshal(entry.Content)
+		if err != nil {
+			return &contentList, err
+		}
+		err = json.Unmarshal(s, &content)
+		if err != nil {
+			return &contentList, err
+		}
+		contentList = append(contentList, content)
+	}
+	return &contentList, nil
+}
+
+func (p *splunkGateway) GetLicenseLocalPeer(ctx context.Context) (*[]licensemodel.LicenseLocalPeer, error) {
+	url := licensemodel.GetLicenseLocalPeersUrl
+
+	// fetch the response into the envelope struct
+	splunkError := &splunkmodel.SplunkError{}
+	envelop := &licensemodel.LicenseHeader{}
+	resp, err := p.client.R().
+		SetResult(envelop).
+		SetError(&splunkError).
+		ForceContentType("application/json").
+		SetQueryParams(map[string]string{"output_mode": "json", "count": "0"}).
+		Get(url)
+	if err != nil {
+		p.log.Error(err, "get license local peer failed")
+		return nil, err
+	}
+	if resp.StatusCode() != http.StatusOK {
+		p.log.Info("get license local peer returned non-OK status", "status", resp.StatusCode())
+	}
+	if resp.StatusCode() >= 400 {
+		if len(splunkError.Messages) > 0 {
+			p.log.Info("splunk error message", "text", splunkError.Messages[0].Text)
+		}
+		return nil, splunkError
+	}
+
+	contentList := []licensemodel.LicenseLocalPeer{}
+	for _, entry := range envelop.Entry {
+		// decode via a JSON round-trip; a direct type assertion on the
+		// map-typed Content would panic here
+		var content licensemodel.LicenseLocalPeer
+		s, err := json.Marshal(entry.Content)
+		if err != nil {
+			return &contentList, err
+		}
+		err = json.Unmarshal(s, &content)
+		if err != nil {
+			return &contentList, err
+		}
+		contentList = append(contentList, content)
+	}
+	return &contentList, nil
+}
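Each method in this file repeats the same JSON round-trip to coerce the map-typed entry.Content into a concrete model type. A possible consolidation using Go generics is sketched below; this helper is not part of the change, and it deliberately takes plain interface{} values so it makes no assumption about the envelope's Entry type:

```go
package impl

import "encoding/json"

// decodeEntries is a hypothetical helper that converts generic entry
// contents (typically map[string]interface{} from the decoded envelope)
// into strongly typed values via a JSON round-trip.
func decodeEntries[T any](contents []interface{}) (*[]T, error) {
	out := []T{}
	for _, c := range contents {
		raw, err := json.Marshal(c)
		if err != nil {
			return &out, err
		}
		var v T
		if err := json.Unmarshal(raw, &v); err != nil {
			return &out, err
		}
		out = append(out, v)
	}
	return &out, nil
}
```

A caller would collect the entry.Content values into a slice and invoke, for example, decodeEntries[licensemodel.License](contents), removing roughly a dozen duplicated loops.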
+func (p *splunkGateway) GetLicensePools(ctx context.Context) (*[]licensemodel.LicensePool, error) {
+	url := licensemodel.GetLicensePoolsUrl
+
+	// fetch the response envelope into a struct
+	splunkError := &splunkmodel.SplunkError{}
+	envelope := &licensemodel.LicenseHeader{}
+	resp, err := p.client.R().
+		SetResult(envelope).
+		SetError(splunkError).
+		ForceContentType("application/json").
+		SetQueryParams(map[string]string{"output_mode": "json", "count": "0"}).
+		Get(url)
+	if err != nil {
+		p.log.Error(err, "get license pools failed")
+		return nil, err
+	}
+	if resp.StatusCode() != http.StatusOK {
+		p.log.Info("response failure", "status", resp.StatusCode())
+	}
+	if resp.StatusCode() >= http.StatusBadRequest {
+		if len(splunkError.Messages) > 0 {
+			p.log.Info("response failure set to", "result", splunkError.Messages[0].Text)
+		}
+		return nil, splunkError
+	}
+
+	contentList := []licensemodel.LicensePool{}
+	for _, entry := range envelope.Entry {
+		var content licensemodel.LicensePool
+		s, err := json.Marshal(entry.Content)
+		if err != nil {
+			return &contentList, err
+		}
+		err = json.Unmarshal(s, &content)
+		if err != nil {
+			return &contentList, err
+		}
+		contentList = append(contentList, content)
+	}
+	return &contentList, nil
+}
+
+func (p *splunkGateway) GetLicensePeers(ctx context.Context) (*[]licensemodel.LicensePeer, error) {
+	url := licensemodel.GetLicensePeersUrl
+
+	// fetch the response envelope into a struct
+	splunkError := &splunkmodel.SplunkError{}
+	envelope := &licensemodel.LicenseHeader{}
+	resp, err := p.client.R().
+		SetResult(envelope).
+		SetError(splunkError).
+		ForceContentType("application/json").
+		SetQueryParams(map[string]string{"output_mode": "json", "count": "0"}).
+		Get(url)
+	if err != nil {
+		p.log.Error(err, "get license peers failed")
+		return nil, err
+	}
+	if resp.StatusCode() != http.StatusOK {
+		p.log.Info("response failure", "status", resp.StatusCode())
+	}
+	if resp.StatusCode() >= http.StatusBadRequest {
+		if len(splunkError.Messages) > 0 {
+			p.log.Info("response failure set to", "result", splunkError.Messages[0].Text)
+		}
+		return nil, splunkError
+	}
+
+	contentList := []licensemodel.LicensePeer{}
+	for _, entry := range envelope.Entry {
+		var content licensemodel.LicensePeer
+		s, err := json.Marshal(entry.Content)
+		if err != nil {
+			return &contentList, err
+		}
+		err = json.Unmarshal(s, &content)
+		if err != nil {
+			return &contentList, err
+		}
+		contentList = append(contentList, content)
+	}
+	return &contentList, nil
+}
+
+func (p *splunkGateway) GetLicenseUsage(ctx context.Context) (*[]licensemodel.LicenseUsage, error) {
+	url := licensemodel.GetLicenseUsageUrl
+
+	// fetch the response envelope into a struct
+	splunkError := &splunkmodel.SplunkError{}
+	envelope := &licensemodel.LicenseHeader{}
+	resp, err := p.client.R().
+		SetResult(envelope).
+		SetError(splunkError).
+		ForceContentType("application/json").
+		SetQueryParams(map[string]string{"output_mode": "json", "count": "0"}).
+		Get(url)
+	if err != nil {
+		p.log.Error(err, "get license usage failed")
+		return nil, err
+	}
+	if resp.StatusCode() != http.StatusOK {
+		p.log.Info("response failure", "status", resp.StatusCode())
+	}
+	if resp.StatusCode() >= http.StatusBadRequest {
+		if len(splunkError.Messages) > 0 {
+			p.log.Info("response failure set to", "result", splunkError.Messages[0].Text)
+		}
+		return nil, splunkError
+	}
+
+	contentList := []licensemodel.LicenseUsage{}
+	for _, entry := range envelope.Entry {
+		var content licensemodel.LicenseUsage
+		s, err := json.Marshal(entry.Content)
+		if err != nil {
+			return &contentList, err
+		}
+		err = json.Unmarshal(s, &content)
+		if err != nil {
+			return &contentList, err
+		}
+		contentList = append(contentList, content)
+	}
+	return &contentList, nil
+}
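As an illustration of what a caller might do with GetLicenseUsage results (this helper is hypothetical, not part of the patch; field names follow the LicenseUsage model defined later in this change):

    // percentUsed returns peer usage as a percentage of quota for each entry.
    func percentUsed(usages []licensemodel.LicenseUsage) []float64 {
        out := make([]float64, 0, len(usages))
        for _, u := range usages {
            if u.Quota == 0 {
                out = append(out, 0) // quota unset; avoid dividing by zero
                continue
            }
            out = append(out, 100*float64(u.SlavesUsageBytes)/float64(u.Quota))
        }
        return out
    }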
+func (p *splunkGateway) GetLicenseStacks(ctx context.Context) (*[]licensemodel.LicenseStack, error) {
+	url := licensemodel.GetLicenseStacksUrl
+
+	// fetch the response envelope into a struct
+	splunkError := &splunkmodel.SplunkError{}
+	envelope := &licensemodel.LicenseHeader{}
+	resp, err := p.client.R().
+		SetResult(envelope).
+		SetError(splunkError).
+		ForceContentType("application/json").
+		SetQueryParams(map[string]string{"output_mode": "json", "count": "0"}).
+		Get(url)
+	if err != nil {
+		p.log.Error(err, "get license stacks failed")
+		return nil, err
+	}
+	if resp.StatusCode() != http.StatusOK {
+		p.log.Info("response failure", "status", resp.StatusCode())
+	}
+	if resp.StatusCode() >= http.StatusBadRequest {
+		if len(splunkError.Messages) > 0 {
+			p.log.Info("response failure set to", "result", splunkError.Messages[0].Text)
+		}
+		return nil, splunkError
+	}
+
+	contentList := []licensemodel.LicenseStack{}
+	for _, entry := range envelope.Entry {
+		var content licensemodel.LicenseStack
+		s, err := json.Marshal(entry.Content)
+		if err != nil {
+			return &contentList, err
+		}
+		err = json.Unmarshal(s, &content)
+		if err != nil {
+			return &contentList, err
+		}
+		contentList = append(contentList, content)
+	}
+	return &contentList, nil
+}
diff --git a/pkg/gateway/splunk/license-manager/implementation/license_test.go b/pkg/gateway/splunk/license-manager/implementation/license_test.go
new file mode 100644
index 000000000..a168ee220
--- /dev/null
+++ b/pkg/gateway/splunk/license-manager/implementation/license_test.go
@@ -0,0 +1,77 @@
+package impl
+
+import (
+	"context"
+	"fmt"
+	"os"
+	"time"
+
+	"github.com/go-resty/resty/v2"
+	"github.com/jarcoal/httpmock"
+	splunkmodel "github.com/splunk/splunk-operator/pkg/gateway/splunk/model"
+	licensemodel "github.com/splunk/splunk-operator/pkg/gateway/splunk/model/services/license"
+
+	//managermodel "github.com/splunk/splunk-operator/pkg/gateway/splunk/model/services/cluster/manager"
+	//peermodel "github.com/splunk/splunk-operator/pkg/gateway/splunk/model/services/cluster/peer"
+	"testing"
+
+	logz "sigs.k8s.io/controller-runtime/pkg/log/zap"
+)
+
+var slog = logz.New().WithName("gateway").WithName("fixture")
+
+func setCreds(t *testing.T) *splunkGateway {
+	//ctx := context.TODO()
+	sad := &splunkmodel.SplunkCredentials{
+		Address:                        "splunk-cm-cluster-master-service",
+		Port:                           8089,
+		ServicesNamespace:              "",
+		User:                           "admin",
+		App:                            "",
+		CredentialsName:                "admin: abcdefghijklmnopqrstuvwxyz",
+		TrustedCAFile:                  "",
+		ClientCertificateFile:          "",
+		ClientPrivateKeyFile:           "",
+		DisableCertificateVerification: true,
+	}
+	publisher := func(ctx context.Context, eventType, reason, message string) {}
+	// TODO fixme how to test the gateway call directly
+	//sm := NewGatewayFactory(ctx, &sad, publisher)
+	sm := &splunkGateway{
+		credentials: sad,
+		client:      resty.New(),
+		publisher:   publisher,
+		log:         slog,
+		debugLog:    slog,
+	}
+	//splunkURL := fmt.Sprintf("https://%s:%d/%s", sad.Address, sad.Port, sad.ServicesNamespace)
+	splunkURL := fmt.Sprintf("https://%s:%d", sad.Address, sad.Port)
+	sm.client.SetBaseURL(splunkURL)
+	sm.client.SetHeader("Content-Type", "application/json")
+	sm.client.SetHeader("Accept", "application/json")
+	sm.client.SetTimeout(60 * time.Minute)
+	sm.client.SetDebug(true)
+	return sm
+}
+
+// TestGetLicenseGroup exercises GetLicenseGroup against a mocked license
+// group endpoint.
+func TestGetLicenseGroup(t *testing.T) {
+	httpmock.Activate()
+	defer httpmock.DeactivateAndReset()
+
+	ctx := context.TODO()
+	sm := setCreds(t)
+	httpmock.ActivateNonDefault(sm.client.GetClient())
+	content, err := os.ReadFile("../fixture/license_group.json")
+	if err != nil {
+		t.Errorf("fixture: error in get license group %v", err)
+	}
+	fixtureData := string(content)
+	responder := httpmock.NewStringResponder(200, fixtureData)
+	url := licensemodel.GetLicenseGroupUrl
+	httpmock.RegisterResponder("GET", url, responder)
+
+	_, err = sm.GetLicenseGroup(ctx)
+	if err != nil {
+		t.Errorf("fixture: error in get license group %v", err)
+	}
+}
diff --git a/pkg/gateway/splunk/model/error_types.go b/pkg/gateway/splunk/model/error_types.go
new file mode 100644
index 000000000..578b2b004
--- /dev/null
+++ b/pkg/gateway/splunk/model/error_types.go
@@ -0,0 +1,15 @@
+package model
+
+type SplunkError struct {
+	Messages []struct {
+		Type string `json:"type,omitempty"`
+		Text string `json:"text,omitempty"`
+	} `json:"messages,omitempty"`
+}
+
+func (s *SplunkError) Error() string {
+	if len(s.Messages) > 0 {
+		return s.Messages[0].Text
+	}
+	return "unknown error"
+}
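Because SplunkError implements the error interface and the license getters return it on HTTP failures, a caller can recover the structured splunkd message with errors.As. A hypothetical caller-side sketch (gw, ctx, and the import aliases are assumptions):

    var splunkErr *splunkmodel.SplunkError
    if _, err := gw.GetLicenseGroup(ctx); err != nil {
        // errors.As matches because the getters return *SplunkError on HTTP failures
        if errors.As(err, &splunkErr) && len(splunkErr.Messages) > 0 {
            fmt.Println("splunkd:", splunkErr.Messages[0].Text)
        }
    }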
diff --git a/pkg/gateway/splunk/model/services/cluster/config_types.go b/pkg/gateway/splunk/model/services/cluster/config_types.go
new file mode 100644
index 000000000..816f25a3b
--- /dev/null
+++ b/pkg/gateway/splunk/model/services/cluster/config_types.go
@@ -0,0 +1,146 @@
+package cluster
+
+import "time"
+
+// Description: Access cluster node configuration details.
+// Rest End Point API: services/cluster/config
+type ClusterConfigContent struct {
+	AccessLoggingForHeartbeats               bool        `json:"access_logging_for_heartbeats"`
+	AssignPrimariesToAllSites                string      `json:"assign_primaries_to_all_sites"`
+	AutoRebalancePrimaries                   bool        `json:"auto_rebalance_primaries"`
+	BucketsToSummarize                       string      `json:"buckets_to_summarize"`
+	ClusterLabel                             string      `json:"cluster_label"`
+	CmComTimeout                             int         `json:"cm_com_timeout"`
+	CmHeartbeatPeriod                        int         `json:"cm_heartbeat_period"`
+	CmMaxHbmissCount                         int         `json:"cm_max_hbmiss_count"`
+	CxnTimeout                               int         `json:"cxn_timeout"`
+	DecommissionForceFinishIdleTime          int         `json:"decommission_force_finish_idle_time"`
+	DecommissionForceTimeout                 int         `json:"decommission_force_timeout"`
+	Disabled                                 bool        `json:"disabled"`
+	EaiAcl                                   interface{} `json:"eai:acl"`
+	EnableParallelAddPeer                    string      `json:"enable_parallel_add_peer"`
+	EnablePrimaryFixupDuringMaintenance      string      `json:"enable_primary_fixup_during_maintenance"`
+	ExecutorWorkers                          int         `json:"executor_workers"`
+	ForwarderSiteFailover                    string      `json:"forwarder_site_failover"`
+	ForwarderdataRcvPort                     int         `json:"forwarderdata_rcv_port"`
+	ForwarderdataUseSsl                      bool        `json:"forwarderdata_use_ssl"`
+	FreezeDuringMaintenance                  string      `json:"freeze_during_maintenance"`
+	FrozenNotificationsPerBatch              int         `json:"frozen_notifications_per_batch"`
+	GenerationPollInterval                   int         `json:"generation_poll_interval"`
+	GUID                                     string      `json:"guid"`
+	HeartbeatPeriod                          int64       `json:"heartbeat_period"`
+	HeartbeatTimeout                         int         `json:"heartbeat_timeout"`
+	LogBucketDuringAddpeer                   string      `json:"log_bucket_during_addpeer"`
+	ManagerSwitchoverIdxPing                 bool        `json:"manager_switchover_idx_ping"`
+	ManagerSwitchoverMode                    string      `json:"manager_switchover_mode"`
+	ManagerSwitchoverQuietPeriod             int         `json:"manager_switchover_quiet_period"`
+	ManagerURI                               string      `json:"manager_uri"`
+	MasterURI                                string      `json:"master_uri"`
+	MaxAutoServiceInterval                   int         `json:"max_auto_service_interval"`
+	MaxConcurrentPeersJoining                int         `json:"max_concurrent_peers_joining"`
+	MaxDelayedUpdatesTimeMs                  int         `json:"max_delayed_updates_time_ms"`
+	MaxFixupTimeMs                           int         `json:"max_fixup_time_ms"`
+	MaxPeerBuildLoad                         int         `json:"max_peer_build_load"`
+	MaxPeerRepLoad                           int         `json:"max_peer_rep_load"`
+	MaxPeerSumRepLoad                        int         `json:"max_peer_sum_rep_load"`
+	MaxPeersToDownloadBundle                 int         `json:"max_peers_to_download_bundle"`
+	MaxPrimaryBackupsPerService              int         `json:"max_primary_backups_per_service"`
+	MaxRemoveSummaryS2PerService             int         `json:"max_remove_summary_s2_per_service"`
+	Mode                                     string      `json:"mode"`
+	Multisite                                string      `json:"multisite"`
+	NotifyBucketsPeriod                      int         `json:"notify_buckets_period"`
+	NotifyScanMinPeriod                      int         `json:"notify_scan_min_period"`
+	NotifyScanPeriod                         int         `json:"notify_scan_period"`
+	PercentPeersToReload                     int         `json:"percent_peers_to_reload"`
+	PercentPeersToRestart                    int         `json:"percent_peers_to_restart"`
+	PingFlag                                 bool        `json:"ping_flag"`
+	PrecompressClusterBundle                 bool        `json:"precompress_cluster_bundle"`
+	QuietPeriod                              int         `json:"quiet_period"`
+	RcvTimeout                               int         `json:"rcv_timeout"`
+	RebalancePrimariesExecutionLimitMs       int         `json:"rebalance_primaries_execution_limit_ms"`
+	RebalanceThreshold                       float64     `json:"rebalance_threshold"`
+	RegisterForwarderAddress                 string      `json:"register_forwarder_address"`
+	RegisterReplicationAddress               string      `json:"register_replication_address"`
+	RegisterSearchAddress                    string      `json:"register_search_address"`
+	RemoteStorageRetentionPeriod             int         `json:"remote_storage_retention_period"`
+	RemoteStorageUploadTimeout               int         `json:"remote_storage_upload_timeout"`
+
RepCxnTimeout int `json:"rep_cxn_timeout"` + RepMaxRcvTimeout int `json:"rep_max_rcv_timeout"` + RepMaxSendTimeout int `json:"rep_max_send_timeout"` + RepRcvTimeout int `json:"rep_rcv_timeout"` + RepSendTimeout int `json:"rep_send_timeout"` + ReplicationFactor int `json:"replication_factor"` + ReplicationPort interface{} `json:"replication_port"` + ReplicationUseSsl bool `json:"replication_use_ssl"` + ReportingDelayPeriod int `json:"reporting_delay_period"` + RestartInactivityTimeout int `json:"restart_inactivity_timeout"` + RestartTimeout int `json:"restart_timeout"` + RollingRestart string `json:"rolling_restart"` + RollingRestartCondition string `json:"rolling_restart_condition"` + SearchFactor int `json:"search_factor"` + SearchFilesRetryTimeout int `json:"search_files_retry_timeout"` + SearchableRebalance string `json:"searchable_rebalance"` + SearchableRollingPeerStateDelayInterval int `json:"searchable_rolling_peer_state_delay_interval"` + Secret string `json:"secret"` + SendTimeout int `json:"send_timeout"` + ServiceExecutionThresholdMs int `json:"service_execution_threshold_ms"` + ServiceInterval int `json:"service_interval"` + ServiceJobsMsec int `json:"service_jobs_msec"` + Site string `json:"site"` + SiteBySite bool `json:"site_by_site"` + SiteReplicationFactor string `json:"site_replication_factor"` + SiteSearchFactor string `json:"site_search_factor"` + StreamingReplicationWaitSecs int `json:"streaming_replication_wait_secs"` + SummaryReplication string `json:"summary_replication"` + SummaryUpdateBatchSize int `json:"summary_update_batch_size"` + TargetWaitTime int `json:"target_wait_time"` + UseBatchDiscard string `json:"use_batch_discard"` + UseBatchMaskChanges string `json:"use_batch_mask_changes"` + UseBatchRemoteRepChanges string `json:"use_batch_remote_rep_changes"` +} + +type ClusterConfigHeader struct { + Links struct { + Reload string `json:"_reload,omitempty"` + Acl string `json:"_acl,omitempty"` + } `json:"links"` + Origin string `json:"origin,omitempty"` + Updated time.Time `json:"updated,omitempty"` + Generator struct { + Build string `json:"build,omitempty"` + Version string `json:"version,omitempty"` + } `json:"generator"` + Entry []struct { + Name string `json:"name,omitempty"` + ID string `json:"id,omitempty"` + Updated time.Time `json:"updated,omitempty"` + Links struct { + Alternate string `json:"alternate,omitempty"` + List string `json:"list,omitempty"` + Reload string `json:"_reload,omitempty"` + Edit string `json:"edit,omitempty"` + Disable string `json:"disable,omitempty"` + } `json:"links,omitempty"` + Author string `json:"author,omitempty"` + Acl struct { + App string `json:"app,omitempty"` + CanList bool `json:"can_list,omitempty"` + CanWrite bool `json:"can_write,omitempty"` + Modifiable bool `json:"modifiable,omitempty"` + Owner string `json:"owner,omitempty"` + Perms struct { + Read []string `json:"read,omitempty"` + Write []string `json:"write,omitempty"` + } `json:"perms,omitempty"` + Removable bool `json:"removable,omitempty"` + Sharing string `json:"sharing,omitempty"` + } `json:"acl,omitempty"` + Content ClusterConfigContent `json:"content,omitempty"` + } `json:"entry,omitempty"` + Paging struct { + Total int `json:"total,omitempty"` + PerPage int `json:"perPage,omitempty"` + Offset int `json:"offset,omitempty"` + } `json:"paging,omitempty"` + Messages []interface{} `json:"messages,omitempty"` +} diff --git a/pkg/gateway/splunk/model/services/cluster/manager/health_types.go b/pkg/gateway/splunk/model/services/cluster/manager/health_types.go 
new file mode 100644 index 000000000..eeb1cdc60 --- /dev/null +++ b/pkg/gateway/splunk/model/services/cluster/manager/health_types.go @@ -0,0 +1,64 @@ +package manager + +import ( + "time" +) + +// Description: Performs health checks to determine the cluster health and search impact, prior to a rolling upgrade of the indexer cluster. +// Rest End Point API: services/cluster/manager/health +type ClusterManagerHealthContent struct { + AllDataIsSearchable string `json:"all_data_is_searchable,omitempty"` + AllPeersAreUp string `json:"all_peers_are_up,omitempty"` + CmVersionIsCompatible string `json:"cm_version_is_compatible,omitempty"` + EaiAcl interface{} `json:"eai:acl,omitempty"` + Multisite string `json:"multisite,omitempty"` + NoFixupTasksInProgress string `json:"no_fixup_tasks_in_progress,omitempty"` + PreFlightCheck string `json:"pre_flight_check,omitempty"` + ReadyForSearchableRollingRestart string `json:"ready_for_searchable_rolling_restart,omitempty"` + ReplicationFactorMet string `json:"replication_factor_met,omitempty"` + SearchFactorMet string `json:"search_factor_met,omitempty"` + SiteReplicationFactorMet string `json:"site_replication_factor_met,omitempty"` + SiteSearchFactorMet string `json:"site_search_factor_met,omitempty"` + SplunkVersionPeerCount string `json:"splunk_version_peer_count,omitempty"` +} + +type ClusterManagerHealthHeader struct { + Links struct { + } `json:"links,omitempty"` + Origin string `json:"origin,omitempty"` + Updated time.Time `json:"updated,omitempty"` + Generator struct { + Build string `json:"build,omitempty"` + Version string `json:"version,omitempty"` + } `json:"generator,omitempty"` + Entry []struct { + Name string `json:"name,omitempty"` + ID string `json:"id,omitempty"` + Updated time.Time `json:"updated,omitempty"` + Links struct { + Alternate string `json:"alternate,omitempty"` + List string `json:"list,omitempty"` + } `json:"links,omitempty"` + Author string `json:"author,omitempty"` + Acl struct { + App string `json:"app,omitempty"` + CanList bool `json:"can_list,omitempty"` + CanWrite bool `json:"can_write,omitempty"` + Modifiable bool `json:"modifiable,omitempty"` + Owner string `json:"owner,omitempty"` + Perms struct { + Read []string `json:"read,omitempty"` + Write []string `json:"write,omitempty"` + } `json:"perms,omitempty"` + Removable bool `json:"removable,omitempty"` + Sharing string `json:"sharing,omitempty"` + } `json:"acl,omitempty"` + Content ClusterManagerHealthContent `json:"content,omitempty"` + } `json:"entry,omitempty"` + Paging struct { + Total int `json:"total,omitempty"` + PerPage int `json:"perPage,omitempty"` + Offset int `json:"offset,omitempty"` + } `json:"paging,omitempty"` + Messages []interface{} `json:"messages,omitempty"` +} diff --git a/pkg/gateway/splunk/model/services/cluster/manager/info_types.go b/pkg/gateway/splunk/model/services/cluster/manager/info_types.go new file mode 100644 index 000000000..b5ed1c6ae --- /dev/null +++ b/pkg/gateway/splunk/model/services/cluster/manager/info_types.go @@ -0,0 +1,106 @@ +package manager + +import "time" + +// Description: Access information about cluster manager node. 
+// Rest End Point API: services/cluster/manager/info + +type ClusterManagerInfoContent struct { + ActiveBundle struct { + BundlePath string `json:"bundle_path"` + Checksum string `json:"checksum"` + Timestamp int `json:"timestamp"` + } `json:"active_bundle"` + ApplyBundleStatus struct { + InvalidBundle struct { + BundlePath string `json:"bundle_path"` + BundleValidationErrorsOnMaster []interface{} `json:"bundle_validation_errors_on_master"` + Checksum string `json:"checksum"` + Timestamp int `json:"timestamp"` + } `json:"invalid_bundle"` + ReloadBundleIssued bool `json:"reload_bundle_issued"` + Status string `json:"status"` + } `json:"apply_bundle_status"` + AvailableSites string `json:"available_sites"` + BackupAndRestorePrimaries bool `json:"backup_and_restore_primaries"` + ControlledRollingRestartFlag bool `json:"controlled_rolling_restart_flag"` + EaiAcl interface{} `json:"eai:acl"` + ForwarderSiteFailover string `json:"forwarder_site_failover"` + IndexingReadyFlag bool `json:"indexing_ready_flag"` + InitializedFlag bool `json:"initialized_flag"` + Label string `json:"label"` + LastCheckRestartBundleResult bool `json:"last_check_restart_bundle_result"` + LastDryRunBundle struct { + BundlePath string `json:"bundle_path"` + Checksum string `json:"checksum"` + Timestamp int `json:"timestamp"` + } `json:"last_dry_run_bundle"` + LastValidatedBundle struct { + BundlePath string `json:"bundle_path"` + Checksum string `json:"checksum"` + IsValidBundle bool `json:"is_valid_bundle"` + Timestamp int `json:"timestamp"` + } `json:"last_validated_bundle"` + LatestBundle struct { + BundlePath string `json:"bundle_path"` + Checksum string `json:"checksum"` + Timestamp int `json:"timestamp"` + } `json:"latest_bundle"` + MaintenanceMode bool `json:"maintenance_mode"` + Multisite bool `json:"multisite"` + PreviousActiveBundle struct { + BundlePath string `json:"bundle_path"` + Checksum string `json:"checksum"` + Timestamp int `json:"timestamp"` + } `json:"previous_active_bundle"` + PrimariesBackupStatus string `json:"primaries_backup_status"` + QuietPeriodFlag bool `json:"quiet_period_flag"` + RollingRestartFlag bool `json:"rolling_restart_flag"` + RollingRestartOrUpgrade bool `json:"rolling_restart_or_upgrade"` + ServiceReadyFlag bool `json:"service_ready_flag"` + SiteReplicationFactor string `json:"site_replication_factor"` + SiteSearchFactor string `json:"site_search_factor"` + StartTime int `json:"start_time"` + SummaryReplication string `json:"summary_replication"` +} + +type ClusterManagerInfoHeader struct { + Links struct { + } `json:"links"` + Origin string `json:"origin"` + Updated time.Time `json:"updated"` + Generator struct { + Build string `json:"build"` + Version string `json:"version"` + } `json:"generator"` + Entry []struct { + Name string `json:"name"` + ID string `json:"id"` + Updated time.Time `json:"updated"` + Links struct { + Alternate string `json:"alternate"` + List string `json:"list"` + } `json:"links"` + Author string `json:"author"` + Acl struct { + App string `json:"app"` + CanList bool `json:"can_list"` + CanWrite bool `json:"can_write"` + Modifiable bool `json:"modifiable"` + Owner string `json:"owner"` + Perms struct { + Read []string `json:"read"` + Write []string `json:"write"` + } `json:"perms"` + Removable bool `json:"removable"` + Sharing string `json:"sharing"` + } `json:"acl"` + Content ClusterManagerInfoContent `json:"content"` + } `json:"entry"` + Paging struct { + Total int `json:"total"` + PerPage int `json:"perPage"` + Offset int `json:"offset"` + } 
`json:"paging"` + Messages []interface{} `json:"messages"` +} diff --git a/pkg/gateway/splunk/model/services/cluster/manager/peers_types.go b/pkg/gateway/splunk/model/services/cluster/manager/peers_types.go new file mode 100644 index 000000000..3d5660805 --- /dev/null +++ b/pkg/gateway/splunk/model/services/cluster/manager/peers_types.go @@ -0,0 +1,149 @@ +package manager + +import "time" + +// Description: Access cluster manager peers. +// Rest End Point API: services/cluster/manager/peers +type LastDryRunBundle struct { + BundlePath string `json:"bundle_path,omitempty"` + Checksum string `json:"checksum,omitempty"` + Timestamp int `json:"timestamp,omitempty"` +} + +type LastValidatedBundle struct { + BundlePath string `json:"bundle_path,omitempty"` + Checksum string `json:"checksum,omitempty"` + IsValidBundle bool `json:"is_valid_bundle,omitempty"` + Timestamp int `json:"timestamp,omitempty"` +} + +type LatestBundle struct { + BundlePath string `json:"bundle_path,omitempty"` + Checksum string `json:"checksum,omitempty"` + Timestamp int `json:"timestamp,omitempty"` +} + +type PreviousActiveBundle struct { + BundlePath string `json:"bundle_path,omitempty"` + Checksum string `json:"checksum,omitempty"` + Timestamp int `json:"timestamp,omitempty"` +} + +type ClusterManagerPeerContent struct { + ActiveBundleID string `json:"active_bundle_id"` + ApplyBundleStatus struct { + InvalidBundle struct { + BundleValidationErrors []interface{} `json:"bundle_validation_errors"` + InvalidBundleID string `json:"invalid_bundle_id"` + } `json:"invalid_bundle"` + ReasonsForRestart []interface{} `json:"reasons_for_restart"` + RestartRequiredForApplyBundle bool `json:"restart_required_for_apply_bundle"` + Status string `json:"status"` + } `json:"apply_bundle_status"` + BaseGenerationID int `json:"base_generation_id"` + BatchedReplicationCount int `json:"batched_replication_count"` + BucketCount int `json:"bucket_count"` + BucketCountByIndex struct { + Audit int `json:"_audit"` + Internal int `json:"_internal"` + Telemetry int `json:"_telemetry"` + } `json:"bucket_count_by_index"` + BucketsRfByOriginSite struct { + Default int `json:"default"` + Site1 int `json:"site1"` + Site2 int `json:"site2"` + } `json:"buckets_rf_by_origin_site"` + BucketsSfByOriginSite struct { + Default int `json:"default"` + Site1 int `json:"site1"` + Site2 int `json:"site2"` + } `json:"buckets_sf_by_origin_site"` + EaiAcl interface{} `json:"eai:acl"` + FixupSet []interface{} `json:"fixup_set"` + HeartbeatStarted bool `json:"heartbeat_started"` + HostPortPair string `json:"host_port_pair"` + IndexingDiskSpace int64 `json:"indexing_disk_space"` + IsSearchable bool `json:"is_searchable"` + IsValidBundle bool `json:"is_valid_bundle"` + Label string `json:"label"` + LastDryRunBundle string `json:"last_dry_run_bundle"` + LastHeartbeat int `json:"last_heartbeat"` + LastValidatedBundle string `json:"last_validated_bundle"` + LatestBundleID string `json:"latest_bundle_id"` + MergingMode bool `json:"merging_mode"` + PeerRegisteredSummaries bool `json:"peer_registered_summaries"` + PendingJobCount int `json:"pending_job_count"` + PrimaryCount int `json:"primary_count"` + PrimaryCountRemote int `json:"primary_count_remote"` + RegisterSearchAddress string `json:"register_search_address"` + ReplicationCount int `json:"replication_count"` + ReplicationPort int `json:"replication_port"` + ReplicationUseSsl bool `json:"replication_use_ssl"` + RestartRequiredForApplyingDryRunBundle bool `json:"restart_required_for_applying_dry_run_bundle"` + 
SearchStateCounter struct { + PendingSearchable int `json:"PendingSearchable"` + PendingUnsearchable int `json:"PendingUnsearchable"` + Searchable int `json:"Searchable"` + SearchablePendingMask int `json:"SearchablePendingMask"` + Unknown int `json:"Unknown"` + Unsearchable int `json:"Unsearchable"` + } `json:"search_state_counter"` + Site string `json:"site"` + SplunkVersion string `json:"splunk_version"` + Status string `json:"status"` + StatusCounter struct { + Complete int `json:"Complete"` + NonStreamingTarget int `json:"NonStreamingTarget"` + PendingDiscard int `json:"PendingDiscard"` + PendingTruncate int `json:"PendingTruncate"` + StreamingError int `json:"StreamingError"` + StreamingSource int `json:"StreamingSource"` + StreamingTarget int `json:"StreamingTarget"` + Unset int `json:"Unset"` + } `json:"status_counter"` + SummaryReplicationCount int `json:"summary_replication_count"` + TransientJobCount int `json:"transient_job_count"` +} + +type ClusterManagerPeerHeader struct { + Links struct { + Create string `json:"create"` + } `json:"links"` + Origin string `json:"origin"` + Updated time.Time `json:"updated"` + Generator struct { + Build string `json:"build"` + Version string `json:"version"` + } `json:"generator"` + Entry []struct { + Name string `json:"name"` + ID string `json:"id"` + Updated time.Time `json:"updated"` + Links struct { + Alternate string `json:"alternate"` + List string `json:"list"` + Edit string `json:"edit"` + } `json:"links"` + Author string `json:"author"` + Acl struct { + App string `json:"app"` + CanList bool `json:"can_list"` + CanWrite bool `json:"can_write"` + Modifiable bool `json:"modifiable"` + Owner string `json:"owner"` + Perms struct { + Read []string `json:"read"` + Write []string `json:"write"` + } `json:"perms"` + Removable bool `json:"removable"` + Sharing string `json:"sharing"` + } `json:"acl"` + Content ClusterManagerPeerContent `json:"content"` + } `json:"entry"` + Paging struct { + Total int `json:"total"` + PerPage int `json:"perPage"` + Offset int `json:"offset"` + } `json:"paging"` + Messages []interface{} `json:"messages"` +} diff --git a/pkg/gateway/splunk/model/services/cluster/manager/searchhead_types.go b/pkg/gateway/splunk/model/services/cluster/manager/searchhead_types.go new file mode 100644 index 000000000..982efa570 --- /dev/null +++ b/pkg/gateway/splunk/model/services/cluster/manager/searchhead_types.go @@ -0,0 +1,55 @@ +package manager + +import "time" + +// https://splunk-cm-cluster-master-service:8089/services/cluster/manager/searchheads?count=0&output_mode=json + +type SearchHeadContent struct { + EaiAcl interface{} `json:"eai:acl"` + HostPortPair string `json:"host_port_pair"` + Label string `json:"label"` + Site string `json:"site"` + Status string `json:"status"` +} + +// ClusterMasterSearchHeadsHeader +type ClusterMasterSearchHeadHeader struct { + Links struct { + } `json:"links"` + Origin string `json:"origin"` + Updated time.Time `json:"updated"` + Generator struct { + Build string `json:"build"` + Version string `json:"version"` + } `json:"generator"` + Entry []struct { + Name string `json:"name"` + ID string `json:"id"` + Updated time.Time `json:"updated"` + Links struct { + Alternate string `json:"alternate"` + List string `json:"list"` + } `json:"links"` + Author string `json:"author"` + Acl struct { + App string `json:"app"` + CanList bool `json:"can_list"` + CanWrite bool `json:"can_write"` + Modifiable bool `json:"modifiable"` + Owner string `json:"owner"` + Perms struct { + Read []string `json:"read"` 
+ Write []string `json:"write"` + } `json:"perms"` + Removable bool `json:"removable"` + Sharing string `json:"sharing"` + } `json:"acl"` + Content SearchHeadContent `json:"content"` + } `json:"entry"` + Paging struct { + Total int `json:"total"` + PerPage int `json:"perPage"` + Offset int `json:"offset"` + } `json:"paging"` + Messages []interface{} `json:"messages"` +} diff --git a/pkg/gateway/splunk/model/services/cluster/manager/sites_types.go b/pkg/gateway/splunk/model/services/cluster/manager/sites_types.go new file mode 100644 index 000000000..f23677a95 --- /dev/null +++ b/pkg/gateway/splunk/model/services/cluster/manager/sites_types.go @@ -0,0 +1,105 @@ +package manager + +import "time" + +// Description: Access cluster site information. +// Rest End Point: services/cluster/manager/sites +type ClusterManagerSiteContent struct { + ActiveBundle struct { + BundlePath string `json:"bundle_path,omitempty"` + Checksum string `json:"checksum,omitempty"` + Timestamp int `json:"timestamp,omitempty"` + } `json:"active_bundle,omitempty"` + ApplyBundleStatus struct { + InvalidBundle struct { + BundlePath string `json:"bundle_path,omitempty"` + BundleValidationErrorsOnMaster []interface{} `json:"bundle_validation_errors_on_master,omitempty"` + Checksum string `json:"checksum,omitempty"` + Timestamp int `json:"timestamp,omitempty"` + } `json:"invalid_bundle,omitempty"` + ReloadBundleIssued bool `json:"reload_bundle_issued,omitempty"` + Status string `json:"status,omitempty"` + } `json:"apply_bundle_status,omitempty"` + AvailableSites string `json:"available_sites,omitempty"` + BackupAndRestorePrimaries bool `json:"backup_and_restore_primaries,omitempty"` + ControlledRollingRestartFlag bool `json:"controlled_rolling_restart_flag,omitempty"` + EaiAcl interface{} `json:"eai:acl,omitempty"` + ForwarderSiteFailover string `json:"forwarder_site_failover,omitempty"` + IndexingReadyFlag bool `json:"indexing_ready_flag,omitempty"` + InitializedFlag bool `json:"initialized_flag,omitempty"` + Label string `json:"label,omitempty"` + LastCheckRestartBundleResult bool `json:"last_check_restart_bundle_result,omitempty"` + LastDryRunBundle struct { + BundlePath string `json:"bundle_path,omitempty"` + Checksum string `json:"checksum,omitempty"` + Timestamp int `json:"timestamp,omitempty"` + } `json:"last_dry_run_bundle,omitempty"` + LastValidatedBundle struct { + BundlePath string `json:"bundle_path,omitempty"` + Checksum string `json:"checksum,omitempty"` + IsValidBundle bool `json:"is_valid_bundle,omitempty"` + Timestamp int `json:"timestamp,omitempty"` + } `json:"last_validated_bundle,omitempty"` + LatestBundle struct { + BundlePath string `json:"bundle_path,omitempty"` + Checksum string `json:"checksum,omitempty"` + Timestamp int `json:"timestamp,omitempty"` + } `json:"latest_bundle,omitempty"` + MaintenanceMode bool `json:"maintenance_mode,omitempty"` + Multisite bool `json:"multisite,omitempty"` + PreviousActiveBundle struct { + BundlePath string `json:"bundle_path,omitempty"` + Checksum string `json:"checksum,omitempty"` + Timestamp int `json:"timestamp,omitempty"` + } `json:"previous_active_bundle,omitempty"` + PrimariesBackupStatus string `json:"primaries_backup_status,omitempty"` + QuietPeriodFlag bool `json:"quiet_period_flag,omitempty"` + RollingRestartFlag bool `json:"rolling_restart_flag,omitempty"` + RollingRestartOrUpgrade bool `json:"rolling_restart_or_upgrade,omitempty"` + ServiceReadyFlag bool `json:"service_ready_flag,omitempty"` + SiteReplicationFactor string 
`json:"site_replication_factor,omitempty"` + SiteSearchFactor string `json:"site_search_factor,omitempty"` + StartTime int `json:"start_time,omitempty"` + SummaryReplication string `json:"summary_replication,omitempty"` +} + +type ClusterManagerSiteHeader struct { + Links struct { + } `json:"links,omitempty"` + Origin string `json:"origin,omitempty"` + Updated time.Time `json:"updated,omitempty"` + Generator struct { + Build string `json:"build,omitempty"` + Version string `json:"version,omitempty"` + } `json:"generator,omitempty"` + Entry []struct { + Name string `json:"name,omitempty"` + ID string `json:"id,omitempty"` + Updated time.Time `json:"updated,omitempty"` + Links struct { + Alternate string `json:"alternate,omitempty"` + List string `json:"list,omitempty"` + } `json:"links,omitempty"` + Author string `json:"author,omitempty"` + Acl struct { + App string `json:"app,omitempty"` + CanList bool `json:"can_list,omitempty"` + CanWrite bool `json:"can_write,omitempty"` + Modifiable bool `json:"modifiable,omitempty"` + Owner string `json:"owner,omitempty"` + Perms struct { + Read []string `json:"read,omitempty"` + Write []string `json:"write,omitempty"` + } `json:"perms,omitempty"` + Removable bool `json:"removable,omitempty"` + Sharing string `json:"sharing,omitempty"` + } `json:"acl,omitempty"` + Content ClusterManagerSiteContent `json:"content,omitempty"` + } `json:"entry,omitempty"` + Paging struct { + Total int `json:"total,omitempty"` + PerPage int `json:"perPage,omitempty"` + Offset int `json:"offset,omitempty"` + } `json:"paging,omitempty"` + Messages []interface{} `json:"messages,omitempty"` +} diff --git a/pkg/gateway/splunk/model/services/cluster/manager/status_types.go b/pkg/gateway/splunk/model/services/cluster/manager/status_types.go new file mode 100644 index 000000000..19f9d09d4 --- /dev/null +++ b/pkg/gateway/splunk/model/services/cluster/manager/status_types.go @@ -0,0 +1,99 @@ +package manager + +import "time" + +// Description: Endpoint to get the status of a rolling restart. 
+// Rest End Point: services/cluster/manager/status
+type ClusterManagerStatusContent struct {
+	AvailableSites           string      `json:"available_sites"`
+	DecommissionForceTimeout string      `json:"decommission_force_timeout"`
+	EaiAcl                   interface{} `json:"eai:acl"`
+	HaMode                   string      `json:"ha_mode"`
+	MaintenanceMode          bool        `json:"maintenance_mode"`
+	Messages                 string      `json:"messages"`
+	Multisite                bool        `json:"multisite"`
+	// Peers is keyed by each indexer peer's GUID, so it is decoded as a map.
+	Peers map[string]struct {
+		Label  string `json:"label"`
+		Site   string `json:"site"`
+		Status string `json:"status"`
+	} `json:"peers"`
+	RestartInactivityTimeout string `json:"restart_inactivity_timeout"`
+	RestartProgress          struct {
+		Done          []interface{} `json:"done"`
+		Failed        []interface{} `json:"failed"`
+		InProgress    []interface{} `json:"in_progress"`
+		ToBeRestarted []interface{} `json:"to_be_restarted"`
+	} `json:"restart_progress"`
+	RollingRestartFlag      bool `json:"rolling_restart_flag"`
+	RollingRestartOrUpgrade bool `json:"rolling_restart_or_upgrade"`
+	SearchableRolling       bool `json:"searchable_rolling"`
+	ServiceReadyFlag        bool `json:"service_ready_flag"`
+}
+
+type ClusterManagerStatusHeader struct {
+	Links struct {
+	} `json:"links"`
+	Origin    string    `json:"origin"`
+	Updated   time.Time `json:"updated"`
+	Generator struct {
+		Build   string `json:"build"`
+		Version string `json:"version"`
+	} `json:"generator"`
+	Entry []struct {
+		Name    string    `json:"name"`
+		ID      string    `json:"id"`
+		Updated time.Time `json:"updated"`
+		Links   struct {
+			Alternate string `json:"alternate"`
+			List      string `json:"list"`
+		} `json:"links"`
+		Author string `json:"author"`
+		Acl    struct {
+			App        string `json:"app"`
+			CanList    bool   `json:"can_list"`
+			CanWrite   bool   `json:"can_write"`
+			Modifiable bool   `json:"modifiable"`
+			Owner      string `json:"owner"`
+			Perms      struct {
+				Read  []string `json:"read"`
+				Write []string `json:"write"`
+			} `json:"perms"`
+			Removable bool   `json:"removable"`
+			Sharing   string `json:"sharing"`
+		} `json:"acl"`
+		Content ClusterManagerStatusContent `json:"content"`
+	} `json:"entry"`
+	Paging struct {
+		Total   int `json:"total"`
+		PerPage int `json:"perPage"`
+		Offset  int `json:"offset"`
+	} `json:"paging"`
+	Messages []interface{} `json:"messages"`
+}
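The status endpoint above is where splunkd reports maintenance mode. A hedged sketch of reading it with the same resty conventions the gateway code uses; the import aliases (managermodel, clustermodel) and client wiring are assumptions, and GetClusterManagerStatusUrl is the constant defined in url_types.go just below:

    // isInMaintenance reports whether any status entry has maintenance_mode set.
    func isInMaintenance(ctx context.Context, client *resty.Client) (bool, error) {
        envelope := &managermodel.ClusterManagerStatusHeader{}
        resp, err := client.R().
            SetContext(ctx).
            SetResult(envelope).
            ForceContentType("application/json").
            SetQueryParams(map[string]string{"output_mode": "json", "count": "0"}).
            Get(clustermodel.GetClusterManagerStatusUrl)
        if err != nil {
            return false, err
        }
        if resp.StatusCode() != http.StatusOK {
            return false, fmt.Errorf("cluster manager status returned %d", resp.StatusCode())
        }
        for _, entry := range envelope.Entry {
            if entry.Content.MaintenanceMode {
                return true, nil
            }
        }
        return false, nil
    }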
diff --git a/pkg/gateway/splunk/model/services/cluster/url_types.go b/pkg/gateway/splunk/model/services/cluster/url_types.go
new file mode 100644
index 000000000..29165c013
--- /dev/null
+++ b/pkg/gateway/splunk/model/services/cluster/url_types.go
@@ -0,0 +1,35 @@
+package cluster
+
+const (
+	GetClusterConfigUrl = "/services/cluster/config"
+
+	GetClusterManagerBucketUrl = "/services/cluster/manager/buckets"
+
+	GetClusterManagerHealthUrl = "/services/cluster/manager/health"
+
+	GetClusterManagerGenerationUrl = "/services/cluster/manager/generation"
+
+	GetClusterManagerIndexesUrl = "/services/cluster/manager/indexes"
+
+	GetClusterManagerPeersUrl = "/services/cluster/manager/peers"
+
+	GetClusterManagerInfoUrl = "/services/cluster/manager/info"
+
+	GetClusterManagerRedundancyUrl = "/services/cluster/manager/redundancy"
+
+	GetClusterManagerSitesUrl = "/services/cluster/manager/sites"
+
+	GetClusterManagerSearchHeadUrl = "/services/cluster/manager/searchheads"
+
+	GetClusterPeerBucketsUrl = "/services/cluster/peer/buckets"
+
+	GetClusterPeerInfoUrl = "/services/cluster/peer/info"
+
+	GetLicenseManagerLocalPeers = "/services/licenser/localslave"
+
+	GetSearchHeadCaptainInfoUrl = "/services/shcluster/captain/info"
+
+	GetClusterManagerStatusUrl = "/services/cluster/manager/status"
+
+	SetClusterInMaintenanceModeUrl = "/services/cluster/manager/control/default/maintenance"
+)
diff --git a/pkg/gateway/splunk/model/services/common/attributes_types.go b/pkg/gateway/splunk/model/services/common/attributes_types.go
new file mode 100644
index 000000000..fff1fe196
--- /dev/null
+++ b/pkg/gateway/splunk/model/services/common/attributes_types.go
@@ -0,0 +1,61 @@
+package common
+
+import "time"
+
+type Perms struct {
+	Read  []string `json:"read,omitempty"`
+	Write []string `json:"write,omitempty"`
+}
+
+type ACL struct {
+	App        string `json:"app,omitempty"`
+	CanList    bool   `json:"can_list,omitempty"`
+	CanWrite   bool   `json:"can_write,omitempty"`
+	Modifiable bool   `json:"modifiable,omitempty"`
+	Owner      string `json:"owner,omitempty"`
+	Perms      Perms  `json:"perms,omitempty"`
+	Removable  bool   `json:"removable,omitempty"`
+	Sharing    string `json:"sharing,omitempty"`
+}
+
+type HeaderLinks struct {
+	Create string `json:"create,omitempty"`
+	Reload string `json:"_reload,omitempty"`
+	ACL    string `json:"_acl,omitempty"`
+}
+
+type Generator struct {
+	Build   string `json:"build,omitempty"`
+	Version string `json:"version,omitempty"`
+}
+
+type EntryLinks struct {
+	Alternate string `json:"alternate,omitempty"`
+	List      string `json:"list,omitempty"`
+}
+
+type Entry struct {
+	Name     string      `json:"name,omitempty"`
+	ID       string      `json:"id,omitempty"`
+	Updated  time.Time   `json:"updated,omitempty"`
+	Links    EntryLinks  `json:"links,omitempty"`
+	Author   string      `json:"author,omitempty"`
+	ACL      ACL         `json:"acl,omitempty"`
+	Content  interface{} `json:"content,omitempty"`
+	Content0 interface{} `json:"content0,omitempty"`
+}
+
+type Paging struct {
+	Total   int `json:"total"`
+	PerPage int `json:"perPage"`
+	Offset  int `json:"offset"`
+}
+
+type Header struct {
+	Links     HeaderLinks   `json:"links,omitempty"`
+	Origin    string        `json:"origin,omitempty"`
+	Updated   time.Time     `json:"updated,omitempty"`
+	Generator Generator     `json:"generator,omitempty"`
+	Entry     []Entry       `json:"entry,omitempty"`
+	Paging    Paging        `json:"paging,omitempty"`
+	Messages  []interface{} `json:"messages,omitempty"`
+}
diff --git a/pkg/gateway/splunk/model/services/license/license_header_types.go b/pkg/gateway/splunk/model/services/license/license_header_types.go
new file mode 100644
index 000000000..9f4de1624
--- /dev/null
+++ b/pkg/gateway/splunk/model/services/license/license_header_types.go
@@ -0,0 +1,46 @@
+package license
+
+import (
+	"time"
+)
+
+type LicenseHeader struct {
+	Links struct {
+	} `json:"links"`
+	Origin    string    `json:"origin"`
+	Updated   time.Time `json:"updated"`
+	Generator struct {
+		Build
string `json:"build"` + Version string `json:"version"` + } `json:"generator"` + Entry []struct { + Name string `json:"name"` + ID string `json:"id"` + Updated time.Time `json:"updated"` + Links struct { + Alternate string `json:"alternate"` + List string `json:"list"` + } `json:"links"` + Author string `json:"author"` + Acl struct { + App string `json:"app"` + CanList bool `json:"can_list"` + CanWrite bool `json:"can_write"` + Modifiable bool `json:"modifiable"` + Owner string `json:"owner"` + Perms struct { + Read []string `json:"read"` + Write []string `json:"write"` + } `json:"perms"` + Removable bool `json:"removable"` + Sharing string `json:"sharing"` + } `json:"acl"` + Content interface{} `json:"content"` + } `json:"entry"` + Paging struct { + Total int `json:"total"` + PerPage int `json:"perPage"` + Offset int `json:"offset"` + } `json:"paging"` + Messages []interface{} `json:"messages"` +} diff --git a/pkg/gateway/splunk/model/services/license/license_types.go b/pkg/gateway/splunk/model/services/license/license_types.go new file mode 100644 index 000000000..0870e8431 --- /dev/null +++ b/pkg/gateway/splunk/model/services/license/license_types.go @@ -0,0 +1,156 @@ +package license + +// https://:/services/licenser/groups +// Provides access to the configuration of licenser groups. +// A licenser group contains one or more licenser stacks that can operate concurrently. +// Only one licenser group is active at any given time. +type LicenseGroup struct { + IsActive string `json:"is_active,omitempty"` + StackIds []string `json:"stack_ids,omitempty"` +} + +// https://:/services/licenser/licenses +// Provides access to the licenses for this Splunk Enterprise instance. +// A license enables various features for a Splunk instance, including but not limited +// to indexing quota, auth, search, forwarding. 
+type License struct { + AddOns string `json:"add_ons,omitempty"` + AllowedRoles []string `json:"allowedRoles,omitempty"` + AssignableRoles []string `json:"assignableRoles,omitempty"` + CreationTime uint `json:"creation_time,omitempty"` + DisabledFeatures []string `json:"disabled_features,omitempty"` + ExpirationTime int `json:"expiration_time,omitempty"` + Features []string `json:"features,omitempty"` + GroupId string `json:"group_id,omitempty"` + Guid string `json:"guid,omitempty"` + IsUnlimited bool `json:"is_unlimited,omitempty"` + Label string `json:"label,omitempty"` + LicenseHash string `json:"license_hash,omitempty"` + MaxRetentionSize int `json:"max_retention_size,omitempty"` + MaxStackQuota float64 `json:"max_stack_quota,omitempty"` + MaxUsers int `json:"max_users,omitempty"` + MaxViolation int `json:"max_violations,omitempty"` + Notes string `json:"notes,omitempty"` + Quota int `json:"quota,omitempty"` + RelativeExpirationInterval int `json:"relative_expiration_interval,omitempty"` + RelativeExpirationStart int `json:"relative_expiration_start,omitempty"` + SourceTypes []string `json:"sourcetypes,omitempty"` + StackId string `json:"stack_id,omitempty"` + Status string `json:"status,omitempty"` + SubGroupId string `json:"subgroup_id,omitempty"` + Type string `json:"type,omitempty"` + WindowPeriod int `json:"window_period,omitempty"` +} + +type Features struct { + AWSMarketPlace string `json:"AWSMarketplace,omitempty"` + Acceleration string `json:"Acceleration,omitempty"` + AdvancedSearchCommands string `json:"AdvancedSearchCommands,omitempty"` + AdvanceXML string `json:"AdvancedXML,omitempty"` + Alerting string `json:"Alerting,omitempty"` + AllowDuplicateKeys string `json:"AllowDuplicateKeys,omitempty"` + ArchiveToHdfs string `json:"ArchiveToHdfs,omitempty"` + Auth string `json:"Auth,omitempty"` + CanBeRemoteMaster string `json:"CanBeRemoteMaster,omitempty"` + ConditionalLicensingEnforcement string `json:"ConditionalLicensingEnforcement,omitempty"` + CustomRoles string `json:"CustomRoles,omitempty"` + DeployClient string `json:"DeployClient,omitempty"` + DeployServer string `json:"DeployServer,omitempty"` + DisableQuotaEnforcement string `json:"DisableQuotaEnforcement,omitempty"` + DistSearch string `json:"DistSearch,omitempty"` + FwdData string `json:"FwdData,omitempty"` + GuestPass string `json:"GuestPass,omitempty"` + HideQuotaWarning string `json:"HideQuotaWarnings"` + KVStore string `json:"KVStore,omitempty"` + LDAPAuth string `json:"LDAPAuth,omitempty"` + LocalSearch string `json:"LocalSearch,omitempty"` + MultifactorAuth string `json:"MultifactorAuth,omitempty"` + MultisiteClustering string `json:"MultisiteClustering,omitempty"` + NontableLookups string `json:"NontableLookups,omitempty"` + RcvData string `json:"RcvData,omitempty"` + RcvSearch string `json:"RcvSearch,omitempty"` + ResetWarning string `json:"ResetWarnings,omitempty"` + RollingWindowAlert string `json:"RollingWindowAlerts,omitempty"` + SAMLAuth string `json:"SAMLAuth,omitempty"` + ScheduledAlert string `json:"ScheduledAlerts,omitempty"` + ScheduledReports string `json:"ScheduledReports,omitempty"` + ScheduledSearch string `json:"ScheduledSearch,omitempty"` + ScriptedAuth string `json:"ScriptedAuth,omitempty"` + SearchheadPooling string `json:"SearchheadPooling,omitempty"` + SigningProcessor string `json:"SigningProcessor,omitempty"` + SplunkWeb string `json:"SplunkWeb,omitempty"` + SubgroupId string `json:"SubgroupId,omitempty"` + SyslogOutputProcessor string `json:"SyslogOutputProcessor,omitempty"` + 
UnisiteClustering string `json:"UnisiteClustering,omitempty"`
+}
+
+// https://:/services/licenser/localpeer
+// Get license state information for the Splunk instance.
+type LicenseLocalPeer struct {
+	AddOns                        string   `json:"add_ons,omitempty"`
+	ConnectionTimeout             int      `json:"connection_timeout,omitempty"`
+	Features                      Features `json:"features,omitempty"`
+	Guid                          []string `json:"guid"`
+	LastManagerContactAttemptTime int      `json:"last_manager_contact_attempt_time,omitempty"`
+	LastManagerContactSuccessTime int      `json:"last_manager_contact_success_time,omitempty"`
+	LastTrackDBServiceTime        int      `json:"last_trackerdb_service_time,omitempty"`
+	LicenseKeys                   []string `json:"license_keys,omitempty"`
+	ManagerGuid                   string   `json:"manager_guid,omitempty"`
+	ManagerUri                    string   `json:"manager_uri,omitempty"`
+	PeerId                        string   `json:"peer_id,omitempty"`
+	PeerLabel                     string   `json:"peer_label,omitempty"`
+	ReceiveTimeout                int      `json:"receive_timeout,omitempty"`
+	SendTimeout                   int      `json:"send_timeout,omitempty"`
+	SquashThreshold               int      `json:"squash_threshold,omitempty"`
+}
+
+// https://:/services/licenser/messages
+// Access licenser messages.
+// Messages may range from helpful warnings about being close to violations, licenses
+// expiring or more severe alerts regarding overages and exceeding the license warning window.
+type LicenseMessage struct {
+	Messages []string `json:"messages,omitempty"`
+}
+
+// https://:/services/licenser/pools
+// Access the licenser pools configuration.
+// A pool logically partitions the daily volume entitlements of a stack. You can use a
+// license pool to divide license privileges amongst multiple peers.
+type LicensePool struct {
+}
+
+// https://:/services/licenser/peers
+// Access license peer instances.
+type LicensePeer struct {
+	ActivePoolIds  []string `json:"active_pool_ids,omitempty"`
+	Label          string   `json:"label,omitempty"`
+	PoolIds        []string `json:"pool_ids,omitempty"`
+	PoolSuggestion string   `json:"pool_suggestion,omitempty"`
+	StackIds       []string `json:"stack_ids,omitempty"`
+	WarningCount   string   `json:"warning_count,omitempty"`
+}
+
+// https://:/services/licenser/stacks
+// Provides access to the license stack configuration.
+// A license stack is comprised of one or more licenses of the same "type".
+// The daily indexing quota of a license stack is additive, so a stack represents
+// the aggregate entitlement for a collection of licenses.
+type LicenseStack struct {
+	CleActive        int    `json:"cle_active,omitempty"`
+	IsUnlimited      bool   `json:"is_unlimited,omitempty"`
+	Label            string `json:"label,omitempty"`
+	MaxRetentionSize int    `json:"max_retention_size,omitempty"`
+	MaxViolations    int    `json:"max_violations,omitempty"`
+	Quota            int    `json:"quota,omitempty"`
+	Type             string `json:"type,omitempty"`
+	WindowPeriod     int    `json:"window_period,omitempty"`
+}
+
+// LicenseUsage
+// https://:/services/licenser/usage
+// Get current license usage stats from the last minute.
+type LicenseUsage struct {
+	PeerUsageBytes   int `json:"peers_usage_bytes,omitempty"`
+	Quota            int `json:"quota,omitempty"`
+	SlavesUsageBytes int `json:"slaves_usage_bytes,omitempty"`
+}
diff --git a/pkg/gateway/splunk/model/services/license/url_types.go b/pkg/gateway/splunk/model/services/license/url_types.go
new file mode 100644
index 000000000..43e4b61ce
--- /dev/null
+++ b/pkg/gateway/splunk/model/services/license/url_types.go
@@ -0,0 +1,12 @@
+package license
+
+const (
+	GetLicenseGroupUrl      = "/services/licenser/groups"
+	GetLicenseUrl           = "/services/licenser/licenses"
+	GetLicenseLocalPeersUrl = "/services/licenser/localpeer"
+	GetLicenseMessagesUrl   = "/services/licenser/messages"
+	GetLicensePoolsUrl      = "/services/licenser/pools"
+	GetLicensePeersUrl      = "/services/licenser/peers"
+	GetLicenseStacksUrl     = "/services/licenser/stacks"
+	GetLicenseUsageUrl      = "/services/licenser/usage"
+)
diff --git a/pkg/gateway/splunk/model/services/server/health/deployment_types.go b/pkg/gateway/splunk/model/services/server/health/deployment_types.go
new file mode 100644
index 000000000..477610374
--- /dev/null
+++ b/pkg/gateway/splunk/model/services/server/health/deployment_types.go
@@ -0,0 +1,58 @@
+package health
+
+import "time"
+
+// Description: Endpoint to get the overall health of the Splunk deployment.
+// Rest End Point: services/server/health/deployment/details
+
+type DeploymentContent struct {
+	Health string `json:"health,omitempty"`
+	Reason string `json:"reason,omitempty"`
+}
+
+type DeploymentHeader struct {
+	Links struct {
+	} `json:"links"`
+	Origin    string    `json:"origin"`
+	Updated   time.Time `json:"updated"`
+	Generator struct {
+		Build   string `json:"build"`
+		Version string `json:"version"`
+	} `json:"generator"`
+	Entry []struct {
+		Name    string    `json:"name"`
+		ID      string    `json:"id"`
+		Updated time.Time `json:"updated"`
+		Links   struct {
+			Alternate string `json:"alternate"`
+			List      string `json:"list"`
+			Details   string `json:"details"`
+		} `json:"links"`
+		Author string `json:"author"`
+		ACL    struct {
+			App        string `json:"app"`
+			CanList    bool   `json:"can_list"`
+			CanWrite   bool   `json:"can_write"`
+			Modifiable bool   `json:"modifiable"`
+			Owner      string `json:"owner"`
+			Perms      struct {
+				Read  []string      `json:"read"`
+				Write []interface{} `json:"write"`
+			} `json:"perms"`
+			Removable bool   `json:"removable"`
+			Sharing   string `json:"sharing"`
+		} `json:"acl"`
+		Fields struct {
+			Required []interface{} `json:"required"`
+			Optional []interface{} `json:"optional"`
+			Wildcard []interface{} `json:"wildcard"`
+		} `json:"fields"`
+		Content DeploymentContent `json:"content,omitempty"`
+	} `json:"entry"`
+	Paging struct {
+		Total   int `json:"total"`
+		PerPage int `json:"perPage"`
+		Offset  int `json:"offset"`
+	} `json:"paging"`
+	Messages []interface{} `json:"messages"`
+}
diff --git a/pkg/gateway/splunk/model/services/server/health/details_types.go b/pkg/gateway/splunk/model/services/server/health/details_types.go
new file mode 100644
index 000000000..dc419c978
--- /dev/null
+++ b/pkg/gateway/splunk/model/services/server/health/details_types.go
@@ -0,0 +1,488 @@
+package health
+
+import (
+	"time"
+
+	"github.com/splunk/splunk-operator/pkg/gateway/splunk/model/services/common"
+)
+
+// Description: Detailed health report of splunkd and its features.
+// Rest End Point: services/cluster/manager/status +type DataForwarding struct { + Health string `json:"health,omitempty"` + NumRed int `json:"num_red,omitempty"` + NumYellow int `json:"num_yellow,omitempty"` + Splunk2SplunkForwarding struct { + Health string `json:"health,omitempty"` + NumRed int `json:"num_red,omitempty"` + NumYellow int `json:"num_yellow,omitempty"` + Tcpoutautolb0 struct { + DisplayName string `json:"display_name,omitempty"` + Health string `json:"health,omitempty"` + NumRed int `json:"num_red,omitempty"` + NumYellow int `json:"num_yellow,omitempty"` + S2SConnections struct { + Description string `json:"description,omitempty"` + Health string `json:"health,omitempty"` + Name string `json:"name,omitempty"` + Path string `json:"path,omitempty"` + } `json:"s2s_connections,omitempty"` + } `json:"tcpoutautolb-0,omitempty"` + } `json:"splunk-2-splunk_forwarding,omitempty"` +} + +type FileMonitorInput struct { + ForwarderIngestionLatency struct { + DisplayName string `json:"display_name,omitempty"` + Health string `json:"health,omitempty"` + IngestionLatencyIndexerHealth struct { + Description string `json:"description,omitempty"` + Health string `json:"health,omitempty"` + Name string `json:"name,omitempty"` + Path string `json:"path,omitempty"` + } `json:"ingestion_latency_indexer_health,omitempty"` + NumRed int `json:"num_red,omitempty"` + NumYellow int `json:"num_yellow,omitempty"` + } `json:"forwarder_ingestion_latency,omitempty"` + Health string `json:"health,omitempty"` + IngestionLatency struct { + DisplayName string `json:"display_name,omitempty"` + Health string `json:"health,omitempty"` + IngestionLatencyGapMultiplier struct { + Description string `json:"description,omitempty"` + Health string `json:"health,omitempty"` + Name string `json:"name,omitempty"` + Path string `json:"path,omitempty"` + } `json:"ingestion_latency_gap_multiplier,omitempty"` + IngestionLatencyLagSec struct { + Description string `json:"description,omitempty"` + Health string `json:"health,omitempty"` + Name string `json:"name,omitempty"` + Path string `json:"path,omitempty"` + } `json:"ingestion_latency_lag_sec,omitempty"` + NumRed int `json:"num_red,omitempty"` + NumYellow int `json:"num_yellow,omitempty"` + } `json:"ingestion_latency,omitempty"` + LargeAndArchiveFileReader0 struct { + DataOutRate struct { + Description string `json:"description,omitempty"` + Health string `json:"health,omitempty"` + Name string `json:"name,omitempty"` + Path string `json:"path,omitempty"` + } `json:"data_out_rate,omitempty"` + DisplayName string `json:"display_name,omitempty"` + Health string `json:"health,omitempty"` + NumRed int `json:"num_red,omitempty"` + NumYellow int `json:"num_yellow,omitempty"` + } `json:"large_and_archive_file_reader-0,omitempty"` + LargeAndArchiveFileReader1 struct { + DataOutRate struct { + Description string `json:"description,omitempty"` + Health string `json:"health,omitempty"` + Name string `json:"name,omitempty"` + Path string `json:"path,omitempty"` + } `json:"data_out_rate,omitempty"` + DisplayName string `json:"display_name,omitempty"` + Health string `json:"health,omitempty"` + NumRed int `json:"num_red,omitempty"` + NumYellow int `json:"num_yellow,omitempty"` + } `json:"large_and_archive_file_reader-1,omitempty"` + NumRed int `json:"num_red,omitempty"` + NumYellow int `json:"num_yellow,omitempty"` + RealTimeReader0 struct { + DataOutRate struct { + Description string `json:"description,omitempty"` + Health string `json:"health,omitempty"` + Name string 
`json:"name,omitempty"` + Path string `json:"path,omitempty"` + } `json:"data_out_rate,omitempty"` + DisplayName string `json:"display_name,omitempty"` + Health string `json:"health,omitempty"` + NumRed int `json:"num_red,omitempty"` + NumYellow int `json:"num_yellow,omitempty"` + } `json:"real-time_reader-0,omitempty"` + RealTimeReader1 struct { + DataOutRate struct { + Description string `json:"description,omitempty"` + Health string `json:"health,omitempty"` + Name string `json:"name,omitempty"` + Path string `json:"path,omitempty"` + } `json:"data_out_rate,omitempty"` + DisplayName string `json:"display_name,omitempty"` + Health string `json:"health,omitempty"` + NumRed int `json:"num_red,omitempty"` + NumYellow int `json:"num_yellow,omitempty"` + } `json:"real-time_reader-1,omitempty"` +} + +type IndexProcessor struct { + Buckets struct { + BucketsCreatedLast60M struct { + Description string `json:"description,omitempty"` + Health string `json:"health,omitempty"` + Name string `json:"name,omitempty"` + Path string `json:"path,omitempty"` + } `json:"buckets_created_last_60m,omitempty"` + CountBucketRenameFailureLast10Mins struct { + Description string `json:"description,omitempty"` + Health string `json:"health,omitempty"` + Name string `json:"name,omitempty"` + Path string `json:"path,omitempty"` + } `json:"count_bucket_rename_failure_last_10mins,omitempty"` + DisplayName string `json:"display_name,omitempty"` + GiganticBucketSize struct { + Description string `json:"description,omitempty"` + Health string `json:"health,omitempty"` + Name string `json:"name,omitempty"` + Path string `json:"path,omitempty"` + } `json:"gigantic_bucket_size,omitempty"` + Health string `json:"health,omitempty"` + NumRed int `json:"num_red,omitempty"` + NumYellow int `json:"num_yellow,omitempty"` + PercentSmallBucketsCreatedLast24H struct { + Description string `json:"description,omitempty"` + Health string `json:"health,omitempty"` + Name string `json:"name,omitempty"` + Path string `json:"path,omitempty"` + } `json:"percent_small_buckets_created_last_24h,omitempty"` + } `json:"buckets,omitempty"` + DiskSpace struct { + DiskSpaceRemainingMultipleMinfreespace struct { + Description string `json:"description,omitempty"` + Health string `json:"health,omitempty"` + Name string `json:"name,omitempty"` + Path string `json:"path,omitempty"` + } `json:"disk_space_remaining_multiple_minfreespace,omitempty"` + DisplayName string `json:"display_name,omitempty"` + Health string `json:"health,omitempty"` + MaxVolumeSizeInvalid struct { + Description string `json:"description,omitempty"` + Health string `json:"health,omitempty"` + Name string `json:"name,omitempty"` + Path string `json:"path,omitempty"` + } `json:"max_volume_size_invalid,omitempty"` + NumRed int `json:"num_red,omitempty"` + NumYellow int `json:"num_yellow,omitempty"` + } `json:"disk_space,omitempty"` + Health string `json:"health,omitempty"` + IndexOptimization struct { + ConcurrentOptimizeProcessesPercent struct { + Description string `json:"description,omitempty"` + Health string `json:"health,omitempty"` + Name string `json:"name,omitempty"` + Path string `json:"path,omitempty"` + } `json:"concurrent_optimize_processes_percent,omitempty"` + DisplayName string `json:"display_name,omitempty"` + Health string `json:"health,omitempty"` + NumRed int `json:"num_red,omitempty"` + NumYellow int `json:"num_yellow,omitempty"` + } `json:"index_optimization,omitempty"` + NumRed int `json:"num_red,omitempty"` + NumYellow int `json:"num_yellow,omitempty"` +} + 
+type ClusterBundles struct { + ClusterBundles struct { + Description string `json:"description,omitempty"` + Health string `json:"health,omitempty"` + Name string `json:"name,omitempty"` + Path string `json:"path,omitempty"` + } `json:"cluster_bundles,omitempty"` + CountClassicBundleTimeoutLast10Mins struct { + Description string `json:"description,omitempty"` + Health string `json:"health,omitempty"` + Name string `json:"name,omitempty"` + Path string `json:"path,omitempty"` + } `json:"count_classic_bundle_timeout_last_10mins,omitempty"` + CountFullBundleUntarLast10Mins struct { + Description string `json:"description,omitempty"` + Health string `json:"health,omitempty"` + Name string `json:"name,omitempty"` + Path string `json:"path,omitempty"` + } `json:"count_full_bundle_untar_last_10mins,omitempty"` + DisplayName string `json:"display_name,omitempty"` + Health string `json:"health,omitempty"` + NumRed int `json:"num_red,omitempty"` + NumYellow int `json:"num_yellow,omitempty"` +} + +type DataDurability struct { + ClusterReplicationFactor struct { + Description string `json:"description,omitempty"` + Health string `json:"health,omitempty"` + Name string `json:"name,omitempty"` + Path string `json:"path,omitempty"` + } `json:"cluster_replication_factor,omitempty"` + ClusterSearchFactor struct { + Description string `json:"description,omitempty"` + Health string `json:"health,omitempty"` + Name string `json:"name,omitempty"` + Path string `json:"path,omitempty"` + } `json:"cluster_search_factor,omitempty"` + DisplayName string `json:"display_name,omitempty"` + Health string `json:"health,omitempty"` + NumRed int `json:"num_red,omitempty"` + NumYellow int `json:"num_yellow,omitempty"` +} + +type DataSearchable struct { + DataSearchable struct { + Description string `json:"description,omitempty"` + Health string `json:"health,omitempty"` + Name string `json:"name,omitempty"` + Path string `json:"path,omitempty"` + } `json:"data_searchable,omitempty"` + DisplayName string `json:"display_name,omitempty"` + Health string `json:"health,omitempty"` + NumRed int `json:"num_red,omitempty"` + NumYellow int `json:"num_yellow,omitempty"` +} + +type Indexers struct { + CmServiceIntervalInvalid struct { + Description string `json:"description,omitempty"` + Health string `json:"health,omitempty"` + Name string `json:"name,omitempty"` + Path string `json:"path,omitempty"` + } `json:"cm_service_interval_invalid,omitempty"` + Detention struct { + Description string `json:"description,omitempty"` + Health string `json:"health,omitempty"` + Name string `json:"name,omitempty"` + Path string `json:"path,omitempty"` + } `json:"detention,omitempty"` + DisplayName string `json:"display_name,omitempty"` + Health string `json:"health,omitempty"` + MissingPeers struct { + Description string `json:"description,omitempty"` + Health string `json:"health,omitempty"` + Name string `json:"name,omitempty"` + Path string `json:"path,omitempty"` + } `json:"missing_peers,omitempty"` + NumRed int `json:"num_red,omitempty"` + NumYellow int `json:"num_yellow,omitempty"` +} + +type IndexingReady struct { + DisplayName string `json:"display_name,omitempty"` + Health string `json:"health,omitempty"` + IndexingReady struct { + Description string `json:"description,omitempty"` + Health string `json:"health,omitempty"` + Name string `json:"name,omitempty"` + Path string `json:"path,omitempty"` + } `json:"indexing_ready,omitempty"` + NumRed int `json:"num_red,omitempty"` + NumYellow int `json:"num_yellow,omitempty"` +} + +type 
ManagerConnectivity struct { + DisplayName string `json:"display_name,omitempty"` + Health string `json:"health,omitempty"` + MasterConnectivity struct { + Description string `json:"description,omitempty"` + Health string `json:"health,omitempty"` + Name string `json:"name,omitempty"` + Path string `json:"path,omitempty"` + } `json:"master_connectivity,omitempty"` + NumRed int `json:"num_red,omitempty"` + NumYellow int `json:"num_yellow,omitempty"` +} + +type PeerState struct { + DisplayName string `json:"display_name,omitempty"` + Health string `json:"health,omitempty"` + NumRed int `json:"num_red,omitempty"` + NumYellow int `json:"num_yellow,omitempty"` + SlaveState struct { + Description string `json:"description,omitempty"` + Health string `json:"health,omitempty"` + Name string `json:"name,omitempty"` + Path string `json:"path,omitempty"` + } `json:"slave_state,omitempty"` +} + +type PeerVersion struct { + DisplayName string `json:"display_name,omitempty"` + Health string `json:"health,omitempty"` + NumRed int `json:"num_red,omitempty"` + NumYellow int `json:"num_yellow,omitempty"` + SlaveVersion struct { + Description string `json:"description,omitempty"` + Health string `json:"health,omitempty"` + Name string `json:"name,omitempty"` + Path string `json:"path,omitempty"` + } `json:"slave_version,omitempty"` +} + +type ReplicationFailures struct { + DisplayName string `json:"display_name,omitempty"` + Health string `json:"health,omitempty"` + NumRed int `json:"num_red,omitempty"` + NumYellow int `json:"num_yellow,omitempty"` + ReplicationFailures struct { + Description string `json:"description,omitempty"` + Health string `json:"health,omitempty"` + Name string `json:"name,omitempty"` + Path string `json:"path,omitempty"` + } `json:"replication_failures,omitempty"` +} + +type IndexerClustering struct { + ClusterBundles ClusterBundles `json:"cluster_bundles,omitempty"` + DataDurability DataDurability `json:"data_durability,omitempty"` + DataSearchable DataSearchable `json:"data_searchable,omitempty"` + Health string `json:"health,omitempty"` + Indexers Indexers `json:"indexers,omitempty"` + IndexingReady IndexingReady `json:"indexing_ready,omitempty"` + ManagerConnectivity ManagerConnectivity `json:"manager_connectivity,omitempty"` + NumRed int `json:"num_red,omitempty"` + NumYellow int `json:"num_yellow,omitempty"` + PeerState PeerState `json:"peer_state,omitempty"` + PeerVersion PeerVersion `json:"peer_version,omitempty"` + ReplicationFailures ReplicationFailures `json:"replication_failures,omitempty"` +} + +type ResourceUsage struct { + Health string `json:"health,omitempty"` + Iowait struct { + AvgCPUMaxPercLast3M struct { + Description string `json:"description,omitempty"` + Health string `json:"health,omitempty"` + Name string `json:"name,omitempty"` + Path string `json:"path,omitempty"` + } `json:"avg_cpu__max_perc_last_3m,omitempty"` + DisplayName string `json:"display_name,omitempty"` + Health string `json:"health,omitempty"` + NumRed int `json:"num_red,omitempty"` + NumYellow int `json:"num_yellow,omitempty"` + SingleCPUMaxPercLast3M struct { + Description string `json:"description,omitempty"` + Health string `json:"health,omitempty"` + Name string `json:"name,omitempty"` + Path string `json:"path,omitempty"` + } `json:"single_cpu__max_perc_last_3m,omitempty"` + SumTop3CPUPercsMaxLast3M struct { + Description string `json:"description,omitempty"` + Health string `json:"health,omitempty"` + Name string `json:"name,omitempty"` + Path string `json:"path,omitempty"` + } 
`json:"sum_top3_cpu_percs__max_last_3m,omitempty"` + } `json:"iowait,omitempty"` + NumRed int `json:"num_red,omitempty"` + NumYellow int `json:"num_yellow,omitempty"` +} + +type SearchScheduler struct { + Health string `json:"health,omitempty"` + NumRed int `json:"num_red,omitempty"` + NumYellow int `json:"num_yellow,omitempty"` + SchedulerSuppression struct { + DisplayName string `json:"display_name,omitempty"` + Health string `json:"health,omitempty"` + NumRed int `json:"num_red,omitempty"` + NumYellow int `json:"num_yellow,omitempty"` + SuppressionListOversized struct { + Description string `json:"description,omitempty"` + Health string `json:"health,omitempty"` + Name string `json:"name,omitempty"` + Path string `json:"path,omitempty"` + } `json:"suppression_list_oversized,omitempty"` + } `json:"scheduler_suppression,omitempty"` + SearchLag struct { + CountExtremelyLaggedSearchesLastHour struct { + Description string `json:"description,omitempty"` + Health string `json:"health,omitempty"` + Name string `json:"name,omitempty"` + Path string `json:"path,omitempty"` + } `json:"count_extremely_lagged_searches_last_hour,omitempty"` + DisplayName string `json:"display_name,omitempty"` + Health string `json:"health,omitempty"` + NumRed int `json:"num_red,omitempty"` + NumYellow int `json:"num_yellow,omitempty"` + PercentSearchesLaggedHighPriorityLast24H struct { + Description string `json:"description,omitempty"` + Health string `json:"health,omitempty"` + Name string `json:"name,omitempty"` + Path string `json:"path,omitempty"` + } `json:"percent_searches_lagged_high_priority_last_24h,omitempty"` + PercentSearchesLaggedNonHighPriorityLast24H struct { + Description string `json:"description,omitempty"` + Health string `json:"health,omitempty"` + Name string `json:"name,omitempty"` + Path string `json:"path,omitempty"` + } `json:"percent_searches_lagged_non_high_priority_last_24h,omitempty"` + } `json:"search_lag,omitempty"` + SearchesDelayed struct { + DisplayName string `json:"display_name,omitempty"` + Health string `json:"health,omitempty"` + NumRed int `json:"num_red,omitempty"` + NumYellow int `json:"num_yellow,omitempty"` + PercentSearchesDelayedHighPriorityLast24H struct { + Description string `json:"description,omitempty"` + Health string `json:"health,omitempty"` + Name string `json:"name,omitempty"` + Path string `json:"path,omitempty"` + } `json:"percent_searches_delayed_high_priority_last_24h,omitempty"` + PercentSearchesDelayedNonHighPriorityLast24H struct { + Description string `json:"description,omitempty"` + Health string `json:"health,omitempty"` + Name string `json:"name,omitempty"` + Path string `json:"path,omitempty"` + } `json:"percent_searches_delayed_non_high_priority_last_24h,omitempty"` + } `json:"searches_delayed,omitempty"` + SearchesSkippedInTheLast24Hours struct { + DisplayName string `json:"display_name,omitempty"` + Health string `json:"health,omitempty"` + NumRed int `json:"num_red,omitempty"` + NumYellow int `json:"num_yellow,omitempty"` + PercentSearchesSkippedHighPriorityLast24H struct { + Description string `json:"description,omitempty"` + Health string `json:"health,omitempty"` + Name string `json:"name,omitempty"` + Path string `json:"path,omitempty"` + } `json:"percent_searches_skipped_high_priority_last_24h,omitempty"` + PercentSearchesSkippedNonHighPriorityLast24H struct { + Description string `json:"description,omitempty"` + Health string `json:"health,omitempty"` + Name string `json:"name,omitempty"` + Path string `json:"path,omitempty"` + } 
`json:"percent_searches_skipped_non_high_priority_last_24h,omitempty"` + } `json:"searches_skipped_in_the_last_24_hours,omitempty"` +} + +type Splunkd struct { + DataForwarding DataForwarding `json:"data_forwarding,omitempty"` + FileMonitorInput FileMonitorInput `json:"file_monitor_input,omitempty"` + Health string `json:"health,omitempty"` + IndexProcessor IndexProcessor `json:"index_processor,omitempty"` + IndexerClustering IndexerClustering `json:"indexer_clustering,omitempty"` + NumRed int `json:"num_red,omitempty"` + NumYellow int `json:"num_yellow,omitempty"` + ResourceUsage ResourceUsage `json:"resource_usage,omitempty"` + SearchScheduler SearchScheduler `json:"search_scheduler,omitempty"` +} + +type Features struct { + Health string `json:"health,omitempty"` + NumRed int `json:"num_red,omitempty"` + NumYellow int `json:"num_yellow,omitempty"` + Splunkd Splunkd `json:"splunkd,omitempty"` +} + +type DeploymentDetail struct { + Disabled bool `json:"disabled,omitempty"` + EaiACL interface{} `json:"eai:acl,omitempty"` + Features Features `json:"features,omitempty"` + Health string `json:"health,omitempty"` +} + +type DeploymentDetailHeader struct { + Links common.EntryLinks `json:"links,omitempty"` + Origin string `json:"origin,omitempty"` + Updated time.Time `json:"updated,omitempty"` + Generator common.Generator `json:"generator,omitempty"` + Entry []common.Entry `json:"entry,omitempty"` + Paging common.Paging `json:"paging,omitempty"` + Messages []interface{} `json:"messages,omitempty"` +} diff --git a/pkg/gateway/splunk/model/services/server/health/url_types.go b/pkg/gateway/splunk/model/services/server/health/url_types.go new file mode 100644 index 000000000..c116c8d05 --- /dev/null +++ b/pkg/gateway/splunk/model/services/server/health/url_types.go @@ -0,0 +1,7 @@ +package health + +const ( + DeploymentDetailsUrl = "server/health/deployment/details" + + SplunkdHealthDetailsUrl = "server/health/splunkd/details" +) diff --git a/pkg/gateway/splunk/model/services/server/url_types.go b/pkg/gateway/splunk/model/services/server/url_types.go new file mode 100644 index 000000000..9d9911b62 --- /dev/null +++ b/pkg/gateway/splunk/model/services/server/url_types.go @@ -0,0 +1,7 @@ +package server + +const ( + InfoUrl = "server/info" + + StatusUrl = "server/status" +) diff --git a/pkg/gateway/splunk/model/types.go b/pkg/gateway/splunk/model/types.go new file mode 100644 index 000000000..ede3fc43e --- /dev/null +++ b/pkg/gateway/splunk/model/types.go @@ -0,0 +1,55 @@ +package model + +import ( + "github.com/go-logr/logr" + "github.com/go-resty/resty/v2" +) + +// SplunkCredentials contains the information necessary to communicate with +// the Splunk service +type SplunkCredentials struct { + + // Address holds the URL for splunk service + Address string `json:"address"` + + //Port port to connect + Port int32 `json:"port"` + + //Namespace where the splunk services are created + Namespace string `json:"namespace,omitempty"` + + //ServicesNamespace optional for services endpoints + ServicesNamespace string `json:"servicesNs,omitempty"` + + //User optional for services endpoints + User string `json:"user,omitempty"` + + //App optional for services endpoints + App string `json:"app,omitempty"` + + //CredentialsName The name of the secret containing the Splunk credentials (requires + // keys "username" and "password"). 
+ // TODO FIXME need to change this to map as key value + CredentialsName string `json:"credentialsName"` + + //TrustedCAFile Server trusted CA file + TrustedCAFile string `json:"trustedCAFile,omitempty"` + + //ClientCertificateFile client certification if we are using to connect to server + ClientCertificateFile string `json:"clientCertificationFile,omitempty"` + + //ClientPrivateKeyFile client private key if we are using to connect to server + ClientPrivateKeyFile string `json:"clientPrivateKeyFile,omitempty"` + + // DisableCertificateVerification disables verification of splunk + // certificates when using HTTPS to connect to the Splunk. + DisableCertificateVerification bool `json:"disableCertificateVerification,omitempty"` +} + +type splunkGatewayFactory struct { + log logr.Logger + //credentials to log on to splunk + credentials *SplunkCredentials + // client for talking to splunk + client *resty.Client +} diff --git a/pkg/gateway/splunk/services/fixture/cluster_config.json b/pkg/gateway/splunk/services/fixture/cluster_config.json new file mode 100644 index 000000000..16183e806 --- /dev/null +++ b/pkg/gateway/splunk/services/fixture/cluster_config.json @@ -0,0 +1,129 @@ +{ + "links":{ + "_reload":"/services/cluster/config/_reload", + "_acl":"/services/cluster/config/_acl" + }, + "origin":"https://localhost:8089/services/cluster/config", + "updated":"2022-07-18T23:50:26+00:00", + "generator":{ + "build":"6818ac46f2ec", + "version":"9.0.0" + }, + "entry":[ + { + "name":"config", + "id":"https://localhost:8089/services/cluster/config/config", + "updated":"1970-01-01T00:00:00+00:00", + "links":{ + "alternate":"/services/cluster/config/config", + "list":"/services/cluster/config/config", + "_reload":"/services/cluster/config/config/_reload", + "edit":"/services/cluster/config/config", + "disable":"/services/cluster/config/config/disable" + }, + "author":"system", + "acl":{ + "app":"", + "can_list":true, + "can_write":true, + "modifiable":false, + "owner":"system", + "perms":{ + "read":[ + "admin", + "splunk-system-role" + ], + "write":[ + "admin", + "splunk-system-role" + ] + }, + "removable":false, + "sharing":"system" + }, + "content":{ + "access_logging_for_heartbeats":true, + "ack_factor":0, + "allowed_hbmiss_count":"3", + "buckets_per_addpeer":1000, + "bucketsize_upload_preference":"largest", + "cluster_label":"idxc_label", + "cm_com_timeout":10, + "cm_heartbeat_period":1, + "cm_max_hbmiss_count":3, + "cxn_timeout":60, + "decommission_node_force_timeout":300, + "decommission_search_jobs_wait_secs":180, + "disabled":false, + "eai:acl":null, + "executor_workers":10, + "forwarderdata_rcv_port":9997, + "forwarderdata_use_ssl":false, + "frozen_notifications_per_batch":10, + "guid":"7D3E85AB-B17A-47A6-B5E9-405FB889AD25", + "heartbeat_period":1, + "heartbeat_timeout":60, + "manager_switchover_idx_ping":true, + "manager_switchover_mode":"disabled", + "manager_switchover_quiet_period":60, + "manager_uri":"https://splunk-cm-cluster-master-service:8089", + "manual_detention":"off", + "master_uri":"https://splunk-cm-cluster-master-service:8089", + "max_auto_service_interval":1, + "max_delayed_updates_time_ms ":1000000, + "max_fixup_time_ms":0, + "max_peer_build_load":5, + "max_peer_rep_load":5, + "max_peer_sum_rep_load":5, + "max_peers_to_download_bundle":0, + "max_primary_backups_per_service":10, + "max_replication_errors":3, + "mode":"slave", + "notify_buckets_period":10, + "notify_scan_min_period":10, + "notify_scan_period":10, + "percent_peers_to_reload":100, + "percent_peers_to_restart":10, 
+ "ping_flag":true, + "precompress_cluster_bundle":true, + "quiet_period":60, + "rcv_timeout":60, + "register_forwarder_address":"", + "register_replication_address":"", + "register_search_address":"", + "remote_storage_upload_timeout":60, + "rep_cxn_timeout":60, + "rep_max_rcv_timeout":180, + "rep_max_send_timeout":180, + "rep_rcv_timeout":60, + "rep_send_timeout":60, + "replication_factor":3, + "replication_port":9887, + "replication_use_ssl":false, + "report_remote_storage_bucket_upload_to_targets":"false", + "reporting_delay_period":10, + "restart_timeout":60, + "search_factor":2, + "search_files_retry_timeout":600, + "searchable_rolling_peer_state_delay_interval":60, + "secret":"********", + "send_timeout":60, + "service_execution_threshold_ms":1500, + "service_interval":1, + "site":"site1", + "streaming_replication_wait_secs":60, + "summary_update_batch_size":10, + "upload_rectifier_timeout_secs":2, + "warm_bucket_replication_pre_upload":"false" + } + } + ], + "paging":{ + "total":1, + "perPage":30, + "offset":0 + }, + "messages":[ + + ] + } \ No newline at end of file diff --git a/pkg/gateway/splunk/services/fixture/cluster_maintenance.json b/pkg/gateway/splunk/services/fixture/cluster_maintenance.json new file mode 100644 index 000000000..020c88b2f --- /dev/null +++ b/pkg/gateway/splunk/services/fixture/cluster_maintenance.json @@ -0,0 +1,48 @@ +{ + "links": {}, + "origin": "https://localhost:8089/services/cluster/manager/control/default/maintenance", + "updated": "2022-07-18T23:54:03+00:00", + "generator": { + "build": "6818ac46f2ec", + "version": "9.0.0" + }, + "entry": [ + { + "name": "master", + "id": "https://localhost:8089/services/cluster/manager/control/default/maintenance/master", + "updated": "1970-01-01T00:00:00+00:00", + "links": { + "alternate": "/services/cluster/manager/control/default/maintenance/master", + "list": "/services/cluster/manager/control/default/maintenance/master" + }, + "author": "system", + "acl": { + "app": "", + "can_list": true, + "can_write": true, + "modifiable": false, + "owner": "system", + "perms": { + "read": [ + "admin", + "splunk-system-role" + ], + "write": [ + "admin", + "splunk-system-role" + ] + }, + "removable": false, + "sharing": "system" + }, + "content": { + } + } + ], + "paging": { + "total": 1, + "perPage": 30, + "offset": 0 + }, + "messages": [] +} \ No newline at end of file diff --git a/pkg/gateway/splunk/services/fixture/cluster_manager_health.json b/pkg/gateway/splunk/services/fixture/cluster_manager_health.json new file mode 100644 index 000000000..fe7478216 --- /dev/null +++ b/pkg/gateway/splunk/services/fixture/cluster_manager_health.json @@ -0,0 +1,61 @@ +{ + "links": {}, + "origin": "https://localhost:8089/services/cluster/manager/health", + "updated": "2022-07-18T23:54:03+00:00", + "generator": { + "build": "6818ac46f2ec", + "version": "9.0.0" + }, + "entry": [ + { + "name": "master", + "id": "https://localhost:8089/services/cluster/manager/health/master", + "updated": "1970-01-01T00:00:00+00:00", + "links": { + "alternate": "/services/cluster/manager/health/master", + "list": "/services/cluster/manager/health/master" + }, + "author": "system", + "acl": { + "app": "", + "can_list": true, + "can_write": true, + "modifiable": false, + "owner": "system", + "perms": { + "read": [ + "admin", + "splunk-system-role" + ], + "write": [ + "admin", + "splunk-system-role" + ] + }, + "removable": false, + "sharing": "system" + }, + "content": { + "all_data_is_searchable": "1", + "all_peers_are_up": "1", + 
"cm_version_is_compatible": "1", + "eai:acl": null, + "multisite": "1", + "no_fixup_tasks_in_progress": "1", + "pre_flight_check": "1", + "ready_for_searchable_rolling_restart": "1", + "replication_factor_met": "1", + "search_factor_met": "1", + "site_replication_factor_met": "1", + "site_search_factor_met": "1", + "splunk_version_peer_count": "{ 9.0.0: 6 }" + } + } + ], + "paging": { + "total": 1, + "perPage": 30, + "offset": 0 + }, + "messages": [] +} \ No newline at end of file diff --git a/pkg/gateway/splunk/services/fixture/cluster_manager_info.json b/pkg/gateway/splunk/services/fixture/cluster_manager_info.json new file mode 100644 index 000000000..af9c7199b --- /dev/null +++ b/pkg/gateway/splunk/services/fixture/cluster_manager_info.json @@ -0,0 +1,104 @@ +{ + "links": {}, + "origin": "https://localhost:8089/services/cluster/manager/info", + "updated": "2022-07-18T23:54:50+00:00", + "generator": { + "build": "6818ac46f2ec", + "version": "9.0.0" + }, + "entry": [ + { + "name": "master", + "id": "https://localhost:8089/services/cluster/manager/info/master", + "updated": "1970-01-01T00:00:00+00:00", + "links": { + "alternate": "/services/cluster/manager/info/master", + "list": "/services/cluster/manager/info/master" + }, + "author": "system", + "acl": { + "app": "", + "can_list": true, + "can_write": true, + "modifiable": false, + "owner": "system", + "perms": { + "read": [ + "admin", + "splunk-system-role" + ], + "write": [ + "admin", + "splunk-system-role" + ] + }, + "removable": false, + "sharing": "system" + }, + "content": { + "active_bundle": { + "bundle_path": "/opt/splunk/var/run/splunk/cluster/remote-bundle/aabbb9c25a79c081cbd0b1aaf1c2425a-1657658326.bundle", + "checksum": "7351975980A20311463444E66492BDD5", + "timestamp": 1657658326 + }, + "apply_bundle_status": { + "invalid_bundle": { + "bundle_path": "", + "bundle_validation_errors_on_master": [], + "checksum": "", + "timestamp": 0 + }, + "reload_bundle_issued": false, + "status": "None" + }, + "available_sites": "[site1, site2]", + "backup_and_restore_primaries": false, + "controlled_rolling_restart_flag": false, + "eai:acl": null, + "forwarder_site_failover": "", + "indexing_ready_flag": true, + "initialized_flag": true, + "label": "splunk-cm-cluster-master-0", + "last_check_restart_bundle_result": false, + "last_dry_run_bundle": { + "bundle_path": "", + "checksum": "", + "timestamp": 0 + }, + "last_validated_bundle": { + "bundle_path": "/opt/splunk/var/run/splunk/cluster/remote-bundle/aabbb9c25a79c081cbd0b1aaf1c2425a-1657658326.bundle", + "checksum": "7351975980A20311463444E66492BDD5", + "is_valid_bundle": true, + "timestamp": 1657658326 + }, + "latest_bundle": { + "bundle_path": "/opt/splunk/var/run/splunk/cluster/remote-bundle/aabbb9c25a79c081cbd0b1aaf1c2425a-1657658326.bundle", + "checksum": "7351975980A20311463444E66492BDD5", + "timestamp": 1657658326 + }, + "maintenance_mode": false, + "multisite": true, + "previous_active_bundle": { + "bundle_path": "", + "checksum": "", + "timestamp": 0 + }, + "primaries_backup_status": "No on-going (or) completed primaries backup yet. 
Check back again in few minutes if you expect a backup.", + "quiet_period_flag": false, + "rolling_restart_flag": false, + "rolling_restart_or_upgrade": false, + "service_ready_flag": true, + "site_replication_factor": "{ origin:1, total:2 }", + "site_search_factor": "{ origin:1, total:2 }", + "start_time": 1657658831, + "summary_replication": "false" + } + } + ], + "paging": { + "total": 1, + "perPage": 30, + "offset": 0 + }, + "messages": [] +} \ No newline at end of file diff --git a/pkg/gateway/splunk/services/fixture/cluster_manager_peers.json b/pkg/gateway/splunk/services/fixture/cluster_manager_peers.json new file mode 100644 index 000000000..a40ae7605 --- /dev/null +++ b/pkg/gateway/splunk/services/fixture/cluster_manager_peers.json @@ -0,0 +1,649 @@ +{ + "links": { + "create": "/services/cluster/manager/peers/_new" + }, + "origin": "https://splunk-cm-cluster-master-service.default:8089/services/cluster/manager/peers", + "updated": "2022-07-21T07:55:59+00:00", + "generator": { + "build": "6818ac46f2ec", + "version": "9.0.0" + }, + "entry": [ + { + "name": "27165CF0-FFDA-403C-B2FD-F258EA1794DA", + "id": "https://splunk-cm-cluster-master-service.default:8089/services/cluster/manager/peers/27165CF0-FFDA-403C-B2FD-F258EA1794DA", + "updated": "1970-01-01T00:00:00+00:00", + "links": { + "alternate": "/services/cluster/manager/peers/27165CF0-FFDA-403C-B2FD-F258EA1794DA", + "list": "/services/cluster/manager/peers/27165CF0-FFDA-403C-B2FD-F258EA1794DA", + "edit": "/services/cluster/manager/peers/27165CF0-FFDA-403C-B2FD-F258EA1794DA" + }, + "author": "system", + "acl": { + "app": "", + "can_list": true, + "can_write": true, + "modifiable": false, + "owner": "system", + "perms": { + "read": [ + "admin", + "splunk-system-role" + ], + "write": [ + "admin", + "splunk-system-role" + ] + }, + "removable": false, + "sharing": "system" + }, + "content": { + "active_bundle_id": "7351975980A20311463444E66492BDD5", + "apply_bundle_status": { + "invalid_bundle": { + "bundle_validation_errors": [], + "invalid_bundle_id": "" + }, + "reasons_for_restart": [], + "restart_required_for_apply_bundle": false, + "status": "None" + }, + "base_generation_id": 13, + "batched_replication_count": 0, + "bucket_count": 49, + "bucket_count_by_index": { + "_audit": 15, + "_internal": 30, + "_telemetry": 4 + }, + "buckets_rf_by_origin_site": { + "default": 3, + "site1": 29, + "site2": 17 + }, + "buckets_sf_by_origin_site": { + "default": 3, + "site1": 28, + "site2": 17 + }, + "eai:acl": null, + "fixup_set": [], + "heartbeat_started": true, + "host_port_pair": "192.168.27.74:8089", + "indexing_disk_space": 15719145472, + "is_searchable": true, + "is_valid_bundle": true, + "label": "splunk-example-site2-indexer-2", + "last_dry_run_bundle": "", + "last_heartbeat": 1658390158, + "last_validated_bundle": "7351975980A20311463444E66492BDD5", + "latest_bundle_id": "7351975980A20311463444E66492BDD5", + "merging_mode": false, + "peer_registered_summaries": true, + "pending_job_count": 0, + "primary_count": 17, + "primary_count_remote": 194, + "register_search_address": "192.168.27.74:8089", + "replication_count": 0, + "replication_port": 9887, + "replication_use_ssl": false, + "restart_required_for_applying_dry_run_bundle": false, + "search_state_counter": { + "PendingSearchable": 0, + "PendingUnsearchable": 0, + "Searchable": 48, + "SearchablePendingMask": 0, + "Unknown": 0, + "Unsearchable": 1 + }, + "site": "site2", + "splunk_version": "9.0.0", + "status": "Up", + "status_counter": { + "Complete": 36, + "NonStreamingTarget": 0, 
+ "PendingDiscard": 0, + "PendingTruncate": 0, + "StreamingError": 0, + "StreamingSource": 4, + "StreamingTarget": 9, + "Unset": 0 + }, + "summary_replication_count": 0, + "transient_job_count": 0 + } + }, + { + "name": "4E2D2D32-9317-4E00-A531-52622CF1F22D", + "id": "https://splunk-cm-cluster-master-service.default:8089/services/cluster/manager/peers/4E2D2D32-9317-4E00-A531-52622CF1F22D", + "updated": "1970-01-01T00:00:00+00:00", + "links": { + "alternate": "/services/cluster/manager/peers/4E2D2D32-9317-4E00-A531-52622CF1F22D", + "list": "/services/cluster/manager/peers/4E2D2D32-9317-4E00-A531-52622CF1F22D", + "edit": "/services/cluster/manager/peers/4E2D2D32-9317-4E00-A531-52622CF1F22D" + }, + "author": "system", + "acl": { + "app": "", + "can_list": true, + "can_write": true, + "modifiable": false, + "owner": "system", + "perms": { + "read": [ + "admin", + "splunk-system-role" + ], + "write": [ + "admin", + "splunk-system-role" + ] + }, + "removable": false, + "sharing": "system" + }, + "content": { + "active_bundle_id": "7351975980A20311463444E66492BDD5", + "apply_bundle_status": { + "invalid_bundle": { + "bundle_validation_errors": [], + "invalid_bundle_id": "" + }, + "reasons_for_restart": [], + "restart_required_for_apply_bundle": false, + "status": "None" + }, + "base_generation_id": 11, + "batched_replication_count": 0, + "bucket_count": 43, + "bucket_count_by_index": { + "_audit": 14, + "_internal": 26, + "_telemetry": 3 + }, + "buckets_rf_by_origin_site": { + "default": 3, + "site1": 21, + "site2": 19 + }, + "buckets_sf_by_origin_site": { + "default": 3, + "site1": 21, + "site2": 19 + }, + "eai:acl": null, + "fixup_set": [], + "heartbeat_started": true, + "host_port_pair": "192.168.11.34:8089", + "indexing_disk_space": 15719145472, + "is_searchable": true, + "is_valid_bundle": true, + "label": "splunk-example-site1-indexer-1", + "last_dry_run_bundle": "", + "last_heartbeat": 1658390158, + "last_validated_bundle": "7351975980A20311463444E66492BDD5", + "latest_bundle_id": "7351975980A20311463444E66492BDD5", + "merging_mode": false, + "peer_registered_summaries": true, + "pending_job_count": 0, + "primary_count": 37, + "primary_count_remote": 195, + "register_search_address": "192.168.11.34:8089", + "replication_count": 0, + "replication_port": 9887, + "replication_use_ssl": false, + "restart_required_for_applying_dry_run_bundle": false, + "search_state_counter": { + "PendingSearchable": 0, + "PendingUnsearchable": 0, + "Searchable": 43, + "SearchablePendingMask": 0, + "Unknown": 0, + "Unsearchable": 0 + }, + "site": "site1", + "splunk_version": "9.0.0", + "status": "Up", + "status_counter": { + "Complete": 30, + "NonStreamingTarget": 0, + "PendingDiscard": 0, + "PendingTruncate": 0, + "StreamingError": 0, + "StreamingSource": 6, + "StreamingTarget": 7, + "Unset": 0 + }, + "summary_replication_count": 0, + "transient_job_count": 0 + } + }, + { + "name": "59E6334A-BA79-43F0-B360-41C079AC75C1", + "id": "https://splunk-cm-cluster-master-service.default:8089/services/cluster/manager/peers/59E6334A-BA79-43F0-B360-41C079AC75C1", + "updated": "1970-01-01T00:00:00+00:00", + "links": { + "alternate": "/services/cluster/manager/peers/59E6334A-BA79-43F0-B360-41C079AC75C1", + "list": "/services/cluster/manager/peers/59E6334A-BA79-43F0-B360-41C079AC75C1", + "edit": "/services/cluster/manager/peers/59E6334A-BA79-43F0-B360-41C079AC75C1" + }, + "author": "system", + "acl": { + "app": "", + "can_list": true, + "can_write": true, + "modifiable": false, + "owner": "system", + "perms": { + "read": [ + 
"admin", + "splunk-system-role" + ], + "write": [ + "admin", + "splunk-system-role" + ] + }, + "removable": false, + "sharing": "system" + }, + "content": { + "active_bundle_id": "7351975980A20311463444E66492BDD5", + "apply_bundle_status": { + "invalid_bundle": { + "bundle_validation_errors": [], + "invalid_bundle_id": "" + }, + "reasons_for_restart": [], + "restart_required_for_apply_bundle": false, + "status": "None" + }, + "base_generation_id": 14, + "batched_replication_count": 0, + "bucket_count": 27, + "bucket_count_by_index": { + "_audit": 9, + "_internal": 17, + "_telemetry": 1 + }, + "buckets_rf_by_origin_site": { + "default": 3, + "site1": 10, + "site2": 14 + }, + "buckets_sf_by_origin_site": { + "default": 3, + "site1": 9, + "site2": 14 + }, + "eai:acl": null, + "fixup_set": [], + "heartbeat_started": true, + "host_port_pair": "192.168.91.15:8089", + "indexing_disk_space": 15719145472, + "is_searchable": true, + "is_valid_bundle": true, + "label": "splunk-example-site2-indexer-1", + "last_dry_run_bundle": "", + "last_heartbeat": 1658390158, + "last_validated_bundle": "7351975980A20311463444E66492BDD5", + "latest_bundle_id": "7351975980A20311463444E66492BDD5", + "merging_mode": false, + "peer_registered_summaries": true, + "pending_job_count": 0, + "primary_count": 12, + "primary_count_remote": 194, + "register_search_address": "192.168.91.15:8089", + "replication_count": 0, + "replication_port": 9887, + "replication_use_ssl": false, + "restart_required_for_applying_dry_run_bundle": false, + "search_state_counter": { + "PendingSearchable": 0, + "PendingUnsearchable": 0, + "Searchable": 26, + "SearchablePendingMask": 0, + "Unknown": 0, + "Unsearchable": 1 + }, + "site": "site2", + "splunk_version": "9.0.0", + "status": "Up", + "status_counter": { + "Complete": 22, + "NonStreamingTarget": 0, + "PendingDiscard": 0, + "PendingTruncate": 0, + "StreamingError": 0, + "StreamingSource": 4, + "StreamingTarget": 1, + "Unset": 0 + }, + "summary_replication_count": 0, + "transient_job_count": 0 + } + }, + { + "name": "7306C50E-C8FD-45EF-A360-E0A03518BE2C", + "id": "https://splunk-cm-cluster-master-service.default:8089/services/cluster/manager/peers/7306C50E-C8FD-45EF-A360-E0A03518BE2C", + "updated": "1970-01-01T00:00:00+00:00", + "links": { + "alternate": "/services/cluster/manager/peers/7306C50E-C8FD-45EF-A360-E0A03518BE2C", + "list": "/services/cluster/manager/peers/7306C50E-C8FD-45EF-A360-E0A03518BE2C", + "edit": "/services/cluster/manager/peers/7306C50E-C8FD-45EF-A360-E0A03518BE2C" + }, + "author": "system", + "acl": { + "app": "", + "can_list": true, + "can_write": true, + "modifiable": false, + "owner": "system", + "perms": { + "read": [ + "admin", + "splunk-system-role" + ], + "write": [ + "admin", + "splunk-system-role" + ] + }, + "removable": false, + "sharing": "system" + }, + "content": { + "active_bundle_id": "7351975980A20311463444E66492BDD5", + "apply_bundle_status": { + "invalid_bundle": { + "bundle_validation_errors": [], + "invalid_bundle_id": "" + }, + "reasons_for_restart": [], + "restart_required_for_apply_bundle": false, + "status": "None" + }, + "base_generation_id": 12, + "batched_replication_count": 0, + "bucket_count": 46, + "bucket_count_by_index": { + "_audit": 12, + "_internal": 32, + "_telemetry": 2 + }, + "buckets_rf_by_origin_site": { + "default": 3, + "site1": 21, + "site2": 22 + }, + "buckets_sf_by_origin_site": { + "default": 3, + "site1": 19, + "site2": 19 + }, + "eai:acl": null, + "fixup_set": [], + "heartbeat_started": true, + "host_port_pair": 
"192.168.69.142:8089", + "indexing_disk_space": 15719145472, + "is_searchable": true, + "is_valid_bundle": true, + "label": "splunk-example-site1-indexer-0", + "last_dry_run_bundle": "", + "last_heartbeat": 1658390158, + "last_validated_bundle": "7351975980A20311463444E66492BDD5", + "latest_bundle_id": "7351975980A20311463444E66492BDD5", + "merging_mode": false, + "peer_registered_summaries": true, + "pending_job_count": 0, + "primary_count": 37, + "primary_count_remote": 194, + "register_search_address": "192.168.69.142:8089", + "replication_count": 0, + "replication_port": 9887, + "replication_use_ssl": false, + "restart_required_for_applying_dry_run_bundle": false, + "search_state_counter": { + "PendingSearchable": 0, + "PendingUnsearchable": 0, + "Searchable": 41, + "SearchablePendingMask": 0, + "Unknown": 0, + "Unsearchable": 5 + }, + "site": "site1", + "splunk_version": "9.0.0", + "status": "Up", + "status_counter": { + "Complete": 37, + "NonStreamingTarget": 0, + "PendingDiscard": 0, + "PendingTruncate": 0, + "StreamingError": 0, + "StreamingSource": 5, + "StreamingTarget": 4, + "Unset": 0 + }, + "summary_replication_count": 0, + "transient_job_count": 0 + } + }, + { + "name": "CA22DE5F-72B1-4324-8844-4BA4765E9CBC", + "id": "https://splunk-cm-cluster-master-service.default:8089/services/cluster/manager/peers/CA22DE5F-72B1-4324-8844-4BA4765E9CBC", + "updated": "1970-01-01T00:00:00+00:00", + "links": { + "alternate": "/services/cluster/manager/peers/CA22DE5F-72B1-4324-8844-4BA4765E9CBC", + "list": "/services/cluster/manager/peers/CA22DE5F-72B1-4324-8844-4BA4765E9CBC", + "edit": "/services/cluster/manager/peers/CA22DE5F-72B1-4324-8844-4BA4765E9CBC" + }, + "author": "system", + "acl": { + "app": "", + "can_list": true, + "can_write": true, + "modifiable": false, + "owner": "system", + "perms": { + "read": [ + "admin", + "splunk-system-role" + ], + "write": [ + "admin", + "splunk-system-role" + ] + }, + "removable": false, + "sharing": "system" + }, + "content": { + "active_bundle_id": "7351975980A20311463444E66492BDD5", + "apply_bundle_status": { + "invalid_bundle": { + "bundle_validation_errors": [], + "invalid_bundle_id": "" + }, + "reasons_for_restart": [], + "restart_required_for_apply_bundle": false, + "status": "None" + }, + "base_generation_id": 15, + "batched_replication_count": 0, + "bucket_count": 33, + "bucket_count_by_index": { + "_audit": 11, + "_internal": 20, + "_telemetry": 2 + }, + "buckets_rf_by_origin_site": { + "default": 3, + "site1": 14, + "site2": 16 + }, + "buckets_sf_by_origin_site": { + "default": 3, + "site1": 12, + "site2": 15 + }, + "eai:acl": null, + "fixup_set": [], + "heartbeat_started": true, + "host_port_pair": "192.168.61.159:8089", + "indexing_disk_space": 15719145472, + "is_searchable": true, + "is_valid_bundle": true, + "label": "splunk-example-site2-indexer-0", + "last_dry_run_bundle": "", + "last_heartbeat": 1658390158, + "last_validated_bundle": "7351975980A20311463444E66492BDD5", + "latest_bundle_id": "7351975980A20311463444E66492BDD5", + "merging_mode": false, + "peer_registered_summaries": true, + "pending_job_count": 0, + "primary_count": 13, + "primary_count_remote": 193, + "register_search_address": "192.168.61.159:8089", + "replication_count": 0, + "replication_port": 9887, + "replication_use_ssl": false, + "restart_required_for_applying_dry_run_bundle": false, + "search_state_counter": { + "PendingSearchable": 0, + "PendingUnsearchable": 0, + "Searchable": 30, + "SearchablePendingMask": 0, + "Unknown": 0, + "Unsearchable": 3 + }, + 
"site": "site2", + "splunk_version": "9.0.0", + "status": "Up", + "status_counter": { + "Complete": 23, + "NonStreamingTarget": 0, + "PendingDiscard": 0, + "PendingTruncate": 0, + "StreamingError": 0, + "StreamingSource": 4, + "StreamingTarget": 6, + "Unset": 0 + }, + "summary_replication_count": 0, + "transient_job_count": 0 + } + }, + { + "name": "FCE8B198-DD45-4D79-A2B8-8663836713FC", + "id": "https://splunk-cm-cluster-master-service.default:8089/services/cluster/manager/peers/FCE8B198-DD45-4D79-A2B8-8663836713FC", + "updated": "1970-01-01T00:00:00+00:00", + "links": { + "alternate": "/services/cluster/manager/peers/FCE8B198-DD45-4D79-A2B8-8663836713FC", + "list": "/services/cluster/manager/peers/FCE8B198-DD45-4D79-A2B8-8663836713FC", + "edit": "/services/cluster/manager/peers/FCE8B198-DD45-4D79-A2B8-8663836713FC" + }, + "author": "system", + "acl": { + "app": "", + "can_list": true, + "can_write": true, + "modifiable": false, + "owner": "system", + "perms": { + "read": [ + "admin", + "splunk-system-role" + ], + "write": [ + "admin", + "splunk-system-role" + ] + }, + "removable": false, + "sharing": "system" + }, + "content": { + "active_bundle_id": "7351975980A20311463444E66492BDD5", + "apply_bundle_status": { + "invalid_bundle": { + "bundle_validation_errors": [], + "invalid_bundle_id": "" + }, + "reasons_for_restart": [], + "restart_required_for_apply_bundle": false, + "status": "None" + }, + "base_generation_id": 25, + "batched_replication_count": 0, + "bucket_count": 30, + "bucket_count_by_index": { + "_audit": 10, + "_internal": 18, + "_telemetry": 2 + }, + "buckets_rf_by_origin_site": { + "default": 3, + "site1": 19, + "site2": 8 + }, + "buckets_sf_by_origin_site": { + "default": 3, + "site1": 18, + "site2": 5 + }, + "eai:acl": null, + "fixup_set": [], + "heartbeat_started": true, + "host_port_pair": "192.168.53.162:8089", + "indexing_disk_space": 15719145472, + "is_searchable": true, + "is_valid_bundle": true, + "label": "splunk-example-site1-indexer-2", + "last_dry_run_bundle": "", + "last_heartbeat": 1658390158, + "last_validated_bundle": "7351975980A20311463444E66492BDD5", + "latest_bundle_id": "7351975980A20311463444E66492BDD5", + "merging_mode": false, + "peer_registered_summaries": true, + "pending_job_count": 0, + "primary_count": 23, + "primary_count_remote": 194, + "register_search_address": "192.168.53.162:8089", + "replication_count": 0, + "replication_port": 9887, + "replication_use_ssl": false, + "restart_required_for_applying_dry_run_bundle": false, + "search_state_counter": { + "PendingSearchable": 0, + "PendingUnsearchable": 0, + "Searchable": 26, + "SearchablePendingMask": 0, + "Unknown": 0, + "Unsearchable": 4 + }, + "site": "site1", + "splunk_version": "9.0.0", + "status": "Up", + "status_counter": { + "Complete": 24, + "NonStreamingTarget": 0, + "PendingDiscard": 0, + "PendingTruncate": 0, + "StreamingError": 0, + "StreamingSource": 5, + "StreamingTarget": 1, + "Unset": 0 + }, + "summary_replication_count": 0, + "transient_job_count": 0 + } + } + ], + "paging": { + "total": 6, + "perPage": 30, + "offset": 0 + }, + "messages": [] + } \ No newline at end of file diff --git a/pkg/gateway/splunk/services/fixture/cluster_manager_searchhead.json b/pkg/gateway/splunk/services/fixture/cluster_manager_searchhead.json new file mode 100644 index 000000000..b143380db --- /dev/null +++ b/pkg/gateway/splunk/services/fixture/cluster_manager_searchhead.json @@ -0,0 +1,55 @@ +{ + "links":{ + + }, + 
"origin":"https://splunk-cm-cluster-master-service:8089/services/cluster/manager/searchheads", + "updated":"2022-07-21T06:51:50+00:00", + "generator":{ + "build":"6818ac46f2ec", + "version":"9.0.0" + }, + "entry":[ + { + "name":"3A702D19-3AEF-4D93-8E9D-0022C2C50CF8", + "id":"https://splunk-cm-cluster-master-service:8089/services/cluster/manager/searchheads/3A702D19-3AEF-4D93-8E9D-0022C2C50CF8", + "updated":"1970-01-01T00:00:00+00:00", + "links":{ + "alternate":"/services/cluster/manager/searchheads/3A702D19-3AEF-4D93-8E9D-0022C2C50CF8", + "list":"/services/cluster/manager/searchheads/3A702D19-3AEF-4D93-8E9D-0022C2C50CF8" + }, + "author":"system", + "acl":{ + "app":"", + "can_list":true, + "can_write":true, + "modifiable":false, + "owner":"system", + "perms":{ + "read":[ + "admin", + "splunk-system-role" + ], + "write":[ + "admin", + "splunk-system-role" + ] + }, + "removable":false, + "sharing":"system" + }, + "content":{ + "eai:acl":null, + "host_port_pair":"splunk-cm-cluster-master-0:8089", + "label":"splunk-cm-cluster-master-0", + "site":"site1", + "status":"Connected" + } + } + ], + "paging":{ + "total":1, + "perPage":10000000, + "offset":0 + }, + "messages":[ ] + } \ No newline at end of file diff --git a/pkg/gateway/splunk/services/fixture/cluster_manager_sites.json b/pkg/gateway/splunk/services/fixture/cluster_manager_sites.json new file mode 100644 index 000000000..33ded1751 --- /dev/null +++ b/pkg/gateway/splunk/services/fixture/cluster_manager_sites.json @@ -0,0 +1,109 @@ +{ + "links": {}, + "origin": "https://localhost:8089/services/cluster/manager/sites", + "updated": "2022-07-18T23:56:42+00:00", + "generator": { + "build": "6818ac46f2ec", + "version": "9.0.0" + }, + "entry": [ + { + "name": "site1", + "id": "https://localhost:8089/services/cluster/manager/sites/site1", + "updated": "1970-01-01T00:00:00+00:00", + "links": { + "alternate": "/services/cluster/manager/sites/site1", + "list": "/services/cluster/manager/sites/site1" + }, + "author": "system", + "acl": { + "app": "", + "can_list": true, + "can_write": true, + "modifiable": false, + "owner": "system", + "perms": { + "read": [ + "admin", + "splunk-system-role" + ], + "write": [ + "admin", + "splunk-system-role" + ] + }, + "removable": false, + "sharing": "system" + }, + "content": { + "eai:acl": null, + "peers": { + "7D3E85AB-B17A-47A6-B5E9-405FB889AD25": { + "host_port_pair": "192.168.47.247:8089", + "server_name": "splunk-example-site1-indexer-0" + }, + "CB87DA8D-38FF-42D8-B7EC-076C97D77E18": { + "host_port_pair": "192.168.82.138:8089", + "server_name": "splunk-example-site1-indexer-2" + }, + "F881BA5F-E181-4C09-BB33-96131460678E": { + "host_port_pair": "192.168.11.34:8089", + "server_name": "splunk-example-site1-indexer-1" + } + } + } + }, + { + "name": "site2", + "id": "https://localhost:8089/services/cluster/manager/sites/site2", + "updated": "1970-01-01T00:00:00+00:00", + "links": { + "alternate": "/services/cluster/manager/sites/site2", + "list": "/services/cluster/manager/sites/site2" + }, + "author": "system", + "acl": { + "app": "", + "can_list": true, + "can_write": true, + "modifiable": false, + "owner": "system", + "perms": { + "read": [ + "admin", + "splunk-system-role" + ], + "write": [ + "admin", + "splunk-system-role" + ] + }, + "removable": false, + "sharing": "system" + }, + "content": { + "eai:acl": null, + "peers": { + "188C23DD-D641-4BA2-B651-C042F809A0B3": { + "host_port_pair": "192.168.61.169:8089", + "server_name": "splunk-example-site2-indexer-0" + }, + "1FBC4C96-0AD0-4C00-8468-4DDA988FB808": 
{ + "host_port_pair": "192.168.79.147:8089", + "server_name": "splunk-example-site2-indexer-2" + }, + "3A617349-B077-4E0F-B76A-41C300B00326": { + "host_port_pair": "192.168.10.218:8089", + "server_name": "splunk-example-site2-indexer-1" + } + } + } + } + ], + "paging": { + "total": 2, + "perPage": 30, + "offset": 0 + }, + "messages": [] +} \ No newline at end of file diff --git a/pkg/gateway/splunk/services/fixture/fixture.go b/pkg/gateway/splunk/services/fixture/fixture.go new file mode 100644 index 000000000..9b55b66a5 --- /dev/null +++ b/pkg/gateway/splunk/services/fixture/fixture.go @@ -0,0 +1,405 @@ +package fixture + +import ( + "context" + "fmt" + "os" + "strconv" + + "path/filepath" + + //"encoding/json" + + "net/http" + + "github.com/go-logr/logr" + "github.com/go-resty/resty/v2" + "github.com/jarcoal/httpmock" + splunkmodel "github.com/splunk/splunk-operator/pkg/gateway/splunk/model" + clustermodel "github.com/splunk/splunk-operator/pkg/gateway/splunk/model/services/cluster" + managermodel "github.com/splunk/splunk-operator/pkg/gateway/splunk/model/services/cluster/manager" + model "github.com/splunk/splunk-operator/pkg/splunk/model" + + // peermodel "github.com/splunk/splunk-operator/pkg/gateway/splunk/model/services/cluster/peer" + // searchheadmodel "github.com/splunk/splunk-operator/pkg/gateway/splunk/model/services/cluster/searchhead" + // commonmodel "github.com/splunk/splunk-operator/pkg/gateway/splunk/model/services/common" + // lmmodel "github.com/splunk/splunk-operator/pkg/gateway/splunk/model/services/license-manager" + gateway "github.com/splunk/splunk-operator/pkg/gateway/splunk/services" + logz "sigs.k8s.io/controller-runtime/pkg/log/zap" +) + +var log = logz.New().WithName("gateway").WithName("fixture") + +// fixtureGateway implements the gateway.fixtureGateway interface +// and uses splunk to manage the host. +type fixtureGateway struct { + // client for talking to splunk + client *resty.Client + // the splunk credentials + credentials splunkmodel.SplunkCredentials + // a logger configured for this host + log logr.Logger + // an event publisher for recording significant events + publisher model.EventPublisher + // state of the splunk + state *Fixture +} + +func findFixturePath() (string, error) { + ext := ".env" + wd, err := os.Getwd() + if err != nil { + return "", err + } + for { + dir, err := os.Open(wd) + if err != nil { + fmt.Println("Error opening directory:", err) + return "", err + } + defer dir.Close() + + files, err := dir.Readdir(-1) + if err != nil { + fmt.Println("Error reading directory:", err) + return "", err + } + + for _, file := range files { + if file.Name() == ext { + wd, err = filepath.Abs(wd) + wd += "/pkg/gateway/splunk/services/fixture/" + return wd, err + } + } + wd += "/.." + } +} + +// Fixture contains persistent state for a particular splunk instance +type Fixture struct { +} + +// NewGateway returns a new Fixture Gateway +func (f *Fixture) NewGateway(ctx context.Context, sad *splunkmodel.SplunkCredentials, publisher model.EventPublisher) (gateway.Gateway, error) { + p := &fixtureGateway{ + log: log.WithValues("splunk", sad.Address), + publisher: publisher, + state: f, + client: resty.New(), + } + return p, nil +} + +// GetClusterManagerInfo Access information about cluster manager node. +// get List cluster manager node details. 
+// endpoint: https://:/services/cluster/manager/info
+func (p *fixtureGateway) GetClusterManagerInfo(ctx context.Context) (*[]managermodel.ClusterManagerInfoContent, error) {
+	// Read the entire file content, giving us little control but
+	// making it very simple. No need to close the file.
+	relativePath, err := findFixturePath()
+	if err != nil {
+		log.Error(err, "fixture: unable to find path")
+		return nil, err
+	}
+	content, err := os.ReadFile(relativePath + "/cluster_manager_info.json")
+	if err != nil {
+		log.Error(err, "fixture: error in get cluster manager info")
+		return nil, err
+	}
+	httpmock.ActivateNonDefault(p.client.GetClient())
+	fixtureData := string(content)
+	responder := httpmock.NewStringResponder(200, fixtureData)
+	fakeUrl := clustermodel.GetClusterManagerInfoUrl
+	httpmock.RegisterResponder("GET", fakeUrl, responder)
+	// fetch the response envelope into the struct
+	splunkError := &splunkmodel.SplunkError{}
+	envelop := &managermodel.ClusterManagerInfoHeader{}
+	resp, err := p.client.R().
+		SetResult(envelop).
+		SetError(&splunkError).
+		ForceContentType("application/json").
+		SetQueryParams(map[string]string{"output_mode": "json", "count": "0"}).
+		Get(fakeUrl)
+	if err != nil {
+		p.log.Error(err, "get cluster manager info failed")
+	}
+	if resp.StatusCode() != http.StatusOK {
+		p.log.Info("response failure set to", "result", err)
+	}
+	if resp.StatusCode() > 400 {
+		if len(splunkError.Messages) > 0 {
+			p.log.Info("response failure set to", "result", splunkError.Messages[0].Text)
+		}
+		return nil, splunkError
+	}
+
+	contentList := []managermodel.ClusterManagerInfoContent{}
+	for _, entry := range envelop.Entry {
+		contentList = append(contentList, entry.Content)
+	}
+	return &contentList, nil
+}
+
+// GetClusterManagerPeers Access cluster manager peers.
+// endpoint: https://:/services/cluster/manager/peers
+func (p *fixtureGateway) GetClusterManagerPeers(ctx context.Context) (*[]managermodel.ClusterManagerPeerContent, error) {
+	relativePath, err := findFixturePath()
+	if err != nil {
+		log.Error(err, "fixture: unable to find path")
+		return nil, err
+	}
+	// Read the entire file content, giving us little control but
+	// making it very simple. No need to close the file.
+	content, err := os.ReadFile(relativePath + "cluster_manager_peers.json")
+	if err != nil {
+		log.Error(err, "fixture: error in get cluster manager peers")
+		return nil, err
+	}
+	httpmock.ActivateNonDefault(p.client.GetClient())
+	fixtureData := string(content)
+	responder := httpmock.NewStringResponder(200, fixtureData)
+	fakeUrl := clustermodel.GetClusterManagerPeersUrl
+	httpmock.RegisterResponder("GET", fakeUrl, responder)
+	// fetch the response envelope into the struct
+	splunkError := &splunkmodel.SplunkError{}
+	envelop := &managermodel.ClusterManagerPeerHeader{}
+	resp, err := p.client.R().
+		SetResult(envelop).
+		SetError(&splunkError).
+		ForceContentType("application/json").
+		SetQueryParams(map[string]string{"output_mode": "json", "count": "0"}).
+		Get(fakeUrl)
+	if err != nil {
+		p.log.Error(err, "get cluster manager peers failed")
+	}
+	if resp.StatusCode() != http.StatusOK {
+		p.log.Info("response failure set to", "result", err)
+	}
+	if resp.StatusCode() > 400 {
+		if len(splunkError.Messages) > 0 {
+			p.log.Info("response failure set to", "result", splunkError.Messages[0].Text)
+		}
+		return nil, splunkError
+	}
+
+	contentList := []managermodel.ClusterManagerPeerContent{}
+	for _, entry := range envelop.Entry {
+		contentList = append(contentList, entry.Content)
+	}
+	return &contentList, nil
+}
+
+// GetClusterManagerHealth Performs health checks to determine the cluster health and search impact, prior to a rolling upgrade of the indexer cluster.
+// Authentication and Authorization:
+//
+//	Requires the admin role or list_indexer_cluster capability.
+//
+// endpoint: https://:/services/cluster/manager/health
+func (p *fixtureGateway) GetClusterManagerHealth(ctx context.Context) (*[]managermodel.ClusterManagerHealthContent, error) {
+	relativePath, err := findFixturePath()
+	if err != nil {
+		log.Error(err, "fixture: unable to find path")
+		return nil, err
+	}
+
+	// Read the entire file content, giving us little control but
+	// making it very simple. No need to close the file.
+	content, err := os.ReadFile(relativePath + "cluster_manager_health.json")
+	if err != nil {
+		log.Error(err, "fixture: error in get cluster manager health")
+		return nil, err
+	}
+	httpmock.ActivateNonDefault(p.client.GetClient())
+	fixtureData := string(content)
+	responder := httpmock.NewStringResponder(200, fixtureData)
+	fakeUrl := clustermodel.GetClusterManagerHealthUrl
+	httpmock.RegisterResponder("GET", fakeUrl, responder)
+
+	// fetch the response envelope into the struct
+	splunkError := &splunkmodel.SplunkError{}
+	envelop := &managermodel.ClusterManagerHealthHeader{}
+	resp, err := p.client.R().
+		SetResult(envelop).
+		SetError(&splunkError).
+		ForceContentType("application/json").
+		SetQueryParams(map[string]string{"output_mode": "json", "count": "0"}).
+		Get(fakeUrl)
+	if err != nil {
+		p.log.Error(err, "get cluster manager health failed")
+	}
+	if resp.StatusCode() != http.StatusOK {
+		p.log.Info("response failure set to", "result", err)
+	}
+	if resp.StatusCode() > 400 {
+		if len(splunkError.Messages) > 0 {
+			p.log.Info("response failure set to", "result", splunkError.Messages[0].Text)
+		}
+		return nil, splunkError
+	}
+
+	contentList := []managermodel.ClusterManagerHealthContent{}
+	for _, entry := range envelop.Entry {
+		contentList = append(contentList, entry.Content)
+	}
+	return &contentList, nil
+}
+
+// GetClusterManagerSites Access cluster site information.
+// list List available cluster sites.
+// endpoint: https://:/services/cluster/manager/sites
+func (p *fixtureGateway) GetClusterManagerSites(ctx context.Context) (*[]managermodel.ClusterManagerSiteContent, error) {
+	relativePath, err := findFixturePath()
+	if err != nil {
+		log.Error(err, "fixture: unable to find path")
+		return nil, err
+	}
+	// Read the entire file content, giving us little control but
+	// making it very simple. No need to close the file.
+	content, err := os.ReadFile(relativePath + "/cluster_config.json")
+	if err != nil {
+		log.Error(err, "fixture: error in get cluster config")
+		return nil, err
+	}
+	httpmock.ActivateNonDefault(p.client.GetClient())
+	fixtureData := string(content)
+	responder := httpmock.NewStringResponder(200, fixtureData)
+	fakeUrl := clustermodel.GetClusterManagerSitesUrl
+	httpmock.RegisterResponder("GET", fakeUrl, responder)
+	// fetch the configheader into struct
+	splunkError := &splunkmodel.SplunkError{}
+	envelop := &managermodel.ClusterManagerSiteHeader{}
+	resp, err := p.client.R().
+		SetResult(envelop).
+		SetError(&splunkError).
+		ForceContentType("application/json").
+		SetQueryParams(map[string]string{"output_mode": "json", "count": "0"}).
+		Get(fakeUrl)
+	if err != nil {
+		p.log.Error(err, "get cluster manager sites failed")
+	}
+	if resp.StatusCode() != http.StatusOK {
+		p.log.Info("response failure set to", "result", err)
+	}
+	if resp.StatusCode() > 400 {
+		if len(splunkError.Messages) > 0 {
+			p.log.Info("response failure set to", "result", splunkError.Messages[0].Text)
+		}
+		return nil, splunkError
+	}
+
+	contentList := []managermodel.ClusterManagerSiteContent{}
+	for _, entry := range envelop.Entry {
+		contentList = append(contentList, entry.Content)
+	}
+	return &contentList, nil
+}
+
+// GetClusterManagerStatus Endpoint to get the status of the cluster manager, including the search heads connected to it.
+// endpoint: https://:/services/cluster/manager/status
+func (p *fixtureGateway) GetClusterManagerStatus(ctx context.Context) (*[]managermodel.ClusterManagerStatusContent, error) {
+	relativePath, err := findFixturePath()
+	if err != nil {
+		log.Error(err, "fixture: unable to find path")
+		return nil, err
+	}
+	// Read entire file content, giving us little control but
+	// making it very simple. No need to close the file.
+	content, err := os.ReadFile(relativePath + "/cluster_manager_status.json")
+	if err != nil {
+		log.Error(err, "fixture: error in get cluster manager status")
+		return nil, err
+	}
+	httpmock.ActivateNonDefault(p.client.GetClient())
+	fixtureData := string(content)
+	responder := httpmock.NewStringResponder(200, fixtureData)
+	fakeUrl := clustermodel.GetClusterManagerStatusUrl
+	httpmock.RegisterResponder("GET", fakeUrl, responder)
+	// fetch the configheader into struct
+	splunkError := &splunkmodel.SplunkError{}
+	envelop := &managermodel.ClusterManagerStatusHeader{}
+	resp, err := p.client.R().
+		SetResult(envelop).
+		SetError(&splunkError).
+		ForceContentType("application/json").
+		SetQueryParams(map[string]string{"output_mode": "json", "count": "0"}).
+		Get(fakeUrl)
+	if err != nil {
+		p.log.Error(err, "get cluster manager status failed")
+	}
+	if resp.StatusCode() != http.StatusOK {
+		p.log.Info("response failure set to", "result", err)
+	}
+	if resp.StatusCode() > 400 {
+		if len(splunkError.Messages) > 0 {
+			p.log.Info("response failure set to", "result", splunkError.Messages[0].Text)
+		}
+		return nil, splunkError
+	}
+
+	contentList := []managermodel.ClusterManagerStatusContent{}
+	for _, entry := range envelop.Entry {
+		contentList = append(contentList, entry.Content)
+	}
+	return &contentList, nil
+}
+
+// SetClusterInMaintenanceMode Endpoint to set cluster in maintenance mode.
+// POST with the desired mode to enable or disable maintenance mode.
+// endpoint: https://:/services/cluster/manager/control/default/maintenance
+func (p *fixtureGateway) SetClusterInMaintenanceMode(ctx context.Context, mode bool) error {
+
+	relativePath, err := findFixturePath()
+	if err != nil {
+		log.Error(err, "fixture: unable to find path")
+		return err
+	}
+	// Read entire file content, giving us little control but
+	// making it very simple. No need to close the file.
+	content, err := os.ReadFile(relativePath + "/cluster_maintenance.json")
+	if err != nil {
+		log.Error(err, "fixture: error in post cluster maintenance")
+		return err
+	}
+	httpmock.ActivateNonDefault(p.client.GetClient())
+	fixtureData := string(content)
+	responder := httpmock.NewStringResponder(200, fixtureData)
+	fakeUrl := clustermodel.SetClusterInMaintenanceModeUrl
+	httpmock.RegisterResponder("POST", fakeUrl, responder)
+
+	// fetch the configheader into struct
+	splunkError := &splunkmodel.SplunkError{}
+	resp, err := p.client.R().
+		SetError(&splunkError).
+		ForceContentType("application/json").
+		SetQueryParams(map[string]string{"output_mode": "json", "mode": strconv.FormatBool(mode)}).
+		Post(fakeUrl)
+	if err != nil {
+		p.log.Error(err, "set cluster manager in maintenance mode failed")
+	}
+	if resp.StatusCode() != http.StatusOK {
+		p.log.Info("response failure set to", "result", err)
+	}
+	if resp.StatusCode() > 400 {
+		if len(splunkError.Messages) > 0 {
+			p.log.Info("response failure set to", "result", splunkError.Messages[0].Text)
+		}
+		return splunkError
+	}
+
+	return err
+}
+
+// IsClusterInMaintenanceMode Endpoint to check if the cluster is in maintenance mode.
+// endpoint: https://:/services/cluster/manager/control/default/maintenance
+func (p *fixtureGateway) IsClusterInMaintenanceMode(ctx context.Context) (result bool, err error) {
+	clusterInfoList, err := p.GetClusterManagerInfo(ctx)
+	if err != nil {
+		return false, err
+	}
+	if clusterInfoList != nil && len(*clusterInfoList) > 0 {
+		content := *clusterInfoList
+		return content[0].MaintenanceMode, nil
+	}
+	return false, nil
+}
diff --git a/pkg/gateway/splunk/services/gateway.go b/pkg/gateway/splunk/services/gateway.go
new file mode 100644
index 000000000..1e19304e3
--- /dev/null
+++ b/pkg/gateway/splunk/services/gateway.go
@@ -0,0 +1,52 @@
+package indexer
+
+import (
+	"context"
+
+	splunkmodel "github.com/splunk/splunk-operator/pkg/gateway/splunk/model"
+	managermodel "github.com/splunk/splunk-operator/pkg/gateway/splunk/model/services/cluster/manager"
+	model "github.com/splunk/splunk-operator/pkg/splunk/model"
+)
+
+// Factory is the interface for creating new Gateway objects.
+type Factory interface {
+	NewGateway(ctx context.Context, sad *splunkmodel.SplunkCredentials, publisher model.EventPublisher) (Gateway, error)
+}
+
+// Gateway holds the state information for talking to
+// splunk gateway backend.
+type Gateway interface {
+
+	// Performs health checks to determine the cluster health and search impact, prior to a rolling upgrade of the indexer cluster.
+	// Authentication and Authorization:
+	// Requires the admin role or list_indexer_cluster capability.
+	// endpoint: https://:/services/cluster/manager/health
+	GetClusterManagerHealth(ctx context.Context) (*[]managermodel.ClusterManagerHealthContent, error)
+
+	// Access information about cluster manager node.
+	// GET cluster manager node details.
+	// endpoint: https://:/services/cluster/manager/info
+	GetClusterManagerInfo(ctx context.Context) (*[]managermodel.ClusterManagerInfoContent, error)
+
+	// Access cluster manager peers.
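+	// Returns one entry per peer known to the cluster manager.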
+	// endpoint: https://:/services/cluster/manager/peers
+	GetClusterManagerPeers(ctx context.Context) (*[]managermodel.ClusterManagerPeerContent, error)
+
+	// Access cluster site information.
+	// List available cluster sites.
+	// endpoint: https://:/services/cluster/manager/sites
+	GetClusterManagerSites(ctx context.Context) (*[]managermodel.ClusterManagerSiteContent, error)
+
+	// GetClusterManagerStatus Endpoint to get the status of a rolling restart.
+	// endpoint: https://:/services/cluster/manager/status
+	GetClusterManagerStatus(ctx context.Context) (*[]managermodel.ClusterManagerStatusContent, error)
+
+	// SetClusterInMaintenanceMode Endpoint to set cluster in maintenance mode.
+	// POST with the desired mode to enable or disable maintenance mode.
+	// endpoint: https://:/services/cluster/manager/control/default/maintenance
+	SetClusterInMaintenanceMode(ctx context.Context, mode bool) error
+
+	// IsClusterInMaintenanceMode checks if the cluster is in maintenance mode.
+	IsClusterInMaintenanceMode(ctx context.Context) (bool, error)
+}
diff --git a/pkg/gateway/splunk/services/implementation/cluster_manager_impl.go b/pkg/gateway/splunk/services/implementation/cluster_manager_impl.go
new file mode 100644
index 000000000..58f10f5b0
--- /dev/null
+++ b/pkg/gateway/splunk/services/implementation/cluster_manager_impl.go
@@ -0,0 +1,250 @@
+package impl
+
+import (
+	"context"
+	"net/http"
+	"strconv"
+
+	"github.com/go-logr/logr"
+	"github.com/go-resty/resty/v2"
+	splunkmodel "github.com/splunk/splunk-operator/pkg/gateway/splunk/model"
+	clustermodel "github.com/splunk/splunk-operator/pkg/gateway/splunk/model/services/cluster"
+	managermodel "github.com/splunk/splunk-operator/pkg/gateway/splunk/model/services/cluster/manager"
+	model "github.com/splunk/splunk-operator/pkg/splunk/model"
+)
+
+// splunkGateway implements the gateway.Gateway interface
+// and uses gateway to manage the host.
+type splunkGateway struct {
+	// a logger configured for this host
+	log logr.Logger
+	// a debug logger configured for this host
+	debugLog logr.Logger
+	// an event publisher for recording significant events
+	publisher model.EventPublisher
+	// client for talking to splunk
+	client *resty.Client
+	// credentials
+	credentials *splunkmodel.SplunkCredentials
+}
+
+// Access information about cluster manager node.
+// GET cluster manager node details.
+// endpoint: https://:/services/cluster/manager/info
+func (p *splunkGateway) GetClusterManagerInfo(ctx context.Context) (*[]managermodel.ClusterManagerInfoContent, error) {
+	url := clustermodel.GetClusterManagerInfoUrl
+
+	// fetch the configheader into struct
+	splunkError := &splunkmodel.SplunkError{}
+	envelop := &managermodel.ClusterManagerInfoHeader{}
+	resp, err := p.client.R().
+		SetResult(envelop).
+		SetError(&splunkError).
+		ForceContentType("application/json").
+		SetQueryParams(map[string]string{"output_mode": "json", "count": "0"}).
+		Get(url)
+	if err != nil {
+		p.log.Error(err, "get cluster manager info failed")
+	}
+	if resp.StatusCode() != http.StatusOK {
+		p.log.Info("response failure set to", "result", err)
+	}
+	if resp.StatusCode() > 400 {
+		if len(splunkError.Messages) > 0 {
+			p.log.Info("response failure set to", "result", splunkError.Messages[0].Text)
+		}
+		return nil, splunkError
+	}
+
+	contentList := []managermodel.ClusterManagerInfoContent{}
+	for _, entry := range envelop.Entry {
+		contentList = append(contentList, entry.Content)
+	}
+	return &contentList, err
+}
+
+// Access cluster manager peers.
+// endpoint: https://:/services/cluster/manager/peers
+func (p *splunkGateway) GetClusterManagerPeers(ctx context.Context) (*[]managermodel.ClusterManagerPeerContent, error) {
+	url := clustermodel.GetClusterManagerPeersUrl
+
+	// fetch the configheader into struct
+	splunkError := &splunkmodel.SplunkError{}
+	envelop := &managermodel.ClusterManagerPeerHeader{}
+	resp, err := p.client.R().
+		SetResult(envelop).
+		SetError(&splunkError).
+		ForceContentType("application/json").
+		SetQueryParams(map[string]string{"output_mode": "json", "count": "0"}).
+		Get(url)
+	if err != nil {
+		p.log.Error(err, "get cluster manager peers failed")
+	}
+	if resp.StatusCode() != http.StatusOK {
+		p.log.Info("response failure set to", "result", err)
+	}
+	if resp.StatusCode() > 400 {
+		if len(splunkError.Messages) > 0 {
+			p.log.Info("response failure set to", "result", splunkError.Messages[0].Text)
+		}
+		return nil, splunkError
+	}
+
+	contentList := []managermodel.ClusterManagerPeerContent{}
+	for _, entry := range envelop.Entry {
+		contentList = append(contentList, entry.Content)
+	}
+	return &contentList, err
+}
+
+// Performs health checks to determine the cluster health and search impact, prior to a rolling upgrade of the indexer cluster.
+// Authentication and Authorization:
+//
+//	Requires the admin role or list_indexer_cluster capability.
+//
+// endpoint: https://:/services/cluster/manager/health
+func (p *splunkGateway) GetClusterManagerHealth(ctx context.Context) (*[]managermodel.ClusterManagerHealthContent, error) {
+	url := clustermodel.GetClusterManagerHealthUrl
+
+	p.log.Info("getting cluster manager health information")
+	// fetch the configheader into struct
+	splunkError := &splunkmodel.SplunkError{}
+	envelop := &managermodel.ClusterManagerHealthHeader{}
+	resp, err := p.client.R().
+		SetResult(envelop).
+		SetError(&splunkError).
+		ForceContentType("application/json").
+		SetQueryParams(map[string]string{"output_mode": "json", "count": "0"}).
+		Get(url)
+	if err != nil {
+		p.log.Error(err, "get cluster manager health failed")
+	}
+	if resp.StatusCode() != http.StatusOK {
+		p.log.Info("response failure set to", "result", err)
+	}
+	if resp.StatusCode() > 400 {
+		if len(splunkError.Messages) > 0 {
+			p.log.Info("response failure set to", "result", splunkError.Messages[0].Text)
+		}
+		return nil, splunkError
+	}
+
+	contentList := []managermodel.ClusterManagerHealthContent{}
+	for _, entry := range envelop.Entry {
+		contentList = append(contentList, entry.Content)
+	}
+	return &contentList, err
+}
+
+// Access cluster site information.
+// List available cluster sites.
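+// The response envelope parallels the peers endpoint; only the content type differs.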
+// endpoint: https://:/services/cluster/manager/sites
+func (p *splunkGateway) GetClusterManagerSites(ctx context.Context) (*[]managermodel.ClusterManagerSiteContent, error) {
+	url := clustermodel.GetClusterManagerSitesUrl
+
+	// fetch the configheader into struct
+	splunkError := &splunkmodel.SplunkError{}
+	envelop := &managermodel.ClusterManagerSiteHeader{}
+	resp, err := p.client.R().
+		SetResult(envelop).
+		SetError(&splunkError).
+		ForceContentType("application/json").
+		SetQueryParams(map[string]string{"output_mode": "json", "count": "0"}).
+		Get(url)
+	if err != nil {
+		p.log.Error(err, "get cluster manager sites failed")
+	}
+	if resp.StatusCode() != http.StatusOK {
+		p.log.Info("response failure set to", "result", err)
+	}
+	if resp.StatusCode() > 400 {
+		if len(splunkError.Messages) > 0 {
+			p.log.Info("response failure set to", "result", splunkError.Messages[0].Text)
+		}
+		return nil, splunkError
+	}
+
+	contentList := []managermodel.ClusterManagerSiteContent{}
+	for _, entry := range envelop.Entry {
+		contentList = append(contentList, entry.Content)
+	}
+	return &contentList, err
+}
+
+// GetClusterManagerStatus Endpoint to get the status of a rolling restart.
+// endpoint: https://:/services/cluster/manager/status
+func (p *splunkGateway) GetClusterManagerStatus(ctx context.Context) (*[]managermodel.ClusterManagerStatusContent, error) {
+	url := clustermodel.GetClusterManagerStatusUrl
+
+	// fetch the configheader into struct
+	splunkError := &splunkmodel.SplunkError{}
+	envelop := &managermodel.ClusterManagerStatusHeader{}
+	resp, err := p.client.R().
+		SetResult(envelop).
+		SetError(&splunkError).
+		ForceContentType("application/json").
+		SetQueryParams(map[string]string{"output_mode": "json", "count": "0"}).
+		Get(url)
+	if err != nil {
+		p.log.Error(err, "get cluster manager status failed")
+	}
+	if resp.StatusCode() != http.StatusOK {
+		p.log.Info("response failure set to", "result", err)
+	}
+	if resp.StatusCode() > 400 {
+		if len(splunkError.Messages) > 0 {
+			p.log.Info("response failure set to", "result", splunkError.Messages[0].Text)
+		}
+		return nil, splunkError
+	}
+
+	contentList := []managermodel.ClusterManagerStatusContent{}
+	for _, entry := range envelop.Entry {
+		contentList = append(contentList, entry.Content)
+	}
+	return &contentList, err
+}
+
+// SetClusterInMaintenanceMode Endpoint to set cluster in maintenance mode.
+// POST with the desired mode to enable or disable maintenance mode.
+// endpoint: https://:/services/cluster/manager/control/default/maintenance
+func (p *splunkGateway) SetClusterInMaintenanceMode(ctx context.Context, mode bool) error {
+	url := clustermodel.SetClusterInMaintenanceModeUrl
+
+	// fetch the configheader into struct
+	splunkError := &splunkmodel.SplunkError{}
+	resp, err := p.client.R().
+		SetError(&splunkError).
+		ForceContentType("application/json").
+		SetQueryParams(map[string]string{"output_mode": "json", "mode": strconv.FormatBool(mode)}).
+		Post(url)
+	if err != nil {
+		p.log.Error(err, "set cluster in maintenance mode failed")
+	}
+	if resp.StatusCode() != http.StatusOK {
+		p.log.Info("response failure set to", "result", err)
+	}
+	if resp.StatusCode() > 400 {
+		if len(splunkError.Messages) > 0 {
+			p.log.Info("response failure set to", "result", splunkError.Messages[0].Text)
+		}
+		return splunkError
+	}
+
+	return err
+}
+
+// IsClusterInMaintenanceMode Endpoint to check if the cluster is in maintenance mode.
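+// Rather than querying the control endpoint directly, the current mode is
+// derived from the MaintenanceMode field returned by GetClusterManagerInfo.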
+// endpoint: https://:/services/cluster/manager/control/default/maintenance
+func (p *splunkGateway) IsClusterInMaintenanceMode(ctx context.Context) (result bool, err error) {
+	clusterInfoList, err := p.GetClusterManagerInfo(ctx)
+	if err != nil {
+		return false, err
+	}
+	if clusterInfoList != nil && len(*clusterInfoList) > 0 {
+		content := *clusterInfoList
+		return content[0].MaintenanceMode, nil
+	}
+	return false, nil
+}
diff --git a/pkg/gateway/splunk/services/implementation/factory.go b/pkg/gateway/splunk/services/implementation/factory.go
new file mode 100644
index 000000000..930d33322
--- /dev/null
+++ b/pkg/gateway/splunk/services/implementation/factory.go
@@ -0,0 +1,89 @@
+package impl
+
+import (
+	"context"
+	"crypto/tls"
+	"fmt"
+
+	"github.com/go-logr/logr"
+	"github.com/go-resty/resty/v2"
+	splunkmodel "github.com/splunk/splunk-operator/pkg/gateway/splunk/model"
+
+	//model "github.com/splunk/splunk-operator/pkg/gateway/splunk/model"
+	gateway "github.com/splunk/splunk-operator/pkg/gateway/splunk/services"
+	//cmmodel "github.com/splunk/splunk-operator/pkg/gateway/splunk/cluster-manager/model"
+	"time"
+
+	model "github.com/splunk/splunk-operator/pkg/splunk/model"
+	"sigs.k8s.io/controller-runtime/pkg/log"
+)
+
+type splunkGatewayFactory struct {
+	log logr.Logger
+	//credentials to log on to splunk
+	credentials *splunkmodel.SplunkCredentials
+	// client for talking to splunk
+	client *resty.Client
+}
+
+// NewGatewayFactory creates a new factory that returns Gateway objects.
+func NewGatewayFactory() gateway.Factory {
+	factory := splunkGatewayFactory{}
+	err := factory.init()
+	if err != nil {
+		return nil // FIXME we have to throw some kind of exception or error here
+	}
+	return factory
+}
+
+func (f *splunkGatewayFactory) init() error {
+	return nil
+}
+
+func (f splunkGatewayFactory) splunkGateway(ctx context.Context, sad *splunkmodel.SplunkCredentials, publisher model.EventPublisher) (*splunkGateway, error) {
+	gatewayLogger := log.FromContext(ctx)
+	reqLogger := log.FromContext(ctx)
+	f.log = reqLogger.WithName("splunkGateway")
+
+	f.client = resty.New()
+	// Enable debug mode
+	f.client.SetDebug(true)
+	// Optionally disable the TLS certificate check (https)
+	f.client.SetTLSClientConfig(&tls.Config{InsecureSkipVerify: sad.DisableCertificateVerification})
+	// Set the client timeout as needed
+	f.client.SetTimeout(1 * time.Minute)
+	namespace := "default"
+	if len(sad.Namespace) > 0 {
+		namespace = sad.Namespace
+	}
+	//splunkURL := fmt.Sprintf("https://%s:%d/%s", sad.Address, sad.Port, sad.ServicesNamespace)
+	splunkURL := fmt.Sprintf("https://%s.%s:%d", sad.Address, namespace, sad.Port)
+	f.client.SetBaseURL(splunkURL)
+	f.client.SetBasicAuth("admin", sad.CredentialsName)
+	f.client.SetHeader("Content-Type", "application/json")
+	f.client.SetHeader("Accept", "application/json")
+	f.credentials = sad
+
+	gatewayLogger.Info("new splunk manager created to access rest endpoint")
+	newGateway := &splunkGateway{
+		credentials: f.credentials,
+		client:      f.client,
+		log:         f.log,
+		debugLog:    f.log,
+		publisher:   publisher,
+	}
+	f.log.Info("splunk settings",
+		"endpoint", f.credentials.Address,
+		"CACertFile", f.credentials.TrustedCAFile,
+		"ClientCertFile", f.credentials.ClientCertificateFile,
+		"ClientPrivKeyFile", f.credentials.ClientPrivateKeyFile,
+		"TLSInsecure", f.credentials.DisableCertificateVerification,
+	)
+	return newGateway, nil
+}
+
+// NewGateway returns a new Splunk Gateway using global
+// configuration for finding the Splunk services.
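+//
+// A minimal usage sketch (the ctx, sad, and publisher values are
+// illustrative, not part of this patch):
+//
+//	factory := NewGatewayFactory()
+//	gw, err := factory.NewGateway(ctx, sad, publisher)
+//	if err != nil {
+//		return err
+//	}
+//	info, err := gw.GetClusterManagerInfo(ctx)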
+func (f splunkGatewayFactory) NewGateway(ctx context.Context, sad *splunkmodel.SplunkCredentials, publisher model.EventPublisher) (gateway.Gateway, error) { + return f.splunkGateway(ctx, sad, publisher) +} diff --git a/pkg/gateway/splunk/services/implementation/server_impl.go b/pkg/gateway/splunk/services/implementation/server_impl.go new file mode 100644 index 000000000..cc3b0c100 --- /dev/null +++ b/pkg/gateway/splunk/services/implementation/server_impl.go @@ -0,0 +1,161 @@ +package impl + +import ( + "context" + "net/http" + + splunkmodel "github.com/splunk/splunk-operator/pkg/gateway/splunk/model" + servermodel "github.com/splunk/splunk-operator/pkg/gateway/splunk/model/services/server" + healthmodel "github.com/splunk/splunk-operator/pkg/gateway/splunk/model/services/server/health" +) + +// Shows the overall health of splunkd. The health of splunkd can be red, yellow, or green. The health of splunkd is based on the health of all features reporting to it. +// Authentication and Authorization: +// +// Requires the admin role or list_health capability. +// +// Get health status of distributed deployment features. +// endpoint: https://:/services/server/health/deployment/details +func (p *splunkGateway) GetServerDeploymentHealthDetails(context context.Context) (*[]healthmodel.DeploymentContent, error) { + url := healthmodel.DeploymentDetailsUrl + + // fetch the configheader into struct + splunkError := &splunkmodel.SplunkError{} + envelop := &healthmodel.DeploymentHeader{} + resp, err := p.client.R(). + SetResult(envelop). + SetError(&splunkError). + ForceContentType("application/json"). + SetQueryParams(map[string]string{"output_mode": "json", "count": "0"}). + Get(url) + if err != nil { + p.log.Error(err, "get deployment details failed") + } + if resp.StatusCode() != http.StatusOK { + p.log.Info("response failure set to", "result", err) + } + if resp.StatusCode() > 400 { + if len(splunkError.Messages) > 0 { + p.log.Info("response failure set to", "result", splunkError.Messages[0].Text) + } + return nil, splunkError + } + + contentList := []healthmodel.DeploymentContent{} + for _, entry := range envelop.Entry { + contentList = append(contentList, entry.Content) + } + return &contentList, err +} + +// Shows the overall health of the splunkd health status tree, as well as each feature node and its respective color. For unhealthy nodes (non-green), the output includes reasons, indicators, thresholds, messages, and so on. +// Authentication and Authorization: +// Requires the admin role or list_health capability. +// /services/server/health/splunkd/details + +func (p *splunkGateway) GetSplunkdHealthDetails(context context.Context) (*[]healthmodel.DeploymentContent, error) { + url := healthmodel.SplunkdHealthDetailsUrl + + // fetch the configheader into struct + splunkError := &splunkmodel.SplunkError{} + envelop := &healthmodel.DeploymentHeader{} + resp, err := p.client.R(). + SetResult(envelop). + SetError(&splunkError). + ForceContentType("application/json"). + SetQueryParams(map[string]string{"output_mode": "json", "count": "0"}). 
+		Get(url)
+	if err != nil {
+		p.log.Error(err, "get splunkd health details failed")
+	}
+	if resp.StatusCode() != http.StatusOK {
+		p.log.Info("response failure set to", "result", err)
+	}
+	if resp.StatusCode() > 400 {
+		if len(splunkError.Messages) > 0 {
+			p.log.Info("response failure set to", "result", splunkError.Messages[0].Text)
+		}
+		return nil, splunkError
+	}
+
+	contentList := []healthmodel.DeploymentContent{}
+	for _, entry := range envelop.Entry {
+		contentList = append(contentList, entry.Content)
+	}
+	return &contentList, err
+}
+
+// Access information about the currently running Splunk instance.
+// Note: This endpoint provides information on the currently running Splunk instance. Some values returned
+// in the GET response reflect server status information. However, this endpoint is meant to provide
+// information on the currently running instance, not the machine where the instance is running.
+// Server status values returned by this endpoint should be considered deprecated and might not continue
+// to be accessible from this endpoint. Use server/sysinfo to access server status instead.
+// endpoint: https://:/services/server/info
+
+func (p *splunkGateway) GetServerInfo(context context.Context) (*[]healthmodel.DeploymentContent, error) {
+	url := servermodel.InfoUrl
+
+	// fetch the configheader into struct
+	splunkError := &splunkmodel.SplunkError{}
+	envelop := &healthmodel.DeploymentHeader{}
+	resp, err := p.client.R().
+		SetResult(envelop).
+		SetError(&splunkError).
+		ForceContentType("application/json").
+		SetQueryParams(map[string]string{"output_mode": "json", "count": "0"}).
+		Get(url)
+	if err != nil {
+		p.log.Error(err, "get server info failed")
+	}
+	if resp.StatusCode() != http.StatusOK {
+		p.log.Info("response failure set to", "result", err)
+	}
+	if resp.StatusCode() > 400 {
+		if len(splunkError.Messages) > 0 {
+			p.log.Info("response failure set to", "result", splunkError.Messages[0].Text)
+		}
+		return nil, splunkError
+	}
+
+	contentList := []healthmodel.DeploymentContent{}
+	for _, entry := range envelop.Entry {
+		contentList = append(contentList, entry.Content)
+	}
+	return &contentList, err
+}
+
+// List server/status child resources.
+// endpoint: https://:/services/server/status
+
+func (p *splunkGateway) GetServerStatus(context context.Context) (*[]healthmodel.DeploymentContent, error) {
+	url := servermodel.StatusUrl
+
+	// fetch the configheader into struct
+	splunkError := &splunkmodel.SplunkError{}
+	envelop := &healthmodel.DeploymentHeader{}
+	resp, err := p.client.R().
+		SetResult(envelop).
+		SetError(&splunkError).
+		ForceContentType("application/json").
+		SetQueryParams(map[string]string{"output_mode": "json", "count": "0"}).
+		Get(url)
+	if err != nil {
+		p.log.Error(err, "get server status failed")
+	}
+	if resp.StatusCode() != http.StatusOK {
+		p.log.Info("response failure set to", "result", err)
+	}
+	if resp.StatusCode() > 400 {
+		if len(splunkError.Messages) > 0 {
+			p.log.Info("response failure set to", "result", splunkError.Messages[0].Text)
+		}
+		return nil, splunkError
+	}
+
+	contentList := []healthmodel.DeploymentContent{}
+	for _, entry := range envelop.Entry {
+		contentList = append(contentList, entry.Content)
+	}
+	return &contentList, err
+}
diff --git a/pkg/gateway/splunk/services/implementation/splunk_test.go b/pkg/gateway/splunk/services/implementation/splunk_test.go
new file mode 100644
index 000000000..4faaed56f
--- /dev/null
+++ b/pkg/gateway/splunk/services/implementation/splunk_test.go
@@ -0,0 +1,147 @@
+package impl
+
+import (
+	"context"
+	"fmt"
+	"os"
+	"time"
+
+	"github.com/go-resty/resty/v2"
+	"github.com/jarcoal/httpmock"
+	splunkmodel "github.com/splunk/splunk-operator/pkg/gateway/splunk/model"
+	clustermodel "github.com/splunk/splunk-operator/pkg/gateway/splunk/model/services/cluster"
+
+	//managermodel "github.com/splunk/splunk-operator/pkg/gateway/splunk/model/services/cluster/manager"
+	//peermodel "github.com/splunk/splunk-operator/pkg/gateway/splunk/model/services/cluster/peer"
+
+	"testing"
+
+	logz "sigs.k8s.io/controller-runtime/pkg/log/zap"
+)
+
+var slog = logz.New().WithName("gateway").WithName("fixture")
+
+func setCreds(t *testing.T) *splunkGateway {
+	//ctx := context.TODO()
+	sad := &splunkmodel.SplunkCredentials{
+		Address:                        "splunk-cm-cluster-master-service",
+		Port:                           8089,
+		ServicesNamespace:              "",
+		User:                           "admin",
+		App:                            "",
+		CredentialsName:                "admin: abcdefghijklmnopqrstuvwxyz",
+		TrustedCAFile:                  "",
+		ClientCertificateFile:          "",
+		ClientPrivateKeyFile:           "",
+		DisableCertificateVerification: true,
+	}
+	publisher := func(ctx context.Context, eventType, reason, message string) {}
+	// TODO fixme how to test the gateway call directly
+	//sm := NewGatewayFactory(ctx, &sad, publisher)
+	sm := &splunkGateway{
+		credentials: sad,
+		client:      resty.New(),
+		publisher:   publisher,
+		log:         slog,
+		debugLog:    slog,
+	}
+	//splunkURL := fmt.Sprintf("https://%s:%d/%s", sad.Address, sad.Port, sad.ServicesNamespace)
+	splunkURL := fmt.Sprintf("https://%s:%d", sad.Address, sad.Port)
+	sm.client.SetBaseURL(splunkURL)
+	sm.client.SetHeader("Content-Type", "application/json")
+	sm.client.SetHeader("Accept", "application/json")
+	sm.client.SetTimeout(60 * time.Minute)
+	sm.client.SetDebug(true)
+	return sm
+}
+
+func TestGetClusterManagerHealth(t *testing.T) {
+	httpmock.Activate()
+	defer httpmock.DeactivateAndReset()
+
+	ctx := context.TODO()
+	sm := setCreds(t)
+	httpmock.ActivateNonDefault(sm.client.GetClient())
+	content, err := os.ReadFile("../fixture/cluster_manager_health.json")
+	if err != nil {
+		t.Errorf("fixture: error in get cluster manager health %v", err)
+	}
+	fixtureData := string(content)
+	responder := httpmock.NewStringResponder(200, fixtureData)
+	url := clustermodel.GetClusterManagerHealthUrl
+	httpmock.RegisterResponder("GET", url, responder)
+
+	_, err = sm.GetClusterManagerHealth(ctx)
+	if err != nil {
+		t.Errorf("fixture: error in get cluster manager health %v", err)
+	}
+}
+
+func TestGetClusterManagerInfo(t *testing.T) {
+	httpmock.Activate()
+	defer httpmock.DeactivateAndReset()
+
+	ctx := context.TODO()
+	sm := setCreds(t)
+	httpmock.ActivateNonDefault(sm.client.GetClient())
+	content, err := os.ReadFile("../fixture/cluster_manager_info.json")
+	if err != nil {
+		t.Errorf("fixture: error in get cluster manager info %v", err)
+	}
+	fixtureData := string(content)
+	responder := httpmock.NewStringResponder(200, fixtureData)
+	url := clustermodel.GetClusterManagerInfoUrl
+	httpmock.RegisterResponder("GET", url, responder)
+
+	_, err = sm.GetClusterManagerInfo(ctx)
+	if err != nil {
+		t.Errorf("fixture: error in get cluster manager info %v", err)
+	}
+}
+
+func TestGetClusterManagerPeers(t *testing.T) {
+	httpmock.Activate()
+	defer httpmock.DeactivateAndReset()
+
+	ctx := context.TODO()
+	sm := setCreds(t)
+	httpmock.ActivateNonDefault(sm.client.GetClient())
+	content, err := os.ReadFile("../fixture/cluster_manager_peers.json")
+	if err != nil {
+		t.Errorf("fixture: error in get cluster manager peers %v", err)
+	}
+	fixtureData := string(content)
+	responder := httpmock.NewStringResponder(200, fixtureData)
+	url := clustermodel.GetClusterManagerPeersUrl
+	httpmock.RegisterResponder("GET", url, responder)
+
+	peersptr, err := sm.GetClusterManagerPeers(ctx)
+	if err != nil {
+		t.Errorf("fixture: error in get cluster manager peers %v", err)
+	}
+	if peersptr == nil {
+		t.Errorf("fixture: error in get cluster manager peers, peers list is empty")
+	}
+}
+
+func TestSetClusterInMaintenanceMode(t *testing.T) {
+	httpmock.Activate()
+	defer httpmock.DeactivateAndReset()
+
+	ctx := context.TODO()
+	sm := setCreds(t)
+	httpmock.ActivateNonDefault(sm.client.GetClient())
+	content, err := os.ReadFile("../fixture/cluster_maintenance.json")
+	if err != nil {
+		t.Errorf("fixture: error in get cluster maintenance %v", err)
+	}
+	fixtureData := string(content)
+	responder := httpmock.NewStringResponder(200, fixtureData)
+	url := clustermodel.SetClusterInMaintenanceModeUrl
+	httpmock.RegisterResponder("POST", url, responder)
+
+	err = sm.SetClusterInMaintenanceMode(ctx, true)
+	if err != nil {
+		t.Errorf("fixture: error in set cluster in maintenance mode %v", err)
+	}
+}
diff --git a/pkg/provisioner/splunk/implementation/factory.go b/pkg/provisioner/splunk/implementation/factory.go
new file mode 100644
index 000000000..24c532be1
--- /dev/null
+++ b/pkg/provisioner/splunk/implementation/factory.go
@@ -0,0 +1,97 @@
+package impl
+
+import (
+	"context"
+
+	"github.com/go-logr/logr"
+
+	//model "github.com/splunk/splunk-operator/pkg/provisioner/splunk/model"
+	licensegateway "github.com/splunk/splunk-operator/pkg/gateway/splunk/license-manager"
+	licensefixture "github.com/splunk/splunk-operator/pkg/gateway/splunk/license-manager/fixture"
+	splunklicensegatewayimpl "github.com/splunk/splunk-operator/pkg/gateway/splunk/license-manager/implementation"
+	splunkmodel "github.com/splunk/splunk-operator/pkg/gateway/splunk/model"
+	gateway "github.com/splunk/splunk-operator/pkg/gateway/splunk/services"
+	"github.com/splunk/splunk-operator/pkg/gateway/splunk/services/fixture"
+	splunkgatewayimpl "github.com/splunk/splunk-operator/pkg/gateway/splunk/services/implementation"
+	provisioner "github.com/splunk/splunk-operator/pkg/provisioner/splunk"
+
+	//cmmodel "github.com/splunk/splunk-operator/pkg/provisioner/splunk/cluster-manager/model"
+	model "github.com/splunk/splunk-operator/pkg/splunk/model"
+	"sigs.k8s.io/controller-runtime/pkg/log"
+)
+
+type splunkProvisionerFactory struct {
+	log logr.Logger
+	//credentials to log on to splunk
+	credentials *splunkmodel.SplunkCredentials
+	// Gateway Factory
+	gatewayFactory gateway.Factory
+	// splunk license factory
+	licenseFactory licensegateway.Factory
+}
+
+// NewProvisionerFactory creates a new factory that returns Provisioner objects.
+func NewProvisionerFactory(runInTestMode bool) provisioner.Factory {
+	factory := splunkProvisionerFactory{}
+
+	err := factory.init(runInTestMode)
+	if err != nil {
+		return nil // FIXME we have to throw some kind of exception or error here
+	}
+	return factory
+}
+
+func (f *splunkProvisionerFactory) init(runInTestMode bool) error {
+	if runInTestMode {
+		f.gatewayFactory = &fixture.Fixture{}
+		f.licenseFactory = &licensefixture.Fixture{}
+	} else {
+		f.gatewayFactory = splunkgatewayimpl.NewGatewayFactory()
+		f.licenseFactory = splunklicensegatewayimpl.NewGatewayFactory()
+	}
+	return nil
+}
+
+func (f splunkProvisionerFactory) splunkProvisioner(ctx context.Context, sad *splunkmodel.SplunkCredentials, publisher model.EventPublisher) (*splunkProvisioner, error) {
+	provisionerLogger := log.FromContext(ctx)
+	reqLogger := log.FromContext(ctx)
+	f.log = reqLogger.WithName("splunkProvisioner")
+
+	f.credentials = sad
+
+	provisionerLogger.Info("new splunk manager created to access rest endpoint")
+	gateway, err := f.gatewayFactory.NewGateway(ctx, sad, publisher)
+	if err != nil {
+		return nil, err
+	}
+	licensegateway, err := f.licenseFactory.NewGateway(ctx, sad, publisher)
+	if err != nil {
+		return nil, err
+	}
+	newProvisioner := &splunkProvisioner{
+		credentials:    f.credentials,
+		log:            f.log,
+		debugLog:       f.log,
+		publisher:      publisher,
+		gateway:        gateway,
+		licensegateway: licensegateway,
+	}
+
+	f.log.Info("splunk settings",
+		"endpoint", f.credentials.Address,
+		"CACertFile", f.credentials.TrustedCAFile,
+		"ClientCertFile", f.credentials.ClientCertificateFile,
+		"ClientPrivKeyFile", f.credentials.ClientPrivateKeyFile,
+		"TLSInsecure", f.credentials.DisableCertificateVerification,
+	)
+	return newProvisioner, nil
+}
+
+// NewProvisioner returns a new Splunk Provisioner using global
+// configuration for finding the Splunk services.
+func (f splunkProvisionerFactory) NewProvisioner(ctx context.Context, sad *splunkmodel.SplunkCredentials, publisher model.EventPublisher) (provisioner.Provisioner, error) {
+	return f.splunkProvisioner(ctx, sad, publisher)
+}
diff --git a/pkg/provisioner/splunk/implementation/license.go b/pkg/provisioner/splunk/implementation/license.go
new file mode 100644
index 000000000..77a1e65e5
--- /dev/null
+++ b/pkg/provisioner/splunk/implementation/license.go
@@ -0,0 +1,87 @@
+package impl
+
+import (
+	"context"
+	"fmt"
+
+	licensemodel "github.com/splunk/splunk-operator/pkg/gateway/splunk/model/services/license"
+	provmodel "github.com/splunk/splunk-operator/pkg/provisioner/splunk/model"
+
+	"k8s.io/apimachinery/pkg/api/meta"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+var callLicenseLocalPeer = func(ctx context.Context, p *splunkProvisioner) (*[]licensemodel.LicenseLocalPeer, error) {
+	lminfo, err := p.licensegateway.GetLicenseLocalPeer(ctx)
+	if err != nil {
+		return nil, err
+	} else if lminfo == nil {
+		return nil, fmt.Errorf("license local peer data is empty")
+	}
+	return lminfo, err
+}
+
+var callLicense = func(ctx context.Context, p *splunkProvisioner) (*[]licensemodel.License, error) {
+	lminfo, err := p.licensegateway.GetLicense(ctx)
+	if err != nil {
+		return nil, err
+	} else if lminfo == nil {
+		return nil, fmt.Errorf("license data is empty")
+	}
+	return lminfo, err
+}
+
+// GetLicenseLocalPeer Access license local peer details.
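+// Each reported peer is mapped onto a metav1.Condition so the caller can
+// surface license state on the owning custom resource's status.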
+func (p *splunkProvisioner) GetLicenseLocalPeer(ctx context.Context, conditions *[]metav1.Condition) (result provmodel.Result, err error) {
+	peerlistptr, err := callLicenseLocalPeer(ctx, p)
+	if err != nil {
+		return result, err
+	}
+	peerlist := *peerlistptr
+	for _, peer := range peerlist {
+		condition := metav1.Condition{
+			Type:    peer.Label,
+			Message: fmt.Sprintf("%s in site %s is %s ", peer.Label, peer.Site, peer.Status),
+			Reason:  peer.Site,
+		}
+		if peer.Status == "Up" {
+			condition.Status = metav1.ConditionTrue
+		} else {
+			condition.Status = metav1.ConditionFalse
+		}
+		// set condition to existing conditions list
+		meta.SetStatusCondition(conditions, condition)
+	}
+	return result, err
+}
+
+// GetLicenseStatus Access license status details.
+func (p *splunkProvisioner) GetLicenseStatus(ctx context.Context, conditions *[]metav1.Condition) (result provmodel.Result, err error) {
+	lslistptr, err := callLicense(ctx, p)
+	if err != nil {
+		return result, err
+	}
+	lslist := *lslistptr
+	for _, peer := range lslist {
+		condition := metav1.Condition{
+			Type:    peer.GroupId,
+			Message: fmt.Sprintf("%s license %s is %s ", peer.Type, peer.Guid, peer.Status),
+			Reason:  peer.SubGroupId,
+		}
+		if peer.Status == "VALID" {
+			condition.Status = metav1.ConditionTrue
+		} else {
+			condition.Status = metav1.ConditionFalse
+		}
+		// set condition to existing conditions list
+		meta.SetStatusCondition(conditions, condition)
+	}
+	return result, err
+}
diff --git a/pkg/provisioner/splunk/implementation/license_test.go b/pkg/provisioner/splunk/implementation/license_test.go
new file mode 100644
index 000000000..b2ca49306
--- /dev/null
+++ b/pkg/provisioner/splunk/implementation/license_test.go
@@ -0,0 +1,49 @@
+package impl
+
+import (
+	"context"
+	"testing"
+
+	//splunkmodel "github.com/splunk/splunk-operator/pkg/gateway/splunk/model"
+	//licensemodel "github.com/splunk/splunk-operator/pkg/gateway/splunk/model/services/license"
+	//provisioner "github.com/splunk/splunk-operator/pkg/provisioner/splunk"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+func TestGetLicense(t *testing.T) {
+	provisioner := setCreds(t)
+	conditions := &[]metav1.Condition{}
+
+	ctx := context.TODO()
+
+	_, err := provisioner.GetLicenseStatus(ctx, conditions)
+	if err != nil {
+		t.Errorf("fixture: error in get license status %v", err)
+	}
+	if conditions == nil || len(*conditions) == 0 {
+		t.Errorf("fixture: conditions not set for license manager %v", err)
+	}
+}
+
+func TestGetLicenseLocalPeer(t *testing.T) {
+	provisioner := setCreds(t)
+	conditions := &[]metav1.Condition{}
+
+	ctx := context.TODO()
+
+	_, err := provisioner.GetLicenseLocalPeer(ctx, conditions)
+	if err != nil {
+		t.Errorf("fixture: error in get license local peer %v", err)
+	}
+	if conditions == nil || len(*conditions) == 0 {
+		t.Errorf("fixture: conditions not set for license manager %v", err)
+	}
+}
diff --git a/pkg/provisioner/splunk/implementation/splunk.go b/pkg/provisioner/splunk/implementation/splunk.go
new file mode 100644
index 000000000..ead2bea25
--- /dev/null
+++ b/pkg/provisioner/splunk/implementation/splunk.go
@@ -0,0 +1,174 @@
+package impl
+
+import (
+	"context"
+	"fmt"
+	"strings"
+
+	"github.com/go-logr/logr"
+	licensegateway "github.com/splunk/splunk-operator/pkg/gateway/splunk/license-manager"
+	splunkmodel "github.com/splunk/splunk-operator/pkg/gateway/splunk/model"
+	managermodel "github.com/splunk/splunk-operator/pkg/gateway/splunk/model/services/cluster/manager"
+	gateway "github.com/splunk/splunk-operator/pkg/gateway/splunk/services"
+	provmodel "github.com/splunk/splunk-operator/pkg/provisioner/splunk/model"
+	model "github.com/splunk/splunk-operator/pkg/splunk/model"
+	"k8s.io/apimachinery/pkg/api/meta"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// splunkProvisioner implements the provisioner.Provisioner interface
+// and uses provisioner to manage the host.
+type splunkProvisioner struct {
+	// a logger configured for this host
+	log logr.Logger
+	// a debug logger configured for this host
+	debugLog logr.Logger
+	// an event publisher for recording significant events
+	publisher model.EventPublisher
+	// credentials
+	credentials *splunkmodel.SplunkCredentials
+	// gateway for talking to splunk
+	gateway gateway.Gateway
+	// splunk license gateway
+	licensegateway licensegateway.Gateway
+}
+
+var callGetClusterManagerInfo = func(ctx context.Context, p *splunkProvisioner) (*[]managermodel.ClusterManagerInfoContent, error) {
+	cminfo, err := p.gateway.GetClusterManagerInfo(ctx)
+	if err != nil {
+		return nil, err
+	} else if cminfo == nil {
+		return nil, fmt.Errorf("cluster manager info data is empty")
+	}
+	return cminfo, err
+}
+
+var callGetClusterManagerHealth = func(ctx context.Context, p *splunkProvisioner) (*[]managermodel.ClusterManagerHealthContent, error) {
+	healthList, err := p.gateway.GetClusterManagerHealth(ctx)
+	if err != nil {
+		return nil, err
+	} else if healthList == nil {
+		return nil, fmt.Errorf("health data is empty")
+	}
+	return healthList, err
+}
+
+var callGetClusterManagerStatus = func(ctx context.Context, p *splunkProvisioner) (*[]managermodel.ClusterManagerStatusContent, error) {
+	statuslist, err := p.gateway.GetClusterManagerStatus(ctx)
+	if err != nil {
+		return nil, err
+	} else if statuslist == nil {
+		return nil, fmt.Errorf("status list is empty")
+	}
+	return statuslist, err
+}
+
+var callGetClusterManagerPeersStatus = func(ctx context.Context, p *splunkProvisioner) (*[]managermodel.ClusterManagerPeerContent, error) {
+	peerlist, err := p.gateway.GetClusterManagerPeers(ctx)
+	if err != nil {
+		return nil, err
+	} else if peerlist == nil {
+		return nil, fmt.Errorf("peer list is empty")
+	}
+	return peerlist, err
+}
+
+var callGetClusterManagerSitesStatus = func(ctx context.Context, p *splunkProvisioner) (*[]managermodel.ClusterManagerSiteContent, error) {
+	sitelist, err := p.gateway.GetClusterManagerSites(ctx)
+	if err != nil {
+		return nil, err
+	} else if sitelist == nil {
+		return nil, fmt.Errorf("site list is empty")
+	}
+	return sitelist, err
+}
+
+// GetClusterManagerStatus Access cluster manager status and record it on the CR as conditions.
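+// Three families of conditions are recorded: one condition per peer (Up or
+// not), an aggregate Multisite condition when the cluster is multisite, and a
+// Health condition driven by the AllPeersAreUp flag.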
+func (p *splunkProvisioner) GetClusterManagerStatus(ctx context.Context, conditions *[]metav1.Condition) (result provmodel.Result, err error) {
+
+	peerlistptr, err := callGetClusterManagerPeersStatus(ctx, p)
+	if err != nil {
+		return result, err
+	}
+	peerlist := *peerlistptr
+	for _, peer := range peerlist {
+		condition := metav1.Condition{
+			Type:    peer.Label,
+			Message: fmt.Sprintf("%s in site %s is %s ", peer.Label, peer.Site, peer.Status),
+			Reason:  peer.Site,
+		}
+		if peer.Status == "Up" {
+			condition.Status = metav1.ConditionTrue
+		} else {
+			condition.Status = metav1.ConditionFalse
+		}
+		// set condition to existing conditions list
+		meta.SetStatusCondition(conditions, condition)
+	}
+
+	cminfolistptr, err := callGetClusterManagerInfo(ctx, p)
+	if err != nil {
+		return result, err
+	}
+	cminfolist := *cminfolistptr
+	if len(cminfolist) > 0 && cminfolist[0].Multisite {
+		var site string
+		multiSiteStatus := metav1.ConditionTrue
+		message := "multisite is up"
+		for _, peer := range peerlist {
+			if !strings.Contains(peer.Status, "Up") {
+				site = peer.Site
+				multiSiteStatus = metav1.ConditionFalse
+				message = fmt.Sprintf("site %s with label %s status is %s", peer.Site, peer.Label, peer.Status)
+				break
+			}
+		}
+		// set condition to existing conditions list
+		condition := metav1.Condition{
+			Type:    "Multisite",
+			Message: message,
+			Reason:  site,
+			Status:  multiSiteStatus,
+		}
+		meta.SetStatusCondition(conditions, condition)
+	}
+
+	healthList, err := callGetClusterManagerHealth(ctx, p)
+	if err != nil {
+		return result, err
+	}
+	hllist := *healthList
+	// prepare fields for conditions
+	for _, health := range hllist {
+		condition := metav1.Condition{
+			Type:    "Health",
+			Message: "all the peers of indexer cluster status",
+			Reason:  "PeersStatus",
+		}
+		if health.AllPeersAreUp == "1" {
+			condition.Status = metav1.ConditionTrue
+		} else {
+			condition.Status = metav1.ConditionFalse
+		}
+		// set condition to existing conditions list
+		meta.SetStatusCondition(conditions, condition)
+	}
+	result.Dirty = true
+	return result, err
+}
+
+// CheckClusterManagerHealth checks the cluster manager health (currently a no-op).
+func (p *splunkProvisioner) CheckClusterManagerHealth(ctx context.Context) (result provmodel.Result, err error) {
+	return result, nil
+}
+
+// SetClusterInMaintenanceMode places the cluster in or takes it out of maintenance mode.
+func (p *splunkProvisioner) SetClusterInMaintenanceMode(ctx context.Context, mode bool) error {
+	return p.gateway.SetClusterInMaintenanceMode(ctx, mode)
+}
+
+// IsClusterInMaintenanceMode reports whether the cluster is in maintenance mode.
+func (p *splunkProvisioner) IsClusterInMaintenanceMode(ctx context.Context) (bool, error) {
+	return p.gateway.IsClusterInMaintenanceMode(ctx)
+}
diff --git a/pkg/provisioner/splunk/implementation/splunk_test.go b/pkg/provisioner/splunk/implementation/splunk_test.go
new file mode 100644
index 000000000..e6a7ef6b5
--- /dev/null
+++ b/pkg/provisioner/splunk/implementation/splunk_test.go
@@ -0,0 +1,101 @@
+package impl
+
+import (
+	"context"
+	"testing"
+
+	splunkmodel "github.com/splunk/splunk-operator/pkg/gateway/splunk/model"
+	managermodel "github.com/splunk/splunk-operator/pkg/gateway/splunk/model/services/cluster/manager"
+	provisioner "github.com/splunk/splunk-operator/pkg/provisioner/splunk"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+//var log = logz.New().WithName("provisioner").WithName("fixture")
+
+func setCreds(t *testing.T) provisioner.Provisioner {
+	ctx := context.TODO()
+	sad := &splunkmodel.SplunkCredentials{
+		Address:                        "splunk-cm-cluster-master-service",
+		Port:                           8089,
+		ServicesNamespace:              "",
+		Namespace:                      "default",
+		User:                           "admin",
+		App:                            "",
+		CredentialsName:                "admin: abcdefghijklmnopqrstuvwxyz",
+		TrustedCAFile:                  "",
+		ClientCertificateFile:          "",
+		ClientPrivateKeyFile:           "",
+		DisableCertificateVerification: true,
+	}
+	publisher := func(ctx context.Context, eventType, reason, message string) {}
+	sp := NewProvisionerFactory(true)
+	provisioner, err := sp.NewProvisioner(ctx, sad, publisher)
+	if err != nil {
+		return nil
+	}
+	return provisioner
+}
+
+func TestGetClusterManagerStatus(t *testing.T) {
+	callGetClusterManagerHealth = func(ctx context.Context, p *splunkProvisioner) (*[]managermodel.ClusterManagerHealthContent, error) {
+		healthData := []managermodel.ClusterManagerHealthContent{}
+		return &healthData, nil
+	}
+	provisioner := setCreds(t)
+	conditions := &[]metav1.Condition{}
+
+	ctx := context.TODO()
+
+	_, err := provisioner.GetClusterManagerStatus(ctx, conditions)
+	if err != nil {
+		t.Errorf("fixture: error in get cluster manager status %v", err)
+	}
+}
+
+func TestGetClusterManagerMultiSiteStatus(t *testing.T) {
+	callGetClusterManagerHealth = func(ctx context.Context, p *splunkProvisioner) (*[]managermodel.ClusterManagerHealthContent, error) {
+		healthData := []managermodel.ClusterManagerHealthContent{
+			{
+				AllPeersAreUp: "1",
+			},
+			{
+				AllPeersAreUp: "0",
+			},
+		}
+		return &healthData, nil
+	}
+
+	callGetClusterManagerInfo = func(ctx context.Context, p *splunkProvisioner) (*[]managermodel.ClusterManagerInfoContent, error) {
+		cminfo := &[]managermodel.ClusterManagerInfoContent{
+			{
+				Multisite: true,
+			},
+		}
+		return cminfo, nil
+	}
+
+	callGetClusterManagerPeersStatus = func(ctx context.Context, p *splunkProvisioner) (*[]managermodel.ClusterManagerPeerContent, error) {
+		peerlist := &[]managermodel.ClusterManagerPeerContent{
+			{
+				Site:   "1",
+				Label:  "site1",
+				Status: "Up",
+			},
+			{
+				Site:   "2",
+				Label:  "site1",
+				Status: "down",
+			},
+		}
+		return peerlist, nil
+	}
+	provisioner := setCreds(t)
+	conditions := &[]metav1.Condition{}
+
+	ctx := context.TODO()
+
+	_, err := provisioner.GetClusterManagerStatus(ctx, conditions)
+	if err != nil {
+		t.Errorf("fixture: error in get cluster manager status %v", err)
+	}
+}
diff --git a/pkg/provisioner/splunk/model/types.go b/pkg/provisioner/splunk/model/types.go
new file mode 100644
index 000000000..504948b22
--- /dev/null
+++ b/pkg/provisioner/splunk/model/types.go
@@ -0,0 +1,15 @@
+package model
+
+import "time"
+
+// Result holds the response from a call in the Provisioner API.
+type Result struct {
+	// Dirty indicates whether the splunk object needs to be saved.
+	Dirty bool
+	// RequeueAfter indicates how long to wait before making the same
+	// Provisioner call again. The request should only be requeued if
+	// Dirty is also true.
+	RequeueAfter time.Duration
+	// Any error message produced by the provisioner.
+	ErrorMessage string
+}
diff --git a/pkg/provisioner/splunk/provisioner.go b/pkg/provisioner/splunk/provisioner.go
new file mode 100644
index 000000000..25b64dff5
--- /dev/null
+++ b/pkg/provisioner/splunk/provisioner.go
@@ -0,0 +1,37 @@
+package indexer
+
+import (
+	"context"
+
+	splunkmodel "github.com/splunk/splunk-operator/pkg/gateway/splunk/model"
+	provmodel "github.com/splunk/splunk-operator/pkg/provisioner/splunk/model"
+	model "github.com/splunk/splunk-operator/pkg/splunk/model"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// Factory is the interface for creating new Provisioner objects.
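+//
+// A typical construction, mirroring the tests in this patch (the ctx, sad,
+// and publisher arguments are illustrative):
+//
+//	factory := impl.NewProvisionerFactory(false)
+//	prov, err := factory.NewProvisioner(ctx, sad, publisher)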
+type Factory interface {
+	NewProvisioner(ctx context.Context, sad *splunkmodel.SplunkCredentials, publisher model.EventPublisher) (Provisioner, error)
+}
+
+// Provisioner holds the state information for talking to
+// splunk provisioner backend.
+type Provisioner interface {
+
+	// GetClusterManagerStatus gets the cluster manager status and records it as conditions
+	GetClusterManagerStatus(ctx context.Context, conditions *[]metav1.Condition) (result provmodel.Result, err error)
+
+	// CheckClusterManagerHealth checks the cluster manager health
+	CheckClusterManagerHealth(ctx context.Context) (result provmodel.Result, err error)
+
+	// SetClusterInMaintenanceMode places the cluster in or takes it out of maintenance mode
+	SetClusterInMaintenanceMode(ctx context.Context, mode bool) error
+
+	// IsClusterInMaintenanceMode reports whether the cluster is in maintenance mode
+	IsClusterInMaintenanceMode(ctx context.Context) (bool, error)
+
+	// GetLicenseLocalPeer gets the license local peer details and records them as conditions
+	GetLicenseLocalPeer(ctx context.Context, conditions *[]metav1.Condition) (result provmodel.Result, err error)
+
+	// GetLicenseStatus gets the license status and records it as conditions
+	GetLicenseStatus(ctx context.Context, conditions *[]metav1.Condition) (result provmodel.Result, err error)
+}
diff --git a/pkg/splunk/enterprise/clustermanager.go b/pkg/splunk/enterprise/clustermanager.go
index fad4c23b8..4cce09c1c 100644
--- a/pkg/splunk/enterprise/clustermanager.go
+++ b/pkg/splunk/enterprise/clustermanager.go
@@ -26,22 +26,37 @@ import (
 	rclient "sigs.k8s.io/controller-runtime/pkg/client"
 
 	"github.com/go-logr/logr"
+	provisioner "github.com/splunk/splunk-operator/pkg/provisioner/splunk"
+	provmodel "github.com/splunk/splunk-operator/pkg/provisioner/splunk/model"
 	splclient "github.com/splunk/splunk-operator/pkg/splunk/client"
 	splcommon "github.com/splunk/splunk-operator/pkg/splunk/common"
 	splctrl "github.com/splunk/splunk-operator/pkg/splunk/controller"
+	model "github.com/splunk/splunk-operator/pkg/splunk/model"
 	splutil "github.com/splunk/splunk-operator/pkg/splunk/util"
 	appsv1 "k8s.io/api/apps/v1"
 	corev1 "k8s.io/api/core/v1"
 	k8serrors "k8s.io/apimachinery/pkg/api/errors"
-	"k8s.io/apimachinery/pkg/types"
 	"sigs.k8s.io/controller-runtime/pkg/log"
 	"sigs.k8s.io/controller-runtime/pkg/reconcile"
 )
 
-// ApplyClusterManager reconciles the state of a Splunk Enterprise cluster manager.
-func ApplyClusterManager(ctx context.Context, client splcommon.ControllerClient, cr *enterpriseApi.ClusterManager) (reconcile.Result, error) {
+type splunkManager struct {
+	// a logger configured for this host
+	log logr.Logger
+	// a debug logger configured for this host
+	debugLog logr.Logger
+	// an event publisher for recording significant events
+	publisher model.EventPublisher
+	// provisioner for managing the splunk instance
+	provisioner provisioner.Provisioner
+	// client
+	client splcommon.ControllerClient
+}
+
+// ApplyClusterManager reconciles the state of a Splunk Enterprise cluster manager.
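+// In addition to the original reconcile flow, it now syncs maintenance mode
+// from the clustermanager.enterprise.splunk.com/maintenance annotation and
+// records provisioner observations as status Conditions.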
+func (p *splunkManager) ApplyClusterManager(ctx context.Context, client splcommon.ControllerClient, cr *enterpriseApi.ClusterManager) (reconcile.Result, error) {
 	// unless modified, reconcile for this object will be requeued after 5 seconds
 	result := reconcile.Result{
 		Requeue: true,
@@ -233,12 +248,23 @@ func ApplyClusterManager(ctx context.Context, client splcommon.ControllerClient,
 		finalResult := handleAppFrameworkActivity(ctx, client, cr, &cr.Status.AppContext, &cr.Spec.AppFrameworkConfig)
 		result = *finalResult
 
+		// Reconcile maintenance mode; any error is recorded in the CR status.
+		p.ReconcileClusterManagerMaintenanceMode(ctx, client, cr)
+
+		// Verify the splunk instance and update the CR status.
+		// Observations are recorded as status Conditions.
+		provResult := provmodel.Result{}
+		provResult, err = p.provisioner.GetClusterManagerStatus(ctx, &cr.Status.Conditions)
+		if err != nil {
+			cr.Status.ErrorMessage = provResult.ErrorMessage
+		}
+
 		// trigger MonitoringConsole reconcile by changing the splunk/image-tag annotation
 		err = changeMonitoringConsoleAnnotations(ctx, client, cr)
 		if err != nil {
 			return result, err
 		}
 	}
+
 	// RequeueAfter if greater than 0, tells the Controller to requeue the reconcile key after the Duration.
 	// Implies that Requeue is true, there is no need to set Requeue to true at the same time as RequeueAfter.
 	if !result.Requeue {
@@ -248,6 +274,46 @@ func ApplyClusterManager(ctx context.Context, client splcommon.ControllerClient,
 	return result, nil
 }
 
+// ReconcileClusterManagerMaintenanceMode keeps the cluster's maintenance mode
+// in sync with the maintenance annotation on the ClusterManager CR.
+func (p *splunkManager) ReconcileClusterManagerMaintenanceMode(ctx context.Context, client splcommon.ControllerClient, cr *enterpriseApi.ClusterManager) (reconcile.Result, error) {
+	var result reconcile.Result
+	var err error
+	var response bool
+	response, err = p.provisioner.IsClusterInMaintenanceMode(ctx)
+	if err != nil {
+		cr.Status.ErrorMessage = err.Error()
+		return result, err
+	}
+
+	// Check if the user is asking to move the cluster to maintenance mode
+	cr.Status.MaintenanceMode = response
+	annotations := cr.GetAnnotations()
+	if annotations != nil {
+		if _, ok := annotations[enterpriseApi.ClusterManagerMaintenanceAnnotation]; ok {
+			if response {
+				// the cluster is already in maintenance mode, nothing to do
+				return result, nil
+			}
+			// place cluster manager in maintenance mode
+			err = p.provisioner.SetClusterInMaintenanceMode(ctx, true)
+			if err != nil {
+				cr.Status.ErrorMessage = err.Error()
+				return result, err
+			}
+			cr.Status.MaintenanceMode = true
+		} else if response {
+			// the cluster manager is in maintenance mode and the annotation is
+			// not set, so take it out of maintenance mode
+			err = p.provisioner.SetClusterInMaintenanceMode(ctx, false)
+			if err != nil {
+				cr.Status.ErrorMessage = err.Error()
+				return result, err
+			}
+			cr.Status.MaintenanceMode = false
+		}
+	}
+	return result, err
+}
+
 // clusterManagerPodManager is used to manage the cluster manager pod
 type clusterManagerPodManager struct {
 	log logr.Logger
diff --git a/pkg/splunk/enterprise/clustermanager_test.go b/pkg/splunk/enterprise/clustermanager_test.go
index 01be64670..4720a740b 100644
--- a/pkg/splunk/enterprise/clustermanager_test.go
+++ b/pkg/splunk/enterprise/clustermanager_test.go
@@ -27,6 +27,7 @@ import (
 	"testing"
 	"time"
 
+	"github.com/jinzhu/copier"
 	enterpriseApi "github.com/splunk/splunk-operator/api/v4"
 
 	appsv1 "k8s.io/api/apps/v1"
@@ -37,14 +38,40 @@ import (
 	clientgoscheme "k8s.io/client-go/kubernetes/scheme"
 	runtime "sigs.k8s.io/controller-runtime/pkg/client"
 	"sigs.k8s.io/controller-runtime/pkg/client/fake"
+	"sigs.k8s.io/controller-runtime/pkg/log"
 
+	manager "github.com/splunk/splunk-operator/pkg/splunk"
 	splclient 
"github.com/splunk/splunk-operator/pkg/splunk/client" splcommon "github.com/splunk/splunk-operator/pkg/splunk/common" splctrl "github.com/splunk/splunk-operator/pkg/splunk/controller" + managermodel "github.com/splunk/splunk-operator/pkg/splunk/model" spltest "github.com/splunk/splunk-operator/pkg/splunk/test" splutil "github.com/splunk/splunk-operator/pkg/splunk/util" + //splunkmodel "github.com/splunk/splunk-operator/pkg/gateway/splunk/model" ) +func setCreds(t *testing.T, c splcommon.ControllerClient, cr splcommon.MetaObject, spec enterpriseApi.CommonSplunkSpec) manager.SplunkManager { + ctx := context.TODO() + clusterManager := enterpriseApi.ClusterManager{} + clusterManager.Name = "test" + info := &managermodel.ReconcileInfo{ + Kind: cr.GroupVersionKind().Kind, + CommonSpec: spec, + Client: c, + Log: log.Log, + Namespace: cr.GetNamespace(), + Name: cr.GetName(), + } + copier.Copy(info.MetaObject, cr) + publisher := func(ctx context.Context, eventType, reason, message string) {} + mg := NewManagerFactory(true) + manager, err := mg.NewManager(ctx, info, publisher) + if err != nil { + return nil + } + return manager +} + func TestApplyClusterManager(t *testing.T) { // redefining cpmakeTar to return nil always @@ -117,7 +144,8 @@ func TestApplyClusterManager(t *testing.T) { revised := current.DeepCopy() revised.Spec.Image = "splunk/test" reconcile := func(c *spltest.MockClient, cr interface{}) error { - _, err := ApplyClusterManager(ctx, c, cr.(*enterpriseApi.ClusterManager)) + manager := setCreds(t, c, cr.(*enterpriseApi.ClusterManager), cr.(*enterpriseApi.ClusterManager).Spec.CommonSplunkSpec) + _, err := manager.ApplyClusterManager(ctx, c, cr.(*enterpriseApi.ClusterManager)) return err } spltest.ReconcileTesterWithoutRedundantCheck(t, "TestApplyClusterManager", ¤t, revised, createCalls, updateCalls, reconcile, true) @@ -127,7 +155,9 @@ func TestApplyClusterManager(t *testing.T) { revised.ObjectMeta.DeletionTimestamp = ¤tTime revised.ObjectMeta.Finalizers = []string{"enterprise.splunk.com/delete-pvc"} deleteFunc := func(cr splcommon.MetaObject, c splcommon.ControllerClient) (bool, error) { - _, err := ApplyClusterManager(ctx, c, cr.(*enterpriseApi.ClusterManager)) + manager := setCreds(t, c, ¤t, current.Spec.CommonSplunkSpec) + _, err := manager.ApplyClusterManager(ctx, c, cr.(*enterpriseApi.ClusterManager)) + //_, err := ApplyClusterManager(ctx, c, cr.(*enterpriseApi.ClusterManager)) return true, err } splunkDeletionTester(t, revised, deleteFunc) @@ -138,7 +168,9 @@ func TestApplyClusterManager(t *testing.T) { } c := spltest.NewMockClient() _ = errors.New(splcommon.Rerr) - _, err := ApplyClusterManager(ctx, c, ¤t) + + manager := setCreds(t, c, ¤t, current.Spec.CommonSplunkSpec) + _, err := manager.ApplyClusterManager(ctx, c, ¤t) if err == nil { t.Errorf("Expected error") } @@ -204,7 +236,7 @@ func TestApplyClusterManager(t *testing.T) { }, } - _, err = ApplyClusterManager(ctx, c, ¤t) + _, err = manager.ApplyClusterManager(ctx, c, ¤t) if err == nil { t.Errorf("Expected error") } @@ -220,7 +252,7 @@ func TestApplyClusterManager(t *testing.T) { current.Spec.SmartStore.VolList[0].SecretRef = "s3-secret" current.Status.SmartStore.VolList[0].SecretRef = "s3-secret" current.Status.ResourceRevMap["s3-secret"] = "v2" - _, err = ApplyClusterManager(ctx, c, ¤t) + _, err = manager.ApplyClusterManager(ctx, c, ¤t) if err == nil { t.Errorf("Expected error") } @@ -234,7 +266,8 @@ func TestApplyClusterManager(t *testing.T) { c.Create(ctx, &cmap) current.Spec.SmartStore.VolList[0].SecretRef = "" 
 	current.Spec.SmartStore.Defaults.IndexAndGlobalCommonSpec.VolName = "msos_s2s3_vol"
-	_, err = ApplyClusterManager(ctx, c, &current)
+	manager = setCreds(t, c, &current, current.Spec.CommonSplunkSpec)
+	_, err = manager.ApplyClusterManager(ctx, c, &current)
 	if err != nil {
 		t.Errorf("Don't expected error here")
 	}
@@ -290,7 +323,7 @@ func TestApplyClusterManager(t *testing.T) {
 			},
 		},
 	}
-	_, err = ApplyClusterManager(ctx, c, &current)
+	_, err = manager.ApplyClusterManager(ctx, c, &current)
 	if err == nil {
 		t.Errorf("Expected error")
 	}
@@ -307,7 +340,7 @@ func TestApplyClusterManager(t *testing.T) {
 	}
 	rerr := errors.New(splcommon.Rerr)
 	c.InduceErrorKind[splcommon.MockClientInduceErrorGet] = rerr
-	_, err = ApplyClusterManager(ctx, c, &current)
+	_, err = manager.ApplyClusterManager(ctx, c, &current)
 	if err == nil {
 		t.Errorf("Expected error")
 	}
@@ -583,7 +616,8 @@ func TestApplyClusterManagerWithSmartstore(t *testing.T) {
 	}
 
 	// Without S3 keys, ApplyClusterManager should fail
-	_, err := ApplyClusterManager(ctx, client, &current)
+	manager := setCreds(t, client, &current, current.Spec.CommonSplunkSpec)
+	_, err := manager.ApplyClusterManager(ctx, client, &current)
 	if err == nil {
 		t.Errorf("ApplyClusterManager should fail without S3 secrets configured")
 	}
@@ -612,7 +646,8 @@ func TestApplyClusterManagerWithSmartstore(t *testing.T) {
 	revised := current.DeepCopy()
 	revised.Spec.Image = "splunk/test"
 	reconcile := func(c *spltest.MockClient, cr interface{}) error {
-		_, err := ApplyClusterManager(context.Background(), c, cr.(*enterpriseApi.ClusterManager))
+		manager := setCreds(t, c, cr.(*enterpriseApi.ClusterManager), current.Spec.CommonSplunkSpec)
+		_, err := manager.ApplyClusterManager(ctx, c, cr.(*enterpriseApi.ClusterManager))
 		return err
 	}
 
@@ -639,12 +674,12 @@ func TestApplyClusterManagerWithSmartstore(t *testing.T) {
 	spltest.ReconcileTesterWithoutRedundantCheck(t, "TestApplyClusterManagerWithSmartstore-0", &current, revised, createCalls, updateCalls, reconcile, true, secret, &smartstoreConfigMap, ss, pod)
 
 	current.Status.BundlePushTracker.NeedToPushManagerApps = true
-	if _, err = ApplyClusterManager(context.Background(), client, &current); err != nil {
+	if _, err = manager.ApplyClusterManager(ctx, client, &current); err != nil {
 		t.Errorf("ApplyClusterManager() should not have returned error")
 	}
 
 	current.Spec.CommonSplunkSpec.EtcVolumeStorageConfig.StorageCapacity = "-abcd"
-	if _, err := ApplyClusterManager(context.Background(), client, &current); err == nil {
+	if _, err = manager.ApplyClusterManager(ctx, client, &current); err == nil {
 		t.Errorf("ApplyClusterManager() should have returned error")
 	}
@@ -654,7 +689,7 @@ func TestApplyClusterManagerWithSmartstore(t *testing.T) {
 	ss.Spec.Replicas = &replicas
 	ss.Spec.Template.Spec.Containers[0].Image = "splunk/splunk"
 	client.AddObject(ss)
-	if result, err := ApplyClusterManager(context.Background(), client, &current); err == nil && !result.Requeue {
+	if result, err := manager.ApplyClusterManager(ctx, client, &current); err == nil && !result.Requeue {
 		t.Errorf("ApplyClusterManager() should have returned error or result.requeue should have been false")
 	}
@@ -664,7 +699,7 @@ func TestApplyClusterManagerWithSmartstore(t *testing.T) {
 	client.AddObjects(objects)
 	current.Spec.CommonSplunkSpec.Mock = false
-	if _, err := ApplyClusterManager(context.Background(), client, &current); err == nil {
+	if _, err = manager.ApplyClusterManager(ctx, client, &current); err == nil {
 		t.Errorf("ApplyClusterManager() should have returned error")
 	}
 }
@@ -866,7 +901,8 @@ func TestAppFrameworkApplyClusterManagerShouldNotFail(t *testing.T) {
 		t.Errorf(err.Error())
 	}
 
-	_, err = ApplyClusterManager(context.Background(), client, &cm)
+	manager := setCreds(t, client, &cm, cm.Spec.CommonSplunkSpec)
+	_, err = manager.ApplyClusterManager(ctx, client, &cm)
 	if err != nil {
 		t.Errorf("ApplyClusterManager should not have returned error here.")
 	}
@@ -961,7 +997,8 @@ func TestApplyCLusterManagerDeletion(t *testing.T) {
 		t.Errorf("Unable to create download directory for apps :%s", splcommon.AppDownloadVolume)
 	}
 
-	_, err = ApplyClusterManager(ctx, c, &cm)
+	manager := setCreds(t, c, &cm, cm.Spec.CommonSplunkSpec)
+	_, err = manager.ApplyClusterManager(ctx, c, &cm)
 	if err != nil {
 		t.Errorf("ApplyClusterManager should not have returned error here.")
 	}
@@ -1417,7 +1454,8 @@ func TestIsClusterManagerReadyForUpgrade(t *testing.T) {
 	}
 
 	err := client.Create(ctx, &lm)
-	_, err = ApplyLicenseManager(ctx, client, &lm)
+	manager := setCreds(t, client, &lm, lm.Spec.CommonSplunkSpec)
+	_, err = manager.ApplyLicenseManager(ctx, client, &lm)
 	if err != nil {
 		t.Errorf("applyLicenseManager should not have returned error; err=%v", err)
 	}
@@ -1449,14 +1487,16 @@ func TestIsClusterManagerReadyForUpgrade(t *testing.T) {
 	}
 
 	err = client.Create(ctx, &cm)
-	_, err = ApplyClusterManager(ctx, client, &cm)
+	manager = setCreds(t, client, &lm, lm.Spec.CommonSplunkSpec)
+	_, err = manager.ApplyClusterManager(ctx, client, &cm)
 	if err != nil {
 		t.Errorf("applyClusterManager should not have returned error; err=%v", err)
 	}
 
 	cm.Spec.Image = "splunk2"
 	lm.Spec.Image = "splunk2"
-	_, err = ApplyLicenseManager(ctx, client, &lm)
+	manager = setCreds(t, client, &lm, lm.Spec.CommonSplunkSpec)
+	_, err = manager.ApplyLicenseManager(ctx, client, &lm)
 
 	clusterManager := &enterpriseApi.ClusterManager{}
 	namespacedName := types.NamespacedName{
@@ -1523,7 +1563,8 @@ func TestChangeClusterManagerAnnotations(t *testing.T) {
 
 	// Create the instances
 	client.Create(ctx, lm)
-	_, err := ApplyLicenseManager(ctx, client, lm)
+	manager := setCreds(t, client, lm, lm.Spec.CommonSplunkSpec)
+	_, err := manager.ApplyLicenseManager(ctx, client, lm)
 	if err != nil {
 		t.Errorf("applyLicenseManager should not have returned error; err=%v", err)
 	}
@@ -1534,7 +1575,8 @@ func TestChangeClusterManagerAnnotations(t *testing.T) {
 		debug.PrintStack()
 	}
 	client.Create(ctx, cm)
-	_, err = ApplyClusterManager(ctx, client, cm)
+	manager = setCreds(t, client, cm, cm.Spec.CommonSplunkSpec)
+	_, err = manager.ApplyClusterManager(ctx, client, cm)
 	if err != nil {
 		t.Errorf("applyClusterManager should not have returned error; err=%v", err)
 	}
@@ -1675,7 +1717,8 @@ func TestClusterManagerWitReadyState(t *testing.T) {
 
 	// simulate create clustermanager instance before reconcilation
 	c.Create(ctx, clustermanager)
-	_, err := ApplyClusterManager(ctx, c, clustermanager)
+	manager := setCreds(t, c, clustermanager, clustermanager.Spec.CommonSplunkSpec)
+	_, err := manager.ApplyClusterManager(ctx, c, clustermanager)
 	if err != nil {
 		t.Errorf("Unexpected error while running reconciliation for clustermanager with app framework %v", err)
 		debug.PrintStack()
 	}
@@ -1711,7 +1754,7 @@ func TestClusterManagerWitReadyState(t *testing.T) {
 	}
 
 	// call reconciliation
-	_, err = ApplyClusterManager(ctx, c, clustermanager)
+	_, err = manager.ApplyClusterManager(ctx, c, clustermanager)
 	if err != nil {
 		t.Errorf("Unexpected error while running reconciliation for cluster manager with app framework %v", err)
 		debug.PrintStack()
 	}
@@ -1829,7 +1872,7 @@ func TestClusterManagerWitReadyState(t *testing.T) {
 	}
 
 	// call reconciliation
-	_, err = ApplyClusterManager(ctx, c, clustermanager)
+	_, err = manager.ApplyClusterManager(ctx, c, clustermanager)
 	if err != nil {
 		t.Errorf("Unexpected error while running reconciliation for cluster manager with app framework %v", err)
 		debug.PrintStack()
diff --git a/pkg/splunk/enterprise/configuration_test.go b/pkg/splunk/enterprise/configuration_test.go
index f762c19db..312f87c9d 100644
--- a/pkg/splunk/enterprise/configuration_test.go
+++ b/pkg/splunk/enterprise/configuration_test.go
@@ -232,7 +232,8 @@ func TestSmartstoreApplyClusterManagerFailsOnInvalidSmartStoreConfig(t *testing.
 
 	var client splcommon.ControllerClient
 
-	_, err := ApplyClusterManager(context.Background(), client, &cr)
+	manager := setCreds(t, client, &cr, cr.Spec.CommonSplunkSpec)
+	_, err := manager.ApplyClusterManager(context.Background(), client, &cr)
 	if err == nil {
 		t.Errorf("ApplyClusterManager should fail on invalid smartstore config")
 	}
diff --git a/pkg/splunk/enterprise/events.go b/pkg/splunk/enterprise/events.go
index f05917f18..222de4e3c 100644
--- a/pkg/splunk/enterprise/events.go
+++ b/pkg/splunk/enterprise/events.go
@@ -55,12 +55,17 @@ func (k *K8EventPublisher) publishEvent(ctx context.Context, eventType, reason,
 	// based on the custom resource instance type find name, type and create new event
 	switch v := k.instance.(type) {
 	case *enterpriseApi.Standalone:
-	case *enterpriseApiV3.LicenseMaster:
+		event = v.NewEvent(eventType, reason, message)
 	case *enterpriseApi.LicenseManager:
+		event = v.NewEvent(eventType, reason, message)
 	case *enterpriseApi.IndexerCluster:
+		event = v.NewEvent(eventType, reason, message)
 	case *enterpriseApi.ClusterManager:
+		event = v.NewEvent(eventType, reason, message)
 	case *enterpriseApiV3.ClusterMaster:
+		event = v.NewEvent(eventType, reason, message)
 	case *enterpriseApi.MonitoringConsole:
+		event = v.NewEvent(eventType, reason, message)
 	case *enterpriseApi.SearchHeadCluster:
 		event = v.NewEvent(eventType, reason, message)
 	default:
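The events.go hunk above is load-bearing because Go type-switch cases do not fall through: before this change, every case except SearchHeadCluster had an empty body, so publishEvent silently produced a zero-value Event for those CR types. A minimal, self-contained sketch of the gotcha (types A and B are illustrative only, not operator types):

package main

import "fmt"

type A struct{}
type B struct{}

func eventFor(v interface{}) string {
	event := "<zero value>"
	switch v.(type) {
	case *A:
		// empty case body: it matches, runs nothing, and does not fall through
	case *B:
		event = "created"
	}
	return event
}

func main() {
	fmt.Println(eventFor(&A{})) // "<zero value>": why each case needs its own NewEvent call
	fmt.Println(eventFor(&B{})) // "created"
}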
diff --git a/pkg/splunk/enterprise/factory.go b/pkg/splunk/enterprise/factory.go
new file mode 100644
index 000000000..780fe2146
--- /dev/null
+++ b/pkg/splunk/enterprise/factory.go
@@ -0,0 +1,101 @@
+package enterprise
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/go-logr/logr"
+
+	splunkmodel "github.com/splunk/splunk-operator/pkg/gateway/splunk/model"
+	provisioner "github.com/splunk/splunk-operator/pkg/provisioner/splunk"
+	splunkprovisionerimpl "github.com/splunk/splunk-operator/pkg/provisioner/splunk/implementation"
+	manager "github.com/splunk/splunk-operator/pkg/splunk"
+	splcommon "github.com/splunk/splunk-operator/pkg/splunk/common"
+	model "github.com/splunk/splunk-operator/pkg/splunk/model"
+	splutil "github.com/splunk/splunk-operator/pkg/splunk/util"
+	"sigs.k8s.io/controller-runtime/pkg/log"
+)
+
+// splunkManagerFactory builds splunkManager instances, wiring each one to a
+// provisioner that talks to the Splunk REST endpoint.
+type splunkManagerFactory struct {
+	log logr.Logger
+	// provisioner factory used to build the provisioner for each manager
+	provisionerFactory provisioner.Factory
+	runInTestMode      bool
+}
+
+// NewManagerFactory returns a new factory for creating SplunkManager instances.
+func NewManagerFactory(runInTestMode bool) manager.Factory {
+	factory := splunkManagerFactory{}
+	factory.runInTestMode = runInTestMode
+
+	err := factory.init(runInTestMode)
+	if err != nil {
+		return nil // FIXME: change the signature so this error can be returned to the caller
+	}
+	return factory
+}
+
+func (f *splunkManagerFactory) init(runInTestMode bool) error {
+	f.provisionerFactory = splunkprovisionerimpl.NewProvisionerFactory(runInTestMode)
+	return nil
+}
+
+func (f splunkManagerFactory) splunkManager(ctx context.Context, info *model.ReconcileInfo, publisher model.EventPublisher) (*splunkManager, error) {
+	reqLogger := log.FromContext(ctx)
+	f.log = reqLogger.WithName("splunkManager")
+
+	sad := &splunkmodel.SplunkCredentials{}
+	if !f.runInTestMode {
+		defaultSecretObjName := splcommon.GetNamespaceScopedSecretName(info.Namespace)
+		defaultSecret, err := splutil.GetSecretByName(ctx, info.Client, info.Namespace, info.Name, defaultSecretObjName)
+		if err != nil {
+			publisher(ctx, "Warning", "splunkManager", fmt.Sprintf("Could not access default secret object to fetch admin password. Reason %v", err))
+			return nil, fmt.Errorf("could not access default secret object to fetch admin password. Reason %v", err)
+		}
+
+		// get the admin password from the secret object
+		adminPwd, foundSecret := defaultSecret.Data["password"]
+		if !foundSecret {
+			publisher(ctx, "Warning", "splunkManager", "could not find admin password")
+			return nil, fmt.Errorf("could not find admin password")
+		}
+
+		service := getSplunkService(ctx, info.MetaObject, &info.CommonSpec, GetInstantTypeFromKind(info.Kind), false)
+
+		sad = &splunkmodel.SplunkCredentials{
+			Address:                        service.Name,
+			Port:                           8089,
+			ServicesNamespace:              "-",
+			User:                           "admin",
+			App:                            "-",
+			CredentialsName:                string(adminPwd),
+			TrustedCAFile:                  "",
+			ClientCertificateFile:          "",
+			ClientPrivateKeyFile:           "",
+			DisableCertificateVerification: true,
+			Namespace:                      info.Namespace,
+		}
+	}
+	reqLogger.Info("new splunk manager created to access rest endpoint")
+	provisioner, err := f.provisionerFactory.NewProvisioner(ctx, sad, publisher)
+	if err != nil {
+		return nil, err
+	}
+
+	mgr := &splunkManager{
+		log:         f.log,
+		debugLog:    f.log,
+		publisher:   publisher,
+		provisioner: provisioner,
+		client:      info.Client,
+	}
+
+	return mgr, nil
+}
+
+// NewManager returns a SplunkManager bound to the CR described by info.
+func (f splunkManagerFactory) NewManager(ctx context.Context, info *model.ReconcileInfo, publisher model.EventPublisher) (manager.SplunkManager, error) {
+	return f.splunkManager(ctx, info, publisher)
+}
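For reviewers, here is one plausible wiring of this factory from a controller. It is a sketch built on the types introduced in this patch, not the operator's actual controller code; the function name reconcileClusterManager and the no-op publisher are assumptions:

package example

import (
	"context"
	"fmt"

	enterpriseApi "github.com/splunk/splunk-operator/api/v4"
	splcommon "github.com/splunk/splunk-operator/pkg/splunk/common"
	"github.com/splunk/splunk-operator/pkg/splunk/enterprise"
	model "github.com/splunk/splunk-operator/pkg/splunk/model"
	"sigs.k8s.io/controller-runtime/pkg/reconcile"
)

// reconcileClusterManager shows how a caller could obtain a SplunkManager
// from the factory and delegate the reconcile to it.
func reconcileClusterManager(ctx context.Context, c splcommon.ControllerClient, cr *enterpriseApi.ClusterManager) (reconcile.Result, error) {
	factory := enterprise.NewManagerFactory(false) // false: talk to a real Splunk REST endpoint
	if factory == nil {
		// NewManagerFactory currently returns nil when init fails (see the FIXME above)
		return reconcile.Result{}, fmt.Errorf("manager factory initialization failed")
	}
	info := &model.ReconcileInfo{
		Kind:       cr.GroupVersionKind().Kind,
		MetaObject: cr,
		CommonSpec: cr.Spec.CommonSplunkSpec,
		Client:     c,
		Namespace:  cr.GetNamespace(),
		Name:       cr.GetName(),
	}
	// a real controller would forward these to a Kubernetes event recorder
	publisher := func(ctx context.Context, eventType, reason, message string) {}
	mgr, err := factory.NewManager(ctx, info, publisher)
	if err != nil {
		return reconcile.Result{}, err
	}
	return mgr.ApplyClusterManager(ctx, c, cr)
}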
diff --git a/pkg/splunk/enterprise/indexercluster.go b/pkg/splunk/enterprise/indexercluster.go
index 8ad327b38..27af9af64 100644
--- a/pkg/splunk/enterprise/indexercluster.go
+++ b/pkg/splunk/enterprise/indexercluster.go
@@ -45,7 +45,7 @@ import (
 type NewSplunkClientFunc func(managementURI, username, password string) *splclient.SplunkClient
 
 // ApplyIndexerClusterManager reconciles the state of a Splunk Enterprise indexer cluster.
-func ApplyIndexerClusterManager(ctx context.Context, client splcommon.ControllerClient, cr *enterpriseApi.IndexerCluster) (reconcile.Result, error) {
+func (p *splunkManager) ApplyIndexerClusterManager(ctx context.Context, client splcommon.ControllerClient, cr *enterpriseApi.IndexerCluster) (reconcile.Result, error) {
 
 	// unless modified, reconcile for this object will be requeued after 5 seconds
 	result := reconcile.Result{
@@ -187,6 +187,8 @@ func ApplyIndexerClusterManager(ctx context.Context, client splcommon.Controller
 			// get the pod image name
 			if v.Spec.Containers[0].Image != cr.Spec.Image {
 				// image do not match that means its image upgrade
+				eventPublisher.Normal(ctx, "version_upgrade", fmt.Sprintf("image change v.spec.Containers[0].Image=%s cr.Spec.Image=%s", v.Spec.Containers[0].Image, cr.Spec.Image))
+				scopedLog.Info("image change enabled", "v.spec.Containers[0].Image", v.Spec.Containers[0].Image, "cr.Spec.Image", cr.Spec.Image)
 				versionUpgrade = true
 				break
 			}
@@ -202,6 +204,12 @@ func ApplyIndexerClusterManager(ctx context.Context, client splcommon.Controller
 			return result, err
 		}
 	} else {
+
+		err = p.SetClusterInMaintenanceMode(ctx, client, cr, true)
+		if err != nil {
+			eventPublisher.Warning(ctx, "SetClusterInMaintenanceMode", fmt.Sprintf("Unable to enable cluster manager maintenance mode %s", err.Error()))
+			return result, err
+		}
 		// Delete the statefulset and recreate new one
 		err = client.Delete(ctx, statefulSet)
 		if err != nil {
@@ -279,6 +287,12 @@ func ApplyIndexerClusterManager(ctx context.Context, client splcommon.Controller
 			result.Requeue = true
 			return result, err
 		}
+
+		err = p.SetClusterInMaintenanceMode(ctx, client, cr, false)
+		if err != nil {
+			eventPublisher.Warning(ctx, "SetClusterInMaintenanceMode", fmt.Sprintf("Unable to disable cluster manager maintenance mode %s", err.Error()))
+			return result, err
+		}
 	}
 	// RequeueAfter if greater than 0, tells the Controller to requeue the reconcile key after the Duration.
 	// Implies that Requeue is true, there is no need to set Requeue to true at the same time as RequeueAfter.
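Taken together with ReconcileClusterManagerMaintenanceMode above, these calls form a handshake: the indexer-cluster upgrade path marks the referenced ClusterManager, and the ClusterManager reconcile turns that mark into a provisioner call. A hedged sketch of just the annotation protocol (the helper name toggleMaintenance is illustrative, not part of this patch):

package example

import (
	enterpriseApi "github.com/splunk/splunk-operator/api/v4"
)

// toggleMaintenance mirrors the annotation protocol used by this patch:
// presence of the maintenance annotation requests maintenance mode, and
// its absence requests normal operation.
func toggleMaintenance(cm *enterpriseApi.ClusterManager, enable bool) {
	annotations := cm.GetAnnotations()
	if annotations == nil {
		annotations = map[string]string{}
	}
	if enable {
		annotations[enterpriseApi.ClusterManagerMaintenanceAnnotation] = ""
	} else {
		delete(annotations, enterpriseApi.ClusterManagerMaintenanceAnnotation)
	}
	cm.SetAnnotations(annotations)
}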
@@ -288,6 +302,44 @@ func ApplyIndexerClusterManager(ctx context.Context, client splcommon.Controller
 	return result, nil
 }
 
+// SetClusterInMaintenanceMode requests (or clears) maintenance mode by
+// toggling the maintenance annotation on the ClusterManager referenced by
+// this IndexerCluster.
+func (p *splunkManager) SetClusterInMaintenanceMode(ctx context.Context, client splcommon.ControllerClient, cr *enterpriseApi.IndexerCluster, value bool) error {
+	reqLogger := log.FromContext(ctx)
+	scopedLog := reqLogger.WithName("SetClusterInMaintenanceMode").WithValues("name", cr.GetName(), "namespace", cr.GetNamespace())
+	eventPublisher, _ := newK8EventPublisher(client, cr)
+
+	clusterManagerInstance := &enterpriseApi.ClusterManager{}
+	if len(cr.Spec.ClusterManagerRef.Name) == 0 {
+		return fmt.Errorf("cluster manager not found")
+	}
+
+	namespacedName := types.NamespacedName{
+		Namespace: cr.GetNamespace(),
+		Name:      cr.Spec.ClusterManagerRef.Name,
+	}
+	err := client.Get(ctx, namespacedName, clusterManagerInstance)
+	if err != nil {
+		return err
+	}
+
+	// toggle the annotation on the ClusterManager itself, guarding against a
+	// nil annotations map
+	annotations := clusterManagerInstance.GetAnnotations()
+	if annotations == nil {
+		annotations = map[string]string{}
+	}
+	if value {
+		annotations[enterpriseApi.ClusterManagerMaintenanceAnnotation] = ""
+		scopedLog.Info("set cluster manager in maintenance mode")
+		eventPublisher.Normal(ctx, "ClusterManager", "set cluster manager in maintenance mode")
+	} else {
+		delete(annotations, enterpriseApi.ClusterManagerMaintenanceAnnotation)
+		scopedLog.Info("unset cluster manager maintenance mode")
+		eventPublisher.Normal(ctx, "ClusterManager", "unset cluster manager maintenance mode")
+	}
+	clusterManagerInstance.SetAnnotations(annotations)
+	// persist the change on the ClusterManager that was annotated
+	err = client.Update(ctx, clusterManagerInstance)
+	if err != nil {
+		return err
+	}
+	return nil
+}
+
 // ApplyIndexerCluster reconciles the state of a Splunk Enterprise indexer cluster for Older CM CRDs.
 func ApplyIndexerCluster(ctx context.Context, client splcommon.ControllerClient, cr *enterpriseApi.IndexerCluster) (reconcile.Result, error) {
diff --git a/pkg/splunk/enterprise/indexercluster_test.go b/pkg/splunk/enterprise/indexercluster_test.go
index cbeb0a1e6..0ccb98cd4 100644
--- a/pkg/splunk/enterprise/indexercluster_test.go
+++ b/pkg/splunk/enterprise/indexercluster_test.go
@@ -40,10 +40,12 @@ import (
 	clientgoscheme "k8s.io/client-go/kubernetes/scheme"
 	"sigs.k8s.io/controller-runtime/pkg/client"
 	"sigs.k8s.io/controller-runtime/pkg/client/fake"
 
 	"github.com/go-logr/logr"
+	manager "github.com/splunk/splunk-operator/pkg/splunk"
 	splclient "github.com/splunk/splunk-operator/pkg/splunk/client"
 	splcommon "github.com/splunk/splunk-operator/pkg/splunk/common"
+	managermodel "github.com/splunk/splunk-operator/pkg/splunk/model"
 	spltest "github.com/splunk/splunk-operator/pkg/splunk/test"
 	splutil "github.com/splunk/splunk-operator/pkg/splunk/util"
 	logf "sigs.k8s.io/controller-runtime/pkg/log"
@@ -67,6 +71,29 @@ func init() {
 	}
 }
 
+// setCredsIdx builds a SplunkManager for IndexerCluster tests; the factory
+// runs in test mode, so no Splunk REST endpoint is contacted.
+func setCredsIdx(t *testing.T, c splcommon.ControllerClient, cr *enterpriseApi.IndexerCluster) manager.SplunkManager {
+	ctx := context.TODO()
+	info := &managermodel.ReconcileInfo{
+		Kind:       cr.Kind,
+		CommonSpec: cr.Spec.CommonSplunkSpec,
+		Client:     c,
+		Log:        logf.Log,
+		Namespace:  cr.Namespace,
+		Name:       cr.Name,
+	}
+	// assign the CR directly; copying into a nil MetaObject interface fails
+	info.MetaObject = cr
+	publisher := func(ctx context.Context, eventType, reason, message string) {}
+	mg := NewManagerFactory(true)
+	manager, err := mg.NewManager(ctx, info, publisher)
+	if err != nil {
+		t.Fatalf("unable to create splunk manager: %v", err)
+	}
+	return manager
+}
+
 func TestApplyIndexerClusterOld(t *testing.T) {
 	c := spltest.NewMockClient()
 	ctx := context.TODO()
@@ -220,7 +246,8 @@ func TestApplyIndexerCluster(t *testing.T) {
 	revised := current.DeepCopy()
 	revised.Spec.Image = "splunk/test"
 	reconcile := func(c *spltest.MockClient, cr interface{}) error {
-		_, err := ApplyIndexerClusterManager(context.Background(), c, cr.(*enterpriseApi.IndexerCluster))
+		manager := setCredsIdx(t, c, cr.(*enterpriseApi.IndexerCluster))
+		_, err := manager.ApplyIndexerClusterManager(context.Background(), c, cr.(*enterpriseApi.IndexerCluster))
 		return err
 	}
 	spltest.ReconcileTesterWithoutRedundantCheck(t, "TestApplyIndexerClusterManager", &current, revised, createCalls, updateCalls, reconcile, true)
@@ -230,7 +257,8 @@ func TestApplyIndexerCluster(t *testing.T) {
 	revised.ObjectMeta.DeletionTimestamp = &currentTime
 	revised.ObjectMeta.Finalizers = []string{"enterprise.splunk.com/delete-pvc"}
 	deleteFunc := func(cr splcommon.MetaObject, c splcommon.ControllerClient) (bool, error) {
-		_, err := ApplyIndexerClusterManager(context.Background(), c, cr.(*enterpriseApi.IndexerCluster))
+		manager := setCredsIdx(t, c, cr.(*enterpriseApi.IndexerCluster))
+		_, err := manager.ApplyIndexerClusterManager(context.Background(), c, cr.(*enterpriseApi.IndexerCluster))
 		return true, err
 	}
 	splunkDeletionTester(t, revised, deleteFunc)
@@ -240,7 +268,8 @@ func TestApplyIndexerCluster(t *testing.T) {
 	c := spltest.NewMockClient()
 	rerr := errors.New(splcommon.Rerr)
 	c.InduceErrorKind[splcommon.MockClientInduceErrorGet] = rerr
-	_, err := ApplyIndexerClusterManager(ctx, c, &current)
+	manager := setCredsIdx(t, c, &current)
+	_, err := manager.ApplyIndexerClusterManager(ctx, c, &current)
 	if err == nil {
 		t.Errorf("Expected error")
 	}
@@ -260,7 +289,8 @@ func TestApplyIndexerCluster(t *testing.T) {
 		Name:      "manager1",
 		Namespace: "test",
 	}
-	_, err = ApplyIndexerClusterManager(ctx, c, &current)
+	manager = setCredsIdx(t, c, &current)
+	_, err = manager.ApplyIndexerClusterManager(ctx, c, &current)
 	if err != nil {
 		t.Errorf("Expected error")
 	}
@@ -273,7 +303,8 @@ func TestApplyIndexerCluster(t *testing.T) {
 	newc.Create(ctx, nsSec)
 	newc.Create(ctx, &cManager)
 	newc.InduceErrorKind[splcommon.MockClientInduceErrorCreate] = rerr
-	_, err = ApplyIndexerClusterManager(ctx, newc, &current)
+	manager = setCredsIdx(t, c, &current)
+	_, err = manager.ApplyIndexerClusterManager(ctx, newc, &current)
 	if err == nil {
 		t.Errorf("Expected error")
 	}
@@ -1283,19 +1314,22 @@ func TestInvalidIndexerClusterSpec(t *testing.T) {
 	cm.Status.Phase = enterpriseApi.PhaseReady
 	// Empty ClusterManagerRef should return an error
 	cr.Spec.ClusterManagerRef.Name = ""
-	if _, err := ApplyIndexerClusterManager(context.Background(), c, &cr); err == nil {
+	manager := setCredsIdx(t, c, &cr)
+	if _, err := manager.ApplyIndexerClusterManager(context.Background(), c, &cr); err == nil {
 		t.Errorf("ApplyIndxerCluster() should have returned error")
 	}
 	cr.Spec.ClusterManagerRef.Name = "manager1"
 	// verifyRFPeers should return err here
-	if _, err := ApplyIndexerClusterManager(context.Background(), c, &cr); err == nil {
+	manager = setCredsIdx(t, c, &cr)
+	if _, err := manager.ApplyIndexerClusterManager(context.Background(), c, &cr); err == nil {
 		t.Errorf("ApplyIndxerCluster() should have returned error")
 	}
 
 	cm.Status.Phase = enterpriseApi.PhaseError
 	cr.Spec.CommonSplunkSpec.EtcVolumeStorageConfig.StorageCapacity = "-abcd"
-	if _, err := ApplyIndexerClusterManager(context.Background(), c, &cr); err == nil {
+	manager = setCredsIdx(t, c, &cr)
+	if _, err := manager.ApplyIndexerClusterManager(context.Background(), c, &cr); err == nil {
 		t.Errorf("ApplyIndxerCluster() should have returned error")
 	}
 }
@@ -1602,7 +1636,8 @@ func TestIndexerClusterWithReadyState(t *testing.T) {
 	}
 
 	// call reconciliation
-	_, err = ApplyClusterManager(ctx, c, clustermanager)
+	manager := setCreds(t, c, clustermanager, clustermanager.Spec.CommonSplunkSpec)
+	_, err = manager.ApplyClusterManager(ctx, c, clustermanager)
 	if err != nil {
 		t.Errorf("Unexpected error while running reconciliation for cluster manager with app framework %v", err)
 		debug.PrintStack()
@@ -1681,7 +1716,7 @@ func TestIndexerClusterWithReadyState(t *testing.T) {
 	}
 
 	// call reconciliation
-	_, err = ApplyClusterManager(ctx, c, clustermanager)
+	_, err = manager.ApplyClusterManager(ctx, c, clustermanager)
 	if err != nil {
 		t.Errorf("Unexpected error while running reconciliation for cluster manager with app framework %v", err)
 		debug.PrintStack()
@@ -1755,7 +1790,8 @@ func TestIndexerClusterWithReadyState(t *testing.T) {
 
 	// simulate create clustermanager instance before reconcilation
 	c.Create(ctx, indexercluster)
-	_, err = ApplyIndexerClusterManager(ctx, c, indexercluster)
+	manager = setCredsIdx(t, c, indexercluster)
+	_, err = manager.ApplyIndexerClusterManager(ctx, c, indexercluster)
 	if err != nil {
 		t.Errorf("Unexpected error while running reconciliation for indexer cluster %v", err)
 		debug.PrintStack()
@@ -1792,7 +1828,8 @@ func TestIndexerClusterWithReadyState(t *testing.T) {
 	}
 
 	// call reconciliation
-	_, err = ApplyIndexerClusterManager(ctx, c, indexercluster)
+	manager = setCredsIdx(t, c, indexercluster)
+	_, err = manager.ApplyIndexerClusterManager(ctx, c, indexercluster)
 	if err != nil {
 		t.Errorf("Unexpected error while running reconciliation for cluster manager with app framework %v", err)
 		debug.PrintStack()
@@ -1869,7 +1906,8 @@ func TestIndexerClusterWithReadyState(t *testing.T) {
 	indexercluster.Status.IndexingReady = true
 	indexercluster.Status.ServiceReady = true
 	// call reconciliation
-	_, err = ApplyIndexerClusterManager(ctx, c, indexercluster)
+	manager = setCredsIdx(t, c, indexercluster)
+	_, err = manager.ApplyIndexerClusterManager(ctx, c, indexercluster)
 	if err != nil {
 		t.Errorf("Unexpected error while running reconciliation for indexer cluster with app framework %v", err)
 		debug.PrintStack()
diff --git a/pkg/splunk/enterprise/licensemanager.go b/pkg/splunk/enterprise/licensemanager.go
index ad572de10..0597eb58d 100644
--- a/pkg/splunk/enterprise/licensemanager.go
+++ b/pkg/splunk/enterprise/licensemanager.go
@@ -22,6 +22,7 @@ import (
 	"time"
 
 	enterpriseApi "github.com/splunk/splunk-operator/api/v4"
+	provmodel "github.com/splunk/splunk-operator/pkg/provisioner/splunk/model"
 	splutil "github.com/splunk/splunk-operator/pkg/splunk/util"
 
 	appsv1 "k8s.io/api/apps/v1"
@@ -36,7 +37,7 @@ import (
 )
 
 // ApplyLicenseManager reconciles the state for the Splunk Enterprise license manager.
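+// With the splunkManager receiver added below, this reconcile can also query
+// the provisioner for license status and record it in cr.Status.Conditions.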
-func ApplyLicenseManager(ctx context.Context, client splcommon.ControllerClient, cr *enterpriseApi.LicenseManager) (reconcile.Result, error) {
+func (p *splunkManager) ApplyLicenseManager(ctx context.Context, client splcommon.ControllerClient, cr *enterpriseApi.LicenseManager) (reconcile.Result, error) {
 
 	// unless modified, reconcile for this object will be requeued after 5 seconds
 	result := reconcile.Result{
@@ -178,6 +179,14 @@ func ApplyLicenseManager(ctx context.Context, client splcommon.ControllerClient,
 		if err != nil {
 			return result, err
 		}
+
+		// Verify the state of the Splunk instance and surface it on the CR:
+		// the provisioner reports detailed status through Status.Conditions.
+		provResult := provmodel.Result{}
+		provResult, err = p.provisioner.GetLicenseStatus(ctx, &cr.Status.Conditions)
+		if err != nil {
+			cr.Status.ErrorMessage = provResult.ErrorMessage
+		}
 	}
 	// RequeueAfter if greater than 0, tells the Controller to requeue the reconcile key after the Duration.
 	// Implies that Requeue is true, there is no need to set Requeue to true at the same time as RequeueAfter.
diff --git a/pkg/splunk/enterprise/licensemanager_test.go b/pkg/splunk/enterprise/licensemanager_test.go
index 2979fcd1b..0f0fcae99 100644
--- a/pkg/splunk/enterprise/licensemanager_test.go
+++ b/pkg/splunk/enterprise/licensemanager_test.go
@@ -88,7 +88,8 @@ func TestApplyLicenseManager(t *testing.T) {
 	revised := current.DeepCopy()
 	revised.Spec.Image = "splunk/test"
 	reconcile := func(c *spltest.MockClient, cr interface{}) error {
-		_, err := ApplyLicenseManager(context.Background(), c, cr.(*enterpriseApi.LicenseManager))
+		manager := setCreds(t, c, cr.(*enterpriseApi.LicenseManager), cr.(*enterpriseApi.LicenseManager).Spec.CommonSplunkSpec)
+		_, err := manager.ApplyLicenseManager(context.Background(), c, cr.(*enterpriseApi.LicenseManager))
 		return err
 	}
 	spltest.ReconcileTesterWithoutRedundantCheck(t, "TestApplyLicenseManager", &current, revised, createCalls, updateCalls, reconcile, true)
@@ -98,7 +99,8 @@ func TestApplyLicenseManager(t *testing.T) {
 	revised.ObjectMeta.DeletionTimestamp = &currentTime
 	revised.ObjectMeta.Finalizers = []string{"enterprise.splunk.com/delete-pvc"}
 	deleteFunc := func(cr splcommon.MetaObject, c splcommon.ControllerClient) (bool, error) {
-		_, err := ApplyLicenseManager(context.Background(), c, cr.(*enterpriseApi.LicenseManager))
+		manager := setCreds(t, c, cr.(*enterpriseApi.LicenseManager), cr.(*enterpriseApi.LicenseManager).Spec.CommonSplunkSpec)
+		_, err := manager.ApplyLicenseManager(context.Background(), c, cr.(*enterpriseApi.LicenseManager))
 		return true, err
 	}
 	splunkDeletionTester(t, revised, deleteFunc)
@@ -107,7 +109,8 @@ func TestApplyLicenseManager(t *testing.T) {
 	c := spltest.NewMockClient()
 	ctx := context.TODO()
 	current.Spec.LivenessInitialDelaySeconds = -1
-	_, err := ApplyLicenseManager(ctx, c, &current)
+	manager := setCreds(t, c, &current, current.Spec.CommonSplunkSpec)
+	_, err := manager.ApplyLicenseManager(ctx, c, &current)
 	if err == nil {
 		t.Errorf("Expected error")
 	}
@@ -115,7 +118,7 @@ func TestApplyLicenseManager(t *testing.T) {
 	rerr := errors.New(splcommon.Rerr)
 	current.Spec.LivenessInitialDelaySeconds = 5
 	c.InduceErrorKind[splcommon.MockClientInduceErrorGet] = rerr
-	_, err = ApplyLicenseManager(ctx, c, &current)
+	_, err = manager.ApplyLicenseManager(ctx, c, &current)
 	if err == nil {
 		t.Errorf("Expected error")
 	}
@@ -127,7 +130,7 @@ func TestApplyLicenseManager(t *testing.T) {
 	}
 	c.Create(ctx, nsSec)
 	c.InduceErrorKind[splcommon.MockClientInduceErrorCreate] = rerr
-	_, err = ApplyLicenseManager(ctx, c, &current)
+	_, err = manager.ApplyLicenseManager(ctx, c, &current)
 	if err == nil {
 		t.Errorf("Expected error")
 	}
@@ -250,7 +253,8 @@ func TestAppFrameworkApplyLicenseManagerShouldNotFail(t *testing.T) {
 		t.Errorf("Unable to create download directory for apps :%s", splcommon.AppDownloadVolume)
 	}
 
-	_, err = ApplyLicenseManager(ctx, client, &cr)
+	manager := setCreds(t, client, &cr, cr.Spec.CommonSplunkSpec)
+	_, err = manager.ApplyLicenseManager(ctx, client, &cr)
 	if err != nil {
 		t.Errorf("ApplyLicenseManager should be successful")
 	}
@@ -680,7 +684,8 @@ func TestApplyLicenseManagerDeletion(t *testing.T) {
 		t.Errorf("Unable to create download directory for apps :%s", splcommon.AppDownloadVolume)
 	}
 
-	_, err = ApplyLicenseManager(ctx, c, &lm)
+	manager := setCreds(t, c, &lm, lm.Spec.CommonSplunkSpec)
+	_, err = manager.ApplyLicenseManager(ctx, c, &lm)
 	if err != nil {
 		t.Errorf("ApplyLicenseManager should not have returned error here.")
 	}
@@ -914,7 +919,8 @@ func TestLicenseManagerWithReadyState(t *testing.T) {
 	}
 
 	// call reconciliation
-	_, err = ApplyClusterManager(ctx, c, clustermanager)
+	manager := setCreds(t, c, clustermanager, clustermanager.Spec.CommonSplunkSpec)
+	_, err = manager.ApplyClusterManager(ctx, c, clustermanager)
 	if err != nil {
 		t.Errorf("Unexpected error while running reconciliation for cluster manager with app framework %v", err)
 		debug.PrintStack()
@@ -988,7 +994,7 @@ func TestLicenseManagerWithReadyState(t *testing.T) {
 	}
 
 	// call reconciliation
-	_, err = ApplyClusterManager(ctx, c, clustermanager)
+	_, err = manager.ApplyClusterManager(ctx, c, clustermanager)
 	if err != nil {
 		t.Errorf("Unexpected error while running reconciliation for cluster manager with app framework %v", err)
 		debug.PrintStack()
@@ -1065,7 +1071,8 @@ func TestLicenseManagerWithReadyState(t *testing.T) {
 
 	// simulate create clustermanager instance before reconcilation
 	c.Create(ctx, licensemanager)
-	_, err = ApplyLicenseManager(ctx, c, licensemanager)
+	manager = setCreds(t, c, licensemanager, licensemanager.Spec.CommonSplunkSpec)
+	_, err = manager.ApplyLicenseManager(ctx, c, licensemanager)
 	if err != nil {
 		t.Errorf("Unexpected error while running reconciliation for indexer cluster %v", err)
 		debug.PrintStack()
@@ -1102,7 +1109,8 @@ func TestLicenseManagerWithReadyState(t *testing.T) {
 	}
 
 	// call reconciliation
-	_, err = ApplyLicenseManager(ctx, c, licensemanager)
+	manager = setCreds(t, c, licensemanager, licensemanager.Spec.CommonSplunkSpec)
+	_, err = manager.ApplyLicenseManager(ctx, c, licensemanager)
 	if err != nil {
 		t.Errorf("Unexpected error while running reconciliation for cluster manager with app framework %v", err)
 		debug.PrintStack()
@@ -1217,7 +1225,8 @@ func TestLicenseManagerWithReadyState(t *testing.T) {
 	}
 
 	// call reconciliation
-	_, err = ApplyLicenseManager(ctx, c, licensemanager)
+	manager = setCreds(t, c, licensemanager, licensemanager.Spec.CommonSplunkSpec)
+	_, err = manager.ApplyLicenseManager(ctx, c, licensemanager)
 	if err != nil {
 		t.Errorf("Unexpected error while running reconciliation for license manager with app framework %v", err)
 		debug.PrintStack()
diff --git a/pkg/splunk/enterprise/licensemaster_test.go b/pkg/splunk/enterprise/licensemaster_test.go
index 0fcab3a5c..0d27155d8 100644
--- a/pkg/splunk/enterprise/licensemaster_test.go
+++ b/pkg/splunk/enterprise/licensemaster_test.go
@@ -924,7 +924,8 @@ func TestLicenseMasterWithReadyState(t *testing.T) {
 	}
 
 	// call reconciliation
-	_, err = ApplyClusterManager(ctx, c, clustermanager)
+	manager := setCreds(t, c, clustermanager, clustermanager.Spec.CommonSplunkSpec)
+	_, err = manager.ApplyClusterManager(ctx, c, clustermanager)
 	if err != nil {
 		t.Errorf("Unexpected error while running reconciliation for cluster manager with app framework %v", err)
 		debug.PrintStack()
@@ -1003,7 +1004,7 @@ func TestLicenseMasterWithReadyState(t *testing.T) {
 	}
 
 	// call reconciliation
-	_, err = ApplyClusterManager(ctx, c, clustermanager)
+	_, err = manager.ApplyClusterManager(ctx, c, clustermanager)
 	if err != nil {
 		t.Errorf("Unexpected error while running reconciliation for cluster manager with app framework %v", err)
 		debug.PrintStack()
diff --git a/pkg/splunk/enterprise/monitoringconsole_test.go b/pkg/splunk/enterprise/monitoringconsole_test.go
index e72750ec1..cee5ed297 100644
--- a/pkg/splunk/enterprise/monitoringconsole_test.go
+++ b/pkg/splunk/enterprise/monitoringconsole_test.go
@@ -1129,7 +1129,8 @@ func TestIsMonitoringConsoleReadyForUpgrade(t *testing.T) {
 	}
 
 	err := client.Create(ctx, &cm)
-	_, err = ApplyClusterManager(ctx, client, &cm)
+	manager := setCreds(t, client, &cm, cm.Spec.CommonSplunkSpec)
+	_, err = manager.ApplyClusterManager(ctx, client, &cm)
 	if err != nil {
 		t.Errorf("applyClusterManager should not have returned error; err=%v", err)
 	}
@@ -1161,6 +1162,9 @@ func TestIsMonitoringConsoleReadyForUpgrade(t *testing.T) {
 	}
 
 	err = client.Create(ctx, &mc)
+	if err != nil {
+		t.Errorf("creating MonitoringConsole should not have returned error; err=%v", err)
+	}
 	_, err = ApplyMonitoringConsole(ctx, client, &mc)
 	if err != nil {
 		t.Errorf("applyMonitoringConsole should not have returned error; err=%v", err)
@@ -1168,7 +1172,7 @@ func TestIsMonitoringConsoleReadyForUpgrade(t *testing.T) {
 
 	mc.Spec.Image = "splunk2"
 	cm.Spec.Image = "splunk2"
-	_, err = ApplyClusterManager(ctx, client, &cm)
+	_, err = manager.ApplyClusterManager(ctx, client, &cm)
 
 	monitoringConsole := &enterpriseApi.MonitoringConsole{}
 	namespacedName := types.NamespacedName{
@@ -1235,7 +1239,8 @@ func TestChangeMonitoringConsoleAnnotations(t *testing.T) {
 
 	// Create the instances
 	client.Create(ctx, cm)
-	_, err := ApplyClusterManager(ctx, client, cm)
+	manager := setCreds(t, client, cm, cm.Spec.CommonSplunkSpec)
+	_, err := manager.ApplyClusterManager(ctx, client, cm)
 	if err != nil {
 		t.Errorf("applyClusterManager should not have returned error; err=%v", err)
 	}
diff --git a/pkg/splunk/enterprise/types.go b/pkg/splunk/enterprise/types.go
index 557272168..ee6511159 100644
--- a/pkg/splunk/enterprise/types.go
+++ b/pkg/splunk/enterprise/types.go
@@ -76,6 +76,26 @@ const (
 	TmpAppDownloadDir string = "/tmp/appframework/"
 )
 
+// GetInstantTypeFromKind maps a custom resource kind to its InstanceType.
+// Unknown kinds default to SplunkClusterManager.
+func GetInstantTypeFromKind(kind string) InstanceType {
+	switch kind {
+	case "ClusterManager", "ClusterMaster":
+		return SplunkClusterManager
+	case "LicenseManager", "LicenseMaster":
+		return SplunkLicenseManager
+	case "IndexerCluster":
+		return SplunkIndexer
+	case "MonitoringConsole":
+		return SplunkMonitoringConsole
+	case "SearchHeadCluster":
+		return SplunkSearchHead
+	case "Standalone":
+		return SplunkStandalone
+	}
+	return SplunkClusterManager
+}
+
 type commonResourceTracker struct {
 	// mutex to serialize the access to commonResourceTracker
 	mutex sync.Mutex
diff --git a/pkg/splunk/enterprise/util_test.go b/pkg/splunk/enterprise/util_test.go
index 64587db8a..a0f52f463 100644
--- a/pkg/splunk/enterprise/util_test.go
+++ b/pkg/splunk/enterprise/util_test.go
@@ -3172,7 +3172,8 @@ func TestGetCurrentImage(t *testing.T) {
 	utilruntime.Must(enterpriseApi.AddToScheme(clientgoscheme.Scheme))
 
 	err := client.Create(ctx, &current)
-	_, err = ApplyClusterManager(ctx, client, &current)
+	manager := setCreds(t, client, &current, current.Spec.CommonSplunkSpec)
+	_, err = manager.ApplyClusterManager(ctx, client, &current)
 	if err != nil {
 		t.Errorf("applyClusterManager should not have returned error; err=%v", err)
 	}
diff --git a/pkg/splunk/manager.go b/pkg/splunk/manager.go
new file mode 100644
index 000000000..264670774
--- /dev/null
+++ b/pkg/splunk/manager.go
@@ -0,0 +1,28 @@
+package splunk
+
+import (
+	"context"
+
+	"sigs.k8s.io/controller-runtime/pkg/reconcile"
+
+	enterpriseApi "github.com/splunk/splunk-operator/api/v4"
+	splcommon "github.com/splunk/splunk-operator/pkg/splunk/common"
+	model "github.com/splunk/splunk-operator/pkg/splunk/model"
+)
+
+// Factory creates SplunkManager instances for a given reconcile request.
+type Factory interface {
+	NewManager(ctx context.Context, info *model.ReconcileInfo, publisher model.EventPublisher) (SplunkManager, error)
+}
+
+// SplunkManager is the set of reconcile entry points backed by the provisioner.
+type SplunkManager interface {
+	ApplyClusterManager(ctx context.Context, client splcommon.ControllerClient, cr *enterpriseApi.ClusterManager) (reconcile.Result, error)
+	//ApplyClusterMaster(ctx context.Context, cr *enterpriseApiV3.ClusterMaster) (reconcile.Result, error)
+	ApplyIndexerClusterManager(ctx context.Context, client splcommon.ControllerClient, cr *enterpriseApi.IndexerCluster) (reconcile.Result, error)
+	ApplyLicenseManager(ctx context.Context, client splcommon.ControllerClient, cr *enterpriseApi.LicenseManager) (reconcile.Result, error)
+	//ApplyMonitoringConsole(ctx context.Context, cr *enterpriseApi.MonitoringConsole) (reconcile.Result, error)
+	//ApplySearchHeadCluster(ctx context.Context, cr *enterpriseApi.SearchHeadCluster) (reconcile.Result, error)
+	//ApplyStandalone(ctx context.Context, cr *enterpriseApi.Standalone) (reconcile.Result, error)
+	//ApplyLicenseMaster(ctx context.Context, cr *enterpriseApiV3.LicenseMaster) (reconcile.Result, error)
+}
diff --git a/pkg/splunk/model/types.go b/pkg/splunk/model/types.go
new file mode 100644
index 000000000..db5c735fe
--- /dev/null
+++ b/pkg/splunk/model/types.go
@@ -0,0 +1,31 @@
+package model
+
+import (
+	"context"
+
+	"github.com/go-logr/logr"
+	enterpriseApi "github.com/splunk/splunk-operator/api/v4"
+	splcommon "github.com/splunk/splunk-operator/pkg/splunk/common"
+	corev1 "k8s.io/api/core/v1"
+	ctrl "sigs.k8s.io/controller-runtime"
+)
+
+// EventPublisher is a function type for publishing events associated
+// with gateway functions.
+type EventPublisher func(ctx context.Context, eventType, reason, message string)
+
+// ReconcileInfo carries the state shared by the phases of a reconcile
+// request, so it can be passed around as a single argument.
+type ReconcileInfo struct {
+	Kind              string
+	MetaObject        splcommon.MetaObject
+	CommonSpec        enterpriseApi.CommonSplunkSpec
+	Client            splcommon.ControllerClient
+	Log               logr.Logger
+	Namespace         string
+	Name              string
+	Request           ctrl.Request
+	Events            []corev1.Event
+	ErrorMessage      string
+	PostSaveCallbacks []func()
+}
diff --git a/test/licensemanager/lm_s1_test.go b/test/licensemanager/lm_s1_test.go
index 6e2f3d6ee..d85dddb04 100644
--- a/test/licensemanager/lm_s1_test.go
+++ b/test/licensemanager/lm_s1_test.go
@@ -58,8 +58,7 @@ var _ = Describe("Licensemanager test", func() {
 			// Download License File
 			downloadDir := "licenseFolder"
 			switch testenv.ClusterProvider {
-			case "eks":
-				licenseFilePath, err := testenv.DownloadLicenseFromS3Bucket()
+			case "eks": licenseFilePath, err := testenv.DownloadLicenseFromS3Bucket()
 				Expect(err).To(Succeed(), "Unable to download license file from S3")
 				// Create License Config Map
 				testcaseEnvInst.CreateLicenseConfigMap(licenseFilePath)
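Since several CR statuses in this patch now carry []metav1.Condition, callers can inspect them with the standard apimachinery helpers. A short sketch; the condition type strings are chosen by the provisioner's GetClusterManagerStatus/GetLicenseStatus and are not defined in this patch, so none are assumed here:

package example

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/meta"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// reportConditions prints every condition recorded on a CR status.
func reportConditions(conds []metav1.Condition) {
	for _, c := range conds {
		fmt.Printf("%s=%s (%s): %s\n", c.Type, c.Status, c.Reason, c.Message)
	}
}

// hasCondition shows the standard lookup helper for a condition by type.
func hasCondition(conds []metav1.Condition, condType string) bool {
	return meta.FindStatusCondition(conds, condType) != nil
}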