Added Prometheus support
Signed-off-by: Kaustav Majumder <[email protected]>
Kaustav Majumder committed Mar 14, 2024
1 parent 73ae0f2 commit d110ac7
Showing 21 changed files with 838 additions and 4 deletions.
35 changes: 35 additions & 0 deletions config/rbac/role.yaml
@@ -174,6 +174,19 @@ rules:
- get
- list
- watch
- apiGroups:
- monitoring.coreos.com
resources:
- alertmanagers
- prometheuses
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- monitoring.coreos.com
resources:
@@ -186,6 +199,28 @@ rules:
- list
- update
- watch
- apiGroups:
- monitoring.coreos.com
resources:
- servicemonitors
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- networking.k8s.io
resources:
- networkpolicies
verbs:
- create
- get
- list
- update
- watch
- apiGroups:
- noobaa.io
resources:
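These role.yaml rules are not written by hand; in a kubebuilder-style project they are regenerated (via controller-gen) from the RBAC markers added in the controller diff below. For example, the servicemonitors rule above corresponds to this marker, shown verbatim from the controller file:

    // +kubebuilder:rbac:groups="monitoring.coreos.com",resources=servicemonitors,verbs=get;list;watch;update;patch;create;delete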
33 changes: 33 additions & 0 deletions controllers/defaults/resources.go
@@ -224,4 +224,37 @@ var (
},
},
}

MonitoringResources = map[string]corev1.ResourceRequirements{
"kube-rbac-proxy": {
Requests: corev1.ResourceList{
"memory": resource.MustParse("30Mi"),
"cpu": resource.MustParse("50m"),
},
Limits: corev1.ResourceList{
"memory": resource.MustParse("30Mi"),
"cpu": resource.MustParse("50m"),
},
},
"alertmanager": {
Requests: corev1.ResourceList{
"cpu": resource.MustParse("100m"),
"memory": resource.MustParse("200Mi"),
},
Limits: corev1.ResourceList{
"cpu": resource.MustParse("100m"),
"memory": resource.MustParse("200Mi"),
},
},
"prometheus": {
Requests: corev1.ResourceList{
"cpu": resource.MustParse("400m"),
"memory": resource.MustParse("250Mi"),
},
Limits: corev1.ResourceList{
"cpu": resource.MustParse("400m"),
"memory": resource.MustParse("250Mi"),
},
},
}
)
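MonitoringResources pins identical requests and limits for each monitoring sidecar and instance. A minimal sketch of how an entry might be consumed when building a container spec, assuming the map is keyed by container name; newKubeRBACProxyContainer is a hypothetical helper, not part of this commit:

    import (
        defaults "github.com/red-hat-storage/ocs-operator/v4/controllers/defaults"
        corev1 "k8s.io/api/core/v1"
    )

    // newKubeRBACProxyContainer is a hypothetical consumer of the defaults map.
    func newKubeRBACProxyContainer(image string) corev1.Container {
        return corev1.Container{
            Name:  "kube-rbac-proxy",
            Image: image,
            // Requests equal limits, which supports the Guaranteed QoS class
            // when every container in the pod is pinned the same way.
            Resources: defaults.MonitoringResources["kube-rbac-proxy"],
        }
    }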
200 changes: 200 additions & 0 deletions controllers/ocsinitialization/ocsinitialization_controller.go
@@ -8,9 +8,13 @@ import (

"github.com/go-logr/logr"
secv1client "github.com/openshift/client-go/security/clientset/versioned/typed/security/v1"
promv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1"
ocsv1 "github.com/red-hat-storage/ocs-operator/api/v4/v1"
"github.com/red-hat-storage/ocs-operator/v4/controllers/platform"
"github.com/red-hat-storage/ocs-operator/v4/controllers/util"
"github.com/red-hat-storage/ocs-operator/v4/templates"
corev1 "k8s.io/api/core/v1"
networkingv1 "k8s.io/api/networking/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
@@ -56,6 +60,9 @@ type OCSInitializationReconciler struct {
// +kubebuilder:rbac:groups=ocs.openshift.io,resources=*,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=security.openshift.io,resources=securitycontextconstraints,verbs=get;create;update
// +kubebuilder:rbac:groups=security.openshift.io,resourceNames=privileged,resources=securitycontextconstraints,verbs=get;create;update
// +kubebuilder:rbac:groups="networking.k8s.io",resources=networkpolicies,verbs=create;get;list;watch;update
// +kubebuilder:rbac:groups="monitoring.coreos.com",resources={alertmanagers,prometheuses},verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups="monitoring.coreos.com",resources=servicemonitors,verbs=get;list;watch;update;patch;create;delete

// Reconcile reads the state of the cluster for an OCSInitialization object and makes changes based on the state read
// and what is in the OCSInitialization.Spec
@@ -174,6 +181,49 @@ func (r *OCSInitializationReconciler) Reconcile(ctx context.Context, request rec
r.Log.Error(err, "Failed to ensure uxbackend service")
return reconcile.Result{}, err
}
isROSAHCP, err := platform.IsPlatformROSAHCP()
if err != nil {
r.Log.Error(err, "Failed to determine if on ROSA HCP platform")
return reconcile.Result{}, err
}
if isROSAHCP {
r.Log.Info("Setting up monitoring resources for ROSA HCP platform")
err = r.reconcilePrometheusKubeRBACConfigMap(instance)
if err != nil {
r.Log.Error(err, "Failed to ensure kubeRBACConfig config map")
return reconcile.Result{}, err
}

err = r.reconcilePrometheusService(instance)
if err != nil {
r.Log.Error(err, "Failed to ensure prometheus service")
return reconcile.Result{}, err
}

err = r.reconcilePrometheus(instance)
if err != nil {
r.Log.Error(err, "Failed to ensure prometheus instance")
return reconcile.Result{}, err
}

err = r.reconcileAlertManager(instance)
if err != nil {
r.Log.Error(err, "Failed to ensure alertmanager instance")
return reconcile.Result{}, err
}

err = r.reconcilePrometheusProxyNetworkPolicy(instance)
if err != nil {
r.Log.Error(err, "Failed to ensure Prometheus proxy network policy")
return reconcile.Result{}, err
}

err = r.reconcileK8sMetricsServiceMonitor(instance)
if err != nil {
r.Log.Error(err, "Failed to ensure k8sMetricsService Monitor")
return reconcile.Result{}, err
}
}

reason := ocsv1.ReconcileCompleted
message := ocsv1.ReconcileCompletedMessage
@@ -192,7 +242,9 @@ func (r *OCSInitializationReconciler) SetupWithManager(mgr ctrl.Manager) error {
return ctrl.NewControllerManagedBy(mgr).
For(&ocsv1.OCSInitialization{}).
Owns(&corev1.Service{}).
Owns(&networkingv1.NetworkPolicy{}).
Owns(&corev1.Secret{}).
Owns(&promv1.Prometheus{}).
// Watcher for storagecluster required to update
// ocs-operator-config configmap if storagecluster spec changes
Watches(
@@ -423,3 +475,151 @@ func (r *OCSInitializationReconciler) reconcileUXBackendService(initialData *ocs

return nil
}
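Each reconcile helper below follows the same controller-runtime pattern: build a stub object carrying only name and namespace, then let CreateOrUpdate fetch it and invoke a mutate callback that re-applies desired state, issuing a Create or Update only when something changed. A condensed sketch of the shared shape, with hypothetical helper and parameter names:

    import (
        "context"

        "k8s.io/apimachinery/pkg/runtime"
        ctrl "sigs.k8s.io/controller-runtime"
        "sigs.k8s.io/controller-runtime/pkg/client"
    )

    // ensureOwned is a hypothetical helper illustrating the shared pattern.
    // obj carries only Name/Namespace on entry; mutate re-applies the desired
    // state on every reconcile, so drift is corrected in place.
    func ensureOwned(ctx context.Context, c client.Client, scheme *runtime.Scheme,
        owner, obj client.Object, mutate func() error) error {
        _, err := ctrl.CreateOrUpdate(ctx, c, obj, func() error {
            // The owner reference ties the object's lifecycle (and garbage
            // collection) to the OCSInitialization CR.
            if err := ctrl.SetControllerReference(owner, obj, scheme); err != nil {
                return err
            }
            return mutate()
        })
        return err
    }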

func (r *OCSInitializationReconciler) reconcilePrometheusKubeRBACConfigMap(initialData *ocsv1.OCSInitialization) error {

var err error
prometheusKubeRBACConfigMap := &corev1.ConfigMap{}
prometheusKubeRBACConfigMap.Name = templates.PrometheusKubeRBACProxyConfigMapName
prometheusKubeRBACConfigMap.Namespace = initialData.Namespace

_, err = ctrl.CreateOrUpdate(r.ctx, r.Client, prometheusKubeRBACConfigMap, func() error {
if err := ctrl.SetControllerReference(initialData, prometheusKubeRBACConfigMap, r.Scheme); err != nil {
return err
}
prometheusKubeRBACConfigMap.Data = templates.KubeRBACProxyConfigMap.Data
return nil
})

if err != nil {
r.Log.Error(err, "Failed to create/update prometheus kube-rbac-proxy config map")
return err
}
r.Log.Info("Prometheus kube-rbac-proxy config map creation succeeded", "Name", prometheusKubeRBACConfigMap.Name)
return nil
}

func (r *OCSInitializationReconciler) reconcilePrometheusService(initialData *ocsv1.OCSInitialization) error {
var err error
prometheusService := &corev1.Service{}
prometheusService.Name = "prometheus"
prometheusService.Namespace = initialData.Namespace

_, err = ctrl.CreateOrUpdate(r.ctx, r.Client, prometheusService, func() error {
if err := ctrl.SetControllerReference(initialData, prometheusService, r.Scheme); err != nil {
return err
}
util.AddAnnotation(prometheusService, "service.beta.openshift.io/serving-cert-secret-name", "prometheus-serving-cert-secret")
util.AddLabels(prometheusService, "prometheus", "odf-prometheus")
prometheusService.Spec.Selector = map[string]string{
"app.kubernetes.io/name": prometheusService.Name,
}
prometheusService.Spec.Ports = []corev1.ServicePort{
{
Name: "https",
Protocol: corev1.ProtocolTCP,
Port: int32(templates.KubeRBACProxyPortNumber),
TargetPort: intstr.FromString("https"),
},
}
return nil
})
if err != nil {
r.Log.Error(err, "Failed to create/update prometheus service")
return err
}
r.Log.Info("Service creation succeeded", "Name", prometheusService.Name)
return nil
}

func (r *OCSInitializationReconciler) reconcilePrometheus(initialData *ocsv1.OCSInitialization) error {
var err error

prometheus := &promv1.Prometheus{}
prometheus.Name = "odf-prometheus"
prometheus.Namespace = initialData.Namespace

_, err = ctrl.CreateOrUpdate(r.ctx, r.Client, prometheus, func() error {
if err := ctrl.SetControllerReference(initialData, prometheus, r.Scheme); err != nil {
return err
}
templates.PrometheusTemplate.Spec.DeepCopyInto(&prometheus.Spec)
return nil
})

if err != nil {
r.Log.Error(err, "Failed to create/update prometheus instance")
return err
}
r.Log.Info("Prometheus instance creation succeeded", "Name", prometheus.Name)

return nil
}
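Note the use of DeepCopyInto rather than plain assignment when stamping the template: PrometheusSpec contains pointer and slice fields, so assignment would alias the shared package-level template. A minimal illustration of the distinction:

    // Aliasing (avoided): nested pointer/slice fields would be shared with the
    // package-level template, so later mutations of prometheus.Spec could
    // corrupt every subsequent reconcile.
    prometheus.Spec = templates.PrometheusTemplate.Spec

    // Deep copy (used here): prometheus.Spec receives its own copies.
    templates.PrometheusTemplate.Spec.DeepCopyInto(&prometheus.Spec)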

func (r *OCSInitializationReconciler) reconcileAlertManager(initialData *ocsv1.OCSInitialization) error {
var err error

alertManager := &promv1.Alertmanager{}
alertManager.Name = "odf-alertmanager"
alertManager.Namespace = initialData.Namespace

_, err = ctrl.CreateOrUpdate(r.ctx, r.Client, alertManager, func() error {
if err := ctrl.SetControllerReference(initialData, alertManager, r.Scheme); err != nil {
return err
}
util.AddAnnotation(alertManager, "prometheus", "odf-prometheus")
templates.AlertmanagerTemplate.Spec.DeepCopyInto(&alertManager.Spec)
return nil
})
if err != nil {
r.Log.Error(err, "Failed to create/update alertManager instance")
return err
}
r.Log.Info("AlertManager instance creation succeeded", "Name", alertManager.Name)
return nil
}

func (r *OCSInitializationReconciler) reconcilePrometheusProxyNetworkPolicy(initialData *ocsv1.OCSInitialization) error {
var err error

prometheusProxyNetworkPolicy := &networkingv1.NetworkPolicy{}
prometheusProxyNetworkPolicy.Name = "prometheus-proxy-rule"
prometheusProxyNetworkPolicy.Namespace = initialData.Namespace

_, err = ctrl.CreateOrUpdate(r.ctx, r.Client, prometheusProxyNetworkPolicy, func() error {
if err := ctrl.SetControllerReference(initialData, prometheusProxyNetworkPolicy, r.Scheme); err != nil {
return err
}
templates.PrometheusProxyNetworkPolicyTemplate.Spec.DeepCopyInto(&prometheusProxyNetworkPolicy.Spec)
return nil
})
if err != nil {
r.Log.Error(err, "Failed to create/update Prometheus proxy network policy")
return err
}
r.Log.Info("Prometheus proxy network policy creation succeeded", "Name", prometheusProxyNetworkPolicy.Name)
return nil
}
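The committed PrometheusProxyNetworkPolicyTemplate body is outside this diff; a hypothetical sketch of what a policy admitting ingress only to the kube-rbac-proxy port might look like, where every field value is an assumption:

    import (
        networkingv1 "k8s.io/api/networking/v1"
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/apimachinery/pkg/util/intstr"
    )

    var proxyPort = intstr.FromString("https")

    // Hypothetical template shape; not the committed template.
    var proxyPolicy = networkingv1.NetworkPolicy{
        Spec: networkingv1.NetworkPolicySpec{
            // Select the Prometheus pods fronted by kube-rbac-proxy (assumed label).
            PodSelector: metav1.LabelSelector{
                MatchLabels: map[string]string{"app.kubernetes.io/name": "prometheus"},
            },
            Ingress: []networkingv1.NetworkPolicyIngressRule{{
                Ports: []networkingv1.NetworkPolicyPort{{Port: &proxyPort}},
            }},
            PolicyTypes: []networkingv1.PolicyType{networkingv1.PolicyTypeIngress},
        },
    }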

func (r *OCSInitializationReconciler) reconcileK8sMetricsServiceMonitor(initialData *ocsv1.OCSInitialization) error {
var err error

k8sMetricsServiceMonitor := &promv1.ServiceMonitor{}
k8sMetricsServiceMonitor.Name = "k8s-metrics-service-monitor"
k8sMetricsServiceMonitor.Namespace = initialData.Namespace

_, err = ctrl.CreateOrUpdate(r.ctx, r.Client, k8sMetricsServiceMonitor, func() error {
if err := ctrl.SetControllerReference(initialData, k8sMetricsServiceMonitor, r.Scheme); err != nil {
return err
}
templates.K8sMetricsServiceMonitorTemplate.Spec.DeepCopyInto(&k8sMetricsServiceMonitor.Spec)
return nil
})
if err != nil {
r.Log.Error(err, "Failed to create/update K8s Metrics Service Monitor")
return err
}
r.Log.Info("K8s Metrics Service Monitor creation succeeded", "Name", k8sMetricsServiceMonitor.Name)
return nil
}
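As with the network policy, the K8sMetricsServiceMonitorTemplate body is outside this diff; a hypothetical sketch of the shape such a template commonly takes, where the selector labels and endpoint values are assumptions:

    import (
        promv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1"
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    )

    // Hypothetical template shape; not the committed template.
    var k8sMetricsServiceMonitor = promv1.ServiceMonitor{
        Spec: promv1.ServiceMonitorSpec{
            // Which Services to scrape (label values assumed).
            Selector: metav1.LabelSelector{
                MatchLabels: map[string]string{"app": "odf-metrics"},
            },
            Endpoints: []promv1.Endpoint{{
                Port:     "https",
                Path:     "/metrics",
                Scheme:   "https",
                Interval: "30s", // promv1.Duration is a string type
            }},
        },
    }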
23 changes: 19 additions & 4 deletions controllers/platform/platform_detection.go
@@ -33,8 +33,9 @@ var (
// platform is used to get the PlatformType of the running cluster in a thread-safe manner
// It is a singleton which is initialized exactly once via Detect() function call.
type platform struct {
-isOpenShift bool
-platform configv1.PlatformType
+isOpenShift bool
+platform configv1.PlatformType
+infrastructure *configv1.Infrastructure
}

// SetFakePlatformInstanceForTesting can be used to fake a Platform while testing.
@@ -88,11 +89,12 @@ func Detect() {
}
}
}

if platformInstance.isOpenShift {
-if infrastructure, err := configv1client(cfg).Infrastructures().Get(context.TODO(), "cluster", metav1.GetOptions{}); err != nil {
+if infrastructure, err := configv1client(cfg).Infrastructures().Get(context.TODO(), "cluster", metav1.GetOptions{}); err == nil {
platformInstance.platform = infrastructure.Status.PlatformStatus.Type
+platformInstance.infrastructure = infrastructure
}

}
})
}
@@ -160,3 +162,16 @@ func SkipObjectStore(p configv1.PlatformType) bool {
}
return false
}

func IsPlatformROSAHCP() (bool, error) {
if platformInstance.platform == configv1.AWSPlatformType {
if platformInstance.infrastructure.Status.ControlPlaneTopology == configv1.ExternalTopologyMode {
for _, resourceTags := range platformInstance.infrastructure.Status.PlatformStatus.AWS.ResourceTags {
if resourceTags.Key == "red-hat-clustertype" && resourceTags.Value == "rosa" {
return true, nil
}
}
}
}
return false, nil
}
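IsPlatformROSAHCP dereferences platformInstance.infrastructure and the nested Status.PlatformStatus.AWS without nil checks; if Detect() never populated them (a non-OpenShift cluster, or the earlier Infrastructures().Get failed), this panics. A defensively hardened sketch, keeping the same package state and semantics:

    func IsPlatformROSAHCP() (bool, error) {
        infra := platformInstance.infrastructure
        // Guard: Detect() may not have populated the infrastructure object,
        // and PlatformStatus/AWS are pointer fields that can be nil.
        if infra == nil || infra.Status.PlatformStatus == nil || infra.Status.PlatformStatus.AWS == nil {
            return false, nil
        }
        if platformInstance.platform != configv1.AWSPlatformType ||
            infra.Status.ControlPlaneTopology != configv1.ExternalTopologyMode {
            return false, nil
        }
        // ROSA clusters carry this well-known resource tag.
        for _, tag := range infra.Status.PlatformStatus.AWS.ResourceTags {
            if tag.Key == "red-hat-clustertype" && tag.Value == "rosa" {
                return true, nil
            }
        }
        return false, nil
    }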