diff --git a/tdrs-backend/docker-compose.yml b/tdrs-backend/docker-compose.yml index a66fed7a5..07d2b502a 100644 --- a/tdrs-backend/docker-compose.yml +++ b/tdrs-backend/docker-compose.yml @@ -101,6 +101,15 @@ services: command: --config /usr/share/grafana/conf/custom.ini depends_on: - grafana-pg + + alertmanager: + restart: always + image: prom/alertmanager:v0.27.0 + ports: + - 9093:9093 + volumes: + - ./plg/alertmanager/alertmanager.yml:/etc/alertmanager/alertmanager.yml + command: --config.file=/etc/alertmanager/alertmanager.yml --storage.path=/alertmanager --log.level=debug --web.external-url=http://localhost:3000/alerts --web.route-prefix=/alerts --cluster.listen-address="" prometheus: restart: always @@ -109,12 +118,14 @@ services: - 9090:9090 volumes: - ./plg/prometheus/prometheus.local.yml:/etc/prometheus/prometheus.yml - - ./plg/prometheus/django_rules.yml:/etc/prometheus/django_rules.yml + - ./plg/prometheus/django-rules.yml:/etc/prometheus/django-rules.yml + - ./plg/prometheus/alerts.local.yml:/etc/prometheus/alerts.yml - prometheus_data:/prometheus depends_on: - web - celery-exporter - postgres-exporter + - alertmanager promtail: restart: always diff --git a/tdrs-backend/plg/alertmanager/alertmanager.yml b/tdrs-backend/plg/alertmanager/alertmanager.yml new file mode 100644 index 000000000..9414062ae --- /dev/null +++ b/tdrs-backend/plg/alertmanager/alertmanager.yml @@ -0,0 +1,71 @@ +global: + # The smarthost and SMTP sender used for mail notifications. + smtp_smarthost: 'smtp.sendgrid.net:587' + smtp_from: 'no-reply@tanfdata.acf.hhs.gov' + smtp_auth_username: 'apikey' + smtp_auth_password: '{{ sendgrid_api_key }}' + +# The directory from which notification templates are read. +templates: + - '/etc/alertmanager/template/*.tmpl' + +# The root route on which each incoming alert enters. +route: + # The labels by which incoming alerts are grouped together. 
For example,
+  # multiple alerts coming in for cluster=A and alertname=LatencyHigh would
+  # be batched into a single group.
+  group_by: ['alertname', 'env', 'service']
+
+  # When a new group of alerts is created by an incoming alert, wait at
+  # least 'group_wait' to send the initial notification.
+  # This ensures that multiple alerts for the same group that start
+  # firing shortly after one another are batched together on the first
+  # notification.
+  group_wait: 30s
+
+  # When the first notification was sent, wait 'group_interval' to send a batch
+  # of new alerts that started firing for that group.
+  group_interval: 5m
+
+  # If an alert has successfully been sent, wait 'repeat_interval' to
+  # resend it.
+  repeat_interval: 5m
+
+  # A default receiver.
+  receiver: admin-team-emails
+
+  # All the above attributes are inherited by all child routes and can be
+  # overwritten on each.
+
+  # The child route trees.
+  routes:
+    # This route performs a regular expression match on alert labels to
+    # catch alerts that are related to a list of services.
+    - matchers:
+        - alertname=~"UpTime"
+      receiver: dev-team-emails
+      group_wait: 30m
+
+# Inhibition rules allow muting a set of alerts when another alert is
+# already firing.
+# We use this to mute any warning-level notifications if the same alert is
+# already critical.
+inhibit_rules:
+  - source_matchers: [severity="CRITICAL"]
+    target_matchers: [severity="WARNING"]
+    # Apply inhibition if the alertname is the same.
+    # CAUTION:
+    #   If all label names listed in `equal` are missing
+    #   from both the source and target alerts,
+    #   the inhibition rule will apply!
+ equal: [alertname, env, service] + + +receivers: + - name: 'admin-team-emails' + email_configs: + - to: '{{ admin_team_emails }}' + + - name: 'dev-team-emails' + email_configs: + - to: '{{ dev_team_emails }}' diff --git a/tdrs-backend/plg/alertmanager/manifest.yml b/tdrs-backend/plg/alertmanager/manifest.yml new file mode 100644 index 000000000..80067f717 --- /dev/null +++ b/tdrs-backend/plg/alertmanager/manifest.yml @@ -0,0 +1,10 @@ +version: 1 +applications: + - name: alertmanager + memory: 512M + disk_quota: 1G + instances: 1 + command: | + mkdir /tmp + buildpacks: + - https://github.com/cloudfoundry/binary-buildpack diff --git a/tdrs-backend/plg/grafana/dashboards/logs_dashboard.json b/tdrs-backend/plg/grafana/dashboards/logs_dashboard.json index 6843e5a85..5b34ecf3a 100644 --- a/tdrs-backend/plg/grafana/dashboards/logs_dashboard.json +++ b/tdrs-backend/plg/grafana/dashboards/logs_dashboard.json @@ -19,9 +19,95 @@ "editable": true, "fiscalYearStartMonth": 0, "graphTooltip": 0, - "id": 4, "links": [], "panels": [ + { + "datasource": { + "default": true, + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "dark-red", + "value": 80 + }, + { + "color": "light-red", + "value": 85 + }, + { + "color": "#EAB839", + "value": 90 + }, + { + "color": "semi-dark-green", + "value": 95 + }, + { + "color": "dark-green", + "value": 100 + } + ] + }, + "unit": "percent" + }, + "overrides": [] + }, + "gridPos": { + "h": 14, + "w": 24, + "x": 0, + "y": 0 + }, + "id": 2, + "options": { + "colorMode": "background", + "graphMode": "none", + "justifyMode": "center", + "orientation": "auto", + "percentChangeColorMode": "standard", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "showPercentChange": false, + "textMode": "auto", + 
"wideLayout": true + }, + "pluginVersion": "11.2.0", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "editorMode": "code", + "expr": "100 * avg_over_time(up{job=~\"$app\"}[$__range])", + "instant": false, + "legendFormat": "__auto", + "range": true, + "refId": "A" + } + ], + "title": "App Uptime", + "type": "stat" + }, { "datasource": { "type": "loki", @@ -31,7 +117,7 @@ "h": 28, "w": 24, "x": 0, - "y": 0 + "y": 14 }, "id": 1, "options": { @@ -56,7 +142,7 @@ "refId": "A" } ], - "title": "Logs", + "title": "Job Logs", "type": "logs" } ], @@ -71,7 +157,7 @@ "list": [ { "current": { - "selected": false, + "selected": true, "text": "All", "value": "$__all" }, @@ -98,11 +184,35 @@ "skipUrlSync": false, "sort": 0, "type": "query" + }, + { + "current": { + "selected": true, + "text": "All", + "value": "$__all" + }, + "definition": "query_result(up)", + "hide": 0, + "includeAll": true, + "label": "App", + "multi": false, + "name": "app", + "options": [], + "query": { + "qryType": 3, + "query": "query_result(up)", + "refId": "PrometheusVariableQueryEditor-VariableQuery" + }, + "refresh": 1, + "regex": "/.*job=\"([^\"]+).*/", + "skipUrlSync": false, + "sort": 0, + "type": "query" } ] }, "time": { - "from": "now-3h", + "from": "now-24h", "to": "now" }, "timepicker": {}, diff --git a/tdrs-backend/plg/prometheus/alerts.local.yml b/tdrs-backend/plg/prometheus/alerts.local.yml new file mode 100644 index 000000000..99183c544 --- /dev/null +++ b/tdrs-backend/plg/prometheus/alerts.local.yml @@ -0,0 +1,39 @@ +groups: + - name: database.alerts + rules: + - alert: LocalDatabaseDown + expr: last_over_time(pg_up{job="postgres"}[1m]) == 0 + for: 1m + labels: + severity: CRITICAL + annotations: + summary: "The {{ $labels.service }} service is down." + description: "The {{ $labels.service }} service in the {{ $labels.env }} environment has been down for more than 1 minute." 
+ - name: backend.alerts + rules: + - alert: LocalBackendDown + expr: last_over_time(up{job=~"tdp-backend.*"}[1m]) == 0 + for: 1m + labels: + severity: ERROR + annotations: + summary: "The {{ $labels.service }} service is down." + description: "The {{ $labels.service }} service in the {{ $labels.env }} environment has been down for more than 1 minute." + - name: plg.alerts + rules: + - alert: LocalLokiDown + expr: last_over_time(up{job="loki"}[1m]) == 0 + labels: + severity: ERROR + annotations: + summary: "The {{ $labels.service }} service is down." + description: "The {{ $labels.service }} service in the {{ $labels.env }} environment has been down for more than 1 minute." + - name: app.alerts + rules: + - alert: UpTime + expr: avg_over_time(up[1m]) < 0.95 + labels: + severity: WARNING + annotations: + summary: "The {{ $labels.service }} service has a uptime warning." + description: "The {{ $labels.service }} service in the {{ $labels.env }} environment is not maintaining 95% uptime." diff --git a/tdrs-backend/plg/prometheus/alerts.yml b/tdrs-backend/plg/prometheus/alerts.yml new file mode 100644 index 000000000..affe54498 --- /dev/null +++ b/tdrs-backend/plg/prometheus/alerts.yml @@ -0,0 +1,73 @@ +groups: + - name: database.alerts + rules: + - alert: DevDatabaseDown + expr: last_over_time(pg_up{job="postgres-dev"}[1m]) == 0 + labels: + severity: ERROR + annotations: + summary: "The {{ $labels.service }} service is down." + description: "The {{ $labels.service }} service in the {{ $labels.env }} environment has been down for more than 1 minute." + - alert: StagingDatabaseDown + expr: last_over_time(pg_up{job="postgres-staging"}[1m]) == 0 + labels: + severity: ERROR + annotations: + summary: "The {{ $labels.service }} service is down." + description: "The {{ $labels.service }} service in the {{ $labels.env }} environment has been down for more than 1 minute." 
+      - alert: ProductionDatabaseDown
+        expr: last_over_time(pg_up{job="postgres-production"}[1m]) == 0
+        labels:
+          severity: CRITICAL
+        annotations:
+          summary: "The {{ $labels.service }} service is down."
+          description: "The {{ $labels.service }} service in the {{ $labels.env }} environment has been down for more than 1 minute."
+  - name: backend.alerts
+    rules:
+      - alert: DevEnvironmentBackendDown
+        expr: last_over_time(up{job=~"tdp-backend.*", job!~".*prod", job!~".*staging"}[5m]) == 0
+        labels:
+          severity: ERROR
+        annotations:
+          summary: "The {{ $labels.service }} service is down."
+          description: "The {{ $labels.service }} service in the {{ $labels.env }} environment has been down for more than 5 minutes."
+      - alert: StagingBackendDown
+        expr: last_over_time(up{job=~"tdp-backend-staging"}[1m]) == 0
+        labels:
+          severity: ERROR
+        annotations:
+          summary: "The {{ $labels.service }} service is down."
+          description: "The {{ $labels.service }} service in the {{ $labels.env }} environment has been down for more than 1 minute."
+      - alert: ProductionBackendDown
+        expr: last_over_time(up{job=~"tdp-backend-prod"}[1m]) == 0
+        labels:
+          severity: CRITICAL
+        annotations:
+          summary: "The {{ $labels.service }} service is down."
+          description: "The {{ $labels.service }} service in the {{ $labels.env }} environment has been down for more than 1 minute."
+  - name: plg.alerts
+    rules:
+      - alert: LokiDown
+        expr: last_over_time(up{job="loki"}[1m]) == 0
+        labels:
+          severity: ERROR
+        annotations:
+          summary: "The {{ $labels.service }} service is down."
+          description: "The {{ $labels.service }} service in the {{ $labels.env }} environment has been down for more than 1 minute."
+      - alert: GrafanaDown
+        expr: last_over_time(up{job="grafana"}[1m]) == 0
+        labels:
+          severity: ERROR
+        annotations:
+          summary: "The {{ $labels.service }} service is down."
+          description: "The {{ $labels.service }} service in the {{ $labels.env }} environment has been down for more than 1 minute."
+ - name: app.alerts + rules: + - alert: UpTime + expr: avg_over_time(up[1d]) < 0.95 + for: 30m + labels: + severity: WARNING + annotations: + summary: "The {{ $labels.service }} service has a uptime warning." + description: "The {{ $labels.service }} service in the {{ $labels.env }} environment is not maintaining 95% uptime." diff --git a/tdrs-backend/plg/prometheus/django_rules.yml b/tdrs-backend/plg/prometheus/django-rules.yml similarity index 100% rename from tdrs-backend/plg/prometheus/django_rules.yml rename to tdrs-backend/plg/prometheus/django-rules.yml diff --git a/tdrs-backend/plg/prometheus/prometheus.local.yml b/tdrs-backend/plg/prometheus/prometheus.local.yml index b9d8256b1..8b0a4517d 100644 --- a/tdrs-backend/plg/prometheus/prometheus.local.yml +++ b/tdrs-backend/plg/prometheus/prometheus.local.yml @@ -7,18 +7,19 @@ global: # Alertmanager configuration alerting: alertmanagers: - - static_configs: + - path_prefix: /alerts + static_configs: - targets: - # - alertmanager:9093 + - alertmanager:9093 # Load rules once and periodically evaluate them according to the global 'evaluation_interval'. rule_files: - - "django_rules.yml" + - "django-rules.yml" + - "alerts.yml" # A scrape configuration containing exactly one endpoint to scrape: # Here it's Prometheus itself. scrape_configs: - # The job name is added as a label `job=` to any timeseries scraped from this config. 
- job_name: "prometheus" static_configs: - targets: ["localhost:9090"] @@ -27,11 +28,35 @@ scrape_configs: metrics_path: "/prometheus/metrics" static_configs: - targets: ["web:8080"] + labels: + service: "tdp-backend" + env: "local" - job_name: "celery" static_configs: - targets: ["celery-exporter:9540"] + labels: + service: "celery" + env: "local" - job_name: postgres static_configs: - targets: ["postgres-exporter:9187"] + labels: + service: "postgres" + env: "local" + + - job_name: loki + static_configs: + - targets: ["loki:3100"] + labels: + service: "loki" + env: "local" + + - job_name: grafana + metrics_path: /grafana/metrics + static_configs: + - targets: ["grafana:9400"] + labels: + service: "grafana" + env: "local" diff --git a/tdrs-backend/plg/prometheus/prometheus.yml b/tdrs-backend/plg/prometheus/prometheus.yml index a8afaaa38..007422b26 100644 --- a/tdrs-backend/plg/prometheus/prometheus.yml +++ b/tdrs-backend/plg/prometheus/prometheus.yml @@ -6,24 +6,31 @@ global: # Alertmanager configuration alerting: alertmanagers: - - static_configs: + - path_prefix: /alerts + static_configs: - targets: - # - alertmanager:9093 + # - alertmanager.apps.internal:8080 # Load rules once and periodically evaluate them according to the global 'evaluation_interval'. rule_files: - - "django_rules.yml" + - "django-rules.yml" scrape_configs: # The job name is added as a label `job=` to any timeseries scraped from this config. 
- job_name: "prometheus" static_configs: - targets: ["localhost:8080"] + labels: + service: "prometheus" + env: "production" - job_name: "tdp-backend-raft" metrics_path: "/prometheus/metrics" static_configs: - targets: ["tdp-backend-raft.apps.internal:8080"] + labels: + service: "tdp-backend" + env: "dev" - job_name: "tdp-backend-qasp" metrics_path: "/prometheus/metrics" @@ -34,29 +41,65 @@ scrape_configs: metrics_path: "/prometheus/metrics" static_configs: - targets: ["tdp-backend-develop.apps.internal:8080"] + labels: + service: "tdp-backend" + env: "dev" - job_name: "tdp-backend-staging" metrics_path: "/prometheus/metrics" static_configs: - targets: ["tdp-backend-staging.apps.internal:8080"] + labels: + service: "tdp-backend" + env: "staging" - job_name: "tdp-backend-prod" metrics_path: "/prometheus/metrics" static_configs: - targets: ["tdp-backend-prod.apps.internal:8080"] + labels: + service: "tdp-backend" + env: "production" - job_name: "celery-exporter-raft" static_configs: - targets: ["celery-exporter-raft.apps.internal:9540"] + labels: + service: "celery" + env: "dev" - job_name: postgres-dev static_configs: - targets: ["pg-exporter-dev.apps.internal:9187"] + labels: + service: "postgres" + env: "dev" - job_name: postgres-staging static_configs: - targets: ["pg-exporter-staging.apps.internal:9187"] + labels: + service: "postgres" + env: "staging" - job_name: postgres-production static_configs: - targets: ["pg-exporter-production.apps.internal:9187"] + labels: + service: "postgres" + env: "production" + + - job_name: loki + static_configs: + - targets: ["loki.apps.internal:3100"] + labels: + service: "loki" + env: "production" + + - job_name: grafana + metrics_path: /grafana/metrics + static_configs: + - targets: ["grafana.app.cloud.gov:9400"] + labels: + service: "grafana" + env: "production" diff --git a/tdrs-backend/tdpservice/urls.py b/tdrs-backend/tdpservice/urls.py index eb91ffe48..e6b22e876 100755 --- a/tdrs-backend/tdpservice/urls.py +++ 
b/tdrs-backend/tdpservice/urls.py @@ -11,7 +11,7 @@ from rest_framework.permissions import AllowAny -from .users.api.authorization_check import AuthorizationCheck, KibanaAuthorizationCheck, GrafanaAuthorizationCheck +from .users.api.authorization_check import AuthorizationCheck, KibanaAuthorizationCheck, PlgAuthorizationCheck from .users.api.login import TokenAuthorizationLoginDotGov, TokenAuthorizationAMS from .users.api.login import CypressLoginDotGovAuthenticationOverride from .users.api.login_redirect_oidc import LoginRedirectAMS, LoginRedirectLoginDotGov @@ -54,7 +54,7 @@ path("admin/", admin.site.urls, name="admin"), path("prometheus/", include("django_prometheus.urls")), path("kibana_auth_check/", KibanaAuthorizationCheck.as_view(), name="kibana-authorization-check"), - path("grafana_auth_check/", GrafanaAuthorizationCheck.as_view(), name="grafana-authorization-check"), + path("plg_auth_check/", PlgAuthorizationCheck.as_view(), name="plg-authorization-check"), ] + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT) # TODO: Supply `terms_of_service` argument in OpenAPI Info once implemented diff --git a/tdrs-backend/tdpservice/users/api/authorization_check.py b/tdrs-backend/tdpservice/users/api/authorization_check.py index 60da3a17b..1d7bed218 100644 --- a/tdrs-backend/tdpservice/users/api/authorization_check.py +++ b/tdrs-backend/tdpservice/users/api/authorization_check.py @@ -72,22 +72,24 @@ def get(self, request, *args, **kwargs): logger.debug(f"User: {user} has incorrect authentication credentials. 
Not allowing access to Kibana.")
         return HttpResponse(status=401)
 
 
-class GrafanaAuthorizationCheck(APIView):
+class PlgAuthorizationCheck(APIView):
     """Check if user is authorized to view Grafana."""
 
     query_string = False
-    pattern_name = "grafana-authorization-check"
+    pattern_name = "plg-authorization-check"
     permission_classes = [IsAuthenticated]
 
     def get(self, request, *args, **kwargs):
-        """Handle get request and verify user is authorized to access grafana."""
+        """Handle get request and verify user is authorized to access plg apps."""
         user = request.user
         user_in_valid_group = user.is_ofa_sys_admin or user.is_developer
 
         if user_in_valid_group:
-            logger.debug(f"User: {user} has correct authentication credentials. Allowing access to Grafana.")
+            logger.debug(f"User: {user} has correct authentication credentials. Allowing access to plg.")
             return HttpResponse(status=200)
         else:
-            logger.debug(f"User: {user} has incorrect authentication credentials. Not allowing access to Grafana.")
+            logger.debug(f"User: {user} has incorrect authentication credentials. 
Not allowing access to plg.") return HttpResponse(status=401) diff --git a/tdrs-frontend/docker-compose.yml b/tdrs-frontend/docker-compose.yml index 13094148b..4a1a41fac 100644 --- a/tdrs-frontend/docker-compose.yml +++ b/tdrs-frontend/docker-compose.yml @@ -32,11 +32,12 @@ services: - LOCAL_DEV=true - KIBANA=kibana - GRAFANA=grafana + - ALERTS=alertmanager - REACT_APP_DEVAUTH=${REACT_APP_DEVAUTH} command: > /bin/sh -c "echo 'starting nginx' && - envsubst '$${BACK_END} $${KIBANA} $${GRAFANA}' < /etc/nginx/locations.conf > /etc/nginx/locations_.conf && + envsubst '$${BACK_END} $${KIBANA} $${GRAFANA} $${ALERTS}' < /etc/nginx/locations.conf > /etc/nginx/locations_.conf && rm /etc/nginx/locations.conf && cp /etc/nginx/locations_.conf /etc/nginx/locations.conf && envsubst ' diff --git a/tdrs-frontend/nginx/local/locations.conf b/tdrs-frontend/nginx/local/locations.conf index e25dad318..29ec9dec3 100644 --- a/tdrs-frontend/nginx/local/locations.conf +++ b/tdrs-frontend/nginx/local/locations.conf @@ -62,7 +62,7 @@ location = /kibana_auth_check { } location /grafana/ { - auth_request /grafana_auth_check; + auth_request /plg_auth_check; auth_request_set $auth_status $upstream_status; set $grafana http://${GRAFANA}:9400$request_uri; @@ -79,9 +79,27 @@ location /grafana/ { proxy_buffer_size 4k; } -location = /grafana_auth_check { +location /alerts/ { + auth_request /plg_auth_check; + auth_request_set $auth_status $upstream_status; + + set $alerts http://${ALERTS}:9093$request_uri; + proxy_pass $alerts; + proxy_set_header Host $host:3000; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-Forwarded-Proto https; + + proxy_connect_timeout 300; + proxy_read_timeout 300; + proxy_send_timeout 300; + send_timeout 900; + proxy_buffer_size 4k; +} + +location = /plg_auth_check { internal; - set $endpoint http://${BACK_END}:8080/grafana_auth_check/; + set $endpoint http://${BACK_END}:8080/plg_auth_check/; 
proxy_pass $endpoint$1$is_args$args; proxy_set_header Host $host:3000; proxy_set_header X-Real-IP $remote_addr; diff --git a/tdrs-frontend/src/components/Header/Header.jsx b/tdrs-frontend/src/components/Header/Header.jsx index cd25c3930..602a2a613 100644 --- a/tdrs-frontend/src/components/Header/Header.jsx +++ b/tdrs-frontend/src/components/Header/Header.jsx @@ -8,7 +8,7 @@ import { accountIsInReview, accountCanViewAdmin, accountCanViewKibana, - accountCanViewGrafana, + accountCanViewPlg, } from '../../selectors/auth' import NavItem from '../NavItem/NavItem' @@ -32,7 +32,7 @@ function Header() { const userAccessRequestApproved = useSelector(accountStatusIsApproved) const userIsAdmin = useSelector(accountCanViewAdmin) const userViewKibana = useSelector(accountCanViewKibana) - const userViewGrafana = useSelector(accountCanViewGrafana) + const userViewPlg = useSelector(accountCanViewPlg) const menuRef = useRef() @@ -148,12 +148,19 @@ function Header() { href={`${process.env.REACT_APP_BACKEND_HOST}/kibana/`} /> )} - {userViewGrafana && ( - + {userViewPlg && ( + <> + + + )} )} diff --git a/tdrs-frontend/src/components/SiteMap/SiteMap.jsx b/tdrs-frontend/src/components/SiteMap/SiteMap.jsx index e7355842c..84e38dda0 100644 --- a/tdrs-frontend/src/components/SiteMap/SiteMap.jsx +++ b/tdrs-frontend/src/components/SiteMap/SiteMap.jsx @@ -4,14 +4,14 @@ import { accountStatusIsApproved, accountCanViewAdmin, accountCanViewKibana, - accountCanViewGrafana, + accountCanViewPlg, } from '../../selectors/auth' const SiteMap = ({ user }) => { const userIsApproved = useSelector(accountStatusIsApproved) const userIsAdmin = useSelector(accountCanViewAdmin) const userViewKibana = useSelector(accountCanViewKibana) - const userViewGrafana = useSelector(accountCanViewGrafana) + const userViewPlg = useSelector(accountCanViewPlg) return (
@@ -43,11 +43,17 @@ const SiteMap = ({ user }) => { /> )} - {userViewGrafana && ( - + {userViewPlg && ( + <> + + + )}
) diff --git a/tdrs-frontend/src/selectors/auth.js b/tdrs-frontend/src/selectors/auth.js index e6143ff5e..081196160 100644 --- a/tdrs-frontend/src/selectors/auth.js +++ b/tdrs-frontend/src/selectors/auth.js @@ -72,6 +72,6 @@ export const accountCanViewKibana = (state) => selectPrimaryUserRole(state)?.name ) -export const accountCanViewGrafana = (state) => +export const accountCanViewPlg = (state) => accountStatusIsApproved(state) && ['OFA System Admin', 'Developer'].includes(selectPrimaryUserRole(state)?.name)