---
alertmanager_version: 0.15.2
alertmanager_config_dir: /etc/alertmanager
alertmanager_db_dir: /var/lib/alertmanager
alertmanager_config_file: 'alertmanager.yml.j2'
alertmanager_web_listen_address: '0.0.0.0:9093'
alertmanager_web_external_url: 'http://localhost:9093/'
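# If Alertmanager is served behind a reverse proxy, override the external URL
# with the public address instead, e.g. (hypothetical host):
# alertmanager_web_external_url: 'http://alertmanager.example.org/'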
alertmanager_resolve_timeout: 3m
alertmanager_config_flags_extra: {}
# alertmanager_config_flags_extra:
#   data.retention: 120h
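# Each key/value pair above is passed to the alertmanager binary as a
# --<key>=<value> command-line flag; note that --data.retention expects a
# duration such as 120h, not a bare number.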
# SMTP default params
alertmanager_smtp: {}
# alertmanager_smtp:
#   from: ''
#   smarthost: ''
#   auth_username: ''
#   auth_password: ''
#   auth_secret: ''
#   auth_identity: ''
#   auth_require_tls: "True"
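# These values are expected to be rendered as the corresponding smtp_*
# options in the `global:` section of the generated alertmanager.yml.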
# Defaults for these settings are documented at https://prometheus.io/docs/alerting/configuration/
alertmanager_slack_api_url: ''
alertmanager_pagerduty_url: ''
alertmanager_opsgenie_api_host: ''
alertmanager_hipchat_url: ''
alertmanager_hipchat_auth_token: ''
alertmanager_wechat_url: ''
alertmanager_wechat_secret: ''
alertmanager_wechat_corp_id: ''
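# Example (placeholder webhook URL, not a real token):
# alertmanager_slack_api_url: 'https://hooks.slack.com/services/T00000000/B00000000/XXXXXXXXXXXXXXXXXXXXXXXX'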
# Read this first: https://github.com/prometheus/alertmanager#high-availability
alertmanager_cluster: {}
# alertmanager_cluster:
#   listen-address: "{{ ansible_default_ipv4.address }}:6783"
#   peers:
#     - "{{ ansible_default_ipv4.address }}:6783"
#     - "demo.cloudalchemy.org:6783"
alertmanager_receivers: []
# alertmanager_receivers:
#   - name: slack
#     slack_configs:
#       - send_resolved: true
#         api_url: "{{ alertmanager_slack_api_url }}"
#         channel: '#alerts'
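# A hypothetical e-mail receiver using the SMTP settings above could look like:
#   - name: email
#     email_configs:
#       - send_resolved: true
#         to: 'oncall@example.org'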
alertmanager_inhibit_rules: []
# alertmanager_inhibit_rules:
#   - target_match:
#       label: value
#     source_match:
#       label: value
#     equal: ['dc', 'rack']
#   - target_match_re:
#       label: value1|value2
#     source_match_re:
#       label: value3|value5
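# An inhibit rule mutes alerts matching target_match while an alert matching
# source_match is firing and both alerts carry equal values for every label
# listed under `equal`.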
alertmanager_route: {}
# alertmanager_route:
#   group_by: ['alertname', 'cluster', 'service']
#   group_wait: 30s
#   group_interval: 5m
#   repeat_interval: 4h
#   receiver: slack
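# This becomes the top-level route of the generated configuration; its
# receiver is the default for any alert that no child route below matches.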
alertmanager_child_routes: []
# alertmanager_child_routes:
#   # This route performs a regular expression match on alert labels to
#   # catch alerts that are related to a list of services.
#   - match_re:
#       service: ^(foo1|foo2|baz)$
#     receiver: team-X-mails
#     # The service has a sub-route for critical alerts; any alerts that do
#     # not match (i.e. severity != critical) fall back to the parent node
#     # and are sent to 'team-X-mails'.
#     routes:
#       - match:
#           severity: critical
#         receiver: team-X-pager
#   - match:
#       service: files
#     receiver: team-Y-mails
#     routes:
#       - match:
#           severity: critical
#         receiver: team-Y-pager
#   # This route handles all alerts coming from a database service. If there's
#   # no team to handle it, it defaults to the DB team.
#   - match:
#       service: database
#     receiver: team-DB-pager
#     # Also group alerts by affected database.
#     group_by: [alertname, cluster, database]
#     routes:
#       - match:
#           owner: team-X
#         receiver: team-X-pager
#       - match:
#           owner: team-Y
#         receiver: team-Y-pager
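# A minimal playbook sketch using these defaults (the Galaxy role name
# cloudalchemy.alertmanager is assumed here):
# - hosts: alerting
#   roles:
#     - cloudalchemy.alertmanager
#   vars:
#     alertmanager_slack_api_url: 'https://hooks.slack.com/services/XXX'
#     alertmanager_route:
#       receiver: slack
#     alertmanager_receivers:
#       - name: slack
#         slack_configs:
#           - send_resolved: true
#             channel: '#alerts'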