From 935b7e1fdddeee7cae9248d1890b78e0f9949fc9 Mon Sep 17 00:00:00 2001
From: durgin
Date: Thu, 18 Aug 2022 20:36:40 +0800
Subject: [PATCH 1/3] feat(alerts) add dingtalk alerts

---
 README.md                           |  1 +
 elastalert/alerts.py                | 55 +++++++++++++++++++++-
 elastalert/loaders.py               |  3 +-
 elastalert/util.py                  | 18 ++++++++
 example_rules/example_dingtalk.yaml | 72 +++++++++++++++++++++++++++++
 5 files changed, 146 insertions(+), 3 deletions(-)
 create mode 100644 example_rules/example_dingtalk.yaml

diff --git a/README.md b/README.md
index 99acc02e7..f0604ab53 100644
--- a/README.md
+++ b/README.md
@@ -57,6 +57,7 @@ Currently, we have built-in support for the following alert types:
 - Gitter
 - Line Notify
 - Zabbix
+- dingtalk
 
 Additional rule types and alerts can be easily imported or written.
 
diff --git a/elastalert/alerts.py b/elastalert/alerts.py
index f2f31853f..dbc80b3f3 100644
--- a/elastalert/alerts.py
+++ b/elastalert/alerts.py
@@ -32,7 +32,7 @@
 from twilio.base.exceptions import TwilioRestException
 from twilio.rest import Client as TwilioClient
 
-from .util import EAException
+from .util import EAException, get_timestamp_sign
 from .util import elastalert_logger
 from .util import lookup_es_key
 from .util import pretty_ts
@@ -1073,7 +1073,6 @@ def format_body(self, body):
 
     def alert(self, matches):
         body = self.create_alert_body(matches)
-        body = self.format_body(body)
 
         # post to Teams
         headers = {'content-type': 'application/json'}
@@ -2184,3 +2183,55 @@ def get_info(self):
             'type': 'hivealerter',
             'hive_host': self.rule.get('hive_connection', {}).get('hive_host', '')
         }
+
+
+class DingTalkAlerter(Alerter):
+    required_options = frozenset(['dingtalk_webhook', 'dingtalk_msgtype', "dingtalk_isAtAll", "dingtalk_token", "dingtalk_title"])
+
+    def __init__(self, rule):
+        super(DingTalkAlerter, self).__init__(rule)
+        self.dingtalk_webhook_url = self.rule['dingtalk_webhook']
+        self.dingtalk_message = self.rule.get('dingtalk_msgtype', 'text')
+        self.dingtalk_isAtAll = self.rule.get('dingtalk_isAtAll', False)
+        self.dingtalk_token = self.rule.get('dingtalk_token', '')
+        self.dingtalk_title = self.rule.get('dingtalk_title', '')
+
+    def format_body(self, body):
+        return body.encode('utf8')
+
+    def get_signed_url(self):
+        # DingTalk robots configured with a security token require the webhook
+        # URL to carry a millisecond timestamp and an HMAC-SHA256 signature.
+        timestamp, sign = get_timestamp_sign(self.dingtalk_token)
+        webhook = self.dingtalk_webhook_url + "&timestamp=" + timestamp + "&sign=" + sign
+        return webhook
+
+    def alert(self, matches):
+        headers = {
+            "Content-Type": "application/json",
+            "Accept": "application/json;charset=utf-8"
+        }
+        body = self.create_alert_body(matches)
+        payload = {
+            "msgtype": self.dingtalk_message,
+            "text": {
+                "content": body
+            },
+            "at": {
+                "isAtAll": self.dingtalk_isAtAll
+            }
+        }
+        try:
+            response = requests.post(self.get_signed_url(),
+                                     data=json.dumps(payload),
+                                     headers=headers)
+            response.raise_for_status()
+        except RequestException as e:
+            raise EAException("Error posting to DingTalk: {0}".format(str(e)))
+        elastalert_logger.info("Alert sent to DingTalk")
+
+    def get_info(self):
+        return {
+            "type": "dingtalk",
+            "dingtalk_webhook": self.dingtalk_webhook_url
+        }
diff --git a/elastalert/loaders.py b/elastalert/loaders.py
index 771194768..82cf7cf7b 100644
--- a/elastalert/loaders.py
+++ b/elastalert/loaders.py
@@ -77,7 +77,8 @@ class RulesLoader(object):
         'servicenow': alerts.ServiceNowAlerter,
         'alerta': alerts.AlertaAlerter,
         'post': alerts.HTTPPostAlerter,
-        'hivealerter': alerts.HiveAlerter
+        'hivealerter': alerts.HiveAlerter,
+        'dingtalk': alerts.DingTalkAlerter
     }
 
     # A partial ordering of alert types. Relative order will be preserved in the resulting alerts list
diff --git a/elastalert/util.py b/elastalert/util.py
index bbb0600ff..27427c0d9 100644
--- a/elastalert/util.py
+++ b/elastalert/util.py
@@ -1,10 +1,15 @@
 # -*- coding: utf-8 -*-
+import base64
 import collections
 import datetime
+import hashlib
+import hmac
 import logging
 import os
 import re
 import sys
+import time
+import urllib.parse
 
 import dateutil.parser
 import pytz
@@ -460,3 +465,16 @@ def should_scrolling_continue(rule_conf):
     stop_the_scroll = 0 < max_scrolling <= rule_conf.get('scrolling_cycle')
 
     return not stop_the_scroll
+
+
+def get_timestamp_sign(token):
+    # Build the millisecond timestamp and HMAC-SHA256 signature that DingTalk
+    # requires for robots configured with a security token.
+    timestamp = str(round(time.time() * 1000))
+    secret_enc = token.encode('utf-8')
+    string_to_sign = '{}\n{}'.format(timestamp, token)
+    string_to_sign_enc = string_to_sign.encode('utf-8')
+    hmac_code = hmac.new(secret_enc, string_to_sign_enc,
+                         digestmod=hashlib.sha256).digest()
+    sign = urllib.parse.quote_plus(base64.b64encode(hmac_code))
+    return timestamp, sign
diff --git a/example_rules/example_dingtalk.yaml b/example_rules/example_dingtalk.yaml
new file mode 100644
index 000000000..8bce00eb9
--- /dev/null
+++ b/example_rules/example_dingtalk.yaml
@@ -0,0 +1,72 @@
+# This is the folder that contains the rule yaml files
+# Any .yaml file will be loaded as a rule
+rules_folder: rules
+
+# How often ElastAlert will query Elasticsearch
+# The unit can be anything from weeks to seconds
+run_every:
+  minutes: 1
+
+# ElastAlert will buffer results from the most recent
+# period of time, in case some log sources are not in real time
+buffer_time:
+  minutes: 15
+
+# The Elasticsearch hostname for metadata writeback
+# Note that every rule can have its own Elasticsearch host
+es_host: localhost
+
+# The Elasticsearch port
+es_port: 9200
+
+# The AWS region to use. Set this when using AWS-managed elasticsearch
+#aws_region: us-east-1
+
+# The AWS profile to use. Use this if you are using an aws-cli profile.
+# See http://docs.aws.amazon.com/cli/latest/userguide/cli-chap-getting-started.html
+# for details
+#profile: test
+
+# Optional URL prefix for Elasticsearch
+#es_url_prefix: elasticsearch
+
+# Connect with TLS to Elasticsearch
+#use_ssl: True
+
+# Verify TLS certificates
+#verify_certs: True
+
+# GET request with body is the default option for Elasticsearch.
+# If it fails for some reason, you can pass 'GET', 'POST' or 'source'.
+# See http://elasticsearch-py.readthedocs.io/en/master/connection.html?highlight=send_get_body_as#transport +# for details +#es_send_get_body_as: GET + +# Option basic-auth username and password for Elasticsearch +#es_username: elasticusername +#es_password: elasticpassword + +# The index on es_host which is used for metadata storage +# This can be a unmapped index, but it is recommended that you run +# elastalert-create-index to set a mapping +writeback_index: elastalert_status + +# If an alert fails for some reason, ElastAlert will retry +# sending the alert until this time period has elapsed +alert_time_limit: + days: 2 + + +# Use the token method when creating the pinning robot +alert: +- dingtalk + +dingtalk_webhook: your-dingtalk-robot-webhook-url +# dingtalk msgType such as text +dingtalk_msgtype: text +# dingtalk at allUser +dingtalk_isAtAll: True +# dingtalk robot token such as: SEC8423d34b07446fbcc4cf3abe6b71f3d36b5152cdf54cf5dd29482180ce2b2513 +dingtalk_token: "" +# dingtalk title such as: +dingtalk_title: "" From 30548eb64072519f756ff345145f7948dec5efd0 Mon Sep 17 00:00:00 2001 From: durgin Date: Wed, 17 Aug 2022 14:16:47 +0800 Subject: [PATCH 2/3] feat(dingtalk) add dingtalk alter for elastalert --- README.md | 347 +++------------------------- elastalert/alerts.py | 2 +- example_rules/example_dingtalk.yaml | 2 +- 3 files changed, 38 insertions(+), 313 deletions(-) diff --git a/README.md b/README.md index f0604ab53..3394fec24 100644 --- a/README.md +++ b/README.md @@ -1,324 +1,49 @@ -Recent changes: As of Elastalert 0.2.0, you must use Python 3.6. Python 2 will not longer be supported. +# ElastAlert -[![Build Status](https://travis-ci.org/Yelp/elastalert.svg)](https://travis-ci.org/Yelp/elastalert) -[![Join the chat at https://gitter.im/Yelp/elastalert](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/Yelp/elastalert?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) +## 使用 Elasticsearch 轻松灵活地发出警报 +ElastAlert 是一个简单的框架,用于对来自 Elasticsearch 中的数据的异常、峰值或其他感兴趣的模式发出警报。 -## ElastAlert - [Read the Docs](http://elastalert.readthedocs.org). -### Easy & Flexible Alerting With Elasticsearch +ElastAlert 适用于所有版本的 Elasticsearch。 -ElastAlert is a simple framework for alerting on anomalies, spikes, or other patterns of interest from data in Elasticsearch. +在 Yelp,我们使用 Elasticsearch、Logstash 和 Kibana 来管理我们不断增加的数据和日志量。Kibana 非常适合可视化和查询数据,但我们很快意识到它需要一个配套工具来提醒我们数据中的不一致。出于这种需要,创建了 ElastAlert。 -ElastAlert works with all versions of Elasticsearch. +如果您将数据近乎实时地写入 Elasticsearch,并希望在该数据与特定模式匹配时收到警报,那么 ElastAlert 就是您的理想之选。如果您可以在 Kibana 中看到它,ElastAlert 可以对其发出警报。 -At Yelp, we use Elasticsearch, Logstash and Kibana for managing our ever increasing amount of data and logs. -Kibana is great for visualizing and querying data, but we quickly realized that it needed a companion tool for alerting -on inconsistencies in our data. Out of this need, ElastAlert was created. +## 概述 +我们将 ElastAlert 设计为可靠、高度模块化且易于设置和配置。 -If you have data being written into Elasticsearch in near real time and want to be alerted when that data matches certain patterns, ElastAlert is the tool for you. If you can see it in Kibana, ElastAlert can alert on it. +它通过将 Elasticsearch 与两种类型的组件、规则类型和警报相结合来工作。Elasticsearch 会定期查询并将数据传递给规则类型,该类型确定何时找到匹配项。发生匹配时,会收到一个或多个警报,这些警报会根据匹配采取行动。 -## Overview +这是由一组规则配置的,每个规则定义一个查询、一个规则类型和一组警报。 -We designed ElastAlert to be reliable, highly modular, and easy to set up and configure. 
+ElastAlert 包含几种具有常见监控范例的规则类型: -It works by combining Elasticsearch with two types of components, rule types and alerts. -Elasticsearch is periodically queried and the data is passed to the rule type, which determines when -a match is found. When a match occurs, it is given to one or more alerts, which take action based on the match. +- 匹配 Y 时间至少有 X 个事件的位置”(frequency类型) +- 当事件发生率增加或减少时匹配”(spike类型) +- 在 Y 时间少于 X 个事件时匹配”(flatline类型) +- 当某个字段匹配黑名单/白名单时匹配”(blacklist和whitelist类型) +- 匹配与给定过滤器匹配的任何事件”(any类型) +- 当一个字段在一段时间内有两个不同的值时匹配”(change类型) +- 当字段中出现从未见过的术语时匹配”(new_term类型) +- 当字段的唯一值数量高于或低于阈值(cardinality类型)时匹配 -This is configured by a set of rules, each of which defines a query, a rule type, and a set of alerts. - -Several rule types with common monitoring paradigms are included with ElastAlert: - -- Match where there are at least X events in Y time" (``frequency`` type) -- Match when the rate of events increases or decreases" (``spike`` type) -- Match when there are less than X events in Y time" (``flatline`` type) -- Match when a certain field matches a blacklist/whitelist" (``blacklist`` and ``whitelist`` type) -- Match on any event matching a given filter" (``any`` type) -- Match when a field has two different values within some time" (``change`` type) -- Match when a never before seen term appears in a field" (``new_term`` type) -- Match when the number of unique values for a field is above or below a threshold (``cardinality`` type) - -Currently, we have built-in support for the following alert types: - -- Email +目前,我们内置了对以下警报方式的支持: +- 电子邮件 - JIRA -- OpsGenie -- Commands -- HipChat -- MS Teams -- Slack -- Telegram -- GoogleChat +- 行动精灵 +- 命令 +- 嘻哈聊天 +- 微软团队 +- 松弛 +- 电报 +- 谷歌聊天 - AWS SNS -- VictorOps -- PagerDuty -- PagerTree -- Exotel -- Twilio -- Gitter -- Line Notify -- Zabbix -- dingtalk - -Additional rule types and alerts can be easily imported or written. - -In addition to this basic usage, there are many other features that make alerts more useful: - -- Alerts link to Kibana dashboards -- Aggregate counts for arbitrary fields -- Combine alerts into periodic reports -- Separate alerts by using a unique key field -- Intercept and enhance match data - -To get started, check out `Running ElastAlert For The First Time` in the [documentation](http://elastalert.readthedocs.org). - -## Running ElastAlert -You can either install the latest released version of ElastAlert using pip: - -```pip install elastalert``` - -or you can clone the ElastAlert repository for the most recent changes: - -```git clone https://github.com/Yelp/elastalert.git``` - -Install the module: - -```pip install "setuptools>=11.3"``` - -```python setup.py install``` - -The following invocation can be used to run ElastAlert after installing - -``$ elastalert [--debug] [--verbose] [--start ] [--end ] [--rule ] [--config ]`` - -``--debug`` will print additional information to the screen as well as suppresses alerts and instead prints the alert body. Not compatible with `--verbose`. - -``--verbose`` will print additional information without suppressing alerts. Not compatible with `--debug.` - -``--start`` will begin querying at the given timestamp. By default, ElastAlert will begin querying from the present. -Timestamp format is ``YYYY-MM-DDTHH-MM-SS[-/+HH:MM]`` (Note the T between date and hour). -Eg: ``--start 2014-09-26T12:00:00`` (UTC) or ``--start 2014-10-01T07:30:00-05:00`` - -``--end`` will cause ElastAlert to stop querying at the given timestamp. By default, ElastAlert will continue -to query indefinitely. 
- -``--rule`` will allow you to run only one rule. It must still be in the rules folder. -Eg: ``--rule this_rule.yaml`` - -``--config`` allows you to specify the location of the configuration. By default, it is will look for config.yaml in the current directory. - -## Third Party Tools And Extras -### Kibana plugin -![img](https://raw.githubusercontent.com/bitsensor/elastalert-kibana-plugin/master/showcase.gif) -Available at the [ElastAlert Kibana plugin repository](https://github.com/bitsensor/elastalert-kibana-plugin). - -### Docker -A [Dockerized version](https://github.com/bitsensor/elastalert) of ElastAlert including a REST api is build from `master` to `bitsensor/elastalert:latest`. - -```bash -git clone https://github.com/bitsensor/elastalert.git; cd elastalert -docker run -d -p 3030:3030 \ - -v `pwd`/config/elastalert.yaml:/opt/elastalert/config.yaml \ - -v `pwd`/config/config.json:/opt/elastalert-server/config/config.json \ - -v `pwd`/rules:/opt/elastalert/rules \ - -v `pwd`/rule_templates:/opt/elastalert/rule_templates \ - --net="host" \ - --name elastalert bitsensor/elastalert:latest -``` - -## Documentation - -Read the documentation at [Read the Docs](http://elastalert.readthedocs.org). - -To build a html version of the docs locally - -``` -pip install sphinx_rtd_theme sphinx -cd docs -make html -``` - -View in browser at build/html/index.html - -## Configuration - -See config.yaml.example for details on configuration. - -## Example rules - -Examples of different types of rules can be found in example_rules/. - -- ``example_spike.yaml`` is an example of the "spike" rule type, which allows you to alert when the rate of events, averaged over a time period, -increases by a given factor. This example will send an email alert when there are 3 times more events matching a filter occurring within the -last 2 hours than the number of events in the previous 2 hours. - -- ``example_frequency.yaml`` is an example of the "frequency" rule type, which will alert when there are a given number of events occuring -within a time period. This example will send an email when 50 documents matching a given filter occur within a 4 hour timeframe. - -- ``example_change.yaml`` is an example of the "change" rule type, which will alert when a certain field in two documents changes. In this example, -the alert email is sent when two documents with the same 'username' field but a different value of the 'country_name' field occur within 24 hours -of each other. - -- ``example_new_term.yaml`` is an example of the "new term" rule type, which alerts when a new value appears in a field or fields. In this example, -an email is sent when a new value of ("username", "computer") is encountered in example login logs. - -## Frequently Asked Questions - -### My rule is not getting any hits? - -So you've managed to set up ElastAlert, write a rule, and run it, but nothing happens, or it says ``0 query hits``. First of all, we recommend using the command ``elastalert-test-rule rule.yaml`` to debug. It will show you how many documents match your filters for the last 24 hours (or more, see ``--help``), and then shows you if any alerts would have fired. If you have a filter in your rule, remove it and try again. This will show you if the index is correct and that you have at least some documents. If you have a filter in Kibana and want to recreate it in ElastAlert, you probably want to use a query string. 
Your filter will look like - -``` -filter: -- query: - query_string: - query: "foo: bar AND baz: abc*" -``` -If you receive an error that Elasticsearch is unable to parse it, it's likely the YAML is not spaced correctly, and the filter is not in the right format. If you are using other types of filters, like ``term``, a common pitfall is not realizing that you may need to use the analyzed token. This is the default if you are using Logstash. For example, - -``` -filter: -- term: - foo: "Test Document" -``` - -will not match even if the original value for ``foo`` was exactly "Test Document". Instead, you want to use ``foo.raw``. If you are still having trouble troubleshooting why your documents do not match, try running ElastAlert with ``--es_debug_trace /path/to/file.log``. This will log the queries made to Elasticsearch in full so that you can see exactly what is happening. - -### I got hits, why didn't I get an alert? - -If you got logs that had ``X query hits, 0 matches, 0 alerts sent``, it depends on the ``type`` why you didn't get any alerts. If ``type: any``, a match will occur for every hit. If you are using ``type: frequency``, ``num_events`` must occur within ``timeframe`` of each other for a match to occur. Different rules apply for different rule types. - -If you see ``X matches, 0 alerts sent``, this may occur for several reasons. If you set ``aggregation``, the alert will not be sent until after that time has elapsed. If you have gotten an alert for this same rule before, that rule may be silenced for a period of time. The default is one minute between alerts. If a rule is silenced, you will see ``Ignoring match for silenced rule`` in the logs. - -If you see ``X alerts sent`` but didn't get any alert, it's probably related to the alert configuration. If you are using the ``--debug`` flag, you will not receive any alerts. Instead, the alert text will be written to the console. Use ``--verbose`` to achieve the same affects without preventing alerts. If you are using email alert, make sure you have it configured for an SMTP server. By default, it will connect to localhost on port 25. It will also use the word "elastalert" as the "From:" address. Some SMTP servers will reject this because it does not have a domain while others will add their own domain automatically. See the email section in the documentation for how to configure this. - -### Why did I only get one alert when I expected to get several? - -There is a setting called ``realert`` which is the minimum time between two alerts for the same rule. Any alert that occurs within this time will simply be dropped. The default value for this is one minute. If you want to receive an alert for every single match, even if they occur right after each other, use - -``` -realert: - minutes: 0 -``` - -You can of course set it higher as well. - -### How can I prevent duplicate alerts? - -By setting ``realert``, you will prevent the same rule from alerting twice in an amount of time. - -``` -realert: - days: 1 -``` - -You can also prevent duplicates based on a certain field by using ``query_key``. For example, to prevent multiple alerts for the same user, you might use - -``` -realert: - hours: 8 -query_key: user -``` - -Note that this will also affect the way many rule types work. If you are using ``type: frequency`` for example, ``num_events`` for a single value of ``query_key`` must occur before an alert will be sent. You can also use a compound of multiple fields for this key. 
For example, if you only wanted to receieve an alert once for a specific error and hostname, you could use - -``` -query_key: [error, hostname] -``` - -Internally, this works by creating a new field for each document called ``field1,field2`` with a value of ``value1,value2`` and using that as the ``query_key``. - -The data for when an alert will fire again is stored in Elasticsearch in the ``elastalert_status`` index, with a ``_type`` of ``silence`` and also cached in memory. - -### How can I change what's in the alert? - -You can use the field ``alert_text`` to add custom text to an alert. By setting ``alert_text_type: alert_text_only``, it will be the entirety of the alert. You can also add different fields from the alert by using Python style string formatting and ``alert_text_args``. For example - -``` -alert_text: "Something happened with {0} at {1}" -alert_text_type: alert_text_only -alert_text_args: ["username", "@timestamp"] -``` - -You can also limit the alert to only containing certain fields from the document by using ``include``. - -``` -include: ["ip_address", "hostname", "status"] -``` - -### My alert only contains data for one event, how can I see more? - -If you are using ``type: frequency``, you can set the option ``attach_related: true`` and every document will be included in the alert. An alternative, which works for every type, is ``top_count_keys``. This will show the top counts for each value for certain fields. For example, if you have - -``` -top_count_keys: ["ip_address", "status"] -``` - -and 10 documents matched your alert, it may contain something like - -``` -ip_address: -127.0.0.1: 7 -10.0.0.1: 2 -192.168.0.1: 1 - -status: -200: 9 -500: 1 -``` - -### How can I make the alert come at a certain time? - -The ``aggregation`` feature will take every alert that has occured over a period of time and send them together in one alert. You can use cron style syntax to send all alerts that have occured since the last once by using - -``` -aggregation: - schedule: '2 4 * * mon,fri' -``` - -### I have lots of documents and it's really slow, how can I speed it up? - -There are several ways to potentially speed up queries. If you are using ``index: logstash-*``, Elasticsearch will query all shards, even if they do not possibly contain data with the correct timestamp. Instead, you can use Python time format strings and set ``use_strftime_index`` - -``` -index: logstash-%Y.%m -use_strftime_index: true -``` - -Another thing you could change is ``buffer_time``. By default, ElastAlert will query large overlapping windows in order to ensure that it does not miss any events, even if they are indexed in real time. In config.yaml, you can adjust ``buffer_time`` to a smaller number to only query the most recent few minutes. - -``` -buffer_time: - minutes: 5 -``` - -By default, ElastAlert will download every document in full before processing them. Instead, you can have ElastAlert simply get a count of the number of documents that have occured in between each query. To do this, set ``use_count_query: true``. This cannot be used if you use ``query_key``, because ElastAlert will not know the contents of each documents, just the total number of them. This also reduces the precision of alerts, because all events that occur between each query will be rounded to a single timestamp. - -If you are using ``query_key`` (a single key, not multiple keys) you can use ``use_terms_query``. This will make ElastAlert perform a terms aggregation to get the counts for each value of a certain field. 
Both ``use_terms_query`` and ``use_count_query`` also require ``doc_type`` to be set to the ``_type`` of the documents. They may not be compatible with all rule types. - -### Can I perform aggregations? - -The only aggregation supported currently is a terms aggregation, by setting ``use_terms_query``. - -### I'm not using @timestamp, what do I do? - -You can use ``timestamp_field`` to change which field ElastAlert will use as the timestamp. You can use ``timestamp_type`` to change it between ISO 8601 and unix timestamps. You must have some kind of timestamp for ElastAlert to work. If your events are not in real time, you can use ``query_delay`` and ``buffer_time`` to adjust when ElastAlert will look for documents. - -### I'm using flatline but I don't see any alerts - -When using ``type: flatline``, ElastAlert must see at least one document before it will alert you that it has stopped seeing them. - -### How can I get a "resolve" event? - -ElastAlert does not currently support stateful alerts or resolve events. - -### Can I set a warning threshold? - -Currently, the only way to set a warning threshold is by creating a second rule with a lower threshold. - -## License - -ElastAlert is licensed under the Apache License, Version 2.0: http://www.apache.org/licenses/LICENSE-2.0 - -### Read the documentation at [Read the Docs](http://elastalert.readthedocs.org). +- 胜利者行动 +- 寻呼机 +- 寻呼树 +- 外星酒店 +- 特维利奥 +- 吉特 +- 线路通知 +- 扎比克斯 -### Questions? Drop by #elastalert on Freenode IRC. diff --git a/elastalert/alerts.py b/elastalert/alerts.py index dbc80b3f3..148326062 100644 --- a/elastalert/alerts.py +++ b/elastalert/alerts.py @@ -2186,7 +2186,7 @@ def get_info(self): class DingTalkAlerter(Alerter): - required_options = frozenset(['dingtalk_webhook', 'dingtalk_msgtype', "dingtalk_isAtAll", "dingtalk_token", "dingtalk_title"]) + required_options = frozenset(['dingtalk_webhook', 'dingtalk_message', "dingtalk_isAtAll", "dingtalk_token", "dingtalk_title"]) def __init__(self, rule): super(DingTalkAlerter, self).__init__(rule) diff --git a/example_rules/example_dingtalk.yaml b/example_rules/example_dingtalk.yaml index 8bce00eb9..99efd9cac 100644 --- a/example_rules/example_dingtalk.yaml +++ b/example_rules/example_dingtalk.yaml @@ -69,4 +69,4 @@ dingtalk_isAtAll: True # dingtalk robot token such as: SEC8423d34b07446fbcc4cf3abe6b71f3d36b5152cdf54cf5dd29482180ce2b2513 dingtalk_token: "" # dingtalk title such as: -dingtalk_title: "" +dingtalk_title: "castle-pre-usw" From d3573cece68f5afb39e6fdc4fa0f45535218e85d Mon Sep 17 00:00:00 2001 From: durgin Date: Fri, 7 Apr 2023 16:21:06 +0800 Subject: [PATCH 3/3] feat(dingtalk) README changed --- README.md | 346 ++++++++++++++++++++++++++---- example_rules/example_change.yaml | 10 +- 2 files changed, 315 insertions(+), 41 deletions(-) diff --git a/README.md b/README.md index 3394fec24..99acc02e7 100644 --- a/README.md +++ b/README.md @@ -1,49 +1,323 @@ -# ElastAlert +Recent changes: As of Elastalert 0.2.0, you must use Python 3.6. Python 2 will not longer be supported. -## 使用 Elasticsearch 轻松灵活地发出警报 -ElastAlert 是一个简单的框架,用于对来自 Elasticsearch 中的数据的异常、峰值或其他感兴趣的模式发出警报。 +[![Build Status](https://travis-ci.org/Yelp/elastalert.svg)](https://travis-ci.org/Yelp/elastalert) +[![Join the chat at https://gitter.im/Yelp/elastalert](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/Yelp/elastalert?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) -ElastAlert 适用于所有版本的 Elasticsearch。 +## ElastAlert - [Read the Docs](http://elastalert.readthedocs.org). 
+### Easy & Flexible Alerting With Elasticsearch -在 Yelp,我们使用 Elasticsearch、Logstash 和 Kibana 来管理我们不断增加的数据和日志量。Kibana 非常适合可视化和查询数据,但我们很快意识到它需要一个配套工具来提醒我们数据中的不一致。出于这种需要,创建了 ElastAlert。 +ElastAlert is a simple framework for alerting on anomalies, spikes, or other patterns of interest from data in Elasticsearch. -如果您将数据近乎实时地写入 Elasticsearch,并希望在该数据与特定模式匹配时收到警报,那么 ElastAlert 就是您的理想之选。如果您可以在 Kibana 中看到它,ElastAlert 可以对其发出警报。 +ElastAlert works with all versions of Elasticsearch. -## 概述 -我们将 ElastAlert 设计为可靠、高度模块化且易于设置和配置。 +At Yelp, we use Elasticsearch, Logstash and Kibana for managing our ever increasing amount of data and logs. +Kibana is great for visualizing and querying data, but we quickly realized that it needed a companion tool for alerting +on inconsistencies in our data. Out of this need, ElastAlert was created. -它通过将 Elasticsearch 与两种类型的组件、规则类型和警报相结合来工作。Elasticsearch 会定期查询并将数据传递给规则类型,该类型确定何时找到匹配项。发生匹配时,会收到一个或多个警报,这些警报会根据匹配采取行动。 +If you have data being written into Elasticsearch in near real time and want to be alerted when that data matches certain patterns, ElastAlert is the tool for you. If you can see it in Kibana, ElastAlert can alert on it. -这是由一组规则配置的,每个规则定义一个查询、一个规则类型和一组警报。 +## Overview -ElastAlert 包含几种具有常见监控范例的规则类型: +We designed ElastAlert to be reliable, highly modular, and easy to set up and configure. -- 匹配 Y 时间至少有 X 个事件的位置”(frequency类型) -- 当事件发生率增加或减少时匹配”(spike类型) -- 在 Y 时间少于 X 个事件时匹配”(flatline类型) -- 当某个字段匹配黑名单/白名单时匹配”(blacklist和whitelist类型) -- 匹配与给定过滤器匹配的任何事件”(any类型) -- 当一个字段在一段时间内有两个不同的值时匹配”(change类型) -- 当字段中出现从未见过的术语时匹配”(new_term类型) -- 当字段的唯一值数量高于或低于阈值(cardinality类型)时匹配 +It works by combining Elasticsearch with two types of components, rule types and alerts. +Elasticsearch is periodically queried and the data is passed to the rule type, which determines when +a match is found. When a match occurs, it is given to one or more alerts, which take action based on the match. -目前,我们内置了对以下警报方式的支持: -- 电子邮件 +This is configured by a set of rules, each of which defines a query, a rule type, and a set of alerts. + +Several rule types with common monitoring paradigms are included with ElastAlert: + +- Match where there are at least X events in Y time" (``frequency`` type) +- Match when the rate of events increases or decreases" (``spike`` type) +- Match when there are less than X events in Y time" (``flatline`` type) +- Match when a certain field matches a blacklist/whitelist" (``blacklist`` and ``whitelist`` type) +- Match on any event matching a given filter" (``any`` type) +- Match when a field has two different values within some time" (``change`` type) +- Match when a never before seen term appears in a field" (``new_term`` type) +- Match when the number of unique values for a field is above or below a threshold (``cardinality`` type) + +Currently, we have built-in support for the following alert types: + +- Email - JIRA -- 行动精灵 -- 命令 -- 嘻哈聊天 -- 微软团队 -- 松弛 -- 电报 -- 谷歌聊天 +- OpsGenie +- Commands +- HipChat +- MS Teams +- Slack +- Telegram +- GoogleChat - AWS SNS -- 胜利者行动 -- 寻呼机 -- 寻呼树 -- 外星酒店 -- 特维利奥 -- 吉特 -- 线路通知 -- 扎比克斯 +- VictorOps +- PagerDuty +- PagerTree +- Exotel +- Twilio +- Gitter +- Line Notify +- Zabbix + +Additional rule types and alerts can be easily imported or written. 
+ +In addition to this basic usage, there are many other features that make alerts more useful: + +- Alerts link to Kibana dashboards +- Aggregate counts for arbitrary fields +- Combine alerts into periodic reports +- Separate alerts by using a unique key field +- Intercept and enhance match data + +To get started, check out `Running ElastAlert For The First Time` in the [documentation](http://elastalert.readthedocs.org). + +## Running ElastAlert +You can either install the latest released version of ElastAlert using pip: + +```pip install elastalert``` + +or you can clone the ElastAlert repository for the most recent changes: + +```git clone https://github.com/Yelp/elastalert.git``` + +Install the module: + +```pip install "setuptools>=11.3"``` + +```python setup.py install``` + +The following invocation can be used to run ElastAlert after installing + +``$ elastalert [--debug] [--verbose] [--start ] [--end ] [--rule ] [--config ]`` + +``--debug`` will print additional information to the screen as well as suppresses alerts and instead prints the alert body. Not compatible with `--verbose`. + +``--verbose`` will print additional information without suppressing alerts. Not compatible with `--debug.` + +``--start`` will begin querying at the given timestamp. By default, ElastAlert will begin querying from the present. +Timestamp format is ``YYYY-MM-DDTHH-MM-SS[-/+HH:MM]`` (Note the T between date and hour). +Eg: ``--start 2014-09-26T12:00:00`` (UTC) or ``--start 2014-10-01T07:30:00-05:00`` + +``--end`` will cause ElastAlert to stop querying at the given timestamp. By default, ElastAlert will continue +to query indefinitely. + +``--rule`` will allow you to run only one rule. It must still be in the rules folder. +Eg: ``--rule this_rule.yaml`` + +``--config`` allows you to specify the location of the configuration. By default, it is will look for config.yaml in the current directory. + +## Third Party Tools And Extras +### Kibana plugin +![img](https://raw.githubusercontent.com/bitsensor/elastalert-kibana-plugin/master/showcase.gif) +Available at the [ElastAlert Kibana plugin repository](https://github.com/bitsensor/elastalert-kibana-plugin). + +### Docker +A [Dockerized version](https://github.com/bitsensor/elastalert) of ElastAlert including a REST api is build from `master` to `bitsensor/elastalert:latest`. + +```bash +git clone https://github.com/bitsensor/elastalert.git; cd elastalert +docker run -d -p 3030:3030 \ + -v `pwd`/config/elastalert.yaml:/opt/elastalert/config.yaml \ + -v `pwd`/config/config.json:/opt/elastalert-server/config/config.json \ + -v `pwd`/rules:/opt/elastalert/rules \ + -v `pwd`/rule_templates:/opt/elastalert/rule_templates \ + --net="host" \ + --name elastalert bitsensor/elastalert:latest +``` + +## Documentation + +Read the documentation at [Read the Docs](http://elastalert.readthedocs.org). + +To build a html version of the docs locally + +``` +pip install sphinx_rtd_theme sphinx +cd docs +make html +``` + +View in browser at build/html/index.html + +## Configuration + +See config.yaml.example for details on configuration. + +## Example rules + +Examples of different types of rules can be found in example_rules/. + +- ``example_spike.yaml`` is an example of the "spike" rule type, which allows you to alert when the rate of events, averaged over a time period, +increases by a given factor. This example will send an email alert when there are 3 times more events matching a filter occurring within the +last 2 hours than the number of events in the previous 2 hours. 
+ +- ``example_frequency.yaml`` is an example of the "frequency" rule type, which will alert when there are a given number of events occuring +within a time period. This example will send an email when 50 documents matching a given filter occur within a 4 hour timeframe. + +- ``example_change.yaml`` is an example of the "change" rule type, which will alert when a certain field in two documents changes. In this example, +the alert email is sent when two documents with the same 'username' field but a different value of the 'country_name' field occur within 24 hours +of each other. + +- ``example_new_term.yaml`` is an example of the "new term" rule type, which alerts when a new value appears in a field or fields. In this example, +an email is sent when a new value of ("username", "computer") is encountered in example login logs. + +## Frequently Asked Questions + +### My rule is not getting any hits? + +So you've managed to set up ElastAlert, write a rule, and run it, but nothing happens, or it says ``0 query hits``. First of all, we recommend using the command ``elastalert-test-rule rule.yaml`` to debug. It will show you how many documents match your filters for the last 24 hours (or more, see ``--help``), and then shows you if any alerts would have fired. If you have a filter in your rule, remove it and try again. This will show you if the index is correct and that you have at least some documents. If you have a filter in Kibana and want to recreate it in ElastAlert, you probably want to use a query string. Your filter will look like + +``` +filter: +- query: + query_string: + query: "foo: bar AND baz: abc*" +``` +If you receive an error that Elasticsearch is unable to parse it, it's likely the YAML is not spaced correctly, and the filter is not in the right format. If you are using other types of filters, like ``term``, a common pitfall is not realizing that you may need to use the analyzed token. This is the default if you are using Logstash. For example, + +``` +filter: +- term: + foo: "Test Document" +``` + +will not match even if the original value for ``foo`` was exactly "Test Document". Instead, you want to use ``foo.raw``. If you are still having trouble troubleshooting why your documents do not match, try running ElastAlert with ``--es_debug_trace /path/to/file.log``. This will log the queries made to Elasticsearch in full so that you can see exactly what is happening. + +### I got hits, why didn't I get an alert? + +If you got logs that had ``X query hits, 0 matches, 0 alerts sent``, it depends on the ``type`` why you didn't get any alerts. If ``type: any``, a match will occur for every hit. If you are using ``type: frequency``, ``num_events`` must occur within ``timeframe`` of each other for a match to occur. Different rules apply for different rule types. + +If you see ``X matches, 0 alerts sent``, this may occur for several reasons. If you set ``aggregation``, the alert will not be sent until after that time has elapsed. If you have gotten an alert for this same rule before, that rule may be silenced for a period of time. The default is one minute between alerts. If a rule is silenced, you will see ``Ignoring match for silenced rule`` in the logs. + +If you see ``X alerts sent`` but didn't get any alert, it's probably related to the alert configuration. If you are using the ``--debug`` flag, you will not receive any alerts. Instead, the alert text will be written to the console. Use ``--verbose`` to achieve the same affects without preventing alerts. 
If you are using email alert, make sure you have it configured for an SMTP server. By default, it will connect to localhost on port 25. It will also use the word "elastalert" as the "From:" address. Some SMTP servers will reject this because it does not have a domain while others will add their own domain automatically. See the email section in the documentation for how to configure this. + +### Why did I only get one alert when I expected to get several? + +There is a setting called ``realert`` which is the minimum time between two alerts for the same rule. Any alert that occurs within this time will simply be dropped. The default value for this is one minute. If you want to receive an alert for every single match, even if they occur right after each other, use + +``` +realert: + minutes: 0 +``` + +You can of course set it higher as well. + +### How can I prevent duplicate alerts? + +By setting ``realert``, you will prevent the same rule from alerting twice in an amount of time. + +``` +realert: + days: 1 +``` + +You can also prevent duplicates based on a certain field by using ``query_key``. For example, to prevent multiple alerts for the same user, you might use + +``` +realert: + hours: 8 +query_key: user +``` + +Note that this will also affect the way many rule types work. If you are using ``type: frequency`` for example, ``num_events`` for a single value of ``query_key`` must occur before an alert will be sent. You can also use a compound of multiple fields for this key. For example, if you only wanted to receieve an alert once for a specific error and hostname, you could use + +``` +query_key: [error, hostname] +``` + +Internally, this works by creating a new field for each document called ``field1,field2`` with a value of ``value1,value2`` and using that as the ``query_key``. + +The data for when an alert will fire again is stored in Elasticsearch in the ``elastalert_status`` index, with a ``_type`` of ``silence`` and also cached in memory. + +### How can I change what's in the alert? + +You can use the field ``alert_text`` to add custom text to an alert. By setting ``alert_text_type: alert_text_only``, it will be the entirety of the alert. You can also add different fields from the alert by using Python style string formatting and ``alert_text_args``. For example + +``` +alert_text: "Something happened with {0} at {1}" +alert_text_type: alert_text_only +alert_text_args: ["username", "@timestamp"] +``` + +You can also limit the alert to only containing certain fields from the document by using ``include``. + +``` +include: ["ip_address", "hostname", "status"] +``` + +### My alert only contains data for one event, how can I see more? + +If you are using ``type: frequency``, you can set the option ``attach_related: true`` and every document will be included in the alert. An alternative, which works for every type, is ``top_count_keys``. This will show the top counts for each value for certain fields. For example, if you have + +``` +top_count_keys: ["ip_address", "status"] +``` + +and 10 documents matched your alert, it may contain something like + +``` +ip_address: +127.0.0.1: 7 +10.0.0.1: 2 +192.168.0.1: 1 + +status: +200: 9 +500: 1 +``` + +### How can I make the alert come at a certain time? + +The ``aggregation`` feature will take every alert that has occured over a period of time and send them together in one alert. 
You can use cron style syntax to send all alerts that have occured since the last once by using + +``` +aggregation: + schedule: '2 4 * * mon,fri' +``` + +### I have lots of documents and it's really slow, how can I speed it up? + +There are several ways to potentially speed up queries. If you are using ``index: logstash-*``, Elasticsearch will query all shards, even if they do not possibly contain data with the correct timestamp. Instead, you can use Python time format strings and set ``use_strftime_index`` + +``` +index: logstash-%Y.%m +use_strftime_index: true +``` + +Another thing you could change is ``buffer_time``. By default, ElastAlert will query large overlapping windows in order to ensure that it does not miss any events, even if they are indexed in real time. In config.yaml, you can adjust ``buffer_time`` to a smaller number to only query the most recent few minutes. + +``` +buffer_time: + minutes: 5 +``` + +By default, ElastAlert will download every document in full before processing them. Instead, you can have ElastAlert simply get a count of the number of documents that have occured in between each query. To do this, set ``use_count_query: true``. This cannot be used if you use ``query_key``, because ElastAlert will not know the contents of each documents, just the total number of them. This also reduces the precision of alerts, because all events that occur between each query will be rounded to a single timestamp. + +If you are using ``query_key`` (a single key, not multiple keys) you can use ``use_terms_query``. This will make ElastAlert perform a terms aggregation to get the counts for each value of a certain field. Both ``use_terms_query`` and ``use_count_query`` also require ``doc_type`` to be set to the ``_type`` of the documents. They may not be compatible with all rule types. + +### Can I perform aggregations? + +The only aggregation supported currently is a terms aggregation, by setting ``use_terms_query``. + +### I'm not using @timestamp, what do I do? + +You can use ``timestamp_field`` to change which field ElastAlert will use as the timestamp. You can use ``timestamp_type`` to change it between ISO 8601 and unix timestamps. You must have some kind of timestamp for ElastAlert to work. If your events are not in real time, you can use ``query_delay`` and ``buffer_time`` to adjust when ElastAlert will look for documents. + +### I'm using flatline but I don't see any alerts + +When using ``type: flatline``, ElastAlert must see at least one document before it will alert you that it has stopped seeing them. + +### How can I get a "resolve" event? + +ElastAlert does not currently support stateful alerts or resolve events. + +### Can I set a warning threshold? + +Currently, the only way to set a warning threshold is by creating a second rule with a lower threshold. + +## License + +ElastAlert is licensed under the Apache License, Version 2.0: http://www.apache.org/licenses/LICENSE-2.0 + +### Read the documentation at [Read the Docs](http://elastalert.readthedocs.org). +### Questions? Drop by #elastalert on Freenode IRC. diff --git a/example_rules/example_change.yaml b/example_rules/example_change.yaml index 107c43ec1..83e676d81 100755 --- a/example_rules/example_change.yaml +++ b/example_rules/example_change.yaml @@ -21,20 +21,20 @@ # (Required) # Rule name, must be unique -name: New country login +name: filebeat-7.3.0-2022.08.16-000001 # (Required) # Type of alert. 
# the change rule will alert when a certain field changes in two documents within a timeframe -type: change +type: blacklist # (Required) # Index to search, wildcard supported -index: logstash-* +index: filebeat-7.3.0-2022.08.16-000001 # (Required, change specific) # The field to look for changes in -compare_key: country_name +compare_key: "INFO" # (Required, change specific) # Ignore documents without the compare_key (country_name) field @@ -56,7 +56,7 @@ timeframe: filter: - query: query_string: - query: "document_type: login" + query: "kubernetes.pod.name: game-server and INFO" # (Required) # The alert is use when a match is found
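---

Note: none of the three patches adds test coverage for the new alerter. Below is a minimal pytest-style sketch, hypothetical and not part of this series, that exercises the signed-URL construction and the payload shape. It assumes the module paths introduced above (`elastalert.alerts.DingTalkAlerter`, with `requests` imported at the top of `alerts.py`) and that the base `Alerter` accepts a plain rule dict; the rule fields other than the `dingtalk_*` options are placeholders.

```python
# Hypothetical test sketch for the DingTalkAlerter added in PATCH 1/3.
import json
from unittest import mock

from elastalert.alerts import DingTalkAlerter


def test_dingtalk_alert_posts_signed_payload():
    rule = {
        'name': 'test_dingtalk',          # placeholder rule metadata
        'type': 'any',
        'alert': ['dingtalk'],
        'dingtalk_webhook': 'https://oapi.dingtalk.com/robot/send?access_token=xxxx',
        'dingtalk_msgtype': 'text',
        'dingtalk_isAtAll': False,
        'dingtalk_token': 'SECexampletoken',
        'dingtalk_title': 'test',
    }
    alerter = DingTalkAlerter(rule)

    # Bypass BasicMatchString formatting; the request payload is what matters here.
    with mock.patch.object(alerter, 'create_alert_body', return_value='alert body'), \
            mock.patch('elastalert.alerts.requests.post') as mock_post:
        alerter.alert([{'@timestamp': '2022-08-18T12:00:00Z'}])

    # The webhook URL should carry the timestamp and HMAC signature.
    url = mock_post.call_args[0][0]
    assert url.startswith(rule['dingtalk_webhook'] + '&timestamp=')
    assert '&sign=' in url

    # The body should be a DingTalk text message honouring the isAtAll flag.
    payload = json.loads(mock_post.call_args[1]['data'])
    assert payload['msgtype'] == 'text'
    assert payload['text']['content'] == 'alert body'
    assert payload['at']['isAtAll'] is False
```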