diff --git a/.DS_Store b/.DS_Store new file mode 100644 index 000000000..c5ba7493f Binary files /dev/null and b/.DS_Store differ diff --git a/.github/ISSUE_TEMPLATE b/.github/ISSUE_TEMPLATE new file mode 100644 index 000000000..cca969eb0 --- /dev/null +++ b/.github/ISSUE_TEMPLATE @@ -0,0 +1,3 @@ +STOP - You are probably in the wrong place! The majority of topics are better suited for the Discussion forum. You can access this area by clicking The Discussions link above. Please search the discussions area first, for keywords that could be associated with the problem you are experiencing. If you do not see an existing discussion, please open a new discussion and include sufficient details for someone in the community to help you. + +If you are confident you have discovered a legitimate issue, attach logs and reproduction steps to this issue. Failure to provide sufficient information will likely cause this issue to go stale and eventually be deleted. diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md new file mode 100644 index 000000000..b69dce987 --- /dev/null +++ b/.github/pull_request_template.md @@ -0,0 +1,31 @@ +## Description + + + +## Checklist + + + +- [ ] I have reviewed the [contributing guidelines](https://github.com/jertel/elastalert2/blob/master/CONTRIBUTING.md). +- [ ] I have included unit tests for my changes or additions. +- [ ] I have successfully run `make test-docker` with my changes. +- [ ] I have manually tested all relevant modes of the change in this PR. +- [ ] I have updated the [documentation](https://elastalert2.readthedocs.io). +- [ ] I have updated the [changelog](https://github.com/jertel/elastalert2/blob/master/CHANGELOG.md). + + +## Questions or Comments + + diff --git a/.github/workflows/master_build_test.yml b/.github/workflows/master_build_test.yml new file mode 100644 index 000000000..8ab9d01a5 --- /dev/null +++ b/.github/workflows/master_build_test.yml @@ -0,0 +1,28 @@ +name: master_build_test + +# Controls when the action will run. +on: + # Triggers the workflow on push or pull request events but only for the master branch + push: + branches: [ master ] + pull_request: + branches: [ master ] + + # Allows you to run this workflow manually from the Actions tab + workflow_dispatch: + +# A workflow run is made up of one or more jobs that can run sequentially or in parallel +jobs: + # This workflow contains a single job called "build" + build: + # The type of runner that the job will run on + runs-on: ubuntu-latest + + # Steps represent a sequence of tasks that will be executed as part of the job + steps: + # Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it + - uses: actions/checkout@v2 + + # Runs a single command using the runners shell + - name: Build and run tests + run: make test-docker diff --git a/.github/workflows/publish_image.yml b/.github/workflows/publish_image.yml new file mode 100644 index 000000000..dede09840 --- /dev/null +++ b/.github/workflows/publish_image.yml @@ -0,0 +1,86 @@ +name: publish_image + +on: + push: + # Publish `master` as Docker `latest` image. 
+ branches: + - master + + tags: + - 2.* + +env: + IMAGE_NAME: elastalert2 + DOCKER_REPO: jertel/elastalert2 + +jobs: + push: + environment: Main + + runs-on: ubuntu-latest + + steps: + - uses: actions/checkout@v2 + + - name: Log into GitHub Registry + run: echo "${{ secrets.GITHUB_TOKEN }}" | docker login docker.pkg.github.com -u ${{ github.actor }} --password-stdin + + - name: Log into Docker Registry + run: echo "${{ secrets.DOCKER_TOKEN }}" | docker login -u ${{ secrets.DOCKER_USERNAME }} --password-stdin + + - name: Install buildx + id: buildx + uses: crazy-max/ghaction-docker-buildx@v1 + with: + version: latest + + - name: Build and Push multi-arch to Docker Hub + run: | + # Strip git ref prefix from version + VERSION=$(echo "${{ github.ref }}" | sed -e 's,.*/\(.*\),\1,') + + # Use Docker `latest` tag convention + [ "$VERSION" == "master" ] && VERSION=latest + + TAG2="" + if [[ "$VERSION" == "2."* ]]; then + TAG2="--tag $DOCKER_REPO:2" + fi + + echo VERSION=$VERSION + echo TAG2=$TAG2 + + docker run --rm --privileged multiarch/qemu-user-static --reset -p yes + docker buildx build \ + --platform=linux/amd64,linux/arm64 \ + --output "type=image,push=true" \ + --file ./Dockerfile . \ + --tag $DOCKER_REPO:$VERSION $TAG2 + + - name: Build and push image to GitHub + run: | + docker build . --file Dockerfile --tag $IMAGE_NAME + + IMAGE_ID=docker.pkg.github.com/${{ github.repository }}/$IMAGE_NAME + + # Change all uppercase to lowercase + IMAGE_ID=$(echo $IMAGE_ID | tr '[A-Z]' '[a-z]') + + # Strip git ref prefix from version + VERSION=$(echo "${{ github.ref }}" | sed -e 's,.*/\(.*\),\1,') + + # Use Docker `latest` tag convention + [ "$VERSION" == "master" ] && VERSION=latest + + echo IMAGE_ID=$IMAGE_ID + echo VERSION=$VERSION + + # Push to GitHub Package + docker tag $IMAGE_NAME $IMAGE_ID:$VERSION + docker push $IMAGE_ID:$VERSION + + if [[ "$VERSION" == "2."* ]]; then + # Push to GitHub Package + docker tag $IMAGE_NAME $IMAGE_ID:2 + docker push $IMAGE_ID:2 + fi \ No newline at end of file diff --git a/.github/workflows/python-publish.yml b/.github/workflows/python-publish.yml new file mode 100644 index 000000000..5b15ece47 --- /dev/null +++ b/.github/workflows/python-publish.yml @@ -0,0 +1,33 @@ +# This workflow will upload a Python Package using Twine when a release is created +# For more information see: https://help.github.com/en/actions/language-and-framework-guides/using-python-with-github-actions#publishing-to-package-registries + +name: upload_python_package + +on: + push: + tags: + - 2.* + +jobs: + deploy: + environment: Main + + runs-on: ubuntu-latest + + steps: + - uses: actions/checkout@v2 + - name: Set up Python + uses: actions/setup-python@v2 + with: + python-version: '3.x' + - name: Install dependencies + run: | + python -m pip install --upgrade pip + pip install setuptools wheel twine + - name: Build and publish + env: + TWINE_USERNAME: __token__ + TWINE_PASSWORD: ${{ secrets.PYPI_TOKEN }} + run: | + python setup.py sdist bdist_wheel + twine upload dist/* diff --git a/.github/workflows/upload_chart.yml b/.github/workflows/upload_chart.yml new file mode 100644 index 000000000..363fa8f33 --- /dev/null +++ b/.github/workflows/upload_chart.yml @@ -0,0 +1,28 @@ +name: upload_chart + +on: + push: + tags: + - 2.* + +jobs: + publish: + environment: Main + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@v2 + with: + fetch-depth: 0 + + - name: Install Helm + uses: azure/setup-helm@v1 + with: + version: v3.4.0 + + - name: Run chart-releaser + uses: 
J12934/helm-gh-pages-action@master + with: + charts-folder: chart + access-token: "${{ secrets.GH_PERSONAL_ACCESS_TOKEN }}" + deploy-branch: gh-pages diff --git a/.gitignore b/.gitignore index 269474d73..1daffea83 100644 --- a/.gitignore +++ b/.gitignore @@ -1,4 +1,4 @@ -config.yaml +/config.yaml .tox/ .coverage .idea/* @@ -6,6 +6,7 @@ config.yaml __pycache__/ *.pyc virtualenv_run/ +.venv *.egg-info/ dist/ venv/ @@ -16,3 +17,10 @@ build/ my_rules *.swp *~ +/rules/ +mod/ +comparisonFile +comparisonFile2 +examples/ex/* +examples/ex1/* +comparisonFile* diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 95437e1bf..059f7314e 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -1,5 +1,5 @@ repos: -- repo: git://github.com/pre-commit/pre-commit-hooks +- repo: https://github.com/pre-commit/pre-commit-hooks sha: v1.1.1 hooks: - id: trailing-whitespace @@ -13,13 +13,7 @@ repos: - id: debug-statements - id: requirements-txt-fixer - id: name-tests-test -- repo: git://github.com/asottile/reorder_python_imports +- repo: https://github.com/asottile/reorder_python_imports sha: v0.3.5 hooks: - id: reorder-python-imports -- repo: git://github.com/Yelp/detect-secrets - sha: 0.9.1 - hooks: - - id: detect-secrets - args: ['--baseline', '.secrets.baseline'] - exclude: .*tests/.*|.*yelp/testing/.*|\.pre-commit-config\.yaml diff --git a/.python-version b/.python-version new file mode 100644 index 000000000..2c0733315 --- /dev/null +++ b/.python-version @@ -0,0 +1 @@ +3.11 diff --git a/.readthedocs.yaml b/.readthedocs.yaml new file mode 100644 index 000000000..346241720 --- /dev/null +++ b/.readthedocs.yaml @@ -0,0 +1,20 @@ +# .readthedocs.yaml +# Read the Docs configuration file +# See https://docs.readthedocs.io/en/stable/config-file/v2.html for details + +# Required +version: 2 + +# Build documentation in the docs/ directory with Sphinx +sphinx: + configuration: docs/source/conf.py + +# Optionally build your docs in additional formats such as PDF +formats: + - pdf + - htmlzip + +# Optionally set the version of Python and requirements required to build your docs +python: + install: + - requirements: docs/source/requirements.txt \ No newline at end of file diff --git a/.secrets.baseline b/.secrets.baseline deleted file mode 100644 index b4405a48d..000000000 --- a/.secrets.baseline +++ /dev/null @@ -1,27 +0,0 @@ -{ - "exclude_regex": ".*tests/.*|.*yelp/testing/.*|\\.pre-commit-config\\.yaml", - "generated_at": "2018-07-06T22:54:22Z", - "plugins_used": [ - { - "base64_limit": 4.5, - "name": "Base64HighEntropyString" - }, - { - "hex_limit": 3, - "name": "HexHighEntropyString" - }, - { - "name": "PrivateKeyDetector" - } - ], - "results": { - ".travis.yml": [ - { - "hashed_secret": "4f7a1ea04dafcbfee994ee1d08857b8aaedf8065", - "line_number": 14, - "type": "Base64 High Entropy String" - } - ] - }, - "version": "0.9.1" -} diff --git a/.travis.yml b/.travis.yml deleted file mode 100644 index 569bf12d6..000000000 --- a/.travis.yml +++ /dev/null @@ -1,42 +0,0 @@ -language: python -python: -- '3.6' -env: -- TOXENV=docs -- TOXENV=py36 -install: -- pip install tox -- > - if [[ -n "${ES_VERSION}" ]] ; then - wget https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-${ES_VERSION}.tar.gz - mkdir elasticsearch-${ES_VERSION} && tar -xzf elasticsearch-${ES_VERSION}.tar.gz -C elasticsearch-${ES_VERSION} --strip-components=1 - ./elasticsearch-${ES_VERSION}/bin/elasticsearch & - fi -script: -- > - if [[ -n "${ES_VERSION}" ]] ; then - wget -q --waitretry=1 --retry-connrefused --tries=30 
-O - http://127.0.0.1:9200 - make test-elasticsearch - else - make test - fi -jobs: - include: - - stage: 'Elasticsearch test' - env: TOXENV=py36 ES_VERSION=7.0.0-linux-x86_64 - - env: TOXENV=py36 ES_VERSION=6.6.2 - - env: TOXENV=py36 ES_VERSION=6.3.2 - - env: TOXENV=py36 ES_VERSION=6.2.4 - - env: TOXENV=py36 ES_VERSION=6.0.1 - - env: TOXENV=py36 ES_VERSION=5.6.16 - -deploy: - provider: pypi - user: yelplabs - password: - secure: TpSTlFu89tciZzboIfitHhU5NhAB1L1/rI35eQTXstiqzYg2mweOuip+MPNx9AlX3Swg7MhaFYnSUvRqPljuoLjLD0EQ7BHLVSBFl92ukkAMTeKvM6LbB9HnGOwzmAvTR5coegk8IHiegudODWvnhIj4hp7/0EA+gVX7E55kEAw= - on: - tags: true - distributions: sdist bdist_wheel - repo: Yelp/elastalert - branch: master diff --git a/CHANGELOG.md b/CHANGELOG.md new file mode 100644 index 000000000..9875f5b3f --- /dev/null +++ b/CHANGELOG.md @@ -0,0 +1,468 @@ +# fw_2.9.0.8 +- Percentage match rule type modified to work with the Haystack use case [HAYS-4850] + +# fw_2.9.0.5 +- New Term rule type modified to work with the Haystack use case +- Updated terms aggregation query to get existing terms +- Updated query used to get current terms to make use of a terms aggregation instead of a search query +- Added ability to refresh existing terms at a configurable refresh interval +- Added upper and lower bounds for configurable query parameters like window_size, step_size and terms_size +- Modified default values for new-term rule configurations + +# 2.9.0.1 FW updates +- Downgraded the elasticsearch library from version 8 to version 6 so the engine supports the existing ES clusters of Haystack. +- Downgraded various other libraries for the same reason. +- ES scrolls disabled as per requirement +- Replaced search queries with msearch for all ES querying + +## Breaking Changes +- Downgraded the elasticsearch library from version 8 to version 6 so the engine supports the existing ES clusters of Haystack. +- Downgraded various other libraries for the same reason. +- ES scrolls disabled as per requirement +- Replaced search queries with msearch for all ES querying + +## New Features +- [Alertmanager] Added tenant-specific config to Alertmanager +- [Prometheus] Added tenant config and modified the Prometheus route & port as per Haystack requirements +- [Haystack] Added Kibana adapter support for querying from the router +- [Haystack] Added url_prefix for the Kibana adapter +- [Haystack] Term-query optimizations +- [Engine changes] Common index can be configured directly in config.yaml +- [Trace Alerts] Added Error Rate rule that hits the router aggregate endpoint +- [Trace Alerts] Error Rate rule enhancements + - error_calculation_method config for users to decide between two different error_rate calculation methods. + - Default values for unique_column and error_condition + - Added test cases for the Error Rate alert type +- [Dockerfile] Distroless Docker setup for Python 3 - elastalert +- [Dockerfile] Docker optimization to fix the create_index not running bug + +# 2.TBD.TBD + +## Breaking changes +- [Alerta] All matches will now be sent with the alert - [#1068](https://github.com/jertel/elastalert2/pull/1068) - @dakotacody +- Renamed the `overwrites` parameter to `overrides` in the load_conf method of config.py - [#1100](https://github.com/jertel/elastalert2/pull/1100) - @akusei + +## New features +- [Graylog GELF] Alerter added. 
[#1050](https://github.com/jertel/elastalert2/pull/1050) - @malinkinsa +- [TheHive] Format `title`, `type`, and `source` with dynamic lookup values - [#1092](https://github.com/jertel/elastalert2/pull/1092) - @fandigunawan +- [HTTP POST2] `http_post2_payload` and `http_post2_headers` now support multiline JSON strings for better control over jinja templates - [#1104](https://github.com/jertel/elastalert2/pull/1104) - @akusei +- [HTTP POST2] This alerter now supports the use of `jinja_root_name` - [#1104](https://github.com/jertel/elastalert2/pull/1104) - @akusei +- [Rule Testing] The data file passed with `--data` can now contain a single JSON document or a list of JSON objects - [#1104](https://github.com/jertel/elastalert2/pull/1104) - @akusei + +## Other changes +- [Docs] Clarify Jira Cloud authentication configuration - [94f7e8c](https://github.com/jertel/elastalert2/commit/94f7e8cc98d59a00349e3b23acd8a8549c80dbc8) - @jertel +- Update minimum versions for third-party dependencies in requirements.txt and setup.py - [#1051](https://github.com/jertel/elastalert2/pull/1051) - @nsano-rururu +- [Docs] Clarify `import` support for list of files [#1075](https://github.com/jertel/elastalert2/pull/1075) - @sqrooted +- Add support for Kibana 8.6 for Kibana Discover - [#1080](https://github.com/jertel/elastalert2/pull/1080) - @nsano-rururu +- Modify schema to allow string and boolean for `*_ca_certs` to allow for one to specify a cert bundle for SSL certificate verification - [#1082](https://github.com/jertel/elastalert2/pull/1082) - @goggin +- Fix UnicodeEncodeError in PagerDutyAlerter - [#1091](https://github.com/jertel/elastalert2/pull/1091) - @nsano-rururu +- The scan_entire_timeframe setting, when used with use_count_query or use_terms_query will now scan entire timeframe on subsequent rule runs - [#1097](https://github.com/jertel/elastalert2/pull/1097) - @rschirin +- Add new unit tests to cover changes in the HTTP POST2 alerter - [#1104](https://github.com/jertel/elastalert2/pull/1104) - @akusei +- [Docs] Updated HTTP POST2 documentation to outline changes with payloads, headers and multiline JSON strings - [#1104](https://github.com/jertel/elastalert2/pull/1104) - @akusei +- [HTTP POST2] Additional error checking around rendering and dumping payloads/headers to JSON - [#1104](https://github.com/jertel/elastalert2/pull/1104) - @akusei + +# 2.9.0 + +## Breaking changes +- None + +## New features +- Add `realert_key` option to silence groups of alerts - [#1004](https://github.com/jertel/elastalert2/pull/1004) - @goggin + +## Other changes +- Upgrade pylint 2.15.3 to 2.15.5, pytest 7.1.3 to 7.2.0, pytest-xdist 2.5.0 to 3.0.2, sphinx 5.2.3 to 5.3.0, tox 3.26.0 to 3.27.0 - [#988](https://github.com/jertel/elastalert2/pull/988) - @nsano-rururu +- Upgrade to Python 3.11 - [#989](https://github.com/jertel/elastalert2/pull/989) - @jertel +- Add support for Kibana 8.5 for Kibana Discover - [#993](https://github.com/jertel/elastalert2/pull/993) - @nsano-rururu +- [Amazon SNS] Fix Amazon SNS Alerter - [#1003](https://github.com/jertel/elastalert2/pull/1003) - @nsano-rururu +- [Mattermost] Add mattermost_emoji_override - [#1011](https://github.com/jertel/elastalert2/pull/1011) - @nsano-rururu +- Add support for disabling verification of SSL certificate for the Kibana Shorten URL API - [#1013](https://github.com/jertel/elastalert2/pull/1013) - @BenJeau + +# 2.8.0 + +## Breaking changes +- None + +## New features +- Adding the optional timestamp_to_datetime_format_expr rule parameter, allowing custom 
modification of the Elasticsearch timestamp string before parsing it into datetime. - [#978](https://github.com/jertel/elastalert2/pull/978) - @thpiron + +## Other changes +- Upgrade pylint 2.15.2 to 2.15.3 and pytest-cov 3.0.0 to 4.0.0 and sphinx 5.1.1 to 5.2.3; Add Google Chat proxy support - [#972](https://github.com/jertel/elastalert2/pull/972) - @nsano-rururu +- Fix Jira assign issue - [#974](https://github.com/jertel/elastalert2/pull/974) - @jorge-gyant +- [Kubernetes] Add extraContainers value to helm chart - [#975](https://github.com/jertel/elastalert2/pull/975) - @Kasape +- [docs] Updated index configuration documentation - [#979](https://github.com/jertel/elastalert2/pull/979) - @nsano-rururu +- [Kubernetes] Change 'metrics' to 'metrics.enabled' in helm chart documentation - [#981](https://github.com/jertel/elastalert2/pull/981) - @Kasape + +# 2.7.0 + +## Breaking changes +- None + +## New features +- [Telegram] Added new telegram_parse_mode setting to switch between markdown and html body formats. - [#924](https://github.com/jertel/elastalert2/pull/924) - @polshe-v + +## Other changes +- Pin package version of `exotel` to `0.1.5` - [#931](https://github.com/jertel/elastalert2/pull/931) +- Add support for Kibana 8.4 for Kibana Discover - [#935](https://github.com/jertel/elastalert2/pull/935) - @nsano-rururu +- Upgrade pylint 2.14.5 to 2.15.2 and tox 3.25.1 to 3.26.0 and sphinx 5.0.2 to 5.1.1 - [#937](https://github.com/jertel/elastalert2/pull/937) - @nsano-rururu +- Upgrade pytest 7.1.2 to 7.1.3 - [#941](https://github.com/jertel/elastalert2/pull/941) - @nsano-rururu + +# 2.6.0 + +## Breaking changes +- When using HTTP POST 2, it is no longer necessary to pre-escape strings (should they contain control chars) from events in Elasticsearch which are replaced by the jinja2 template. + +- [Kubernetes] [Breaking] Reconfigure metrics to follow prometheus-operator nomenclature. The `metrics` value now controls the addition of the metrics endpoint (command argument), the creation of a service to expose the metrics endpoint, and the (optional) creation of prometheus-operator objects: serviceMonitor and prometheusRules, to match implementations of other charts. The labels of the chart have been modified, so you'll need to uninstall and reinstall the chart for the upgrade to work. - [#902](https://github.com/jertel/elastalert2/pull/902) - @PedroMSantosD + +## New features +- [Kubernetes] Chart is now able to create a service for the metrics, and optional prometheus-operator custom resources serviceMonitor and prometheusRule. - [#902](https://github.com/jertel/elastalert2/pull/902) - @PedroMSantosD + +## Other changes +- Upgrade pylint 2.13.8 to 2.14.3, upgrade sphinx 4.5.0 to 5.0.2 - [#891](https://github.com/jertel/elastalert2/pull/891) - @nsano-rururu +- Add support for Kibana 8.3 for Kibana Discover - [#897](https://github.com/jertel/elastalert2/pull/897) - @nsano-rururu +- Fix internal json decode error in HTTP POST 2 if values from ES event contain control chars (e.g. 
newline) and are used in the jinja2 template - [#898](https://github.com/jertel/elastalert2/pull/898) - @ddurham2 +- Upgrade pylint 2.14.3 to 2.14.5 and tox 3.25.0 to 3.25.1 - [#911](https://github.com/jertel/elastalert2/pull/911) - @nsano-rururu + +# 2.5.1 + +## Breaking changes +- None + +## New features +- None + +## Other changes +- Upgrade stomp 8.0.0 to 8.0.1 - [#832](https://github.com/jertel/elastalert2/pull/832) - @jertel +- Add support for Kibana 8.2 for Kibana Discover, Upgrade Pytest 7.1.1 to 7.1.2, Upgrade pylint 2.13.5 to 2.13.8, Upgrade Jinja2 3.1.1 to 3.1.2 - [#840](https://github.com/jertel/elastalert2/pull/840) - @nsano-rururu +- Add the possibility to use rule and match fields in the description of TheHive alerts - [#855](https://github.com/jertel/elastalert2/pull/855) - @luffynextgen +- Fix missing colon on schema.yml and add unit test on it - [#866](https://github.com/jertel/elastalert2/pull/866) - @Isekai-Seikatsu +- Add the possibility to use tags, message and tlp level in TheHive observables [#873](https://github.com/jertel/elastalert2/pull/873) - @luffynextgen +- Support OpenSearch 2.x - [#880](https://github.com/jertel/elastalert2/pull/880) - @jertel + +# 2.5.0 + +## Breaking changes +- Remove Simple Alerter - [#793](https://github.com/jertel/elastalert2/pull/793) - @nsano-rururu + +## New features +- Add support for Kibana 8.1 for Kibana Discover - [#763](https://github.com/jertel/elastalert2/pull/763) - @nsano-rururu +- [MS Teams] Add arbitrary text value support for Facts - [#790](https://github.com/jertel/elastalert2/pull/790) - @iamxeph +- [MS Teams] Use alert_subject as ms_teams_alert_summary if ms_teams_alert_summary is not set - [#802](https://github.com/jertel/elastalert2/pull/802) - @iamxeph +- [Mattermost] List support for mattermost_channel_override - [#809](https://github.com/jertel/elastalert2/pull/809) - @nsano-rururu +- [Zabbix] Add the ability to specify `zbx_host` from available elasticsearch field - [#820](https://github.com/jertel/elastalert2/pull/820) - @timeforplanb123 + +## Other changes +- [Docs] Update FAQ ssl_show_warn - [#764](https://github.com/jertel/elastalert2/pull/764) - @nsano-rururu +- [Docs] Update FAQ telegram and Amazon SNS - [#765](https://github.com/jertel/elastalert2/pull/765) - @nsano-rururu +- Upgrade Pytest 7.0.1 to 7.1.1 - [#776](https://github.com/jertel/elastalert2/pull/776) - @nsano-rururu +- [Kubernetes] Add support for automatic SMTP mail server credential management - [#780](https://github.com/jertel/elastalert2/pull/780) - @lusson-luo +- Upgrade sphinx 4.4.0 to 4.5.0 - [#782](https://github.com/jertel/elastalert2/pull/782) - @nsano-rururu +- Upgrade pylint 2.12.2 to 2.13.2 - [#783](https://github.com/jertel/elastalert2/pull/783) - @nsano-rururu +- Upgrade jinja2 3.0.3 to 3.1.1 - [#784](https://github.com/jertel/elastalert2/pull/784) - @nsano-rururu +- Update schema.yaml(Alertmanager, Spike, Flatline, New Term, Metric Aggregation, Percentage Match) - [#789](https://github.com/jertel/elastalert2/pull/789) - @nsano-rururu +- Upgrade pylint 2.13.2 to 2.13.3 - [#792](https://github.com/jertel/elastalert2/pull/792) - @nsano-rururu +- Upgrade pylint 2.13.3 to 2.13.4 - [#801](https://github.com/jertel/elastalert2/pull/801) - @nsano-rururu +- Fix SpikeRule - [#804](https://github.com/jertel/elastalert2/pull/804) - @nsano-rururu +- [Kubernetes] Add scanSubdirectories (defaults to true) as an option in Helm Chart - [#805](https://github.com/jertel/elastalert2/pull/805) - @louzadod +- Upgrade pylint 2.13.4 to 2.13.5 - 
[#808](https://github.com/jertel/elastalert2/pull/808) - @nsano-rururu +- Update documentation on Cloud ID support - [#810](https://github.com/jertel/elastalert2/pull/810) - @ferozsalam +- Upgrade tox 3.24.5 to 3.25.0 - [#813](https://github.com/jertel/elastalert2/pull/813) - @nsano-rururu +- [Kubernetes] Add support to specify rules directory - [#816](https://github.com/jertel/elastalert2/pull/816) @SBe +- Fix HTTP POST 2 alerter for nested payload keys - [#823](https://github.com/jertel/elastalert2/pull/823) - @lepouletsuisse +- [Kubernetes] Expose prometheus metrics to kubernetes pod service discovery mechanism - [#827](https://github.com/jertel/elastalert2/pull/827) - @PedroMSantosD + +# 2.4.0 + +## Breaking changes +- Add support for Elasticsearch 8, remove support for Elasticsearch 6 and below - [#744](https://github.com/jertel/elastalert2/pull/744) - @ferozsalam, @jertel, and @nsano-rururu + WARNING! Read the [ES 8 upgrade notes](https://elastalert2.readthedocs.io/en/latest/recipes/faq.html#does-elastalert-2-support-elasticsearch-8) BEFORE upgrading your cluster to Elasticsearch 8. Failure to do so can result in your cluster no longer starting and unable to rollback to 7.x. +- Kibana dashboard integration has been removed, as it only was supported with older versions of Elasticsearch and Kibana. Per the above breaking change those older versions are no longer supported by ElastAlert 2. +- Dockerfile refactor for app home and user home to be the same directory (/opt/elastalert/). Before app home is /opt/elastalert/ and user home is /opt/elastalert/elastalert. After app home and user home are the same /opt/elastalert/ - [#656](https://github.com/jertel/elastalert2/pull/656) + +## New features +- [MS Teams] Kibana Discover URL and Facts - [#660](https://github.com/jertel/elastalert2/pull/660) - @thib12 +- Add support for Kibana 7.17 for Kibana Discover - [#695](https://github.com/jertel/elastalert2/pull/695) - @nsano-rururu +- Added a fixed name metric_agg_value to MetricAggregationRule match_body - [#697](https://github.com/jertel/elastalert2/pull/697) - @iamxeph + +## Other changes +- Load Jinja template when loading an alert - [#654](https://github.com/jertel/elastalert2/pull/654) - @thib12 +- Upgrade tox 3.24.4 to 3.24.5 - [#655](https://github.com/jertel/elastalert2/pull/655) - @nsano-rururu +- Upgrade sphinx 4.3.2 to 4.4.0 - [#661](https://github.com/jertel/elastalert2/pull/661) - @nsano-rururu +- [Docs] Fix Running Docker container - [#674](https://github.com/jertel/elastalert2/pull/674) - @nsano-rururu +- [Exotel] Added exotel_message_body to schema.yaml - [#685](https://github.com/jertel/elastalert2/pull/685) - @nsano-rururu +- Upgrade Pytest 6.2.5 to 7.0.0 - [#696](https://github.com/jertel/elastalert2/pull/696) - @nsano-rururu +- python-dateutil version specification change - [#704](https://github.com/jertel/elastalert2/pull/704) - @nsano-rururu +- Update minimum versions for third-party dependencies in requirements.txt and setup.py - [#705](https://github.com/jertel/elastalert2/pull/705) - @nsano-rururu +- [Docs] Document updates for Alerts and email addresses etc - [#706](https://github.com/jertel/elastalert2/pull/706) - @nsano-rururu +- [Docs] Update of RuleType Configuration Cheat Sheet - [#707](https://github.com/jertel/elastalert2/pull/707) - @nsano-rururu +- Upgrade Pytest 7.0.0 to 7.0.1 - [#710](https://github.com/jertel/elastalert2/pull/710) - @nsano-rururu +- Fixing jira_transition_to schema bug. 
Change property type from boolean to string - [#721](https://github.com/jertel/elastalert2/pull/721) - @toxisch +- Begin Elasticsearch 8 support - ElastAlert 2 now supports setup with fresh ES 8 instances, and works with some alert types - [#731](https://github.com/jertel/elastalert2/pull/731) - @ferozsalam +- Enable dynamic setting of rules volume in helm chart - [#732](https://github.com/jertel/elastalert2/pull/732) - @ChrisFraun +- Do not install tests via pip install - [#733](https://github.com/jertel/elastalert2/pull/733) - @buzzdeee +- [Docs] Add Elasticsearch 8 support documentation - [#735](https://github.com/jertel/elastalert2/pull/735) - @ferozsalam +- Remove download_dashboard - [#740](https://github.com/jertel/elastalert2/pull/740) - @nsano-rururu +- [Docs] Added documentation for metric|spike aggregation rule types for percentiles - [e682ea8](https://github.com/jertel/elastalert2/commit/e682ea8113bf9f413b6339e6803b5262881f2b30) - @jertel +- [Jira] Add support for Jira authentication via Personal Access Token - [#750](https://github.com/jertel/elastalert2/pull/750) - @buzzdeee +- [Docs] Update docs Negation, and, or - [#754](https://github.com/jertel/elastalert2/pull/754) - @nsano-rururu +- Remove call to `print` from elastalert.py - [#755](https://github.com/jertel/elastalert2/pull/755) - @ferozsalam +- [Docs] Added dingtalk_proxy, dingtalk_proxy_login, dingtalk_proxy_pass to docs - [#756](https://github.com/jertel/elastalert2/pull/756) - @nsano-rururu + +# 2.3.0 + +## Breaking changes +- [Kubernetes] The helm chart repository has changed. The new repository is located at https://jertel.github.io/elastalert2/. This was necessary due to the previous chart museum hosting service, Bonzai Cloud, terminating its chart hosting service on January 21, 2022. 
- @jertel + +## New features +- Add metric_agg_script to MetricAggregationRule [#558](https://github.com/jertel/elastalert2/pull/558) - @dequis +- [Alertmanager] Add support for basic authentication - [#575](https://github.com/jertel/elastalert2/pull/575) - @nsano-rururu +- Add support for Kibana 7.16 for Kibana Discover - [#612](https://github.com/jertel/elastalert2/pull/612) - @nsano-rururu +- [MS Teams] Add support for disabling verification of SSL certificate - [#628](https://github.com/jertel/elastalert2/pull/628) - @nsano-rururu + +## Other changes +- sphinx 4.2.0 to 4.3.0 and tzlocal==2.1 - [#561](https://github.com/jertel/elastalert2/pull/561) - @nsano-rururu +- jinja2 3.0.1 to 3.0.3 - [#562](https://github.com/jertel/elastalert2/pull/562) - @nsano-rururu +- Fix `get_rule_file_hash` TypeError - [#566](https://github.com/jertel/elastalert2/pull/566) - @JeffAshton +- Ensure `schema.yaml` stream closed - [#567](https://github.com/jertel/elastalert2/pull/567) - @JeffAshton +- Fixing `import` bugs & memory leak in `RulesLoader`/`FileRulesLoader` - [#580](https://github.com/jertel/elastalert2/pull/580) - @JeffAshton +- sphinx 4.3.0 to 4.3.1 - [#588](https://github.com/jertel/elastalert2/pull/588) - @nsano-rururu +- pytest-xdist 2.4.0 to 2.5.0 - [#615](https://github.com/jertel/elastalert2/pull/615) - @nsano-rururu +- sphinx 4.3.1 to 4.3.2 - [#618](https://github.com/jertel/elastalert2/pull/618) - @nsano-rururu +- Remove unused parameter boto-profile - [#622](https://github.com/jertel/elastalert2/pull/622) - @nsano-rururu +- [Docs] Include Docker example; add additional FAQs - [#623](https://github.com/jertel/elastalert2/pull/623) - @nsano-rururu +- Add support for URL shortening with Kibana 7.16+ - [#633](https://github.com/jertel/elastalert2/pull/633) - @jertel +- [example] URL correction of information about Elasticsearch - [#642](https://github.com/jertel/elastalert2/pull/642) - @nsano-rururu +- pylint 2.11.1 to 2.12.2 - [#651](https://github.com/jertel/elastalert2/pull/651) - @nsano-rururu + +# 2.2.3 + +## Breaking changes +- None + +## New features +- [Alertmanager] Added support for Alertmanager - [#503](https://github.com/jertel/elastalert2/pull/503) - @nsano-rururu +- Add summary_table_max_rows optional configuration to limit rows in summary tables - [#508](https://github.com/jertel/elastalert2/pull/508) - @mdavyt92 +- Added support for shortening Kibana Discover URLs using Kibana Shorten URL API - [#512](https://github.com/jertel/elastalert2/pull/512) - @JeffAshton +- Added new alerter `HTTP Post 2` which allow more flexibility to build the body/headers of the request. - [#530](https://github.com/jertel/elastalert2/pull/530) - @lepouletsuisse +- [Slack] Added new option to include url to jira ticket if it is created in the same pipeline. - [#547](https://github.com/jertel/elastalert2/pull/547) - @hugefarsen +- Added support for multi ElasticSearch instances. 
- [#548](https://github.com/jertel/elastalert2/pull/548) - @buratinopy + +## Other changes +- [Docs] Add exposed metrics documentation - [#498](https://github.com/jertel/elastalert2/pull/498) - @thisisxgp +- [Tests] Fix rules_test.py - [#499](https://github.com/jertel/elastalert2/pull/499) - @nsano-rururu +- Upgrade to Python 3.10 and Sphinx 4.2.0 - [#501](https://github.com/jertel/elastalert2/pull/501) - @jertel +- max_scrolling_count now has a default value of 990 to avoid stack overflow crashes - [#509](https://github.com/jertel/elastalert2/pull/509) - @jertel +- Update pytest 6.2.5, pytest-cov 3.0.0, pytest-xdist 2.4.0, pylint<2.12, tox 3.24.4 - [#511](https://github.com/jertel/elastalert2/pull/511) - @nsano-rururu +- Added a check on the value of the path "rules_folder" to make sure it exists - [#519](https://github.com/jertel/elastalert2/pull/519) - @AntoineBlaud +- [OpsGenie] Fix tags on subsequent alerts - [#537](https://github.com/jertel/elastalert2/pull/537) - @jertel + +# 2.2.2 + +## Breaking changes +- None + +## New features +- Added support for markdown style formatting of aggregation tables - [#415](https://github.com/jertel/elastalert2/pull/415) - @Neuro-HSOC +- [OpsGenie] Add support for custom description - [#457](https://github.com/jertel/elastalert2/pull/457), [#460](https://github.com/jertel/elastalert2/pull/460) - @nickbabkin +- [Tencent SMS] Added support for Tencent SMS - [#470](https://github.com/jertel/elastalert2/pull/470) - @liuxingjun +- Add support for Kibana 7.15 for Kibana Discover - [#481](https://github.com/jertel/elastalert2/pull/481) - @nsano-rururu +- Begin working toward support of OpenSearch (beta) [#483](https://github.com/jertel/elastalert2/pull/483) @nbrownus + +## Other changes +- [Rule Test] Fix issue related to --start/--end/--days params - [#424](https://github.com/jertel/elastalert2/pull/424), [#433](https://github.com/jertel/elastalert2/pull/433) - @thican +- [TheHive] Reduce risk of sourceRef collision for Hive Alerts by using full UUID -[#513](https://github.com/jertel/elastalert2/pull/513) - @fwalloe +- Changed the wording of ElastAlert to ElastAlert 2 and Update FAQ -[#446](https://github.com/jertel/elastalert2/pull/446) - @nsano-rururu +- Add missing show_ssl_warn and silence_qk_value params to docs - [#469](https://github.com/jertel/elastalert2/pull/469) - @jertel +- [OpsGenie] Clarify documentation for URL endpoint to use in European region - [#475](https://github.com/jertel/elastalert2/pull/475) - @nsano-rururu +- [Docs] The documentation has been updated as the name of Amazon Elasticsearch Service has changed to Amazon OpenSearch Service. - [#478](https://github.com/jertel/elastalert2/pull/478) - @nsano-rururu +- [Tests] Improve test coverage of tencentsms.py - [#479](https://github.com/jertel/elastalert2/pull/479) - @liuxingjun +- [Docs] Tidy Exotel documentation - [#488](https://github.com/jertel/elastalert2/pull/488) - @ferozsalam + +# 2.2.1 + +## Breaking changes +- None + +## New features +- None + +## Other changes +- Fixed typo in default setting accidentally introduced in [#407](https://github.com/jertel/elastalert2/pull/407) - [#413](https://github.com/jertel/elastalert2/pull/413) - @perceptron01 + +# 2.2.0 + +## Breaking changes +- [VictorOps] Changed `state_message` and `entity_display_name` values to be taken from an alert rule. - [#329](https://github.com/jertel/elastalert2/pull/329) - @ChristophShyper + - Potentially a breaking change if the alert subject changes due to the new default behavior. 
+- Change metric/percentage rule types to store query_key as dict, instead of string, for consistency with other rule types. [#340](https://github.com/jertel/elastalert2/issues/340) - @AntoineBlaud + +## New features +- [Kubernetes] Adding Image Pull Secret to Helm Chart - [#370](https://github.com/jertel/elastalert2/pull/370) - @robrankin +- Apply percentage_format_string to match_body percentage value; will appear in new percentage_formatted key - [#387](https://github.com/jertel/elastalert2/pull/387) - @iamxeph +- Add support for Kibana 7.14 for Kibana Discover - [#392](https://github.com/jertel/elastalert2/pull/392) - @nsano-rururu +- Add metric_format_string optional configuration for Metric Aggregation to format aggregated value - [#399](https://github.com/jertel/elastalert2/pull/399) - @iamxeph +- Make percentage_format_string support format() syntax in addition to old %-formatted syntax - [#403](https://github.com/jertel/elastalert2/pull/403) - @iamxeph +- Add custom_pretty_ts_format option to provides a way to define custom format of timestamps printed by pretty_ts() function - [#407](https://github.com/jertel/elastalert2/pull/407) - @perceptron01 + +## Other changes +- [Tests] Improve test code coverage - [#331](https://github.com/jertel/elastalert2/pull/331) - @nsano-rururu +- [Docs] Upgrade Sphinx from 4.0.2 to 4.1.2- [#332](https://github.com/jertel/elastalert2/pull/332) [#343](https://github.com/jertel/elastalert2/pull/343) [#344](https://github.com/jertel/elastalert2/pull/344) [#369](https://github.com/jertel/elastalert2/pull/369) - @nsano-rururu +- Ensure hit count returns correct value for newer ES clusters - [#333](https://github.com/jertel/elastalert2/pull/333) - @jeffashton +- [Tests] Upgrade Tox from 3.23.1 to 3.24.1 - [#345](https://github.com/jertel/elastalert2/pull/345) [#388](https://github.com/jertel/elastalert2/pull/388) - @nsano-rururu +- Upgrade Jinja from 2.11.3 to 3.0.1 - [#350](https://github.com/jertel/elastalert2/pull/350) - @mrfroggg +- [Tests] Add test code. Changed ubuntu version of Dockerfile-test from latest to 21.10. 
- [#354](https://github.com/jertel/elastalert2/pull/354) - @nsano-rururu +- Remove Python 2.x compatibility code - [#354](https://github.com/jertel/elastalert2/pull/354) - @nsano-rururu +- [Docs] Added Chatwork proxy settings to documentation - [#360](https://github.com/jertel/elastalert2/pull/360) - @nsano-rururu +- Add settings to schema.yaml(Chatwork proxy, Dingtalk proxy) - [#361](https://github.com/jertel/elastalert2/pull/361) - @nsano-rururu +- [Docs] Tidy Twilio alerter documentation - [#363](https://github.com/jertel/elastalert2/pull/363) - @ferozsalam +- [Tests] Improved test coverage for opsgenie.py 96% to 100% - [#364](https://github.com/jertel/elastalert2/pull/364) - @nsano-rururu +- [Docs] Update mentions of JIRA to Jira - [#365](https://github.com/jertel/elastalert2/pull/365) - @ferozsalam +- [Docs] Tidy Datadog alerter documentation - [#380](https://github.com/jertel/elastalert2/pull/380) - @ferozsalam + +# 2.1.2 +## Breaking changes +- None + +## New features +- [Rocket.Chat] Add support for generating Kibana Discover URLs to Rocket.Chat alerter - [#260](https://github.com/jertel/elastalert2/pull/260) - @nsano-rururu +- [Jinja] Provide rule key/values as possible Jinja data inputs - [#281](https://github.com/jertel/elastalert2/pull/281) - @mrfroggg +- [Kubernetes] Add securityContext and podSecurityContext to Helm chart - [#289](https://github.com/jertel/elastalert2/pull/289) - @lepouletsuisse +- [Rocket.Chat] Add options: rocket_chat_ca_certs, rocket_chat_ignore_ssl_errors, rocket_chat_timeout - [#302](https://github.com/jertel/elastalert2/pull/302) - @nsano-rururu +- [Jinja] Favor match keys over colliding rule keys when resolving Jinja vars; also add alert_text_jinja unit test - [#311](https://github.com/jertel/elastalert2/pull/311) - @mrfroggg +- [Opsgenie] Added possibility to specify source and entity attrs - [#315](https://github.com/jertel/elastalert2/pull/315) - @konstantin-kornienko +- [ServiceNow] Add support for `servicenow_impact` and `servicenow_urgency` parameters for ServiceNow alerter - [#316](https://github.com/jertel/elastalert2/pull/316) - @randolph-esnet +- [Jinja] Add Jinja support to alert_subject - [#318](https://github.com/jertel/elastalert2/pull/318) - @mrfroggg +@lepouletsuisse +- Metrics will now include time_taken, representing the execution duration of the rule - [#324](https://github.com/jertel/elastalert2/pull/324) - @JeffAshton + +## Other changes +- [Prometheus] Continue fix for prometheus wrapper writeback function signature - [#256](https://github.com/jertel/elastalert2/pull/256) - @greut +- [Stomp] Improve exception handling in alerter - [#261](https://github.com/jertel/elastalert2/pull/261) - @nsano-rururu +- [AWS] Improve exception handling in Amazon SES and SNS alerters - [#264](https://github.com/jertel/elastalert2/pull/264) - @nsano-rururu +- [Docs] Clarify documentation for starting ElastAlert 2 - [#265](https://github.com/jertel/elastalert2/pull/265) - @ferozsalam +- Add exception handling for unsupported operand type - [#266](https://github.com/jertel/elastalert2/pull/266) - @nsano-rururu +- [Docs] Improve documentation for Python build requirements - [#267](https://github.com/jertel/elastalert2/pull/267) - @nsano-rururu +- [DataDog] Correct alerter logging - [#268](https://github.com/jertel/elastalert2/pull/268) - @nsano-rururu +- [Docs] Correct parameter code documentation for main ElastAlert runner - [#269](https://github.com/jertel/elastalert2/pull/269) - @ferozsalam +- [Command] alerter will now fail during init instead of 
during alert if given invalid command setting - [#270](https://github.com/jertel/elastalert2/pull/270) - @nsano-rururu +- [Docs] Consolidate all examples into a new examples/ sub folder - [#271](https://github.com/jertel/elastalert2/pull/271) - @ferozsalam +- [TheHive] Add example rule with Kibana Discover URL and query values in alert text - [#276](https://github.com/jertel/elastalert2/pull/276) - @markus-nclose +- Upgrade pytest-xdist from 2.2.1 to 2.3.0; clarify HTTPS support in docs; Add additional logging - [#283](https://github.com/jertel/elastalert2/pull/283) - @nsano-rururu +- [Tests] Add more alerter test coverage - [#284](https://github.com/jertel/elastalert2/pull/284) - @nsano-rururu +- [Tests] Improve structure and placement of test-related files in project tree - [#287](https://github.com/jertel/elastalert2/pull/287) - @ferozsalam +- Only attempt to adjust timezone if timezone is set to a non-empty string - [#288](https://github.com/jertel/elastalert2/pull/288) - @ferozsalam +- [Kubernetes] Deprecated `podSecurityPolicy` feature in Helm Chart as [it's deprecated in Kubernetes 1.21](https://kubernetes.io/blog/2021/04/06/podsecuritypolicy-deprecation-past-present-and-future/) - [#289](https://github.com/jertel/elastalert2/pull/289) - @lepouletsuisse +- [Slack] Fix slack_channel_override schema - [#291](https://github.com/jertel/elastalert2/pull/291) - @JeffAshton +- [Rocket.Chat] Fix rocket_chat_channel_override schema - [#293](https://github.com/jertel/elastalert2/pull/293) - @nsano-rururu +- [Tests] Increase code coverage - [#294](https://github.com/jertel/elastalert2/pull/294) - @nsano-rururu +- [Docs] Added Kibana Discover sample - [#295](https://github.com/jertel/elastalert2/pull/295) - @nsano-rururu +- [AWS] Remove deprecated boto_profile setting - [#299](https://github.com/jertel/elastalert2/pull/299) - @nsano-rururu +- [Slack] Correct slack_alert_fields schema definition - [#300](https://github.com/jertel/elastalert2/pull/300) - @nsano-rururu +- [Tests] Correct code coverage to eliminate warnings - [#301](https://github.com/jertel/elastalert2/pull/301) - @nsano-rururu +- Eliminate unnecessary calls to Elasticsearch - [#303](https://github.com/jertel/elastalert2/pull/303) - @JeffAshton +- [Zabbix] Fix timezone parsing - [#304](https://github.com/jertel/elastalert2/pull/304) - @JeffAshton +- Improve logging of scheduler - [#305](https://github.com/jertel/elastalert2/pull/305) - @JeffAshton +- [Jinja] Update Jinja from 2.11.3 to 3.0.1; Improve handling of colliding variables - [#311](https://github.com/jertel/elastalert2/pull/311) - @mrfroggg +- [TheHive] Force observable artifacts to be strings - [#313](https://github.com/jertel/elastalert2/pull/313) - @pandvan +- Upgrade pylint from <2.9 to <2.10 - [#314](https://github.com/jertel/elastalert2/pull/314) - @nsano-rururu +- [ChatWork] Enforce character limit - [#319](https://github.com/jertel/elastalert2/pull/319) - @nsano-rururu +- [LineNotify] Enforce character limit - [#320](https://github.com/jertel/elastalert2/pull/320) - @nsano-rururu +- [Discord] Remove trailing backticks from alert body - [#321](https://github.com/jertel/elastalert2/pull/321) - @nsano-rururu +- Redirecting warnings to logging module - [#325](https://github.com/jertel/elastalert2/pull/325) - @JeffAshton + +# 2.1.1 + +## Breaking changes +- None + +## New features +- Add support for RocketChat - [#182](https://github.com/jertel/elastalert2/pull/182) - @nsano-rururu +- Expose rule scheduler properties as configurable settings - 
[#192](https://github.com/jertel/elastalert2/pull/192) - @jertel +- Exclude empty observables from TheHive requests - [#193](https://github.com/jertel/elastalert2/pull/193) - @LaZyDK +- Ensure TheHive tags are converted to strings before submitting TheHive request - [#206](https://github.com/jertel/elastalert2/pull/206) - @LaZyDK +- Add support for Elasticsearch API key authentication - [#208](https://github.com/jertel/elastalert2/pull/208) - @vbisserie +- Add support for Elasticsearch 7.13 for building Kibana Discover URLs - [#212](https://github.com/jertel/elastalert2/pull/212) - @nsano-rururu +- Follow symbolic links when traversing rules folder for rule files - [#214](https://github.com/jertel/elastalert2/pull/214) - @vbisserie +- Support optional suppression of SSL log warnings when http-posting alerts - [#222](https://github.com/jertel/elastalert2/pull/222) - @nsano-rururu +- Add support for inclusion of Kibana Discover URLs in MatterMost messages - [#239](https://github.com/jertel/elastalert2/pull/239) - @nsano-rururu +- Add support for inclusion of alert Title in MatterMost messages - [#246](https://github.com/jertel/elastalert2/pull/246) - @nsano-rururu + +## Other changes +- Speed up unit tests by adding default parallelism - [#164](https://github.com/jertel/elastalert2/pull/164) - @ferozsalam +- Remove unused writeback_alias and fix --patience argument - [#167](https://github.com/jertel/elastalert2/pull/167) - @mrfroggg +- Fix Bearer token auth in initialisation script - [#169](https://github.com/jertel/elastalert2/pull/169) - @ferozsalam +- Finish refactoring alerters and tests into individual files - [#175, et al](https://github.com/jertel/elastalert2/pull/175) - @ferozsalam +- Improve HTTP POST alert documentation - [#178](https://github.com/jertel/elastalert2/pull/178) - @nsano-rururu +- Upgrade Sphinx from 3.5.4 to 4.0.2 - [#179](https://github.com/jertel/elastalert2/pull/179) - @nsano-rururu +- Fix Sphinx dependency version - [#181](https://github.com/jertel/elastalert2/pull/181) - @ferozsalam +- Switch to absolute imports - [#198](https://github.com/jertel/elastalert2/pull/198) - @ferozsalam +- Encode JSON output before writing test data - [#215](https://github.com/jertel/elastalert2/pull/215) - @vbisserie +- Update pytest from 6.0.0 to 6.2.4 - [#223](https://github.com/jertel/elastalert2/pull/223/files) - @nsano-rururu +- Ensure ChatWork alerter fails to initialize if missing required args - [#224](https://github.com/jertel/elastalert2/pull/224) - @nsano-rururu +- Ensure DataDog alerter fails to initialize if missing required args - [#225](https://github.com/jertel/elastalert2/pull/225) - @nsano-rururu +- Ensure DingTalk alerter fails to initialize if missing required args - [#226](https://github.com/jertel/elastalert2/pull/226) - @nsano-rururu +- Ensure Zabbix alerter fails to initialize if missing required args - [#227](https://github.com/jertel/elastalert2/pull/227) - @nsano-rururu +- MS Teams alerter no longer requires ms_teams_alert_summary arg - [#228](https://github.com/jertel/elastalert2/pull/228) - @nsano-rururu +- Improve Gitter alerter by explicitly specifying arg names - [#230](https://github.com/jertel/elastalert2/pull/230) - @nsano-rururu +- Add more alerter test code coverage - [#231](https://github.com/jertel/elastalert2/pull/231) - @nsano-rururu +- Upgrade pytest-cov from 2.12.0 to 2.12.1 - [#232](https://github.com/jertel/elastalert2/pull/232) - @nsano-rururu +- Migrate away from external test mock dependency - 
[#233](https://github.com/jertel/elastalert2/pull/233) - @nsano-rururu +- Improve ElastAlert 2 documentation relating to running scenarios - [#234](https://github.com/jertel/elastalert2/pull/234) - @ferozsalam +- Improve test coverage and correct dict lookup syntax for alerter init functions - [#235](https://github.com/jertel/elastalert2/pull/235) - @nsano-rururu +- Fix schema bug with MatterMost alerts - [#239](https://github.com/jertel/elastalert2/pull/239) - @nsano-rururu +- Fix prometheus wrapper writeback function signature - [#253](https://github.com/jertel/elastalert2/pull/253) - @greut + +# 2.1.0 + +## Breaking changes +- TheHive alerter refactoring - [#142](https://github.com/jertel/elastalert2/pull/142) - @ferozsalam + - See the updated documentation for changes required to alert formatting +- Dockerfile refactor for performance and size improvements - [#102](https://github.com/jertel/elastalert2/pull/102) - @jgregmac + - Dockerfile base image changed from `python/alpine` to `python/slim-buster` to take advantage of pre-build python wheels, accelerate build times, and reduce image size. If you have customized an image, based on jertel/elastalert2, you may need to make adjustments. + - Default base path changed to `/opt/elastalert` in the Dockerfile and in Helm charts. Update your volume binds accordingly. + - Dockerfile now runs as a non-root user "elastalert". Ensure your volumes are accessible by this non-root user. + - System packages removed from the Dockerfile: All dev packages, cargo, libmagic. Image size reduced to 250Mb. + - `tmp` files and dev packages removed from the final container image. + +## New features +- Support for multiple rules directories and fix `..data` Kubernetes/Openshift recursive directories in FileRulesLoader [#157](https://github.com/jertel/elastalert2/pull/157) - @mrfroggg +- Support environment variable substition in yaml files - [#149](https://github.com/jertel/elastalert2/pull/149) - @archfz +- Update schema.yaml and enhance documentation for Email alerter - [#144](https://github.com/jertel/elastalert2/pull/144) - @nsano-rururu +- Default Email alerter to use port 25, and require http_post_url for HTTP Post alerter - [#143](https://github.com/jertel/elastalert2/pull/143) - @nsano-rururu +- Support extra message features for Slack and Mattermost - [#140](https://github.com/jertel/elastalert2/pull/140) - @nsano-rururu +- Support a footer in alert text - [#133](https://github.com/jertel/elastalert2/pull/133) - @nsano-rururu +- Added support for alerting via Amazon Simple Email System (SES) - [#105](https://github.com/jertel/elastalert2/pull/105) - @nsano-rururu + +## Other changes +- Begin alerter refactoring to split large source code files into smaller files - [#161](https://github.com/jertel/elastalert2/pull/161) - @ferozsalam +- Update contribution guidelines with additional instructions for local testing - [#147](https://github.com/jertel/elastalert2/pull/147), [#148](https://github.com/jertel/elastalert2/pull/148) - @ferozsalam +- Add more unit test coverage - [#108](https://github.com/jertel/elastalert2/pull/108) - @nsano-rururu +- Update documentation: describe limit_execution, correct alerters list - [#107](https://github.com/jertel/elastalert2/pull/107) - @fberrez +- Fix issue with testing alerts that contain Jinja templates - [#101](https://github.com/jertel/elastalert2/pull/101) - @jertel +- Updated all references of Elastalert to use the mixed case ElastAlert, as that is the most prevalent formatting found in the documentation. 
+ +# 2.0.4 + +## Breaking changes +- None + +## New features +- Update python-dateutil requirement from <2.7.0,>=2.6.0 to >=2.6.0,<2.9.0 - [#96](https://github.com/jertel/elastalert2/pull/96) - @nsano-rururu +- Update pylint requirement from <2.8 to <2.9 - [#95](https://github.com/jertel/elastalert2/pull/95) - @nsano-rururu +- Pin ES library to 7.0.0 due to upcoming newer library conflicts - [#90](https://github.com/jertel/elastalert2/pull/90) - @robrankin +- Re-introduce CHANGELOG.md to project - [#88](https://github.com/jertel/elastalert2/pull/88) - @ferozsalam +- Add option for suppressing TLS warnings - [#87](https://github.com/jertel/elastalert2/pull/87) - @alvarolmedo +- Add support for Twilio Copilot - [#86](https://github.com/jertel/elastalert2/pull/86) - @cdmastercom +- Support bearer token authentication with ES - [#85](https://github.com/jertel/elastalert2/pull/85) - @StribPav +- Add support for statsd metrics - [#83](https://github.com/jertel/elastalert2/pull/83) - @eladamitpxi +- Add support for multiple imports of rules via recursive import - [#83](https://github.com/jertel/elastalert2/pull/83) - @eladamitpxi +- Specify search size of 0 to improve efficiency of searches - [#82](https://github.com/jertel/elastalert2/pull/82) - @clyfish +- Add alert handler to create Datadog events - [#81](https://github.com/jertel/elastalert2/pull/81) - @3vanlock + +## Other changes + +- Added missing Helm chart config.yaml template file. +- Update .gitignore with more precise rule for /config.yaml file. +- Now publishing container images to both DockerHub and to GitHub Packages for redundancy. +- Container images are now built and published via GitHub actions instead of relying on DockerHub's automated builds. +- Update PIP library description and Helm chart description to be consistent. +- Continue updates to change references from _ElastAlert_ to _ElastAlert 2_ diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md new file mode 100644 index 000000000..92e4b6c77 --- /dev/null +++ b/CONTRIBUTING.md @@ -0,0 +1,50 @@ +# Contributing to ElastAlert 2 + +## Guidelines + +PRs are welcome, but must include tests when possible. PRs will not be merged if they do not pass +the automated CI workflows. To test your changes before creating a PR, run +`sudo make clean; sudo make test-docker` from the root of the repository (requires Docker to be +running on your machine). + +Make sure you follow the coding style of the existing codebase. Do not reformat the existing code to fit your own personal style. + +Before submitting the PR, review that you have included the following changes, where applicable: +- Documentation: If you're adding new functionality, any new configuration options should be documented appropriately in the docs/ folder. +- Helm Chart: If your new feature introduces settings, consider adding them to the Helm chart [README.md](chart/elastalert2/README.md) and [values.yaml](chart/elastalert2/values.yaml). +- Examples: If your new feature includes new configuration options, review the [Example config file](examples/config.yaml.example) to see if they should be added there for consistency with other configuration options. +- Change log: Describe your contribution in the appropriate section(s) for the _Upcoming release_ in the [CHANGELOG.md](CHANGELOG.md) file. + +## Releases + +STOP - DO NOT PROCEED! This section is only applicable to project administrators. PR _contributors_ do not need to follow the below procedure. 
+ +As ElastAlert 2 is a community-maintained project, releases will typically contain unrelated contributions without a common theme. It's up to the maintainers to determine when the project is ready for a release; however, if you are looking to use a newly merged feature that hasn't yet been released, feel free to open a [discussion][5] and let us know. + +Maintainers, when creating a new release, follow the procedure below: + +1. Determine an appropriate new version number in the format _a.b.c_, using the following guidelines: + - The major version (a) should not change. + - The minor version (b) should be incremented if a new feature has been added or if a bug fix will have a significant user impact. Reset the patch version to zero if the minor version is incremented. + - The patch version (c) should be incremented when low-impact bugs are fixed, or security vulnerabilities are patched. +2. Ensure the following are updated _before_ publishing/tagging the new release: + - [setup.py](setup.py): Match the version to the new release version. + - [Chart.yaml](chart/elastalert2/Chart.yaml): Match the chart version and the app version to the new release version (typically keep them in sync). + - [values.yaml](chart/elastalert2/values.yaml): Match the default image version to the new release version. + - [Chart README.md](chart/elastalert2/README.md): Match the default image version to the new release version. + - [Docs](docs/source/running_elastalert.rst): Match the default image version to the new release version. + - [CHANGELOG.md](CHANGELOG.md): This must contain all PRs and any other relevant notes about this release. +3. Publish a [new][1] release. + - The title (and tag) of the release will be the same value as the new version determined in step 1. + - Paste the new version change notes from CHANGELOG.md into the description field. + - Check the box to 'Create a discussion for this release'. +4. Verify that artifacts have been published: + - Python PIP package was [published][3] successfully. + - Helm chart has been [published][4] successfully. + - Container image was [published][2] successfully. + +[1]: https://github.com/jertel/elastalert2/releases/new +[2]: https://github.com/jertel/elastalert2/actions/workflows/publish_image.yml +[3]: https://github.com/jertel/elastalert2/actions/workflows/python-publish.yml +[4]: https://github.com/jertel/elastalert2/actions/workflows/upload_chart.yml +[5]: https://github.com/jertel/elastalert2/discussions diff --git a/Dockerfile b/Dockerfile new file mode 100644 index 000000000..7a49357cc --- /dev/null +++ b/Dockerfile @@ -0,0 +1,23 @@ +FROM public.ecr.aws/i1i0w6p5/python:3.9.2 as build + +ENV ELASTALERT_HOME /opt/elastalert +ADD . 
/opt/elastalert/ + +WORKDIR /opt + +RUN pip install "setuptools==65.5.0" "elasticsearch==6.3.1" + +WORKDIR "${ELASTALERT_HOME}" + +RUN pip install -r requirements.txt +RUN python setup.py install + +RUN pip show elastalert2 + +RUN python --version + +WORKDIR /opt/elastalert + +COPY commands.sh /opt/elastalert/commands.sh +RUN ["chmod", "+x", "/opt/elastalert/commands.sh"] +ENTRYPOINT ["sh","/opt/elastalert/commands.sh"] diff --git a/Dockerfile-test b/Dockerfile-test deleted file mode 100644 index 3c153e644..000000000 --- a/Dockerfile-test +++ /dev/null @@ -1,9 +0,0 @@ -FROM ubuntu:latest - -RUN apt-get update && apt-get upgrade -y -RUN apt-get -y install build-essential python3.6 python3.6-dev python3-pip libssl-dev git - -WORKDIR /home/elastalert - -ADD requirements*.txt ./ -RUN pip3 install -r requirements-dev.txt diff --git a/Makefile b/Makefile index 470062ce8..b8dd36157 100644 --- a/Makefile +++ b/Makefile @@ -1,12 +1,17 @@ .PHONY: all production test docs clean +COMPOSE = "-compose" +ifeq ($(shell docker$(COMPOSE) 2> /dev/null),) + COMPOSE = " compose" +endif + all: production production: @true docs: - tox -e docs + tox -c tests/tox.ini -e docs dev: $(LOCAL_CONFIG_DIR) $(LOGS_DIR) install-hooks @@ -14,17 +19,21 @@ install-hooks: pre-commit install -f --install-hooks test: - tox + tox -c tests/tox.ini test-elasticsearch: - tox -- --runelasticsearch + tox -c tests/tox.ini -- --runelasticsearch test-docker: - docker-compose --project-name elastalert build tox - docker-compose --project-name elastalert run tox + $(shell echo docker$(COMPOSE)) -f tests/docker-compose.yml --project-name elastalert build tox + $(shell echo docker$(COMPOSE)) -f tests/docker-compose.yml --project-name elastalert run --rm tox \ + tox -c tests/tox.ini -- $(filter-out $@,$(MAKECMDGOALS)) clean: make -C docs clean find . -name '*.pyc' -delete find . -name '__pycache__' -delete - rm -rf virtualenv_run .tox .coverage *.egg-info build + rm -rf virtualenv_run tests/.tox tests/.coverage *.egg-info docs/build + +%: + @: diff --git a/README.md b/README.md index 99acc02e7..a169c97ae 100644 --- a/README.md +++ b/README.md @@ -1,323 +1,45 @@ -Recent changes: As of Elastalert 0.2.0, you must use Python 3.6. Python 2 will not longer be supported. +# ElastAlert 2 -[![Build Status](https://travis-ci.org/Yelp/elastalert.svg)](https://travis-ci.org/Yelp/elastalert) -[![Join the chat at https://gitter.im/Yelp/elastalert](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/Yelp/elastalert?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) +ElastAlert 2 is a standalone software tool for alerting on anomalies, spikes, or other patterns of interest from data in [Elasticsearch][10] and [OpenSearch][9]. -## ElastAlert - [Read the Docs](http://elastalert.readthedocs.org). -### Easy & Flexible Alerting With Elasticsearch +ElastAlert 2 is backwards compatible with the original [ElastAlert][0] rules. -ElastAlert is a simple framework for alerting on anomalies, spikes, or other patterns of interest from data in Elasticsearch. +![CI Workflow](https://github.com/jertel/elastalert/workflows/master_build_test/badge.svg) -ElastAlert works with all versions of Elasticsearch. +## Docker and Kubernetes -At Yelp, we use Elasticsearch, Logstash and Kibana for managing our ever increasing amount of data and logs. -Kibana is great for visualizing and querying data, but we quickly realized that it needed a companion tool for alerting -on inconsistencies in our data. Out of this need, ElastAlert was created. 
+ElastAlert 2 is well-suited to being run as a microservice, and is available +as an image on [Docker Hub][2] and on [GitHub Container Registry][11]. For more instructions on how to +configure and run ElastAlert 2 using Docker, see [here][8]. -If you have data being written into Elasticsearch in near real time and want to be alerted when that data matches certain patterns, ElastAlert is the tool for you. If you can see it in Kibana, ElastAlert can alert on it. - -## Overview - -We designed ElastAlert to be reliable, highly modular, and easy to set up and configure. - -It works by combining Elasticsearch with two types of components, rule types and alerts. -Elasticsearch is periodically queried and the data is passed to the rule type, which determines when -a match is found. When a match occurs, it is given to one or more alerts, which take action based on the match. - -This is configured by a set of rules, each of which defines a query, a rule type, and a set of alerts. - -Several rule types with common monitoring paradigms are included with ElastAlert: - -- Match where there are at least X events in Y time" (``frequency`` type) -- Match when the rate of events increases or decreases" (``spike`` type) -- Match when there are less than X events in Y time" (``flatline`` type) -- Match when a certain field matches a blacklist/whitelist" (``blacklist`` and ``whitelist`` type) -- Match on any event matching a given filter" (``any`` type) -- Match when a field has two different values within some time" (``change`` type) -- Match when a never before seen term appears in a field" (``new_term`` type) -- Match when the number of unique values for a field is above or below a threshold (``cardinality`` type) - -Currently, we have built-in support for the following alert types: - -- Email -- JIRA -- OpsGenie -- Commands -- HipChat -- MS Teams -- Slack -- Telegram -- GoogleChat -- AWS SNS -- VictorOps -- PagerDuty -- PagerTree -- Exotel -- Twilio -- Gitter -- Line Notify -- Zabbix - -Additional rule types and alerts can be easily imported or written. - -In addition to this basic usage, there are many other features that make alerts more useful: - -- Alerts link to Kibana dashboards -- Aggregate counts for arbitrary fields -- Combine alerts into periodic reports -- Separate alerts by using a unique key field -- Intercept and enhance match data - -To get started, check out `Running ElastAlert For The First Time` in the [documentation](http://elastalert.readthedocs.org). - -## Running ElastAlert -You can either install the latest released version of ElastAlert using pip: - -```pip install elastalert``` - -or you can clone the ElastAlert repository for the most recent changes: - -```git clone https://github.com/Yelp/elastalert.git``` - -Install the module: - -```pip install "setuptools>=11.3"``` - -```python setup.py install``` - -The following invocation can be used to run ElastAlert after installing - -``$ elastalert [--debug] [--verbose] [--start ] [--end ] [--rule ] [--config ]`` - -``--debug`` will print additional information to the screen as well as suppresses alerts and instead prints the alert body. Not compatible with `--verbose`. - -``--verbose`` will print additional information without suppressing alerts. Not compatible with `--debug.` - -``--start`` will begin querying at the given timestamp. By default, ElastAlert will begin querying from the present. -Timestamp format is ``YYYY-MM-DDTHH-MM-SS[-/+HH:MM]`` (Note the T between date and hour). 
-Eg: ``--start 2014-09-26T12:00:00`` (UTC) or ``--start 2014-10-01T07:30:00-05:00`` - -``--end`` will cause ElastAlert to stop querying at the given timestamp. By default, ElastAlert will continue -to query indefinitely. - -``--rule`` will allow you to run only one rule. It must still be in the rules folder. -Eg: ``--rule this_rule.yaml`` - -``--config`` allows you to specify the location of the configuration. By default, it is will look for config.yaml in the current directory. - -## Third Party Tools And Extras -### Kibana plugin -![img](https://raw.githubusercontent.com/bitsensor/elastalert-kibana-plugin/master/showcase.gif) -Available at the [ElastAlert Kibana plugin repository](https://github.com/bitsensor/elastalert-kibana-plugin). - -### Docker -A [Dockerized version](https://github.com/bitsensor/elastalert) of ElastAlert including a REST api is build from `master` to `bitsensor/elastalert:latest`. - -```bash -git clone https://github.com/bitsensor/elastalert.git; cd elastalert -docker run -d -p 3030:3030 \ - -v `pwd`/config/elastalert.yaml:/opt/elastalert/config.yaml \ - -v `pwd`/config/config.json:/opt/elastalert-server/config/config.json \ - -v `pwd`/rules:/opt/elastalert/rules \ - -v `pwd`/rule_templates:/opt/elastalert/rule_templates \ - --net="host" \ - --name elastalert bitsensor/elastalert:latest -``` +A [Helm chart][7] is also included for easy configuration as a Kubernetes deployment. ## Documentation -Read the documentation at [Read the Docs](http://elastalert.readthedocs.org). - -To build a html version of the docs locally - -``` -pip install sphinx_rtd_theme sphinx -cd docs -make html -``` - -View in browser at build/html/index.html - -## Configuration - -See config.yaml.example for details on configuration. - -## Example rules - -Examples of different types of rules can be found in example_rules/. - -- ``example_spike.yaml`` is an example of the "spike" rule type, which allows you to alert when the rate of events, averaged over a time period, -increases by a given factor. This example will send an email alert when there are 3 times more events matching a filter occurring within the -last 2 hours than the number of events in the previous 2 hours. - -- ``example_frequency.yaml`` is an example of the "frequency" rule type, which will alert when there are a given number of events occuring -within a time period. This example will send an email when 50 documents matching a given filter occur within a 4 hour timeframe. - -- ``example_change.yaml`` is an example of the "change" rule type, which will alert when a certain field in two documents changes. In this example, -the alert email is sent when two documents with the same 'username' field but a different value of the 'country_name' field occur within 24 hours -of each other. - -- ``example_new_term.yaml`` is an example of the "new term" rule type, which alerts when a new value appears in a field or fields. In this example, -an email is sent when a new value of ("username", "computer") is encountered in example login logs. - -## Frequently Asked Questions - -### My rule is not getting any hits? - -So you've managed to set up ElastAlert, write a rule, and run it, but nothing happens, or it says ``0 query hits``. First of all, we recommend using the command ``elastalert-test-rule rule.yaml`` to debug. It will show you how many documents match your filters for the last 24 hours (or more, see ``--help``), and then shows you if any alerts would have fired. If you have a filter in your rule, remove it and try again. 
This will show you if the index is correct and that you have at least some documents. If you have a filter in Kibana and want to recreate it in ElastAlert, you probably want to use a query string. Your filter will look like - -``` -filter: -- query: - query_string: - query: "foo: bar AND baz: abc*" -``` -If you receive an error that Elasticsearch is unable to parse it, it's likely the YAML is not spaced correctly, and the filter is not in the right format. If you are using other types of filters, like ``term``, a common pitfall is not realizing that you may need to use the analyzed token. This is the default if you are using Logstash. For example, - -``` -filter: -- term: - foo: "Test Document" -``` - -will not match even if the original value for ``foo`` was exactly "Test Document". Instead, you want to use ``foo.raw``. If you are still having trouble troubleshooting why your documents do not match, try running ElastAlert with ``--es_debug_trace /path/to/file.log``. This will log the queries made to Elasticsearch in full so that you can see exactly what is happening. +Documentation, including an FAQ, for ElastAlert 2 can be found on [readthedocs.com][3]. This is the place to start if you're not familiar with ElastAlert 2 at all. -### I got hits, why didn't I get an alert? +Elasticsearch 8 support is documented in the [FAQ][12]. -If you got logs that had ``X query hits, 0 matches, 0 alerts sent``, it depends on the ``type`` why you didn't get any alerts. If ``type: any``, a match will occur for every hit. If you are using ``type: frequency``, ``num_events`` must occur within ``timeframe`` of each other for a match to occur. Different rules apply for different rule types. +The full list of platforms that ElastAlert 2 can fire alerts into can be found [in the documentation][4]. -If you see ``X matches, 0 alerts sent``, this may occur for several reasons. If you set ``aggregation``, the alert will not be sent until after that time has elapsed. If you have gotten an alert for this same rule before, that rule may be silenced for a period of time. The default is one minute between alerts. If a rule is silenced, you will see ``Ignoring match for silenced rule`` in the logs. +## Contributing -If you see ``X alerts sent`` but didn't get any alert, it's probably related to the alert configuration. If you are using the ``--debug`` flag, you will not receive any alerts. Instead, the alert text will be written to the console. Use ``--verbose`` to achieve the same affects without preventing alerts. If you are using email alert, make sure you have it configured for an SMTP server. By default, it will connect to localhost on port 25. It will also use the word "elastalert" as the "From:" address. Some SMTP servers will reject this because it does not have a domain while others will add their own domain automatically. See the email section in the documentation for how to configure this. - -### Why did I only get one alert when I expected to get several? - -There is a setting called ``realert`` which is the minimum time between two alerts for the same rule. Any alert that occurs within this time will simply be dropped. The default value for this is one minute. If you want to receive an alert for every single match, even if they occur right after each other, use - -``` -realert: - minutes: 0 -``` - -You can of course set it higher as well. - -### How can I prevent duplicate alerts? - -By setting ``realert``, you will prevent the same rule from alerting twice in an amount of time. 
- -``` -realert: - days: 1 -``` - -You can also prevent duplicates based on a certain field by using ``query_key``. For example, to prevent multiple alerts for the same user, you might use - -``` -realert: - hours: 8 -query_key: user -``` - -Note that this will also affect the way many rule types work. If you are using ``type: frequency`` for example, ``num_events`` for a single value of ``query_key`` must occur before an alert will be sent. You can also use a compound of multiple fields for this key. For example, if you only wanted to receieve an alert once for a specific error and hostname, you could use - -``` -query_key: [error, hostname] -``` - -Internally, this works by creating a new field for each document called ``field1,field2`` with a value of ``value1,value2`` and using that as the ``query_key``. - -The data for when an alert will fire again is stored in Elasticsearch in the ``elastalert_status`` index, with a ``_type`` of ``silence`` and also cached in memory. - -### How can I change what's in the alert? - -You can use the field ``alert_text`` to add custom text to an alert. By setting ``alert_text_type: alert_text_only``, it will be the entirety of the alert. You can also add different fields from the alert by using Python style string formatting and ``alert_text_args``. For example - -``` -alert_text: "Something happened with {0} at {1}" -alert_text_type: alert_text_only -alert_text_args: ["username", "@timestamp"] -``` - -You can also limit the alert to only containing certain fields from the document by using ``include``. - -``` -include: ["ip_address", "hostname", "status"] -``` - -### My alert only contains data for one event, how can I see more? - -If you are using ``type: frequency``, you can set the option ``attach_related: true`` and every document will be included in the alert. An alternative, which works for every type, is ``top_count_keys``. This will show the top counts for each value for certain fields. For example, if you have - -``` -top_count_keys: ["ip_address", "status"] -``` - -and 10 documents matched your alert, it may contain something like - -``` -ip_address: -127.0.0.1: 7 -10.0.0.1: 2 -192.168.0.1: 1 - -status: -200: 9 -500: 1 -``` - -### How can I make the alert come at a certain time? - -The ``aggregation`` feature will take every alert that has occured over a period of time and send them together in one alert. You can use cron style syntax to send all alerts that have occured since the last once by using - -``` -aggregation: - schedule: '2 4 * * mon,fri' -``` - -### I have lots of documents and it's really slow, how can I speed it up? - -There are several ways to potentially speed up queries. If you are using ``index: logstash-*``, Elasticsearch will query all shards, even if they do not possibly contain data with the correct timestamp. Instead, you can use Python time format strings and set ``use_strftime_index`` - -``` -index: logstash-%Y.%m -use_strftime_index: true -``` - -Another thing you could change is ``buffer_time``. By default, ElastAlert will query large overlapping windows in order to ensure that it does not miss any events, even if they are indexed in real time. In config.yaml, you can adjust ``buffer_time`` to a smaller number to only query the most recent few minutes. - -``` -buffer_time: - minutes: 5 -``` - -By default, ElastAlert will download every document in full before processing them. Instead, you can have ElastAlert simply get a count of the number of documents that have occured in between each query. 
To do this, set ``use_count_query: true``. This cannot be used if you use ``query_key``, because ElastAlert will not know the contents of each documents, just the total number of them. This also reduces the precision of alerts, because all events that occur between each query will be rounded to a single timestamp. - -If you are using ``query_key`` (a single key, not multiple keys) you can use ``use_terms_query``. This will make ElastAlert perform a terms aggregation to get the counts for each value of a certain field. Both ``use_terms_query`` and ``use_count_query`` also require ``doc_type`` to be set to the ``_type`` of the documents. They may not be compatible with all rule types. - -### Can I perform aggregations? - -The only aggregation supported currently is a terms aggregation, by setting ``use_terms_query``. - -### I'm not using @timestamp, what do I do? - -You can use ``timestamp_field`` to change which field ElastAlert will use as the timestamp. You can use ``timestamp_type`` to change it between ISO 8601 and unix timestamps. You must have some kind of timestamp for ElastAlert to work. If your events are not in real time, you can use ``query_delay`` and ``buffer_time`` to adjust when ElastAlert will look for documents. - -### I'm using flatline but I don't see any alerts - -When using ``type: flatline``, ElastAlert must see at least one document before it will alert you that it has stopped seeing them. - -### How can I get a "resolve" event? - -ElastAlert does not currently support stateful alerts or resolve events. - -### Can I set a warning threshold? - -Currently, the only way to set a warning threshold is by creating a second rule with a lower threshold. +Please see our [contributing guidelines][6]. ## License -ElastAlert is licensed under the Apache License, Version 2.0: http://www.apache.org/licenses/LICENSE-2.0 - -### Read the documentation at [Read the Docs](http://elastalert.readthedocs.org). - -### Questions? Drop by #elastalert on Freenode IRC. +ElastAlert 2 is licensed under the [Apache License, Version 2.0][5]. 
+ +[0]: https://github.com/yelp/elastalert +[1]: https://github.com/jertel/elastalert2/blob/master/examples/config.yaml.example +[2]: https://hub.docker.com/r/jertel/elastalert2 +[3]: https://elastalert2.readthedocs.io/ +[4]: https://elastalert2.readthedocs.io/en/latest/ruletypes.html#alerts +[5]: https://www.apache.org/licenses/LICENSE-2.0 +[6]: https://github.com/jertel/elastalert2/blob/master/CONTRIBUTING.md +[7]: https://github.com/jertel/elastalert2/tree/master/chart/elastalert2 +[8]: https://elastalert2.readthedocs.io/en/latest/running_elastalert.html +[9]: https://opensearch.org/ +[10]: https://github.com/elastic/elasticsearch +[11]: https://github.com/jertel/elastalert2/pkgs/container/elastalert2%2Felastalert2 +[12]: https://elastalert2.readthedocs.io/en/latest/recipes/faq.html#does-elastalert-2-support-elasticsearch-8 diff --git a/changelog.md b/changelog.md deleted file mode 100644 index 975d6855f..000000000 --- a/changelog.md +++ /dev/null @@ -1,383 +0,0 @@ -# Change Log - -# v0.2.4 - -### Added -- Added back customFields support for The Hive - -# v0.2.3 - -### Added -- Added back TheHive alerter without TheHive4py library - -# v0.2.2 - -### Added -- Integration with Kibana Discover app -- Addied ability to specify opsgenie alert details  - -### Fixed -- Fix some encoding issues with command alerter -- Better error messages for missing config file -- Fixed an issue with run_every not applying per-rule -- Fixed an issue with rules not being removed -- Fixed an issue with top count keys and nested query keys -- Various documentation fixes -- Fixed an issue with not being able to use spike aggregation - -### Removed -- Remove The Hive alerter - -# v0.2.1 - -### Fixed -- Fixed an AttributeError introduced in 0.2.0 - -# v0.2.0 - -- Switched to Python 3 - -### Added -- Add rule loader class for customized rule loading -- Added thread based rules and limit_execution -- Run_every can now be customized per rule - -### Fixed -- Various small fixes - -# v0.1.39 - -### Added -- Added spike alerts for metric aggregations -- Allow SSL connections for Stomp -- Allow limits on alert text length -- Add optional min doc count for terms queries -- Add ability to index into arrays for alert_text_args, etc - -### Fixed -- Fixed bug involving --config flag with create-index -- Fixed some settings not being inherited from the config properly -- Some fixes for Hive alerter -- Close SMTP connections properly -- Fix timestamps in Pagerduty v2 payload -- Fixed an bug causing aggregated alerts to mix up - -# v0.1.38 - -### Added -- Added PagerTree alerter -- Added Line alerter -- Added more customizable logging -- Added new logic in test-rule to detemine the default timeframe - -### Fixed -- Fixed an issue causing buffer_time to sometimes be ignored - -# v0.1.37 - -### Added -- Added more options for Opsgenie alerter -- Added more pagerduty options -- Added ability to add metadata to elastalert logs - -### Fixed -- Fixed some documentation to be more clear -- Stop requiring doc_type for metric aggregations -- No longer puts quotes around regex terms in blacklists or whitelists - -# v0.1.36 - -### Added -- Added a prefix "metric_" to the key used for metric aggregations to avoid possible conflicts -- Added option to skip Alerta certificate validation - -### Fixed -- Fixed a typo in the documentation for spike rule - -# v0.1.35 - -### Fixed -- Fixed an issue preventing new term rule from working with terms query - -# v0.1.34 - -### Added -- Added prefix/suffix support for summary table -- Added support for 
ignoring SSL validation in Slack -- More visible exceptions during query parse failures - -### Fixed -- Fixed top_count_keys when using compound query_key -- Fixed num_hits sometimes being reported too low -- Fixed an issue with setting ES_USERNAME via env -- Fixed an issue when using test script with custom timestamps -- Fixed a unicode error when using Telegram -- Fixed an issue with jsonschema version conflict -- Fixed an issue with nested timestamps in cardinality type - -# v0.1.33 - -### Added -- Added ability to pipe alert text to a command -- Add --start and --end support for elastalert-test-rule -- Added ability to turn blacklist/whitelist files into queries for better performance -- Allow setting of OpsGenie priority -- Add ability to query the adjacent index if timestamp_field not used for index timestamping -- Add support for pagerduty v2 -- Add option to turn off .raw/.keyword field postfixing in new term rule -- Added --use-downloaded feature for elastalert-test-rule - -### Fixed -- Fixed a bug that caused num_hits in matches to sometimes be erroneously small -- Fixed an issue with HTTP Post alerter that could cause it to hang indefinitely -- Fixed some issues with string formatting for various alerters -- Fixed a couple of incorrect parts of the documentation - -# v0.1.32 - -### Added -- Add support for setting ES url prefix via environment var -- Add support for using native Slack fields in alerts - -### Fixed -- Fixed a bug that would could scrolling queries to sometimes terminate early - -# v0.1.31 - -### Added -- Added ability to add start date to new term rule - -### Fixed -- Fixed a bug in create_index which would try to delete a nonexistent index -- Apply filters to new term rule all terms query -- Support Elasticsearch 6 for new term rule -- Fixed is_enabled not working on rule changes - - -# v0.1.30 - -### Added -- Alerta alerter -- Added support for transitioning JIRA issues -- Option to recreate index in elastalert-create-index - -### Fixed -- Update jira_ custom fields before each alert if they were modified -- Use json instead of simplejson -- Allow for relative path for smtp_auth_file -- Fixed some grammar issues -- Better code formatting of index mappings -- Better formatting and size limit for HipChat HTML -- Fixed gif link in readme for kibana plugin -- Fixed elastalert-test-rule with Elasticsearch > 4 -- Added documentation for is_enabled option - -## v0.1.29 - -### Added -- Added a feature forget_keys to prevent realerting when using flatline with query_key -- Added a new alert_text_type, aggregation_summary_only - -### Fixed -- Fixed incorrect documentation about es_conn_timeout default - -## v0.1.28 - -### Added -- Added support for Stride formatting of simple HTML tags -- Added support for custom titles in Opsgenie alerts -- Added a denominator to percentage match based alerts - -### Fixed -- Fixed a bug with Stomp alerter connections -- Removed escaping of some characaters in Slack messages - -## v0.1.27 - -# Added -- Added support for a value other than in formatted alerts - -### Fixed -- Fixed a failed creation of elastalert indicies when using Elasticsearch 6 -- Truncate Telegram alerts to avoid API errors - -## v0.1.26 - -### Added -- Added support for Elasticsearch 6 -- Added support for mentions in Hipchat - -### Fixed -- Fixed an issue where a nested field lookup would crash if one of the intermediate fields was null - -## v0.1.25 - -### Fixed -- Fixed a bug causing new term rule to break unless you passed a start time -- Add a slight 
clarification on the localhost:9200 reported in es_debug_trace - -## v0.1.24 - -### Fixed -- Pinned pytest -- create-index reads index name from config.yaml -- top_count_keys now works for context on a flatline rule type -- Fixed JIRA behavior for issues with statuses that have spaces in the name - -## v0.1.22 - -### Added -- Added Stride alerter -- Allow custom string formatters for aggregation percentage -- Added a field to disable rules from config -- Added support for subaggregations for the metric rule type - -### Fixed -- Fixed a bug causing create-index to fail if missing config.yaml -- Fixed a bug when using ES5 with query_key and top_count_keys -- Allow enhancements to set and clear arbitrary JIRA fields -- Fixed a bug causing timestamps to be formatted in scientific notation -- Stop attempting to initialize alerters in debug mode -- Changed default alert ordering so that JIRA tickets end up in other alerts -- Fixed a bug when using Stomp alerter with complex query_key -- Fixed a bug preventing hipchat room ID from being an integer -- Fixed a bug causing duplicate alerts when using spike with alert_on_new_data -- Minor fixes to summary table formatting -- Fixed elastalert-test-rule when using new term rule type - -## v0.1.21 - -### Fixed -- Fixed an incomplete bug fix for preventing duplicate enhancement runs - -## v0.1.20 - -### Added -- Added support for client TLS keys - -### Fixed -- Fixed the formatting of summary tables in Slack -- Fixed ES_USE_SSL env variable -- Fixed the unique value count printed by new_term rule type -- Jira alerter no longer uses the non-existent json code formatter - -## v0.1.19 - -### Added -- Added support for populating JIRA fields via fields in the match -- Added support for using a TLS certificate file for SMTP connections -- Allow a custom suffix for non-analyzed Elasticsearch fields, like ".raw" or ".keyword" -- Added match_time to Elastalert alert documents in Elasticsearch - -### Fixed -- Fixed an error in the documentation for rule importing -- Prevent enhancements from re-running on retried alerts -- Fixed a bug when using custom timestamp formats and new term rule -- Lowered jira_bump_after_inactivity default to 0 days - -## v0.1.18 - -### Added -- Added a new alerter "post" based on "simple" which makes POSTS JSON to HTTP endpoints -- Added an option jira_bump_after_inacitivty to prevent ElastAlert commenting on active JIRA tickets - -### Removed -- Removed "simple" alerter, replaced by "post" - -## v0.1.17 - -### Added -- Added a --patience flag to allow Elastalert to wait for Elasticsearch to become available -- Allow custom PagerDuty alert titles via alert_subject - -## v0.1.16 - -### Fixed -- Fixed a bug where JIRA titles might not use query_key values -- Fixed a bug where flatline alerts don't respect query_key for realert -- Fixed a typo "twilio_accout_sid" - -### Added -- Added support for env variables in kibana4 dashboard links -- Added ca_certs option for custom CA support - -## v0.1.15 - -### Fixed -- Fixed a bug where Elastalert would crash on connection error during startup -- Fixed some typos in documentation -- Fixed a bug in metric bucket offset calculation -- Fixed a TypeError in Service Now alerter - -### Added -- Added support for compound compare key in change rules -- Added support for absolute paths in rule config imports -- Added Microsoft Teams alerter -- Added support for markdown in Slack alerts -- Added error codes to test script -- Added support for lists in email_from_field - - -## v0.1.14 - 2017-05-11 - -### 
Fixed -- Twilio alerter uses the from number appropriately -- Fixed a TypeError in SNS alerter -- Some changes to requirements.txt and setup.py -- Fixed a TypeError in new term rule - -### Added -- Set a custom pagerduty incident key -- Preserve traceback in most exceptions - -## v0.1.12 - 2017-04-21 - -### Fixed -- Fixed a bug causing filters to be ignored when using Elasticsearch 5 - - -## v0.1.11 - 2017-04-19 - -### Fixed -- Fixed an issue that would cause filters starting with "query" to sometimes throw errors in ES5 -- Fixed a bug with multiple versions of ES on different rules -- Fixed a possible KeyError when using use_terms_query with ES5 - -## v0.1.10 - 2017-04-17 - -### Fixed -- Fixed an AttributeError occuring with older versions of Elasticsearch library -- Made example rules more consistent and with unique names -- Fixed an error caused by a typo when es_username is used - -## v0.1.9 - 2017-04-14 - -### Added -- Added a changelog -- Added metric aggregation rule type -- Added percentage match rule type -- Added default doc style and improved the instructions -- Rule names will default to the filename -- Added import keyword in rules to include sections from other files -- Added email_from_field option to derive the recipient from a field in the match -- Added simple HTTP alerter -- Added Exotel SMS alerter -- Added a readme link to third party Kibana plugin -- Added option to use env variables to configure some settings -- Added duplicate hits count in log line - -### Fixed -- Fixed a bug in change rule where a boolean false would be ignored -- Clarify documentation on format of alert_text_args and alert_text_kw -- Fixed a bug preventing new silence stashes from being loaded after a rule has previous alerted -- Changed the default es_host in elastalert-test-rule to localhost -- Fixed a bug preventing ES <5.0 formatted queries working in elastalert-test-rule -- Fixed top_count_keys adding .raw on ES >5.0, uses .keyword instead -- Fixed a bug causing compound aggregation keys not to work -- Better error reporting for the Jira alerter -- AWS request signing now refreshes credentials, uses boto3 -- Support multiple ES versions on different rules -- Added documentation for percentage match rule type - -### Removed -- Removed a feature that would disable writeback_es on errors, causing various issues diff --git a/chart/elastalert2/.helmignore b/chart/elastalert2/.helmignore new file mode 100644 index 000000000..f0c131944 --- /dev/null +++ b/chart/elastalert2/.helmignore @@ -0,0 +1,21 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. 
+.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj diff --git a/chart/elastalert2/Chart.yaml b/chart/elastalert2/Chart.yaml new file mode 100644 index 000000000..258d87016 --- /dev/null +++ b/chart/elastalert2/Chart.yaml @@ -0,0 +1,11 @@ +apiVersion: v1 +description: Automated rule-based alerting for Elasticsearch +name: elastalert2 +version: 2.9.0 +appVersion: 2.9.0 +home: https://github.com/jertel/elastalert2 +sources: +- https://github.com/jertel/elastalert2 +maintainers: + - name: jertel +engine: gotpl diff --git a/chart/elastalert2/README.md b/chart/elastalert2/README.md new file mode 100644 index 000000000..946c69b36 --- /dev/null +++ b/chart/elastalert2/README.md @@ -0,0 +1,111 @@ + +# ElastAlert 2 Helm Chart for Kubernetes + +An ElastAlert 2 helm chart is available, and can be installed into an existing Kubernetes cluster by following the instructions below. + +Inspiration for optional serviceMonitor and prometheusRules objects, along with source code for calculating and implementing labels on the chart, ported from https://github.com/bitnami/charts/tree/master/bitnami/thanos/templates + +## Installing the Chart + +Add the elastalert2 repository to your Helm configuration: + +```console +helm repo add elastalert2 https://jertel.github.io/elastalert2/ +``` + +Next, install the chart with a release name, such as _elastalert2_: + +```console +helm install elastalert2 elastalert2/elastalert2 +``` + +The command deploys ElastAlert 2 on the Kubernetes cluster in the default configuration. The [configuration](#configuration) section lists the parameters that can be configured during installation. + +See the comment in the default `values.yaml` for specifying a `writebackIndex` for ES 5.x. + +If necessary, open Dev Tools on Kibana and send the below request to avoid errors like `RequestError: TransportError(400, u'search_phase_execution_exception', u'No mapping found for [alert_time] in order to sort on')` + +``` +PUT /elastalert/_mapping/elastalert +{ + "properties": { + "alert_time": {"type": "date"} + } +} +``` + +## Uninstalling the Chart + +To uninstall/delete the ElastAlert 2 deployment: + +```console +helm delete elastalert2 --purge +``` + +The command removes all the Kubernetes components associated with the chart and deletes the release. + +## Configuration + +| Parameter | Description | Default | +|----------------------------------------------|-------------------------------------------------------------------------------------------------------------------------------|----------------------------------------------------------| +| `image.repository` | docker image | jertel/elastalert2 | +| `image.tag` | docker image tag | 2.9.0 | +| `image.pullPolicy` | image pull policy | IfNotPresent | +| `image.pullSecret` | image pull secret | "" | +| `podAnnotations` | Annotations to be added to pods | {} | +| `podSecurityContext` | Configurable podSecurityContext for pod execution environment | {"runAsUser": 1000, "runAsGroup": 1000, "fsGroup": 1000} | +| `securityContext` | Allows you to set the securityContext for the container | {"runAsNonRoot": true, "runAsUser": 1000} | +| `command` | command override for container | `NULL` | +| `args` | args override for container | `NULL` | +| `replicaCount` | number of replicas to run | 1 | +| `rulesFolder` | Locaton of rules directory. Usefull when you have one docker image and different set on rules per environemnt. 
For example development can reside in `/opt/elastalert/develop` and production in `/opt/elastalert/production`. | /opt/elastalert/rules | +| `elasticsearch.host` | elasticsearch endpoint to use | elasticsearch | +| `elasticsearch.port` | elasticsearch port to use | 9200 | +| `elasticsearch.useSsl` | whether or not to connect to es_host using SSL | False | +| `elasticsearch.username` | Username for ES with basic auth | `NULL` | +| `elasticsearch.password` | Password for ES with basic auth | `NULL` | +| `elasticsearch.credentialsSecret` | Specifies an existing secret to be used for the ES username/password auth | `NULL` | +| `elasticsearch.credentialsSecretUsernameKey` | The key in elasticsearch.credentialsSecret that stores the ES password auth | `NULL` | +| `elasticsearch.credentialsSecretPasswordKey` | The key in elasticsearch.credentialsSecret that stores the ES username auth | `NULL` | +| `elasticsearch.verifyCerts` | whether or not to verify TLS certificates | True | +| `elasticsearch.clientCert` | path to a PEM certificate to use as the client certificate | `NULL` | +| `elasticsearch.clientKey` | path to a private key file to use as the client key | `NULL` | +| `elasticsearch.caCerts` | path to a CA cert bundle to use to verify SSL connections | `NULL` | +| `elasticsearch.certsVolumes` | certs volumes, required to mount ssl certificates when elasticsearch has tls enabled | `NULL` | +| `elasticsearch.certsVolumeMounts` | mount certs volumes, required to mount ssl certificates when elasticsearch has tls enabled | `NULL` | +| `extraConfigOptions` | Additional options to propagate to all rules, cannot be `alert`, `type`, `name` or `index` | `{}` | +| `secretConfigName` | name of the secret which holds the ElastAlert config. **Note:** this will completely overwrite the generated config | `NULL` | +| `secretRulesName` | name of the secret which holds the ElastAlert rules. **Note:** this will overwrite the generated rules | `NULL` | +| `secretRulesList` | a list of rules to enable from the secret | [] | +| `optEnv` | Additional pod environment variable definitions | [] | +| `extraContainers` | List of additional containers run in the same pod as elastalert | [] | +| `extraVolumes` | Additional volume definitions | [] | +| `extraVolumeMounts` | Additional volumeMount definitions | [] | +| `serviceAccount.create` | Specifies whether a service account should be created. | `true` | +| `serviceAccount.name` | Service account to be used. If not set and `serviceAccount.create` is `true`, a name is generated using the fullname template | | +| `serviceAccount.annotations` | ServiceAccount annotations | | +| `podSecurityPolicy.create` | [DEPRECATED] Create pod security policy resources | `false` | +| `resources` | Container resource requests and limits | {} | +| `rulesVolumeName` | Specifies the rules volume to be mounted. Can be changed for mounting a custom rules folder via the extraVolumes parameter, instead of using the default rules configMap or secret rule mounting method. 
| "rules" | +| `rules` | Rule and alert configuration for ElastAlert 2 | {} example shown in values.yaml | +| `runIntervalMins` | Default interval between alert checks, in minutes | 1 | +| `realertIntervalMins` | Time between alarms for same rule, in minutes | `NULL` | +| `scanSubdirectories` | Enable/disable subdirectory scanning for rules | `true` | +| `alertRetryLimitMins` | Time to retry failed alert deliveries, in minutes | 2880 (2 days) | +| `bufferTimeMins` | Default rule buffer time, in minutes | 15 | +| `writebackIndex` | Name or prefix of elastalert index(es) | elastalert | +| `nodeSelector` | Node selector for deployment | {} | +| `affinity` | Affinity specifications for the deployed pod(s) | {} | +| `tolerations` | Tolerations for deployment | [] | +| `smtp_auth.username` | Optional SMTP mail server username. If the value is not empty, the smtp_auth secret will be created automatically. | `NULL` | +| `smtp_auth.password` | Optional SMTP mail server passwpord. This must be specified if the above field, `smtp_auth.username` is also specified. | `NULL` | +| `metrics.enabled` | Enable elastalert prometheus endpoint, add prometheus.io annotations to pod and create a service pointing to the port for prometheus to scrape the metrics | `false` | +| `metrics.prometheusPort` | If "metrics" is set to true, prometheus metrics will be exposed by the pod on this port. | `8080` | +| `metrics.prometheusPortName` | Name of the port where metrics are exposed | `http-alt` | +| `metrics.prometheusScrapeAnnotations` | If metrics are enabled, annotations to add to the pod for prometheus configuration. prometheus.io/port is also added during the prometheusPort and prometheusPortName values | `{prometheus.io/scrape: "true" prometheus.io/path: "/"}` | +| `metrics.serviceMonitor.enabled` | If metrics are enabled, create a serviceMonitor custom resource for prometheus-operator to detect and configure the metrics endpoint on prometheus. | `false` | +| `metrics.serviceMonitor.labels` | Labels to add to the prometheusRule object for prometheus-operator to detect it, when deployed on a namespace different from the one where prometheus-operator is running. | `{}` | +| `metrics.serviceMonitor.metricRelabelings` | List of prometheus metric relabeling configs to apply to scrape. Example: drop python_gc metrics or alter pod name | `[]` | +| `metrics.prometheusRule.enabled` | If metrics are enabled, create a prometheusRule custom resource for prometheus-operator | `false` | +| `metrics.prometheusRule.additionalLabels` | Labels to add to the prometheusRule object for prometheus-operator to detect it, when deployed on a namespace different from the one where prometheus-operator is running. | `{}` | +| `metrics.prometheusRule.rules` | Group of alerting and/or recording rules to add to the prometheus configuration, example Alerting rules for pod down, or for file descriptors. Should be added as multiline Yaml string | `` | diff --git a/chart/elastalert2/templates/NOTES.txt b/chart/elastalert2/templates/NOTES.txt new file mode 100644 index 000000000..5557d351e --- /dev/null +++ b/chart/elastalert2/templates/NOTES.txt @@ -0,0 +1 @@ +1. 
ElastAlert 2 is now running against: {{ .Values.elasticsearch.host }}:{{ .Values.elasticsearch.port }} \ No newline at end of file diff --git a/chart/elastalert2/templates/_helpers.tpl b/chart/elastalert2/templates/_helpers.tpl new file mode 100644 index 000000000..2fbdad460 --- /dev/null +++ b/chart/elastalert2/templates/_helpers.tpl @@ -0,0 +1,36 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "elastalert.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "elastalert.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create the name of the service account to use +*/}} +{{- define "elastalert.serviceAccountName" -}} +{{- if .Values.serviceAccount.create -}} + {{ default (include "elastalert.fullname" .) .Values.serviceAccount.name }} +{{- else -}} + {{ default "default" .Values.serviceAccount.name }} +{{- end -}} +{{- end -}} diff --git a/chart/elastalert2/templates/_labels.tpl b/chart/elastalert2/templates/_labels.tpl new file mode 100644 index 000000000..252066c7e --- /dev/null +++ b/chart/elastalert2/templates/_labels.tpl @@ -0,0 +1,18 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Kubernetes standard labels +*/}} +{{- define "common.labels.standard" -}} +app.kubernetes.io/name: {{ include "common.names.name" . }} +helm.sh/chart: {{ include "common.names.chart" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end -}} + +{{/* +Labels to use on deploy.spec.selector.matchLabels and svc.spec.selector +*/}} +{{- define "common.labels.matchLabels" -}} +app.kubernetes.io/name: {{ include "common.names.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end -}} diff --git a/chart/elastalert2/templates/_names.tpl b/chart/elastalert2/templates/_names.tpl new file mode 100644 index 000000000..c6e0202ff --- /dev/null +++ b/chart/elastalert2/templates/_names.tpl @@ -0,0 +1,60 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "common.names.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "common.names.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. 
+*/}} +{{- define "common.names.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{- define "common.names.servicename" -}} +{{- $name := include "common.names.fullname" . | trunc 53 -}} +{{- printf "%s-%s" $name "metrics" -}} +{{- end -}} + +{{- define "common.names.configname" -}} +{{- $name := include "common.names.fullname" . | trunc 53 -}} +{{- printf "%s-%s" $name "config" -}} +{{- end -}} + +{{/* +Allow the release namespace to be overridden for multi-namespace deployments in combined charts. +*/}} +{{- define "common.names.namespace" -}} +{{- if .Values.namespaceOverride -}} +{{- .Values.namespaceOverride -}} +{{- else -}} +{{- .Release.Namespace -}} +{{- end -}} +{{- end -}} + +{{/* +Create a fully qualified app name adding the installation's namespace. +*/}} +{{- define "common.names.fullname.namespace" -}} +{{- printf "%s-%s" (include "common.names.fullname" .) (include "common.names.namespace" .) | trunc 63 | trimSuffix "-" -}} +{{- end -}} diff --git a/chart/elastalert2/templates/_tplvalues.tpl b/chart/elastalert2/templates/_tplvalues.tpl new file mode 100644 index 000000000..2db166851 --- /dev/null +++ b/chart/elastalert2/templates/_tplvalues.tpl @@ -0,0 +1,13 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Renders a value that contains template. +Usage: +{{ include "common.tplvalues.render" ( dict "value" .Values.path.to.the.Value "context" $) }} +*/}} +{{- define "common.tplvalues.render" -}} + {{- if typeIs "string" .value }} + {{- tpl .value .context }} + {{- else }} + {{- tpl (.value | toYaml) .context }} + {{- end }} +{{- end -}} diff --git a/chart/elastalert2/templates/config.yaml b/chart/elastalert2/templates/config.yaml new file mode 100644 index 000000000..ea17e975c --- /dev/null +++ b/chart/elastalert2/templates/config.yaml @@ -0,0 +1,50 @@ +{{- if not .Values.secretConfigName }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "common.names.configname" . }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} +data: + elastalert_config: |- + --- + rules_folder: {{ .Values.rulesFolder }} +{{- if .Values.scanSubdirectories }} + scan_subdirectories: {{ .Values.scanSubdirectories }} +{{- end }} + run_every: + minutes: {{ .Values.runIntervalMins }} +{{- if .Values.realertIntervalMins }} + realert: + minutes: {{ .Values.realertIntervalMins }} +{{- end }} + buffer_time: + minutes: {{ .Values.bufferTimeMins }} + es_host: {{ .Values.elasticsearch.host }} + es_port: {{ .Values.elasticsearch.port }} +{{- if .Values.elasticsearch.username }} + es_username: {{ .Values.elasticsearch.username }} +{{- end }} +{{- if .Values.elasticsearch.password }} + es_password: {{ .Values.elasticsearch.password }} +{{- end }} + writeback_index: {{ .Values.writebackIndex }} + use_ssl: {{ .Values.elasticsearch.useSsl }} + verify_certs: {{ .Values.elasticsearch.verifyCerts }} +{{- if .Values.elasticsearch.clientCert }} + client_cert: {{ .Values.elasticsearch.clientCert }} +{{- end }} +{{- if .Values.elasticsearch.clientKey }} + client_key: {{ .Values.elasticsearch.clientKey }} +{{- end }} +{{- if .Values.elasticsearch.caCerts }} + ca_certs: {{ .Values.elasticsearch.caCerts }} +{{- end }} + alert_time_limit: + minutes: {{ .Values.alertRetryLimitMins }} +{{- if .Values.extraConfigOptions }} +{{ toYaml .Values.extraConfigOptions | indent 4 }} +{{- end }} +{{- end }} diff --git a/chart/elastalert2/templates/deployment.yaml b/chart/elastalert2/templates/deployment.yaml new file mode 100644 index 000000000..dc9cda93d --- /dev/null +++ b/chart/elastalert2/templates/deployment.yaml @@ -0,0 +1,163 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ template "elastalert.fullname" . }} + labels: + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + app: {{ template "elastalert.name" . }} + chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +spec: + selector: + matchLabels: + {{- include "common.labels.matchLabels" . | nindent 6 }} + replicas: {{ .Values.replicaCount }} + revisionHistoryLimit: {{ .Values.revisionHistoryLimit }} + template: + metadata: + annotations: + checksum/config: {{ include (print $.Template.BasePath "/config.yaml") . | sha256sum }} + checksum/rules: {{ include (print $.Template.BasePath "/rules.yaml") . | sha256sum }} +{{- if .Values.metrics.enabled }} +{{ toYaml .Values.metrics.prometheusScrapeAnnotations | indent 8 }} + prometheus.io/port: {{ .Values.metrics.prometheusPort | quote}} +{{- end }} +{{- if .Values.podAnnotations }} +{{ toYaml .Values.podAnnotations | indent 8 }} +{{- end }} + labels: {{- include "common.labels.standard" . | nindent 8 }} + app.kubernetes.io/component: {{ .Values.appKubernetesIoComponent }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + spec: +{{- if .Values.image.pullSecret }} + imagePullSecrets: + - name: {{ .Values.image.pullSecret }} +{{- end }} + serviceAccountName: {{ include "elastalert.serviceAccountName" . 
}} +{{- if .Values.podSecurityContext }} + securityContext: +{{ toYaml .Values.podSecurityContext | indent 8 }} +{{- end }} + containers: +{{- if .Values.extraContainers }} +{{ .Values.extraContainers | toYaml | indent 6}} +{{- end }} + + - name: elastalert + image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} +{{- if .Values.metrics.enabled }} + ports: + - containerPort: {{ .Values.metrics.prometheusPort }} + protocol: TCP + name: {{ .Values.metrics.prometheusPortName }} +{{- end }} +{{- if .Values.securityContext }} + securityContext: +{{ toYaml .Values.securityContext | indent 10 }} +{{- end }} +{{- if .Values.command }} + command: +{{ toYaml .Values.command | indent 10 }} +{{- end }} + +{{- if or .Values.args .Values.metrics.enabled }} + args: + {{- if .Values.args }} +{{ toYaml .Values.args | indent 10 }} + {{- end }} + {{- if .Values.metrics.enabled }} + {{- $enableportlist := list "--prometheus_port" (.Values.metrics.prometheusPort | toString) }} +{{ toYaml $enableportlist | indent 10 }} + {{- end }} +{{- end }} + + volumeMounts: + - name: config + mountPath: '/opt/elastalert/config.yaml' + subPath: config.yaml + - name: {{ .Values.rulesVolumeName }} + mountPath: {{ .Values.rulesFolder }} +{{- if .Values.elasticsearch.certsVolumeMounts }} +{{ toYaml .Values.elasticsearch.certsVolumeMounts | indent 10 }} +{{- end }} +{{- if .Values.extraVolumeMounts }} +{{ toYaml .Values.extraVolumeMounts | indent 10 }} +{{- end }} + resources: +{{ toYaml .Values.resources | indent 12 }} + env: +{{- if .Values.elasticsearch.credentialsSecret }} +{{- if .Values.elasticsearch.credentialsSecretUsernameKey }} + - name: ES_USERNAME + valueFrom: + secretKeyRef: + name: {{ .Values.elasticsearch.credentialsSecret }} + key: {{ .Values.elasticsearch.credentialsSecretUsernameKey }} +{{- end }} +{{- if .Values.elasticsearch.credentialsSecretPasswordKey }} + - name: ES_PASSWORD + valueFrom: + secretKeyRef: + name: {{ .Values.elasticsearch.credentialsSecret }} + key: {{ .Values.elasticsearch.credentialsSecretPasswordKey }} +{{- end }} +{{- end }} +{{- if .Values.optEnv }} +{{ .Values.optEnv | toYaml | indent 10}} +{{- end }} + + restartPolicy: Always +{{- if .Values.tolerations }} + tolerations: +{{ toYaml .Values.tolerations | indent 8 }} +{{- end }} +{{- if .Values.nodeSelector }} + nodeSelector: +{{ toYaml .Values.nodeSelector | indent 8 }} +{{- end }} +{{- if .Values.affinity }} + affinity: +{{ toYaml .Values.affinity | indent 8 }} +{{- end }} + volumes: + - name: rules +{{- if .Values.secretRulesName }} + secret: + secretName: {{ .Values.secretRulesName }} + items: +{{- range $key := .Values.secretRulesList }} + - key: {{ $key }} + path: {{ $key}}.yaml +{{- end }} +{{- else }} + configMap: + name: {{ template "elastalert.fullname" . }}-rules + items: +{{- range $key, $value := .Values.rules }} + - key: {{ $key }} + path: {{ $key}}.yaml +{{- end }} +{{- end }} + - name: config +{{- if .Values.secretConfigName }} + secret: + secretName: {{ .Values.secretConfigName }} +{{- else }} + configMap: + name: {{ template "elastalert.fullname" . 
}}-config +{{- end }} + items: + - key: elastalert_config + path: config.yaml +{{- if .Values.elasticsearch.certsVolumes }} +{{ toYaml .Values.elasticsearch.certsVolumes | indent 8 }} +{{- end }} +{{- if .Values.extraVolumes }} +{{ toYaml .Values.extraVolumes | indent 8 }} +{{- end }} diff --git a/chart/elastalert2/templates/podsecuritypolicy.yaml b/chart/elastalert2/templates/podsecuritypolicy.yaml new file mode 100644 index 000000000..5f4d29fc2 --- /dev/null +++ b/chart/elastalert2/templates/podsecuritypolicy.yaml @@ -0,0 +1,38 @@ +{{- if .Values.podSecurityPolicy.create }} +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: {{ template "elastalert.fullname" . }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} +spec: + # Prevents running in privileged mode + privileged: false + # Required to prevent escalations to root. + allowPrivilegeEscalation: false + volumes: + - configMap + - secret + hostNetwork: false + hostIPC: false + hostPID: false + runAsUser: + rule: RunAsAny + seLinux: + rule: RunAsAny + supplementalGroups: + rule: MustRunAs + ranges: + # Forbid adding the root group. + - min: 1 + max: 65535 + fsGroup: + rule: MustRunAs + ranges: + # Forbid adding the root group. + - min: 1 + max: 65535 + readOnlyRootFilesystem: false +{{- end }} diff --git a/chart/elastalert2/templates/prometheusrule.yaml b/chart/elastalert2/templates/prometheusrule.yaml new file mode 100644 index 000000000..ba61e1136 --- /dev/null +++ b/chart/elastalert2/templates/prometheusrule.yaml @@ -0,0 +1,19 @@ +{{- if and .Values.metrics.enabled .Values.metrics.prometheusRule.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: {{ template "common.names.fullname" . }} + namespace: {{ default .Release.Namespace .Values.metrics.prometheusRule.namespace | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.metrics.prometheusRule.additionalLabels }} + {{- include "common.tplvalues.render" (dict "value" .Values.metrics.prometheusRule.additionalLabels "context" $) | nindent 4 }} + {{- end }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.prometheusRule.rules "context" $ ) | nindent 2 }} +{{- end }} diff --git a/chart/elastalert2/templates/role.yaml b/chart/elastalert2/templates/role.yaml new file mode 100644 index 000000000..25b36df20 --- /dev/null +++ b/chart/elastalert2/templates/role.yaml @@ -0,0 +1,19 @@ +{{- if .Values.podSecurityPolicy.create }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: {{ template "elastalert.fullname" . }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} +rules: + - apiGroups: + - policy + resources: + - podsecuritypolicies + resourceNames: + - {{ template "elastalert.fullname" . 
}} + verbs: + - use +{{- end -}} diff --git a/chart/elastalert2/templates/rolebinding.yaml b/chart/elastalert2/templates/rolebinding.yaml new file mode 100644 index 000000000..92a39e7a3 --- /dev/null +++ b/chart/elastalert2/templates/rolebinding.yaml @@ -0,0 +1,17 @@ +{{- if .Values.podSecurityPolicy.create }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: {{ template "elastalert.fullname" . }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: {{ template "elastalert.fullname" . }} +subjects: + - kind: ServiceAccount + name: {{ template "elastalert.serviceAccountName" . }} +{{- end -}} diff --git a/chart/elastalert2/templates/rules.yaml b/chart/elastalert2/templates/rules.yaml new file mode 100644 index 000000000..b00399fa8 --- /dev/null +++ b/chart/elastalert2/templates/rules.yaml @@ -0,0 +1,13 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "elastalert.fullname" . }}-rules + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} +data: +{{- range $key, $value := .Values.rules }} +{{ $key | indent 2}}: |- +{{ $value | indent 4}} +{{- end }} diff --git a/chart/elastalert2/templates/service.yaml b/chart/elastalert2/templates/service.yaml new file mode 100644 index 000000000..98bce2ba8 --- /dev/null +++ b/chart/elastalert2/templates/service.yaml @@ -0,0 +1,43 @@ +{{- if .Values.metrics.enabled -}} +apiVersion: v1 +kind: Service +metadata: + name: {{ include "common.names.servicename" . | quote }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + app.kubernetes.io/component: {{ .Values.appKubernetesIoComponent}} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + annotations: + {{- if .Values.commonAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + type: {{ .Values.metrics.service.type }} + {{- if and .Values.metrics.service.clusterIP (eq .Values.metrics.service.type "ClusterIP") }} + clusterIP: {{ .Values.metrics.service.clusterIP }} + {{- end }} + {{- if ne .Values.metrics.service.type "ClusterIP" }} + externalTrafficPolicy: {{ .Values.metrics.service.externalTrafficPolicy }} + {{- end }} + {{- if and .Values.metrics.service.loadBalancerIP (eq .Values.metrics.service.type "LoadBalancer") }} + loadBalancerIP: {{ .Values.metrics.service.loadBalancerIP }} + {{- end }} + {{- if and (eq .Values.metrics.service.type "LoadBalancer") .Values.metrics.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: {{- toYaml .Values.metrics.service.loadBalancerSourceRanges | nindent 4 }} + {{- end }} + ports: + - port: {{ .Values.metrics.prometheusPort }} + targetPort: {{ .Values.metrics.prometheusPort }} + protocol: TCP + name: {{ .Values.metrics.prometheusPortName }} + {{- if and (or (eq .Values.metrics.service.type "NodePort") (eq .Values.metrics.service.type "LoadBalancer")) .Values.metrics.service.nodePorts }} + nodePort: {{ .Values.metrics.service.nodePorts }} + {{- else if eq .Values.metrics.service.type "ClusterIP" }} + nodePort: null + {{- end }} + + selector: + {{- include "common.labels.matchLabels" . | nindent 4 }} + app.kubernetes.io/component: {{ .Values.appKubernetesIoComponent }} +{{- end }} diff --git a/chart/elastalert2/templates/serviceaccount.yaml b/chart/elastalert2/templates/serviceaccount.yaml new file mode 100644 index 000000000..e380f51cf --- /dev/null +++ b/chart/elastalert2/templates/serviceaccount.yaml @@ -0,0 +1,14 @@ +{{- if .Values.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "elastalert.serviceAccountName" . }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- with .Values.serviceAccount.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +{{- end -}} diff --git a/chart/elastalert2/templates/servicemonitor.yaml b/chart/elastalert2/templates/servicemonitor.yaml new file mode 100644 index 000000000..387fedfea --- /dev/null +++ b/chart/elastalert2/templates/servicemonitor.yaml @@ -0,0 +1,45 @@ +{{- if and .Values.metrics.enabled .Values.metrics.serviceMonitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ include "common.names.servicename" . | quote }} + namespace: {{ default .Release.Namespace .Values.metrics.serviceMonitor.namespace | quote }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + app.kubernetes.io/component: {{ .Values.appKubernetesIoComponent}} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.labels }} + {{- toYaml .Values.metrics.serviceMonitor.labels | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + {{- if .Values.metrics.serviceMonitor.jobLabel }} + jobLabel: {{ .Values.metrics.serviceMonitor.jobLabel }} + {{- end }} + endpoints: + - port: {{ .Values.metrics.prometheusPortName }} + {{- if .Values.metrics.serviceMonitor.interval }} + interval: {{ .Values.metrics.serviceMonitor.interval }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.scrapeTimeout }} + scrapeTimeout: {{ .Values.metrics.serviceMonitor.scrapeTimeout }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.metricRelabelings }} + metricRelabelings: {{ toYaml .Values.metrics.serviceMonitor.metricRelabelings | nindent 8 }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.relabelings }} + relabelings: {{ toYaml .Values.metrics.serviceMonitor.relabelings | nindent 8 }} + {{- end }} + namespaceSelector: + matchNames: + - {{ .Release.Namespace | quote }} + selector: + matchLabels: {{- include "common.labels.matchLabels" . | nindent 6 }} + app.kubernetes.io/component: {{ .Values.appKubernetesIoComponent }} + {{- if .Values.metrics.serviceMonitor.selector }} + {{- include "common.tplvalues.render" (dict "value" .Values.metrics.serviceMonitor.selector "context" $) | nindent 6 }} + {{- end }} +{{- end }} diff --git a/chart/elastalert2/templates/smtp-auth.yaml b/chart/elastalert2/templates/smtp-auth.yaml new file mode 100644 index 000000000..da64fdcc5 --- /dev/null +++ b/chart/elastalert2/templates/smtp-auth.yaml @@ -0,0 +1,15 @@ +{{- if .Values.smtp_auth }} +apiVersion: v1 +kind: Secret +metadata: + name: elastalert-smtp-auth + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} +type: kubernetes.io/Opaque +stringData: + smtp_auth.yaml: |- + user: {{ .Values.smtp_auth.username }} + password: {{ .Values.smtp_auth.password }} +{{- end}} diff --git a/chart/elastalert2/values.yaml b/chart/elastalert2/values.yaml new file mode 100644 index 000000000..f4f57749e --- /dev/null +++ b/chart/elastalert2/values.yaml @@ -0,0 +1,399 @@ +## Chart information +nameOverride: "" +fullnameOverride: "" +namespaceOverride: "" +commonLabels: {} +commonAnnotations: {} +appKubernetesIoComponent: elastalert2 + +# number of replicas to run +replicaCount: 1 + +# number of helm release revisions to retain +revisionHistoryLimit: 5 + +# Default internal between alert checks against the elasticsearch datasource, in minutes +runIntervalMins: 1 + +# Location of directory where rules reside +rulesFolder: "/opt/elastalert/rules" + +# Enable/disabe subdirectory scanning for rules +scanSubdirectories: true + +# Default rule buffer duration, in minutes +bufferTimeMins: 15 + +# Amount of time to retry and deliver failed alerts (1440 minutes per day) +alertRetryLimitMins: 2880 + +# Default time before realerting, in minutes +realertIntervalMins: "" + +# For ES 5: The name of the index which stores elastalert 2 statuses, typically elastalert_status +# For ES 6: The prefix of the names of indices which store elastalert 2 statuses, typically elastalert +# +writebackIndex: elastalert + +image: + # docker image + repository: jertel/elastalert2 + # docker image tag + tag: 2.9.0 + pullPolicy: IfNotPresent + pullSecret: "" + +resources: {} + +# Annotations to be added to pods +podAnnotations: {} + +elasticsearch: + # elasticsearch endpoint e.g. (svc.namespace||svc) + host: elasticsearch + # elasticsearch port + port: 9200 + # whether or not to connect to es_host using TLS + useSsl: "False" + # Username if authenticating to ES with basic auth + username: "" + # Password if authenticating to ES with basic auth + password: "" + # Specifies an existing secret to be used for the ES username/password + credentialsSecret: "" + # The key in elasticsearch.credentialsSecret that stores the ES password + credentialsSecretUsernameKey: "" + # The key in elasticsearch.credentialsSecret that stores the ES username + credentialsSecretPasswordKey: "" + # whether or not to verify TLS certificates + verifyCerts: "True" + # Enable certificate based authentication + # path to a PEM certificate to use as the client certificate + # clientCert: "/certs/client.pem" + # path to a private key file to use as the client key + # clientKey: "/certs/client-key.pem" + # path to a CA cert bundle to use to verify SSL connections + # caCerts: "/certs/ca.pem" + # # certs volumes, required to mount ssl certificates when elasticsearch has tls enabled + # certsVolumes: + # - name: es-certs + # secret: + # defaultMode: 420 + # secretName: es-certs + # # mount certs volumes, required to mount ssl certificates when elasticsearch has tls enabled + # certsVolumeMounts: + # - name: es-certs + # mountPath: /certs + # readOnly: true + +# Optional env variables for the pod +optEnv: [] + +## Specify optional additional containers to run alongside the Elastalert2 container. +extraContainers: [] + +extraConfigOptions: {} + # # Options to propagate to all rules, e.g. 
a common slack_webhook_url or kibana_url + # # Please note at the time of implementing this value, it will not work for required_locals + # # Which MUST be set at the rule level, these are: ['alert', 'type', 'name', 'index'] + # kibana_url: https://kibana.yourdomain.com + # slack_webhook_url: dummy + +# To load ElastAlert 2 config via secret, uncomment the line below +# secretConfigName: elastalert-config-secret + +# Example of a secret config + +#apiVersion: v1 +#kind: Secret +#metadata: +# name: elastalert-config-secret +#type: Opaque +#stringData: +# elastalert_config: |- +# rules_folder: /opt/elastalert/rules +# scan_subdirectories: false +# run_every: +# minutes: 1 +# buffer_time: +# minutes: 15 +# es_host: elasticsearch +# es_port: 9200 +# writeback_index: elastalert +# use_ssl: False +# verify_certs: True +# alert_time_limit: +# minutes: 2880 +# slack_webhook_url: https://hooks.slack.com/services/xxxx +# slack_channel_override: '#alerts' + + +# To load ElastAlert's rules via secret, uncomment the line below +#secretRulesName: elastalert-rules-secret + +# Additionally, you must specificy which rules to load from the secret +#secretRulesList: [ "rule_1", "rule_2" ] + +# Example of secret rules + +#apiVersion: v1 +#kind: Secret +#metadata: +# name: elastalert-rules-secret +# namespace: elastic-system +#type: Opaque +#stringData: +# rule_1: |- +# name: Rule 1 +# type: frequency +# index: index1-* +# num_events: 3 +# timeframe: +# minutes: 1 +# alert: +# - "slack" +# rule_2: |- +# name: Rule 2 +# type: frequency +# index: index2-* +# num_events: 5 +# timeframe: +# minutes: 10 +# alert: +# - "slack" + +# Command and args override for container e.g. (https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/) +# command: ["YOUR_CUSTOM_COMMAND"] +# args: ["YOUR", "CUSTOM", "ARGS"] + +# specifies the rules volume to be used +rulesVolumeName: "rules" + +# additional rule configurations e.g. (http://elastalert2.readthedocs.io/en/latest/) +rules: {} + # deadman_slack: |- + # --- + # name: Deadman Switch Slack + # type: frequency + # index: containers-* + # num_events: 3 + # timeframe: + # minutes: 3 + # filter: + # - term: + # message: "deadmanslack" + # alert: + # - "slack" + # slack: + # slack_webhook_url: dummy + # deadman_pagerduty: |- + # --- + # name: Deadman Switch PagerDuty + # type: frequency + # index: containers-* + # num_events: 3 + # timeframe: + # minutes: 3 + # filter: + # - term: + # message: "deadmanpd" + # alert: + # - "pagerduty" + # pagerduty: + # pagerduty_service_key: dummy + # pagerduty_client_name: ElastAlert Deadman Switch + +serviceAccount: + # Specifies whether a service account should be created + create: true + # Annotations to add to the service account + annotations: {} + # The name of the service account to use. 
+ # If not set and create is true, a name is generated using the fullname template + name: + +# Enable pod security policy +# https://kubernetes.io/docs/concepts/policy/pod-security-policy/ +# DEPRECATED in Kubernetes 1.21 (https://kubernetes.io/blog/2021/04/06/podsecuritypolicy-deprecation-past-present-and-future/) +podSecurityPolicy: + create: false + +securityContext: + runAsNonRoot: true + runAsUser: 1000 + +podSecurityContext: + fsGroup: 1000 + runAsUser: 1000 + runAsGroup: 1000 + +# Support using node selectors and tolerations +# nodeSelector: +# "node-role.kubernetes.io/infra_worker": "true" +nodeSelector: {} + +# Specify node affinity or anti-affinity specifications +affinity: {} + +# tolerations: +# - key: "node_role" +# operator: "Equal" +# value: "infra_worker" +# effect: "NoSchedule" +tolerations: [] + +# Optional automatic SMTP mail server credential management. +# smtp_auth: +# username: "" +# password: "" + +extraVolumes: [] + # - name: smtp-auth + # secret: + # secretName: elastalert-smtp-auth + # items: + # - key: smtp_auth.yaml + # path: smtp_auth.yaml + # mode: 0400 + +extraVolumeMounts: [] + # - name: smtp-auth + # mountPath: /opt/elastalert/config-smtp/smtp_auth.yaml + # subPath: smtp_auth.yaml + # readOnly: true + + +## @section Metrics parameters + +## Prometheus metrics +## +metrics: + ## @param metrics.enabled Enable the export of Prometheus metrics + ## + enabled: false + prometheusPort: 8080 + prometheusPortName: http-alt + # Prometheus Exporter defined by port: + prometheusScrapeAnnotations: + prometheus.io/scrape: "true" + prometheus.io/path: "/" + + service: + type: ClusterIP + # clusterIP: "" + # externalTrafficPolicy: Cluster + # loadBalancerIP: "" + # loadBalancerSourceRanges: {} + # nodePorts: "" + + ## Prometheus Operator ServiceMonitor configuration + ## + serviceMonitor: + ## @param metrics.serviceMonitor.enabled Specify if a ServiceMonitor will be deployed for Prometheus Operator + ## + enabled: false + + ## @param metrics.serviceMonitor.namespace Namespace in which Prometheus is running + ## + namespace: "" + + ## @param metrics.serviceMonitor.labels Extra labels for the ServiceMonitor + ## Normally used for prometheus operator to detect the servicemonitor if deployed to different namespace + ## labels: + ## release: prometheus-operator + labels: {} + + ## @param metrics.serviceMonitor.jobLabel The name of the label on the target service to use as the job name in Prometheus + ## + jobLabel: "" + + ## @param metrics.serviceMonitor.interval How frequently to scrape metrics + ## e.g: + ## interval: 10s + ## + interval: "" + ## @param metrics.serviceMonitor.scrapeTimeout Timeout after which the scrape is ended + ## e.g: + ## scrapeTimeout: 10s + ## + scrapeTimeout: "" + ## @param metrics.serviceMonitor.metricRelabelings [array] Specify additional relabeling of metrics + ## metricRelabelings: + ## # Drop GO metrics + ## - sourceLabels: [__name__] + ## regex: go_.* + ## action: drop + ## # Drop python_gc metrics + ## - sourceLabels: [__name__] + ## regex: python_gc.* + ## action: drop + ## # Normalise POD names + ## - sourceLabels: [pod] + ## regex: (.+elastalert2)\-([\w\d]+)\-([\w\d]+) + ## replacement: $1 + ## targetLabel: pod + metricRelabelings: [] + + ## @param metrics.serviceMonitor.relabelings [array] Specify general relabeling + ## + relabelings: [] + ## @param metrics.serviceMonitor.selector Prometheus instance selector labels + ## ref: https://github.com/bitnami/charts/tree/master/bitnami/prometheus-operator#prometheus-configuration + ## + 
selector: {} + + ## PrometheusRule CRD configuration + ## + prometheusRule: + ## @param metrics.prometheusRule.enabled If `true`, creates a Prometheus Operator PrometheusRule (also requires `metrics.enabled` to be `true`) + ## + enabled: false + ## @param metrics.prometheusRule.namespace Namespace in which the PrometheusRule CRD is created + ## + namespace: "" + + ## @param metrics.prometheusRule.additionalLabels Additional labels for the prometheusRule + ## to be detected by prometheus-operator + ## additionalLabels: + ## release: prometheus-operator + additionalLabels: {} + + ## @param metrics.prometheusRule.rules Prometheus Rules for Thanos components + ## These are just examples rules, please adapt them to your needs. + ## rules: |- + ## groups: + ## - name: elastalert + ## rules: + ## - alert: elastalert Pod down + ## annotations: + ## description: Prometheus is unable to scrape metrics service. Check pod logs for details + ## summary: elastalert POD is down + ## expr: up{service="{{ template "common.names.servicename" . }}",container="elastalert"} == 0 + ## for: 5m + ## labels: + ## severity: critical + ## production: 'True' + ## - alert: elastalert file descriptors use + ## annotations: + ## description: Elastalert pod nearly exhausting file descriptors + ## summary: too many file descriptors used + ## expr: |- + ## process_open_fds{service="{{ template "common.names.servicename" . }}",container="elastalert"} + ## / + ## process_max_fds{service="{{ template "common.names.servicename" . }}",container="elastalert"} + ## > 0.9 + ## for: 3m + ## labels: + ## severity: critical + ## production: 'True' + ## - alert: elastalert scrapes failing + ## annotations: + ## description: Elastalert is not scraping for a rule {{ "{{" }} $labels.rule_name {{ "}}" }} + ## summary: scrapes for rule stalled {{ "{{" }} $labels.rule_name {{ "}}" }} + ## expr: |- + ## rate(elastalert_scrapes_total{service="{{ template "common.names.servicename" . }}",container="elastalert"}[1m]) == 0 + ## for: 5m + ## labels: + ## severity: critical + ## production: 'True' + rules: [] diff --git a/commands.sh b/commands.sh new file mode 100644 index 000000000..f12a8cdd1 --- /dev/null +++ b/commands.sh @@ -0,0 +1,5 @@ +#!/bin/bash +echo "creating elastalert indices" +python -m elastalert.create_index --config /data/elastalert/config.yaml +echo "Starting elastalert" +python -m elastalert.elastalert --config /data/elastalert/config.yaml --verbose diff --git a/docs/README.md b/docs/README.md new file mode 100644 index 000000000..eab455249 --- /dev/null +++ b/docs/README.md @@ -0,0 +1,12 @@ +# Documentation + +You can read this documentation at [Read The Docs][0]. + +To build a local version of these docs, the following from within the `/docs` directory: + +``` +pip install m2r2 sphinx_rtd_theme sphinx +make html +``` + +You can then view the generated HTML in from within the `build/` folder. diff --git a/docs/source/conf.py b/docs/source/conf.py index 80a76ed1a..6769b99e8 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -2,7 +2,7 @@ # -*- coding: utf-8 -*- # -# ElastAlert documentation build configuration file, created by +# ElastAlert 2 documentation build configuration file, created by # sphinx-quickstart on Thu Jul 11 15:45:31 2013. # # This file is execfile()d with the current directory set to its containing dir. @@ -19,13 +19,13 @@ # -- General configuration ----------------------------------------------------- # Add any Sphinx extension module names here, as strings. 
They can be extensions # coming with Sphinx (named 'sphinx.ext.*') or your custom ones. -extensions = [] +extensions = ["m2r2"] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix of source filenames. -source_suffix = '.rst' +source_suffix = ['.rst', '.md'] # The encoding of source files. # source_encoding = 'utf-8' @@ -34,8 +34,8 @@ master_doc = 'index' # General information about the project. -project = u'ElastAlert' -copyright = u'2014, Yelp' +project = u'ElastAlert 2' +copyright = u'2014-2021, Yelp, et al' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the @@ -62,6 +62,7 @@ # List of directories, relative to source directory, that shouldn't be searched # for source files. exclude_trees = [] +exclude_patterns = ['recipes/*.md'] # The reST default role (used for this markup: `text`) to use for all documents. # default_role = None @@ -170,7 +171,7 @@ # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, documentclass [howto/manual]). latex_documents = [ - ('index', 'elastalert.tex', u'ElastAlert Documentation', + ('index', 'elastalert.tex', u'ElastAlert 2 Documentation', u'Quentin Long', 'manual'), ] diff --git a/docs/source/elastalert.rst b/docs/source/elastalert.rst index b1008c3c4..ecfdcffbe 100755 --- a/docs/source/elastalert.rst +++ b/docs/source/elastalert.rst @@ -1,18 +1,14 @@ -ElastAlert - Easy & Flexible Alerting With Elasticsearch -******************************************************** +ElastAlert 2 - Automated rule-based alerting for Elasticsearch +************************************************************** -ElastAlert is a simple framework for alerting on anomalies, spikes, or other patterns of interest from data in Elasticsearch. +ElastAlert 2 is a simple framework for alerting on anomalies, spikes, or other patterns of interest from data in `Elasticsearch `_ and `OpenSearch `_. -At Yelp, we use Elasticsearch, Logstash and Kibana for managing our ever increasing amount of data and logs. -Kibana is great for visualizing and querying data, but we quickly realized that it needed a companion tool for alerting -on inconsistencies in our data. Out of this need, ElastAlert was created. - -If you have data being written into Elasticsearch in near real time and want to be alerted when that data matches certain patterns, ElastAlert is the tool for you. +If you have data being written into Elasticsearch in near real time and want to be alerted when that data matches certain patterns, ElastAlert 2 is the tool for you. Overview ======== -We designed ElastAlert to be :ref:`reliable `, highly :ref:`modular `, and easy to :ref:`set up ` and :ref:`configure `. +We designed ElastAlert 2 to be :ref:`reliable `, highly :ref:`modular `, and easy to :ref:`set up ` and :ref:`configure `. It works by combining Elasticsearch with two types of components, rule types and alerts. Elasticsearch is periodically queried and the data is passed to the rule type, which determines when @@ -20,7 +16,7 @@ a match is found. When a match occurs, it is given to one or more alerts, which This is configured by a set of rules, each of which defines a query, a rule type, and a set of alerts. 
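To make that concrete, a single rule file bundles those three pieces together. The sketch below is purely illustrative (the index pattern, threshold and recipient are made up); the individual options are documented later in this guide:

```yaml
# Illustrative rule: a query (filter), a rule type (frequency), and one alert (email)
name: Example error burst
type: frequency            # the rule type
index: logstash-*          # hypothetical index pattern to query
num_events: 50             # match when 50 events...
timeframe:
  hours: 4                 # ...occur within a 4 hour window
filter:                    # the query sent to Elasticsearch
- term:
    status: "error"
alert:                     # what to do when a match occurs
- "email"
email:
- "ops@example.com"        # hypothetical recipient
```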
-Several rule types with common monitoring paradigms are included with ElastAlert: +Several rule types with common monitoring paradigms are included with ElastAlert 2: - "Match where there are X events in Y time" (``frequency`` type) - "Match when the rate of events increases or decreases" (``spike`` type) @@ -31,40 +27,63 @@ Several rule types with common monitoring paradigms are included with ElastAlert Currently, we have support built in for these alert types: +- Alerta +- Alertmanager +- AWS SES (Amazon Simple Email Service) +- AWS SNS (Amazon Simple Notification Service) +- Chatwork - Command +- Datadog +- Debug +- Dingtalk +- Discord - Email -- JIRA +- Exotel +- Gitter +- GoogleChat +- Graylog GELF +- HTTP POST +- HTTP POST 2 +- Jira +- Line Notify +- Mattermost +- Microsoft Teams - OpsGenie -- SNS -- HipChat +- PagerDuty +- PagerTree +- Rocket.Chat +- Squadcast +- ServiceNow - Slack -- Telegram -- GoogleChat -- Debug +- Splunk On-Call (Formerly VictorOps) - Stomp +- Telegram +- Tencent SMS - TheHive +- Twilio +- Zabbix Additional rule types and alerts can be easily imported or written. (See :ref:`Writing rule types ` and :ref:`Writing alerts `) In addition to this basic usage, there are many other features that make alerts more useful: -- Alerts link to Kibana dashboards +- Alerts link to Kibana Discover searches - Aggregate counts for arbitrary fields - Combine alerts into periodic reports - Separate alerts by using a unique key field - Intercept and enhance match data -To get started, check out :ref:`Running ElastAlert For The First Time `. +To get started, check out :ref:`Running ElastAlert 2 For The First Time `. .. _reliability: Reliability =========== -ElastAlert has several features to make it more reliable in the event of restarts or Elasticsearch unavailability: +ElastAlert 2 has several features to make it more reliable in the event of restarts or Elasticsearch unavailability: -- ElastAlert :ref:`saves its state to Elasticsearch ` and, when started, will resume where previously stopped -- If Elasticsearch is unresponsive, ElastAlert will wait until it recovers before continuing +- ElastAlert 2 :ref:`saves its state to Elasticsearch ` and, when started, will resume where previously stopped +- If Elasticsearch is unresponsive, ElastAlert 2 will wait until it recovers before continuing - Alerts which throw errors may be automatically retried for a period of time .. _modularity: @@ -72,7 +91,7 @@ ElastAlert has several features to make it more reliable in the event of restart Modularity ========== -ElastAlert has three main components that may be imported as a module or customized: +ElastAlert 2 has three main components that may be imported as a module or customized: Rule types ---------- @@ -98,25 +117,34 @@ to the alerter. See :ref:`Enhancements` for more information. Configuration ============= -ElastAlert has a global configuration file, ``config.yaml``, which defines several aspects of its operation: +ElastAlert 2 has a global configuration file, ``config.yaml``, which defines several aspects of its operation: -``buffer_time``: ElastAlert will continuously query against a window from the present to ``buffer_time`` ago. -This way, logs can be back filled up to a certain extent and ElastAlert will still process the events. This +``buffer_time``: ElastAlert 2 will continuously query against a window from the present to ``buffer_time`` ago. +This way, logs can be back filled up to a certain extent and ElastAlert 2 will still process the events. 
This may be overridden by individual rules. This option is ignored for rules where ``use_count_query`` or ``use_terms_query`` is set to true. Note that back filled data may not always trigger count based alerts as if it was queried in real time. -``es_host``: The host name of the Elasticsearch cluster where ElastAlert records metadata about its searches. -When ElastAlert is started, it will query for information about the time that it was last run. This way, -even if ElastAlert is stopped and restarted, it will never miss data or look at the same events twice. It will also specify the default cluster for each rule to run on. +``es_host``: The host name of the Elasticsearch cluster where ElastAlert 2 records metadata about its searches. +When ElastAlert 2 is started, it will query for information about the time that it was last run. This way, +even if ElastAlert 2 is stopped and restarted, it will never miss data or look at the same events twice. It will also specify the default cluster for each rule to run on. The environment variable ``ES_HOST`` will override this field. +For multiple host Elasticsearch clusters see ``es_hosts`` parameter. ``es_port``: The port corresponding to ``es_host``. The environment variable ``ES_PORT`` will override this field. +``es_hosts`` is the list of addresses of the nodes of the Elasticsearch cluster. This +parameter can be used for high availability purposes, but the primary host must also +be specified in the ``es_host`` parameter. The ``es_hosts`` parameter can be overridden +within each rule. This value can be specified as ``host:port`` if overriding the default port. +The environment variable ``ES_HOSTS`` will override this field, and can be specified as a comma-separated value to denote multiple hosts. + ``use_ssl``: Optional; whether or not to connect to ``es_host`` using TLS; set to ``True`` or ``False``. The environment variable ``ES_USE_SSL`` will override this field. ``verify_certs``: Optional; whether or not to verify TLS certificates; set to ``True`` or ``False``. The default is ``True``. +``ssl_show_warn``: Optional; suppress TLS and certificate related warnings; set to ``True`` or ``False``. The default is ``True``. + ``client_cert``: Optional; path to a PEM certificate to use as the client certificate. ``client_key``: Optional; path to a private key file to use as the client key. @@ -127,55 +155,74 @@ The environment variable ``ES_USE_SSL`` will override this field. ``es_password``: Optional; basic-auth password for connecting to ``es_host``. The environment variable ``ES_PASSWORD`` will override this field. +``es_bearer``: Optional; Bearer token for connecting to ``es_host``. The environment variable ``ES_BEARER`` will override this field. This authentication option will override the password authentication option. + +``es_api_key``: Optional; Base64 api-key token for connecting to ``es_host``. The environment variable ``ES_API_KEY`` will override this field. This authentication option will override both the bearer and the password authentication options. + ``es_url_prefix``: Optional; URL prefix for the Elasticsearch endpoint. The environment variable ``ES_URL_PREFIX`` will override this field. ``es_send_get_body_as``: Optional; Method for querying Elasticsearch - ``GET``, ``POST`` or ``source``. The default is ``GET`` ``es_conn_timeout``: Optional; sets timeout for connecting to and reading from ``es_host``; defaults to ``20``. -``rules_loader``: Optional; sets the loader class to be used by ElastAlert to retrieve rules and hashes. 
+``rules_loader``: Optional; sets the loader class to be used by ElastAlert 2 to retrieve rules and hashes. Defaults to ``FileRulesLoader`` if not set. -``rules_folder``: The name of the folder which contains rule configuration files. ElastAlert will load all -files in this folder, and all subdirectories, that end in .yaml. If the contents of this folder change, ElastAlert will load, reload +``rules_folder``: The name of the folder or a list of folders which contains rule configuration files. ElastAlert 2 will load all +files in this folder, and all subdirectories, that end in .yaml. If the contents of this folder change, ElastAlert 2 will load, reload or remove rules based on their respective config files. (only required when using ``FileRulesLoader``). -``scan_subdirectories``: Optional; Sets whether or not ElastAlert should recursively descend the rules directory - ``true`` or ``false``. The default is ``true`` +``scan_subdirectories``: Optional; Sets whether or not ElastAlert 2 should recursively descend the rules directory - ``true`` or ``false``. The default is ``true`` -``run_every``: How often ElastAlert should query Elasticsearch. ElastAlert will remember the last time +``run_every``: How often ElastAlert 2 should query Elasticsearch. ElastAlert 2 will remember the last time it ran the query for a given rule, and periodically query from that time until the present. The format of -this field is a nested unit of time, such as ``minutes: 5``. This is how time is defined in every ElastAlert +this field is a nested unit of time, such as ``minutes: 5``. This is how time is defined in every ElastAlert 2 configuration. +``misfire_grace_time``: If the rule scheduler is running behind, due to large numbers of rules or long-running rules, this grace time settings allows a rule to still be executed, provided its next scheduled runt time is no more than this grace period, in seconds, overdue. The default is 5 seconds. + ``writeback_index``: The index on ``es_host`` to use. ``max_query_size``: The maximum number of documents that will be downloaded from Elasticsearch in a single query. The default is 10,000, and if you expect to get near this number, consider using ``use_count_query`` for the rule. If this -limit is reached, ElastAlert will `scroll `_ +limit is reached, ElastAlert 2 will `scroll `_ using the size of ``max_query_size`` through the set amount of pages, when ``max_scrolling_count`` is set or until processing all results. -``max_scrolling_count``: The maximum amount of pages to scroll through. The default is ``0``, which means the scrolling has no limit. -For example if this value is set to ``5`` and the ``max_query_size`` is set to ``10000`` then ``50000`` documents will be downloaded at most. +``max_scrolling_count``: The maximum amount of pages to scroll through. The default is ``990``, to avoid a stack overflow error due to Python's stack limit of 1000. For example, if this value is set to ``5`` and the ``max_query_size`` is set to ``10000`` then ``50000`` documents will be downloaded at most. + +``max_threads``: The maximum number of concurrent threads available to process scheduled rules. Large numbers of long-running rules may require this value be increased, though this could overload the Elasticsearch cluster if too many complex queries are running concurrently. Default is 10. ``scroll_keepalive``: The maximum time (formatted in `Time Units `_) the scrolling context should be kept alive. 
Avoid using high values as it abuses resources in Elasticsearch, but be mindful to allow sufficient time to finish processing all the results. ``max_aggregation``: The maximum number of alerts to aggregate together. If a rule has ``aggregation`` set, all alerts occuring within a timeframe will be sent together. The default is 10,000. -``old_query_limit``: The maximum time between queries for ElastAlert to start at the most recently run query. -When ElastAlert starts, for each rule, it will search ``elastalert_metadata`` for the most recently run query and start +``old_query_limit``: The maximum time between queries for ElastAlert 2 to start at the most recently run query. +When ElastAlert 2 starts, for each rule, it will search ``elastalert_metadata`` for the most recently run query and start from that time, unless it is older than ``old_query_limit``, in which case it will start from the present time. The default is one week. -``disable_rules_on_error``: If true, ElastAlert will disable rules which throw uncaught (not EAException) exceptions. It +``disable_rules_on_error``: If true, ElastAlert 2 will disable rules which throw uncaught (not EAException) exceptions. It will upload a traceback message to ``elastalert_metadata`` and if ``notify_email`` is set, send an email notification. The -rule will no longer be run until either ElastAlert restarts or the rule file has been modified. This defaults to True. +rule will no longer be run until either ElastAlert 2 restarts or the rule file has been modified. This defaults to True. -``show_disabled_rules``: If true, ElastAlert show the disable rules' list when finishes the execution. This defaults to True. +``show_disabled_rules``: If true, ElastAlert 2 show the disable rules' list when finishes the execution. This defaults to True. ``notify_email``: An email address, or list of email addresses, to which notification emails will be sent. Currently, only an uncaught exception will send a notification email. The from address, SMTP host, and reply-to header can be set using ``from_addr``, ``smtp_host``, and ``email_reply_to`` options, respectively. By default, no emails will be sent. +single address example:: + + notify_email: "one@domain" + +or + +multiple address example:: + + notify_email: + - "one@domain" + - "two@domain" + ``from_addr``: The address to use as the from header in email notifications. This value will be used for email alerts as well, unless overwritten in the rule config. The default value is "ElastAlert". @@ -185,80 +232,44 @@ unless overwritten in the rule config. The default is "localhost". ``email_reply_to``: This sets the Reply-To header in emails. The default is the recipient address. -``aws_region``: This makes ElastAlert to sign HTTP requests when using Amazon Elasticsearch Service. It'll use instance role keys to sign the requests. +``aws_region``: This makes ElastAlert 2 to sign HTTP requests when using Amazon OpenSearch Service. It'll use instance role keys to sign the requests. The environment variable ``AWS_DEFAULT_REGION`` will override this field. -``boto_profile``: Deprecated! Boto profile to use when signing requests to Amazon Elasticsearch Service, if you don't want to use the instance role keys. - -``profile``: AWS profile to use when signing requests to Amazon Elasticsearch Service, if you don't want to use the instance role keys. +``profile``: AWS profile to use when signing requests to Amazon OpenSearch Service, if you don't want to use the instance role keys. 
The environment variable ``AWS_DEFAULT_PROFILE`` will override this field. -``replace_dots_in_field_names``: If ``True``, ElastAlert replaces any dots in field names with an underscore before writing documents to Elasticsearch. +``replace_dots_in_field_names``: If ``True``, ElastAlert 2 replaces any dots in field names with an underscore before writing documents to Elasticsearch. The default value is ``False``. Elasticsearch 2.0 - 2.3 does not support dots in field names. ``string_multi_field_name``: If set, the suffix to use for the subfield for string multi-fields in Elasticsearch. -The default value is ``.raw`` for Elasticsearch 2 and ``.keyword`` for Elasticsearch 5. +The default value is ``.keyword``. ``add_metadata_alert``: If set, alerts will include metadata described in rules (``category``, ``description``, ``owner`` and ``priority``); set to ``True`` or ``False``. The default is ``False``. ``skip_invalid``: If ``True``, skip invalid files instead of exiting. +``jinja_root_name``: When using a Jinja template, specify the name of the root field name in the template. The default is ``_data``. + +``jinja_template_path``: When using a Jinja template, specify filesystem path to template, this overrides the default behaviour of using alert_text as the template. + +``custom_pretty_ts_format``: This option provides a way to define custom format of timestamps printed in log messages and in alert messages. +If this option is not set, default timestamp format ('%Y-%m-%d %H:%M %Z') will be used. (Optional, string, default None) + +Example usage and resulting formatted timestamps:: + + (not set; default) -> '2021-08-16 21:38 JST' + custom_pretty_ts_format: '%Y-%m-%d %H:%M %z' -> '2021-08-16 21:38 +0900' + custom_pretty_ts_format: '%Y-%m-%d %H:%M' -> '2021-08-16 21:38' + Logging ------- -By default, ElastAlert uses a simple basic logging configuration to print log messages to standard error. +By default, ElastAlert 2 uses a simple basic logging configuration to print log messages to standard error. You can change the log level to ``INFO`` messages by using the ``--verbose`` or ``--debug`` command line options. If you need a more sophisticated logging configuration, you can provide a full logging configuration in the config file. This way you can also configure logging to a file, to Logstash and adjust the logging format. -For details, see the end of ``config.yaml.example`` where you can find an example logging +For details, see the end of ``examples/config.yaml.example`` where you can find an example logging configuration. - - -.. _runningelastalert: - -Running ElastAlert -================== - -``$ python elastalert/elastalert.py`` - -Several arguments are available when running ElastAlert: - -``--config`` will specify the configuration file to use. The default is ``config.yaml``. - -``--debug`` will run ElastAlert in debug mode. This will increase the logging verboseness, change -all alerts to ``DebugAlerter``, which prints alerts and suppresses their normal action, and skips writing -search and alert metadata back to Elasticsearch. Not compatible with `--verbose`. - -``--verbose`` will increase the logging verboseness, which allows you to see information about the state -of queries. Not compatible with `--debug`. - -``--start `` will force ElastAlert to begin querying from the given time, instead of the default, -querying from the present. The timestamp should be ISO8601, e.g. ``YYYY-MM-DDTHH:MM:SS`` (UTC) or with timezone -``YYYY-MM-DDTHH:MM:SS-08:00`` (PST). 
Note that if querying over a large date range, no alerts will be -sent until that rule has finished querying over the entire time period. To force querying from the current time, use "NOW". - -``--end `` will cause ElastAlert to stop querying at the specified timestamp. By default, ElastAlert -will periodically query until the present indefinitely. - -``--rule `` will only run the given rule. The rule file may be a complete file path or a filename in ``rules_folder`` -or its subdirectories. - -``--silence =`` will silence the alerts for a given rule for a period of time. The rule must be specified using -``--rule``. is one of days, weeks, hours, minutes or seconds. is an integer. For example, -``--rule noisy_rule.yaml --silence hours=4`` will stop noisy_rule from generating any alerts for 4 hours. - -``--es_debug`` will enable logging for all queries made to Elasticsearch. - -``--es_debug_trace `` will enable logging curl commands for all queries made to Elasticsearch to the -specified log file. ``--es_debug_trace`` is passed through to `elasticsearch.py -`_ which logs `localhost:9200` -instead of the actual ``es_host``:``es_port``. - -``--end `` will force ElastAlert to stop querying after the given time, instead of the default, -querying to the present time. This really only makes sense when running standalone. The timestamp is formatted -as ``YYYY-MM-DDTHH:MM:SS`` (UTC) or with timezone ``YYYY-MM-DDTHH:MM:SS-XX:00`` (UTC-XX). - -``--pin_rules`` will stop ElastAlert from loading, reloading or removing rules based on changes to their config files. diff --git a/docs/source/elastalert_status.rst b/docs/source/elastalert_status.rst index 99f26101d..f1e68667a 100644 --- a/docs/source/elastalert_status.rst +++ b/docs/source/elastalert_status.rst @@ -1,17 +1,17 @@ .. _metadata: -ElastAlert Metadata Index -========================= +ElastAlert 2 Metadata Index +=========================== -ElastAlert uses Elasticsearch to store various information about its state. This not only allows for some -level of auditing and debugging of ElastAlert's operation, but also to avoid loss of data or duplication of alerts -when ElastAlert is shut down, restarted, or crashes. This cluster and index information is defined -in the global config file with ``es_host``, ``es_port`` and ``writeback_index``. ElastAlert must be able +ElastAlert 2 uses Elasticsearch to store various information about its state. This not only allows for some +level of auditing and debugging of ElastAlert 2's operation, but also to avoid loss of data or duplication of alerts +when ElastAlert 2 is shut down, restarted, or crashes. This cluster and index information is defined +in the global config file with ``es_host``, ``es_port`` and ``writeback_index``. ElastAlert 2 must be able to write to this index. The script, ``elastalert-create-index`` will create the index with the correct mapping -for you, and optionally copy the documents from an existing ElastAlert writeback index. Run it and it will +for you, and optionally copy the documents from an existing ElastAlert 2 writeback index. Run it and it will prompt you for the cluster information. -ElastAlert will create three different types of documents in the writeback index: +ElastAlert 2 will create three different types of documents in the writeback index: elastalert_status ~~~~~~~~~~~~~~~~~ @@ -26,8 +26,8 @@ elastalert_status - ``matches``: The number of matches that the rule returned after processing the hits. Note that this does not necessarily mean that alerts were triggered. 
- ``time_taken``: The number of seconds it took for this query to run. -``elastalert_status`` is what ElastAlert will use to determine what time range to query when it first starts to avoid duplicating queries. -For each rule, it will start querying from the most recent endtime. If ElastAlert is running in debug mode, it will still attempt to base +``elastalert_status`` is what ElastAlert 2 will use to determine what time range to query when it first starts to avoid duplicating queries. +For each rule, it will start querying from the most recent endtime. If ElastAlert 2 is running in debug mode, it will still attempt to base its start time by looking for the most recent search performed, but it will not write the results of any query back to Elasticsearch. elastalert @@ -47,7 +47,7 @@ elastalert elastalert_error ~~~~~~~~~~~~~~~~ -When an error occurs in ElastAlert, it is written to both Elasticsearch and to stderr. The ``elastalert_error`` type contains: +When an error occurs in ElastAlert 2, it is written to both Elasticsearch and to stderr. The ``elastalert_error`` type contains: - ``@timestamp``: The time when the error occurred. - ``message``: The error or exception message. @@ -66,5 +66,5 @@ an alert with ``realert`` is triggered, a ``silence`` record will be written wit - ``exponent``: The exponential factor which multiplies ``realert``. The length of this silence is equal to ``realert`` * 2**exponent. This will be 0 unless ``exponential_realert`` is set. -Whenever an alert is triggered, ElastAlert will check for a matching ``silence`` document, and if the ``until`` timestamp is in the future, it will ignore -the alert completely. See the :ref:`Running ElastAlert ` section for information on how to silence an alert. +Whenever an alert is triggered, ElastAlert 2 will check for a matching ``silence`` document, and if the ``until`` timestamp is in the future, it will ignore +the alert completely. See the :ref:`Running ElastAlert 2 ` section for information on how to silence an alert. diff --git a/docs/source/elasticsearch_security_privileges.rst b/docs/source/elasticsearch_security_privileges.rst new file mode 100644 index 000000000..9c7363388 --- /dev/null +++ b/docs/source/elasticsearch_security_privileges.rst @@ -0,0 +1,35 @@ +Elasticsearch Security Privileges +********************************* + +While ElastAlert 2 will just work out-of-the-box for unsecured Elasticsearch, it will need a user with a certain set of permissions to work on secure Elasticseach that allow it to read the documents, check the cluster status etc. + +SearchGuard Permissions +======================= + +The permissions in Elasticsearch are specific to the plugin being used for RBAC. However, the permissions mentioned here can be mapped easily to different plugins other than Searchguard. + +Details about SearchGuard Action Groups: https://docs.search-guard.com/latest/action-groups + + +Writeback Permissions +--------------------------- + +For the global config (which writes to the writeback index), you would need to give all permissions on the writeback indices. +In addition, some permissions related to Cluster Monitor Access are required. + +``Cluster Permissions``: CLUSTER_MONITOR, indices:data/read/scroll* + +``Index Permissions`` (Over Writeback Indices): INDICES_ALL + + +Per Rule Permissions +-------------------------- + +For per rule Elasticsearch config, you would need at least the read permissions on the index you want to query. 
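For example, a rule that points at its own cluster only needs a read-capable account for the index it queries; the writeback and cluster-monitoring permissions described above apply to the account in the global config. A rough sketch, with the host and index names made up:

```yaml
# Hypothetical rule with a per-rule Elasticsearch connection;
# the account used for this connection only needs READ plus scroll access on app-logs-*
name: App error frequency
type: frequency
es_host: logging-es.internal    # made-up per-rule cluster
es_port: 9200
index: app-logs-*
num_events: 10
timeframe:
  minutes: 5
filter:
- term:
    level: "error"
alert:
- "debug"
```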
+Detailed SearchGuard Permissions: + +``Cluster Permissions``: CLUSTER_COMPOSITE_OPS_RO + +``Index Permissions`` (Over the index the rule is querying on): READ, indices:data/read/scroll* + + diff --git a/docs/source/index.rst b/docs/source/index.rst index 4219bf13e..4cce22bf0 100644 --- a/docs/source/index.rst +++ b/docs/source/index.rst @@ -1,10 +1,10 @@ -.. ElastAlert documentation master file, created by +.. ElastAlert 2 documentation master file, created by sphinx-quickstart on Thu Jul 11 15:45:31 2013. You can adapt this file completely to your liking, but it should at least contain the root `toctree` directive. -ElastAlert - Easy & Flexible Alerting With Elasticsearch -======================================================== +ElastAlert 2 - Automated rule-based alerting for Elasticsearch +============================================================== Contents: @@ -15,12 +15,15 @@ Contents: running_elastalert ruletypes elastalert_status + elasticsearch_security_privileges recipes/adding_rules recipes/adding_alerts recipes/writing_filters recipes/adding_enhancements recipes/adding_loaders + recipes/exposing_rule_metrics recipes/signing_requests + recipes/faq Indices and Tables ================== diff --git a/docs/source/recipes/adding_alerts.rst b/docs/source/recipes/adding_alerts.rst index 65372369d..38de67368 100644 --- a/docs/source/recipes/adding_alerts.rst +++ b/docs/source/recipes/adding_alerts.rst @@ -22,28 +22,28 @@ and file is the name of the python file containing a ``Alerter`` subclass named Basics ------ -The alerter class will be instantiated when ElastAlert starts, and be periodically passed -matches through the ``alert`` method. ElastAlert also writes back info about the alert into +The alerter class will be instantiated when ElastAlert 2 starts, and be periodically passed +matches through the ``alert`` method. ElastAlert 2 also writes back info about the alert into Elasticsearch that it obtains through ``get_info``. Several important member properties: ``self.required_options``: This is a set containing names of configuration options that must be -present. ElastAlert will not instantiate the alert if any are missing. +present. ElastAlert 2 will not instantiate the alert if any are missing. ``self.rule``: The dictionary containing the rule configuration. All options specific to the alert should be in the rule configuration file and can be accessed here. ``self.pipeline``: This is a dictionary object that serves to transfer information between alerts. When an alert is triggered, a new empty pipeline object will be created and each alerter can add or receive information from it. Note that alerters -are called in the order they are defined in the rule file. For example, the JIRA alerter will add its ticket number +are called in the order they are defined in the rule file. For example, the Jira alerter will add its ticket number to the pipeline and the email alerter will add that link if it's present in the pipeline. alert(self, match): ------------------- -ElastAlert will call this function to send an alert. ``matches`` is a list of dictionary objects with +ElastAlert 2 will call this function to send an alert. ``matches`` is a list of dictionary objects with information about the match. You can get a nice string representation of the match by calling ``self.rule['type'].get_match_str(match, self.rule)``. If this method raises an exception, it will -be caught by ElastAlert and the alert will be marked as unsent and saved for later. 
+be caught by ElastAlert 2 and the alert will be marked as unsent and saved for later. get_info(self): --------------- @@ -56,7 +56,7 @@ Tutorial -------- Let's create a new alert that will write alerts to a local output file. First, -create a modules folder in the base ElastAlert folder: +create a modules folder in the base ElastAlert 2 folder: .. code-block:: console @@ -74,7 +74,7 @@ Now, in a file named ``my_alerts.py``, add # By setting required_options to a set of strings # You can ensure that the rule config file specifies all - # of the options. Otherwise, ElastAlert will throw an exception + # of the options. Otherwise, ElastAlert 2 will throw an exception # when trying to load the rule. required_options = set(['output_file_path']) @@ -110,5 +110,5 @@ In the rule configuration file, we are going to specify the alert by writing alert: "elastalert_modules.my_alerts.AwesomeNewAlerter" output_file_path: "/tmp/alerts.log" -ElastAlert will attempt to import the alert with ``from elastalert_modules.my_alerts import AwesomeNewAlerter``. +ElastAlert 2 will attempt to import the alert with ``from elastalert_modules.my_alerts import AwesomeNewAlerter``. This means that the folder must be in a location where it can be imported as a python module. diff --git a/docs/source/recipes/adding_enhancements.rst b/docs/source/recipes/adding_enhancements.rst index 688bff7fe..c040ad0eb 100644 --- a/docs/source/recipes/adding_enhancements.rst +++ b/docs/source/recipes/adding_enhancements.rst @@ -26,7 +26,7 @@ Example ------- As an example enhancement, let's add a link to a whois website. The match must contain a field named domain and it will -add an entry named domain_whois_link. First, create a modules folder for the enhancement in the ElastAlert directory. +add an entry named domain_whois_link. First, create a modules folder for the enhancement in the ElastAlert 2 directory. .. code-block:: console @@ -45,7 +45,7 @@ Now, in a file named ``my_enhancements.py``, add # The enhancement is run against every match # The match is passed to the process function where it can be modified in any way - # ElastAlert will do this for each enhancement linked to a rule + # ElastAlert 2 will do this for each enhancement linked to a rule def process(self, match): if 'domain' in match: url = "http://who.is/whois/%s" % (match['domain']) diff --git a/docs/source/recipes/adding_loaders.rst b/docs/source/recipes/adding_loaders.rst index 672d42390..25c93b294 100644 --- a/docs/source/recipes/adding_loaders.rst +++ b/docs/source/recipes/adding_loaders.rst @@ -24,7 +24,7 @@ Example ------- As an example loader, let's retrieve rules from a database rather than from the local file system. First, create a -modules folder for the loader in the ElastAlert directory. +modules folder for the loader in the ElastAlert 2 directory. .. 
code-block:: console @@ -78,7 +78,7 @@ Now, in a file named ``mongo_loader.py``, add self.cache[rule] = yaml.load(self.db.rules.find_one({'name': rule})['yaml']) return self.cache[rule] -Finally, you need to specify in your ElastAlert configuration file that MongoRulesLoader should be used instead of the +Finally, you need to specify in your ElastAlert 2 configuration file that MongoRulesLoader should be used instead of the default FileRulesLoader, so in your ``elastalert.conf`` file:: rules_loader: "elastalert_modules.mongo_loader.MongoRulesLoader" diff --git a/docs/source/recipes/adding_rules.rst b/docs/source/recipes/adding_rules.rst index 1ea2be6f5..64d1c8abe 100644 --- a/docs/source/recipes/adding_rules.rst +++ b/docs/source/recipes/adding_rules.rst @@ -31,19 +31,19 @@ and generates matches. Several important member properties are created in the `` ``self.rules``: This dictionary is loaded from the rule configuration file. If there is a ``timeframe`` configuration option, this will be automatically converted to a ``datetime.timedelta`` object when the rules are loaded. -``self.matches``: This is where ElastAlert checks for matches from the rule. Whatever information is relevant to the match +``self.matches``: This is where ElastAlert 2 checks for matches from the rule. Whatever information is relevant to the match (generally coming from the fields in Elasticsearch) should be put into a dictionary object and -added to ``self.matches``. ElastAlert will pop items out periodically and send alerts based on these objects. It is +added to ``self.matches``. ElastAlert 2 will pop items out periodically and send alerts based on these objects. It is recommended that you use ``self.add_match(match)`` to add matches. In addition to appending to ``self.matches``, ``self.add_match`` will convert the datetime ``@timestamp`` back into an ISO8601 timestamp. -``self.required_options``: This is a set of options that must exist in the configuration file. ElastAlert will +``self.required_options``: This is a set of options that must exist in the configuration file. ElastAlert 2 will ensure that all of these fields exist before trying to instantiate a ``RuleType`` instance. add_data(self, data): --------------------- -When ElastAlert queries Elasticsearch, it will pass all of the hits to the rule type by calling ``add_data``. +When ElastAlert 2 queries Elasticsearch, it will pass all of the hits to the rule type by calling ``add_data``. ``data`` is a list of dictionary objects which contain all of the fields in ``include``, ``query_key`` and ``compare_key`` if they exist, and ``@timestamp`` as a datetime object. They will always come in chronological order sorted by '@timestamp'. @@ -58,7 +58,7 @@ should return a string that gives some information about the match in the contex garbage_collect(self, timestamp): --------------------------------- -This will be called after ElastAlert has run over a time period ending in ``timestamp`` and should be used +This will be called after ElastAlert 2 has run over a time period ending in ``timestamp`` and should be used to clear any state that may be obsolete as of ``timestamp``. ``timestamp`` is a datetime object. @@ -67,7 +67,7 @@ Tutorial As an example, we are going to create a rule type for detecting suspicious logins. Let's imagine the data we are querying is login events that contains IP address, username and a timestamp. Our configuration will take a list of usernames and a time range -and alert if a login occurs in the time range. 
First, let's create a modules folder in the base ElastAlert folder: +and alert if a login occurs in the time range. First, let's create a modules folder in the base ElastAlert 2 folder: .. code-block:: console @@ -91,7 +91,7 @@ Now, in a file named ``my_rules.py``, add # By setting required_options to a set of strings # You can ensure that the rule config file specifies all - # of the options. Otherwise, ElastAlert will throw an exception + # of the options. Otherwise, ElastAlert 2 will throw an exception # when trying to load the rule. required_options = set(['time_start', 'time_end', 'usernames']) @@ -123,14 +123,14 @@ Now, in a file named ``my_rules.py``, add self.rules['time_start'], self.rules['time_end']) - # garbage_collect is called indicating that ElastAlert has already been run up to timestamp + # garbage_collect is called indicating that ElastAlert 2 has already been run up to timestamp # It is useful for knowing that there were no query results from Elasticsearch because # add_data will not be called with an empty list def garbage_collect(self, timestamp): pass -In the rule configuration file, ``example_rules/example_login_rule.yaml``, we are going to specify this rule by writing +In the rule configuration file, ``examples/rules/example_login_rule.yaml``, we are going to specify this rule by writing .. code-block:: yaml @@ -151,7 +151,7 @@ In the rule configuration file, ``example_rules/example_login_rule.yaml``, we ar alert: - debug -ElastAlert will attempt to import the rule with ``from elastalert_modules.my_rules import AwesomeRule``. +ElastAlert 2 will attempt to import the rule with ``from elastalert_modules.my_rules import AwesomeRule``. This means that the folder must be in a location where it can be imported as a Python module. An alert from this rule will look something like:: diff --git a/docs/source/recipes/exposing_rule_metrics.rst b/docs/source/recipes/exposing_rule_metrics.rst new file mode 100644 index 000000000..40d135500 --- /dev/null +++ b/docs/source/recipes/exposing_rule_metrics.rst @@ -0,0 +1,58 @@ +.. _exposingrulemetrics: + +Exposing Rule Metrics +===================== + +Configuration +------------- +Running ElastAlert with ``--prometheus_port`` configuration flag will expose ElastAlert 2 Prometheus metrics on the specified port. Prometheus metrics are disabled by default. + +To expose ElastAlert rule metrics on port ``9979`` run the following command: + +.. code-block:: console + + $ elastalert --config config.yaml --prometheus_port 9979 + +Rule Metrics +------------ + +The metrics being exposed are related to the `ElastAlert metadata indices `_. The exposed metrics are in the `Prometheus text-based format `_. Metrics are of the metric type `counter `_ or `gauge `_ and follow the `Prometheus metric naming `_. + +In the standard metric definition, the metric names are structured as follows: + +.. code-block:: console + + elastalert_{metric}_{unit} + +Where: + +- ``{metric}`` is a unique name of the metric. For example, ``hits``. +- ``{unit}`` is the unit of measurement of the metric value. For example, ``total`` is a counter type metric and ``created`` is a gauge type metric. + +All metrics except ``elastalert_errors_{unit}`` have values that apply to a particular rule name. In the exported metrics, these can be identified using the ``rule_name`` `Prometheus label `_. 
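+For example, assuming the ``--prometheus_port 9979`` invocation above, a rule named ``example_frequency``, and the default ``/metrics`` path, a scrape of the endpoint might return entries roughly like the following (the rule name, help text, and values are illustrative):
+
+.. code-block:: console
+
+    $ curl -s http://localhost:9979/metrics
+    # HELP elastalert_hits_total Number of hits
+    # TYPE elastalert_hits_total counter
+    elastalert_hits_total{rule_name="example_frequency"} 42.0
+    # HELP elastalert_alerts_sent_total Number of alerts sent
+    # TYPE elastalert_alerts_sent_total counter
+    elastalert_alerts_sent_total{rule_name="example_frequency"} 3.0
+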
+ +Find below all available metrics: + ++---------------------------------------+-----------------+---------------------------+---------------+ +| METRIC | Type | Description | Label | ++=======================================+=================+===========================+===============+ +| ``elastalert_scrapes_{unit}`` | Counter, Gauge | Number of scrapes | ``rule_name`` | ++---------------------------------------+-----------------+---------------------------+---------------+ +| ``elastalert_hits_{unit}`` | Counter, Gauge | Number of hits | ``rule_name`` | ++---------------------------------------+-----------------+---------------------------+---------------+ +| ``elastalert_matches_{unit}`` | Counter, Gauge | Number of matches | ``rule_name`` | ++---------------------------------------+-----------------+---------------------------+---------------+ +| ``elastalert_time_taken_{unit}`` | Counter, Gauge | Time taken in seconds | ``rule_name`` | ++---------------------------------------+-----------------+---------------------------+---------------+ +| ``elastalert_alerts_sent_{unir}`` | Counter, Gauge | Number of alerts sent | ``rule_name`` | ++---------------------------------------+-----------------+---------------------------+---------------+ +| ``elastalert_alerts_not_sent_{unit}`` | Counter, Gauge | Number of alerts not sent | ``rule_name`` | ++---------------------------------------+-----------------+---------------------------+---------------+ +| ``elastalert_alerts_silenced_{unit}`` | Counter, Gauge | Number of silenced alerts | ``rule_name`` | ++---------------------------------------+-----------------+---------------------------+---------------+ +| ``elastalert_errors_{unit}`` | Counter, Gauge | Number of errors | | ++---------------------------------------+-----------------+---------------------------+---------------+ + + + + diff --git a/docs/source/recipes/faq-md.md b/docs/source/recipes/faq-md.md new file mode 100644 index 000000000..5dfae911b --- /dev/null +++ b/docs/source/recipes/faq-md.md @@ -0,0 +1,478 @@ +My rule is not getting any hits? +========== + +So you've managed to set up ElastAlert 2, write a rule, and run it, but nothing happens, or it says +``0 query hits``. First of all, we recommend using the command ``elastalert-test-rule rule.yaml`` to +debug. It will show you how many documents match your filters for the last 24 hours (or more, see +``--help``), and then shows you if any alerts would have fired. If you have a filter in your rule, +remove it and try again. This will show you if the index is correct and that you have at least some +documents. If you have a filter in Kibana and want to recreate it in ElastAlert 2, you probably want +to use a query string. Your filter will look like + +``` +filter: +- query: + query_string: + query: "foo: bar AND baz: abc*" +``` +If you receive an error that Elasticsearch is unable to parse it, it's likely the YAML is not spaced +correctly, and the filter is not in the right format. If you are using other types of filters, like +``term``, a common pitfall is not realizing that you may need to use the analyzed token. This is the +default if you are using Logstash. For example, + +``` +filter: +- term: + foo: "Test Document" +``` + +will not match even if the original value for ``foo`` was exactly "Test Document". Instead, you want +to use ``foo.raw``. If you are still having trouble troubleshooting why your documents do not match, +try running ElastAlert 2 with ``--es_debug_trace /path/to/file.log``. 
This will log the queries made +to Elasticsearch in full so that you can see exactly what is happening. + +I got hits, why didn't I get an alert? +========== + +If you got logs that had ``X query hits, 0 matches, 0 alerts sent``, the reason you didn't get any alerts depends on the rule ``type``. If ``type: any``, a match will occur for every hit. If you are using +``type: frequency``, ``num_events`` must occur within ``timeframe`` of each other for a match to +occur. Different rules apply for different rule types. + +If you see ``X matches, 0 alerts sent``, this may occur for several reasons. If you set +``aggregation``, the alert will not be sent until after that time has elapsed. If you have gotten an +alert for this same rule before, that rule may be silenced for a period of time. The default is one +minute between alerts. If a rule is silenced, you will see ``Ignoring match for silenced rule`` in +the logs. + +If you see ``X alerts sent`` but didn't get any alert, it's probably related to the alert +configuration. If you are using the ``--debug`` flag, you will not receive any alerts. Instead, the +alert text will be written to the console. Use ``--verbose`` to achieve the same effect without +preventing alerts. If you are using the email alerter, make sure you have it configured for an SMTP +server. By default, it will connect to localhost on port 25. It will also use the word "elastalert" +as the "From:" address. Some SMTP servers will reject this because it does not have a domain while +others will add their own domain automatically. See the email section in the documentation for how +to configure this. + +Why did I only get one alert when I expected to get several? +========== + +There is a setting called ``realert`` which is the minimum time between two alerts for the same +rule. Any alert that occurs within this time will simply be dropped. The default value for this is +one minute. If you want to receive an alert for every single match, even if they occur right after +each other, use + +``` +realert: + minutes: 0 +``` + +You can of course set it higher as well. + +How can I prevent duplicate alerts? +========== + +By setting ``realert``, you will prevent the same rule from alerting twice within a given amount of time. + +``` +realert: + days: 1 +``` + +You can also prevent duplicates based on a certain field by using ``query_key``. For example, to +prevent multiple alerts for the same user, you might use + +``` +realert: + hours: 8 +query_key: user +``` + +Note that this will also affect the way many rule types work. If you are using ``type: frequency`` +for example, ``num_events`` for a single value of ``query_key`` must occur before an alert will be +sent. You can also use a compound of multiple fields for this key. For example, if you only wanted +to receive an alert once for a specific error and hostname, you could use + +``` +query_key: [error, hostname] +``` + +You can also write it in the following way. + +``` +query_key: + - error + - hostname +``` + +Internally, this works by creating a new field for each document called ``field1,field2`` with a +value of ``value1,value2`` and using that as the ``query_key``. + +The data for when an alert will fire again is stored in Elasticsearch in the ``elastalert_status`` +index, with a ``_type`` of ``silence`` and also cached in memory. + +How can I change what's in the alert? +========== + +You can use the field ``alert_text`` to add custom text to an alert. 
By setting ``alert_text_type: +alert_text_only`` or ``alert_text_type: alert_text_jinja``, it will be the entirety of the alert. +You can also add fields from the match to the alert text: + +With ``alert_text_type: alert_text_jinja``, use a [Jinja2](https://pypi.org/project/Jinja2/) +template. + +``` +alert_text_type: alert_text_jinja + +alert_text: | + Alert triggered! *({{num_hits}} Matches!)* + Something happened with {{username}} ({{email}}) + {{description|truncate}} + +``` + +- Top fields are accessible via `{{field_name}}` or `{{_data['field_name']}}`, `_data` is useful + when accessing *fields with dots in their keys*, as Jinja treats a dot as a nested field. +- If `_data` conflicts with your top-level data, use ``jinja_root_name`` to change its name. + +With ``alert_text_type: alert_text_only``, use Python-style string formatting and +``alert_text_args``. For example + +``` +alert_text: "Something happened with {0} at {1}" +alert_text_type: alert_text_only +alert_text_args: ["username", "@timestamp"] +``` + +You can also limit the alert to contain only certain fields from the document by using +``include``. + +``` +include: ["ip_address", "hostname", "status"] +``` + +My alert only contains data for one event, how can I see more? +========== + +If you are using ``type: frequency``, you can set the option ``attach_related: true`` and every +document will be included in the alert. An alternative, which works for every type, is +``top_count_keys``. This will show the top counts for each value for certain fields. For example, if +you have + +``` +top_count_keys: ["ip_address", "status"] +``` + +and 10 documents matched your alert, it may contain something like + +``` +ip_address: +127.0.0.1: 7 +10.0.0.1: 2 +192.168.0.1: 1 + +status: +200: 9 +500: 1 +``` + +How can I make the alert come at a certain time? +========== + +The ``aggregation`` feature will take every alert that has occurred over a period of time and send +them together in one alert. You can use cron-style syntax to send all alerts that have occurred since +the last one by using + +``` +aggregation: + schedule: '2 4 * * mon,fri' +``` + +I have lots of documents and it's really slow, how can I speed it up? +========== + +There are several ways to potentially speed up queries. If you are using ``index: logstash-*``, +Elasticsearch will query all shards, even those that cannot possibly contain data with the correct +timestamp. Instead, you can use Python time format strings and set ``use_strftime_index`` + +``` +index: logstash-%Y.%m +use_strftime_index: true +``` + +Another thing you could change is ``buffer_time``. By default, ElastAlert 2 will query large +overlapping windows in order to ensure that it does not miss any events, even if they are indexed in +real time. In config.yaml, you can adjust ``buffer_time`` to a smaller number to only query the most +recent few minutes. + +``` +buffer_time: + minutes: 5 +``` + +By default, ElastAlert 2 will download every document in full before processing them. Instead, you can +have ElastAlert 2 simply get a count of the number of documents that have occurred between each +query. To do this, set ``use_count_query: true``. This cannot be used if you use ``query_key``, +because ElastAlert 2 will not know the contents of each document, just the total number of them. This +also reduces the precision of alerts, because all events that occur between each query will be +rounded to a single timestamp. 
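+Putting these options together, a sketch of a count-only rule using a strftime index (the index pattern and values are illustrative) could look like:
+
+```
+index: logstash-%Y.%m
+use_strftime_index: true
+use_count_query: true
+```
+
+with ``buffer_time`` lowered in config.yaml as shown above.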
+ +If you are using ``query_key`` (a single key, not multiple keys) you can use ``use_terms_query``. +This will make ElastAlert 2 perform a terms aggregation to get the counts for each value of a certain +field. May not be compatible with all rule types. + +Can I perform aggregations? +========== + +The only aggregation supported currently is a terms aggregation, by setting ``use_terms_query``. + +I'm not using @timestamp, what do I do? +========== + +You can use ``timestamp_field`` to change which field ElastAlert 2 will use as the timestamp. You can +use ``timestamp_type`` to change it between ISO 8601 and unix timestamps. You must have some kind of +timestamp for ElastAlert 2 to work. If your events are not in real time, you can use ``query_delay`` +and ``buffer_time`` to adjust when ElastAlert 2 will look for documents. + +I'm using flatline but I don't see any alerts +========== + +When using ``type: flatline``, ElastAlert 2 must see at least one document before it will alert you +that it has stopped seeing them. + +How can I get a "resolve" event? +========== + +ElastAlert 2 does not currently support stateful alerts or resolve events. However, if you have a rule +alerting you that a condition has occurred, such as a service being down, then you can create a +second rule that will monitor the first rule, and alert you when the first rule ceases to trigger. + +For example, assuming you already have a rule named "Service is offline" that's working today, you +can add a second rule as follows: + +``` +name: Service is back online +type: flatline +index: elastalert* +query_key: "rule_name" +filter: +- query: + query_string: + query: "rule_name:\"Service is offline\" AND matches:>0" +forget_keys: true +timeframe: + minutes: 30 +threshold: 1 +``` + +This second rule will trigger after the timeframe of 30 minutes has elapsed with no further matches +against the first rule. + +Can I set a warning threshold? +========== + +Currently, the only way to set a warning threshold is by creating a second rule with a lower +threshold. + +Does it support Elastic Cloud's "Cloud ID"? +========== + +While Elastic Cloud is supported via the traditional URL connection method, +connecting via Cloud ID is not currently supported. + +I need to go through an http (s) proxy to connect to Elasticsearch. Does ElastAlert 2 support it? +========== + +Not supported. + +About boolean value +========== + +You can use all lowercase letters or only uppercase letters at the beginning. + +example + +``` +# OK +use_ssl: true +# OK +use_ssl: True +# OK +use_ssl: false +# OK +use_ssl: False +``` + +Is it possible to send an SNMP Trap with an alert notification? +========== + +* You need to additionally install snmp snmptrapd on the docker image. In other words, you need to modify the Dockerfile and recreate the Docker image with docker build. +* It is possible with the command Alerter. + +example + +``` +name: "mariadb-error-log-warning" +type: "frequency" +index: "mariadb-*" +num_events: 1 +timeframe: + minutes: 5 +realert: + minutes: 1 +filter: + - query: + query_string: + query: "@log_name:mysqld.error AND message:Warning" +alert: + - command +command: ["/usr/bin/snmptrap", "-IR", "-v", "2c", "-c", "public", "xxx.xxx.xxx.xxxxx:xxx", "", "netSnmp.99999", "netSnmp.99999.1", "s", "Hello, World"] +is_enabled: true +timestamp_field: "@timestamp" +timestamp_type: "iso" +use_strftime_index: false +``` + +Is Email Alerter compatible with Microsoft 365 (formerly Office 365)? +========== + +Not supported. 
+ +Does Email Alerter support the Google Gmail API? +========== + +Not supported. + +Can Email Alerter send emails via the Gmail sending server? +========== + +It is possible. However, you need to turn on (enable) the item "Access to insecure apps" in the "Security" settings of your Google account. + +Is it possible to send a JPEG image encoded as base64 in elasticsearch as an image attachment with an Email Alerter? +========== + +Yes, this is possible if the base64 encoded bytes are available in the matched document, as shown in the example below: + +``` +include: [base64field] +alert_text_args: [base64field] +email_format: "html" +alert_text_type: alert_text_only +alert_text: | + + +
+ <html><body><img alt="Image" src="data:image/jpeg;base64,{0}" /></body></html> +
+ + +``` + +Does the alert notification destination support Alertmanager? +========== + +Now supported as of ElastAlert 2.2.3. + +The es_host parameter seems to use only one host. Is it possible to specify multiple nodes? +========== + +There are two options: + +1. Use HAProxy in front of Elasticsearch to support multiple hosts. +2. Use the new ``es_hosts`` parameter introduced in ElastAlert 2.2.3. See :ref:`Configuration `. + +Is there any plan to implement a REST API in this project? +========== + +There are no plans to do so. + +An error occurred when trying to create a blacklist rule that parses a file with more than 1024 lines. +========== + +This is the default limit for Elasticsearch. Specifying more than 1024 items in the blacklist will result in an error. +This is a known issue. The whitelist rule type may be subject to a similar limitation. +See the following issues on the original yelp/elastalert for more information. + +https://github.com/Yelp/elastalert/issues/1867
+https://github.com/Yelp/elastalert/issues/2704 + +ElastAlert 2 doesn't have a listening port? +========== + +ElastAlert 2 does not have a network API. There is no listening port unless optional modules, such as Prometheus metrics, are activated. You can monitor its activity by viewing the console output or Docker logs. + +I've set `ssl_show_warn` but it doesn't seem to work. +========== + +Now supported as of ElastAlert 2.4.0. + +How to write a query filter for phrases containing spaces? +========== + +To search for values containing spaces or other special characters, you will need to use escape characters. This is briefly mentioned at the bottom of the [Lucene Query Parser Syntax documentation](https://lucene.apache.org/core/2_9_4/queryparsersyntax.html) but does not go into extensive detail. Below are some examples to use in ElastAlert 2 rule filters. + +Example 1 - Escaping double quotes within double quotes. Useful for embedded single quotes and double quotes in your search phrase: + +``` +filter: + - query: + query_string: + query: "\"Women's Clothing\"" +``` + +Example 2 - Avoiding escaping altogether by enclosing double quotes within single quotes: + +``` +filter: + - query: + query_string: + query: '"Rabbia Al"' +``` + +Does ElastAlert 2 support Elasticsearch 8? +=========== + +ElastAlert 2 supports Elasticsearch 8. + +To upgrade an existing ElastAlert 2 installation to Elasticsearch 8, the +following manual steps are required (note the important WARNING below): + +* Shut down ElastAlert 2. +* Delete the old `elastalert*` indices. See [Elasticsearch + documentation](https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-delete-index.html) + for instructions on how to delete via the API, or use the Kibana Index Management interface. +* Upgrade the Elastic cluster to Elasticsearch 8 following the [Elastic 8 upgrade instructions](https://elastic.co/guide/en/elastic-stack/8.0/upgrading-elastic-stack.html). +* If NOT running ElastAlert 2 via Docker or Kubernetes, run + elastalert-create-index to create the new indices. This is not needed when + running via a container since the container always attempts to create the + indices at startup, if they're not yet created. +* Restart ElastAlert 2. + +WARNING: Failure to remove the old ElastAlert indices can result in a non-working Elasticsearch cluster. This is because the ElastAlert indices contain deprecated features and the Elasticsearch 8 upgrade logic is currently flawed and does not correctly handle this situation. The Elasticsearch GitHub repository contains [more information](https://github.com/elastic/elasticsearch/issues/84199) on this problem. + +Does the Amazon SNS (Simple Notification Service) alerter support multiple sns_topic_arn values? +========== + +Yes, for example: + +``` +alert: + - sns: + sns_topic_arn: "aws-topic1" + - sns: + sns_topic_arn: "aws-topic2" +``` + +Does the Telegram alerter support multiple telegram_room_id values? +========== + +Yes, for example: + +``` +alert: + - telegram: + telegram_room_id: "AAA" + - telegram: + telegram_room_id: "BBB" +telegram_bot_token: "XXX" +``` diff --git a/docs/source/recipes/faq.rst b/docs/source/recipes/faq.rst new file mode 100644 index 000000000..34a632f69 --- /dev/null +++ b/docs/source/recipes/faq.rst @@ -0,0 +1,6 @@ +.. _faq: + +Frequently Asked Questions +-------------------------- + +.. 
mdinclude:: faq-md.md diff --git a/docs/source/recipes/signing_requests.rst b/docs/source/recipes/signing_requests.rst index d5d162b94..ff3362ded 100644 --- a/docs/source/recipes/signing_requests.rst +++ b/docs/source/recipes/signing_requests.rst @@ -1,9 +1,9 @@ .. _signingrequests: -Signing requests to Amazon Elasticsearch service +Signing requests to Amazon OpenSearch Service ================================================ -When using Amazon Elasticsearch service, you need to secure your Elasticsearch +When using Amazon OpenSearch Service, you need to secure your Elasticsearch from the outside. Currently, there is no way to secure your Elasticsearch using network firewall rules, so the only way is to signing the requests using the access key and secret key for a role or user with permissions on the @@ -18,7 +18,7 @@ credentials. Using an Instance Profile ------------------------- -Typically, you'll deploy ElastAlert on a running EC2 instance on AWS. You can +Typically, you'll deploy ElastAlert 2 on a running EC2 instance on AWS. You can assign a role to this instance that gives it permissions to read from and write to the Elasticsearch service. When using an Instance Profile, you will need to specify the ``aws_region`` in the configuration file or set the @@ -28,8 +28,8 @@ Using AWS profiles ------------------ You can also create a user with permissions on the Elasticsearch service and -tell ElastAlert to authenticate itself using that user. First, create an AWS -profile in the machine where you'd like to run ElastAlert for the user with +tell ElastAlert 2 to authenticate itself using that user. First, create an AWS +profile in the machine where you'd like to run ElastAlert 2 for the user with permissions. You can use the environment variables ``AWS_DEFAULT_PROFILE`` and diff --git a/docs/source/recipes/writing_filters.rst b/docs/source/recipes/writing_filters.rst index 1d2959262..eb4da6b7c 100644 --- a/docs/source/recipes/writing_filters.rst +++ b/docs/source/recipes/writing_filters.rst @@ -67,11 +67,13 @@ Terms allows for easy combination of multiple term filters:: - terms: field: ["value1", "value2"] # value1 OR value2 -You can also match on multiple fields:: +You can also match on multiple fields (All terms must match at least one of the given values):: - terms: fieldX: ["value1", "value2"] + - terms: fieldY: ["something", "something_else"] + - terms: fieldZ: ["foo", "bar", "baz"] wildcard @@ -98,59 +100,28 @@ For ranges on fields:: Negation, and, or ***************** -For Elasticsearch 2.X, any of the filters can be embedded in ``not``, ``and``, and ``or``:: - - filter: - - or: - - term: - field: "value" - - wildcard: - field: "foo*bar" - - and: - - not: - term: - field: "value" - - not: - term: - _type: "something" - -For Elasticsearch 5.x, this will not work and to implement boolean logic use query strings:: - - filter: - - query: - query_string: - query: "somefield: somevalue OR foo: bar" - - -Loading Filters Directly From Kibana 3 --------------------------------------- - -There are two ways to load filters directly from a Kibana 3 dashboard. You can set your filter to:: +Below is a more complex example for Elasticsearch 7.x, provided by a `community user. `_:: filter: - download_dashboard: "My Dashboard Name" - -and when ElastAlert starts, it will download the dashboard schema from Elasticsearch and use the filters from that. 
-However, if the dashboard name changes or if there is connectivity problems when ElastAlert starts, the rule will not load and -ElastAlert will exit with an error like "Could not download filters for .." - -The second way is to generate a config file once using the Kibana dashboard. To do this, run ``elastalert-rule-from-kibana``. - -.. code-block:: console - - $ elastalert-rule-from-kibana - Elasticsearch host: elasticsearch.example.com - Elasticsearch port: 14900 - Dashboard name: My Dashboard - - Partial Config file - ----------- - - name: My Dashboard - es_host: elasticsearch.example.com - es_port: 14900 - filter: - - query: - query_string: {query: '_exists_:log.message'} - - query: - query_string: {query: 'some_field:12345'} + - term: + action: order + - terms: + dining: + - pickup + - delivery + - bool: + #exclude common/expected orders + must_not: + #Alice usually gets a pizza + - bool: + must: [ {term: {uid: alice}}, {term: {menu_item: pizza}} ] + #Bob loves his hoagies + - bool: + must: [ {term: {uid: bob}}, {term: {menu_item: sandwich}} ] + #Charlie has a few favorites + - bool: + must: + - term: + uid: charlie + - match: + menu_item: "burrito pasta salad pizza" diff --git a/docs/source/requirements.txt b/docs/source/requirements.txt new file mode 100644 index 000000000..ecd67a4ad --- /dev/null +++ b/docs/source/requirements.txt @@ -0,0 +1 @@ +m2r2 \ No newline at end of file diff --git a/docs/source/ruletypes.rst b/docs/source/ruletypes.rst index ff3763712..224a7af3c 100644 --- a/docs/source/ruletypes.rst +++ b/docs/source/ruletypes.rst @@ -1,7 +1,7 @@ Rule Types and Configuration Options ************************************ -Examples of several types of rule configuration can be found in the example_rules folder. +Examples of several types of rule configuration can be found in the ``examples/rules`` folder. .. 
_commonconfig: @@ -26,6 +26,8 @@ Rule Configuration Cheat Sheet +--------------------------------------------------------------+ | | ``alert`` (string or list) | | +--------------------------------------------------------------+-----------+ +| ``es_hosts`` (list, no default) | | ++--------------------------------------------------------------+ | | ``name`` (string, defaults to the filename) | | +--------------------------------------------------------------+ | | ``use_strftime_index`` (boolean, default False) | Optional | @@ -34,38 +36,50 @@ Rule Configuration Cheat Sheet +--------------------------------------------------------------+ | | ``verify_certs`` (boolean, default True) | | +--------------------------------------------------------------+ | +| ``ssl_show_warn`` (boolean, default True) | | ++--------------------------------------------------------------+ | | ``es_username`` (string, no default) | | +--------------------------------------------------------------+ | | ``es_password`` (string, no default) | | +--------------------------------------------------------------+ | +| ``es_bearer`` (string, no default) | | ++--------------------------------------------------------------+ | +| ``es_api_key`` (string, no default) | | ++--------------------------------------------------------------+ | | ``es_url_prefix`` (string, no default) | | +--------------------------------------------------------------+ | +| ``statsd_instance_tag`` (string, no default) | | ++--------------------------------------------------------------+ | +| ``statsd_host`` (string, no default) | | ++--------------------------------------------------------------+ | | ``es_send_get_body_as`` (string, default "GET") | | +--------------------------------------------------------------+ | | ``aggregation`` (time, no default) | | +--------------------------------------------------------------+ | -| ``description`` (string, default empty string) | | -+--------------------------------------------------------------+ | -| ``generate_kibana_link`` (boolean, default False) | | +| ``limit_execution`` (string, no default) | | +--------------------------------------------------------------+ | -| ``use_kibana_dashboard`` (string, no default) | | +| ``description`` (string, default empty string) | | +--------------------------------------------------------------+ | | ``kibana_url`` (string, default from es_host) | | +--------------------------------------------------------------+ | -| ``use_kibana4_dashboard`` (string, no default) | | +| ``kibana_username`` (string, no default) | | +--------------------------------------------------------------+ | -| ``kibana4_start_timedelta`` (time, default: 10 min) | | +| ``kibana_password`` (string, no default) | | +--------------------------------------------------------------+ | -| ``kibana4_end_timedelta`` (time, default: 10 min) | | +| ``kibana_verify_certs`` (boolean, default True) | | +--------------------------------------------------------------+ | | ``generate_kibana_discover_url`` (boolean, default False) | | +--------------------------------------------------------------+ | +| ``shorten_kibana_discover_url`` (boolean, default False) | | ++--------------------------------------------------------------+ | | ``kibana_discover_app_url`` (string, no default) | | +--------------------------------------------------------------+ | | ``kibana_discover_version`` (string, no default) | | +--------------------------------------------------------------+ | | ``kibana_discover_index_pattern_id`` (string, 
no default) | | +--------------------------------------------------------------+ | +| ``kibana_discover_security_tenant`` (string, no default) | | ++--------------------------------------------------------------+ | | ``kibana_discover_columns`` (list of strs, default _source) | | +--------------------------------------------------------------+ | | ``kibana_discover_from_timedelta`` (time, default: 10 min) | | @@ -76,6 +90,8 @@ Rule Configuration Cheat Sheet +--------------------------------------------------------------+ | | ``realert`` (time, default: 1 min) | | +--------------------------------------------------------------+ | +| ``realert_key`` (string, defaults to the rule name) | | ++--------------------------------------------------------------+ | | ``exponential_realert`` (time, no default) | | +--------------------------------------------------------------+ | | ``match_enhancements`` (list of strs, no default) | | @@ -102,6 +118,8 @@ Rule Configuration Cheat Sheet +--------------------------------------------------------------+ | | ``scan_entire_timeframe`` (bool, default False) | | +--------------------------------------------------------------+ | +| ``query_timezone`` (string, default empty string) | | ++--------------------------------------------------------------+ | | ``import`` (string) | | | | | | IGNORED IF ``use_count_query`` or ``use_terms_query`` is true| | @@ -114,6 +132,8 @@ Rule Configuration Cheat Sheet +--------------------------------------------------------------+ | | ``timestamp_format_expr`` (string, no default ) | | +--------------------------------------------------------------+ | +| ``timestamp_to_datetime_format_expr`` (string, no default ) | | ++--------------------------------------------------------------+ | | ``_source_enabled`` (boolean, default True) | | +--------------------------------------------------------------+ | | ``alert_text_args`` (array of strs) | | @@ -129,67 +149,107 @@ Rule Configuration Cheat Sheet | -+----------------------------------------------------+--------+-----------+-----------+--------+-----------+-------+----------+--------+-----------+ -| RULE TYPE | Any | Blacklist | Whitelist | Change | Frequency | Spike | Flatline |New_term|Cardinality| -+====================================================+========+===========+===========+========+===========+=======+==========+========+===========+ -| ``compare_key`` (list of strs, no default) | | Req | Req | Req | | | | | | -+----------------------------------------------------+--------+-----------+-----------+--------+-----------+-------+----------+--------+-----------+ -|``blacklist`` (list of strs, no default) | | Req | | | | | | | | -+----------------------------------------------------+--------+-----------+-----------+--------+-----------+-------+----------+--------+-----------+ -|``whitelist`` (list of strs, no default) | | | Req | | | | | | | -+----------------------------------------------------+--------+-----------+-----------+--------+-----------+-------+----------+--------+-----------+ -| ``ignore_null`` (boolean, no default) | | | Req | Req | | | | | | -+----------------------------------------------------+--------+-----------+-----------+--------+-----------+-------+----------+--------+-----------+ -| ``query_key`` (string, no default) | Opt | | | Req | Opt | Opt | Opt | Req | Opt | -+----------------------------------------------------+--------+-----------+-----------+--------+-----------+-------+----------+--------+-----------+ -| ``aggregation_key`` (string, no default) 
| Opt | | | | | | | | | -+----------------------------------------------------+--------+-----------+-----------+--------+-----------+-------+----------+--------+-----------+ -| ``summary_table_fields`` (list, no default) | Opt | | | | | | | | | -+----------------------------------------------------+--------+-----------+-----------+--------+-----------+-------+----------+--------+-----------+ -| ``timeframe`` (time, no default) | | | | Opt | Req | Req | Req | | Req | -+----------------------------------------------------+--------+-----------+-----------+--------+-----------+-------+----------+--------+-----------+ -| ``num_events`` (int, no default) | | | | | Req | | | | | -+----------------------------------------------------+--------+-----------+-----------+--------+-----------+-------+----------+--------+-----------+ -| ``attach_related`` (boolean, no default) | | | | | Opt | | | | | -+----------------------------------------------------+--------+-----------+-----------+--------+-----------+-------+----------+--------+-----------+ -|``use_count_query`` (boolean, no default) | | | | | Opt | Opt | Opt | | | -| | | | | | | | | | | -|``doc_type`` (string, no default) | | | | | | | | | | -+----------------------------------------------------+--------+-----------+-----------+--------+-----------+-------+----------+--------+-----------+ -|``use_terms_query`` (boolean, no default) | | | | | Opt | Opt | | Opt | | -| | | | | | | | | | | -|``doc_type`` (string, no default) | | | | | | | | | | -| | | | | | | | | | | -|``query_key`` (string, no default) | | | | | | | | | | -| | | | | | | | | | | -|``terms_size`` (int, default 50) | | | | | | | | | | -+----------------------------------------------------+--------+-----------+-----------+--------+-----------+-------+----------+--------+-----------+ -| ``spike_height`` (int, no default) | | | | | | Req | | | | -+----------------------------------------------------+--------+-----------+-----------+--------+-----------+-------+----------+--------+-----------+ -|``spike_type`` ([up|down|both], no default) | | | | | | Req | | | | -+----------------------------------------------------+--------+-----------+-----------+--------+-----------+-------+----------+--------+-----------+ -|``alert_on_new_data`` (boolean, default False) | | | | | | Opt | | | | -+----------------------------------------------------+--------+-----------+-----------+--------+-----------+-------+----------+--------+-----------+ -|``threshold_ref`` (int, no default) | | | | | | Opt | | | | -+----------------------------------------------------+--------+-----------+-----------+--------+-----------+-------+----------+--------+-----------+ -|``threshold_cur`` (int, no default) | | | | | | Opt | | | | -+----------------------------------------------------+--------+-----------+-----------+--------+-----------+-------+----------+--------+-----------+ -|``threshold`` (int, no default) | | | | | | | Req | | | -+----------------------------------------------------+--------+-----------+-----------+--------+-----------+-------+----------+--------+-----------+ -|``fields`` (string or list, no default) | | | | | | | | Req | | -+----------------------------------------------------+--------+-----------+-----------+--------+-----------+-------+----------+--------+-----------+ -|``terms_window_size`` (time, default 30 days) | | | | | | | | Opt | | -+----------------------------------------------------+--------+-----------+-----------+--------+-----------+-------+----------+--------+-----------+ 
-|``window_step_size`` (time, default 1 day) | | | | | | | | Opt | | -+----------------------------------------------------+--------+-----------+-----------+--------+-----------+-------+----------+--------+-----------+ -|``alert_on_missing_fields`` (boolean, default False)| | | | | | | | Opt | | -+----------------------------------------------------+--------+-----------+-----------+--------+-----------+-------+----------+--------+-----------+ -|``cardinality_field`` (string, no default) | | | | | | | | | Req | -+----------------------------------------------------+--------+-----------+-----------+--------+-----------+-------+----------+--------+-----------+ -|``max_cardinality`` (boolean, no default) | | | | | | | | | Opt | -+----------------------------------------------------+--------+-----------+-----------+--------+-----------+-------+----------+--------+-----------+ -|``min_cardinality`` (boolean, no default) | | | | | | | | | Opt | -+----------------------------------------------------+--------+-----------+-----------+--------+-----------+-------+----------+--------+-----------+ ++-------------------------------------------------------+--------+-----------+-----------+--------+-----------+-------+----------+--------+-----------+------------------+-----------------+----------------+ +| RULE TYPE | Any | Blacklist | Whitelist | Change | Frequency | Spike | Flatline |New_term|Cardinality|Metric Aggregation|Spike Aggregation|Percentage Match| ++=======================================================+========+===========+===========+========+===========+=======+==========+========+===========+==================+=================+================+ +| ``compare_key`` (list of strs, no default) | | Req | Req | Req | | | | | | | | | ++-------------------------------------------------------+--------+-----------+-----------+--------+-----------+-------+----------+--------+-----------+------------------+-----------------+----------------+ +|``blacklist`` (list of strs, no default) | | Req | | | | | | | | | | | ++-------------------------------------------------------+--------+-----------+-----------+--------+-----------+-------+----------+--------+-----------+------------------+-----------------+----------------+ +|``whitelist`` (list of strs, no default) | | | Req | | | | | | | | | | ++-------------------------------------------------------+--------+-----------+-----------+--------+-----------+-------+----------+--------+-----------+------------------+-----------------+----------------+ +| ``ignore_null`` (boolean, default False) | | | Req | Req | | | | | | | | | ++-------------------------------------------------------+--------+-----------+-----------+--------+-----------+-------+----------+--------+-----------+------------------+-----------------+----------------+ +| ``query_key`` (string or list, no default) | Opt | | | Req | Opt | Opt | Opt | Req | Opt | Opt | Opt | Opt | ++-------------------------------------------------------+--------+-----------+-----------+--------+-----------+-------+----------+--------+-----------+------------------+-----------------+----------------+ +| ``aggregation_key`` (string, no default) | Opt | | | | | | | | | | | | ++-------------------------------------------------------+--------+-----------+-----------+--------+-----------+-------+----------+--------+-----------+------------------+-----------------+----------------+ +| ``summary_table_fields`` (list, no default) | Opt | | | | | | | | | | | | 
++-------------------------------------------------------+--------+-----------+-----------+--------+-----------+-------+----------+--------+-----------+------------------+-----------------+----------------+ +| ``timeframe`` (time, no default) | | | | Opt | Req | Req | Req | | Req | | Req | | ++-------------------------------------------------------+--------+-----------+-----------+--------+-----------+-------+----------+--------+-----------+------------------+-----------------+----------------+ +| ``num_events`` (int, no default) | | | | | Req | | | | | | | | ++-------------------------------------------------------+--------+-----------+-----------+--------+-----------+-------+----------+--------+-----------+------------------+-----------------+----------------+ +| ``attach_related`` (boolean, default False) | | | | | Opt | | | | | | | | ++-------------------------------------------------------+--------+-----------+-----------+--------+-----------+-------+----------+--------+-----------+------------------+-----------------+----------------+ +|``use_count_query`` (boolean, default False) | | | | | Opt | Opt | Opt | | | | | | ++-------------------------------------------------------+--------+-----------+-----------+--------+-----------+-------+----------+--------+-----------+------------------+-----------------+----------------+ +|``use_terms_query`` (boolean, default False) | | | | | Opt | Opt | | Opt | | | | | +| | | | | | | | | | | | | | +|``query_key`` (string or list, no default) | | | | | | | | | | | | | +| | | | | | | | | | | | | | +|``terms_size`` (int, default 50) | | | | | | | | | | | | | ++-------------------------------------------------------+--------+-----------+-----------+--------+-----------+-------+----------+--------+-----------+------------------+-----------------+----------------+ +| ``spike_height`` (int, no default) | | | | | | Req | | | | | Req | | ++-------------------------------------------------------+--------+-----------+-----------+--------+-----------+-------+----------+--------+-----------+------------------+-----------------+----------------+ +|``spike_type`` ([up|down|both], no default) | | | | | | Req | | | | | Req | | ++-------------------------------------------------------+--------+-----------+-----------+--------+-----------+-------+----------+--------+-----------+------------------+-----------------+----------------+ +|``alert_on_new_data`` (boolean, default False) | | | | | | Opt | | | | | | | ++-------------------------------------------------------+--------+-----------+-----------+--------+-----------+-------+----------+--------+-----------+------------------+-----------------+----------------+ +|``threshold_ref`` (int, no default) | | | | | | Opt | | | | | | | ++-------------------------------------------------------+--------+-----------+-----------+--------+-----------+-------+----------+--------+-----------+------------------+-----------------+----------------+ +|``threshold_ref`` (number, no default) | | | | | | | | | | | Opt | | ++-------------------------------------------------------+--------+-----------+-----------+--------+-----------+-------+----------+--------+-----------+------------------+-----------------+----------------+ +|``threshold_cur`` (int, no default) | | | | | | Opt | | | | | | | ++-------------------------------------------------------+--------+-----------+-----------+--------+-----------+-------+----------+--------+-----------+------------------+-----------------+----------------+ +|``threshold_cur`` (number, no default) | 
| | | | | | | | | | Opt | | ++-------------------------------------------------------+--------+-----------+-----------+--------+-----------+-------+----------+--------+-----------+------------------+-----------------+----------------+ +|``threshold`` (int, no default) | | | | | | | Req | | | | | | ++-------------------------------------------------------+--------+-----------+-----------+--------+-----------+-------+----------+--------+-----------+------------------+-----------------+----------------+ +|``fields`` (string or list, no default) | | | | | | | | Req | | | | | ++-------------------------------------------------------+--------+-----------+-----------+--------+-----------+-------+----------+--------+-----------+------------------+-----------------+----------------+ +|``terms_window_size`` (time, default 30 days) | | | | | | | | Opt | | | | | ++-------------------------------------------------------+--------+-----------+-----------+--------+-----------+-------+----------+--------+-----------+------------------+-----------------+----------------+ +|``window_step_size`` (time, default 1 day) | | | | | | | | Opt | | | | | ++-------------------------------------------------------+--------+-----------+-----------+--------+-----------+-------+----------+--------+-----------+------------------+-----------------+----------------+ +|``alert_on_missing_field`` (boolean, default False) | | | | | | | | Opt | | | | | ++-------------------------------------------------------+--------+-----------+-----------+--------+-----------+-------+----------+--------+-----------+------------------+-----------------+----------------+ +|``cardinality_field`` (string, no default) | | | | | | | | | Req | | | | ++-------------------------------------------------------+--------+-----------+-----------+--------+-----------+-------+----------+--------+-----------+------------------+-----------------+----------------+ +|``max_cardinality`` (boolean, default False) | | | | | | | | | Opt | | | | ++-------------------------------------------------------+--------+-----------+-----------+--------+-----------+-------+----------+--------+-----------+------------------+-----------------+----------------+ +|``min_cardinality`` (boolean, default False) | | | | | | | | | Opt | | | | ++-------------------------------------------------------+--------+-----------+-----------+--------+-----------+-------+----------+--------+-----------+------------------+-----------------+----------------+ +|``metric_agg_key`` (string, no default) | | | | | | | | | | Req | | | ++-------------------------------------------------------+--------+-----------+-----------+--------+-----------+-------+----------+--------+-----------+------------------+-----------------+----------------+ +|``metric_agg_type`` (no default, | | | | | | | | | | Req | Req | | +| | | | | | | | | | | | | | +|([min|max|avg|sum|cardinality|value_count|percentiles])| | | | | | | | | | | | | ++-------------------------------------------------------+--------+-----------+-----------+--------+-----------+-------+----------+--------+-----------+------------------+-----------------+----------------+ +|``metric_agg_script`` (no default) | | | | | | | | | | Opt | Opt | | ++-------------------------------------------------------+--------+-----------+-----------+--------+-----------+-------+----------+--------+-----------+------------------+-----------------+----------------+ +|``percentile_range`` ++required if percentiles is used | | | | | | | | | | Req++ | Req++ | | 
++-------------------------------------------------------+--------+-----------+-----------+--------+-----------+-------+----------+--------+-----------+------------------+-----------------+----------------+ +|``max_threshold`` (number, no default) | | | | | | | | | | Opt | | | +| | | | | | | | | | | | | | +|``min_threshold`` (number, no default) | | | | | | | | | | | | | +| | | | | | | | | | | | | | +|Requires at least one of the two options | | | | | | | | | | | | | ++-------------------------------------------------------+--------+-----------+-----------+--------+-----------+-------+----------+--------+-----------+------------------+-----------------+----------------+ +|``min_doc_count`` (int, default 1) | | | | | | | | | | Opt | Opt | | ++-------------------------------------------------------+--------+-----------+-----------+--------+-----------+-------+----------+--------+-----------+------------------+-----------------+----------------+ +|``use_run_every_query_size`` (boolean, default False) | | | | | | | | | | Opt | | Opt | ++-------------------------------------------------------+--------+-----------+-----------+--------+-----------+-------+----------+--------+-----------+------------------+-----------------+----------------+ +|``allow_buffer_time_overlap`` (boolean, default False) | | | | | | | | | | Opt | | Opt | ++-------------------------------------------------------+--------+-----------+-----------+--------+-----------+-------+----------+--------+-----------+------------------+-----------------+----------------+ +|``bucket_interval`` (time, no default) | | | | | | | | | | Opt | | Opt | +| | | | | | | | | | | | | | +|``sync_bucket_interval`` (boolean, default False) | | | | | | | | | | | | | ++-------------------------------------------------------+--------+-----------+-----------+--------+-----------+-------+----------+--------+-----------+------------------+-----------------+----------------+ +|``metric_format_string`` (string, no default) | | | | | | | | | | Opt | | | ++-------------------------------------------------------+--------+-----------+-----------+--------+-----------+-------+----------+--------+-----------+------------------+-----------------+----------------+ +|``match_bucket_filter`` (no default) | | | | | | | | | | | | Req | ++-------------------------------------------------------+--------+-----------+-----------+--------+-----------+-------+----------+--------+-----------+------------------+-----------------+----------------+ +|``min_percentage`` (number, no default) | | | | | | | | | | | | Req | +| | | | | | | | | | | | | | +|``max_percentage`` (number, no default) | | | | | | | | | | | | | +| | | | | | | | | | | | | | +|Requires at least one of the two options | | | | | | | | | | | | | ++-------------------------------------------------------+--------+-----------+-----------+--------+-----------+-------+----------+--------+-----------+------------------+-----------------+----------------+ +|``percentage_format_string`` (string, no default) | | | | | | | | | | | | Opt | ++-------------------------------------------------------+--------+-----------+-----------+--------+-----------+-------+----------+--------+-----------+------------------+-----------------+----------------+ +|``min_denominator`` (int, default 0) | | | | | | | | | | | | Opt | ++-------------------------------------------------------+--------+-----------+-----------+--------+-----------+-------+----------+--------+-----------+------------------+-----------------+----------------+ Common 
Configuration Options ============================ @@ -205,6 +265,7 @@ es_host ``es_host``: The hostname of the Elasticsearch cluster the rule will use to query. (Required, string, no default) The environment variable ``ES_HOST`` will override this field. +For multiple host Elasticsearch clusters see ``es_hosts`` parameter. es_port ^^^^^^^ @@ -219,6 +280,10 @@ index ``index: my-index-*`` which will match ``my-index-2014-10-05``. You can also use a format string containing ``%Y`` for year, ``%m`` for month, and ``%d`` for day. To use this, you must also set ``use_strftime_index`` to true. (Required, string, no default) +For example, Separate multiple indices with commas.:: + + index: topbeat-*,packetbeat-* + name ^^^^ @@ -239,14 +304,20 @@ or loaded from a module. For loading from a module, the alert should be specifie Optional Settings ~~~~~~~~~~~~~~~~~ +es_hosts +^^^^^^^^ + +``es_hosts``: The list of nodes of the Elasticsearch cluster that the rule will use for the request. (Optional, list, default none). Values can be specified as ``host:port`` if overriding the default port. +The environment variable ``ES_HOSTS`` will override this field, and can be specified as a comma-separated value. Note that the ``es_host`` parameter must still be specified in order to identify a primary Elasticsearch host. import ^^^^^^ ``import``: If specified includes all the settings from this yaml file. This allows common config options to be shared. Note that imported files that aren't -complete rules should not have a ``.yml`` or ``.yaml`` suffix so that ElastAlert doesn't treat them as rules. Filters in imported files are merged (ANDed) -with any filters in the rule. You can only have one import per rule, though the imported file can import another file, recursively. The filename -can be an absolute path or relative to the rules directory. (Optional, string, no default) +complete rules should not have a ``.yml`` or ``.yaml`` suffix so that ElastAlert 2 doesn't treat them as rules. Filters in imported files are merged (ANDed) +with any filters in the rule. You can have one import per rule (value is string) or several imports per rule (value is a list of strings). +The imported file can import another file or multiple files, recursively. +The filename can be an absolute path or relative to the rules directory. (Optional, string or array of strings, no default) use_ssl ^^^^^^^ @@ -254,6 +325,11 @@ use_ssl ``use_ssl``: Whether or not to connect to ``es_host`` using TLS. (Optional, boolean, default False) The environment variable ``ES_USE_SSL`` will override this field. +ssl_show_warn +^^^^^^^^^^^^^ + +``ssl_show_warn``: Whether or not to show SSL/TLS warnings when ``verify_certs`` is disabled. (Optional, boolean, default True) + verify_certs ^^^^^^^^^^^^ @@ -284,11 +360,32 @@ es_password ``es_password``: basic-auth password for connecting to ``es_host``. (Optional, string, no default) The environment variable ``ES_PASSWORD`` will override this field. +es_bearer +^^^^^^^^^^^ + +``es_bearer``: bearer-token authorization for connecting to ``es_host``. (Optional, string, no default) The environment variable ``ES_BEARER`` will override this field. This authentication option will override the password authentication option. + +es_api_key +^^^^^^^^^^^ + +``es_api_key``: api-key-token authorization for connecting to ``es_host``. (Optional, base64 string, no default) The environment variable ``ES_API_KEY`` will override this field. 
This authentication option will override both the bearer and the password authentication options. + es_url_prefix ^^^^^^^^^^^^^ ``es_url_prefix``: URL prefix for the Elasticsearch endpoint. (Optional, string, no default) +statsd_instance_tag +^^^^^^^^^^^^^^^^^^^ + +``statsd_instance_tag``: prefix for statsd metrics. (Optional, string, no default) + + +statsd_host +^^^^^^^^^^^^^ + +``statsd_host``: statsd host. (Optional, string, no default) + es_send_get_body_as ^^^^^^^^^^^^^^^^^^^ @@ -297,7 +394,7 @@ es_send_get_body_as use_strftime_index ^^^^^^^^^^^^^^^^^^ -``use_strftime_index``: If this is true, ElastAlert will format the index using datetime.strftime for each query. +``use_strftime_index``: If this is true, ElastAlert 2 will format the index using datetime.strftime for each query. See https://docs.python.org/2/library/datetime.html#strftime-strptime-behavior for more details. If a query spans multiple days, the formatted indexes will be concatenated with commas. This is useful as narrowing the number of indexes searched, compared to using a wildcard, may be significantly faster. For example, if ``index`` is @@ -307,7 +404,7 @@ as narrowing the number of indexes searched, compared to using a wildcard, may b search_extra_index ^^^^^^^^^^^^^^^^^^ -``search_extra_index``: If this is true, ElastAlert will add an extra index on the early side onto each search. For example, if it's querying +``search_extra_index``: If this is true, ElastAlert 2 will add an extra index on the early side onto each search. For example, if it's querying completely within 2018-06-28, it will actually use 2018-06-27,2018-06-28. This can be useful if your timestamp_field is not what's being used to generate the index names. If that's the case, sometimes a query would not have been using the right index. @@ -315,7 +412,7 @@ aggregation ^^^^^^^^^^^ ``aggregation``: This option allows you to aggregate multiple matches together into one alert. Every time a match is found, -ElastAlert will wait for the ``aggregation`` period, and send all of the matches that have occurred in that time for a particular +ElastAlert 2 will wait for the ``aggregation`` period, and send all of the matches that have occurred in that time for a particular rule together. For example:: @@ -350,7 +447,11 @@ Then, assuming an aggregation window of 10 minutes, if you receive the following This should result in 2 alerts: One containing alice's two events, sent at ``2016-09-20T00:10:00`` and one containing bob's one event sent at ``2016-09-20T00:16:00`` -For aggregations, there can sometimes be a large number of documents present in the viewing medium (email, jira ticket, etc..). If you set the ``summary_table_fields`` field, Elastalert will provide a summary of the specified fields from all the results. +For aggregations, there can sometimes be a large number of documents present in the viewing medium (email, Jira ticket, etc..). If you set the ``summary_table_fields`` field, ElastAlert 2 will provide a summary of the specified fields from all the results. + +The formatting style of the summary table can be switched between ``ascii`` (default) and ``markdown`` with parameter ``summary_table_type``. ``markdown`` might be the more suitable formatting for alerters supporting it like TheHive. + +The maximum number of rows in the summary table can be limited with the parameter ``summary_table_max_rows``. 
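+For instance, a rule that wants the summary rendered as Markdown and capped at ten rows might additionally set (the values here are illustrative)::
+
+    summary_table_type: markdown
+    summary_table_max_rows: 10
+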
For example, if you wish to summarize the usernames and event_types that appear in the documents so that you can see the most relevant fields at a quick glance, you can set:: @@ -358,7 +459,7 @@ For example, if you wish to summarize the usernames and event_types that appear - my_data.username - my_data.event_type -Then, for the same sample data shown above listing alice and bob's events, Elastalert will provide the following summary table in the alert medium:: +Then, for the same sample data shown above listing alice and bob's events, ElastAlert 2 will provide the following summary table in the alert medium:: +------------------+--------------------+ | my_data.username | my_data.event_type | @@ -370,10 +471,19 @@ Then, for the same sample data shown above listing alice and bob's events, Elast .. note:: - By default, aggregation time is relative to the current system time, not the time of the match. This means that running elastalert over - past events will result in different alerts than if elastalert had been running while those events occured. This behavior can be changed + By default, aggregation time is relative to the current system time, not the time of the match. This means that running ElastAlert 2 over + past events will result in different alerts than if ElastAlert 2 had been running while those events occured. This behavior can be changed by setting ``aggregate_by_match_time``. +limit_execution +^^^^^^^^^^^^^^^ + +``limit_execution``: This option allows you to activate the rule during a limited period of time. This uses the cron format. + +For example, if you wish to activate the rule from monday to friday, between 10am to 6pm:: + + limit_execution: "* 10-18 * * 1-5" + aggregate_by_match_time ^^^^^^^^^^^^^^^^^^^^^^^ @@ -387,9 +497,15 @@ realert will be applied on a per key basis. All matches for a given rule, or for matches with the same ``query_key``, will be ignored for the given time. All matches with a missing ``query_key`` will be grouped together using a value of ``_missing``. This is applied to the time the alert is sent, not to the time of the event. It defaults to one minute, which means -that if ElastAlert is run over a large time period which triggers many matches, only the first alert will be sent by default. If you want +that if ElastAlert 2 is run over a large time period which triggers many matches, only the first alert will be sent by default. If you want every alert, set realert to 0 minutes. (Optional, time, default 1 minute) +realert_key +^^^^^^^^^^^ + +``realert_key``: This option allows you to customize the key for ``realert``. The default is the rule name, but if you have multiple rules that +you would like to use the same key for you can set the ``realert_key`` to be the same in those rules. (Optional, string, default is the rule name) + exponential_realert ^^^^^^^^^^^^^^^^^^^ @@ -409,9 +525,14 @@ buffer_time query_delay ^^^^^^^^^^^ -``query_delay``: This option will cause ElastAlert to subtract a time delta from every query, causing the rule to run with a delay. +``query_delay``: This option will cause ElastAlert 2 to subtract a time delta from every query, causing the rule to run with a delay. This is useful if the data is Elasticsearch doesn't get indexed immediately. (Optional, time) +For example:: + + query_delay: + hours: 2 + owner ^^^^^ @@ -432,13 +553,13 @@ max_query_size ``max_query_size``: The maximum number of documents that will be downloaded from Elasticsearch in a single query. 
If you expect a large number of results, consider using ``use_count_query`` for the rule. If this -limit is reached, a warning will be logged but ElastAlert will continue without downloading more results. This setting will +limit is reached, a warning will be logged but ElastAlert 2 will continue without downloading more results. This setting will override a global ``max_query_size``. (Optional, int, default value of global ``max_query_size``) filter ^^^^^^ -``filter``: A list of Elasticsearch query DSL filters that is used to query Elasticsearch. ElastAlert will query Elasticsearch using the format +``filter``: A list of Elasticsearch query DSL filters that is used to query Elasticsearch. ElastAlert 2 will query Elasticsearch using the format ``{'filter': {'bool': {'must': [config.filter]}}}`` with an additional timestamp range filter. All of the results of querying with these filters are passed to the ``RuleType`` for analysis. For more information writing filters, see :ref:`Writing Filters `. (Required, Elasticsearch query DSL, no default) @@ -453,12 +574,12 @@ fields, along with '@timestamp', ``query_key``, ``compare_key``, and ``top_count top_count_keys ^^^^^^^^^^^^^^ -``top_count_keys``: A list of fields. ElastAlert will perform a terms query for the top X most common values for each of the fields, +``top_count_keys``: A list of fields. ElastAlert 2 will perform a terms query for the top X most common values for each of the fields, where X is 5 by default, or ``top_count_number`` if it exists. For example, if ``num_events`` is 100, and ``top_count_keys`` is ``- "username"``, the alert will say how many of the 100 events have each username, for the top 5 usernames. When this is computed, the time range used is from ``timeframe`` before the most recent event -to 10 minutes past the most recent event. Because ElastAlert uses an aggregation query to compute this, it will attempt to use the -field name plus ".raw" to count unanalyzed terms. To turn this off, set ``raw_count_keys`` to false. +to 10 minutes past the most recent event. Because ElastAlert 2 uses an aggregation query to compute this, it will attempt to use the +field name plus ".keyword" to count unanalyzed terms. To turn this off, set ``raw_count_keys`` to false. top_count_number ^^^^^^^^^^^^^^^^ @@ -468,7 +589,7 @@ top_count_number raw_count_keys ^^^^^^^^^^^^^^ -``raw_count_keys``: If true, all fields in ``top_count_keys`` will have ``.raw`` appended to them. (Optional, boolean, default true) +``raw_count_keys``: If true, all fields in ``top_count_keys`` will have ``.keyword`` appended to them. This used to be ".raw" in older Elasticsearch versions, but the setting name `raw_count_keys` was left as-is to avoid breaking existing installations. (Optional, boolean, default true) description ^^^^^^^^^^^ @@ -476,53 +597,36 @@ description ``description``: text describing the purpose of rule. (Optional, string, default empty string) Can be referenced in custom alerters to provide context as to why a rule might trigger. -generate_kibana_link -^^^^^^^^^^^^^^^^^^^^ - -``generate_kibana_link``: This option is for Kibana 3 only. -If true, ElastAlert will generate a temporary Kibana dashboard and include a link to it in alerts. The dashboard -consists of an events over time graph and a table with ``include`` fields selected in the table. If the rule uses ``query_key``, the -dashboard will also contain a filter for the ``query_key`` of the alert. The dashboard schema will -be uploaded to the kibana-int index as a temporary dashboard. 
(Optional, boolean, default False) - kibana_url ^^^^^^^^^^ -``kibana_url``: The url to access Kibana. This will be used if ``generate_kibana_link`` or -``use_kibana_dashboard`` is true. If not specified, a URL will be constructed using ``es_host`` and ``es_port``. -(Optional, string, default ``http://:/_plugin/kibana/``) +``kibana_url``: The base url of the Kibana application. If not specified, a URL will be constructed using ``es_host`` +and ``es_port``. -use_kibana_dashboard -^^^^^^^^^^^^^^^^^^^^ +This value will be used if ``generate_kibana_discover_url`` is true and ``kibana_discover_app_url`` is a relative path -``use_kibana_dashboard``: The name of a Kibana 3 dashboard to link to. Instead of generating a dashboard from a template, -ElastAlert can use an existing dashboard. It will set the time range on the dashboard to around the match time, -upload it as a temporary dashboard, add a filter to the ``query_key`` of the alert if applicable, -and put the url to the dashboard in the alert. (Optional, string, no default) +(Optional, string, default ``http://:/_plugin/kibana/``) -use_kibana4_dashboard -^^^^^^^^^^^^^^^^^^^^^ +kibana_username +^^^^^^^^^^^^^^^ -``use_kibana4_dashboard``: A link to a Kibana 4 dashboard. For example, "https://kibana.example.com/#/dashboard/My-Dashboard". -This will set the time setting on the dashboard from the match time minus the timeframe, to 10 minutes after the match time. -Note that this does not support filtering by ``query_key`` like Kibana 3. This value can use `$VAR` and `${VAR}` references -to expand environment variables. +``kibana_username``: The username used to make basic authenticated API requests against Kibana. +This value is only used if ``shorten_kibana_discover_url`` is true. -kibana4_start_timedelta -^^^^^^^^^^^^^^^^^^^^^^^ +(Optional, string, no default) -``kibana4_start_timedelta``: Defaults to 10 minutes. This option allows you to specify the start time for the generated kibana4 dashboard. -This value is added in front of the event. For example, +kibana_password +^^^^^^^^^^^^^^^ -``kibana4_start_timedelta: minutes: 2`` +``kibana_password``: The password used to make basic authenticated API requests against Kibana. +This value is only used if ``shorten_kibana_discover_url`` is true. -kibana4_end_timedelta -^^^^^^^^^^^^^^^^^^^^^ +(Optional, string, no default) -``kibana4_end_timedelta``: Defaults to 10 minutes. This option allows you to specify the end time for the generated kibana4 dashboard. -This value is added in back of the event. For example, +kibana_verify_certs +^^^^^^^^^^^^^^^^^^^ -``kibana4_end_timedelta: minutes: 2`` +``kibana_verify_certs``: Whether or not to verify TLS certificates when querying Kibana. (Optional, boolean, default True) generate_kibana_discover_url ^^^^^^^^^^^^^^^^^^^^^^^^^^^^ @@ -536,13 +640,53 @@ This setting requires the following settings are also configured: ``generate_kibana_discover_url: true`` +Example usage:: + + generate_kibana_discover_url: True + kibana_discover_app_url: "http://localhost:5601/app/discover#/" + kibana_discover_index_pattern_id: "4babf380-c3b1-11eb-b616-1b59c2feec54" + kibana_discover_version: "7.15" + kibana_discover_from_timedelta: + minutes: 10 + kibana_discover_to_timedelta: + minutes: 10 + alert_text: '{0}' + alert_text_args: [ kibana_discover_url ] + alert_text_type: alert_text_only + +shorten_kibana_discover_url +^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +``shorten_kibana_discover_url``: Enables the shortening of the generated Kibana Discover urls. 
+In order to use the Kibana Shorten URL REST API, the ``kibana_discover_app_url`` must be provided +as a relative url (e.g. app/discover?#/). + +ElastAlert may need to authenticate with Kibana to invoke the Kibana Shorten URL REST API. The +supported authentication methods are: + +- Basic authentication by specifying ``kibana_username`` and ``kibana_password`` +- AWS authentication (if configured already for ElasticSearch) + +(Optional, bool, false) + kibana_discover_app_url ^^^^^^^^^^^^^^^^^^^^^^^ ``kibana_discover_app_url``: The url of the Kibana Discover application used to generate the ``kibana_discover_url`` variable. This value can use `$VAR` and `${VAR}` references to expand environment variables. +This value should be relative to the base kibana url defined by ``kibana_url`` and will vary depending on your installation. + +``kibana_discover_app_url: app/discover#/`` + +(Optional, string, no default) + +kibana_discover_security_tenant +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +``kibana_discover_security_tenant``: The Kibana security tenant to include in the generated +``kibana_discover_url`` variable. -``kibana_discover_app_url: http://kibana:5601/#/discover`` +(Optional, string, no default) kibana_discover_version ^^^^^^^^^^^^^^^^^^^^^^^ @@ -551,11 +695,10 @@ kibana_discover_version The currently supported versions of Kibana Discover are: -- `5.6` -- `6.0`, `6.1`, `6.2`, `6.3`, `6.4`, `6.5`, `6.6`, `6.7`, `6.8` -- `7.0`, `7.1`, `7.2`, `7.3` +- `7.0`, `7.1`, `7.2`, `7.3`, `7.4`, `7.5`, `7.6`, `7.7`, `7.8`, `7.9`, `7.10`, `7.11`, `7.12`, `7.13`, `7.14`, `7.15`, `7.16`, `7.17` +- `8.0`, `8.1`, `8.2`, `8.3`, `8.4`, `8.5`, `8.6` -``kibana_discover_version: '7.3'`` +``kibana_discover_version: '7.15'`` kibana_discover_index_pattern_id ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ @@ -563,6 +706,8 @@ kibana_discover_index_pattern_id ``kibana_discover_index_pattern_id``: The id of the index pattern to link to in the Kibana Discover application. These ids are usually generated and can be found in url of the index pattern management page, or by exporting its saved object. +In this documentation all references of "index pattern" refer to the similarly named concept in Kibana 8 called "data view". + Example export of an index pattern's saved object: .. code-block:: text @@ -607,7 +752,7 @@ use_local_time ^^^^^^^^^^^^^^ ``use_local_time``: Whether to convert timestamps to the local time zone in alerts. If false, timestamps will -be converted to UTC, which is what ElastAlert uses internally. (Optional, boolean, default true) +be converted to UTC, which is what ElastAlert 2 uses internally. (Optional, boolean, default true) match_enhancements ^^^^^^^^^^^^^^^^^^ @@ -646,6 +791,26 @@ summary_table_fields ``summary_table_fields``: Specifying the summmary_table_fields in conjunction with an aggregation will make it so that each aggregated alert will contain a table summarizing the values for the specified fields in all the matches that were aggregated together. +summary_table_type +^^^^^^^^^^^^^^^^^^^^ + +``summary_table_type``: Either ``ascii`` or ``markdown``. Select the table type to use for the aggregation summary. Defaults to ``ascii`` for the classical text based table. + +summary_table_max_rows +^^^^^^^^^^^^^^^^^^^^^^ + +``summary_table_max_rows``: Limit the maximum number of rows that will be shown in the summary table. + +summary_prefix +^^^^^^^^^^^^^^^^^^^^ + +``summary_prefix``: Specify a prefix string, which will be added in front of the aggregation summary table. 
This string is currently not subject to any formatting. + +summary_suffix +^^^^^^^^^^^^^^^^^^^^ + +``summary_suffix``: Specify a suffix string, which will be added after the aggregation summary table. This string is currently not subject to any formatting. + timestamp_type ^^^^^^^^^^^^^^ @@ -666,7 +831,7 @@ timestamp_format_expr ^^^^^^^^^^^^^^^^^^^^^ ``timestamp_format_expr``: In case Elasticsearch used custom date format for date type field, this option provides a way to adapt the -value obtained converting a datetime through ``timestamp_format``, when the format cannot match perfectly what defined in Elastisearch. +value obtained converting a datetime through ``timestamp_format``, when the format cannot match perfectly what is defined in Elasticsearch. When set, this option is evaluated as a Python expression along with a *globals* dictionary containing the original datetime instance named ``dt`` and the timestamp to be refined, named ``ts``. The returned value becomes the timestamp obtained from the datetime. For example, when the date type field in Elasticsearch uses milliseconds (``yyyy-MM-dd'T'HH:mm:ss.SSS'Z'``) and ``timestamp_format`` @@ -676,32 +841,50 @@ Setting ``timestamp_format_expr: 'ts[:23] + ts[26:]'`` will truncate the value t This option is only valid if ``timestamp_type`` set to ``custom``. (Optional, string, no default). +timestamp_to_datetime_format_expr +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +``timestamp_to_datetime_format_expr``: In the same spirit as ``timestamp_format_expr``, in case Elasticsearch uses a custom date format for a date type field, +this option provides a way to adapt the value (as a string) returned by an Elasticsearch query before converting it into a datetime used by ElastAlert 2. +The changes are applied before converting the timestamp string to a datetime using ``timestamp_format``. This is useful when the format cannot match perfectly what is returned by Elasticsearch. When set, this option is evaluated as a Python expression along with a *globals* dictionary containing the original timestamp to be refined (as a string) named ``ts``. The returned value will be parsed into a Python datetime using the previously defined format (or using the default '%Y-%m-%dT%H:%M:%SZ'). + +For example, when the date type field returned by Elasticsearch uses nanoseconds (``yyyy-MM-dd'T'HH:mm:ss.SSS.XXXXXX``) and the ``timestamp_format`` +option is ``'%Y-%m-%dT%H:%M:%S.%f'`` (nanoseconds are not supported by Python's datetime.datetime.strptime), ElastAlert 2 would fail to parse the timestamp values as they contain nanoseconds - that is, three additional digits that cannot be parsed, throwing the exception ``ValueError: unconverted data remains: XXX``. Setting ``timestamp_to_datetime_format_expr: 'ts[:23]'`` will truncate the value to milliseconds, allowing a clean conversion into a datetime object. This option is only valid if ``timestamp_type`` is set to ``custom``. +(Optional, string, no default). + _source_enabled ^^^^^^^^^^^^^^^ -``_source_enabled``: If true, ElastAlert will use _source to retrieve fields from documents in Elasticsearch. If false, -ElastAlert will use ``fields`` to retrieve stored fields. Both of these are represented internally as if they came from ``_source``. +``_source_enabled``: If true, ElastAlert 2 will use _source to retrieve fields from documents in Elasticsearch. If false, +ElastAlert 2 will use ``fields`` to retrieve stored fields. Both of these are represented internally as if they came from ``_source``.
See https://www.elastic.co/guide/en/elasticsearch/reference/current/mapping-fields.html for more details. The fields used come from ``include``, see above for more details. (Optional, boolean, default True) scan_entire_timeframe ^^^^^^^^^^^^^^^^^^^^^ -``scan_entire_timeframe``: If true, when ElastAlert starts, it will always start querying at the current time minus the timeframe. +``scan_entire_timeframe``: If true, when ElastAlert 2 starts, it will always start querying at the current time minus the timeframe. ``timeframe`` must exist in the rule. This may be useful, for example, if you are using a flatline rule type with a large timeframe, -and you want to be sure that if ElastAlert restarts, you can still get alerts. This may cause duplicate alerts for some rule types, -for example, Frequency can alert multiple times in a single timeframe, and if ElastAlert were to restart with this setting, it may +and you want to be sure that if ElastAlert 2 restarts, you can still get alerts. This may cause duplicate alerts for some rule types, +for example, Frequency can alert multiple times in a single timeframe, and if ElastAlert 2 were to restart with this setting, it may scan the same range again, triggering duplicate alerts. Some rules and alerts require additional options, which also go in the top level of the rule configuration file. +query_timezone +^^^^^^^^^^^^^^ + +``query_timezone``: Whether to convert UTC time to the specified time zone in rule queries. +If not set, start and end time of query will be used UTC. (Optional, string, default empty string) + +Example value : query_timezone: "Europe/Istanbul" .. _testing : Testing Your Rule ================= -Once you've written a rule configuration, you will want to validate it. To do so, you can either run ElastAlert in debug mode, +Once you've written a rule configuration, you will want to validate it. To do so, you can either run ElastAlert 2 in debug mode, or use ``elastalert-test-rule``, which is a script that makes various aspects of testing easier. It can: @@ -716,7 +899,7 @@ It can: - Save documents returned to a JSON file. -- Run ElastAlert using either a JSON file or actual results from Elasticsearch. +- Run ElastAlert 2 using either a JSON file or actual results from Elasticsearch. - Print out debug alerts or trigger real alerts. @@ -724,7 +907,7 @@ It can: - Show what metadata documents would be written to ``elastalert_status``. -Without any optional arguments, it will run ElastAlert over the last 24 hours and print out any alerts that would have occurred. +Without any optional arguments, it will run ElastAlert 2 over the last 24 hours and print out any alerts that would have occurred. Here is an example test run which triggered an alert: .. code-block:: console @@ -773,9 +956,9 @@ Other options include: ``--schema-only``: Only perform schema validation on the file. It will not load modules or query Elasticsearch. This may catch invalid YAML and missing or misconfigured fields. -``--count-only``: Only find the number of matching documents and list available fields. ElastAlert will not be run and documents will not be downloaded. +``--count-only``: Only find the number of matching documents and list available fields. ElastAlert 2 will not be run and documents will not be downloaded. -``--days N``: Instead of the default 1 day, query N days. For selecting more specific time ranges, you must run ElastAlert itself and use ``--start`` +``--days N``: Instead of the default 1 day, query N days. 
For selecting more specific time ranges, you must run ElastAlert 2 itself and use ``--start`` and ``--end``. ``--save-json FILE``: Save all documents downloaded to a file as JSON. This is useful if you wish to modify data while testing or do offline @@ -790,7 +973,7 @@ guaranteed to have the exact same results as with Elasticsearch. For example, an ``--formatted-output``: Output results in formatted JSON. .. note:: - Results from running this script may not always be the same as if an actual ElastAlert instance was running. Some rule types, such as spike + Results from running this script may not always be the same as if an actual ElastAlert 2 instance was running. Some rule types, such as spike and flatline require a minimum elapsed time before they begin alerting, based on their timeframe. In addition, use_count_query and use_terms_query rely on run_every to determine their resolution. This script uses a fixed 5 minute window, which is the same as the default. @@ -800,7 +983,7 @@ guaranteed to have the exact same results as with Elasticsearch. For example, an Rule Types ========== -The various ``RuleType`` classes, defined in ``elastalert/ruletypes.py``, form the main logic behind ElastAlert. An instance +The various ``RuleType`` classes, defined in ``elastalert/ruletypes.py``, form the main logic behind ElastAlert 2. An instance is held in memory for each rule, passed all of the data returned by querying Elasticsearch with a given filter, and generates matches based on that data. @@ -857,7 +1040,7 @@ It is possible to mix between whitelisted value definitions, or use either one. Change ~~~~~~ -For an example configuration file using this rule type, look at ``example_rules/example_change.yaml``. +For an example configuration file using this rule type, look at ``examples/rules/example_change.yaml``. ``change``: This rule will monitor a certain field and match if that field changes. The field must change with respect to the last event with the same ``query_key``. @@ -874,13 +1057,13 @@ the events that are checked. There is also an optional field: -``timeframe``: The maximum time between changes. After this time period, ElastAlert will forget the old value +``timeframe``: The maximum time between changes. After this time period, ElastAlert 2 will forget the old value of the ``compare_key`` field. Frequency ~~~~~~~~~ -For an example configuration file using this rule type, look at ``example_rules/example_frequency.yaml``. +For an example configuration file using this rule type, look at ``examples/rules/example_frequency.yaml``. ``frequency``: This rule matches when there are at least a certain number of events in a given time frame. This may be counted on a per-``query_key`` basis. @@ -893,14 +1076,12 @@ This rule requires two additional options: Optional: -``use_count_query``: If true, ElastAlert will poll Elasticsearch using the count api, and not download all of the matching documents. This is +``use_count_query``: If true, ElastAlert 2 will poll Elasticsearch using the count api, and not download all of the matching documents. This is useful is you care only about numbers and not the actual data. It should also be used if you expect a large number of query hits, in the order -of tens of thousands or more. ``doc_type`` must be set to use this. +of tens of thousands or more. -``doc_type``: Specify the ``_type`` of document to search for. This must be present if ``use_count_query`` or ``use_terms_query`` is set. 
- -``use_terms_query``: If true, ElastAlert will make an aggregation query against Elasticsearch to get counts of documents matching -each unique value of ``query_key``. This must be used with ``query_key`` and ``doc_type``. This will only return a maximum of ``terms_size``, +``use_terms_query``: If true, ElastAlert 2 will make an aggregation query against Elasticsearch to get counts of documents matching +each unique value of ``query_key``. This must be used with ``query_key``. This will only return a maximum of ``terms_size``, default 50, unique terms. ``terms_size``: When used with ``use_terms_query``, this is the maximum number of terms returned per query. Default is 50. @@ -938,7 +1119,7 @@ Optional: ``field_value``: When set, uses the value of the field in the document and not the number of matching documents. This is useful to monitor for example a temperature sensor and raise an alarm if the temperature grows too fast. Note that the means of the field on the reference and current windows are used to determine if the ``spike_height`` value is reached. -Note also that the threshold parameters are ignored in this smode. +Note also that the threshold parameters are ignored in this mode. ``threshold_ref``: The minimum number of events that must exist in the reference window for an alert to trigger. For example, if @@ -1028,14 +1209,12 @@ consider the following examples:: trigger an immediate alert. When set to false, baseline must be established for each new ``query_key`` value, and then subsequent spikes may cause alerts. Baseline is established after ``timeframe`` has elapsed twice since first occurrence. -``use_count_query``: If true, ElastAlert will poll Elasticsearch using the count api, and not download all of the matching documents. This is +``use_count_query``: If true, ElastAlert 2 will poll Elasticsearch using the count api, and not download all of the matching documents. This is useful is you care only about numbers and not the actual data. It should also be used if you expect a large number of query hits, in the order -of tens of thousands or more. ``doc_type`` must be set to use this. - -``doc_type``: Specify the ``_type`` of document to search for. This must be present if ``use_count_query`` or ``use_terms_query`` is set. +of tens of thousands or more. -``use_terms_query``: If true, ElastAlert will make an aggregation query against Elasticsearch to get counts of documents matching -each unique value of ``query_key``. This must be used with ``query_key`` and ``doc_type``. This will only return a maximum of ``terms_size``, +``use_terms_query``: If true, ElastAlert 2 will make an aggregation query against Elasticsearch to get counts of documents matching +each unique value of ``query_key``. This must be used with ``query_key``. This will only return a maximum of ``terms_size``, default 50, unique terms. ``terms_size``: When used with ``use_terms_query``, this is the maximum number of terms returned per query. Default is 50. @@ -1055,14 +1234,12 @@ This rule requires two additional options: Optional: -``use_count_query``: If true, ElastAlert will poll Elasticsearch using the count api, and not download all of the matching documents. This is +``use_count_query``: If true, ElastAlert 2 will poll Elasticsearch using the count api, and not download all of the matching documents. This is useful is you care only about numbers and not the actual data. It should also be used if you expect a large number of query hits, in the order -of tens of thousands or more. 
``doc_type`` must be set to use this. +of tens of thousands or more. -``doc_type``: Specify the ``_type`` of document to search for. This must be present if ``use_count_query`` or ``use_terms_query`` is set. - -``use_terms_query``: If true, ElastAlert will make an aggregation query against Elasticsearch to get counts of documents matching -each unique value of ``query_key``. This must be used with ``query_key`` and ``doc_type``. This will only return a maximum of ``terms_size``, +``use_terms_query``: If true, ElastAlert 2 will make an aggregation query against Elasticsearch to get counts of documents matching +each unique value of ``query_key``. This must be used with ``query_key``. This will only return a maximum of ``terms_size``, default 50, unique terms. ``terms_size``: When used with ``use_terms_query``, this is the maximum number of terms returned per query. Default is 50. @@ -1070,13 +1247,13 @@ default 50, unique terms. ``query_key``: With flatline rule, ``query_key`` means that an alert will be triggered if any value of ``query_key`` has been seen at least once and then falls below the threshold. -``forget_keys``: Only valid when used with ``query_key``. If this is set to true, ElastAlert will "forget" about the ``query_key`` value that +``forget_keys``: Only valid when used with ``query_key``. If this is set to true, ElastAlert 2 will "forget" about the ``query_key`` value that triggers an alert, therefore preventing any more alerts for it until it's seen again. New Term ~~~~~~~~ -``new_term``: This rule matches when a new value appears in a field that has never been seen before. When ElastAlert starts, it will +``new_term``: This rule matches when a new value appears in a field that has never been seen before. When ElastAlert 2 starts, it will use an aggregation query to gather all known terms for a list of fields. This rule requires one additional option: @@ -1088,7 +1265,7 @@ that compose a composite key used for the ElasticSearch query. .. note:: The composite fields may only refer to primitive types, otherwise the initial ElasticSearch query will not properly return - the aggregation results, thus causing alerts to fire every time the ElastAlert service initially launches with the rule. + the aggregation results, thus causing alerts to fire every time the ElastAlert 2 service initially launches with the rule. A warning will be logged to the console if this scenario is encountered. However, future alerts will actually work as expected after the initial flurry. @@ -1103,7 +1280,7 @@ expensive aggregation queries. The default is 1 day. ``alert_on_missing_field``: Whether or not to alert when a field is missing from a document. The default is false. -``use_terms_query``: If true, ElastAlert will use aggregation queries to get terms instead of regular search queries. This is faster +``use_terms_query``: If true, ElastAlert 2 will use aggregation queries to get terms instead of regular search queries. This is faster than regular searching if there is a large number of documents. If this is used, you may only specify a single field, and must also set ``query_key`` to that field. Also, note that ``terms_size`` (the number of buckets returned per query) defaults to 50. This means that if a new term appears but there are at least 50 terms which appear more frequently, it will not be found. @@ -1111,11 +1288,11 @@ that if a new term appears but there are at least 50 terms which appear more fre .. note:: When using use_terms_query, make sure that the field you are using is not analyzed. 
If it is, the results of each terms - query may return tokens rather than full values. ElastAlert will by default turn on use_keyword_postfix, which attempts - to use the non-analyzed version (.keyword or .raw) to gather initial terms. These will not match the partial values and + query may return tokens rather than full values. ElastAlert 2 will by default turn on use_keyword_postfix, which attempts + to use the non-analyzed version (.keyword) to gather initial terms. These will not match the partial values and result in false positives. -``use_keyword_postfix``: If true, ElastAlert will automatically try to add .keyword (ES5+) or .raw to the fields when making an +``use_keyword_postfix``: If true, ElastAlert 2 will automatically try to add .keyword to the fields when making an initial query. These are non-analyzed fields added by Logstash. If the field used is analyzed, the initial query will return only the tokenized values, potentially causing false positives. Defaults to true. @@ -1153,12 +1330,11 @@ default this is ``buffer_time``. This rule requires: ``metric_agg_key``: This is the name of the field over which the metric value will be calculated. The underlying type of this field must be -supported by the specified aggregation type. +supported by the specified aggregation type. If using a scripted field via ``metric_agg_script``, this is the name for your scripted field -``metric_agg_type``: The type of metric aggregation to perform on the ``metric_agg_key`` field. This must be one of 'min', 'max', 'avg', -'sum', 'cardinality', 'value_count'. +``metric_agg_type``: The type of metric aggregation to perform on the ``metric_agg_key`` field. This must be one of 'min', 'max', 'avg', 'sum', 'cardinality', 'value_count', 'percentiles'. Note, if `percentiles` is used, then ``percentile_range`` must also be specified. -``doc_type``: Specify the ``_type`` of document to search for. +.. note:: When Metric Aggregation has a match, match_body includes an aggregated value that triggered the match so that you can use that on an alert. The value is named based on ``metric_agg_key`` and ``metric_agg_type``. For example, if you set ``metric_agg_key`` to 'system.cpu.total.norm.pct' and ``metric_agg_type`` to 'avg', the name of the value is 'metric_system.cpu.total.norm.pct_avg'. Because of this naming rule, you might face conflicts with jinja2 template, and when that happens, you also can use 'metric_agg_value' from match_body instead. This rule also requires at least one of the two following options: @@ -1166,11 +1342,19 @@ This rule also requires at least one of the two following options: ``min_threshold``: If the calculated metric value is less than this number, an alert will be triggered. This threshold is exclusive. +``percentile_range``: An integer specifying the percentage value to aggregate against. Must be specified if ``metric_agg_type`` is set to ``percentiles``. See https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-metrics-percentile-aggregation.html for more information. + Optional: ``query_key``: Group metric calculations by this field. For each unique value of the ``query_key`` field, the metric will be calculated and evaluated separately against the threshold(s). 
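For instance, to evaluate the average CPU usage of each host separately, a rule might combine these options as follows (the field names and threshold are illustrative)::

    type: metric_aggregation
    buffer_time:
      hours: 1
    metric_agg_key: system.cpu.total.norm.pct
    metric_agg_type: avg
    query_key: beat.hostname
    max_threshold: 0.9
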
+``metric_agg_script``: A `Painless` formatted script describing how to calculate your metric on-the-fly:: + + metric_agg_key: myScriptedMetric + metric_agg_script: + script: doc['field1'].value * doc['field2'].value + ``min_doc_count``: The minimum number of events in the current window needed for an alert to trigger. Used in conjunction with ``query_key``, this will only consider terms which in their last ``buffer_time`` had at least ``min_doc_count`` records. Default 1. @@ -1189,10 +1373,14 @@ multiple of ``bucket_interval``. (Or ``run_every`` if ``use_run_every_query_size ``sync_bucket_interval``: This only has an effect if ``bucket_interval`` is present. If true it will sync the start and end times of the metric calculation window to the keys (timestamps) of the underlying date_histogram buckets. Because of the way elasticsearch calculates date_histogram bucket keys these usually round evenly to nearest minute, hour, day etc (depending on the bucket size). By default the bucket keys are offset to -allign with the time elastalert runs, (This both avoid calculations on partial data, and ensures the very latest documents are included). +align with the time ElastAlert 2 runs (this both avoids calculations on partial data and ensures the very latest documents are included). See: https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-bucket-datehistogram-aggregation.html#_offset for a more comprehensive explaination. +``metric_format_string``: An optional format string applied to the aggregated metric value in the alert match text and match_body. This adds a 'metric_{metric_agg_key}_formatted' value to the match_body in addition to the raw, unformatted 'metric_{metric_agg_key}' value so that you can use the values for ``alert_subject_args`` and ``alert_text_args``. Must be a valid Python format string. Both str.format() and %-format syntax work. For example, "{:.2%}" will format '0.966666667' to '96.67%', and "%.2f" will format '0.966666667' to '0.97'. See: https://docs.python.org/3.4/library/string.html#format-specification-mini-language + + Spike Aggregation ~~~~~~~~~~~~~~~~~~ @@ -1205,8 +1393,7 @@ This rule requires: ``metric_agg_key``: This is the name of the field over which the metric value will be calculated. The underlying type of this field must be supported by the specified aggregation type. If using a scripted field via ``metric_agg_script``, this is the name for your scripted field -``metric_agg_type``: The type of metric aggregation to perform on the ``metric_agg_key`` field. This must be one of 'min', 'max', 'avg', -'sum', 'cardinality', 'value_count'. +``metric_agg_type``: The type of metric aggregation to perform on the ``metric_agg_key`` field. This must be one of 'min', 'max', 'avg', 'sum', 'cardinality', 'value_count', 'percentiles'. Note, if `percentiles` is used, then ``percentile_range`` must also be specified. ``spike_height``: The ratio of the metric value in the last ``timeframe`` to the previous ``timeframe`` that when hit will trigger an alert. @@ -1219,6 +1406,8 @@ window will span from present to one hour ago, and the 'reference' window will s will not be active until the time elapsed from the first event is at least two timeframes. This is to prevent an alert being triggered before a baseline rate has been established. This can be overridden using ``alert_on_new_data``. +``percentile_range``: An integer specifying the percentage value to aggregate against. Must be specified if ``metric_agg_type`` is set to ``percentiles``.
See https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-metrics-percentile-aggregation.html for more information. + Optional: ``query_key``: Group metric calculations by this field. For each unique value of the ``query_key`` field, the metric will be calculated and @@ -1252,9 +1441,7 @@ This rule requires: ``match_bucket_filter``: ES filter DSL. This defines a filter for the match bucket, which should match a subset of the documents returned by the main query filter. -``doc_type``: Specify the ``_type`` of document to search for. - -This rule also requires at least one of the two following options: +This rule also requires at least one of the two following options: ``min_percentage``: If the percentage of matching documents is less than this number, an alert will be triggered. @@ -1273,8 +1460,7 @@ evaluated separately against the threshold(s). ``sync_bucket_interval``: See ``sync_bucket_interval`` in Metric Aggregation rule -``percentage_format_string``: An optional format string to apply to the percentage value in the alert match text. Must be a valid python format string. -For example, "%.2f" will round it to 2 decimal places. +``percentage_format_string``: An optional format string applied to the percentage value in the alert match text and match_body. This adds a 'percentage_formatted' value to the match_body in addition to the raw, unformatted 'percentage' value so that you can use the values for ``alert_subject_args`` and ``alert_text_args``. Must be a valid Python format string. Both str.format() and %-format syntax work. For example, both "{:.2f}" and "%.2f" will format '96.6666667' to '96.67'. See: https://docs.python.org/3.4/library/string.html#format-specification-mini-language ``min_denominator``: Minimum number of documents on which percentage calculation will apply. Default is 0. @@ -1285,7 +1471,7 @@ Alerts ====== Each rule may have any number of alerts attached to it. Alerts are subclasses of ``Alerter`` and are passed -a dictionary, or list of dictionaries, from ElastAlert which contain relevant information. They are configured +a dictionary, or list of dictionaries, from ElastAlert 2 which contain relevant information. They are configured in the rule configuration file similarly to rule types. To set the alerts for a rule, set the ``alert`` option to the name of the alert, or a list of the names of alerts: .. code-block:: yaml or .. code-block:: yaml alert: - - email - - jira + - alerta + - alertmanager + - chatwork + - command + - datadog + - debug + - dingtalk + - discord + - email + - exotel + - gitter + - googlechat + - gelf + - hivealerter + - jira + - linenotify + - mattermost + - ms_teams + - opsgenie + - pagerduty + - pagertree + - post + - post2 + - rocketchat + - servicenow + - ses + - slack + - sns + - stomp + - telegram + - tencent_sms + - twilio + - victorops + - zabbix Options for each alerter can either defined at the top level of the YAML file, or nested within the alert name, allowing for different settings for multiple of the same alerter. For example, consider sending multiple emails, but with different 'To' and 'From' fields: @@ -1328,7 +1546,7 @@ for each alerter. Alert Subject ~~~~~~~~~~~~~ -E-mail subjects, JIRA issue summaries, PagerDuty alerts, or any alerter that has a "subject" can be customized by adding an ``alert_subject`` +E-mail subjects, Jira issue summaries, PagerDuty alerts, or any alerter that has a "subject" can be customized by adding an ``alert_subject`` that contains a custom summary.
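In its simplest form this can be a static string, for example::

    alert_subject: "Production error rate exceeded threshold"
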
It can be further formatted using standard Python formatting syntax:: @@ -1361,9 +1579,30 @@ There are several ways to format the body text of the various types of events. I top_counts = top_counts_header, LF, top_counts_value field_values = Field, ": ", Value -Similarly to ``alert_subject``, ``alert_text`` can be further formatted using standard Python formatting syntax. +Similarly to ``alert_subject``, ``alert_text`` can be further formatted using Jinja2 Templates or Standard Python Formatting Syntax + +1. Jinja Template + +By setting ``alert_text_type: alert_text_jinja`` you can use jinja2 templates in ``alert_text`` and ``alert_subject``. :: + + alert_text_type: alert_text_jinja + + alert_text: | + Alert triggered! *({{num_hits}} Matches!)* + Something happened with {{username}} ({{email}}) + {{description|truncate}} + +Top fields are accessible via `{{field_name}}` or `{{_data['field_name']}}`, `_data` is useful when accessing *fields with dots in their keys*, as Jinja treat dot as a nested field. +If `_data` conflicts with your top level data, use ``jinja_root_name`` to change its name. + +2. Standard Python Formatting Syntax + The field names whose values will be used as the arguments can be passed with ``alert_text_args`` or ``alert_text_kw``. -You may also refer to any top-level rule property in the ``alert_subject_args``, ``alert_text_args``, ``alert_missing_value``, and ``alert_text_kw fields``. However, if the matched document has a key with the same name, that will take preference over the rule property. +You may also refer to any top-level rule property in the ``alert_subject_args``, ``alert_text_args``, ``alert_missing_value``, and ``alert_text_kw fields``. However, if the matched document has a key with the same name, that will take preference over the rule property. :: + + alert_text: "Something happened with {0} at {1}" + alert_text_type: alert_text_only + alert_text_args: ["username", "@timestamp"] By default:: @@ -1383,6 +1622,14 @@ With ``alert_text_type: alert_text_only``:: alert_text + +With ``alert_text_type: alert_text_jinja``:: + + body = rule_name + + alert_text + + With ``alert_text_type: exclude_fields``:: body = rule_name @@ -1407,394 +1654,1005 @@ come from an individual event, usually the one which triggers the alert. When using ``alert_text_args``, you can access nested fields and index into arrays. For example, if your match was ``{"data": {"ips": ["127.0.0.1", "12.34.56.78"]}}``, then by using ``"data.ips[1]"`` in ``alert_text_args``, it would replace value with ``"12.34.56.78"``. This can go arbitrarily deep into fields and will still work on keys that contain dots themselves. -Command +Alerter ~~~~~~~ -The command alert allows you to execute an arbitrary command and pass arguments or stdin from the match. Arguments to the command can use -Python format string syntax to access parts of the match. The alerter will open a subprocess and optionally pass the match, or matches -in the case of an aggregated alert, as a JSON array, to the stdin of the process. +For all Alerter subclasses, you may reference values from a top-level rule property in your Alerter fields by referring to the property name surrounded by dollar signs. This can be useful when you have rule-level properties that you would like to reference many times in your alert. For example: -This alert requires one option: +Example usage:: -``command``: A list of arguments to execute or a string to execute. If in list format, the first argument is the name of the program to execute. 
If passed a -string, the command is executed through the shell. + jira_priority: $priority$ + jira_alert_owner: $owner$ -Strings can be formatted using the old-style format (``%``) or the new-style format (``.format()``). When the old-style format is used, fields are accessed -using ``%(field_name)s``, or ``%(field.subfield)s``. When the new-style format is used, fields are accessed using ``{field_name}``. New-style formatting allows accessing nested -fields (e.g., ``{field_1[subfield]}``). +Alerta +~~~~~~ -In an aggregated alert, these fields come from the first match. +Alerta alerter will post an alert in the Alerta server instance through the alert API endpoint. +See https://docs.alerta.io/en/latest/api/alert.html for more details on the Alerta JSON format. -Optional: +For Alerta 5.0 -``pipe_match_json``: If true, the match will be converted to JSON and passed to stdin of the command. Note that this will cause ElastAlert to block -until the command exits or sends an EOF to stdout. +Required: -``pipe_alert_text``: If true, the standard alert body text will be passed to stdin of the command. Note that this will cause ElastAlert to block -until the command exits or sends an EOF to stdout. It cannot be used at the same time as ``pipe_match_json``. +``alerta_api_url``: API server URL. -Example usage using old-style format:: +Optional: - alert: - - command - command: ["/bin/send_alert", "--username", "%(username)s"] +``alerta_api_key``: This is the api key for alerta server, sent in an ``Authorization`` HTTP header. If not defined, no Authorization header is sent. -.. warning:: +``alerta_use_qk_as_resource``: If true and query_key is present, this will override ``alerta_resource`` field with the ``query_key value`` (Can be useful if ``query_key`` is a hostname). - Executing commmands with untrusted data can make it vulnerable to shell injection! If you use formatted data in - your command, it is highly recommended that you use a args list format instead of a shell string. +``alerta_use_match_timestamp``: If true, it will use the timestamp of the first match as the ``createTime`` of the alert. otherwise, the current server time is used. -Example usage using new-style format:: +``alerta_api_skip_ssl``: Defaults to False. - alert: - - command - command: ["/bin/send_alert", "--username", "{match[username]}"] +``alert_missing_value``: Text to replace any match field not found when formating strings. Defaults to ````. +The following options dictate the values of the API JSON payload: -Email -~~~~~ +``alerta_severity``: Defaults to "warning". -This alert will send an email. It connects to an smtp server located at ``smtp_host``, or localhost by default. -If available, it will use STARTTLS. +``alerta_timeout``: Defaults 84600 (1 Day). -This alert requires one additional option: +``alerta_type``: Defaults to "elastalert". -``email``: An address or list of addresses to sent the alert to. +The following options use Python-like string syntax ``{}`` or ``%()s`` to access parts of the match, similar to the CommandAlerter. Ie: "Alert for {clientip}". +If the referenced key is not found in the match, it is replaced by the text indicated by the option ``alert_missing_value``. -Optional: +``alerta_resource``: Defaults to "elastalert". -``email_from_field``: Use a field from the document that triggered the alert as the recipient. If the field cannot be found, -the ``email`` value will be used as a default. 
Note that this field will not be available in every rule type, for example, if -you have ``use_count_query`` or if it's ``type: flatline``. You can optionally add a domain suffix to the field to generate the -address using ``email_add_domain``. It can be a single recipient or list of recipients. For example, with the following settings:: +``alerta_service``: Defaults to "elastalert". - email_from_field: "data.user" - email_add_domain: "@example.com" +``alerta_origin``: Defaults to "elastalert". -and a match ``{"@timestamp": "2017", "data": {"foo": "bar", "user": "qlo"}}`` +``alerta_environment``: Defaults to "Production". -an email would be sent to ``qlo@example.com`` +``alerta_group``: Defaults to "". -``smtp_host``: The SMTP host to use, defaults to localhost. +``alerta_correlate``: Defaults to an empty list. -``smtp_port``: The port to use. Default is 25. +``alerta_tags``: Defaults to an empty list. -``smtp_ssl``: Connect the SMTP host using TLS, defaults to ``false``. If ``smtp_ssl`` is not used, ElastAlert will still attempt -STARTTLS. +``alerta_event``: Defaults to the rule's name. -``smtp_auth_file``: The path to a file which contains SMTP authentication credentials. The path can be either absolute or relative -to the given rule. It should be YAML formatted and contain two fields, ``user`` and ``password``. If this is not present, -no authentication will be attempted. +``alerta_text``: Defaults to the rule's text according to its type. -``smtp_cert_file``: Connect the SMTP host using the given path to a TLS certificate file, default to ``None``. +``alerta_value``: Defaults to "". -``smtp_key_file``: Connect the SMTP host using the given path to a TLS key file, default to ``None``. +The ``attributes`` dictionary is built by joining the lists from ``alerta_attributes_keys`` and ``alerta_attributes_values``, considered in order. -``email_reply_to``: This sets the Reply-To header in the email. By default, the from address is ElastAlert@ and the domain will be set -by the smtp server. -``from_addr``: This sets the From header in the email. By default, the from address is ElastAlert@ and the domain will be set -by the smtp server. +Example usage using old-style format:: -``cc``: This adds the CC emails to the list of recipients. By default, this is left empty. + alert: + - alerta + alerta_api_url: "http://youralertahost/api/alert" + alerta_attributes_keys: ["hostname", "TimestampEvent", "senderIP" ] + alerta_attributes_values: ["%(key)s", "%(logdate)s", "%(sender_ip)s" ] + alerta_correlate: ["ProbeUP","ProbeDOWN"] + alerta_event: "ProbeUP" + alerta_text: "Probe %(hostname)s is UP at %(logdate)s GMT" + alerta_value: "UP" -``bcc``: This adds the BCC emails to the list of recipients but does not show up in the email message. By default, this is left empty. +Example usage using new-style format:: -``email_format``: If set to ``html``, the email's MIME type will be set to HTML, and HTML content should correctly render. If you use this, -you need to put your own HTML into ``alert_text`` and use ``alert_text_type: alert_text_only``. + alert: + - alerta + alerta_attributes_values: ["{key}", "{logdate}", "{sender_ip}" ] + alerta_text: "Probe {hostname} is UP at {logdate} GMT" -Jira -~~~~ +Alertmanager +~~~~~~~~~~~~ -The JIRA alerter will open a ticket on jira whenever an alert is triggered. You must have a service account for ElastAlert to connect with. -The credentials of the service account are loaded from a separate file. 
The ticket number will be written to the alert pipeline, and if it -is followed by an email alerter, a link will be included in the email. +This alert type will send alerts to Alertmanager postAlerts. ``alert_subject`` and ``alert_text`` are passed as the annotations labeled ``summary`` and ``description`` accordingly. The labels can be changed. +See https://prometheus.io/docs/alerting/clients/ for more details about the Alertmanager alert format. -This alert requires four additional options: +Required: -``jira_server``: The hostname of the JIRA server. +``alertmanager_hosts``: The list of hosts pointing to the Alertmanager. -``jira_project``: The project to open the ticket under. +Optional: -``jira_issuetype``: The type of issue that the ticket will be filed as. Note that this is case sensitive. +``alertmanager_api_version``: Defaults to `v1`. Set to `v2` to enable the Alertmanager V2 API postAlerts. -``jira_account_file``: The path to the file which contains JIRA account credentials. +``alertmanager_alertname``: ``alertname`` is the only required label. Defaults to using the rule name of the alert. -For an example JIRA account file, see ``example_rules/jira_acct.yaml``. The account file is also yaml formatted and must contain two fields: +``alertmanager_labels``: Key:value pairs of arbitrary labels to be attached to every alert. Keys should match the regular expression ``^[a-zA-Z_][a-zA-Z0-9_]*$``. -``user``: The username. +``alertmanager_annotations``: Key:value pairs of arbitrary annotations to be attached to every alert. Keys should match the regular expression ``^[a-zA-Z_][a-zA-Z0-9_]*$``. -``password``: The password. +``alertmanager_fields``: Key:value pairs of labels and corresponding match fields. When using ``alertmanager_fields`` you can access nested fields and index into arrays the same way as with ``alert_text_args``. Keys should match the regular expression ``^[a-zA-Z_][a-zA-Z0-9_]*$``. This dictionary will be merged with the ``alertmanager_labels``. -Optional: +``alertmanager_alert_subject_labelname``: Rename the annotations' label name for ``alert_subject``. Default is ``summary``. -``jira_component``: The name of the component or components to set the ticket to. This can be a single string or a list of strings. This is provided for backwards compatibility and will eventually be deprecated. It is preferable to use the plural ``jira_components`` instead. +``alertmanager_alert_text_labelname``: Rename the annotations' label name for ``alert_text``. Default is ``description``. -``jira_components``: The name of the component or components to set the ticket to. This can be a single string or a list of strings. +``alertmanager_proxy``: By default ElastAlert 2 will not use a network proxy to send notifications to Alertmanager. Set this option using ``hostname:port`` if you need to use a proxy. only supports https. -``jira_description``: Similar to ``alert_text``, this text is prepended to the JIRA description. +``alertmanager_ca_certs``: Set this option to ``True`` or a path to a CA cert bundle or directory (eg: ``/etc/ssl/certs/ca-certificates.crt``) to validate the SSL certificate. -``jira_label``: The label or labels to add to the JIRA ticket. This can be a single string or a list of strings. This is provided for backwards compatibility and will eventually be deprecated. It is preferable to use the plural ``jira_labels`` instead. +``alertmanager_ignore_ssl_errors``: By default ElastAlert 2 will verify SSL certificate. 
Set this option to ``True`` if you want to ignore SSL errors. -``jira_labels``: The label or labels to add to the JIRA ticket. This can be a single string or a list of strings. +``alertmanager_timeout``: You can specify a timeout value, in seconds, for making communicating with Alertmanager. The default is 10. If a timeout occurs, the alert will be retried next time ElastAlert 2 cycles. -``jira_priority``: The index of the priority to set the issue to. In the JIRA dropdown for priorities, 0 would represent the first priority, -1 the 2nd, etc. +``alertmanager_basic_auth_login``: Basic authentication username. -``jira_watchers``: A list of user names to add as watchers on a JIRA ticket. This can be a single string or a list of strings. +``alertmanager_basic_auth_password``: Basic authentication password. -``jira_bump_tickets``: If true, ElastAlert search for existing tickets newer than ``jira_max_age`` and comment on the ticket with -information about the alert instead of opening another ticket. ElastAlert finds the existing ticket by searching by summary. If the -summary has changed or contains special characters, it may fail to find the ticket. If you are using a custom ``alert_subject``, -the two summaries must be exact matches, except by setting ``jira_ignore_in_title``, you can ignore the value of a field when searching. -For example, if the custom subject is "foo occured at bar", and "foo" is the value field X in the match, you can set ``jira_ignore_in_title`` -to "X" and it will only bump tickets with "bar" in the subject. Defaults to false. +Example usage:: -``jira_ignore_in_title``: ElastAlert will attempt to remove the value for this field from the JIRA subject when searching for tickets to bump. -See ``jira_bump_tickets`` description above for an example. + alert: + - "alertmanager" + alertmanager_hosts: + - "http://alertmanager:9093" + alertmanager_alertname: "Title" + alertmanager_annotations: + severity: "error" + alertmanager_labels: + source: "elastalert" + alertmanager_fields: + msg: "message" + log: "@log_name" -``jira_max_age``: If ``jira_bump_tickets`` is true, the maximum age of a ticket, in days, such that ElastAlert will comment on the ticket -instead of opening a new one. Default is 30 days. +Additional explanation: -``jira_bump_not_in_statuses``: If ``jira_bump_tickets`` is true, a list of statuses the ticket must **not** be in for ElastAlert to comment on -the ticket instead of opening a new one. For example, to prevent comments being added to resolved or closed tickets, set this to 'Resolved' -and 'Closed'. This option should not be set if the ``jira_bump_in_statuses`` option is set. +ElastAlert 2 can send two categories of data to Alertmanager: labels and annotations -Example usage:: +Labels are sent as either static values or a single field value lookup. So if you specify the following:: - jira_bump_not_in_statuses: - - Resolved - - Closed + alertmanager_labels: + someStaticLabel: "Verify this issue" + anotherStaticLabel: "someone@somewhere.invalid" -``jira_bump_in_statuses``: If ``jira_bump_tickets`` is true, a list of statuses the ticket *must be in* for ElastAlert to comment on -the ticket instead of opening a new one. For example, to only comment on 'Open' tickets -- and thus not 'In Progress', 'Analyzing', -'Resolved', etc. tickets -- set this to 'Open'. This option should not be set if the ``jira_bump_not_in_statuses`` option is set. 
+ alertmanager_fields: + myLabelName: someElasticFieldName + anotherLabel: anotherElasticFieldName -Example usage:: +The first labels will be static, but the two field will be replaced with the corresponding field values from the Elastic record that triggered the alert, and then merged back into the list of labels sent to Alertmanager. - jira_bump_in_statuses: - - Open +Annotations are slightly different. You can have many static (hardcoded) annotations and only two annotations that will be formatted according to the `alert_text` and `alert_subject` [documentation](https://elastalert2.readthedocs.io/en/latest/ruletypes.html#alert-subject). -``jira_bump_only``: Only update if a ticket is found to bump. This skips ticket creation for rules where you only want to affect existing tickets. +For example:: + + alertmanager_annotations: + someStaticAnnotation: "This is a static annotation value, it never changes" + severity: P3 + + alertmanager_alert_subject_labelname: myCustomAnnotationName1 + alertmanager_alert_text_labelname: myCustomAnnotationName2 + + alert_subject: "Host {0} has status {1}" + alert_subject_args: + - http_host + - status + + alert_text: "URL {0} has {1} matches" + alert_text_type: alert_text_only + alert_text_args: + - uri + - num_matches + +AWS SES (Amazon Simple Email Service) +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +The AWS SES alerter is similar to Email alerter but uses AWS SES to send emails. The AWS SES alerter can use AWS credentials +from the rule yaml, standard AWS config files or environment variables. + +AWS SES requires one option: + +``ses_email``: An address or list of addresses to sent the alert to. + +single address example:: + + ses_email: "one@domain" + +or + +multiple address example:: + + ses_email: + - "one@domain" + - "two@domain" + +``ses_from_addr``: This sets the From header in the email. + +Optional: + +``ses_aws_access_key``: An access key to connect to AWS SES with. + +``ses_aws_secret_key``: The secret key associated with the access key. + +``ses_aws_region``: The AWS region in which the AWS SES resource is located. Default is us-east-1 + +``ses_aws_profile``: The AWS profile to use. If none specified, the default will be used. + +``ses_email_reply_to``: This sets the Reply-To header in the email. + +``ses_cc``: This adds the CC emails to the list of recipients. By default, this is left empty. + +single address example:: + + ses_cc: "one@domain" + +or + +multiple address example:: + + ses_cc: + - "one@domain" + - "two@domain" + +``ses_bcc``: This adds the BCC emails to the list of recipients but does not show up in the email message. By default, this is left empty. 
+ +single address example:: + + ses_bcc: "one@domain" + +or + +multiple address example:: + + ses_bcc: + - "one@domain" + - "two@domain" + +Example When not using aws_profile usage:: + + alert: + - "ses" + ses_aws_access_key_id: "XXXXXXXXXXXXXXXXXX'" + ses_aws_secret_access_key: "YYYYYYYYYYYYYYYYYYYY" + ses_aws_region: "us-east-1" + ses_from_addr: "xxxx1@xxx.com" + ses_email: "xxxx1@xxx.com" + +Example When to use aws_profile usage:: + + # Create ~/.aws/credentials + + [default] + aws_access_key_id = xxxxxxxxxxxxxxxxxxxx + aws_secret_access_key = yyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy + + # Create ~/.aws/config + + [default] + region = us-east-1 + + # alert rule setting + + alert: + - "ses" + ses_aws_profile: "default" + ses_from_addr: "xxxx1@xxx.com" + ses_email: "xxxx1@xxx.com" + +AWS SNS (Amazon Simple Notification Service) +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +The AWS SNS alerter will send an AWS SNS notification. The body of the notification is formatted the same as with other alerters. +The AWS SNS alerter uses boto3 and can use credentials in the rule yaml, in a standard AWS credential and config files, or +via environment variables. See http://docs.aws.amazon.com/cli/latest/userguide/cli-chap-getting-started.html for details. + +AWS SNS requires one option: + +``sns_topic_arn``: The SNS topic's ARN. For example, ``arn:aws:sns:us-east-1:123456789:somesnstopic`` + +Optional: + +``sns_aws_access_key_id``: An access key to connect to SNS with. + +``sns_aws_secret_access_key``: The secret key associated with the access key. + +``sns_aws_region``: The AWS region in which the SNS resource is located. Default is us-east-1 + +``sns_aws_profile``: The AWS profile to use. If none specified, the default will be used. + +Example When not using aws_profile usage:: + + alert: + - sns + sns_topic_arn: 'arn:aws:sns:us-east-1:123456789:somesnstopic' + sns_aws_access_key_id: 'XXXXXXXXXXXXXXXXXX'' + sns_aws_secret_access_key: 'YYYYYYYYYYYYYYYYYYYY' + sns_aws_region: 'us-east-1' # You must nest aws_region within your alert configuration so it is not used to sign AWS requests. + +Example When to use aws_profile usage:: + + # Create ~/.aws/credentials + + [default] + aws_access_key_id = xxxxxxxxxxxxxxxxxxxx + aws_secret_access_key = yyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy + + # Create ~/.aws/config + + [default] + region = us-east-1 + + # alert rule setting + + alert: + - sns + sns_topic_arn: 'arn:aws:sns:us-east-1:123456789:somesnstopic' + sns_aws_profile: 'default' + +Chatwork +~~~~~~~~ + +Chatwork will send notification to a Chatwork application. The body of the notification is formatted the same as with other alerters. + +Required: + +``chatwork_apikey``: Chatwork API KEY. + +``chatwork_room_id``: The ID of the room you are talking to in Chatwork. How to find the room ID is the part of the number after "rid" at the end of the URL of the browser. + +``chatwork_proxy``: By default ElastAlert 2 will not use a network proxy to send notifications to Chatwork. Set this option using ``hostname:port`` if you need to use a proxy. only supports https. + +``chatwork_proxy_login``: The Chatwork proxy auth username. + +``chatwork_proxy_pass``: The Chatwork proxy auth password. + +Example usage:: + + alert: + - "chatwork" + chatwork_apikey: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + chatwork_room_id: "xxxxxxxxx" + +Command +~~~~~~~ + +The command alert allows you to execute an arbitrary command and pass arguments or stdin from the match. 
Arguments to the command can use +Python format string syntax to access parts of the match. The alerter will open a subprocess and optionally pass the match, or matches +in the case of an aggregated alert, as a JSON array, to the stdin of the process. + +This alert requires one option: + +``command``: A list of arguments to execute or a string to execute. If in list format, the first argument is the name of the program to execute. If passed a +string, the command is executed through the shell. + +Strings can be formatted using the old-style format (``%``) or the new-style format (``.format()``). When the old-style format is used, fields are accessed +using ``%(field_name)s``, or ``%(field.subfield)s``. When the new-style format is used, fields are accessed using ``{field_name}``. New-style formatting allows accessing nested +fields (e.g., ``{field_1[subfield]}``). + +In an aggregated alert, these fields come from the first match. + +Optional: + +``pipe_match_json``: If true, the match will be converted to JSON and passed to stdin of the command. Note that this will cause ElastAlert 2 to block +until the command exits or sends an EOF to stdout. + +``pipe_alert_text``: If true, the standard alert body text will be passed to stdin of the command. Note that this will cause ElastAlert 2 to block +until the command exits or sends an EOF to stdout. It cannot be used at the same time as ``pipe_match_json``. + +``fail_on_non_zero_exit``: By default this is ``False``. When set to ``True``, the alert raises an exception if the command returns a non-zero exit status, allowing you to monitor when commands fail to run. + +Example usage using old-style format:: + + alert: + - command + command: ["/bin/send_alert", "--username", "%(username)s"] + +.. warning:: + + Executing commands with untrusted data can make your system vulnerable to shell injection! If you use formatted data in + your command, it is highly recommended that you use an args list format instead of a shell string. + +Example usage using new-style format:: + + alert: + - command + command: ["/bin/send_alert", "--username", "{match[username]}"] + +Datadog +~~~~~~~ + +This alert will create a `Datadog Event`_. Events are limited to 4000 characters. If an event is sent that contains +a message that is longer than 4000 characters, only its first 4000 characters will be displayed. + +This alert requires two additional options: + +``datadog_api_key``: `Datadog API key`_ + +``datadog_app_key``: `Datadog application key`_ + +Example usage:: + + alert: + - "datadog" + datadog_api_key: "Datadog API Key" + datadog_app_key: "Datadog APP Key" + +.. _`Datadog Event`: https://docs.datadoghq.com/events/ +.. _`Datadog API key`: https://docs.datadoghq.com/account_management/api-app-keys/#api-keys +.. _`Datadog application key`: https://docs.datadoghq.com/account_management/api-app-keys/#application-keys + +Debug +~~~~~ + +The debug alerter will log the alert information using the Python logger at the info level. It is logged into a Python Logger object with the name ``elastalert`` that can be easily accessed using the ``getLogger`` command. + +Dingtalk +~~~~~~~~ + +Dingtalk will send a notification to a Dingtalk application. The body of the notification is formatted the same as with other alerters. + +Required: + +``dingtalk_access_token``: Dingtalk access token. + +``dingtalk_msgtype``: The Dingtalk message type. Defaults to ``text``. Other options are ``markdown``, ``single_action_card`` and ``action_card``. + +Required when ``dingtalk_msgtype`` is ``single_action_card``: + +``dingtalk_single_title``: The title of a single button.
+ +``dingtalk_single_url``: Jump link for a single button. + +Required when ``dingtalk_msgtype`` is ``action_card``: + +``dingtalk_btns``: A list of buttons, each with a ``title`` and an ``actionURL``. + +Optional when ``dingtalk_msgtype`` is ``action_card``: + +``dingtalk_btn_orientation``: "0": Buttons are arranged vertically. "1": Buttons are arranged horizontally. + +Example msgtype : text:: + + alert: + - "dingtalk" + dingtalk_access_token: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + dingtalk_msgtype: "text" + + +Example msgtype : markdown:: + + alert: + - "dingtalk" + dingtalk_access_token: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + dingtalk_msgtype: "markdown" + + +Example msgtype : single_action_card:: + + alert: + - "dingtalk" + dingtalk_access_token: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + dingtalk_msgtype: "single_action_card" + dingtalk_single_title: "test3" + dingtalk_single_url: "https://xxxx.xxx" + + +Example msgtype : action_card:: + + alert: + - "dingtalk" + dingtalk_access_token: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + dingtalk_msgtype: "action_card" + dingtalk_btn_orientation: "0" + dingtalk_btns: [{"title": "a", "actionURL": "https://xxxx1.xxx"}, {"title": "b", "actionURL": "https://xxxx2.xxx"}] + +Optional: + +``dingtalk_proxy``: By default ElastAlert 2 will not use a network proxy to send notifications to Dingtalk. Set this option using ``hostname:port`` if you need to use a proxy. only supports https. + +``dingtalk_proxy_login``: The Dingtalk proxy auth username. + +``dingtalk_proxy_pass``: The Dingtalk proxy auth password. + +Discord +~~~~~~~ + +Discord will send a notification to a Discord application. The body of the notification is formatted the same as with other alerters. + +Required: + +``discord_webhook_url``: The webhook URL. + +Optional: + +``discord_emoji_title``: By default ElastAlert 2 will use the ``:warning:`` emoji when posting to the channel. You can use a different emoji per ElastAlert 2 rule. Any Apple emoji can be used, see http://emojipedia.org/apple/ . If discord_embed_icon_url parameter is provided, emoji is ignored. + +``discord_proxy``: By default ElastAlert 2 will not use a network proxy to send notifications to Discord. Set this option using ``hostname:port`` if you need to use a proxy. only supports https. + +``discord_proxy_login``: The Discord proxy auth username. + +``discord_proxy_password``: The Discord proxy auth password. + +``discord_embed_color``: Embed color. Defaults to ``0xffffff``. + +``discord_embed_footer``: Embed footer text. + +``discord_embed_icon_url``: You can provide an icon_url to use a custom image. Provide the absolute address of the picture. + +Example usage:: + + alert: + - "discord" + discord_webhook_url: "Your discord webhook url" + discord_emoji_title: ":lock:" + discord_embed_color: 0xE24D42 + discord_embed_footer: "Message sent from your computer" + discord_embed_icon_url: "https://humancoders-formations.s3.amazonaws.com/uploads/course/logo/38/thumb_bigger_formation-elasticsearch.png" + +Email +~~~~~ + +This alert will send an email. It connects to an SMTP server located at ``smtp_host``, or localhost by default. +If available, it will use STARTTLS. + +This alert requires one additional option: + +``email``: An address or list of addresses to send the alert to. + +single address example:: + + email: "one@domain" + +or + +multiple address example:: + + email: + - "one@domain" + - "two@domain" + +Optional: + +``email_from_field``: Use a field from the document that triggered the alert as the recipient. If the field cannot be found, +the ``email`` value will be used as a default.
Note that this field will not be available in every rule type, for example, if +you have ``use_count_query`` or if it's ``type: flatline``. You can optionally add a domain suffix to the field to generate the +address using ``email_add_domain``. It can be a single recipient or list of recipients. For example, with the following settings:: + + email_from_field: "data.user" + email_add_domain: "@example.com" + +and a match ``{"@timestamp": "2017", "data": {"foo": "bar", "user": "qlo"}}`` + +an email would be sent to ``qlo@example.com`` + +``smtp_host``: The SMTP host to use, defaults to localhost. + +``smtp_port``: The port to use. Defaults to port 25 when SSL is not used, or 465 when SSL is used. + +``smtp_ssl``: Connect the SMTP host using TLS, defaults to ``false``. If ``smtp_ssl`` is not used, ElastAlert 2 will still attempt +STARTTLS. + +``smtp_auth_file``: The path to a file which contains SMTP authentication credentials. The path can be either absolute or relative +to the given rule. It should be YAML formatted and contain two fields, ``user`` and ``password``. If this is not present, +no authentication will be attempted. + +``smtp_cert_file``: Connect the SMTP host using the given path to a TLS certificate file, default to ``None``. + +``smtp_key_file``: Connect the SMTP host using the given path to a TLS key file, default to ``None``. + +``email_reply_to``: This sets the Reply-To header in the email. By default, the from address is ElastAlert@ and the domain will be set +by the smtp server. + +``from_addr``: This sets the From header in the email. By default, the from address is ElastAlert@ and the domain will be set +by the smtp server. + +``cc``: This adds the CC emails to the list of recipients. By default, this is left empty. + +single address example:: + + cc: "one@domain" + +or + +multiple address example:: + + cc: + - "one@domain" + - "two@domain" + +``bcc``: This adds the BCC emails to the list of recipients but does not show up in the email message. By default, this is left empty. + +single address example:: + + bcc: "one@domain" + +or + +multiple address example:: + + bcc: + - "one@domain" + - "two@domain" + +``email_format``: If set to 'html', the email's MIME type will be set to HTML, and HTML content should correctly render. If you use this, +you need to put your own HTML into ``alert_text`` and use ``alert_text_type: alert_text_jinja`` Or ``alert_text_type: alert_text_only``. + +``assets_dir``: images dir. default to ``/tmp``. + +``email_image_keys``: mapping between images keys. + +``email_image_values``: mapping between images values + +Example assets_dir, email_image_keys, email_image_values:: + + assets_dir: "/opt/elastalert/email_images" + email_image_keys: ["img1"] + email_image_values: ["my_logo.png"] + +Exotel +~~~~~~ + +Developers in India can use the Exotel alerter, which can send an alert to a mobile phone as an SMS from your ExoPhone. The SMS will contain both the alert name and the specified message body. + +The alerter requires the following option: + +``exotel_account_sid``: The SID of your Exotel account. + +``exotel_auth_token``: The auth token associated with your Exotel account. + +Instructions for finding the SID and auth token associated with your account can be found `on the Exotel website +`_. + +``exotel_to_number``: The phone number to which you would like to send the alert. + +``exotel_from_number``: The ExoPhone number from which the alert will be sent. 
+ +The alerter has one optional argument: + +``exotel_message_body``: The contents of the SMS. If you don't specify this argument, only the rule name is sent. + +Example usage:: + + alert: + - "exotel" + exotel_account_sid: "Exotel Account SID" + exotel_auth_token: "Exotel Auth token" + exotel_to_number: "Exotel to number" + exotel_from_number: "Exotel from number" + +Gitter +~~~~~~ + +Gitter alerter will send a notification to a predefined Gitter channel. The body of the notification is formatted the same as with other alerters. + +The alerter requires the following option: + +``gitter_webhook_url``: The webhook URL that includes your auth data and the ID of the channel (room) you want to post to. Go to the Integration Settings +of the channel https://gitter.im/ORGA/CHANNEL#integrations , click 'CUSTOM' and copy the resulting URL. + +Optional: + +``gitter_msg_level``: By default the alert will be posted with the 'error' level. You can use 'info' if you want the messages to be black instead of red. + +``gitter_proxy``: By default ElastAlert 2 will not use a network proxy to send notifications to Gitter. Set this option using ``hostname:port`` if you need to use a proxy. only supports https. + +Example usage:: + + alert: + - "gitter" + gitter_webhook_url: "Your Gitter Webhook URL" + gitter_msg_level: "error" + +GoogleChat +~~~~~~~~~~ +GoogleChat alerter will send a notification to a predefined GoogleChat channel. The body of the notification is formatted the same as with other alerters. + +The alerter requires the following options: + +``googlechat_webhook_url``: The webhook URL that includes the channel (room) you want to post to. Go to the Google Chat website https://chat.google.com and choose the channel in which you wish to receive the notifications. Select 'Configure Webhooks' to create a new webhook or to copy the URL from an existing one. You can use a list of URLs to send to multiple channels. -Example usage:: +Optional: - jira_bump_only: true +``googlechat_format``: Formatting for the notification. Can be either 'card' or 'basic' (default). -``jira_transition_to``: If ``jira_bump_tickets`` is true, Transition this ticket to the given Status when bumping. Must match the text of your JIRA implementation's Status field. +``googlechat_header_title``: Sets the text for the card header title. (Only used if format=card) -Example usage:: +``googlechat_header_subtitle``: Sets the text for the card header subtitle. (Only used if format=card) - jira_transition_to: 'Fixed' +``googlechat_header_image``: URL for the card header icon. (Only used if format=card) +``googlechat_footer_kibanalink``: URL to Kibana to include in the card footer. (Only used if format=card) +``googlechat_proxy``: By default ElastAlert 2 will not use a network proxy to send notifications to GoogleChat. Set this option using ``hostname:port`` if you need to use a proxy. only supports https. -``jira_bump_after_inactivity``: If this is set, ElastAlert will only comment on tickets that have been inactive for at least this many days. -It only applies if ``jira_bump_tickets`` is true. Default is 0 days. +Graylog GELF +~~~~~~~~~~~~ +GELF alerter will send a custom message to a Graylog GELF input (HTTP/TCP). Alert payload content you form with key-value pairs. -Arbitrary Jira fields: +The alerter requires the following options: -ElastAlert supports setting any arbitrary JIRA field that your jira issue supports. 
For example, if you had a custom field, called "Affected User", you can set it by providing that field name in ``snake_case`` prefixed with ``jira_``. These fields can contain primitive strings or arrays of strings. Note that when you create a custom field in your JIRA server, internally, the field is represented as ``customfield_1111``. In elastalert, you may refer to either the public facing name OR the internal representation. +``gelf_type``: The type of your Graylog GELF input. Either 'http' or 'tcp'. -In addition, if you would like to use a field in the alert as the value for a custom JIRA field, use the field name plus a # symbol in front. For example, if you wanted to set a custom JIRA field called "user" to the value of the field "username" from the match, you would use the following. +If 'http' is selected: -Example:: +``gelf_endpoint``: The URL of the GELF HTTP input, for example 'http://example.com/gelf'. (Only used if gelf_type=http) - jira_user: "#username" +Or, if 'tcp' is selected: + +``gelf_host``: The address of the Graylog server where the input is running. (Only used if gelf_type=tcp) + +``gelf_port``: The port configured for the input. (Only used if gelf_type=tcp) + +``gelf_payload``: The main message body, given as key:value pairs where the key is your custom field name and the value is the Elasticsearch field to take the data from. The name of the alert is written to the beginning of the message. Example usage:: - jira_arbitrary_singular_field: My Name - jira_arbitrary_multivalue_field: - - Name 1 - - Name 2 - jira_customfield_12345: My Custom Value - jira_customfield_9999: - - My Custom Value 1 - - My Custom Value 2 + alert: + - gelf + gelf_type: http + gelf_endpoint: http://example.com:12201/gelf + gelf_payload: + username: user + src_ip: source_ip -OpsGenie -~~~~~~~~ +Optional: -OpsGenie alerter will create an alert which can be used to notify Operations people of issues or log information. An OpsGenie ``API`` -integration must be created in order to acquire the necessary ``opsgenie_key`` rule variable. Currently the OpsGenieAlerter only creates -an alert, however it could be extended to update or close existing alerts. +``gelf_log_level``: Standard syslog severity level. Defaults to 5 (Notice). -It is necessary for the user to create an OpsGenie Rest HTTPS API `integration page `_ in order to create alerts. +``gelf_http_headers``: Additional headers. (Only used if gelf_type=http) -The OpsGenie alert requires one option: +``gelf_ca_cert``: Path to a custom CA certificate. -``opsgenie_key``: The randomly generated API Integration key created by OpsGenie. +``gelf_http_ignore_ssl_errors``: Ignore SSL errors. (Only used if gelf_type=http) + +``gelf_timeout``: Custom timeout. + +HTTP POST +~~~~~~~~~ + +This alert type will send results to a JSON endpoint using HTTP POST. The key names are configurable so this is compatible with almost any endpoint. By default, the JSON will contain all the items from the match, unless you specify http_post_payload, in which case it will only contain those items. + +Required: + +``http_post_url``: The URL to POST. Optional: -``opsgenie_account``: The OpsGenie account to integrate with. +``http_post_payload``: List of keys:values to use as the content of the POST. Example - ip:clientip will map the value from the clientip field of Elasticsearch to JSON key named ip. If not defined, all the Elasticsearch keys will be sent. -``opsgenie_recipients``: A list OpsGenie recipients who will be notified by the alert. -``opsgenie_recipients_args``: Map of arguments used to format opsgenie_recipients.
-``opsgenie_default_recipients``: List of default recipients to notify when the formatting of opsgenie_recipients is unsuccesful. -``opsgenie_teams``: A list of OpsGenie teams to notify (useful for schedules with escalation). -``opsgenie_teams_args``: Map of arguments used to format opsgenie_teams (useful for assigning the alerts to teams based on some data) -``opsgenie_default_teams``: List of default teams to notify when the formatting of opsgenie_teams is unsuccesful. -``opsgenie_tags``: A list of tags for this alert. +``http_post_static_payload``: Key:value pairs of static parameters to be sent, along with the Elasticsearch results. Put your authentication or other information here. -``opsgenie_message``: Set the OpsGenie message to something other than the rule name. The message can be formatted with fields from the first match e.g. "Error occurred for {app_name} at {timestamp}.". +``http_post_headers``: Key:value pairs of headers to be sent as part of the request. -``opsgenie_alias``: Set the OpsGenie alias. The alias can be formatted with fields from the first match e.g "{app_name} error". +``http_post_proxy``: URL of proxy, if required. only supports https. -``opsgenie_subject``: A string used to create the title of the OpsGenie alert. Can use Python string formatting. +``http_post_all_values``: Boolean of whether or not to include every key value pair from the match in addition to those in http_post_payload and http_post_static_payload. Defaults to True if http_post_payload is not specified, otherwise False. -``opsgenie_subject_args``: A list of fields to use to format ``opsgenie_subject`` if it contains formaters. +``http_post_timeout``: The timeout value, in seconds, for making the post. The default is 10. If a timeout occurs, the alert will be retried next time elastalert cycles. -``opsgenie_priority``: Set the OpsGenie priority level. Possible values are P1, P2, P3, P4, P5. +``http_post_ca_certs``: Set this option to ``True`` or a path to a CA cert bundle or directory (eg: ``/etc/ssl/certs/ca-certificates.crt``) to validate the SSL certificate. -``opsgenie_details``: Map of custom key/value pairs to include in the alert's details. The value can sourced from either fields in the first match, environment variables, or a constant value. +``http_post_ignore_ssl_errors``: By default ElastAlert 2 will verify SSL certificate. Set this option to ``True`` if you want to ignore SSL errors. Example usage:: - opsgenie_details: - Author: 'Bob Smith' # constant value - Environment: '$VAR' # environment variable - Message: { field: message } # field in the first match + alert: post + http_post_url: "http://example.com/api" + http_post_payload: + ip: clientip + http_post_static_payload: + apikey: abc123 + http_post_headers: + authorization: Basic 123dr3234 -SNS -~~~ +HTTP POST 2 +~~~~~~~~~~~ -The SNS alerter will send an SNS notification. The body of the notification is formatted the same as with other alerters. -The SNS alerter uses boto3 and can use credentials in the rule yaml, in a standard AWS credential and config files, or -via environment variables. See http://docs.aws.amazon.com/cli/latest/userguide/cli-chap-getting-started.html for details. +This alert type will send results to a JSON endpoint using HTTP POST. The key names are configurable so this is compatible with almost any endpoint. By default, the JSON will contain all the items from the match, unless you specify http_post_payload, in which case it will only contain those items. 
+This alert is a more flexible version of the HTTP Post alerter. -SNS requires one option: +Required: -``sns_topic_arn``: The SNS topic's ARN. For example, ``arn:aws:sns:us-east-1:123456789:somesnstopic`` +``http_post2_url``: The URL to POST. Optional: -``aws_access_key``: An access key to connect to SNS with. +``http_post2_payload``: A JSON string or list of keys:values to use for the payload of the HTTP Post. You can use {{ field }} (Jinja2 template) in the key and the value to reference any field in the matched events (works for nested ES fields and nested payload keys). If not defined, all the Elasticsearch keys will be sent. Ex: `"description_{{ my_field }}": "Type: {{ type }}\\nSubject: {{ title }}"`. When field names use dot notation or reserved characters, _data can be used to access these fields. If _data conflicts with your top level data, use jinja_root_name to change its name. -``aws_secret_key``: The secret key associated with the access key. +``http_post2_raw_fields``: List of keys:values to use as the content of the POST. Example - ip:clientip will map the value from the clientip field of Elasticsearch to JSON key named ip. This field overwrite the keys with the same name in `http_post2_payload`. -``aws_region``: The AWS region in which the SNS resource is located. Default is us-east-1 +``http_post2_headers``: A JSON string or list of keys:values to use for as headers of the HTTP Post. You can use {{ field }} (Jinja2 template) in the key and the value to reference any field in the matched events (works for nested fields). Ex: `"Authorization": "{{ user }}"`. Headers `"Content-Type": "application/json"` and `"Accept": "application/json;charset=utf-8"` are present by default, you can overwrite them if you think this is necessary. When field names use dot notation or reserved characters, _data can be used to access these fields. If _data conflicts with your top level data, use jinja_root_name to change its name. -``profile``: The AWS profile to use. If none specified, the default will be used. +``http_post2_proxy``: URL of proxy, if required. only supports https. -HipChat -~~~~~~~ +``http_post2_all_values``: Boolean of whether or not to include every key value pair from the match in addition to those in http_post2_payload and http_post2_static_payload. Defaults to True if http_post2_payload is not specified, otherwise False. -HipChat alerter will send a notification to a predefined HipChat room. The body of the notification is formatted the same as with other alerters. +``http_post2_timeout``: The timeout value, in seconds, for making the post. The default is 10. If a timeout occurs, the alert will be retried next time elastalert cycles. -The alerter requires the following two options: +``http_post2_ca_certs``: Set this option to ``True`` or a path to a CA cert bundle or directory (eg: ``/etc/ssl/certs/ca-certificates.crt``) to validate the SSL certificate. + +``http_post2_ignore_ssl_errors``: By default ElastAlert 2 will verify SSL certificate. Set this option to ``True`` if you want to ignore SSL errors. -``hipchat_auth_token``: The randomly generated notification token created by HipChat. Go to https://XXXXX.hipchat.com/account/api and use -'Create new token' section, choosing 'Send notification' in Scopes list. +.. note:: Due to how values are rendered to JSON, the http_post2_headers and http_post2_payload fields require single quotes where quotes are required for Jinja templating. This only applies when using the YAML key:value pairs. 
Any quotes can be used with the new JSON string format. See below for examples of how to properly use quotes as well as an example of the new JSON string formatting. -``hipchat_room_id``: The id associated with the HipChat room you want to send the alert to. Go to https://XXXXX.hipchat.com/rooms and choose -the room you want to post to. The room ID will be the numeric part of the URL. +Incorrect usage with double quotes:: -``hipchat_msg_color``: The color of the message background that is sent to HipChat. May be set to green, yellow or red. Default is red. + alert: post2 + http_post2_url: "http://example.com/api" + http_post2_payload: + # this will result in an error as " is escaped to \" + description: 'hello {{ _data["name"] }}' + # this will result in an error as " is escaped to \" + state: '{{ ["low","medium","high","critical"][event.severity] }}' + http_post2_headers: + authorization: Basic 123dr3234 + X-custom-type: '{{type}}' -``hipchat_domain``: The custom domain in case you have HipChat own server deployment. Default is api.hipchat.com. +Correct usage with single quotes:: -``hipchat_ignore_ssl_errors``: Ignore TLS errors (self-signed certificates, etc.). Default is false. + alert: post2 + http_post2_url: "http://example.com/api" + http_post2_payload: + description: hello {{ _data['name'] }} + state: "{{ ['low','medium','high','critical'][event.severity] }}" + http_post2_headers: + authorization: Basic 123dr3234 + X-custom-type: '{{type}}' -``hipchat_proxy``: By default ElastAlert will not use a network proxy to send notifications to HipChat. Set this option using ``hostname:port`` if you need to use a proxy. +Example usage:: -``hipchat_notify``: When set to true, triggers a hipchat bell as if it were a user. Default is true. + alert: post2 + http_post2_url: "http://example.com/api" + http_post2_payload: + description: "An event came from IP {{clientip}}" + username: "{{user.name}}" + http_post2_raw_fields: + ip: clientip + http_post2_headers: + authorization: Basic 123dr3234 + X-custom-type: {{type}} + +Example usage with json string formatting:: + + alert: post2 + jinja_root_name: _new_root + http_post2_url: "http://example.com/api" + http_post2_payload: | + { + "description": "An event came from IP {{ _new_root["client.ip"] }}", + "username": "{{ _new_root['username'] }}" + {%- for k, v in some_field.items() -%} + ,"{{ k }}": "changed_{{ v }}" + {%- endfor -%} + } + http_post2_raw_fields: + ip: clientip + http_post2_headers: | + { + "authorization": "Basic 123dr3234", + "X-custom-{{key}}": "{{type}}" + } -``hipchat_from``: When humans report to hipchat, a timestamp appears next to their name. For bots, the name is the name of the token. The from, instead of a timestamp, defaults to empty unless set, which you can do here. This is optional. +Jira +~~~~ -``hipchat_message_format``: Determines how the message is treated by HipChat and rendered inside HipChat applications -html - Message is rendered as HTML and receives no special treatment. Must be valid HTML and entities must be escaped (e.g.: '&' instead of '&'). May contain basic tags: a, b, i, strong, em, br, img, pre, code, lists, tables. -text - Message is treated just like a message sent by a user. Can include @mentions, emoticons, pastes, and auto-detected URLs (Twitter, YouTube, images, etc). -Valid values: html, text. -Defaults to 'html'. +The Jira alerter will open a ticket on Jira whenever an alert is triggered. You must have a service account for ElastAlert 2 to connect with. 
+The credentials of the service account are loaded from a separate file. Credentials can either be username and password or the Personal Access Token. +The ticket number will be written to the alert pipeline, and if it is followed by an email alerter, a link will be included in the email. -``hipchat_mentions``: When using a ``html`` message format, it's not possible to mentions specific users using the ``@user`` syntax. -In that case, you can set ``hipchat_mentions`` to a list of users which will be first mentioned using a single text message, then the normal ElastAlert message will be sent to Hipchat. -If set, it will mention the users, no matter if the original message format is set to HTML or text. -Valid values: list of strings. -Defaults to ``[]``. +This alert requires four additional options: +``jira_server``: The hostname of the Jira server. -Stride -~~~~~~~ +``jira_project``: The project to open the ticket under. -Stride alerter will send a notification to a predefined Stride room. The body of the notification is formatted the same as with other alerters. -Simple HTML such as and tags will be parsed into a format that Stride can consume. +``jira_issuetype``: The type of issue that the ticket will be filed as. Note that this is case sensitive. -The alerter requires the following two options: +``jira_account_file``: The path to the file which contains Jira account credentials. -``stride_access_token``: The randomly generated notification token created by Stride. + For an example Jira account file, see ``examples/rules/jira_acct.yaml``. The account file is a YAML formatted file. -``stride_cloud_id``: The site_id associated with the Stride site you want to send the alert to. + When using user/password authentication, or when using Jira Cloud the Jira account file must contain two fields: -``stride_conversation_id``: The conversation_id associated with the Stride conversation you want to send the alert to. + ``user``: The username to authenticate with Jira. -``stride_ignore_ssl_errors``: Ignore TLS errors (self-signed certificates, etc.). Default is false. + ``password``: The password to authenticate with Jira. Jira cloud users must specify the Jira Cloud API token for this value. -``stride_proxy``: By default ElastAlert will not use a network proxy to send notifications to Stride. Set this option using ``hostname:port`` if you need to use a proxy. + When using a Personal Access Token, such as when using a locally hosted Jira installation, the Jira account file must contain a single field: + ``apikey``: The Personal Access Token for authenticating with Jira. -MS Teams -~~~~~~~~ +Optional: -MS Teams alerter will send a notification to a predefined Microsoft Teams channel. +``jira_assignee``: Assigns an issue to a user. -The alerter requires the following options: +``jira_component``: The name of the component or components to set the ticket to. This can be a single string or a list of strings. This is provided for backwards compatibility and will eventually be deprecated. It is preferable to use the plural ``jira_components`` instead. -``ms_teams_webhook_url``: The webhook URL that includes your auth data and the ID of the channel you want to post to. Go to the Connectors -menu in your channel and configure an Incoming Webhook, then copy the resulting URL. You can use a list of URLs to send to multiple channels. +``jira_components``: The name of the component or components to set the ticket to. This can be a single string or a list of strings. 
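+
+For illustration, ``jira_components`` accepts either form; the component names below are placeholders, not values from this project:
+
+single component example::
+
+    jira_components: "Infrastructure"
+
+or
+
+multiple components example::
+
+    jira_components:
+      - "Infrastructure"
+      - "Monitoring"
+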
-``ms_teams_alert_summary``: Summary should be configured according to `MS documentation `_, although it seems not displayed by Teams currently. +``jira_description``: Similar to ``alert_text``, this text is prepended to the Jira description. -Optional: +``jira_label``: The label or labels to add to the Jira ticket. This can be a single string or a list of strings. This is provided for backwards compatibility and will eventually be deprecated. It is preferable to use the plural ``jira_labels`` instead. -``ms_teams_theme_color``: By default the alert will be posted without any color line. To add color, set this attribute to a HTML color value e.g. ``#ff0000`` for red. +``jira_labels``: The label or labels to add to the Jira ticket. This can be a single string or a list of strings. -``ms_teams_proxy``: By default ElastAlert will not use a network proxy to send notifications to MS Teams. Set this option using ``hostname:port`` if you need to use a proxy. +``jira_priority``: The index of the priority to set the issue to. In the Jira dropdown for priorities, 0 would represent the first priority, +1 the 2nd, etc. -``ms_teams_alert_fixed_width``: By default this is ``False`` and the notification will be sent to MS Teams as-is. Teams supports a partial Markdown implementation, which means asterisk, underscore and other characters may be interpreted as Markdown. Currenlty, Teams does not fully implement code blocks. Setting this attribute to ``True`` will enable line by line code blocks. It is recommended to enable this to get clearer notifications in Teams. +``jira_watchers``: A list of user names to add as watchers on a Jira ticket. This can be a single string or a list of strings. -Slack -~~~~~ +``jira_bump_tickets``: If true, ElastAlert 2 searches for existing tickets newer than ``jira_max_age`` and comments on the ticket with +information about the alert instead of opening another ticket. ElastAlert 2 finds the existing ticket by searching by summary. If the +summary has changed or contains special characters, it may fail to find the ticket. If you are using a custom ``alert_subject``, +the two summaries must be exact matches, except that by setting ``jira_ignore_in_title``, you can ignore the value of a field when searching. +For example, if the custom subject is "foo occurred at bar", and "foo" is the value of field X in the match, you can set ``jira_ignore_in_title`` +to "X" and it will only bump tickets with "bar" in the subject. Defaults to false. -Slack alerter will send a notification to a predefined Slack channel. The body of the notification is formatted the same as with other alerters. +``jira_ignore_in_title``: ElastAlert 2 will attempt to remove the value for this field from the Jira subject when searching for tickets to bump. +See ``jira_bump_tickets`` description above for an example. -The alerter requires the following option: +``jira_max_age``: If ``jira_bump_tickets`` is true, the maximum age of a ticket, in days, such that ElastAlert 2 will comment on the ticket +instead of opening a new one. Default is 30 days. -``slack_webhook_url``: The webhook URL that includes your auth data and the ID of the channel (room) you want to post to. Go to the Incoming Webhooks -section in your Slack account https://XXXXX.slack.com/services/new/incoming-webhook , choose the channel, click 'Add Incoming Webhooks Integration' -and copy the resulting URL. You can use a list of URLs to send to multiple channels.
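+
+A minimal sketch combining the ticket-bumping options described above (the field name and values are illustrative only)::
+
+    jira_bump_tickets: true
+    jira_ignore_in_title: "hostname"
+    jira_max_age: 14
+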
+``jira_bump_not_in_statuses``: If ``jira_bump_tickets`` is true, a list of statuses the ticket must **not** be in for ElastAlert 2 to comment on +the ticket instead of opening a new one. For example, to prevent comments being added to resolved or closed tickets, set this to 'Resolved' +and 'Closed'. This option should not be set if the ``jira_bump_in_statuses`` option is set. -Optional: +Example usage:: -``slack_username_override``: By default Slack will use your username when posting to the channel. Use this option to change it (free text). + jira_bump_not_in_statuses: + - Resolved + - Closed -``slack_channel_override``: Incoming webhooks have a default channel, but it can be overridden. A public channel can be specified "#other-channel", and a Direct Message with "@username". +``jira_bump_in_statuses``: If ``jira_bump_tickets`` is true, a list of statuses the ticket *must be in* for ElastAlert 2 to comment on +the ticket instead of opening a new one. For example, to only comment on 'Open' tickets -- and thus not 'In Progress', 'Analyzing', +'Resolved', etc. tickets -- set this to 'Open'. This option should not be set if the ``jira_bump_not_in_statuses`` option is set. -``slack_emoji_override``: By default ElastAlert will use the :ghost: emoji when posting to the channel. You can use a different emoji per -ElastAlert rule. Any Apple emoji can be used, see http://emojipedia.org/apple/ . If slack_icon_url_override parameter is provided, emoji is ignored. +Example usage:: -``slack_icon_url_override``: By default ElastAlert will use the :ghost: emoji when posting to the channel. You can provide icon_url to use custom image. -Provide absolute address of the pciture, for example: http://some.address.com/image.jpg . + jira_bump_in_statuses: + - Open -``slack_msg_color``: By default the alert will be posted with the 'danger' color. You can also use 'good' or 'warning' colors. +``jira_bump_only``: Only update if a ticket is found to bump. This skips ticket creation for rules where you only want to affect existing tickets. -``slack_proxy``: By default ElastAlert will not use a network proxy to send notifications to Slack. Set this option using ``hostname:port`` if you need to use a proxy. +Example usage:: -``slack_alert_fields``: You can add additional fields to your slack alerts using this field. Specify the title using `title` and a value for the field using `value`. Additionally you can specify whether or not this field should be a `short` field using `short: true`. + jira_bump_only: true -``slack_title``: Sets a title for the message, this shows up as a blue text at the start of the message +``jira_transition_to``: If ``jira_bump_tickets`` is true, Transition this ticket to the given Status when bumping. Must match the text of your Jira implementation's Status field. -``slack_title_link``: You can add a link in your Slack notification by setting this to a valid URL. Requires slack_title to be set. +Example usage:: -``slack_timeout``: You can specify a timeout value, in seconds, for making communicating with Slac. The default is 10. If a timeout occurs, the alert will be retried next time elastalert cycles. + jira_transition_to: 'Fixed' -``slack_attach_kibana_discover_url``: Enables the attachment of the ``kibana_discover_url`` to the slack notification. The config ``generate_kibana_discover_url`` must also be ``True`` in order to generate the url. Defaults to ``False``. -``slack_kibana_discover_color``: The color of the Kibana Discover url attachment. Defaults to ``#ec4b98``. 
-``slack_kibana_discover_title``: The title of the Kibana Discover url attachment. Defaults to ``Discover in Kibana``. +``jira_bump_after_inactivity``: If this is set, ElastAlert 2 will only comment on tickets that have been inactive for at least this many days. +It only applies if ``jira_bump_tickets`` is true. Default is 0 days. + +Arbitrary Jira fields: + +ElastAlert 2 supports setting any arbitrary Jira field that your Jira issue supports. For example, if you had a custom field, called "Affected User", you can set it by providing that field name in ``snake_case`` prefixed with ``jira_``. These fields can contain primitive strings or arrays of strings. Note that when you create a custom field in your Jira server, internally, the field is represented as ``customfield_1111``. In ElastAlert 2, you may refer to either the public facing name OR the internal representation. + +In addition, if you would like to use a field in the alert as the value for a custom Jira field, use the field name plus a # symbol in front. For example, if you wanted to set a custom Jira field called "user" to the value of the field "username" from the match, you would use the following. + +Example:: + + jira_user: "#username" + +Example usage:: + + jira_arbitrary_singular_field: My Name + jira_arbitrary_multivalue_field: + - Name 1 + - Name 2 + jira_customfield_12345: My Custom Value + jira_customfield_9999: + - My Custom Value 1 + - My Custom Value 2 + +Line Notify +~~~~~~~~~~~ + +Line Notify will send notification to a Line application. The body of the notification is formatted the same as with other alerters. + +Required: + +``linenotify_access_token``: The access token that you got from https://notify-bot.line.me/my/ + +Example usage:: + + alert: + - "linenotify" + linenotify_access_token: "Your linenotify access token" Mattermost ~~~~~~~~~~ @@ -1807,61 +2665,229 @@ The alerter requires the following option: Optional: -``mattermost_proxy``: By default ElastAlert will not use a network proxy to send notifications to Mattermost. Set this option using ``hostname:port`` if you need to use a proxy. +``mattermost_proxy``: By default ElastAlert 2 will not use a network proxy to send notifications to Mattermost. Set this option using ``hostname:port`` if you need to use a proxy. only supports https. -``mattermost_ignore_ssl_errors``: By default ElastAlert will verify SSL certificate. Set this option to ``False`` if you want to ignore SSL errors. +``mattermost_ignore_ssl_errors``: By default ElastAlert 2 will verify SSL certificate. Set this option to ``True`` if you want to ignore SSL errors. ``mattermost_username_override``: By default Mattermost will use your username when posting to the channel. Use this option to change it (free text). ``mattermost_channel_override``: Incoming webhooks have a default channel, but it can be overridden. A public channel can be specified "#other-channel", and a Direct Message with "@username". -``mattermost_icon_url_override``: By default ElastAlert will use the default webhook icon when posting to the channel. You can provide icon_url to use custom image. -Provide absolute address of the picture (for example: http://some.address.com/image.jpg) or Base64 data url. +``mattermost_emoji_override``: By default ElastAlert 2 will use the ``:ghost:`` emoji when posting to the channel. You can use a different emoji per +ElastAlert 2 rule. Any Apple emoji can be used, see http://emojipedia.org/apple/ . If mattermost_icon_url_override parameter is provided, emoji is ignored. 
+ +``mattermost_icon_url_override``: By default ElastAlert 2 will use the ``:ghost:`` emoji when posting to the channel. You can provide an icon_url to use a custom image. +Provide the absolute address of the picture. ``mattermost_msg_pretext``: You can set the message attachment pretext using this option. ``mattermost_msg_color``: By default the alert will be posted with the 'danger' color. You can also use 'good', 'warning', or hex color code. -``mattermost_msg_fields``: You can add fields to your Mattermost alerts using this option. You can specify the title using `title` and the text value using `value`. Additionally you can specify whether this field should be a `short` field using `short: true`. If you set `args` and `value` is a formattable string, ElastAlert will format the incident key based on the provided array of fields from the rule or match. +``mattermost_msg_fields``: You can add fields to your Mattermost alerts using this option. You can specify the title using `title` and the text value using `value`. Additionally you can specify whether this field should be a `short` field using `short: true`. If you set `args` and `value` is a formattable string, ElastAlert 2 will format the incident key based on the provided array of fields from the rule or match. See https://docs.mattermost.com/developer/message-attachments.html#fields for more information. +Example mattermost_msg_fields:: -Telegram + mattermost_msg_fields: + - title: Stack + value: "{0} {1}" # interpolate fields mentioned in args + short: false + args: ["type", "msg.status_code"] # fields from doc + - title: Name + value: static field + short: false + +``mattermost_title``: Sets a title for the message; this shows up as blue text at the start of the message. Defaults to "". + +``mattermost_title_link``: You can add a link in your Mattermost notification by setting this to a valid URL. Requires mattermost_title to be set. Defaults to "". + +``mattermost_footer``: Adds static footer text to the alert. Defaults to "". + +``mattermost_footer_icon``: A public URL for a footer icon. Defaults to "". + +``mattermost_image_url``: An optional URL to an image file (GIF, JPEG, PNG, BMP, or SVG). Defaults to "". + +``mattermost_thumb_url``: An optional URL to an image file (GIF, JPEG, PNG, BMP, or SVG) that is displayed as a thumbnail. Defaults to "". + +``mattermost_author_name``: An optional name used to identify the author. Defaults to "". + +``mattermost_author_link``: An optional URL used to hyperlink the author_name. Defaults to "". + +``mattermost_author_icon``: An optional URL used to display a 16x16 pixel icon beside the author_name. Defaults to "". + +``mattermost_attach_kibana_discover_url``: Enables the attachment of the ``kibana_discover_url`` to the mattermost notification. The config ``generate_kibana_discover_url`` must also be ``True`` in order to generate the url. Defaults to ``False``. + +``mattermost_kibana_discover_color``: The color of the Kibana Discover url attachment. Defaults to ``#ec4b98``. + +``mattermost_kibana_discover_title``: The title of the Kibana Discover url attachment. Defaults to ``Discover in Kibana``.
+ +Example mattermost_attach_kibana_discover_url, mattermost_kibana_discover_color, mattermost_kibana_discover_title:: + + # (Required) + generate_kibana_discover_url: True + kibana_discover_app_url: "http://localhost:5601/app/discover#/" + kibana_discover_index_pattern_id: "4babf380-c3b1-11eb-b616-1b59c2feec54" + kibana_discover_version: "7.15" + + # (Optional) + kibana_discover_from_timedelta: + minutes: 10 + kibana_discover_to_timedelta: + minutes: 10 + + # (Required) + mattermost_attach_kibana_discover_url: True + + # (Optional) + mattermost_kibana_discover_color: "#ec4b98" + mattermost_kibana_discover_title: "Discover in Kibana" + +Microsoft Teams +~~~~~~~~~~~~~~~ + +The Microsoft Teams alerter will send a notification to a predefined Microsoft Teams channel. + +The alerter requires the following options: + +``ms_teams_webhook_url``: The webhook URL that includes your auth data and the ID of the channel you want to post to. Go to the Connectors +menu in your channel and configure an Incoming Webhook, then copy the resulting URL. You can use a list of URLs to send to multiple channels. + +Optional: + +``ms_teams_alert_summary``: MS Teams uses this value as the notification title; defaults to `Alert Subject `_. You can set this value with arbitrary text if you don't want to use the default. + +``ms_teams_theme_color``: By default the alert will be posted without any color line. To add color, set this attribute to an HTML color value e.g. ``#ff0000`` for red. + +``ms_teams_proxy``: By default ElastAlert 2 will not use a network proxy to send notifications to MS Teams. Set this option using ``hostname:port`` if you need to use a proxy. only supports https. + +``ms_teams_alert_fixed_width``: By default this is ``False`` and the notification will be sent to MS Teams as-is. Teams supports a partial Markdown implementation, which means asterisk, underscore and other characters may be interpreted as Markdown. Currently, Teams does not fully implement code blocks. Setting this attribute to ``True`` will enable line by line code blocks. It is recommended to enable this to get clearer notifications in Teams. + +``ms_teams_alert_facts``: You can add additional facts to your MS Teams alerts using this field. Specify the title using `name` and a value for the field or arbitrary text using `value`. + +Example ms_teams_alert_facts:: + + ms_teams_alert_facts: + - name: Host + value: monitor.host + - name: Status + value: monitor.status + - name: What to do + value: Page your boss + +``ms_teams_attach_kibana_discover_url``: Enables the attachment of the ``kibana_discover_url`` to the MS Teams notification. The config ``generate_kibana_discover_url`` must also be ``True`` in order to generate the url. Defaults to ``False``. + +``ms_teams_kibana_discover_title``: The title of the Kibana Discover url attachment. Defaults to ``Discover in Kibana``.
+ +Example ms_teams_attach_kibana_discover_url, ms_teams_kibana_discover_title:: + + # (Required) + generate_kibana_discover_url: True + kibana_discover_app_url: "http://localhost:5601/app/discover#/" + kibana_discover_index_pattern_id: "4babf380-c3b1-11eb-b616-1b59c2feec54" + kibana_discover_version: "7.15" + + # (Optional) + kibana_discover_from_timedelta: + minutes: 10 + kibana_discover_to_timedelta: + minutes: 10 + + # (Required) + ms_teams_attach_kibana_discover_url: True + + # (Optional) + ms_teams_kibana_discover_title: "Discover in Kibana" + +``ms_teams_ca_certs``: Set this option to ``True`` or a path to a CA cert bundle or directory (eg: ``/etc/ssl/certs/ca-certificates.crt``) to validate the SSL certificate. + +``ms_teams_ignore_ssl_errors``: By default ElastAlert 2 will verify SSL certificate. Set this option to ``True`` if you want to ignore SSL errors. + +Example usage:: + + alert: + - "ms_teams" + ms_teams_theme_color: "#6600ff" + ms_teams_webhook_url: "MS Teams Webhook URL" + +OpsGenie ~~~~~~~~ -Telegram alerter will send a notification to a predefined Telegram username or channel. The body of the notification is formatted the same as with other alerters. -The alerter requires the following two options: +OpsGenie alerter will create an alert which can be used to notify Operations people of issues or log information. An OpsGenie ``API`` +integration must be created in order to acquire the necessary ``opsgenie_key`` rule variable. Currently the OpsGenieAlerter only creates +an alert, however it could be extended to update or close existing alerts. + +It is necessary for the user to create an OpsGenie Rest HTTPS API `integration page `_ in order to create alerts. + +The OpsGenie alert requires one option: + +``opsgenie_key``: The randomly generated API Integration key created by OpsGenie. + +Optional: + +``opsgenie_account``: The OpsGenie account to integrate with. + +``opsgenie_addr``: The OpsGenie URL to to connect against, default is ``https://api.opsgenie.com/v2/alerts``. If using the EU instance of Opsgenie, the URL needs to be ``https://api.eu.opsgenie.com/v2/alerts`` for requests to be successful. + +``opsgenie_recipients``: A list OpsGenie recipients who will be notified by the alert. + +``opsgenie_recipients_args``: Map of arguments used to format opsgenie_recipients. + +``opsgenie_default_receipients``: List of default recipients to notify when the formatting of opsgenie_recipients is unsuccesful. + +``opsgenie_teams``: A list of OpsGenie teams to notify (useful for schedules with escalation). + +``opsgenie_teams_args``: Map of arguments used to format opsgenie_teams (useful for assigning the alerts to teams based on some data). + +``opsgenie_default_teams``: List of default teams to notify when the formatting of opsgenie_teams is unsuccesful. + +``opsgenie_tags``: A list of tags for this alert. + +``opsgenie_message``: Set the OpsGenie message to something other than the rule name. The message can be formatted with fields from the first match e.g. "Error occurred for {app_name} at {timestamp}.". -``telegram_bot_token``: The token is a string along the lines of ``110201543:AAHdqTcvCH1vGWJxfSeofSAs0K5PALDsaw`` that will be required to authorize the bot and send requests to the Bot API. You can learn about obtaining tokens and generating new ones in this document https://core.telegram.org/bots#botfather +``opsgenie_description``: Set the OpsGenie description to something other than the rule body. The message can be formatted with fields from the first match e.g. 
"Error occurred for {app_name} at {timestamp}.". -``telegram_room_id``: Unique identifier for the target chat or username of the target channel using telegram chat_id (in the format "-xxxxxxxx") +``opsgenie_alias``: Set the OpsGenie alias. The alias can be formatted with fields from the first match e.g "{app_name} error". -Optional: +``opsgenie_subject``: A string used to create the title of the OpsGenie alert. Can use Python string formatting. -``telegram_api_url``: Custom domain to call Telegram Bot API. Default to api.telegram.org +``opsgenie_subject_args``: A list of fields to use to format ``opsgenie_subject`` if it contains formaters. -``telegram_proxy``: By default ElastAlert will not use a network proxy to send notifications to Telegram. Set this option using ``hostname:port`` if you need to use a proxy. +``opsgenie_priority``: Set the OpsGenie priority level. Possible values are P1, P2, P3, P4, P5. Can be formatted with fields from the first match e.g "P{level}" -GoogleChat -~~~~~~~~~~ -GoogleChat alerter will send a notification to a predefined GoogleChat channel. The body of the notification is formatted the same as with other alerters. +``opsgenie_details``: Map of custom key/value pairs to include in the alert's details. The value can sourced from either fields in the first match, environment variables, or a constant value. -The alerter requires the following options: +``opsgenie_proxy``: By default ElastAlert 2 will not use a network proxy to send notifications to OpsGenie. Set this option using ``hostname:port`` if you need to use a proxy. only supports https. -``googlechat_webhook_url``: The webhook URL that includes the channel (room) you want to post to. Go to the Google Chat website https://chat.google.com and choose the channel in which you wish to receive the notifications. Select 'Configure Webhooks' to create a new webhook or to copy the URL from an existing one. You can use a list of URLs to send to multiple channels. +``opsgenie_source``: Set the OpsGenie source, default is `ElastAlert`. Can be formatted with fields from the first match e.g "{source} {region}" -Optional: +``opsgenie_entity``: Set the OpsGenie entity. Can be formatted with fields from the first match e.g "{host_name}" -``googlechat_format``: Formatting for the notification. Can be either 'card' or 'basic' (default). +Example usage:: -``googlechat_header_title``: Sets the text for the card header title. (Only used if format=card) + opsgenie_details: + Author: 'Bob Smith' # constant value + Environment: '$VAR' # environment variable + Message: { field: message } # field in the first match -``googlechat_header_subtitle``: Sets the text for the card header subtitle. (Only used if format=card) +Example opsgenie_details with kibana_discover_url:: -``googlechat_header_image``: URL for the card header icon. (Only used if format=card) + # (Required) + generate_kibana_discover_url: True + kibana_discover_app_url: "http://localhost:5601/app/discover#/" + kibana_discover_index_pattern_id: "4babf380-c3b1-11eb-b616-1b59c2feec54" + kibana_discover_version: "7.15" -``googlechat_footer_kibanalink``: URL to Kibana to include in the card footer. 
(Only used if format=card) + # (Optional) + kibana_discover_from_timedelta: + minutes: 10 + kibana_discover_to_timedelta: + minutes: 10 + # (Required) + opsgenie_details: + Kibana Url: { field: kibana_discover_url } + Message: { field: message } + Testing: 'yes' PagerDuty ~~~~~~~~~ @@ -1878,42 +2904,46 @@ The alerter requires the following option: Optional: -``alert_subject``: If set, this will be used as the Incident description within PagerDuty. If not set, ElastAlert will default to using the rule name of the alert for the incident. +``alert_subject``: If set, this will be used as the Incident description within PagerDuty. If not set, ElastAlert 2 will default to using the rule name of the alert for the incident. -``alert_subject_args``: If set, and ``alert_subject`` is a formattable string, ElastAlert will format the incident key based on the provided array of fields from the rule or match. +``alert_subject_args``: If set, and ``alert_subject`` is a formattable string, ElastAlert 2 will format the incident key based on the provided array of fields from the rule or match. ``pagerduty_incident_key``: If not set PagerDuty will trigger a new incident for each alert sent. If set to a unique string per rule PagerDuty will identify the incident that this event should be applied. If there's no open (i.e. unresolved) incident with this key, a new one will be created. If there's already an open incident with a matching key, this event will be appended to that incident's log. -``pagerduty_incident_key_args``: If set, and ``pagerduty_incident_key`` is a formattable string, Elastalert will format the incident key based on the provided array of fields from the rule or match. +``pagerduty_incident_key_args``: If set, and ``pagerduty_incident_key`` is a formattable string, ElastAlert 2 will format the incident key based on the provided array of fields from the rule or match. -``pagerduty_proxy``: By default ElastAlert will not use a network proxy to send notifications to PagerDuty. Set this option using ``hostname:port`` if you need to use a proxy. +``pagerduty_proxy``: By default ElastAlert 2 will not use a network proxy to send notifications to PagerDuty. Set this option using ``hostname:port`` if you need to use a proxy. only supports https. V2 API Options (Optional): These options are specific to the PagerDuty V2 API -See https://v2.developer.pagerduty.com/docs/send-an-event-events-api-v2 +See https://developer.pagerduty.com/api-reference/b3A6Mjc0ODI2Nw-send-an-event-to-pager-duty ``pagerduty_api_version``: Defaults to `v1`. Set to `v2` to enable the PagerDuty V2 Event API. ``pagerduty_v2_payload_class``: Sets the class of the payload. (the event type in PagerDuty) -``pagerduty_v2_payload_class_args``: If set, and ``pagerduty_v2_payload_class`` is a formattable string, Elastalert will format the class based on the provided array of fields from the rule or match. +``pagerduty_v2_payload_class_args``: If set, and ``pagerduty_v2_payload_class`` is a formattable string, ElastAlert 2 will format the class based on the provided array of fields from the rule or match. ``pagerduty_v2_payload_component``: Sets the component of the payload. (what program/interface/etc the event came from) -``pagerduty_v2_payload_component_args``: If set, and ``pagerduty_v2_payload_component`` is a formattable string, Elastalert will format the component based on the provided array of fields from the rule or match. 
+``pagerduty_v2_payload_component_args``: If set, and ``pagerduty_v2_payload_component`` is a formattable string, ElastAlert 2 will format the component based on the provided array of fields from the rule or match. ``pagerduty_v2_payload_group``: Sets the logical grouping (e.g. app-stack) -``pagerduty_v2_payload_group_args``: If set, and ``pagerduty_v2_payload_group`` is a formattable string, Elastalert will format the group based on the provided array of fields from the rule or match. +``pagerduty_v2_payload_group_args``: If set, and ``pagerduty_v2_payload_group`` is a formattable string, ElastAlert 2 will format the group based on the provided array of fields from the rule or match. ``pagerduty_v2_payload_severity``: Sets the severity of the page. (defaults to `critical`, valid options: `critical`, `error`, `warning`, `info`) ``pagerduty_v2_payload_source``: Sets the source of the event, preferably the hostname or fqdn. -``pagerduty_v2_payload_source_args``: If set, and ``pagerduty_v2_payload_source`` is a formattable string, Elastalert will format the source based on the provided array of fields from the rule or match. +``pagerduty_v2_payload_source_args``: If set, and ``pagerduty_v2_payload_source`` is a formattable string, ElastAlert 2 will format the source based on the provided array of fields from the rule or match. + +``pagerduty_v2_payload_custom_details``: List of keys:values to use as the content of the custom_details payload. Example - ip:clientip will map the value from the clientip index of Elasticsearch to JSON key named ip. + +``pagerduty_v2_payload_include_all_info``: If True, this will include the entire Elasticsearch document as a custom detail field called "information" in the PagerDuty alert. PagerTree ~~~~~~~~~ @@ -1924,80 +2954,101 @@ The alerter requires the following options: ``pagertree_integration_url``: URL generated by PagerTree for the integration. -Exotel -~~~~~~ - -Developers in India can use Exotel alerter, it will trigger an incident to a mobile phone as sms from your exophone. Alert name along with the message body will be sent as an sms. +``pagertree_proxy``: By default ElastAlert 2 will not use a network proxy to send notifications to PagerTree. Set this option using ``hostname:port`` if you need to use a proxy. only supports https. -The alerter requires the following option: - -``exotel_account_sid``: This is sid of your Exotel account. - -``exotel_auth_token``: Auth token assosiated with your Exotel account. +Example usage:: -If you don't know how to find your accound sid and auth token, refer - http://support.exotel.in/support/solutions/articles/3000023019-how-to-find-my-exotel-token-and-exotel-sid- + alert: + - "pagertree" + pagertree_integration_url: "PagerTree Integration URL" -``exotel_to_number``: The phone number where you would like send the notification. +Rocket.Chat +~~~~~~~~~~~ -``exotel_from_number``: Your exophone number from which message will be sent. +Rocket.Chat alerter will send a notification to a predefined channel. The body of the notification is formatted the same as with other alerters. +https://developer.rocket.chat/api/rest-api/methods/chat/postmessage -The alerter has one optional argument: +The alerter requires the following option: -``exotel_message_body``: Message you want to send in the sms, is you don't specify this argument only the rule name is sent +``rocket_chat_webhook_url``: The webhook URL that includes your auth data and the ID of the channel (room) you want to post to. 
You can use a list of URLs to send to multiple channels. +Optional: -Twilio -~~~~~~ +``rocket_chat_username_override``: By default Rocket.Chat will use username defined in Integration when posting to the channel. Use this option to change it (free text). -Twilio alerter will trigger an incident to a mobile phone as sms from your twilio phone number. Alert name will arrive as sms once this option is chosen. +``rocket_chat_channel_override``: Incoming webhooks have a default channel, but it can be overridden. A public channel can be specified “#other-channel”, and a Direct Message with “@username”. -The alerter requires the following option: +``rocket_chat_emoji_override``: By default ElastAlert 2 will use the :ghost: emoji when posting to the channel. You can use a different emoji per +ElastAlert 2 rule. Any Apple emoji can be used, see http://emojipedia.org/apple/ . -``twilio_account_sid``: This is sid of your twilio account. +``rocket_chat_msg_color``: By default the alert will be posted with the ‘danger’ color. You can also use ‘good’ or ‘warning’ colors. -``twilio_auth_token``: Auth token assosiated with your twilio account. +``rocket_chat_text_string``: Notification message you want to add. -``twilio_to_number``: The phone number where you would like send the notification. +``rocket_chat_proxy``: By default ElastAlert 2 will not use a network proxy to send notifications to Rocket.Chat. Set this option using ``hostname:port`` if you need to use a proxy. only supports https. -``twilio_from_number``: Your twilio phone number from which message will be sent. +``rocket_chat_ca_certs``: Set this option to ``True`` or a path to a CA cert bundle or directory (eg: ``/etc/ssl/certs/ca-certificates.crt``) to validate the SSL certificate. +``rocket_chat_ignore_ssl_errors``: By default ElastAlert 2 will verify SSL certificate. Set this option to ``True`` if you want to ignore SSL errors. -VictorOps -~~~~~~~~~ +``rocket_chat_timeout``: You can specify a timeout value, in seconds, for making communicating with Rocket.Chat. The default is 10. If a timeout occurs, the alert will be retried next time ElastAlert 2 cycles. -VictorOps alerter will trigger an incident to a predefined VictorOps routing key. The body of the notification is formatted the same as with other alerters. +``rocket_chat_attach_kibana_discover_url``: Enables the attachment of the ``kibana_discover_url`` to the Rocket.Chat notification. The config ``generate_kibana_discover_url`` must also be ``True`` in order to generate the url. Defaults to ``False``. -The alerter requires the following options: +``rocket_chat_kibana_discover_color``: The color of the Kibana Discover url attachment. Defaults to ``#ec4b98``. -``victorops_api_key``: API key generated under the 'REST Endpoint' in the Integrations settings. +``rocket_chat_kibana_discover_title``: The title of the Kibana Discover url attachment. Defaults to ``Discover in Kibana``. -``victorops_routing_key``: VictorOps routing key to route the alert to. +Example rocket_chat_attach_kibana_discover_url, rocket_chat_kibana_discover_color, rocket_chat_kibana_discover_title:: -``victorops_message_type``: VictorOps field to specify severity level. 
Must be one of the following: INFO, WARNING, ACKNOWLEDGEMENT, CRITICAL, RECOVERY + # (Required) + generate_kibana_discover_url: True + kibana_discover_app_url: "http://localhost:5601/app/discover#/" + kibana_discover_index_pattern_id: "4babf380-c3b1-11eb-b616-1b59c2feec54" + kibana_discover_version: "7.15" -Optional: + # (Optional) + kibana_discover_from_timedelta: + minutes: 10 + kibana_discover_to_timedelta: + minutes: 10 -``victorops_entity_id``: The identity of the incident used by VictorOps to correlate incidents throughout the alert lifecycle. If not defined, VictorOps will assign a random string to each alert. + # (Required) + rocket_chat_attach_kibana_discover_url: True -``victorops_entity_display_name``: Human-readable name of alerting entity to summarize incidents without affecting the life-cycle workflow. + # (Optional) + rocket_chat_kibana_discover_color: "#ec4b98" + rocket_chat_kibana_discover_title: "Discover in Kibana" -``victorops_proxy``: By default ElastAlert will not use a network proxy to send notifications to VictorOps. Set this option using ``hostname:port`` if you need to use a proxy. +``rocket_chat_alert_fields``: You can add additional fields to your Rocket.Chat alerts using this field. Specify the title using `title` and a value for the field using `value`. Additionally you can specify whether or not this field should be a `short` field using `short: true`. -Gitter -~~~~~~ +Example rocket_chat_alert_fields:: -Gitter alerter will send a notification to a predefined Gitter channel. The body of the notification is formatted the same as with other alerters. + rocket_chat_alert_fields: + - title: Host + value: monitor.host + short: true + - title: Status + value: monitor.status + short: true + - title: Zone + value: beat.name + short: true -The alerter requires the following option: +Squadcast +~~~~~~~~~ -``gitter_webhook_url``: The webhook URL that includes your auth data and the ID of the channel (room) you want to post to. Go to the Integration Settings -of the channel https://gitter.im/ORGA/CHANNEL#integrations , click 'CUSTOM' and copy the resulting URL. +Alerts can be sent to Squadcast using the `http post` method described above and Squadcast will process it and send Phone, SMS, Email and Push notifications to the relevant person(s) and let them take actions. -Optional: +Configuration variables in rules YAML file:: -``gitter_msg_level``: By default the alert will be posted with the 'error' level. You can use 'info' if you want the messages to be black instead of red. + alert: post + http_post_url: + http_post_static_payload: + Title: + http_post_all_values: true -``gitter_proxy``: By default ElastAlert will not use a network proxy to send notifications to Gitter. Set this option using ``hostname:port`` if you need to use a proxy. +For more details, you can refer the `Squadcast documentation `_. ServiceNow ~~~~~~~~~~ @@ -2006,7 +3057,7 @@ The ServiceNow alerter will create a ne Incident in ServiceNow. The body of the The alerter requires the following options: -``servicenow_rest_url``: The ServiceNow RestApi url, this will look like https://instancename.service-now.com/api/now/v1/table/incident +``servicenow_rest_url``: The ServiceNow RestApi url, this will look like `TableAPI `_. ``username``: The ServiceNow Username to access the api. @@ -2029,217 +3080,464 @@ The alerter requires the following options: Optional: -``servicenow_proxy``: By default ElastAlert will not use a network proxy to send notifications to ServiceNow. 
Set this option using ``hostname:port`` if you need to use a proxy. +``servicenow_proxy``: By default ElastAlert 2 will not use a network proxy to send notifications to ServiceNow. Set this option using ``hostname:port`` if you need to use a proxy. only supports https. +``servicenow_impact``: An integer 1, 2, or 3 representing high, medium, and low respectively. This measures the effect of an incident on business processes. -Debug -~~~~~ +``servicenow_urgency``: An integer 1, 2, or 3 representing high, medium, and low respecitvely. This measures how long this incident can be delayed until there is a significant business impact. -The debug alerter will log the alert information using the Python logger at the info level. It is logged into a Python Logger object with the name ``elastalert`` that can be easily accessed using the ``getLogger`` command. +Example usage:: -Stomp + alert: + - "servicenow" + servicenow_rest_url: "servicenow rest url" + username: "user" + password: "password" + short_description: "xxxxxx" + comments: "xxxxxx" + assignment_group: "xxxxxx" + category: "xxxxxx" + subcategory: "xxxxxx" + cmdb_ci: "xxxxxx" + caller_id: "xxxxxx" + servicenow_impact: 1 + servicenow_urgenc: 3 + +Slack ~~~~~ -This alert type will use the STOMP protocol in order to push a message to a broker like ActiveMQ or RabbitMQ. The message body is a JSON string containing the alert details. -The default values will work with a pristine ActiveMQ installation. +Slack alerter will send a notification to a predefined Slack channel. The body of the notification is formatted the same as with other alerters. + +The alerter requires the following option: + +``slack_webhook_url``: The webhook URL that includes your auth data and the ID of the channel (room) you want to post to. Go to the Incoming Webhooks +section in your Slack account https://XXXXX.slack.com/services/new/incoming-webhook , choose the channel, click 'Add Incoming Webhooks Integration' +and copy the resulting URL. You can use a list of URLs to send to multiple channels. Optional: -``stomp_hostname``: The STOMP host to use, defaults to localhost. -``stomp_hostport``: The STOMP port to use, defaults to 61613. -``stomp_login``: The STOMP login to use, defaults to admin. -``stomp_password``: The STOMP password to use, defaults to admin. -``stomp_destination``: The STOMP destination to use, defaults to /queue/ALERT +``slack_username_override``: By default Slack will use your username when posting to the channel. Use this option to change it (free text). -The stomp_destination field depends on the broker, the /queue/ALERT example is the nomenclature used by ActiveMQ. Each broker has its own logic. +``slack_channel_override``: Incoming webhooks have a default channel, but it can be overridden. A public channel can be specified "#other-channel", and a Direct Message with "@username". -Alerta -~~~~~~ +``slack_emoji_override``: By default ElastAlert 2 will use the ``:ghost:`` emoji when posting to the channel. You can use a different emoji per +ElastAlert 2 rule. Any Apple emoji can be used, see http://emojipedia.org/apple/ . If slack_icon_url_override parameter is provided, emoji is ignored. -Alerta alerter will post an alert in the Alerta server instance through the alert API endpoint. -See http://alerta.readthedocs.io/en/latest/api/alert.html for more details on the Alerta JSON format. +``slack_icon_url_override``: By default ElastAlert 2 will use the ``:ghost:`` emoji when posting to the channel. You can provide icon_url to use custom image. 
+Provide absolute address of the pciture. -For Alerta 5.0 +``slack_msg_color``: By default the alert will be posted with the 'danger' color. You can also use 'good' or 'warning' colors. -Required: +``slack_parse_override``: By default the notification message is escaped 'none'. You can also use 'full'. -``alerta_api_url``: API server URL. +``slack_text_string``: Notification message you want to add. -Optional: +``slack_proxy``: By default ElastAlert 2 will not use a network proxy to send notifications to Slack. Set this option using ``hostname:port`` if you need to use a proxy. only supports https. -``alerta_api_key``: This is the api key for alerta server, sent in an ``Authorization`` HTTP header. If not defined, no Authorization header is sent. +``slack_alert_fields``: You can add additional fields to your slack alerts using this field. Specify the title using `title` and a value for the field using `value`. Additionally you can specify whether or not this field should be a `short` field using `short: true`. -``alerta_use_qk_as_resource``: If true and query_key is present, this will override ``alerta_resource`` field with the ``query_key value`` (Can be useful if ``query_key`` is a hostname). +Example slack_alert_fields:: -``alerta_use_match_timestamp``: If true, it will use the timestamp of the first match as the ``createTime`` of the alert. otherwise, the current server time is used. + slack_alert_fields: + - title: Host + value: monitor.host + short: true + - title: Status + value: monitor.status + short: true + - title: Zone + value: beat.name + short: true -``alert_missing_value``: Text to replace any match field not found when formating strings. Defaults to ````. +``slack_ignore_ssl_errors``: By default ElastAlert 2 will verify SSL certificate. Set this option to ``True`` if you want to ignore SSL errors. -The following options dictate the values of the API JSON payload: +``slack_title``: Sets a title for the message, this shows up as a blue text at the start of the message -``alerta_severity``: Defaults to "warning". +``slack_title_link``: You can add a link in your Slack notification by setting this to a valid URL. Requires slack_title to be set. -``alerta_timeout``: Defaults 84600 (1 Day). +``slack_timeout``: You can specify a timeout value, in seconds, for making communicating with Slack. The default is 10. If a timeout occurs, the alert will be retried next time ElastAlert 2 cycles. -``alerta_type``: Defaults to "elastalert". +``slack_attach_kibana_discover_url``: Enables the attachment of the ``kibana_discover_url`` to the slack notification. The config ``generate_kibana_discover_url`` must also be ``True`` in order to generate the url. Defaults to ``False``. -The following options use Python-like string syntax ``{}`` or ``%()s`` to access parts of the match, similar to the CommandAlerter. Ie: "Alert for {clientip}". -If the referenced key is not found in the match, it is replaced by the text indicated by the option ``alert_missing_value``. +``slack_kibana_discover_color``: The color of the Kibana Discover url attachment. Defaults to ``#ec4b98``. -``alerta_resource``: Defaults to "elastalert". +``slack_kibana_discover_title``: The title of the Kibana Discover url attachment. Defaults to ``Discover in Kibana``. -``alerta_service``: Defaults to "elastalert". +Example slack_attach_kibana_discover_url, slack_kibana_discover_color, slack_kibana_discover_title:: -``alerta_origin``: Defaults to "elastalert". 
+ # (Required) + generate_kibana_discover_url: True + kibana_discover_app_url: "http://localhost:5601/app/discover#/" + kibana_discover_index_pattern_id: "4babf380-c3b1-11eb-b616-1b59c2feec54" + kibana_discover_version: "7.15" -``alerta_environment``: Defaults to "Production". + # (Optional) + kibana_discover_from_timedelta: + minutes: 10 + kibana_discover_to_timedelta: + minutes: 10 -``alerta_group``: Defaults to "". + # (Required) + slack_attach_kibana_discover_url: True -``alerta_correlate``: Defaults to an empty list. + # (Optional) + slack_kibana_discover_color: "#ec4b98" + slack_kibana_discover_title: "Discover in Kibana" -``alerta_tags``: Defaults to an empty list. +``slack_ca_certs``: Set this option to ``True`` or a path to a CA cert bundle or directory (eg: ``/etc/ssl/certs/ca-certificates.crt``) to validate the SSL certificate. -``alerta_event``: Defaults to the rule's name. +``slack_footer``: Add a static footer text for alert. Defaults to "". -``alerta_text``: Defaults to the rule's text according to its type. +``slack_footer_icon``: A Public Url for a footer icon. Defaults to "". -``alerta_value``: Defaults to "". +``slack_image_url``: An optional URL to an image file (GIF, JPEG, PNG, BMP, or SVG). Defaults to "". -The ``attributes`` dictionary is built by joining the lists from ``alerta_attributes_keys`` and ``alerta_attributes_values``, considered in order. +``slack_thumb_url``: An optional URL to an image file (GIF, JPEG, PNG, BMP, or SVG) that is displayed as thumbnail. Defaults to "". +``slack_author_name``: An optional name used to identify the author. Defaults to "". -Example usage using old-style format:: +``slack_author_link``: An optional URL used to hyperlink the author_name. Defaults to "". - alert: - - alerta - alerta_api_url: "http://youralertahost/api/alert" - alerta_attributes_keys: ["hostname", "TimestampEvent", "senderIP" ] - alerta_attributes_values: ["%(key)s", "%(logdate)s", "%(sender_ip)s" ] - alerta_correlate: ["ProbeUP","ProbeDOWN"] - alerta_event: "ProbeUP" - alerta_text: "Probe %(hostname)s is UP at %(logdate)s GMT" - alerta_value: "UP" +``slack_author_icon``: An optional URL used to display a 16x16 pixel icon beside the author_name. Defaults to "". -Example usage using new-style format:: +``slack_msg_pretext``: You can set the message attachment pretext using this option. Defaults to "". - alert: - - alerta - alerta_attributes_values: ["{key}", "{logdate}", "{sender_ip}" ] - alerta_text: "Probe {hostname} is UP at {logdate} GMT" +``slack_attach_jira_ticket_url``: Add url to the jira ticket created. Only works if the Jira alert runs before Slack alert. Set the field to ``True`` in order to generate the url. Defaults to ``False``. +``slack_jira_ticket_color``: The color of the Jira Ticket url attachment. Defaults to ``#ec4b98``. +``slack_jira_ticket_title``: The title of the Jira Ticket url attachment. Defaults to ``Jira Ticket``. -HTTP POST -~~~~~~~~~ +Splunk On-Call (Formerly VictorOps) +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -This alert type will send results to a JSON endpoint using HTTP POST. The key names are configurable so this is compatible with almost any endpoint. By default, the JSON will contain all the items from the match, unless you specify http_post_payload, in which case it will only contain those items. +Splunk On-Call (Formerly VictorOps) alerter will trigger an incident to a predefined Splunk On-Call (Formerly VictorOps) routing key. The body of the notification is formatted the same as with other alerters. 
-Required: +The alerter requires the following options: -``http_post_url``: The URL to POST. +``victorops_api_key``: API key generated under the 'REST Endpoint' in the Integrations settings. + +``victorops_routing_key``: Splunk On-Call (Formerly VictorOps) routing key to route the alert to. + +``victorops_message_type``: Splunk On-Call (Formerly VictorOps) field to specify severity level. Must be one of the following: INFO, WARNING, ACKNOWLEDGEMENT, CRITICAL, RECOVERY Optional: -``http_post_payload``: List of keys:values to use as the content of the POST. Example - ip:clientip will map the value from the clientip index of Elasticsearch to JSON key named ip. If not defined, all the Elasticsearch keys will be sent. +``victorops_entity_id``: The identity of the incident used by Splunk On-Call (Formerly VictorOps) to correlate incidents throughout the alert lifecycle. If not defined, Splunk On-Call (Formerly VictorOps) will assign a random string to each alert. -``http_post_static_payload``: Key:value pairs of static parameters to be sent, along with the Elasticsearch results. Put your authentication or other information here. +``victorops_entity_display_name``: Human-readable name of alerting entity to summarize incidents without affecting the life-cycle workflow. Will use ``alert_subject`` if not set. -``http_post_headers``: Key:value pairs of headers to be sent as part of the request. +``victorops_proxy``: By default ElastAlert 2 will not use a network proxy to send notifications to Splunk On-Call (Formerly VictorOps). Set this option using ``hostname:port`` if you need to use a proxy. only supports https. -``http_post_proxy``: URL of proxy, if required. +Example usage:: -``http_post_all_values``: Boolean of whether or not to include every key value pair from the match in addition to those in http_post_payload and http_post_static_payload. Defaults to True if http_post_payload is not specified, otherwise False. + alert: + - "victorops" + victorops_api_key: "VictorOps API Key" + victorops_routing_key: "VictorOps routing Key" + victorops_message_type: "INFO" -``http_post_timeout``: The timeout value, in seconds, for making the post. The default is 10. If a timeout occurs, the alert will be retried next time elastalert cycles. +Stomp +~~~~~ -Example usage:: +This alert type will use the STOMP protocol in order to push a message to a broker like ActiveMQ or RabbitMQ. The message body is a JSON string containing the alert details. +The default values will work with a pristine ActiveMQ installation. - alert: post - http_post_url: "http://example.com/api" - http_post_payload: - ip: clientip - http_post_static_payload: - apikey: abc123 - http_post_headers: - authorization: Basic 123dr3234 +The alerter requires the following options: +``stomp_hostname``: The STOMP host to use, defaults to ``localhost``. -Alerter -~~~~~~~ +``stomp_hostport``: The STOMP port to use, defaults to ``61613``. -For all Alerter subclasses, you may reference values from a top-level rule property in your Alerter fields by referring to the property name surrounded by dollar signs. This can be useful when you have rule-level properties that you would like to reference many times in your alert. For example: +``stomp_login``: The STOMP login to use, defaults to ``admin``. + +``stomp_password``: The STOMP password to use, defaults to ``admin``. 
+ +Optional: + +``stomp_destination``: The STOMP destination to use, defaults to ``/queue/ALERT`` + +The stomp_destination field depends on the broker, the /queue/ALERT example is the nomenclature used by ActiveMQ. Each broker has its own logic. Example usage:: - jira_priority: $priority$ - jira_alert_owner: $owner$ + alert: + - "stomp" + stomp_hostname: "localhost" + stomp_hostport: "61613" + stomp_login: "admin" + stomp_password: "admin" + stomp_destination: "/queue/ALERT" +Telegram +~~~~~~~~ +Telegram alerter will send a notification to a predefined Telegram username or channel. The body of the notification is formatted the same as with other alerters. +The alerter requires the following two options: -Line Notify -~~~~~~~~~~~ +``telegram_bot_token``: The token is a string along the lines of ``110201543:AAHdqTcvCH1vGWJxfSeofSAs0K5PALDsaw`` that will be required to authorize the bot and send requests to the Bot API. You can learn about obtaining tokens and generating new ones in this document https://core.telegram.org/bots#6-botfather + +``telegram_room_id``: Unique identifier for the target chat or username of the target channel using telegram chat_id (in the format "-xxxxxxxx") + +Optional: + +``telegram_api_url``: Custom domain to call Telegram Bot API. Default to api.telegram.org + +``telegram_proxy``: By default ElastAlert 2 will not use a network proxy to send notifications to Telegram. Set this option using ``hostname:port`` if you need to use a proxy. only supports https. + +``telegram_proxy_login``: The Telegram proxy auth username. + +``telegram_proxy_pass``: The Telegram proxy auth password. + +``telegram_parse_mode``: The Telegram parsing mode, which determines the format of the alert text body. Possible values are ``markdown``, ``markdownV2``, ``html``. Defaults to ``markdown``. + +Example usage:: + + alert: + - "telegram" + telegram_bot_token: "bot_token" + telegram_room_id: "chat_id" -Line Notify will send notification to a Line application. The body of the notification is formatted the same as with other alerters. + +Tencent SMS +~~~~~~~~~~~ Required: -``linenotify_access_token``: The access token that you got from https://notify-bot.line.me/my/ +``tencent_sms_secret_id``: ``SecretID`` is used to identify the API caller. + +``tencent_sms_secret_key``: ``SecretKey`` is used to encrypt the string to sign that can be verified on the server. You should keep it private and avoid disclosure. + +``tencent_sms_sdk_appid``: SMS application ID, which is the `SdkAppId` generated after an application is added in the `SMS console `_, such as 1400006666 + +``tencent_sms_to_number``: Target mobile number in the E.164 standard (+[country/region code][mobile number]) + +Example: +8613711112222, which has a + sign followed by 86 (country/region code) and then by 13711112222 (mobile number). Up to 200 mobile numbers are supported + +``tencent_sms_template_id``: Template ID. You must enter the ID of an approved template, which can be viewed in the `SMS console `_. + +If you need to send SMS messages to global mobile numbers, you can only use a Global SMS template. + +Optional: + +``tencent_sms_sign_name``: Content of the SMS signature, which should be encoded in UTF-8. You must enter an approved signature, such as Tencent Cloud. The signature information can be viewed in the SMS console. +Note: this parameter is required for Mainland China SMS. 
+ +``tencent_sms_region``: Region parameter, which is used to identify the region(`Mainland China `_ or +`Global `_) to which the data you want to work with belongs. + +``tencent_sms_template_parm``: The number of template parameters needs to be consistent with the number of variables of the template corresponding to TemplateId. +this value format by `rfc6901 `_ + +.. code-block:: json + + { + "_index" : "tmec" + "_type" : "fluentd", + "_id" : "PeXLrnsBvusb3d0w6dUl", + "_score" : 1.0, + "_source" : { + "kubernetes" : { + "host" : "9.134.191.187", + "pod_id" : "66ba4e5a-1ad2-4655-9a8e-cffb6b942559", + "labels" : { + "release" : "nginx", + "pod-template-hash" : "6bd96d6f74" + }, + "namespace_name" : "app", + "pod_name" : "app.nginx-6bd96d6f74-2ts4x" + }, + "time" : "2021-09-04T03:13:24.192875Z", + "message" : "2021-09-03T14:34:08+0000|INFO|vector eps : 192.168.0.2:10000,", + } + } + + +.. code-block:: yaml + + tencent_sms_template_id: "1123835" + tencent_sms_template_parm: + - "/kubernetes/pod_name" -theHive + + + +TheHive ~~~~~~~ -theHive alert type will send JSON request to theHive (Security Incident Response Platform) with TheHive4py API. Sent request will be stored like Hive Alert with description and observables. +TheHive alerter can be used to create a new alert in TheHive. The alerter supports adding tags, +custom fields, and observables from the alert matches and rule data. Required: -``hive_connection``: The connection details as key:values. Required keys are ``hive_host``, ``hive_port`` and ``hive_apikey``. +``hive_connection``: The connection details to your instance (see example below for the required syntax). +Only ``hive_apikey`` is required, ``hive_host`` and ``hive_port`` default to ``http://localhost`` and +``9000`` respectively. + +``hive_alert_config``: Configuration options for the alert, see example below for structure. -``hive_alert_config``: Configuration options for the alert. +``source``: Text content to use for TheHive event's "source" field. See the optional ``source_args`` parameter for dynamically formatting this content with dynamic lookup values. + +``type`` Text content to use for TheHive event's "type" field. See the optional ``type_args`` parameter for dynamically formatting this content with dynamic lookup values. Optional: +``tags`` can be populated from the matched record, using the same syntax used in ``alert_text_args``. +If a record doesn't contain the specified value, the rule itself will be examined for the tag. If +this doesn't contain the tag either, the tag is attached without modification to the alert. For +aggregated alerts, all matches are examined individually, and tags generated for each one. All tags +are then attached to the same alert. + +``customFields`` can also be populated from rule fields as well as matched results. Custom fields +are only populated once. If an alert is an aggregated alert, the custom field values will be populated +using the first matched record, before checking the rule. If neither matches, the ``customField.value`` +will be used directly. + +``hive_observable_data_mapping``: If needed, matched data fields can be mapped to TheHive +observable types using the same syntax as ``customFields``, described above. The algorithm used to populate +the observable value is similar to the one used to populate the ``tags``, including the behaviour for aggregated alerts. +The tlp, message, and tags fields are optional for each observable. If not specified, the tlp field is given a default value of 2. 
+ ``hive_proxies``: Proxy configuration. -``hive_observable_data_mapping``: If needed, matched data fields can be mapped to TheHive observable types using python string formatting. +``hive_verify``: Whether or not to enable SSL certificate validation. Defaults to False. + +``description_args``: can be used to format the description field with additional rule and match field lookups. Note that the description will be initially populated from the ElastAlert 2 default ``alert_text`` fields, including any defined ``alert_text_args``. See the "Alert Content" section for more information on the default formatting. + +``description_missing_value``: Text to replace any match field not found when formatting the ``description``. Defaults to ````. + +``source_args``: List of parameters to format into the ``source`` text content, with values originating from the first match event. + +``title``: Text content to use for TheHive event's "title" field. This will override the default alert title generated from the ``alert_subject`` and associated arg parameters. See the "Alert Subject" section for more information on the default formatting. + +``title_args``: List of additional args to format against the "title" content. If the title argument is not provided then these optional arguments will be formatted against the already formatted title generated from the ``alert_subject`` and related parameters. This means that a two-phased formatting potentially could be utilized in very specific configuration scenarios. See the "Alert Subject" section for more information on the default formatting. The values will be used from the first match event. + +``type_args``: List of parameters to format into the ``type`` text content, with values originating from the first match event. Example usage:: alert: hivealerter - hive_connection: - hive_host: http://localhost - hive_port: - hive_apikey: - hive_proxies: - http: '' - https: '' - - hive_alert_config: - title: 'Title' ## This will default to {rule[index]_rule[name]} if not provided - type: 'external' - source: 'elastalert' - description: '{match[field1]} {rule[name]} Sample description' - severity: 2 - tags: ['tag1', 'tag2 {rule[name]}'] - tlp: 3 - status: 'New' - follow: True + hive_connection: + hive_host: http://localhost + hive_port: + hive_apikey: + hive_proxies: + http: '' + https: '' + + hive_alert_config: + customFields: + - name: example + type: string + value: example + follow: True + severity: 2 + status: 'New' + source: 'src-{}' + source_args: [ data.source ] + description_args: [ name, description] + description: '{0} : {1}' + tags: ['tag1', 'tag2'] + title: 'Title {}' + title_args: [ data.title ] + tlp: 3 + type: 'type-{}' + type_args: [ data.type ] hive_observable_data_mapping: - - domain: "{match[field1]}_{rule[name]}" - - domain: "{match[field]}" - - ip: "{match[ip_field]}" + - domain: agent.hostname + tlp: 1 + tags: ['tag1', 'tag2'] + message: 'agent hostname' + - domain: response.domain + tlp: 2 + tags: ['tag3'] + - ip: client.ip + +Twilio +~~~~~~ + +The Twilio alerter will send an alert to a mobile phone as an SMS from your Twilio +phone number. The SMS will contain the alert name. You may use either Twilio SMS +or Twilio Copilot to send the message, controlled by the ``twilio_use_copilot`` +option. + +Note that when Twilio Copilot *is* used the ``twilio_message_service_sid`` +option is required. Likewise, when *not* using Twilio Copilot, the +``twilio_from_number`` option is required. 
+ +The alerter requires the following options: + +``twilio_account_sid``: The SID of your Twilio account. + +``twilio_auth_token``: Auth token associated with your Twilio account. + +``twilio_to_number``: The phone number where you would like to send the alert. + +Either one of + * ``twilio_from_number``: The Twilio phone number from which the alert will be sent. + * ``twilio_message_service_sid``: The SID of your Twilio message service. + +Optional: + +``twilio_use_copilot``: Whether or not to use Twilio Copilot, False by default. + +Example with Copilot usage:: + + alert: + - "twilio" + twilio_use_copilot: True + twilio_to_number: "0123456789" + twilio_auth_token: "abcdefghijklmnopqrstuvwxyz012345" + twilio_account_sid: "ABCDEFGHIJKLMNOPQRSTUVWXYZ01234567" + twilio_message_service_sid: "ABCDEFGHIJKLMNOPQRSTUVWXYZ01234567" + +Example with SMS usage:: + alert: + - "twilio" + twilio_to_number: "0123456789" + twilio_from_number: "9876543210" + twilio_auth_token: "abcdefghijklmnopqrstuvwxyz012345" + twilio_account_sid: "ABCDEFGHIJKLMNOPQRSTUVWXYZ01234567" Zabbix -~~~~~~~~~~~ +~~~~~~ -Zabbix will send notification to a Zabbix server. The item in the host specified receive a 1 value for each hit. For example, if the elastic query produce 3 hits in the last execution of elastalert, three '1' (integer) values will be send from elastalert to Zabbix Server. If the query have 0 hits, any value will be sent. +Zabbix will send notification to a Zabbix server. The item in the host specified receive a 1 value for each hit. For example, if the elastic query produce 3 hits in the last execution of ElastAlert 2, three '1' (integer) values will be send from elastalert to Zabbix Server. If the query have 0 hits, any value will be sent. Required: -``zbx_sender_host``: The address where zabbix server is running. -``zbx_sender_port``: The port where zabbix server is listenning. -``zbx_host``: This field setup the host in zabbix that receives the value sent by Elastalert. -``zbx_item``: This field setup the item in the host that receives the value sent by Elastalert. +``zbx_sender_host``: The address where zabbix server is running, defaults to ``'localhost'``. + +``zbx_sender_port``: The port where zabbix server is listenning, defaults to ``10051``. + +``zbx_host_from_field``: This field allows to specify ``zbx_host`` value from the available terms. Defaults to ``False``. + +``zbx_host``: This field setup the host in zabbix that receives the value sent by ElastAlert 2. + +``zbx_key``: This field setup the key in the host that receives the value sent by ElastAlert 2. + +Example usage:: + + alert: + - "zabbix" + zbx_sender_host: "zabbix-server" + zbx_sender_port: 10051 + zbx_host: "test001" + zbx_key: "sender_load1" + +To specify ``zbx_host`` depending on the available elasticsearch field, zabbix alerter has ``zbx_host_from_field`` option. + +Example usage:: + + alert: + - "zabbix" + zbx_sender_host: "zabbix-server" + zbx_sender_port: 10051 + zbx_host_from_field: True + zbx_host: "hostname" + zbx_key: "sender_load1" + +where ``hostname`` is the available elasticsearch field. diff --git a/docs/source/running_elastalert.rst b/docs/source/running_elastalert.rst index 7fdf1eeba..dfa15364d 100644 --- a/docs/source/running_elastalert.rst +++ b/docs/source/running_elastalert.rst @@ -1,74 +1,296 @@ .. 
_tutorial: -Running ElastAlert for the First Time -===================================== +Running ElastAlert 2 +******************** + +ElastAlert 2 can easily be run as :ref:`a Docker container` +or directly on your machine as :ref:`a Python package`. +If you are not interested in modifying the internals of ElastAlert 2, the Docker +container is recommended for ease of use. + +.. _elastalert-arguments: + +Configuration flags +=================== + +However you choose to run ElastAlert 2, the ElastAlert 2 process is started by invoking +``python -m elastalert.elastalert``. + +This command accepts several configuration flags: + +``--config`` will specify the configuration file to use. The default is +``config.yaml``. See :ref:`here` to understand what behaviour +can be configured in this file. + +``--debug`` will run ElastAlert 2 in debug mode. This will increase the logging +verboseness, change all alerts to ``DebugAlerter``, which prints alerts and +suppresses their normal action, and skips writing search and alert metadata back +to Elasticsearch. Not compatible with `--verbose`. + +``--end `` will force ElastAlert 2 to stop querying after the given +time, instead of the default, querying to the present time. This really only +makes sense when running standalone. The timestamp is formatted as +``YYYY-MM-DDTHH:MM:SS`` (UTC) or with timezone ``YYYY-MM-DDTHH:MM:SS-XX:00`` +(UTC-XX). + +``--es_debug`` will enable logging for all queries made to Elasticsearch. + +``--es_debug_trace `` will enable logging curl commands for all +queries made to Elasticsearch to the specified log file. ``--es_debug_trace`` is +passed through to `elasticsearch.py +`_ which +logs `localhost:9200` instead of the actual ``es_host``:``es_port``. + +``--pin_rules`` will stop ElastAlert 2 from loading, reloading or removing rules +based on changes to their config files. + +``--prometheus_port`` exposes ElastAlert 2 `Prometheus metrics `_ on the specified +port. Prometheus metrics disabled by default. + +``--rule `` will only run the given rule. The rule file may be a +complete file path or a filename in ``rules_folder`` or its subdirectories. + +``--silence =`` will silence the alerts for a given rule for a +period of time. The rule must be specified using ``--rule``. is one of +days, weeks, hours, minutes or seconds. is an integer. For example, +``--rule noisy_rule.yaml --silence hours=4`` will stop noisy_rule from +generating any alerts for 4 hours. + +``--silence_qk_value `` will force ElastAlert 2 to begin querying from the given +time, instead of the default, querying from the present. The timestamp should be +ISO8601, e.g. ``YYYY-MM-DDTHH:MM:SS`` (UTC) or with timezone +``YYYY-MM-DDTHH:MM:SS-08:00`` (PST). Note that if querying over a large date +range, no alerts will be sent until that rule has finished querying over the +entire time period. To force querying from the current time, use "NOW". + +``--verbose`` will increase the logging verboseness, which allows you to see +information about the state of queries. Not compatible with `--debug`. + +.. _docker-instructions: + +As a Docker container +===================== + +If you're interested in a pre-built Docker image check out the +elastalert2 container image on `Docker Hub `_ or `GitHub Container Registry `_. Both images are published for each release. Use GitHub Container Registry if you are running into Docker Hub usage limits. + +Be aware that the ``latest`` tag of the image represents the latest commit into +the master branch. 
If you prefer to upgrade more slowly you will need utilize a +versioned tag, such as ``2.9.0`` instead, or ``2`` if you are comfortable with +always using the latest released version of ElastAlert 2. + +A properly configured config.yaml file must be mounted into the container during +startup of the container. Use the `example file +`_ +as a template. + +The following example assumes Elasticsearch container has already been started with Docker. +This example also assumes both the Elasticsearch and ElastAlert2 containers are using the default Docker network: ``es_default`` + +Create a rule directory and rules file in addition to elastalert.yaml, and then mount both into the ElastAlert 2 container: + +.. code-block:: + + elastalert.yaml + rules/ + a.yaml + +elastalert.yaml + +.. code-block:: + + rules_folder: /opt/elastalert/rules + + run_every: + seconds: 10 + + buffer_time: + minutes: 15 + + es_host: elasticsearch + es_port: 9200 + + writeback_index: elastalert_status + + alert_time_limit: + days: 2 + +a.yaml + +.. code-block:: + + name: "a" + type: "frequency" + index: "mariadblog-*" + is_enabled: true + num_events: 2 + realert: + minutes: 5 + terms_size: 50 + timeframe: + minutes: 5 + timestamp_field: "@timestamp" + timestamp_type: "iso" + use_strftime_index: false + alert_subject: "Test {} 123 aa☃" + alert_subject_args: + - "message" + - "@log_name" + alert_text: "Test {} 123 bb☃" + alert_text_args: + - "message" + filter: + - query: + query_string: + query: "@timestamp:*" + alert: + - "slack" + slack_webhook_url: 'https://hooks.slack.com/services/xxxxxxxxx' + slack_channel_override: "#abc" + slack_emoji_override: ":kissing_cat:" + slack_msg_color: "warning" + slack_parse_override: "none" + slack_username_override: "elastalert" + +Starting the container via Docker Hub (hub.docker.com) + +.. code-block:: + + docker run --net=es_default -d --name elastalert --restart=always \ + -v $(pwd)/elastalert.yaml:/opt/elastalert/config.yaml \ + -v $(pwd)/rules:/opt/elastalert/rules \ + jertel/elastalert2 --verbose + + docker logs -f elastalert + +Starting the container via GitHub Container Registry (ghcr.io) + +.. code-block:: + + docker run --net=es_default -d --name elastalert --restart=always \ + -v $(pwd)/elastalert.yaml:/opt/elastalert/config.yaml \ + -v $(pwd)/rules:/opt/elastalert/rules \ + ghcr.io/jertel/elastalert2/elastalert2 --verbose + + docker logs -f elastalert + +For developers, the below command can be used to build the image locally: + +.. code-block:: + + docker build . -t elastalert2 + + +.. _kubernetes-instructions: + +As a Kubernetes deployment +========================== + +The Docker container for ElastAlert 2 can be used directly as a Kubernetes +deployment, but for convenience, a Helm chart is also available. See the +instructions provided `on Github +`_ +for more information on how to install, configure, and run the chart. + +.. _python-instructions: + +As a Python package +=================== Requirements ------------ -- Elasticsearch +- Elasticsearch 7.x or 8.x, or OpenSearch 1.x or 2.x - ISO8601 or Unix timestamped data -- Python 3.6 -- pip, see requirements.txt -- Packages on Ubuntu 14.x: python-pip python-dev libffi-dev libssl-dev +- Python 3.11. Require OpenSSL 1.1.1 or newer. +- pip +- Packages on Ubuntu 21.x: build-essential python3-pip python3.11 python3.11-dev libffi-dev libssl-dev + +If you want to install python 3.11 on CentOS, please install python 3.11 from the source code after installing 'Development Tools'. 
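+
+A minimal sketch of installing the prerequisite packages listed above on Ubuntu follows. The package names are taken from the Ubuntu list in this section; exact names may vary between Ubuntu releases, so adjust them for your distribution as needed:
+
+.. code-block::
+
+    # Install build tools, Python 3.11, and the headers needed to build ElastAlert 2's dependencies
+    sudo apt-get update
+    sudo apt-get install build-essential python3-pip python3.11 python3.11-dev libffi-dev libssl-dev
+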
Downloading and Configuring --------------------------- -You can either install the latest released version of ElastAlert using pip:: +You can either install the latest released version of ElastAlert 2 using pip:: - $ pip install elastalert + $ pip install elastalert2 -or you can clone the ElastAlert repository for the most recent changes:: +or you can clone the ElastAlert2 repository for the most recent changes:: - $ git clone https://github.com/Yelp/elastalert.git + $ git clone https://github.com/jertel/elastalert2.git Install the module:: $ pip install "setuptools>=11.3" $ python setup.py install -Depending on the version of Elasticsearch, you may need to manually install the correct version of elasticsearch-py. - -Elasticsearch 5.0+:: +Next, open up ``examples/config.yaml.example``. In it, you will find several configuration +options. ElastAlert 2 may be run without changing any of these settings. - $ pip install "elasticsearch>=5.0.0" +``rules_folder`` is where ElastAlert 2 will load rule configuration files from. It +will attempt to load every .yaml file in the folder. Without any valid rules, +ElastAlert 2 will not start. ElastAlert 2 will also load new rules, stop running +missing rules, and restart modified rules as the files in this folder change. +For this tutorial, we will use the ``examples/rules`` folder. -Elasticsearch 2.X:: +``run_every`` is how often ElastAlert 2 will query Elasticsearch. - $ pip install "elasticsearch<3.0.0" +``buffer_time`` is the size of the query window, stretching backwards from the +time each query is run. This value is ignored for rules where +``use_count_query`` or ``use_terms_query`` is set to true. -Next, open up config.yaml.example. In it, you will find several configuration options. ElastAlert may be run without changing any of these settings. +``es_host`` is the primary address of an Elasticsearch cluster where ElastAlert 2 will +store data about its state, queries run, alerts, and errors. Each rule may also +use a different Elasticsearch host to query against. For multiple host Elasticsearch +clusters see ``es_hosts`` parameter. -``rules_folder`` is where ElastAlert will load rule configuration files from. It will attempt to load every .yaml file in the folder. Without any valid rules, ElastAlert will not start. ElastAlert will also load new rules, stop running missing rules, and restart modified rules as the files in this folder change. For this tutorial, we will use the example_rules folder. - -``run_every`` is how often ElastAlert will query Elasticsearch. - -``buffer_time`` is the size of the query window, stretching backwards from the time each query is run. This value is ignored for rules where ``use_count_query`` or ``use_terms_query`` is set to true. +``es_port`` is the port corresponding to ``es_host``. -``es_host`` is the address of an Elasticsearch cluster where ElastAlert will store data about its state, queries run, alerts, and errors. Each rule may also use a different Elasticsearch host to query against. +``es_hosts`` is the list of addresses of the nodes of the Elasticsearch cluster. This +parameter can be used for high availability purposes, but the primary host must also +be specified in the ``es_host`` parameter. The ``es_hosts`` parameter can be overridden +within each rule. This value can be specified as ``host:port`` if overriding the default +port. -``es_port`` is the port corresponding to ``es_host``. +``use_ssl``: Optional; whether or not to connect to ``es_host`` using TLS; set +to ``True`` or ``False``. 
-``use_ssl``: Optional; whether or not to connect to ``es_host`` using TLS; set to ``True`` or ``False``. +``verify_certs``: Optional; whether or not to verify TLS certificates; set to +``True`` or ``False``. The default is ``True`` -``verify_certs``: Optional; whether or not to verify TLS certificates; set to ``True`` or ``False``. The default is ``True`` +``ssl_show_warn``: Optional; suppress TLS and certificate related warnings; set +to ``True`` or ``False``. The default is ``True``. -``client_cert``: Optional; path to a PEM certificate to use as the client certificate +``client_cert``: Optional; path to a PEM certificate to use as the client +certificate ``client_key``: Optional; path to a private key file to use as the client key -``ca_certs``: Optional; path to a CA cert bundle to use to verify SSL connections +``ca_certs``: Optional; path to a CA cert bundle to use to verify SSL +connections ``es_username``: Optional; basic-auth username for connecting to ``es_host``. ``es_password``: Optional; basic-auth password for connecting to ``es_host``. +``es_bearer``: Optional; bearer token authorization for connecting to +``es_host``. If bearer token is specified, login and password are ignored. + ``es_url_prefix``: Optional; URL prefix for the Elasticsearch endpoint. -``es_send_get_body_as``: Optional; Method for querying Elasticsearch - ``GET``, ``POST`` or ``source``. The default is ``GET`` +``statsd_instance_tag``: Optional; prefix for statsd metrics. + +``statsd_host``: Optional; statsd host. + +``es_send_get_body_as``: Optional; Method for querying Elasticsearch - ``GET``, +``POST`` or ``source``. The default is ``GET`` -``writeback_index`` is the name of the index in which ElastAlert will store data. We will create this index later. +``writeback_index`` is the name of the index in which ElastAlert 2 will store +data. We will create this index later. ``alert_time_limit`` is the retry window for failed alerts. @@ -77,9 +299,15 @@ Save the file as ``config.yaml`` Setting Up Elasticsearch ------------------------ -ElastAlert saves information and metadata about its queries and its alerts back to Elasticsearch. This is useful for auditing, debugging, and it allows ElastAlert to restart and resume exactly where it left off. This is not required for ElastAlert to run, but highly recommended. +ElastAlert 2 saves information and metadata about its queries and its alerts back +to Elasticsearch. This is useful for auditing, debugging, and it allows +ElastAlert 2 to restart and resume exactly where it left off. This is not required +for ElastAlert 2 to run, but highly recommended. -First, we need to create an index for ElastAlert to write to by running ``elastalert-create-index`` and following the instructions:: +First, we need to create an index for ElastAlert 2 to write to by running +``elastalert-create-index`` and following the instructions. Note that this manual +step is only needed by users that run ElastAlert 2 directly on the host, whereas +container users will automatically see these indexes created on startup.:: $ elastalert-create-index New index name (Default elastalert_status) @@ -87,14 +315,17 @@ First, we need to create an index for ElastAlert to write to by running ``elasta New index elastalert_status created Done! -For information about what data will go here, see :ref:`ElastAlert Metadata Index `. +For information about what data will go here, see :ref:`ElastAlert 2 Metadata +Index `. 
Creating a Rule --------------- -Each rule defines a query to perform, parameters on what triggers a match, and a list of alerts to fire for each match. We are going to use ``example_rules/example_frequency.yaml`` as a template:: +Each rule defines a query to perform, parameters on what triggers a match, and a +list of alerts to fire for each match. We are going to use +``examples/rules/example_frequency.yaml`` as a template:: - # From example_rules/example_frequency.yaml + # From examples/rules/example_frequency.yaml es_host: elasticsearch.example.com es_port: 14900 name: Example rule @@ -102,7 +333,7 @@ Each rule defines a query to perform, parameters on what triggers a match, and a index: logstash-* num_events: 50 timeframe: - hours: 4 + hours: 4 filter: - term: some_field: "some_value" @@ -111,40 +342,61 @@ Each rule defines a query to perform, parameters on what triggers a match, and a email: - "elastalert@example.com" -``es_host`` and ``es_port`` should point to the Elasticsearch cluster we want to query. +``es_host`` and ``es_port`` should point to the Elasticsearch cluster we want to +query. -``name`` is the unique name for this rule. ElastAlert will not start if two rules share the same name. +``name`` is the unique name for this rule. ElastAlert 2 will not start if two +rules share the same name. -``type``: Each rule has a different type which may take different parameters. The ``frequency`` type means "Alert when more than ``num_events`` occur within ``timeframe``." For information other types, see :ref:`Rule types `. +``type``: Each rule has a different type which may take different parameters. +The ``frequency`` type means "Alert when more than ``num_events`` occur within +``timeframe``." For information on other types, see :ref:`Rule types `. -``index``: The name of the index(es) to query. If you are using Logstash, by default the indexes will match ``"logstash-*"``. +``index``: The name of the index(es) to query. If you are using Logstash, by +default the indexes will match ``"logstash-*"``. -``num_events``: This parameter is specific to ``frequency`` type and is the threshold for when an alert is triggered. +``num_events``: This parameter is specific to ``frequency`` type and is the +threshold for when an alert is triggered. ``timeframe`` is the time period in which ``num_events`` must occur. -``filter`` is a list of Elasticsearch filters that are used to filter results. Here we have a single term filter for documents with ``some_field`` matching ``some_value``. See :ref:`Writing Filters For Rules ` for more information. If no filters are desired, it should be specified as an empty list: ``filter: []`` +``filter`` is a list of Elasticsearch filters that are used to filter results. +Here we have a single term filter for documents with ``some_field`` matching +``some_value``. See :ref:`Writing Filters For Rules ` for more +information. If no filters are desired, it should be specified as an empty list: +``filter: []`` -``alert`` is a list of alerts to run on each match. For more information on alert types, see :ref:`Alerts `. The email alert requires an SMTP server for sending mail. By default, it will attempt to use localhost. This can be changed with the ``smtp_host`` option. +``alert`` is a list of alerts to run on each match. For more information on +alert types, see :ref:`Alerts `. The email alert requires an SMTP server +for sending mail. By default, it will attempt to use localhost. This can be +changed with the ``smtp_host`` option.
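As an example of the last point, a rule that should send mail through a non-local SMTP server could extend its alert configuration roughly as follows; the hostname is purely illustrative::

    alert:
      - "email"
    email:
      - "elastalert@example.com"
    smtp_host: mail.example.com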
``email`` is a list of addresses to which alerts will be sent. -There are many other optional configuration options, see :ref:`Common configuration options `. +There are many other optional configuration options, see :ref:`Common +configuration options `. -All documents must have a timestamp field. ElastAlert will try to use ``@timestamp`` by default, but this can be changed with the ``timestamp_field`` option. By default, ElastAlert uses ISO8601 timestamps, though unix timestamps are supported by setting ``timestamp_type``. +All documents must have a timestamp field. ElastAlert 2 will try to use +``@timestamp`` by default, but this can be changed with the ``timestamp_field`` +option. By default, ElastAlert 2 uses ISO8601 timestamps, though unix timestamps +are supported by setting ``timestamp_type``. -As is, this rule means "Send an email to elastalert@example.com when there are more than 50 documents with ``some_field == some_value`` within a 4 hour period." +As is, this rule means "Send an email to elastalert@example.com when there are +more than 50 documents with ``some_field == some_value`` within a 4 hour +period." Testing Your Rule ----------------- -Running the ``elastalert-test-rule`` tool will test that your config file successfully loads and run it in debug mode over the last 24 hours:: +Running the ``elastalert-test-rule`` tool will test that your config file +successfully loads and run it in debug mode over the last 24 hours:: - $ elastalert-test-rule example_rules/example_frequency.yaml + $ elastalert-test-rule examples/rules/example_frequency.yaml -If you want to specify a configuration file to use, you can run it with the config flag:: +If you want to specify a configuration file to use, you can run it with the +config flag:: - $ elastalert-test-rule --config example_rules/example_frequency.yaml + $ elastalert-test-rule --config examples/rules/example_frequency.yaml The configuration preferences will be loaded as follows: 1. Configurations specified in the yaml file. @@ -153,10 +405,12 @@ The configuration preferences will be loaded as follows: See :ref:`the testing section for more details ` -Running ElastAlert ------------------- +Running ElastAlert 2 +-------------------- -There are two ways of invoking ElastAlert. As a daemon, through Supervisor (http://supervisord.org/), or directly with Python. For easier debugging purposes in this tutorial, we will invoke it directly:: +There are two ways of invoking ElastAlert 2. As a daemon, through Supervisor +(http://supervisord.org/), or directly with Python. For easier debugging +purposes in this tutorial, we will invoke it directly:: $ python -m elastalert.elastalert --verbose --rule example_frequency.yaml # or use the entry point: elastalert --verbose --rule ... No handlers could be found for logger "Elasticsearch" @@ -165,27 +419,46 @@ There are two ways of invoking ElastAlert. As a daemon, through Supervisor (http INFO:root:Ran Example rule from 1-15 14:22 PST to 1-15 15:07 PST: 5 query hits (0 already seen), 0 matches, 0 alerts sent INFO:root:Sleeping for 297 seconds -ElastAlert uses the python logging system and ``--verbose`` sets it to display INFO level messages. ``--rule example_frequency.yaml`` specifies the rule to run, otherwise ElastAlert will attempt to load the other rules in the example_rules folder. +ElastAlert 2 uses the python logging system and ``--verbose`` sets it to display +INFO level messages. 
``--rule example_frequency.yaml`` specifies the rule to +run, otherwise ElastAlert 2 will attempt to load the other rules in the +``examples/rules`` folder. Let's break down the response to see what's happening. ``Queried rule Example rule from 1-15 14:22 PST to 1-15 15:07 PST: 5 hits`` -ElastAlert periodically queries the most recent ``buffer_time`` (default 45 minutes) for data matching the filters. Here we see that it matched 5 hits. +ElastAlert 2 periodically queries the most recent ``buffer_time`` (default 45 +minutes) for data matching the filters. Here we see that it matched 5 hits: + +.. code-block:: + + POST http://elasticsearch.example.com:14900/elastalert_status/elastalert_status?op_type=create [status:201 request:0.025s] -``POST http://elasticsearch.example.com:14900/elastalert_status/elastalert_status?op_type=create [status:201 request:0.025s]`` +This line showing that ElastAlert 2 uploaded a document to the elastalert_status +index with information about the query it just made: -This line showing that ElastAlert uploaded a document to the elastalert_status index with information about the query it just made. +.. code-block:: -``Ran Example rule from 1-15 14:22 PST to 1-15 15:07 PST: 5 query hits (0 already seen), 0 matches, 0 alerts sent`` + Ran Example rule from 1-15 14:22 PST to 1-15 15:07 PST: 5 query hits (0 already seen), 0 matches, 0 alerts sent -The line means ElastAlert has finished processing the rule. For large time periods, sometimes multiple queries may be run, but their data will be processed together. ``query hits`` is the number of documents that are downloaded from Elasticsearch, ``already seen`` refers to documents that were already counted in a previous overlapping query and will be ignored, ``matches`` is the number of matches the rule type outputted, and ``alerts sent`` is the number of alerts actually sent. This may differ from ``matches`` because of options like ``realert`` and ``aggregation`` or because of an error. +The line means ElastAlert 2 has finished processing the rule. For large time +periods, sometimes multiple queries may be run, but their data will be processed +together. ``query hits`` is the number of documents that are downloaded from +Elasticsearch, ``already seen`` refers to documents that were already counted in +a previous overlapping query and will be ignored, ``matches`` is the number of +matches the rule type outputted, and ``alerts sent`` is the number of alerts +actually sent. This may differ from ``matches`` because of options like +``realert`` and ``aggregation`` or because of an error. ``Sleeping for 297 seconds`` -The default ``run_every`` is 5 minutes, meaning ElastAlert will sleep until 5 minutes have elapsed from the last cycle before running queries for each rule again with time ranges shifted forward 5 minutes. +The default ``run_every`` is 5 minutes, meaning ElastAlert 2 will sleep until 5 +minutes have elapsed from the last cycle before running queries for each rule +again with time ranges shifted forward 5 minutes. -Say, over the next 297 seconds, 46 more matching documents were added to Elasticsearch:: +Say, over the next 297 seconds, 46 more matching documents were added to +Elasticsearch:: INFO:root:Queried rule Example rule from 1-15 14:27 PST to 1-15 15:12 PST: 51 hits @@ -204,11 +477,29 @@ The body of the email will contain something like:: If an error occurred, such as an unreachable SMTP server, you may see: +.. 
code-block:: + + ERROR:root:Error while running alert email: Error connecting to SMTP host: [Errno 61] Connection refused + + +Note that if you stop ElastAlert 2 and then run it again later, it will look up +``elastalert_status`` and begin querying at the end time of the last query. This +is to prevent duplication or skipping of alerts if ElastAlert 2 is restarted. + +By using the ``--debug`` flag instead of ``--verbose``, the body of email will +instead be logged and the email will not be sent. In addition, the queries will +not be saved to ``elastalert_status``. -``ERROR:root:Error while running alert email: Error connecting to SMTP host: [Errno 61] Connection refused`` +Disabling a Rule +---------------- +To stop a rule from executing, add or adjust the `is_enabled` option inside the +rule's YAML file to `false`. When ElastAlert 2 reloads the rules it will detect +that the rule has been disabled and prevent it from executing. The rule reload +interval defaults to 5 minutes but can be adjusted via the `run_every` +configuration option. -Note that if you stop ElastAlert and then run it again later, it will look up ``elastalert_status`` and begin querying -at the end time of the last query. This is to prevent duplication or skipping of alerts if ElastAlert is restarted. +Optionally, once a rule has been disabled it is safe to remove the rule file, if +there is no intention of re-activating the rule. However, be aware that removing +a rule file without first disabling it will _not_ disable the rule! -By using the ``--debug`` flag instead of ``--verbose``, the body of email will instead be logged and the email will not be sent. In addition, the queries will not be saved to ``elastalert_status``. diff --git a/elastalert/__init__.py b/elastalert/__init__.py index 55bfdb32f..941ad8575 100644 --- a/elastalert/__init__.py +++ b/elastalert/__init__.py @@ -16,14 +16,17 @@ def __init__(self, conf): """ :arg conf: es_conn_config dictionary. Ref. :func:`~util.build_es_conn_config` """ - super(ElasticSearchClient, self).__init__(host=conf['es_host'], + super(ElasticSearchClient, self).__init__(host=conf.get('es_host'), + hosts=conf.get('es_hosts'), port=conf['es_port'], url_prefix=conf['es_url_prefix'], use_ssl=conf['use_ssl'], verify_certs=conf['verify_certs'], ca_certs=conf['ca_certs'], + ssl_show_warn=conf['ssl_show_warn'], connection_class=RequestsHttpConnection, http_auth=conf['http_auth'], + headers=conf['headers'], timeout=conf['es_conn_timeout'], send_get_body_as=conf['send_get_body_as'], client_cert=conf['client_cert'], @@ -44,55 +47,25 @@ def es_version(self): Returns the reported version from the Elasticsearch server. 
""" if self._es_version is None: - for retry in range(3): - try: - self._es_version = self.info()['version']['number'] - break - except TransportError: - if retry == 2: - raise - time.sleep(3) - return self._es_version + self._es_version = util.get_version_from_cluster_info(self) - def is_atleastfive(self): - """ - Returns True when the Elasticsearch server version >= 5 - """ - return int(self.es_version.split(".")[0]) >= 5 + return self._es_version - def is_atleastsix(self): + def is_atleastseven(self): """ - Returns True when the Elasticsearch server version >= 6 + Returns True when the Elasticsearch server version >= 7 """ - return int(self.es_version.split(".")[0]) >= 6 + return int(self.es_version.split(".")[0]) >= 7 - def is_atleastsixtwo(self): + def is_atleasteight(self): """ - Returns True when the Elasticsearch server version >= 6.2 + Returns True when the Elasticsearch server version >= 8 """ - major, minor = list(map(int, self.es_version.split(".")[:2])) - return major > 6 or (major == 6 and minor >= 2) + return int(self.es_version.split(".")[0]) >= 8 - def is_atleastsixsix(self): - """ - Returns True when the Elasticsearch server version >= 6.6 - """ - major, minor = list(map(int, self.es_version.split(".")[:2])) - return major > 6 or (major == 6 and minor >= 6) - - def is_atleastseven(self): - """ - Returns True when the Elasticsearch server version >= 7 - """ - return int(self.es_version.split(".")[0]) >= 7 def resolve_writeback_index(self, writeback_index, doc_type): - """ In ES6, you cannot have multiple _types per index, - therefore we use self.writeback_index as the prefix for the actual - index name, based on doc_type. """ - if not self.is_atleastsix(): - return writeback_index - elif doc_type == 'silence': + if doc_type == 'silence': return writeback_index + '_silence' elif doc_type == 'past_elastalert': return writeback_index + '_past' @@ -101,157 +74,3 @@ def resolve_writeback_index(self, writeback_index, doc_type): elif doc_type == 'elastalert_error': return writeback_index + '_error' return writeback_index - - @query_params( - "_source", - "_source_exclude", - "_source_excludes", - "_source_include", - "_source_includes", - "allow_no_indices", - "allow_partial_search_results", - "analyze_wildcard", - "analyzer", - "batched_reduce_size", - "default_operator", - "df", - "docvalue_fields", - "expand_wildcards", - "explain", - "from_", - "ignore_unavailable", - "lenient", - "max_concurrent_shard_requests", - "pre_filter_shard_size", - "preference", - "q", - "rest_total_hits_as_int", - "request_cache", - "routing", - "scroll", - "search_type", - "seq_no_primary_term", - "size", - "sort", - "stats", - "stored_fields", - "suggest_field", - "suggest_mode", - "suggest_size", - "suggest_text", - "terminate_after", - "timeout", - "track_scores", - "track_total_hits", - "typed_keys", - "version", - ) - def deprecated_search(self, index=None, doc_type=None, body=None, params=None): - """ - Execute a search query and get back search hits that match the query. 
- ``_ - :arg index: A list of index names to search, or a string containing a - comma-separated list of index names to search; use `_all` - or empty string to perform the operation on all indices - :arg doc_type: A comma-separated list of document types to search; leave - empty to perform the operation on all types - :arg body: The search definition using the Query DSL - :arg _source: True or false to return the _source field or not, or a - list of fields to return - :arg _source_exclude: A list of fields to exclude from the returned - _source field - :arg _source_include: A list of fields to extract and return from the - _source field - :arg allow_no_indices: Whether to ignore if a wildcard indices - expression resolves into no concrete indices. (This includes `_all` - string or when no indices have been specified) - :arg allow_partial_search_results: Set to false to return an overall - failure if the request would produce partial results. Defaults to - True, which will allow partial results in the case of timeouts or - partial failures - :arg analyze_wildcard: Specify whether wildcard and prefix queries - should be analyzed (default: false) - :arg analyzer: The analyzer to use for the query string - :arg batched_reduce_size: The number of shard results that should be - reduced at once on the coordinating node. This value should be used - as a protection mechanism to reduce the memory overhead per search - request if the potential number of shards in the request can be - large., default 512 - :arg default_operator: The default operator for query string query (AND - or OR), default 'OR', valid choices are: 'AND', 'OR' - :arg df: The field to use as default where no field prefix is given in - the query string - :arg docvalue_fields: A comma-separated list of fields to return as the - docvalue representation of a field for each hit - :arg expand_wildcards: Whether to expand wildcard expression to concrete - indices that are open, closed or both., default 'open', valid - choices are: 'open', 'closed', 'none', 'all' - :arg explain: Specify whether to return detailed information about score - computation as part of a hit - :arg from\\_: Starting offset (default: 0) - :arg ignore_unavailable: Whether specified concrete indices should be - ignored when unavailable (missing or closed) - :arg lenient: Specify whether format-based query failures (such as - providing text to a numeric field) should be ignored - :arg max_concurrent_shard_requests: The number of concurrent shard - requests this search executes concurrently. This value should be - used to limit the impact of the search on the cluster in order to - limit the number of concurrent shard requests, default 'The default - grows with the number of nodes in the cluster but is at most 256.' - :arg pre_filter_shard_size: A threshold that enforces a pre-filter - roundtrip to prefilter search shards based on query rewriting if - the number of shards the search request expands to exceeds the - threshold. This filter roundtrip can limit the number of shards - significantly if for instance a shard can not match any documents - based on it's rewrite method ie. if date filters are mandatory to - match but the shard bounds and the query are disjoint., default 128 - :arg preference: Specify the node or shard the operation should be - performed on (default: random) - :arg q: Query in the Lucene query string syntax - :arg rest_total_hits_as_int: This parameter is used to restore the total hits as a number - in the response. 
This param is added version 6.x to handle mixed cluster queries where nodes - are in multiple versions (7.0 and 6.latest) - :arg request_cache: Specify if request cache should be used for this - request or not, defaults to index level setting - :arg routing: A comma-separated list of specific routing values - :arg scroll: Specify how long a consistent view of the index should be - maintained for scrolled search - :arg search_type: Search operation type, valid choices are: - 'query_then_fetch', 'dfs_query_then_fetch' - :arg size: Number of hits to return (default: 10) - :arg sort: A comma-separated list of : pairs - :arg stats: Specific 'tag' of the request for logging and statistical - purposes - :arg stored_fields: A comma-separated list of stored fields to return as - part of a hit - :arg suggest_field: Specify which field to use for suggestions - :arg suggest_mode: Specify suggest mode, default 'missing', valid - choices are: 'missing', 'popular', 'always' - :arg suggest_size: How many suggestions to return in response - :arg suggest_text: The source text for which the suggestions should be - returned - :arg terminate_after: The maximum number of documents to collect for - each shard, upon reaching which the query execution will terminate - early. - :arg timeout: Explicit operation timeout - :arg track_scores: Whether to calculate and return scores even if they - are not used for sorting - :arg track_total_hits: Indicate if the number of documents that match - the query should be tracked - :arg typed_keys: Specify whether aggregation and suggester names should - be prefixed by their respective types in the response - :arg version: Specify whether to return document version as part of a - hit - """ - # from is a reserved word so it cannot be used, use from_ instead - if "from_" in params: - params["from"] = params.pop("from_") - - if not index: - index = "_all" - res = self.transport.perform_request( - "GET", _make_path(index, doc_type, "_search"), params=params, body=body - ) - if type(res) == list or type(res) == tuple: - return res[1] - return res diff --git a/elastalert/alerters/__init__.py b/elastalert/alerters/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/elastalert/alerters/alerta.py b/elastalert/alerters/alerta.py new file mode 100644 index 000000000..a718e059f --- /dev/null +++ b/elastalert/alerters/alerta.py @@ -0,0 +1,120 @@ +import datetime +import json + +import requests +from requests import RequestException + +from elastalert.alerts import Alerter, DateTimeEncoder +from elastalert.util import lookup_es_key, EAException, elastalert_logger, resolve_string, ts_to_dt + + +class AlertaAlerter(Alerter): + """ Creates an Alerta event for each alert """ + required_options = frozenset(['alerta_api_url']) + + def __init__(self, rule): + super(AlertaAlerter, self).__init__(rule) + + # Setup defaul parameters + self.url = self.rule.get('alerta_api_url', None) + self.api_key = self.rule.get('alerta_api_key', None) + self.timeout = self.rule.get('alerta_timeout', 86400) + self.use_match_timestamp = self.rule.get('alerta_use_match_timestamp', False) + self.use_qk_as_resource = self.rule.get('alerta_use_qk_as_resource', False) + self.verify_ssl = not self.rule.get('alerta_api_skip_ssl', False) + self.missing_text = self.rule.get('alert_missing_value', '') + + # Fill up default values of the API JSON payload + self.severity = self.rule.get('alerta_severity', 'warning') + self.resource = self.rule.get('alerta_resource', 'elastalert') + self.environment = 
self.rule.get('alerta_environment', 'Production') + self.origin = self.rule.get('alerta_origin', 'elastalert') + self.service = self.rule.get('alerta_service', ['elastalert']) + self.text = self.rule.get('alerta_text', 'elastalert') + self.type = self.rule.get('alerta_type', 'elastalert') + self.event = self.rule.get('alerta_event', 'elastalert') + self.correlate = self.rule.get('alerta_correlate', []) + self.tags = self.rule.get('alerta_tags', []) + self.group = self.rule.get('alerta_group', '') + self.attributes_keys = self.rule.get('alerta_attributes_keys', []) + self.attributes_values = self.rule.get('alerta_attributes_values', []) + self.value = self.rule.get('alerta_value', '') + + def alert(self, matches): + # Override the resource if requested + if self.use_qk_as_resource and 'query_key' in self.rule and lookup_es_key(matches[0], self.rule['query_key']): + self.resource = lookup_es_key(matches[0], self.rule['query_key']) + + headers = {'content-type': 'application/json'} + if self.api_key is not None: + headers['Authorization'] = 'Key %s' % (self.rule['alerta_api_key']) + alerta_payload = self.get_json_payload(matches) + + try: + response = requests.post(self.url, data=alerta_payload, headers=headers, verify=self.verify_ssl) + response.raise_for_status() + except RequestException as e: + raise EAException("Error posting to Alerta: %s" % e) + elastalert_logger.info("Alert sent to Alerta") + + def create_default_title(self, matches): + title = '%s' % (self.rule['name']) + # If the rule has a query_key, add that value + if 'query_key' in self.rule: + qk = matches[0].get(self.rule['query_key']) + if qk: + title += '.%s' % (qk) + return title + + def get_info(self): + return {'type': 'alerta', + 'alerta_url': self.url} + + def get_json_payload(self, matches): + """ + Builds the API Create Alert body, as in + http://alerta.readthedocs.io/en/latest/api/reference.html#create-an-alert + + For the values that could have references to fields on the match, resolve those references. 
+ + """ + + # use the first match in the list for setting attributes + match = matches[0] + # Using default text and event title if not defined in rule + alerta_text = self.rule['type'].get_match_str([match]) if self.text == '' else resolve_string(self.text, match, self.missing_text) + alerta_event = self.create_default_title([match]) if self.event == '' else resolve_string(self.event, match, self.missing_text) + + match_timestamp = lookup_es_key(match, self.rule.get('timestamp_field', '@timestamp')) + if match_timestamp is None: + match_timestamp = datetime.datetime.now().strftime("%Y-%m-%dT%H:%M:%S.%fZ") + if self.use_match_timestamp: + createTime = ts_to_dt(match_timestamp).strftime("%Y-%m-%dT%H:%M:%S.%fZ") + else: + createTime = datetime.datetime.now().strftime("%Y-%m-%dT%H:%M:%S.%fZ") + + alerta_payload_dict = { + 'resource': resolve_string(self.resource, match, self.missing_text), + 'severity': resolve_string(self.severity, match), + 'timeout': self.timeout, + 'createTime': createTime, + 'type': self.type, + 'environment': resolve_string(self.environment, match, self.missing_text), + 'origin': resolve_string(self.origin, match, self.missing_text), + 'group': resolve_string(self.group, match, self.missing_text), + 'event': alerta_event, + 'text': alerta_text, + 'value': resolve_string(self.value, match, self.missing_text), + 'service': [resolve_string(a_service, match, self.missing_text) for a_service in self.service], + 'tags': [resolve_string(a_tag, match, self.missing_text) for a_tag in self.tags], + 'correlate': [resolve_string(an_event, match, self.missing_text) for an_event in self.correlate], + 'attributes': dict(list(zip(self.attributes_keys, + [resolve_string(a_value, match, self.missing_text) for a_value in self.attributes_values]))), + 'rawData': self.create_alert_body(matches), + } + + try: + payload = json.dumps(alerta_payload_dict, cls=DateTimeEncoder) + except Exception as e: + raise Exception("Error building Alerta request: %s" % e) + return payload diff --git a/elastalert/alerters/alertmanager.py b/elastalert/alerters/alertmanager.py new file mode 100644 index 000000000..ea7bc5528 --- /dev/null +++ b/elastalert/alerters/alertmanager.py @@ -0,0 +1,103 @@ +import json +import warnings + +import requests +from requests import RequestException +from requests.auth import HTTPBasicAuth + +from elastalert.alerts import Alerter, DateTimeEncoder +from elastalert.util import EAException, elastalert_logger, lookup_es_key + + +class AlertmanagerAlerter(Alerter): + """ Sends an alert to Alertmanager """ + + required_options = frozenset({'alertmanager_hosts'}) + + def __init__(self, rule): + super(AlertmanagerAlerter, self).__init__(rule) + self.api_version = self.rule.get('alertmanager_api_version', 'v1') + self.hosts = self.rule.get('alertmanager_hosts') + self.alertname = self.rule.get('alertmanager_alertname', self.rule.get('name')) + self.labels = self.rule.get('alertmanager_labels', dict()) + self.annotations = self.rule.get('alertmanager_annotations', dict()) + self.fields = self.rule.get('alertmanager_fields', dict()) + self.title_labelname = self.rule.get('alertmanager_alert_subject_labelname', 'summary') + self.body_labelname = self.rule.get('alertmanager_alert_text_labelname', 'description') + self.proxies =self.rule.get('alertmanager_proxy', None) + self.ca_certs = self.rule.get('alertmanager_ca_certs') + self.ignore_ssl_errors = self.rule.get('alertmanager_ignore_ssl_errors', False) + self.timeout = self.rule.get('alertmanager_timeout', 10) + 
self.alertmanager_basic_auth_login = self.rule.get('alertmanager_basic_auth_login', None) + self.alertmanager_basic_auth_password = self.rule.get('alertmanager_basic_auth_password', None) + self.tenant = self.rule.get('tenant', "haystack") + + @staticmethod + def _json_or_string(obj): + """helper to encode non-string objects to JSON""" + if isinstance(obj, str): + return obj + return json.dumps(obj, cls=DateTimeEncoder) + + def alert(self, matches): + headers = {'content-type': 'application/json'} + headers.update({"X-Scope-OrgID": self.tenant}) + proxies = {'https': self.proxies} if self.proxies else None + auth = HTTPBasicAuth(self.alertmanager_basic_auth_login, self.alertmanager_basic_auth_password) if self.alertmanager_basic_auth_login else None + + self.labels.update({ + label: self._json_or_string(lookup_es_key(matches[0], term)) + for label, term in self.fields.items()}) + self.labels.update( + alertname=self.alertname, + elastalert_rule=self.rule.get('name')) + if 'json_payload' in self.rule and self.rule['json_payload'] == True: + self.labels.update(query_key_fields=self.rule.get('query_key')) + if self.rule.get('query_key') in matches[0].keys(): + self.labels.update(query_key=matches[0][self.rule.get('query_key')]) + if self.rule.get('alert_field'): + if 'value' in matches[0]: + self.labels.update(query_key_fields=matches[0]['key']) + self.labels.update(query_key=matches[0]['value']) + else: + self.labels.update(query_key_fields=self.rule.get('alert_field')) + self.annotations.update({ + self.title_labelname: self.create_title(matches), + self.body_labelname: self.create_alert_body(matches)}) + payload = { + 'annotations': self.annotations, + 'labels': self.labels + } + + if self.rule.get('timestamp_field') in matches[0]: + payload['labels']['alert_match_time']=matches[0][self.rule.get('timestamp_field')] + + for host in self.hosts: + try: + url = host + + if self.ca_certs: + verify = self.ca_certs + else: + verify = not self.ignore_ssl_errors + if self.ignore_ssl_errors: + requests.packages.urllib3.disable_warnings() + + response = requests.post( + url, + data=json.dumps([payload], cls=DateTimeEncoder), + headers=headers, + verify=verify, + proxies=proxies, + timeout=self.timeout, + auth=auth + ) + + warnings.resetwarnings() + response.raise_for_status() + except RequestException as e: + raise EAException("Error posting to Alertmanager: %s" % e) + elastalert_logger.info("Alert sent to Alertmanager") + + def get_info(self): + return {'type': 'alertmanager'} diff --git a/elastalert/alerters/chatwork.py b/elastalert/alerters/chatwork.py new file mode 100644 index 000000000..ec1bc8031 --- /dev/null +++ b/elastalert/alerters/chatwork.py @@ -0,0 +1,51 @@ +import warnings + +import requests +from requests import RequestException +from requests.auth import HTTPProxyAuth + +from elastalert.alerts import Alerter, BasicMatchString +from elastalert.util import EAException, elastalert_logger + + +class ChatworkAlerter(Alerter): + """ Creates a Chatwork room message for each alert """ + required_options = frozenset(['chatwork_apikey', 'chatwork_room_id']) + + def __init__(self, rule): + super(ChatworkAlerter, self).__init__(rule) + self.chatwork_apikey = self.rule.get('chatwork_apikey', None) + self.chatwork_room_id = self.rule.get('chatwork_room_id', None) + self.url = 'https://api.chatwork.com/v2/rooms/%s/messages' % (self.chatwork_room_id) + self.chatwork_proxy = self.rule.get('chatwork_proxy', None) + self.chatwork_proxy_login = self.rule.get('chatwork_proxy_login', None) + 
self.chatwork_proxy_pass = self.rule.get('chatwork_proxy_pass', None) + + def alert(self, matches): + body = '' + for match in matches: + body += str(BasicMatchString(self.rule, match)) + if len(matches) > 1: + body += '\n----------------------------------------\n' + if len(body) > 2047: + body = body[0:1950] + '\n *message was cropped according to chatwork embed description limits!*' + headers = {'X-ChatWorkToken': self.chatwork_apikey} + # set https proxy, if it was provided + proxies = {'https': self.chatwork_proxy} if self.chatwork_proxy else None + auth = HTTPProxyAuth(self.chatwork_proxy_login, self.chatwork_proxy_pass) if self.chatwork_proxy_login else None + params = {'body': body} + + try: + response = requests.post(self.url, params=params, headers=headers, proxies=proxies, auth=auth) + response.raise_for_status() + except RequestException as e: + raise EAException("Error posting to Chattwork: %s. Details: %s" % (e, "" if e.response is None else e.response.text)) + + elastalert_logger.info( + "Alert sent to Chatwork room %s" % self.chatwork_room_id) + + def get_info(self): + return { + "type": "chatwork", + "chatwork_room_id": self.chatwork_room_id + } diff --git a/elastalert/alerters/command.py b/elastalert/alerters/command.py new file mode 100644 index 000000000..ac15ad05f --- /dev/null +++ b/elastalert/alerters/command.py @@ -0,0 +1,50 @@ +import json +import subprocess + +from elastalert.alerts import Alerter, DateTimeEncoder +from elastalert.util import elastalert_logger, resolve_string, EAException + + +class CommandAlerter(Alerter): + """ Sends an command alert """ + required_options = set(['command']) + + def __init__(self, *args): + super(CommandAlerter, self).__init__(*args) + + self.last_command = [] + + self.shell = False + try: + if isinstance(self.rule['command'], str): + self.shell = True + if '%' in self.rule['command']: + elastalert_logger.warning('Warning! 
You could be vulnerable to shell injection!') + self.rule['command'] = [self.rule['command']] + except KeyError as e: + raise EAException("Error formatting command: %s" % (e)) + + def alert(self, matches): + # Format the command and arguments + command = [resolve_string(command_arg, matches[0]) for command_arg in self.rule['command']] + self.last_command = command + + # Run command and pipe data + try: + subp = subprocess.Popen(command, stdin=subprocess.PIPE, shell=self.shell) + + if self.rule.get('pipe_match_json'): + match_json = json.dumps(matches, cls=DateTimeEncoder) + '\n' + stdout, stderr = subp.communicate(input=match_json.encode()) + elif self.rule.get('pipe_alert_text'): + alert_text = self.create_alert_body(matches) + stdout, stderr = subp.communicate(input=alert_text.encode()) + if self.rule.get("fail_on_non_zero_exit", False) and subp.wait(): + raise EAException("Non-zero exit code while running command %s" % (' '.join(command))) + except OSError as e: + raise EAException("Error while running command %s: %s" % (' '.join(command), e)) + elastalert_logger.info("Alert sent to Command") + + def get_info(self): + return {'type': 'command', + 'command': ' '.join(self.last_command)} diff --git a/elastalert/alerters/datadog.py b/elastalert/alerters/datadog.py new file mode 100644 index 000000000..2fd71be1c --- /dev/null +++ b/elastalert/alerters/datadog.py @@ -0,0 +1,38 @@ +import json + +import requests +from requests import RequestException + +from elastalert.alerts import Alerter, DateTimeEncoder +from elastalert.util import EAException, elastalert_logger + + +class DatadogAlerter(Alerter): + """ Creates a Datadog Event for each alert """ + required_options = frozenset(['datadog_api_key', 'datadog_app_key']) + + def __init__(self, rule): + super(DatadogAlerter, self).__init__(rule) + self.dd_api_key = self.rule.get('datadog_api_key', None) + self.dd_app_key = self.rule.get('datadog_app_key', None) + + def alert(self, matches): + url = 'https://api.datadoghq.com/api/v1/events' + headers = { + 'Content-Type': 'application/json', + 'DD-API-KEY': self.dd_api_key, + 'DD-APPLICATION-KEY': self.dd_app_key + } + payload = { + 'title': self.create_title(matches), + 'text': self.create_alert_body(matches) + } + try: + response = requests.post(url, data=json.dumps(payload, cls=DateTimeEncoder), headers=headers) + response.raise_for_status() + except RequestException as e: + raise EAException('Error posting event to Datadog: %s' % e) + elastalert_logger.info('Alert sent to Datadog') + + def get_info(self): + return {'type': 'datadog'} diff --git a/elastalert/alerters/debug.py b/elastalert/alerters/debug.py new file mode 100644 index 000000000..61aa460e9 --- /dev/null +++ b/elastalert/alerters/debug.py @@ -0,0 +1,19 @@ +from elastalert.alerts import Alerter, BasicMatchString +from elastalert.util import elastalert_logger, lookup_es_key + + +class DebugAlerter(Alerter): + """ The debug alerter uses a Python logger (by default, alerting to terminal). 
""" + + def alert(self, matches): + qk = self.rule.get('query_key', None) + for match in matches: + if qk in match: + elastalert_logger.info( + 'Alert for %s, %s at %s:' % (self.rule['name'], match[qk], lookup_es_key(match, self.rule['timestamp_field']))) + else: + elastalert_logger.info('Alert for %s at %s:' % (self.rule['name'], lookup_es_key(match, self.rule['timestamp_field']))) + elastalert_logger.info(str(BasicMatchString(self.rule, match))) + + def get_info(self): + return {'type': 'debug'} diff --git a/elastalert/alerters/dingtalk.py b/elastalert/alerters/dingtalk.py new file mode 100644 index 000000000..7bb4c0b41 --- /dev/null +++ b/elastalert/alerters/dingtalk.py @@ -0,0 +1,96 @@ +import json +import warnings + +import requests +from requests import RequestException +from requests.auth import HTTPProxyAuth + +from elastalert.alerts import Alerter, DateTimeEncoder +from elastalert.util import EAException, elastalert_logger + + +class DingTalkAlerter(Alerter): + """ Creates a DingTalk room message for each alert """ + required_options = frozenset(['dingtalk_access_token']) + + def __init__(self, rule): + super(DingTalkAlerter, self).__init__(rule) + self.dingtalk_access_token = self.rule.get('dingtalk_access_token', None) + self.dingtalk_webhook_url = 'https://oapi.dingtalk.com/robot/send?access_token=%s' % (self.dingtalk_access_token) + self.dingtalk_msgtype = self.rule.get('dingtalk_msgtype', 'text') + self.dingtalk_single_title = self.rule.get('dingtalk_single_title', 'elastalert') + self.dingtalk_single_url = self.rule.get('dingtalk_single_url', '') + self.dingtalk_btn_orientation = self.rule.get('dingtalk_btn_orientation', '') + self.dingtalk_btns = self.rule.get('dingtalk_btns', []) + self.dingtalk_proxy = self.rule.get('dingtalk_proxy', None) + self.dingtalk_proxy_login = self.rule.get('dingtalk_proxy_login', None) + self.dingtalk_proxy_password = self.rule.get('dingtalk_proxy_pass', None) + + def alert(self, matches): + title = self.create_title(matches) + body = self.create_alert_body(matches) + + proxies = {'https': self.dingtalk_proxy} if self.dingtalk_proxy else None + auth = HTTPProxyAuth(self.dingtalk_proxy_login, self.dingtalk_proxy_password) if self.dingtalk_proxy_login else None + headers = { + 'Content-Type': 'application/json', + 'Accept': 'application/json;charset=utf-8' + } + + if self.dingtalk_msgtype == 'text': + # text + payload = { + 'msgtype': self.dingtalk_msgtype, + 'text': { + 'content': body + } + } + if self.dingtalk_msgtype == 'markdown': + # markdown + payload = { + 'msgtype': self.dingtalk_msgtype, + 'markdown': { + 'title': title, + 'text': body + } + } + if self.dingtalk_msgtype == 'single_action_card': + # singleActionCard + payload = { + 'msgtype': 'actionCard', + 'actionCard': { + 'title': title, + 'text': body, + 'singleTitle': self.dingtalk_single_title, + 'singleURL': self.dingtalk_single_url + } + } + if self.dingtalk_msgtype == 'action_card': + # actionCard + payload = { + 'msgtype': 'actionCard', + 'actionCard': { + 'title': title, + 'text': body + } + } + if self.dingtalk_btn_orientation != '': + payload['actionCard']['btnOrientation'] = self.dingtalk_btn_orientation + if self.dingtalk_btns: + payload['actionCard']['btns'] = self.dingtalk_btns + + try: + response = requests.post(self.dingtalk_webhook_url, data=json.dumps(payload, + cls=DateTimeEncoder), headers=headers, proxies=proxies, auth=auth) + warnings.resetwarnings() + response.raise_for_status() + except RequestException as e: + raise EAException("Error posting to dingtalk: %s" 
% e) + + elastalert_logger.info("Trigger sent to dingtalk") + + def get_info(self): + return { + "type": "dingtalk", + "dingtalk_webhook_url": self.dingtalk_webhook_url + } diff --git a/elastalert/alerters/discord.py b/elastalert/alerters/discord.py new file mode 100644 index 000000000..74caf2702 --- /dev/null +++ b/elastalert/alerters/discord.py @@ -0,0 +1,69 @@ +import json +import warnings + +import requests +from requests import RequestException +from requests.auth import HTTPProxyAuth + +from elastalert.alerts import Alerter, BasicMatchString +from elastalert.util import EAException, elastalert_logger + + +class DiscordAlerter(Alerter): + """ Created a Discord for each alert """ + required_options = frozenset(['discord_webhook_url']) + + def __init__(self, rule): + super(DiscordAlerter, self).__init__(rule) + self.discord_webhook_url = self.rule.get('discord_webhook_url', None) + self.discord_emoji_title = self.rule.get('discord_emoji_title', ':warning:') + self.discord_proxy = self.rule.get('discord_proxy', None) + self.discord_proxy_login = self.rule.get('discord_proxy_login', None) + self.discord_proxy_password = self.rule.get('discord_proxy_password', None) + self.discord_embed_color = self.rule.get('discord_embed_color', 0xffffff) + self.discord_embed_footer = self.rule.get('discord_embed_footer', None) + self.discord_embed_icon_url = self.rule.get('discord_embed_icon_url', None) + + def alert(self, matches): + body = '' + title = u'%s' % (self.create_title(matches)) + for match in matches: + body += str(BasicMatchString(self.rule, match)) + if len(matches) > 1: + body += '\n----------------------------------------\n' + if len(body) > 2047: + body = body[0:1950] + '\n *message was cropped according to discord embed description limits!*' + + proxies = {'https': self.discord_proxy} if self.discord_proxy else None + auth = HTTPProxyAuth(self.discord_proxy_login, self.discord_proxy_password) if self.discord_proxy_login else None + headers = {"Content-Type": "application/json"} + + data = {} + data["content"] = "%s %s %s" % (self.discord_emoji_title, title, self.discord_emoji_title) + data["embeds"] = [] + embed = {} + embed["description"] = "%s" % (body) + embed["color"] = (self.discord_embed_color) + + if self.discord_embed_footer: + embed["footer"] = {} + embed["footer"]["text"] = (self.discord_embed_footer) if self.discord_embed_footer else None + embed["footer"]["icon_url"] = (self.discord_embed_icon_url) if self.discord_embed_icon_url else None + else: + None + + data["embeds"].append(embed) + + try: + response = requests.post(self.discord_webhook_url, data=json.dumps(data), headers=headers, proxies=proxies, auth=auth) + warnings.resetwarnings() + response.raise_for_status() + except RequestException as e: + raise EAException("Error posting to Discord: %s. 
Details: %s" % (e, "" if e.response is None else e.response.text)) + + elastalert_logger.info( + "Alert sent to the webhook %s" % self.discord_webhook_url) + + def get_info(self): + return {'type': 'discord', + 'discord_webhook_url': self.discord_webhook_url} diff --git a/elastalert/alerters/email.py b/elastalert/alerters/email.py new file mode 100644 index 000000000..7b5ca2fb0 --- /dev/null +++ b/elastalert/alerters/email.py @@ -0,0 +1,133 @@ +import os + +from elastalert.alerts import Alerter +from elastalert.util import elastalert_logger, lookup_es_key, EAException +from email.mime.text import MIMEText +from email.mime.multipart import MIMEMultipart +from email.mime.image import MIMEImage +from email.utils import formatdate +from socket import error +from smtplib import SMTP +from smtplib import SMTP_SSL +from smtplib import SMTPAuthenticationError +from smtplib import SMTPException + + +class EmailAlerter(Alerter): + """ Sends an email alert """ + required_options = frozenset(['email']) + + def __init__(self, *args): + super(EmailAlerter, self).__init__(*args) + + self.assets_dir = self.rule.get('assets_dir', '/tmp') + self.images_dictionary = dict(zip(self.rule.get('email_image_keys', []), self.rule.get('email_image_values', []))) + self.smtp_host = self.rule.get('smtp_host', 'localhost') + self.smtp_ssl = self.rule.get('smtp_ssl', False) + self.from_addr = self.rule.get('from_addr', 'ElastAlert') + self.smtp_port = self.rule.get('smtp_port') + if self.rule.get('smtp_auth_file'): + self.get_account(self.rule['smtp_auth_file']) + self.smtp_key_file = self.rule.get('smtp_key_file') + self.smtp_cert_file = self.rule.get('smtp_cert_file') + # Convert email to a list if it isn't already + if isinstance(self.rule['email'], str): + self.rule['email'] = [self.rule['email']] + # If there is a cc then also convert it a list if it isn't + cc = self.rule.get('cc') + if cc and isinstance(cc, str): + self.rule['cc'] = [self.rule['cc']] + # If there is a bcc then also convert it to a list if it isn't + bcc = self.rule.get('bcc') + if bcc and isinstance(bcc, str): + self.rule['bcc'] = [self.rule['bcc']] + add_suffix = self.rule.get('email_add_domain') + if add_suffix and not add_suffix.startswith('@'): + self.rule['email_add_domain'] = '@' + add_suffix + + def alert(self, matches): + body = self.create_alert_body(matches) + + # Add Jira ticket if it exists + if self.pipeline is not None and 'jira_ticket' in self.pipeline: + url = '%s/browse/%s' % (self.pipeline['jira_server'], self.pipeline['jira_ticket']) + body += '\nJira ticket: %s' % (url) + + to_addr = self.rule['email'] + if 'email_from_field' in self.rule: + recipient = lookup_es_key(matches[0], self.rule['email_from_field']) + if isinstance(recipient, str): + if '@' in recipient: + to_addr = [recipient] + elif 'email_add_domain' in self.rule: + to_addr = [recipient + self.rule['email_add_domain']] + elif isinstance(recipient, list): + to_addr = recipient + if 'email_add_domain' in self.rule: + to_addr = [name + self.rule['email_add_domain'] for name in to_addr] + if self.rule.get('email_format') == 'html': + # email_msg = MIMEText(body, 'html', _charset='UTF-8') # old way + email_msg = MIMEMultipart() + msgText = MIMEText(body, 'html', _charset='UTF-8') + email_msg.attach(msgText) # Added, and edited the previous line + + for image_key in self.images_dictionary: + fp = open(os.path.join(self.assets_dir, self.images_dictionary[image_key]), 'rb') + img = MIMEImage(fp.read()) + fp.close() + img.add_header('Content-ID', '<{}>'.format(image_key)) 
+ email_msg.attach(img) + else: + email_msg = MIMEText(body, _charset='UTF-8') + email_msg['Subject'] = self.create_title(matches) + email_msg['To'] = ', '.join(to_addr) + email_msg['From'] = self.from_addr + email_msg['Reply-To'] = self.rule.get('email_reply_to', email_msg['To']) + email_msg['Date'] = formatdate() + if self.rule.get('cc'): + email_msg['CC'] = ','.join(self.rule['cc']) + to_addr = to_addr + self.rule['cc'] + if self.rule.get('bcc'): + to_addr = to_addr + self.rule['bcc'] + + try: + if self.smtp_ssl: + if self.smtp_port: + self.smtp = SMTP_SSL(self.smtp_host, self.smtp_port, keyfile=self.smtp_key_file, certfile=self.smtp_cert_file) + else: + # default port : 465 + self.smtp = SMTP_SSL(self.smtp_host, keyfile=self.smtp_key_file, certfile=self.smtp_cert_file) + else: + if self.smtp_port: + self.smtp = SMTP(self.smtp_host, self.smtp_port) + else: + # default port : 25 + self.smtp = SMTP(self.smtp_host) + self.smtp.ehlo() + if self.smtp.has_extn('STARTTLS'): + self.smtp.starttls(keyfile=self.smtp_key_file, certfile=self.smtp_cert_file) + if 'smtp_auth_file' in self.rule: + self.smtp.login(self.user, self.password) + except (SMTPException, error) as e: + raise EAException("Error connecting to SMTP host: %s" % (e)) + except SMTPAuthenticationError as e: + raise EAException("SMTP username/password rejected: %s" % (e)) + self.smtp.sendmail(self.from_addr, to_addr, email_msg.as_string()) + self.smtp.quit() + + elastalert_logger.info("Sent email to %s" % (to_addr)) + + def create_default_title(self, matches): + subject = 'ElastAlert: %s' % (self.rule['name']) + + # If the rule has a query_key, add that value plus timestamp to subject + if 'query_key' in self.rule: + qk = matches[0].get(self.rule['query_key']) + if qk: + subject += ' - %s' % (qk) + + return subject + + def get_info(self): + return {'type': 'email', + 'recipients': self.rule['email']} diff --git a/elastalert/alerters/exotel.py b/elastalert/alerters/exotel.py new file mode 100644 index 000000000..f7a95fd9a --- /dev/null +++ b/elastalert/alerters/exotel.py @@ -0,0 +1,35 @@ +import sys + +from exotel import Exotel +from requests import RequestException + +from elastalert.alerts import Alerter +from elastalert.util import EAException, elastalert_logger + + +class ExotelAlerter(Alerter): + """ Sends an exotel alert """ + required_options = frozenset(['exotel_account_sid', 'exotel_auth_token', 'exotel_to_number', 'exotel_from_number']) + + def __init__(self, rule): + super(ExotelAlerter, self).__init__(rule) + self.exotel_account_sid = self.rule.get('exotel_account_sid', None) + self.exotel_auth_token = self.rule.get('exotel_auth_token', None) + self.exotel_to_number = self.rule.get('exotel_to_number', None) + self.exotel_from_number = self.rule.get('exotel_from_number', None) + self.sms_body = self.rule.get('exotel_message_body', '') + + def alert(self, matches): + client = Exotel(self.exotel_account_sid, self.exotel_auth_token) + + try: + message_body = self.rule['name'] + self.sms_body + response = client.sms(self.rule['exotel_from_number'], self.rule['exotel_to_number'], message_body) + if response != 200: + raise EAException("Error posting to Exotel, response code is %s" % response) + except RequestException: + raise EAException("Error posting to Exotel").with_traceback(sys.exc_info()[2]) + elastalert_logger.info("Trigger sent to Exotel") + + def get_info(self): + return {'type': 'exotel', 'exotel_account': self.exotel_account_sid} diff --git a/elastalert/alerters/gelf.py b/elastalert/alerters/gelf.py new file mode 
100644 index 000000000..1a3f821c8 --- /dev/null +++ b/elastalert/alerters/gelf.py @@ -0,0 +1,107 @@ +import json +import socket +import ssl + +import requests +from requests import RequestException + +from elastalert.alerts import Alerter +from elastalert.util import EAException, elastalert_logger + + +class GelfAlerter(Alerter): + required_options = set(['gelf_type']) + + def __init__(self, rule): + super(GelfAlerter, self).__init__(rule) + self.gelf_type = self.rule.get('gelf_type') + self.gelf_endpoint = self.rule.get('gelf_endpoint') + self.gelf_host = self.rule.get('gelf_host') + self.gelf_port = self.rule.get('gelf_port') + if 'http' in self.gelf_type: + if self.gelf_endpoint is None: + raise EAException('Error! Gelf http required "gelf_endpoint" variable') + elif 'tcp': + if self.gelf_host is None or self.gelf_port is None: + raise EAException('Error! Gelf tcp required "gelf_host" and "gelf_port" variables') + self.fields = self.rule.get('gelf_payload', {}) + self.headers = { + 'Content-Type': 'application/json' + } + self.gelf_version = self.rule.get('gelf_version', '1.1') + self.gelf_log_level = self.rule.get('gelf_log_level', 5) + self.additional_headers = self.rule.get('gelf_http_headers') + self.ca_cert = self.rule.get('gelf_ca_cert', False) + self.http_ignore_ssl_errors = self.rule.get('gelf_http_ignore_ssl_errors', False) + self.timeout = self.rule.get('gelf_timeout', 30) + + def send_http(self, gelf_msg): + + if self.additional_headers: + self.headers.update(self.additional_headers) + + if self.ca_cert: + verify = self.ca_cert + else: + verify = False + + if self.http_ignore_ssl_errors: + requests.packages.urllib3.disable_warnings() + + try: + requests.post(url=self.gelf_endpoint, headers=self.headers, json=gelf_msg, verify=verify, + timeout=self.timeout) + + except RequestException as e: + raise EAException("Error posting GELF message via HTTP: %s" % e) + elastalert_logger.info("GELF message sent via HTTP.") + + def sent_tcp(self, gelf_msg): + bytes_msg = json.dumps(gelf_msg).encode('utf-8') + b'\x00' + tcp_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + tcp_socket.settimeout(self.timeout) + + tcp_socket.connect((self.gelf_host, self.gelf_port)) + + try: + if self.ca_cert: + tcp_socket = ssl.wrap_socket(tcp_socket, ca_certs=self.ca_cert) + tcp_socket.sendall(bytes_msg) + else: + tcp_socket.sendall(bytes_msg) + + except socket.error as e: + raise EAException("Error posting GELF message via TCP: %s" % e) + elastalert_logger.info("GELF message sent via TCP.") + + def alert(self, matches): + """ + Each match will trigger a POST GELF message to the endpoint. 
+ """ + alert_message = { + 'Title': self.rule.get('name') + } + + for match in matches: + for key, value in self.fields.items(): + alert_message.update( + { + key: match.get(value) + } + ) + + gelf_msg = { + 'version': self.gelf_version, + 'host': socket.getfqdn(), + 'short_message': json.dumps(alert_message), + 'level': self.gelf_log_level, + } + + if self.gelf_type == 'http': + return self.send_http(gelf_msg) + elif self.gelf_type == 'tcp': + return self.sent_tcp(gelf_msg) + + def get_info(self): + return {'type': 'gelf', + 'gelf_type': self.gelf_type} diff --git a/elastalert/alerters/gitter.py b/elastalert/alerters/gitter.py new file mode 100644 index 000000000..7149a2038 --- /dev/null +++ b/elastalert/alerters/gitter.py @@ -0,0 +1,44 @@ +import json + +import requests +from requests import RequestException + +from elastalert.alerts import Alerter, DateTimeEncoder +from elastalert.util import EAException, elastalert_logger + + +class GitterAlerter(Alerter): + """ Creates a Gitter activity message for each alert """ + required_options = frozenset(['gitter_webhook_url']) + + def __init__(self, rule): + super(GitterAlerter, self).__init__(rule) + self.gitter_webhook_url = self.rule.get('gitter_webhook_url', None) + self.gitter_proxy = self.rule.get('gitter_proxy', None) + self.gitter_msg_level = self.rule.get('gitter_msg_level', 'error') + + def alert(self, matches): + body = self.create_alert_body(matches) + + # post to Gitter + headers = {'content-type': 'application/json'} + # set https proxy, if it was provided + proxies = {'https': self.gitter_proxy} if self.gitter_proxy else None + payload = { + 'message': body, + 'level': self.gitter_msg_level + } + + try: + response = requests.post(self.gitter_webhook_url, + data=json.dumps(payload, cls=DateTimeEncoder), + headers=headers, + proxies=proxies) + response.raise_for_status() + except RequestException as e: + raise EAException("Error posting to Gitter: %s" % e) + elastalert_logger.info("Alert sent to Gitter") + + def get_info(self): + return {'type': 'gitter', + 'gitter_webhook_url': self.gitter_webhook_url} diff --git a/elastalert/alerters/googlechat.py b/elastalert/alerters/googlechat.py new file mode 100644 index 000000000..a1865da7f --- /dev/null +++ b/elastalert/alerters/googlechat.py @@ -0,0 +1,100 @@ +import json + +import requests +from requests import RequestException + +from elastalert.alerts import Alerter +from elastalert.util import EAException, elastalert_logger + + +class GoogleChatAlerter(Alerter): + """ Send a notification via Google Chat webhooks """ + required_options = frozenset(['googlechat_webhook_url']) + + def __init__(self, rule): + super(GoogleChatAlerter, self).__init__(rule) + self.googlechat_webhook_url = self.rule.get('googlechat_webhook_url', None) + if isinstance(self.googlechat_webhook_url, str): + self.googlechat_webhook_url = [self.googlechat_webhook_url] + self.googlechat_format = self.rule.get('googlechat_format', 'basic') + self.googlechat_header_title = self.rule.get('googlechat_header_title', None) + self.googlechat_header_subtitle = self.rule.get('googlechat_header_subtitle', None) + self.googlechat_header_image = self.rule.get('googlechat_header_image', None) + self.googlechat_footer_kibanalink = self.rule.get('googlechat_footer_kibanalink', None) + self.googlechat_proxy = self.rule.get('googlechat_proxy', None) + + def create_header(self): + header = None + if self.googlechat_header_title: + header = { + "title": self.googlechat_header_title, + "subtitle": self.googlechat_header_subtitle, + 
"imageUrl": self.googlechat_header_image + } + return header + + def create_footer(self): + footer = None + if self.googlechat_footer_kibanalink: + footer = {"widgets": [{ + "buttons": [{ + "textButton": { + "text": "VISIT KIBANA", + "onClick": { + "openLink": { + "url": self.googlechat_footer_kibanalink + } + } + } + }] + }] + } + return footer + + def create_card(self, matches): + card = {"cards": [{ + "sections": [{ + "widgets": [ + {"textParagraph": {"text": self.create_alert_body(matches)}} + ]} + ]} + ]} + + # Add the optional header + header = self.create_header() + if header: + card['cards'][0]['header'] = header + + # Add the optional footer + footer = self.create_footer() + if footer: + card['cards'][0]['sections'].append(footer) + return card + + def create_basic(self, matches): + body = self.create_alert_body(matches) + return {'text': body} + + def alert(self, matches): + # Format message + if self.googlechat_format == 'card': + message = self.create_card(matches) + else: + message = self.create_basic(matches) + + # proxy + proxies = {'https': self.googlechat_proxy} if self.googlechat_proxy else None + + # Post to webhook + headers = {'content-type': 'application/json'} + for url in self.googlechat_webhook_url: + try: + response = requests.post(url, data=json.dumps(message), headers=headers, proxies=proxies) + response.raise_for_status() + except RequestException as e: + raise EAException("Error posting to google chat: {}".format(e)) + elastalert_logger.info("Alert sent to Google Chat!") + + def get_info(self): + return {'type': 'googlechat', + 'googlechat_webhook_url': self.googlechat_webhook_url} diff --git a/elastalert/alerters/httppost.py b/elastalert/alerters/httppost.py new file mode 100644 index 000000000..59e9a806b --- /dev/null +++ b/elastalert/alerters/httppost.py @@ -0,0 +1,61 @@ +import json + +import requests +from requests import RequestException + +from elastalert.alerts import Alerter, DateTimeEncoder +from elastalert.util import lookup_es_key, EAException, elastalert_logger + + +class HTTPPostAlerter(Alerter): + """ Requested elasticsearch indices are sent by HTTP POST. Encoded with JSON. """ + required_options = frozenset(['http_post_url']) + + def __init__(self, rule): + super(HTTPPostAlerter, self).__init__(rule) + post_url = self.rule.get('http_post_url', None) + if isinstance(post_url, str): + post_url = [post_url] + self.post_url = post_url + self.post_proxy = self.rule.get('http_post_proxy', None) + self.post_payload = self.rule.get('http_post_payload', {}) + self.post_static_payload = self.rule.get('http_post_static_payload', {}) + self.post_all_values = self.rule.get('http_post_all_values', not self.post_payload) + self.post_http_headers = self.rule.get('http_post_headers', {}) + self.post_ca_certs = self.rule.get('http_post_ca_certs') + self.post_ignore_ssl_errors = self.rule.get('http_post_ignore_ssl_errors', False) + self.timeout = self.rule.get('http_post_timeout', 10) + + def alert(self, matches): + """ Each match will trigger a POST to the specified endpoint(s). 
""" + for match in matches: + payload = match if self.post_all_values else {} + payload.update(self.post_static_payload) + for post_key, es_key in list(self.post_payload.items()): + payload[post_key] = lookup_es_key(match, es_key) + headers = { + "Content-Type": "application/json", + "Accept": "application/json;charset=utf-8" + } + if self.post_ca_certs: + verify = self.post_ca_certs + else: + verify = not self.post_ignore_ssl_errors + if self.post_ignore_ssl_errors: + requests.packages.urllib3.disable_warnings() + + headers.update(self.post_http_headers) + proxies = {'https': self.post_proxy} if self.post_proxy else None + for url in self.post_url: + try: + response = requests.post(url, data=json.dumps(payload, cls=DateTimeEncoder), + headers=headers, proxies=proxies, timeout=self.timeout, + verify=verify) + response.raise_for_status() + except RequestException as e: + raise EAException("Error posting HTTP Post alert: %s" % e) + elastalert_logger.info("HTTP Post alert sent.") + + def get_info(self): + return {'type': 'http_post', + 'http_post_webhook_url': self.post_url} diff --git a/elastalert/alerters/httppost2.py b/elastalert/alerters/httppost2.py new file mode 100644 index 000000000..0980c0610 --- /dev/null +++ b/elastalert/alerters/httppost2.py @@ -0,0 +1,116 @@ +import json +from json import JSONDecodeError + +import requests +from jinja2 import Template, TemplateSyntaxError +from requests import RequestException + +from elastalert.alerts import Alerter, DateTimeEncoder +from elastalert.util import lookup_es_key, EAException, elastalert_logger + + +def _json_escape(s): + return json.encoder.encode_basestring(s)[1:-1] + + +def _escape_all_values(x): + """recursively rebuilds, and escapes all strings for json, the given dict/list""" + if isinstance(x, dict): + x = { k:_escape_all_values(v) for k, v in x.items() } + elif isinstance(x, list): + x = [ _escape_all_values(v) for v in x ] + elif isinstance(x, str): + x = _json_escape(x) + return x + + +def _render_json_template(template, match): + if not isinstance(template, str): + template = json.dumps(template) + template = Template(template) + + return json.loads(template.render(**match)) + + +class HTTPPost2Alerter(Alerter): + """ Requested elasticsearch indices are sent by HTTP POST. Encoded with JSON. """ + required_options = frozenset(['http_post2_url']) + + def __init__(self, rule): + super(HTTPPost2Alerter, self).__init__(rule) + post_url = self.rule.get('http_post2_url', None) + if isinstance(post_url, str): + post_url = [post_url] + self.post_url = post_url + self.post_proxy = self.rule.get('http_post2_proxy', None) + self.post_payload = self.rule.get('http_post2_payload', {}) + self.post_raw_fields = self.rule.get('http_post2_raw_fields', {}) + self.post_all_values = self.rule.get('http_post2_all_values', not self.post_payload) + self.post_http_headers = self.rule.get('http_post2_headers', {}) + self.post_ca_certs = self.rule.get('http_post2_ca_certs') + self.post_ignore_ssl_errors = self.rule.get('http_post2_ignore_ssl_errors', False) + self.timeout = self.rule.get('http_post2_timeout', 10) + self.jinja_root_name = self.rule.get('jinja_root_name', None) + + def alert(self, matches): + """ Each match will trigger a POST to the specified endpoint(s). 
""" + for match in matches: + match_js_esc = _escape_all_values(match) + args = {**match_js_esc} + if self.jinja_root_name: + args[self.jinja_root_name] = match_js_esc + + try: + field = 'payload' + payload = match if self.post_all_values else {} + payload_res = _render_json_template(self.post_payload, args) + payload = {**payload, **payload_res} + + field = 'headers' + header_res = _render_json_template(self.post_http_headers, args) + headers = { + "Content-Type": "application/json", + "Accept": "application/json;charset=utf-8", + **header_res + } + except TemplateSyntaxError as e: + raise ValueError(f"HTTP Post 2: The value of 'http_post2_{field}' has an invalid Jinja2 syntax. " + f"Please check your template syntax: {e}") + + except JSONDecodeError as e: + raise ValueError(f"HTTP Post 2: The rendered value for 'http_post2_{field}' contains invalid JSON. " + f"Please check your template syntax: {e}") + + except Exception as e: + raise ValueError(f"HTTP Post 2: An unexpected error occurred with the 'http_post2_{field}' value. " + f"Please check your template syntax: {e}") + + for post_key, es_key in list(self.post_raw_fields.items()): + payload[post_key] = lookup_es_key(match, es_key) + + if self.post_ca_certs: + verify = self.post_ca_certs + else: + verify = not self.post_ignore_ssl_errors + if self.post_ignore_ssl_errors: + requests.packages.urllib3.disable_warnings() + + for key, value in headers.items(): + if type(value) in [type(None), list, dict]: + raise ValueError(f"HTTP Post 2: Can't send a header value which is not a string! " + f"Forbidden header {key}: {value}") + + proxies = {'https': self.post_proxy} if self.post_proxy else None + for url in self.post_url: + try: + response = requests.post(url, data=json.dumps(payload, cls=DateTimeEncoder), + headers=headers, proxies=proxies, timeout=self.timeout, + verify=verify) + response.raise_for_status() + except RequestException as e: + raise EAException("Error posting HTTP Post 2 alert: %s" % e) + elastalert_logger.info("HTTP Post 2 alert sent.") + + def get_info(self): + return {'type': 'http_post2', + 'http_post2_webhook_url': self.post_url} diff --git a/elastalert/alerters/jira.py b/elastalert/alerters/jira.py new file mode 100644 index 000000000..c8a9b205e --- /dev/null +++ b/elastalert/alerters/jira.py @@ -0,0 +1,414 @@ +import datetime +import sys +import os + +from elastalert.alerts import Alerter +from elastalert.alerts import BasicMatchString +from elastalert.util import (elastalert_logger, lookup_es_key, pretty_ts, ts_now, + ts_to_dt, EAException) +from elastalert.yaml import read_yaml +from jira.client import JIRA +from jira.exceptions import JIRAError + + +class JiraFormattedMatchString(BasicMatchString): + def _add_match_items(self): + match_items = dict([(x, y) for x, y in list(self.match.items()) if not x.startswith('top_events_')]) + json_blob = self._pretty_print_as_json(match_items) + preformatted_text = '{{code}}{0}{{code}}'.format(json_blob) + self.text += preformatted_text + + +class JiraAlerter(Alerter): + """ Creates a Jira ticket for each alert """ + required_options = frozenset(['jira_server', 'jira_account_file', 'jira_project', 'jira_issuetype']) + + # Maintain a static set of built-in fields that we explicitly know how to set + # For anything else, we will do best-effort and try to set a string value + known_field_list = [ + 'jira_account_file', + 'jira_assignee', + 'jira_bump_after_inactivity', + 'jira_bump_in_statuses', + 'jira_bump_not_in_statuses', + 'jira_bump_only', + 'jira_bump_tickets', + 
'jira_component', + 'jira_components', + 'jira_description', + 'jira_ignore_in_title', + 'jira_issuetype', + 'jira_label', + 'jira_labels', + 'jira_max_age', + 'jira_priority', + 'jira_project', + 'jira_server', + 'jira_transition_to', + 'jira_watchers', + ] + + # Some built-in Jira types that can be used as custom fields require special handling + # Here is a sample of one of them: + # {"id":"customfield_12807","name":"My Custom Field","custom":true,"orderable":true,"navigable":true,"searchable":true, + # "clauseNames":["cf[12807]","My Custom Field"],"schema":{"type":"array","items":"string", + # "custom":"com.atlassian.jira.plugin.system.customfieldtypes:multiselect","customId":12807}} + # There are likely others that will need to be updated on a case-by-case basis + custom_string_types_with_special_handling = [ + 'com.atlassian.jira.plugin.system.customfieldtypes:multicheckboxes', + 'com.atlassian.jira.plugin.system.customfieldtypes:multiselect', + 'com.atlassian.jira.plugin.system.customfieldtypes:radiobuttons', + ] + + def __init__(self, rule): + super(JiraAlerter, self).__init__(rule) + self.server = self.rule['jira_server'] + self.get_account(self.rule['jira_account_file']) + self.project = self.rule['jira_project'] + self.issue_type = self.rule['jira_issuetype'] + + # Deferred settings refer to values that can only be resolved when a match + # is found and as such loading them will be delayed until we find a match + self.deferred_settings = [] + + # We used to support only a single component. This allows us to maintain backwards compatibility + # while also giving the user-facing API a more representative name + self.components = self.rule.get('jira_components', self.rule.get('jira_component')) + + # We used to support only a single label. This allows us to maintain backwards compatibility + # while also giving the user-facing API a more representative name + self.labels = self.rule.get('jira_labels', self.rule.get('jira_label')) + + self.description = self.rule.get('jira_description', '') + self.assignee = self.rule.get('jira_assignee') + self.max_age = self.rule.get('jira_max_age', 30) + self.priority = self.rule.get('jira_priority') + self.bump_tickets = self.rule.get('jira_bump_tickets', False) + self.bump_not_in_statuses = self.rule.get('jira_bump_not_in_statuses') + self.bump_in_statuses = self.rule.get('jira_bump_in_statuses') + self.bump_after_inactivity = self.rule.get('jira_bump_after_inactivity', 0) + self.bump_only = self.rule.get('jira_bump_only', False) + self.transition = self.rule.get('jira_transition_to', None) + self.watchers = self.rule.get('jira_watchers') + self.client = None + + if self.bump_in_statuses and self.bump_not_in_statuses: + msg = 'Both jira_bump_in_statuses (%s) and jira_bump_not_in_statuses (%s) are set.' % \ + (','.join(self.bump_in_statuses), ','.join(self.bump_not_in_statuses)) + intersection = list(set(self.bump_in_statuses) & set(self.bump_in_statuses)) + if intersection: + msg = '%s Both have common statuses of (%s). As such, no tickets will ever be found.' % ( + msg, ','.join(intersection)) + msg += ' This should be simplified to use only one or the other.' 
+ elastalert_logger.warning(msg) + + self.reset_jira_args() + + try: + if hasattr(self, 'apikey'): + self.client = JIRA(self.server, token_auth=(self.apikey)) + else: + self.client = JIRA(self.server, basic_auth=(self.user, self.password)) + self.get_priorities() + self.jira_fields = self.client.fields() + self.get_arbitrary_fields() + except JIRAError as e: + # JIRAError may contain HTML, pass along only first 1024 chars + raise EAException("Error connecting to Jira: %s" % (str(e)[:1024])).with_traceback(sys.exc_info()[2]) + + self.set_priority() + + def set_priority(self): + try: + if self.priority is not None and self.client is not None: + self.jira_args['priority'] = {'id': self.priority_ids[self.priority]} + except KeyError: + elastalert_logger.error("Priority %s not found. Valid priorities are %s" % (self.priority, list(self.priority_ids.keys()))) + + def reset_jira_args(self): + self.jira_args = {'project': {'key': self.project}, + 'issuetype': {'name': self.issue_type}} + + if self.components: + # Support single component or list + if type(self.components) != list: + self.jira_args['components'] = [{'name': self.components}] + else: + self.jira_args['components'] = [{'name': component} for component in self.components] + if self.labels: + # Support single label or list + if type(self.labels) != list: + self.labels = [self.labels] + self.jira_args['labels'] = self.labels + if self.watchers: + # Support single watcher or list + if type(self.watchers) != list: + self.watchers = [self.watchers] + + self.set_priority() + + def set_jira_arg(self, jira_field, value, fields): + # Remove the jira_ part. Convert underscores to spaces + normalized_jira_field = jira_field[5:].replace('_', ' ').lower() + # All Jira fields should be found in the 'id' or the 'name' field. Therefore, try both just in case + for identifier in ['name', 'id']: + field = next((f for f in fields if normalized_jira_field == f[identifier].replace('_', ' ').lower()), None) + if field: + break + if not field: + # Log a warning to ElastAlert saying that we couldn't find that type? + # OR raise and fail to load the alert entirely? Probably the latter... + raise Exception("Could not find a definition for the jira field '{0}'".format(normalized_jira_field)) + arg_name = field['id'] + # Check the schema information to decide how to set the value correctly + # If the schema information is not available, raise an exception since we don't know how to set it + # Note this is only the case for two built-in types, id: issuekey and id: thumbnail + if not ('schema' in field or 'type' in field['schema']): + raise Exception("Could not determine schema information for the jira field '{0}'".format(normalized_jira_field)) + arg_type = field['schema']['type'] + + # Handle arrays of simple types like strings or numbers + if arg_type == 'array': + # As a convenience, support the scenario wherein the user only provides + # a single value for a multi-value field e.g. jira_labels: Only_One_Label + if type(value) != list: + value = [value] + array_items = field['schema']['items'] + # Simple string types + if array_items in ['string', 'date', 'datetime']: + # Special case for multi-select custom types (the Jira metadata says that these are strings, but + # in reality, they are required to be provided as an object. 
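+                    # For example, a value list like ['Red', 'Blue'] must be sent as
+                    # [{'value': 'Red'}, {'value': 'Blue'}] rather than as plain strings.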
+ if 'custom' in field['schema'] and field['schema']['custom'] in self.custom_string_types_with_special_handling: + self.jira_args[arg_name] = [{'value': v} for v in value] + else: + self.jira_args[arg_name] = value + elif array_items == 'number': + self.jira_args[arg_name] = [int(v) for v in value] + # Also attempt to handle arrays of complex types that have to be passed as objects with an identifier 'key' + elif array_items == 'option': + self.jira_args[arg_name] = [{'value': v} for v in value] + else: + # Try setting it as an object, using 'name' as the key + # This may not work, as the key might actually be 'key', 'id', 'value', or something else + # If it works, great! If not, it will manifest itself as an API error that will bubble up + self.jira_args[arg_name] = [{'name': v} for v in value] + # Handle non-array types + else: + # Simple string types + if arg_type in ['string', 'date', 'datetime']: + # Special case for custom types (the Jira metadata says that these are strings, but + # in reality, they are required to be provided as an object. + if 'custom' in field['schema'] and field['schema']['custom'] in self.custom_string_types_with_special_handling: + self.jira_args[arg_name] = {'value': value} + else: + self.jira_args[arg_name] = value + # Number type + elif arg_type == 'number': + self.jira_args[arg_name] = int(value) + elif arg_type == 'option': + self.jira_args[arg_name] = {'value': value} + # Complex type + else: + self.jira_args[arg_name] = {'name': value} + + def get_arbitrary_fields(self): + # Clear jira_args + self.reset_jira_args() + + for jira_field, value in self.rule.items(): + # If we find a field that is not covered by the set that we are aware of, it means it is either: + # 1. A built-in supported field in Jira that we don't have on our radar + # 2. A custom field that a Jira admin has configured + if jira_field.startswith('jira_') and jira_field not in self.known_field_list and str(value)[:1] != '#': + self.set_jira_arg(jira_field, value, self.jira_fields) + if jira_field.startswith('jira_') and jira_field not in self.known_field_list and str(value)[:1] == '#': + self.deferred_settings.append(jira_field) + + def get_priorities(self): + """ Creates a mapping of priority index to id. """ + priorities = self.client.priorities() + self.priority_ids = {} + for x in range(len(priorities)): + self.priority_ids[x] = priorities[x].id + + def find_existing_ticket(self, matches): + # Default title, get stripped search version + if 'alert_subject' not in self.rule: + title = self.create_default_title(matches, True) + else: + title = self.create_title(matches) + + if 'jira_ignore_in_title' in self.rule: + title = title.replace(matches[0].get(self.rule['jira_ignore_in_title'], ''), '') + + # This is necessary for search to work. 
Other special characters and dashes + # directly adjacent to words appear to be ok + title = title.replace(' - ', ' ') + title = title.replace('\\', '\\\\') + + date = (datetime.datetime.now() - datetime.timedelta(days=self.max_age)).strftime('%Y-%m-%d') + jql = 'project=%s AND summary~"%s" and created >= "%s"' % (self.project, title, date) + if self.bump_in_statuses: + jql = '%s and status in (%s)' % (jql, ','.join(["\"%s\"" % status if ' ' in status else status for status + in self.bump_in_statuses])) + if self.bump_not_in_statuses: + jql = '%s and status not in (%s)' % (jql, ','.join(["\"%s\"" % status if ' ' in status else status + for status in self.bump_not_in_statuses])) + try: + issues = self.client.search_issues(jql) + except JIRAError as e: + elastalert_logger.exception("Error while searching for Jira ticket using jql '%s': %s" % (jql, e)) + return None + + if len(issues): + return issues[0] + + def comment_on_ticket(self, ticket, match): + text = str(JiraFormattedMatchString(self.rule, match)) + timestamp = pretty_ts(lookup_es_key(match, self.rule['timestamp_field'])) + comment = "This alert was triggered again at %s\n%s" % (timestamp, text) + self.client.add_comment(ticket, comment) + + def transition_ticket(self, ticket): + transitions = self.client.transitions(ticket) + for t in transitions: + if t['name'] == self.transition: + self.client.transition_issue(ticket, t['id']) + + def alert(self, matches): + # Reset arbitrary fields to pick up changes + self.get_arbitrary_fields() + if len(self.deferred_settings) > 0: + fields = self.client.fields() + for jira_field in self.deferred_settings: + value = lookup_es_key(matches[0], self.rule[jira_field][1:]) + self.set_jira_arg(jira_field, value, fields) + + title = self.create_title(matches) + + if self.bump_tickets: + ticket = self.find_existing_ticket(matches) + if ticket: + inactivity_datetime = ts_now() - datetime.timedelta(days=self.bump_after_inactivity) + if ts_to_dt(ticket.fields.updated) >= inactivity_datetime: + if self.pipeline is not None: + self.pipeline['jira_ticket'] = None + self.pipeline['jira_server'] = self.server + return None + elastalert_logger.info('Commenting on existing ticket %s' % (ticket.key)) + for match in matches: + try: + self.comment_on_ticket(ticket, match) + except JIRAError as e: + elastalert_logger.exception("Error while commenting on ticket %s: %s" % (ticket, e)) + if self.labels: + for label in self.labels: + try: + ticket.fields.labels.append(label) + except JIRAError as e: + elastalert_logger.exception("Error while appending labels to ticket %s: %s" % (ticket, e)) + if self.transition: + elastalert_logger.info('Transitioning existing ticket %s' % (ticket.key)) + try: + self.transition_ticket(ticket) + except JIRAError as e: + elastalert_logger.exception("Error while transitioning ticket %s: %s" % (ticket, e)) + + if self.pipeline is not None: + self.pipeline['jira_ticket'] = ticket + self.pipeline['jira_server'] = self.server + return None + if self.bump_only: + return None + + self.jira_args['summary'] = title + self.jira_args['description'] = self.create_alert_body(matches) + + try: + self.issue = self.client.create_issue(**self.jira_args) + + # Set JIRA assignee + if self.assignee: + self.client.assign_issue(self.issue, self.assignee) + + # You can not add watchers on initial creation. 
Only as a follow-up action + if self.watchers: + for watcher in self.watchers: + try: + self.client.add_watcher(self.issue.key, watcher) + except Exception as ex: + # Re-raise the exception, preserve the stack-trace, and give some + # context as to which watcher failed to be added + raise Exception( + "Exception encountered when trying to add '{0}' as a watcher. Does the user exist?\n{1}" .format( + watcher, + ex + )).with_traceback(sys.exc_info()[2]) + + except JIRAError as e: + raise EAException("Error creating Jira ticket using jira_args (%s): %s" % (self.jira_args, e)) + elastalert_logger.info("Opened Jira ticket: %s" % (self.issue)) + + if self.pipeline is not None: + self.pipeline['jira_ticket'] = self.issue + self.pipeline['jira_server'] = self.server + + def create_alert_body(self, matches): + body = self.description + '\n' + body += self.get_aggregation_summary_text(matches) + if self.rule.get('alert_text_type') != 'aggregation_summary_only': + for match in matches: + body += str(JiraFormattedMatchString(self.rule, match)) + if len(matches) > 1: + body += '\n----------------------------------------\n' + return body + + def get_aggregation_summary_text(self, matches): + text = super(JiraAlerter, self).get_aggregation_summary_text(matches) + if text: + text = '{{noformat}}{0}{{noformat}}'.format(text) + return text + + def create_default_title(self, matches, for_search=False): + # If there is a query_key, use that in the title + + if 'query_key' in self.rule and lookup_es_key(matches[0], self.rule['query_key']): + title = 'ElastAlert: %s matched %s' % (lookup_es_key(matches[0], self.rule['query_key']), self.rule['name']) + else: + title = 'ElastAlert: %s' % (self.rule['name']) + + if for_search: + return title + + timestamp = matches[0].get(self.rule['timestamp_field']) + if timestamp: + title += ' - %s' % (pretty_ts(timestamp, self.rule.get('use_local_time'))) + + # Add count for spikes + count = matches[0].get('spike_count') + if count: + title += ' - %s+ events' % (count) + + return title + + def get_info(self): + return {'type': 'jira'} + + def get_account(self, account_file): + """ Gets the username and password, or the apikey, from an account file. + + :param account_file: Path to the file which contains the credentials. + It can be either an absolute file path or one that is relative to the given rule. 
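+
+        For example, a minimal account file (YAML) could contain basic-auth credentials:
+
+            user: jira-username
+            password: jira-password
+
+        or, alternatively, a token for token-based authentication:
+
+            apikey: jira-api-token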
+ """ + if os.path.isabs(account_file): + account_file_path = account_file + else: + account_file_path = os.path.join(os.path.dirname(self.rule['rule_file']), account_file) + account_conf = read_yaml(account_file_path) + if not (('user' in account_conf and 'password' in account_conf) or 'apikey' in account_conf): + raise EAException('Account file must have user and password fields, or apikey field') + if 'apikey' in account_conf: + self.apikey = account_conf['apikey'] + else: + self.user = account_conf['user'] + self.password = account_conf['password'] diff --git a/elastalert/alerters/line.py b/elastalert/alerters/line.py new file mode 100644 index 000000000..e5e1a4433 --- /dev/null +++ b/elastalert/alerters/line.py @@ -0,0 +1,40 @@ +import requests +from requests import RequestException + +from elastalert.alerts import Alerter, BasicMatchString +from elastalert.util import EAException, elastalert_logger + + +class LineNotifyAlerter(Alerter): + """ Created a Line Notify for each alert """ + required_option = frozenset(["linenotify_access_token"]) + + def __init__(self, rule): + super(LineNotifyAlerter, self).__init__(rule) + self.linenotify_access_token = self.rule.get("linenotify_access_token", None) + + def alert(self, matches): + body = '' + for match in matches: + body += str(BasicMatchString(self.rule, match)) + if len(matches) > 1: + body += '\n----------------------------------------\n' + if len(body) > 999: + body = body[0:900] + '\n *message was cropped according to line notify embed description limits!*' + # post to Line Notify + headers = { + "Content-Type": "application/x-www-form-urlencoded", + "Authorization": "Bearer {}".format(self.linenotify_access_token) + } + payload = { + "message": body + } + try: + response = requests.post("https://notify-api.line.me/api/notify", data=payload, headers=headers) + response.raise_for_status() + except RequestException as e: + raise EAException("Error posting to Line Notify: %s" % e) + elastalert_logger.info("Alert sent to Line Notify") + + def get_info(self): + return {"type": "linenotify", "linenotify_access_token": self.linenotify_access_token} diff --git a/elastalert/alerters/mattermost.py b/elastalert/alerters/mattermost.py new file mode 100644 index 000000000..3dd30c163 --- /dev/null +++ b/elastalert/alerters/mattermost.py @@ -0,0 +1,168 @@ +import copy +import json +import requests +import warnings + +from elastalert.alerts import Alerter, DateTimeEncoder +from elastalert.util import elastalert_logger, lookup_es_key, EAException +from requests import RequestException + + +class MattermostAlerter(Alerter): + """ Creates a Mattermsot post for each alert """ + required_options = frozenset(['mattermost_webhook_url']) + + def __init__(self, rule): + super(MattermostAlerter, self).__init__(rule) + + # HTTP config + self.mattermost_webhook_url = self.rule.get('mattermost_webhook_url', None) + if isinstance(self.mattermost_webhook_url, str): + self.mattermost_webhook_url = [self.mattermost_webhook_url] + self.mattermost_proxy = self.rule.get('mattermost_proxy', None) + self.mattermost_ignore_ssl_errors = self.rule.get('mattermost_ignore_ssl_errors', False) + + # Override webhook config + self.mattermost_username_override = self.rule.get('mattermost_username_override', 'elastalert') + self.mattermost_channel_override = self.rule.get('mattermost_channel_override', '') + if isinstance(self.mattermost_channel_override, str): + self.mattermost_channel_override = [self.mattermost_channel_override] + self.mattermost_emoji_override = 
self.rule.get('mattermost_emoji_override', ':ghost:') + self.mattermost_icon_url_override = self.rule.get('mattermost_icon_url_override', '') + + # Message properties + self.mattermost_msg_pretext = self.rule.get('mattermost_msg_pretext', '') + self.mattermost_msg_color = self.rule.get('mattermost_msg_color', 'danger') + self.mattermost_msg_fields = self.rule.get('mattermost_msg_fields', '') + self.mattermost_image_url = self.rule.get('mattermost_image_url', '') + self.mattermost_title = self.rule.get('mattermost_title', '') + self.mattermost_title_link = self.rule.get('mattermost_title_link', '') + self.mattermost_footer = self.rule.get('mattermost_footer', '') + self.mattermost_footer_icon = self.rule.get('mattermost_footer_icon', '') + self.mattermost_image_url = self.rule.get('mattermost_image_url', '') + self.mattermost_thumb_url = self.rule.get('mattermost_thumb_url', '') + self.mattermost_author_name = self.rule.get('mattermost_author_name', '') + self.mattermost_author_link = self.rule.get('mattermost_author_link', '') + self.mattermost_author_icon = self.rule.get('mattermost_author_icon', '') + self.mattermost_attach_kibana_discover_url = self.rule.get('mattermost_attach_kibana_discover_url', False) + self.mattermost_kibana_discover_color = self.rule.get('mattermost_kibana_discover_color', '#ec4b98') + self.mattermost_kibana_discover_title = self.rule.get('mattermost_kibana_discover_title', 'Discover in Kibana') + + def get_aggregation_summary_text__maximum_width(self): + width = super(MattermostAlerter, self).get_aggregation_summary_text__maximum_width() + # Reduced maximum width for prettier Mattermost display. + return min(width, 75) + + def get_aggregation_summary_text(self, matches): + text = super(MattermostAlerter, self).get_aggregation_summary_text(matches) + if text: + text = '```\n{0}```\n'.format(text) + return text + + def populate_fields(self, matches): + alert_fields = [] + missing = self.rule.get('alert_missing_value', '') + for field in self.mattermost_msg_fields: + field = copy.copy(field) + if 'args' in field: + args_values = [lookup_es_key(matches[0], arg) or missing for arg in field['args']] + if 'value' in field: + field['value'] = field['value'].format(*args_values) + else: + field['value'] = "\n".join(str(arg) for arg in args_values) + del(field['args']) + alert_fields.append(field) + return alert_fields + + def alert(self, matches): + body = self.create_alert_body(matches) + title = self.create_title(matches) + + # post to mattermost + headers = {'content-type': 'application/json'} + # set https proxy, if it was provided + proxies = {'https': self.mattermost_proxy} if self.mattermost_proxy else None + payload = { + 'username': self.mattermost_username_override, + 'attachments': [ + { + 'fallback': "{0}: {1}".format(title, self.mattermost_msg_pretext), + 'color': self.mattermost_msg_color, + 'title': title, + 'pretext': self.mattermost_msg_pretext, + 'fields': [] + } + ] + } + + if self.rule.get('alert_text_type') == 'alert_text_only': + payload['attachments'][0]['text'] = body + else: + payload['text'] = body + + if self.mattermost_msg_fields != '': + payload['attachments'][0]['fields'] = self.populate_fields(matches) + + if self.mattermost_icon_url_override != '': + payload['icon_url'] = self.mattermost_icon_url_override + else: + payload['icon_emoji'] = self.mattermost_emoji_override + + if self.mattermost_title != '': + payload['attachments'][0]['title'] = self.mattermost_title + + if self.mattermost_title_link != '': + 
payload['attachments'][0]['title_link'] = self.mattermost_title_link + + if self.mattermost_footer != '': + payload['attachments'][0]['footer'] = self.mattermost_footer + + if self.mattermost_footer_icon != '': + payload['attachments'][0]['footer_icon'] = self.mattermost_footer_icon + + if self.mattermost_image_url != '': + payload['attachments'][0]['image_url'] = self.mattermost_image_url + + if self.mattermost_thumb_url != '': + payload['attachments'][0]['thumb_url'] = self.mattermost_thumb_url + + if self.mattermost_author_name != '': + payload['attachments'][0]['author_name'] = self.mattermost_author_name + + if self.mattermost_author_link != '': + payload['attachments'][0]['author_link'] = self.mattermost_author_link + + if self.mattermost_author_icon != '': + payload['attachments'][0]['author_icon'] = self.mattermost_author_icon + + if self.mattermost_attach_kibana_discover_url: + kibana_discover_url = lookup_es_key(matches[0], 'kibana_discover_url') + if kibana_discover_url: + payload['attachments'].append({ + 'color': self.mattermost_kibana_discover_color, + 'title': self.mattermost_kibana_discover_title, + 'title_link': kibana_discover_url + }) + + for url in self.mattermost_webhook_url: + for channel_override in self.mattermost_channel_override: + try: + if self.mattermost_ignore_ssl_errors: + requests.urllib3.disable_warnings() + payload['channel'] = channel_override + + response = requests.post( + url, data=json.dumps(payload, cls=DateTimeEncoder), + headers=headers, verify=not self.mattermost_ignore_ssl_errors, + proxies=proxies) + + warnings.resetwarnings() + response.raise_for_status() + except RequestException as e: + raise EAException("Error posting to Mattermost: %s" % e) + elastalert_logger.info("Alert sent to Mattermost") + + def get_info(self): + return {'type': 'mattermost', + 'mattermost_username_override': self.mattermost_username_override, + 'mattermost_webhook_url': self.mattermost_webhook_url} diff --git a/elastalert/opsgenie.py b/elastalert/alerters/opsgenie.py similarity index 79% rename from elastalert/opsgenie.py rename to elastalert/alerters/opsgenie.py index bcdaf2d05..471ae87e0 100644 --- a/elastalert/opsgenie.py +++ b/elastalert/alerters/opsgenie.py @@ -1,14 +1,10 @@ # -*- coding: utf-8 -*- import json -import logging import os.path import requests -from .alerts import Alerter -from .alerts import BasicMatchString -from .util import EAException -from .util import elastalert_logger -from .util import lookup_es_key +from elastalert.alerts import Alerter, BasicMatchString +from elastalert.util import EAException, elastalert_logger, lookup_es_key class OpsGenieAlerter(Alerter): @@ -27,6 +23,7 @@ def __init__(self, *args): self.teams_args = self.rule.get('opsgenie_teams_args') self.tags = self.rule.get('opsgenie_tags', []) + ['ElastAlert', self.rule['name']] self.to_addr = self.rule.get('opsgenie_addr', 'https://api.opsgenie.com/v2/alerts') + self.description = self.rule.get('opsgenie_description', None) self.custom_message = self.rule.get('opsgenie_message') self.opsgenie_subject = self.rule.get('opsgenie_subject') self.opsgenie_subject_args = self.rule.get('opsgenie_subject_args') @@ -34,6 +31,8 @@ def __init__(self, *args): self.opsgenie_proxy = self.rule.get('opsgenie_proxy', None) self.priority = self.rule.get('opsgenie_priority') self.opsgenie_details = self.rule.get('opsgenie_details', {}) + self.entity = self.rule.get('opsgenie_entity', None) + self.source = self.rule.get('opsgenie_source', 'ElastAlert') def _parse_responders(self, responders, 
responder_args, matches, default_responders): if responder_args: @@ -46,20 +45,17 @@ def _parse_responders(self, responders, responder_args, matches, default_respond try: formated_responders.append(responder.format(**responders_values)) except KeyError as error: - logging.warn("OpsGenieAlerter: Cannot create responder for OpsGenie Alert. Key not foud: %s. " % (error)) + elastalert_logger.warning("OpsGenieAlerter: Cannot create responder for OpsGenie Alert. Key not foud: %s. " % (error)) if not formated_responders: - logging.warn("OpsGenieAlerter: no responders can be formed. Trying the default responder ") + elastalert_logger.warning("OpsGenieAlerter: no responders can be formed. Trying the default responder ") if not default_responders: - logging.warn("OpsGenieAlerter: default responder not set. Falling back") + elastalert_logger.warning("OpsGenieAlerter: default responder not set. Falling back") formated_responders = responders else: formated_responders = default_responders responders = formated_responders return responders - def _fill_responders(self, responders, type_): - return [{'id': r, 'type': type_} for r in responders] - def alert(self, matches): body = '' for match in matches: @@ -82,18 +78,27 @@ def alert(self, matches): post['responders'] = [{'username': r, 'type': 'user'} for r in self.recipients] if self.teams: post['teams'] = [{'name': r, 'type': 'team'} for r in self.teams] - post['description'] = body - post['source'] = 'ElastAlert' + if self.description: + post['description'] = self.description.format(**matches[0]) + else: + post['description'] = body + if self.entity: + post['entity'] = self.entity.format(**matches[0]) + if self.source: + post['source'] = self.source.format(**matches[0]) + post['tags'] = [] for i, tag in enumerate(self.tags): - self.tags[i] = tag.format(**matches[0]) - post['tags'] = self.tags - - if self.priority and self.priority not in ('P1', 'P2', 'P3', 'P4', 'P5'): - logging.warn("Priority level does not appear to be specified correctly. \ - Please make sure to set it to a value between P1 and P5") + post['tags'].append(tag.format(**matches[0])) + + priority = self.priority + if priority: + priority = priority.format(**matches[0]) + if priority and priority not in ('P1', 'P2', 'P3', 'P4', 'P5'): + elastalert_logger.warning("Priority level does not appear to be specified correctly. 
\ + Please make sure to set it to a value between P1 and P5") else: - post['priority'] = self.priority + post['priority'] = priority if self.alias is not None: post['alias'] = self.alias.format(**matches[0]) @@ -102,7 +107,7 @@ def alert(self, matches): if details: post['details'] = details - logging.debug(json.dumps(post)) + elastalert_logger.debug(json.dumps(post)) headers = { 'Content-Type': 'application/json', @@ -114,12 +119,12 @@ def alert(self, matches): try: r = requests.post(self.to_addr, json=post, headers=headers, proxies=proxies) - logging.debug('request response: {0}'.format(r)) + elastalert_logger.debug('request response: {0}'.format(r)) if r.status_code != 202: elastalert_logger.info("Error response from {0} \n " "API Response: {1}".format(self.to_addr, r)) r.raise_for_status() - logging.info("Alert sent to OpsGenie") + elastalert_logger.info("Alert sent to OpsGenie") except Exception as err: raise EAException("Error sending alert: {0}".format(err)) diff --git a/elastalert/alerters/pagerduty.py b/elastalert/alerters/pagerduty.py new file mode 100644 index 000000000..5cb5a15c8 --- /dev/null +++ b/elastalert/alerters/pagerduty.py @@ -0,0 +1,147 @@ +import json +import requests + +from elastalert.util import EAException, lookup_es_key, elastalert_logger +from elastalert.alerts import Alerter, DateTimeEncoder +from requests import RequestException + + +class PagerDutyAlerter(Alerter): + """ Create an incident on PagerDuty for each alert """ + required_options = frozenset(['pagerduty_service_key', 'pagerduty_client_name']) + + def __init__(self, rule): + super(PagerDutyAlerter, self).__init__(rule) + self.pagerduty_service_key = self.rule.get('pagerduty_service_key', None) + self.pagerduty_client_name = self.rule.get('pagerduty_client_name', None) + self.pagerduty_incident_key = self.rule.get('pagerduty_incident_key', '') + self.pagerduty_incident_key_args = self.rule.get('pagerduty_incident_key_args', None) + self.pagerduty_event_type = self.rule.get('pagerduty_event_type', 'trigger') + self.pagerduty_proxy = self.rule.get('pagerduty_proxy', None) + + self.pagerduty_api_version = self.rule.get('pagerduty_api_version', 'v1') + self.pagerduty_v2_payload_class = self.rule.get('pagerduty_v2_payload_class', '') + self.pagerduty_v2_payload_class_args = self.rule.get('pagerduty_v2_payload_class_args', None) + self.pagerduty_v2_payload_component = self.rule.get('pagerduty_v2_payload_component', '') + self.pagerduty_v2_payload_component_args = self.rule.get('pagerduty_v2_payload_component_args', None) + self.pagerduty_v2_payload_group = self.rule.get('pagerduty_v2_payload_group', '') + self.pagerduty_v2_payload_group_args = self.rule.get('pagerduty_v2_payload_group_args', None) + self.pagerduty_v2_payload_severity = self.rule.get('pagerduty_v2_payload_severity', 'critical') + self.pagerduty_v2_payload_source = self.rule.get('pagerduty_v2_payload_source', 'ElastAlert') + self.pagerduty_v2_payload_source_args = self.rule.get('pagerduty_v2_payload_source_args', None) + self.pagerduty_v2_payload_custom_details = self.rule.get('pagerduty_v2_payload_custom_details', {}) + self.pagerduty_v2_payload_include_all_info = self.rule.get('pagerduty_v2_payload_include_all_info', True) + + if self.pagerduty_api_version == 'v2': + self.url = 'https://events.pagerduty.com/v2/enqueue' + else: + self.url = 'https://events.pagerduty.com/generic/2010-04-15/create_event.json' + + def alert(self, matches): + body = self.create_alert_body(matches) + + # post to pagerduty + headers = {'content-type': 
'application/json'} + if self.pagerduty_api_version == 'v2': + + custom_details_payload = {'information': body} if self.pagerduty_v2_payload_include_all_info else {} + if self.pagerduty_v2_payload_custom_details: + for match in matches: + for custom_details_key, es_key in list(self.pagerduty_v2_payload_custom_details.items()): + custom_details_payload[custom_details_key] = lookup_es_key(match, es_key) + + payload = { + 'routing_key': self.pagerduty_service_key, + 'event_action': self.pagerduty_event_type, + 'dedup_key': self.get_incident_key(matches), + 'client': self.pagerduty_client_name, + 'payload': { + 'class': self.resolve_formatted_key(self.pagerduty_v2_payload_class, + self.pagerduty_v2_payload_class_args, + matches), + 'component': self.resolve_formatted_key(self.pagerduty_v2_payload_component, + self.pagerduty_v2_payload_component_args, + matches), + 'group': self.resolve_formatted_key(self.pagerduty_v2_payload_group, + self.pagerduty_v2_payload_group_args, + matches), + 'severity': self.pagerduty_v2_payload_severity, + 'source': self.resolve_formatted_key(self.pagerduty_v2_payload_source, + self.pagerduty_v2_payload_source_args, + matches), + 'summary': self.create_title(matches), + 'custom_details': custom_details_payload, + }, + } + match_timestamp = lookup_es_key(matches[0], self.rule.get('timestamp_field', '@timestamp')) + if match_timestamp: + payload['payload']['timestamp'] = match_timestamp + else: + payload = { + 'service_key': self.pagerduty_service_key, + 'description': self.create_title(matches), + 'event_type': self.pagerduty_event_type, + 'incident_key': self.get_incident_key(matches), + 'client': self.pagerduty_client_name, + 'details': { + "information": body, + }, + } + + # set https proxy, if it was provided + proxies = {'https': self.pagerduty_proxy} if self.pagerduty_proxy else None + try: + response = requests.post( + self.url, + data=json.dumps(payload, cls=DateTimeEncoder, ensure_ascii=False).encode("utf-8"), + headers=headers, + proxies=proxies + ) + response.raise_for_status() + except RequestException as e: + raise EAException("Error posting to pagerduty: %s" % e) + + if self.pagerduty_event_type == 'trigger': + elastalert_logger.info("Trigger sent to PagerDuty") + if self.pagerduty_event_type == 'resolve': + elastalert_logger.info("Resolve sent to PagerDuty") + if self.pagerduty_event_type == 'acknowledge': + elastalert_logger.info("acknowledge sent to PagerDuty") + + def resolve_formatted_key(self, key, args, matches): + if args: + key_values = [lookup_es_key(matches[0], arg) for arg in args] + + # Populate values with rule level properties too + for i in range(len(key_values)): + if key_values[i] is None: + key_value = self.rule.get(args[i]) + if key_value: + key_values[i] = key_value + + missing = self.rule.get('alert_missing_value', '') + key_values = [missing if val is None else val for val in key_values] + return key.format(*key_values) + else: + return key + + def get_incident_key(self, matches): + if self.pagerduty_incident_key_args: + incident_key_values = [lookup_es_key(matches[0], arg) for arg in self.pagerduty_incident_key_args] + + # Populate values with rule level properties too + for i in range(len(incident_key_values)): + if incident_key_values[i] is None: + key_value = self.rule.get(self.pagerduty_incident_key_args[i]) + if key_value: + incident_key_values[i] = key_value + + missing = self.rule.get('alert_missing_value', '') + incident_key_values = [missing if val is None else val for val in incident_key_values] + return 
self.pagerduty_incident_key.format(*incident_key_values) + else: + return self.pagerduty_incident_key + + def get_info(self): + return {'type': 'pagerduty', + 'pagerduty_client_name': self.pagerduty_client_name} diff --git a/elastalert/alerters/pagertree.py b/elastalert/alerters/pagertree.py new file mode 100644 index 000000000..9e159e46c --- /dev/null +++ b/elastalert/alerters/pagertree.py @@ -0,0 +1,41 @@ +import json +import uuid + +import requests +from requests import RequestException + +from elastalert.alerts import Alerter, DateTimeEncoder +from elastalert.util import EAException, elastalert_logger + + +class PagerTreeAlerter(Alerter): + """ Creates a PagerTree Incident for each alert """ + required_options = frozenset(['pagertree_integration_url']) + + def __init__(self, rule): + super(PagerTreeAlerter, self).__init__(rule) + self.url = self.rule.get('pagertree_integration_url', None) + self.pagertree_proxy = self.rule.get('pagertree_proxy', None) + + def alert(self, matches): + # post to pagertree + headers = {'content-type': 'application/json'} + # set https proxy, if it was provided + proxies = {'https': self.pagertree_proxy} if self.pagertree_proxy else None + payload = { + "event_type": "create", + "Id": str(uuid.uuid4()), + "Title": self.create_title(matches), + "Description": self.create_alert_body(matches) + } + + try: + response = requests.post(self.url, data=json.dumps(payload, cls=DateTimeEncoder), headers=headers, proxies=proxies) + response.raise_for_status() + except RequestException as e: + raise EAException("Error posting to PagerTree: %s" % e) + elastalert_logger.info("Trigger sent to PagerTree") + + def get_info(self): + return {'type': 'pagertree', + 'pagertree_integration_url': self.url} diff --git a/elastalert/alerters/rocketchat.py b/elastalert/alerters/rocketchat.py new file mode 100644 index 000000000..6b54ba941 --- /dev/null +++ b/elastalert/alerters/rocketchat.py @@ -0,0 +1,120 @@ +# -*- coding: utf-8 -*- +import copy +import json +import requests +from requests.exceptions import RequestException +import warnings + +from elastalert.alerts import Alerter, DateTimeEncoder +from elastalert.util import EAException, elastalert_logger, lookup_es_key + + +class RocketChatAlerter(Alerter): + """ Creates a RocketChat notification for each alert """ + required_options = set(['rocket_chat_webhook_url']) + + def __init__(self, rule): + super(RocketChatAlerter, self).__init__(rule) + self.rocket_chat_webhook_url = self.rule.get('rocket_chat_webhook_url', None) + if isinstance(self.rocket_chat_webhook_url, str): + self.rocket_chat_webhook_url = [self.rocket_chat_webhook_url] + self.rocket_chat_proxy = self.rule.get('rocket_chat_proxy', None) + + self.rocket_chat_username_override = self.rule.get('rocket_chat_username_override', 'elastalert2') + self.rocket_chat_channel_override = self.rule.get('rocket_chat_channel_override', '') + if isinstance(self.rocket_chat_channel_override, str): + self.rocket_chat_channel_override = [self.rocket_chat_channel_override] + self.rocket_chat_emoji_override = self.rule.get('rocket_chat_emoji_override', ':ghost:') + self.rocket_chat_msg_color = self.rule.get('rocket_chat_msg_color', 'danger') + self.rocket_chat_text_string = self.rule.get('rocket_chat_text_string', '') + self.rocket_chat_alert_fields = self.rule.get('rocket_chat_alert_fields', '') + self.rocket_chat_attach_kibana_discover_url = self.rule.get('rocket_chat_attach_kibana_discover_url', False) + self.rocket_chat_kibana_discover_color = 
self.rule.get('rocket_chat_kibana_discover_color', '#ec4b98') + self.rocket_chat_kibana_discover_title = self.rule.get('rocket_chat_kibana_discover_title', 'Discover in Kibana') + self.rocket_chat_ignore_ssl_errors = self.rule.get('rocket_chat_ignore_ssl_errors', False) + self.rocket_chat_timeout = self.rule.get('rocket_chat_timeout', 10) + self.rocket_chat_ca_certs = self.rule.get('rocket_chat_ca_certs') + + def format_body(self, body): + return body + + def get_aggregation_summary_text__maximum_width(self): + width = super(RocketChatAlerter, self).get_aggregation_summary_text__maximum_width() + + # Reduced maximum width for prettier Slack display. + return min(width, 75) + + def get_aggregation_summary_text(self, matches): + text = super(RocketChatAlerter, self).get_aggregation_summary_text(matches) + if text: + text = '```\n{0}```\n'.format(text) + return text + + def populate_fields(self, matches): + alert_fields = [] + for arg in self.rocket_chat_alert_fields: + arg = copy.copy(arg) + arg['value'] = lookup_es_key(matches[0], arg['value']) + alert_fields.append(arg) + return alert_fields + + def alert(self, matches): + body = self.create_alert_body(matches) + body = self.format_body(body) + headers = {'content-type': 'application/json'} + proxies = {'https': self.rocket_chat_proxy} if self.rocket_chat_proxy else None + payload = { + 'username': self.rocket_chat_username_override, + 'text': self.rocket_chat_text_string, + 'attachments': [ + { + 'color': self.rocket_chat_msg_color, + 'title': self.create_title(matches), + 'text': body, + 'fields': [] + } + ] + } + + # if we have defined fields, populate noteable fields for the alert + if self.rocket_chat_alert_fields != '': + payload['attachments'][0]['fields'] = self.populate_fields(matches) + + if self.rocket_chat_emoji_override != '': + payload['emoji'] = self.rocket_chat_emoji_override + + if self.rocket_chat_attach_kibana_discover_url: + kibana_discover_url = lookup_es_key(matches[0], 'kibana_discover_url') + if kibana_discover_url: + payload['attachments'].append({ + 'color': self.rocket_chat_kibana_discover_color, + 'title': self.rocket_chat_kibana_discover_title, + 'title_link': kibana_discover_url + }) + + for url in self.rocket_chat_webhook_url: + for channel_override in self.rocket_chat_channel_override: + try: + if self.rocket_chat_ca_certs: + verify = self.rocket_chat_ca_certs + else: + verify = not self.rocket_chat_ignore_ssl_errors + if self.rocket_chat_ignore_ssl_errors: + requests.packages.urllib3.disable_warnings() + payload['channel'] = channel_override + response = requests.post( + url, data=json.dumps(payload, cls=DateTimeEncoder), + headers=headers, + verify=verify, + proxies=proxies, + timeout=self.rocket_chat_timeout) + warnings.resetwarnings() + response.raise_for_status() + except RequestException as e: + raise EAException("Error posting to Rocket.Chat: %s" % e) + elastalert_logger.info("Alert sent to Rocket.Chat") + + def get_info(self): + return {'type': 'rocketchat', + 'rocket_chat_username_override': self.rocket_chat_username_override, + 'rocket_chat_webhook_url': self.rocket_chat_webhook_url} diff --git a/elastalert/alerters/servicenow.py b/elastalert/alerters/servicenow.py new file mode 100644 index 000000000..39d66af75 --- /dev/null +++ b/elastalert/alerters/servicenow.py @@ -0,0 +1,72 @@ +import json + +import requests +from requests import RequestException + +from elastalert.alerts import Alerter, BasicMatchString, DateTimeEncoder +from elastalert.util import EAException, elastalert_logger + + +class 
ServiceNowAlerter(Alerter): + """ Creates a ServiceNow alert """ + required_options = set([ + 'username', + 'password', + 'servicenow_rest_url', + 'short_description', + 'comments', + 'assignment_group', + 'category', + 'subcategory', + 'cmdb_ci', + 'caller_id' + ]) + + def __init__(self, rule): + super(ServiceNowAlerter, self).__init__(rule) + self.servicenow_rest_url = self.rule.get('servicenow_rest_url', None) + self.servicenow_proxy = self.rule.get('servicenow_proxy', None) + self.impact = self.rule.get('servicenow_impact', None) + self.urgency = self.rule.get('servicenow_urgency', None) + + def alert(self, matches): + for match in matches: + # Parse everything into description. + description = str(BasicMatchString(self.rule, match)) + + # Set proper headers + headers = { + "Content-Type": "application/json", + "Accept": "application/json;charset=utf-8" + } + proxies = {'https': self.servicenow_proxy} if self.servicenow_proxy else None + payload = { + "description": description, + "short_description": self.rule['short_description'], + "comments": self.rule['comments'], + "assignment_group": self.rule['assignment_group'], + "category": self.rule['category'], + "subcategory": self.rule['subcategory'], + "cmdb_ci": self.rule['cmdb_ci'], + "caller_id": self.rule["caller_id"] + } + if self.impact != None: + payload["impact"] = self.impact + if self.urgency != None: + payload["urgency"] = self.urgency + try: + response = requests.post( + self.servicenow_rest_url, + auth=(self.rule['username'], self.rule['password']), + headers=headers, + data=json.dumps(payload, cls=DateTimeEncoder), + proxies=proxies + ) + response.raise_for_status() + except RequestException as e: + raise EAException("Error posting to ServiceNow: %s" % e) + elastalert_logger.info("Alert sent to ServiceNow") + + def get_info(self): + return {'type': 'ServiceNow', + 'self.servicenow_rest_url': self.servicenow_rest_url} diff --git a/elastalert/alerters/ses.py b/elastalert/alerters/ses.py new file mode 100644 index 000000000..8171198e6 --- /dev/null +++ b/elastalert/alerters/ses.py @@ -0,0 +1,111 @@ +import boto3 + +from elastalert.alerts import Alerter +from elastalert.util import lookup_es_key, EAException, elastalert_logger + + +class SesAlerter(Alerter): + """ Sends an email alert using AWS SES """ + required_options = frozenset(['ses_email', 'ses_from_addr']) + + def __init__(self, *args): + super(SesAlerter, self).__init__(*args) + + self.aws_access_key_id = self.rule.get('ses_aws_access_key_id') + self.aws_secret_access_key = self.rule.get('ses_aws_secret_access_key') + self.aws_region = self.rule.get('ses_aws_region', 'us-east-1') + self.aws_profile = self.rule.get('ses_aws_profile', '') + + self.email = self.rule.get('ses_email', None) + self.from_addr = self.rule.get('ses_from_addr', None) + + # Convert email to a list if it isn't already + if isinstance(self.email, str): + self.email = [self.email] + + # If there is a cc then also convert it a list if it isn't + cc = self.rule.get('ses_cc') + if cc and isinstance(cc, str): + self.rule['ses_cc'] = [self.rule['ses_cc']] + + # If there is a bcc then also convert it to a list if it isn't + bcc = self.rule.get('ses_bcc') + if bcc and isinstance(bcc, str): + self.rule['ses_bcc'] = [self.rule['ses_bcc']] + + # If there is a email_reply_to then also convert it to a list if it isn't + reply_to = self.rule.get('ses_email_reply_to') + if reply_to and isinstance(reply_to, str): + self.rule['ses_email_reply_to'] = [self.rule['ses_email_reply_to']] + + add_suffix = 
self.rule.get('ses_email_add_domain') + if add_suffix and not add_suffix.startswith('@'): + self.rule['ses_email_add_domain'] = '@' + add_suffix + + def alert(self, matches): + body = self.create_alert_body(matches) + + to_addr = self.email + if 'ses_email_from_field' in self.rule: + recipient = lookup_es_key(matches[0], self.rule['ses_email_from_field']) + if isinstance(recipient, str): + if '@' in recipient: + to_addr = [recipient] + elif 'ses_email_add_domain' in self.rule: + to_addr = [recipient + self.rule['ses_email_add_domain']] + elif isinstance(recipient, list): + to_addr = recipient + if 'ses_email_add_domain' in self.rule: + to_addr = [name + self.rule['ses_email_add_domain'] for name in to_addr] + + try: + if self.aws_profile != '': + session = boto3.Session(profile_name=self.aws_profile) + else: + session = boto3.Session( + aws_access_key_id=self.aws_access_key_id, + aws_secret_access_key=self.aws_secret_access_key, + region_name=self.aws_region + ) + + client = session.client('ses') + + client.send_email( + Source=self.from_addr, + Destination={ + 'ToAddresses': to_addr, + 'CcAddresses': self.rule.get('ses_cc', []), + 'BccAddresses': self.rule.get('ses_bcc', []) + }, + Message={ + 'Subject': { + 'Charset': 'UTF-8', + 'Data': self.create_title(matches), + }, + 'Body': { + 'Text': { + 'Charset': 'UTF-8', + 'Data': body, + } + } + }, + ReplyToAddresses=self.rule.get('ses_email_reply_to', [])) + except Exception as e: + raise EAException("Error sending Amazon SES: %s" % e) + + elastalert_logger.info("Sent Amazon SES to %s" % (to_addr,)) + + def create_default_title(self, matches): + subject = 'ElastAlert 2: %s' % (self.rule['name']) + + # If the rule has a query_key, add that value plus timestamp to subject + if 'query_key' in self.rule: + qk = matches[0].get(self.rule['query_key']) + if qk: + subject += ' - %s' % (qk) + + return subject + + def get_info(self): + return {'type': 'ses', + 'recipients': self.email} diff --git a/elastalert/alerters/slack.py b/elastalert/alerters/slack.py new file mode 100644 index 000000000..393572640 --- /dev/null +++ b/elastalert/alerters/slack.py @@ -0,0 +1,177 @@ +import copy +import json +import requests +import warnings + +from elastalert.alerts import Alerter, DateTimeEncoder +from elastalert.util import elastalert_logger, EAException, lookup_es_key +from requests.exceptions import RequestException + + +class SlackAlerter(Alerter): + """ Creates a Slack room message for each alert """ + required_options = frozenset(['slack_webhook_url']) + + def __init__(self, rule): + super(SlackAlerter, self).__init__(rule) + self.slack_webhook_url = self.rule.get('slack_webhook_url', None) + if isinstance(self.slack_webhook_url, str): + self.slack_webhook_url = [self.slack_webhook_url] + self.slack_proxy = self.rule.get('slack_proxy', None) + self.slack_username_override = self.rule.get('slack_username_override', 'elastalert') + self.slack_channel_override = self.rule.get('slack_channel_override', '') + if isinstance(self.slack_channel_override, str): + self.slack_channel_override = [self.slack_channel_override] + self.slack_title_link = self.rule.get('slack_title_link', '') + self.slack_title = self.rule.get('slack_title', '') + self.slack_emoji_override = self.rule.get('slack_emoji_override', ':ghost:') + self.slack_icon_url_override = self.rule.get('slack_icon_url_override', '') + self.slack_msg_color = self.rule.get('slack_msg_color', 'danger') + self.slack_parse_override = self.rule.get('slack_parse_override', 'none') + self.slack_text_string = 
self.rule.get('slack_text_string', '') + self.slack_alert_fields = self.rule.get('slack_alert_fields', '') + self.slack_ignore_ssl_errors = self.rule.get('slack_ignore_ssl_errors', False) + self.slack_timeout = self.rule.get('slack_timeout', 10) + self.slack_ca_certs = self.rule.get('slack_ca_certs') + self.slack_attach_kibana_discover_url = self.rule.get('slack_attach_kibana_discover_url', False) + self.slack_kibana_discover_color = self.rule.get('slack_kibana_discover_color', '#ec4b98') + self.slack_kibana_discover_title = self.rule.get('slack_kibana_discover_title', 'Discover in Kibana') + self.slack_footer = self.rule.get('slack_footer', '') + self.slack_footer_icon = self.rule.get('slack_footer_icon', '') + self.slack_image_url = self.rule.get('slack_image_url', '') + self.slack_thumb_url = self.rule.get('slack_thumb_url', '') + self.slack_author_name = self.rule.get('slack_author_name', '') + self.slack_author_link = self.rule.get('slack_author_link', '') + self.slack_author_icon = self.rule.get('slack_author_icon', '') + self.slack_msg_pretext = self.rule.get('slack_msg_pretext', '') + self.slack_attach_jira_ticket_url = self.rule.get('slack_attach_jira_ticket_url', False) + self.slack_jira_ticket_color = self.rule.get('slack_jira_ticket_color', '#ec4b98') + self.slack_jira_ticket_title = self.rule.get('slack_jira_ticket_title', 'Jira Ticket') + + def format_body(self, body): + # https://api.slack.com/docs/formatting + return body + + def get_aggregation_summary_text__maximum_width(self): + width = super(SlackAlerter, self).get_aggregation_summary_text__maximum_width() + # Reduced maximum width for prettier Slack display. + return min(width, 75) + + def get_aggregation_summary_text(self, matches): + text = super(SlackAlerter, self).get_aggregation_summary_text(matches) + if text: + text = '```\n{0}```\n'.format(text) + return text + + def populate_fields(self, matches): + alert_fields = [] + for arg in self.slack_alert_fields: + arg = copy.copy(arg) + arg['value'] = lookup_es_key(matches[0], arg['value']) + alert_fields.append(arg) + return alert_fields + + def alert(self, matches): + body = self.create_alert_body(matches) + + body = self.format_body(body) + # post to slack + headers = {'content-type': 'application/json'} + # set https proxy, if it was provided + proxies = {'https': self.slack_proxy} if self.slack_proxy else None + payload = { + 'username': self.slack_username_override, + 'parse': self.slack_parse_override, + 'text': self.slack_text_string, + 'attachments': [ + { + 'color': self.slack_msg_color, + 'title': self.create_title(matches), + 'text': body, + 'mrkdwn_in': ['text', 'pretext'], + 'fields': [] + } + ] + } + + # if we have defined fields, populate noteable fields for the alert + if self.slack_alert_fields != '': + payload['attachments'][0]['fields'] = self.populate_fields(matches) + + if self.slack_icon_url_override != '': + payload['icon_url'] = self.slack_icon_url_override + else: + payload['icon_emoji'] = self.slack_emoji_override + + if self.slack_title != '': + payload['attachments'][0]['title'] = self.slack_title + + if self.slack_title_link != '': + payload['attachments'][0]['title_link'] = self.slack_title_link + + if self.slack_footer != '': + payload['attachments'][0]['footer'] = self.slack_footer + + if self.slack_footer_icon != '': + payload['attachments'][0]['footer_icon'] = self.slack_footer_icon + + if self.slack_image_url != '': + payload['attachments'][0]['image_url'] = self.slack_image_url + + if self.slack_thumb_url != '': + 
payload['attachments'][0]['thumb_url'] = self.slack_thumb_url + + if self.slack_author_name != '': + payload['attachments'][0]['author_name'] = self.slack_author_name + + if self.slack_author_link != '': + payload['attachments'][0]['author_link'] = self.slack_author_link + + if self.slack_author_icon != '': + payload['attachments'][0]['author_icon'] = self.slack_author_icon + + if self.slack_msg_pretext != '': + payload['attachments'][0]['pretext'] = self.slack_msg_pretext + + if self.slack_attach_kibana_discover_url: + kibana_discover_url = lookup_es_key(matches[0], 'kibana_discover_url') + if kibana_discover_url: + payload['attachments'].append({ + 'color': self.slack_kibana_discover_color, + 'title': self.slack_kibana_discover_title, + 'title_link': kibana_discover_url + }) + + if self.slack_attach_jira_ticket_url and self.pipeline is not None and 'jira_ticket' in self.pipeline: + jira_url = '%s/browse/%s' % (self.pipeline['jira_server'], self.pipeline['jira_ticket']) + + payload['attachments'].append({ + 'color': self.slack_jira_ticket_color, + 'title': self.slack_jira_ticket_title, + 'title_link': jira_url + }) + + for url in self.slack_webhook_url: + for channel_override in self.slack_channel_override: + try: + if self.slack_ca_certs: + verify = self.slack_ca_certs + else: + verify = not self.slack_ignore_ssl_errors + if self.slack_ignore_ssl_errors: + requests.packages.urllib3.disable_warnings() + payload['channel'] = channel_override + response = requests.post( + url, data=json.dumps(payload, cls=DateTimeEncoder), + headers=headers, verify=verify, + proxies=proxies, + timeout=self.slack_timeout) + warnings.resetwarnings() + response.raise_for_status() + except RequestException as e: + raise EAException("Error posting to slack: %s" % e) + elastalert_logger.info("Alert '%s' sent to Slack" % self.rule['name']) + + def get_info(self): + return {'type': 'slack', + 'slack_username_override': self.slack_username_override} diff --git a/elastalert/alerters/sns.py b/elastalert/alerters/sns.py new file mode 100644 index 000000000..584f974ce --- /dev/null +++ b/elastalert/alerters/sns.py @@ -0,0 +1,47 @@ +import boto3 + +from elastalert.alerts import Alerter +from elastalert.util import elastalert_logger, EAException + + +class SnsAlerter(Alerter): + """ Send alert using AWS SNS service """ + required_options = frozenset(['sns_topic_arn']) + + def __init__(self, *args): + super(SnsAlerter, self).__init__(*args) + self.sns_topic_arn = self.rule.get('sns_topic_arn', None) + self.sns_aws_access_key_id = self.rule.get('sns_aws_access_key_id') + self.sns_aws_secret_access_key = self.rule.get('sns_aws_secret_access_key') + self.sns_aws_region = self.rule.get('sns_aws_region', 'us-east-1') + self.profile = self.rule.get('sns_aws_profile', None) + + def create_default_title(self, matches): + subject = 'ElastAlert: %s' % (self.rule['name']) + return subject + + def alert(self, matches): + body = self.create_alert_body(matches) + + try: + if self.profile is None: + session = boto3.Session( + aws_access_key_id=self.sns_aws_access_key_id, + aws_secret_access_key=self.sns_aws_secret_access_key, + region_name=self.sns_aws_region + ) + else: + session = boto3.Session(profile_name=self.profile) + + sns_client = session.client('sns') + sns_client.publish( + TopicArn=self.sns_topic_arn, + Message=body, + Subject=self.create_title(matches) + ) + except Exception as e: + raise EAException("Error sending Amazon SNS: %s" % e) + elastalert_logger.info("Sent Amazon SNS notification to %s" % (self.sns_topic_arn)) + + 
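    # Illustrative note, not part of the original patch: SnsAlerter reads everything
    # from the rule dictionary, so a minimal rule exercising the options handled in
    # __init__ above could look like the sketch below (the ARN, profile and region
    # values are hypothetical placeholders):
    #
    #     rule = {
    #         'name': 'example-sns-rule',
    #         'sns_topic_arn': 'arn:aws:sns:us-east-1:123456789012:example-topic',  # required
    #         'sns_aws_profile': 'default',   # when set, the explicit key pair is ignored
    #         'sns_aws_region': 'us-east-1',  # defaults to us-east-1 when omitted
    #     }
    #
    # Without 'sns_aws_profile', alert() falls back to 'sns_aws_access_key_id' and
    # 'sns_aws_secret_access_key' when building the boto3 session.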
def get_info(self): + return {'type': 'sns'} diff --git a/elastalert/alerters/stomp.py b/elastalert/alerters/stomp.py new file mode 100644 index 000000000..97585a2ee --- /dev/null +++ b/elastalert/alerters/stomp.py @@ -0,0 +1,78 @@ +import datetime +import json +import time + +import stomp + +from elastalert.alerts import Alerter, BasicMatchString +from elastalert.util import lookup_es_key, elastalert_logger, EAException + + +class StompAlerter(Alerter): + """ The stomp alerter publishes alerts via stomp to a broker. """ + required_options = frozenset( + ['stomp_hostname', 'stomp_hostport', 'stomp_login', 'stomp_password']) + + def alert(self, matches): + alerts = [] + + qk = self.rule.get('query_key', None) + + fullmessage = {} + for match in matches: + if qk is not None: + resmatch = lookup_es_key(match, qk) + else: + resmatch = None + + if resmatch is not None: + elastalert_logger.info( + 'Alert for %s, %s at %s:' % (self.rule['name'], resmatch, lookup_es_key(match, self.rule['timestamp_field']))) + alerts.append( + 'Alert for %s, %s at %s:' % (self.rule['name'], resmatch, lookup_es_key( + match, self.rule['timestamp_field'])) + ) + fullmessage['match'] = resmatch + else: + elastalert_logger.info('Rule %s generated an alert at %s:' % ( + self.rule['name'], lookup_es_key(match, self.rule['timestamp_field']))) + alerts.append( + 'Rule %s generated an alert at %s:' % (self.rule['name'], lookup_es_key( + match, self.rule['timestamp_field'])) + ) + fullmessage['match'] = lookup_es_key( + match, self.rule['timestamp_field']) + elastalert_logger.info(str(BasicMatchString(self.rule, match))) + + fullmessage['alerts'] = alerts + fullmessage['rule'] = self.rule['name'] + fullmessage['rule_file'] = self.rule['rule_file'] + + fullmessage['matching'] = str(BasicMatchString(self.rule, match)) + fullmessage['alertDate'] = datetime.datetime.now( + ).strftime("%Y-%m-%d %H:%M:%S") + fullmessage['body'] = self.create_alert_body(matches) + + fullmessage['matches'] = matches + + self.stomp_hostname = self.rule.get('stomp_hostname', 'localhost') + self.stomp_hostport = self.rule.get('stomp_hostport', '61613') + self.stomp_login = self.rule.get('stomp_login', 'admin') + self.stomp_password = self.rule.get('stomp_password', 'admin') + self.stomp_destination = self.rule.get( + 'stomp_destination', '/queue/ALERT') + + try: + conn = stomp.Connection([(self.stomp_hostname, self.stomp_hostport)]) + + conn.connect(self.stomp_login, self.stomp_password) + # Ensures that the CONNECTED frame is received otherwise, the disconnect call will fail. 
+ time.sleep(1) + conn.send(self.stomp_destination, json.dumps(fullmessage)) + conn.disconnect() + except Exception as e: + raise EAException("Error posting to Stomp: %s" % e) + elastalert_logger.info("Alert sent to Stomp") + + def get_info(self): + return {'type': 'stomp'} diff --git a/elastalert/alerters/teams.py b/elastalert/alerters/teams.py new file mode 100644 index 000000000..731ef31e4 --- /dev/null +++ b/elastalert/alerters/teams.py @@ -0,0 +1,103 @@ +import copy +import json +import requests + +from elastalert.alerts import Alerter, DateTimeEncoder +from elastalert.util import EAException, elastalert_logger, lookup_es_key +from requests.exceptions import RequestException + + +class MsTeamsAlerter(Alerter): + """ Creates a Microsoft Teams Conversation Message for each alert """ + required_options = frozenset(['ms_teams_webhook_url']) + + def __init__(self, rule): + super(MsTeamsAlerter, self).__init__(rule) + self.ms_teams_webhook_url = self.rule.get('ms_teams_webhook_url', None) + if isinstance(self.ms_teams_webhook_url, str): + self.ms_teams_webhook_url = [self.ms_teams_webhook_url] + self.ms_teams_proxy = self.rule.get('ms_teams_proxy', None) + self.ms_teams_alert_summary = self.rule.get('ms_teams_alert_summary', None) + self.ms_teams_alert_fixed_width = self.rule.get('ms_teams_alert_fixed_width', False) + self.ms_teams_theme_color = self.rule.get('ms_teams_theme_color', '') + self.ms_teams_ca_certs = self.rule.get('ms_teams_ca_certs') + self.ms_teams_ignore_ssl_errors = self.rule.get('ms_teams_ignore_ssl_errors', False) + self.ms_teams_alert_facts = self.rule.get('ms_teams_alert_facts', '') + self.ms_teams_attach_kibana_discover_url = self.rule.get('ms_teams_attach_kibana_discover_url', False) + self.ms_teams_kibana_discover_title = self.rule.get('ms_teams_kibana_discover_title', 'Discover in Kibana') + + def format_body(self, body): + if self.ms_teams_alert_fixed_width: + body = body.replace('`', "'") + body = "```{0}```".format('```\n\n```'.join(x for x in body.split('\n'))).replace('\n``````', '') + return body + + def populate_facts(self, matches): + alert_facts = [] + for arg in self.ms_teams_alert_facts: + arg = copy.copy(arg) + matched_value = lookup_es_key(matches[0], arg['value']) + arg['value'] = matched_value if matched_value is not None else arg['value'] + alert_facts.append(arg) + return alert_facts + + def alert(self, matches): + body = self.create_alert_body(matches) + body = self.format_body(body) + + title = self.create_title(matches) + summary = title if self.ms_teams_alert_summary is None else self.ms_teams_alert_summary + # post to Teams + headers = {'content-type': 'application/json'} + + if self.ms_teams_ca_certs: + verify = self.ms_teams_ca_certs + else: + verify = not self.ms_teams_ignore_ssl_errors + if self.ms_teams_ignore_ssl_errors: + requests.packages.urllib3.disable_warnings() + + # set https proxy, if it was provided + proxies = {'https': self.ms_teams_proxy} if self.ms_teams_proxy else None + payload = { + '@type': 'MessageCard', + '@context': 'http://schema.org/extensions', + 'summary': summary , + 'title': title, + 'sections': [{'text': body}], + } + + if self.ms_teams_alert_facts != '': + payload['sections'][0]['facts'] = self.populate_facts(matches) + + if self.ms_teams_theme_color != '': + payload['themeColor'] = self.ms_teams_theme_color + + if self.ms_teams_attach_kibana_discover_url: + kibana_discover_url = lookup_es_key(matches[0], 'kibana_discover_url') + if kibana_discover_url: + payload['potentialAction'] = [ + { + '@type': 'OpenUri', 
+ 'name': self.ms_teams_kibana_discover_title, + 'targets': [ + { + 'os': 'default', + 'uri': kibana_discover_url, + } + ], + } + ] + + for url in self.ms_teams_webhook_url: + try: + response = requests.post(url, data=json.dumps(payload, cls=DateTimeEncoder), + headers=headers, proxies=proxies, verify=verify) + response.raise_for_status() + except RequestException as e: + raise EAException("Error posting to MS Teams: %s" % e) + elastalert_logger.info("Alert sent to MS Teams") + + def get_info(self): + return {'type': 'ms_teams', + 'ms_teams_webhook_url': self.ms_teams_webhook_url} diff --git a/elastalert/alerters/telegram.py b/elastalert/alerters/telegram.py new file mode 100644 index 000000000..486325372 --- /dev/null +++ b/elastalert/alerters/telegram.py @@ -0,0 +1,67 @@ +import json +import warnings + +import requests +from requests import RequestException +from requests.auth import HTTPProxyAuth + +from elastalert.alerts import Alerter, BasicMatchString, DateTimeEncoder +from elastalert.util import EAException, elastalert_logger + + +class TelegramAlerter(Alerter): + """ Send a Telegram message via bot api for each alert """ + required_options = frozenset(['telegram_bot_token', 'telegram_room_id']) + + def __init__(self, rule): + super(TelegramAlerter, self).__init__(rule) + self.telegram_bot_token = self.rule.get('telegram_bot_token', None) + self.telegram_room_id = self.rule.get('telegram_room_id', None) + self.telegram_api_url = self.rule.get('telegram_api_url', 'api.telegram.org') + self.url = 'https://%s/bot%s/%s' % (self.telegram_api_url, self.telegram_bot_token, "sendMessage") + self.telegram_proxy = self.rule.get('telegram_proxy', None) + self.telegram_proxy_login = self.rule.get('telegram_proxy_login', None) + self.telegram_proxy_password = self.rule.get('telegram_proxy_pass', None) + self.telegram_parse_mode = self.rule.get('telegram_parse_mode', 'markdown') + + def alert(self, matches): + if self.telegram_parse_mode != 'html': + body = '⚠ *%s* ⚠ ```\n' % (self.create_title(matches)) + else: + body = '⚠ %s ⚠ \n' % (self.create_title(matches)) + + for match in matches: + body += str(BasicMatchString(self.rule, match)) + # Separate text of aggregated alerts with dashes + if len(matches) > 1: + body += '\n----------------------------------------\n' + if len(body) > 4095: + body = body[0:4000] + "\n⚠ *message was cropped according to telegram limits!* ⚠" + + if self.telegram_parse_mode != 'html': + body += ' ```' + + headers = {'content-type': 'application/json'} + # set https proxy, if it was provided + proxies = {'https': self.telegram_proxy} if self.telegram_proxy else None + auth = HTTPProxyAuth(self.telegram_proxy_login, self.telegram_proxy_password) if self.telegram_proxy_login else None + payload = { + 'chat_id': self.telegram_room_id, + 'text': body, + 'parse_mode': self.telegram_parse_mode, + 'disable_web_page_preview': True + } + + try: + response = requests.post(self.url, data=json.dumps(payload, cls=DateTimeEncoder), headers=headers, proxies=proxies, auth=auth) + warnings.resetwarnings() + response.raise_for_status() + except RequestException as e: + raise EAException("Error posting to Telegram: %s. 
Details: %s" % (e, "" if e.response is None else e.response.text)) + + elastalert_logger.info( + "Alert sent to Telegram room %s" % self.telegram_room_id) + + def get_info(self): + return {'type': 'telegram', + 'telegram_room_id': self.telegram_room_id} diff --git a/elastalert/alerters/tencentsms.py b/elastalert/alerters/tencentsms.py new file mode 100644 index 000000000..96f4b3f5e --- /dev/null +++ b/elastalert/alerters/tencentsms.py @@ -0,0 +1,150 @@ +from elastalert.alerts import Alerter, BasicMatchString +from elastalert.util import EAException, elastalert_logger +import json +from tencentcloud.common import credential +from tencentcloud.common.profile.client_profile import ClientProfile +from tencentcloud.common.profile.http_profile import HttpProfile +from tencentcloud.common.exception.tencent_cloud_sdk_exception import TencentCloudSDKException +from tencentcloud.sms.v20210111 import models +from tencentcloud.sms.v20210111.sms_client import SmsClient +from jsonpointer import resolve_pointer + + +class TencentSMSAlerter(Alerter): + # en doc: https://intl.cloud.tencent.com/document/product/382/40606 + # zh-cn doc: https://cloud.tencent.com/document/product/382/43196 + """ Send alert using tencent SMS service """ + + # By setting required_options to a set of strings + # You can ensure that the rule config file specifies all + # of the options. Otherwise, ElastAlert 2 will throw an exception + # when trying to load the rule. + required_options = frozenset([ + 'tencent_sms_secret_id', + 'tencent_sms_secret_key', + 'tencent_sms_sdk_appid', + 'tencent_sms_to_number', + 'tencent_sms_template_id', + ]) + + def __init__(self, *args): + super(TencentSMSAlerter, self).__init__(*args) + self.tencent_sms_secret_id = self.rule.get('tencent_sms_secret_id') + self.tencent_sms_secret_key = self.rule.get('tencent_sms_secret_key') + self.tencent_sms_sdk_appid = self.rule.get('tencent_sms_sdk_appid') + self.tencent_sms_to_number = self.rule.get('tencent_sms_to_number', []) + self.tencent_sms_region = self.rule.get('tencent_sms_region', 'ap-guangzhou') + self.tencent_sms_sign_name = self.rule.get('tencent_sms_sign_name') # this parameter is required for Mainland China SMS. + self.tencent_sms_template_id = self.rule.get('tencent_sms_template_id') + self.tencent_sms_template_parm = self.rule.get('tencent_sms_template_parm', []) + + # Alert is called + def alert(self, matches): + try: + elastalert_logger.debug("matches:%s", json.dumps(matches)) + client = self.get_client() + # Instantiate a request object. You can further set the request parameters according to the API called and actual conditions + # You can directly check the SDK source code to determine which attributes of `SendSmsRequest` can be set + # An attribute may be of a basic type or import another data structure + # We recommend you use the IDE for development where you can easily redirect to and view the documentation of each API and data structure + req = models.SendSmsRequest() + # Settings of a basic parameter: + # The SDK uses the pointer style to specify parameters, so even for basic parameters, you need to use pointers to assign values to them. 
+ # The SDK provides encapsulation functions for importing the pointers of basic parameters + # Help link: + # SMS console: https://console.cloud.tencent.com/smsv2 + # sms helper: https://intl.cloud.tencent.com/document/product/382/3773?from_cn_redirect=1 + # SMS application ID, which is the `SdkAppId` generated after an application is added in the [SMS console], such as 1400006666 + # 短信应用ID: 短信SdkAppid在 [短信控制台] 添加应用后生成的实际SdkAppid,示例如 1400006666 + req.SmsSdkAppId = self.tencent_sms_sdk_appid + + # SMS signature content, which should be encoded in UTF-8. You must enter an approved signature, which can be viewed in the [SMS console] + # 短信签名内容: 使用 UTF-8 编码,必须填写已审核通过的签名,签名信息可登录 [短信控制台] 查看 + req.SignName = self.tencent_sms_sign_name + + # SMS code number extension, which is not activated by default. If you need to activate it, please contact [SMS Helper] + # 短信码号扩展号: 默认未开通,如需开通请联系 [sms helper] + req.ExtendCode = "" + + # User session content, which can carry context information such as user-side ID and will be returned as-is by the server + # 用户的 session 内容: 可以携带用户侧 ID 等上下文信息,server 会原样返回 + # req.SessionContext = "xxx" + + # `senderid` for Global SMS, which is not activated by default. If you need to activate it, please contact [SMS Helper] for assistance. This parameter should be left empty for Mainland China SMS + # 国际/港澳台短信 senderid: 国内短信填空,默认未开通,如需开通请联系 [sms helper] + # req.SenderId = "" + + # Target mobile number in the E.164 standard (+[country/region code][mobile number]) + # Example: +8613711112222, which has a + sign followed by 86 (country/region code) and then by 13711112222 (mobile number). Up to 200 mobile numbers are supported + # 下发手机号码,采用 e.164 标准,+[国家或地区码][手机号] + # 示例如:+8613711112222,其中前面有一个+号 ,86为国家码,13711112222为手机号,最多不要超过200个手机号 + req.PhoneNumberSet = self.tencent_sms_to_number + + # Template ID. You must enter the ID of an approved template, which can be viewed in the [SMS console] + # 模板 ID: 必须填写已审核通过的模板 ID。模板ID可登录 [短信控制台] 查看 + req.TemplateId = self.tencent_sms_template_id + + # Template parameters. If there are no template parameters, leave it empty + req.TemplateParamSet = self.create_template_parm(matches) + + elastalert_logger.debug("SendSms request :%s", json.dumps(req.__dict__)) + + # Initialize the request by calling the `DescribeInstances` method on the client object. Note: the request method name corresponds to the request object + # The returned `resp` is an instance of the `DescribeInstancesResponse` class which corresponds to the request object + resp = client.SendSms(req) + # A string return packet in JSON format is outputted + elastalert_logger.debug("SendSms response :%s", resp.to_json_string()) + for item in resp.SendStatusSet: + if item.Code != "Ok": + raise TencentCloudSDKException(item.Code, item.Message, resp.RequestId) + except TencentCloudSDKException as e: + raise EAException("Error posting to TencentSMS: %s" % e) + elastalert_logger.info("Alert sent to TencentSMS") + + def get_client(self): + # Required steps: + # Instantiate an authentication object. The Tencent Cloud account key pair `secretId` and `secretKey` need to be passed in as the input parameters. + # The example here uses the way to read from the environment variable, so you need to set these two values in the environment variable first. + # You can also write the key pair directly into the code, but be careful not to copy, upload, or share the code to others; + # otherwise, the key pair may be leaked, causing damage to your properties. 
+ # Query the CAM key: https://console.cloud.tencent.com/cam/capi + cred = credential.Credential(self.tencent_sms_secret_id, self.tencent_sms_secret_key) + # cred = credential.Credential( + # os.environ.get(""), + # os.environ.get("") + # ) + # (Optional) Instantiate an HTTP option + httpProfile = HttpProfile() + # If you need to specify the proxy for API access, you can initialize HttpProfile as follows + # httpProfile = HttpProfile(proxy="http://username:password@proxy IP:proxy port") + httpProfile.reqMethod = "POST" # POST request (POST request by default) + httpProfile.reqTimeout = 30 # Request timeout period in seconds (60 seconds by default) + httpProfile.endpoint = "sms.tencentcloudapi.com" # Specify the access region domain name (nearby access by default) + # Optional steps: + # Instantiate a client configuration object. You can specify the timeout period and other configuration items + clientProfile = ClientProfile() + clientProfile.signMethod = "TC3-HMAC-SHA256" # Specify the signature algorithm + clientProfile.language = "en-US" + clientProfile.httpProfile = httpProfile + # Instantiate the client object of the requested product (with SMS as an example) + # The second parameter is the region information. You can directly enter the string `ap-guangzhou` or import the preset constant + client = SmsClient(cred, self.tencent_sms_region, clientProfile) + return client + + def create_template_parm(self, matches): + esData = matches[0] + templateParam = [] + if len(self.tencent_sms_template_parm) == 0: + return [] + for key in self.tencent_sms_template_parm: + templateParam.append(resolve_pointer(esData, key)) + return templateParam + + # get_info is called after an alert is sent to get data that is written back + # to Elasticsearch in the field "alert_info" + # It should return a dict of information relevant to what the alert does + def get_info(self): + return { + 'type': 'tencent sms', + 'to_number': self.tencent_sms_to_number + } diff --git a/elastalert/alerters/thehive.py b/elastalert/alerters/thehive.py new file mode 100644 index 000000000..809cd174f --- /dev/null +++ b/elastalert/alerters/thehive.py @@ -0,0 +1,174 @@ +import json +import time +import uuid + +import requests +from requests import RequestException + +from elastalert.alerts import Alerter +from elastalert.util import lookup_es_key, EAException, elastalert_logger + +class HiveAlerter(Alerter): + """ + Use matched data to create alerts containing observables in an instance of TheHive + """ + required_options = set(['hive_connection', 'hive_alert_config']) + + def lookup_field(self, match: dict, field_name: str, default): + """Populates a field with values depending on the contents of the Elastalert match + provided to it. + + Uses a similar algorithm to that implemented to populate the `alert_text_args`. + First checks any fields found in the match provided, then any fields defined in + the rule, finally returning the default value provided if no value can be found. 
+ """ + field_value = lookup_es_key(match, field_name) + if field_value is None: + field_value = self.rule.get(field_name, default) + + return field_value + + # Iterate through the matches, building up a list of observables + def load_observable_artifacts(self, match: dict): + artifacts = [] + for mapping in self.rule.get('hive_observable_data_mapping', []): + for observable_type, mapping_key in mapping.items(): + if (observable_type != "tlp" and observable_type != "message" and observable_type != "tags"): + data = str(self.lookup_field(match, mapping_key, '')) + if len(data) != 0: + artifact = {'tlp': 2, + 'tags': [], + 'message': None, + 'dataType': observable_type, + 'data': data} + if mapping.get('tlp') is not None: + artifact['tlp'] = mapping['tlp'] + if mapping.get('message') is not None: + artifact['message'] = mapping['message'] + if mapping.get('tags') is not None: + artifact['tags'] = mapping['tags'] + artifacts.append(artifact) + break + return artifacts + + def load_custom_fields(self, custom_fields_raw: list, match: dict): + custom_fields = {} + position = 0 + + for field in custom_fields_raw: + if (isinstance(field['value'], str)): + value = self.lookup_field(match, field['value'], field['value']) + else: + value = field['value'] + + custom_fields[field['name']] = {'order': position, field['type']: value} + position += 1 + + return custom_fields + + def load_tags(self, tag_names: list, match: dict): + tag_values = set() + for tag in tag_names: + tag_value = self.lookup_field(match, tag, tag) + if isinstance(tag_value, list): + for sub_tag in tag_value: + tag_values.add(str(sub_tag)) + else: + tag_values.add(str(tag_value)) + + return tag_values + + def load_args(self, field, raw, match: dict): + missing = self.rule['hive_alert_config'].get(field + '_missing_value', '') + args = field + "_args" + if args in self.rule.get('hive_alert_config'): + process_args = self.rule['hive_alert_config'].get(args) + process_values=[] + for arg in process_args: + process_values.append(self.lookup_field(match, arg, missing)) + for i, text_value in enumerate(process_values): + if text_value is None: + process_value = self.rule.get(process_args[i]) + if process_value: + process_values[i] = process_value + process_values = [missing if val is None else val for val in process_values] + raw = raw.format(*process_values) + return raw + else: + return raw + + def alert(self, matches): + # Build TheHive alert object, starting with some defaults, updating with any + # user-specified config + alert_config = { + 'artifacts': [], + 'customFields': {}, + 'date': int(time.time()) * 1000, + 'description': self.create_alert_body(matches), + 'sourceRef': str(uuid.uuid4()), + 'tags': [], + 'title': self.create_title(matches), + } + alert_config.update(self.rule.get('hive_alert_config', {})) + + # Iterate through each match found, populating the alert tags and observables as required + tags = set() + artifacts = [] + for match in matches: + artifacts = artifacts + self.load_observable_artifacts(match) + tags.update(self.load_tags(alert_config['tags'], match)) + + alert_config['artifacts'] = artifacts + alert_config['tags'] = list(tags) + + # Populate the customFields + if len(matches) > 0: + #Populate dynamic fields + alert_config['customFields'] = self.load_custom_fields(alert_config['customFields'], matches[0]) + alert_config['description']=self.load_args("description", alert_config['description'], matches[0]) + if 'description_args' in alert_config: + del alert_config['description_args'] + + 
alert_config["title"] = self.load_args("title", alert_config["title"], matches[0]) + if 'title_args' in alert_config: + del alert_config['title_args'] + + alert_config["type"] = self.load_args("type", alert_config["type"], matches[0]) + if 'type_args' in alert_config: + del alert_config['type_args'] + + alert_config["source"] = self.load_args("source", alert_config["source"], matches[0]) + if 'source_args' in alert_config: + del alert_config['source_args'] + + # POST the alert to TheHive + connection_details = self.rule['hive_connection'] + + api_key = connection_details.get('hive_apikey', '') + hive_host = connection_details.get('hive_host', 'http://localhost') + hive_port = connection_details.get('hive_port', 9000) + proxies = connection_details.get('hive_proxies', {'http': '', 'https': ''}) + verify = connection_details.get('hive_verify', False) + + alert_body = json.dumps(alert_config, indent=4, sort_keys=True) + req = f'{hive_host}:{hive_port}/api/alert' + headers = {'Content-Type': 'application/json', + 'Authorization': f'Bearer {api_key}'} + + try: + response = requests.post(req, + headers=headers, + data=alert_body, + proxies=proxies, + verify=verify) + response.raise_for_status() + except RequestException as e: + raise EAException(f"Error posting to TheHive: {e}") + elastalert_logger.info("Alert sent to TheHive") + + def get_info(self): + + return { + 'type': 'hivealerter', + 'hive_host': self.rule.get('hive_connection', {}).get('hive_host', '') + } diff --git a/elastalert/alerters/twilio.py b/elastalert/alerters/twilio.py new file mode 100644 index 000000000..dece40117 --- /dev/null +++ b/elastalert/alerters/twilio.py @@ -0,0 +1,45 @@ +from twilio.base.exceptions import TwilioRestException +from twilio.rest import Client as TwilioClient + +from elastalert.alerts import Alerter +from elastalert.util import EAException, elastalert_logger + + +class TwilioAlerter(Alerter): + required_options = frozenset(['twilio_account_sid', 'twilio_auth_token', 'twilio_to_number']) + + def __init__(self, rule): + super(TwilioAlerter, self).__init__(rule) + self.twilio_account_sid = self.rule.get('twilio_account_sid', None) + self.twilio_auth_token = self.rule.get('twilio_auth_token', None) + self.twilio_to_number = self.rule.get('twilio_to_number', None) + self.twilio_from_number = self.rule.get('twilio_from_number', None) + self.twilio_message_service_sid = self.rule.get('twilio_message_service_sid', None) + self.twilio_use_copilot = self.rule.get('twilio_use_copilot', False) + + def alert(self, matches): + client = TwilioClient(self.twilio_account_sid, self.twilio_auth_token) + + try: + if self.twilio_use_copilot: + if self.twilio_message_service_sid is None: + raise EAException("Twilio Copilot requires the 'twilio_message_service_sid' option") + + client.messages.create(body=self.rule['name'], + to=self.twilio_to_number, + messaging_service_sid=self.twilio_message_service_sid) + else: + if self.twilio_from_number is None: + raise EAException("Twilio SMS requires the 'twilio_from_number' option") + + client.messages.create(body=self.rule['name'], + to=self.twilio_to_number, + from_=self.twilio_from_number) + except TwilioRestException as e: + raise EAException("Error posting to twilio: %s" % e) + + elastalert_logger.info("Trigger sent to Twilio") + + def get_info(self): + return {'type': 'twilio', + 'twilio_client_name': self.twilio_from_number} diff --git a/elastalert/alerters/victorops.py b/elastalert/alerters/victorops.py new file mode 100644 index 000000000..b9a7ecb82 --- /dev/null +++ 
b/elastalert/alerters/victorops.py @@ -0,0 +1,55 @@ +import json + +import requests +from requests import RequestException + +from elastalert.alerts import Alerter, DateTimeEncoder +from elastalert.util import EAException, elastalert_logger + + +class VictorOpsAlerter(Alerter): + """ Creates a VictorOps Incident for each alert """ + required_options = frozenset(['victorops_api_key', 'victorops_routing_key', 'victorops_message_type']) + + def __init__(self, rule): + super(VictorOpsAlerter, self).__init__(rule) + self.victorops_api_key = self.rule.get('victorops_api_key', None) + self.victorops_routing_key = self.rule.get('victorops_routing_key', None) + self.victorops_message_type = self.rule.get('victorops_message_type', None) + self.victorops_entity_id = self.rule.get('victorops_entity_id', None) + self.victorops_entity_display_name = self.rule.get('victorops_entity_display_name', None) # set entity_display_name from alert_subject by default + self.url = 'https://alert.victorops.com/integrations/generic/20131114/alert/%s/%s' % ( + self.victorops_api_key, self.victorops_routing_key) + self.victorops_proxy = self.rule.get('victorops_proxy', None) + + def alert(self, matches): + body = self.create_alert_body(matches) + + # post to victorops + headers = {'content-type': 'application/json'} + # set https proxy, if it was provided + proxies = {'https': self.victorops_proxy} if self.victorops_proxy else None + # set title with alert_subject + self.victorops_entity_display_name = self.create_title(matches) if \ + self.victorops_entity_display_name is None else self.victorops_entity_display_name + payload = { + "message_type": self.victorops_message_type, + "entity_display_name": self.victorops_entity_display_name, + "monitoring_tool": "ElastAlert", + "state_message": body + } + # add all data from event payload + payload.update(matches[0]) + if self.victorops_entity_id: + payload["entity_id"] = self.victorops_entity_id + + try: + response = requests.post(self.url, data=json.dumps(payload, cls=DateTimeEncoder), headers=headers, proxies=proxies) + response.raise_for_status() + except RequestException as e: + raise EAException("Error posting to VictorOps: %s" % e) + elastalert_logger.info("Trigger sent to VictorOps") + + def get_info(self): + return {'type': 'victorops', + 'victorops_routing_key': self.victorops_routing_key} diff --git a/elastalert/alerters/zabbix.py b/elastalert/alerters/zabbix.py new file mode 100644 index 000000000..59bfd9eb9 --- /dev/null +++ b/elastalert/alerters/zabbix.py @@ -0,0 +1,105 @@ +from datetime import datetime + +from pyzabbix import ZabbixSender, ZabbixMetric, ZabbixAPI + +from elastalert.alerts import Alerter +from elastalert.util import elastalert_logger, lookup_es_key, EAException + + +class ZabbixClient(ZabbixAPI): + + def __init__(self, url='http://localhost', use_authenticate=False, user='Admin', password='zabbix', + sender_host='localhost', sender_port=10051): + self.url = url + self.use_authenticate = use_authenticate + self.sender_host = sender_host + self.sender_port = sender_port + self.metrics_chunk_size = 200 + self.aggregated_metrics = [] + + super(ZabbixClient, self).__init__(url=self.url, + use_authenticate=self.use_authenticate, + user=user, + password=password) + + def send_metric(self, hostname, key, data): + zm = ZabbixMetric(hostname, key, data) + if self.send_aggregated_metrics: + self.aggregated_metrics.append(zm) + if len(self.aggregated_metrics) > self.metrics_chunk_size: + elastalert_logger.info("Sending: %s metrics" % 
(len(self.aggregated_metrics))) + try: + ZabbixSender(zabbix_server=self.sender_host, zabbix_port=self.sender_port) \ + .send(self.aggregated_metrics) + self.aggregated_metrics = [] + except Exception as e: + elastalert_logger.exception(e) + else: + try: + ZabbixSender(zabbix_server=self.sender_host, zabbix_port=self.sender_port).send([zm]) + except Exception as e: + elastalert_logger.exception(e) + + +class ZabbixAlerter(Alerter): + # By setting required_options to a set of strings + # You can ensure that the rule config file specifies all + # of the options. Otherwise, ElastAlert will throw an exception + # when trying to load the rule. + required_options = frozenset(['zbx_host', 'zbx_key']) + + def __init__(self, *args): + super(ZabbixAlerter, self).__init__(*args) + + self.zbx_sender_host = self.rule.get('zbx_sender_host', 'localhost') + self.zbx_sender_port = self.rule.get('zbx_sender_port', 10051) + self.zbx_host_from_field = self.rule.get('zbx_host_from_field', False) + self.zbx_host = self.rule.get('zbx_host', None) + self.zbx_key = self.rule.get('zbx_key', None) + self.timestamp_field = self.rule.get('timestamp_field', '@timestamp') + self.timestamp_type = self.rule.get('timestamp_type', 'iso') + self.timestamp_strptime = self.rule.get('timestamp_strptime', '%Y-%m-%dT%H:%M:%S.%f%z') + + # Alert is called + def alert(self, matches): + + # Matches is a list of match dictionaries. + # It contains more than one match when the alert has + # the aggregation option set + zm = [] + for match in matches: + if ':' not in match[self.timestamp_field] or '-' not in match[self.timestamp_field]: + ts_epoch = int(match[self.timestamp_field]) + else: + try: + ts_epoch = int(datetime.strptime(match[self.timestamp_field], self.timestamp_strptime) + .timestamp()) + except ValueError: + ts_epoch = int(datetime.strptime(match[self.timestamp_field], '%Y-%m-%dT%H:%M:%S%z') + .timestamp()) + if self.zbx_host_from_field: + zbx_host = lookup_es_key(match, self.rule["zbx_host"]) + else: + zbx_host = self.zbx_host + zm.append(ZabbixMetric(host=zbx_host, key=self.zbx_key, value='1', clock=ts_epoch)) + + try: + response = ZabbixSender(zabbix_server=self.zbx_sender_host, zabbix_port=self.zbx_sender_port).send(zm) + if response.failed: + if self.zbx_host_from_field and not zbx_host: + elastalert_logger.warning("Missing term '%s' or host's item '%s', alert will be discarded" + % (self.zbx_host, self.zbx_key)) + else: + elastalert_logger.warning("Missing zabbix host '%s' or host's item '%s', alert will be discarded" + % (zbx_host, self.zbx_key)) + else: + elastalert_logger.info("Alert sent to '%s:%s' zabbix server, '%s' zabbix host, '%s' zabbix host key" + % (self.zbx_sender_host, self.zbx_sender_port, zbx_host, self.zbx_key)) + except Exception as e: + raise EAException("Error sending alert to Zabbix: %s" % e) + + # get_info is called after an alert is sent to get data that is written back + # to Elasticsearch in the field "alert_info" + # It should return a dict of information relevant to what the alert does + def get_info(self): + return {'type': 'zabbix Alerter'} diff --git a/elastalert/alerts.py b/elastalert/alerts.py index f2f31853f..bd5a196a9 100644 --- a/elastalert/alerts.py +++ b/elastalert/alerts.py @@ -1,45 +1,15 @@ # -*- coding: utf-8 -*- import copy -import datetime import json -import logging import os -import re -import subprocess -import sys -import time -import uuid -import warnings -from email.mime.text import MIMEText -from email.utils import formatdate -from html.parser import HTMLParser -from 
smtplib import SMTP -from smtplib import SMTP_SSL -from smtplib import SMTPAuthenticationError -from smtplib import SMTPException -from socket import error -import boto3 -import requests -import stomp -from exotel import Exotel -from jira.client import JIRA -from jira.exceptions import JIRAError -from requests.auth import HTTPProxyAuth -from requests.exceptions import RequestException -from staticconf.loader import yaml_loader +from jinja2 import Template from texttable import Texttable -from twilio.base.exceptions import TwilioRestException -from twilio.rest import Client as TwilioClient -from .util import EAException -from .util import elastalert_logger -from .util import lookup_es_key -from .util import pretty_ts -from .util import resolve_string -from .util import ts_now -from .util import ts_to_dt +from elastalert.util import EAException, lookup_es_key +from elastalert.yaml import read_yaml +from collections import Counter class DateTimeEncoder(json.JSONEncoder): def default(self, obj): @@ -63,7 +33,14 @@ def _ensure_new_line(self): def _add_custom_alert_text(self): missing = self.rule.get('alert_missing_value', '') alert_text = str(self.rule.get('alert_text', '')) - if 'alert_text_args' in self.rule: + if self.rule.get('alert_text_type') == 'alert_text_jinja': + # Top fields are accessible via `{{field_name}}` or `{{jinja_root_name['field_name']}}` + # `jinja_root_name` dict is useful when accessing *fields with dots in their keys*, + # as Jinja treat dot as a nested field. + template_values = self.rule | self.match + alert_text = self.rule.get("jinja_template").render( + template_values | {self.rule['jinja_root_name']: template_values}) + elif 'alert_text_args' in self.rule: alert_text_args = self.rule.get('alert_text_args') alert_text_values = [lookup_es_key(self.match, arg) for arg in alert_text_args] @@ -126,7 +103,10 @@ def _add_match_items(self): except TypeError: # Non serializable object, fallback to str pass - self.text += '%s: %s\n' % (key, value_str) + if (isinstance(self.text,dict)): + self.text[key] = value_str + else: + self.text += '%s: %s\n' % (key, value_str) def _pretty_print_as_json(self, blob): try: @@ -136,28 +116,31 @@ def _pretty_print_as_json(self, blob): return json.dumps(blob, cls=DateTimeEncoder, sort_keys=True, indent=4, encoding='Latin-1', ensure_ascii=False) def __str__(self): - self.text = '' - if 'alert_text' not in self.rule: - self.text += self.rule['name'] + '\n\n' - - self._add_custom_alert_text() - self._ensure_new_line() - if self.rule.get('alert_text_type') != 'alert_text_only': - self._add_rule_text() + if 'json_payload' in self.rule and self.rule['json_payload'] == True: + self.text= {} + if 'alert_text' not in self.rule: + self.text['elastalert_rule'] = self.rule['name'] + if self.rule.get('alert_text_type') != 'alert_text_only' and self.rule.get('alert_text_type') != 'alert_text_jinja': + self.text['alert_criteria'] = self.rule['type'].get_match_str(self.match) + if self.rule.get('top_count_keys'): + self._add_top_counts() + if self.rule.get('alert_text_type') != 'exclude_fields': + self._add_match_items() + return str(self.text) + else: + self.text = '' + if 'alert_text' not in self.rule: + self.text += self.rule['name'] + '\n\n' + self._add_custom_alert_text() self._ensure_new_line() - if self.rule.get('top_count_keys'): - self._add_top_counts() - if self.rule.get('alert_text_type') != 'exclude_fields': - self._add_match_items() - return self.text - - -class JiraFormattedMatchString(BasicMatchString): - def _add_match_items(self): - 
match_items = dict([(x, y) for x, y in list(self.match.items()) if not x.startswith('top_events_')]) - json_blob = self._pretty_print_as_json(match_items) - preformatted_text = '{{code}}{0}{{code}}'.format(json_blob) - self.text += preformatted_text + if self.rule.get('alert_text_type') != 'alert_text_only' and self.rule.get('alert_text_type') != 'alert_text_jinja': + self._add_rule_text() + self._ensure_new_line() + if self.rule.get('top_count_keys'): + self._add_top_counts() + if self.rule.get('alert_text_type') != 'exclude_fields': + self._add_match_items() + return self.text class Alerter(object): @@ -214,7 +197,7 @@ def get_info(self): return {'type': 'Unknown'} def create_title(self, matches): - """ Creates custom alert title to be used, e.g. as an e-mail subject or JIRA issue summary. + """ Creates custom alert title to be used, e.g. as an e-mail subject or Jira issue summary. :param matches: A list of dictionaries of relevant information to the alert. """ @@ -243,7 +226,10 @@ def create_custom_title(self, matches): missing = self.rule.get('alert_missing_value', '') alert_subject_values = [missing if val is None else val for val in alert_subject_values] alert_subject = alert_subject.format(*alert_subject_values) - + elif self.rule.get('alert_text_type') == "alert_text_jinja": + title_template = Template(str(self.rule.get('alert_subject', ''))) + template_values = self.rule | matches[0] + alert_subject = title_template.render(template_values | {self.rule['jinja_root_name']: template_values}) if len(alert_subject) > alert_subject_max_len: alert_subject = alert_subject[:alert_subject_max_len] @@ -266,19 +252,26 @@ def get_aggregation_summary_text__maximum_width(self): def get_aggregation_summary_text(self, matches): text = '' if 'aggregation' in self.rule and 'summary_table_fields' in self.rule: + summary_table_type = self.rule.get('summary_table_type', 'ascii') + + #Type independent prefix text = self.rule.get('summary_prefix', '') + # If a prefix is set, ensure there is a newline between it and the hardcoded + # 'Aggregation resulted in...' 
header below + if text != '': + text += "\n" + summary_table_fields = self.rule['summary_table_fields'] if not isinstance(summary_table_fields, list): summary_table_fields = [summary_table_fields] + # Include a count aggregation so that we can see at a glance how many of each aggregation_key were encountered summary_table_fields_with_count = summary_table_fields + ['count'] text += "Aggregation resulted in the following data for summary_table_fields ==> {0}:\n\n".format( summary_table_fields_with_count ) - text_table = Texttable(max_width=self.get_aggregation_summary_text__maximum_width()) - text_table.header(summary_table_fields_with_count) - # Format all fields as 'text' to avoid long numbers being shown as scientific notation - text_table.set_cols_dtype(['t' for i in summary_table_fields_with_count]) + + # Prepare match_aggregation used in both table types match_aggregation = {} # Maintain an aggregate count for each unique key encountered in the aggregation period @@ -288,10 +281,44 @@ def get_aggregation_summary_text(self, matches): match_aggregation[key_tuple] = 1 else: match_aggregation[key_tuple] = match_aggregation[key_tuple] + 1 - for keys, count in match_aggregation.items(): - text_table.add_row([key for key in keys] + [count]) - text += text_table.draw() + '\n\n' - text += self.rule.get('summary_prefix', '') + + # Limit number of rows + if 'summary_table_max_rows' in self.rule: + max_rows = self.rule['summary_table_max_rows'] + match_aggregation = {k:v for k, v in Counter(match_aggregation).most_common(max_rows)} + + # Type dependent table style + if summary_table_type == 'ascii': + text_table = Texttable(max_width=self.get_aggregation_summary_text__maximum_width()) + text_table.header(summary_table_fields_with_count) + # Format all fields as 'text' to avoid long numbers being shown as scientific notation + text_table.set_cols_dtype(['t' for i in summary_table_fields_with_count]) + + for keys, count in match_aggregation.items(): + text_table.add_row([key for key in keys] + [count]) + text += text_table.draw() + '\n\n' + + elif summary_table_type == 'markdown': + # Adapted from https://github.com/codazoda/tomark/blob/master/tomark/tomark.py + # Create table header + text += '| ' + ' | '.join(map(str, summary_table_fields_with_count)) + ' |\n' + # Create header separator + text += '|-----' * len(summary_table_fields_with_count) + '|\n' + # Create table row + for keys, count in match_aggregation.items(): + markdown_row = "" + for key in keys: + markdown_row += '| ' + str(key) + ' ' + text += markdown_row + '| ' + str(count) + ' |\n' + text += '\n' + + # max_rows message + if 'summary_table_max_rows' in self.rule: + text += f"Showing top {self.rule['summary_table_max_rows']} rows" + text += "\n" + + # Type independent suffix + text += self.rule.get('summary_suffix', '') return str(text) def create_default_title(self, matches): @@ -307,1880 +334,8 @@ def get_account(self, account_file): account_file_path = account_file else: account_file_path = os.path.join(os.path.dirname(self.rule['rule_file']), account_file) - account_conf = yaml_loader(account_file_path) + account_conf = read_yaml(account_file_path) if 'user' not in account_conf or 'password' not in account_conf: raise EAException('Account file must have user and password fields') self.user = account_conf['user'] self.password = account_conf['password'] - - -class StompAlerter(Alerter): - """ The stomp alerter publishes alerts via stomp to a broker. 
""" - required_options = frozenset( - ['stomp_hostname', 'stomp_hostport', 'stomp_login', 'stomp_password']) - - def alert(self, matches): - alerts = [] - - qk = self.rule.get('query_key', None) - - fullmessage = {} - for match in matches: - if qk is not None: - resmatch = lookup_es_key(match, qk) - else: - resmatch = None - - if resmatch is not None: - elastalert_logger.info( - 'Alert for %s, %s at %s:' % (self.rule['name'], resmatch, lookup_es_key(match, self.rule['timestamp_field']))) - alerts.append( - 'Alert for %s, %s at %s:' % (self.rule['name'], resmatch, lookup_es_key( - match, self.rule['timestamp_field'])) - ) - fullmessage['match'] = resmatch - else: - elastalert_logger.info('Rule %s generated an alert at %s:' % ( - self.rule['name'], lookup_es_key(match, self.rule['timestamp_field']))) - alerts.append( - 'Rule %s generated an alert at %s:' % (self.rule['name'], lookup_es_key( - match, self.rule['timestamp_field'])) - ) - fullmessage['match'] = lookup_es_key( - match, self.rule['timestamp_field']) - elastalert_logger.info(str(BasicMatchString(self.rule, match))) - - fullmessage['alerts'] = alerts - fullmessage['rule'] = self.rule['name'] - fullmessage['rule_file'] = self.rule['rule_file'] - - fullmessage['matching'] = str(BasicMatchString(self.rule, match)) - fullmessage['alertDate'] = datetime.datetime.now( - ).strftime("%Y-%m-%d %H:%M:%S") - fullmessage['body'] = self.create_alert_body(matches) - - fullmessage['matches'] = matches - - self.stomp_hostname = self.rule.get('stomp_hostname', 'localhost') - self.stomp_hostport = self.rule.get('stomp_hostport', '61613') - self.stomp_login = self.rule.get('stomp_login', 'admin') - self.stomp_password = self.rule.get('stomp_password', 'admin') - self.stomp_destination = self.rule.get( - 'stomp_destination', '/queue/ALERT') - self.stomp_ssl = self.rule.get('stomp_ssl', False) - - conn = stomp.Connection([(self.stomp_hostname, self.stomp_hostport)], use_ssl=self.stomp_ssl) - - conn.start() - conn.connect(self.stomp_login, self.stomp_password) - # Ensures that the CONNECTED frame is received otherwise, the disconnect call will fail. - time.sleep(1) - conn.send(self.stomp_destination, json.dumps(fullmessage)) - conn.disconnect() - - def get_info(self): - return {'type': 'stomp'} - - -class DebugAlerter(Alerter): - """ The debug alerter uses a Python logger (by default, alerting to terminal). 
""" - - def alert(self, matches): - qk = self.rule.get('query_key', None) - for match in matches: - if qk in match: - elastalert_logger.info( - 'Alert for %s, %s at %s:' % (self.rule['name'], match[qk], lookup_es_key(match, self.rule['timestamp_field']))) - else: - elastalert_logger.info('Alert for %s at %s:' % (self.rule['name'], lookup_es_key(match, self.rule['timestamp_field']))) - elastalert_logger.info(str(BasicMatchString(self.rule, match))) - - def get_info(self): - return {'type': 'debug'} - - -class EmailAlerter(Alerter): - """ Sends an email alert """ - required_options = frozenset(['email']) - - def __init__(self, *args): - super(EmailAlerter, self).__init__(*args) - - self.smtp_host = self.rule.get('smtp_host', 'localhost') - self.smtp_ssl = self.rule.get('smtp_ssl', False) - self.from_addr = self.rule.get('from_addr', 'ElastAlert') - self.smtp_port = self.rule.get('smtp_port') - if self.rule.get('smtp_auth_file'): - self.get_account(self.rule['smtp_auth_file']) - self.smtp_key_file = self.rule.get('smtp_key_file') - self.smtp_cert_file = self.rule.get('smtp_cert_file') - # Convert email to a list if it isn't already - if isinstance(self.rule['email'], str): - self.rule['email'] = [self.rule['email']] - # If there is a cc then also convert it a list if it isn't - cc = self.rule.get('cc') - if cc and isinstance(cc, str): - self.rule['cc'] = [self.rule['cc']] - # If there is a bcc then also convert it to a list if it isn't - bcc = self.rule.get('bcc') - if bcc and isinstance(bcc, str): - self.rule['bcc'] = [self.rule['bcc']] - add_suffix = self.rule.get('email_add_domain') - if add_suffix and not add_suffix.startswith('@'): - self.rule['email_add_domain'] = '@' + add_suffix - - def alert(self, matches): - body = self.create_alert_body(matches) - - # Add JIRA ticket if it exists - if self.pipeline is not None and 'jira_ticket' in self.pipeline: - url = '%s/browse/%s' % (self.pipeline['jira_server'], self.pipeline['jira_ticket']) - body += '\nJIRA ticket: %s' % (url) - - to_addr = self.rule['email'] - if 'email_from_field' in self.rule: - recipient = lookup_es_key(matches[0], self.rule['email_from_field']) - if isinstance(recipient, str): - if '@' in recipient: - to_addr = [recipient] - elif 'email_add_domain' in self.rule: - to_addr = [recipient + self.rule['email_add_domain']] - elif isinstance(recipient, list): - to_addr = recipient - if 'email_add_domain' in self.rule: - to_addr = [name + self.rule['email_add_domain'] for name in to_addr] - if self.rule.get('email_format') == 'html': - email_msg = MIMEText(body, 'html', _charset='UTF-8') - else: - email_msg = MIMEText(body, _charset='UTF-8') - email_msg['Subject'] = self.create_title(matches) - email_msg['To'] = ', '.join(to_addr) - email_msg['From'] = self.from_addr - email_msg['Reply-To'] = self.rule.get('email_reply_to', email_msg['To']) - email_msg['Date'] = formatdate() - if self.rule.get('cc'): - email_msg['CC'] = ','.join(self.rule['cc']) - to_addr = to_addr + self.rule['cc'] - if self.rule.get('bcc'): - to_addr = to_addr + self.rule['bcc'] - - try: - if self.smtp_ssl: - if self.smtp_port: - self.smtp = SMTP_SSL(self.smtp_host, self.smtp_port, keyfile=self.smtp_key_file, certfile=self.smtp_cert_file) - else: - self.smtp = SMTP_SSL(self.smtp_host, keyfile=self.smtp_key_file, certfile=self.smtp_cert_file) - else: - if self.smtp_port: - self.smtp = SMTP(self.smtp_host, self.smtp_port) - else: - self.smtp = SMTP(self.smtp_host) - self.smtp.ehlo() - if self.smtp.has_extn('STARTTLS'): - 
self.smtp.starttls(keyfile=self.smtp_key_file, certfile=self.smtp_cert_file) - if 'smtp_auth_file' in self.rule: - self.smtp.login(self.user, self.password) - except (SMTPException, error) as e: - raise EAException("Error connecting to SMTP host: %s" % (e)) - except SMTPAuthenticationError as e: - raise EAException("SMTP username/password rejected: %s" % (e)) - self.smtp.sendmail(self.from_addr, to_addr, email_msg.as_string()) - self.smtp.quit() - - elastalert_logger.info("Sent email to %s" % (to_addr)) - - def create_default_title(self, matches): - subject = 'ElastAlert: %s' % (self.rule['name']) - - # If the rule has a query_key, add that value plus timestamp to subject - if 'query_key' in self.rule: - qk = matches[0].get(self.rule['query_key']) - if qk: - subject += ' - %s' % (qk) - - return subject - - def get_info(self): - return {'type': 'email', - 'recipients': self.rule['email']} - - -class JiraAlerter(Alerter): - """ Creates a Jira ticket for each alert """ - required_options = frozenset(['jira_server', 'jira_account_file', 'jira_project', 'jira_issuetype']) - - # Maintain a static set of built-in fields that we explicitly know how to set - # For anything else, we will do best-effort and try to set a string value - known_field_list = [ - 'jira_account_file', - 'jira_assignee', - 'jira_bump_after_inactivity', - 'jira_bump_in_statuses', - 'jira_bump_not_in_statuses', - 'jira_bump_only', - 'jira_bump_tickets', - 'jira_component', - 'jira_components', - 'jira_description', - 'jira_ignore_in_title', - 'jira_issuetype', - 'jira_label', - 'jira_labels', - 'jira_max_age', - 'jira_priority', - 'jira_project', - 'jira_server', - 'jira_transition_to', - 'jira_watchers', - ] - - # Some built-in jira types that can be used as custom fields require special handling - # Here is a sample of one of them: - # {"id":"customfield_12807","name":"My Custom Field","custom":true,"orderable":true,"navigable":true,"searchable":true, - # "clauseNames":["cf[12807]","My Custom Field"],"schema":{"type":"array","items":"string", - # "custom":"com.atlassian.jira.plugin.system.customfieldtypes:multiselect","customId":12807}} - # There are likely others that will need to be updated on a case-by-case basis - custom_string_types_with_special_handling = [ - 'com.atlassian.jira.plugin.system.customfieldtypes:multicheckboxes', - 'com.atlassian.jira.plugin.system.customfieldtypes:multiselect', - 'com.atlassian.jira.plugin.system.customfieldtypes:radiobuttons', - ] - - def __init__(self, rule): - super(JiraAlerter, self).__init__(rule) - self.server = self.rule['jira_server'] - self.get_account(self.rule['jira_account_file']) - self.project = self.rule['jira_project'] - self.issue_type = self.rule['jira_issuetype'] - - # Deferred settings refer to values that can only be resolved when a match - # is found and as such loading them will be delayed until we find a match - self.deferred_settings = [] - - # We used to support only a single component. This allows us to maintain backwards compatibility - # while also giving the user-facing API a more representative name - self.components = self.rule.get('jira_components', self.rule.get('jira_component')) - - # We used to support only a single label. 
This allows us to maintain backwards compatibility - # while also giving the user-facing API a more representative name - self.labels = self.rule.get('jira_labels', self.rule.get('jira_label')) - - self.description = self.rule.get('jira_description', '') - self.assignee = self.rule.get('jira_assignee') - self.max_age = self.rule.get('jira_max_age', 30) - self.priority = self.rule.get('jira_priority') - self.bump_tickets = self.rule.get('jira_bump_tickets', False) - self.bump_not_in_statuses = self.rule.get('jira_bump_not_in_statuses') - self.bump_in_statuses = self.rule.get('jira_bump_in_statuses') - self.bump_after_inactivity = self.rule.get('jira_bump_after_inactivity', 0) - self.bump_only = self.rule.get('jira_bump_only', False) - self.transition = self.rule.get('jira_transition_to', False) - self.watchers = self.rule.get('jira_watchers') - self.client = None - - if self.bump_in_statuses and self.bump_not_in_statuses: - msg = 'Both jira_bump_in_statuses (%s) and jira_bump_not_in_statuses (%s) are set.' % \ - (','.join(self.bump_in_statuses), ','.join(self.bump_not_in_statuses)) - intersection = list(set(self.bump_in_statuses) & set(self.bump_in_statuses)) - if intersection: - msg = '%s Both have common statuses of (%s). As such, no tickets will ever be found.' % ( - msg, ','.join(intersection)) - msg += ' This should be simplified to use only one or the other.' - logging.warning(msg) - - self.reset_jira_args() - - try: - self.client = JIRA(self.server, basic_auth=(self.user, self.password)) - self.get_priorities() - self.jira_fields = self.client.fields() - self.get_arbitrary_fields() - except JIRAError as e: - # JIRAError may contain HTML, pass along only first 1024 chars - raise EAException("Error connecting to JIRA: %s" % (str(e)[:1024])).with_traceback(sys.exc_info()[2]) - - self.set_priority() - - def set_priority(self): - try: - if self.priority is not None and self.client is not None: - self.jira_args['priority'] = {'id': self.priority_ids[self.priority]} - except KeyError: - logging.error("Priority %s not found. Valid priorities are %s" % (self.priority, list(self.priority_ids.keys()))) - - def reset_jira_args(self): - self.jira_args = {'project': {'key': self.project}, - 'issuetype': {'name': self.issue_type}} - - if self.components: - # Support single component or list - if type(self.components) != list: - self.jira_args['components'] = [{'name': self.components}] - else: - self.jira_args['components'] = [{'name': component} for component in self.components] - if self.labels: - # Support single label or list - if type(self.labels) != list: - self.labels = [self.labels] - self.jira_args['labels'] = self.labels - if self.watchers: - # Support single watcher or list - if type(self.watchers) != list: - self.watchers = [self.watchers] - if self.assignee: - self.jira_args['assignee'] = {'name': self.assignee} - - self.set_priority() - - def set_jira_arg(self, jira_field, value, fields): - # Remove the jira_ part. Convert underscores to spaces - normalized_jira_field = jira_field[5:].replace('_', ' ').lower() - # All jira fields should be found in the 'id' or the 'name' field. Therefore, try both just in case - for identifier in ['name', 'id']: - field = next((f for f in fields if normalized_jira_field == f[identifier].replace('_', ' ').lower()), None) - if field: - break - if not field: - # Log a warning to ElastAlert saying that we couldn't find that type? - # OR raise and fail to load the alert entirely? Probably the latter... 
- raise Exception("Could not find a definition for the jira field '{0}'".format(normalized_jira_field)) - arg_name = field['id'] - # Check the schema information to decide how to set the value correctly - # If the schema information is not available, raise an exception since we don't know how to set it - # Note this is only the case for two built-in types, id: issuekey and id: thumbnail - if not ('schema' in field or 'type' in field['schema']): - raise Exception("Could not determine schema information for the jira field '{0}'".format(normalized_jira_field)) - arg_type = field['schema']['type'] - - # Handle arrays of simple types like strings or numbers - if arg_type == 'array': - # As a convenience, support the scenario wherein the user only provides - # a single value for a multi-value field e.g. jira_labels: Only_One_Label - if type(value) != list: - value = [value] - array_items = field['schema']['items'] - # Simple string types - if array_items in ['string', 'date', 'datetime']: - # Special case for multi-select custom types (the JIRA metadata says that these are strings, but - # in reality, they are required to be provided as an object. - if 'custom' in field['schema'] and field['schema']['custom'] in self.custom_string_types_with_special_handling: - self.jira_args[arg_name] = [{'value': v} for v in value] - else: - self.jira_args[arg_name] = value - elif array_items == 'number': - self.jira_args[arg_name] = [int(v) for v in value] - # Also attempt to handle arrays of complex types that have to be passed as objects with an identifier 'key' - elif array_items == 'option': - self.jira_args[arg_name] = [{'value': v} for v in value] - else: - # Try setting it as an object, using 'name' as the key - # This may not work, as the key might actually be 'key', 'id', 'value', or something else - # If it works, great! If not, it will manifest itself as an API error that will bubble up - self.jira_args[arg_name] = [{'name': v} for v in value] - # Handle non-array types - else: - # Simple string types - if arg_type in ['string', 'date', 'datetime']: - # Special case for custom types (the JIRA metadata says that these are strings, but - # in reality, they are required to be provided as an object. - if 'custom' in field['schema'] and field['schema']['custom'] in self.custom_string_types_with_special_handling: - self.jira_args[arg_name] = {'value': value} - else: - self.jira_args[arg_name] = value - # Number type - elif arg_type == 'number': - self.jira_args[arg_name] = int(value) - elif arg_type == 'option': - self.jira_args[arg_name] = {'value': value} - # Complex type - else: - self.jira_args[arg_name] = {'name': value} - - def get_arbitrary_fields(self): - # Clear jira_args - self.reset_jira_args() - - for jira_field, value in self.rule.items(): - # If we find a field that is not covered by the set that we are aware of, it means it is either: - # 1. A built-in supported field in JIRA that we don't have on our radar - # 2. A custom field that a JIRA admin has configured - if jira_field.startswith('jira_') and jira_field not in self.known_field_list and str(value)[:1] != '#': - self.set_jira_arg(jira_field, value, self.jira_fields) - if jira_field.startswith('jira_') and jira_field not in self.known_field_list and str(value)[:1] == '#': - self.deferred_settings.append(jira_field) - - def get_priorities(self): - """ Creates a mapping of priority index to id. 
""" - priorities = self.client.priorities() - self.priority_ids = {} - for x in range(len(priorities)): - self.priority_ids[x] = priorities[x].id - - def set_assignee(self, assignee): - self.assignee = assignee - if assignee: - self.jira_args['assignee'] = {'name': assignee} - elif 'assignee' in self.jira_args: - self.jira_args.pop('assignee') - - def find_existing_ticket(self, matches): - # Default title, get stripped search version - if 'alert_subject' not in self.rule: - title = self.create_default_title(matches, True) - else: - title = self.create_title(matches) - - if 'jira_ignore_in_title' in self.rule: - title = title.replace(matches[0].get(self.rule['jira_ignore_in_title'], ''), '') - - # This is necessary for search to work. Other special characters and dashes - # directly adjacent to words appear to be ok - title = title.replace(' - ', ' ') - title = title.replace('\\', '\\\\') - - date = (datetime.datetime.now() - datetime.timedelta(days=self.max_age)).strftime('%Y-%m-%d') - jql = 'project=%s AND summary~"%s" and created >= "%s"' % (self.project, title, date) - if self.bump_in_statuses: - jql = '%s and status in (%s)' % (jql, ','.join(["\"%s\"" % status if ' ' in status else status for status - in self.bump_in_statuses])) - if self.bump_not_in_statuses: - jql = '%s and status not in (%s)' % (jql, ','.join(["\"%s\"" % status if ' ' in status else status - for status in self.bump_not_in_statuses])) - try: - issues = self.client.search_issues(jql) - except JIRAError as e: - logging.exception("Error while searching for JIRA ticket using jql '%s': %s" % (jql, e)) - return None - - if len(issues): - return issues[0] - - def comment_on_ticket(self, ticket, match): - text = str(JiraFormattedMatchString(self.rule, match)) - timestamp = pretty_ts(lookup_es_key(match, self.rule['timestamp_field'])) - comment = "This alert was triggered again at %s\n%s" % (timestamp, text) - self.client.add_comment(ticket, comment) - - def transition_ticket(self, ticket): - transitions = self.client.transitions(ticket) - for t in transitions: - if t['name'] == self.transition: - self.client.transition_issue(ticket, t['id']) - - def alert(self, matches): - # Reset arbitrary fields to pick up changes - self.get_arbitrary_fields() - if len(self.deferred_settings) > 0: - fields = self.client.fields() - for jira_field in self.deferred_settings: - value = lookup_es_key(matches[0], self.rule[jira_field][1:]) - self.set_jira_arg(jira_field, value, fields) - - title = self.create_title(matches) - - if self.bump_tickets: - ticket = self.find_existing_ticket(matches) - if ticket: - inactivity_datetime = ts_now() - datetime.timedelta(days=self.bump_after_inactivity) - if ts_to_dt(ticket.fields.updated) >= inactivity_datetime: - if self.pipeline is not None: - self.pipeline['jira_ticket'] = None - self.pipeline['jira_server'] = self.server - return None - elastalert_logger.info('Commenting on existing ticket %s' % (ticket.key)) - for match in matches: - try: - self.comment_on_ticket(ticket, match) - except JIRAError as e: - logging.exception("Error while commenting on ticket %s: %s" % (ticket, e)) - if self.labels: - for label in self.labels: - try: - ticket.fields.labels.append(label) - except JIRAError as e: - logging.exception("Error while appending labels to ticket %s: %s" % (ticket, e)) - if self.transition: - elastalert_logger.info('Transitioning existing ticket %s' % (ticket.key)) - try: - self.transition_ticket(ticket) - except JIRAError as e: - logging.exception("Error while transitioning ticket %s: %s" % 
(ticket, e)) - - if self.pipeline is not None: - self.pipeline['jira_ticket'] = ticket - self.pipeline['jira_server'] = self.server - return None - if self.bump_only: - return None - - self.jira_args['summary'] = title - self.jira_args['description'] = self.create_alert_body(matches) - - try: - self.issue = self.client.create_issue(**self.jira_args) - - # You can not add watchers on initial creation. Only as a follow-up action - if self.watchers: - for watcher in self.watchers: - try: - self.client.add_watcher(self.issue.key, watcher) - except Exception as ex: - # Re-raise the exception, preserve the stack-trace, and give some - # context as to which watcher failed to be added - raise Exception( - "Exception encountered when trying to add '{0}' as a watcher. Does the user exist?\n{1}" .format( - watcher, - ex - )).with_traceback(sys.exc_info()[2]) - - except JIRAError as e: - raise EAException("Error creating JIRA ticket using jira_args (%s): %s" % (self.jira_args, e)) - elastalert_logger.info("Opened Jira ticket: %s" % (self.issue)) - - if self.pipeline is not None: - self.pipeline['jira_ticket'] = self.issue - self.pipeline['jira_server'] = self.server - - def create_alert_body(self, matches): - body = self.description + '\n' - body += self.get_aggregation_summary_text(matches) - if self.rule.get('alert_text_type') != 'aggregation_summary_only': - for match in matches: - body += str(JiraFormattedMatchString(self.rule, match)) - if len(matches) > 1: - body += '\n----------------------------------------\n' - return body - - def get_aggregation_summary_text(self, matches): - text = super(JiraAlerter, self).get_aggregation_summary_text(matches) - if text: - text = '{{noformat}}{0}{{noformat}}'.format(text) - return text - - def create_default_title(self, matches, for_search=False): - # If there is a query_key, use that in the title - - if 'query_key' in self.rule and lookup_es_key(matches[0], self.rule['query_key']): - title = 'ElastAlert: %s matched %s' % (lookup_es_key(matches[0], self.rule['query_key']), self.rule['name']) - else: - title = 'ElastAlert: %s' % (self.rule['name']) - - if for_search: - return title - - timestamp = matches[0].get(self.rule['timestamp_field']) - if timestamp: - title += ' - %s' % (pretty_ts(timestamp, self.rule.get('use_local_time'))) - - # Add count for spikes - count = matches[0].get('spike_count') - if count: - title += ' - %s+ events' % (count) - - return title - - def get_info(self): - return {'type': 'jira'} - - -class CommandAlerter(Alerter): - required_options = set(['command']) - - def __init__(self, *args): - super(CommandAlerter, self).__init__(*args) - - self.last_command = [] - - self.shell = False - if isinstance(self.rule['command'], str): - self.shell = True - if '%' in self.rule['command']: - logging.warning('Warning! 
You could be vulnerable to shell injection!') - self.rule['command'] = [self.rule['command']] - - self.new_style_string_format = False - if 'new_style_string_format' in self.rule and self.rule['new_style_string_format']: - self.new_style_string_format = True - - def alert(self, matches): - # Format the command and arguments - try: - command = [resolve_string(command_arg, matches[0]) for command_arg in self.rule['command']] - self.last_command = command - except KeyError as e: - raise EAException("Error formatting command: %s" % (e)) - - # Run command and pipe data - try: - subp = subprocess.Popen(command, stdin=subprocess.PIPE, shell=self.shell) - - if self.rule.get('pipe_match_json'): - match_json = json.dumps(matches, cls=DateTimeEncoder) + '\n' - stdout, stderr = subp.communicate(input=match_json.encode()) - elif self.rule.get('pipe_alert_text'): - alert_text = self.create_alert_body(matches) - stdout, stderr = subp.communicate(input=alert_text.encode()) - if self.rule.get("fail_on_non_zero_exit", False) and subp.wait(): - raise EAException("Non-zero exit code while running command %s" % (' '.join(command))) - except OSError as e: - raise EAException("Error while running command %s: %s" % (' '.join(command), e)) - - def get_info(self): - return {'type': 'command', - 'command': ' '.join(self.last_command)} - - -class SnsAlerter(Alerter): - """ Send alert using AWS SNS service """ - required_options = frozenset(['sns_topic_arn']) - - def __init__(self, *args): - super(SnsAlerter, self).__init__(*args) - self.sns_topic_arn = self.rule.get('sns_topic_arn', '') - self.aws_access_key_id = self.rule.get('aws_access_key_id') - self.aws_secret_access_key = self.rule.get('aws_secret_access_key') - self.aws_region = self.rule.get('aws_region', 'us-east-1') - self.profile = self.rule.get('boto_profile', None) # Deprecated - self.profile = self.rule.get('aws_profile', None) - - def create_default_title(self, matches): - subject = 'ElastAlert: %s' % (self.rule['name']) - return subject - - def alert(self, matches): - body = self.create_alert_body(matches) - - session = boto3.Session( - aws_access_key_id=self.aws_access_key_id, - aws_secret_access_key=self.aws_secret_access_key, - region_name=self.aws_region, - profile_name=self.profile - ) - sns_client = session.client('sns') - sns_client.publish( - TopicArn=self.sns_topic_arn, - Message=body, - Subject=self.create_title(matches) - ) - elastalert_logger.info("Sent sns notification to %s" % (self.sns_topic_arn)) - - -class HipChatAlerter(Alerter): - """ Creates a HipChat room notification for each alert """ - required_options = frozenset(['hipchat_auth_token', 'hipchat_room_id']) - - def __init__(self, rule): - super(HipChatAlerter, self).__init__(rule) - self.hipchat_msg_color = self.rule.get('hipchat_msg_color', 'red') - self.hipchat_message_format = self.rule.get('hipchat_message_format', 'html') - self.hipchat_auth_token = self.rule['hipchat_auth_token'] - self.hipchat_room_id = self.rule['hipchat_room_id'] - self.hipchat_domain = self.rule.get('hipchat_domain', 'api.hipchat.com') - self.hipchat_ignore_ssl_errors = self.rule.get('hipchat_ignore_ssl_errors', False) - self.hipchat_notify = self.rule.get('hipchat_notify', True) - self.hipchat_from = self.rule.get('hipchat_from', '') - self.url = 'https://%s/v2/room/%s/notification?auth_token=%s' % ( - self.hipchat_domain, self.hipchat_room_id, self.hipchat_auth_token) - self.hipchat_proxy = self.rule.get('hipchat_proxy', None) - - def create_alert_body(self, matches): - body = super(HipChatAlerter, 
self).create_alert_body(matches) - - # HipChat sends 400 bad request on messages longer than 10000 characters - if self.hipchat_message_format == 'html': - # Use appropriate line ending for text/html - br = '<br/>' - body = body.replace('\n', br) - - truncated_message = '<br/>
...(truncated)' - truncate_to = 10000 - len(truncated_message) - else: - truncated_message = '..(truncated)' - truncate_to = 10000 - len(truncated_message) - - if (len(body) > 9999): - body = body[:truncate_to] + truncated_message - - return body - - def alert(self, matches): - body = self.create_alert_body(matches) - - # Post to HipChat - headers = {'content-type': 'application/json'} - # set https proxy, if it was provided - proxies = {'https': self.hipchat_proxy} if self.hipchat_proxy else None - payload = { - 'color': self.hipchat_msg_color, - 'message': body, - 'message_format': self.hipchat_message_format, - 'notify': self.hipchat_notify, - 'from': self.hipchat_from - } - - try: - if self.hipchat_ignore_ssl_errors: - requests.packages.urllib3.disable_warnings() - - if self.rule.get('hipchat_mentions', []): - ping_users = self.rule.get('hipchat_mentions', []) - ping_msg = payload.copy() - ping_msg['message'] = "ping {}".format( - ", ".join("@{}".format(user) for user in ping_users) - ) - ping_msg['message_format'] = "text" - - response = requests.post( - self.url, - data=json.dumps(ping_msg, cls=DateTimeEncoder), - headers=headers, - verify=not self.hipchat_ignore_ssl_errors, - proxies=proxies) - - response = requests.post(self.url, data=json.dumps(payload, cls=DateTimeEncoder), headers=headers, - verify=not self.hipchat_ignore_ssl_errors, - proxies=proxies) - warnings.resetwarnings() - response.raise_for_status() - except RequestException as e: - raise EAException("Error posting to HipChat: %s" % e) - elastalert_logger.info("Alert sent to HipChat room %s" % self.hipchat_room_id) - - def get_info(self): - return {'type': 'hipchat', - 'hipchat_room_id': self.hipchat_room_id} - - -class MsTeamsAlerter(Alerter): - """ Creates a Microsoft Teams Conversation Message for each alert """ - required_options = frozenset(['ms_teams_webhook_url', 'ms_teams_alert_summary']) - - def __init__(self, rule): - super(MsTeamsAlerter, self).__init__(rule) - self.ms_teams_webhook_url = self.rule['ms_teams_webhook_url'] - if isinstance(self.ms_teams_webhook_url, str): - self.ms_teams_webhook_url = [self.ms_teams_webhook_url] - self.ms_teams_proxy = self.rule.get('ms_teams_proxy', None) - self.ms_teams_alert_summary = self.rule.get('ms_teams_alert_summary', 'ElastAlert Message') - self.ms_teams_alert_fixed_width = self.rule.get('ms_teams_alert_fixed_width', False) - self.ms_teams_theme_color = self.rule.get('ms_teams_theme_color', '') - - def format_body(self, body): - if self.ms_teams_alert_fixed_width: - body = body.replace('`', "'") - body = "```{0}```".format('```\n\n```'.join(x for x in body.split('\n'))).replace('\n``````', '') - return body - - def alert(self, matches): - body = self.create_alert_body(matches) - - body = self.format_body(body) - # post to Teams - headers = {'content-type': 'application/json'} - # set https proxy, if it was provided - proxies = {'https': self.ms_teams_proxy} if self.ms_teams_proxy else None - payload = { - '@type': 'MessageCard', - '@context': 'http://schema.org/extensions', - 'summary': self.ms_teams_alert_summary, - 'title': self.create_title(matches), - 'text': body - } - if self.ms_teams_theme_color != '': - payload['themeColor'] = self.ms_teams_theme_color - - for url in self.ms_teams_webhook_url: - try: - response = requests.post(url, data=json.dumps(payload, cls=DateTimeEncoder), headers=headers, proxies=proxies) - response.raise_for_status() - except RequestException as e: - raise EAException("Error posting to ms teams: %s" % e) - elastalert_logger.info("Alert 
sent to MS Teams") - - def get_info(self): - return {'type': 'ms_teams', - 'ms_teams_webhook_url': self.ms_teams_webhook_url} - - -class SlackAlerter(Alerter): - """ Creates a Slack room message for each alert """ - required_options = frozenset(['slack_webhook_url']) - - def __init__(self, rule): - super(SlackAlerter, self).__init__(rule) - self.slack_webhook_url = self.rule['slack_webhook_url'] - if isinstance(self.slack_webhook_url, str): - self.slack_webhook_url = [self.slack_webhook_url] - self.slack_proxy = self.rule.get('slack_proxy', None) - self.slack_username_override = self.rule.get('slack_username_override', 'elastalert') - self.slack_channel_override = self.rule.get('slack_channel_override', '') - if isinstance(self.slack_channel_override, str): - self.slack_channel_override = [self.slack_channel_override] - self.slack_title_link = self.rule.get('slack_title_link', '') - self.slack_title = self.rule.get('slack_title', '') - self.slack_emoji_override = self.rule.get('slack_emoji_override', ':ghost:') - self.slack_icon_url_override = self.rule.get('slack_icon_url_override', '') - self.slack_msg_color = self.rule.get('slack_msg_color', 'danger') - self.slack_parse_override = self.rule.get('slack_parse_override', 'none') - self.slack_text_string = self.rule.get('slack_text_string', '') - self.slack_alert_fields = self.rule.get('slack_alert_fields', '') - self.slack_ignore_ssl_errors = self.rule.get('slack_ignore_ssl_errors', False) - self.slack_timeout = self.rule.get('slack_timeout', 10) - self.slack_ca_certs = self.rule.get('slack_ca_certs') - self.slack_attach_kibana_discover_url = self.rule.get('slack_attach_kibana_discover_url', False) - self.slack_kibana_discover_color = self.rule.get('slack_kibana_discover_color', '#ec4b98') - self.slack_kibana_discover_title = self.rule.get('slack_kibana_discover_title', 'Discover in Kibana') - - def format_body(self, body): - # https://api.slack.com/docs/formatting - return body - - def get_aggregation_summary_text__maximum_width(self): - width = super(SlackAlerter, self).get_aggregation_summary_text__maximum_width() - # Reduced maximum width for prettier Slack display. 
- return min(width, 75) - - def get_aggregation_summary_text(self, matches): - text = super(SlackAlerter, self).get_aggregation_summary_text(matches) - if text: - text = '```\n{0}```\n'.format(text) - return text - - def populate_fields(self, matches): - alert_fields = [] - for arg in self.slack_alert_fields: - arg = copy.copy(arg) - arg['value'] = lookup_es_key(matches[0], arg['value']) - alert_fields.append(arg) - return alert_fields - - def alert(self, matches): - body = self.create_alert_body(matches) - - body = self.format_body(body) - # post to slack - headers = {'content-type': 'application/json'} - # set https proxy, if it was provided - proxies = {'https': self.slack_proxy} if self.slack_proxy else None - payload = { - 'username': self.slack_username_override, - 'parse': self.slack_parse_override, - 'text': self.slack_text_string, - 'attachments': [ - { - 'color': self.slack_msg_color, - 'title': self.create_title(matches), - 'text': body, - 'mrkdwn_in': ['text', 'pretext'], - 'fields': [] - } - ] - } - - # if we have defined fields, populate noteable fields for the alert - if self.slack_alert_fields != '': - payload['attachments'][0]['fields'] = self.populate_fields(matches) - - if self.slack_icon_url_override != '': - payload['icon_url'] = self.slack_icon_url_override - else: - payload['icon_emoji'] = self.slack_emoji_override - - if self.slack_title != '': - payload['attachments'][0]['title'] = self.slack_title - - if self.slack_title_link != '': - payload['attachments'][0]['title_link'] = self.slack_title_link - - if self.slack_attach_kibana_discover_url: - kibana_discover_url = lookup_es_key(matches[0], 'kibana_discover_url') - if kibana_discover_url: - payload['attachments'].append({ - 'color': self.slack_kibana_discover_color, - 'title': self.slack_kibana_discover_title, - 'title_link': kibana_discover_url - }) - - for url in self.slack_webhook_url: - for channel_override in self.slack_channel_override: - try: - if self.slack_ca_certs: - verify = self.slack_ca_certs - else: - verify = self.slack_ignore_ssl_errors - if self.slack_ignore_ssl_errors: - requests.packages.urllib3.disable_warnings() - payload['channel'] = channel_override - response = requests.post( - url, data=json.dumps(payload, cls=DateTimeEncoder), - headers=headers, verify=verify, - proxies=proxies, - timeout=self.slack_timeout) - warnings.resetwarnings() - response.raise_for_status() - except RequestException as e: - raise EAException("Error posting to slack: %s" % e) - elastalert_logger.info("Alert '%s' sent to Slack" % self.rule['name']) - - def get_info(self): - return {'type': 'slack', - 'slack_username_override': self.slack_username_override} - - -class MattermostAlerter(Alerter): - """ Creates a Mattermsot post for each alert """ - required_options = frozenset(['mattermost_webhook_url']) - - def __init__(self, rule): - super(MattermostAlerter, self).__init__(rule) - - # HTTP config - self.mattermost_webhook_url = self.rule['mattermost_webhook_url'] - if isinstance(self.mattermost_webhook_url, str): - self.mattermost_webhook_url = [self.mattermost_webhook_url] - self.mattermost_proxy = self.rule.get('mattermost_proxy', None) - self.mattermost_ignore_ssl_errors = self.rule.get('mattermost_ignore_ssl_errors', False) - - # Override webhook config - self.mattermost_username_override = self.rule.get('mattermost_username_override', 'elastalert') - self.mattermost_channel_override = self.rule.get('mattermost_channel_override', '') - self.mattermost_icon_url_override = 
self.rule.get('mattermost_icon_url_override', '') - - # Message properties - self.mattermost_msg_pretext = self.rule.get('mattermost_msg_pretext', '') - self.mattermost_msg_color = self.rule.get('mattermost_msg_color', 'danger') - self.mattermost_msg_fields = self.rule.get('mattermost_msg_fields', '') - - def get_aggregation_summary_text__maximum_width(self): - width = super(MattermostAlerter, self).get_aggregation_summary_text__maximum_width() - # Reduced maximum width for prettier Mattermost display. - return min(width, 75) - - def get_aggregation_summary_text(self, matches): - text = super(MattermostAlerter, self).get_aggregation_summary_text(matches) - if text: - text = '```\n{0}```\n'.format(text) - return text - - def populate_fields(self, matches): - alert_fields = [] - missing = self.rule.get('alert_missing_value', '') - for field in self.mattermost_msg_fields: - field = copy.copy(field) - if 'args' in field: - args_values = [lookup_es_key(matches[0], arg) or missing for arg in field['args']] - if 'value' in field: - field['value'] = field['value'].format(*args_values) - else: - field['value'] = "\n".join(str(arg) for arg in args_values) - del(field['args']) - alert_fields.append(field) - return alert_fields - - def alert(self, matches): - body = self.create_alert_body(matches) - title = self.create_title(matches) - - # post to mattermost - headers = {'content-type': 'application/json'} - # set https proxy, if it was provided - proxies = {'https': self.mattermost_proxy} if self.mattermost_proxy else None - payload = { - 'attachments': [ - { - 'fallback': "{0}: {1}".format(title, self.mattermost_msg_pretext), - 'color': self.mattermost_msg_color, - 'title': title, - 'pretext': self.mattermost_msg_pretext, - 'fields': [] - } - ] - } - - if self.rule.get('alert_text_type') == 'alert_text_only': - payload['attachments'][0]['text'] = body - else: - payload['text'] = body - - if self.mattermost_msg_fields != '': - payload['attachments'][0]['fields'] = self.populate_fields(matches) - - if self.mattermost_icon_url_override != '': - payload['icon_url'] = self.mattermost_icon_url_override - - if self.mattermost_username_override != '': - payload['username'] = self.mattermost_username_override - - if self.mattermost_channel_override != '': - payload['channel'] = self.mattermost_channel_override - - for url in self.mattermost_webhook_url: - try: - if self.mattermost_ignore_ssl_errors: - requests.urllib3.disable_warnings() - - response = requests.post( - url, data=json.dumps(payload, cls=DateTimeEncoder), - headers=headers, verify=not self.mattermost_ignore_ssl_errors, - proxies=proxies) - - warnings.resetwarnings() - response.raise_for_status() - except RequestException as e: - raise EAException("Error posting to Mattermost: %s" % e) - elastalert_logger.info("Alert sent to Mattermost") - - def get_info(self): - return {'type': 'mattermost', - 'mattermost_username_override': self.mattermost_username_override, - 'mattermost_webhook_url': self.mattermost_webhook_url} - - -class PagerDutyAlerter(Alerter): - """ Create an incident on PagerDuty for each alert """ - required_options = frozenset(['pagerduty_service_key', 'pagerduty_client_name']) - - def __init__(self, rule): - super(PagerDutyAlerter, self).__init__(rule) - self.pagerduty_service_key = self.rule['pagerduty_service_key'] - self.pagerduty_client_name = self.rule['pagerduty_client_name'] - self.pagerduty_incident_key = self.rule.get('pagerduty_incident_key', '') - self.pagerduty_incident_key_args = 
self.rule.get('pagerduty_incident_key_args', None) - self.pagerduty_event_type = self.rule.get('pagerduty_event_type', 'trigger') - self.pagerduty_proxy = self.rule.get('pagerduty_proxy', None) - - self.pagerduty_api_version = self.rule.get('pagerduty_api_version', 'v1') - self.pagerduty_v2_payload_class = self.rule.get('pagerduty_v2_payload_class', '') - self.pagerduty_v2_payload_class_args = self.rule.get('pagerduty_v2_payload_class_args', None) - self.pagerduty_v2_payload_component = self.rule.get('pagerduty_v2_payload_component', '') - self.pagerduty_v2_payload_component_args = self.rule.get('pagerduty_v2_payload_component_args', None) - self.pagerduty_v2_payload_group = self.rule.get('pagerduty_v2_payload_group', '') - self.pagerduty_v2_payload_group_args = self.rule.get('pagerduty_v2_payload_group_args', None) - self.pagerduty_v2_payload_severity = self.rule.get('pagerduty_v2_payload_severity', 'critical') - self.pagerduty_v2_payload_source = self.rule.get('pagerduty_v2_payload_source', 'ElastAlert') - self.pagerduty_v2_payload_source_args = self.rule.get('pagerduty_v2_payload_source_args', None) - - if self.pagerduty_api_version == 'v2': - self.url = 'https://events.pagerduty.com/v2/enqueue' - else: - self.url = 'https://events.pagerduty.com/generic/2010-04-15/create_event.json' - - def alert(self, matches): - body = self.create_alert_body(matches) - - # post to pagerduty - headers = {'content-type': 'application/json'} - if self.pagerduty_api_version == 'v2': - payload = { - 'routing_key': self.pagerduty_service_key, - 'event_action': self.pagerduty_event_type, - 'dedup_key': self.get_incident_key(matches), - 'client': self.pagerduty_client_name, - 'payload': { - 'class': self.resolve_formatted_key(self.pagerduty_v2_payload_class, - self.pagerduty_v2_payload_class_args, - matches), - 'component': self.resolve_formatted_key(self.pagerduty_v2_payload_component, - self.pagerduty_v2_payload_component_args, - matches), - 'group': self.resolve_formatted_key(self.pagerduty_v2_payload_group, - self.pagerduty_v2_payload_group_args, - matches), - 'severity': self.pagerduty_v2_payload_severity, - 'source': self.resolve_formatted_key(self.pagerduty_v2_payload_source, - self.pagerduty_v2_payload_source_args, - matches), - 'summary': self.create_title(matches), - 'custom_details': { - 'information': body, - }, - }, - } - match_timestamp = lookup_es_key(matches[0], self.rule.get('timestamp_field', '@timestamp')) - if match_timestamp: - payload['payload']['timestamp'] = match_timestamp - else: - payload = { - 'service_key': self.pagerduty_service_key, - 'description': self.create_title(matches), - 'event_type': self.pagerduty_event_type, - 'incident_key': self.get_incident_key(matches), - 'client': self.pagerduty_client_name, - 'details': { - "information": body, - }, - } - - # set https proxy, if it was provided - proxies = {'https': self.pagerduty_proxy} if self.pagerduty_proxy else None - try: - response = requests.post( - self.url, - data=json.dumps(payload, cls=DateTimeEncoder, ensure_ascii=False), - headers=headers, - proxies=proxies - ) - response.raise_for_status() - except RequestException as e: - raise EAException("Error posting to pagerduty: %s" % e) - - if self.pagerduty_event_type == 'trigger': - elastalert_logger.info("Trigger sent to PagerDuty") - elif self.pagerduty_event_type == 'resolve': - elastalert_logger.info("Resolve sent to PagerDuty") - elif self.pagerduty_event_type == 'acknowledge': - elastalert_logger.info("acknowledge sent to PagerDuty") - - def 
resolve_formatted_key(self, key, args, matches): - if args: - key_values = [lookup_es_key(matches[0], arg) for arg in args] - - # Populate values with rule level properties too - for i in range(len(key_values)): - if key_values[i] is None: - key_value = self.rule.get(args[i]) - if key_value: - key_values[i] = key_value - - missing = self.rule.get('alert_missing_value', '') - key_values = [missing if val is None else val for val in key_values] - return key.format(*key_values) - else: - return key - - def get_incident_key(self, matches): - if self.pagerduty_incident_key_args: - incident_key_values = [lookup_es_key(matches[0], arg) for arg in self.pagerduty_incident_key_args] - - # Populate values with rule level properties too - for i in range(len(incident_key_values)): - if incident_key_values[i] is None: - key_value = self.rule.get(self.pagerduty_incident_key_args[i]) - if key_value: - incident_key_values[i] = key_value - - missing = self.rule.get('alert_missing_value', '') - incident_key_values = [missing if val is None else val for val in incident_key_values] - return self.pagerduty_incident_key.format(*incident_key_values) - else: - return self.pagerduty_incident_key - - def get_info(self): - return {'type': 'pagerduty', - 'pagerduty_client_name': self.pagerduty_client_name} - - -class PagerTreeAlerter(Alerter): - """ Creates a PagerTree Incident for each alert """ - required_options = frozenset(['pagertree_integration_url']) - - def __init__(self, rule): - super(PagerTreeAlerter, self).__init__(rule) - self.url = self.rule['pagertree_integration_url'] - self.pagertree_proxy = self.rule.get('pagertree_proxy', None) - - def alert(self, matches): - # post to pagertree - headers = {'content-type': 'application/json'} - # set https proxy, if it was provided - proxies = {'https': self.pagertree_proxy} if self.pagertree_proxy else None - payload = { - "event_type": "create", - "Id": str(uuid.uuid4()), - "Title": self.create_title(matches), - "Description": self.create_alert_body(matches) - } - - try: - response = requests.post(self.url, data=json.dumps(payload, cls=DateTimeEncoder), headers=headers, proxies=proxies) - response.raise_for_status() - except RequestException as e: - raise EAException("Error posting to PagerTree: %s" % e) - elastalert_logger.info("Trigger sent to PagerTree") - - def get_info(self): - return {'type': 'pagertree', - 'pagertree_integration_url': self.url} - - -class ExotelAlerter(Alerter): - required_options = frozenset(['exotel_account_sid', 'exotel_auth_token', 'exotel_to_number', 'exotel_from_number']) - - def __init__(self, rule): - super(ExotelAlerter, self).__init__(rule) - self.exotel_account_sid = self.rule['exotel_account_sid'] - self.exotel_auth_token = self.rule['exotel_auth_token'] - self.exotel_to_number = self.rule['exotel_to_number'] - self.exotel_from_number = self.rule['exotel_from_number'] - self.sms_body = self.rule.get('exotel_message_body', '') - - def alert(self, matches): - client = Exotel(self.exotel_account_sid, self.exotel_auth_token) - - try: - message_body = self.rule['name'] + self.sms_body - response = client.sms(self.rule['exotel_from_number'], self.rule['exotel_to_number'], message_body) - if response != 200: - raise EAException("Error posting to Exotel, response code is %s" % response) - except RequestException: - raise EAException("Error posting to Exotel").with_traceback(sys.exc_info()[2]) - elastalert_logger.info("Trigger sent to Exotel") - - def get_info(self): - return {'type': 'exotel', 'exotel_account': self.exotel_account_sid} 
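For context, every alerter removed in this hunk follows the same contract: declare a required_options set, read its settings from the rule dict in __init__, and emit one notification per alert(matches) call. A minimal, self-contained sketch of that pattern is shown below; DemoAlerter and demo_url are hypothetical names used only for illustration and are not part of ElastAlert or of this patch.

# Minimal sketch of the alerter contract, assuming only the pattern visible in the
# removed classes above; DemoAlerter and demo_url are hypothetical names.
class DemoAlerter:
    required_options = frozenset(['demo_url'])

    def __init__(self, rule):
        # The rule loader normally checks required_options before building an alerter;
        # the check is repeated here so the sketch stands alone.
        missing = self.required_options - set(rule)
        if missing:
            raise ValueError('Missing required options: %s' % ', '.join(sorted(missing)))
        self.rule = rule
        self.url = rule['demo_url']

    def alert(self, matches):
        # A real alerter would format the matches into a body and POST it to self.url.
        print('Would notify %s about %d match(es) for rule %s'
              % (self.url, len(matches), self.rule.get('name', '?')))


if __name__ == '__main__':
    DemoAlerter({'name': 'demo rule', 'demo_url': 'https://example.invalid/hook'}) \
        .alert([{'@timestamp': '2021-01-01T00:00:00Z'}])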
- - -class TwilioAlerter(Alerter): - required_options = frozenset(['twilio_account_sid', 'twilio_auth_token', 'twilio_to_number', 'twilio_from_number']) - - def __init__(self, rule): - super(TwilioAlerter, self).__init__(rule) - self.twilio_account_sid = self.rule['twilio_account_sid'] - self.twilio_auth_token = self.rule['twilio_auth_token'] - self.twilio_to_number = self.rule['twilio_to_number'] - self.twilio_from_number = self.rule['twilio_from_number'] - - def alert(self, matches): - client = TwilioClient(self.twilio_account_sid, self.twilio_auth_token) - - try: - client.messages.create(body=self.rule['name'], - to=self.twilio_to_number, - from_=self.twilio_from_number) - - except TwilioRestException as e: - raise EAException("Error posting to twilio: %s" % e) - - elastalert_logger.info("Trigger sent to Twilio") - - def get_info(self): - return {'type': 'twilio', - 'twilio_client_name': self.twilio_from_number} - - -class VictorOpsAlerter(Alerter): - """ Creates a VictorOps Incident for each alert """ - required_options = frozenset(['victorops_api_key', 'victorops_routing_key', 'victorops_message_type']) - - def __init__(self, rule): - super(VictorOpsAlerter, self).__init__(rule) - self.victorops_api_key = self.rule['victorops_api_key'] - self.victorops_routing_key = self.rule['victorops_routing_key'] - self.victorops_message_type = self.rule['victorops_message_type'] - self.victorops_entity_id = self.rule.get('victorops_entity_id', None) - self.victorops_entity_display_name = self.rule.get('victorops_entity_display_name', 'no entity display name') - self.url = 'https://alert.victorops.com/integrations/generic/20131114/alert/%s/%s' % ( - self.victorops_api_key, self.victorops_routing_key) - self.victorops_proxy = self.rule.get('victorops_proxy', None) - - def alert(self, matches): - body = self.create_alert_body(matches) - - # post to victorops - headers = {'content-type': 'application/json'} - # set https proxy, if it was provided - proxies = {'https': self.victorops_proxy} if self.victorops_proxy else None - payload = { - "message_type": self.victorops_message_type, - "entity_display_name": self.victorops_entity_display_name, - "monitoring_tool": "ElastAlert", - "state_message": body - } - if self.victorops_entity_id: - payload["entity_id"] = self.victorops_entity_id - - try: - response = requests.post(self.url, data=json.dumps(payload, cls=DateTimeEncoder), headers=headers, proxies=proxies) - response.raise_for_status() - except RequestException as e: - raise EAException("Error posting to VictorOps: %s" % e) - elastalert_logger.info("Trigger sent to VictorOps") - - def get_info(self): - return {'type': 'victorops', - 'victorops_routing_key': self.victorops_routing_key} - - -class TelegramAlerter(Alerter): - """ Send a Telegram message via bot api for each alert """ - required_options = frozenset(['telegram_bot_token', 'telegram_room_id']) - - def __init__(self, rule): - super(TelegramAlerter, self).__init__(rule) - self.telegram_bot_token = self.rule['telegram_bot_token'] - self.telegram_room_id = self.rule['telegram_room_id'] - self.telegram_api_url = self.rule.get('telegram_api_url', 'api.telegram.org') - self.url = 'https://%s/bot%s/%s' % (self.telegram_api_url, self.telegram_bot_token, "sendMessage") - self.telegram_proxy = self.rule.get('telegram_proxy', None) - self.telegram_proxy_login = self.rule.get('telegram_proxy_login', None) - self.telegram_proxy_password = self.rule.get('telegram_proxy_pass', None) - - def alert(self, matches): - body = '⚠ *%s* ⚠ ```\n' % 
(self.create_title(matches)) - for match in matches: - body += str(BasicMatchString(self.rule, match)) - # Separate text of aggregated alerts with dashes - if len(matches) > 1: - body += '\n----------------------------------------\n' - if len(body) > 4095: - body = body[0:4000] + "\n⚠ *message was cropped according to telegram limits!* ⚠" - body += ' ```' - - headers = {'content-type': 'application/json'} - # set https proxy, if it was provided - proxies = {'https': self.telegram_proxy} if self.telegram_proxy else None - auth = HTTPProxyAuth(self.telegram_proxy_login, self.telegram_proxy_password) if self.telegram_proxy_login else None - payload = { - 'chat_id': self.telegram_room_id, - 'text': body, - 'parse_mode': 'markdown', - 'disable_web_page_preview': True - } - - try: - response = requests.post(self.url, data=json.dumps(payload, cls=DateTimeEncoder), headers=headers, proxies=proxies, auth=auth) - warnings.resetwarnings() - response.raise_for_status() - except RequestException as e: - raise EAException("Error posting to Telegram: %s. Details: %s" % (e, "" if e.response is None else e.response.text)) - - elastalert_logger.info( - "Alert sent to Telegram room %s" % self.telegram_room_id) - - def get_info(self): - return {'type': 'telegram', - 'telegram_room_id': self.telegram_room_id} - - -class GoogleChatAlerter(Alerter): - """ Send a notification via Google Chat webhooks """ - required_options = frozenset(['googlechat_webhook_url']) - - def __init__(self, rule): - super(GoogleChatAlerter, self).__init__(rule) - self.googlechat_webhook_url = self.rule['googlechat_webhook_url'] - if isinstance(self.googlechat_webhook_url, str): - self.googlechat_webhook_url = [self.googlechat_webhook_url] - self.googlechat_format = self.rule.get('googlechat_format', 'basic') - self.googlechat_header_title = self.rule.get('googlechat_header_title', None) - self.googlechat_header_subtitle = self.rule.get('googlechat_header_subtitle', None) - self.googlechat_header_image = self.rule.get('googlechat_header_image', None) - self.googlechat_footer_kibanalink = self.rule.get('googlechat_footer_kibanalink', None) - - def create_header(self): - header = None - if self.googlechat_header_title: - header = { - "title": self.googlechat_header_title, - "subtitle": self.googlechat_header_subtitle, - "imageUrl": self.googlechat_header_image - } - return header - - def create_footer(self): - footer = None - if self.googlechat_footer_kibanalink: - footer = {"widgets": [{ - "buttons": [{ - "textButton": { - "text": "VISIT KIBANA", - "onClick": { - "openLink": { - "url": self.googlechat_footer_kibanalink - } - } - } - }] - }] - } - return footer - - def create_card(self, matches): - card = {"cards": [{ - "sections": [{ - "widgets": [ - {"textParagraph": {"text": self.create_alert_body(matches)}} - ]} - ]} - ]} - - # Add the optional header - header = self.create_header() - if header: - card['cards'][0]['header'] = header - - # Add the optional footer - footer = self.create_footer() - if footer: - card['cards'][0]['sections'].append(footer) - return card - - def create_basic(self, matches): - body = self.create_alert_body(matches) - return {'text': body} - - def alert(self, matches): - # Format message - if self.googlechat_format == 'card': - message = self.create_card(matches) - else: - message = self.create_basic(matches) - - # Post to webhook - headers = {'content-type': 'application/json'} - for url in self.googlechat_webhook_url: - try: - response = requests.post(url, data=json.dumps(message), headers=headers) - 
response.raise_for_status() - except RequestException as e: - raise EAException("Error posting to google chat: {}".format(e)) - elastalert_logger.info("Alert sent to Google Chat!") - - def get_info(self): - return {'type': 'googlechat', - 'googlechat_webhook_url': self.googlechat_webhook_url} - - -class GitterAlerter(Alerter): - """ Creates a Gitter activity message for each alert """ - required_options = frozenset(['gitter_webhook_url']) - - def __init__(self, rule): - super(GitterAlerter, self).__init__(rule) - self.gitter_webhook_url = self.rule['gitter_webhook_url'] - self.gitter_proxy = self.rule.get('gitter_proxy', None) - self.gitter_msg_level = self.rule.get('gitter_msg_level', 'error') - - def alert(self, matches): - body = self.create_alert_body(matches) - - # post to Gitter - headers = {'content-type': 'application/json'} - # set https proxy, if it was provided - proxies = {'https': self.gitter_proxy} if self.gitter_proxy else None - payload = { - 'message': body, - 'level': self.gitter_msg_level - } - - try: - response = requests.post(self.gitter_webhook_url, json.dumps(payload, cls=DateTimeEncoder), headers=headers, proxies=proxies) - response.raise_for_status() - except RequestException as e: - raise EAException("Error posting to Gitter: %s" % e) - elastalert_logger.info("Alert sent to Gitter") - - def get_info(self): - return {'type': 'gitter', - 'gitter_webhook_url': self.gitter_webhook_url} - - -class ServiceNowAlerter(Alerter): - """ Creates a ServiceNow alert """ - required_options = set([ - 'username', - 'password', - 'servicenow_rest_url', - 'short_description', - 'comments', - 'assignment_group', - 'category', - 'subcategory', - 'cmdb_ci', - 'caller_id' - ]) - - def __init__(self, rule): - super(ServiceNowAlerter, self).__init__(rule) - self.servicenow_rest_url = self.rule['servicenow_rest_url'] - self.servicenow_proxy = self.rule.get('servicenow_proxy', None) - - def alert(self, matches): - for match in matches: - # Parse everything into description. 
- description = str(BasicMatchString(self.rule, match)) - - # Set proper headers - headers = { - "Content-Type": "application/json", - "Accept": "application/json;charset=utf-8" - } - proxies = {'https': self.servicenow_proxy} if self.servicenow_proxy else None - payload = { - "description": description, - "short_description": self.rule['short_description'], - "comments": self.rule['comments'], - "assignment_group": self.rule['assignment_group'], - "category": self.rule['category'], - "subcategory": self.rule['subcategory'], - "cmdb_ci": self.rule['cmdb_ci'], - "caller_id": self.rule["caller_id"] - } - try: - response = requests.post( - self.servicenow_rest_url, - auth=(self.rule['username'], self.rule['password']), - headers=headers, - data=json.dumps(payload, cls=DateTimeEncoder), - proxies=proxies - ) - response.raise_for_status() - except RequestException as e: - raise EAException("Error posting to ServiceNow: %s" % e) - elastalert_logger.info("Alert sent to ServiceNow") - - def get_info(self): - return {'type': 'ServiceNow', - 'self.servicenow_rest_url': self.servicenow_rest_url} - - -class AlertaAlerter(Alerter): - """ Creates an Alerta event for each alert """ - required_options = frozenset(['alerta_api_url']) - - def __init__(self, rule): - super(AlertaAlerter, self).__init__(rule) - - # Setup defaul parameters - self.url = self.rule.get('alerta_api_url', None) - self.api_key = self.rule.get('alerta_api_key', None) - self.timeout = self.rule.get('alerta_timeout', 86400) - self.use_match_timestamp = self.rule.get('alerta_use_match_timestamp', False) - self.use_qk_as_resource = self.rule.get('alerta_use_qk_as_resource', False) - self.verify_ssl = not self.rule.get('alerta_api_skip_ssl', False) - self.missing_text = self.rule.get('alert_missing_value', '') - - # Fill up default values of the API JSON payload - self.severity = self.rule.get('alerta_severity', 'warning') - self.resource = self.rule.get('alerta_resource', 'elastalert') - self.environment = self.rule.get('alerta_environment', 'Production') - self.origin = self.rule.get('alerta_origin', 'elastalert') - self.service = self.rule.get('alerta_service', ['elastalert']) - self.text = self.rule.get('alerta_text', 'elastalert') - self.type = self.rule.get('alerta_type', 'elastalert') - self.event = self.rule.get('alerta_event', 'elastalert') - self.correlate = self.rule.get('alerta_correlate', []) - self.tags = self.rule.get('alerta_tags', []) - self.group = self.rule.get('alerta_group', '') - self.attributes_keys = self.rule.get('alerta_attributes_keys', []) - self.attributes_values = self.rule.get('alerta_attributes_values', []) - self.value = self.rule.get('alerta_value', '') - - def alert(self, matches): - # Override the resource if requested - if self.use_qk_as_resource and 'query_key' in self.rule and lookup_es_key(matches[0], self.rule['query_key']): - self.resource = lookup_es_key(matches[0], self.rule['query_key']) - - headers = {'content-type': 'application/json'} - if self.api_key is not None: - headers['Authorization'] = 'Key %s' % (self.rule['alerta_api_key']) - alerta_payload = self.get_json_payload(matches[0]) - - try: - response = requests.post(self.url, data=alerta_payload, headers=headers, verify=self.verify_ssl) - response.raise_for_status() - except RequestException as e: - raise EAException("Error posting to Alerta: %s" % e) - elastalert_logger.info("Alert sent to Alerta") - - def create_default_title(self, matches): - title = '%s' % (self.rule['name']) - # If the rule has a query_key, add that value - if 
'query_key' in self.rule: - qk = matches[0].get(self.rule['query_key']) - if qk: - title += '.%s' % (qk) - return title - - def get_info(self): - return {'type': 'alerta', - 'alerta_url': self.url} - - def get_json_payload(self, match): - """ - Builds the API Create Alert body, as in - http://alerta.readthedocs.io/en/latest/api/reference.html#create-an-alert - - For the values that could have references to fields on the match, resolve those references. - - """ - - # Using default text and event title if not defined in rule - alerta_text = self.rule['type'].get_match_str([match]) if self.text == '' else resolve_string(self.text, match, self.missing_text) - alerta_event = self.create_default_title([match]) if self.event == '' else resolve_string(self.event, match, self.missing_text) - - match_timestamp = lookup_es_key(match, self.rule.get('timestamp_field', '@timestamp')) - if match_timestamp is None: - match_timestamp = datetime.datetime.now().strftime("%Y-%m-%dT%H:%M:%S.%fZ") - if self.use_match_timestamp: - createTime = ts_to_dt(match_timestamp).strftime("%Y-%m-%dT%H:%M:%S.%fZ") - else: - createTime = datetime.datetime.now().strftime("%Y-%m-%dT%H:%M:%S.%fZ") - - alerta_payload_dict = { - 'resource': resolve_string(self.resource, match, self.missing_text), - 'severity': self.severity, - 'timeout': self.timeout, - 'createTime': createTime, - 'type': self.type, - 'environment': resolve_string(self.environment, match, self.missing_text), - 'origin': resolve_string(self.origin, match, self.missing_text), - 'group': resolve_string(self.group, match, self.missing_text), - 'event': alerta_event, - 'text': alerta_text, - 'value': resolve_string(self.value, match, self.missing_text), - 'service': [resolve_string(a_service, match, self.missing_text) for a_service in self.service], - 'tags': [resolve_string(a_tag, match, self.missing_text) for a_tag in self.tags], - 'correlate': [resolve_string(an_event, match, self.missing_text) for an_event in self.correlate], - 'attributes': dict(list(zip(self.attributes_keys, - [resolve_string(a_value, match, self.missing_text) for a_value in self.attributes_values]))), - 'rawData': self.create_alert_body([match]), - } - - try: - payload = json.dumps(alerta_payload_dict, cls=DateTimeEncoder) - except Exception as e: - raise Exception("Error building Alerta request: %s" % e) - return payload - - -class HTTPPostAlerter(Alerter): - """ Requested elasticsearch indices are sent by HTTP POST. Encoded with JSON. """ - - def __init__(self, rule): - super(HTTPPostAlerter, self).__init__(rule) - post_url = self.rule.get('http_post_url') - if isinstance(post_url, str): - post_url = [post_url] - self.post_url = post_url - self.post_proxy = self.rule.get('http_post_proxy') - self.post_payload = self.rule.get('http_post_payload', {}) - self.post_static_payload = self.rule.get('http_post_static_payload', {}) - self.post_all_values = self.rule.get('http_post_all_values', not self.post_payload) - self.post_http_headers = self.rule.get('http_post_headers', {}) - self.timeout = self.rule.get('http_post_timeout', 10) - - def alert(self, matches): - """ Each match will trigger a POST to the specified endpoint(s). 
""" - for match in matches: - payload = match if self.post_all_values else {} - payload.update(self.post_static_payload) - for post_key, es_key in list(self.post_payload.items()): - payload[post_key] = lookup_es_key(match, es_key) - headers = { - "Content-Type": "application/json", - "Accept": "application/json;charset=utf-8" - } - headers.update(self.post_http_headers) - proxies = {'https': self.post_proxy} if self.post_proxy else None - for url in self.post_url: - try: - response = requests.post(url, data=json.dumps(payload, cls=DateTimeEncoder), - headers=headers, proxies=proxies, timeout=self.timeout) - response.raise_for_status() - except RequestException as e: - raise EAException("Error posting HTTP Post alert: %s" % e) - elastalert_logger.info("HTTP Post alert sent.") - - def get_info(self): - return {'type': 'http_post', - 'http_post_webhook_url': self.post_url} - - -class StrideHTMLParser(HTMLParser): - """Parse html into stride's fabric structure""" - - def __init__(self): - """ - Define a couple markup place holders. - """ - self.content = [] - self.mark = None - HTMLParser.__init__(self) - - def handle_starttag(self, tag, attrs): - """Identify and verify starting tag is fabric compatible.""" - if tag == 'b' or tag == 'strong': - self.mark = dict(type='strong') - if tag == 'u': - self.mark = dict(type='underline') - if tag == 'a': - self.mark = dict(type='link', attrs=dict(attrs)) - - def handle_endtag(self, tag): - """Clear mark on endtag.""" - self.mark = None - - def handle_data(self, data): - """Construct data node for our data.""" - node = dict(type='text', text=data) - if self.mark: - node['marks'] = [self.mark] - self.content.append(node) - - -class StrideAlerter(Alerter): - """ Creates a Stride conversation message for each alert """ - - required_options = frozenset( - ['stride_access_token', 'stride_cloud_id', 'stride_conversation_id']) - - def __init__(self, rule): - super(StrideAlerter, self).__init__(rule) - - self.stride_access_token = self.rule['stride_access_token'] - self.stride_cloud_id = self.rule['stride_cloud_id'] - self.stride_conversation_id = self.rule['stride_conversation_id'] - self.stride_ignore_ssl_errors = self.rule.get('stride_ignore_ssl_errors', False) - self.stride_proxy = self.rule.get('stride_proxy', None) - self.url = 'https://api.atlassian.com/site/%s/conversation/%s/message' % ( - self.stride_cloud_id, self.stride_conversation_id) - - def alert(self, matches): - body = self.create_alert_body(matches).strip() - - # parse body with StrideHTMLParser - parser = StrideHTMLParser() - parser.feed(body) - - # Post to Stride - headers = { - 'content-type': 'application/json', - 'Authorization': 'Bearer {}'.format(self.stride_access_token) - } - - # set https proxy, if it was provided - proxies = {'https': self.stride_proxy} if self.stride_proxy else None - - # build stride json payload - # https://developer.atlassian.com/cloud/stride/apis/document/structure/ - payload = {'body': {'version': 1, 'type': "doc", 'content': [ - {'type': "panel", 'attrs': {'panelType': "warning"}, 'content': [ - {'type': 'paragraph', 'content': parser.content} - ]} - ]}} - - try: - if self.stride_ignore_ssl_errors: - requests.packages.urllib3.disable_warnings() - response = requests.post( - self.url, data=json.dumps(payload, cls=DateTimeEncoder), - headers=headers, verify=not self.stride_ignore_ssl_errors, - proxies=proxies) - warnings.resetwarnings() - response.raise_for_status() - except RequestException as e: - raise EAException("Error posting to Stride: %s" % e) - 
elastalert_logger.info( - "Alert sent to Stride conversation %s" % self.stride_conversation_id) - - def get_info(self): - return {'type': 'stride', - 'stride_cloud_id': self.stride_cloud_id, - 'stride_converstation_id': self.stride_converstation_id} - - -class LineNotifyAlerter(Alerter): - """ Created a Line Notify for each alert """ - required_option = frozenset(["linenotify_access_token"]) - - def __init__(self, rule): - super(LineNotifyAlerter, self).__init__(rule) - self.linenotify_access_token = self.rule["linenotify_access_token"] - - def alert(self, matches): - body = self.create_alert_body(matches) - # post to Line Notify - headers = { - "Content-Type": "application/x-www-form-urlencoded", - "Authorization": "Bearer {}".format(self.linenotify_access_token) - } - payload = { - "message": body - } - try: - response = requests.post("https://notify-api.line.me/api/notify", data=payload, headers=headers) - response.raise_for_status() - except RequestException as e: - raise EAException("Error posting to Line Notify: %s" % e) - elastalert_logger.info("Alert sent to Line Notify") - - def get_info(self): - return {"type": "linenotify", "linenotify_access_token": self.linenotify_access_token} - - -class HiveAlerter(Alerter): - """ - Use matched data to create alerts containing observables in an instance of TheHive - """ - - required_options = set(['hive_connection', 'hive_alert_config']) - - def alert(self, matches): - - connection_details = self.rule['hive_connection'] - - for match in matches: - context = {'rule': self.rule, 'match': match} - - artifacts = [] - for mapping in self.rule.get('hive_observable_data_mapping', []): - for observable_type, match_data_key in mapping.items(): - try: - match_data_keys = re.findall(r'\{match\[([^\]]*)\]', match_data_key) - rule_data_keys = re.findall(r'\{rule\[([^\]]*)\]', match_data_key) - data_keys = match_data_keys + rule_data_keys - context_keys = list(context['match'].keys()) + list(context['rule'].keys()) - if all([True if k in context_keys else False for k in data_keys]): - artifact = {'tlp': 2, 'tags': [], 'message': None, 'dataType': observable_type, - 'data': match_data_key.format(**context)} - artifacts.append(artifact) - except KeyError: - raise KeyError('\nformat string\n{}\nmatch data\n{}'.format(match_data_key, context)) - - alert_config = { - 'artifacts': artifacts, - 'sourceRef': str(uuid.uuid4())[0:6], - 'customFields': {}, - 'caseTemplate': None, - 'title': '{rule[index]}_{rule[name]}'.format(**context), - 'date': int(time.time()) * 1000 - } - alert_config.update(self.rule.get('hive_alert_config', {})) - custom_fields = {} - for alert_config_field, alert_config_value in alert_config.items(): - if alert_config_field == 'customFields': - n = 0 - for cf_key, cf_value in alert_config_value.items(): - cf = {'order': n, cf_value['type']: cf_value['value'].format(**context)} - n += 1 - custom_fields[cf_key] = cf - elif isinstance(alert_config_value, str): - alert_config[alert_config_field] = alert_config_value.format(**context) - elif isinstance(alert_config_value, (list, tuple)): - formatted_list = [] - for element in alert_config_value: - try: - formatted_list.append(element.format(**context)) - except (AttributeError, KeyError, IndexError): - formatted_list.append(element) - alert_config[alert_config_field] = formatted_list - if custom_fields: - alert_config['customFields'] = custom_fields - - alert_body = json.dumps(alert_config, indent=4, sort_keys=True) - req = '{}:{}/api/alert'.format(connection_details['hive_host'], 
connection_details['hive_port']) - headers = {'Content-Type': 'application/json', 'Authorization': 'Bearer {}'.format(connection_details.get('hive_apikey', ''))} - proxies = connection_details.get('hive_proxies', {'http': '', 'https': ''}) - verify = connection_details.get('hive_verify', False) - response = requests.post(req, headers=headers, data=alert_body, proxies=proxies, verify=verify) - - if response.status_code != 201: - raise Exception('alert not successfully created in TheHive\n{}'.format(response.text)) - - def get_info(self): - - return { - 'type': 'hivealerter', - 'hive_host': self.rule.get('hive_connection', {}).get('hive_host', '') - } diff --git a/elastalert/config.py b/elastalert/config.py index 5ae9a26e6..9cc2618bc 100644 --- a/elastalert/config.py +++ b/elastalert/config.py @@ -4,48 +4,53 @@ import logging.config from envparse import Env -from staticconf.loader import yaml_loader -from . import loaders -from .util import EAException -from .util import elastalert_logger -from .util import get_module +from elastalert import loaders +from elastalert.util import EAException +from elastalert.util import elastalert_logger +from elastalert.util import get_module +from elastalert.yaml import read_yaml # Required global (config.yaml) configuration options required_globals = frozenset(['run_every', 'es_host', 'es_port', 'writeback_index', 'buffer_time']) # Settings that can be derived from ENV variables env_settings = {'ES_USE_SSL': 'use_ssl', + 'ES_BEARER': 'es_bearer', 'ES_PASSWORD': 'es_password', 'ES_USERNAME': 'es_username', + 'ES_API_KEY': 'es_api_key', 'ES_HOST': 'es_host', + 'ES_HOSTS': 'es_hosts', 'ES_PORT': 'es_port', - 'ES_URL_PREFIX': 'es_url_prefix'} + 'ES_URL_PREFIX': 'es_url_prefix', + 'STATSD_INSTANCE_TAG': 'statsd_instance_tag', + 'STATSD_HOST': 'statsd_host', + 'X_ENV':'X_ENV'} env = Env(ES_USE_SSL=bool) - # Used to map the names of rule loaders to their classes loader_mapping = { 'file': loaders.FileRulesLoader, } -def load_conf(args, defaults=None, overwrites=None): +def load_conf(args, defaults=None, overrides=None): """ Creates a conf dictionary for ElastAlerter. Loads the global config file and then each rule found in rules_folder. :param args: The parsed arguments to ElastAlert :param defaults: Dictionary of default conf values - :param overwrites: Dictionary of conf values to override + :param overrides: Dictionary of conf values to override :return: The global configuration, a dictionary. 
""" filename = args.config if filename: - conf = yaml_loader(filename) + conf = read_yaml(filename) else: try: - conf = yaml_loader('config.yaml') + conf = read_yaml('config.yaml') except FileNotFoundError: raise EAException('No --config or config.yaml found') @@ -61,20 +66,20 @@ def load_conf(args, defaults=None, overwrites=None): if key not in conf: conf[key] = value - for key, value in (iter(overwrites.items()) if overwrites is not None else []): + for key, value in (iter(overrides.items()) if overrides is not None else []): conf[key] = value # Make sure we have all required globals if required_globals - frozenset(list(conf.keys())): raise EAException('%s must contain %s' % (filename, ', '.join(required_globals - frozenset(list(conf.keys()))))) - conf.setdefault('writeback_alias', 'elastalert_alerts') conf.setdefault('max_query_size', 10000) conf.setdefault('scroll_keepalive', '30s') - conf.setdefault('max_scrolling_count', 0) + conf.setdefault('max_scrolling_count', 990) # Avoid stack overflow in run_query, note that 1000 is Python's stack limit conf.setdefault('disable_rules_on_error', True) conf.setdefault('scan_subdirectories', True) conf.setdefault('rules_loader', 'file') + conf.setdefault('custom_pretty_ts_format', None) # Convert run_every, buffer_time into a timedelta object try: @@ -88,6 +93,8 @@ def load_conf(args, defaults=None, overwrites=None): conf['old_query_limit'] = datetime.timedelta(**conf['old_query_limit']) else: conf['old_query_limit'] = datetime.timedelta(weeks=1) + if 'query_delay' in conf: + conf['query_delay'] = datetime.timedelta(**conf['query_delay']) except (KeyError, TypeError) as e: raise EAException('Invalid time format used: %s' % e) diff --git a/elastalert/create_index.py b/elastalert/create_index.py index a0858da70..95600c9c9 100644 --- a/elastalert/create_index.py +++ b/elastalert/create_index.py @@ -14,16 +14,24 @@ from elasticsearch.exceptions import NotFoundError from envparse import Env -from .auth import Auth +from elastalert.auth import Auth +from elastalert.util import get_version_from_cluster_info env = Env(ES_USE_SSL=bool) def create_index_mappings(es_client, ea_index, recreate=False, old_ea_index=None): - esversion = es_client.info()["version"]["number"] - print("Elastic Version: " + esversion) + esversion = get_version_from_cluster_info(es_client) - es_index_mappings = read_es_index_mappings() if is_atleastsix(esversion) else read_es_index_mappings(5) + es_index_mappings = {} + + if is_atleasteight(esversion): + es_index_mappings = read_es_index_mappings() + elif is_atleastseven(esversion) or is_atleastsix(esversion): + es_index_mappings = read_es_index_mappings(7) + else: + print('FATAL - Unsupported Elasticsearch version: ' + esversion + '. Aborting.') + exit(1) es_index = IndicesClient(es_client) if not recreate: @@ -32,7 +40,7 @@ def create_index_mappings(es_client, ea_index, recreate=False, old_ea_index=None return None # (Re-)Create indices. - if is_atleastsix(esversion): + if is_atleastseven(esversion) or is_atleastsix(esversion): index_names = ( ea_index, ea_index + '_status', @@ -56,10 +64,18 @@ def create_index_mappings(es_client, ea_index, recreate=False, old_ea_index=None # To avoid a race condition. 
TODO: replace this with a real check time.sleep(2) - - if is_atleastseven(esversion): - # TODO remove doc_type completely when elasicsearch client allows doc_type=None - # doc_type is a deprecated feature and will be completely removed in Elasicsearch 8 + if is_atleasteight(esversion): + es_client.indices.put_mapping(index=ea_index, + body=es_index_mappings['elastalert']) + es_client.indices.put_mapping(index=ea_index + '_status', + body=es_index_mappings['elastalert_status']) + es_client.indices.put_mapping(index=ea_index + '_silence', + body=es_index_mappings['silence']) + es_client.indices.put_mapping(index=ea_index + '_error', + body=es_index_mappings['elastalert_error']) + es_client.indices.put_mapping(index=ea_index + '_past', + body=es_index_mappings['past_elastalert']) + elif is_atleastseven(esversion) : es_client.indices.put_mapping(index=ea_index, doc_type='_doc', body=es_index_mappings['elastalert'], include_type_name=True) es_client.indices.put_mapping(index=ea_index + '_status', doc_type='_doc', @@ -70,17 +86,6 @@ def create_index_mappings(es_client, ea_index, recreate=False, old_ea_index=None body=es_index_mappings['elastalert_error'], include_type_name=True) es_client.indices.put_mapping(index=ea_index + '_past', doc_type='_doc', body=es_index_mappings['past_elastalert'], include_type_name=True) - elif is_atleastsixtwo(esversion): - es_client.indices.put_mapping(index=ea_index, doc_type='_doc', - body=es_index_mappings['elastalert']) - es_client.indices.put_mapping(index=ea_index + '_status', doc_type='_doc', - body=es_index_mappings['elastalert_status']) - es_client.indices.put_mapping(index=ea_index + '_silence', doc_type='_doc', - body=es_index_mappings['silence']) - es_client.indices.put_mapping(index=ea_index + '_error', doc_type='_doc', - body=es_index_mappings['elastalert_error']) - es_client.indices.put_mapping(index=ea_index + '_past', doc_type='_doc', - body=es_index_mappings['past_elastalert']) elif is_atleastsix(esversion): es_client.indices.put_mapping(index=ea_index, doc_type='elastalert', body=es_index_mappings['elastalert']) @@ -92,18 +97,6 @@ def create_index_mappings(es_client, ea_index, recreate=False, old_ea_index=None body=es_index_mappings['elastalert_error']) es_client.indices.put_mapping(index=ea_index + '_past', doc_type='past_elastalert', body=es_index_mappings['past_elastalert']) - else: - es_client.indices.put_mapping(index=ea_index, doc_type='elastalert', - body=es_index_mappings['elastalert']) - es_client.indices.put_mapping(index=ea_index, doc_type='elastalert_status', - body=es_index_mappings['elastalert_status']) - es_client.indices.put_mapping(index=ea_index, doc_type='silence', - body=es_index_mappings['silence']) - es_client.indices.put_mapping(index=ea_index, doc_type='elastalert_error', - body=es_index_mappings['elastalert_error']) - es_client.indices.put_mapping(index=ea_index, doc_type='past_elastalert', - body=es_index_mappings['past_elastalert']) - print('New index %s created' % ea_index) if old_ea_index: print("Copying all data from old index '{0}' to new index '{1}'".format(old_ea_index, ea_index)) @@ -113,7 +106,7 @@ def create_index_mappings(es_client, ea_index, recreate=False, old_ea_index=None print('Done!') -def read_es_index_mappings(es_version=6): +def read_es_index_mappings(es_version=8): print('Reading Elastic {0} index mappings:'.format(es_version)) return { 'silence': read_es_index_mapping('silence', es_version), @@ -124,7 +117,7 @@ def read_es_index_mappings(es_version=6): } -def read_es_index_mapping(mapping, 
es_version=6): +def read_es_index_mapping(mapping, es_version=7): base_path = os.path.abspath(os.path.dirname(__file__)) mapping_path = 'es_mappings/{0}/{1}.json'.format(es_version, mapping) path = os.path.join(base_path, mapping_path) @@ -132,19 +125,14 @@ def read_es_index_mapping(mapping, es_version=6): print("Reading index mapping '{0}'".format(mapping_path)) return json.load(f) - def is_atleastsix(es_version): return int(es_version.split(".")[0]) >= 6 - -def is_atleastsixtwo(es_version): - major, minor = list(map(int, es_version.split(".")[:2])) - return major > 6 or (major == 6 and minor >= 2) - - def is_atleastseven(es_version): return int(es_version.split(".")[0]) >= 7 +def is_atleasteight(es_version): + return int(es_version.split(".")[0]) >= 8 def main(): parser = argparse.ArgumentParser() @@ -152,6 +140,8 @@ def main(): parser.add_argument('--port', default=os.environ.get('ES_PORT', None), type=int, help='Elasticsearch port') parser.add_argument('--username', default=os.environ.get('ES_USERNAME', None), help='Elasticsearch username') parser.add_argument('--password', default=os.environ.get('ES_PASSWORD', None), help='Elasticsearch password') + parser.add_argument('--bearer', default=os.environ.get('ES_BEARER', None), help='Elasticsearch bearer token') + parser.add_argument('--api-key', default=os.environ.get('ES_API_KEY', None), help='Elasticsearch api-key token') parser.add_argument('--url-prefix', help='Elasticsearch URL prefix') parser.add_argument('--no-auth', action='store_const', const=True, help='Suppress prompt for basic auth') parser.add_argument('--ssl', action='store_true', default=env('ES_USE_SSL', None), help='Use TLS') @@ -160,15 +150,9 @@ def main(): parser.add_argument('--no-verify-certs', dest='verify_certs', action='store_false', help='Do not verify TLS certificates') parser.add_argument('--index', help='Index name to create') - parser.add_argument('--alias', help='Alias name to create') parser.add_argument('--old-index', help='Old index name to copy') parser.add_argument('--send_get_body_as', default='GET', help='Method for querying Elasticsearch - POST, GET or source') - parser.add_argument( - '--boto-profile', - default=None, - dest='profile', - help='DEPRECATED: (use --profile) Boto profile to use for signing requests') parser.add_argument( '--profile', default=None, @@ -197,6 +181,8 @@ def main(): port = args.port if args.port else data.get('es_port') username = args.username if args.username else data.get('es_username') password = args.password if args.password else data.get('es_password') + bearer = args.bearer if args.bearer else data.get('es_bearer') + api_key = args.api_key if args.api_key else data.get('es_api_key') url_prefix = args.url_prefix if args.url_prefix is not None else data.get('es_url_prefix', '') use_ssl = args.ssl if args.ssl is not None else data.get('use_ssl') verify_certs = args.verify_certs if args.verify_certs is not None else data.get('verify_certs') is not False @@ -206,11 +192,12 @@ def main(): client_cert = data.get('client_cert') client_key = data.get('client_key') index = args.index if args.index is not None else data.get('writeback_index') - alias = args.alias if args.alias is not None else data.get('writeback_alias') old_index = args.old_index if args.old_index is not None else None else: username = args.username if args.username else None password = args.password if args.password else None + bearer = args.bearer if args.bearer else None + api_key = args.api_key if args.api_key else None aws_region = args.aws_region host = 
args.host if args.host else input('Enter Elasticsearch host: ') port = args.port if args.port else int(input('Enter Elasticsearch port: ')) @@ -233,9 +220,6 @@ def main(): index = args.index if args.index is not None else input('New index name? (Default elastalert_status) ') if not index: index = 'elastalert_status' - alias = args.alias if args.alias is not None else input('New alias name? (Default elastalert_alerts) ') - if not alias: - alias = 'elastalert_alias' old_index = (args.old_index if args.old_index is not None else input('Name of existing index to copy? (Default None) ')) @@ -247,6 +231,13 @@ def main(): password=password, aws_region=aws_region, profile_name=args.profile) + + headers = {} + if bearer is not None: + headers.update({'Authorization': f'Bearer {bearer}'}) + if api_key is not None: + headers.update({'Authorization': f'ApiKey {api_key}'}) + es = Elasticsearch( host=host, port=port, @@ -255,6 +246,7 @@ def main(): verify_certs=verify_certs, connection_class=RequestsHttpConnection, http_auth=http_auth, + headers=headers, url_prefix=url_prefix, send_get_body_as=send_get_body_as, client_cert=client_cert, diff --git a/elastalert/elastalert.py b/elastalert/elastalert.py index b078c86db..6a7356dd9 100755 --- a/elastalert/elastalert.py +++ b/elastalert/elastalert.py @@ -12,61 +12,46 @@ import time import timeit import traceback +import requests from email.mime.text import MIMEText from smtplib import SMTP from smtplib import SMTPException from socket import error +import statsd + import dateutil.tz import pytz from apscheduler.schedulers.background import BackgroundScheduler +from apscheduler.executors.pool import ThreadPoolExecutor from croniter import croniter from elasticsearch.exceptions import ConnectionError from elasticsearch.exceptions import ElasticsearchException from elasticsearch.exceptions import NotFoundError from elasticsearch.exceptions import TransportError - -from . 
import kibana -from .alerts import DebugAlerter -from .config import load_conf -from .enhancements import DropMatchException -from .kibana_discover import generate_kibana_discover_url -from .ruletypes import FlatlineRule -from .util import add_raw_postfix -from .util import cronite_datetime_to_timestamp -from .util import dt_to_ts -from .util import dt_to_unix -from .util import EAException -from .util import elastalert_logger -from .util import elasticsearch_client -from .util import format_index -from .util import lookup_es_key -from .util import parse_deadline -from .util import parse_duration -from .util import pretty_ts -from .util import replace_dots_in_field_names -from .util import seconds -from .util import set_es_key -from .util import should_scrolling_continue -from .util import total_seconds -from .util import ts_add -from .util import ts_now -from .util import ts_to_dt -from .util import unix_to_dt +from elastalert.ruletypes import AdvancedQueryRule +from elastalert.ruletypes import ErrorRateRule, NewTermsRule +from elastalert.ruletypes import PercentageMatchRule + +from elastalert.alerters.debug import DebugAlerter +from elastalert.config import load_conf +from elastalert.enhancements import DropMatchException +from elastalert.kibana_discover import generate_kibana_discover_url +from elastalert.kibana_external_url_formatter import create_kibana_external_url_formatter +from elastalert.prometheus_wrapper import PrometheusWrapper +from elastalert.ruletypes import FlatlineRule +from elastalert.util import (add_raw_postfix, cronite_datetime_to_timestamp, dt_to_ts, dt_to_unix, EAException, + elastalert_logger, elasticsearch_client, get_msearch_query,kibana_adapter_client, format_index, lookup_es_key, parse_deadline, + parse_duration, pretty_ts, replace_dots_in_field_names, seconds, set_es_key, + should_scrolling_continue, total_seconds, ts_add, ts_now, ts_to_dt, unix_to_dt, + ts_utc_to_tz, dt_to_ts_with_format) class ElastAlerter(object): """ The main ElastAlert runner. This class holds all state about active rules, controls when queries are run, and passes information between rules and alerts. - :param args: An argparse arguments instance. Should contain debug and start - - :param conf: The configuration dictionary. At the top level, this - contains global options, and under 'rules', contains all state relating - to rules and alerts. In each rule in conf['rules'], the RuleType and Alerter - instances live under 'type' and 'alerts', respectively. The conf dictionary - should not be passed directly from a configuration file, but must be populated - by config.py:load_rules instead. """ + :param args: An argparse arguments instance. Should contain debug and start""" thread_data = threading.local() @@ -83,6 +68,11 @@ def parse_args(self, args): parser.add_argument('--rule', dest='rule', help='Run only a specific rule (by filename, must still be in rules folder)') parser.add_argument('--silence', dest='silence', help='Silence rule for a time period. Must be used with --rule. Usage: ' '--silence =, eg. --silence hours=2') + parser.add_argument( + "--silence_qk_value", + dest="silence_qk_value", + help="Silence the rule only for this specific query key value.", + ) parser.add_argument('--start', dest='start', help='YYYY-MM-DDTHH:MM:SS Start querying from this timestamp. ' 'Use "NOW" to start from current time. (Default: present)') parser.add_argument('--end', dest='end', help='YYYY-MM-DDTHH:MM:SS Query to this timestamp. 
(Default: present)') @@ -105,6 +95,8 @@ def parse_args(self, args): dest='es_debug_trace', help='Enable logging from Elasticsearch queries as curl command. Queries will be logged to file. Note that ' 'this will incorrectly display localhost:9200 as the host/port') + #prometheus port changes + parser.add_argument('--prometheus_port', type=int, dest='prometheus_port', default=9099, help='Enables Prometheus metrics on specified port.') self.args = parser.parse_args(args) def __init__(self, args): @@ -139,12 +131,11 @@ def __init__(self, args): self.rules_loader = self.conf['rules_loader'] self.rules = self.rules_loader.load(self.conf, self.args) - print(len(self.rules), 'rules loaded') + elastalert_logger.info(f'{len(self.rules)} rules loaded') self.max_query_size = self.conf['max_query_size'] self.scroll_keepalive = self.conf['scroll_keepalive'] self.writeback_index = self.conf['writeback_index'] - self.writeback_alias = self.conf['writeback_alias'] self.run_every = self.conf['run_every'] self.alert_time_limit = self.conf['alert_time_limit'] self.old_query_limit = self.conf['old_query_limit'] @@ -159,18 +150,45 @@ def __init__(self, args): self.starttime = self.args.start self.disabled_rules = [] self.replace_dots_in_field_names = self.conf.get('replace_dots_in_field_names', False) + self.thread_data.alerts_sent = 0 self.thread_data.num_hits = 0 self.thread_data.num_dupes = 0 - self.scheduler = BackgroundScheduler() + executors = { + 'default': ThreadPoolExecutor(max_workers=self.conf.get('max_threads', 10)), + } + job_defaults = { + 'misfire_grace_time': self.conf.get('misfire_grace_time', 5), + 'coalesce': True, + 'max_instances': 1 + } + self.scheduler = BackgroundScheduler(executors=executors, job_defaults=job_defaults) self.string_multi_field_name = self.conf.get('string_multi_field_name', False) + self.statsd_instance_tag = self.conf.get('statsd_instance_tag', '') + self.statsd_host = self.conf.get('statsd_host', '') + if self.statsd_host and len(self.statsd_host) > 0: + self.statsd = statsd.StatsClient(host=self.statsd_host, port=8125) + else: + self.statsd = None self.add_metadata_alert = self.conf.get('add_metadata_alert', False) + self.prometheus_port = self.args.prometheus_port self.show_disabled_rules = self.conf.get('show_disabled_rules', True) + self.pretty_ts_format = self.conf.get('custom_pretty_ts_format') self.writeback_es = elasticsearch_client(self.conf) + #kibana adapter is the modded elasticsearch_client + self.kibana_adapter = kibana_adapter_client(self.conf) + self._es_version = None + + #query_endpoint used by error_rate rule + self.query_endpoint = self.conf['query_endpoint'] + remove = [] for rule in self.rules: - if not self.init_rule(rule): + if 'is_enabled' in rule and not rule['is_enabled']: + self.disabled_rules.append(rule) + remove.append(rule) + elif not self.init_rule(rule): remove.append(rule) list(map(self.rules.remove, remove)) @@ -197,8 +215,7 @@ def get_index(rule, starttime=None, endtime=None): return index @staticmethod - def get_query(filters, starttime=None, endtime=None, sort=True, timestamp_field='@timestamp', to_ts_func=dt_to_ts, desc=False, - five=False): + def get_query(filters, starttime=None, endtime=None, sort=True, timestamp_field='@timestamp', to_ts_func=dt_to_ts, desc=False): """ Returns a query dict that will apply a list of filters, filter by start and end time, and sort results by timestamp. 
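# Illustrative sketch, not part of this change set: the reworked get_query in the hunk
# below always emits the Elasticsearch "bool" query form (the legacy "filtered" form and
# the `five` flag are removed) and rewrites old-style `{'query': ...}` filters in place.
# A minimal standalone approximation of the request body it ends up building, using a
# hypothetical helper name and assuming ISO-8601 timestamp strings:
def build_bool_query(filters, starttime, endtime, timestamp_field='@timestamp', desc=False):
    # The range clause on the timestamp field comes first, followed by the rule's filters.
    musts = [{'range': {timestamp_field: {'gt': starttime, 'lte': endtime}}}] + list(filters)
    return {
        'query': {'bool': {'filter': {'bool': {'must': musts}}}},
        'sort': [{timestamp_field: {'order': 'desc' if desc else 'asc'}}],
    }

# Example (hypothetical filter and time window):
#   build_bool_query([{'term': {'status': 'error'}}],
#                    '2021-01-01T00:00:00Z', '2021-01-01T01:00:00Z')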
@@ -211,37 +228,57 @@ def get_query(filters, starttime=None, endtime=None, sort=True, timestamp_field= starttime = to_ts_func(starttime) endtime = to_ts_func(endtime) filters = copy.copy(filters) - es_filters = {'filter': {'bool': {'must': filters}}} + + # ElastAlert documentation still specifies an old way of writing filters + # This snippet of code converts it into the new standard + new_filters = [] + for es_filter in filters: + if es_filter.get('query'): + new_filters.append(es_filter['query']) + else: + new_filters.append(es_filter) + + es_filters = {'filter': {'bool': {'must': new_filters}}} if starttime and endtime: es_filters['filter']['bool']['must'].insert(0, {'range': {timestamp_field: {'gt': starttime, 'lte': endtime}}}) - if five: - query = {'query': {'bool': es_filters}} - else: - query = {'query': {'filtered': es_filters}} + query = {'query': {'bool': es_filters}} if sort: query['sort'] = [{timestamp_field: {'order': 'desc' if desc else 'asc'}}] return query - def get_terms_query(self, query, rule, size, field, five=False): + def get_terms_query(self, query, rule, size, field): """ Takes a query generated by get_query and outputs a aggregation query """ query_element = query['query'] if 'sort' in query_element: query_element.pop('sort') - if not five: - query_element['filtered'].update({'aggs': {'counts': {'terms': {'field': field, - 'size': size, - 'min_doc_count': rule.get('min_doc_count', 1)}}}}) - aggs_query = {'aggs': query_element} + + if 'nested_query_key' in rule and rule['nested_query_key'] == True and len(field.split(",")) > 1: + aggs_query = query + query_key_list = field.split(",") + first_query_key = query_key_list.pop() + aggs_element = {'counts': {'terms': {'field': first_query_key, + 'size': size, + 'min_doc_count': rule.get('min_doc_count', 1)}}} + + if len(query_key_list) > 0: + for key in reversed(query_key_list): + aggs_element = {'counts': {'terms': {'field': key, 'size': size, + 'min_doc_count': rule.get('min_doc_count', 1)}, 'aggs': aggs_element}} + aggs_query['aggs'] = aggs_element else: aggs_query = query aggs_query['aggs'] = {'counts': {'terms': {'field': field, - 'size': size, - 'min_doc_count': rule.get('min_doc_count', 1)}}} + 'size': size, + 'min_doc_count': rule.get('min_doc_count', 1)}}} + + return aggs_query def get_aggregation_query(self, query, rule, query_key, terms_size, timestamp_field='@timestamp'): """ Takes a query generated by get_query and outputs a aggregation query """ + if isinstance(rule['type'], PercentageMatchRule): + query['size'] = 10 query_element = query['query'] if 'sort' in query_element: query_element.pop('sort') @@ -253,7 +290,7 @@ def get_aggregation_query(self, query, rule, query_key, terms_size, timestamp_fi 'interval_aggs': { 'date_histogram': { 'field': timestamp_field, - 'interval': bucket_interval_period}, + 'fixed_interval': bucket_interval_period}, 'aggs': metric_agg_element } } @@ -268,12 +305,8 @@ def get_aggregation_query(self, query, rule, query_key, terms_size, timestamp_fi 'min_doc_count': rule.get('min_doc_count', 1)}, 'aggs': aggs_element}} - if not rule['five']: - query_element['filtered'].update({'aggs': aggs_element}) - aggs_query = {'aggs': query_element} - else: - aggs_query = query - aggs_query['aggs'] = aggs_element + aggs_query = query + aggs_query['aggs'] = aggs_element return aggs_query def get_index_start(self, index, timestamp_field='@timestamp'): @@ -284,12 +317,8 @@ def get_index_start(self, index, timestamp_field='@timestamp'): """ query = {'sort': {timestamp_field: {'order': 'asc'}}} 
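# Illustrative sketch, not part of this change set: when `nested_query_key` is enabled and
# the query key is a comma-separated compound such as "service,host", the get_terms_query
# change above nests one terms aggregation per key, building the innermost bucket first.
# A standalone approximation with a hypothetical helper name:
def nested_terms_aggs(compound_key, size=50, min_doc_count=1):
    keys = compound_key.split(',')
    # Start from the last key and wrap each preceding key around it.
    aggs = {'counts': {'terms': {'field': keys[-1], 'size': size, 'min_doc_count': min_doc_count}}}
    for key in reversed(keys[:-1]):
        aggs = {'counts': {'terms': {'field': key, 'size': size, 'min_doc_count': min_doc_count},
                           'aggs': aggs}}
    return aggs

# Example: nested_terms_aggs('service,host') yields an outer terms agg on "service"
# whose buckets each contain an inner "counts" terms agg on "host".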
try: - if self.thread_data.current_es.is_atleastsixsix(): - res = self.thread_data.current_es.search(index=index, size=1, body=query, - _source_includes=[timestamp_field], ignore_unavailable=True) - else: - res = self.thread_data.current_es.search(index=index, size=1, body=query, _source_include=[timestamp_field], - ignore_unavailable=True) + res = self.thread_data.current_es.search(index=index, size=1, body=query, + _source_includes=[timestamp_field], ignore_unavailable=True) except ElasticsearchException as e: self.handle_error("Elasticsearch query error: %s" % (e), {'index': index, 'query': query}) return '1969-12-30T00:00:00Z' @@ -358,39 +387,39 @@ def get_hits(self, rule, starttime, endtime, index, scroll=False): endtime, timestamp_field=rule['timestamp_field'], to_ts_func=rule['dt_to_ts'], - five=rule['five'], ) - if self.thread_data.current_es.is_atleastsixsix(): - extra_args = {'_source_includes': rule['include']} - else: - extra_args = {'_source_include': rule['include']} - scroll_keepalive = rule.get('scroll_keepalive', self.scroll_keepalive) - if not rule.get('_source_enabled'): - if rule['five']: - query['stored_fields'] = rule['include'] - else: - query['fields'] = rule['include'] - extra_args = {} - try: - if scroll: - res = self.thread_data.current_es.scroll(scroll_id=rule['scroll_id'], scroll=scroll_keepalive) - else: - res = self.thread_data.current_es.search( - scroll=scroll_keepalive, - index=index, - size=rule.get('max_query_size', self.max_query_size), - body=query, - ignore_unavailable=True, - **extra_args - ) - if '_scroll_id' in res: - rule['scroll_id'] = res['_scroll_id'] + request = get_msearch_query(query,rule) - if self.thread_data.current_es.is_atleastseven(): - self.thread_data.total_hits = int(res['hits']['total']['value']) - else: - self.thread_data.total_hits = int(res['hits']['total']) + #removed scroll as it aint supported + # extra_args = {'_source_includes': rule['include']} + # scroll_keepalive = rule.get('scroll_keepalive', self.scroll_keepalive) + # if not rule.get('_source_enabled'): + # query['stored_fields'] = rule['include'] + # extra_args = {} + + try: + #using backwards compatibile msearch + res = self.thread_data.current_es.msearch(body=request) + res = res['responses'][0] + self.thread_data.total_hits = int(res['hits']['total']['value'] if isinstance(res['hits']['total'], dict) else res['hits']['total']) + + #removed scroll as it aint supported + # if scroll: + # res = self.thread_data.current_es.scroll(scroll_id=rule['scroll_id'], scroll=scroll_keepalive) + # else: + # res = self.thread_data.current_es.search( + # scroll=scroll_keepalive, + # index=index, + # size=rule.get('max_query_size', self.max_query_size), + # body=query, + # ignore_unavailable=True, + # **extra_args + # ) + # if '_scroll_id' in res: + # rule['scroll_id'] = res['_scroll_id'] + + # self.thread_data.total_hits = int(res['hits']['total']['value']) if len(res.get('_shards', {}).get('failures', [])) > 0: try: @@ -401,7 +430,7 @@ def get_hits(self, rule, starttime, endtime, index, scroll=False): # Different versions of ES have this formatted in different ways. 
Fallback to str-ing the whole thing raise ElasticsearchException(str(res['_shards']['failures'])) - logging.debug(str(res)) + elastalert_logger.debug(str(res)) except ElasticsearchException as e: # Elasticsearch sometimes gives us GIGANTIC error messages # (so big that they will fill the entire terminal buffer) @@ -414,8 +443,8 @@ def get_hits(self, rule, starttime, endtime, index, scroll=False): lt = rule.get('use_local_time') status_log = "Queried rule %s from %s to %s: %s / %s hits" % ( rule['name'], - pretty_ts(starttime, lt), - pretty_ts(endtime, lt), + pretty_ts(starttime, lt, self.pretty_ts_format), + pretty_ts(endtime, lt, self.pretty_ts_format), self.thread_data.num_hits, len(hits) ) @@ -425,12 +454,41 @@ def get_hits(self, rule, starttime, endtime, index, scroll=False): elastalert_logger.info(status_log) hits = self.process_hits(rule, hits) - - # Record doc_type for use in get_top_counts - if 'doc_type' not in rule and len(hits): - rule['doc_type'] = hits[0]['_type'] return hits + + def get_terms_data(self,rule, starttime, endtime): + data = {} + rule_inst = rule['type'] + try: + for field in rule['fields']: + terms, counts = rule_inst.get_terms_data(self.thread_data.current_es,starttime,endtime,field) + self.thread_data.num_hits += len(terms) + terms_counts_pair = ( terms, counts ) + if type(field) == list: + data[tuple(field)] = terms_counts_pair + else: + data[field] = terms_counts_pair + except ElasticsearchException as e: + if len(str(e)) > 1024: + e = str(e)[:1024] + '... (%d characters removed)' % (len(str(e)) - 1024) + self.handle_error('Error running new terms query: %s' % (e), {'rule': rule['name'], 'query': rule_inst.get_new_term_query(starttime, endtime,field)}) + return {endtime: {}} + + + + lt = rule.get('use_local_time') + status_log = "Queried rule %s from %s to %s: %s / %s hits" % ( + rule['name'], + pretty_ts(starttime, lt, self.pretty_ts_format), + pretty_ts(endtime, lt, self.pretty_ts_format), + self.thread_data.num_hits, + self.thread_data.num_hits, + ) + elastalert_logger.info(status_log) + + return {endtime : data} + def get_hits_count(self, rule, starttime, endtime, index): """ Query Elasticsearch for the count of results and returns a list of timestamps equal to the endtime. 
This allows the results to be passed to rules which expect @@ -446,13 +504,17 @@ def get_hits_count(self, rule, starttime, endtime, index): starttime, endtime, timestamp_field=rule['timestamp_field'], - sort=False, + sort=True, to_ts_func=rule['dt_to_ts'], - five=rule['five'] + desc=True, ) + request = get_msearch_query(query,rule) + try: - res = self.thread_data.current_es.count(index=index, doc_type=rule['doc_type'], body=query, ignore_unavailable=True) + #using backwards compatibile msearch + res = self.thread_data.current_es.msearch(body=request) + res = res['responses'][0] except ElasticsearchException as e: # Elasticsearch sometimes gives us GIGANTIC error messages # (so big that they will fill the entire terminal buffer) @@ -461,35 +523,39 @@ def get_hits_count(self, rule, starttime, endtime, index): self.handle_error('Error running count query: %s' % (e), {'rule': rule['name'], 'query': query}) return None - self.thread_data.num_hits += res['count'] + self.thread_data.num_hits += (res['hits']['total']['value'] if isinstance(res['hits']['total'], dict) else res['hits']['total']) lt = rule.get('use_local_time') elastalert_logger.info( - "Queried rule %s from %s to %s: %s hits" % (rule['name'], pretty_ts(starttime, lt), pretty_ts(endtime, lt), res['count']) + "Queried rule %s from %s to %s: %s hits" % (rule['name'], pretty_ts(starttime, lt, self.pretty_ts_format), + pretty_ts(endtime, lt, self.pretty_ts_format), (res['hits']['total']['value'] if isinstance(res['hits']['total'], dict) else res['hits']['total'])) ) - return {endtime: res['count']} + + if len(res['hits']['hits']) > 0 : + event = self.process_hits(rule, res['hits']['hits']) + else: + event= self.process_hits(rule,[{'_source': {rule['timestamp_field']: endtime}}]) + + return {"endtime":endtime,"count": (res['hits']['total']['value'] if isinstance(res['hits']['total'], dict) else res['hits']['total']),"event": event} + #return {endtime: res['hits']['total']} def get_hits_terms(self, rule, starttime, endtime, index, key, qk=None, size=None): rule_filter = copy.copy(rule['filter']) if qk: qk_list = qk.split(",") - end = None - if rule['five']: - end = '.keyword' - else: - end = '.raw' + end = '.keyword' if len(qk_list) == 1: qk = qk_list[0] filter_key = rule['query_key'] if rule.get('raw_count_keys', True) and not rule['query_key'].endswith(end): - filter_key = add_raw_postfix(filter_key, rule['five']) + filter_key = add_raw_postfix(filter_key) rule_filter.extend([{'term': {filter_key: qk}}]) else: filter_keys = rule['compound_query_key'] for i in range(len(filter_keys)): key_with_postfix = filter_keys[i] if rule.get('raw_count_keys', True) and not key.endswith(end): - key_with_postfix = add_raw_postfix(key_with_postfix, rule['five']) + key_with_postfix = add_raw_postfix(key_with_postfix) rule_filter.extend([{'term': {key_with_postfix: qk_list[i]}}]) base_query = self.get_query( @@ -499,24 +565,17 @@ def get_hits_terms(self, rule, starttime, endtime, index, key, qk=None, size=Non timestamp_field=rule['timestamp_field'], sort=False, to_ts_func=rule['dt_to_ts'], - five=rule['five'] ) if size is None: size = rule.get('terms_size', 50) - query = self.get_terms_query(base_query, rule, size, key, rule['five']) + query = self.get_terms_query(base_query, rule, size, key) + request = get_msearch_query(query,rule) try: - if not rule['five']: - res = self.thread_data.current_es.deprecated_search( - index=index, - doc_type=rule['doc_type'], - body=query, - search_type='count', - ignore_unavailable=True - ) - else: - res = 
self.thread_data.current_es.deprecated_search(index=index, doc_type=rule['doc_type'], - body=query, size=0, ignore_unavailable=True) + #using backwards compatibile msearch + res = self.thread_data.current_es.msearch(body=request) + res = res['responses'][0] + except ElasticsearchException as e: # Elasticsearch sometimes gives us GIGANTIC error messages # (so big that they will fill the entire terminal buffer) @@ -527,14 +586,13 @@ def get_hits_terms(self, rule, starttime, endtime, index, key, qk=None, size=Non if 'aggregations' not in res: return {} - if not rule['five']: - buckets = res['aggregations']['filtered']['counts']['buckets'] - else: - buckets = res['aggregations']['counts']['buckets'] + buckets = res['aggregations']['counts']['buckets'] self.thread_data.num_hits += len(buckets) lt = rule.get('use_local_time') elastalert_logger.info( - 'Queried rule %s from %s to %s: %s buckets' % (rule['name'], pretty_ts(starttime, lt), pretty_ts(endtime, lt), len(buckets)) + 'Queried rule %s from %s to %s: %s buckets' % ( + rule['name'], pretty_ts(starttime, lt, self.pretty_ts_format), + pretty_ts(endtime, lt, self.pretty_ts_format), len(buckets)) ) return {endtime: buckets} @@ -547,23 +605,15 @@ def get_hits_aggregation(self, rule, starttime, endtime, index, query_key, term_ timestamp_field=rule['timestamp_field'], sort=False, to_ts_func=rule['dt_to_ts'], - five=rule['five'] ) if term_size is None: term_size = rule.get('terms_size', 50) query = self.get_aggregation_query(base_query, rule, query_key, term_size, rule['timestamp_field']) + request = get_msearch_query(query,rule) try: - if not rule['five']: - res = self.thread_data.current_es.deprecated_search( - index=index, - doc_type=rule.get('doc_type'), - body=query, - search_type='count', - ignore_unavailable=True - ) - else: - res = self.thread_data.current_es.deprecated_search(index=index, doc_type=rule.get('doc_type'), - body=query, size=0, ignore_unavailable=True) + #using backwards compatibile msearch + res = self.thread_data.current_es.msearch(body=request) + res = res['responses'][0] except ElasticsearchException as e: if len(str(e)) > 1024: e = str(e)[:1024] + '... (%d characters removed)' % (len(str(e)) - 1024) @@ -571,18 +621,105 @@ def get_hits_aggregation(self, rule, starttime, endtime, index, query_key, term_ return None if 'aggregations' not in res: return {} - if not rule['five']: - payload = res['aggregations']['filtered'] - else: - payload = res['aggregations'] + payload = res['aggregations'] + + self.thread_data.num_hits += (res['hits']['total']['value'] if isinstance(res['hits']['total'], dict) else res['hits']['total']) + return {endtime: payload} + + def get_adv_query_aggregation(self, rule, starttime, endtime, index, term_size=None): + rule_filter = copy.copy(rule['filter']) + base_query = self.get_query( + rule_filter, + starttime, + endtime, + timestamp_field=rule['timestamp_field'], + sort=False, + to_ts_func=rule['dt_to_ts'], + ) + request = get_msearch_query(base_query,rule) + try: + #using backwards compatibile msearch + res = self.thread_data.current_es.msearch(body=request) + res = res['responses'][0] + except ElasticsearchException as e: + if len(str(e)) > 1024: + e = str(e)[:1024] + '... 
(%d characters removed)' % (len(str(e)) - 1024) + self.handle_error('Error running query: %s' % (e), {'rule': rule['name']}) + return None + if 'aggregations' not in res: + return {} + payload = res['aggregations'] + self.thread_data.num_hits += int(res['hits']['total']['value'] if isinstance(res['hits']['total'], dict) else res['hits']['total']) + return {endtime: payload} + - if self.thread_data.current_es.is_atleastseven(): - self.thread_data.num_hits += res['hits']['total']['value'] + #trace_alert specific error rate method + def get_error_rate(self, rule, starttime, endtime): + agg_key = '{}({})'.format(rule['total_agg_type'],rule['total_agg_key']) + query = self.get_query_string(rule) + aggregation = {"function": rule['total_agg_type'].upper(), "field": rule['total_agg_key']} + + total_data, total_count = self.get_ch_data(rule, starttime, endtime, agg_key, query, aggregation) + + if total_data is None: + return {} + + if(query): + query = '{} AND {}'.format(query,rule['error_condition']) else: - self.thread_data.num_hits += res['hits']['total'] + query = rule['error_condition'] + + if rule['count_all_errors']: + agg_key = "count()" + aggregation = {"function": "COUNT", "field": "1"} + + error_data, error_count = self.get_ch_data(rule, starttime, endtime, agg_key, query, aggregation) + + if error_data is None: + return {} + + payload = {'error_count': error_data, 'total_count': total_data, 'start_time': starttime, 'end_time': endtime} + elastalert_logger.info("query start time and endtime %s at %s , error_count %d ,total_count %d" % (starttime, endtime, error_data, total_data)) + + self.thread_data.num_hits += int(error_data) + return {endtime: payload} + #method used by get_error_rate + def get_query_string(self, rule): + if rule['filter'] and ('query_string' in rule['filter'][0]['query']) and ('query' in rule['filter'][0]['query']['query_string']): + return rule['filter'][0]['query']['query_string']['query'] + return "" + + #method used by get_error_rate for calculating aggregates from ch data using query_endpoint + def get_ch_data(self, rule, starttime, endtime, agg_key, freshquery,aggregation): + data = { + "selects":[], + "start_time":dt_to_ts_with_format(starttime,"%Y-%m-%dT%H:%M:%S.%f")[:-3]+'Z', + "end_time":dt_to_ts_with_format(endtime,"%Y-%m-%dT%H:%M:%S.%f")[:-3]+'Z', + "freshquery": freshquery, + "group_bys":[], + "sort_orders":[{"sort_by": agg_key,"sort_direction":"desc"}], + "aggregations":[aggregation] + } + try: + headers = {} + if 'X_ENV' in rule: + headers['X-ENV'] = rule['X_ENV'] + res = requests.post(self.query_endpoint, json=data, headers=headers) + res.raise_for_status() + except requests.exceptions.RequestException as e: + if len(str(e)) > 1024: + e = str(e)[:1024] + '... 
(%d characters removed)' % (len(str(e)) - 1024) + self.handle_error('Error running query: %s' % (e), {'rule': rule['name']}) + return None,0 + res = json.loads(res.content) + return int(res['data'][0][agg_key]), res['rows'] + elastalert_logger.info("request data is %s" % json.dumps(data)) + # res = requests.post(self.query_endpoint, json=data) + # return None, None + def remove_duplicate_events(self, data, rule): new_events = [] for event in data: @@ -601,7 +738,10 @@ def remove_old_events(self, rule): remove = [] buffer_time = rule.get('buffer_time', self.buffer_time) if rule.get('query_delay'): - buffer_time += rule['query_delay'] + try: + buffer_time += rule['query_delay'] + except Exception as e: + self.handle_error("[remove_old_events]Error parsing query_delay send time format %s" % e) for _id, timestamp in rule['processed_hits'].items(): if now - timestamp > buffer_time: remove.append(_id) @@ -620,16 +760,29 @@ def run_query(self, rule, start=None, end=None, scroll=False): if end is None: end = ts_now() + if rule.get('query_timezone'): + elastalert_logger.info("Query start and end time converting UTC to query_timezone : {}".format(rule.get('query_timezone'))) + start = ts_utc_to_tz(start, rule.get('query_timezone')) + end = ts_utc_to_tz(end, rule.get('query_timezone')) # Reset hit counter and query rule_inst = rule['type'] rule['scrolling_cycle'] = rule.get('scrolling_cycle', 0) + 1 index = self.get_index(rule, start, end) - if rule.get('use_count_query'): + if isinstance(rule_inst, NewTermsRule): + data = self.get_terms_data(rule, start, end) + elif rule.get('use_count_query'): data = self.get_hits_count(rule, start, end, index) elif rule.get('use_terms_query'): data = self.get_hits_terms(rule, start, end, index, rule['query_key']) + elif isinstance(rule_inst, ErrorRateRule): + data = self.get_error_rate(rule, start, end) elif rule.get('aggregation_query_element'): - data = self.get_hits_aggregation(rule, start, end, index, rule.get('query_key', None)) + elastalert_logger.info("in agg query element") + if isinstance(rule_inst, AdvancedQueryRule): + data = self.get_adv_query_aggregation(rule, start, end,index) + else: + data = self.get_hits_aggregation(rule, start, end, index, rule.get('query_key', None)) + else: data = self.get_hits(rule, start, end, index, scroll) if data: @@ -641,29 +794,36 @@ def run_query(self, rule, start=None, end=None, scroll=False): if data is None: return False elif data: - if rule.get('use_count_query'): + if isinstance(rule_inst, NewTermsRule): + rule_inst.add_terms_data(data) + elif rule.get('use_count_query'): rule_inst.add_count_data(data) elif rule.get('use_terms_query'): rule_inst.add_terms_data(data) + elif isinstance(rule_inst, ErrorRateRule): + rule_inst.calculate_err_rate(data) elif rule.get('aggregation_query_element'): rule_inst.add_aggregation_data(data) else: rule_inst.add_data(data) - try: - if rule.get('scroll_id') and self.thread_data.num_hits < self.thread_data.total_hits and should_scrolling_continue(rule): - if not self.run_query(rule, start, end, scroll=True): - return False - except RuntimeError: - # It's possible to scroll far enough to hit max recursive depth - pass - - if 'scroll_id' in rule: - scroll_id = rule.pop('scroll_id') - try: - self.thread_data.current_es.clear_scroll(scroll_id=scroll_id) - except NotFoundError: - pass + + #Removed scrolling as in old elastalert + + # try: + # if rule.get('scroll_id') and self.thread_data.num_hits < self.thread_data.total_hits and should_scrolling_continue(rule): + # if not 
self.run_query(rule, start, end, scroll=True): + # return False + # except RuntimeError: + # # It's possible to scroll far enough to hit max recursive depth + # pass + + # if 'scroll_id' in rule: + # scroll_id = rule.pop('scroll_id') + # try: + # self.thread_data.current_es.clear_scroll(scroll_id=scroll_id) + # except NotFoundError: + # pass return True @@ -674,24 +834,15 @@ def get_starttime(self, rule): :return: A timestamp or None. """ sort = {'sort': {'@timestamp': {'order': 'desc'}}} - query = {'filter': {'term': {'rule_name': '%s' % (rule['name'])}}} - if self.writeback_es.is_atleastfive(): - query = {'query': {'bool': query}} + query = {'query': {'bool': {'filter': {'term': {'rule_name': '%s' % (rule['name'])}}}}} query.update(sort) try: doc_type = 'elastalert_status' index = self.writeback_es.resolve_writeback_index(self.writeback_index, doc_type) - if self.writeback_es.is_atleastsixtwo(): - if self.writeback_es.is_atleastsixsix(): - res = self.writeback_es.search(index=index, size=1, body=query, - _source_includes=['endtime', 'rule_name']) - else: - res = self.writeback_es.search(index=index, size=1, body=query, - _source_include=['endtime', 'rule_name']) - else: - res = self.writeback_es.deprecated_search(index=index, doc_type=doc_type, - size=1, body=query, _source_include=['endtime', 'rule_name']) + #modded for elasticsearch ver 6 library compatibility + res = self.writeback_es.search(index=index, size=1, body=query, + _source_includes=['endtime', 'rule_name']) if res['hits']['hits']: endtime = ts_to_dt(res['hits']['hits'][0]['_source']['endtime']) @@ -743,7 +894,8 @@ def set_starttime(self, rule, endtime): # Query from the end of the last run, if it exists, otherwise a run_every sized window rule['starttime'] = rule.get('previous_endtime', endtime - self.run_every) else: - rule['starttime'] = rule.get('previous_endtime', endtime - rule['timeframe']) + #Based on PR 3141 old Yelp/elastalert - rschirin + rule['starttime'] = endtime - rule['timeframe'] def adjust_start_time_for_overlapping_agg_query(self, rule): if rule.get('aggregation_query_element'): @@ -840,11 +992,16 @@ def enhance_filter(self, rule): else: query = " OR ".join(additional_terms) query_str_filter = {'query_string': {'query': query}} - if self.writeback_es.is_atleastfive(): - filters.append(query_str_filter) - else: - filters.append({'query': query_str_filter}) - logging.debug("Enhanced filter with {} terms: {}".format(listname, str(query_str_filter))) + filters.append(query_str_filter) + elastalert_logger.debug("Enhanced filter with {} terms: {}".format(listname, str(query_str_filter))) + + def get_elasticsearch_client(self, rule): + key = rule['name'] + es_client = self.es_clients.get(key) + if es_client is None: + es_client = elasticsearch_client(rule) + self.es_clients[key] = es_client + return es_client def run_rule(self, rule, endtime, starttime=None): """ Run a rule for a given time period, including querying and alerting on results. @@ -855,7 +1012,9 @@ def run_rule(self, rule, endtime, starttime=None): :return: The number of matches that the rule produced. 
""" run_start = time.time() - self.thread_data.current_es = self.es_clients.setdefault(rule['name'], elasticsearch_client(rule)) + + self.thread_data.current_es = kibana_adapter_client(rule) + self.current_es_addr = (rule['es_host'], rule['es_port']) # If there are pending aggregate matches, try processing them for x in range(len(rule['agg_matches'])): @@ -870,16 +1029,16 @@ def run_rule(self, rule, endtime, starttime=None): rule['original_starttime'] = rule['starttime'] rule['scrolling_cycle'] = 0 + self.thread_data.num_hits = 0 + self.thread_data.num_dupes = 0 + self.thread_data.cumulative_hits = 0 # Don't run if starttime was set to the future if ts_now() <= rule['starttime']: - logging.warning("Attempted to use query start time in the future (%s), sleeping instead" % (starttime)) + elastalert_logger.warning("Attempted to use query start time in the future (%s), sleeping instead" % (starttime)) return 0 # Run the rule. If querying over a large time period, split it up into segments - self.thread_data.num_hits = 0 - self.thread_data.num_dupes = 0 - self.thread_data.cumulative_hits = 0 segment_size = self.get_segment_size(rule) tmp_endtime = rule['starttime'] @@ -919,7 +1078,7 @@ def run_rule(self, rule, endtime, starttime=None): # If realert is set, silence the rule for that duration # Silence is cached by query_key, if it exists # Default realert time is 0 seconds - silence_cache_key = rule['name'] + silence_cache_key = rule['realert_key'] query_key_value = self.get_query_key_value(rule, match) if query_key_value is not None: silence_cache_key += '.' + query_key_value @@ -954,6 +1113,7 @@ def run_rule(self, rule, endtime, starttime=None): rule['previous_endtime'] = endtime time_taken = time.time() - run_start + # Write to ES that we've run this rule against this time period body = {'rule_name': rule['name'], 'endtime': endtime, @@ -964,44 +1124,47 @@ def run_rule(self, rule, endtime, starttime=None): 'time_taken': time_taken} self.writeback('elastalert_status', body) + # Write metrics about the run to statsd + if self.statsd: + try: + self.statsd.gauge( + 'rule.time_taken', time_taken, + tags={"elastalert_instance": self.statsd_instance_tag, "rule_name": rule['name']}) + self.statsd.gauge( + 'query.hits', self.thread_data.num_hits, + tags={"elastalert_instance": self.statsd_instance_tag, "rule_name": rule['name']}) + self.statsd.gauge( + 'already_seen.hits', self.thread_data.num_dupes, + tags={"elastalert_instance": self.statsd_instance_tag, "rule_name": rule['name']}) + self.statsd.gauge( + 'query.matches', num_matches, + tags={"elastalert_instance": self.statsd_instance_tag, "rule_name": rule['name']}) + self.statsd.gauge( + 'query.alerts_sent', self.thread_data.alerts_sent, + tags={"elastalert_instance": self.statsd_instance_tag, "rule_name": rule['name']}) + except BaseException as e: + elastalert_logger.error("unable to send metrics:\n%s" % str(e)) + return num_matches def init_rule(self, new_rule, new=True): ''' Copies some necessary non-config state from an exiting rule to a new rule. ''' - if not new: + if not new and self.scheduler.get_job(job_id=new_rule['name']): self.scheduler.remove_job(job_id=new_rule['name']) - try: - self.modify_rule_for_ES5(new_rule) - except TransportError as e: - elastalert_logger.warning('Error connecting to Elasticsearch for rule {}. 
' - 'The rule has been disabled.'.format(new_rule['name'])) - self.send_notification_email(exception=e, rule=new_rule) - return False - self.enhance_filter(new_rule) # Change top_count_keys to .raw if 'top_count_keys' in new_rule and new_rule.get('raw_count_keys', True): if self.string_multi_field_name: string_multi_field_name = self.string_multi_field_name - elif self.writeback_es.is_atleastfive(): - string_multi_field_name = '.keyword' else: - string_multi_field_name = '.raw' + string_multi_field_name = '.keyword' for i, key in enumerate(new_rule['top_count_keys']): if not key.endswith(string_multi_field_name): new_rule['top_count_keys'][i] += string_multi_field_name - if 'download_dashboard' in new_rule['filter']: - # Download filters from Kibana and set the rules filters to them - db_filters = self.filters_from_kibana(new_rule, new_rule['filter']['download_dashboard']) - if db_filters is not None: - new_rule['filter'] = db_filters - else: - raise EAException("Could not download filters from %s" % (new_rule['filter']['download_dashboard'])) - blank_rule = {'agg_matches': [], 'aggregate_alert_time': {}, 'current_aggregate_id': {}, @@ -1034,31 +1197,13 @@ def init_rule(self, new_rule, new=True): args=[new_rule], seconds=new_rule['run_every'].total_seconds(), id=new_rule['name'], + name="Rule: %s" % (new_rule['name']), max_instances=1, jitter=5) job.modify(next_run_time=datetime.datetime.now() + datetime.timedelta(seconds=random.randint(0, 15))) return new_rule - @staticmethod - def modify_rule_for_ES5(new_rule): - # Get ES version per rule - rule_es = elasticsearch_client(new_rule) - if rule_es.is_atleastfive(): - new_rule['five'] = True - else: - new_rule['five'] = False - return - - # In ES5, filters starting with 'query' should have the top wrapper removed - new_filters = [] - for es_filter in new_rule.get('filter', []): - if es_filter.get('query'): - new_filters.append(es_filter['query']) - else: - new_filters.append(es_filter) - new_rule['filter'] = new_filters - def load_rule_changes(self): """ Using the modification times of rule config files, syncs the running rules to match the files in rules_folder by removing, adding or reloading rules. """ @@ -1082,12 +1227,21 @@ def load_rule_changes(self): try: new_rule = self.rules_loader.load_configuration(rule_file, self.conf) if not new_rule: - logging.error('Invalid rule file skipped: %s' % rule_file) + elastalert_logger.error('Invalid rule file skipped: %s' % rule_file) continue if 'is_enabled' in new_rule and not new_rule['is_enabled']: elastalert_logger.info('Rule file %s is now disabled.' 
% (rule_file)) # Remove this rule if it's been disabled self.rules = [rule for rule in self.rules if rule['rule_file'] != rule_file] + # Stop job if is running + if self.scheduler.get_job(job_id=new_rule['name']): + self.scheduler.remove_job(job_id=new_rule['name']) + # Append to disabled_rule + for disabled_rule in self.disabled_rules: + if disabled_rule['name'] == new_rule['name']: + break + else: + self.disabled_rules.append(new_rule) continue except EAException as e: message = 'Could not load rule %s: %s' % (rule_file, e) @@ -1106,7 +1260,6 @@ def load_rule_changes(self): # Re-enable if rule had been disabled for disabled_rule in self.disabled_rules: if disabled_rule['name'] == new_rule['name']: - self.rules.append(disabled_rule) self.disabled_rules.remove(disabled_rule) break @@ -1122,7 +1275,7 @@ def load_rule_changes(self): try: new_rule = self.rules_loader.load_configuration(rule_file, self.conf) if not new_rule: - logging.error('Invalid rule file skipped: %s' % rule_file) + elastalert_logger.error('Invalid rule file skipped: %s' % rule_file) continue if 'is_enabled' in new_rule and not new_rule['is_enabled']: continue @@ -1158,9 +1311,13 @@ def start(self): self.running = True elastalert_logger.info("Starting up") self.scheduler.add_job(self.handle_pending_alerts, 'interval', - seconds=self.run_every.total_seconds(), id='_internal_handle_pending_alerts') + seconds=self.run_every.total_seconds(), + id='_internal_handle_pending_alerts', + name='Internal: Handle Pending Alerts') self.scheduler.add_job(self.handle_config_change, 'interval', - seconds=self.run_every.total_seconds(), id='_internal_handle_config_change') + seconds=self.run_every.total_seconds(), + id='_internal_handle_config_change', + name='Internal: Handle Config Change') self.scheduler.start() while self.running: next_run = datetime.datetime.utcnow() + self.run_every @@ -1198,19 +1355,19 @@ def wait_until_responsive(self, timeout, clock=timeit.default_timer): ref = clock() while (clock() - ref) < timeout: try: - if self.writeback_es.indices.exists(self.writeback_alias): + if self.writeback_es.indices.exists(self.writeback_index): return except ConnectionError: pass time.sleep(1.0) if self.writeback_es.ping(): - logging.error( - 'Writeback alias "%s" does not exist, did you run `elastalert-create-index`?', - self.writeback_alias, + elastalert_logger.error( + 'Writeback index "%s" does not exist, did you run `elastalert-create-index`?', + self.writeback_index, ) else: - logging.error( + elastalert_logger.error( 'Could not reach ElasticSearch at "%s:%d".', self.conf['es_host'], self.conf['es_port'], @@ -1229,13 +1386,14 @@ def run_all_rules(self): def handle_pending_alerts(self): self.thread_data.alerts_sent = 0 self.send_pending_alerts() - elastalert_logger.info("Background alerts thread %s pending alerts sent at %s" % (self.thread_data.alerts_sent, - pretty_ts(ts_now()))) + elastalert_logger.info("Background alerts thread %s pending alerts sent at %s" % ( + self.thread_data.alerts_sent, pretty_ts(ts_now(), ts_format=self.pretty_ts_format))) def handle_config_change(self): if not self.args.pin_rules: self.load_rule_changes() - elastalert_logger.info("Background configuration change check run at %s" % (pretty_ts(ts_now()))) + elastalert_logger.info( + "Background configuration change check run at %s" % (pretty_ts(ts_now(), ts_format=self.pretty_ts_format))) def handle_rule_execution(self, rule): self.thread_data.alerts_sent = 0 @@ -1245,7 +1403,10 @@ def handle_rule_execution(self, rule): if hasattr(self.args, 'end') and 
self.args.end: endtime = ts_to_dt(self.args.end) elif delay: - endtime = ts_now() - delay + try: + endtime = ts_now() - delay + except Exception as e: + self.handle_error("[handle_rule_execution]Error parsing query_delay send time format %s" % e) else: endtime = ts_now() @@ -1274,21 +1435,27 @@ def handle_rule_execution(self, rule): except Exception as e: self.handle_uncaught_exception(e, rule) else: - old_starttime = pretty_ts(rule.get('original_starttime'), rule.get('use_local_time')) + old_starttime = pretty_ts(rule.get('original_starttime'), rule.get('use_local_time'), self.pretty_ts_format) elastalert_logger.info("Ran %s from %s to %s: %s query hits (%s already seen), %s matches," - " %s alerts sent" % (rule['name'], old_starttime, pretty_ts(endtime, rule.get('use_local_time')), - self.thread_data.num_hits, self.thread_data.num_dupes, num_matches, + " %s alerts sent" % (rule['name'], old_starttime, + pretty_ts(endtime, rule.get('use_local_time'), + self.pretty_ts_format), + self.thread_data.num_hits, self.thread_data.num_dupes, + num_matches, self.thread_data.alerts_sent)) + rule_duration = seconds(endtime - rule.get('original_starttime')) + elastalert_logger.info("%s range %s" % (rule['name'], rule_duration)) + self.thread_data.alerts_sent = 0 if next_run < datetime.datetime.utcnow(): # We were processing for longer than our refresh interval # This can happen if --start was specified with a large time period # or if we are running too slow to process events in real time. - logging.warning( + elastalert_logger.warning( "Querying from %s to %s took longer than %s!" % ( old_starttime, - pretty_ts(endtime, rule.get('use_local_time')), + pretty_ts(endtime, rule.get('use_local_time'), self.pretty_ts_format), self.run_every ) ) @@ -1307,7 +1474,8 @@ def reset_rule_schedule(self, rule): if rule['next_min_starttime']: rule['minimum_starttime'] = rule['next_min_starttime'] rule['previous_endtime'] = rule['next_min_starttime'] - elastalert_logger.info('Pausing %s until next run at %s' % (rule['name'], pretty_ts(rule['next_starttime']))) + elastalert_logger.info('Pausing %s until next run at %s' % ( + rule['name'], pretty_ts(rule['next_starttime'], ts_format=self.pretty_ts_format))) def stop(self): """ Stop an ElastAlert runner that's been started """ @@ -1322,130 +1490,6 @@ def sleep_for(self, duration): elastalert_logger.info("Sleeping for %s seconds" % (duration)) time.sleep(duration) - def generate_kibana4_db(self, rule, match): - ''' Creates a link for a kibana4 dashboard which has time set to the match. ''' - db_name = rule.get('use_kibana4_dashboard') - start = ts_add( - lookup_es_key(match, rule['timestamp_field']), - -rule.get('kibana4_start_timedelta', rule.get('timeframe', datetime.timedelta(minutes=10))) - ) - end = ts_add( - lookup_es_key(match, rule['timestamp_field']), - rule.get('kibana4_end_timedelta', rule.get('timeframe', datetime.timedelta(minutes=10))) - ) - return kibana.kibana4_dashboard_link(db_name, start, end) - - def generate_kibana_db(self, rule, match): - ''' Uses a template dashboard to upload a temp dashboard showing the match. - Returns the url to the dashboard. 
''' - db = copy.deepcopy(kibana.dashboard_temp) - - # Set timestamp fields to match our rule especially if - # we have configured something other than @timestamp - kibana.set_timestamp_field(db, rule['timestamp_field']) - - # Set filters - for filter in rule['filter']: - if filter: - kibana.add_filter(db, filter) - kibana.set_included_fields(db, rule['include']) - - # Set index - index = self.get_index(rule) - kibana.set_index_name(db, index) - - return self.upload_dashboard(db, rule, match) - - def upload_dashboard(self, db, rule, match): - ''' Uploads a dashboard schema to the kibana-int Elasticsearch index associated with rule. - Returns the url to the dashboard. ''' - # Set time range - start = ts_add(lookup_es_key(match, rule['timestamp_field']), -rule.get('timeframe', datetime.timedelta(minutes=10))) - end = ts_add(lookup_es_key(match, rule['timestamp_field']), datetime.timedelta(minutes=10)) - kibana.set_time(db, start, end) - - # Set dashboard name - db_name = 'ElastAlert - %s - %s' % (rule['name'], end) - kibana.set_name(db, db_name) - - # Add filter for query_key value - if 'query_key' in rule: - for qk in rule.get('compound_query_key', [rule['query_key']]): - if qk in match: - term = {'term': {qk: match[qk]}} - kibana.add_filter(db, term) - - # Add filter for aggregation_key value - if 'aggregation_key' in rule: - for qk in rule.get('compound_aggregation_key', [rule['aggregation_key']]): - if qk in match: - term = {'term': {qk: match[qk]}} - kibana.add_filter(db, term) - - # Convert to json - db_js = json.dumps(db) - db_body = {'user': 'guest', - 'group': 'guest', - 'title': db_name, - 'dashboard': db_js} - - # Upload - es = elasticsearch_client(rule) - # TODO: doc_type = _doc for elastic >= 6 - res = es.index(index='kibana-int', - doc_type='temp', - body=db_body) - - # Return dashboard URL - kibana_url = rule.get('kibana_url') - if not kibana_url: - kibana_url = 'http://%s:%s/_plugin/kibana/' % (rule['es_host'], - rule['es_port']) - return kibana_url + '#/dashboard/temp/%s' % (res['_id']) - - def get_dashboard(self, rule, db_name): - """ Download dashboard which matches use_kibana_dashboard from Elasticsearch. """ - es = elasticsearch_client(rule) - if not db_name: - raise EAException("use_kibana_dashboard undefined") - query = {'query': {'term': {'_id': db_name}}} - try: - # TODO use doc_type = _doc - res = es.deprecated_search(index='kibana-int', doc_type='dashboard', body=query, _source_include=['dashboard']) - except ElasticsearchException as e: - raise EAException("Error querying for dashboard: %s" % (e)).with_traceback(sys.exc_info()[2]) - - if res['hits']['hits']: - return json.loads(res['hits']['hits'][0]['_source']['dashboard']) - else: - raise EAException("Could not find dashboard named %s" % (db_name)) - - def use_kibana_link(self, rule, match): - """ Uploads an existing dashboard as a temp dashboard modified for match time. - Returns the url to the dashboard. """ - # Download or get cached dashboard - dashboard = rule.get('dashboard_schema') - if not dashboard: - db_name = rule.get('use_kibana_dashboard') - dashboard = self.get_dashboard(rule, db_name) - if dashboard: - rule['dashboard_schema'] = dashboard - else: - return None - dashboard = copy.deepcopy(dashboard) - return self.upload_dashboard(dashboard, rule, match) - - def filters_from_kibana(self, rule, db_name): - """ Downloads a dashboard from Kibana and returns corresponding filters, None on error. 
""" - try: - db = rule.get('dashboard_schema') - if not db: - db = self.get_dashboard(rule, db_name) - filters = kibana.filters_from_dashboard(db) - except EAException: - return None - return filters - def alert(self, matches, rule, alert_time=None, retried=False): """ Wraps alerting, Kibana linking and enhancements in an exception handler """ try: @@ -1487,28 +1531,11 @@ def send_alert(self, matches, rule, alert_time=None, retried=False): counts = self.get_top_counts(rule, start, end, keys, qk=qk) match.update(counts) - # Generate a kibana3 dashboard for the first match - if rule.get('generate_kibana_link') or rule.get('use_kibana_dashboard'): - try: - if rule.get('generate_kibana_link'): - kb_link = self.generate_kibana_db(rule, matches[0]) - else: - kb_link = self.use_kibana_link(rule, matches[0]) - except EAException as e: - self.handle_error("Could not generate Kibana dash for %s match: %s" % (rule['name'], e)) - else: - if kb_link: - matches[0]['kibana_link'] = kb_link - - if rule.get('use_kibana4_dashboard'): - kb_link = self.generate_kibana4_db(rule, matches[0]) - if kb_link: - matches[0]['kibana_link'] = kb_link - if rule.get('generate_kibana_discover_url'): kb_link = generate_kibana_discover_url(rule, matches[0]) if kb_link: - matches[0]['kibana_discover_url'] = kb_link + kb_link_formatter = self.get_kibana_discover_external_url_formatter(rule) + matches[0]['kibana_discover_url'] = kb_link_formatter.format(kb_link) # Enhancements were already run at match time if # run_enhancements_first is set or @@ -1545,7 +1572,7 @@ def send_alert(self, matches, rule, alert_time=None, retried=False): try: alert.alert(matches) except EAException as e: - self.handle_error('Error while running alert %s: %s' % (alert.get_info()['type'], e), {'rule': rule['name']}) + self.handle_error('Error while running alert %s ( Tenant : %s , Rule : %s ) - %s' % (alert.get_info()['type'], rule.get('tenant') , rule.get('name'), e), {'rule': rule['name']}) alert_exception = str(e) else: self.thread_data.alerts_sent += 1 @@ -1591,6 +1618,17 @@ def get_alert_body(self, match, rule, alert_sent, alert_time, alert_exception=No body['alert_exception'] = alert_exception return body + def get_kibana_discover_external_url_formatter(self, rule): + """ Gets or create the external url formatter for kibana discover links """ + key = '__kibana_discover_external_url_formatter__' + formatter = rule.get(key) + if formatter is None: + shorten = rule.get('shorten_kibana_discover_url') + security_tenant = rule.get('kibana_discover_security_tenant') + formatter = create_kibana_external_url_formatter(rule, shorten, security_tenant) + rule[key] = formatter + return formatter + def writeback(self, doc_type, body, rule=None, match_body=None): # ES 2.0 - 2.3 does not support dots in field names. 
if self.replace_dots_in_field_names: @@ -1612,13 +1650,10 @@ def writeback(self, doc_type, body, rule=None, match_body=None): try: index = self.writeback_es.resolve_writeback_index(self.writeback_index, doc_type) - if self.writeback_es.is_atleastsixtwo(): - res = self.writeback_es.index(index=index, body=body) - else: - res = self.writeback_es.index(index=index, doc_type=doc_type, body=body) + res = self.writeback_es.index(index=index, body=body) return res except ElasticsearchException as e: - logging.exception("Error writing alert info to Elasticsearch: %s" % (e)) + elastalert_logger.exception("Error writing alert info to Elasticsearch: %s" % (e)) def find_recent_pending_alerts(self, time_limit): """ Queries writeback_es to find alerts that did not send @@ -1632,21 +1667,17 @@ def find_recent_pending_alerts(self, time_limit): time_filter = {'range': {'alert_time': {'from': dt_to_ts(ts_now() - time_limit), 'to': dt_to_ts(ts_now())}}} sort = {'sort': {'alert_time': {'order': 'asc'}}} - if self.writeback_es.is_atleastfive(): - query = {'query': {'bool': {'must': inner_query, 'filter': time_filter}}} - else: - query = {'query': inner_query, 'filter': time_filter} + query = {'query': {'bool': {'must': inner_query, 'filter': time_filter}}} query.update(sort) try: - if self.writeback_es.is_atleastsixtwo(): - res = self.writeback_es.search(index=self.writeback_index, body=query, size=1000) - else: - res = self.writeback_es.deprecated_search(index=self.writeback_index, - doc_type='elastalert', body=query, size=1000) + #modded for elasticsearch ver 6 library compatibility + res = self.writeback_es.search(index=self.writeback_index, + body=query, + size=1000) if res['hits']['hits']: return res['hits']['hits'] except ElasticsearchException as e: - logging.exception("Error finding recent pending alerts: %s %s" % (e, query)) + elastalert_logger.exception("Error finding recent pending alerts: %s %s" % (e, query)) return [] def send_pending_alerts(self): @@ -1694,10 +1725,8 @@ def send_pending_alerts(self): # Delete it from the index try: - if self.writeback_es.is_atleastsixtwo(): - self.writeback_es.delete(index=self.writeback_index, id=_id) - else: - self.writeback_es.delete(index=self.writeback_index, doc_type='elastalert', id=_id) + self.writeback_es.delete(index=self.writeback_index, + id=_id) except ElasticsearchException: # TODO: Give this a more relevant exception, try:except: is evil. self.handle_error("Failed to delete alert %s at %s" % (_id, alert_time)) @@ -1724,21 +1753,17 @@ def get_aggregated_matches(self, _id): """ Removes and returns all matches from writeback_es that have aggregate_id == _id """ # XXX if there are more than self.max_aggregation matches, you have big alerts and we will leave entries in ES. 
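With the pre-6.x branches removed, find_recent_pending_alerts always sends the bool-style body assembled above. A minimal sketch of that request, using hypothetical timestamps and an approximate stand-in for inner_query:

# hypothetical example of the body built in find_recent_pending_alerts
query = {
    'query': {
        'bool': {
            'must': {'query_string': {'query': '!_exists_:aggregate_id AND alert_sent:false'}},  # approximate inner_query
            'filter': {'range': {'alert_time': {'from': '2023-01-01T00:00:00Z',   # ts_now() - time_limit
                                                'to': '2023-01-01T02:00:00Z'}}},  # ts_now()
        }
    },
    'sort': {'alert_time': {'order': 'asc'}},
}
# res = writeback_es.search(index=writeback_index, body=query, size=1000)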
- query = {'query': {'query_string': {'query': 'aggregate_id:%s' % (_id)}}, 'sort': {'@timestamp': 'asc'}} + query = {'query': {'query_string': {'query': 'aggregate_id:"%s"' % (_id)}}, 'sort': {'@timestamp': 'asc'}} matches = [] try: - if self.writeback_es.is_atleastsixtwo(): - res = self.writeback_es.search(index=self.writeback_index, body=query, - size=self.max_aggregation) - else: - res = self.writeback_es.deprecated_search(index=self.writeback_index, doc_type='elastalert', - body=query, size=self.max_aggregation) + #modded for elasticsearch ver 6 library compatibility + res = self.writeback_es.search(index=self.writeback_index, + body=query, + size=self.max_aggregation) for match in res['hits']['hits']: matches.append(match['_source']) - if self.writeback_es.is_atleastsixtwo(): - self.writeback_es.delete(index=self.writeback_index, id=match['_id']) - else: - self.writeback_es.delete(index=self.writeback_index, doc_type='elastalert', id=match['_id']) + self.writeback_es.delete(index=self.writeback_index, + id=match['_id']) except (KeyError, ElasticsearchException) as e: self.handle_error("Error fetching aggregated matches: %s" % (e), {'id': _id}) return matches @@ -1750,14 +1775,14 @@ def find_pending_aggregate_alert(self, rule, aggregation_key_value=None): 'must_not': [{'exists': {'field': 'aggregate_id'}}]}}} if aggregation_key_value: query['filter']['bool']['must'].append({'term': {'aggregation_key': aggregation_key_value}}) - if self.writeback_es.is_atleastfive(): - query = {'query': {'bool': query}} + query = {'query': {'bool': query}} query['sort'] = {'alert_time': {'order': 'desc'}} try: - if self.writeback_es.is_atleastsixtwo(): - res = self.writeback_es.search(index=self.writeback_index, body=query, size=1) - else: - res = self.writeback_es.deprecated_search(index=self.writeback_index, doc_type='elastalert', body=query, size=1) + #modded for elasticsearch ver 6 library compatibility + res = self.writeback_es.search(index=self.writeback_index, + doc_type='elastalert', + body=query, + size=1) if len(res['hits']['hits']) == 0: return None except (KeyError, ElasticsearchException) as e: @@ -1802,11 +1827,14 @@ def add_aggregated_alert(self, match, rule): except Exception as e: self.handle_error("Error parsing aggregate send time Cron format %s" % (e), rule['aggregation']['schedule']) else: - if rule.get('aggregate_by_match_time', False): - match_time = ts_to_dt(lookup_es_key(match, rule['timestamp_field'])) - alert_time = match_time + rule['aggregation'] - else: - alert_time = ts_now() + rule['aggregation'] + try: + if rule.get('aggregate_by_match_time', False): + match_time = ts_to_dt(lookup_es_key(match, rule['timestamp_field'])) + alert_time = match_time + rule['aggregation'] + else: + alert_time = ts_now() + rule['aggregation'] + except Exception as e: + self.handle_error("[add_aggregated_alert]Error parsing aggregate send time format %s" % (e), rule['aggregation']) rule['aggregate_alert_time'][aggregation_key_value] = alert_time agg_id = None @@ -1846,25 +1874,28 @@ def add_aggregated_alert(self, match, rule): def silence(self, silence_cache_key=None): """ Silence an alert for a period of time. --silence and --rule must be passed as args. 
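The new try/except above guards both ways of computing the aggregate send time. A minimal sketch of the two paths, assuming a hypothetical match and a 10-minute aggregation window:

import datetime
from elastalert.util import lookup_es_key, ts_now, ts_to_dt

rule = {'aggregation': datetime.timedelta(minutes=10),
        'aggregate_by_match_time': True,
        'timestamp_field': '@timestamp'}
match = {'@timestamp': '2023-01-01T00:00:00Z'}  # hypothetical match document

if rule.get('aggregate_by_match_time', False):
    # anchor the aggregated alert to the event's own timestamp
    alert_time = ts_to_dt(lookup_es_key(match, rule['timestamp_field'])) + rule['aggregation']
else:
    # otherwise anchor it to the current time
    alert_time = ts_now() + rule['aggregation']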
""" if self.debug: - logging.error('--silence not compatible with --debug') + elastalert_logger.error('--silence not compatible with --debug') exit(1) if not self.args.rule: - logging.error('--silence must be used with --rule') + elastalert_logger.error('--silence must be used with --rule') exit(1) # With --rule, self.rules will only contain that specific rule if not silence_cache_key: - silence_cache_key = self.rules[0]['name'] + "._silence" + if self.args.silence_qk_value: + silence_cache_key = self.rules[0]['realert_key'] + "." + self.args.silence_qk_value + else: + silence_cache_key = self.rules[0]['name'] + "._silence" try: silence_ts = parse_deadline(self.args.silence) except (ValueError, TypeError): - logging.error('%s is not a valid time period' % (self.args.silence)) + elastalert_logger.error('%s is not a valid time period' % (self.args.silence)) exit(1) if not self.set_realert(silence_cache_key, silence_ts, 0): - logging.error('Failed to save silence command to Elasticsearch') + elastalert_logger.error('Failed to save silence command to Elasticsearch') exit(1) elastalert_logger.info('Success. %s will be silenced until %s' % (silence_cache_key, silence_ts)) @@ -1889,25 +1920,15 @@ def is_silenced(self, rule_name): return False query = {'term': {'rule_name': rule_name}} sort = {'sort': {'until': {'order': 'desc'}}} - if self.writeback_es.is_atleastfive(): - query = {'query': query} - else: - query = {'filter': query} + query = {'query': query} query.update(sort) try: doc_type = 'silence' index = self.writeback_es.resolve_writeback_index(self.writeback_index, doc_type) - if self.writeback_es.is_atleastsixtwo(): - if self.writeback_es.is_atleastsixsix(): - res = self.writeback_es.search(index=index, size=1, body=query, - _source_includes=['until', 'exponent']) - else: - res = self.writeback_es.search(index=index, size=1, body=query, - _source_include=['until', 'exponent']) - else: - res = self.writeback_es.deprecated_search(index=index, doc_type=doc_type, - size=1, body=query, _source_include=['until', 'exponent']) + #modded for elasticsearch ver 6 library compatibility + res = self.writeback_es.search(index=index, size=1, body=query, + _source_includes=['until', 'exponent']) except ElasticsearchException as e: self.handle_error("Error while querying for alert silence status: %s" % (e), {'rule': rule_name}) @@ -1925,7 +1946,7 @@ def is_silenced(self, rule_name): def handle_error(self, message, data=None): ''' Logs message at error level and writes message, data and traceback to Elasticsearch. ''' - logging.error(message) + elastalert_logger.error(message) body = {'message': message} tb = traceback.format_exc() body['traceback'] = tb.strip().split('\n') @@ -1935,7 +1956,7 @@ def handle_error(self, message, data=None): def handle_uncaught_exception(self, exception, rule): """ Disables a rule and sends a notification. 
""" - logging.error(traceback.format_exc()) + elastalert_logger.error(traceback.format_exc()) self.handle_error('Uncaught exception running rule %s: %s' % (rule['name'], exception), {'rule': rule['name']}) if self.disable_rules_on_error: self.rules = [running_rule for running_rule in self.rules if running_rule['name'] != rule['name']] @@ -1993,7 +2014,7 @@ def get_top_counts(self, rule, starttime, endtime, keys, number=None, qk=None): index = self.get_index(rule, starttime, endtime) hits_terms = self.get_hits_terms(rule, starttime, endtime, index, key, qk, number) - if hits_terms is None: + if hits_terms is None or not hits_terms: top_events_count = {} else: buckets = list(hits_terms.values())[0] @@ -2049,6 +2070,11 @@ def main(args=None): if not args: args = sys.argv[1:] client = ElastAlerter(args) + + if client.prometheus_port and not client.debug: + p = PrometheusWrapper(client) + p.start() + if not client.args.silence: client.start() diff --git a/elastalert/enhancements.py b/elastalert/enhancements.py index 6cc1cdd57..bad17875d 100644 --- a/elastalert/enhancements.py +++ b/elastalert/enhancements.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -from .util import pretty_ts +from elastalert.util import pretty_ts class BaseEnhancement(object): diff --git a/elastalert/es_mappings/5/elastalert.json b/elastalert/es_mappings/5/elastalert.json deleted file mode 100644 index b522933b3..000000000 --- a/elastalert/es_mappings/5/elastalert.json +++ /dev/null @@ -1,30 +0,0 @@ -{ - "elastalert": { - "properties": { - "rule_name": { - "index": "not_analyzed", - "type": "string" - }, - "@timestamp": { - "type": "date", - "format": "dateOptionalTime" - }, - "alert_time": { - "type": "date", - "format": "dateOptionalTime" - }, - "match_time": { - "type": "date", - "format": "dateOptionalTime" - }, - "match_body": { - "type": "object", - "enabled": "false" - }, - "aggregate_id": { - "index": "not_analyzed", - "type": "string" - } - } - } -} diff --git a/elastalert/es_mappings/5/elastalert_error.json b/elastalert/es_mappings/5/elastalert_error.json deleted file mode 100644 index 7f1b3c0a8..000000000 --- a/elastalert/es_mappings/5/elastalert_error.json +++ /dev/null @@ -1,14 +0,0 @@ -{ - "elastalert_error": { - "properties": { - "data": { - "type": "object", - "enabled": "false" - }, - "@timestamp": { - "type": "date", - "format": "dateOptionalTime" - } - } - } -} diff --git a/elastalert/es_mappings/5/elastalert_status.json b/elastalert/es_mappings/5/elastalert_status.json deleted file mode 100644 index f8cd9643f..000000000 --- a/elastalert/es_mappings/5/elastalert_status.json +++ /dev/null @@ -1,14 +0,0 @@ -{ - "elastalert_status": { - "properties": { - "rule_name": { - "index": "not_analyzed", - "type": "string" - }, - "@timestamp": { - "type": "date", - "format": "dateOptionalTime" - } - } - } -} diff --git a/elastalert/es_mappings/5/past_elastalert.json b/elastalert/es_mappings/5/past_elastalert.json deleted file mode 100644 index e10783748..000000000 --- a/elastalert/es_mappings/5/past_elastalert.json +++ /dev/null @@ -1,22 +0,0 @@ -{ - "past_elastalert": { - "properties": { - "rule_name": { - "index": "not_analyzed", - "type": "string" - }, - "match_body": { - "type": "object", - "enabled": "false" - }, - "@timestamp": { - "type": "date", - "format": "dateOptionalTime" - }, - "aggregate_id": { - "index": "not_analyzed", - "type": "string" - } - } - } -} diff --git a/elastalert/es_mappings/5/silence.json b/elastalert/es_mappings/5/silence.json deleted file mode 100644 index b04006da8..000000000 --- 
a/elastalert/es_mappings/5/silence.json +++ /dev/null @@ -1,18 +0,0 @@ -{ - "silence": { - "properties": { - "rule_name": { - "index": "not_analyzed", - "type": "string" - }, - "until": { - "type": "date", - "format": "dateOptionalTime" - }, - "@timestamp": { - "type": "date", - "format": "dateOptionalTime" - } - } - } -} diff --git a/elastalert/es_mappings/6/elastalert.json b/elastalert/es_mappings/7/elastalert.json similarity index 96% rename from elastalert/es_mappings/6/elastalert.json rename to elastalert/es_mappings/7/elastalert.json index 645a67762..2cc97bcfb 100644 --- a/elastalert/es_mappings/6/elastalert.json +++ b/elastalert/es_mappings/7/elastalert.json @@ -29,6 +29,7 @@ "format": "dateOptionalTime" }, "match_body": { + "enabled": "false", "type": "object" }, "aggregate_id": { diff --git a/elastalert/es_mappings/6/elastalert_error.json b/elastalert/es_mappings/7/elastalert_error.json similarity index 100% rename from elastalert/es_mappings/6/elastalert_error.json rename to elastalert/es_mappings/7/elastalert_error.json diff --git a/elastalert/es_mappings/6/elastalert_status.json b/elastalert/es_mappings/7/elastalert_status.json similarity index 100% rename from elastalert/es_mappings/6/elastalert_status.json rename to elastalert/es_mappings/7/elastalert_status.json diff --git a/elastalert/es_mappings/6/past_elastalert.json b/elastalert/es_mappings/7/past_elastalert.json similarity index 100% rename from elastalert/es_mappings/6/past_elastalert.json rename to elastalert/es_mappings/7/past_elastalert.json diff --git a/elastalert/es_mappings/6/silence.json b/elastalert/es_mappings/7/silence.json similarity index 100% rename from elastalert/es_mappings/6/silence.json rename to elastalert/es_mappings/7/silence.json diff --git a/elastalert/es_mappings/8/elastalert.json b/elastalert/es_mappings/8/elastalert.json new file mode 100644 index 000000000..9517163e7 --- /dev/null +++ b/elastalert/es_mappings/8/elastalert.json @@ -0,0 +1,39 @@ +{ + "numeric_detection": true, + "date_detection": false, + "dynamic_templates": [ + { + "strings_as_keyword": { + "mapping": { + "ignore_above": 1024, + "type": "keyword" + }, + "match_mapping_type": "string" + } + } + ], + "properties": { + "rule_name": { + "type": "keyword" + }, + "@timestamp": { + "type": "date", + "format": "date_optional_time" + }, + "alert_time": { + "type": "date", + "format": "date_optional_time" + }, + "match_time": { + "type": "date", + "format": "date_optional_time" + }, + "match_body": { + "enabled": "false", + "type": "object" + }, + "aggregate_id": { + "type": "keyword" + } + } +} diff --git a/elastalert/es_mappings/8/elastalert_error.json b/elastalert/es_mappings/8/elastalert_error.json new file mode 100644 index 000000000..50cbcef74 --- /dev/null +++ b/elastalert/es_mappings/8/elastalert_error.json @@ -0,0 +1,12 @@ +{ + "properties": { + "data": { + "type": "object", + "enabled": "false" + }, + "@timestamp": { + "type": "date", + "format": "date_optional_time" + } + } +} diff --git a/elastalert/es_mappings/8/elastalert_status.json b/elastalert/es_mappings/8/elastalert_status.json new file mode 100644 index 000000000..05763e3ac --- /dev/null +++ b/elastalert/es_mappings/8/elastalert_status.json @@ -0,0 +1,11 @@ +{ + "properties": { + "rule_name": { + "type": "keyword" + }, + "@timestamp": { + "type": "date", + "format": "date_optional_time" + } + } +} diff --git a/elastalert/es_mappings/8/past_elastalert.json b/elastalert/es_mappings/8/past_elastalert.json new file mode 100644 index 000000000..be8ef80f0 --- /dev/null 
+++ b/elastalert/es_mappings/8/past_elastalert.json @@ -0,0 +1,18 @@ +{ + "properties": { + "rule_name": { + "type": "keyword" + }, + "match_body": { + "type": "object", + "enabled": "false" + }, + "@timestamp": { + "type": "date", + "format": "date_optional_time" + }, + "aggregate_id": { + "type": "keyword" + } + } +} diff --git a/elastalert/es_mappings/8/silence.json b/elastalert/es_mappings/8/silence.json new file mode 100644 index 000000000..63ec2de3f --- /dev/null +++ b/elastalert/es_mappings/8/silence.json @@ -0,0 +1,15 @@ +{ + "properties": { + "rule_name": { + "type": "keyword" + }, + "until": { + "type": "date", + "format": "date_optional_time" + }, + "@timestamp": { + "type": "date", + "format": "date_optional_time" + } + } +} diff --git a/elastalert/kibana.py b/elastalert/kibana.py deleted file mode 100644 index de690494e..000000000 --- a/elastalert/kibana.py +++ /dev/null @@ -1,288 +0,0 @@ -# -*- coding: utf-8 -*- -# flake8: noqa -import os.path -import urllib.error -import urllib.parse -import urllib.request - -from .util import EAException - - -dashboard_temp = {'editable': True, - 'failover': False, - 'index': {'default': 'NO_TIME_FILTER_OR_INDEX_PATTERN_NOT_MATCHED', - 'interval': 'none', - 'pattern': '', - 'warm_fields': True}, - 'loader': {'hide': False, - 'load_elasticsearch': True, - 'load_elasticsearch_size': 20, - 'load_gist': True, - 'load_local': True, - 'save_default': True, - 'save_elasticsearch': True, - 'save_gist': False, - 'save_local': True, - 'save_temp': True, - 'save_temp_ttl': '30d', - 'save_temp_ttl_enable': True}, - 'nav': [{'collapse': False, - 'enable': True, - 'filter_id': 0, - 'notice': False, - 'now': False, - 'refresh_intervals': ['5s', - '10s', - '30s', - '1m', - '5m', - '15m', - '30m', - '1h', - '2h', - '1d'], - 'status': 'Stable', - 'time_options': ['5m', - '15m', - '1h', - '6h', - '12h', - '24h', - '2d', - '7d', - '30d'], - 'timefield': '@timestamp', - 'type': 'timepicker'}], - 'panel_hints': True, - 'pulldowns': [{'collapse': False, - 'enable': True, - 'notice': True, - 'type': 'filtering'}], - 'refresh': False, - 'rows': [{'collapsable': True, - 'collapse': False, - 'editable': True, - 'height': '350px', - 'notice': False, - 'panels': [{'annotate': {'enable': False, - 'field': '_type', - 'query': '*', - 'size': 20, - 'sort': ['_score', 'desc']}, - 'auto_int': True, - 'bars': True, - 'derivative': False, - 'editable': True, - 'fill': 3, - 'grid': {'max': None, 'min': 0}, - 'group': ['default'], - 'interactive': True, - 'interval': '1m', - 'intervals': ['auto', - '1s', - '1m', - '5m', - '10m', - '30m', - '1h', - '3h', - '12h', - '1d', - '1w', - '1M', - '1y'], - 'legend': True, - 'legend_counts': True, - 'lines': False, - 'linewidth': 3, - 'mode': 'count', - 'options': True, - 'percentage': False, - 'pointradius': 5, - 'points': False, - 'queries': {'ids': [0], 'mode': 'all'}, - 'resolution': 100, - 'scale': 1, - 'show_query': True, - 'span': 12, - 'spyable': True, - 'stack': True, - 'time_field': '@timestamp', - 'timezone': 'browser', - 'title': 'Events over time', - 'tooltip': {'query_as_alias': True, - 'value_type': 'cumulative'}, - 'type': 'histogram', - 'value_field': None, - 'x-axis': True, - 'y-axis': True, - 'y_format': 'none', - 'zerofill': True, - 'zoomlinks': True}], - 'title': 'Graph'}, - {'collapsable': True, - 'collapse': False, - 'editable': True, - 'height': '350px', - 'notice': False, - 'panels': [{'all_fields': False, - 'editable': True, - 'error': False, - 'field_list': True, - 'fields': [], - 'group': ['default'], - 
'header': True, - 'highlight': [], - 'localTime': True, - 'normTimes': True, - 'offset': 0, - 'overflow': 'min-height', - 'pages': 5, - 'paging': True, - 'queries': {'ids': [0], 'mode': 'all'}, - 'size': 100, - 'sort': ['@timestamp', 'desc'], - 'sortable': True, - 'span': 12, - 'spyable': True, - 'status': 'Stable', - 'style': {'font-size': '9pt'}, - 'timeField': '@timestamp', - 'title': 'All events', - 'trimFactor': 300, - 'type': 'table'}], - 'title': 'Events'}], - 'services': {'filter': {'ids': [0], - 'list': {'0': {'active': True, - 'alias': '', - 'field': '@timestamp', - 'from': 'now-24h', - 'id': 0, - 'mandate': 'must', - 'to': 'now', - 'type': 'time'}}}, - 'query': {'ids': [0], - 'list': {'0': {'alias': '', - 'color': '#7EB26D', - 'enable': True, - 'id': 0, - 'pin': False, - 'query': '', - 'type': 'lucene'}}}}, - 'style': 'dark', - 'title': 'ElastAlert Alert Dashboard'} - -kibana4_time_temp = "(refreshInterval:(display:Off,section:0,value:0),time:(from:'%s',mode:absolute,to:'%s'))" - - -def set_time(dashboard, start, end): - dashboard['services']['filter']['list']['0']['from'] = start - dashboard['services']['filter']['list']['0']['to'] = end - - -def set_index_name(dashboard, name): - dashboard['index']['default'] = name - - -def set_timestamp_field(dashboard, field): - # set the nav timefield if we don't want @timestamp - dashboard['nav'][0]['timefield'] = field - - # set the time_field for each of our panels - for row in dashboard.get('rows'): - for panel in row.get('panels'): - panel['time_field'] = field - - # set our filter's time field - dashboard['services']['filter']['list']['0']['field'] = field - - -def add_filter(dashboard, es_filter): - next_id = max(dashboard['services']['filter']['ids']) + 1 - - kibana_filter = {'active': True, - 'alias': '', - 'id': next_id, - 'mandate': 'must'} - - if 'not' in es_filter: - es_filter = es_filter['not'] - kibana_filter['mandate'] = 'mustNot' - - if 'query' in es_filter: - es_filter = es_filter['query'] - if 'query_string' in es_filter: - kibana_filter['type'] = 'querystring' - kibana_filter['query'] = es_filter['query_string']['query'] - elif 'term' in es_filter: - kibana_filter['type'] = 'field' - f_field, f_query = list(es_filter['term'].items())[0] - # Wrap query in quotes, otherwise certain characters cause Kibana to throw errors - if isinstance(f_query, str): - f_query = '"%s"' % (f_query.replace('"', '\\"')) - if isinstance(f_query, list): - # Escape quotes - f_query = [item.replace('"', '\\"') for item in f_query] - # Wrap in quotes - f_query = ['"%s"' % (item) for item in f_query] - # Convert into joined query - f_query = '(%s)' % (' AND '.join(f_query)) - kibana_filter['field'] = f_field - kibana_filter['query'] = f_query - elif 'range' in es_filter: - kibana_filter['type'] = 'range' - f_field, f_range = list(es_filter['range'].items())[0] - kibana_filter['field'] = f_field - kibana_filter.update(f_range) - else: - raise EAException("Could not parse filter %s for Kibana" % (es_filter)) - - dashboard['services']['filter']['ids'].append(next_id) - dashboard['services']['filter']['list'][str(next_id)] = kibana_filter - - -def set_name(dashboard, name): - dashboard['title'] = name - - -def set_included_fields(dashboard, fields): - dashboard['rows'][1]['panels'][0]['fields'] = list(set(fields)) - - -def filters_from_dashboard(db): - filters = db['services']['filter']['list'] - config_filters = [] - or_filters = [] - for filter in list(filters.values()): - filter_type = filter['type'] - if filter_type == 'time': - continue - - if 
filter_type == 'querystring': - config_filter = {'query': {'query_string': {'query': filter['query']}}} - - if filter_type == 'field': - config_filter = {'term': {filter['field']: filter['query']}} - - if filter_type == 'range': - config_filter = {'range': {filter['field']: {'from': filter['from'], 'to': filter['to']}}} - - if filter['mandate'] == 'mustNot': - config_filter = {'not': config_filter} - - if filter['mandate'] == 'either': - or_filters.append(config_filter) - else: - config_filters.append(config_filter) - - if or_filters: - config_filters.append({'or': or_filters}) - - return config_filters - - -def kibana4_dashboard_link(dashboard, starttime, endtime): - dashboard = os.path.expandvars(dashboard) - time_settings = kibana4_time_temp % (starttime, endtime) - time_settings = urllib.parse.quote(time_settings) - return "%s?_g=%s" % (dashboard, time_settings) diff --git a/elastalert/kibana_discover.py b/elastalert/kibana_discover.py index 7e4dbb5d1..1aa45a910 100644 --- a/elastalert/kibana_discover.py +++ b/elastalert/kibana_discover.py @@ -8,20 +8,23 @@ import urllib.parse from .util import EAException +from .util import elastalert_logger from .util import lookup_es_key from .util import ts_add kibana_default_timedelta = datetime.timedelta(minutes=10) -kibana5_kibana6_versions = frozenset(['5.6', '6.0', '6.1', '6.2', '6.3', '6.4', '6.5', '6.6', '6.7', '6.8']) -kibana7_versions = frozenset(['7.0', '7.1', '7.2', '7.3']) +kibana_versions = frozenset([ + '7.0', '7.1', '7.2', '7.3', '7.4', '7.5', '7.6', '7.7', '7.8', '7.9', '7.10', '7.11', '7.12', '7.13', '7.14', '7.15', '7.16', '7.17', + '8.0', '8.1', '8.2', '8.3', '8.4', '8.5', '8.6' + ]) def generate_kibana_discover_url(rule, match): ''' Creates a link for a kibana discover app. ''' discover_app_url = rule.get('kibana_discover_app_url') if not discover_app_url: - logging.warning( + elastalert_logger.warning( 'Missing kibana_discover_app_url for rule %s' % ( rule.get('name', '') ) @@ -30,7 +33,7 @@ def generate_kibana_discover_url(rule, match): kibana_version = rule.get('kibana_discover_version') if not kibana_version: - logging.warning( + elastalert_logger.warning( 'Missing kibana_discover_version for rule %s' % ( rule.get('name', '') ) @@ -39,7 +42,7 @@ def generate_kibana_discover_url(rule, match): index = rule.get('kibana_discover_index_pattern_id') if not index: - logging.warning( + elastalert_logger.warning( 'Missing kibana_discover_index_pattern_id for rule %s' % ( rule.get('name', '') ) @@ -61,16 +64,12 @@ def generate_kibana_discover_url(rule, match): to_timedelta = rule.get('kibana_discover_to_timedelta', timeframe) to_time = ts_add(timestamp, to_timedelta) - if kibana_version in kibana5_kibana6_versions: - globalState = kibana6_disover_global_state(from_time, to_time) - appState = kibana_discover_app_state(index, columns, filters, query_keys, match) - - elif kibana_version in kibana7_versions: + if kibana_version in kibana_versions: globalState = kibana7_disover_global_state(from_time, to_time) appState = kibana_discover_app_state(index, columns, filters, query_keys, match) else: - logging.warning( + elastalert_logger.warning( 'Unknown kibana discover application version %s for rule %s' % ( kibana_version, rule.get('name', '') @@ -85,20 +84,6 @@ def generate_kibana_discover_url(rule, match): ) -def kibana6_disover_global_state(from_time, to_time): - return prison.dumps( { - 'refreshInterval': { - 'pause': True, - 'value': 0 - }, - 'time': { - 'from': from_time, - 'mode': 'absolute', - 'to': to_time - } - } ) - - def 
kibana7_disover_global_state(from_time, to_time): return prison.dumps( { 'filters': [], @@ -117,6 +102,15 @@ def kibana_discover_app_state(index, columns, filters, query_keys, match): app_filters = [] if filters: + + # Remove nested query since the outer most query key will break Kibana 8. + new_filters = [] + for filter in filters: + if 'query' in filter: + filter = filter['query'] + new_filters.append(filter) + filters = new_filters + bool_filter = { 'must': filters } app_filters.append( { '$state': { diff --git a/elastalert/kibana_external_url_formatter.py b/elastalert/kibana_external_url_formatter.py new file mode 100644 index 000000000..5d739e72a --- /dev/null +++ b/elastalert/kibana_external_url_formatter.py @@ -0,0 +1,159 @@ +import boto3 +import os +from urllib.parse import parse_qsl, urlencode, urljoin, urlparse, urlsplit, urlunsplit + +import requests +from requests import RequestException +from requests.auth import AuthBase, HTTPBasicAuth + +from elastalert.auth import RefeshableAWSRequestsAuth +from elastalert.util import EAException + +def append_security_tenant(url, security_tenant): + '''Appends the security_tenant query string parameter to the url''' + parsed = urlsplit(url) + + if parsed.query: + qs = parse_qsl(parsed.query, keep_blank_values=True, strict_parsing=True) + else: + qs = [] + qs.append(('security_tenant', security_tenant)) + + new_query = urlencode(qs) + new_args = parsed._replace(query=new_query) + new_url = urlunsplit(new_args) + return new_url + +class KibanaExternalUrlFormatter: + '''Interface for formatting external Kibana urls''' + + def format(self, relative_url: str) -> str: + raise NotImplementedError() + +class AbsoluteKibanaExternalUrlFormatter(KibanaExternalUrlFormatter): + '''Formats absolute external Kibana urls''' + + def __init__(self, base_url: str, security_tenant: str) -> None: + self.base_url = base_url + self.security_tenant = security_tenant + + def format(self, relative_url: str) -> str: + url = urljoin(self.base_url, relative_url) + if self.security_tenant: + url = append_security_tenant(url, self.security_tenant) + return url + +class ShortKibanaExternalUrlFormatter(KibanaExternalUrlFormatter): + '''Formats external urls using the Kibana Shorten URL API''' + + def __init__(self, base_url: str, auth: AuthBase, security_tenant: str, new_shortener: bool, verify: bool) -> None: + self.auth = auth + self.security_tenant = security_tenant + self.goto_url = urljoin(base_url, 'goto/') + self.use_new_shortener = new_shortener + self.verify = verify + + if self.use_new_shortener: + path = 'api/short_url' + else: + path = 'api/shorten_url' + + shorten_url = urljoin(base_url, path) + if security_tenant: + shorten_url = append_security_tenant(shorten_url, security_tenant) + self.shorten_url = shorten_url + + def format(self, relative_url: str) -> str: + # join with '/' to ensure relative to root of app + long_url = urljoin('/', relative_url) + if self.security_tenant: + long_url = append_security_tenant(long_url, self.security_tenant) + + if self.use_new_shortener: + json = { 'locatorId': 'LEGACY_SHORT_URL_LOCATOR', 'params': { 'url': long_url } } + response_param = 'id' + else: + json = { 'url': long_url } + response_param = 'urlId' + + try: + response = requests.post( + url=self.shorten_url, + auth=self.auth, + headers={ + 'kbn-xsrf': 'elastalert', + 'osd-xsrf': 'elastalert' + }, + json=json, + verify=self.verify + ) + response.raise_for_status() + except RequestException as e: + raise EAException("Failed to invoke Kibana Shorten URL API: %s" % 
e) + + response_body = response.json() + url_id = response_body.get(response_param) + + goto_url = urljoin(self.goto_url, url_id) + if self.security_tenant: + goto_url = append_security_tenant(goto_url, self.security_tenant) + return goto_url + + +def create_kibana_auth(kibana_url, rule) -> AuthBase: + '''Creates a Kibana http authentication for use by requests''' + + # Basic + username = rule.get('kibana_username') + password = rule.get('kibana_password') + if username and password: + return HTTPBasicAuth(username, password) + + # AWS SigV4 + aws_region = rule.get('aws_region') + if not aws_region: + aws_region = os.environ.get('AWS_DEFAULT_REGION') + if aws_region: + + aws_profile = rule.get('profile') + session = boto3.session.Session( + profile_name=aws_profile, + region_name=aws_region + ) + credentials = session.get_credentials() + + kibana_host = urlparse(kibana_url).hostname + + return RefeshableAWSRequestsAuth( + refreshable_credential=credentials, + aws_host=kibana_host, + aws_region=aws_region, + aws_service='es' + ) + + # Unauthenticated + return None + +def is_kibana_atleastsevensixteen(version: str): + """ + Returns True when the Kibana server version >= 7.16 + """ + major, minor = list(map(int, version.split(".")[:2])) + return major > 7 or (major == 7 and minor >= 16) + +def create_kibana_external_url_formatter( + rule, + shorten: bool, + security_tenant: str, +) -> KibanaExternalUrlFormatter: + '''Creates a Kibana external url formatter''' + + base_url = rule.get('kibana_url') + new_shortener = is_kibana_atleastsevensixteen(rule.get('kibana_discover_version', '0.0')) + + if shorten: + verify = rule.get('kibana_verify_certs', True) + auth = create_kibana_auth(base_url, rule) + return ShortKibanaExternalUrlFormatter(base_url, auth, security_tenant, new_shortener, verify) + + return AbsoluteKibanaExternalUrlFormatter(base_url, security_tenant) diff --git a/elastalert/loaders.py b/elastalert/loaders.py index 771194768..18ea2479d 100644 --- a/elastalert/loaders.py +++ b/elastalert/loaders.py @@ -2,34 +2,76 @@ import copy import datetime import hashlib -import logging import os import sys import jsonschema import yaml import yaml.scanner -from staticconf.loader import yaml_loader - -from . import alerts -from . import enhancements -from . 
import ruletypes -from .opsgenie import OpsGenieAlerter -from .util import dt_to_ts -from .util import dt_to_ts_with_format -from .util import dt_to_unix -from .util import dt_to_unixms -from .util import EAException -from .util import get_module -from .util import ts_to_dt -from .util import ts_to_dt_with_format -from .util import unix_to_dt -from .util import unixms_to_dt +from jinja2 import Environment +from jinja2 import FileSystemLoader +from jinja2 import Template + +import elastalert.alerters.alerta +import elastalert.alerters.chatwork +import elastalert.alerters.command +import elastalert.alerters.datadog +import elastalert.alerters.debug +import elastalert.alerters.dingtalk +import elastalert.alerters.discord +import elastalert.alerters.exotel +import elastalert.alerters.gitter +import elastalert.alerters.gelf +import elastalert.alerters.googlechat +import elastalert.alerters.httppost +import elastalert.alerters.httppost2 +import elastalert.alerters.line +import elastalert.alerters.pagertree +import elastalert.alerters.rocketchat +import elastalert.alerters.servicenow +import elastalert.alerters.ses +import elastalert.alerters.stomp +import elastalert.alerters.telegram +import elastalert.alerters.thehive +import elastalert.alerters.twilio +import elastalert.alerters.victorops +from elastalert import alerts +from elastalert import enhancements +from elastalert import ruletypes +from elastalert.alerters.alertmanager import AlertmanagerAlerter +from elastalert.alerters.email import EmailAlerter +from elastalert.alerters.jira import JiraAlerter +from elastalert.alerters.mattermost import MattermostAlerter +from elastalert.alerters.opsgenie import OpsGenieAlerter +from elastalert.alerters.pagerduty import PagerDutyAlerter +from elastalert.alerters.slack import SlackAlerter +from elastalert.alerters.sns import SnsAlerter +from elastalert.alerters.teams import MsTeamsAlerter +from elastalert.alerters.zabbix import ZabbixAlerter +from elastalert.alerters.tencentsms import TencentSMSAlerter +from elastalert.util import dt_to_ts +from elastalert.util import dt_to_ts_with_format +from elastalert.util import dt_to_unix +from elastalert.util import dt_to_unixms +from elastalert.util import EAException +from elastalert.util import elastalert_logger +from elastalert.util import get_module +from elastalert.util import ts_to_dt +from elastalert.util import ts_to_dt_with_format +from elastalert.util import unix_to_dt +from elastalert.util import unixms_to_dt +from elastalert.yaml import read_yaml + + +# load rules schema +def load_rule_schema(): + schema_path = os.path.join(os.path.dirname(__file__), 'schema.yaml') + with open(schema_path) as schema_file: + schema_yml = yaml.load(schema_file, Loader=yaml.FullLoader) + return jsonschema.Draft7Validator(schema_yml) class RulesLoader(object): - # import rule dependency - import_rules = {} # Required global (config.yaml) configuration options for the loader required_globals = frozenset([]) @@ -51,33 +93,46 @@ class RulesLoader(object): 'metric_aggregation': ruletypes.MetricAggregationRule, 'percentage_match': ruletypes.PercentageMatchRule, 'spike_aggregation': ruletypes.SpikeMetricAggregationRule, + 'error_rate': ruletypes.ErrorRateRule, #Adding Error Rate Rule type + 'advanced_query': ruletypes.AdvancedQueryRule } # Used to map names of alerts to their classes alerts_mapping = { - 'email': alerts.EmailAlerter, - 'jira': alerts.JiraAlerter, + 'alertmanager': AlertmanagerAlerter, + 'tencent_sms': TencentSMSAlerter, + 'email': EmailAlerter, + 'jira': 
JiraAlerter, 'opsgenie': OpsGenieAlerter, - 'stomp': alerts.StompAlerter, - 'debug': alerts.DebugAlerter, - 'command': alerts.CommandAlerter, - 'sns': alerts.SnsAlerter, - 'hipchat': alerts.HipChatAlerter, - 'stride': alerts.StrideAlerter, - 'ms_teams': alerts.MsTeamsAlerter, - 'slack': alerts.SlackAlerter, - 'mattermost': alerts.MattermostAlerter, - 'pagerduty': alerts.PagerDutyAlerter, - 'exotel': alerts.ExotelAlerter, - 'twilio': alerts.TwilioAlerter, - 'victorops': alerts.VictorOpsAlerter, - 'telegram': alerts.TelegramAlerter, - 'googlechat': alerts.GoogleChatAlerter, - 'gitter': alerts.GitterAlerter, - 'servicenow': alerts.ServiceNowAlerter, - 'alerta': alerts.AlertaAlerter, - 'post': alerts.HTTPPostAlerter, - 'hivealerter': alerts.HiveAlerter + 'stomp': elastalert.alerters.stomp.StompAlerter, + 'debug': elastalert.alerters.debug.DebugAlerter, + 'command': elastalert.alerters.command.CommandAlerter, + 'sns': SnsAlerter, + 'ms_teams': MsTeamsAlerter, + 'slack': SlackAlerter, + 'mattermost': MattermostAlerter, + 'pagerduty': PagerDutyAlerter, + 'exotel': elastalert.alerters.exotel.ExotelAlerter, + 'twilio': elastalert.alerters.twilio.TwilioAlerter, + 'victorops': elastalert.alerters.victorops.VictorOpsAlerter, + 'telegram': elastalert.alerters.telegram.TelegramAlerter, + 'googlechat': elastalert.alerters.googlechat.GoogleChatAlerter, + 'gitter': elastalert.alerters.gitter.GitterAlerter, + 'servicenow': elastalert.alerters.servicenow.ServiceNowAlerter, + 'alerta': elastalert.alerters.alerta.AlertaAlerter, + 'post': elastalert.alerters.httppost.HTTPPostAlerter, + 'post2': elastalert.alerters.httppost2.HTTPPost2Alerter, + 'pagertree': elastalert.alerters.pagertree.PagerTreeAlerter, + 'linenotify': elastalert.alerters.line.LineNotifyAlerter, + 'hivealerter': elastalert.alerters.thehive.HiveAlerter, + 'zabbix': ZabbixAlerter, + 'discord': elastalert.alerters.discord.DiscordAlerter, + 'dingtalk': elastalert.alerters.dingtalk.DingTalkAlerter, + 'chatwork': elastalert.alerters.chatwork.ChatworkAlerter, + 'datadog': elastalert.alerters.datadog.DatadogAlerter, + 'ses': elastalert.alerters.ses.SesAlerter, + 'rocketchat': elastalert.alerters.rocketchat.RocketChatAlerter, + 'gelf': elastalert.alerters.gelf.GelfAlerter } # A partial ordering of alert types. 
Relative order will be preserved in the resulting alerts list @@ -89,12 +144,12 @@ class RulesLoader(object): base_config = {} - def __init__(self, conf): - # schema for rule yaml - self.rule_schema = jsonschema.Draft7Validator( - yaml.load(open(os.path.join(os.path.dirname(__file__), 'schema.yaml')), Loader=yaml.FullLoader)) + jinja_environment = Environment(loader=FileSystemLoader("")) + def __init__(self, conf): + self.rule_schema = load_rule_schema() self.base_config = copy.deepcopy(conf) + self.import_rules = {} # import rule dependency def load(self, conf, args=None): """ @@ -115,14 +170,11 @@ def load(self, conf, args=None): rule = self.load_configuration(rule_file, conf, args) # A rule failed to load, don't try to process it if not rule: - logging.error('Invalid rule file skipped: %s' % rule_file) - continue - # By setting "is_enabled: False" in rule file, a rule is easily disabled - if 'is_enabled' in rule and not rule['is_enabled']: + elastalert_logger.error('Invalid rule file skipped: %s' % rule_file) continue if rule['name'] in names: raise EAException('Duplicate rule named %s' % (rule['name'])) - except EAException as e: + except EAException as e: raise EAException('Error loading file %s: %s' % (rule_file, e)) rules.append(rule) @@ -164,7 +216,7 @@ def get_import_rule(self, rule): Retrieve the name of the rule to import. :param dict rule: Rule dict :return: rule name that will all `get_yaml` to retrieve the yaml of the rule - :rtype: str + :rtype: str or List[str] """ return rule['import'] @@ -192,9 +244,24 @@ def load_yaml(self, filename): 'rule_file': filename, } - self.import_rules.pop(filename, None) # clear `filename` dependency + current_path = filename + + # Imports are applied using a Depth First Search (DFS) traversal. + # If a rule defines multiple imports, both of which define the same value, + # the value from the later import will take precedent. + import_paths_stack = [] + while True: - loaded = self.get_yaml(filename) + loaded = self.get_yaml(current_path) + + #Setting default operator for filters as AND as in elastalert-0.1.35 + if 'filter' in loaded: + for filter in loaded['filter']: + if 'query' in filter and filter['query'] != None: + if 'query_string' in filter['query'] and filter['query']['query_string']!= None: + filter['query']['query_string']['default_operator'] = "AND" + else: + elastalert_logger.info("Query is None in file: %s",filename) # Special case for merging filters - if both files specify a filter merge (AND) them if 'filter' in rule and 'filter' in loaded: @@ -202,15 +269,30 @@ def load_yaml(self, filename): loaded.update(rule) rule = loaded - if 'import' in rule: - # Find the path of the next file. - import_filename = self.get_import_rule(rule) - # set dependencies - rules = self.import_rules.get(filename, []) - rules.append(import_filename) - self.import_rules[filename] = rules - filename = import_filename - del (rule['import']) # or we could go on forever! 
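The stack-based import handling here lets a rule list several imports; per the comment above, when two imports set the same key the later one takes precedence, and the rule's own values always win. A minimal sketch emulating that merge order with hypothetical files:

# hypothetical contents of two imported files and the rule that lists them
base_a = {'timeframe': {'minutes': 5}, 'index': 'logs-*'}    # base_a.yaml
base_b = {'timeframe': {'minutes': 30}}                       # base_b.yaml
rule = {'name': 'demo', 'import': ['base_a.yaml', 'base_b.yaml']}

# later imports are popped off the stack first, and keys already present are never overwritten
for loaded in (base_b, base_a):
    merged = dict(loaded)
    merged.update(rule)   # existing rule keys take precedence over the import
    rule = merged

assert rule['timeframe'] == {'minutes': 30}   # later import wins on conflicts
assert rule['index'] == 'logs-*'              # keys defined only once still come through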
+ + if 'import' not in rule: + # clear import_rules cache for current path + self.import_rules.pop(current_path, None) + + else: + # read the set of import paths from the rule + import_paths = self.get_import_rule(rule) + if type(import_paths) is str: + import_paths = [import_paths] + + # remove the processed import property to prevent infinite loop + del (rule['import']) + + # update import_rules cache for current path + self.import_rules[current_path] = import_paths + + # push the imports paths onto the top of the stack + for import_path in import_paths: + import_paths_stack.append(import_path) + + # pop the next import path from the top of the stack + if len(import_paths_stack) > 0: + current_path = import_paths_stack.pop() else: break @@ -224,8 +306,6 @@ def load_options(self, rule, conf, filename, args=None): :param filename: Name of the rule :param args: Arguments """ - self.adjust_deprecated_values(rule) - try: self.rule_schema.validate(rule) except jsonschema.ValidationError as e: @@ -254,10 +334,6 @@ def load_options(self, rule, conf, filename, args=None): rule['bucket_interval_timedelta'] = datetime.timedelta(**rule['bucket_interval']) if 'exponential_realert' in rule: rule['exponential_realert'] = datetime.timedelta(**rule['exponential_realert']) - if 'kibana4_start_timedelta' in rule: - rule['kibana4_start_timedelta'] = datetime.timedelta(**rule['kibana4_start_timedelta']) - if 'kibana4_end_timedelta' in rule: - rule['kibana4_end_timedelta'] = datetime.timedelta(**rule['kibana4_end_timedelta']) if 'kibana_discover_from_timedelta' in rule: rule['kibana_discover_from_timedelta'] = datetime.timedelta(**rule['kibana_discover_from_timedelta']) if 'kibana_discover_to_timedelta' in rule: @@ -270,6 +346,7 @@ def load_options(self, rule, conf, filename, args=None): rule.setdefault(key, val) rule.setdefault('name', os.path.splitext(filename)[0]) rule.setdefault('realert', datetime.timedelta(seconds=0)) + rule.setdefault('realert_key', rule['name']) rule.setdefault('aggregation', datetime.timedelta(seconds=0)) rule.setdefault('query_delay', datetime.timedelta(seconds=0)) rule.setdefault('timestamp_field', '@timestamp') @@ -279,6 +356,8 @@ def load_options(self, rule, conf, filename, args=None): rule.setdefault('_source_enabled', True) rule.setdefault('use_local_time', True) rule.setdefault('description', "") + rule.setdefault('jinja_root_name', "_data") + rule.setdefault('query_timezone', "") # Set timestamp_type conversion function, used when generating queries and processing hits rule['timestamp_type'] = rule['timestamp_type'].strip().lower() @@ -293,6 +372,9 @@ def load_options(self, rule, conf, filename, args=None): rule['dt_to_ts'] = dt_to_unixms elif rule['timestamp_type'] == 'custom': def _ts_to_dt_with_format(ts): + if 'timestamp_to_datetime_format_expr' in rule: + # eval expression passing 'ts' before trying to parse it into datetime. 
+ ts = eval(rule['timestamp_to_datetime_format_expr'], {'ts': ts}) return ts_to_dt_with_format(ts, ts_format=rule['timestamp_format']) def _dt_to_ts_with_format(dt): @@ -315,13 +397,6 @@ def _dt_to_ts_with_format(dt): rule.setdefault('client_cert', conf.get('client_cert')) rule.setdefault('client_key', conf.get('client_key')) - # Set HipChat options from global config - rule.setdefault('hipchat_msg_color', 'red') - rule.setdefault('hipchat_domain', 'api.hipchat.com') - rule.setdefault('hipchat_notify', True) - rule.setdefault('hipchat_from', '') - rule.setdefault('hipchat_ignore_ssl_errors', False) - # Make sure we have required options if self.required_locals - frozenset(list(rule.keys())): raise EAException('Missing required option(s): %s' % (', '.join(self.required_locals - frozenset(list(rule.keys()))))) @@ -329,6 +404,11 @@ def _dt_to_ts_with_format(dt): if 'include' in rule and type(rule['include']) != list: raise EAException('include option must be a list') + #setting default config fields for error_rate + if (rule['type'] == 'error_rate'): + rule.setdefault('error_condition','exception.type:*') + rule.setdefault('unique_column','traceID') + raw_query_key = rule.get('query_key') if isinstance(raw_query_key, list): if len(raw_query_key) > 1: @@ -365,24 +445,6 @@ def _dt_to_ts_with_format(dt): include.append(rule['timestamp_field']) rule['include'] = list(set(include)) - # Check that generate_kibana_url is compatible with the filters - if rule.get('generate_kibana_link'): - for es_filter in rule.get('filter'): - if es_filter: - if 'not' in es_filter: - es_filter = es_filter['not'] - if 'query' in es_filter: - es_filter = es_filter['query'] - if list(es_filter.keys())[0] not in ('term', 'query_string', 'range'): - raise EAException( - 'generate_kibana_link is incompatible with filters other than term, query_string and range.' - 'Consider creating a dashboard and using use_kibana_dashboard instead.') - - # Check that doc_type is provided if use_count/terms_query - if rule.get('use_count_query') or rule.get('use_terms_query'): - if 'doc_type' not in rule: - raise EAException('doc_type must be specified.') - # Check that query_key is set if use_terms_query if rule.get('use_terms_query'): if 'query_key' not in rule: @@ -393,14 +455,24 @@ def _dt_to_ts_with_format(dt): if rule.get('use_strftime_index'): for token in ['%y', '%M', '%D']: if token in rule.get('index'): - logging.warning('Did you mean to use %s in the index? ' - 'The index will be formatted like %s' % (token, - datetime.datetime.now().strftime( - rule.get('index')))) + elastalert_logger.warning('Did you mean to use %s in the index? ' + 'The index will be formatted like %s' % (token, + datetime.datetime.now().strftime( + rule.get('index')))) if rule.get('scan_entire_timeframe') and not rule.get('timeframe'): raise EAException('scan_entire_timeframe can only be used if there is a timeframe specified') + self.load_jinja_template(rule) + + def load_jinja_template(self, rule): + if rule.get('alert_text_type') == 'alert_text_jinja': + jinja_template_path = rule.get('jinja_template_path') + if jinja_template_path: + rule["jinja_template"] = self.jinja_environment.get_or_select_template(jinja_template_path) + else: + rule["jinja_template"] = Template(str(rule.get('alert_text', ''))) + def load_modules(self, rule, args=None): """ Loads things that could be modules. Enhancements, alerts and rule type. 
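load_jinja_template above either resolves jinja_template_path through the Jinja environment or falls back to treating alert_text itself as an inline template. A minimal sketch of the inline path, with hypothetical rule values:

from jinja2 import Template

rule = {'alert_text_type': 'alert_text_jinja',
        'alert_text': '{{ num_hits }} hits for {{ rule_name }}'}   # hypothetical inline template

template = Template(str(rule.get('alert_text', '')))   # same fallback as load_jinja_template
print(template.render(num_hits=12, rule_name='demo-rule'))
# -> '12 hits for demo-rule'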
""" # Set match enhancements @@ -448,6 +520,7 @@ def normalize_config(alert): name, config = next(iter(list(alert.items()))) config_copy = copy.copy(rule) config_copy.update(config) # warning, this (intentionally) mutates the rule dict + self.load_jinja_template(config_copy) return name, config_copy else: raise EAException() @@ -476,18 +549,6 @@ def create_alert(alert, alert_config): return alert_field - @staticmethod - def adjust_deprecated_values(rule): - # From rename of simple HTTP alerter - if rule.get('type') == 'simple': - rule['type'] = 'post' - if 'simple_proxy' in rule: - rule['http_post_proxy'] = rule['simple_proxy'] - if 'simple_webhook_url' in rule: - rule['http_post_url'] = rule['simple_webhook_url'] - logging.warning( - '"simple" alerter has been renamed "post" and comptability may be removed in a future release.') - class FileRulesLoader(RulesLoader): @@ -498,20 +559,30 @@ def get_names(self, conf, use_rule=None): # Passing a filename directly can bypass rules_folder and .yaml checks if use_rule and os.path.isfile(use_rule): return [use_rule] - rule_folder = conf['rules_folder'] + + # In case of a bad type, convert string to list: + rule_folders = conf['rules_folder'] if isinstance(conf['rules_folder'], list) else [conf['rules_folder']] rule_files = [] if 'scan_subdirectories' in conf and conf['scan_subdirectories']: - for root, folders, files in os.walk(rule_folder): - for filename in files: - if use_rule and use_rule != filename: - continue - if self.is_yaml(filename): - rule_files.append(os.path.join(root, filename)) + for ruledir in rule_folders: + if not os.path.exists(ruledir): + raise EAException('Specified rule_folder does not exist: %s ' % ruledir) + for root, folders, files in os.walk(ruledir, followlinks=True): + # Openshift/k8s configmap fix for ..data and ..2021_05..date directories that loop with os.walk() + folders[:] = [d for d in folders if not d.startswith('..')] + for filename in files: + if use_rule and use_rule != filename: + continue + if self.is_yaml(filename): + rule_files.append(os.path.join(root, filename)) else: - for filename in os.listdir(rule_folder): - fullpath = os.path.join(rule_folder, filename) - if os.path.isfile(fullpath) and self.is_yaml(filename): - rule_files.append(fullpath) + for ruledir in rule_folders: + if not os.path.isdir(ruledir): + continue + for file in os.scandir(ruledir): + fullpath = os.path.join(ruledir, file.name) + if os.path.isfile(fullpath) and self.is_yaml(file.name): + rule_files.append(fullpath) return rule_files def get_hashes(self, conf, use_rule=None): @@ -523,7 +594,7 @@ def get_hashes(self, conf, use_rule=None): def get_yaml(self, filename): try: - return yaml_loader(filename) + return read_yaml(filename) except yaml.scanner.ScannerError as e: raise EAException('Could not parse file %s: %s' % (filename, e)) @@ -532,20 +603,28 @@ def get_import_rule(self, rule): Allow for relative paths to the import rule. 
:param dict rule: :return: Path the import rule - :rtype: str + :rtype: List[str] """ - if os.path.isabs(rule['import']): - return rule['import'] - else: - return os.path.join(os.path.dirname(rule['rule_file']), rule['import']) + rule_imports = rule['import'] + if type(rule_imports) is str: + rule_imports = [rule_imports] + expanded_imports = [] + for rule_import in rule_imports: + if os.path.isabs(rule_import): + expanded_imports.append(rule_import) + else: + expanded_imports.append(os.path.join(os.path.dirname(rule['rule_file']), rule_import)) + return expanded_imports def get_rule_file_hash(self, rule_file): - rule_file_hash = '' if os.path.exists(rule_file): with open(rule_file, 'rb') as fh: rule_file_hash = hashlib.sha1(fh.read()).digest() for import_rule_file in self.import_rules.get(rule_file, []): rule_file_hash += self.get_rule_file_hash(import_rule_file) + else: + not_found = 'ENOENT ' + rule_file + rule_file_hash = hashlib.sha1(not_found.encode('utf-8')).digest() return rule_file_hash @staticmethod diff --git a/elastalert/prometheus_wrapper.py b/elastalert/prometheus_wrapper.py new file mode 100644 index 000000000..9c806fa7c --- /dev/null +++ b/elastalert/prometheus_wrapper.py @@ -0,0 +1,58 @@ +import prometheus_client + + +class PrometheusWrapper: + """ Exposes ElastAlert metrics on a Prometheus metrics endpoint. + Wraps ElastAlerter run_rule and writeback to collect metrics. """ + + def __init__(self, client): + self.prometheus_port = client.prometheus_port + self.run_rule = client.run_rule + self.writeback = client.writeback + + client.run_rule = self.metrics_run_rule + client.writeback = self.metrics_writeback + + # initialize prometheus metrics to be exposed + self.prom_scrapes = prometheus_client.Counter('elastalert_scrapes', 'Number of scrapes for rule', ['rule_name']) + self.prom_hits = prometheus_client.Counter('elastalert_hits', 'Number of hits for rule', ['rule_name']) + self.prom_matches = prometheus_client.Counter('elastalert_matches', 'Number of matches for rule', ['rule_name']) + self.prom_time_taken = prometheus_client.Counter('elastalert_time_taken', 'Time taken to evaluate rule', ['rule_name']) + self.prom_alerts_sent = prometheus_client.Counter('elastalert_alerts_sent', 'Number of alerts sent for rule', ['rule_name']) + self.prom_alerts_not_sent = prometheus_client.Counter('elastalert_alerts_not_sent', 'Number of alerts not sent', ['rule_name']) + self.prom_errors = prometheus_client.Counter('elastalert_errors', 'Number of errors for rule') + self.prom_alerts_silenced = prometheus_client.Counter('elastalert_alerts_silenced', 'Number of silenced alerts', ['rule_name']) + + def start(self): + prometheus_client.start_http_server(self.prometheus_port) + + def metrics_run_rule(self, rule, endtime, starttime=None): + """ Increment counter every time rule is run """ + try: + self.prom_scrapes.labels(rule['name']).inc() + finally: + return self.run_rule(rule, endtime, starttime) + + def metrics_writeback(self, doc_type, body, rule=None, match_body=None): + """ Update various prometheus metrics accoording to the doc_type """ + + res = self.writeback(doc_type, body) + try: + if doc_type == 'elastalert_status': + self.prom_hits.labels(body['rule_name']).inc(int(body['hits'])) + self.prom_matches.labels(body['rule_name']).inc(int(body['matches'])) + self.prom_time_taken.labels(body['rule_name']).inc(float(body['time_taken'])) + elif doc_type == 'elastalert': + if body['alert_sent']: + self.prom_alerts_sent.labels(body['rule_name']).inc() + else: + 
self.prom_alerts_not_sent.labels(body['rule_name']).inc() + elif doc_type == 'elastalert_error': + print("coming_here") + print(body) + print("pt 2") + self.prom_errors.inc() + elif doc_type == 'silence': + self.prom_alerts_silenced.labels(body['rule_name']).inc() + finally: + return res \ No newline at end of file diff --git a/elastalert/rule_from_kibana.py b/elastalert/rule_from_kibana.py deleted file mode 100644 index 4a0634954..000000000 --- a/elastalert/rule_from_kibana.py +++ /dev/null @@ -1,47 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- -import json - -import yaml - -from elastalert.kibana import filters_from_dashboard -from elastalert.util import elasticsearch_client - - -def main(): - es_host = input("Elasticsearch host: ") - es_port = input("Elasticsearch port: ") - db_name = input("Dashboard name: ") - send_get_body_as = input("Method for querying Elasticsearch[GET]: ") or 'GET' - - es = elasticsearch_client({'es_host': es_host, 'es_port': es_port, 'send_get_body_as': send_get_body_as}) - - print("Elastic Version:" + es.es_version) - - query = {'query': {'term': {'_id': db_name}}} - - if es.is_atleastsixsix(): - # TODO check support for kibana 7 - # TODO use doc_type='_doc' instead - res = es.deprecated_search(index='kibana-int', doc_type='dashboard', body=query, _source_includes=['dashboard']) - else: - res = es.deprecated_search(index='kibana-int', doc_type='dashboard', body=query, _source_include=['dashboard']) - - if not res['hits']['hits']: - print("No dashboard %s found" % (db_name)) - exit() - - db = json.loads(res['hits']['hits'][0]['_source']['dashboard']) - config_filters = filters_from_dashboard(db) - - print("\nPartial Config file") - print("-----------\n") - print("name: %s" % (db_name)) - print("es_host: %s" % (es_host)) - print("es_port: %s" % (es_port)) - print("filter:") - print(yaml.safe_dump(config_filters)) - - -if __name__ == '__main__': - main() diff --git a/elastalert/ruletypes.py b/elastalert/ruletypes.py index 2f1d2f82c..ddefc58e9 100644 --- a/elastalert/ruletypes.py +++ b/elastalert/ruletypes.py @@ -2,22 +2,15 @@ import copy import datetime import sys +import time +import itertools -from blist import sortedlist -from .util import add_raw_postfix -from .util import dt_to_ts -from .util import EAException -from .util import elastalert_logger -from .util import elasticsearch_client -from .util import format_index -from .util import hashable -from .util import lookup_es_key -from .util import new_get_event_ts -from .util import pretty_ts -from .util import total_seconds -from .util import ts_now -from .util import ts_to_dt +from sortedcontainers import SortedKeyList as sortedlist + +from elastalert.util import (add_raw_postfix, dt_to_ts, EAException, elastalert_logger, elasticsearch_client, + format_index, get_msearch_query, hashable, kibana_adapter_client, lookup_es_key, new_get_event_ts, pretty_ts, total_seconds, + ts_now, ts_to_dt, expand_string_into_dict, format_string) class RuleType(object): @@ -219,25 +212,64 @@ def __init__(self, *args): self.ts_field = self.rules.get('timestamp_field', '@timestamp') self.get_ts = new_get_event_ts(self.ts_field) self.attach_related = self.rules.get('attach_related', False) + + # def add_count_data(self, data): + # """ Add count data to the rule. Data should be of the form {ts: count}. """ + # if len(data) > 1: + # raise EAException('add_count_data can only accept one count at a time') - def add_count_data(self, data): - """ Add count data to the rule. Data should be of the form {ts: count}. 
""" - if len(data) > 1: - raise EAException('add_count_data can only accept one count at a time') + # (ts, count), = list(data.items()) - (ts, count), = list(data.items()) + # event = ({self.ts_field: ts}, count) + # self.occurrences.setdefault('all', EventWindow(self.rules['timeframe'], getTimestamp=self.get_ts)).append(event) + # self.check_for_match('all') - event = ({self.ts_field: ts}, count) + def add_count_data(self, data): + # data struncture should be -> data: {endtime:,count:,event:[{}]} + # if data doesn't have endtime and count as above example, raise an exception + if not 'endtime' in data or not 'count' in data: + raise EAException('add_count_data should have endtime and count') + ts = data['endtime'] + count = data['count'] + doc = {} + if 'event' in data and data['event'][0]: + doc = data['event'][0] + else: + doc = {self.ts_field: ts} + event = (doc, count) self.occurrences.setdefault('all', EventWindow(self.rules['timeframe'], getTimestamp=self.get_ts)).append(event) self.check_for_match('all') + #nested query key optimizations def add_terms_data(self, terms): - for timestamp, buckets in terms.items(): - for bucket in buckets: + if 'nested_query_key' in self.rules and self.rules['nested_query_key'] == True: + #letting this log message stay inorder to debug issues in future + elastalert_logger.info(terms) + for timestamp, buckets in terms.items(): + self.flatten_nested_aggregations(timestamp,buckets) + else: + for timestamp, buckets in terms.items(): + for bucket in buckets: + event = ({self.ts_field: timestamp, + self.rules['query_key']: bucket['key']}, bucket['doc_count']) + self.occurrences.setdefault(bucket['key'], EventWindow(self.rules['timeframe'], getTimestamp=self.get_ts)).append(event) + self.check_for_match(bucket['key']) + + #nested query key optimizations + def flatten_nested_aggregations(self,timestamp,buckets,key=None): + for bucket in buckets: + if key == None: + nestedkey = str(bucket['key']) + else: + nestedkey = key + ',' + str(bucket['key']) + if 'counts' in bucket: + self.flatten_nested_aggregations(timestamp,bucket['counts']['buckets'],nestedkey) + else: event = ({self.ts_field: timestamp, - self.rules['query_key']: bucket['key']}, bucket['doc_count']) - self.occurrences.setdefault(bucket['key'], EventWindow(self.rules['timeframe'], getTimestamp=self.get_ts)).append(event) - self.check_for_match(bucket['key']) + self.rules['query_key']: nestedkey}, bucket['doc_count']) + self.occurrences.setdefault(nestedkey, EventWindow(self.rules['timeframe'], getTimestamp=self.get_ts)).append(event) + self.check_for_match(nestedkey) + def add_data(self, data): if 'query_key' in self.rules: @@ -269,6 +301,7 @@ def check_for_match(self, key, end=False): event = self.occurrences[key].data[-1][0] if self.attach_related: event['related_events'] = [data[0] for data in self.occurrences[key].data[:-1]] + event['count'] = self.occurrences[key].count() self.add_match(event) self.occurrences.pop(key) @@ -282,9 +315,10 @@ def garbage_collect(self, timestamp): def get_match_str(self, match): lt = self.rules.get('use_local_time') + fmt = self.rules.get('custom_pretty_ts_format') match_ts = lookup_es_key(match, self.ts_field) - starttime = pretty_ts(dt_to_ts(ts_to_dt(match_ts) - self.rules['timeframe']), lt) - endtime = pretty_ts(match_ts, lt) + starttime = pretty_ts(dt_to_ts(ts_to_dt(match_ts) - self.rules['timeframe']), lt, fmt) + endtime = pretty_ts(match_ts, lt, fmt) message = 'At least %d events occurred between %s and %s\n\n' % (self.rules['num_events'], starttime, endtime) 
@@ -318,12 +352,14 @@ def append(self, event): This will also pop the oldest events and call onRemoved on them until the window size is less than timeframe. """ self.data.add(event) - self.running_count += event[1] - + if event and event[1]: + self.running_count += event[1] + while self.duration() >= self.timeframe: oldest = self.data[0] self.data.remove(oldest) - self.running_count -= oldest[1] + if oldest and oldest[1]: + self.running_count -= oldest[1] self.onRemoved and self.onRemoved(oldest) def duration(self): @@ -351,6 +387,20 @@ def mean(self): else: return None + def min(self): + """ The minimum of the value_field in the window. """ + if len(self.data) > 0: + return min([x[1] for x in self.data]) + else: + return None + + def max(self): + """ The maximum of the value_field in the window. """ + if len(self.data) > 0: + return max([x[1] for x in self.data]) + else: + return None + def __iter__(self): return iter(self.data) @@ -363,7 +413,8 @@ def append_middle(self, event): # Append left if ts is earlier than first event if self.get_ts(self.data[0]) > ts: self.data.appendleft(event) - self.running_count += event[1] + if event and event[1]: + self.running_count += event[1] return # Rotate window until we can insert event @@ -374,9 +425,108 @@ def append_middle(self, event): # This should never happen return self.data.append(event) - self.running_count += event[1] + if event and event[1]: + self.running_count += event[1] self.data.rotate(-rotation) +class TermsWindow: + + """ For each field configured in new_term rule, This term window is created and maintained. + A sliding window is maintained and count of all the existing terms are stored. + + data - Sliding window which holds the queried terms and counts along with timestamp. This list is sorted in ascending order based on the timestamp + existing_terms - A set containing existing terms. mainly used for looking up new terms. + new_terms - Dictionary of EventWindows created for new terms. + count_dict - Dictionary containing the count of existing terms. 
When something is added to or popped from the sliding window - data, this count is updated + """ + def __init__(self, term_window_size, ts_field , threshold, threshold_window_size, get_ts): + self.term_window_size = term_window_size + self.ts_field = ts_field + self.threshold = threshold + self.threshold_window_size = threshold_window_size + self.get_ts = get_ts + + self.data = sortedlist(key= lambda x: x[0]) #sorted by timestamp + self.existing_terms = set() + self.potential_new_term_windows = {} + self.count_dict = {} + + """ used to add new terms and their counts for a timestamp into the sliding window - data """ + def add(self, timestamp, terms, counts): + for (term, count) in zip(terms, counts): + if term not in self.count_dict: + self.count_dict[term] = 0 + self.count_dict[term] += count + self.existing_terms.add(term) + self.data.add((timestamp, terms,counts)) + self.resize() + + """ function to split new terms and existing terms when given timestamp, terms and counts""" + def split(self,timestamp, terms, counts): + unseen_terms = [] + unseen_counts = [] + seen_terms = [] + seen_counts = [] + self.resize(till = timestamp - self.term_window_size) + for (term, count) in zip(terms, counts): + if term not in self.existing_terms: + unseen_terms.append(term) + unseen_counts.append(count) + else: + seen_terms.append(term) + seen_counts.append(count) + return seen_terms, seen_counts, unseen_terms, unseen_counts + + """ function to update the potential new terms windows""" + def update_potential_new_term_windows(self, timestamp, unseen_terms, unseen_counts): + for (term, count) in zip(unseen_terms, unseen_counts): + event = ({self.ts_field: timestamp}, count) + window = self.potential_new_term_windows.setdefault( term , EventWindow(self.threshold_window_size, getTimestamp=self.get_ts)) + window.append(event) + + + """function to get the matched new_terms that have crossed the threshold configured""" + def extract_new_terms(self, potential_new_terms, potential_term_counts): + new_terms = [] + new_counts = [] + for (potential_new_term, potential_term_count) in zip(potential_new_terms, potential_term_counts): + window = self.potential_new_term_windows.get(potential_new_term) + if window.count() >= self.threshold: + new_terms.append(potential_new_term) + new_counts.append(potential_term_count) + self.potential_new_term_windows.pop(potential_new_term) + return new_terms, new_counts + + def get_new_terms(self, timestamp, terms, counts): + existing_terms, existing_counts, potential_new_terms, potential_term_counts = self.split(timestamp, terms, counts) # Split the potential_new_terms and existing terms along with their counts based on current timestamp + self.update_potential_new_term_windows(timestamp, potential_new_terms, potential_term_counts) # Update the potential_new_term_windows + new_terms, new_counts = self.extract_new_terms( potential_new_terms, potential_term_counts) # extract and delete new terms from the potential_new_terms_window. 
+ self.add(timestamp, existing_terms + new_terms, existing_counts + new_counts) # Add the existing terms and new_terms to the terms_window + return new_terms, new_counts + + + """ This function ensures that the duration of the sliding window does not exceed term_window_size + all the events with their timestamp earlier than 'till' are popped and the counts of keys in popped events are subtracted from count_dict + After subtraction, if a term's count reaches 0, they are removed from count_dict and existing_terms, i.e. they have not occurred within the terms_window duration + by default, till = (last event's timestamp - term_window_size ) , + """ + def resize(self, till=None): + if len(self.data)==0: + return + + if till == None: + till = self.data[-1][0] - self.term_window_size + + while len(self.data)!=0 and self.data[0][0] < till: + timestamp, keys, counts = self.data.pop(0) + for i in range(len(keys)): + self.count_dict[keys[i]] -= counts[i] + if self.count_dict[keys[i]] <= 0: + self.count_dict.pop(keys[i]) + self.existing_terms.discard(keys[i]) + + + class SpikeRule(RuleType): """ A rule that uses two sliding windows to compare relative event frequency. """ @@ -399,11 +549,17 @@ def __init__(self, *args): self.ref_window_filled_once = False def add_count_data(self, data): - """ Add count data to the rule. Data should be of the form {ts: count}. """ - if len(data) > 1: - raise EAException('add_count_data can only accept one count at a time') - for ts, count in data.items(): - self.handle_event({self.ts_field: ts}, count, 'all') + #""" Add count data to the rule. Data should be of the form {ts: count}. """ + # if len(data) > 1: + # raise EAException('add_count_data can only accept one count at a time') + # for ts, count in data.items(): + # self.handle_event({self.ts_field: ts}, count, 'all') + + # data structure should be -> data: {endtime:,count:,event:[{}]} + # if data doesn't have endtime and count as above example, raise an exception + ts = data['endtime'] + count = data['count'] + self.handle_event({self.ts_field: ts}, count, 'all') def add_terms_data(self, terms): for timestamp, buckets in terms.items(): @@ -422,17 +578,33 @@ def add_data(self, data): if qk is None: qk = 'other' if self.field_value is not None: - count = lookup_es_key(event, self.field_value) - if count is not None: - try: - count = int(count) - except ValueError: - elastalert_logger.warn('{} is not a number: {}'.format(self.field_value, count)) - else: - self.handle_event(event, count, qk) + if self.field_value in event: + count = lookup_es_key(event, self.field_value) + if count is not None: + try: + count = int(count) + except ValueError: + elastalert_logger.warn('{} is not a number: {}'.format(self.field_value, count)) + else: + self.handle_event(event, count, qk) else: self.handle_event(event, 1, qk) + def get_spike_values(self, qk): + """ + extending ref/cur value retrieval logic for spike aggregations + """ + spike_check_type = self.rules.get('metric_agg_type') + if spike_check_type in [None, 'sum', 'value_count', 'cardinality', 'percentile']: + # default count logic is appropriate in all these cases + return self.ref_windows[qk].count(), self.cur_windows[qk].count() + elif spike_check_type == 'avg': + return self.ref_windows[qk].mean(), self.cur_windows[qk].mean() + elif spike_check_type == 'min': + return self.ref_windows[qk].min(), self.cur_windows[qk].min() + elif spike_check_type == 'max': + return self.ref_windows[qk].max(), self.cur_windows[qk].max() + def clear_windows(self, qk, event): # Reset the state and
prevent alerts until windows filled again self.ref_windows[qk].clear() @@ -470,7 +642,8 @@ def handle_event(self, event, count, qk='all'): self.add_match(match, qk) self.clear_windows(qk, match) else: - if self.find_matches(self.ref_windows[qk].count(), self.cur_windows[qk].count()): + ref, cur = self.get_spike_values(qk) + if self.find_matches(ref, cur): # skip over placeholder events which have count=0 for match, count in self.cur_windows[qk].data: if count: @@ -482,8 +655,7 @@ def handle_event(self, event, count, qk='all'): def add_match(self, match, qk): extra_info = {} if self.field_value is None: - spike_count = self.cur_windows[qk].count() - reference_count = self.ref_windows[qk].count() + reference_count, spike_count = self.get_spike_values(qk) else: spike_count = self.cur_windows[qk].mean() reference_count = self.ref_windows[qk].mean() @@ -519,7 +691,7 @@ def get_match_str(self, match): if self.field_value is None: message = 'An abnormal number (%d) of events occurred around %s.\n' % ( match['spike_count'], - pretty_ts(match[self.rules['timestamp_field']], self.rules.get('use_local_time')) + pretty_ts(match[self.rules['timestamp_field']], self.rules.get('use_local_time'), self.rules.get('custom_pretty_ts_format')) ) message += 'Preceding that time, there were only %d events within %s\n\n' % (match['reference_count'], self.rules['timeframe']) else: @@ -527,7 +699,8 @@ def get_match_str(self, match): match['spike_count'], self.field_value, pretty_ts(match[self.rules['timestamp_field']], - self.rules.get('use_local_time')) + self.rules.get('use_local_time'), + self.rules.get('custom_pretty_ts_format')) ) message += 'Preceding that time, the field had an average value of (%.2f) within %s\n\n' % ( match['reference_count'], self.rules['timeframe']) @@ -548,6 +721,74 @@ def garbage_collect(self, ts): placeholder.update({self.rules['query_key']: qk}) self.handle_event(placeholder, 0, qk) +class AdvancedQueryRule(RuleType): + """ A rule that uses a query_string query to perform a advanced search like parsing, evaluating conditions, calculating aggs etc """ + required_options = frozenset(['alert_field']) + + def __init__(self, *args): + super(AdvancedQueryRule, self).__init__(*args) + if 'max_threshold' not in self.rules and 'min_threshold' not in self.rules: + raise EAException("AdvancedQueryRule must have one of either max_threshold or min_threshold") + #self.query_string = self.rules.get('query_string') + self.rules['aggregation_query_element'] = {"query": ""} + + def add_aggregation_data(self, payload): + for timestamp, payload_data in payload.items(): + self.check_matches(payload_data,timestamp) + + def check_matches(self,data,timestamp): + results=[] + for key, value in data.items(): + if 'buckets' in value: + if len(value['buckets']) >0 : + results = self.flatten_results(key,value['buckets'],self.rules['alert_field'],{},results) + else: + if self.crossed_thresholds(value['value']): + match={"key":self.rules['alert_field'],"count":value['value'],self.rules['timestamp_field']:timestamp} + self.add_match(match) + if len(results) > 0: + for event in results: + if self.crossed_thresholds(event[self.rules['alert_field']]): + #looping the object to form data structure in required format + group_by_keys=[] + group_by_values=[] + for k,v in event.items(): + if self.rules['alert_field'] not in k : + group_by_keys.append(str(k)) + group_by_values.append(str(v)) + else: + count = v + group_by_key = ','.join(group_by_keys) + group_by_value = ','.join(group_by_values) + 
match={"key":group_by_key,"value":group_by_value,"count":count,self.rules['timestamp_field']:timestamp} + self.add_match(match) + + #function to flatten the aggregated data. This returns an array of dictionaries which has corresponding key, value + #group starts initially empty and as we progress we keep adding this groups. + def flatten_results(self,key,value,alert_field,group,results=[]): + for item in value: + temp_group={} #temp group to start the loop back again with empty, if at all one iteration is completed + group[key]=item['key'] + for k,v in item.items(): + if isinstance(v,dict): + if "buckets" in v: + self.flatten_results(k,v['buckets'],alert_field,group,results) + elif alert_field in k: + temp_group.update(group) + group[alert_field] = v['value'] + results.append(group) + group=temp_group + return results + + def crossed_thresholds(self, metric_value): + if metric_value is None: + return False + if 'max_threshold' in self.rules and float(metric_value) > self.rules['max_threshold']: + return True + if 'min_threshold' in self.rules and float(metric_value) < self.rules['min_threshold']: + return True + return False + class FlatlineRule(FrequencyRule): """ A rule that matches when there is a low number of events given a timeframe. """ @@ -580,6 +821,8 @@ def check_for_match(self, key, end=True): # Do a deep-copy, otherwise we lose the datetime type in the timestamp field of the last event event = copy.deepcopy(self.occurrences[key].data[-1][0]) event.update(key=key, count=count) + if self.rules['query_key']: + event[self.rules['query_key']]=key self.add_match(event) if not self.rules.get('forget_keys'): @@ -597,10 +840,11 @@ def check_for_match(self, key, end=True): def get_match_str(self, match): ts = match[self.rules['timestamp_field']] lt = self.rules.get('use_local_time') - message = 'An abnormally low number of events occurred around %s.\n' % (pretty_ts(ts, lt)) + fmt = self.rules.get('custom_pretty_ts_format') + message = 'An abnormally low number of events occurred around %s.\n' % (pretty_ts(ts, lt, fmt)) message += 'Between %s and %s, there were less than %s events.\n\n' % ( - pretty_ts(dt_to_ts(ts_to_dt(ts) - self.rules['timeframe']), lt), - pretty_ts(ts, lt), + pretty_ts(dt_to_ts(ts_to_dt(ts) - self.rules['timeframe']), lt, fmt), + pretty_ts(ts, lt, fmt), self.rules['threshold'] ) return message @@ -625,7 +869,26 @@ class NewTermsRule(RuleType): def __init__(self, rule, args=None): super(NewTermsRule, self).__init__(rule, args) - self.seen_values = {} + self.term_windows = {} + self.last_updated_at = None + self.es = kibana_adapter_client(self.rules) + self.ts_field = self.rules.get('timestamp_field', '@timestamp') + self.get_ts = new_get_event_ts(self.ts_field) + self.new_terms = {} + + self.threshold = rule.get('threshold',0) + + # terms_window_size : Default & Upperbound - 7 Days + self.window_size = min(datetime.timedelta(**self.rules.get('terms_window_size', {'days': 7})), datetime.timedelta(**{'days': 7})) + + self.step = datetime.timedelta(**{'hours': 1}) + + # terms_size : Default - 500, Upperbound: 1000 + self.terms_size = min(self.rules.get('terms_size', 500),1000) + + # threshold_window_size + self.threshold_window_size = min( datetime.timedelta(**self.rules.get('threshold_window_size', {'hours': 1})), datetime.timedelta(**{'days': 2}) ) + # Allow the use of query_key or fields if 'fields' not in self.rules: if 'query_key' not in self.rules: @@ -644,97 +907,139 @@ def __init__(self, rule, args=None): if [self.rules['query_key']] != self.fields: raise 
EAException('If use_terms_query is specified, you cannot specify different query_key and fields') if not self.rules.get('query_key').endswith('.keyword') and not self.rules.get('query_key').endswith('.raw'): - if self.rules.get('use_keyword_postfix', True): + if self.rules.get('use_keyword_postfix', False): # making it false by default as we wont use the keyword suffix elastalert_logger.warn('Warning: If query_key is a non-keyword field, you must set ' 'use_keyword_postfix to false, or add .keyword/.raw to your query_key.') try: - self.get_all_terms(args) + self.get_all_terms(args=args) except Exception as e: # Refuse to start if we cannot get existing terms raise EAException('Error searching for existing terms: %s' % (repr(e))).with_traceback(sys.exc_info()[2]) + + + + def get_new_term_query(self,starttime,endtime,field): + + field_name = { + "field": "", + "size": self.terms_size, + "order": { + "_count": "desc" + } + } + + query = { + "aggs": { + "values": { + "terms": field_name + } + } + } - def get_all_terms(self, args): + query["query"] = { + 'bool': { + 'filter': { + 'bool': { + 'must': [{ + 'range': { + self.rules['timestamp_field']: { + 'lt': self.rules['dt_to_ts'](endtime), + 'gte': self.rules['dt_to_ts'](starttime) + } + } + }] + } + } + } + } + + filter_level = query['query']['bool']['filter']['bool']['must'] + if 'filter' in self.rules: + for item in self.rules['filter']: + if "query" in item: + filter_level.append(item['query']) + else: + filter_level.append(item) + + # For composite keys, we will need to perform sub-aggregations + if type(field) == list: + self.term_windows.setdefault(tuple(field), TermsWindow(self.window_size, self.ts_field , self.threshold, self.threshold_window_size, self.get_ts)) + level = query['aggs'] + # Iterate on each part of the composite key and add a sub aggs clause to the elastic search query + for i, sub_field in enumerate(field): + if self.rules.get('use_keyword_postfix', False): # making it false by default as we wont use the keyword suffix + level['values']['terms']['field'] = add_raw_postfix(sub_field, True) + else: + level['values']['terms']['field'] = sub_field + if i < len(field) - 1: + # If we have more fields after the current one, then set up the next nested structure + level['values']['aggs'] = {'values': {'terms': copy.deepcopy(field_name)}} + level = level['values']['aggs'] + else: + self.term_windows.setdefault(field, TermsWindow(self.window_size, self.ts_field , self.threshold, self.threshold_window_size, self.get_ts)) + # For non-composite keys, only a single agg is needed + if self.rules.get('use_keyword_postfix', False):# making it false by default as we wont use the keyword suffix + field_name['field'] = add_raw_postfix(field, True) + else: + field_name['field'] = field + + return query + + def get_terms_data(self, es, starttime, endtime, field, request_timeout= None): + terms = [] + counts = [] + query = self.get_new_term_query(starttime,endtime,field) + request = get_msearch_query(query,self.rules) + + if request_timeout == None: + res = es.msearch(body=request) + else: + res = es.msearch(body=request, request_timeout=request_timeout) + res = res['responses'][0] + + if 'aggregations' in res: + buckets = res['aggregations']['values']['buckets'] + if type(field) == list: + for bucket in buckets: + keys, doc_counts = self.flatten_aggregation_hierarchy(bucket) + terms += keys + counts += doc_counts + else: + for bucket in buckets: + terms.append(bucket['key']) + counts.append(bucket['doc_count']) + + return terms, counts + + + 
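For reference, a minimal sketch (not part of the patch) of the search body that get_new_term_query above assembles for a single, non-composite field; the field name and time bounds are invented, and terms_size defaults to 500 (capped at 1000). get_terms_data then wraps this body with get_msearch_query, sends it via msearch, and reads aggregations.values.buckets from the first response.

# Roughly the body produced for an illustrative field 'user.name' over a one-hour
# slice; any entries from the rule's `filter` option are appended to bool.must.
example_new_term_query = {
    'aggs': {
        'values': {
            'terms': {'field': 'user.name', 'size': 500, 'order': {'_count': 'desc'}}
        }
    },
    'query': {
        'bool': {
            'filter': {
                'bool': {
                    'must': [
                        {'range': {'@timestamp': {'lt': '2021-05-01T01:00:00Z',
                                                  'gte': '2021-05-01T00:00:00Z'}}}
                    ]
                }
            }
        }
    }
}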
+ + def get_all_terms(self,args): """ Performs a terms aggregation for each field to get every existing term. """ - self.es = elasticsearch_client(self.rules) - window_size = datetime.timedelta(**self.rules.get('terms_window_size', {'days': 30})) - field_name = {"field": "", "size": 2147483647} # Integer.MAX_VALUE - query_template = {"aggs": {"values": {"terms": field_name}}} + if args and hasattr(args, 'start') and args.start: end = ts_to_dt(args.start) elif 'start_date' in self.rules: end = ts_to_dt(self.rules['start_date']) else: end = ts_now() - start = end - window_size - step = datetime.timedelta(**self.rules.get('window_step_size', {'days': 1})) + start = end - self.window_size + for field in self.fields: tmp_start = start - tmp_end = min(start + step, end) - - time_filter = {self.rules['timestamp_field']: {'lt': self.rules['dt_to_ts'](tmp_end), 'gte': self.rules['dt_to_ts'](tmp_start)}} - query_template['filter'] = {'bool': {'must': [{'range': time_filter}]}} - query = {'aggs': {'filtered': query_template}} - - if 'filter' in self.rules: - for item in self.rules['filter']: - query_template['filter']['bool']['must'].append(item) - - # For composite keys, we will need to perform sub-aggregations - if type(field) == list: - self.seen_values.setdefault(tuple(field), []) - level = query_template['aggs'] - # Iterate on each part of the composite key and add a sub aggs clause to the elastic search query - for i, sub_field in enumerate(field): - if self.rules.get('use_keyword_postfix', True): - level['values']['terms']['field'] = add_raw_postfix(sub_field, self.is_five_or_above()) - else: - level['values']['terms']['field'] = sub_field - if i < len(field) - 1: - # If we have more fields after the current one, then set up the next nested structure - level['values']['aggs'] = {'values': {'terms': copy.deepcopy(field_name)}} - level = level['values']['aggs'] - else: - self.seen_values.setdefault(field, []) - # For non-composite keys, only a single agg is needed - if self.rules.get('use_keyword_postfix', True): - field_name['field'] = add_raw_postfix(field, self.is_five_or_above()) - else: - field_name['field'] = field - + # Query the entire time range in small chunks while tmp_start < end: - if self.rules.get('use_strftime_index'): - index = format_index(self.rules['index'], tmp_start, tmp_end) - else: - index = self.rules['index'] - res = self.es.search(body=query, index=index, ignore_unavailable=True, timeout='50s') - if 'aggregations' in res: - buckets = res['aggregations']['filtered']['values']['buckets'] - if type(field) == list: - # For composite keys, make the lookup based on all fields - # Make it a tuple since it can be hashed and used in dictionary lookups - for bucket in buckets: - # We need to walk down the hierarchy and obtain the value at each level - self.seen_values[tuple(field)] += self.flatten_aggregation_hierarchy(bucket) - else: - keys = [bucket['key'] for bucket in buckets] - self.seen_values[field] += keys - else: - if type(field) == list: - self.seen_values.setdefault(tuple(field), []) - else: - self.seen_values.setdefault(field, []) - if tmp_start == tmp_end: - break + tmp_end = min(tmp_start + self.step, end) + terms, counts = self.get_terms_data(self.es, tmp_start, tmp_end, field, request_timeout=50) + self.term_windows[self.get_lookup_key(field)].add(tmp_end,terms,counts) tmp_start = tmp_end - tmp_end = min(tmp_start + step, end) - time_filter[self.rules['timestamp_field']] = {'lt': self.rules['dt_to_ts'](tmp_end), - 'gte': self.rules['dt_to_ts'](tmp_start)} + - for 
key, values in self.seen_values.items(): - if not values: - if type(key) == tuple: + for lookup_key, window in self.term_windows.items(): + if not window.existing_terms: + if type(lookup_key) == tuple: # If we don't have any results, it could either be because of the absence of any baseline data # OR it may be because the composite key contained a non-primitive type. Either way, give the # end-users a heads up to help them debug what might be going on. @@ -745,8 +1050,8 @@ def get_all_terms(self, args): else: elastalert_logger.info('Found no values for %s' % (field)) continue - self.seen_values[key] = list(set(values)) - elastalert_logger.info('Found %s unique values for %s' % (len(set(values)), key)) + elastalert_logger.info('Found %s unique values for %s' % (len(window.existing_terms), lookup_key)) + # self.last_updated_at = ts_now() def flatten_aggregation_hierarchy(self, root, hierarchy_tuple=()): """ For nested aggregations, the results come back in the following format: @@ -837,62 +1142,88 @@ def flatten_aggregation_hierarchy(self, root, hierarchy_tuple=()): A similar formatting will be performed in the add_data method and used as the basis for comparison """ - results = [] + final_keys = [] + final_counts = [] # There are more aggregation hierarchies left. Traverse them. if 'values' in root: - results += self.flatten_aggregation_hierarchy(root['values']['buckets'], hierarchy_tuple + (root['key'],)) + keys, counts = self.flatten_aggregation_hierarchy(root['values']['buckets'], hierarchy_tuple + (root['key'],)) + final_keys += keys + final_counts += counts else: # We've gotten to a sub-aggregation, which may have further sub-aggregations # See if we need to traverse further for node in root: if 'values' in node: - results += self.flatten_aggregation_hierarchy(node, hierarchy_tuple) - else: - results.append(hierarchy_tuple + (node['key'],)) - return results - - def add_data(self, data): - for document in data: - for field in self.fields: - value = () - lookup_field = field - if type(field) == list: - # For composite keys, make the lookup based on all fields - # Make it a tuple since it can be hashed and used in dictionary lookups - lookup_field = tuple(field) - for sub_field in field: - lookup_result = lookup_es_key(document, sub_field) - if not lookup_result: - value = None - break - value += (lookup_result,) + keys, counts = self.flatten_aggregation_hierarchy(node, hierarchy_tuple) + final_keys += keys + final_counts += counts else: - value = lookup_es_key(document, field) - if not value and self.rules.get('alert_on_missing_field'): - document['missing_field'] = lookup_field - self.add_match(copy.deepcopy(document)) - elif value: - if value not in self.seen_values[lookup_field]: - document['new_field'] = lookup_field - self.add_match(copy.deepcopy(document)) - self.seen_values[lookup_field].append(value) + final_keys.append(hierarchy_tuple + (node['key'],)) + final_counts.append(node['doc_count']) + return final_keys, final_counts - def add_terms_data(self, terms): - # With terms query, len(self.fields) is always 1 and the 0'th entry is always a string - field = self.fields[0] - for timestamp, buckets in terms.items(): - for bucket in buckets: - if bucket['doc_count']: - if bucket['key'] not in self.seen_values[field]: - match = {field: bucket['key'], - self.rules['timestamp_field']: timestamp, - 'new_field': field} - self.add_match(match) - self.seen_values[field].append(bucket['key']) - - def is_five_or_above(self): - version = self.es.info()['version']['number'] - return 
int(version[0]) >= 5 + def add_terms_data(self, payload): + timestamp = list(payload.keys())[0] + data = payload[timestamp] + for field in self.fields: + lookup_key = self.get_lookup_key(field) + keys, counts = data[lookup_key] + + new_terms, new_counts = self.term_windows[lookup_key].get_new_terms(timestamp, keys, counts ) + + # append and get all match keys and counts + for (new_term, new_count) in zip(new_terms, new_counts): + match = { + "field": lookup_key, + self.rules['timestamp_field']: timestamp, + "new_value": tuple(new_term) if type(new_term) == list else new_term, + "hits" : new_count + } + self.add_match(copy.deepcopy(match)) + + ### NOT USED ANYMORE ### + # def add_data(self, data): + # for document in data: + # for field in self.fields: + # value = () + # lookup_field = field + # if type(field) == list: + # # For composite keys, make the lookup based on all fields + # # Make it a tuple since it can be hashed and used in dictionary lookups + # lookup_field = tuple(field) + # for sub_field in field: + # lookup_result = lookup_es_key(document, sub_field) + # if not lookup_result: + # value = None + # break + # value += (lookup_result,) + # else: + # value = lookup_es_key(document, field) + # if not value and self.rules.get('alert_on_missing_field'): + # document['missing_field'] = lookup_field + # self.add_match(copy.deepcopy(document)) + # elif value: + # if value not in self.seen_values[lookup_field]: + # document['new_field'] = lookup_field + # self.add_match(copy.deepcopy(document)) + # self.seen_values[lookup_field].append(value) + + ### NOT USED ANYMORE ### + # def add_terms_data(self, terms): + # # With terms query, len(self.fields) is always 1 and the 0'th entry is always a string + # field = self.fields[0] + # for timestamp, buckets in terms.items(): + # for bucket in buckets: + # if bucket['doc_count']: + # if bucket['key'] not in self.seen_values[field]: + # match = {field: bucket['key'], + # self.rules['timestamp_field']: timestamp, + # 'new_field': field} + # self.add_match(match) + # self.seen_values[field].append(bucket['key']) + + def get_lookup_key(self,field): + return tuple(field) if type(field) == list else field class CardinalityRule(RuleType): @@ -956,8 +1287,9 @@ def garbage_collect(self, timestamp): def get_match_str(self, match): lt = self.rules.get('use_local_time') - starttime = pretty_ts(dt_to_ts(ts_to_dt(lookup_es_key(match, self.ts_field)) - self.rules['timeframe']), lt) - endtime = pretty_ts(lookup_es_key(match, self.ts_field), lt) + fmt = self.rules.get('custom_pretty_ts_format') + starttime = pretty_ts(dt_to_ts(ts_to_dt(lookup_es_key(match, self.ts_field)) - self.rules['timeframe']), lt, fmt) + endtime = pretty_ts(lookup_es_key(match, self.ts_field), lt, fmt) if 'max_cardinality' in self.rules: message = ('A maximum of %d unique %s(s) occurred since last alert or between %s and %s\n\n' % (self.rules['max_cardinality'], self.rules['cardinality_field'], @@ -1021,11 +1353,38 @@ def unwrap_term_buckets(self, timestamp, term_buckets): def check_matches(self, timestamp, query_key, aggregation_data): raise NotImplementedError() +#Error Rate Rule Definition +class ErrorRateRule(BaseAggregationRule): + """ A rule that determines error rate with sampling rate""" + required_options = frozenset(['sampling', 'threshold','error_condition','unique_column']) + def __init__(self, *args): + super(ErrorRateRule, self).__init__(*args) + + self.ts_field = self.rules.get('timestamp_field', '@timestamp') + self.rules['total_agg_key'] = self.rules['unique_column'] + 
self.rules['count_all_errors'] = True + + if ( 'error_calculation_method' in self.rules and self.rules['error_calculation_method']=='count_traces_with_errors' ): + self.rules['count_all_errors'] = False + + # hardcoding uniq aggregation for total count + self.rules['total_agg_type'] = "uniq" + + def calculate_err_rate(self,payload): + for timestamp, payload_data in payload.items(): + if int(payload_data['total_count']) > 0: + rate = float(payload_data['error_count'])/float(payload_data['total_count']) + rate = float(rate)/float(self.rules['sampling']) + rate = rate*100 + if 'threshold' in self.rules and rate > self.rules['threshold']: + match = {self.rules['timestamp_field']: timestamp, 'error_rate': rate, 'from': payload_data['start_time'], 'to': payload_data['end_time']} + self.add_match(match) class MetricAggregationRule(BaseAggregationRule): """ A rule that matches when there is a low number of events given a timeframe. """ required_options = frozenset(['metric_agg_key', 'metric_agg_type']) allowed_aggregations = frozenset(['min', 'max', 'avg', 'sum', 'cardinality', 'value_count']) + allowed_percent_aggregations = frozenset(['percentiles']) def __init__(self, *args): super(MetricAggregationRule, self).__init__(*args) @@ -1035,35 +1394,55 @@ def __init__(self, *args): self.metric_key = 'metric_' + self.rules['metric_agg_key'] + '_' + self.rules['metric_agg_type'] - if not self.rules['metric_agg_type'] in self.allowed_aggregations: + if not self.rules['metric_agg_type'] in self.allowed_aggregations.union(self.allowed_percent_aggregations): raise EAException("metric_agg_type must be one of %s" % (str(self.allowed_aggregations))) + if self.rules['metric_agg_type'] in self.allowed_percent_aggregations and self.rules['percentile_range'] is None: + raise EAException("percentile_range must be specified for percentiles aggregation") self.rules['aggregation_query_element'] = self.generate_aggregation_query() def get_match_str(self, match): + metric_format_string = self.rules.get('metric_format_string', None) message = 'Threshold violation, %s:%s %s (min: %s max : %s) \n\n' % ( self.rules['metric_agg_type'], self.rules['metric_agg_key'], - match[self.metric_key], + format_string(metric_format_string, match[self.metric_key]) if metric_format_string else match[self.metric_key], self.rules.get('min_threshold'), self.rules.get('max_threshold') ) return message def generate_aggregation_query(self): - return {self.metric_key: {self.rules['metric_agg_type']: {'field': self.rules['metric_agg_key']}}} + if self.rules.get('metric_agg_script'): + return {self.metric_key: {self.rules['metric_agg_type']: self.rules['metric_agg_script']}} + query = {self.metric_key: {self.rules['metric_agg_type']: {'field': self.rules['metric_agg_key']}}} + if self.rules['metric_agg_type'] in self.allowed_percent_aggregations: + query[self.metric_key][self.rules['metric_agg_type']]['percents'] = [self.rules['percentile_range']] + query[self.metric_key][self.rules['metric_agg_type']]['keyed'] = False + return query def check_matches(self, timestamp, query_key, aggregation_data): if "compound_query_key" in self.rules: self.check_matches_recursive(timestamp, query_key, aggregation_data, self.rules['compound_query_key'], dict()) else: - metric_val = aggregation_data[self.metric_key]['value'] + if self.rules['metric_agg_type'] in self.allowed_percent_aggregations: + #backwards compatibility with existing elasticsearch library + #aggregation_data = 
{"doc_count":258757,"key":"appmailer","metric_qt_percentiles":{"values":[{"key":95,"value":0}]}} + metric_val = aggregation_data[self.metric_key]['values'][0]['value'] + else: + metric_val = aggregation_data[self.metric_key]['value'] if self.crossed_thresholds(metric_val): match = {self.rules['timestamp_field']: timestamp, - self.metric_key: metric_val} + self.metric_key: metric_val, + 'metric_agg_value': metric_val + } + metric_format_string = self.rules.get('metric_format_string', None) + if metric_format_string is not None: + match[self.metric_key +'_formatted'] = format_string(metric_format_string, metric_val) + match['metric_agg_value_formatted'] = format_string(metric_format_string, metric_val) if query_key is not None: - match[self.rules['query_key']] = query_key + match = expand_string_into_dict(match, self.rules['query_key'], query_key) self.add_match(match) def check_matches_recursive(self, timestamp, query_key, aggregation_data, compound_keys, match_data): @@ -1079,25 +1458,27 @@ def check_matches_recursive(self, timestamp, query_key, aggregation_data, compou result, compound_keys[1:], match_data) - else: - metric_val = aggregation_data[self.metric_key]['value'] - if self.crossed_thresholds(metric_val): - match_data[self.rules['timestamp_field']] = timestamp - match_data[self.metric_key] = metric_val - - # add compound key to payload to allow alerts to trigger for every unique occurence - compound_value = [match_data[key] for key in self.rules['compound_query_key']] - match_data[self.rules['query_key']] = ",".join([str(value) for value in compound_value]) + if 'interval_aggs' in aggregation_data: + metric_val_arr = [term[self.metric_key]['value'] for term in aggregation_data['interval_aggs']['buckets']] + else: + metric_val_arr = [aggregation_data[self.metric_key]['value']] + for metric_val in metric_val_arr: + if self.crossed_thresholds(metric_val): + match_data[self.rules['timestamp_field']] = timestamp + match_data[self.metric_key] = metric_val - self.add_match(match_data) + # add compound key to payload to allow alerts to trigger for every unique occurence + compound_value = [match_data[key] for key in self.rules['compound_query_key']] + match_data[self.rules['query_key']] = ",".join([str(value) for value in compound_value]) + self.add_match(match_data) def crossed_thresholds(self, metric_value): if metric_value is None: return False - if 'max_threshold' in self.rules and metric_value > self.rules['max_threshold']: + if 'max_threshold' in self.rules and float(metric_value) > self.rules['max_threshold']: return True - if 'min_threshold' in self.rules and metric_value < self.rules['min_threshold']: + if 'min_threshold' in self.rules and float(metric_value) < self.rules['min_threshold']: return True return False @@ -1106,6 +1487,7 @@ class SpikeMetricAggregationRule(BaseAggregationRule, SpikeRule): """ A rule that matches when there is a spike in an aggregated event compared to its reference point """ required_options = frozenset(['metric_agg_key', 'metric_agg_type', 'spike_height', 'spike_type']) allowed_aggregations = frozenset(['min', 'max', 'avg', 'sum', 'cardinality', 'value_count']) + allowed_percent_aggregations = frozenset(['percentiles']) def __init__(self, *args): # We inherit everything from BaseAggregation and Spike, overwrite only what we need in functions below @@ -1113,8 +1495,11 @@ def __init__(self, *args): # MetricAgg alert things self.metric_key = 'metric_' + self.rules['metric_agg_key'] + '_' + self.rules['metric_agg_type'] - if not 
self.rules['metric_agg_type'] in self.allowed_aggregations: + + if not self.rules['metric_agg_type'] in self.allowed_aggregations.union(self.allowed_percent_aggregations): raise EAException("metric_agg_type must be one of %s" % (str(self.allowed_aggregations))) + if self.rules['metric_agg_type'] in self.allowed_percent_aggregations and self.rules['percentile_range'] is None: + raise EAException("percentile_range must be specified for percentiles aggregation") # Disabling bucket intervals (doesn't make sense in context of spike to split up your time period) if self.rules.get('bucket_interval'): @@ -1123,10 +1508,13 @@ def __init__(self, *args): self.rules['aggregation_query_element'] = self.generate_aggregation_query() def generate_aggregation_query(self): - """Lifted from MetricAggregationRule, added support for scripted fields""" + """Lifted from MetricAggregationRule""" if self.rules.get('metric_agg_script'): return {self.metric_key: {self.rules['metric_agg_type']: self.rules['metric_agg_script']}} - return {self.metric_key: {self.rules['metric_agg_type']: {'field': self.rules['metric_agg_key']}}} + query = {self.metric_key: {self.rules['metric_agg_type']: {'field': self.rules['metric_agg_key']}}} + if self.rules['metric_agg_type'] in self.allowed_percent_aggregations: + query[self.metric_key][self.rules['metric_agg_type']]['percents'] = [self.rules['percentile_range']] + return query def add_aggregation_data(self, payload): """ @@ -1140,7 +1528,10 @@ def add_aggregation_data(self, payload): else: # no time / term split, just focus on the agg event = {self.ts_field: timestamp} - agg_value = payload_data[self.metric_key]['value'] + if self.rules['metric_agg_type'] in self.allowed_percent_aggregations: + agg_value = list(payload_data[self.metric_key]['values'].values())[0] + else: + agg_value = payload_data[self.metric_key]['value'] self.handle_event(event, agg_value, 'all') return @@ -1160,7 +1551,10 @@ def unwrap_term_buckets(self, timestamp, term_buckets, qk=[]): continue qk_str = ','.join(qk) - agg_value = term_data[self.metric_key]['value'] + if self.rules['metric_agg_type'] in self.allowed_percent_aggregations: + agg_value = list(term_data[self.metric_key]['values'].values())[0] + else: + agg_value = term_data[self.metric_key]['value'] event = {self.ts_field: timestamp, self.rules['query_key']: qk_str} # pass to SpikeRule's tracker @@ -1176,7 +1570,7 @@ def get_match_str(self, match): """ message = 'An abnormal {0} of {1} ({2}) occurred around {3}.\n'.format( self.rules['metric_agg_type'], self.rules['metric_agg_key'], round(match['spike_count'], 2), - pretty_ts(match[self.rules['timestamp_field']], self.rules.get('use_local_time')) + pretty_ts(match[self.rules['timestamp_field']], self.rules.get('use_local_time'), self.rules.get('custom_pretty_ts_format')) ) message += 'Preceding that time, there was a {0} of {1} of ({2}) within {3}\n\n'.format( self.rules['metric_agg_type'], self.rules['metric_agg_key'], @@ -1200,7 +1594,7 @@ def __init__(self, *args): def get_match_str(self, match): percentage_format_string = self.rules.get('percentage_format_string', None) message = 'Percentage violation, value: %s (min: %s max : %s) of %s items\n\n' % ( - percentage_format_string % (match['percentage']) if percentage_format_string else match['percentage'], + format_string(percentage_format_string, match['percentage']) if percentage_format_string else match['percentage'], self.rules.get('min_percentage'), self.rules.get('max_percentage'), match['denominator'] @@ -1211,12 +1605,16 @@ def 
generate_aggregation_query(self): return { 'percentage_match_aggs': { 'filters': { - 'other_bucket': True, 'filters': { 'match_bucket': { 'bool': { 'must': self.match_bucket_filter } + }, + '_other_': { + 'bool': { + 'must_not': self.match_bucket_filter + } } } } @@ -1237,8 +1635,11 @@ def check_matches(self, timestamp, query_key, aggregation_data): match_percentage = (match_bucket_count * 1.0) / (total_count * 1.0) * 100 if self.percentage_violation(match_percentage): match = {self.rules['timestamp_field']: timestamp, 'percentage': match_percentage, 'denominator': total_count} + percentage_format_string = self.rules.get('percentage_format_string', None) + if percentage_format_string is not None: + match['percentage_formatted'] = format_string(percentage_format_string, match_percentage) if query_key is not None: - match[self.rules['query_key']] = query_key + match = expand_string_into_dict(match, self.rules['query_key'], query_key) self.add_match(match) def percentage_violation(self, match_percentage): diff --git a/elastalert/schema.yaml b/elastalert/schema.yaml index 1241315dc..7893778d1 100644 --- a/elastalert/schema.yaml +++ b/elastalert/schema.yaml @@ -34,7 +34,28 @@ definitions: milliseconds: {type: number} schedule: {type: string} - filter: &filter {} + slackField: &slackField + type: object + additionalProperties: false + properties: + title: {type: string} + value: {type: string} + short: {type: boolean} + + arrayOfSlackFields: &arrayOfSlackField + type: array + items: *slackField + + msTeamsFact: &msTeamsFact + type: object + additionalProperties: false + properties: + name: {type: string} + value: {type: string} + + arrayOfMsTeamsFacts: &arrayOfMsTeamsFacts + type: array + items: *msTeamsFact mattermostField: &mattermostField type: object @@ -45,7 +66,13 @@ definitions: args: *arrayOfString short: {type: boolean} -required: [type, index, alert] + arrayOfMattermostFields: &arrayOfMattermostField + type: array + items: *mattermostField + + filter: &filter {} + +required: [type, alert] type: object ### Rule Types section @@ -84,7 +111,6 @@ oneOf: num_events: {type: integer} timeframe: *timeframe use_count_query: {type: boolean} - doc_type: {type: string} use_terms_query: {type: boolean} terms_size: {type: integer} attach_related: {type: boolean} @@ -97,12 +123,12 @@ oneOf: spike_type: {enum: ["up", "down", "both"]} timeframe: *timeframe use_count_query: {type: boolean} - doc_type: {type: string} use_terms_query: {type: boolean} terms_size: {type: integer} alert_on_new_data: {type: boolean} threshold_ref: {type: integer} threshold_cur: {type: integer} + field_value: {type: string} - title: Spike Aggregation required: [spike_height, spike_type, timeframe] @@ -110,16 +136,16 @@ oneOf: type: {enum: [spike_aggregation]} spike_height: {type: number} spike_type: {enum: ["up", "down", "both"]} - metric_agg_type: {enum: ["min", "max", "avg", "sum", "cardinality", "value_count"]} + metric_agg_type: {enum: ["min", "max", "avg", "sum", "cardinality", "value_count", "percentiles"]} timeframe: *timeframe use_count_query: {type: boolean} - doc_type: {type: string} use_terms_query: {type: boolean} terms_size: {type: integer} alert_on_new_data: {type: boolean} threshold_ref: {type: number} threshold_cur: {type: number} min_doc_count: {type: integer} + percentile_range: {type: integer} - title: Flatline required: [threshold, timeframe] @@ -128,7 +154,9 @@ oneOf: timeframe: *timeframe threshold: {type: integer} use_count_query: {type: boolean} - doc_type: {type: string} + use_terms_query: {type: 
boolean} + terms_size: {type: integer} + forget_keys: {type: boolean} - title: New Term required: [] @@ -139,6 +167,8 @@ oneOf: alert_on_missing_field: {type: boolean} use_terms_query: {type: boolean} terms_size: {type: integer} + window_step_size: *timeframe + use_keyword_postfix: {type: boolean} - title: Cardinality required: [cardinality_field, timeframe] @@ -153,18 +183,48 @@ oneOf: required: [metric_agg_key,metric_agg_type] properties: type: {enum: [metric_aggregation]} - metric_agg_type: {enum: ["min", "max", "avg", "sum", "cardinality", "value_count"]} - #timeframe: *timeframe + metric_agg_type: {enum: ["min", "max", "avg", "sum", "cardinality", "value_count", "percentiles"]} + percentile_range: {type: integer} + max_threshold: {type: number} + min_threshold: {type: number} + min_doc_count: {type: integer} + use_run_every_query_size: {type: boolean} + allow_buffer_time_overlap: {type: boolean} + bucket_interval: *timeframe + sync_bucket_interval: {type: boolean} + metric_format_string: {type: string} - title: Percentage Match required: [match_bucket_filter] properties: type: {enum: [percentage_match]} + min_percentage: {type: number} + max_percentage: {type: number} + use_run_every_query_size: {type: boolean} + allow_buffer_time_overlap: {type: boolean} + bucket_interval: *timeframe + sync_bucket_interval: {type: boolean} + percentage_format_string: {type: string} + min_denominator: {type: integer} - title: Custom Rule from Module properties: # custom rules include a period in the rule type type: {pattern: "[.]"} + + - title: Error Rate + required: [sampling, threshold] + properties: + sampling: {type: integer} + threshold: {type: number} + error_condition: {type: string} + unique_column: {type: string} + + - title: Advanced Query + required: [alert_field] + properties: + type: {enum: [advanced_query]} + properties: @@ -178,130 +238,366 @@ properties: verify_certs: {type: boolean} es_username: {type: string} es_password: {type: string} + es_bearer: {type: string} use_strftime_index: {type: boolean} # Optional Settings - import: {type: string} + es_hosts: {type: array, items: {type: string}} + import: + anyOf: + - type: array + items: + type: string + - type: string aggregation: *timeframe realert: *timeframe + realert_key: {type: string} exponential_realert: *timeframe buffer_time: *timeframe query_delay: *timeframe max_query_size: {type: integer} max_scrolling: {type: integer} + max_threads: {type: integer} + misfire_grace_time: {type: integer} owner: {type: string} priority: {type: integer} filter : - type: [array, object] + type: array items: *filter - additionalProperties: false - properties: - download_dashboard: {type: string} include: {type: array, items: {type: string}} top_count_keys: {type: array, items: {type: string}} top_count_number: {type: integer} raw_count_keys: {type: boolean} - generate_kibana_link: {type: boolean} - kibana_dashboard: {type: string} - use_kibana_dashboard: {type: string} + kibana_url: {type: string, format: uri} + kibana_username: {type: string} + kibana_password: {type: string} + kibana_verify_certs: {type: boolean} use_local_time: {type: boolean} + custom_pretty_ts_format: {type: string} match_enhancements: {type: array, items: {type: string}} query_key: *arrayOfString replace_dots_in_field_names: {type: boolean} scan_entire_timeframe: {type: boolean} + ### summary table + summary_table_fields: {type: array, items: {type: string}} + summary_table_type: {type: string, enum: ['ascii', 'markdown']} + summary_table_max_rows: {type: integer, minimum: 
0} + summary_prefix: {type: string} + summary_suffix: {type: string} + ### Kibana Discover App Link generate_kibana_discover_url: {type: boolean} - kibana_discover_app_url: {type: string, format: uri} - kibana_discover_version: {type: string, enum: ['7.3', '7.2', '7.1', '7.0', '6.8', '6.7', '6.6', '6.5', '6.4', '6.3', '6.2', '6.1', '6.0', '5.6']} + shorten_kibana_discover_url: {type: boolean} + kibana_discover_app_url: {type: string} + kibana_discover_version: {type: string, enum: ['8.6', '8.5', '8.4', '8.3', '8.2', '8.1', '8.0', '7.17', '7.16', '7.15', '7.14', '7.13', '7.12', '7.11', '7.10', '7.9', '7.8', '7.7', '7.6', '7.5', '7.4', '7.3', '7.2', '7.1', '7.0']} kibana_discover_index_pattern_id: {type: string, minLength: 1} kibana_discover_columns: {type: array, items: {type: string, minLength: 1}, minItems: 1} kibana_discover_from_timedelta: *timedelta kibana_discover_to_timedelta: *timedelta + kibana_discover_security_tenant: {type:string} # Alert Content alert_text: {type: string} # Python format string alert_text_args: {type: array, items: {type: string}} alert_text_kw: {type: object} - alert_text_type: {enum: [alert_text_only, exclude_fields, aggregation_summary_only]} + alert_text_type: {enum: [alert_text_only, alert_text_jinja, exclude_fields, aggregation_summary_only]} alert_missing_value: {type: string} timestamp_field: {type: string} field: {} - ### Commands + ### Alerta + alerta_api_url: {type: string} + alerta_api_key: {type: string} + alerta_use_qk_as_resource: {type: boolean} + alerta_use_match_timestamp: {type: boolean} + alerta_api_skip_ssl: {type: boolean} + alerta_severity: {type: string} + alerta_timeout: {type: integer} + alerta_type: {type: string} + alerta_resource: {type: string} # Python format string + alerta_service: {type: array, items: {type: string}} # Python format string + alerta_origin: {type: string} # Python format string + alerta_environment: {type: string} # Python format string + alerta_group: {type: string} # Python format string + alerta_correlate: {type: array, items: {type: string}} # Python format string + alerta_tags: {type: array, items: {type: string}} # Python format string + alerta_event: {type: string} # Python format stringalerta_use_match_timestamp + alerta_text: {type: string} # Python format string + alerta_value: {type: string} # Python format string + alerta_attributes_keys: {type: array, items: {type: string}} + alerta_attributes_values: {type: array, items: {type: string}} # Python format string + + ### Alertmanager + alertmanager_alertname: {type: string} + alertmanager_hosts: {type: array, items: {type: string}} + alertmanager_api_version: {type: string, enum: ['v1', 'v2']} + alertmanager_alert_subject_labelname: {type: string} + alertmanager_alert_text_labelname: {type: string} + alertmanager_proxy: {type: string} + alertmanager_ca_certs: {type: [boolean, string]} + alertmanager_ignore_ssl_errors: {type: boolean} + alertmanager_timeout: {type: integer} + alertmanager_basic_auth_login: {type: string} + alertmanager_basic_auth_password: {type: string} + alertmanager_labels: + type: object + minProperties: 1 + patternProperties: + "^.+$": + oneOf: + - type: string + - type: object + additionalProperties: false + required: [field] + properties: + field: {type: string, minLength: 1} + alertmanager_annotations: + type: object + minProperties: 1 + patternProperties: + "^.+$": + oneOf: + - type: string + - type: object + additionalProperties: false + required: [field] + properties: + field: {type: string, minLength: 1} + 
alertmanager_fields: + type: object + minProperties: 1 + patternProperties: + "^.+$": + oneOf: + - type: string + - type: object + additionalProperties: false + required: [field] + properties: + field: {type: string, minLength: 1} + + ### AWS SES + ses_email: *arrayOfString + ses_from_addr: {type: string} + ses_aws_access_key: {type: string} + ses_aws_secret_key: {type: string} + ses_aws_region: {type: string} + ses_aws_profile: {type: string} + ses_email_reply_to: {type: string} + ses_cc: *arrayOfString + ses_bcc: *arrayOfString + + ### AWS SNS + sns_topic_arn: {type: string} + sns_aws_access_key_id: {type: string} + sns_aws_secret_access_key: {type: string} + sns_aws_region: {type: string} + sns_aws_profile: {type: string} + + ### Chatwork + chatwork_apikey: {type: string} + chatwork_room_id: {type: string} + chatwork_proxy: {type: string} + chatwork_proxy_login: {type: string} + chatwork_proxy_pass: {type: string} + + ### Command command: *arrayOfString pipe_match_json: {type: boolean} + pipe_alert_text: {type: boolean} fail_on_non_zero_exit: {type: boolean} + ### Datadog + datadog_api_key: {type: string} + datadog_app_key: {type: string} + + ### Dingtalk + dingtalk_access_token: {type: string} + dingtalk_msgtype: {type: string, enum: ['text', 'markdown', 'single_action_card', 'action_card']} + dingtalk_single_title: {type: string} + dingtalk_single_url: {type: string} + dingtalk_btn_orientation: {type: string} + dingtalk_proxy: {type: string} + dingtalk_proxy_login: {type: string} + dingtalk_proxy_pass: {type: string} + + ## Discord + discord_webhook_url: {type: string} + discord_emoji_title: {type: string} + discord_proxy: {type: string} + discord_proxy_login: {type: string} + discord_proxy_password: {type: string} + discord_embed_color: {type: integer} + discord_embed_footer: {type: string} + discord_embed_icon_url: {type: string} + ### Email email: *arrayOfString - email_reply_to: {type: string} - notify_email: *arrayOfString # if rule is slow or erroring, send to this email + email_from_field: {type: string} + email_add_domain: {type: string} smtp_host: {type: string} + smtp_port: {type: integer} + smtp_ssl: {type: boolean} + smtp_auth_file: {type: string} + smtp_cert_file: {type: string} + smtp_key_file: {type: string} + email_reply_to: {type: string} from_addr: {type: string} + cc: *arrayOfString + bcc: *arrayOfString + email_format: {type: string} + assets_dir: {type: string} + email_image_keys: {type: array, items: {type: string}} + email_image_values: {type: array, items: {type: string}} + notify_email: *arrayOfString # if rule is slow or erroring, send to this email + + ### Exotel + exotel_account_sid: {type: string} + exotel_auth_token: {type: string} + exotel_to_number: {type: string} + exotel_from_number: {type: string} + exotel_message_body: {type: string} + + ### Gitter + gitter_webhook_url: {type: string} + gitter_msg_level: {enum: [info, error]} + gitter_proxy: {type: string} - ### JIRA + ### Gelf + gelf_type: {type: string, enum: ['http', 'tcp']} + gelf_endpoint: {type: string} + gelf_host: {type: string} + gelf_port: {type: integer} + gelf_payload: + type: object + minProperties: 1 + patternProperties: + "^.+$": + oneOf: + - type: string + - type: object + additionalProperties: false + required: [field] + properties: + field: {type: string, minLength: 1} + gelf_log_level: {type: integer, enum: ['0', 1', '2', '3', '4', '5', '6', '7']} + gelf_http_headers: + type: object + minProperties: 1 + patternProperties: + "^.+$": + oneOf: + - type: string + - type: object + 
additionalProperties: false + required: [ field ] + properties: + field: { type: string, minLength: 1 } + gelf_ca_cert: {type: string} + gelf_http_ignore_ssl_errors: {type: boolean} + gelf_timeout: {type: integer} + + ### GoogleChat + googlechat_webhook_url: *arrayOfString + googlechat_format: {type: string, enum: ['basic', 'card']} + googlechat_header_title: {type: string} + googlechat_header_subtitle: {type: string} + googlechat_header_image: {type: string} + googlechat_footer_kibanalink: {type: string} + googlechat_proxy: {type: string} + + ### HTTP POST + http_post_url: *arrayOfString + http_post_proxy: {type: string} + http_post_ca_certs: {type: [boolean, string]} + http_post_ignore_ssl_errors: {type: boolean} + http_post_timeout: {type: integer} + + ### HTTP POST 2 + http_post2_url: *arrayOfString + http_post2_proxy: {type: string} + http_post2_ca_certs: {type: [boolean, string]} + http_post2_ignore_ssl_errors: {type: boolean} + http_post2_timeout: {type: integer} + + ### Jira jira_server: {type: string} jira_project: {type: string} jira_issuetype: {type: string} jira_account_file: {type: string} # a Yaml file that includes the keys {user:, password:} - jira_assignee: {type: string} jira_component: *arrayOfString jira_components: *arrayOfString + jira_description: {type: string} jira_label: *arrayOfString jira_labels: *arrayOfString + jira_priority: {type: number} + jira_watchers: *arrayOfString jira_bump_tickets: {type: boolean} - jira_bump_in_statuses: *arrayOfString - jira_bump_not_in_statuses: *arrayOfString + jira_ignore_in_title: {type: string} jira_max_age: {type: number} - jira_watchers: *arrayOfString - - ### HipChat - hipchat_auth_token: {type: string} - hipchat_room_id: {type: [string, integer]} - hipchat_domain: {type: string} - hipchat_ignore_ssl_errors: {type: boolean} - hipchat_notify: {type: boolean} - hipchat_from: {type: string} - hipchat_mentions: {type: array, items: {type: string}} - - ### Stride - stride_access_token: {type: string} - stride_cloud_id: {type: string} - stride_conversation_id: {type: string} - stride_ignore_ssl_errors: {type: boolean} + jira_bump_not_in_statuses: *arrayOfString + jira_bump_in_statuses: *arrayOfString + jira_bump_only: {type: boolean} + jira_transition_to: {type: string} + jira_bump_after_inactivity: {type: number} - ### Slack - slack_webhook_url: *arrayOfString - slack_username_override: {type: string} - slack_emoji_override: {type: string} - slack_icon_url_override: {type: string} - slack_msg_color: {enum: [good, warning, danger]} - slack_parse_override: {enum: [none, full]} - slack_text_string: {type: string} - slack_ignore_ssl_errors: {type: boolean} - slack_ca_certs: {type: string} - slack_attach_kibana_discover_url: {type: boolean} - slack_kibana_discover_color: {type: string} - slack_kibana_discover_title: {type: string} + ### Line Notify + linenotify_access_token: {type: string} ### Mattermost mattermost_webhook_url: *arrayOfString mattermost_proxy: {type: string} mattermost_ignore_ssl_errors: {type: boolean} mattermost_username_override: {type: string} + mattermost_channel_override: *arrayOfString + mattermost_emoji_override: {type: string} mattermost_icon_url_override: {type: string} - mattermost_channel_override: {type: string} - mattermost_msg_color: {enum: [good, warning, danger]} mattermost_msg_pretext: {type: string} - mattermost_msg_fields: *mattermostField - - ## Opsgenie + mattermost_msg_color: {enum: [good, warning, danger]} + mattermost_msg_fields: *arrayOfMattermostField + mattermost_title: {type: string} + 
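Reviewer note: the Mattermost options listed above continue just below (title link, footer, Kibana Discover attachment, and so on). As with Alertmanager, a short illustrative fragment may help orient readers; it is a sketch, not taken from the repository, with a placeholder webhook URL and channel name, and it assumes the alerter is enabled with "alert: [mattermost]". Only the alerter-specific keys are shown; the rest of the rule (type, index, filter) is unchanged.

# Sketch of the Mattermost alerter portion of a rule (placeholder values)
alert:
- mattermost
mattermost_webhook_url:
  - "https://mattermost.example.com/hooks/xxxxxxxx"   # placeholder webhook
mattermost_channel_override:
  - alerts                                            # placeholder channel
mattermost_username_override: elastalert
mattermost_emoji_override: ":warning:"
mattermost_msg_color: danger
mattermost_msg_pretext: "Example pretext line"
mattermost_title: "Example alert title"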
mattermost_title_link: {type: string} + mattermost_footer: {type: string} + mattermost_footer_icon: {type: string} + mattermost_image_url: {type: string} + mattermost_thumb_url: {type: string} + mattermost_author_name: {type: string} + mattermost_author_link: {type: string} + mattermost_author_icon: {type: string} + mattermost_attach_kibana_discover_url: {type: boolean} + mattermost_kibana_discover_color: {type: string} + mattermost_kibana_discover_title: {type: string} + + ### Microsoft Teams + ms_teams_webhook_url: *arrayOfString + ms_teams_alert_summary: {type: string} + ms_teams_theme_color: {type: string} + ms_teams_proxy: {type: string} + ms_teams_alert_fixed_width: {type: boolean} + ms_teams_alert_facts: *arrayOfMsTeamsFacts + ms_teams_attach_kibana_discover_url: {type: boolean} + ms_teams_kibana_discover_title: {type: string} + ms_teams_ca_certs: {type: [boolean, string]} + ms_teams_ignore_ssl_errors: {type: boolean} + + ### Opsgenie + opsgenie_key: {type: string} + opsgenie_account: {type: string} + opsgenie_addr: {type: string} + opsgenie_description: {type:string} + opsgenie_message: {type: string} + opsgenie_alias: {type: string} + opsgenie_subject: {type: string} + opsgenie_priority: {type: string} + opsgenie_proxy: {type: string} + opsgenie_source: {type: string} + opsgenie_entity: {type: string} opsgenie_details: type: object minProperties: 1 @@ -319,71 +615,131 @@ properties: pagerduty_service_key: {type: string} pagerduty_client_name: {type: string} pagerduty_event_type: {enum: [none, trigger, resolve, acknowledge]} - -### PagerTree + pagerduty_incident_key: {type: string} + pagerduty_incident_key_args: {type: array, items: {type: string}} + pagerduty_proxy: {type: string} + pagerduty_api_version: {type: string, enum: ['v1', 'v2']} + pagerduty_v2_payload_class: {type: string} + pagerduty_v2_payload_class_args: {type: array, items: {type: string}} + pagerduty_v2_payload_component: {type: string} + pagerduty_v2_payload_component_args: {type: array, items: {type: string}} + pagerduty_v2_payload_group: {type: string} + pagerduty_v2_payload_group_args: {type: array, items: {type: string}} + pagerduty_v2_payload_severity: {type: string, enum: ['critical', 'error', 'warning', 'info']} + pagerduty_v2_payload_source: {type: string} + pagerduty_v2_payload_source_args: {type: array, items: {type: string}} + pagerduty_v2_payload_include_all_info: {type: boolean} + + ### PagerTree pagertree_integration_url: {type: string} + pagertree_proxy: {type: string} + + ### RocketChat + rocket_chat_webhook_url: *arrayOfString + rocket_chat_username_override: {type: string} + rocket_chat_channel_override: *arrayOfString + rocket_chat_emoji_override: {type: string} + rocket_chat_msg_color: {enum: [good, warning, danger]} + rocket_chat_text_string: {type: string} + rocket_chat_proxy: {type: string} + rocket_chat_attach_kibana_discover_url: {type: boolean} + rocket_chat_kibana_discover_color: {type: string} + rocket_chat_kibana_discover_title: {type: string} + rocket_chat_ca_certs: {type: [boolean, string]} + rocket_chat_ignore_ssl_errors: {type: boolean} + rocket_chat_timeout: {type: integer} + + ### ServiceNow + servicenow_rest_url: {type: string} + username: {type: string} + password: {type: string} + short_description: {type: string} + comments: {type: string} + assignment_group: {type: string} + category: {type: string} + subcategory: {type: string} + cmdb_ci: {type: string} + caller_id: {type: string} + servicenow_proxy: {type: string} + servicenow_impact: {type: integer, minimum: 1, maximum: 
3} + servicenow_urgency: {type: integer, minimum: 1, maximum: 3} - - ### Exotel - exotel_account_sid: {type: string} - exotel_auth_token: {type: string} - exotel_to_number: {type: string} - exotel_from_number: {type: string} - - ### Twilio - twilio_account_sid: {type: string} - twilio_auth_token: {type: string} - twilio_to_number: {type: string} - twilio_from_number: {type: string} - - ### VictorOps + ### Slack + slack_webhook_url: *arrayOfString + slack_username_override: {type: string} + slack_channel_override: *arrayOfString + slack_emoji_override: {type: string} + slack_icon_url_override: {type: string} + slack_msg_color: {enum: [good, warning, danger]} + slack_parse_override: {enum: [none, full]} + slack_text_string: {type: string} + slack_proxy: {type: string} + slack_alert_fields: *arrayOfSlackField + slack_ignore_ssl_errors: {type: boolean} + slack_title: {type: string} + slack_title_link: {type: string} + slack_timeout: {type: integer} + slack_attach_kibana_discover_url: {type: boolean} + slack_kibana_discover_color: {type: string} + slack_kibana_discover_title: {type: string} + slack_attach_jira_ticket_url: {type: boolean} + slack_jira_ticket_color: {type: string} + slack_jira_ticket_title: {type: string} + slack_ca_certs: {type: [boolean, string]} + slack_footer: {type: string} + slack_footer_icon: {type: string} + slack_image_url: {type: string} + slack_thumb_url: {type: string} + slack_author_name: {type: string} + slack_author_link: {type: string} + slack_author_icon: {type: string} + slack_msg_pretext: {type: string} + + ### Splunk On-Call (Formerly VictorOps) victorops_api_key: {type: string} victorops_routing_key: {type: string} victorops_message_type: {enum: [INFO, WARNING, ACKNOWLEDGEMENT, CRITICAL, RECOVERY]} victorops_entity_id: {type: string} victorops_entity_display_name: {type: string} + victorops_proxy: {type: string} + + ### Stomp + stomp_hostname: {type: string} + stomp_hostport: {type: string} + stomp_login: {type: string} + stomp_password: {type: string} + stomp_destination: {type: string} ### Telegram telegram_bot_token: {type: string} telegram_room_id: {type: string} telegram_api_url: {type: string} - - ### Gitter - gitter_webhook_url: {type: string} - gitter_proxy: {type: string} - gitter_msg_level: {enum: [info, error]} - - ### Alerta - alerta_api_url: {type: string} - alerta_api_key: {type: string} - alerta_severity: {enum: [unknown, security, debug, informational, ok, normal, cleared, indeterminate, warning, minor, major, critical]} - alerta_resource: {type: string} # Python format string - alerta_environment: {type: string} # Python format string - alerta_origin: {type: string} # Python format string - alerta_group: {type: string} # Python format string - alerta_service: {type: array, items: {type: string}} # Python format string - alerta_service: {type: array, items: {type: string}} # Python format string - alerta_correlate: {type: array, items: {type: string}} # Python format string - alerta_tags: {type: array, items: {type: string}} # Python format string - alerta_event: {type: string} # Python format string - alerta_customer: {type: string} - alerta_text: {type: string} # Python format string - alerta_type: {type: string} - alerta_value: {type: string} # Python format string - alerta_attributes_keys: {type: array, items: {type: string}} - alerta_attributes_values: {type: array, items: {type: string}} # Python format string - alerta_new_style_string_format: {type: boolean} - - - ### Simple - simple_webhook_url: *arrayOfString - simple_proxy: {type: 
string} - - ### LineNotify - linenotify_access_token: {type: string} + telegram_proxy: {type: string} + telegram_proxy_login: {type: string} + telegram_proxy_pass: {type: string} + telegram_parse_mode: {type: string, enum: ['markdown', 'markdownV2', 'html']} + + ### Tencent SMS + tencent_sms_secret_id: {type: string} + tencent_sms_secret_key: {type: string} + tencent_sms_sdk_appid: {type: string} + tencent_sms_to_number: {type: array, items: {type: string}} + tencent_sms_region: {type: string} + tencent_sms_sign_name: {type: string} + tencent_sms_template_id: {type: string} + tencent_sms_template_parm: {type: array, items: {type: string}} + + ### Twilio + twilio_account_sid: {type: string} + twilio_auth_token: {type: string} + twilio_to_number: {type: string} + twilio_from_number: {type: string} + twilio_message_service_sid: {type: string} + twilio_use_copilot: {type: boolean} ### Zabbix zbx_sender_host: {type: string} zbx_sender_port: {type: integer} + zbx_host_from_field: {type: boolean} zbx_host: {type: string} - zbx_item: {type: string} + zbx_key: {type: string} diff --git a/elastalert/test_rule.py b/elastalert/test_rule.py index 06100aa0f..b738bd502 100644 --- a/elastalert/test_rule.py +++ b/elastalert/test_rule.py @@ -10,11 +10,10 @@ import string import sys -import mock +from unittest import mock from elastalert.config import load_conf from elastalert.elastalert import ElastAlerter -from elastalert.util import EAException from elastalert.util import elasticsearch_client from elastalert.util import lookup_es_key from elastalert.util import ts_now @@ -42,52 +41,167 @@ def print_terms(terms, parent): class MockElastAlerter(object): - def __init__(self): + def _parse_args(self, args: list) -> argparse.Namespace: + """Uses args to run the various components of MockElastAlerter such as loading the file, saving data, loading data""" + parser = argparse.ArgumentParser(description="Validate a rule configuration") + parser.add_argument( + "file", metavar="rule", type=str, help="rule configuration filename" + ) + parser.add_argument( + "--schema-only", + action="store_true", + help="Show only schema errors; do not run query", + ) + parser.add_argument( + "--days", + type=int, + default=0, + action="store", + help="Query the previous N days with this rule", + ) + parser.add_argument( + "--start", + dest="start", + help="YYYY-MM-DDTHH:MM:SS Start querying from this timestamp.", + ) + parser.add_argument( + "--end", + dest="end", + help="YYYY-MM-DDTHH:MM:SS Query to this timestamp. (Default: present) " + 'Use "NOW" to start from current time. 
(Default: present)', + ) + parser.add_argument( + "--stop-error", + action="store_true", + help="Stop the entire test right after the first error", + ) + parser.add_argument( + "--formatted-output", + action="store_true", + help="Output results in formatted JSON", + ) + parser.add_argument( + "--data", + type=str, + metavar="FILENAME", + action="store", + dest="json", + help="A JSON file containing data to run the rule against", + ) + parser.add_argument( + "--alert", + action="store_true", + help="Use actual alerts instead of debug output", + ) + parser.add_argument( + "--save-json", + type=str, + metavar="FILENAME", + action="store", + dest="save", + help="A file to which documents from the last day or --days will be saved", + ) + parser.add_argument( + "--use-downloaded", + action="store_true", + dest="use_downloaded", + help="Use the downloaded", + ) + parser.add_argument( + "--max-query-size", + type=int, + default=10000, + action="store", + dest="max_query_size", + help="Maximum size of any query", + ) + parser.add_argument( + "--count-only", + action="store_true", + dest="count", + help="Only display the number of documents matching the filter", + ) + parser.add_argument( + "--config", + action="store", + dest="config", + help="Global config file.", + ) + parsed_args = parser.parse_args(args) + + # Set arguments that ElastAlerter needs + parsed_args.verbose = parsed_args.alert + parsed_args.debug = not parsed_args.alert + parsed_args.es_debug = False + parsed_args.es_debug_trace = False + + return parsed_args + + def str_to_ts(self, input: str) -> datetime: + if input == "NOW": + return self.ts_now + + try: + return ts_to_dt(input) + except (TypeError, ValueError): + raise Exception( + f"Input is not a valid ISO8601 timestamp (YYYY-MM-DDTHH:MM:SS+XX:00): {input}" + ) + + def parse_starttime(self, timeframe=None) -> datetime: + if self.args.start: + try: + return self.str_to_ts(self.args.start) + except Exception: + raise + + if self.args.days > 0: + return self.endtime - datetime.timedelta(days=self.args.days) + + # Special case executed later after initialisation + if timeframe is not None: + return self.endtime - datetime.timedelta( + seconds=timeframe.total_seconds() * 1.01 + ) + + # Default is 1 days / 24 hours + return self.endtime - datetime.timedelta(days=1) + + def __init__(self, args): + self.args = self._parse_args(args) self.data = [] self.formatted_output = {} - - def test_file(self, conf, args): - """ Loads a rule config file, performs a query over the last day (args.days), lists available keys - and prints the number of results. 
""" - if args.schema_only: + self.ts_now = ts_now() + # We need to store endtime before starttime, please see method `parse_starttime` + self.endtime = self.str_to_ts(self.args.end) if self.args.end else self.ts_now + self.starttime = self.parse_starttime() + + def test_file(self, conf): + """Loads a rule config file, performs a query over the last day (self.args.days), lists available keys + and prints the number of results.""" + if self.args.schema_only: return [] # Set up Elasticsearch client and query es_client = elasticsearch_client(conf) - try: - ElastAlerter.modify_rule_for_ES5(conf) - except EAException as ea: - print('Invalid filter provided:', str(ea), file=sys.stderr) - if args.stop_error: - exit(3) - return None - except Exception as e: - print("Error connecting to ElasticSearch:", file=sys.stderr) - print(repr(e)[:2048], file=sys.stderr) - if args.stop_error: - exit(1) - return None - start_time = ts_now() - datetime.timedelta(days=args.days) - end_time = ts_now() ts = conf.get('timestamp_field', '@timestamp') query = ElastAlerter.get_query( conf['filter'], - starttime=start_time, - endtime=end_time, + starttime=self.starttime, + endtime=self.endtime, timestamp_field=ts, - to_ts_func=conf['dt_to_ts'], - five=conf['five'] + to_ts_func=conf['dt_to_ts'] ) - index = ElastAlerter.get_index(conf, start_time, end_time) + index = ElastAlerter.get_index(conf, self.starttime, self.endtime) # Get one document for schema try: - res = es_client.search(index, size=1, body=query, ignore_unavailable=True) + res = es_client.search(index=index, size=1, body=query, ignore_unavailable=True) except Exception as e: print("Error running your filter:", file=sys.stderr) print(repr(e)[:2048], file=sys.stderr) - if args.stop_error: + if self.args.stop_error: exit(3) return None num_hits = len(res['hits']['hits']) @@ -96,36 +210,37 @@ def test_file(self, conf, args): return [] terms = res['hits']['hits'][0]['_source'] - doc_type = res['hits']['hits'][0]['_type'] # Get a count of all docs count_query = ElastAlerter.get_query( conf['filter'], - starttime=start_time, - endtime=end_time, + starttime=self.starttime, + endtime=self.endtime, timestamp_field=ts, to_ts_func=conf['dt_to_ts'], - sort=False, - five=conf['five'] + sort=False ) try: - res = es_client.count(index, doc_type=doc_type, body=count_query, ignore_unavailable=True) + res = es_client.count(index=index, body=count_query, ignore_unavailable=True) except Exception as e: print("Error querying Elasticsearch:", file=sys.stderr) print(repr(e)[:2048], file=sys.stderr) - if args.stop_error: + if self.args.stop_error: exit(2) return None num_hits = res['count'] - if args.formatted_output: + if self.args.formatted_output: self.formatted_output['hits'] = num_hits - self.formatted_output['days'] = args.days + self.formatted_output['days'] = self.args.days self.formatted_output['terms'] = list(terms.keys()) self.formatted_output['result'] = terms else: - print("Got %s hits from the last %s day%s" % (num_hits, args.days, 's' if args.days > 1 else '')) + print( + "Got %s hits from the last %s day%s" + % (num_hits, self.args.days, "s" if self.args.days > 1 else "") + ) print("\nAvailable terms in first hit:") print_terms(terms, '') @@ -147,22 +262,22 @@ def test_file(self, conf, args): # If the index starts with 'logstash', fields with .raw will be available but won't in _source if term not in terms and not (term.endswith('.raw') and term[:-4] in terms and index.startswith('logstash')): print("top_count_key %s may be missing" % (term), file=sys.stderr) - if 
not args.formatted_output: + if not self.args.formatted_output: print('') # Newline # Download up to max_query_size (defaults to 10,000) documents to save - if (args.save or args.formatted_output) and not args.count: + if (self.args.save or self.args.formatted_output) and not self.args.count: try: - res = es_client.search(index, size=args.max_query_size, body=query, ignore_unavailable=True) + res = es_client.search(index=index, size=self.args.max_query_size, body=query, ignore_unavailable=True) except Exception as e: print("Error running your filter:", file=sys.stderr) print(repr(e)[:2048], file=sys.stderr) - if args.stop_error: + if self.args.stop_error: exit(2) return None num_hits = len(res['hits']['hits']) - if args.save: + if self.args.save: print("Downloaded %s documents to save" % (num_hits)) return res['hits']['hits'] @@ -222,29 +337,30 @@ def mock_elastalert(self, elastalert): elastalert.get_hits = self.mock_hits elastalert.elasticsearch_client = mock.Mock() - def run_elastalert(self, rule, conf, args): + def run_elastalert(self, rule, conf): """ Creates an ElastAlert instance and run's over for a specific rule using either real or mock data. """ # Load and instantiate rule # Pass an args containing the context of whether we're alerting or not # It is needed to prevent unnecessary initialization of unused alerters load_modules_args = argparse.Namespace() - load_modules_args.debug = not args.alert + load_modules_args.debug = not self.args.alert conf['rules_loader'].load_modules(rule, load_modules_args) # If using mock data, make sure it's sorted and find appropriate time range timestamp_field = rule.get('timestamp_field', '@timestamp') - if args.json: + if self.args.json: if not self.data: return None try: + if isinstance(self.data, dict): + self.data = [self.data] self.data.sort(key=lambda x: x[timestamp_field]) - starttime = ts_to_dt(self.data[0][timestamp_field]) - endtime = self.data[-1][timestamp_field] - endtime = ts_to_dt(endtime) + datetime.timedelta(seconds=1) + self.starttime = self.str_to_ts(self.data[0][timestamp_field]) + self.endtime = self.str_to_ts(self.data[-1][timestamp_field]) + datetime.timedelta(seconds=1) except KeyError as e: print("All documents must have a timestamp and _id: %s" % (e), file=sys.stderr) - if args.stop_error: + if self.args.stop_error: exit(4) return None @@ -261,39 +377,14 @@ def get_id(): for doc in self.data: doc.update({'_id': doc.get('_id', get_id())}) else: - if args.end: - if args.end == 'NOW': - endtime = ts_now() - else: - try: - endtime = ts_to_dt(args.end) - except (TypeError, ValueError): - self.handle_error("%s is not a valid ISO8601 timestamp (YYYY-MM-DDTHH:MM:SS+XX:00)" % (args.end)) - exit(4) - else: - endtime = ts_now() - if args.start: - try: - starttime = ts_to_dt(args.start) - except (TypeError, ValueError): - self.handle_error("%s is not a valid ISO8601 timestamp (YYYY-MM-DDTHH:MM:SS+XX:00)" % (args.start)) - exit(4) - else: - # if days given as command line argument - if args.days > 0: - starttime = endtime - datetime.timedelta(days=args.days) - else: - # if timeframe is given in rule - if 'timeframe' in rule: - starttime = endtime - datetime.timedelta(seconds=rule['timeframe'].total_seconds() * 1.01) - # default is 1 days / 24 hours - else: - starttime = endtime - datetime.timedelta(days=1) + # Updating starttime based on timeframe rule + if "timeframe" in rule: + self.starttime = self.parse_starttime(timeframe=rule["timeframe"]) # Set run_every to cover the entire time range unless count query, terms query or agg query 
used # This is to prevent query segmenting which unnecessarily slows down tests if not rule.get('use_terms_query') and not rule.get('use_count_query') and not rule.get('aggregation_query_element'): - conf['run_every'] = endtime - starttime + conf['run_every'] = self.endtime - self.starttime # Instantiate ElastAlert to use mock config and special rule with mock.patch.object(conf['rules_loader'], 'get_hashes'): @@ -301,13 +392,13 @@ def get_id(): load_rules.return_value = [rule] with mock.patch('elastalert.elastalert.load_conf') as load_conf: load_conf.return_value = conf - if args.alert: + if self.args.alert: client = ElastAlerter(['--verbose']) else: client = ElastAlerter(['--debug']) # Replace get_hits_* functions to use mock data - if args.json: + if self.args.json: self.mock_elastalert(client) # Mock writeback to return empty results @@ -315,82 +406,33 @@ def get_id(): client.writeback_es.search.return_value = {"hits": {"hits": []}} with mock.patch.object(client, 'writeback') as mock_writeback: - client.run_rule(rule, endtime, starttime) + client.run_rule(rule, self.endtime, self.starttime) if mock_writeback.call_count: - if args.formatted_output: + if self.args.formatted_output: self.formatted_output['writeback'] = {} else: print("\nWould have written the following documents to writeback index (default is elastalert_status):\n") errors = False for call in mock_writeback.call_args_list: - if args.formatted_output: + if self.args.formatted_output: self.formatted_output['writeback'][call[0][0]] = json.loads(json.dumps(call[0][1], default=str)) else: print("%s - %s\n" % (call[0][0], call[0][1])) if call[0][0] == 'elastalert_error': errors = True - if errors and args.stop_error: + if errors and self.args.stop_error: exit(2) def run_rule_test(self): - """ - Uses args to run the various components of MockElastAlerter such as loading the file, saving data, loading data, and running. - """ - parser = argparse.ArgumentParser(description='Validate a rule configuration') - parser.add_argument('file', metavar='rule', type=str, help='rule configuration filename') - parser.add_argument('--schema-only', action='store_true', help='Show only schema errors; do not run query') - parser.add_argument('--days', type=int, default=0, action='store', help='Query the previous N days with this rule') - parser.add_argument('--start', dest='start', help='YYYY-MM-DDTHH:MM:SS Start querying from this timestamp.') - parser.add_argument('--end', dest='end', help='YYYY-MM-DDTHH:MM:SS Query to this timestamp. (Default: present) ' - 'Use "NOW" to start from current time. 
(Default: present)') - parser.add_argument('--stop-error', action='store_true', help='Stop the entire test right after the first error') - parser.add_argument('--formatted-output', action='store_true', help='Output results in formatted JSON') - parser.add_argument( - '--data', - type=str, - metavar='FILENAME', - action='store', - dest='json', - help='A JSON file containing data to run the rule against') - parser.add_argument('--alert', action='store_true', help='Use actual alerts instead of debug output') - parser.add_argument( - '--save-json', - type=str, - metavar='FILENAME', - action='store', - dest='save', - help='A file to which documents from the last day or --days will be saved') - parser.add_argument( - '--use-downloaded', - action='store_true', - dest='use_downloaded', - help='Use the downloaded ' - ) - parser.add_argument( - '--max-query-size', - type=int, - default=10000, - action='store', - dest='max_query_size', - help='Maximum size of any query') - parser.add_argument( - '--count-only', - action='store_true', - dest='count', - help='Only display the number of documents matching the filter') - parser.add_argument('--config', action='store', dest='config', help='Global config file.') - args = parser.parse_args() - defaults = { 'rules_folder': 'rules', 'es_host': 'localhost', 'es_port': 14900, 'writeback_index': 'wb', - 'writeback_alias': 'wb_a', 'max_query_size': 10000, 'alert_time_limit': {'hours': 24}, 'old_query_limit': {'weeks': 1}, @@ -399,51 +441,64 @@ def run_rule_test(self): 'buffer_time': {'minutes': 45}, 'scroll_keepalive': '30s' } - overwrites = { - 'rules_loader': 'file', - } - # Set arguments that ElastAlerter needs - args.verbose = args.alert - args.debug = not args.alert - args.es_debug = False - args.es_debug_trace = False - - conf = load_conf(args, defaults, overwrites) - rule_yaml = conf['rules_loader'].load_yaml(args.file) - conf['rules_loader'].load_options(rule_yaml, conf, args.file) - - if args.json: - with open(args.json, 'r') as data_file: - self.data = json.loads(data_file.read()) + conf = load_conf(self.args, defaults) + rule_yaml = conf['rules_loader'].load_yaml(self.args.file) + conf['rules_loader'].load_options(rule_yaml, conf, self.args.file) + + if self.args.json: + try: + with open(self.args.json, "r") as data_file: + self.data = json.loads(data_file.read()) + except OSError: + raise else: - hits = self.test_file(copy.deepcopy(rule_yaml), args) - if hits and args.formatted_output: + # Temporarily remove the jinja_template, if it exists, to avoid deepcopy issues + template = rule_yaml.get("jinja_template") + rule_yaml["jinja_template"] = None + + # Copy the rule object without the template in it + copied_rule = copy.deepcopy(rule_yaml) + + # Set the template back onto the original rule object and the newly copied object + rule_yaml["jinja_template"] = template + copied_rule["jinja_template"] = template + + hits = self.test_file(copied_rule) + if hits and self.args.formatted_output: self.formatted_output['results'] = json.loads(json.dumps(hits)) - if hits and args.save: - with open(args.save, 'wb') as data_file: - # Add _id to _source for dump - [doc['_source'].update({'_id': doc['_id']}) for doc in hits] - data_file.write(json.dumps([doc['_source'] for doc in hits], indent=4)) - if args.use_downloaded: + if hits and self.args.save: + try: + with open(self.args.save, "wb") as data_file: + # Add _id to _source for dump + [doc['_source'].update({'_id': doc['_id']}) for doc in hits] + data_file.write(str.encode(json.dumps([doc['_source'] for doc in 
hits], indent=4))) + except OSError: + raise + if self.args.use_downloaded: if hits: - args.json = args.save - with open(args.json, 'r') as data_file: - self.data = json.loads(data_file.read()) + self.args.json = self.args.save + try: + with open(self.args.json, "r") as data_file: + self.data = json.loads(data_file.read()) + except OSError: + raise else: self.data = [] - if not args.schema_only and not args.count: - self.run_elastalert(rule_yaml, conf, args) + if not self.args.schema_only and not self.args.count: + self.run_elastalert(rule_yaml, conf) - if args.formatted_output: + if self.args.formatted_output: print(json.dumps(self.formatted_output)) -def main(): - test_instance = MockElastAlerter() +def main(args=None): + if args is None: + args = sys.argv[1:] + test_instance = MockElastAlerter(args) test_instance.run_rule_test() -if __name__ == '__main__': - main() +if __name__ == "__main__": + sys.exit(main(sys.argv[1:])) diff --git a/elastalert/util.py b/elastalert/util.py index bbb0600ff..dedb8b42a 100644 --- a/elastalert/util.py +++ b/elastalert/util.py @@ -5,17 +5,37 @@ import os import re import sys +import time +import types +import json import dateutil.parser import pytz from six import string_types -from . import ElasticSearchClient -from .auth import Auth +from elastalert import ElasticSearchClient +from elastalert.auth import Auth +from elasticsearch.exceptions import TransportError logging.basicConfig() +logging.captureWarnings(True) elastalert_logger = logging.getLogger('elastalert') +#backwards compatibility with es6 msearch +def get_msearch_query(query, rule): + search_arr = [] + search_arr.append({'index': [rule['index']]}) + if rule.get('use_count_query'): + query['size'] = 1 + if rule.get('include'): + query['_source'] = {} + query['_source']['includes'] = rule['include'] + search_arr.append(query) + request = '' + for each in search_arr: + request += '%s \n' %json.dumps(each) + return request + def get_module(module_name): """ Loads a module and returns a specific object. @@ -152,7 +172,7 @@ def ts_to_dt(timestamp): def dt_to_ts(dt): if not isinstance(dt, datetime.datetime): - logging.warning('Expected datetime, got %s' % (type(dt))) + elastalert_logger.warning('Expected datetime, got %s' % (type(dt))) return dt ts = dt.isoformat() # Round microseconds to milliseconds @@ -176,7 +196,7 @@ def ts_to_dt_with_format(timestamp, ts_format): def dt_to_ts_with_format(dt, ts_format): if not isinstance(dt, datetime.datetime): - logging.warning('Expected datetime, got %s' % (type(dt))) + elastalert_logger.warning('Expected datetime, got %s' % (type(dt))) return dt ts = dt.strftime(ts_format) return ts @@ -186,6 +206,11 @@ def ts_now(): return datetime.datetime.utcnow().replace(tzinfo=dateutil.tz.tzutc()) +def ts_utc_to_tz(ts, tz_name): + """Convert utc time to local time.""" + return ts.astimezone(dateutil.tz.gettz(tz_name)) + + def inc_ts(timestamp, milliseconds=1): """Increment a timestamp by milliseconds.""" dt = ts_to_dt(timestamp) @@ -193,7 +218,7 @@ def inc_ts(timestamp, milliseconds=1): return dt_to_ts(dt) -def pretty_ts(timestamp, tz=True): +def pretty_ts(timestamp, tz=True, ts_format=None): """Pretty-format the given timestamp (to be printed or logged hereafter). If tz, the timestamp will be converted to local time. 
Format: YYYY-MM-DD HH:MM TZ""" @@ -202,7 +227,10 @@ def pretty_ts(timestamp, tz=True): dt = ts_to_dt(timestamp) if tz: dt = dt.astimezone(dateutil.tz.tzlocal()) - return dt.strftime('%Y-%m-%d %H:%M %Z') + if ts_format is None: + return dt.strftime('%Y-%m-%d %H:%M %Z') + else: + return dt.strftime(ts_format) def ts_add(ts, td): @@ -250,13 +278,10 @@ def seconds(td): def total_seconds(dt): - # For python 2.6 compatability if dt is None: return 0 - elif hasattr(dt, 'total_seconds'): - return dt.total_seconds() else: - return (dt.microseconds + (dt.seconds + dt.days * 24 * 3600) * 10**6) / 10**6 + return dt.total_seconds() def dt_to_int(dt): @@ -269,7 +294,10 @@ def unixms_to_dt(ts): def unix_to_dt(ts): - dt = datetime.datetime.utcfromtimestamp(float(ts)) + if(type(ts) == types.UnicodeType): + dt = datetime.datetime.strptime(ts, '%Y-%m-%d %H:%M:%S.%f') + else: #if timestamp is in float format + dt = datetime.datetime.utcfromtimestamp(float(ts)) dt = dt.replace(tzinfo=dateutil.tz.tzutc()) return dt @@ -318,6 +346,29 @@ def elasticsearch_client(conf): """ returns an :class:`ElasticSearchClient` instance configured using an es_conn_config """ es_conn_conf = build_es_conn_config(conf) auth = Auth() + username = es_conn_conf['es_username'] + password = es_conn_conf['es_password'] + if es_conn_conf['es_bearer'] or es_conn_conf['es_api_key']: + username = None + password = None + es_conn_conf['http_auth'] = auth(host=es_conn_conf['es_host'], + username=username, + password=password, + aws_region=es_conn_conf['aws_region'], + profile_name=es_conn_conf['profile']) + if es_conn_conf['es_bearer']: + es_conn_conf['headers'] = {"Authorization": "Bearer " + es_conn_conf['es_bearer']} + if es_conn_conf['es_api_key']: + es_conn_conf['headers'] = {"Authorization": "ApiKey " + es_conn_conf['es_api_key']} + + return ElasticSearchClient(es_conn_conf) + + +#modded version of elasticsearch_client that suits haystack's needs +def kibana_adapter_client(conf): + """ returns an Elasticsearch instance configured using an es_conn_config """ + es_conn_conf = build_adapter_conn_config(conf) + auth = Auth() es_conn_conf['http_auth'] = auth(host=es_conn_conf['es_host'], username=es_conn_conf['es_username'], password=es_conn_conf['es_password'], @@ -326,6 +377,85 @@ def elasticsearch_client(conf): return ElasticSearchClient(es_conn_conf) +def build_adapter_conn_config(conf): + """ Given a conf dictionary w/ raw config properties 'use_ssl', 'es_host', 'es_port' + 'es_username' and 'es_password', this will return a new dictionary + with properly initialized values for 'es_host', 'es_port', 'use_ssl' and 'http_auth' which + will be a basicauth username:password formatted string """ + parsed_conf = {} + parsed_conf['use_ssl'] = os.environ.get('ES_USE_SSL', False) + parsed_conf['verify_certs'] = True + parsed_conf['ca_certs'] = None + parsed_conf['client_cert'] = None + parsed_conf['client_key'] = None + parsed_conf['http_auth'] = None + parsed_conf['es_username'] = None + parsed_conf['es_password'] = None + parsed_conf['es_api_key'] = None + parsed_conf['es_bearer'] = None + parsed_conf['aws_region'] = None + parsed_conf['profile'] = None + parsed_conf['headers'] = {} + parsed_conf['es_host'] = conf['kibana_adapter'] + parsed_conf['es_port'] = conf['kibana_adapter_port'] + parsed_conf['es_url_prefix'] = '' + parsed_conf['es_conn_timeout'] = conf.get('es_conn_timeout', 20) + parsed_conf['send_get_body_as'] = conf.get('es_send_get_body_as', 'GET') + parsed_conf['ssl_show_warn'] = conf.get('ssl_show_warn', True) + + if 
os.environ.get('ES_USERNAME'): + parsed_conf['es_username'] = os.environ.get('ES_USERNAME') + parsed_conf['es_password'] = os.environ.get('ES_PASSWORD') + elif 'es_username' in conf: + parsed_conf['es_username'] = conf['es_username'] + parsed_conf['es_password'] = conf['es_password'] + + if os.environ.get('ES_API_KEY'): + parsed_conf['es_api_key'] = os.environ.get('ES_API_KEY') + elif 'es_api_key' in conf: + parsed_conf['es_api_key'] = conf['es_api_key'] + + if os.environ.get('ES_BEARER'): + parsed_conf['es_bearer'] = os.environ.get('ES_BEARER') + elif 'es_bearer' in conf: + parsed_conf['es_bearer'] = conf['es_bearer'] + + if os.environ.get('X_ENV'): + parsed_conf['headers']['X-ENV'] = os.environ.get('X_ENV') + elif 'X_ENV' in conf: + parsed_conf['headers']['X-ENV'] = os.environ.get('X_ENV') + + if 'aws_region' in conf: + parsed_conf['aws_region'] = conf['aws_region'] + + if 'profile' in conf: + parsed_conf['profile'] = conf['profile'] + + if 'use_ssl' in conf: + parsed_conf['use_ssl'] = conf['use_ssl'] + + if 'verify_certs' in conf: + parsed_conf['verify_certs'] = conf['verify_certs'] + + if 'ca_certs' in conf: + parsed_conf['ca_certs'] = conf['ca_certs'] + + if 'client_cert' in conf: + parsed_conf['client_cert'] = conf['client_cert'] + + if 'client_key' in conf: + parsed_conf['client_key'] = conf['client_key'] + + if 'es_url_prefix' in conf: + parsed_conf['es_url_prefix'] = conf['es_url_prefix'] + + if 'kibana_adapter_url_prefix' in conf: + parsed_conf['es_url_prefix'] = conf['kibana_adapter_url_prefix'] + + + return parsed_conf + + def build_es_conn_config(conf): """ Given a conf dictionary w/ raw config properties 'use_ssl', 'es_host', 'es_port' @@ -341,13 +471,22 @@ def build_es_conn_config(conf): parsed_conf['http_auth'] = None parsed_conf['es_username'] = None parsed_conf['es_password'] = None + parsed_conf['es_api_key'] = None + parsed_conf['es_bearer'] = None parsed_conf['aws_region'] = None parsed_conf['profile'] = None + parsed_conf['headers'] = None parsed_conf['es_host'] = os.environ.get('ES_HOST', conf['es_host']) parsed_conf['es_port'] = int(os.environ.get('ES_PORT', conf['es_port'])) + + es_hosts = os.environ.get('ES_HOSTS') + es_hosts = parse_hosts(es_hosts, parsed_conf.get('es_port')) if es_hosts else conf.get('es_hosts') + parsed_conf['es_hosts'] = es_hosts + parsed_conf['es_url_prefix'] = '' parsed_conf['es_conn_timeout'] = conf.get('es_conn_timeout', 20) parsed_conf['send_get_body_as'] = conf.get('es_send_get_body_as', 'GET') + parsed_conf['ssl_show_warn'] = conf.get('ssl_show_warn', True) if os.environ.get('ES_USERNAME'): parsed_conf['es_username'] = os.environ.get('ES_USERNAME') @@ -356,14 +495,19 @@ def build_es_conn_config(conf): parsed_conf['es_username'] = conf['es_username'] parsed_conf['es_password'] = conf['es_password'] + if os.environ.get('ES_API_KEY'): + parsed_conf['es_api_key'] = os.environ.get('ES_API_KEY') + elif 'es_api_key' in conf: + parsed_conf['es_api_key'] = conf['es_api_key'] + + if os.environ.get('ES_BEARER'): + parsed_conf['es_bearer'] = os.environ.get('ES_BEARER') + elif 'es_bearer' in conf: + parsed_conf['es_bearer'] = conf['es_bearer'] + if 'aws_region' in conf: parsed_conf['aws_region'] = conf['aws_region'] - # Deprecated - if 'boto_profile' in conf: - logging.warning('Found deprecated "boto_profile", use "profile" instead!') - parsed_conf['profile'] = conf['boto_profile'] - if 'profile' in conf: parsed_conf['profile'] = conf['profile'] @@ -460,3 +604,97 @@ def should_scrolling_continue(rule_conf): stop_the_scroll = 0 < max_scrolling <= 
rule_conf.get('scrolling_cycle') return not stop_the_scroll + + +def _expand_string_into_dict(string, value, sep='.'): + """ + Converts a encapsulated string-dict to a sequence of dict. Use separator (default '.') to split the string. + Example: + string1.string2.stringN : value -> {string1: {string2: {string3: value}} + + :param string: The encapsulated "string-dict" + :param value: Value associated to the last field of the "string-dict" + :param sep: Separator character. Default: '.' + :rtype: dict + """ + if sep not in string: + return {string: value} + key, val = string.split(sep, 1) + return {key: _expand_string_into_dict(val, value)} + + +def expand_string_into_dict(dictionary, string, value, sep='.'): + """ + Useful function to "compile" a string-dict string used in metric and percentage rules into a dictionary sequence. + + :param dictionary: The dictionary dict + :param string: String Key + :param value: String Value + :param sep: Separator character. Default: '.' + :rtype: dict + """ + + if sep not in string: + dictionary[string] = value + return dictionary + else: + field1, new_string = string.split(sep, 1) + dictionary[field1] = _expand_string_into_dict(new_string, value) + return dictionary + + +def format_string(format_config, target_value): + """ + Formats number, supporting %-format and str.format() syntax. + + :param format_config: string format syntax, for example '{:.2%}' or '%.2f' + :param target_value: number to format + :rtype: string + """ + if (format_config.startswith('{')): + return format_config.format(target_value) + else: + return format_config % (target_value) + + +def format_host_port(host, port): + host = host.strip() + if ":" not in host: + return "{host}:{port}".format(host=host, port=port) + return host + + +def parse_hosts(host, port=9200): + """ + Convert host str like "host1:port1, host2:port2" to list + :param host str: hostnames (separated with comma ) or single host name + :param port: default to 9200 + :return: list of hosts + """ + host_list = host.split(",") + host_list = [format_host_port(x, port) for x in host_list] + return host_list + + +def get_version_from_cluster_info(client): + esversion = None + for retry in range(3): + try: + esinfo = client.info()['version'] + esversion = esinfo['number'] + if esinfo.get('distribution') == "opensearch": + # https://opensearch.org/ + if esversion[0] == "1": + # OpenSearch 1.x is based on Elasticsearch 7.10.2 + esversion = "7.10.2" + else: + # OpenSearch 2.x has qualities similar to 8.2.0 + esversion = "8.2.0" + break + except TransportError: + if retry == 2: + raise + elastalert_logger.warning('Failed to retrieve cluster version information, retrying in 3 seconds') + time.sleep(3) + + return esversion \ No newline at end of file diff --git a/elastalert/yaml.py b/elastalert/yaml.py new file mode 100644 index 000000000..35810f102 --- /dev/null +++ b/elastalert/yaml.py @@ -0,0 +1,8 @@ +import os +import yaml + + +def read_yaml(path): + with open(path) as f: + yamlContent = os.path.expandvars(f.read()) + return yaml.load(yamlContent, Loader=yaml.FullLoader) diff --git a/elastalert/zabbix.py b/elastalert/zabbix.py deleted file mode 100644 index e3f13aa03..000000000 --- a/elastalert/zabbix.py +++ /dev/null @@ -1,75 +0,0 @@ -from alerts import Alerter # , BasicMatchString -import logging -from pyzabbix.api import ZabbixAPI -from pyzabbix import ZabbixSender, ZabbixMetric -from datetime import datetime - - -class ZabbixClient(ZabbixAPI): - - def __init__(self, url='http://localhost', use_authenticate=False, 
user='Admin', password='zabbix', sender_host='localhost', - sender_port=10051): - self.url = url - self.use_authenticate = use_authenticate - self.sender_host = sender_host - self.sender_port = sender_port - self.metrics_chunk_size = 200 - self.aggregated_metrics = [] - self.logger = logging.getLogger(self.__class__.__name__) - super(ZabbixClient, self).__init__(url=self.url, use_authenticate=self.use_authenticate, user=user, password=password) - - def send_metric(self, hostname, key, data): - zm = ZabbixMetric(hostname, key, data) - if self.send_aggregated_metrics: - - self.aggregated_metrics.append(zm) - if len(self.aggregated_metrics) > self.metrics_chunk_size: - self.logger.info("Sending: %s metrics" % (len(self.aggregated_metrics))) - try: - ZabbixSender(zabbix_server=self.sender_host, zabbix_port=self.sender_port).send(self.aggregated_metrics) - self.aggregated_metrics = [] - except Exception as e: - self.logger.exception(e) - pass - else: - try: - ZabbixSender(zabbix_server=self.sender_host, zabbix_port=self.sender_port).send(zm) - except Exception as e: - self.logger.exception(e) - pass - - -class ZabbixAlerter(Alerter): - - # By setting required_options to a set of strings - # You can ensure that the rule config file specifies all - # of the options. Otherwise, ElastAlert will throw an exception - # when trying to load the rule. - required_options = frozenset(['zbx_sender_host', 'zbx_sender_port', 'zbx_host', 'zbx_key']) - - def __init__(self, *args): - super(ZabbixAlerter, self).__init__(*args) - - self.zbx_sender_host = self.rule.get('zbx_sender_host', 'localhost') - self.zbx_sender_port = self.rule.get('zbx_sender_port', 10051) - self.zbx_host = self.rule.get('zbx_host') - self.zbx_key = self.rule.get('zbx_key') - - # Alert is called - def alert(self, matches): - - # Matches is a list of match dictionaries. - # It contains more than one match when the alert has - # the aggregation option set - zm = [] - for match in matches: - ts_epoch = int(datetime.strptime(match['@timestamp'], "%Y-%m-%dT%H:%M:%S.%fZ").strftime('%s')) - zm.append(ZabbixMetric(host=self.zbx_host, key=self.zbx_key, value=1, clock=ts_epoch)) - - ZabbixSender(zabbix_server=self.zbx_sender_host, zabbix_port=self.zbx_sender_port).send(zm) - - # get_info is called after an alert is sent to get data that is written back - # to Elasticsearch in the field "alert_info" - # It should return a dict of information relevant to what the alert does - def get_info(self): - return {'type': 'zabbix Alerter'} diff --git a/config.yaml.example b/examples/config.yaml.example similarity index 67% rename from config.yaml.example rename to examples/config.yaml.example index 9d9176382..d02291c7c 100644 --- a/config.yaml.example +++ b/examples/config.yaml.example @@ -1,6 +1,7 @@ # This is the folder that contains the rule yaml files +# This can also be a list of directories # Any .yaml file will be loaded as a rule -rules_folder: example_rules +rules_folder: examples/rules # How often ElastAlert will query Elasticsearch # The unit can be anything from weeks to seconds @@ -30,15 +31,24 @@ es_port: 9200 # Optional URL prefix for Elasticsearch #es_url_prefix: elasticsearch +# Optional prefix for statsd metrics +#statsd_instance_tag: elastalert + +# Optional statsd host +#statsd_host: dogstatsd + # Connect with TLS to Elasticsearch #use_ssl: True # Verify TLS certificates #verify_certs: True +# Show TLS or certificate related warnings +#ssl_show_warn: True + # GET request with body is the default option for Elasticsearch. 
# If it fails for some reason, you can pass 'GET', 'POST' or 'source'. -# See http://elasticsearch-py.readthedocs.io/en/master/connection.html?highlight=send_get_body_as#transport +# See https://elasticsearch-py.readthedocs.io/en/master/connection.html?highlight=send_get_body_as#transport # for details #es_send_get_body_as: GET @@ -48,7 +58,6 @@ es_port: 9200 # Use SSL authentication with client certificates client_cert must be # a pem file containing both cert and key for client -#verify_certs: True #ca_certs: /path/to/cacert.pem #client_cert: /path/to/client_cert.pem #client_key: /path/to/client_key.key @@ -57,13 +66,16 @@ es_port: 9200 # This can be a unmapped index, but it is recommended that you run # elastalert-create-index to set a mapping writeback_index: elastalert_status -writeback_alias: elastalert_alerts # If an alert fails for some reason, ElastAlert will retry # sending the alert until this time period has elapsed alert_time_limit: days: 2 +# Optional timestamp format. +# ElastAlert will print timestamps in alert messages and in log messages using this format. +#custom_pretty_ts_format: '%Y-%m-%d %H:%M' + # Custom logging configuration # If you want to setup your own logging configuration to log into # files as well or to Logstash and/or modify log levels, use @@ -78,38 +90,38 @@ alert_time_limit: # logline: # format: '%(asctime)s %(levelname)+8s %(name)+20s %(message)s' # -# handlers: -# console: -# class: logging.StreamHandler -# formatter: logline -# level: DEBUG -# stream: ext://sys.stderr +# handlers: +# console: +# class: logging.StreamHandler +# formatter: logline +# level: DEBUG +# stream: ext://sys.stderr # -# file: -# class : logging.FileHandler -# formatter: logline -# level: DEBUG -# filename: elastalert.log +# file: +# class : logging.FileHandler +# formatter: logline +# level: DEBUG +# filename: elastalert.log # -# loggers: -# elastalert: -# level: WARN -# handlers: [] -# propagate: true +# loggers: +# elastalert: +# level: WARN +# handlers: [] +# propagate: true # -# elasticsearch: -# level: WARN -# handlers: [] -# propagate: true +# elasticsearch: +# level: WARN +# handlers: [] +# propagate: true # -# elasticsearch.trace: -# level: WARN -# handlers: [] -# propagate: true +# elasticsearch.trace: +# level: WARN +# handlers: [] +# propagate: true # -# '': # root logger -# level: WARN -# handlers: -# - console -# - file -# propagate: false +# '': # root logger +# level: WARN +# handlers: +# - console +# - file +# propagate: false diff --git a/examples/ex_flatline.yaml b/examples/ex_flatline.yaml new file mode 100644 index 000000000..70cd7033e --- /dev/null +++ b/examples/ex_flatline.yaml @@ -0,0 +1,20 @@ +name: freshemail debug rule +type: flatline +index: traces* +threshold: 3 +# use_count_query: true +timestamp_field: timestamp +timeframe: + minutes: 1 +filter: +- query: + query_string: + query: "*" +alert: +- "debug" +scan_entire_timeframe: true + +realert: + minutes: 0 +query_delay: + minutes: 3 \ No newline at end of file diff --git a/example_rules/example_cardinality.yaml b/examples/rules/example_cardinality.yaml similarity index 92% rename from example_rules/example_cardinality.yaml rename to examples/rules/example_cardinality.yaml index 5ca7eecbf..5ab44c52f 100755 --- a/example_rules/example_cardinality.yaml +++ b/examples/rules/example_cardinality.yaml @@ -44,7 +44,7 @@ timeframe: # (Required) # A list of Elasticsearch filters used for find events # These filters are joined with AND and nested in a filtered query -# For more info: 
http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/query-dsl.html +# For more info: https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl.html filter: - term: status: "active" diff --git a/example_rules/example_change.yaml b/examples/rules/example_change.yaml similarity index 94% rename from example_rules/example_change.yaml rename to examples/rules/example_change.yaml index 107c43ec1..55a3d68d4 100755 --- a/example_rules/example_change.yaml +++ b/examples/rules/example_change.yaml @@ -52,7 +52,7 @@ timeframe: # (Required) # A list of Elasticsearch filters used for find events # These filters are joined with AND and nested in a filtered query -# For more info: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/query-dsl.html +# For more info: https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl.html filter: - query: query_string: diff --git a/example_rules/example_frequency.yaml b/examples/rules/example_frequency.yaml similarity index 92% rename from example_rules/example_frequency.yaml rename to examples/rules/example_frequency.yaml index 8c54106ae..8bda0714b 100755 --- a/example_rules/example_frequency.yaml +++ b/examples/rules/example_frequency.yaml @@ -40,7 +40,7 @@ timeframe: # (Required) # A list of Elasticsearch filters used for find events # These filters are joined with AND and nested in a filtered query -# For more info: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/query-dsl.html +# For more info: https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl.html filter: - term: some_field: "some_value" diff --git a/example_rules/example_new_term.yaml b/examples/rules/example_new_term.yaml similarity index 83% rename from example_rules/example_new_term.yaml rename to examples/rules/example_new_term.yaml index 2a6a823b2..5e67f7821 100755 --- a/example_rules/example_new_term.yaml +++ b/examples/rules/example_new_term.yaml @@ -43,10 +43,12 @@ terms_window_size: # (Required) # A list of Elasticsearch filters used for find events # These filters are joined with AND and nested in a filtered query -# For more info: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/query-dsl.html +# For more info: https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl.html # We are filtering for only "login_event" type documents with username "admin" filter: - term: + # The _type field is deprecated in Elasticsearch 7.0 and removed in Elasticsearch 8.0 + # For more information, see https://www.elastic.co/guide/en/elasticsearch/reference/7.17/removal-of-types.html _type: "login_event" - term: username: admin diff --git a/example_rules/example_opsgenie_frequency.yaml b/examples/rules/example_opsgenie_frequency.yaml similarity index 86% rename from example_rules/example_opsgenie_frequency.yaml rename to examples/rules/example_opsgenie_frequency.yaml index 9876f9162..b80e674dc 100755 --- a/example_rules/example_opsgenie_frequency.yaml +++ b/examples/rules/example_opsgenie_frequency.yaml @@ -24,7 +24,7 @@ opsgenie_key: ogkey # (Optional) # OpsGenie recipients with args # opsgenie_recipients: -# - {recipient} +# - {recipient} # opsgenie_recipients_args: # team_prefix:'user.email' @@ -36,7 +36,7 @@ opsgenie_key: ogkey # (Optional) # OpsGenie teams with args # opsgenie_teams: -# - {team_prefix}-Team +# - {team_prefix}-Team # opsgenie_teams_args: # team_prefix:'team' @@ -45,6 +45,12 @@ opsgenie_key: ogkey opsgenie_tags: - "Production" +# (Optional) OpsGenie 
source +# opsgenie_source: ElastAlert_EMEA + +# (Optional) OpsGenie entity +# opsgenie_entity: '{hostname}' + # (OptionaL) Connect with SSL to Elasticsearch #use_ssl: True @@ -65,8 +71,6 @@ type: frequency # Index to search, wildcard supported index: logstash-* -#doc_type: "golog" - # (Required, frequency specific) # Alert when this many documents matching the query occur within a timeframe num_events: 50 @@ -79,7 +83,7 @@ timeframe: # (Required) # A list of Elasticsearch filters used for find events # These filters are joined with AND and nested in a filtered query -# For more info: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/query-dsl.html +# For more info: https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl.html filter: - query: query_string: diff --git a/example_rules/example_percentage_match.yaml b/examples/rules/example_percentage_match.yaml similarity index 74% rename from example_rules/example_percentage_match.yaml rename to examples/rules/example_percentage_match.yaml index cb7809622..0f7afede8 100644 --- a/example_rules/example_percentage_match.yaml +++ b/examples/rules/example_percentage_match.yaml @@ -9,13 +9,14 @@ description: "95% of all http requests should be successful" filter: - term: + # The _type field is deprecated in Elasticsearch 7.0 and removed in Elasticsearch 8.0 + # For more information, see https://www.elastic.co/guide/en/elasticsearch/reference/7.17/removal-of-types.html _type: http_request buffer_time: minutes: 5 query_key: Hostname.keyword -doc_type: http_request match_bucket_filter: - terms: diff --git a/example_rules/example_single_metric_agg.yaml b/examples/rules/example_single_metric_agg.yaml similarity index 95% rename from example_rules/example_single_metric_agg.yaml rename to examples/rules/example_single_metric_agg.yaml index 921afe30e..cdff72d52 100644 --- a/example_rules/example_single_metric_agg.yaml +++ b/examples/rules/example_single_metric_agg.yaml @@ -12,8 +12,7 @@ buffer_time: metric_agg_key: system.cpu.user.pct metric_agg_type: avg query_key: beat.hostname -doc_type: metricsets - + bucket_interval: minutes: 5 diff --git a/example_rules/example_spike.yaml b/examples/rules/example_spike.yaml similarity index 95% rename from example_rules/example_spike.yaml rename to examples/rules/example_spike.yaml index cb7064c2e..799c2cd55 100755 --- a/example_rules/example_spike.yaml +++ b/examples/rules/example_spike.yaml @@ -56,7 +56,7 @@ spike_type: "up" # (Required) # A list of Elasticsearch filters used for find events # These filters are joined with AND and nested in a filtered query -# For more info: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/query-dsl.html +# For more info: https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl.html filter: - query: query_string: diff --git a/example_rules/example_spike_single_metric_agg.yaml b/examples/rules/example_spike_single_metric_agg.yaml similarity index 98% rename from example_rules/example_spike_single_metric_agg.yaml rename to examples/rules/example_spike_single_metric_agg.yaml index b26ade15a..007237ca4 100644 --- a/example_rules/example_spike_single_metric_agg.yaml +++ b/examples/rules/example_spike_single_metric_agg.yaml @@ -15,7 +15,6 @@ buffer_time: metric_agg_key: system.cpu.user.pct metric_agg_type: avg query_key: beat.hostname -doc_type: metricsets #allow_buffer_time_overlap: true #use_run_every_query_size: true diff --git a/examples/rules/example_tencent_sms.yaml b/examples/rules/example_tencent_sms.yaml 
new file mode 100755 index 000000000..d360b97ba --- /dev/null +++ b/examples/rules/example_tencent_sms.yaml @@ -0,0 +1,65 @@ +# Alert when the rate of events exceeds a threshold + +# (Optional) +# Elasticsearch host +# es_host: elasticsearch.example.com + +# (Optional) +# Elasticsearch port +#es_port: 9200 + +# (Optional) Connect with SSL to Elasticsearch +#use_ssl: True + +# (Optional) basic-auth username and password for Elasticsearch +#es_username: someusername +#es_password: somepassword + +# (Required) +# Rule name, must be unique +name: tencent_sms + +# (Required) +# Type of alert. +# the frequency rule type alerts when num_events events occur within timeframe time +type: frequency + +# (Required) +# Index to search, wildcard supported +index: test + +# (Required, frequency specific) +# Alert when this many documents matching the query occur within a timeframe +num_events: 50 + +# (Required, frequency specific) +# num_events must occur within this amount of time to trigger an alert +timeframe: + hours: 4 + +# (Required) +# A list of Elasticsearch filters used to find events +# These filters are joined with AND and nested in a filtered query +# For more info: https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl.html +filter: + - query: + query_string: + query: "message: err" + +timestamp_field: date + +# (Required) +# The alert to use when a match is found +alert: + - "tencent_sms" + +tencent_sms_secret_id: "secret_id" +tencent_sms_secret_key: "secret_key" +tencent_sms_sdk_appid: "1400006666" +tencent_sms_to_number: + - "+8613711112222" +tencent_sms_region: "ap-guangzhou" +tencent_sms_sign_name: "tencent" +tencent_sms_template_id: "1123835" +tencent_sms_template_parm: + - "/kubernetes/pod_name" diff --git a/examples/rules/example_thehive_frequency.yaml b/examples/rules/example_thehive_frequency.yaml new file mode 100644 index 000000000..bd12a9505 --- /dev/null +++ b/examples/rules/example_thehive_frequency.yaml @@ -0,0 +1,52 @@ +# This example will look at values in the query_key for a number of events that occurred during the timeframe. +# Unique alerts will be sent to TheHive containing a value from the alert in the tags, as well as a Kibana link in the description. We will be using alert_text in order to generate a custom description. + +name: "Example TheHive with frequency and query_key" +index: your_indice_%Y-%m-%d + +type: frequency +num_events: 30 + +# Count frequency based on values in "dest_ip" +query_key: dest_ip + +# Example query +filter: +- query: + query_string: + query: "event_id:4625" + +# The alert text below allows the alert description in TheHive to contain new lines for easier reading. +alert_text_args: [ kibana_discover_url, dest_ip ] +alert_text_type: alert_text_only +alert_text: | + 'Example bruteforce alert to the destination IP {1} .
' + + Kibana URL: + + {0} + +# Details needed in order to generate the Kibana discover URL in alert_text +generate_kibana_discover_url: true +kibana_discover_app_url: http://your.kibana.server/app/kibana#/discover +kibana_discover_version: '7.5' +kibana_discover_index_pattern_id: 477b4a90-25ead-11b9-ad2c-19e82a454d17 + + +# Needed +alert: +- hivealerter + +hive_alert_config: + type: 'test' + source: 'elastalert' + # description: 'description disabled as we will be using alert_text to insert our Kibana URL' + severity: 1 + tags: [field_1, 'bruteforce' ] + tlp: 2 + status: 'New' + follow: True + +hive_observable_data_mapping: + - ip: dest_ip + - fqdn: host_name diff --git a/examples/rules/exemple_discord_any.yaml b/examples/rules/exemple_discord_any.yaml new file mode 100644 index 000000000..e19b4a4b7 --- /dev/null +++ b/examples/rules/exemple_discord_any.yaml @@ -0,0 +1,40 @@ +# This example will send you every alert that occurred during the sleep interval configured in your config file. +# Every match will be sent as a separate alert to Discord. If you get 3 matches, the alerter will send 3 alerts to your Discord. + +name: "Example discord webhook alert" +type: any +index: your_indice_%Y-%m-%d +use_strftime_index: true + +# Example query +filter: +- query: + query_string: + query: "id: 2501 OR id: 5503" + +realert: + minutes: 0 + +# Only the code-block markup is added here. This prevents the entire description from being rendered as code; only the log details will be, which keeps the alert easier to read. +include: ["timestamp","name","computer"] +alert_text: "Alerts at {0} on the computer {1}.\n```" +alert_text_args: ["timestamp","computer"] + +# Needed +alert: +- discord +discord_webhook_url: "Your discord webhook url" + +# ----- Optional Section ----- + +discord_proxy: "proxy_address" + +# Must be in quotes and must be a valid emoji supported by Discord. +discord_emoji_title: ":lock:" + +# Must be a hexadecimal value, as in the example below +discord_embed_color: 0xE24D42 + +# This content will be displayed at the very end of your embed message. If you don't add one of these 2 lines, the footer will not be added.
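+# Both footer values below are illustrative; replace the text and icon URL with your own.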
+discord_embed_footer: "Message sent by ElastAlert from your computer" +discord_embed_icon_url: "https://humancoders-formations.s3.amazonaws.com/uploads/course/logo/38/thumb_bigger_formation-elasticsearch.png" \ No newline at end of file diff --git a/example_rules/jira_acct.txt b/examples/rules/jira_acct.txt similarity index 100% rename from example_rules/jira_acct.txt rename to examples/rules/jira_acct.txt diff --git a/example_rules/ssh-repeat-offender.yaml b/examples/rules/ssh-repeat-offender.yaml similarity index 78% rename from example_rules/ssh-repeat-offender.yaml rename to examples/rules/ssh-repeat-offender.yaml index 27a439fcd..2618fe9f8 100644 --- a/example_rules/ssh-repeat-offender.yaml +++ b/examples/rules/ssh-repeat-offender.yaml @@ -13,7 +13,7 @@ timeframe: # A list of elasticsearch filters used for find events # These filters are joined with AND and nested in a filtered query -# For more info: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/query-dsl.html +# For more info: https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl.html filter: - term: rule_name: "SSH abuse" @@ -32,10 +32,9 @@ include: - match_body.user.name - match_body.source.ip -alert_subject: "SSH abuse (repeat offender) on <{}> | <{}|Show Dashboard>" +alert_subject: "SSH abuse (repeat offender) on <{}>" alert_subject_args: - match_body.host.hostname - - kibana_link alert_text: |- An reapeat offender has been active on {}. @@ -56,6 +55,3 @@ slack_username_override: "ElastAlert" # Alert body only cointains a title and text alert_text_type: alert_text_only - -# Link to BitSensor Kibana Dashboard -use_kibana4_dashboard: "https://dev.securely.ai/app/kibana#/dashboard/37739d80-a95c-11e9-b5ba-33a34ca252fb" diff --git a/example_rules/ssh.yaml b/examples/rules/ssh.yaml similarity index 76% rename from example_rules/ssh.yaml rename to examples/rules/ssh.yaml index 7af890784..5b95b5620 100644 --- a/example_rules/ssh.yaml +++ b/examples/rules/ssh.yaml @@ -1,5 +1,5 @@ # Rule name, must be unique - name: SSH abuse (ElastAlert 3.0.1) - 2 +name: SSH abuse (ElastAlert 3.0.1) - 2 # Alert on x events in y seconds type: frequency @@ -13,7 +13,7 @@ timeframe: # A list of elasticsearch filters used for find events # These filters are joined with AND and nested in a filtered query -# For more info: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/query-dsl.html +# For more info: https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl.html filter: - query: query_string: @@ -35,10 +35,9 @@ include: include_match_in_root: true -alert_subject: "SSH abuse on <{}> | <{}|Show Dashboard>" +alert_subject: "SSH abuse on <{}>" alert_subject_args: - host.hostname - - kibana_link alert_text: |- An attack on {} is detected. 
@@ -59,6 +58,3 @@ slack_username_override: "ElastAlert" # Alert body only cointains a title and text alert_text_type: alert_text_only - -# Link to BitSensor Kibana Dashboard -use_kibana4_dashboard: "https://dev.securely.ai/app/kibana#/dashboard/37739d80-a95c-11e9-b5ba-33a34ca252fb" diff --git a/supervisord.conf.example b/examples/supervisord.conf.example similarity index 100% rename from supervisord.conf.example rename to examples/supervisord.conf.example diff --git a/requirements-dev.txt b/requirements-dev.txt index 558761d9e..3b80d79b8 100644 --- a/requirements-dev.txt +++ b/requirements-dev.txt @@ -1,9 +1,13 @@ -r requirements.txt -coverage==4.5.4 flake8 +flake8-absolute-import +m2r2 pre-commit -pylint<1.4 -pytest<3.3.0 +pylint==2.15.8 +pytest==7.2.0 +pytest-cov==4.0.0 +pytest-xdist==3.1.0 setuptools +sphinx==5.3.0 sphinx_rtd_theme -tox<2.0 +tox==3.27.1 diff --git a/requirements.txt b/requirements.txt index 9c32052d0..149d162b9 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,22 +1,26 @@ -apscheduler>=3.3.0 -aws-requests-auth>=0.3.0 -blist>=1.3.6 -boto3>=1.4.4 -cffi>=1.11.5 -configparser>=3.5.0 -croniter>=0.3.16 -elasticsearch>=7.0.0 -envparse>=0.2.0 -exotel>=0.1.3 -jira>=1.0.10,<1.0.15 -jsonschema>=3.0.2 -mock>=2.0.0 -prison>=0.1.2 -py-zabbix==1.1.3 -PyStaticConfiguration>=0.10.3 -python-dateutil>=2.6.0,<2.7.0 -PyYAML>=5.1 -requests>=2.0.0 -stomp.py>=4.1.17 -texttable>=0.8.8 -twilio==6.0.0 +apscheduler==3.10.4 +aws-requests-auth==0.4.3 +sortedcontainers==2.4.0 +boto3==1.34.129 +cffi==1.16.0 +croniter==2.0.5 +elasticsearch==7.10.1 +envparse==0.2.0 +exotel==0.1.5 +Jinja2==3.1.2 +jira==3.8.0 +jsonschema==4.17.3 +mock>=5.1.0 +prison==0.2.1 +prometheus_client==0.13.1 +py-zabbix==1.1.7 +python-dateutil==2.9.0 +PyYAML==6.0.1 +requests==2.32.3 +stomp.py==8.1.2 +texttable>=1.7.0 +statsd-tags==3.2.1.post1 +twilio==6.57.0 +tencentcloud-sdk-python==3.0.1171 +jsonpointer==3.0.0 +tzlocal==2.1 diff --git a/setup.py b/setup.py index 2845836a7..b3db43383 100644 --- a/setup.py +++ b/setup.py @@ -7,46 +7,55 @@ base_dir = os.path.dirname(__file__) setup( - name='elastalert', - version='0.2.4', - description='Runs custom filters on Elasticsearch and alerts on matches', - author='Quentin Long', - author_email='qlo@yelp.com', + name='elastalert2', + version='2.9.0', + description='Automated rule-based alerting for Elasticsearch', + long_description=open('README.md').read(), + long_description_content_type="text/markdown", + url="https://github.com/jertel/elastalert2", setup_requires='setuptools', - license='Copyright 2014 Yelp', + license='Apache 2.0', + project_urls={ + "Documentation": "https://elastalert2.readthedocs.io", + "Source Code": "https://github.com/jertel/elastalert2", + "Discussion Forum": "https://github.com/jertel/elastalert2/discussions", + }, classifiers=[ - 'Programming Language :: Python :: 3.6', + 'Programming Language :: Python :: 3.11', 'License :: OSI Approved :: Apache Software License', 'Operating System :: OS Independent', ], entry_points={ 'console_scripts': ['elastalert-create-index=elastalert.create_index:main', 'elastalert-test-rule=elastalert.test_rule:main', - 'elastalert-rule-from-kibana=elastalert.rule_from_kibana:main', 'elastalert=elastalert.elastalert:main']}, - packages=find_packages(), + packages=find_packages(exclude=["tests"]), package_data={'elastalert': ['schema.yaml', 'es_mappings/**/*.json']}, install_requires=[ - 'apscheduler>=3.3.0', - 'aws-requests-auth>=0.3.0', - 'blist>=1.3.6', - 'boto3>=1.4.4', - 'configparser>=3.5.0', - 'croniter>=0.3.16', - 
'elasticsearch==7.0.0', + 'apscheduler>=3.8.1.post1,<4.0', + 'aws-requests-auth>=0.4.3', + 'boto3>=1.20.53', + 'cffi>=1.15.0', + 'croniter>=1.2.0', + 'elasticsearch==6.3.1', 'envparse>=0.2.0', - 'exotel>=0.1.3', - 'jira>=2.0.0', - 'jsonschema>=3.0.2', - 'mock>=2.0.0', - 'prison>=0.1.2', - 'PyStaticConfiguration>=0.10.3', - 'python-dateutil>=2.6.0,<2.7.0', - 'PyYAML>=3.12', - 'requests>=2.10.0', - 'stomp.py>=4.1.17', - 'texttable>=0.8.8', - 'twilio>=6.0.0,<6.1', - 'cffi>=1.11.5' + 'exotel==0.1.5', + 'Jinja2>=3.1.2', + 'jira>=3.1.1', + 'jsonpointer>=2.2', + 'jsonschema>=4.4.0', + 'prison>=0.2.1', + 'prometheus_client>=0.13.1', + 'python-dateutil>=2.8.2', + 'PyYAML>=6.0', + 'py-zabbix>=1.1.7', + 'requests>=2.27.1', + 'sortedcontainers>=2.4.0', + 'statsd-tags==3.2.1.post1', + 'stomp.py>=8.0.1', + 'tencentcloud-sdk-python>=3.0.577', + 'texttable>=1.6.4', + 'twilio==6.57.0', + 'tzlocal==2.1' ] ) diff --git a/tests/Dockerfile-test b/tests/Dockerfile-test new file mode 100644 index 000000000..38949446f --- /dev/null +++ b/tests/Dockerfile-test @@ -0,0 +1,10 @@ +FROM python:3-slim-buster + +RUN apt update && apt upgrade -y +RUN apt install -y gcc libffi-dev + +WORKDIR /home/elastalert + +ADD requirements*.txt ./ + +RUN pip3 install -r requirements-dev.txt diff --git a/tests/alerters/__init__.py b/tests/alerters/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/tests/alerters/alerta_test.py b/tests/alerters/alerta_test.py new file mode 100644 index 000000000..efb4fdfbf --- /dev/null +++ b/tests/alerters/alerta_test.py @@ -0,0 +1,901 @@ +import datetime +import json +import logging +import pytest + +from unittest import mock + +from requests import RequestException + +from elastalert.alerters.alerta import AlertaAlerter +from elastalert.loaders import FileRulesLoader +from elastalert.util import EAException + + +def test_alerta_no_auth(caplog): + caplog.set_level(logging.INFO) + rule = { + 'name': 'Test Alerta rule!', + 'alerta_api_url': 'http://elastalerthost:8080/api/alert', + 'timeframe': datetime.timedelta(hours=1), + 'timestamp_field': '@timestamp', + 'alerta_api_skip_ssl': True, + 'alerta_attributes_keys': ["hostname", "TimestampEvent", "senderIP"], + 'alerta_attributes_values': ["%(key)s", "%(logdate)s", "%(sender_ip)s"], + 'alerta_correlate': ["ProbeUP", "ProbeDOWN"], + 'alerta_event': "ProbeUP", + 'alerta_group': "Health", + 'alerta_origin': "ElastAlert 2", + 'alerta_severity': "debug", + 'alerta_text': "Probe %(hostname)s is UP at %(logdate)s GMT", + 'alerta_value': "UP", + 'type': 'any', + 'alerta_use_match_timestamp': True, + 'alert': 'alerta' + } + + match = { + '@timestamp': '2014-10-10T00:00:00', + # 'key': ---- missing field on purpose, to verify that simply the text is left empty + # 'logdate': ---- missing field on purpose, to verify that simply the text is left empty + 'sender_ip': '1.1.1.1', + 'hostname': 'aProbe' + } + + rules_loader = FileRulesLoader({}) + rules_loader.load_modules(rule) + alert = AlertaAlerter(rule) + with mock.patch('requests.post') as mock_post_request: + alert.alert([match]) + + expected_data = { + "origin": "ElastAlert 2", + "resource": "elastalert", + "severity": "debug", + "service": ["elastalert"], + "tags": [], + "text": "Probe aProbe is UP at GMT", + "value": "UP", + "createTime": "2014-10-10T00:00:00.000000Z", + "environment": "Production", + "rawData": "Test Alerta rule!\n\n@timestamp: 2014-10-10T00:00:00\nhostname: aProbe\nsender_ip: 1.1.1.1\n", + "timeout": 86400, + "correlate": ["ProbeUP", "ProbeDOWN"], + "group": "Health", 
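+ # 'hostname' and 'TimestampEvent' resolve to empty strings below because the match intentionally omits the 'key' and 'logdate' fields.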
+ "attributes": {"senderIP": "1.1.1.1", "hostname": "", "TimestampEvent": ""}, + "type": "elastalert", + "event": "ProbeUP" + } + + mock_post_request.assert_called_once_with( + alert.url, + data=mock.ANY, + headers={ + 'content-type': 'application/json'}, + verify=False + ) + assert expected_data == json.loads( + mock_post_request.call_args_list[0][1]['data']) + assert ('elastalert', logging.INFO, 'Alert sent to Alerta') == caplog.record_tuples[0] + + +def test_alerta_auth(): + rule = { + 'name': 'Test Alerta rule!', + 'alerta_api_url': 'http://elastalerthost:8080/api/alert', + 'alerta_api_key': '123456789ABCDEF', + 'timeframe': datetime.timedelta(hours=1), + 'timestamp_field': '@timestamp', + 'alerta_severity': "debug", + 'type': 'any', + 'alerta_use_match_timestamp': True, + 'alert': 'alerta' + } + + match = { + '@timestamp': '2014-10-10T00:00:00', + 'sender_ip': '1.1.1.1', + 'hostname': 'aProbe' + } + + rules_loader = FileRulesLoader({}) + rules_loader.load_modules(rule) + alert = AlertaAlerter(rule) + with mock.patch('requests.post') as mock_post_request: + alert.alert([match]) + + mock_post_request.assert_called_once_with( + alert.url, + data=mock.ANY, + verify=True, + headers={ + 'content-type': 'application/json', + 'Authorization': 'Key {}'.format(rule['alerta_api_key'])}) + + +def test_alerta_new_style(): + rule = { + 'name': 'Test Alerta rule!', + 'alerta_api_url': 'http://elastalerthost:8080/api/alert', + 'timeframe': datetime.timedelta(hours=1), + 'timestamp_field': '@timestamp', + 'alerta_attributes_keys': ["hostname", "TimestampEvent", "senderIP"], + 'alerta_attributes_values': ["{hostname}", "{logdate}", "{sender_ip}"], + 'alerta_correlate': ["ProbeUP", "ProbeDOWN"], + 'alerta_event': "ProbeUP", + 'alerta_group': "Health", + 'alerta_origin': "ElastAlert 2", + 'alerta_severity': "debug", + 'alerta_text': "Probe {hostname} is UP at {logdate} GMT", + 'alerta_value': "UP", + 'type': 'any', + 'alerta_use_match_timestamp': True, + 'alert': 'alerta' + } + + match = { + '@timestamp': '2014-10-10T00:00:00', + # 'key': ---- missing field on purpose, to verify that simply the text is left empty + # 'logdate': ---- missing field on purpose, to verify that simply the text is left empty + 'sender_ip': '1.1.1.1', + 'hostname': 'aProbe' + } + + rules_loader = FileRulesLoader({}) + rules_loader.load_modules(rule) + alert = AlertaAlerter(rule) + with mock.patch('requests.post') as mock_post_request: + alert.alert([match]) + + expected_data = { + "origin": "ElastAlert 2", + "resource": "elastalert", + "severity": "debug", + "service": ["elastalert"], + "tags": [], + "text": "Probe aProbe is UP at GMT", + "value": "UP", + "createTime": "2014-10-10T00:00:00.000000Z", + "environment": "Production", + "rawData": "Test Alerta rule!\n\n@timestamp: 2014-10-10T00:00:00\nhostname: aProbe\nsender_ip: 1.1.1.1\n", + "timeout": 86400, + "correlate": ["ProbeUP", "ProbeDOWN"], + "group": "Health", + "attributes": {"senderIP": "1.1.1.1", "hostname": "aProbe", "TimestampEvent": ""}, + "type": "elastalert", + "event": "ProbeUP" + } + + mock_post_request.assert_called_once_with( + alert.url, + data=mock.ANY, + verify=True, + headers={ + 'content-type': 'application/json'} + ) + assert expected_data == json.loads( + mock_post_request.call_args_list[0][1]['data']) + + +def test_alerta_use_qk_as_resource(): + rule = { + 'name': 'Test Alerta rule!', + 'alerta_api_url': 'http://elastalerthost:8080/api/alert', + 'timeframe': datetime.timedelta(hours=1), + 'timestamp_field': '@timestamp', + 'alerta_attributes_keys': 
["hostname", "TimestampEvent", "senderIP"], + 'alerta_attributes_values': ["{hostname}", "{logdate}", "{sender_ip}"], + 'alerta_correlate': ["ProbeUP", "ProbeDOWN"], + 'alerta_event': "ProbeUP", + 'alerta_group': "Health", + 'alerta_origin': "ElastAlert 2", + 'alerta_severity': "debug", + 'alerta_text': "Probe {hostname} is UP at {logdate} GMT", + 'alerta_value': "UP", + 'type': 'any', + 'alerta_use_match_timestamp': True, + 'alerta_use_qk_as_resource': True, + 'query_key': 'hostname', + 'alert': 'alerta' + } + + match = { + '@timestamp': '2014-10-10T00:00:00', + 'sender_ip': '1.1.1.1', + 'hostname': 'aProbe' + } + + rules_loader = FileRulesLoader({}) + rules_loader.load_modules(rule) + alert = AlertaAlerter(rule) + with mock.patch('requests.post') as mock_post_request: + alert.alert([match]) + + expected_data = { + "origin": "ElastAlert 2", + "resource": "aProbe", + "severity": "debug", + "service": ["elastalert"], + "tags": [], + "text": "Probe aProbe is UP at GMT", + "value": "UP", + "createTime": "2014-10-10T00:00:00.000000Z", + "environment": "Production", + "rawData": "Test Alerta rule!\n\n@timestamp: 2014-10-10T00:00:00\nhostname: aProbe\nsender_ip: 1.1.1.1\n", + "timeout": 86400, + "correlate": ["ProbeUP", "ProbeDOWN"], + "group": "Health", + "attributes": {"senderIP": "1.1.1.1", "hostname": "aProbe", "TimestampEvent": ""}, + "type": "elastalert", + "event": "ProbeUP" + } + + mock_post_request.assert_called_once_with( + alert.url, + data=mock.ANY, + verify=True, + headers={ + 'content-type': 'application/json'} + ) + assert expected_data == json.loads( + mock_post_request.call_args_list[0][1]['data']) + + +def test_alerta_timeout(): + rule = { + 'name': 'Test Alerta rule!', + 'alerta_api_url': 'http://elastalerthost:8080/api/alert', + 'timeframe': datetime.timedelta(hours=1), + 'timestamp_field': '@timestamp', + 'alerta_attributes_keys': ["hostname", "TimestampEvent", "senderIP"], + 'alerta_attributes_values': ["{hostname}", "{logdate}", "{sender_ip}"], + 'alerta_correlate': ["ProbeUP", "ProbeDOWN"], + 'alerta_event': "ProbeUP", + 'alerta_group': "Health", + 'alerta_origin': "ElastAlert 2", + 'alerta_severity': "debug", + 'alerta_text': "Probe {hostname} is UP at {logdate} GMT", + 'alerta_value': "UP", + 'type': 'any', + 'alerta_use_match_timestamp': True, + 'alerta_timeout': 86450, + 'alert': 'alerta' + } + + match = { + '@timestamp': '2014-10-10T00:00:00', + 'sender_ip': '1.1.1.1', + 'hostname': 'aProbe' + } + + rules_loader = FileRulesLoader({}) + rules_loader.load_modules(rule) + alert = AlertaAlerter(rule) + with mock.patch('requests.post') as mock_post_request: + alert.alert([match]) + + expected_data = { + "origin": "ElastAlert 2", + "resource": "elastalert", + "severity": "debug", + "service": ["elastalert"], + "tags": [], + "text": "Probe aProbe is UP at GMT", + "value": "UP", + "createTime": "2014-10-10T00:00:00.000000Z", + "environment": "Production", + "rawData": "Test Alerta rule!\n\n@timestamp: 2014-10-10T00:00:00\nhostname: aProbe\nsender_ip: 1.1.1.1\n", + "timeout": 86450, + "correlate": ["ProbeUP", "ProbeDOWN"], + "group": "Health", + "attributes": {"senderIP": "1.1.1.1", "hostname": "aProbe", "TimestampEvent": ""}, + "type": "elastalert", + "event": "ProbeUP" + } + + mock_post_request.assert_called_once_with( + alert.url, + data=mock.ANY, + verify=True, + headers={ + 'content-type': 'application/json'} + ) + assert expected_data == json.loads( + mock_post_request.call_args_list[0][1]['data']) + + +def test_alerta_type(): + rule = { + 'name': 'Test Alerta rule!', + 
'alerta_api_url': 'http://elastalerthost:8080/api/alert', + 'timeframe': datetime.timedelta(hours=1), + 'timestamp_field': '@timestamp', + 'alerta_attributes_keys': ["hostname", "TimestampEvent", "senderIP"], + 'alerta_attributes_values': ["{hostname}", "{logdate}", "{sender_ip}"], + 'alerta_correlate': ["ProbeUP", "ProbeDOWN"], + 'alerta_event': "ProbeUP", + 'alerta_group': "Health", + 'alerta_origin': "ElastAlert 2", + 'alerta_severity': "debug", + 'alerta_text': "Probe {hostname} is UP at {logdate} GMT", + 'alerta_value': "UP", + 'type': 'any', + 'alerta_use_match_timestamp': True, + 'alerta_type': 'elastalert2', + 'alert': 'alerta' + } + + match = { + '@timestamp': '2014-10-10T00:00:00', + 'sender_ip': '1.1.1.1', + 'hostname': 'aProbe' + } + + rules_loader = FileRulesLoader({}) + rules_loader.load_modules(rule) + alert = AlertaAlerter(rule) + with mock.patch('requests.post') as mock_post_request: + alert.alert([match]) + + expected_data = { + "origin": "ElastAlert 2", + "resource": "elastalert", + "severity": "debug", + "service": ["elastalert"], + "tags": [], + "text": "Probe aProbe is UP at GMT", + "value": "UP", + "createTime": "2014-10-10T00:00:00.000000Z", + "environment": "Production", + "rawData": "Test Alerta rule!\n\n@timestamp: 2014-10-10T00:00:00\nhostname: aProbe\nsender_ip: 1.1.1.1\n", + "timeout": 86400, + "correlate": ["ProbeUP", "ProbeDOWN"], + "group": "Health", + "attributes": {"senderIP": "1.1.1.1", "hostname": "aProbe", "TimestampEvent": ""}, + "type": "elastalert2", + "event": "ProbeUP" + } + + mock_post_request.assert_called_once_with( + alert.url, + data=mock.ANY, + verify=True, + headers={ + 'content-type': 'application/json'} + ) + assert expected_data == json.loads( + mock_post_request.call_args_list[0][1]['data']) + + +def test_alerta_resource(): + rule = { + 'name': 'Test Alerta rule!', + 'alerta_api_url': 'http://elastalerthost:8080/api/alert', + 'timeframe': datetime.timedelta(hours=1), + 'timestamp_field': '@timestamp', + 'alerta_attributes_keys': ["hostname", "TimestampEvent", "senderIP"], + 'alerta_attributes_values': ["{hostname}", "{logdate}", "{sender_ip}"], + 'alerta_correlate': ["ProbeUP", "ProbeDOWN"], + 'alerta_event': "ProbeUP", + 'alerta_group': "Health", + 'alerta_origin': "ElastAlert 2", + 'alerta_severity': "debug", + 'alerta_text': "Probe {hostname} is UP at {logdate} GMT", + 'alerta_value': "UP", + 'type': 'any', + 'alerta_use_match_timestamp': True, + 'alerta_resource': 'elastalert2', + 'alert': 'alerta' + } + + match = { + '@timestamp': '2014-10-10T00:00:00', + 'sender_ip': '1.1.1.1', + 'hostname': 'aProbe' + } + + rules_loader = FileRulesLoader({}) + rules_loader.load_modules(rule) + alert = AlertaAlerter(rule) + with mock.patch('requests.post') as mock_post_request: + alert.alert([match]) + + expected_data = { + "origin": "ElastAlert 2", + "resource": "elastalert2", + "severity": "debug", + "service": ["elastalert"], + "tags": [], + "text": "Probe aProbe is UP at GMT", + "value": "UP", + "createTime": "2014-10-10T00:00:00.000000Z", + "environment": "Production", + "rawData": "Test Alerta rule!\n\n@timestamp: 2014-10-10T00:00:00\nhostname: aProbe\nsender_ip: 1.1.1.1\n", + "timeout": 86400, + "correlate": ["ProbeUP", "ProbeDOWN"], + "group": "Health", + "attributes": {"senderIP": "1.1.1.1", "hostname": "aProbe", "TimestampEvent": ""}, + "type": "elastalert", + "event": "ProbeUP" + } + + mock_post_request.assert_called_once_with( + alert.url, + data=mock.ANY, + verify=True, + headers={ + 'content-type': 'application/json'} + ) + assert 
expected_data == json.loads( + mock_post_request.call_args_list[0][1]['data']) + + +def test_alerta_service(): + rule = { + 'name': 'Test Alerta rule!', + 'alerta_api_url': 'http://elastalerthost:8080/api/alert', + 'timeframe': datetime.timedelta(hours=1), + 'timestamp_field': '@timestamp', + 'alerta_attributes_keys': ["hostname", "TimestampEvent", "senderIP"], + 'alerta_attributes_values': ["{hostname}", "{logdate}", "{sender_ip}"], + 'alerta_correlate': ["ProbeUP", "ProbeDOWN"], + 'alerta_event': "ProbeUP", + 'alerta_group': "Health", + 'alerta_origin': "ElastAlert 2", + 'alerta_severity': "debug", + 'alerta_text': "Probe {hostname} is UP at {logdate} GMT", + 'alerta_value': "UP", + 'type': 'any', + 'alerta_use_match_timestamp': True, + 'alerta_service': ['elastalert2'], + 'alert': 'alerta' + } + + match = { + '@timestamp': '2014-10-10T00:00:00', + 'sender_ip': '1.1.1.1', + 'hostname': 'aProbe' + } + + rules_loader = FileRulesLoader({}) + rules_loader.load_modules(rule) + alert = AlertaAlerter(rule) + with mock.patch('requests.post') as mock_post_request: + alert.alert([match]) + + expected_data = { + "origin": "ElastAlert 2", + "resource": "elastalert", + "severity": "debug", + "service": ["elastalert2"], + "tags": [], + "text": "Probe aProbe is UP at GMT", + "value": "UP", + "createTime": "2014-10-10T00:00:00.000000Z", + "environment": "Production", + "rawData": "Test Alerta rule!\n\n@timestamp: 2014-10-10T00:00:00\nhostname: aProbe\nsender_ip: 1.1.1.1\n", + "timeout": 86400, + "correlate": ["ProbeUP", "ProbeDOWN"], + "group": "Health", + "attributes": {"senderIP": "1.1.1.1", "hostname": "aProbe", "TimestampEvent": ""}, + "type": "elastalert", + "event": "ProbeUP" + } + + mock_post_request.assert_called_once_with( + alert.url, + data=mock.ANY, + verify=True, + headers={ + 'content-type': 'application/json'} + ) + assert expected_data == json.loads( + mock_post_request.call_args_list[0][1]['data']) + + +def test_alerta_environment(): + rule = { + 'name': 'Test Alerta rule!', + 'alerta_api_url': 'http://elastalerthost:8080/api/alert', + 'timeframe': datetime.timedelta(hours=1), + 'timestamp_field': '@timestamp', + 'alerta_attributes_keys': ["hostname", "TimestampEvent", "senderIP"], + 'alerta_attributes_values': ["{hostname}", "{logdate}", "{sender_ip}"], + 'alerta_correlate': ["ProbeUP", "ProbeDOWN"], + 'alerta_event': "ProbeUP", + 'alerta_group': "Health", + 'alerta_origin': "ElastAlert 2", + 'alerta_severity': "debug", + 'alerta_text': "Probe {hostname} is UP at {logdate} GMT", + 'alerta_value': "UP", + 'type': 'any', + 'alerta_use_match_timestamp': True, + 'alerta_environment': 'Production2', + 'alert': 'alerta' + } + + match = { + '@timestamp': '2014-10-10T00:00:00', + 'sender_ip': '1.1.1.1', + 'hostname': 'aProbe' + } + + rules_loader = FileRulesLoader({}) + rules_loader.load_modules(rule) + alert = AlertaAlerter(rule) + with mock.patch('requests.post') as mock_post_request: + alert.alert([match]) + + expected_data = { + "origin": "ElastAlert 2", + "resource": "elastalert", + "severity": "debug", + "service": ["elastalert"], + "tags": [], + "text": "Probe aProbe is UP at GMT", + "value": "UP", + "createTime": "2014-10-10T00:00:00.000000Z", + "environment": "Production2", + "rawData": "Test Alerta rule!\n\n@timestamp: 2014-10-10T00:00:00\nhostname: aProbe\nsender_ip: 1.1.1.1\n", + "timeout": 86400, + "correlate": ["ProbeUP", "ProbeDOWN"], + "group": "Health", + "attributes": {"senderIP": "1.1.1.1", "hostname": "aProbe", "TimestampEvent": ""}, + "type": "elastalert", + "event": 
"ProbeUP" + } + + mock_post_request.assert_called_once_with( + alert.url, + data=mock.ANY, + verify=True, + headers={ + 'content-type': 'application/json'} + ) + assert expected_data == json.loads( + mock_post_request.call_args_list[0][1]['data']) + + +def test_alerta_tags(): + rule = { + 'name': 'Test Alerta rule!', + 'alerta_api_url': 'http://elastalerthost:8080/api/alert', + 'timeframe': datetime.timedelta(hours=1), + 'timestamp_field': '@timestamp', + 'alerta_attributes_keys': ["hostname", "TimestampEvent", "senderIP"], + 'alerta_attributes_values': ["{hostname}", "{logdate}", "{sender_ip}"], + 'alerta_correlate': ["ProbeUP", "ProbeDOWN"], + 'alerta_event': "ProbeUP", + 'alerta_group': "Health", + 'alerta_origin': "ElastAlert 2", + 'alerta_severity': "debug", + 'alerta_text': "Probe {hostname} is UP at {logdate} GMT", + 'alerta_value': "UP", + 'type': 'any', + 'alerta_use_match_timestamp': True, + 'alerta_tags': ['elastalert2'], + 'alert': 'alerta' + } + + match = { + '@timestamp': '2014-10-10T00:00:00', + 'sender_ip': '1.1.1.1', + 'hostname': 'aProbe' + } + + rules_loader = FileRulesLoader({}) + rules_loader.load_modules(rule) + alert = AlertaAlerter(rule) + with mock.patch('requests.post') as mock_post_request: + alert.alert([match]) + + expected_data = { + "origin": "ElastAlert 2", + "resource": "elastalert", + "severity": "debug", + "service": ["elastalert"], + "tags": ['elastalert2'], + "text": "Probe aProbe is UP at GMT", + "value": "UP", + "createTime": "2014-10-10T00:00:00.000000Z", + "environment": "Production", + "rawData": "Test Alerta rule!\n\n@timestamp: 2014-10-10T00:00:00\nhostname: aProbe\nsender_ip: 1.1.1.1\n", + "timeout": 86400, + "correlate": ["ProbeUP", "ProbeDOWN"], + "group": "Health", + "attributes": {"senderIP": "1.1.1.1", "hostname": "aProbe", "TimestampEvent": ""}, + "type": "elastalert", + "event": "ProbeUP" + } + + mock_post_request.assert_called_once_with( + alert.url, + data=mock.ANY, + verify=True, + headers={ + 'content-type': 'application/json'} + ) + assert expected_data == json.loads( + mock_post_request.call_args_list[0][1]['data']) + + +def test_alerta_ea_exception(): + with pytest.raises(EAException) as ea: + rule = { + 'name': 'Test Alerta rule!', + 'alerta_api_url': 'http://elastalerthost:8080/api/alert', + 'timeframe': datetime.timedelta(hours=1), + 'timestamp_field': '@timestamp', + 'alerta_attributes_keys': ["hostname", "TimestampEvent", "senderIP"], + 'alerta_attributes_values': ["{hostname}", "{logdate}", "{sender_ip}"], + 'alerta_correlate': ["ProbeUP", "ProbeDOWN"], + 'alerta_event': "ProbeUP", + 'alerta_group': "Health", + 'alerta_origin': "ElastAlert 2", + 'alerta_severity': "debug", + 'alerta_text': "Probe {hostname} is UP at {logdate} GMT", + 'alerta_value': "UP", + 'type': 'any', + 'alerta_use_match_timestamp': True, + 'alerta_tags': ['elastalert2'], + 'alert': 'alerta' + } + + match = { + '@timestamp': '2014-10-10T00:00:00', + 'sender_ip': '1.1.1.1', + 'hostname': 'aProbe' + } + + rules_loader = FileRulesLoader({}) + rules_loader.load_modules(rule) + alert = AlertaAlerter(rule) + mock_run = mock.MagicMock(side_effect=RequestException) + with mock.patch('requests.post', mock_run), pytest.raises(RequestException): + alert.alert([match]) + assert 'Error posting to Alerta: ' in str(ea) + + +def test_alerta_getinfo(): + rule = { + 'name': 'Test Alerta rule!', + 'alerta_api_url': 'http://elastalerthost:8080/api/alert', + 'timeframe': datetime.timedelta(hours=1), + 'timestamp_field': '@timestamp', + 'type': 'any', + 'alert': 'alerta' + } + 
+ rules_loader = FileRulesLoader({}) + rules_loader.load_modules(rule) + alert = AlertaAlerter(rule) + + expected_data = { + 'type': 'alerta', + 'alerta_url': 'http://elastalerthost:8080/api/alert' + } + actual_data = alert.get_info() + + assert expected_data == actual_data + + +@pytest.mark.parametrize('alerta_api_url, expected_data', [ + ('', 'Missing required option(s): alerta_api_url'), + ('http://elastalerthost:8080/api/alert', + { + 'type': 'alerta', + 'alerta_url': 'http://elastalerthost:8080/api/alert' + }), +]) +def test_alerta_required_error(alerta_api_url, expected_data): + try: + rule = { + 'name': 'Test Alerta rule!', + 'timeframe': datetime.timedelta(hours=1), + 'timestamp_field': '@timestamp', + 'type': 'any', + 'alert': 'alerta' + } + + if alerta_api_url: + rule['alerta_api_url'] = alerta_api_url + + rules_loader = FileRulesLoader({}) + rules_loader.load_modules(rule) + alert = AlertaAlerter(rule) + + actual_data = alert.get_info() + assert expected_data == actual_data + except Exception as ea: + assert expected_data in str(ea) + + +@pytest.mark.parametrize('query_key, expected_data', [ + ('hostname', 'Test Alerta rule!.aProbe'), + ('test', 'Test Alerta rule!'), + ('', 'Test Alerta rule!'), +]) +def test_alerta_create_default_title(query_key, expected_data): + rule = { + 'name': 'Test Alerta rule!', + 'alerta_api_url': 'http://elastalerthost:8080/api/alert', + 'timeframe': datetime.timedelta(hours=1), + 'timestamp_field': '@timestamp', + 'type': 'any', + 'alert': 'alerta' + } + if query_key: + rule['query_key'] = query_key + + match = [ + { + '@timestamp': '2014-10-10T00:00:00', + 'sender_ip': '1.1.1.1', + 'hostname': 'aProbe' + }, + { + '@timestamp': '2014-10-10T00:00:00', + 'sender_ip': '1.1.1.1', + 'hostname2': 'aProbe' + } + ] + rules_loader = FileRulesLoader({}) + rules_loader.load_modules(rule) + alert = AlertaAlerter(rule) + + result = alert.create_default_title(match) + assert expected_data == result + + +def test_alerta_match_timestamp_none(): + rule = { + 'name': 'Test Alerta rule!', + 'alerta_api_url': 'http://elastalerthost:8080/api/alert', + 'timeframe': datetime.timedelta(hours=1), + 'alerta_attributes_keys': ["hostname", "TimestampEvent", "senderIP"], + 'alerta_attributes_values': ["{hostname}", "{logdate}", "{sender_ip}"], + 'alerta_correlate': ["ProbeUP", "ProbeDOWN"], + 'alerta_event': "ProbeUP", + 'alerta_group': "Health", + 'alerta_origin': "ElastAlert 2", + 'alerta_severity': "debug", + 'alerta_text': "Probe {hostname} is UP at {logdate} GMT", + 'alerta_value': "UP", + 'type': 'any', + 'alerta_use_match_timestamp': True, + 'alerta_tags': ['elastalert2'], + 'alert': 'alerta' + } + + match = { + 'sender_ip': '1.1.1.1', + 'hostname': 'aProbe' + } + + rules_loader = FileRulesLoader({}) + rules_loader.load_modules(rule) + alert = AlertaAlerter(rule) + with mock.patch('requests.post') as mock_post_request: + alert.alert([match]) + + expected_data = { + "origin": "ElastAlert 2", + "resource": "elastalert", + "severity": "debug", + "service": ["elastalert"], + "tags": ['elastalert2'], + "text": "Probe aProbe is UP at GMT", + "value": "UP", + "environment": "Production", + "timeout": 86400, + "correlate": ["ProbeUP", "ProbeDOWN"], + "group": "Health", + "attributes": {"senderIP": "1.1.1.1", "hostname": "aProbe", "TimestampEvent": ""}, + "type": "elastalert", + "event": "ProbeUP" + } + + mock_post_request.assert_called_once_with( + alert.url, + data=mock.ANY, + verify=True, + headers={ + 'content-type': 'application/json'} + ) + + actual_data = json.loads( + 
mock_post_request.call_args_list[0][1]['data']) + del actual_data['createTime'] + del actual_data['rawData'] + assert expected_data == actual_data + + +def test_alerta_use_match_timestamp(): + rule = { + 'name': 'Test Alerta rule!', + 'alerta_api_url': 'http://elastalerthost:8080/api/alert', + 'timeframe': datetime.timedelta(hours=1), + 'alerta_attributes_keys': ["hostname", "TimestampEvent", "senderIP"], + 'alerta_attributes_values': ["{hostname}", "{logdate}", "{sender_ip}"], + 'alerta_correlate': ["ProbeUP", "ProbeDOWN"], + 'alerta_event': "ProbeUP", + 'alerta_group': "Health", + 'alerta_origin': "ElastAlert 2", + 'alerta_severity': "debug", + 'alerta_text': "Probe {hostname} is UP at {logdate} GMT", + 'alerta_value': "UP", + 'type': 'any', + 'alerta_use_match_timestamp': False, + 'alerta_tags': ['elastalert2'], + 'alert': 'alerta' + } + + match = { + 'sender_ip': '1.1.1.1', + 'hostname': 'aProbe' + } + + rules_loader = FileRulesLoader({}) + rules_loader.load_modules(rule) + alert = AlertaAlerter(rule) + with mock.patch('requests.post') as mock_post_request: + alert.alert([match]) + + expected_data = { + "origin": "ElastAlert 2", + "resource": "elastalert", + "severity": "debug", + "service": ["elastalert"], + "tags": ['elastalert2'], + "text": "Probe aProbe is UP at GMT", + "value": "UP", + "environment": "Production", + "timeout": 86400, + "correlate": ["ProbeUP", "ProbeDOWN"], + "group": "Health", + "attributes": {"senderIP": "1.1.1.1", "hostname": "aProbe", "TimestampEvent": ""}, + "type": "elastalert", + "event": "ProbeUP" + } + + mock_post_request.assert_called_once_with( + alert.url, + data=mock.ANY, + verify=True, + headers={ + 'content-type': 'application/json'} + ) + + actual_data = json.loads( + mock_post_request.call_args_list[0][1]['data']) + del actual_data['createTime'] + del actual_data['rawData'] + assert expected_data == actual_data + + +def test_get_json_payload_error(): + rule = { + 'name': 'Test Alerta rule!', + 'alerta_api_url': 'http://elastalerthost:8080/api/alert', + 'timeframe': datetime.timedelta(hours=1), + 'timestamp_field': '@timestamp', + 'type': 'any', + 'alert': 'alerta', + 'query_key': 'hostname' + } + match = [{ + '@timestamp': '2014-10-10T00:00:00', + 'sender_ip': '1.1.1.1', + 'hostname': 'aProbe' + }] + rules_loader = FileRulesLoader({}) + rules_loader.load_modules(rule) + alert = AlertaAlerter(rule) + + mock_run = mock.MagicMock(side_effect=Exception) + with mock.patch('json.dumps', mock_run): + + with pytest.raises(Exception) as e: + alert.get_json_payload(match) + + assert 'Error building Alerta request: ' in str(e) diff --git a/tests/alerters/alertmanager_test.py b/tests/alerters/alertmanager_test.py new file mode 100644 index 000000000..5c8314133 --- /dev/null +++ b/tests/alerters/alertmanager_test.py @@ -0,0 +1,388 @@ +import json +import logging +import pytest + +from unittest import mock + +from requests import RequestException +from requests.auth import HTTPBasicAuth + +from elastalert.alerters.alertmanager import AlertmanagerAlerter +from elastalert.loaders import FileRulesLoader +from elastalert.util import EAException + + +def test_alertmanager(caplog): + caplog.set_level(logging.INFO) + rule = { + 'name': 'Test Alertmanager Rule', + 'type': 'any', + 'alertmanager_hosts': ['http://alertmanager:9093'], + 'alertmanager_alertname': 'Title', + 'alertmanager_annotations': {'severity': 'error'}, + 'alertmanager_labels': {'source': 'elastalert'}, + 'alertmanager_fields': {'msg': 'message', 'log': '@log_name'}, + 'alert_subject_args': ['message', 
'@log_name'], + 'alert': [] + } + rules_loader = FileRulesLoader({}) + rules_loader.load_modules(rule) + alert = AlertmanagerAlerter(rule) + match = { + '@timestamp': '2021-01-01T00:00:00', + 'somefield': 'foobarbaz', + 'message': 'Quit 123', + '@log_name': 'mysqld.general' + } + with mock.patch('requests.post') as mock_post_request: + alert.alert([match]) + + expected_data = [ + { + 'annotations': + { + 'severity': 'error', + 'summary': 'Test Alertmanager Rule', + 'description': 'Test Alertmanager Rule\n\n' + + '@log_name: mysqld.general\n' + + '@timestamp: 2021-01-01T00:00:00\n' + + 'message: Quit 123\nsomefield: foobarbaz\n' + }, + 'labels': { + 'source': 'elastalert', + 'msg': 'Quit 123', + 'log': 'mysqld.general', + 'alertname': 'Title', + 'elastalert_rule': 'Test Alertmanager Rule' + } + } + ] + + mock_post_request.assert_called_once_with( + 'http://alertmanager:9093/api/v1/alerts', + data=mock.ANY, + headers={'content-type': 'application/json'}, + proxies=None, + verify=True, + timeout=10, + auth=None + ) + assert expected_data == json.loads(mock_post_request.call_args_list[0][1]['data']) + assert ('elastalert', logging.INFO, "Alert sent to Alertmanager") == caplog.record_tuples[0] + + +def test_alertmanager_porxy(): + rule = { + 'name': 'Test Alertmanager Rule', + 'type': 'any', + 'alertmanager_hosts': ['http://alertmanager:9093'], + 'alertmanager_alertname': 'Title', + 'alertmanager_annotations': {'severity': 'error'}, + 'alertmanager_labels': {'source': 'elastalert'}, + 'alertmanager_fields': {'msg': 'message', 'log': '@log_name'}, + 'alertmanager_proxy': 'http://proxy.url', + 'alert_subject_args': ['message', '@log_name'], + 'alert': [] + } + rules_loader = FileRulesLoader({}) + rules_loader.load_modules(rule) + alert = AlertmanagerAlerter(rule) + match = { + '@timestamp': '2021-01-01T00:00:00', + 'somefield': 'foobarbaz', + 'message': 'Quit 123', + '@log_name': 'mysqld.general' + } + with mock.patch('requests.post') as mock_post_request: + alert.alert([match]) + + expected_data = [ + { + 'annotations': + { + 'severity': 'error', + 'summary': 'Test Alertmanager Rule', + 'description': 'Test Alertmanager Rule\n\n' + + '@log_name: mysqld.general\n' + + '@timestamp: 2021-01-01T00:00:00\n' + + 'message: Quit 123\nsomefield: foobarbaz\n' + }, + 'labels': { + 'source': 'elastalert', + 'msg': 'Quit 123', + 'log': 'mysqld.general', + 'alertname': 'Title', + 'elastalert_rule': 'Test Alertmanager Rule' + } + } + ] + + mock_post_request.assert_called_once_with( + 'http://alertmanager:9093/api/v1/alerts', + data=mock.ANY, + headers={'content-type': 'application/json'}, + proxies={'https': 'http://proxy.url'}, + verify=True, + timeout=10, + auth=None + ) + assert expected_data == json.loads(mock_post_request.call_args_list[0][1]['data']) + + +def test_alertmanager_timeout(): + rule = { + 'name': 'Test Alertmanager Rule', + 'type': 'any', + 'alertmanager_hosts': ['http://alertmanager:9093'], + 'alertmanager_alertname': 'Title', + 'alertmanager_annotations': {'severity': 'error'}, + 'alertmanager_labels': {'source': 'elastalert'}, + 'alertmanager_fields': {'msg': 'message', 'log': '@log_name'}, + 'alertmanager_timeout': 20, + 'alert_subject_args': ['message', '@log_name'], + 'alert': [] + } + rules_loader = FileRulesLoader({}) + rules_loader.load_modules(rule) + alert = AlertmanagerAlerter(rule) + match = { + '@timestamp': '2021-01-01T00:00:00', + 'somefield': 'foobarbaz', + 'message': 'Quit 123', + '@log_name': 'mysqld.general' + } + with mock.patch('requests.post') as mock_post_request: + 
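+ # requests.post is mocked so the assertion below can confirm that alertmanager_timeout is forwarded as timeout=20.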
alert.alert([match]) + + expected_data = [ + { + 'annotations': + { + 'severity': 'error', + 'summary': 'Test Alertmanager Rule', + 'description': 'Test Alertmanager Rule\n\n' + + '@log_name: mysqld.general\n' + + '@timestamp: 2021-01-01T00:00:00\n' + + 'message: Quit 123\nsomefield: foobarbaz\n' + }, + 'labels': { + 'source': 'elastalert', + 'msg': 'Quit 123', + 'log': 'mysqld.general', + 'alertname': 'Title', + 'elastalert_rule': 'Test Alertmanager Rule' + } + } + ] + + mock_post_request.assert_called_once_with( + 'http://alertmanager:9093/api/v1/alerts', + data=mock.ANY, + headers={'content-type': 'application/json'}, + proxies=None, + verify=True, + timeout=20, + auth=None + ) + assert expected_data == json.loads(mock_post_request.call_args_list[0][1]['data']) + + +@pytest.mark.parametrize('ca_certs, ignore_ssl_errors, excpet_verify', [ + ('', '', True), + ('', True, False), + ('', False, True), + (True, '', True), + (True, True, True), + (True, False, True), + (False, '', True), + (False, True, False), + (False, False, True) +]) +def test_alertmanager_ca_certs(ca_certs, ignore_ssl_errors, excpet_verify): + rule = { + 'name': 'Test Alertmanager Rule', + 'type': 'any', + 'alertmanager_hosts': ['http://alertmanager:9093'], + 'alertmanager_alertname': 'Title', + 'alertmanager_annotations': {'severity': 'error'}, + 'alertmanager_labels': {'source': 'elastalert'}, + 'alertmanager_fields': {'msg': 'message', 'log': '@log_name'}, + 'alert_subject_args': ['message', '@log_name'], + 'alert': [] + } + if ca_certs: + rule['alertmanager_ca_certs'] = ca_certs + + if ignore_ssl_errors: + rule['alertmanager_ignore_ssl_errors'] = ignore_ssl_errors + + rules_loader = FileRulesLoader({}) + rules_loader.load_modules(rule) + alert = AlertmanagerAlerter(rule) + match = { + '@timestamp': '2021-01-01T00:00:00', + 'somefield': 'foobarbaz', + 'message': 'Quit 123', + '@log_name': 'mysqld.general' + } + with mock.patch('requests.post') as mock_post_request: + alert.alert([match]) + + expected_data = [ + { + 'annotations': + { + 'severity': 'error', + 'summary': 'Test Alertmanager Rule', + 'description': 'Test Alertmanager Rule\n\n' + + '@log_name: mysqld.general\n' + + '@timestamp: 2021-01-01T00:00:00\n' + + 'message: Quit 123\nsomefield: foobarbaz\n' + }, + 'labels': { + 'source': 'elastalert', + 'msg': 'Quit 123', + 'log': 'mysqld.general', + 'alertname': 'Title', + 'elastalert_rule': 'Test Alertmanager Rule' + } + } + ] + + mock_post_request.assert_called_once_with( + 'http://alertmanager:9093/api/v1/alerts', + data=mock.ANY, + headers={'content-type': 'application/json'}, + proxies=None, + verify=excpet_verify, + timeout=10, + auth=None + ) + assert expected_data == json.loads(mock_post_request.call_args_list[0][1]['data']) + + +def test_alertmanager_ea_exception(): + with pytest.raises(EAException) as ea: + rule = { + 'name': 'Test Alertmanager Rule', + 'type': 'any', + 'alertmanager_hosts': ['http://alertmanager:9093'], + 'alert': [] + } + rules_loader = FileRulesLoader({}) + rules_loader.load_modules(rule) + alert = AlertmanagerAlerter(rule) + match = { + '@timestamp': '2021-01-01T00:00:00', + 'somefield': 'foobarbaz' + } + mock_run = mock.MagicMock(side_effect=RequestException) + with mock.patch('requests.post', mock_run), pytest.raises(RequestException): + alert.alert([match]) + assert 'Error posting to Alertmanager' in str(ea) + + +def test_alertmanager_getinfo(): + rule = { + 'name': 'Test Alertmanager Rule', + 'type': 'any', + 'alertmanager_hosts': 'http://alertmanager:9093', + 'alert': [] + } + 
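+ # Only the alerter metadata is exercised here; get_info() reports just the alerter type.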
rules_loader = FileRulesLoader({}) + rules_loader.load_modules(rule) + alert = AlertmanagerAlerter(rule) + + expected_data = { + 'type': 'alertmanager' + } + actual_data = alert.get_info() + assert expected_data == actual_data + + +@pytest.mark.parametrize('alertmanager_hosts, expected_data', [ + ([], 'Missing required option(s): alertmanager_hosts'), + (['http://alertmanager:9093'], + { + 'type': 'alertmanager' + }), +]) +def test_alertmanager_required_error(alertmanager_hosts, expected_data): + try: + rule = { + 'name': 'Test Alertmanager Rule', + 'type': 'any', + 'alert': [] + } + + if alertmanager_hosts: + rule['alertmanager_hosts'] = alertmanager_hosts + + rules_loader = FileRulesLoader({}) + rules_loader.load_modules(rule) + alert = AlertmanagerAlerter(rule) + + actual_data = alert.get_info() + assert expected_data == actual_data + except Exception as ea: + print('ea %s' % str(ea)) + assert expected_data in str(ea) + + +def test_alertmanager_basic_auth(): + rule = { + 'name': 'Test Alertmanager Rule', + 'type': 'any', + 'alertmanager_hosts': ['http://alertmanager:9093'], + 'alertmanager_alertname': 'Title', + 'alertmanager_annotations': {'severity': 'error'}, + 'alertmanager_labels': {'source': 'elastalert'}, + 'alertmanager_fields': {'msg': 'message', 'log': '@log_name'}, + 'alertmanager_basic_auth_login': 'user', + 'alertmanager_basic_auth_password': 'password', + 'alert_subject_args': ['message', '@log_name'], + 'alert': [] + } + rules_loader = FileRulesLoader({}) + rules_loader.load_modules(rule) + alert = AlertmanagerAlerter(rule) + match = { + '@timestamp': '2021-01-01T00:00:00', + 'somefield': 'foobarbaz', + 'message': 'Quit 123', + '@log_name': 'mysqld.general' + } + with mock.patch('requests.post') as mock_post_request: + alert.alert([match]) + + expected_data = [ + { + 'annotations': + { + 'severity': 'error', + 'summary': 'Test Alertmanager Rule', + 'description': 'Test Alertmanager Rule\n\n' + + '@log_name: mysqld.general\n' + + '@timestamp: 2021-01-01T00:00:00\n' + + 'message: Quit 123\nsomefield: foobarbaz\n' + }, + 'labels': { + 'source': 'elastalert', + 'msg': 'Quit 123', + 'log': 'mysqld.general', + 'alertname': 'Title', + 'elastalert_rule': 'Test Alertmanager Rule' + } + } + ] + + mock_post_request.assert_called_once_with( + 'http://alertmanager:9093/api/v1/alerts', + data=mock.ANY, + headers={'content-type': 'application/json'}, + proxies=None, + verify=True, + timeout=10, + auth=HTTPBasicAuth('user', 'password') + ) + assert expected_data == json.loads(mock_post_request.call_args_list[0][1]['data']) diff --git a/tests/alerters/chatwork_test.py b/tests/alerters/chatwork_test.py new file mode 100644 index 000000000..9e8a8969a --- /dev/null +++ b/tests/alerters/chatwork_test.py @@ -0,0 +1,239 @@ +import logging +import pytest + +from unittest import mock + +from requests import RequestException +from requests.auth import HTTPProxyAuth + +from elastalert.alerters.chatwork import ChatworkAlerter +from elastalert.loaders import FileRulesLoader +from elastalert.util import EAException + + +def test_chatwork(caplog): + caplog.set_level(logging.INFO) + rule = { + 'name': 'Test Chatwork Rule', + 'type': 'any', + 'chatwork_apikey': 'xxxx1', + 'chatwork_room_id': 'xxxx2', + 'alert': [] + } + rules_loader = FileRulesLoader({}) + rules_loader.load_modules(rule) + alert = ChatworkAlerter(rule) + match = { + '@timestamp': '2021-01-01T00:00:00', + 'somefield': 'foobarbaz' + } + with mock.patch('requests.post') as mock_post_request: + alert.alert([match]) + expected_data = { + 
'body': 'Test Chatwork Rule\n\n@timestamp: 2021-01-01T00:00:00\nsomefield: foobarbaz\n', + } + + mock_post_request.assert_called_once_with( + 'https://api.chatwork.com/v2/rooms/xxxx2/messages', + params=mock.ANY, + headers={'X-ChatWorkToken': 'xxxx1'}, + proxies=None, + auth=None + ) + + actual_data = mock_post_request.call_args_list[0][1]['params'] + assert expected_data == actual_data + assert ('elastalert', logging.INFO, 'Alert sent to Chatwork room xxxx2') == caplog.record_tuples[0] + + +def test_chatwork_proxy(): + rule = { + 'name': 'Test Chatwork Rule', + 'type': 'any', + 'chatwork_apikey': 'xxxx1', + 'chatwork_room_id': 'xxxx2', + 'chatwork_proxy': 'http://proxy.url', + 'chatwork_proxy_login': 'admin', + 'chatwork_proxy_pass': 'password', + 'alert': [] + } + rules_loader = FileRulesLoader({}) + rules_loader.load_modules(rule) + alert = ChatworkAlerter(rule) + match = { + '@timestamp': '2021-01-01T00:00:00', + 'somefield': 'foobarbaz' + } + with mock.patch('requests.post') as mock_post_request: + alert.alert([match]) + expected_data = { + 'body': 'Test Chatwork Rule\n\n@timestamp: 2021-01-01T00:00:00\nsomefield: foobarbaz\n', + } + + mock_post_request.assert_called_once_with( + 'https://api.chatwork.com/v2/rooms/xxxx2/messages', + params=mock.ANY, + headers={'X-ChatWorkToken': 'xxxx1'}, + proxies={'https': 'http://proxy.url'}, + auth=HTTPProxyAuth('admin', 'password') + ) + + actual_data = mock_post_request.call_args_list[0][1]['params'] + assert expected_data == actual_data + + +def test_chatwork_ea_exception(): + with pytest.raises(EAException) as ea: + rule = { + 'name': 'Test Chatwork Rule', + 'type': 'any', + 'chatwork_apikey': 'xxxx1', + 'chatwork_room_id': 'xxxx2', + 'chatwork_proxy': 'http://proxy.url', + 'chatwork_proxy_login': 'admin', + 'chatwork_proxy_pass': 'password', + 'alert': [] + } + rules_loader = FileRulesLoader({}) + rules_loader.load_modules(rule) + alert = ChatworkAlerter(rule) + match = { + '@timestamp': '2021-01-01T00:00:00', + 'somefield': 'foobarbaz' + } + mock_run = mock.MagicMock(side_effect=RequestException) + with mock.patch('requests.post', mock_run), pytest.raises(RequestException): + alert.alert([match]) + assert 'Error posting to Chattwork: . 
Details: ' in str(ea) + + +def test_chatwork_getinfo(): + rule = { + 'name': 'Test Chatwork Rule', + 'type': 'any', + 'chatwork_apikey': 'xxxx1', + 'chatwork_room_id': 'xxxx2', + 'alert': [] + } + rules_loader = FileRulesLoader({}) + rules_loader.load_modules(rule) + alert = ChatworkAlerter(rule) + + expected_data = { + "type": "chatwork", + "chatwork_room_id": "xxxx2" + } + actual_data = alert.get_info() + assert expected_data == actual_data + + +@pytest.mark.parametrize('chatwork_apikey, chatwork_room_id, expected_data', [ + ('', '', 'Missing required option(s): chatwork_apikey, chatwork_room_id'), + ('xxxx1', '', 'Missing required option(s): chatwork_apikey, chatwork_room_id'), + ('', 'xxxx2', '1Missing required option(s): chatwork_apikey, chatwork_room_id'), + ('xxxx1', 'xxxx2', + { + "type": "chatwork", + "chatwork_room_id": "xxxx2" + }), +]) +def test_chatwork_required_error(chatwork_apikey, chatwork_room_id, expected_data): + try: + rule = { + 'name': 'Test Chatwork Rule', + 'type': 'any', + 'alert': [] + } + + if chatwork_apikey: + rule['chatwork_apikey'] = chatwork_apikey + + if chatwork_room_id: + rule['chatwork_room_id'] = chatwork_room_id + + rules_loader = FileRulesLoader({}) + rules_loader.load_modules(rule) + alert = ChatworkAlerter(rule) + + actual_data = alert.get_info() + assert expected_data == actual_data + except Exception as ea: + assert expected_data in str(ea) + + +def test_chatwork_maxlength(): + rule = { + 'name': 'Test Chatwork Rule' + ('a' * 2069), + 'type': 'any', + 'chatwork_apikey': 'xxxx1', + 'chatwork_room_id': 'xxxx2', + 'alert': [] + } + rules_loader = FileRulesLoader({}) + rules_loader.load_modules(rule) + alert = ChatworkAlerter(rule) + match = { + '@timestamp': '2021-01-01T00:00:00', + 'somefield': 'foobarbaz' + } + with mock.patch('requests.post') as mock_post_request: + alert.alert([match]) + expected_data = { + 'body': 'Test Chatwork Rule' + ('a' * 1932) + + '\n *message was cropped according to chatwork embed description limits!*' + } + + mock_post_request.assert_called_once_with( + 'https://api.chatwork.com/v2/rooms/xxxx2/messages', + params=mock.ANY, + headers={'X-ChatWorkToken': 'xxxx1'}, + proxies=None, + auth=None + ) + + actual_data = mock_post_request.call_args_list[0][1]['params'] + assert expected_data == actual_data + + +def test_chatwork_matchs(): + rule = { + 'name': 'Test Chatwork Rule', + 'type': 'any', + 'chatwork_apikey': 'xxxx1', + 'chatwork_room_id': 'xxxx2', + 'alert': [] + } + rules_loader = FileRulesLoader({}) + rules_loader.load_modules(rule) + alert = ChatworkAlerter(rule) + match = { + '@timestamp': '2021-01-01T00:00:00', + 'somefield': 'foobarbaz' + } + with mock.patch('requests.post') as mock_post_request: + alert.alert([match, match]) + expected_data = { + 'body': 'Test Chatwork Rule\n' + + '\n' + + '@timestamp: 2021-01-01T00:00:00\n' + + 'somefield: foobarbaz\n' + + '\n' + + '----------------------------------------\n' + + 'Test Chatwork Rule\n' + + '\n' + + '@timestamp: 2021-01-01T00:00:00\n' + + 'somefield: foobarbaz\n' + + '\n' + + '----------------------------------------\n', + } + + mock_post_request.assert_called_once_with( + 'https://api.chatwork.com/v2/rooms/xxxx2/messages', + params=mock.ANY, + headers={'X-ChatWorkToken': 'xxxx1'}, + proxies=None, + auth=None + ) + + actual_data = mock_post_request.call_args_list[0][1]['params'] + assert expected_data == actual_data diff --git a/tests/alerters/command_test.py b/tests/alerters/command_test.py new file mode 100644 index 000000000..170cc6108 --- /dev/null +++ 
b/tests/alerters/command_test.py @@ -0,0 +1,131 @@ +import json +import subprocess +import logging + +import pytest +from unittest import mock + +from elastalert.alerters.command import CommandAlerter +from elastalert.alerts import BasicMatchString +from elastalert.util import EAException +from tests.alerts_test import mock_rule + + +def test_command_getinfo(): + # Test command as list with a formatted arg + rule = {'command': ['/bin/test/', '--arg', '%(somefield)s']} + alert = CommandAlerter(rule) + match = {'@timestamp': '2014-01-01T00:00:00', + 'somefield': 'foobarbaz', + 'nested': {'field': 1}} + with mock.patch("elastalert.alerters.command.subprocess.Popen") as mock_popen: + alert.alert([match]) + assert mock_popen.called_with(['/bin/test', '--arg', 'foobarbaz'], stdin=subprocess.PIPE, shell=False) + expected_data = { + 'type': 'command', + 'command': '/bin/test/ --arg foobarbaz' + } + actual_data = alert.get_info() + assert expected_data == actual_data + + +def test_command_old_style_string_format1(caplog): + caplog.set_level(logging.INFO) + # Test command as string with formatted arg (old-style string format) + rule = {'command': '/bin/test/ --arg %(somefield)s'} + match = {'@timestamp': '2014-01-01T00:00:00', + 'somefield': 'foobarbaz', + 'nested': {'field': 1}} + alert = CommandAlerter(rule) + with mock.patch("elastalert.alerters.command.subprocess.Popen") as mock_popen: + alert.alert([match]) + assert mock_popen.called_with('/bin/test --arg foobarbaz', stdin=subprocess.PIPE, shell=False) + assert ('elastalert', logging.WARNING, 'Warning! You could be vulnerable to shell injection!') == caplog.record_tuples[0] + assert ('elastalert', logging.INFO, 'Alert sent to Command') == caplog.record_tuples[1] + + +def test_command_old_style_string_format2(): + # Test command as string without formatted arg (old-style string format) + rule = {'command': '/bin/test/foo.sh'} + match = {'@timestamp': '2014-01-01T00:00:00', + 'somefield': 'foobarbaz', + 'nested': {'field': 1}} + alert = CommandAlerter(rule) + with mock.patch("elastalert.alerters.command.subprocess.Popen") as mock_popen: + alert.alert([match]) + assert mock_popen.called_with('/bin/test/foo.sh', stdin=subprocess.PIPE, shell=True) + + +def test_command_pipe_match_json(): + rule = {'command': ['/bin/test/', '--arg', '%(somefield)s'], + 'pipe_match_json': True} + alert = CommandAlerter(rule) + match = {'@timestamp': '2014-01-01T00:00:00', + 'somefield': 'foobarbaz'} + with mock.patch("elastalert.alerters.command.subprocess.Popen") as mock_popen: + mock_subprocess = mock.Mock() + mock_popen.return_value = mock_subprocess + mock_subprocess.communicate.return_value = (None, None) + alert.alert([match]) + assert mock_popen.called_with(['/bin/test', '--arg', 'foobarbaz'], stdin=subprocess.PIPE, shell=False) + assert mock_subprocess.communicate.called_with(input=json.dumps(match)) + + +def test_command_pipe_alert_text(): + rule = {'command': ['/bin/test/', '--arg', '%(somefield)s'], + 'pipe_alert_text': True, 'type': mock_rule(), 'name': 'Test'} + alert = CommandAlerter(rule) + match = {'@timestamp': '2014-01-01T00:00:00', + 'somefield': 'foobarbaz'} + alert_text = str(BasicMatchString(rule, match)) + with mock.patch("elastalert.alerters.command.subprocess.Popen") as mock_popen: + mock_subprocess = mock.Mock() + mock_popen.return_value = mock_subprocess + mock_subprocess.communicate.return_value = (None, None) + alert.alert([match]) + assert mock_popen.called_with(['/bin/test', '--arg', 'foobarbaz'], stdin=subprocess.PIPE, shell=False) + 
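+ # With pipe_alert_text enabled, the rendered BasicMatchString is what gets written to the command's stdin.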
assert mock_subprocess.communicate.called_with(input=alert_text.encode()) + + +def test_command_fail_on_non_zero_exit(): + rule = {'command': ['/bin/test/', '--arg', '%(somefield)s'], + 'fail_on_non_zero_exit': True} + alert = CommandAlerter(rule) + match = {'@timestamp': '2014-01-01T00:00:00', + 'somefield': 'foobarbaz'} + with pytest.raises(Exception) as exception: + with mock.patch("elastalert.alerters.command.subprocess.Popen") as mock_popen: + mock_subprocess = mock.Mock() + mock_popen.return_value = mock_subprocess + mock_subprocess.wait.return_value = 1 + alert.alert([match]) + assert mock_popen.called_with(['/bin/test', '--arg', 'foobarbaz'], stdin=subprocess.PIPE, shell=False) + assert "Non-zero exit code while running command" in str(exception) + + +def test_command_os_error(): + rule = {'command': ['/bin/test/', '--arg', '%(somefield)s'], + 'pipe_alert_text': True, 'type': mock_rule(), 'name': 'Test'} + alert = CommandAlerter(rule) + match = {'@timestamp': '2014-01-01T00:00:00', + 'somefield': 'foobarbaz'} + with pytest.raises(EAException) as ea: + mock_run = mock.MagicMock(side_effect=OSError) + with mock.patch("elastalert.alerters.command.subprocess.Popen", mock_run), pytest.raises(OSError) as mock_popen: + mock_subprocess = mock.Mock() + mock_popen.return_value = mock_subprocess + mock_subprocess.communicate.return_value = (None, None) + alert.alert([match]) + assert 'Error while running command /bin/test/ --arg foobarbaz: ' in str(ea) + + +def test_command_key_error(): + with pytest.raises(EAException) as ea: + rule = {} + alert = CommandAlerter(rule) + match = {'@timestamp': '2014-01-01T00:00:00', + 'somefield': 'foobarbaz', + 'nested': {'field': 1}} + with mock.patch("elastalert.alerters.command.subprocess.Popen"): + alert.alert([match]) + assert 'Error formatting command:' in str(ea) diff --git a/tests/alerters/datadog_test.py b/tests/alerters/datadog_test.py new file mode 100644 index 000000000..ad0b77f34 --- /dev/null +++ b/tests/alerters/datadog_test.py @@ -0,0 +1,124 @@ +import json +import logging +import pytest + +from unittest import mock + +from requests import RequestException + +from elastalert.alerters.datadog import DatadogAlerter +from elastalert.loaders import FileRulesLoader +from elastalert.util import EAException + + +def test_datadog_alerter(caplog): + caplog.set_level(logging.INFO) + rule = { + 'name': 'Test Datadog Event Alerter', + 'type': 'any', + 'datadog_api_key': 'test-api-key', + 'datadog_app_key': 'test-app-key', + 'alert': [], + 'alert_subject': 'Test Datadog Event Alert' + } + rules_loader = FileRulesLoader({}) + rules_loader.load_modules(rule) + alert = DatadogAlerter(rule) + match = { + '@timestamp': '2021-01-01T00:00:00', + 'name': 'datadog-test-name' + } + with mock.patch('requests.post') as mock_post_request: + alert.alert([match]) + + expected_data = { + 'title': rule['alert_subject'], + 'text': "Test Datadog Event Alerter\n\n@timestamp: 2021-01-01T00:00:00\nname: datadog-test-name\n" + } + mock_post_request.assert_called_once_with( + "https://api.datadoghq.com/api/v1/events", + data=mock.ANY, + headers={ + 'Content-Type': 'application/json', + 'DD-API-KEY': rule['datadog_api_key'], + 'DD-APPLICATION-KEY': rule['datadog_app_key'] + } + ) + actual_data = json.loads(mock_post_request.call_args_list[0][1]['data']) + assert expected_data == actual_data + assert ('elastalert', logging.INFO, 'Alert sent to Datadog') == caplog.record_tuples[0] + + +def test_datadog_ea_exception(): + with pytest.raises(EAException) as ea: + rule = { + 'name': 
'Test Datadog Event Alerter', + 'type': 'any', + 'datadog_api_key': 'test-api-key', + 'datadog_app_key': 'test-app-key', + 'alert': [], + 'alert_subject': 'Test Datadog Event Alert' + } + rules_loader = FileRulesLoader({}) + rules_loader.load_modules(rule) + alert = DatadogAlerter(rule) + match = { + '@timestamp': '2021-01-01T00:00:00', + 'name': 'datadog-test-name' + } + mock_run = mock.MagicMock(side_effect=RequestException) + with mock.patch('requests.post', mock_run), pytest.raises(RequestException): + alert.alert([match]) + assert 'Error posting event to Datadog:' in str(ea) + + +def test_datadog_getinfo(): + rule = { + 'name': 'Test Datadog Event Alerter', + 'type': 'any', + 'datadog_api_key': 'test-api-key', + 'datadog_app_key': 'test-app-key', + 'alert': [], + 'alert_subject': 'Test Datadog Event Alert' + } + rules_loader = FileRulesLoader({}) + rules_loader.load_modules(rule) + alert = DatadogAlerter(rule) + + expected_data = {'type': 'datadog'} + actual_data = alert.get_info() + assert expected_data == actual_data + + +@pytest.mark.parametrize('datadog_api_key, datadog_app_key, expected_data', [ + ('', '', 'Missing required option(s): datadog_api_key, datadog_app_key'), + ('xxxx1', '', 'Missing required option(s): datadog_api_key, datadog_app_key'), + ('', 'xxxx2', 'Missing required option(s): datadog_api_key, datadog_app_key'), + ('xxxx1', 'xxxx2', + { + 'type': 'datadog' + }), +]) +def test_datadog_required_error(datadog_api_key, datadog_app_key, expected_data): + try: + rule = { + 'name': 'Test Datadog Event Alerter', + 'type': 'any', + 'alert': [], + 'alert_subject': 'Test Datadog Event Alert' + } + + if datadog_api_key: + rule['datadog_api_key'] = datadog_api_key + + if datadog_app_key: + rule['datadog_app_key'] = datadog_app_key + + rules_loader = FileRulesLoader({}) + rules_loader.load_modules(rule) + alert = DatadogAlerter(rule) + + actual_data = alert.get_info() + assert expected_data == actual_data + except Exception as ea: + assert expected_data in str(ea) diff --git a/tests/alerters/debug_test.py b/tests/alerters/debug_test.py new file mode 100644 index 000000000..f35053c9c --- /dev/null +++ b/tests/alerters/debug_test.py @@ -0,0 +1,74 @@ +import logging + +from elastalert.alerters.debug import DebugAlerter +from elastalert.loaders import FileRulesLoader + + +def test_debug_getinfo(): + rule = { + 'name': 'Test Rule', + 'type': 'any', + 'alert_subject': 'Cool subject', + 'alert': [] + } + rules_loader = FileRulesLoader({}) + rules_loader.load_modules(rule) + alert = DebugAlerter(rule) + + expected_data = { + 'type': 'debug' + } + actual_data = alert.get_info() + assert expected_data == actual_data + + +def test_debug_alerter(caplog): + caplog.set_level(logging.INFO) + rule = { + 'name': 'Test Debug Event Alerter', + 'type': 'any', + 'alert': [], + 'timestamp_field': 'timestamp' + } + rules_loader = FileRulesLoader({}) + rules_loader.load_modules(rule) + alert = DebugAlerter(rule) + match = { + '@timestamp': '2021-01-01T00:00:00', + 'name': 'debug-test-name' + } + alert.alert([match]) + + excepted1 = 'Alert for Test Debug Event Alerter at None:' + assert ('elastalert', logging.INFO, excepted1) == caplog.record_tuples[0] + + excepted2 = 'Test Debug Event Alerter\n\n@timestamp: 2021-01-01T00:00:00\n' + excepted2 += 'name: debug-test-name\n' + assert ('elastalert', logging.INFO, excepted2) == caplog.record_tuples[1] + + +def test_debug_alerter_querykey(caplog): + caplog.set_level(logging.INFO) + rule = { + 'name': 'Test Debug Event Alerter', + 'type': 'any', + 'alert': 
[], + 'timestamp_field': 'timestamp', + 'query_key': 'hostname' + } + rules_loader = FileRulesLoader({}) + rules_loader.load_modules(rule) + alert = DebugAlerter(rule) + match = { + '@timestamp': '2021-01-01T00:00:00', + 'name': 'debug-test-name', + 'hostname': 'aProbe' + } + alert.alert([match]) + + excepted1 = 'Alert for Test Debug Event Alerter, aProbe at None:' + assert ('elastalert', logging.INFO, excepted1) == caplog.record_tuples[0] + + excepted2 = 'Test Debug Event Alerter\n\n@timestamp: 2021-01-01T00:00:00\n' + excepted2 += 'hostname: aProbe\nname: debug-test-name\n' + assert ('elastalert', logging.INFO, excepted2) == caplog.record_tuples[1] diff --git a/tests/alerters/dingtalk_test.py b/tests/alerters/dingtalk_test.py new file mode 100644 index 000000000..bdda888f5 --- /dev/null +++ b/tests/alerters/dingtalk_test.py @@ -0,0 +1,391 @@ +import json +import logging +import pytest + +from unittest import mock + +from requests import RequestException +from requests.auth import HTTPProxyAuth + +from elastalert.alerters.dingtalk import DingTalkAlerter +from elastalert.loaders import FileRulesLoader +from elastalert.util import EAException + + +def test_dingtalk_text(caplog): + caplog.set_level(logging.INFO) + rule = { + 'name': 'Test DingTalk Rule', + 'type': 'any', + 'dingtalk_access_token': 'xxxxxxx', + 'dingtalk_msgtype': 'text', + 'alert': [], + 'alert_subject': 'Test DingTalk' + } + rules_loader = FileRulesLoader({}) + rules_loader.load_modules(rule) + alert = DingTalkAlerter(rule) + match = { + '@timestamp': '2021-01-01T00:00:00', + 'somefield': 'foobarbaz' + } + with mock.patch('requests.post') as mock_post_request: + alert.alert([match]) + + expected_data = { + 'msgtype': 'text', + 'text': {'content': 'Test DingTalk Rule\n\n@timestamp: 2021-01-01T00:00:00\nsomefield: foobarbaz\n'} + } + + mock_post_request.assert_called_once_with( + 'https://oapi.dingtalk.com/robot/send?access_token=xxxxxxx', + data=mock.ANY, + headers={ + 'Content-Type': 'application/json', + 'Accept': 'application/json;charset=utf-8' + }, + proxies=None, + auth=None + ) + + actual_data = json.loads(mock_post_request.call_args_list[0][1]['data']) + assert expected_data == actual_data + assert ('elastalert', logging.INFO, 'Trigger sent to dingtalk') == caplog.record_tuples[0] + + +def test_dingtalk_markdown(): + rule = { + 'name': 'Test DingTalk Rule', + 'type': 'any', + 'dingtalk_access_token': 'xxxxxxx', + 'dingtalk_msgtype': 'markdown', + 'alert': [], + 'alert_subject': 'Test DingTalk' + } + rules_loader = FileRulesLoader({}) + rules_loader.load_modules(rule) + alert = DingTalkAlerter(rule) + match = { + '@timestamp': '2021-01-01T00:00:00', + 'somefield': 'foobarbaz' + } + with mock.patch('requests.post') as mock_post_request: + alert.alert([match]) + + expected_data = { + 'msgtype': 'markdown', + 'markdown': { + 'title': 'Test DingTalk', + 'text': 'Test DingTalk Rule\n\n@timestamp: 2021-01-01T00:00:00\nsomefield: foobarbaz\n' + } + } + + mock_post_request.assert_called_once_with( + 'https://oapi.dingtalk.com/robot/send?access_token=xxxxxxx', + data=mock.ANY, + headers={ + 'Content-Type': 'application/json', + 'Accept': 'application/json;charset=utf-8' + }, + proxies=None, + auth=None + ) + + actual_data = json.loads(mock_post_request.call_args_list[0][1]['data']) + assert expected_data == actual_data + + +def test_dingtalk_single_action_card(): + rule = { + 'name': 'Test DingTalk Rule', + 'type': 'any', + 'dingtalk_access_token': 'xxxxxxx', + 'dingtalk_msgtype': 'single_action_card', + 
'dingtalk_single_title': 'elastalert', + 'dingtalk_single_url': 'http://xxxxx2', + 'alert': [], + 'alert_subject': 'Test DingTalk' + } + rules_loader = FileRulesLoader({}) + rules_loader.load_modules(rule) + alert = DingTalkAlerter(rule) + match = { + '@timestamp': '2021-01-01T00:00:00', + 'somefield': 'foobarbaz' + } + with mock.patch('requests.post') as mock_post_request: + alert.alert([match]) + + expected_data = { + 'msgtype': 'actionCard', + 'actionCard': { + 'title': 'Test DingTalk', + 'text': 'Test DingTalk Rule\n\n@timestamp: 2021-01-01T00:00:00\nsomefield: foobarbaz\n', + 'singleTitle': rule['dingtalk_single_title'], + 'singleURL': rule['dingtalk_single_url'] + } + } + + mock_post_request.assert_called_once_with( + 'https://oapi.dingtalk.com/robot/send?access_token=xxxxxxx', + data=mock.ANY, + headers={ + 'Content-Type': 'application/json', + 'Accept': 'application/json;charset=utf-8' + }, + proxies=None, + auth=None + ) + + actual_data = json.loads(mock_post_request.call_args_list[0][1]['data']) + assert expected_data == actual_data + + +def test_dingtalk_action_card(): + rule = { + 'name': 'Test DingTalk Rule', + 'type': 'any', + 'dingtalk_access_token': 'xxxxxxx', + 'dingtalk_msgtype': 'action_card', + 'dingtalk_single_title': 'elastalert', + 'dingtalk_single_url': 'http://xxxxx2', + 'dingtalk_btn_orientation': '1', + 'dingtalk_btns': [ + { + 'title': 'test1', + 'actionURL': 'https://xxxxx0/' + }, + { + 'title': 'test2', + 'actionURL': 'https://xxxxx1/' + } + ], + 'alert': [], + 'alert_subject': 'Test DingTalk' + } + rules_loader = FileRulesLoader({}) + rules_loader.load_modules(rule) + alert = DingTalkAlerter(rule) + match = { + '@timestamp': '2021-01-01T00:00:00', + 'somefield': 'foobarbaz' + } + with mock.patch('requests.post') as mock_post_request: + alert.alert([match]) + + expected_data = { + 'msgtype': 'actionCard', + 'actionCard': { + 'title': 'Test DingTalk', + 'text': 'Test DingTalk Rule\n\n@timestamp: 2021-01-01T00:00:00\nsomefield: foobarbaz\n', + 'btnOrientation': rule['dingtalk_btn_orientation'], + 'btns': rule['dingtalk_btns'] + } + } + + mock_post_request.assert_called_once_with( + 'https://oapi.dingtalk.com/robot/send?access_token=xxxxxxx', + data=mock.ANY, + headers={ + 'Content-Type': 'application/json', + 'Accept': 'application/json;charset=utf-8' + }, + proxies=None, + auth=None + ) + + actual_data = json.loads(mock_post_request.call_args_list[0][1]['data']) + assert expected_data == actual_data + + +def test_dingtalk_action_card2(): + rule = { + 'name': 'Test DingTalk Rule', + 'type': 'any', + 'dingtalk_access_token': 'xxxxxxx', + 'dingtalk_msgtype': 'action_card', + 'dingtalk_single_title': 'elastalert', + 'dingtalk_single_url': 'http://xxxxx2', + 'alert': [], + 'alert_subject': 'Test DingTalk' + } + rules_loader = FileRulesLoader({}) + rules_loader.load_modules(rule) + alert = DingTalkAlerter(rule) + match = { + '@timestamp': '2021-01-01T00:00:00', + 'somefield': 'foobarbaz' + } + with mock.patch('requests.post') as mock_post_request: + alert.alert([match]) + + expected_data = { + 'msgtype': 'actionCard', + 'actionCard': { + 'title': 'Test DingTalk', + 'text': 'Test DingTalk Rule\n\n@timestamp: 2021-01-01T00:00:00\nsomefield: foobarbaz\n' + } + } + + mock_post_request.assert_called_once_with( + 'https://oapi.dingtalk.com/robot/send?access_token=xxxxxxx', + data=mock.ANY, + headers={ + 'Content-Type': 'application/json', + 'Accept': 'application/json;charset=utf-8' + }, + proxies=None, + auth=None + ) + + actual_data = 
json.loads(mock_post_request.call_args_list[0][1]['data']) + assert expected_data == actual_data + + +def test_dingtalk_proxy(): + rule = { + 'name': 'Test DingTalk Rule', + 'type': 'any', + 'dingtalk_access_token': 'xxxxxxx', + 'dingtalk_msgtype': 'action_card', + 'dingtalk_single_title': 'elastalert', + 'dingtalk_single_url': 'http://xxxxx2', + 'dingtalk_btn_orientation': '1', + 'dingtalk_btns': [ + { + 'title': 'test1', + 'actionURL': 'https://xxxxx0/' + }, + { + 'title': 'test2', + 'actionURL': 'https://xxxxx1/' + } + ], + 'dingtalk_proxy': 'http://proxy.url', + 'dingtalk_proxy_login': 'admin', + 'dingtalk_proxy_pass': 'password', + 'alert': [], + 'alert_subject': 'Test DingTalk' + } + rules_loader = FileRulesLoader({}) + rules_loader.load_modules(rule) + alert = DingTalkAlerter(rule) + match = { + '@timestamp': '2021-01-01T00:00:00', + 'somefield': 'foobarbaz' + } + with mock.patch('requests.post') as mock_post_request: + alert.alert([match]) + + expected_data = { + 'msgtype': 'actionCard', + 'actionCard': { + 'title': 'Test DingTalk', + 'text': 'Test DingTalk Rule\n\n@timestamp: 2021-01-01T00:00:00\nsomefield: foobarbaz\n', + 'btnOrientation': rule['dingtalk_btn_orientation'], + 'btns': rule['dingtalk_btns'] + } + } + + mock_post_request.assert_called_once_with( + 'https://oapi.dingtalk.com/robot/send?access_token=xxxxxxx', + data=mock.ANY, + headers={ + 'Content-Type': 'application/json', + 'Accept': 'application/json;charset=utf-8' + }, + proxies={'https': 'http://proxy.url'}, + auth=HTTPProxyAuth('admin', 'password') + ) + + actual_data = json.loads(mock_post_request.call_args_list[0][1]['data']) + assert expected_data == actual_data + + +def test_dingtalk_ea_exception(): + with pytest.raises(EAException) as ea: + rule = { + 'name': 'Test DingTalk Rule', + 'type': 'any', + 'dingtalk_access_token': 'xxxxxxx', + 'dingtalk_msgtype': 'action_card', + 'dingtalk_single_title': 'elastalert', + 'dingtalk_single_url': 'http://xxxxx2', + 'dingtalk_btn_orientation': '1', + 'dingtalk_btns': [ + { + 'title': 'test1', + 'actionURL': 'https://xxxxx0/' + }, + { + 'title': 'test2', + 'actionURL': 'https://xxxxx1/' + } + ], + 'dingtalk_proxy': 'http://proxy.url', + 'dingtalk_proxy_login': 'admin', + 'dingtalk_proxy_pass': 'password', + 'alert': [], + 'alert_subject': 'Test DingTalk' + } + rules_loader = FileRulesLoader({}) + rules_loader.load_modules(rule) + alert = DingTalkAlerter(rule) + match = { + '@timestamp': '2021-01-01T00:00:00', + 'somefield': 'foobarbaz' + } + mock_run = mock.MagicMock(side_effect=RequestException) + with mock.patch('requests.post', mock_run), pytest.raises(RequestException): + alert.alert([match]) + assert 'Error posting to dingtalk: ' in str(ea) + + +def test_dingtalk_getinfo(): + rule = { + 'name': 'Test DingTalk Rule', + 'type': 'any', + 'dingtalk_access_token': 'xxxxxxx', + 'alert': [], + 'alert_subject': 'Test DingTalk' + } + rules_loader = FileRulesLoader({}) + rules_loader.load_modules(rule) + alert = DingTalkAlerter(rule) + + expected_data = { + 'type': 'dingtalk', + "dingtalk_webhook_url": 'https://oapi.dingtalk.com/robot/send?access_token=xxxxxxx' + } + actual_data = alert.get_info() + assert expected_data == actual_data + + +@pytest.mark.parametrize('dingtalk_access_token, expected_data', [ + ('', 'Missing required option(s): dingtalk_access_token'), + ('xxxxxxx', + { + 'type': 'dingtalk', + "dingtalk_webhook_url": 'https://oapi.dingtalk.com/robot/send?access_token=xxxxxxx' + }), +]) +def test_dingtalk_required_error(dingtalk_access_token, expected_data): + 
try: + rule = { + 'name': 'Test DingTalk Rule', + 'type': 'any', + 'alert': [], + 'alert_subject': 'Test DingTalk' + } + + if dingtalk_access_token: + rule['dingtalk_access_token'] = dingtalk_access_token + + rules_loader = FileRulesLoader({}) + rules_loader.load_modules(rule) + alert = DingTalkAlerter(rule) + + actual_data = alert.get_info() + assert expected_data == actual_data + except Exception as ea: + assert expected_data in str(ea) diff --git a/tests/alerters/discord_test.py b/tests/alerters/discord_test.py new file mode 100644 index 000000000..bec8b67fb --- /dev/null +++ b/tests/alerters/discord_test.py @@ -0,0 +1,320 @@ +import json +import logging +import pytest + +from unittest import mock + +from requests import RequestException +from requests.auth import HTTPProxyAuth + +from elastalert.alerters.discord import DiscordAlerter +from elastalert.loaders import FileRulesLoader +from elastalert.util import EAException + + +def test_discord(caplog): + caplog.set_level(logging.INFO) + rule = { + 'name': 'Test Discord Rule', + 'type': 'any', + 'discord_webhook_url': 'http://xxxxxxx', + 'discord_emoji_title': ':warning:', + 'discord_embed_color': 0xffffff, + 'discord_embed_footer': 'footer', + 'discord_embed_icon_url': 'http://xxxx/image.png', + 'alert': [], + 'alert_subject': 'Test Discord' + } + rules_loader = FileRulesLoader({}) + rules_loader.load_modules(rule) + alert = DiscordAlerter(rule) + match = { + '@timestamp': '2021-01-01T00:00:00', + 'somefield': 'foobarbaz' + } + with mock.patch('requests.post') as mock_post_request: + alert.alert([match]) + + expected_data = { + 'content': ':warning: Test Discord :warning:', + 'embeds': + [{ + 'description': 'Test Discord Rule\n\n@timestamp: 2021-01-01T00:00:00\nsomefield: foobarbaz\n', + 'color': 0xffffff, + 'footer': { + 'text': 'footer', + 'icon_url': 'http://xxxx/image.png' + } + }] + } + + mock_post_request.assert_called_once_with( + rule['discord_webhook_url'], + data=mock.ANY, + headers={'Content-Type': 'application/json'}, + proxies=None, + auth=None + ) + + actual_data = json.loads(mock_post_request.call_args_list[0][1]['data']) + assert expected_data == actual_data + assert ('elastalert', logging.INFO, 'Alert sent to the webhook http://xxxxxxx') == caplog.record_tuples[0] + + +def test_discord_not_footer(): + rule = { + 'name': 'Test Discord Rule', + 'type': 'any', + 'discord_webhook_url': 'http://xxxxxxx', + 'discord_emoji_title': ':warning:', + 'discord_embed_color': 0xffffff, + 'alert': [], + 'alert_subject': 'Test Discord' + } + rules_loader = FileRulesLoader({}) + rules_loader.load_modules(rule) + alert = DiscordAlerter(rule) + match = { + '@timestamp': '2021-01-01T00:00:00', + 'somefield': 'foobarbaz' + } + with mock.patch('requests.post') as mock_post_request: + alert.alert([match]) + + expected_data = { + 'content': ':warning: Test Discord :warning:', + 'embeds': + [{ + 'description': 'Test Discord Rule\n\n@timestamp: 2021-01-01T00:00:00\nsomefield: foobarbaz\n', + 'color': 0xffffff + }] + } + + mock_post_request.assert_called_once_with( + rule['discord_webhook_url'], + data=mock.ANY, + headers={'Content-Type': 'application/json'}, + proxies=None, + auth=None + ) + + actual_data = json.loads(mock_post_request.call_args_list[0][1]['data']) + assert expected_data == actual_data + + +def test_discord_proxy(): + rule = { + 'name': 'Test Discord Rule', + 'type': 'any', + 'discord_webhook_url': 'http://xxxxxxx', + 'discord_emoji_title': ':warning:', + 'discord_embed_color': 0xffffff, + 'discord_proxy': 'http://proxy.url', + 
'discord_proxy_login': 'admin', + 'discord_proxy_password': 'password', + 'alert': [], + 'alert_subject': 'Test Discord' + } + rules_loader = FileRulesLoader({}) + rules_loader.load_modules(rule) + alert = DiscordAlerter(rule) + match = { + '@timestamp': '2021-01-01T00:00:00', + 'somefield': 'foobarbaz' + } + with mock.patch('requests.post') as mock_post_request: + alert.alert([match]) + + expected_data = { + 'content': ':warning: Test Discord :warning:', + 'embeds': + [{ + 'description': 'Test Discord Rule\n\n@timestamp: 2021-01-01T00:00:00\nsomefield: foobarbaz\n', + 'color': 0xffffff + }] + } + + mock_post_request.assert_called_once_with( + rule['discord_webhook_url'], + data=mock.ANY, + headers={'Content-Type': 'application/json'}, + proxies={'https': 'http://proxy.url'}, + auth=HTTPProxyAuth('admin', 'password') + ) + + actual_data = json.loads(mock_post_request.call_args_list[0][1]['data']) + assert expected_data == actual_data + + +def test_discord_description_maxlength(): + rule = { + 'name': 'Test Discord Rule' + ('a' * 2069), + 'type': 'any', + 'discord_webhook_url': 'http://xxxxxxx', + 'discord_emoji_title': ':warning:', + 'discord_embed_color': 0xffffff, + 'alert': [], + 'alert_subject': 'Test Discord' + } + rules_loader = FileRulesLoader({}) + rules_loader.load_modules(rule) + alert = DiscordAlerter(rule) + match = { + '@timestamp': '2021-01-01T00:00:00', + 'somefield': 'foobarbaz' + } + with mock.patch('requests.post') as mock_post_request: + alert.alert([match]) + + expected_data = { + 'content': ':warning: Test Discord :warning:', + 'embeds': + [{ + 'description': 'Test Discord Rule' + ('a' * 1933) + + '\n *message was cropped according to discord embed description limits!*', + 'color': 0xffffff + }] + } + + mock_post_request.assert_called_once_with( + rule['discord_webhook_url'], + data=mock.ANY, + headers={'Content-Type': 'application/json'}, + proxies=None, + auth=None + ) + + actual_data = json.loads(mock_post_request.call_args_list[0][1]['data']) + assert expected_data == actual_data + + +def test_discord_ea_exception(): + with pytest.raises(EAException) as ea: + rule = { + 'name': 'Test Discord Rule' + ('a' * 2069), + 'type': 'any', + 'discord_webhook_url': 'http://xxxxxxx', + 'discord_emoji_title': ':warning:', + 'discord_embed_color': 0xffffff, + 'alert': [], + 'alert_subject': 'Test Discord' + } + rules_loader = FileRulesLoader({}) + rules_loader.load_modules(rule) + alert = DiscordAlerter(rule) + match = { + '@timestamp': '2021-01-01T00:00:00', + 'somefield': 'foobarbaz' + } + mock_run = mock.MagicMock(side_effect=RequestException) + with mock.patch('requests.post', mock_run), pytest.raises(RequestException): + alert.alert([match]) + assert 'Error posting to Discord: . 
Details: ' in str(ea) + + +def test_discord_getinfo(): + rule = { + 'name': 'Test Discord Rule' + ('a' * 2069), + 'type': 'any', + 'discord_webhook_url': 'http://xxxxxxx', + 'alert': [], + 'alert_subject': 'Test Discord' + } + rules_loader = FileRulesLoader({}) + rules_loader.load_modules(rule) + alert = DiscordAlerter(rule) + + expected_data = { + 'type': 'discord', + 'discord_webhook_url': 'http://xxxxxxx' + } + actual_data = alert.get_info() + assert expected_data == actual_data + + +@pytest.mark.parametrize('discord_webhook_url, expected_data', [ + ('', 'Missing required option(s): discord_webhook_url'), + ('http://xxxxxxx', + { + 'type': 'discord', + 'discord_webhook_url': 'http://xxxxxxx' + }), +]) +def test_discord_required_error(discord_webhook_url, expected_data): + try: + rule = { + 'name': 'Test Discord Rule' + ('a' * 2069), + 'type': 'any', + 'alert': [], + 'alert_subject': 'Test Discord' + } + + if discord_webhook_url: + rule['discord_webhook_url'] = discord_webhook_url + + rules_loader = FileRulesLoader({}) + rules_loader.load_modules(rule) + alert = DiscordAlerter(rule) + + actual_data = alert.get_info() + assert expected_data == actual_data + except Exception as ea: + assert expected_data in str(ea) + + +def test_discord_matches(): + rule = { + 'name': 'Test Discord Rule', + 'type': 'any', + 'discord_webhook_url': 'http://xxxxxxx', + 'discord_emoji_title': ':warning:', + 'discord_embed_color': 0xffffff, + 'discord_embed_footer': 'footer', + 'discord_embed_icon_url': 'http://xxxx/image.png', + 'alert': [], + 'alert_subject': 'Test Discord' + } + rules_loader = FileRulesLoader({}) + rules_loader.load_modules(rule) + alert = DiscordAlerter(rule) + match = { + '@timestamp': '2021-01-01T00:00:00', + 'somefield': 'foobarbaz' + } + with mock.patch('requests.post') as mock_post_request: + alert.alert([match, match]) + + expected_data = { + 'content': ':warning: Test Discord :warning:', + 'embeds': + [{ + 'description': 'Test Discord Rule\n' + + '\n' + + '@timestamp: 2021-01-01T00:00:00\n' + + 'somefield: foobarbaz\n' + + '\n' + + '----------------------------------------\n' + + 'Test Discord Rule\n' + + '\n' + + '@timestamp: 2021-01-01T00:00:00\n' + + 'somefield: foobarbaz\n' + + '\n' + + '----------------------------------------\n', + 'color': 0xffffff, + 'footer': { + 'text': 'footer', + 'icon_url': 'http://xxxx/image.png' + } + }] + } + + mock_post_request.assert_called_once_with( + rule['discord_webhook_url'], + data=mock.ANY, + headers={'Content-Type': 'application/json'}, + proxies=None, + auth=None + ) + + actual_data = json.loads(mock_post_request.call_args_list[0][1]['data']) + assert expected_data == actual_data diff --git a/tests/alerters/email_test.py b/tests/alerters/email_test.py new file mode 100644 index 000000000..5d06a35f5 --- /dev/null +++ b/tests/alerters/email_test.py @@ -0,0 +1,661 @@ +import base64 +import datetime +import logging +import pytest + +from unittest import mock + +from elastalert.alerters.email import EmailAlerter +from elastalert.util import EAException +from tests.alerts_test import mock_rule + + +def test_email(caplog): + caplog.set_level(logging.INFO) + rule = { + 'name': 'test alert', + 'email': ['testing@test.test', 'test@test.test'], + 'from_addr': 'testfrom@test.test', + 'type': mock_rule(), + 'timestamp_field': '@timestamp', + 'email_reply_to': 'test@example.com', + 'owner': 'owner_value', + 'alert_subject': 'Test alert for {0}, owned by {1}', + 'alert_subject_args': ['test_term', 'owner'], + 'snowman': '☃' + } + with 
mock.patch('elastalert.alerters.email.SMTP') as mock_smtp: + mock_smtp.return_value = mock.Mock() + + alert = EmailAlerter(rule) + alert.alert([{'test_term': 'test_value'}]) + expected = [mock.call('localhost'), + mock.call().ehlo(), + mock.call().has_extn('STARTTLS'), + mock.call().starttls(certfile=None, keyfile=None), + mock.call().sendmail( + mock.ANY, + [ + 'testing@test.test', + 'test@test.test' + ], + mock.ANY + ), + mock.call().quit()] + assert mock_smtp.mock_calls == expected + + body = mock_smtp.mock_calls[4][1][2] + + assert 'Reply-To: test@example.com' in body + assert 'To: testing@test.test' in body + assert 'From: testfrom@test.test' in body + assert 'Subject: Test alert for test_value, owned by owner_value' in body + assert ('elastalert', logging.INFO, "Sent email to ['testing@test.test', 'test@test.test']") == caplog.record_tuples[0] + + +@pytest.mark.parametrize('email_from_field, email_add_domain, match_data, expected_data', [ + ('data.user', '', [{'data': {'user': 'qlo'}}], ['qlo@example.com']), + ('data.user', '@example.com', [{'data': {'user': 'qlo'}}], ['qlo@example.com']), + ('data.user', 'example.com', [{'data': {'user': '@qlo'}}], ['@qlo']), + ('data.user', 'example.com', [{'data': {'user': ['qlo', 'foo']}}], ['qlo@example.com', 'foo@example.com']), + ('data.user', 'example.com', [{'data': {'foo': 'qlo'}}], ['testing@test.test']), + ('data.user', 'example.com', [{'data': {'user': 17}}], ['testing@test.test']) +]) +def test_email_from_field(email_from_field, email_add_domain, match_data, expected_data): + rule = { + 'name': 'test alert', + 'email': ['testing@test.test'], + 'email_add_domain': 'example.com', + 'type': mock_rule(), + 'timestamp_field': '@timestamp', + 'email_from_field': 'data.user', + 'owner': 'owner_value' + } + if email_from_field: + rule['email_from_field'] = email_from_field + if email_add_domain: + rule['email_add_domain'] = email_add_domain + with mock.patch('elastalert.alerters.email.SMTP') as mock_smtp: + mock_smtp.return_value = mock.Mock() + alert = EmailAlerter(rule) + alert.alert(match_data) + assert mock_smtp.mock_calls[4][1][1] == expected_data + + +def test_email_with_unicode_strings(): + rule = { + 'name': 'test alert', + 'email': 'testing@test.test', + 'from_addr': 'testfrom@test.test', + 'type': mock_rule(), + 'timestamp_field': '@timestamp', + 'email_reply_to': 'test@example.com', + 'owner': 'owner_value', + 'alert_subject': 'Test alert for {0}, owned by {1}', + 'alert_subject_args': ['test_term', 'owner'], + 'snowman': '☃' + } + with mock.patch('elastalert.alerters.email.SMTP') as mock_smtp: + mock_smtp.return_value = mock.Mock() + + alert = EmailAlerter(rule) + alert.alert([{'test_term': 'test_value'}]) + expected = [mock.call('localhost'), + mock.call().ehlo(), + mock.call().has_extn('STARTTLS'), + mock.call().starttls(certfile=None, keyfile=None), + mock.call().sendmail(mock.ANY, ['testing@test.test'], mock.ANY), + mock.call().quit()] + assert mock_smtp.mock_calls == expected + + body = mock_smtp.mock_calls[4][1][2] + + assert 'Reply-To: test@example.com' in body + assert 'To: testing@test.test' in body + assert 'From: testfrom@test.test' in body + assert 'Subject: Test alert for test_value, owned by owner_value' in body + + +def test_email_with_auth(): + rule = { + 'name': 'test alert', + 'email': ['testing@test.test', 'test@test.test'], + 'from_addr': 'testfrom@test.test', + 'type': mock_rule(), + 'timestamp_field': '@timestamp', + 'email_reply_to': 'test@example.com', + 'alert_subject': 'Test alert for {0}', + 
'alert_subject_args': ['test_term'], + 'smtp_auth_file': 'file.txt', + 'rule_file': '/tmp/foo.yaml' + } + with mock.patch('elastalert.alerters.email.SMTP') as mock_smtp: + with mock.patch('elastalert.alerts.read_yaml') as mock_open: + mock_open.return_value = {'user': 'someone', 'password': 'hunter2'} + mock_smtp.return_value = mock.Mock() + alert = EmailAlerter(rule) + + alert.alert([{'test_term': 'test_value'}]) + expected = [mock.call('localhost'), + mock.call().ehlo(), + mock.call().has_extn('STARTTLS'), + mock.call().starttls(certfile=None, keyfile=None), + mock.call().login('someone', 'hunter2'), + mock.call().sendmail( + mock.ANY, + [ + 'testing@test.test', + 'test@test.test' + ], + mock.ANY + ), + mock.call().quit()] + assert mock_smtp.mock_calls == expected + + +def test_email_with_cert_key(): + rule = { + 'name': 'test alert', + 'email': ['testing@test.test', 'test@test.test'], + 'from_addr': 'testfrom@test.test', + 'type': mock_rule(), + 'timestamp_field': '@timestamp', + 'email_reply_to': 'test@example.com', + 'alert_subject': 'Test alert for {0}', + 'alert_subject_args': ['test_term'], + 'smtp_auth_file': 'file.txt', + 'smtp_cert_file': 'dummy/cert.crt', + 'smtp_key_file': 'dummy/client.key', + 'rule_file': '/tmp/foo.yaml' + } + with mock.patch('elastalert.alerters.email.SMTP') as mock_smtp: + with mock.patch('elastalert.alerts.read_yaml') as mock_open: + mock_open.return_value = {'user': 'someone', 'password': 'hunter2'} + mock_smtp.return_value = mock.Mock() + alert = EmailAlerter(rule) + + alert.alert([{'test_term': 'test_value'}]) + expected = [mock.call('localhost'), + mock.call().ehlo(), + mock.call().has_extn('STARTTLS'), + mock.call().starttls(certfile='dummy/cert.crt', keyfile='dummy/client.key'), + mock.call().login('someone', 'hunter2'), + mock.call().sendmail( + mock.ANY, + [ + 'testing@test.test', + 'test@test.test' + ], + mock.ANY + ), + mock.call().quit()] + assert mock_smtp.mock_calls == expected + + +def test_email_with_cc(): + rule = { + 'name': 'test alert', + 'email': ['testing@test.test', 'test@test.test'], + 'from_addr': 'testfrom@test.test', + 'type': mock_rule(), + 'timestamp_field': '@timestamp', + 'email_reply_to': 'test@example.com', + 'cc': 'tester@testing.testing' + } + with mock.patch('elastalert.alerters.email.SMTP') as mock_smtp: + mock_smtp.return_value = mock.Mock() + + alert = EmailAlerter(rule) + alert.alert([{'test_term': 'test_value'}]) + expected = [mock.call('localhost'), + mock.call().ehlo(), + mock.call().has_extn('STARTTLS'), + mock.call().starttls(certfile=None, keyfile=None), + mock.call().sendmail( + mock.ANY, + [ + 'testing@test.test', + 'test@test.test', + 'tester@testing.testing' + ], + mock.ANY + ), + mock.call().quit()] + assert mock_smtp.mock_calls == expected + + body = mock_smtp.mock_calls[4][1][2] + + assert 'Reply-To: test@example.com' in body + assert 'To: testing@test.test' in body + assert 'CC: tester@testing.testing' in body + assert 'From: testfrom@test.test' in body + + +def test_email_with_bcc(): + rule = { + 'name': 'test alert', + 'email': ['testing@test.test', 'test@test.test'], + 'from_addr': 'testfrom@test.test', + 'type': mock_rule(), + 'timestamp_field': '@timestamp', + 'email_reply_to': 'test@example.com', + 'bcc': 'tester@testing.testing' + } + with mock.patch('elastalert.alerters.email.SMTP') as mock_smtp: + mock_smtp.return_value = mock.Mock() + + alert = EmailAlerter(rule) + alert.alert([{'test_term': 'test_value'}]) + expected = [mock.call('localhost'), + mock.call().ehlo(), + 
mock.call().has_extn('STARTTLS'), + mock.call().starttls(certfile=None, keyfile=None), + mock.call().sendmail( + mock.ANY, + [ + 'testing@test.test', + 'test@test.test', + 'tester@testing.testing' + ], + mock.ANY + ), + mock.call().quit()] + assert mock_smtp.mock_calls == expected + + body = mock_smtp.mock_calls[4][1][2] + + assert 'Reply-To: test@example.com' in body + assert 'To: testing@test.test' in body + assert 'CC: tester@testing.testing' not in body + assert 'From: testfrom@test.test' in body + + +def test_email_with_cc_and_bcc(): + rule = { + 'name': 'test alert', + 'email': ['testing@test.test', 'test@test.test'], + 'from_addr': 'testfrom@test.test', + 'type': mock_rule(), + 'timestamp_field': '@timestamp', + 'email_reply_to': 'test@example.com', + 'cc': ['test1@test.com', 'test2@test.com'], + 'bcc': 'tester@testing.testing' + } + with mock.patch('elastalert.alerters.email.SMTP') as mock_smtp: + mock_smtp.return_value = mock.Mock() + + alert = EmailAlerter(rule) + alert.alert([{'test_term': 'test_value'}]) + expected = [mock.call('localhost'), + mock.call().ehlo(), + mock.call().has_extn('STARTTLS'), + mock.call().starttls(certfile=None, keyfile=None), + mock.call().sendmail( + mock.ANY, + [ + 'testing@test.test', + 'test@test.test', + 'test1@test.com', + 'test2@test.com', + 'tester@testing.testing' + ], + mock.ANY + ), + mock.call().quit()] + assert mock_smtp.mock_calls == expected + + body = mock_smtp.mock_calls[4][1][2] + + assert 'Reply-To: test@example.com' in body + assert 'To: testing@test.test' in body + assert 'CC: test1@test.com,test2@test.com' in body + assert 'From: testfrom@test.test' in body + + +def test_email_with_args(): + rule = { + 'name': 'test alert', + 'email': ['testing@test.test', 'test@test.test'], + 'from_addr': 'testfrom@test.test', + 'type': mock_rule(), + 'timestamp_field': '@timestamp', + 'email_reply_to': 'test@example.com', + 'alert_subject': 'Test alert for {0} {1}', + 'alert_subject_args': ['test_term', 'test.term'], + 'alert_text': 'Test alert for {0} and {1} {2}', + 'alert_text_args': ['test_arg1', 'test_arg2', 'test.arg3'], + 'alert_missing_value': '' + } + with mock.patch('elastalert.alerters.email.SMTP') as mock_smtp: + mock_smtp.return_value = mock.Mock() + + alert = EmailAlerter(rule) + alert.alert([{'test_term': 'test_value', 'test_arg1': 'testing', 'test': {'term': ':)', 'arg3': '☃'}}]) + expected = [mock.call('localhost'), + mock.call().ehlo(), + mock.call().has_extn('STARTTLS'), + mock.call().starttls(certfile=None, keyfile=None), + mock.call().sendmail( + mock.ANY, + [ + 'testing@test.test', + 'test@test.test' + ], + mock.ANY + ), + mock.call().quit()] + assert mock_smtp.mock_calls == expected + + body = mock_smtp.mock_calls[4][1][2] + # Extract the MIME encoded message body + body_text = base64.b64decode(body.split('\n\n')[-1][:-1]).decode('utf-8') + + assert 'testing' in body_text + assert '' in body_text + assert '☃' in body_text + + assert 'Reply-To: test@example.com' in body + assert 'To: testing@test.test' in body + assert 'From: testfrom@test.test' in body + assert 'Subject: Test alert for test_value :)' in body + + +def test_email_query_key_in_subject(): + rule = { + 'name': 'test alert', + 'email': ['testing@test.test', 'test@test.test'], + 'type': mock_rule(), + 'timestamp_field': '@timestamp', + 'email_reply_to': 'test@example.com', + 'query_key': 'username' + } + with mock.patch('elastalert.alerters.email.SMTP') as mock_smtp: + mock_smtp.return_value = mock.Mock() + + alert = EmailAlerter(rule) + alert.alert([{'test_term': 
'test_value', 'username': 'werbenjagermanjensen'}]) + + body = mock_smtp.mock_calls[4][1][2] + lines = body.split('\n') + found_subject = False + for line in lines: + if line.startswith('Subject'): + assert 'werbenjagermanjensen' in line + found_subject = True + assert found_subject + + +def test_email_getinfo(): + rule = { + 'name': 'test alert', + 'email': ['testing@test.test', 'test@test.test'], + 'from_addr': 'testfrom@test.test', + 'type': mock_rule(), + 'timestamp_field': '@timestamp', + 'email_reply_to': 'test@example.com', + 'owner': 'owner_value', + 'alert_subject': 'Test alert for {0}, owned by {1}', + 'alert_subject_args': ['test_term', 'owner'], + 'snowman': '☃' + } + alert = EmailAlerter(rule) + + expected_data = { + 'type': 'email', + 'recipients': ['testing@test.test', 'test@test.test']} + actual_data = alert.get_info() + assert expected_data == actual_data + + +@pytest.mark.parametrize('email, expected_data', [ + (['testing@test.test', 'test@test.test'], True), + (['testing@test.test', 'test@test.test'], + { + 'type': 'email', + 'recipients': ['testing@test.test', 'test@test.test'] + }), +]) +def test_email_key_error(email, expected_data): + try: + rule = { + 'name': 'test alert', + 'from_addr': 'testfrom@test.test', + 'type': mock_rule(), + 'timestamp_field': '@timestamp', + 'email_reply_to': 'test@example.com', + 'owner': 'owner_value', + 'alert_subject': 'Test alert for {0}, owned by {1}', + 'alert_subject_args': ['test_term', 'owner'], + 'snowman': '☃' + } + + if email: + rule['email'] = email + + alert = EmailAlerter(rule) + + actual_data = alert.get_info() + assert expected_data == actual_data + except Exception: + assert expected_data + + +@pytest.mark.parametrize('query_key, expected_data', [ + ('hostname', 'ElastAlert: Test email rule! 
- aProbe'), + ('test', 'ElastAlert: Test email rule!'), + ('', 'ElastAlert: Test email rule!'), +]) +def test_email_create_default_title(query_key, expected_data): + rule = { + 'name': 'Test email rule!', + 'alerta_api_url': 'http://elastalerthost:8080/api/alert', + 'timeframe': datetime.timedelta(hours=1), + 'timestamp_field': '@timestamp', + 'type': 'any', + 'alert': 'email', + 'email': 'test@test.com' + } + if query_key: + rule['query_key'] = query_key + + match = [ + { + '@timestamp': '2014-10-10T00:00:00', + 'sender_ip': '1.1.1.1', + 'hostname': 'aProbe' + }, + { + '@timestamp': '2014-10-10T00:00:00', + 'sender_ip': '1.1.1.1', + 'hostname2': 'aProbe' + } + ] + alert = EmailAlerter(rule) + + result = alert.create_default_title(match) + assert expected_data == result + + +def test_email_smtp_port(): + rule = { + 'name': 'test alert', + 'email': ['testing@test.test', 'test@test.test'], + 'smtp_port': 35, + 'from_addr': 'testfrom@test.test', + 'type': mock_rule(), + 'timestamp_field': '@timestamp', + 'email_reply_to': 'test@example.com', + 'owner': 'owner_value', + 'alert_subject': 'Test alert for {0}, owned by {1}', + 'alert_subject_args': ['test_term', 'owner'], + 'snowman': '☃' + } + with mock.patch('elastalert.alerters.email.SMTP') as mock_smtp: + mock_smtp.return_value = mock.Mock() + + alert = EmailAlerter(rule) + alert.alert([{'test_term': 'test_value'}]) + expected = [mock.call('localhost', 35), + mock.call().ehlo(), + mock.call().has_extn('STARTTLS'), + mock.call().starttls(certfile=None, keyfile=None), + mock.call().sendmail( + mock.ANY, + [ + 'testing@test.test', + 'test@test.test' + ], + mock.ANY + ), + mock.call().quit()] + assert mock_smtp.mock_calls == expected + + body = mock_smtp.mock_calls[4][1][2] + + assert 'Reply-To: test@example.com' in body + assert 'To: testing@test.test' in body + assert 'From: testfrom@test.test' in body + assert 'Subject: Test alert for test_value, owned by owner_value' in body + + +def test_email_smtp_ssl_true(): + rule = { + 'name': 'test alert', + 'email': ['testing@test.test', 'test@test.test'], + 'smtp_ssl': True, + 'from_addr': 'testfrom@test.test', + 'type': mock_rule(), + 'timestamp_field': '@timestamp', + 'email_reply_to': 'test@example.com', + 'owner': 'owner_value', + 'alert_subject': 'Test alert for {0}, owned by {1}', + 'alert_subject_args': ['test_term', 'owner'], + 'snowman': '☃' + } + with mock.patch('elastalert.alerters.email.SMTP_SSL') as mock_smtp: + mock_smtp.return_value = mock.Mock() + + alert = EmailAlerter(rule) + alert.alert([{'test_term': 'test_value'}]) + expected = [mock.call('localhost', certfile=None, keyfile=None), + mock.call().sendmail( + mock.ANY, + [ + 'testing@test.test', + 'test@test.test' + ], + mock.ANY + ), + mock.call().quit()] + assert mock_smtp.mock_calls == expected + + body = mock_smtp.mock_calls[1][1][2] + + assert 'Reply-To: test@example.com' in body + assert 'To: testing@test.test' in body + assert 'From: testfrom@test.test' in body + assert 'Subject: Test alert for test_value, owned by owner_value' in body + + +def test_email_smtp_ssl_true_and_smtp_port(): + rule = { + 'name': 'test alert', + 'email': ['testing@test.test', 'test@test.test'], + 'smtp_ssl': True, + 'smtp_port': 455, + 'from_addr': 'testfrom@test.test', + 'type': mock_rule(), + 'timestamp_field': '@timestamp', + 'email_reply_to': 'test@example.com', + 'owner': 'owner_value', + 'alert_subject': 'Test alert for {0}, owned by {1}', + 'alert_subject_args': ['test_term', 'owner'], + 'snowman': '☃' + } + with 
mock.patch('elastalert.alerters.email.SMTP_SSL') as mock_smtp: + mock_smtp.return_value = mock.Mock() + + alert = EmailAlerter(rule) + alert.alert([{'test_term': 'test_value'}]) + expected = [mock.call('localhost', 455, certfile=None, keyfile=None), + mock.call().sendmail( + mock.ANY, + [ + 'testing@test.test', + 'test@test.test' + ], + mock.ANY + ), + mock.call().quit()] + assert mock_smtp.mock_calls == expected + + body = mock_smtp.mock_calls[1][1][2] + + assert 'Reply-To: test@example.com' in body + assert 'To: testing@test.test' in body + assert 'From: testfrom@test.test' in body + assert 'Subject: Test alert for test_value, owned by owner_value' in body + + +def test_email_smtp_exception(): + with pytest.raises(EAException) as ea: + rule = { + 'name': 'test alert', + 'email': ['testing@test.test', 'test@test.test'], + 'from_addr': 'testfrom@test.test', + 'type': mock_rule(), + 'timestamp_field': '@timestamp', + 'email_reply_to': 'test@example.com', + 'alert_subject': 'Test alert for {0}', + 'alert_subject_args': ['test_term'], + 'smtp_auth_file': 'file.txt', + 'rule_file': '/tmp/foo.yaml' + } + with mock.patch('elastalert.alerters.email.SMTP_SSL'): + with mock.patch('elastalert.alerts.read_yaml') as mock_open: + mock_open.return_value = {'user': 'someone', 'password': 'hunter2'} + alert = EmailAlerter(rule) + alert.alert([{'test_term': 'test_value'}]) + assert 'Error connecting to SMTP host: ' in str(ea) + + +def test_email_format_html(): + rule = { + 'name': 'test alert', + 'email': ['testing@test.test', 'test@test.test'], + 'smtp_ssl': True, + 'smtp_port': 455, + 'email_format': 'html', + 'from_addr': 'testfrom@test.test', + 'type': mock_rule(), + 'timestamp_field': '@timestamp', + 'email_reply_to': 'test@example.com', + 'owner': 'owner_value', + 'alert_subject': 'Test alert for {0}, owned by {1}', + 'alert_subject_args': ['test_term', 'owner'], + 'snowman': '☃' + } + with mock.patch('elastalert.alerters.email.SMTP_SSL') as mock_smtp: + mock_smtp.return_value = mock.Mock() + + alert = EmailAlerter(rule) + alert.alert([{'test_term': 'test_value'}]) + expected = [mock.call('localhost', 455, certfile=None, keyfile=None), + mock.call().sendmail( + mock.ANY, + [ + 'testing@test.test', + 'test@test.test' + ], + mock.ANY + ), + mock.call().quit()] + assert mock_smtp.mock_calls == expected + + body = mock_smtp.mock_calls[1][1][2] + + assert 'Reply-To: test@example.com' in body + assert 'To: testing@test.test' in body + assert 'From: testfrom@test.test' in body + assert 'Subject: Test alert for test_value, owned by owner_value' in body + assert 'Content-Type: text/html; charset="utf-8"' in body diff --git a/tests/alerters/exotel_test.py b/tests/alerters/exotel_test.py new file mode 100644 index 000000000..b4cb7e7c2 --- /dev/null +++ b/tests/alerters/exotel_test.py @@ -0,0 +1,176 @@ +import logging +import pytest + +from unittest import mock + +from requests import RequestException + +from elastalert.alerters.exotel import ExotelAlerter +from elastalert.loaders import FileRulesLoader +from elastalert.util import EAException + + +def test_exotel_getinfo(): + rule = { + 'name': 'Test Rule', + 'type': 'any', + 'alert_subject': 'Cool subject', + 'exotel_account_sid': 'xxxxx1', + 'exotel_auth_token': 'xxxxx2', + 'exotel_to_number': 'xxxxx3', + 'exotel_from_number': 'xxxxx4', + 'alert': [] + } + rules_loader = FileRulesLoader({}) + rules_loader.load_modules(rule) + alert = ExotelAlerter(rule) + + expected_data = { + 'type': 'exotel', + 'exotel_account': 'xxxxx1' + } + actual_data = alert.get_info() 
+ assert expected_data == actual_data + + +exotel_required_error_expected_data = 'Missing required option(s): exotel_account_sid, ' +exotel_required_error_expected_data += 'exotel_auth_token, exotel_to_number, exotel_from_number' + + +@pytest.mark.parametrize('exotel_account_sid, exotel_auth_token, exotel_to_number, exotel_from_number, expected_data', [ + ('', '', '', '', exotel_required_error_expected_data), + ('xxxx1', '', '', '', exotel_required_error_expected_data), + ('', 'xxxx2', '', '', exotel_required_error_expected_data), + ('', '', 'xxxx3', '', exotel_required_error_expected_data), + ('', '', '', 'xxxx4', exotel_required_error_expected_data), + ('xxxx1', 'xxxx2', '', '', exotel_required_error_expected_data), + ('xxxx1', '', 'xxxx3', '', exotel_required_error_expected_data), + ('xxxx1', '', '', 'xxxx4', exotel_required_error_expected_data), + ('', 'xxxx2', 'xxxx3', '', exotel_required_error_expected_data), + ('', 'xxxx2', '', 'xxxx4', exotel_required_error_expected_data), + ('', '', 'xxxx3', 'xxxx4', exotel_required_error_expected_data), + ('xxxx1', 'xxxx2', 'xxxx3', '', exotel_required_error_expected_data), + ('xxxx1', '', 'xxxx3', 'xxxx4', exotel_required_error_expected_data), + ('', 'xxxx2', 'xxxx3', 'xxxx4', exotel_required_error_expected_data), + ('xxxx1', 'xxxx2', 'xxxx3', 'xxxx4', + { + 'type': 'exotel', + 'exotel_account': 'xxxx1' + }), +]) +def test_exotel_required_error(exotel_account_sid, exotel_auth_token, exotel_to_number, exotel_from_number, expected_data): + try: + rule = { + 'name': 'Test Rule', + 'type': 'any', + 'alert': [] + } + + if exotel_account_sid: + rule['exotel_account_sid'] = exotel_account_sid + + if exotel_auth_token: + rule['exotel_auth_token'] = exotel_auth_token + + if exotel_to_number: + rule['exotel_to_number'] = exotel_to_number + + if exotel_from_number: + rule['exotel_from_number'] = exotel_from_number + + rules_loader = FileRulesLoader({}) + rules_loader.load_modules(rule) + alert = ExotelAlerter(rule) + + actual_data = alert.get_info() + assert expected_data == actual_data + except Exception as ea: + assert expected_data in str(ea) + + +def test_exotel(caplog): + caplog.set_level(logging.INFO) + rule = { + 'name': 'Test Rule', + 'type': 'any', + 'alert_subject': 'Cool subject', + 'exotel_account_sid': 'xxxxx1', + 'exotel_auth_token': 'xxxxx2', + 'exotel_to_number': 'xxxxx3', + 'exotel_from_number': 'xxxxx4', + 'alert': [] + } + match = { + '@timestamp': '2021-01-10T00:00:00', + 'sender_ip': '1.1.1.1', + 'hostname': 'aProbe' + } + rules_loader = FileRulesLoader({}) + rules_loader.load_modules(rule) + + with mock.patch('elastalert.alerters.exotel.Exotel.sms') as mock_exotel: + mock_exotel.return_value = 200 + alert = ExotelAlerter(rule) + alert.alert([match]) + expected = [ + mock.call()('xxxxx4', 'xxxxx3', 'Test Rule') + ] + + assert mock_exotel.mock_calls == expected + assert ('elastalert', logging.INFO, 'Trigger sent to Exotel') == caplog.record_tuples[0] + + +def test_exotel_status_cod_not_200(): + with pytest.raises(EAException) as ea: + rule = { + 'name': 'Test Rule', + 'type': 'any', + 'alert_subject': 'Cool subject', + 'exotel_account_sid': 'xxxxx1', + 'exotel_auth_token': 'xxxxx2', + 'exotel_to_number': 'xxxxx3', + 'exotel_from_number': 'xxxxx4', + 'alert': [] + } + match = { + '@timestamp': '2021-01-10T00:00:00', + 'sender_ip': '1.1.1.1', + 'hostname': 'aProbe' + } + rules_loader = FileRulesLoader({}) + rules_loader.load_modules(rule) + + with mock.patch('elastalert.alerters.exotel.Exotel.sms') as mock_exotel: + 
mock_exotel.return_value = 201 + alert = ExotelAlerter(rule) + alert.alert([match]) + + assert 'Error posting to Exotel, response code is' in str(ea) + + +def test_exotel_request_error(): + with pytest.raises(EAException) as ea: + rule = { + 'name': 'Test Rule', + 'type': 'any', + 'alert_subject': 'Cool subject', + 'exotel_account_sid': 'xxxxx1', + 'exotel_auth_token': 'xxxxx2', + 'exotel_to_number': 'xxxxx3', + 'exotel_from_number': 'xxxxx4', + 'alert': [] + } + match = { + '@timestamp': '2021-01-10T00:00:00', + 'sender_ip': '1.1.1.1', + 'hostname': 'aProbe' + } + rules_loader = FileRulesLoader({}) + rules_loader.load_modules(rule) + + mock_run = mock.MagicMock(side_effect=RequestException) + with mock.patch('elastalert.alerters.exotel.Exotel.sms', mock_run), pytest.raises(RequestException) as mock_exotel: + mock_exotel.return_value = 200 + alert = ExotelAlerter(rule) + alert.alert([match]) + assert 'Error posting to Exotel' in str(ea) diff --git a/tests/alerters/gelf_test.py b/tests/alerters/gelf_test.py new file mode 100644 index 000000000..e663264be --- /dev/null +++ b/tests/alerters/gelf_test.py @@ -0,0 +1,311 @@ +import json +import logging +import socket + +from unittest import mock +from elastalert.alerters.gelf import GelfAlerter +from elastalert.loaders import FileRulesLoader + + +def test_gelf_sent_http(caplog): + caplog.set_level(logging.INFO) + rule = { + 'name': 'Test Rule', + 'type': 'any', + 'gelf_type': 'http', + 'gelf_endpoint': 'http://example.graylog.site', + 'gelf_payload': {'username': 'username', 'account_status': 'account_status'}, + 'alert': [], + } + + rules_loader = FileRulesLoader({}) + rules_loader.load_modules(rule) + alert = GelfAlerter(rule) + + match = { + 'username': 'test_user', + 'account_status': 'disabled', + } + + with mock.patch('requests.post') as mock_post_request: + alert.alert([match]) + + expected_data = { + 'version': '1.1', + 'host': socket.getfqdn(), + 'short_message': '{"Title": "Test Rule", "username": "test_user", "account_status": "disabled"}', + 'level': 5, + } + + mock_post_request.assert_called_once_with( + url=rule['gelf_endpoint'], + headers={'Content-Type': 'application/json'}, + json=mock.ANY, + verify=False, + timeout=30, + ) + + assert expected_data == mock_post_request.call_args_list[0][1]['json'] + assert ('elastalert', logging.INFO, 'GELF message sent via HTTP.') == caplog.record_tuples[0] + + +def test_gelf_sent_http_with_custom_ca(caplog): + caplog.set_level(logging.INFO) + rule = { + 'name': 'Test Rule', + 'type': 'any', + 'gelf_type': 'http', + 'gelf_endpoint': 'https://example.graylog.site', + 'gelf_ca_cert': './ca.crt', + 'gelf_http_ignore_ssl_errors': False, + 'gelf_payload': {'username': 'username', 'account_status': 'account_status'}, + 'alert': [], + } + + rules_loader = FileRulesLoader({}) + rules_loader.load_modules(rule) + alert = GelfAlerter(rule) + + match = { + 'username': 'test_user', + 'account_status': 'disabled', + } + + with mock.patch('requests.post') as mock_post_request: + alert.alert([match]) + + expected_data = { + 'version': '1.1', + 'host': socket.getfqdn(), + 'short_message': '{"Title": "Test Rule", "username": "test_user", "account_status": "disabled"}', + 'level': 5, + } + + mock_post_request.assert_called_once_with( + url=rule['gelf_endpoint'], + headers={'Content-Type': 'application/json'}, + json=mock.ANY, + verify=rule['gelf_ca_cert'], + timeout=30, + ) + + assert expected_data == mock_post_request.call_args_list[0][1]['json'] + assert ('elastalert', logging.INFO, 'GELF message sent via 
HTTP.') == caplog.record_tuples[0] + + +def test_gelf_sent_http_with_optional_fields(caplog): + caplog.set_level(logging.INFO) + rule = { + 'name': 'Test Rule', + 'type': 'any', + 'gelf_type': 'http', + 'gelf_endpoint': 'http://example.graylog.site', + 'gelf_http_headers': {'Accept': 'application/json;charset=utf-8'}, + 'gelf_log_level': 1, + 'gelf_http_ignore_ssl_errors': True, + 'gelf_timeout': 10, + 'gelf_payload': {'username': 'username', 'account_status': 'account_status'}, + 'alert': [], + } + + rules_loader = FileRulesLoader({}) + rules_loader.load_modules(rule) + alert = GelfAlerter(rule) + + match = { + 'username': 'test_user', + 'account_status': 'disabled', + } + + with mock.patch('requests.post') as mock_post_request: + alert.alert([match]) + + expected_data = { + 'version': '1.1', + 'host': socket.getfqdn(), + 'short_message': '{"Title": "Test Rule", "username": "test_user", "account_status": "disabled"}', + 'level': rule['gelf_log_level'], + } + + mock_post_request.assert_called_once_with( + url=rule['gelf_endpoint'], + headers={'Content-Type': 'application/json', 'Accept': 'application/json;charset=utf-8'}, + json=mock.ANY, + verify=False, + timeout=10, + ) + + assert expected_data == mock_post_request.call_args_list[0][1]['json'] + assert ('elastalert', logging.INFO, 'GELF message sent via HTTP.') == caplog.record_tuples[0] + + +def test_gelf_sent_tcp(caplog): + caplog.set_level(logging.INFO) + rule = { + 'name': 'Test Rule', + 'type': 'any', + 'gelf_type': 'tcp', + 'gelf_host': '127.0.0.1', + 'gelf_port': 12201, + 'gelf_payload': {'username': 'username', 'account_status': 'account_status'}, + 'alert': [], + } + + rules_loader = FileRulesLoader({}) + rules_loader.load_modules(rule) + alert = GelfAlerter(rule) + + match = { + 'username': 'test_user', + 'account_status': 'disabled', + } + + expected_data = { + 'version': '1.1', + 'host': socket.getfqdn(), + 'short_message': '{"Title": "Test Rule", "username": "test_user", "account_status": "disabled"}', + 'level': 5, + } + + expected_data = json.dumps(expected_data).encode('utf-8') + b'\x00' + + with mock.patch('socket.socket') as mock_socket: + alert.alert([match]) + + mock_socket.assert_called_once_with(socket.AF_INET, socket.SOCK_STREAM) + mock_socket.return_value.connect.assert_called_once_with(mock.ANY) + + assert expected_data == mock_socket.return_value.sendall.call_args[0][0] + assert ('elastalert', logging.INFO, 'GELF message sent via TCP.') == caplog.record_tuples[0] + + +def test_gelf_sent_tcp_with_custom_ca(caplog): + caplog.set_level(logging.INFO) + rule = { + 'name': 'Test Rule', + 'type': 'any', + 'gelf_type': 'tcp', + 'gelf_host': '127.0.0.1', + 'gelf_port': 12201, + 'gelf_ca_cert': './ca.pem', + 'gelf_payload': {'username': 'username', 'account_status': 'account_status'}, + 'alert': [], + } + + rules_loader = FileRulesLoader({}) + rules_loader.load_modules(rule) + alert = GelfAlerter(rule) + + match = { + 'username': 'test_user', + 'account_status': 'disabled', + } + + expected_data = { + 'version': '1.1', + 'host': socket.getfqdn(), + 'short_message': '{"Title": "Test Rule", "username": "test_user", "account_status": "disabled"}', + 'level': 5, + } + + expected_data = json.dumps(expected_data).encode('utf-8') + b'\x00' + + with mock.patch('socket.socket') as mock_socket: + with mock.patch('ssl.wrap_socket') as mock_ssl_wrap_socket: + mock_ssl_wrap_socket.return_value = mock_socket + alert.alert([match]) + mock_socket.assert_called_once_with(socket.AF_INET, socket.SOCK_STREAM) + 
mock_socket.return_value.connect.assert_called_once_with(mock.ANY) + mock_ssl_wrap_socket.assert_called_once_with(mock_socket.return_value, ca_certs=rule['gelf_ca_cert']) + + assert expected_data == mock_ssl_wrap_socket.return_value.sendall.call_args[0][0] + assert ('elastalert', logging.INFO, 'GELF message sent via TCP.') == caplog.record_tuples[0] + + +def test_gelf_sent_tcp_with_optional_fields(caplog): + caplog.set_level(logging.INFO) + rule = { + 'name': 'Test Rule', + 'type': 'any', + 'gelf_type': 'tcp', + 'gelf_host': '127.0.0.1', + 'gelf_port': 12201, + 'gelf_payload': {'username': 'username', 'account_status': 'account_status'}, + 'gelf_timeout': 10, + 'gelf_log_level': 1, + 'alert': [], + } + + rules_loader = FileRulesLoader({}) + rules_loader.load_modules(rule) + alert = GelfAlerter(rule) + + match = { + 'username': 'test_user', + 'account_status': 'disabled', + } + + expected_data = { + 'version': '1.1', + 'host': socket.getfqdn(), + 'short_message': '{"Title": "Test Rule", "username": "test_user", "account_status": "disabled"}', + 'level': rule['gelf_log_level'], + } + + expected_data = json.dumps(expected_data).encode('utf-8') + b'\x00' + + with mock.patch('socket.socket') as mock_socket: + alert.alert([match]) + + mock_socket.assert_called_once_with(socket.AF_INET, socket.SOCK_STREAM) + mock_socket.return_value.connect.assert_called_once_with(mock.ANY) + mock_socket.return_value.settimeout.assert_called_once_with(rule['gelf_timeout']) + + assert expected_data == mock_socket.return_value.sendall.call_args[0][0] + assert ('elastalert', logging.INFO, 'GELF message sent via TCP.') == caplog.record_tuples[0] + + +def test_gelf_http_getinfo(): + rule = { + 'name': 'Test Rule', + 'type': 'any', + 'gelf_type': 'http', + 'gelf_endpoint': 'http://graylog.url/gelf', + 'alert': [], + } + + rules_loader = FileRulesLoader({}) + rules_loader.load_modules(rule) + alert = GelfAlerter(rule) + + expected_data = { + 'type': 'gelf', + 'gelf_type': rule['gelf_type'] + } + + actual_data = alert.get_info() + assert expected_data == actual_data + + +def test_gelf_tcp_getinfo(): + + rule = { + 'name': 'Test Rule', + 'type': 'any', + 'gelf_type': 'tcp', + 'gelf_host': '127.0.0.1', + 'gelf_port': '12201', + 'alert': [], + } + + rules_loader = FileRulesLoader({}) + rules_loader.load_modules(rule) + alert = GelfAlerter(rule) + + expected_data = { + 'type': 'gelf', + 'gelf_type': rule['gelf_type'], + } + + actual_data = alert.get_info() + assert expected_data == actual_data diff --git a/tests/alerters/gitter_test.py b/tests/alerters/gitter_test.py new file mode 100644 index 000000000..8935d01c6 --- /dev/null +++ b/tests/alerters/gitter_test.py @@ -0,0 +1,160 @@ +import json +import logging +import pytest + +from unittest import mock + +from requests import RequestException + +from elastalert.alerters.gitter import GitterAlerter +from elastalert.loaders import FileRulesLoader +from elastalert.util import EAException + + +@pytest.mark.parametrize('msg_level, except_msg_level', [ + ('', 'error'), + ('error', 'error'), + ('info', 'info') +]) +def test_gitter_msg_level(msg_level, except_msg_level, caplog): + caplog.set_level(logging.INFO) + rule = { + 'name': 'Test Gitter Rule', + 'type': 'any', + 'gitter_webhook_url': 'https://webhooks.gitter.im/e/xxxxx', + 'alert': [] + } + + if msg_level: + rule['gitter_msg_level'] = msg_level + + rules_loader = FileRulesLoader({}) + rules_loader.load_modules(rule) + alert = GitterAlerter(rule) + match = { + '@timestamp': '2021-01-01T00:00:00', + 'somefield': 'foobarbaz' + 
} + with mock.patch('requests.post') as mock_post_request: + alert.alert([match]) + + expected_data = { + 'message': 'Test Gitter Rule\n\n@timestamp: 2021-01-01T00:00:00\nsomefield: foobarbaz\n', + 'level': except_msg_level + } + + mock_post_request.assert_called_once_with( + rule['gitter_webhook_url'], + data=mock.ANY, + headers={'content-type': 'application/json'}, + proxies=None + ) + + actual_data = json.loads(mock_post_request.call_args_list[0][1]['data']) + assert expected_data == actual_data + assert ('elastalert', logging.INFO, 'Alert sent to Gitter') == caplog.record_tuples[0] + + +def test_gitter_proxy(): + rule = { + 'name': 'Test Gitter Rule', + 'type': 'any', + 'gitter_webhook_url': 'https://webhooks.gitter.im/e/xxxxx', + 'gitter_msg_level': 'error', + 'gitter_proxy': 'http://proxy.url', + 'alert': [] + } + rules_loader = FileRulesLoader({}) + rules_loader.load_modules(rule) + alert = GitterAlerter(rule) + match = { + '@timestamp': '2021-01-01T00:00:00', + 'somefield': 'foobarbaz' + } + with mock.patch('requests.post') as mock_post_request: + alert.alert([match]) + expected_data = { + 'message': 'Test Gitter Rule\n\n@timestamp: 2021-01-01T00:00:00\nsomefield: foobarbaz\n', + 'level': 'error' + } + + mock_post_request.assert_called_once_with( + rule['gitter_webhook_url'], + data=mock.ANY, + headers={'content-type': 'application/json'}, + proxies={'https': 'http://proxy.url'} + ) + + actual_data = json.loads(mock_post_request.call_args_list[0][1]['data']) + assert expected_data == actual_data + + +def test_gitter_ea_exception(): + with pytest.raises(EAException) as ea: + rule = { + 'name': 'Test Gitter Rule', + 'type': 'any', + 'gitter_webhook_url': 'https://webhooks.gitter.im/e/xxxxx', + 'gitter_msg_level': 'error', + 'gitter_proxy': 'http://proxy.url', + 'alert': [] + } + rules_loader = FileRulesLoader({}) + rules_loader.load_modules(rule) + alert = GitterAlerter(rule) + match = { + '@timestamp': '2021-01-01T00:00:00', + 'somefield': 'foobarbaz' + } + mock_run = mock.MagicMock(side_effect=RequestException) + with mock.patch('requests.post', mock_run), pytest.raises(RequestException): + alert.alert([match]) + assert 'Error posting to Gitter: ' in str(ea) + + +def test_gitter_getinfo(): + rule = { + 'name': 'Test Gitter Rule', + 'type': 'any', + 'gitter_webhook_url': 'https://webhooks.gitter.im/e/xxxxx', + 'alert': [] + } + rules_loader = FileRulesLoader({}) + rules_loader.load_modules(rule) + alert = GitterAlerter(rule) + + expected_data = { + 'type': 'gitter', + 'gitter_webhook_url': 'https://webhooks.gitter.im/e/xxxxx' + } + actual_data = alert.get_info() + assert expected_data == actual_data + + +@pytest.mark.parametrize('gitter_webhook_url, expected_data', [ + ('', 'Missing required option(s): gitter_webhook_url'), + ('https://webhooks.gitter.im/e/xxxxx', + { + 'type': 'gitter', + 'gitter_webhook_url': 'https://webhooks.gitter.im/e/xxxxx' + }) +]) +def test_gitter_required_error(gitter_webhook_url, expected_data): + try: + rule = { + 'name': 'Test Gitter Rule', + 'type': 'any', + 'alert': [] + } + + if gitter_webhook_url: + rule['gitter_webhook_url'] = gitter_webhook_url + + rules_loader = FileRulesLoader({}) + rules_loader.load_modules(rule) + alert = GitterAlerter(rule) + + actual_data = alert.get_info() + assert expected_data == actual_data + except Exception as ea: + assert expected_data in str(ea) diff --git a/tests/alerters/googlechat_test.py b/tests/alerters/googlechat_test.py new file mode 100644 index 000000000..fd4e4b5db --- /dev/null +++ 
b/tests/alerters/googlechat_test.py @@ -0,0 +1,324 @@ +import json +import logging +import pytest + +from unittest import mock + +from requests import RequestException + +from elastalert.alerters.googlechat import GoogleChatAlerter +from elastalert.loaders import FileRulesLoader +from elastalert.util import EAException + + +def test_google_chat_basic(caplog): + caplog.set_level(logging.INFO) + rule = { + 'name': 'Test GoogleChat Rule', + 'type': 'any', + 'googlechat_webhook_url': 'http://xxxxxxx', + 'alert': [] + } + rules_loader = FileRulesLoader({}) + rules_loader.load_modules(rule) + alert = GoogleChatAlerter(rule) + match = { + '@timestamp': '2021-01-01T00:00:00', + 'somefield': 'foobarbaz' + } + with mock.patch('requests.post') as mock_post_request: + alert.alert([match]) + + expected_data = { + 'text': 'Test GoogleChat Rule\n\n@timestamp: 2021-01-01T00:00:00\nsomefield: foobarbaz\n' + } + + mock_post_request.assert_called_once_with( + rule['googlechat_webhook_url'], + data=mock.ANY, + proxies=None, + headers={'content-type': 'application/json'} + ) + + actual_data = json.loads(mock_post_request.call_args_list[0][1]['data']) + assert expected_data == actual_data + assert ('elastalert', logging.INFO, 'Alert sent to Google Chat!') == caplog.record_tuples[0] + + +def test_google_chat_card(): + rule = { + 'name': 'Test GoogleChat Rule', + 'type': 'any', + 'googlechat_webhook_url': 'http://xxxxxxx', + 'googlechat_format': 'card', + 'googlechat_header_title': 'xxxx1', + 'googlechat_header_subtitle': 'xxxx2', + 'googlechat_header_image': 'http://xxxx/image.png', + 'googlechat_footer_kibanalink': 'http://xxxxx/kibana', + 'alert': [] + } + rules_loader = FileRulesLoader({}) + rules_loader.load_modules(rule) + alert = GoogleChatAlerter(rule) + match = { + '@timestamp': '2021-01-01T00:00:00', + 'somefield': 'foobarbaz' + } + with mock.patch('requests.post') as mock_post_request: + alert.alert([match]) + + expected_data = { + 'cards': [{ + 'header': { + 'title': rule['googlechat_header_title'], + 'subtitle': rule['googlechat_header_subtitle'], + 'imageUrl': rule['googlechat_header_image'] + }, + 'sections': [ + { + 'widgets': [{ + "textParagraph": { + 'text': 'Test GoogleChat Rule\n\n@timestamp: 2021-01-01T00:00:00\nsomefield: foobarbaz\n' + } + }] + }, + { + 'widgets': [{ + 'buttons': [{ + 'textButton': { + 'text': 'VISIT KIBANA', + 'onClick': { + 'openLink': { + 'url': rule['googlechat_footer_kibanalink'] + } + } + } + }] + }] + } + ]} + ] + } + + mock_post_request.assert_called_once_with( + rule['googlechat_webhook_url'], + data=mock.ANY, + proxies=None, + headers={'content-type': 'application/json'} + ) + + actual_data = json.loads(mock_post_request.call_args_list[0][1]['data']) + assert expected_data == actual_data + + +def test_google_chat_ea_exception(): + with pytest.raises(EAException) as ea: + rule = { + 'name': 'Test GoogleChat Rule', + 'type': 'any', + 'googlechat_webhook_url': 'http://xxxxxxx', + 'alert': [] + } + rules_loader = FileRulesLoader({}) + rules_loader.load_modules(rule) + alert = GoogleChatAlerter(rule) + match = { + '@timestamp': '2021-01-01T00:00:00', + 'somefield': 'foobarbaz' + } + mock_run = mock.MagicMock(side_effect=RequestException) + with mock.patch('requests.post', mock_run), pytest.raises(RequestException): + alert.alert([match]) + assert 'Error posting to google chat: ' in str(ea) + + +def test_google_chat_getinfo(): + rule = { + 'name': 'Test GoogleChat Rule', + 'type': 'any', + 'googlechat_webhook_url': 'http://xxxxxxx', + 'alert': [] + } + rules_loader = 
FileRulesLoader({}) + rules_loader.load_modules(rule) + alert = GoogleChatAlerter(rule) + + expected_data = { + 'type': 'googlechat', + 'googlechat_webhook_url': ['http://xxxxxxx'] + } + actual_data = alert.get_info() + assert expected_data == actual_data + + +@pytest.mark.parametrize('googlechat_webhook_url, expected_data', [ + ('', 'Missing required option(s): googlechat_webhook_url'), + ('http://xxxxxxx', + { + 'type': 'googlechat', + 'googlechat_webhook_url': ['http://xxxxxxx'] + }), +]) +def test_google_chat_required_error(googlechat_webhook_url, expected_data): + try: + rule = { + 'name': 'Test GoogleChat Rule', + 'type': 'any', + 'alert': [] + } + + if googlechat_webhook_url: + rule['googlechat_webhook_url'] = googlechat_webhook_url + + rules_loader = FileRulesLoader({}) + rules_loader.load_modules(rule) + alert = GoogleChatAlerter(rule) + + actual_data = alert.get_info() + assert expected_data == actual_data + except Exception as ea: + assert expected_data in str(ea) + + +def test_ggooglechat_header_title_none(): + rule = { + 'name': 'Test GoogleChat Rule', + 'type': 'any', + 'googlechat_webhook_url': 'http://xxxxxxx', + 'googlechat_format': 'card', + 'googlechat_header_subtitle': 'xxxx2', + 'googlechat_header_image': 'http://xxxx/image.png', + 'googlechat_footer_kibanalink': 'http://xxxxx/kibana', + 'alert': [] + } + rules_loader = FileRulesLoader({}) + rules_loader.load_modules(rule) + alert = GoogleChatAlerter(rule) + match = { + '@timestamp': '2021-01-01T00:00:00', + 'somefield': 'foobarbaz' + } + with mock.patch('requests.post') as mock_post_request: + alert.alert([match]) + + expected_data = { + 'cards': [{ + 'sections': [ + { + 'widgets': [{ + "textParagraph": { + 'text': 'Test GoogleChat Rule\n\n@timestamp: 2021-01-01T00:00:00\nsomefield: foobarbaz\n' + } + }] + }, + { + 'widgets': [{ + 'buttons': [{ + 'textButton': { + 'text': 'VISIT KIBANA', + 'onClick': { + 'openLink': { + 'url': rule['googlechat_footer_kibanalink'] + } + } + } + }] + }] + } + ]} + ] + } + + mock_post_request.assert_called_once_with( + rule['googlechat_webhook_url'], + data=mock.ANY, + proxies=None, + headers={'content-type': 'application/json'} + ) + + actual_data = json.loads(mock_post_request.call_args_list[0][1]['data']) + assert expected_data == actual_data + + +def test_googlechat_footer_kibanalink_none(): + rule = { + 'name': 'Test GoogleChat Rule', + 'type': 'any', + 'googlechat_webhook_url': 'http://xxxxxxx', + 'googlechat_format': 'card', + 'googlechat_header_title': 'xxxx1', + 'googlechat_header_subtitle': 'xxxx2', + 'googlechat_header_image': 'http://xxxx/image.png', + 'alert': [] + } + rules_loader = FileRulesLoader({}) + rules_loader.load_modules(rule) + alert = GoogleChatAlerter(rule) + match = { + '@timestamp': '2021-01-01T00:00:00', + 'somefield': 'foobarbaz' + } + with mock.patch('requests.post') as mock_post_request: + alert.alert([match]) + + expected_data = { + 'cards': [{ + 'header': { + 'title': rule['googlechat_header_title'], + 'subtitle': rule['googlechat_header_subtitle'], + 'imageUrl': rule['googlechat_header_image'] + }, + 'sections': [ + { + 'widgets': [{ + "textParagraph": { + 'text': 'Test GoogleChat Rule\n\n@timestamp: 2021-01-01T00:00:00\nsomefield: foobarbaz\n' + } + }] + } + ]} + ] + } + + mock_post_request.assert_called_once_with( + rule['googlechat_webhook_url'], + data=mock.ANY, + proxies=None, + headers={'content-type': 'application/json'} + ) + + actual_data = json.loads(mock_post_request.call_args_list[0][1]['data']) + assert expected_data == actual_data + + +def 
test_googlechat_proxy(): + rule = { + 'name': 'Test GoogleChat Rule', + 'type': 'any', + 'googlechat_webhook_url': 'http://xxxxxxx', + 'googlechat_proxy': 'http://proxy.url', + 'alert': [] + } + rules_loader = FileRulesLoader({}) + rules_loader.load_modules(rule) + alert = GoogleChatAlerter(rule) + match = { + '@timestamp': '2021-01-01T00:00:00', + 'somefield': 'foobarbaz' + } + with mock.patch('requests.post') as mock_post_request: + alert.alert([match]) + + expected_data = { + 'text': 'Test GoogleChat Rule\n\n@timestamp: 2021-01-01T00:00:00\nsomefield: foobarbaz\n' + } + + mock_post_request.assert_called_once_with( + rule['googlechat_webhook_url'], + data=mock.ANY, + headers={'content-type': 'application/json'}, + proxies={'https': 'http://proxy.url'}, + ) + + actual_data = json.loads(mock_post_request.call_args_list[0][1]['data']) + assert expected_data == actual_data diff --git a/tests/alerters/httppost2_test.py b/tests/alerters/httppost2_test.py new file mode 100644 index 000000000..eae8fa256 --- /dev/null +++ b/tests/alerters/httppost2_test.py @@ -0,0 +1,2004 @@ +import json +import logging +from unittest import mock + +import pytest +import yaml +from requests import RequestException + +from elastalert.alerters.httppost2 import HTTPPost2Alerter +from elastalert.loaders import FileRulesLoader +from elastalert.util import EAException + + +def test_http_alerter_with_payload(caplog): + caplog.set_level(logging.INFO) + rule = { + 'name': 'Test HTTP Post Alerter With Payload', + 'type': 'any', + 'http_post2_url': 'http://test.webhook.url', + 'http_post2_payload': {'posted_name': 'toto'}, + 'alert': [] + } + rules_loader = FileRulesLoader({}) + rules_loader.load_modules(rule) + alert = HTTPPost2Alerter(rule) + match = { + '@timestamp': '2017-01-01T00:00:00', + 'somefield': 'foobarbaz' + } + with mock.patch('requests.post') as mock_post_request: + alert.alert([match]) + expected_data = { + 'posted_name': 'toto', + } + mock_post_request.assert_called_once_with( + rule['http_post2_url'], + data=mock.ANY, + headers={'Content-Type': 'application/json', 'Accept': 'application/json;charset=utf-8'}, + proxies=None, + timeout=10, + verify=True + ) + assert expected_data == json.loads(mock_post_request.call_args_list[0][1]['data']) + assert ('elastalert', logging.INFO, 'HTTP Post 2 alert sent.') == caplog.record_tuples[0] + + +def test_http_alerter_with_payload_as_json_string(caplog): + caplog.set_level(logging.INFO) + rule = yaml.safe_load( + ''' + name: Test HTTP Post Alerter With Payload as JSON string + type: any + http_post2_url: http://test.webhook.url + http_post2_payload: | + { + "posted_name": "toto" + } + alert: [] + ''' + ) + rules_loader = FileRulesLoader({}) + rules_loader.load_modules(rule) + alert = HTTPPost2Alerter(rule) + match = { + '@timestamp': '2017-01-01T00:00:00', + 'somefield': 'foobarbaz' + } + with mock.patch('requests.post') as mock_post_request: + alert.alert([match]) + expected_data = { + 'posted_name': 'toto', + } + mock_post_request.assert_called_once_with( + rule['http_post2_url'], + data=mock.ANY, + headers={'Content-Type': 'application/json', 'Accept': 'application/json;charset=utf-8'}, + proxies=None, + timeout=10, + verify=True + ) + assert expected_data == json.loads(mock_post_request.call_args_list[0][1]['data']) + assert ('elastalert', logging.INFO, 'HTTP Post 2 alert sent.') == caplog.record_tuples[0] + + +def test_http_alerter_with_payload_raw_fields(caplog): + caplog.set_level(logging.INFO) + rule = { + 'name': 'Test HTTP Post Alerter With Payload and raw 
fields', + 'type': 'any', + 'http_post2_url': 'http://test.webhook.url', + 'http_post2_payload': {'posted_name': 'toto'}, + 'http_post2_raw_fields': {'posted_raw_field': 'somefield'}, + 'http_post2_static_payload': {'name': 'somestaticname'}, + 'alert': [] + } + rules_loader = FileRulesLoader({}) + rules_loader.load_modules(rule) + alert = HTTPPost2Alerter(rule) + match = { + '@timestamp': '2017-01-01T00:00:00', + 'somefield': 'foobarbaz' + } + with mock.patch('requests.post') as mock_post_request: + alert.alert([match]) + expected_data = { + 'posted_name': 'toto', + 'posted_raw_field': 'foobarbaz' + } + mock_post_request.assert_called_once_with( + rule['http_post2_url'], + data=mock.ANY, + headers={'Content-Type': 'application/json', 'Accept': 'application/json;charset=utf-8'}, + proxies=None, + timeout=10, + verify=True + ) + assert expected_data == json.loads(mock_post_request.call_args_list[0][1]['data']) + assert ('elastalert', logging.INFO, 'HTTP Post 2 alert sent.') == caplog.record_tuples[0] + + +def test_http_alerter_with_payload_raw_fields_as_json_string(caplog): + caplog.set_level(logging.INFO) + rule = yaml.safe_load( + ''' + name: Test HTTP Post Alerter With Payload and raw fields as JSON string + type: any + http_post2_url: http://test.webhook.url + http_post2_raw_fields: + posted_raw_field: somefield + http_post2_static_payload: + name: somestaticname + http_post2_payload: | + { + "posted_name": "toto" + } + alert: [] + ''' + ) + rules_loader = FileRulesLoader({}) + rules_loader.load_modules(rule) + alert = HTTPPost2Alerter(rule) + match = { + '@timestamp': '2017-01-01T00:00:00', + 'somefield': 'foobarbaz' + } + with mock.patch('requests.post') as mock_post_request: + alert.alert([match]) + expected_data = { + 'posted_name': 'toto', + 'posted_raw_field': 'foobarbaz' + } + mock_post_request.assert_called_once_with( + rule['http_post2_url'], + data=mock.ANY, + headers={'Content-Type': 'application/json', 'Accept': 'application/json;charset=utf-8'}, + proxies=None, + timeout=10, + verify=True + ) + assert expected_data == json.loads(mock_post_request.call_args_list[0][1]['data']) + assert ('elastalert', logging.INFO, 'HTTP Post 2 alert sent.') == caplog.record_tuples[0] + + +def test_http_alerter_with_payload_raw_fields_overwrite(caplog): + caplog.set_level(logging.INFO) + rule = { + 'name': 'Test HTTP Post Alerter raw fields overwrite payload', + 'type': 'any', + 'http_post2_url': 'http://test.webhook.url', + 'http_post2_payload': {'posted_name': 'toto', 'overwrite_field': 'tata'}, + 'http_post2_raw_fields': {'overwrite_field': 'somefield'}, + 'http_post2_static_payload': {'name': 'somestaticname'}, + 'alert': [] + } + rules_loader = FileRulesLoader({}) + rules_loader.load_modules(rule) + alert = HTTPPost2Alerter(rule) + match = { + '@timestamp': '2017-01-01T00:00:00', + 'somefield': 'foobarbaz' + } + with mock.patch('requests.post') as mock_post_request: + alert.alert([match]) + expected_data = { + 'posted_name': 'toto', + 'overwrite_field': 'foobarbaz' + } + mock_post_request.assert_called_once_with( + rule['http_post2_url'], + data=mock.ANY, + headers={'Content-Type': 'application/json', 'Accept': 'application/json;charset=utf-8'}, + proxies=None, + timeout=10, + verify=True + ) + assert expected_data == json.loads(mock_post_request.call_args_list[0][1]['data']) + assert ('elastalert', logging.INFO, 'HTTP Post 2 alert sent.') == caplog.record_tuples[0] + + +def test_http_alerter_with_payload_raw_fields_overwrite_as_json_string(caplog): + caplog.set_level(logging.INFO) + 
rule = yaml.safe_load( + ''' + name: Test HTTP Post Alerter raw fields overwrite payload as a JSON string + type: any + http_post2_url: http://test.webhook.url + http_post2_payload: | + { + "posted_name": "toto", + "overwrite_field": "tata" + } + http_post2_raw_fields: + overwrite_field: somefield + http_post2_static_payload: + name: somestaticname + alert: [] + ''' + ) + rules_loader = FileRulesLoader({}) + rules_loader.load_modules(rule) + alert = HTTPPost2Alerter(rule) + match = { + '@timestamp': '2017-01-01T00:00:00', + 'somefield': 'foobarbaz' + } + with mock.patch('requests.post') as mock_post_request: + alert.alert([match]) + expected_data = { + 'posted_name': 'toto', + 'overwrite_field': 'foobarbaz' + } + mock_post_request.assert_called_once_with( + rule['http_post2_url'], + data=mock.ANY, + headers={'Content-Type': 'application/json', 'Accept': 'application/json;charset=utf-8'}, + proxies=None, + timeout=10, + verify=True + ) + assert expected_data == json.loads(mock_post_request.call_args_list[0][1]['data']) + assert ('elastalert', logging.INFO, 'HTTP Post 2 alert sent.') == caplog.record_tuples[0] + + +def test_http_alerter_with_payload_no_clash(caplog): + caplog.set_level(logging.INFO) + rule = { + 'name': 'Test HTTP Post Alerter With Payload has no clash with the match fields', + 'type': 'any', + 'http_post2_url': 'http://test.webhook.url', + 'http_post2_payload': {'posted_name': 'toto'}, + 'alert': [] + } + rules_loader = FileRulesLoader({}) + rules_loader.load_modules(rule) + alert = HTTPPost2Alerter(rule) + match = { + '@timestamp': '2017-01-01T00:00:00', + 'toto': 'foobarbaz' + } + with mock.patch('requests.post') as mock_post_request: + alert.alert([match]) + expected_data = { + 'posted_name': 'toto', + } + mock_post_request.assert_called_once_with( + rule['http_post2_url'], + data=mock.ANY, + headers={'Content-Type': 'application/json', 'Accept': 'application/json;charset=utf-8'}, + proxies=None, + timeout=10, + verify=True + ) + assert expected_data == json.loads(mock_post_request.call_args_list[0][1]['data']) + assert ('elastalert', logging.INFO, 'HTTP Post 2 alert sent.') == caplog.record_tuples[0] + + +def test_http_alerter_with_payload_no_clash_as_json_string(caplog): + caplog.set_level(logging.INFO) + rule = yaml.safe_load( + ''' + name: Test HTTP Post Alerter With Payload (as JSON string) has no clash with the match fields + type: any + http_post2_url: http://test.webhook.url + http_post2_payload: | + {"posted_name": "toto"} + alert: [] + ''' + ) + rules_loader = FileRulesLoader({}) + rules_loader.load_modules(rule) + alert = HTTPPost2Alerter(rule) + match = { + '@timestamp': '2017-01-01T00:00:00', + 'toto': 'foobarbaz' + } + with mock.patch('requests.post') as mock_post_request: + alert.alert([match]) + expected_data = { + 'posted_name': 'toto', + } + mock_post_request.assert_called_once_with( + rule['http_post2_url'], + data=mock.ANY, + headers={'Content-Type': 'application/json', 'Accept': 'application/json;charset=utf-8'}, + proxies=None, + timeout=10, + verify=True + ) + assert expected_data == json.loads(mock_post_request.call_args_list[0][1]['data']) + assert ('elastalert', logging.INFO, 'HTTP Post 2 alert sent.') == caplog.record_tuples[0] + + +def test_http_alerter_with_payload_args_keys(caplog): + caplog.set_level(logging.INFO) + rule = { + 'name': 'Test HTTP Post Alerter With Payload args for the key', + 'type': 'any', + 'http_post2_url': 'http://test.webhook.url', + 'http_post2_payload': {'args_{{some_field}}': 'tata'}, + 'alert': [] + } + rules_loader 
= FileRulesLoader({}) + rules_loader.load_modules(rule) + alert = HTTPPost2Alerter(rule) + match = { + '@timestamp': '2017-01-01T00:00:00', + 'some_field': 'to\tto' # include some specially handled control char + } + with mock.patch('requests.post') as mock_post_request: + alert.alert([match]) + expected_data = { + 'args_to\tto': 'tata', + } + mock_post_request.assert_called_once_with( + rule['http_post2_url'], + data=mock.ANY, + headers={'Content-Type': 'application/json', 'Accept': 'application/json;charset=utf-8'}, + proxies=None, + timeout=10, + verify=True + ) + assert expected_data == json.loads(mock_post_request.call_args_list[0][1]['data']) + assert ('elastalert', logging.INFO, 'HTTP Post 2 alert sent.') == caplog.record_tuples[0] + + +def test_http_alerter_with_payload_args_keys_as_json_string(caplog): + caplog.set_level(logging.INFO) + rule = yaml.safe_load( + ''' + name: Test HTTP Post Alerter With Payload (as JSON string) args for the key + type: any + http_post2_url: http://test.webhook.url + http_post2_payload: | + {"args_{{some_field}}": "tata"} + alert: [] + ''' + ) + rules_loader = FileRulesLoader({}) + rules_loader.load_modules(rule) + alert = HTTPPost2Alerter(rule) + match = { + '@timestamp': '2017-01-01T00:00:00', + 'some_field': 'to\tto' # include some specially handled control char + } + with mock.patch('requests.post') as mock_post_request: + alert.alert([match]) + expected_data = { + 'args_to\tto': 'tata', + } + mock_post_request.assert_called_once_with( + rule['http_post2_url'], + data=mock.ANY, + headers={'Content-Type': 'application/json', 'Accept': 'application/json;charset=utf-8'}, + proxies=None, + timeout=10, + verify=True + ) + assert expected_data == json.loads(mock_post_request.call_args_list[0][1]['data']) + assert ('elastalert', logging.INFO, 'HTTP Post 2 alert sent.') == caplog.record_tuples[0] + + +def test_http_alerter_with_payload_template_error(caplog): + with pytest.raises(ValueError) as error: + rule = { + 'name': 'Test HTTP Post Alerter With unexpected template syntax error', + 'type': 'any', + 'jinja_root_name': '_data', + 'http_post2_url': 'http://test.webhook.url', + 'http_post2_headers': {'header_name': 'toto'}, + 'http_post2_payload': {'posted_name': '{{ _data["titi"] }}'}, + 'alert': [] + } + rules_loader = FileRulesLoader({}) + rules_loader.load_modules(rule) + alert = HTTPPost2Alerter(rule) + match = { + '@timestamp': '2017-01-01T00:00:00', + 'titi': 'foobarbaz' + } + with mock.patch('requests.post'): + alert.alert([match]) + + assert "HTTP Post 2: The value of 'http_post2_payload' has an invalid Jinja2 syntax." in str(error) + + +def test_http_alerter_with_payload_unexpected_error(caplog): + with pytest.raises(ValueError) as error: + rule = { + 'name': 'Test HTTP Post Alerter With unexpected error', + 'type': 'any', + 'jinja_root_name': '_data', + 'http_post2_url': 'http://test.webhook.url', + 'http_post2_headers': {'header_name': 'toto'}, + 'http_post2_payload': {'posted_name': '{% for k,v in titi %}{% endfor %}'}, + 'alert': [] + } + rules_loader = FileRulesLoader({}) + rules_loader.load_modules(rule) + alert = HTTPPost2Alerter(rule) + match = { + '@timestamp': '2017-01-01T00:00:00', + 'titi': {'foobarbaz': 'tata'} + } + with mock.patch('requests.post'): + alert.alert([match]) + + assert "HTTP Post 2: An unexpected error occurred with the 'http_post2_payload' value." 
in str(error) + + +def test_http_alerter_with_payload_json_decode_error(caplog): + with pytest.raises(ValueError) as error: + rule = yaml.safe_load( + ''' + name: Test HTTP Post Alerter With json decode error + type: any + http_post2_url: http://test.webhook.url + http_post2_payload: | + this is invalid json + alert: [] + ''' + ) + rules_loader = FileRulesLoader({}) + rules_loader.load_modules(rule) + alert = HTTPPost2Alerter(rule) + match = { + '@timestamp': '2017-01-01T00:00:00', + 'titi': 'foobarbaz' + } + with mock.patch('requests.post'): + alert.alert([match]) + + assert "HTTP Post 2: The rendered value for 'http_post2_payload' contains invalid JSON." in str(error) + + +def test_http_alerter_with_payload_args_keys_jinja_root(caplog): + caplog.set_level(logging.INFO) + rule = yaml.safe_load( + ''' + name: Test HTTP Post Alerter With Payload args for the key using custom jinja root + type: any + jinja_root_name: _data + http_post2_url: http://test.webhook.url + http_post2_payload: | + { + "args_{{_data['key1']}}": "tata", + "args_{{_data["key2"]}}": "toto" + } + alert: [] + ''' + ) + rules_loader = FileRulesLoader({}) + rules_loader.load_modules(rule) + alert = HTTPPost2Alerter(rule) + match = { + '@timestamp': '2017-01-01T00:00:00', + 'key1': 'ta\tta', # include some specially handled control char + 'key2': 'to\tto', + } + with mock.patch('requests.post') as mock_post_request: + alert.alert([match]) + expected_data = { + 'args_to\tto': 'toto', + 'args_ta\tta': 'tata', + } + mock_post_request.assert_called_once_with( + rule['http_post2_url'], + data=mock.ANY, + headers={'Content-Type': 'application/json', 'Accept': 'application/json;charset=utf-8'}, + proxies=None, + timeout=10, + verify=True + ) + assert expected_data == json.loads(mock_post_request.call_args_list[0][1]['data']) + assert ('elastalert', logging.INFO, 'HTTP Post 2 alert sent.') == caplog.record_tuples[0] + + +def test_http_alerter_with_payload_nested_keys(caplog): + caplog.set_level(logging.INFO) + rule = { + 'name': 'Test HTTP Post Alerter With Payload args for the key', + 'type': 'any', + 'http_post2_url': 'http://test.webhook.url', + 'http_post2_payload': {'key': {'nested_key': 'some_value_{{some_field}}'}}, + 'alert': [] + } + rules_loader = FileRulesLoader({}) + rules_loader.load_modules(rule) + alert = HTTPPost2Alerter(rule) + match = { + '@timestamp': '2017-01-01T00:00:00', + 'some_field': 'toto' + } + with mock.patch('requests.post') as mock_post_request: + alert.alert([match]) + expected_data = { + 'key': {'nested_key': 'some_value_toto'}, + } + mock_post_request.assert_called_once_with( + rule['http_post2_url'], + data=mock.ANY, + headers={'Content-Type': 'application/json', 'Accept': 'application/json;charset=utf-8'}, + proxies=None, + timeout=10, + verify=True + ) + assert expected_data == json.loads(mock_post_request.call_args_list[0][1]['data']) + assert ('elastalert', logging.INFO, 'HTTP Post 2 alert sent.') == caplog.record_tuples[0] + + +def test_http_alerter_with_payload_nested_keys_as_json_string(caplog): + caplog.set_level(logging.INFO) + rule = yaml.safe_load( + ''' + name: Test HTTP Post Alerter With Payload (as JSON string) args for the key + type: any + http_post2_url: http://test.webhook.url + http_post2_payload: | + { + "key": {"nested_key": "some_value_{{some_field}}"} + } + alert: [] + ''' + ) + rules_loader = FileRulesLoader({}) + rules_loader.load_modules(rule) + alert = HTTPPost2Alerter(rule) + match = { + '@timestamp': '2017-01-01T00:00:00', + 'some_field': 'toto' + } + with 
mock.patch('requests.post') as mock_post_request: + alert.alert([match]) + expected_data = { + 'key': {'nested_key': 'some_value_toto'}, + } + mock_post_request.assert_called_once_with( + rule['http_post2_url'], + data=mock.ANY, + headers={'Content-Type': 'application/json', 'Accept': 'application/json;charset=utf-8'}, + proxies=None, + timeout=10, + verify=True + ) + assert expected_data == json.loads(mock_post_request.call_args_list[0][1]['data']) + assert ('elastalert', logging.INFO, 'HTTP Post 2 alert sent.') == caplog.record_tuples[0] + + +def test_http_alerter_with_payload_none_value(caplog): + caplog.set_level(logging.INFO) + rule = { + 'name': 'Test HTTP Post Alerter With Payload args for the key', + 'type': 'any', + 'http_post2_url': 'http://test.webhook.url', + 'http_post2_payload': {'key': None}, + 'alert': [] + } + rules_loader = FileRulesLoader({}) + rules_loader.load_modules(rule) + alert = HTTPPost2Alerter(rule) + match = { + '@timestamp': '2017-01-01T00:00:00', + 'some_field': 'toto' + } + with mock.patch('requests.post') as mock_post_request: + alert.alert([match]) + expected_data = { + 'key': None, + } + mock_post_request.assert_called_once_with( + rule['http_post2_url'], + data=mock.ANY, + headers={'Content-Type': 'application/json', 'Accept': 'application/json;charset=utf-8'}, + proxies=None, + timeout=10, + verify=True + ) + assert expected_data == json.loads(mock_post_request.call_args_list[0][1]['data']) + assert ('elastalert', logging.INFO, 'HTTP Post 2 alert sent.') == caplog.record_tuples[0] + + +def test_http_alerter_with_payload_none_value_as_json_string(caplog): + caplog.set_level(logging.INFO) + rule = yaml.safe_load( + ''' + name: Test HTTP Post Alerter With Payload (as JSON string) args for the key + type: any + http_post2_url: http://test.webhook.url + http_post2_payload: | + {"key": null} + alert: [] + ''' + ) + rules_loader = FileRulesLoader({}) + rules_loader.load_modules(rule) + alert = HTTPPost2Alerter(rule) + match = { + '@timestamp': '2017-01-01T00:00:00', + 'some_field': 'toto' + } + with mock.patch('requests.post') as mock_post_request: + alert.alert([match]) + expected_data = { + 'key': None, + } + mock_post_request.assert_called_once_with( + rule['http_post2_url'], + data=mock.ANY, + headers={'Content-Type': 'application/json', 'Accept': 'application/json;charset=utf-8'}, + proxies=None, + timeout=10, + verify=True + ) + assert expected_data == json.loads(mock_post_request.call_args_list[0][1]['data']) + assert ('elastalert', logging.INFO, 'HTTP Post 2 alert sent.') == caplog.record_tuples[0] + + +def test_http_alerter_with_payload_args_key_not_found(caplog): + caplog.set_level(logging.INFO) + rule = { + 'name': 'Test HTTP Post Alerter With Payload args for the key if not found', + 'type': 'any', + 'http_post2_url': 'http://test.webhook.url', + 'http_post2_payload': {'args_{{some_field1}}': 'tata'}, + 'alert': [] + } + rules_loader = FileRulesLoader({}) + rules_loader.load_modules(rule) + alert = HTTPPost2Alerter(rule) + match = { + '@timestamp': '2017-01-01T00:00:00', + 'some_field': 'toto' + } + with mock.patch('requests.post') as mock_post_request: + alert.alert([match]) + expected_data = { + 'args_': 'tata', + } + mock_post_request.assert_called_once_with( + rule['http_post2_url'], + data=mock.ANY, + headers={'Content-Type': 'application/json', 'Accept': 'application/json;charset=utf-8'}, + proxies=None, + timeout=10, + verify=True + ) + assert expected_data == json.loads(mock_post_request.call_args_list[0][1]['data']) + assert ('elastalert', 
logging.INFO, 'HTTP Post 2 alert sent.') == caplog.record_tuples[0] + + +def test_http_alerter_with_payload_args_key_not_found_as_json_string(caplog): + caplog.set_level(logging.INFO) + rule = yaml.safe_load( + ''' + name: Test HTTP Post Alerter With Payload (as JSON string) args for the key if not found + type: any + http_post2_url: http://test.webhook.url + http_post2_payload: | + {"args_{{some_field1}}": "tata"} + alert: [] + ''' + ) + rules_loader = FileRulesLoader({}) + rules_loader.load_modules(rule) + alert = HTTPPost2Alerter(rule) + match = { + '@timestamp': '2017-01-01T00:00:00', + 'some_field': 'toto' + } + with mock.patch('requests.post') as mock_post_request: + alert.alert([match]) + expected_data = { + 'args_': 'tata', + } + mock_post_request.assert_called_once_with( + rule['http_post2_url'], + data=mock.ANY, + headers={'Content-Type': 'application/json', 'Accept': 'application/json;charset=utf-8'}, + proxies=None, + timeout=10, + verify=True + ) + assert expected_data == json.loads(mock_post_request.call_args_list[0][1]['data']) + assert ('elastalert', logging.INFO, 'HTTP Post 2 alert sent.') == caplog.record_tuples[0] + + +def test_http_alerter_with_payload_args_value(caplog): + caplog.set_level(logging.INFO) + rule = { + 'name': 'Test HTTP Post Alerter With Payload args for the value', + 'type': 'any', + 'http_post2_url': 'http://test.webhook.url', + 'http_post2_payload': {'posted_name': 'toto', 'args_name': '{{some_field}}'}, + 'alert': [] + } + rules_loader = FileRulesLoader({}) + rules_loader.load_modules(rule) + alert = HTTPPost2Alerter(rule) + match = { + '@timestamp': '2017-01-01T00:00:00', + 'some_field': 'foo\tbar\nbaz' # include some specially handled control chars + } + with mock.patch('requests.post') as mock_post_request: + alert.alert([match]) + expected_data = { + 'posted_name': 'toto', + 'args_name': 'foo\tbar\nbaz', + } + mock_post_request.assert_called_once_with( + rule['http_post2_url'], + data=mock.ANY, + headers={'Content-Type': 'application/json', 'Accept': 'application/json;charset=utf-8'}, + proxies=None, + timeout=10, + verify=True + ) + assert expected_data == json.loads(mock_post_request.call_args_list[0][1]['data']) + assert ('elastalert', logging.INFO, 'HTTP Post 2 alert sent.') == caplog.record_tuples[0] + + +def test_http_alerter_with_payload_args_value_as_json_string(caplog): + caplog.set_level(logging.INFO) + rule = yaml.safe_load( + """ + name: Test HTTP Post Alerter With Payload as json string and args for the value + type: any + http_post2_url: 'http://test.webhook.url' + http_post2_payload: | + { + "posted_name": "toto", + "args_name": "{{some_field}}" + } + alert: [] + """ + ) + rules_loader = FileRulesLoader({}) + rules_loader.load_modules(rule) + alert = HTTPPost2Alerter(rule) + match = { + '@timestamp': '2017-01-01T00:00:00', + 'some_field': 'foo\tbar\nbaz' # include some specially handled control chars + } + with mock.patch('requests.post') as mock_post_request: + alert.alert([match]) + expected_data = { + 'posted_name': 'toto', + 'args_name': 'foo\tbar\nbaz', + } + mock_post_request.assert_called_once_with( + rule['http_post2_url'], + data=mock.ANY, + headers={'Content-Type': 'application/json', 'Accept': 'application/json;charset=utf-8'}, + proxies=None, + timeout=10, + verify=True + ) + assert expected_data == json.loads(mock_post_request.call_args_list[0][1]['data']) + assert ('elastalert', logging.INFO, 'HTTP Post 2 alert sent.') == caplog.record_tuples[0] + + +def test_http_alerter_with_payload_args_value_jinja_root(caplog): + 
caplog.set_level(logging.INFO) + rule = { + 'name': 'Test HTTP Post Alerter With Payload args for the value using custom jinja root', + 'jinja_root_name': '_data', + 'type': 'any', + 'http_post2_url': 'http://test.webhook.url', + 'http_post2_payload': {'posted_name': 'toto', 'args_name': "{{_data['some_field']}}"}, + 'alert': [] + } + rules_loader = FileRulesLoader({}) + rules_loader.load_modules(rule) + alert = HTTPPost2Alerter(rule) + match = { + '@timestamp': '2017-01-01T00:00:00', + 'some_field': 'foo\tbar\nbaz' # include some specially handled control chars + } + with mock.patch('requests.post') as mock_post_request: + alert.alert([match]) + expected_data = { + 'posted_name': 'toto', + 'args_name': 'foo\tbar\nbaz', + } + mock_post_request.assert_called_once_with( + rule['http_post2_url'], + data=mock.ANY, + headers={'Content-Type': 'application/json', 'Accept': 'application/json;charset=utf-8'}, + proxies=None, + timeout=10, + verify=True + ) + assert expected_data == json.loads(mock_post_request.call_args_list[0][1]['data']) + assert ('elastalert', logging.INFO, 'HTTP Post 2 alert sent.') == caplog.record_tuples[0] + + +def test_http_alerter_with_payload_args_value_jinja_root_as_json_string(caplog): + caplog.set_level(logging.INFO) + rule = yaml.safe_load( + ''' + name: Test HTTP Post Alerter With Payload (as JSON string) args for the value using custom jinja root + jinja_root_name: _data + type: any + http_post2_url: http://test.webhook.url + http_post2_payload: | + { + "posted_name": "toto", + "args_name1": "{{_data['some_field']}}", + "args_name2": "{{_data["some_field"]}}" + } + alert: [] + ''' + ) + rules_loader = FileRulesLoader({}) + rules_loader.load_modules(rule) + alert = HTTPPost2Alerter(rule) + match = { + '@timestamp': '2017-01-01T00:00:00', + 'some_field': 'foo\tbar\nbaz' # include some specially handled control chars + } + with mock.patch('requests.post') as mock_post_request: + alert.alert([match]) + expected_data = { + 'posted_name': 'toto', + 'args_name1': 'foo\tbar\nbaz', + 'args_name2': 'foo\tbar\nbaz', + } + mock_post_request.assert_called_once_with( + rule['http_post2_url'], + data=mock.ANY, + headers={'Content-Type': 'application/json', 'Accept': 'application/json;charset=utf-8'}, + proxies=None, + timeout=10, + verify=True + ) + assert expected_data == json.loads(mock_post_request.call_args_list[0][1]['data']) + assert ('elastalert', logging.INFO, 'HTTP Post 2 alert sent.') == caplog.record_tuples[0] + + +def test_http_alerter_with_payload_args_value_not_found(caplog): + caplog.set_level(logging.INFO) + rule = { + 'name': 'Test HTTP Post Alerter With Payload args for the value if not found', + 'type': 'any', + 'http_post2_url': 'http://test.webhook.url', + 'http_post2_payload': {'posted_name': 'toto', 'args_name': '{{some_field1}}'}, + 'alert': [] + } + rules_loader = FileRulesLoader({}) + rules_loader.load_modules(rule) + alert = HTTPPost2Alerter(rule) + match = { + '@timestamp': '2017-01-01T00:00:00', + 'some_field': 'foobarbaz' + } + with mock.patch('requests.post') as mock_post_request: + alert.alert([match]) + expected_data = { + 'posted_name': 'toto', + 'args_name': '', + } + mock_post_request.assert_called_once_with( + rule['http_post2_url'], + data=mock.ANY, + headers={'Content-Type': 'application/json', 'Accept': 'application/json;charset=utf-8'}, + proxies=None, + timeout=10, + verify=True + ) + assert expected_data == json.loads(mock_post_request.call_args_list[0][1]['data']) + assert ('elastalert', logging.INFO, 'HTTP Post 2 alert sent.') == 
caplog.record_tuples[0] + + +def test_http_alerter_with_payload_args_value_not_found_as_json_string(caplog): + caplog.set_level(logging.INFO) + rule = yaml.safe_load( + ''' + name: Test HTTP Post Alerter With Payload (as JSON string) args for the value if not found + type: any + http_post2_url: http://test.webhook.url + http_post2_payload: | + { + "posted_name": "toto", + "args_name": "{{some_field1}}" + } + alert: [] + ''' + ) + rules_loader = FileRulesLoader({}) + rules_loader.load_modules(rule) + alert = HTTPPost2Alerter(rule) + match = { + '@timestamp': '2017-01-01T00:00:00', + 'some_field': 'foobarbaz' + } + with mock.patch('requests.post') as mock_post_request: + alert.alert([match]) + expected_data = { + 'posted_name': 'toto', + 'args_name': '', + } + mock_post_request.assert_called_once_with( + rule['http_post2_url'], + data=mock.ANY, + headers={'Content-Type': 'application/json', 'Accept': 'application/json;charset=utf-8'}, + proxies=None, + timeout=10, + verify=True + ) + assert expected_data == json.loads(mock_post_request.call_args_list[0][1]['data']) + assert ('elastalert', logging.INFO, 'HTTP Post 2 alert sent.') == caplog.record_tuples[0] + + +def test_http_alerter_with_header_no_clash(caplog): + caplog.set_level(logging.INFO) + rule = { + 'name': 'Test HTTP Post Alerter With Headers has no clash with the match fields', + 'type': 'any', + 'http_post2_url': 'http://test.webhook.url', + 'http_post2_headers': {'header_name': 'titi'}, + 'http_post2_payload': {'posted_name': 'toto'}, + 'alert': [] + } + rules_loader = FileRulesLoader({}) + rules_loader.load_modules(rule) + alert = HTTPPost2Alerter(rule) + match = { + '@timestamp': '2017-01-01T00:00:00', + 'titi': 'foobarbaz' + } + with mock.patch('requests.post') as mock_post_request: + alert.alert([match]) + expected_headers = { + 'Content-Type': 'application/json', + 'Accept': 'application/json;charset=utf-8', + 'header_name': 'titi' + } + mock_post_request.assert_called_once_with( + rule['http_post2_url'], + data=mock.ANY, + headers=expected_headers, + proxies=None, + timeout=10, + verify=True + ) + assert ('elastalert', logging.INFO, 'HTTP Post 2 alert sent.') == caplog.record_tuples[0] + + +def test_http_alerter_with_header_no_clash_as_json_string(caplog): + caplog.set_level(logging.INFO) + rule = yaml.safe_load( + ''' + name: Test HTTP Post Alerter With Headers has no clash with the match fields + type: any + http_post2_url: http://test.webhook.url + http_post2_headers: | + {"header_name": "titi"} + http_post2_payload: + posted_name: toto + alert: [] + ''' + ) + rules_loader = FileRulesLoader({}) + rules_loader.load_modules(rule) + alert = HTTPPost2Alerter(rule) + match = { + '@timestamp': '2017-01-01T00:00:00', + 'titi': 'foobarbaz' + } + with mock.patch('requests.post') as mock_post_request: + alert.alert([match]) + expected_headers = { + 'Content-Type': 'application/json', + 'Accept': 'application/json;charset=utf-8', + 'header_name': 'titi' + } + mock_post_request.assert_called_once_with( + rule['http_post2_url'], + data=mock.ANY, + headers=expected_headers, + proxies=None, + timeout=10, + verify=True + ) + assert ('elastalert', logging.INFO, 'HTTP Post 2 alert sent.') == caplog.record_tuples[0] + + +def test_http_alerter_with_header_args_value(caplog): + caplog.set_level(logging.INFO) + rule = { + 'name': 'Test HTTP Post Alerter With Headers args value', + 'type': 'any', + 'http_post2_url': 'http://test.webhook.url', + 'http_post2_headers': {'header_name': '{{titi}}'}, + 'http_post2_payload': {'posted_name': 'toto'}, + 
'alert': [] + } + rules_loader = FileRulesLoader({}) + rules_loader.load_modules(rule) + alert = HTTPPost2Alerter(rule) + match = { + '@timestamp': '2017-01-01T00:00:00', + 'titi': 'foo\tbarbaz' # include some specially handled control chars + } + with mock.patch('requests.post') as mock_post_request: + alert.alert([match]) + expected_headers = { + 'Content-Type': 'application/json', + 'Accept': 'application/json;charset=utf-8', + 'header_name': 'foo\tbarbaz' + } + mock_post_request.assert_called_once_with( + rule['http_post2_url'], + data=mock.ANY, + headers=expected_headers, + proxies=None, + timeout=10, + verify=True + ) + assert ('elastalert', logging.INFO, 'HTTP Post 2 alert sent.') == caplog.record_tuples[0] + + +def test_http_alerter_with_header_args_value_as_json_string(caplog): + caplog.set_level(logging.INFO) + rule = yaml.safe_load( + ''' + name: Test HTTP Post Alerter With Headers args value + type: any + http_post2_url: http://test.webhook.url + http_post2_headers: | + {"header_name": "{{titi}}"} + http_post2_payload: + posted_name: toto + alert: [] + ''' + ) + rules_loader = FileRulesLoader({}) + rules_loader.load_modules(rule) + alert = HTTPPost2Alerter(rule) + match = { + '@timestamp': '2017-01-01T00:00:00', + 'titi': 'foo\tbarbaz' # include some specially handled control chars + } + with mock.patch('requests.post') as mock_post_request: + alert.alert([match]) + expected_headers = { + 'Content-Type': 'application/json', + 'Accept': 'application/json;charset=utf-8', + 'header_name': 'foo\tbarbaz' + } + mock_post_request.assert_called_once_with( + rule['http_post2_url'], + data=mock.ANY, + headers=expected_headers, + proxies=None, + timeout=10, + verify=True + ) + assert ('elastalert', logging.INFO, 'HTTP Post 2 alert sent.') == caplog.record_tuples[0] + + +def test_http_alerter_with_header_template_error(caplog): + with pytest.raises(ValueError) as error: + rule = { + 'name': 'Test HTTP Post Alerter With Headers args template error', + 'jinja_root_name': '_data', + 'type': 'any', + 'http_post2_url': 'http://test.webhook.url', + 'http_post2_headers': {'header_name': '{{ _data["titi"] }}'}, + 'http_post2_payload': {'posted_name': 'toto'}, + 'alert': [] + } + rules_loader = FileRulesLoader({}) + rules_loader.load_modules(rule) + alert = HTTPPost2Alerter(rule) + match = { + '@timestamp': '2017-01-01T00:00:00', + 'titi': 'foobarbaz' + } + with mock.patch('requests.post'): + alert.alert([match]) + + assert "HTTP Post 2: The value of 'http_post2_headers' has an invalid Jinja2 syntax." in str(error) + + +def test_http_alerter_with_header_json_decode_error(caplog): + with pytest.raises(ValueError) as error: + rule = yaml.safe_load( + ''' + name: Test HTTP Post Alerter With Headers args json decode error + type: any + http_post2_url: http://test.webhook.url + http_post2_headers: | + this is invalid json + http_post2_payload: + posted_name: toto + alert: [] + ''' + ) + rules_loader = FileRulesLoader({}) + rules_loader.load_modules(rule) + alert = HTTPPost2Alerter(rule) + match = { + '@timestamp': '2017-01-01T00:00:00', + 'titi': 'foobarbaz' + } + with mock.patch('requests.post'): + alert.alert([match]) + + assert "HTTP Post 2: The rendered value for 'http_post2_headers' contains invalid JSON." 
in str(error) + + +def test_http_alerter_with_header_args_value_jinja_root(caplog): + caplog.set_level(logging.INFO) + rule = { + 'name': 'Test HTTP Post Alerter With Headers args value using custom jinja root', + 'jinja_root_name': '_data', + 'type': 'any', + 'http_post2_url': 'http://test.webhook.url', + 'http_post2_headers': {'header_name': "{{_data['titi']}}"}, + 'http_post2_payload': {'posted_name': 'toto'}, + 'alert': [] + } + rules_loader = FileRulesLoader({}) + rules_loader.load_modules(rule) + alert = HTTPPost2Alerter(rule) + match = { + '@timestamp': '2017-01-01T00:00:00', + 'titi': 'foo\tbarbaz' # include some specially handled control chars + } + with mock.patch('requests.post') as mock_post_request: + alert.alert([match]) + expected_headers = { + 'Content-Type': 'application/json', + 'Accept': 'application/json;charset=utf-8', + 'header_name': 'foo\tbarbaz' + } + mock_post_request.assert_called_once_with( + rule['http_post2_url'], + data=mock.ANY, + headers=expected_headers, + proxies=None, + timeout=10, + verify=True + ) + assert ('elastalert', logging.INFO, 'HTTP Post 2 alert sent.') == caplog.record_tuples[0] + + +def test_http_alerter_with_header_args_value_jinja_root_as_json_string(caplog): + caplog.set_level(logging.INFO) + rule = yaml.safe_load( + ''' + name: Test HTTP Post Alerter With Headers args value using custom jinja root + type: any + jinja_root_name: _data + http_post2_url: http://test.webhook.url + http_post2_headers: | + {"header_name": "{{_data['titi']}}"} + http_post2_payload: + posted_name: toto + alert: [] + ''' + ) + rules_loader = FileRulesLoader({}) + rules_loader.load_modules(rule) + alert = HTTPPost2Alerter(rule) + match = { + '@timestamp': '2017-01-01T00:00:00', + 'titi': 'foo\tbarbaz' # include some specially handled control chars + } + with mock.patch('requests.post') as mock_post_request: + alert.alert([match]) + expected_headers = { + 'Content-Type': 'application/json', + 'Accept': 'application/json;charset=utf-8', + 'header_name': 'foo\tbarbaz' + } + mock_post_request.assert_called_once_with( + rule['http_post2_url'], + data=mock.ANY, + headers=expected_headers, + proxies=None, + timeout=10, + verify=True + ) + assert ('elastalert', logging.INFO, 'HTTP Post 2 alert sent.') == caplog.record_tuples[0] + + +def test_http_alerter_with_header_args_value_list(caplog): + with pytest.raises(ValueError) as error: + rule = { + 'name': 'Test HTTP Post Alerter With Headers args value', + 'type': 'any', + 'http_post2_url': 'http://test.webhook.url', + 'http_post2_headers': {'header_name': ["test1", "test2"]}, + 'http_post2_payload': {'posted_name': 'toto'}, + 'alert': [] + } + rules_loader = FileRulesLoader({}) + rules_loader.load_modules(rule) + alert = HTTPPost2Alerter(rule) + match = { + '@timestamp': '2017-01-01T00:00:00', + 'titi': 'foobarbaz' + } + with mock.patch('requests.post'): + alert.alert([match]) + + assert "HTTP Post 2: Can't send a header value which is not a string! 
" \ + "Forbidden header header_name: ['test1', 'test2']" in str(error) + + +def test_http_alerter_with_header_args_value_list_as_json_string(caplog): + with pytest.raises(ValueError) as error: + rule = yaml.safe_load( + ''' + name: Test HTTP Post Alerter With Headers args value as json string + type: any + http_post2_url: http://test.webhook.url + http_post2_headers: | + {"header_name": ["test1", "test2"]} + http_post2_payload: + posted_name: toto + alert: [] + ''' + ) + rules_loader = FileRulesLoader({}) + rules_loader.load_modules(rule) + alert = HTTPPost2Alerter(rule) + match = { + '@timestamp': '2017-01-01T00:00:00', + 'titi': 'foobarbaz' + } + with mock.patch('requests.post'): + alert.alert([match]) + + assert "HTTP Post 2: Can't send a header value which is not a string! " \ + "Forbidden header header_name: ['test1', 'test2']" in str(error) + + +def test_http_alerter_with_header_args_value_dict(caplog): + with pytest.raises(ValueError) as error: + rule = { + 'name': 'Test HTTP Post Alerter With Headers args value', + 'type': 'any', + 'http_post2_url': 'http://test.webhook.url', + 'http_post2_headers': {'header_name': {'test': 'val'}}, + 'http_post2_payload': {'posted_name': 'toto'}, + 'alert': [] + } + rules_loader = FileRulesLoader({}) + rules_loader.load_modules(rule) + alert = HTTPPost2Alerter(rule) + match = { + '@timestamp': '2017-01-01T00:00:00', + 'titi': 'foobarbaz' + } + with mock.patch('requests.post'): + alert.alert([match]) + + assert "HTTP Post 2: Can't send a header value which is not a string! " \ + "Forbidden header header_name: {'test': 'val'}" in str(error) + + +def test_http_alerter_with_header_args_value_dict_as_json_string(caplog): + with pytest.raises(ValueError) as error: + rule = yaml.safe_load( + ''' + name: Test HTTP Post Alerter With Headers args value as json string + type: any + http_post2_url: http://test.webhook.url + http_post2_headers: | + {"header_name": {"test": "val"}} + http_post2_payload: + posted_name: toto + alert: [] + ''' + ) + rules_loader = FileRulesLoader({}) + rules_loader.load_modules(rule) + alert = HTTPPost2Alerter(rule) + match = { + '@timestamp': '2017-01-01T00:00:00', + 'titi': 'foobarbaz' + } + with mock.patch('requests.post'): + alert.alert([match]) + + assert "HTTP Post 2: Can't send a header value which is not a string! " \ + "Forbidden header header_name: {'test': 'val'}" in str(error) + + +def test_http_alerter_with_header_args_value_none(caplog): + with pytest.raises(ValueError) as error: + rule = { + 'name': 'Test HTTP Post Alerter With Headers args value', + 'type': 'any', + 'http_post2_url': 'http://test.webhook.url', + 'http_post2_headers': {'header_name': None}, + 'http_post2_payload': {'posted_name': 'toto'}, + 'alert': [] + } + rules_loader = FileRulesLoader({}) + rules_loader.load_modules(rule) + alert = HTTPPost2Alerter(rule) + match = { + '@timestamp': '2017-01-01T00:00:00', + 'titi': 'foobarbaz' + } + with mock.patch('requests.post'): + alert.alert([match]) + + assert "HTTP Post 2: Can't send a header value which is not a string! 
" \ + "Forbidden header header_name: None" in str(error) + + +def test_http_alerter_with_header_args_value_none_as_json_string(caplog): + with pytest.raises(ValueError) as error: + rule = yaml.safe_load( + ''' + name: Test HTTP Post Alerter With Headers args value as json string + type: any + http_post2_url: http://test.webhook.url + http_post2_headers: | + {"header_name": null} + http_post2_payload: + posted_name: toto + alert: [] + ''' + ) + rules_loader = FileRulesLoader({}) + rules_loader.load_modules(rule) + alert = HTTPPost2Alerter(rule) + match = { + '@timestamp': '2017-01-01T00:00:00', + 'titi': 'foobarbaz' + } + with mock.patch('requests.post'): + alert.alert([match]) + + assert "HTTP Post 2: Can't send a header value which is not a string! " \ + "Forbidden header header_name: None" in str(error) + + +def test_http_alerter_with_header_args_value_not_found(caplog): + caplog.set_level(logging.INFO) + rule = { + 'name': 'Test HTTP Post Alerter With Headers args value if not found', + 'type': 'any', + 'http_post2_url': 'http://test.webhook.url', + 'http_post2_headers': {'header_name': '{{titi1}}'}, + 'http_post2_payload': {'posted_name': 'toto'}, + 'alert': [] + } + rules_loader = FileRulesLoader({}) + rules_loader.load_modules(rule) + alert = HTTPPost2Alerter(rule) + match = { + '@timestamp': '2017-01-01T00:00:00', + 'titi': 'foobarbaz' + } + with mock.patch('requests.post') as mock_post_request: + alert.alert([match]) + expected_headers = { + 'Content-Type': 'application/json', + 'Accept': 'application/json;charset=utf-8', + 'header_name': '' + } + mock_post_request.assert_called_once_with( + rule['http_post2_url'], + data=mock.ANY, + headers=expected_headers, + proxies=None, + timeout=10, + verify=True + ) + assert ('elastalert', logging.INFO, 'HTTP Post 2 alert sent.') == caplog.record_tuples[0] + + +def test_http_alerter_with_header_args_value_not_found_as_json_string(caplog): + caplog.set_level(logging.INFO) + rule = yaml.safe_load( + ''' + name: Test HTTP Post Alerter With Headers args value if not found as json string + type: any + http_post2_url: http://test.webhook.url + http_post2_headers: | + {"header_name": "{{titi1}}"} + http_post2_payload: + posted_name: toto + alert: [] + ''' + ) + rules_loader = FileRulesLoader({}) + rules_loader.load_modules(rule) + alert = HTTPPost2Alerter(rule) + match = { + '@timestamp': '2017-01-01T00:00:00', + 'titi': 'foobarbaz' + } + with mock.patch('requests.post') as mock_post_request: + alert.alert([match]) + expected_headers = { + 'Content-Type': 'application/json', + 'Accept': 'application/json;charset=utf-8', + 'header_name': '' + } + mock_post_request.assert_called_once_with( + rule['http_post2_url'], + data=mock.ANY, + headers=expected_headers, + proxies=None, + timeout=10, + verify=True + ) + assert ('elastalert', logging.INFO, 'HTTP Post 2 alert sent.') == caplog.record_tuples[0] + + +def test_http_alerter_with_header_args_key(caplog): + caplog.set_level(logging.INFO) + rule = { + 'name': 'Test HTTP Post Alerter With Headers args key', + 'type': 'any', + 'http_post2_url': 'http://test.webhook.url', + 'http_post2_headers': {'header_{{titi}}': 'tata'}, + 'http_post2_payload': {'posted_name': 'toto'}, + 'alert': [] + } + rules_loader = FileRulesLoader({}) + rules_loader.load_modules(rule) + alert = HTTPPost2Alerter(rule) + match = { + '@timestamp': '2017-01-01T00:00:00', + 'titi': 'foobarbaz' + } + with mock.patch('requests.post') as mock_post_request: + alert.alert([match]) + expected_headers = { + 'Content-Type': 'application/json', + 
'Accept': 'application/json;charset=utf-8', + 'header_foobarbaz': 'tata' + } + mock_post_request.assert_called_once_with( + rule['http_post2_url'], + data=mock.ANY, + headers=expected_headers, + proxies=None, + timeout=10, + verify=True + ) + assert ('elastalert', logging.INFO, 'HTTP Post 2 alert sent.') == caplog.record_tuples[0] + + +def test_http_alerter_with_header_args_key_as_json_string(caplog): + caplog.set_level(logging.INFO) + rule = yaml.safe_load( + ''' + name: Test HTTP Post Alerter With Headers args key as json string + type: any + http_post2_url: http://test.webhook.url + http_post2_headers: | + {"header_{{titi}}": "tata"} + http_post2_payload: + posted_name: toto + alert: [] + ''' + ) + rules_loader = FileRulesLoader({}) + rules_loader.load_modules(rule) + alert = HTTPPost2Alerter(rule) + match = { + '@timestamp': '2017-01-01T00:00:00', + 'titi': 'foobarbaz' + } + with mock.patch('requests.post') as mock_post_request: + alert.alert([match]) + expected_headers = { + 'Content-Type': 'application/json', + 'Accept': 'application/json;charset=utf-8', + 'header_foobarbaz': 'tata' + } + mock_post_request.assert_called_once_with( + rule['http_post2_url'], + data=mock.ANY, + headers=expected_headers, + proxies=None, + timeout=10, + verify=True + ) + assert ('elastalert', logging.INFO, 'HTTP Post 2 alert sent.') == caplog.record_tuples[0] + + +def test_http_alerter_with_header_args_key_jinja_root(caplog): + caplog.set_level(logging.INFO) + rule = { + 'name': 'Test HTTP Post Alerter With Headers args key using custom jinja root', + 'jinja_root_name': '_data', + 'type': 'any', + 'http_post2_url': 'http://test.webhook.url', + 'http_post2_headers': {"header_{{_data['titi']}}": 'tata'}, + 'http_post2_payload': {'posted_name': 'toto'}, + 'alert': [] + } + rules_loader = FileRulesLoader({}) + rules_loader.load_modules(rule) + alert = HTTPPost2Alerter(rule) + match = { + '@timestamp': '2023-01-01T00:00:00', + 'titi': 'foobarbaz' + } + with mock.patch('requests.post') as mock_post_request: + alert.alert([match]) + expected_headers = { + 'Content-Type': 'application/json', + 'Accept': 'application/json;charset=utf-8', + 'header_foobarbaz': 'tata' + } + mock_post_request.assert_called_once_with( + rule['http_post2_url'], + data=mock.ANY, + headers=expected_headers, + proxies=None, + timeout=10, + verify=True + ) + assert ('elastalert', logging.INFO, 'HTTP Post 2 alert sent.') == caplog.record_tuples[0] + + +def test_http_alerter_with_header_args_key_not_found(caplog): + caplog.set_level(logging.INFO) + rule = { + 'name': 'Test HTTP Post Alerter With Headers args key if not found', + 'type': 'any', + 'http_post2_url': 'http://test.webhook.url', + 'http_post2_headers': {'header_{{titi1}}': 'tata'}, + 'http_post2_payload': {'posted_name': 'toto'}, + 'alert': [] + } + rules_loader = FileRulesLoader({}) + rules_loader.load_modules(rule) + alert = HTTPPost2Alerter(rule) + match = { + '@timestamp': '2017-01-01T00:00:00', + 'titi': 'foobarbaz' + } + with mock.patch('requests.post') as mock_post_request: + alert.alert([match]) + expected_headers = { + 'Content-Type': 'application/json', + 'Accept': 'application/json;charset=utf-8', + 'header_': 'tata' + } + mock_post_request.assert_called_once_with( + rule['http_post2_url'], + data=mock.ANY, + headers=expected_headers, + proxies=None, + timeout=10, + verify=True + ) + assert ('elastalert', logging.INFO, 'HTTP Post 2 alert sent.') == caplog.record_tuples[0] + + +def test_http_alerter_with_header_args_key_not_found_as_json_string(caplog): + 
caplog.set_level(logging.INFO) + rule = yaml.safe_load( + ''' + name: Test HTTP Post Alerter With Headers args key if not found as json string + type: any + http_post2_url: http://test.webhook.url + http_post2_headers: | + {"header_{{titi1}}": "tata"} + http_post2_payload: + posted_name: toto + alert: [] + ''' + ) + rules_loader = FileRulesLoader({}) + rules_loader.load_modules(rule) + alert = HTTPPost2Alerter(rule) + match = { + '@timestamp': '2017-01-01T00:00:00', + 'titi': 'foobarbaz' + } + with mock.patch('requests.post') as mock_post_request: + alert.alert([match]) + expected_headers = { + 'Content-Type': 'application/json', + 'Accept': 'application/json;charset=utf-8', + 'header_': 'tata' + } + mock_post_request.assert_called_once_with( + rule['http_post2_url'], + data=mock.ANY, + headers=expected_headers, + proxies=None, + timeout=10, + verify=True + ) + assert ('elastalert', logging.INFO, 'HTTP Post 2 alert sent.') == caplog.record_tuples[0] + + +def test_http_alerter_with_payload_nested(caplog): + caplog.set_level(logging.INFO) + rule = { + 'name': 'Test HTTP Post Alerter With Payload', + 'type': 'any', + 'http_post2_url': 'http://test.webhook.url', + 'http_post2_payload': {'posted_name': '{{ toto.tata }}'}, + 'alert': [] + } + rules_loader = FileRulesLoader({}) + rules_loader.load_modules(rule) + alert = HTTPPost2Alerter(rule) + match = { + '@timestamp': '2017-01-01T00:00:00', + 'toto': {'tata': 'titi'} + } + with mock.patch('requests.post') as mock_post_request: + alert.alert([match]) + expected_data = { + 'posted_name': 'titi', + } + mock_post_request.assert_called_once_with( + rule['http_post2_url'], + data=mock.ANY, + headers={'Content-Type': 'application/json', 'Accept': 'application/json;charset=utf-8'}, + proxies=None, + timeout=10, + verify=True + ) + assert expected_data == json.loads(mock_post_request.call_args_list[0][1]['data']) + assert ('elastalert', logging.INFO, 'HTTP Post 2 alert sent.') == caplog.record_tuples[0] + + +def test_http_alerter_with_payload_nested_as_json_string(caplog): + caplog.set_level(logging.INFO) + rule = yaml.safe_load( + ''' + name: Test HTTP Post Alerter With Payload + type: any + http_post2_url: http://test.webhook.url + http_post2_payload: | + {"posted_name": "{{ toto.tata }}"} + alert: [] + ''' + ) + rules_loader = FileRulesLoader({}) + rules_loader.load_modules(rule) + alert = HTTPPost2Alerter(rule) + match = { + '@timestamp': '2017-01-01T00:00:00', + 'toto': {'tata': 'titi'} + } + with mock.patch('requests.post') as mock_post_request: + alert.alert([match]) + expected_data = { + 'posted_name': 'titi', + } + mock_post_request.assert_called_once_with( + rule['http_post2_url'], + data=mock.ANY, + headers={'Content-Type': 'application/json', 'Accept': 'application/json;charset=utf-8'}, + proxies=None, + timeout=10, + verify=True + ) + assert expected_data == json.loads(mock_post_request.call_args_list[0][1]['data']) + assert ('elastalert', logging.INFO, 'HTTP Post 2 alert sent.') == caplog.record_tuples[0] + + +def test_http_alerter_with_payload_all_values(): + rule = { + 'name': 'Test HTTP Post Alerter With Payload', + 'type': 'any', + 'http_post2_url': 'http://test.webhook.url', + 'http_post2_payload': {'posted_name': 'toto'}, + 'http_post2_all_values': True, + 'alert': [] + } + rules_loader = FileRulesLoader({}) + rules_loader.load_modules(rule) + alert = HTTPPost2Alerter(rule) + match = { + '@timestamp': '2017-01-01T00:00:00', + 'somefield': 'foobarbaz' + } + with mock.patch('requests.post') as mock_post_request: + alert.alert([match]) 
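+        # With http_post2_all_values enabled, the posted body is expected to be the
+        # configured payload merged with every field from the matched document.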
+ expected_data = { + 'posted_name': 'toto', + '@timestamp': '2017-01-01T00:00:00', + 'somefield': 'foobarbaz' + } + mock_post_request.assert_called_once_with( + rule['http_post2_url'], + data=mock.ANY, + headers={'Content-Type': 'application/json', 'Accept': 'application/json;charset=utf-8'}, + proxies=None, + timeout=10, + verify=True + ) + assert expected_data == json.loads(mock_post_request.call_args_list[0][1]['data']) + + +def test_http_alerter_with_payload_all_values_as_json_string(): + rule = yaml.safe_load( + ''' + name: Test HTTP Post Alerter With Payload (as JSON string) + type: any + http_post2_url: http://test.webhook.url + http_post2_payload: | + {"posted_name": "toto"} + http_post2_all_values: true + alert: [] + ''' + ) + rules_loader = FileRulesLoader({}) + rules_loader.load_modules(rule) + alert = HTTPPost2Alerter(rule) + match = { + '@timestamp': '2017-01-01T00:00:00', + 'somefield': 'foobarbaz' + } + with mock.patch('requests.post') as mock_post_request: + alert.alert([match]) + expected_data = { + 'posted_name': 'toto', + '@timestamp': '2017-01-01T00:00:00', + 'somefield': 'foobarbaz' + } + mock_post_request.assert_called_once_with( + rule['http_post2_url'], + data=mock.ANY, + headers={'Content-Type': 'application/json', 'Accept': 'application/json;charset=utf-8'}, + proxies=None, + timeout=10, + verify=True + ) + assert expected_data == json.loads(mock_post_request.call_args_list[0][1]['data']) + + +def test_http_alerter_without_payload(): + rule = { + 'name': 'Test HTTP Post Alerter Without Payload', + 'type': 'any', + 'http_post2_url': 'http://test.webhook.url', + 'alert': [] + } + rules_loader = FileRulesLoader({}) + rules_loader.load_modules(rule) + alert = HTTPPost2Alerter(rule) + match = { + '@timestamp': '2017-01-01T00:00:00', + 'somefield': 'foobarbaz' + } + with mock.patch('requests.post') as mock_post_request: + alert.alert([match]) + expected_data = { + '@timestamp': '2017-01-01T00:00:00', + 'somefield': 'foobarbaz', + } + mock_post_request.assert_called_once_with( + rule['http_post2_url'], + data=mock.ANY, + headers={'Content-Type': 'application/json', 'Accept': 'application/json;charset=utf-8'}, + proxies=None, + timeout=10, + verify=True + ) + assert expected_data == json.loads(mock_post_request.call_args_list[0][1]['data']) + + +def test_http_alerter_proxy(): + rule = { + 'name': 'Test HTTP Post Alerter Without Payload', + 'type': 'any', + 'http_post2_url': 'http://test.webhook.url', + 'http_post2_proxy': 'http://proxy.url', + 'alert': [] + } + rules_loader = FileRulesLoader({}) + rules_loader.load_modules(rule) + alert = HTTPPost2Alerter(rule) + match = { + '@timestamp': '2017-01-01T00:00:00', + 'somefield': 'foobarbaz' + } + with mock.patch('requests.post') as mock_post_request: + alert.alert([match]) + expected_data = { + '@timestamp': '2017-01-01T00:00:00', + 'somefield': 'foobarbaz', + } + mock_post_request.assert_called_once_with( + rule['http_post2_url'], + data=mock.ANY, + headers={'Content-Type': 'application/json', 'Accept': 'application/json;charset=utf-8'}, + proxies={'https': 'http://proxy.url'}, + timeout=10, + verify=True + ) + assert expected_data == json.loads(mock_post_request.call_args_list[0][1]['data']) + + +def test_http_alerter_timeout(): + rule = { + 'name': 'Test HTTP Post Alerter Without Payload', + 'type': 'any', + 'http_post2_url': 'http://test.webhook.url', + 'http_post2_timeout': 20, + 'alert': [] + } + rules_loader = FileRulesLoader({}) + rules_loader.load_modules(rule) + alert = HTTPPost2Alerter(rule) + match = { + 
'@timestamp': '2017-01-01T00:00:00', + 'somefield': 'foobarbaz' + } + with mock.patch('requests.post') as mock_post_request: + alert.alert([match]) + expected_data = { + '@timestamp': '2017-01-01T00:00:00', + 'somefield': 'foobarbaz', + } + mock_post_request.assert_called_once_with( + rule['http_post2_url'], + data=mock.ANY, + headers={'Content-Type': 'application/json', 'Accept': 'application/json;charset=utf-8'}, + proxies=None, + timeout=20, + verify=True + ) + assert expected_data == json.loads(mock_post_request.call_args_list[0][1]['data']) + + +def test_http_alerter_headers(): + rule = { + 'name': 'Test HTTP Post Alerter Without Payload', + 'type': 'any', + 'http_post2_url': 'http://test.webhook.url', + 'http_post2_headers': {'authorization': 'Basic 123dr3234'}, + 'alert': [] + } + rules_loader = FileRulesLoader({}) + rules_loader.load_modules(rule) + alert = HTTPPost2Alerter(rule) + match = { + '@timestamp': '2017-01-01T00:00:00', + 'somefield': 'foobarbaz' + } + with mock.patch('requests.post') as mock_post_request: + alert.alert([match]) + expected_data = { + '@timestamp': '2017-01-01T00:00:00', + 'somefield': 'foobarbaz', + } + mock_post_request.assert_called_once_with( + rule['http_post2_url'], + data=mock.ANY, + headers={'Content-Type': 'application/json', 'Accept': 'application/json;charset=utf-8', + 'authorization': 'Basic 123dr3234'}, + proxies=None, + timeout=10, + verify=True + ) + assert expected_data == json.loads(mock_post_request.call_args_list[0][1]['data']) + + +@pytest.mark.parametrize('ca_certs, ignore_ssl_errors, excpet_verify', [ + ('', '', True), + ('', True, False), + ('', False, True), + (True, '', True), + (True, True, True), + (True, False, True), + (False, '', True), + (False, True, False), + (False, False, True) +]) +def test_http_alerter_post_ca_certs(ca_certs, ignore_ssl_errors, excpet_verify): + rule = { + 'name': 'Test HTTP Post Alerter Without Payload', + 'type': 'any', + 'http_post2_url': 'http://test.webhook.url', + 'alert': [] + } + if ca_certs: + rule['http_post2_ca_certs'] = ca_certs + + if ignore_ssl_errors: + rule['http_post2_ignore_ssl_errors'] = ignore_ssl_errors + + rules_loader = FileRulesLoader({}) + rules_loader.load_modules(rule) + alert = HTTPPost2Alerter(rule) + match = { + '@timestamp': '2017-01-01T00:00:00', + 'somefield': 'foobarbaz' + } + with mock.patch('requests.post') as mock_post_request: + alert.alert([match]) + expected_data = { + '@timestamp': '2017-01-01T00:00:00', + 'somefield': 'foobarbaz', + } + mock_post_request.assert_called_once_with( + rule['http_post2_url'], + data=mock.ANY, + headers={'Content-Type': 'application/json', 'Accept': 'application/json;charset=utf-8'}, + proxies=None, + timeout=10, + verify=excpet_verify + ) + assert expected_data == json.loads(mock_post_request.call_args_list[0][1]['data']) + + +def test_http_alerter_post_ea_exception(): + with pytest.raises(EAException) as ea: + rule = { + 'name': 'Test HTTP Post Alerter Without Payload', + 'type': 'any', + 'http_post2_url': 'http://test.webhook.url', + 'http_post2_ca_certs': False, + 'alert': [] + } + rules_loader = FileRulesLoader({}) + rules_loader.load_modules(rule) + alert = HTTPPost2Alerter(rule) + match = { + '@timestamp': '2017-01-01T00:00:00', + 'somefield': 'foobarbaz' + } + mock_run = mock.MagicMock(side_effect=RequestException) + with mock.patch('requests.post', mock_run), pytest.raises(RequestException): + alert.alert([match]) + assert 'Error posting HTTP Post 2 alert: ' in str(ea) + + +def test_http_getinfo(): + rule = { + 'name': 'Test 
HTTP Post Alerter Without Payload', + 'type': 'any', + 'http_post2_url': 'http://test.webhook.url', + 'alert': [] + } + rules_loader = FileRulesLoader({}) + rules_loader.load_modules(rule) + alert = HTTPPost2Alerter(rule) + + expected_data = { + 'type': 'http_post2', + 'http_post2_webhook_url': ['http://test.webhook.url'] + } + actual_data = alert.get_info() + assert expected_data == actual_data + + +@pytest.mark.parametrize('http_post2_url, expected_data', [ + ('', 'Missing required option(s): http_post2_url'), + ('http://test.webhook.url', + { + 'type': 'http_post2', + 'http_post2_webhook_url': ['http://test.webhook.url'] + }), +]) +def test_http_required_error(http_post2_url, expected_data): + try: + rule = { + 'name': 'Test HTTP Post Alerter Without Payload', + 'type': 'any', + 'alert': [] + } + + if http_post2_url: + rule['http_post2_url'] = http_post2_url + + rules_loader = FileRulesLoader({}) + rules_loader.load_modules(rule) + alert = HTTPPost2Alerter(rule) + + actual_data = alert.get_info() + assert expected_data == actual_data + except Exception as ea: + assert expected_data in str(ea) diff --git a/tests/alerters/httppost_test.py b/tests/alerters/httppost_test.py new file mode 100644 index 000000000..507331902 --- /dev/null +++ b/tests/alerters/httppost_test.py @@ -0,0 +1,338 @@ +import json +import logging +import pytest + +from unittest import mock + +from requests import RequestException + +from elastalert.alerters.httppost import HTTPPostAlerter +from elastalert.loaders import FileRulesLoader +from elastalert.util import EAException + + +def test_http_alerter_with_payload(caplog): + caplog.set_level(logging.INFO) + rule = { + 'name': 'Test HTTP Post Alerter With Payload', + 'type': 'any', + 'http_post_url': 'http://test.webhook.url', + 'http_post_payload': {'posted_name': 'somefield'}, + 'http_post_static_payload': {'name': 'somestaticname'}, + 'alert': [] + } + rules_loader = FileRulesLoader({}) + rules_loader.load_modules(rule) + alert = HTTPPostAlerter(rule) + match = { + '@timestamp': '2017-01-01T00:00:00', + 'somefield': 'foobarbaz' + } + with mock.patch('requests.post') as mock_post_request: + alert.alert([match]) + expected_data = { + 'posted_name': 'foobarbaz', + 'name': 'somestaticname' + } + mock_post_request.assert_called_once_with( + rule['http_post_url'], + data=mock.ANY, + headers={'Content-Type': 'application/json', 'Accept': 'application/json;charset=utf-8'}, + proxies=None, + timeout=10, + verify=True + ) + assert expected_data == json.loads(mock_post_request.call_args_list[0][1]['data']) + assert ('elastalert', logging.INFO, 'HTTP Post alert sent.') == caplog.record_tuples[0] + + +def test_http_alerter_with_payload_all_values(): + rule = { + 'name': 'Test HTTP Post Alerter With Payload', + 'type': 'any', + 'http_post_url': 'http://test.webhook.url', + 'http_post_payload': {'posted_name': 'somefield'}, + 'http_post_static_payload': {'name': 'somestaticname'}, + 'http_post_all_values': True, + 'alert': [] + } + rules_loader = FileRulesLoader({}) + rules_loader.load_modules(rule) + alert = HTTPPostAlerter(rule) + match = { + '@timestamp': '2017-01-01T00:00:00', + 'somefield': 'foobarbaz' + } + with mock.patch('requests.post') as mock_post_request: + alert.alert([match]) + expected_data = { + 'posted_name': 'foobarbaz', + 'name': 'somestaticname', + '@timestamp': '2017-01-01T00:00:00', + 'somefield': 'foobarbaz' + } + mock_post_request.assert_called_once_with( + rule['http_post_url'], + data=mock.ANY, + headers={'Content-Type': 'application/json', 'Accept': 
'application/json;charset=utf-8'}, + proxies=None, + timeout=10, + verify=True + ) + assert expected_data == json.loads(mock_post_request.call_args_list[0][1]['data']) + + +def test_http_alerter_without_payload(): + rule = { + 'name': 'Test HTTP Post Alerter Without Payload', + 'type': 'any', + 'http_post_url': 'http://test.webhook.url', + 'http_post_static_payload': {'name': 'somestaticname'}, + 'alert': [] + } + rules_loader = FileRulesLoader({}) + rules_loader.load_modules(rule) + alert = HTTPPostAlerter(rule) + match = { + '@timestamp': '2017-01-01T00:00:00', + 'somefield': 'foobarbaz' + } + with mock.patch('requests.post') as mock_post_request: + alert.alert([match]) + expected_data = { + '@timestamp': '2017-01-01T00:00:00', + 'somefield': 'foobarbaz', + 'name': 'somestaticname' + } + mock_post_request.assert_called_once_with( + rule['http_post_url'], + data=mock.ANY, + headers={'Content-Type': 'application/json', 'Accept': 'application/json;charset=utf-8'}, + proxies=None, + timeout=10, + verify=True + ) + assert expected_data == json.loads(mock_post_request.call_args_list[0][1]['data']) + + +def test_http_alerter_proxy(): + rule = { + 'name': 'Test HTTP Post Alerter Without Payload', + 'type': 'any', + 'http_post_url': 'http://test.webhook.url', + 'http_post_static_payload': {'name': 'somestaticname'}, + 'http_post_proxy': 'http://proxy.url', + 'alert': [] + } + rules_loader = FileRulesLoader({}) + rules_loader.load_modules(rule) + alert = HTTPPostAlerter(rule) + match = { + '@timestamp': '2017-01-01T00:00:00', + 'somefield': 'foobarbaz' + } + with mock.patch('requests.post') as mock_post_request: + alert.alert([match]) + expected_data = { + '@timestamp': '2017-01-01T00:00:00', + 'somefield': 'foobarbaz', + 'name': 'somestaticname' + } + mock_post_request.assert_called_once_with( + rule['http_post_url'], + data=mock.ANY, + headers={'Content-Type': 'application/json', 'Accept': 'application/json;charset=utf-8'}, + proxies={'https': 'http://proxy.url'}, + timeout=10, + verify=True + ) + assert expected_data == json.loads(mock_post_request.call_args_list[0][1]['data']) + + +def test_http_alerter_timeout(): + rule = { + 'name': 'Test HTTP Post Alerter Without Payload', + 'type': 'any', + 'http_post_url': 'http://test.webhook.url', + 'http_post_static_payload': {'name': 'somestaticname'}, + 'http_post_timeout': 20, + 'alert': [] + } + rules_loader = FileRulesLoader({}) + rules_loader.load_modules(rule) + alert = HTTPPostAlerter(rule) + match = { + '@timestamp': '2017-01-01T00:00:00', + 'somefield': 'foobarbaz' + } + with mock.patch('requests.post') as mock_post_request: + alert.alert([match]) + expected_data = { + '@timestamp': '2017-01-01T00:00:00', + 'somefield': 'foobarbaz', + 'name': 'somestaticname' + } + mock_post_request.assert_called_once_with( + rule['http_post_url'], + data=mock.ANY, + headers={'Content-Type': 'application/json', 'Accept': 'application/json;charset=utf-8'}, + proxies=None, + timeout=20, + verify=True + ) + assert expected_data == json.loads(mock_post_request.call_args_list[0][1]['data']) + + +def test_http_alerter_headers(): + rule = { + 'name': 'Test HTTP Post Alerter Without Payload', + 'type': 'any', + 'http_post_url': 'http://test.webhook.url', + 'http_post_static_payload': {'name': 'somestaticname'}, + 'http_post_headers': {'authorization': 'Basic 123dr3234'}, + 'alert': [] + } + rules_loader = FileRulesLoader({}) + rules_loader.load_modules(rule) + alert = HTTPPostAlerter(rule) + match = { + '@timestamp': '2017-01-01T00:00:00', + 'somefield': 'foobarbaz' 
+ } + with mock.patch('requests.post') as mock_post_request: + alert.alert([match]) + expected_data = { + '@timestamp': '2017-01-01T00:00:00', + 'somefield': 'foobarbaz', + 'name': 'somestaticname' + } + mock_post_request.assert_called_once_with( + rule['http_post_url'], + data=mock.ANY, + headers={'Content-Type': 'application/json', 'Accept': 'application/json;charset=utf-8', 'authorization': 'Basic 123dr3234'}, + proxies=None, + timeout=10, + verify=True + ) + assert expected_data == json.loads(mock_post_request.call_args_list[0][1]['data']) + + +@pytest.mark.parametrize('ca_certs, ignore_ssl_errors, excpet_verify', [ + ('', '', True), + ('', True, False), + ('', False, True), + (True, '', True), + (True, True, True), + (True, False, True), + (False, '', True), + (False, True, False), + (False, False, True) +]) +def test_http_alerter_post_ca_certs(ca_certs, ignore_ssl_errors, excpet_verify): + rule = { + 'name': 'Test HTTP Post Alerter Without Payload', + 'type': 'any', + 'http_post_url': 'http://test.webhook.url', + 'http_post_static_payload': {'name': 'somestaticname'}, + 'alert': [] + } + if ca_certs: + rule['http_post_ca_certs'] = ca_certs + + if ignore_ssl_errors: + rule['http_post_ignore_ssl_errors'] = ignore_ssl_errors + + rules_loader = FileRulesLoader({}) + rules_loader.load_modules(rule) + alert = HTTPPostAlerter(rule) + match = { + '@timestamp': '2017-01-01T00:00:00', + 'somefield': 'foobarbaz' + } + with mock.patch('requests.post') as mock_post_request: + alert.alert([match]) + expected_data = { + '@timestamp': '2017-01-01T00:00:00', + 'somefield': 'foobarbaz', + 'name': 'somestaticname' + } + mock_post_request.assert_called_once_with( + rule['http_post_url'], + data=mock.ANY, + headers={'Content-Type': 'application/json', 'Accept': 'application/json;charset=utf-8'}, + proxies=None, + timeout=10, + verify=excpet_verify + ) + assert expected_data == json.loads(mock_post_request.call_args_list[0][1]['data']) + + +def test_http_alerter_post_ea_exception(): + with pytest.raises(EAException) as ea: + rule = { + 'name': 'Test HTTP Post Alerter Without Payload', + 'type': 'any', + 'http_post_url': 'http://test.webhook.url', + 'http_post_static_payload': {'name': 'somestaticname'}, + 'http_post_ca_certs': False, + 'alert': [] + } + rules_loader = FileRulesLoader({}) + rules_loader.load_modules(rule) + alert = HTTPPostAlerter(rule) + match = { + '@timestamp': '2017-01-01T00:00:00', + 'somefield': 'foobarbaz' + } + mock_run = mock.MagicMock(side_effect=RequestException) + with mock.patch('requests.post', mock_run), pytest.raises(RequestException): + alert.alert([match]) + assert 'Error posting HTTP Post alert: ' in str(ea) + + +def test_http_getinfo(): + rule = { + 'name': 'Test HTTP Post Alerter Without Payload', + 'type': 'any', + 'http_post_url': 'http://test.webhook.url', + 'alert': [] + } + rules_loader = FileRulesLoader({}) + rules_loader.load_modules(rule) + alert = HTTPPostAlerter(rule) + + expected_data = { + 'type': 'http_post', + 'http_post_webhook_url': ['http://test.webhook.url'] + } + actual_data = alert.get_info() + assert expected_data == actual_data + + +@pytest.mark.parametrize('http_post_url, expected_data', [ + ('', 'Missing required option(s): http_post_url'), + ('http://test.webhook.url', + { + 'type': 'http_post', + 'http_post_webhook_url': ['http://test.webhook.url'] + }), +]) +def test_http_required_error(http_post_url, expected_data): + try: + rule = { + 'name': 'Test HTTP Post Alerter Without Payload', + 'type': 'any', + 'alert': [] + } + + if http_post_url: 
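+            # An empty parametrized URL is deliberately left unset, so constructing the
+            # alerter raises the "Missing required option(s)" error checked in the except block.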
+ rule['http_post_url'] = http_post_url + + rules_loader = FileRulesLoader({}) + rules_loader.load_modules(rule) + alert = HTTPPostAlerter(rule) + + actual_data = alert.get_info() + assert expected_data == actual_data + except Exception as ea: + assert expected_data in str(ea) diff --git a/tests/alerters/jira_test.py b/tests/alerters/jira_test.py new file mode 100644 index 000000000..fbcab13d7 --- /dev/null +++ b/tests/alerters/jira_test.py @@ -0,0 +1,431 @@ +import datetime +import logging +import pytest + +from jira import JIRAError +from unittest import mock + +from elastalert.alerters.jira import JiraFormattedMatchString, JiraAlerter +from elastalert.util import ts_now +from tests.alerts_test import mock_rule + + +def test_jira_formatted_match_string(ea): + match = {'foo': {'bar': ['one', 2, 'three']}, 'top_events_poof': 'phew'} + alert_text = str(JiraFormattedMatchString(ea.rules[0], match)) + tab = 4 * ' ' + expected_alert_text_snippet = '{code}{\n' \ + + tab + '"foo": {\n' \ + + 2 * tab + '"bar": [\n' \ + + 3 * tab + '"one",\n' \ + + 3 * tab + '2,\n' \ + + 3 * tab + '"three"\n' \ + + 2 * tab + ']\n' \ + + tab + '}\n' \ + + '}{code}' + assert expected_alert_text_snippet in alert_text + + +def test_jira(caplog): + caplog.set_level(logging.INFO) + description_txt = "Description stuff goes here like a runbook link." + rule = { + 'name': 'test alert', + 'jira_account_file': 'jirafile', + 'type': mock_rule(), + 'jira_project': 'testproject', + 'jira_priority': 0, + 'jira_issuetype': 'testtype', + 'jira_server': 'jiraserver', + 'jira_label': 'testlabel', + 'jira_component': 'testcomponent', + 'jira_description': description_txt, + 'jira_assignee': 'testuser', + 'jira_watchers': ['testwatcher1', 'testwatcher2'], + 'timestamp_field': '@timestamp', + 'alert_subject': 'Issue {0} occurred at {1}', + 'alert_subject_args': ['test_term', '@timestamp'], + 'rule_file': '/tmp/foo.yaml' + } + + mock_priority = mock.Mock(id='5') + + with mock.patch('elastalert.alerters.jira.JIRA') as mock_jira, \ + mock.patch('elastalert.alerters.jira.read_yaml') as mock_open: + mock_open.return_value = {'user': 'jirauser', 'password': 'jirapassword'} + mock_jira.return_value.priorities.return_value = [mock_priority] + mock_jira.return_value.fields.return_value = [] + alert = JiraAlerter(rule) + alert.alert([{'test_term': 'test_value', '@timestamp': '2014-10-31T00:00:00'}]) + + expected = [ + mock.call('jiraserver', basic_auth=('jirauser', 'jirapassword')), + mock.call().priorities(), + mock.call().fields(), + mock.call().create_issue( + issuetype={'name': 'testtype'}, + priority={'id': '5'}, + project={'key': 'testproject'}, + labels=['testlabel'], + components=[{'name': 'testcomponent'}], + description=mock.ANY, + summary='Issue test_value occurred at 2014-10-31T00:00:00', + ), + mock.call().assign_issue(mock.ANY, 'testuser'), + mock.call().add_watcher(mock.ANY, 'testwatcher1'), + mock.call().add_watcher(mock.ANY, 'testwatcher2'), + ] + + # We don't care about additional calls to mock_jira, such as __str__ + assert mock_jira.mock_calls[:7] == expected + assert mock_jira.mock_calls[3][2]['description'].startswith(description_txt) + user, level, message = caplog.record_tuples[0] + assert 'elastalert' == user + assert logging.INFO == level + assert 'pened Jira ticket: ' in message + + # Search called if jira_bump_tickets + rule['jira_bump_tickets'] = True + with mock.patch('elastalert.alerters.jira.JIRA') as mock_jira, \ + mock.patch('elastalert.alerters.jira.read_yaml') as mock_open: + mock_open.return_value = {'user': 
'jirauser', 'password': 'jirapassword'} + mock_jira.return_value = mock.Mock() + mock_jira.return_value.search_issues.return_value = [] + mock_jira.return_value.priorities.return_value = [mock_priority] + mock_jira.return_value.fields.return_value = [] + + alert = JiraAlerter(rule) + alert.alert([{'test_term': 'test_value', '@timestamp': '2014-10-31T00:00:00'}]) + + expected.insert(3, mock.call().search_issues(mock.ANY)) + assert mock_jira.mock_calls == expected + + # Remove a field if jira_ignore_in_title set + rule['jira_ignore_in_title'] = 'test_term' + with mock.patch('elastalert.alerters.jira.JIRA') as mock_jira, \ + mock.patch('elastalert.alerters.jira.read_yaml') as mock_open: + mock_open.return_value = {'user': 'jirauser', 'password': 'jirapassword'} + mock_jira.return_value = mock.Mock() + mock_jira.return_value.search_issues.return_value = [] + mock_jira.return_value.priorities.return_value = [mock_priority] + mock_jira.return_value.fields.return_value = [] + + alert = JiraAlerter(rule) + alert.alert([{'test_term': 'test_value', '@timestamp': '2014-10-31T00:00:00'}]) + + assert 'test_value' not in mock_jira.mock_calls[3][1][0] + + # Issue is still created if search_issues throws an exception + with mock.patch('elastalert.alerters.jira.JIRA') as mock_jira, \ + mock.patch('elastalert.alerters.jira.read_yaml') as mock_open: + mock_open.return_value = {'user': 'jirauser', 'password': 'jirapassword'} + mock_jira.return_value = mock.Mock() + mock_jira.return_value.search_issues.side_effect = JIRAError + mock_jira.return_value.priorities.return_value = [mock_priority] + mock_jira.return_value.fields.return_value = [] + + alert = JiraAlerter(rule) + alert.alert([{'test_term': 'test_value', '@timestamp': '2014-10-31T00:00:00'}]) + + assert mock_jira.mock_calls == expected + user, level, message = caplog.record_tuples[3] + assert 'elastalert' in user + assert logging.ERROR == level + assert 'Error while searching for Jira ticket using jql' in message + + # Only bump after 3d of inactivity + rule['jira_bump_after_inactivity'] = 3 + mock_issue = mock.Mock() + + # Check ticket is bumped if it is updated 4 days ago + mock_issue.fields.updated = str(ts_now() - datetime.timedelta(days=4)) + with mock.patch('elastalert.alerters.jira.JIRA') as mock_jira, \ + mock.patch('elastalert.alerters.jira.read_yaml') as mock_open: + mock_open.return_value = {'user': 'jirauser', 'password': 'jirapassword'} + mock_jira.return_value = mock.Mock() + mock_jira.return_value.search_issues.return_value = [mock_issue] + mock_jira.return_value.priorities.return_value = [mock_priority] + mock_jira.return_value.fields.return_value = [] + + alert = JiraAlerter(rule) + alert.alert([{'test_term': 'test_value', '@timestamp': '2014-10-31T00:00:00'}]) + # Check add_comment is called + assert len(mock_jira.mock_calls) == 5 + assert '().add_comment' == mock_jira.mock_calls[4][0] + + # Check ticket is bumped is not bumped if ticket is updated right now + mock_issue.fields.updated = str(ts_now()) + with mock.patch('elastalert.alerters.jira.JIRA') as mock_jira, \ + mock.patch('elastalert.alerters.jira.read_yaml') as mock_open: + mock_open.return_value = {'user': 'jirauser', 'password': 'jirapassword'} + mock_jira.return_value = mock.Mock() + mock_jira.return_value.search_issues.return_value = [mock_issue] + mock_jira.return_value.priorities.return_value = [mock_priority] + mock_jira.return_value.fields.return_value = [] + + alert = JiraAlerter(rule) + alert.alert([{'test_term': 'test_value', '@timestamp': '2014-10-31T00:00:00'}]) 
+ # Only 4 calls for mock_jira since add_comment is not called + assert len(mock_jira.mock_calls) == 4 + + # Test match resolved values + rule = { + 'name': 'test alert', + 'jira_account_file': 'jirafile', + 'type': mock_rule(), + 'owner': 'the_owner', + 'jira_project': 'testproject', + 'jira_issuetype': 'testtype', + 'jira_server': 'jiraserver', + 'jira_label': 'testlabel', + 'jira_component': 'testcomponent', + 'jira_description': "DESC", + 'jira_watchers': ['testwatcher1', 'testwatcher2'], + 'timestamp_field': '@timestamp', + 'jira_affected_user': "#gmail.the_user", + 'rule_file': '/tmp/foo.yaml' + } + mock_issue = mock.Mock() + mock_issue.fields.updated = str(ts_now() - datetime.timedelta(days=4)) + mock_fields = [ + {'name': 'affected user', 'id': 'affected_user_id', 'schema': {'type': 'string'}} + ] + with mock.patch('elastalert.alerters.jira.JIRA') as mock_jira, \ + mock.patch('elastalert.alerters.jira.read_yaml') as mock_open: + mock_open.return_value = {'user': 'jirauser', 'password': 'jirapassword'} + mock_jira.return_value = mock.Mock() + mock_jira.return_value.search_issues.return_value = [mock_issue] + mock_jira.return_value.fields.return_value = mock_fields + mock_jira.return_value.priorities.return_value = [mock_priority] + alert = JiraAlerter(rule) + alert.alert([{'gmail.the_user': 'jdoe', '@timestamp': '2014-10-31T00:00:00'}]) + assert mock_jira.mock_calls[4][2]['affected_user_id'] == "jdoe" + + +def test_jira_arbitrary_field_support(): + description_txt = "Description stuff goes here like a runbook link." + rule = { + 'name': 'test alert', + 'jira_account_file': 'jirafile', + 'type': mock_rule(), + 'owner': 'the_owner', + 'jira_project': 'testproject', + 'jira_issuetype': 'testtype', + 'jira_server': 'jiraserver', + 'jira_label': 'testlabel', + 'jira_component': 'testcomponent', + 'jira_description': description_txt, + 'jira_assignee': 'testuser', + 'jira_watchers': ['testwatcher1', 'testwatcher2'], + 'jira_arbitrary_reference_string_field': '$owner$', + 'jira_arbitrary_string_field': 'arbitrary_string_value', + 'jira_arbitrary_string_array_field': ['arbitrary_string_value1', 'arbitrary_string_value2'], + 'jira_arbitrary_string_array_field_provided_as_single_value': 'arbitrary_string_value_in_array_field', + 'jira_arbitrary_number_field': 1, + 'jira_arbitrary_number_array_field': [2, 3], + 'jira_arbitrary_number_array_field_provided_as_single_value': 1, + 'jira_arbitrary_complex_field': 'arbitrary_complex_value', + 'jira_arbitrary_complex_array_field': ['arbitrary_complex_value1', 'arbitrary_complex_value2'], + 'jira_arbitrary_complex_array_field_provided_as_single_value': 'arbitrary_complex_value_in_array_field', + 'timestamp_field': '@timestamp', + 'alert_subject': 'Issue {0} occurred at {1}', + 'alert_subject_args': ['test_term', '@timestamp'], + 'rule_file': '/tmp/foo.yaml' + } + + mock_priority = mock.MagicMock(id='5') + + mock_fields = [ + {'name': 'arbitrary reference string field', 'id': 'arbitrary_reference_string_field', 'schema': {'type': 'string'}}, + {'name': 'arbitrary string field', 'id': 'arbitrary_string_field', 'schema': {'type': 'string'}}, + {'name': 'arbitrary string array field', 'id': 'arbitrary_string_array_field', 'schema': {'type': 'array', 'items': 'string'}}, + { + 'name': 'arbitrary string array field provided as single value', + 'id': 'arbitrary_string_array_field_provided_as_single_value', + 'schema': {'type': 'array', 'items': 'string'} + }, + {'name': 'arbitrary number field', 'id': 'arbitrary_number_field', 'schema': {'type': 'number'}}, 
+ {'name': 'arbitrary number array field', 'id': 'arbitrary_number_array_field', 'schema': {'type': 'array', 'items': 'number'}}, + { + 'name': 'arbitrary number array field provided as single value', + 'id': 'arbitrary_number_array_field_provided_as_single_value', + 'schema': {'type': 'array', 'items': 'number'} + }, + {'name': 'arbitrary complex field', 'id': 'arbitrary_complex_field', 'schema': {'type': 'ArbitraryType'}}, + { + 'name': 'arbitrary complex array field', + 'id': 'arbitrary_complex_array_field', + 'schema': {'type': 'array', 'items': 'ArbitraryType'} + }, + { + 'name': 'arbitrary complex array field provided as single value', + 'id': 'arbitrary_complex_array_field_provided_as_single_value', + 'schema': {'type': 'array', 'items': 'ArbitraryType'} + }, + ] + + with mock.patch('elastalert.alerters.jira.JIRA') as mock_jira, \ + mock.patch('elastalert.alerters.jira.read_yaml') as mock_open: + mock_open.return_value = {'user': 'jirauser', 'password': 'jirapassword'} + mock_jira.return_value.priorities.return_value = [mock_priority] + mock_jira.return_value.fields.return_value = mock_fields + alert = JiraAlerter(rule) + alert.alert([{'test_term': 'test_value', '@timestamp': '2014-10-31T00:00:00'}]) + + expected = [ + mock.call('jiraserver', basic_auth=('jirauser', 'jirapassword')), + mock.call().priorities(), + mock.call().fields(), + mock.call().create_issue( + issuetype={'name': 'testtype'}, + project={'key': 'testproject'}, + labels=['testlabel'], + components=[{'name': 'testcomponent'}], + description=mock.ANY, + summary='Issue test_value occurred at 2014-10-31T00:00:00', + arbitrary_reference_string_field='the_owner', + arbitrary_string_field='arbitrary_string_value', + arbitrary_string_array_field=['arbitrary_string_value1', 'arbitrary_string_value2'], + arbitrary_string_array_field_provided_as_single_value=['arbitrary_string_value_in_array_field'], + arbitrary_number_field=1, + arbitrary_number_array_field=[2, 3], + arbitrary_number_array_field_provided_as_single_value=[1], + arbitrary_complex_field={'name': 'arbitrary_complex_value'}, + arbitrary_complex_array_field=[{'name': 'arbitrary_complex_value1'}, {'name': 'arbitrary_complex_value2'}], + arbitrary_complex_array_field_provided_as_single_value=[{'name': 'arbitrary_complex_value_in_array_field'}], + ), + mock.call().assign_issue(mock.ANY, 'testuser'), + mock.call().add_watcher(mock.ANY, 'testwatcher1'), + mock.call().add_watcher(mock.ANY, 'testwatcher2'), + ] + + # We don't care about additional calls to mock_jira, such as __str__ + assert mock_jira.mock_calls[:7] == expected + assert mock_jira.mock_calls[3][2]['description'].startswith(description_txt) + + # Reference an arbitrary string field that is not defined on the Jira server + rule['jira_nonexistent_field'] = 'nonexistent field value' + + with mock.patch('elastalert.alerters.jira.JIRA') as mock_jira, \ + mock.patch('elastalert.alerters.jira.read_yaml') as mock_open: + mock_open.return_value = {'user': 'jirauser', 'password': 'jirapassword'} + mock_jira.return_value.priorities.return_value = [mock_priority] + mock_jira.return_value.fields.return_value = mock_fields + + with pytest.raises(Exception) as exception: + alert = JiraAlerter(rule) + alert.alert([{'test_term': 'test_value', '@timestamp': '2014-10-31T00:00:00'}]) + assert "Could not find a definition for the jira field 'nonexistent field'" in str(exception) + + del rule['jira_nonexistent_field'] + + # Reference a watcher that does not exist + rule['jira_watchers'] = 'invalid_watcher' + + with 
mock.patch('elastalert.alerters.jira.JIRA') as mock_jira, \ + mock.patch('elastalert.alerters.jira.read_yaml') as mock_open: + mock_open.return_value = {'user': 'jirauser', 'password': 'jirapassword'} + mock_jira.return_value.priorities.return_value = [mock_priority] + mock_jira.return_value.fields.return_value = mock_fields + + # Cause add_watcher to raise, which most likely means that the user did not exist + mock_jira.return_value.add_watcher.side_effect = Exception() + + with pytest.raises(Exception) as exception: + alert = JiraAlerter(rule) + alert.alert([{'test_term': 'test_value', '@timestamp': '2014-10-31T00:00:00'}]) + assert "Exception encountered when trying to add 'invalid_watcher' as a watcher. Does the user exist?" in str(exception) + + +def test_jira_getinfo(): + description_txt = "Description stuff goes here like a runbook link." + rule = { + 'name': 'test alert', + 'jira_account_file': 'jirafile', + 'type': mock_rule(), + 'jira_project': 'testproject', + 'jira_priority': 0, + 'jira_issuetype': 'testtype', + 'jira_server': 'jiraserver', + 'jira_label': 'testlabel', + 'jira_component': 'testcomponent', + 'jira_description': description_txt, + 'jira_watchers': ['testwatcher1', 'testwatcher2'], + 'timestamp_field': '@timestamp', + 'alert_subject': 'Issue {0} occurred at {1}', + 'alert_subject_args': ['test_term', '@timestamp'], + 'rule_file': '/tmp/foo.yaml' + } + + mock_priority = mock.Mock(id='5') + + with mock.patch('elastalert.alerters.jira.JIRA') as mock_jira, \ + mock.patch('elastalert.alerters.jira.read_yaml') as mock_open: + mock_open.return_value = {'user': 'jirauser', 'password': 'jirapassword'} + mock_jira.return_value.priorities.return_value = [mock_priority] + mock_jira.return_value.fields.return_value = [] + alert = JiraAlerter(rule) + + expected_data = { + 'type': 'jira' + } + actual_data = alert.get_info() + assert expected_data == actual_data + + +def test_jira_set_priority(caplog): + description_txt = "Description stuff goes here like a runbook link." + rule = { + 'name': 'test alert', + 'jira_account_file': 'jirafile', + 'type': mock_rule(), + 'jira_project': 'testproject', + 'jira_priority': 0, + 'jira_issuetype': 'testtype', + 'jira_server': 'jiraserver', + 'jira_description': description_txt, + 'jira_assignee': 'testuser', + 'timestamp_field': '@timestamp', + 'alert_subject': 'Issue {0} occurred at {1}', + 'alert_subject_args': ['test_term', '@timestamp'], + 'rule_file': '/tmp/foo.yaml' + } + with mock.patch('elastalert.alerters.jira.JIRA'), \ + mock.patch('elastalert.alerters.jira.read_yaml') as mock_open: + mock_open.return_value = {'user': 'jirauser', 'password': 'jirapassword'} + alert = JiraAlerter(rule) + alert.set_priority + + assert ('elastalert', logging.ERROR, + 'Priority 0 not found. Valid priorities are []') == caplog.record_tuples[0] + assert ('elastalert', logging.ERROR, + 'Priority 0 not found. 
Valid priorities are []') == caplog.record_tuples[1] + + +def test_jira_auth_token(caplog): + description_txt = "Test authentication via apitoken" + rule = { + 'name': 'test alert', + 'jira_account_file': 'jirafile', + 'type': mock_rule(), + 'jira_project': 'testproject', + 'jira_priority': 0, + 'jira_issuetype': 'testtype', + 'jira_server': 'jiraserver', + 'jira_description': description_txt, + 'jira_assignee': 'testuser', + 'timestamp_field': '@timestamp', + 'alert_subject': 'Issue {0} occurred at {1}', + 'alert_subject_args': ['test_term', '@timestamp'], + 'rule_file': '/tmp/foo.yaml' + } + with mock.patch('elastalert.alerters.jira.JIRA') as mock_jira, \ + mock.patch('elastalert.alerters.jira.read_yaml') as mock_open: + mock_open.return_value = {'apikey': 'theapikey'} + alert = JiraAlerter(rule) + alert.set_priority + expected = [ + mock.call('jiraserver', token_auth=('theapikey')), + ] + # we only want to test authentication via apikey, the rest we don't care of + assert mock_jira.mock_calls[:1] == expected diff --git a/tests/alerters/line_test.py b/tests/alerters/line_test.py new file mode 100644 index 000000000..8e4ba3ccf --- /dev/null +++ b/tests/alerters/line_test.py @@ -0,0 +1,196 @@ +import logging +import pytest + +from unittest import mock + +from requests import RequestException + +from elastalert.alerters.line import LineNotifyAlerter +from elastalert.loaders import FileRulesLoader +from elastalert.util import EAException + + +def test_line_notify(caplog): + caplog.set_level(logging.INFO) + rule = { + 'name': 'Test LineNotify Rule', + 'type': 'any', + 'linenotify_access_token': 'xxxxx', + 'alert': [] + } + rules_loader = FileRulesLoader({}) + rules_loader.load_modules(rule) + alert = LineNotifyAlerter(rule) + match = { + '@timestamp': '2021-01-01T00:00:00', + 'somefield': 'foobarbaz' + } + with mock.patch('requests.post') as mock_post_request: + alert.alert([match]) + + expected_data = { + 'message': 'Test LineNotify Rule\n\n@timestamp: 2021-01-01T00:00:00\nsomefield: foobarbaz\n' + } + + mock_post_request.assert_called_once_with( + 'https://notify-api.line.me/api/notify', + data=mock.ANY, + headers={ + 'Content-Type': 'application/x-www-form-urlencoded', + 'Authorization': 'Bearer {}'.format('xxxxx') + } + ) + + actual_data = mock_post_request.call_args_list[0][1]['data'] + assert expected_data == actual_data + assert ('elastalert', logging.INFO, 'Alert sent to Line Notify') == caplog.record_tuples[0] + + +def test_line_notify_ea_exception(): + with pytest.raises(EAException) as ea: + rule = { + 'name': 'Test LineNotify Rule', + 'type': 'any', + 'linenotify_access_token': 'xxxxx', + 'alert': [] + } + rules_loader = FileRulesLoader({}) + rules_loader.load_modules(rule) + alert = LineNotifyAlerter(rule) + match = { + '@timestamp': '2021-01-01T00:00:00', + 'somefield': 'foobarbaz' + } + mock_run = mock.MagicMock(side_effect=RequestException) + with mock.patch('requests.post', mock_run), pytest.raises(RequestException): + alert.alert([match]) + + assert 'Error posting to Line Notify: ' in str(ea) + + +def test_line_getinfo(): + rule = { + 'name': 'Test LineNotify Rule', + 'type': 'any', + 'linenotify_access_token': 'xxxxx', + 'alert': [] + } + rules_loader = FileRulesLoader({}) + rules_loader.load_modules(rule) + alert = LineNotifyAlerter(rule) + + expected_data = { + "type": "linenotify", + "linenotify_access_token": 'xxxxx' + } + actual_data = alert.get_info() + assert expected_data == actual_data + + +@pytest.mark.parametrize('linenotify_access_token, expected_data', [ + ('', 
'Missing required option(s): linenotify_access_token'), + ('xxxxx', + { + "type": "linenotify", + "linenotify_access_token": 'xxxxx' + }), +]) +def test_line_required_error(linenotify_access_token, expected_data): + try: + rule = { + 'name': 'Test LineNotify Rule', + 'type': 'any', + 'alert': [] + } + + if linenotify_access_token: + rule['linenotify_access_token'] = linenotify_access_token + + rules_loader = FileRulesLoader({}) + rules_loader.load_modules(rule) + alert = LineNotifyAlerter(rule) + + actual_data = alert.get_info() + assert expected_data == actual_data + except Exception as ea: + assert expected_data in str(ea) + + +def test_line_notify_maxlength(): + rule = { + 'name': 'Test LineNotify Rule' + ('a' * 1000), + 'type': 'any', + 'linenotify_access_token': 'xxxxx', + 'alert': [] + } + rules_loader = FileRulesLoader({}) + rules_loader.load_modules(rule) + alert = LineNotifyAlerter(rule) + match = { + '@timestamp': '2021-01-01T00:00:00', + 'somefield': 'foobarbaz' + } + with mock.patch('requests.post') as mock_post_request: + alert.alert([match]) + + expected_data = { + 'message': 'Test LineNotify Rule' + ('a' * 880) + + '\n *message was cropped according to line notify embed description limits!*' + } + + mock_post_request.assert_called_once_with( + 'https://notify-api.line.me/api/notify', + data=mock.ANY, + headers={ + 'Content-Type': 'application/x-www-form-urlencoded', + 'Authorization': 'Bearer {}'.format('xxxxx') + } + ) + + actual_data = mock_post_request.call_args_list[0][1]['data'] + assert expected_data == actual_data + + +def test_line_notify_matchs(): + rule = { + 'name': 'Test LineNotify Rule', + 'type': 'any', + 'linenotify_access_token': 'xxxxx', + 'alert': [] + } + rules_loader = FileRulesLoader({}) + rules_loader.load_modules(rule) + alert = LineNotifyAlerter(rule) + match = { + '@timestamp': '2021-01-01T00:00:00', + 'somefield': 'foobarbaz' + } + with mock.patch('requests.post') as mock_post_request: + alert.alert([match, match]) + + expected_data = { + 'message': 'Test LineNotify Rule\n' + '\n' + '@timestamp: 2021-01-01T00:00:00\n' + 'somefield: foobarbaz\n' + '\n' + '----------------------------------------\n' + 'Test LineNotify Rule\n' + '\n' + '@timestamp: 2021-01-01T00:00:00\n' + 'somefield: foobarbaz\n' + '\n' + '----------------------------------------\n' + } + + mock_post_request.assert_called_once_with( + 'https://notify-api.line.me/api/notify', + data=mock.ANY, + headers={ + 'Content-Type': 'application/x-www-form-urlencoded', + 'Authorization': 'Bearer {}'.format('xxxxx') + } + ) + + actual_data = mock_post_request.call_args_list[0][1]['data'] + assert expected_data == actual_data diff --git a/tests/alerters/mattermost_test.py b/tests/alerters/mattermost_test.py new file mode 100644 index 000000000..d8358a18f --- /dev/null +++ b/tests/alerters/mattermost_test.py @@ -0,0 +1,1257 @@ +import json +import logging +import pytest + +from unittest import mock + +from requests import RequestException + +from elastalert.alerters.mattermost import MattermostAlerter +from elastalert.loaders import FileRulesLoader +from elastalert.util import EAException + + +def test_mattermost_proxy(caplog): + caplog.set_level(logging.INFO) + rule = { + 'name': 'Test Mattermost Rule', + 'type': 'any', + 'alert_text_type': 'alert_text_only', + 'mattermost_webhook_url': 'http://xxxxx', + 'mattermost_msg_pretext': 'aaaaa', + 'mattermost_msg_color': 'danger', + 'mattermost_proxy': 'https://proxy.url', + 'alert': [], + 'alert_subject': 'Test Mattermost' + } + rules_loader = 
FileRulesLoader({}) + rules_loader.load_modules(rule) + alert = MattermostAlerter(rule) + match = { + '@timestamp': '2021-01-01T00:00:00', + 'somefield': 'foobarbaz' + } + with mock.patch('requests.post') as mock_post_request: + alert.alert([match]) + + expected_data = { + 'attachments': [ + { + 'fallback': 'Test Mattermost: aaaaa', + 'color': 'danger', + 'title': 'Test Mattermost', + 'pretext': 'aaaaa', + 'fields': [], + 'text': 'Test Mattermost Rule\n\n' + } + ], + 'username': 'elastalert', + 'channel': '', + 'icon_emoji': ':ghost:' + } + + mock_post_request.assert_called_once_with( + rule['mattermost_webhook_url'], + data=mock.ANY, + headers={'content-type': 'application/json'}, + verify=True, + proxies={'https': 'https://proxy.url'} + ) + + actual_data = json.loads(mock_post_request.call_args_list[0][1]['data']) + assert expected_data == actual_data + assert ('elastalert', logging.INFO, 'Alert sent to Mattermost') == caplog.record_tuples[0] + + +def test_mattermost_alert_text_only(): + rule = { + 'name': 'Test Mattermost Rule', + 'type': 'any', + 'alert_text_type': 'alert_text_only', + 'mattermost_webhook_url': 'http://xxxxx', + 'mattermost_msg_pretext': 'aaaaa', + 'mattermost_msg_color': 'danger', + 'alert': [], + 'alert_subject': 'Test Mattermost' + } + rules_loader = FileRulesLoader({}) + rules_loader.load_modules(rule) + alert = MattermostAlerter(rule) + match = { + '@timestamp': '2021-01-01T00:00:00', + 'somefield': 'foobarbaz' + } + with mock.patch('requests.post') as mock_post_request: + alert.alert([match]) + + expected_data = { + 'attachments': [ + { + 'fallback': 'Test Mattermost: aaaaa', + 'color': 'danger', + 'title': 'Test Mattermost', + 'pretext': 'aaaaa', + 'fields': [], + 'text': 'Test Mattermost Rule\n\n' + } + ], + 'username': 'elastalert', + 'channel': '', + 'icon_emoji': ':ghost:' + } + + mock_post_request.assert_called_once_with( + rule['mattermost_webhook_url'], + data=mock.ANY, + headers={'content-type': 'application/json'}, + verify=True, + proxies=None + ) + + actual_data = json.loads(mock_post_request.call_args_list[0][1]['data']) + assert expected_data == actual_data + + +def test_mattermost_not_alert_text_only(): + rule = { + 'name': 'Test Mattermost Rule', + 'type': 'any', + 'alert_text_type': 'exclude_fields', + 'mattermost_webhook_url': 'http://xxxxx', + 'mattermost_msg_pretext': 'aaaaa', + 'mattermost_msg_color': 'danger', + 'alert': [], + 'alert_subject': 'Test Mattermost' + } + rules_loader = FileRulesLoader({}) + rules_loader.load_modules(rule) + alert = MattermostAlerter(rule) + match = { + '@timestamp': '2021-01-01T00:00:00', + 'somefield': 'foobarbaz' + } + with mock.patch('requests.post') as mock_post_request: + alert.alert([match]) + + expected_data = { + 'attachments': [ + { + 'fallback': 'Test Mattermost: aaaaa', + 'color': 'danger', + 'title': 'Test Mattermost', + 'pretext': 'aaaaa', + 'fields': [] + } + ], + 'text': 'Test Mattermost Rule\n\n', + 'username': 'elastalert', + 'channel': '', + 'icon_emoji': ':ghost:' + } + + mock_post_request.assert_called_once_with( + rule['mattermost_webhook_url'], + data=mock.ANY, + headers={'content-type': 'application/json'}, + verify=True, + proxies=None + ) + + actual_data = json.loads(mock_post_request.call_args_list[0][1]['data']) + assert expected_data == actual_data + + +def test_mattermost_msg_fields(): + rule = { + 'name': 'Test Mattermost Rule', + 'type': 'any', + 'alert_text_type': 'alert_text_only', + 'mattermost_webhook_url': 'http://xxxxx', + 'mattermost_msg_pretext': 'aaaaa', + 
'mattermost_msg_color': 'danger', + 'mattermost_msg_fields': [ + { + 'title': 'Stack', + 'value': "{0} {1}", + 'short': False, + 'args': ["type", "msg.status_code"] + }, + { + 'title': 'Name', + 'value': 'static field', + 'short': False + } + ], + 'alert': [], + 'alert_subject': 'Test Mattermost' + } + rules_loader = FileRulesLoader({}) + rules_loader.load_modules(rule) + alert = MattermostAlerter(rule) + match = { + '@timestamp': '2021-01-01T00:00:00', + 'somefield': 'foobarbaz' + } + with mock.patch('requests.post') as mock_post_request: + alert.alert([match]) + + expected_data = { + 'attachments': [ + { + 'fallback': 'Test Mattermost: aaaaa', + 'color': 'danger', + 'title': 'Test Mattermost', + 'pretext': 'aaaaa', + 'fields': [ + {'title': 'Stack', 'value': ' ', 'short': False}, + {'title': 'Name', 'value': 'static field', 'short': False} + ], + 'text': 'Test Mattermost Rule\n\n' + } + ], + 'username': 'elastalert', + 'channel': '', + 'icon_emoji': ':ghost:' + } + + mock_post_request.assert_called_once_with( + rule['mattermost_webhook_url'], + data=mock.ANY, + headers={'content-type': 'application/json'}, + verify=True, + proxies=None + ) + + actual_data = json.loads(mock_post_request.call_args_list[0][1]['data']) + assert expected_data == actual_data + + +def test_mattermost_icon_url_override(): + rule = { + 'name': 'Test Mattermost Rule', + 'type': 'any', + 'alert_text_type': 'alert_text_only', + 'mattermost_webhook_url': 'http://xxxxx', + 'mattermost_msg_pretext': 'aaaaa', + 'mattermost_msg_color': 'danger', + 'mattermost_icon_url_override': 'http://xxxx/icon.png', + 'alert': [], + 'alert_subject': 'Test Mattermost' + } + rules_loader = FileRulesLoader({}) + rules_loader.load_modules(rule) + alert = MattermostAlerter(rule) + match = { + '@timestamp': '2021-01-01T00:00:00', + 'somefield': 'foobarbaz' + } + with mock.patch('requests.post') as mock_post_request: + alert.alert([match]) + + expected_data = { + 'attachments': [ + { + 'fallback': 'Test Mattermost: aaaaa', + 'color': 'danger', + 'title': 'Test Mattermost', + 'pretext': 'aaaaa', + 'fields': [], + 'text': 'Test Mattermost Rule\n\n' + } + ], + 'username': 'elastalert', + 'channel': '', + 'icon_url': 'http://xxxx/icon.png' + } + + mock_post_request.assert_called_once_with( + rule['mattermost_webhook_url'], + data=mock.ANY, + headers={'content-type': 'application/json'}, + verify=True, + proxies=None + ) + + actual_data = json.loads(mock_post_request.call_args_list[0][1]['data']) + assert expected_data == actual_data + + +def test_mattermost_channel_override(): + rule = { + 'name': 'Test Mattermost Rule', + 'type': 'any', + 'alert_text_type': 'alert_text_only', + 'mattermost_webhook_url': 'http://xxxxx', + 'mattermost_msg_pretext': 'aaaaa', + 'mattermost_msg_color': 'danger', + 'mattermost_channel_override': 'test channel', + 'alert': [], + 'alert_subject': 'Test Mattermost' + } + rules_loader = FileRulesLoader({}) + rules_loader.load_modules(rule) + alert = MattermostAlerter(rule) + match = { + '@timestamp': '2021-01-01T00:00:00', + 'somefield': 'foobarbaz' + } + with mock.patch('requests.post') as mock_post_request: + alert.alert([match]) + + expected_data = { + 'attachments': [ + { + 'fallback': 'Test Mattermost: aaaaa', + 'color': 'danger', + 'title': 'Test Mattermost', + 'pretext': 'aaaaa', + 'fields': [], + 'text': 'Test Mattermost Rule\n\n' + } + ], + 'username': 'elastalert', + 'channel': 'test channel', + 'icon_emoji': ':ghost:' + } + + mock_post_request.assert_called_once_with( + rule['mattermost_webhook_url'], + 
data=mock.ANY, + headers={'content-type': 'application/json'}, + verify=True, + proxies=None + ) + + actual_data = json.loads(mock_post_request.call_args_list[0][1]['data']) + assert expected_data == actual_data + + +def test_mattermost_ignore_ssl_errors(): + rule = { + 'name': 'Test Mattermost Rule', + 'type': 'any', + 'alert_text_type': 'alert_text_only', + 'mattermost_webhook_url': 'http://xxxxx', + 'mattermost_msg_pretext': 'aaaaa', + 'mattermost_msg_color': 'danger', + 'mattermost_ignore_ssl_errors': True, + 'alert': [], + 'alert_subject': 'Test Mattermost' + } + rules_loader = FileRulesLoader({}) + rules_loader.load_modules(rule) + alert = MattermostAlerter(rule) + match = { + '@timestamp': '2021-01-01T00:00:00', + 'somefield': 'foobarbaz' + } + with mock.patch('requests.post') as mock_post_request: + alert.alert([match]) + + expected_data = { + 'attachments': [ + { + 'fallback': 'Test Mattermost: aaaaa', + 'color': 'danger', + 'title': 'Test Mattermost', + 'pretext': 'aaaaa', + 'fields': [], + 'text': 'Test Mattermost Rule\n\n' + } + ], + 'username': 'elastalert', + 'channel': '', + 'icon_emoji': ':ghost:' + } + + mock_post_request.assert_called_once_with( + rule['mattermost_webhook_url'], + data=mock.ANY, + headers={'content-type': 'application/json'}, + verify=False, + proxies=None + ) + + actual_data = json.loads(mock_post_request.call_args_list[0][1]['data']) + assert expected_data == actual_data + + +def test_mattermost_title_link(): + rule = { + 'name': 'Test Mattermost Rule', + 'type': 'any', + 'alert_text_type': 'alert_text_only', + 'mattermost_webhook_url': 'http://xxxxx', + 'mattermost_msg_pretext': 'aaaaa', + 'mattermost_msg_color': 'danger', + 'mattermost_title': 'mattermost.title', + 'mattermost_title_link': 'http://title.url', + 'alert': [], + 'alert_subject': 'Test Mattermost' + } + rules_loader = FileRulesLoader({}) + rules_loader.load_modules(rule) + alert = MattermostAlerter(rule) + match = { + '@timestamp': '2021-01-01T00:00:00', + 'somefield': 'foobarbaz' + } + with mock.patch('requests.post') as mock_post_request: + alert.alert([match]) + + expected_data = { + 'attachments': [ + { + 'fallback': 'Test Mattermost: aaaaa', + 'color': 'danger', + 'pretext': 'aaaaa', + 'fields': [], + 'text': 'Test Mattermost Rule\n\n', + 'title': 'mattermost.title', + 'title_link': 'http://title.url' + } + ], + 'username': 'elastalert', + 'channel': '', + 'icon_emoji': ':ghost:' + } + + mock_post_request.assert_called_once_with( + rule['mattermost_webhook_url'], + data=mock.ANY, + headers={'content-type': 'application/json'}, + verify=True, + proxies=None + ) + + actual_data = json.loads(mock_post_request.call_args_list[0][1]['data']) + assert expected_data == actual_data + + +def test_mattermost_footer(): + rule = { + 'name': 'Test Mattermost Rule', + 'type': 'any', + 'alert_text_type': 'alert_text_only', + 'mattermost_webhook_url': 'http://xxxxx', + 'mattermost_msg_pretext': 'aaaaa', + 'mattermost_msg_color': 'danger', + 'mattermost_footer': 'Mattermost footer', + 'alert': [], + 'alert_subject': 'Test Mattermost' + } + rules_loader = FileRulesLoader({}) + rules_loader.load_modules(rule) + alert = MattermostAlerter(rule) + match = { + '@timestamp': '2021-01-01T00:00:00', + 'somefield': 'foobarbaz' + } + with mock.patch('requests.post') as mock_post_request: + alert.alert([match]) + + expected_data = { + 'attachments': [ + { + 'fallback': 'Test Mattermost: aaaaa', + 'color': 'danger', + 'title': 'Test Mattermost', + 'pretext': 'aaaaa', + 'fields': [], + 'text': 'Test Mattermost 
Rule\n\n', + 'footer': 'Mattermost footer' + } + ], + 'username': 'elastalert', + 'channel': '', + 'icon_emoji': ':ghost:' + } + + mock_post_request.assert_called_once_with( + rule['mattermost_webhook_url'], + data=mock.ANY, + headers={'content-type': 'application/json'}, + verify=True, + proxies=None + ) + + actual_data = json.loads(mock_post_request.call_args_list[0][1]['data']) + assert expected_data == actual_data + + +def test_mattermost_footer_icon(): + rule = { + 'name': 'Test Mattermost Rule', + 'type': 'any', + 'alert_text_type': 'alert_text_only', + 'mattermost_webhook_url': 'http://xxxxx', + 'mattermost_msg_pretext': 'aaaaa', + 'mattermost_msg_color': 'danger', + 'mattermost_footer_icon': 'http://icon.url', + 'alert': [], + 'alert_subject': 'Test Mattermost' + } + rules_loader = FileRulesLoader({}) + rules_loader.load_modules(rule) + alert = MattermostAlerter(rule) + match = { + '@timestamp': '2021-01-01T00:00:00', + 'somefield': 'foobarbaz' + } + with mock.patch('requests.post') as mock_post_request: + alert.alert([match]) + + expected_data = { + 'attachments': [ + { + 'fallback': 'Test Mattermost: aaaaa', + 'color': 'danger', + 'title': 'Test Mattermost', + 'pretext': 'aaaaa', + 'fields': [], + 'text': 'Test Mattermost Rule\n\n', + 'footer_icon': 'http://icon.url' + } + ], + 'username': 'elastalert', + 'channel': '', + 'icon_emoji': ':ghost:' + } + + mock_post_request.assert_called_once_with( + rule['mattermost_webhook_url'], + data=mock.ANY, + headers={'content-type': 'application/json'}, + verify=True, + proxies=None + ) + + actual_data = json.loads(mock_post_request.call_args_list[0][1]['data']) + assert expected_data == actual_data + + +def test_mattermost_image_url(): + rule = { + 'name': 'Test Mattermost Rule', + 'type': 'any', + 'alert_text_type': 'alert_text_only', + 'mattermost_webhook_url': 'http://xxxxx', + 'mattermost_msg_pretext': 'aaaaa', + 'mattermost_msg_color': 'danger', + 'mattermost_image_url': 'http://image.url', + 'alert': [], + 'alert_subject': 'Test Mattermost' + } + rules_loader = FileRulesLoader({}) + rules_loader.load_modules(rule) + alert = MattermostAlerter(rule) + match = { + '@timestamp': '2021-01-01T00:00:00', + 'somefield': 'foobarbaz' + } + with mock.patch('requests.post') as mock_post_request: + alert.alert([match]) + + expected_data = { + 'attachments': [ + { + 'fallback': 'Test Mattermost: aaaaa', + 'color': 'danger', + 'title': 'Test Mattermost', + 'pretext': 'aaaaa', + 'fields': [], + 'text': 'Test Mattermost Rule\n\n', + 'image_url': 'http://image.url' + } + ], + 'username': 'elastalert', + 'channel': '', + 'icon_emoji': ':ghost:' + } + + mock_post_request.assert_called_once_with( + rule['mattermost_webhook_url'], + data=mock.ANY, + headers={'content-type': 'application/json'}, + verify=True, + proxies=None + ) + + actual_data = json.loads(mock_post_request.call_args_list[0][1]['data']) + assert expected_data == actual_data + + +def test_mattermost_thumb_url(): + rule = { + 'name': 'Test Mattermost Rule', + 'type': 'any', + 'alert_text_type': 'alert_text_only', + 'mattermost_webhook_url': 'http://xxxxx', + 'mattermost_msg_pretext': 'aaaaa', + 'mattermost_msg_color': 'danger', + 'mattermost_thumb_url': 'http://thumb.url', + 'alert': [], + 'alert_subject': 'Test Mattermost' + } + rules_loader = FileRulesLoader({}) + rules_loader.load_modules(rule) + alert = MattermostAlerter(rule) + match = { + '@timestamp': '2021-01-01T00:00:00', + 'somefield': 'foobarbaz' + } + with mock.patch('requests.post') as mock_post_request: + alert.alert([match]) 
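+ # The mattermost_thumb_url option is expected to appear as 'thumb_url' on the attachment in the payload asserted below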
+ + expected_data = { + 'attachments': [ + { + 'fallback': 'Test Mattermost: aaaaa', + 'color': 'danger', + 'title': 'Test Mattermost', + 'pretext': 'aaaaa', + 'fields': [], + 'text': 'Test Mattermost Rule\n\n', + 'thumb_url': 'http://thumb.url' + } + ], + 'username': 'elastalert', + 'channel': '', + 'icon_emoji': ':ghost:' + } + + mock_post_request.assert_called_once_with( + rule['mattermost_webhook_url'], + data=mock.ANY, + headers={'content-type': 'application/json'}, + verify=True, + proxies=None + ) + + actual_data = json.loads(mock_post_request.call_args_list[0][1]['data']) + assert expected_data == actual_data + + +def test_mattermost_author_name(): + rule = { + 'name': 'Test Mattermost Rule', + 'type': 'any', + 'alert_text_type': 'alert_text_only', + 'mattermost_webhook_url': 'http://xxxxx', + 'mattermost_msg_pretext': 'aaaaa', + 'mattermost_msg_color': 'danger', + 'mattermost_author_name': 'author name', + 'alert': [], + 'alert_subject': 'Test Mattermost' + } + rules_loader = FileRulesLoader({}) + rules_loader.load_modules(rule) + alert = MattermostAlerter(rule) + match = { + '@timestamp': '2021-01-01T00:00:00', + 'somefield': 'foobarbaz' + } + with mock.patch('requests.post') as mock_post_request: + alert.alert([match]) + + expected_data = { + 'attachments': [ + { + 'fallback': 'Test Mattermost: aaaaa', + 'color': 'danger', + 'title': 'Test Mattermost', + 'pretext': 'aaaaa', + 'fields': [], + 'text': 'Test Mattermost Rule\n\n', + 'author_name': 'author name' + } + ], + 'username': 'elastalert', + 'channel': '', + 'icon_emoji': ':ghost:' + } + + mock_post_request.assert_called_once_with( + rule['mattermost_webhook_url'], + data=mock.ANY, + headers={'content-type': 'application/json'}, + verify=True, + proxies=None + ) + + actual_data = json.loads(mock_post_request.call_args_list[0][1]['data']) + assert expected_data == actual_data + + +def test_mattermost_author_link(): + rule = { + 'name': 'Test Mattermost Rule', + 'type': 'any', + 'alert_text_type': 'alert_text_only', + 'mattermost_webhook_url': 'http://xxxxx', + 'mattermost_msg_pretext': 'aaaaa', + 'mattermost_msg_color': 'danger', + 'mattermost_author_link': 'http://author.link.url', + 'alert': [], + 'alert_subject': 'Test Mattermost' + } + rules_loader = FileRulesLoader({}) + rules_loader.load_modules(rule) + alert = MattermostAlerter(rule) + match = { + '@timestamp': '2021-01-01T00:00:00', + 'somefield': 'foobarbaz' + } + with mock.patch('requests.post') as mock_post_request: + alert.alert([match]) + + expected_data = { + 'attachments': [ + { + 'fallback': 'Test Mattermost: aaaaa', + 'color': 'danger', + 'title': 'Test Mattermost', + 'pretext': 'aaaaa', + 'fields': [], + 'text': 'Test Mattermost Rule\n\n', + 'author_link': 'http://author.link.url' + } + ], + 'username': 'elastalert', + 'channel': '', + 'icon_emoji': ':ghost:' + } + + mock_post_request.assert_called_once_with( + rule['mattermost_webhook_url'], + data=mock.ANY, + headers={'content-type': 'application/json'}, + verify=True, + proxies=None + ) + + actual_data = json.loads(mock_post_request.call_args_list[0][1]['data']) + assert expected_data == actual_data + + +def test_mattermost_author_icon(): + rule = { + 'name': 'Test Mattermost Rule', + 'type': 'any', + 'alert_text_type': 'alert_text_only', + 'mattermost_webhook_url': 'http://xxxxx', + 'mattermost_msg_pretext': 'aaaaa', + 'mattermost_msg_color': 'danger', + 'mattermost_author_icon': 'http://author.icon.url', + 'alert': [], + 'alert_subject': 'Test Mattermost' + } + rules_loader = FileRulesLoader({}) + 
rules_loader.load_modules(rule) + alert = MattermostAlerter(rule) + match = { + '@timestamp': '2021-01-01T00:00:00', + 'somefield': 'foobarbaz' + } + with mock.patch('requests.post') as mock_post_request: + alert.alert([match]) + + expected_data = { + 'attachments': [ + { + 'fallback': 'Test Mattermost: aaaaa', + 'color': 'danger', + 'title': 'Test Mattermost', + 'pretext': 'aaaaa', + 'fields': [], + 'text': 'Test Mattermost Rule\n\n', + 'author_icon': 'http://author.icon.url' + } + ], + 'username': 'elastalert', + 'channel': '', + 'icon_emoji': ':ghost:' + } + + mock_post_request.assert_called_once_with( + rule['mattermost_webhook_url'], + data=mock.ANY, + headers={'content-type': 'application/json'}, + verify=True, + proxies=None + ) + + actual_data = json.loads(mock_post_request.call_args_list[0][1]['data']) + assert expected_data == actual_data + + +def test_mattermost_ea_exception(): + with pytest.raises(EAException) as ea: + rule = { + 'name': 'Test Mattermost Rule', + 'type': 'any', + 'alert_text_type': 'alert_text_only', + 'mattermost_webhook_url': 'http://xxxxx', + 'mattermost_msg_pretext': 'aaaaa', + 'mattermost_msg_color': 'danger', + 'mattermost_author_icon': 'http://author.icon.url', + 'alert': [], + 'alert_subject': 'Test Mattermost' + } + rules_loader = FileRulesLoader({}) + rules_loader.load_modules(rule) + alert = MattermostAlerter(rule) + match = { + '@timestamp': '2021-01-01T00:00:00', + 'somefield': 'foobarbaz' + } + mock_run = mock.MagicMock(side_effect=RequestException) + with mock.patch('requests.post', mock_run), pytest.raises(RequestException): + alert.alert([match]) + assert 'Error posting to Mattermost: ' in str(ea) + + +def test_mattermost_get_aggregation_summary_text__maximum_width(): + rule = { + 'name': 'Test Mattermost Rule', + 'type': 'any', + 'alert_text_type': 'alert_text_only', + 'mattermost_webhook_url': 'http://xxxxx', + 'mattermost_msg_pretext': 'aaaaa', + 'mattermost_msg_color': 'danger', + 'mattermost_author_icon': 'http://author.icon.url', + 'alert': [], + 'alert_subject': 'Test Mattermost' + } + rules_loader = FileRulesLoader({}) + rules_loader.load_modules(rule) + alert = MattermostAlerter(rule) + assert 75 == alert.get_aggregation_summary_text__maximum_width() + + +@pytest.mark.parametrize('msg_color, except_msg_color', [ + ('', 'danger'), + ('danger', 'danger'), + ('good', 'good'), + ('warning', 'warning') +]) +def test_mattermost_msg_color(msg_color, except_msg_color): + rule = { + 'name': 'Test Mattermost Rule', + 'type': 'any', + 'alert_text_type': 'alert_text_only', + 'mattermost_webhook_url': 'http://xxxxx', + 'mattermost_msg_pretext': 'aaaaa', + 'mattermost_author_icon': 'http://author.icon.url', + 'alert': [], + 'alert_subject': 'Test Mattermost' + } + + if msg_color: + rule['mattermost_msg_color'] = msg_color + + rules_loader = FileRulesLoader({}) + rules_loader.load_modules(rule) + alert = MattermostAlerter(rule) + match = { + '@timestamp': '2021-01-01T00:00:00', + 'somefield': 'foobarbaz' + } + with mock.patch('requests.post') as mock_post_request: + alert.alert([match]) + + expected_data = { + 'attachments': [ + { + 'fallback': 'Test Mattermost: aaaaa', + 'color': except_msg_color, + 'title': 'Test Mattermost', + 'pretext': 'aaaaa', + 'fields': [], + 'text': 'Test Mattermost Rule\n\n', + 'author_icon': 'http://author.icon.url' + } + ], + 'username': 'elastalert', + 'channel': '', + 'icon_emoji': ':ghost:' + } + + mock_post_request.assert_called_once_with( + rule['mattermost_webhook_url'], + data=mock.ANY, + headers={'content-type': 
'application/json'}, + verify=True, + proxies=None + ) + + actual_data = json.loads(mock_post_request.call_args_list[0][1]['data']) + assert expected_data == actual_data + + +def test_mattermost_getinfo(): + rule = { + 'name': 'Test Mattermost Rule', + 'type': 'any', + 'alert_text_type': 'alert_text_only', + 'mattermost_webhook_url': 'http://xxxxx', + 'alert': [], + 'alert_subject': 'Test Mattermost' + } + rules_loader = FileRulesLoader({}) + rules_loader.load_modules(rule) + alert = MattermostAlerter(rule) + + expected_data = { + 'type': 'mattermost', + 'mattermost_username_override': 'elastalert', + 'mattermost_webhook_url': ['http://xxxxx'] + } + actual_data = alert.get_info() + assert expected_data == actual_data + + +@pytest.mark.parametrize('mattermost_webhook_url, expected_data', [ + ('', 'Missing required option(s): mattermost_webhook_url'), + ('http://xxxxx', + { + 'type': 'mattermost', + 'mattermost_username_override': 'elastalert', + 'mattermost_webhook_url': ['http://xxxxx'] + }), +]) +def test_mattermost_required_error(mattermost_webhook_url, expected_data): + try: + rule = { + 'name': 'Test Mattermost Rule', + 'type': 'any', + 'alert_text_type': 'alert_text_only', + 'alert': [], + 'alert_subject': 'Test Mattermost' + } + + if mattermost_webhook_url: + rule['mattermost_webhook_url'] = mattermost_webhook_url + + rules_loader = FileRulesLoader({}) + rules_loader.load_modules(rule) + alert = MattermostAlerter(rule) + + actual_data = alert.get_info() + assert expected_data == actual_data + except Exception as ea: + assert expected_data in str(ea) + + +def test_mattermost_attach_kibana_discover_url_when_generated(): + rule = { + 'name': 'Test Rule', + 'type': 'any', + 'alert_text_type': 'alert_text_only', + 'mattermost_attach_kibana_discover_url': True, + 'mattermost_webhook_url': 'http://please.dontgohere.mattermost', + 'alert': [] + } + rules_loader = FileRulesLoader({}) + rules_loader.load_modules(rule) + alert = MattermostAlerter(rule) + match = { + '@timestamp': '2021-01-01T00:00:00', + 'kibana_discover_url': 'http://localhost:5601/app/discover#/' + } + with mock.patch('requests.post') as mock_post_request: + alert.alert([match]) + + expected_data = { + 'attachments': [ + { + 'fallback': 'Test Rule: ', + 'color': 'danger', + 'title': 'Test Rule', + 'pretext': '', + 'fields': [], + 'text': 'Test Rule\n\n' + }, + { + 'color': '#ec4b98', + 'title': 'Discover in Kibana', + 'title_link': 'http://localhost:5601/app/discover#/' + } + ], + 'username': 'elastalert', + 'channel': '', + 'icon_emoji': ':ghost:' + } + mock_post_request.assert_called_once_with( + rule['mattermost_webhook_url'], + data=mock.ANY, + headers={'content-type': 'application/json'}, + verify=True, + proxies=None + ) + + actual_data = json.loads(mock_post_request.call_args_list[0][1]['data']) + assert expected_data == actual_data + + +def test_mattermost_attach_kibana_discover_url_when_not_generated(): + rule = { + 'name': 'Test Rule', + 'type': 'any', + 'alert_text_type': 'alert_text_only', + 'mattermost_attach_kibana_discover_url': True, + 'mattermost_webhook_url': 'http://please.dontgohere.mattermost', + 'alert': [] + } + rules_loader = FileRulesLoader({}) + rules_loader.load_modules(rule) + alert = MattermostAlerter(rule) + match = { + '@timestamp': '2021-01-01T00:00:00' + } + with mock.patch('requests.post') as mock_post_request: + alert.alert([match]) + + expected_data = { + 'attachments': [ + { + 'fallback': 'Test Rule: ', + 'color': 'danger', + 'title': 'Test Rule', + 'pretext': '', + 'fields': [], + 'text': 
'Test Rule\n\n' + } + ], + 'username': 'elastalert', + 'channel': '', + 'icon_emoji': ':ghost:' + } + mock_post_request.assert_called_once_with( + rule['mattermost_webhook_url'], + data=mock.ANY, + headers={'content-type': 'application/json'}, + verify=True, + proxies=None + ) + + actual_data = json.loads(mock_post_request.call_args_list[0][1]['data']) + assert expected_data == actual_data + + +def test_mattermost_kibana_discover_title(): + rule = { + 'name': 'Test Rule', + 'type': 'any', + 'alert_text_type': 'alert_text_only', + 'mattermost_attach_kibana_discover_url': True, + 'mattermost_kibana_discover_title': 'Click to discover in Kibana', + 'mattermost_webhook_url': 'http://please.dontgohere.mattermost', + 'alert': [] + } + rules_loader = FileRulesLoader({}) + rules_loader.load_modules(rule) + alert = MattermostAlerter(rule) + match = { + '@timestamp': '2021-01-01T00:00:00', + 'kibana_discover_url': 'http://localhost:5601/app/discover#/' + } + with mock.patch('requests.post') as mock_post_request: + alert.alert([match]) + + expected_data = { + 'attachments': [ + { + 'fallback': 'Test Rule: ', + 'color': 'danger', + 'title': 'Test Rule', + 'pretext': '', + 'fields': [], + 'text': 'Test Rule\n\n' + }, + { + 'color': '#ec4b98', + 'title': 'Click to discover in Kibana', + 'title_link': 'http://localhost:5601/app/discover#/' + } + ], + 'username': 'elastalert', + 'channel': '', + 'icon_emoji': ':ghost:' + } + mock_post_request.assert_called_once_with( + rule['mattermost_webhook_url'], + data=mock.ANY, + headers={'content-type': 'application/json'}, + verify=True, + proxies=None + ) + + actual_data = json.loads(mock_post_request.call_args_list[0][1]['data']) + assert expected_data == actual_data + + +def test_mattermost_kibana_discover_color(): + rule = { + 'name': 'Test Rule', + 'type': 'any', + 'alert_text_type': 'alert_text_only', + 'mattermost_attach_kibana_discover_url': True, + 'mattermost_kibana_discover_color': 'blue', + 'mattermost_webhook_url': 'http://please.dontgohere.mattermost', + 'alert': [] + } + rules_loader = FileRulesLoader({}) + rules_loader.load_modules(rule) + alert = MattermostAlerter(rule) + match = { + '@timestamp': '2021-01-01T00:00:00', + 'kibana_discover_url': 'http://localhost:5601/app/discover#/' + } + with mock.patch('requests.post') as mock_post_request: + alert.alert([match]) + + expected_data = { + 'attachments': [ + { + 'fallback': 'Test Rule: ', + 'color': 'danger', + 'title': 'Test Rule', + 'pretext': '', + 'fields': [], + 'text': 'Test Rule\n\n' + }, + { + 'color': 'blue', + 'title': 'Discover in Kibana', + 'title_link': 'http://localhost:5601/app/discover#/' + } + ], + 'username': 'elastalert', + 'channel': '', + 'icon_emoji': ':ghost:' + } + mock_post_request.assert_called_once_with( + rule['mattermost_webhook_url'], + data=mock.ANY, + headers={'content-type': 'application/json'}, + verify=True, + proxies=None + ) + + actual_data = json.loads(mock_post_request.call_args_list[0][1]['data']) + assert expected_data == actual_data + + +def test_mattermost_username_override(): + rule = { + 'name': 'Test Mattermost Rule', + 'type': 'any', + 'alert_text_type': 'alert_text_only', + 'mattermost_webhook_url': 'http://xxxxx', + 'mattermost_msg_pretext': 'aaaaa', + 'mattermost_msg_color': 'danger', + 'mattermost_username_override': 'test user', + 'alert': [], + 'alert_subject': 'Test Mattermost' + } + rules_loader = FileRulesLoader({}) + rules_loader.load_modules(rule) + alert = MattermostAlerter(rule) + match = { + '@timestamp': '2021-01-01T00:00:00', + 
'somefield': 'foobarbaz' + } + with mock.patch('requests.post') as mock_post_request: + alert.alert([match]) + + expected_data = { + 'attachments': [ + { + 'fallback': 'Test Mattermost: aaaaa', + 'color': 'danger', + 'title': 'Test Mattermost', + 'pretext': 'aaaaa', + 'fields': [], + 'text': 'Test Mattermost Rule\n\n' + } + ], + 'username': 'test user', + 'channel': '', + 'icon_emoji': ':ghost:' + } + + mock_post_request.assert_called_once_with( + rule['mattermost_webhook_url'], + data=mock.ANY, + headers={'content-type': 'application/json'}, + verify=True, + proxies=None + ) + + actual_data = json.loads(mock_post_request.call_args_list[0][1]['data']) + assert expected_data == actual_data + + +def test_mattermost_uses_list_of_custom_mattermost_channel(): + rule = { + 'name': 'Test Mattermost Rule', + 'type': 'any', + 'alert_text_type': 'alert_text_only', + 'mattermost_webhook_url': 'http://xxxxx', + 'mattermost_msg_pretext': 'aaaaa', + 'mattermost_msg_color': 'danger', + 'mattermost_channel_override': ['#test-alert', '#test-alert2'], + 'alert': [], + 'alert_subject': 'Test Mattermost' + } + rules_loader = FileRulesLoader({}) + rules_loader.load_modules(rule) + alert = MattermostAlerter(rule) + match = { + '@timestamp': '2021-01-01T00:00:00', + 'somefield': 'foobarbaz' + } + with mock.patch('requests.post') as mock_post_request: + alert.alert([match]) + + expected_data1 = { + 'attachments': [ + { + 'fallback': 'Test Mattermost: aaaaa', + 'color': 'danger', + 'title': 'Test Mattermost', + 'pretext': 'aaaaa', + 'fields': [], + 'text': 'Test Mattermost Rule\n\n' + } + ], + 'username': 'elastalert', + 'channel': '#test-alert', + 'icon_emoji': ':ghost:' + } + expected_data2 = { + 'attachments': [ + { + 'fallback': 'Test Mattermost: aaaaa', + 'color': 'danger', + 'title': 'Test Mattermost', + 'pretext': 'aaaaa', + 'fields': [], + 'text': 'Test Mattermost Rule\n\n' + } + ], + 'username': 'elastalert', + 'channel': '#test-alert2', + 'icon_emoji': ':ghost:' + } + mock_post_request.assert_called_with( + rule['mattermost_webhook_url'], + data=mock.ANY, + headers={'content-type': 'application/json'}, + verify=True, + proxies=None + ) + + assert expected_data1 == json.loads(mock_post_request.call_args_list[0][1]['data']) + assert expected_data2 == json.loads(mock_post_request.call_args_list[1][1]['data']) diff --git a/tests/alerters/opsgenie_test.py b/tests/alerters/opsgenie_test.py new file mode 100644 index 000000000..615088baa --- /dev/null +++ b/tests/alerters/opsgenie_test.py @@ -0,0 +1,1189 @@ +import logging +import pytest +import requests + +from unittest import mock + +from requests import RequestException + +from elastalert.alerters.opsgenie import OpsGenieAlerter +from elastalert.alerts import BasicMatchString +from elastalert.util import EAException +from tests.alerts_test import mock_rule + + +def test_opsgenie_basic(caplog): + caplog.set_level(logging.INFO) + rule = { + 'name': 'testOGalert', + 'opsgenie_key': 'ogkey', + 'opsgenie_addr': 'https://api.opsgenie.com/v2/alerts', + 'type': mock_rule() + } + with mock.patch('requests.post') as mock_post: + rep = requests + rep.status_code = 202 + mock_post.return_value = rep + + alert = OpsGenieAlerter(rule) + alert.alert([{'@timestamp': '2014-10-31T00:00:00'}]) + print(("mock_post: {0}".format(mock_post._mock_call_args_list))) + mcal = mock_post._mock_call_args_list + + print(('mcal: {0}'.format(mcal[0]))) + assert mcal[0][0][0] == ('https://api.opsgenie.com/v2/alerts') + + assert mock_post.called + + assert 
mcal[0][1]['headers']['Authorization'] == 'GenieKey ogkey' + # Should be default source 'ElastAlert', because 'opsgenie_source' param isn't set in rule + assert mcal[0][1]['json']['source'] == 'ElastAlert' + user, level, message = caplog.record_tuples[0] + assert "Error response from https://api.opsgenie.com/v2/alerts \n API Response: ', + 'description_args': ['title', 'test.ip', 'host'], + 'description': '{0} from host:{2} to {1}', + 'status': 'New', + 'tags': ['test.port'], + 'tlp': 3, + 'type': 'external'}, + 'hive_connection': {'hive_apikey': '', + 'hive_host': 'https://localhost', + 'hive_port': 9000}, + 'hive_observable_data_mapping': [{'ip': 'test.ip'}, {'autonomous-system': 'test.as_number'}], + 'name': 'test-thehive', + 'tags': ['a', 'b'], + 'type': 'any'} + + rules_loader = FileRulesLoader({}) + rules_loader.load_modules(rule) + alert = HiveAlerter(rule) + match = { + "test": { + "ip": "127.0.0.1", + "port": 9876, + "as_number": 1234 + }, + "@timestamp": "2021-05-09T14:43:30", + } + actual = alert.load_args("description", rule['hive_alert_config']['description'], match) + expected = "Unit test from host: to 127.0.0.1" + assert actual == expected + + +# Test without description_missing_value, missing values a replaced by a default value +def test_load_description_missing_value_default(): + rule = {'alert': [], + 'alert_text': '', + 'alert_text_type': 'alert_text_only', + 'title': 'Unit test', + 'description': 'test', + 'hive_alert_config': {'customFields': [{'name': 'test', + 'type': 'string', + 'value': 2}], + 'follow': True, + 'severity': 2, + 'source': 'elastalert', + 'description_args': ['title', 'test.ip', 'host'], + 'description': '{0} from host:{2} to {1}', + 'status': 'New', + 'tags': ['test.port'], + 'tlp': 3, + 'type': 'external'}, + 'hive_connection': {'hive_apikey': '', + 'hive_host': 'https://localhost', + 'hive_port': 9000}, + 'hive_observable_data_mapping': [{'ip': 'test.ip'}, {'autonomous-system': 'test.as_number'}], + 'name': 'test-thehive', + 'tags': ['a', 'b'], + 'type': 'any'} + + rules_loader = FileRulesLoader({}) + rules_loader.load_modules(rule) + alert = HiveAlerter(rule) + match = { + "test": { + "ip": "127.0.0.1", + "port": 9876, + "as_number": 1234 + }, + "@timestamp": "2021-05-09T14:43:30", + } + actual = alert.load_args("description", rule['hive_alert_config']['description'], match) + expected = "Unit test from host: to 127.0.0.1" + assert actual == expected + + +def test_load_observable_artifacts(): + rule = {'alert': [], + 'alert_text': '', + 'alert_text_type': 'alert_text_only', + 'title': 'Unit test', + 'description': 'test', + 'hive_alert_config': {'customFields': [{'name': 'test', + 'type': 'string', + 'value': 2}], + 'follow': True, + 'severity': 2, + 'source': 'elastalert', + 'description_args': ['title', 'test.ip', 'host'], + 'description': '{0} from host:{2} to {1}', + 'status': 'New', + 'tags': ['test.port'], + 'tlp': 3, + 'type': 'external'}, + 'hive_connection': {'hive_apikey': '', + 'hive_host': 'https://localhost', + 'hive_port': 9000}, + 'hive_observable_data_mapping': [ + {'ip': 'test.ip', 'tlp': 1, 'tags': ['ip', 'test'], 'message': 'test tags'}, + {'autonomous-system': 'test.as_number', 'tlp': 2, 'tags': ['autonomous']}, + {'username': 'user.name', 'tlp': 1}, {'filename': 'process.name'}, {'ip': 'destination.ip'} + ], + 'name': 'test-thehive', + 'tags': ['a', 'b'], + 'type': 'any'} + rules_loader = FileRulesLoader({}) + rules_loader.load_modules(rule) + alert = HiveAlerter(rule) + match = { + "test": { + "ip": "127.0.0.1", + "port": 
9876, + "as_number": 1234 + }, + "user": { + "name": "toto" + }, + "process": { + "name": "mstc.exe" + }, + "@timestamp": "2021-05-09T14:43:30", + } + actual = alert.load_observable_artifacts(match) + expected = [ + {'tlp': 1, 'tags': ['ip', 'test'], 'message': 'test tags', 'dataType': 'ip', 'data': '127.0.0.1'}, + {'tlp': 2, 'tags': ['autonomous'], 'message': None, 'dataType': 'autonomous-system', 'data': '1234'}, + {'tlp': 1, 'tags': [], 'message': None, 'dataType': 'username', 'data': 'toto'}, + {'tlp': 2, 'tags': [], 'message': None, 'dataType': 'filename', 'data': 'mstc.exe'} + ] + assert actual == expected diff --git a/tests/alerters/twilio_test.py b/tests/alerters/twilio_test.py new file mode 100644 index 000000000..615ec38d3 --- /dev/null +++ b/tests/alerters/twilio_test.py @@ -0,0 +1,206 @@ +import logging +import pytest + +from unittest import mock + +from elastalert.alerters.twilio import TwilioAlerter +from elastalert.loaders import FileRulesLoader +from elastalert.util import EAException + + +def test_twilio_getinfo(): + rule = { + 'name': 'Test Rule', + 'type': 'any', + 'alert_subject': 'Cool subject', + 'twilio_account_sid': 'xxxxx1', + 'twilio_auth_token': 'xxxxx2', + 'twilio_to_number': 'xxxxx3', + 'twilio_from_number': 'xxxxx4', + 'alert': [] + } + rules_loader = FileRulesLoader({}) + rules_loader.load_modules(rule) + alert = TwilioAlerter(rule) + + expected_data = { + 'type': 'twilio', + 'twilio_client_name': 'xxxxx4' + } + actual_data = alert.get_info() + assert expected_data == actual_data + + +@pytest.mark.parametrize('twilio_account_sid, twilio_auth_token, twilio_to_number, expected_data', [ + ('', '', '', 'Missing required option(s): twilio_account_sid, twilio_auth_token, twilio_to_number'), + ('xxxx1', '', '', 'Missing required option(s): twilio_account_sid, twilio_auth_token, twilio_to_number'), + ('', 'xxxx2', '', 'Missing required option(s): twilio_account_sid, twilio_auth_token, twilio_to_number'), + ('', '', 'INFO', 'Missing required option(s): twilio_account_sid, twilio_auth_token, twilio_to_number'), + ('xxxx1', 'xxxx2', '', 'Missing required option(s): twilio_account_sid, twilio_auth_token, twilio_to_number'), + ('xxxx1', '', 'INFO', 'Missing required option(s): twilio_account_sid, twilio_auth_token, twilio_to_number'), + ('', 'xxxx2', 'INFO', 'Missing required option(s): twilio_account_sid, twilio_auth_token, twilio_to_number'), + ('xxxx1', 'xxxx2', 'INFO', + { + 'type': 'twilio', + 'twilio_client_name': 'xxxxx4' + }), +]) +def test_twilio_required_error(twilio_account_sid, twilio_auth_token, twilio_to_number, expected_data): + try: + rule = { + 'name': 'Test Rule', + 'type': 'any', + 'alert_subject': 'Cool subject', + 'twilio_from_number': 'xxxxx4', + 'alert': [] + } + + if twilio_account_sid: + rule['twilio_account_sid'] = twilio_account_sid + + if twilio_auth_token: + rule['twilio_auth_token'] = twilio_auth_token + + if twilio_to_number: + rule['twilio_to_number'] = twilio_to_number + + rules_loader = FileRulesLoader({}) + rules_loader.load_modules(rule) + alert = TwilioAlerter(rule) + + actual_data = alert.get_info() + assert expected_data == actual_data + except Exception as ea: + assert expected_data in str(ea) + + +@pytest.mark.parametrize('twilio_use_copilot, twilio_message_service_sid, twilio_from_number, expected_data', [ + (True, None, 'test', True), + (False, 'test', None, True), +]) +def test_twilio_use_copilot(twilio_use_copilot, twilio_message_service_sid, twilio_from_number, expected_data): + try: + rule = { + 'name': 'Test Rule', + 'type': 
'any', + 'alert_subject': 'Cool subject', + 'twilio_account_sid': 'xxxxx1', + 'twilio_auth_token': 'xxxxx2', + 'twilio_to_number': 'xxxxx3', + 'alert': [] + } + + if twilio_use_copilot: + rule['twilio_use_copilot'] = twilio_use_copilot + + if twilio_message_service_sid: + rule['twilio_message_service_sid'] = twilio_message_service_sid + + if twilio_from_number: + rule['twilio_from_number'] = twilio_from_number + + rules_loader = FileRulesLoader({}) + rules_loader.load_modules(rule) + alert = TwilioAlerter(rule) + + match = { + '@timestamp': '2021-01-01T00:00:00', + 'somefield': 'foobarbaz' + } + alert.alert([match]) + except EAException: + assert expected_data + + +def test_twilio(caplog): + caplog.set_level(logging.INFO) + rule = { + 'name': 'Test Rule', + 'type': 'any', + 'alert_subject': 'Cool subject', + 'twilio_account_sid': 'xxxxx1', + 'twilio_auth_token': 'xxxxx2', + 'twilio_to_number': 'xxxxx3', + 'twilio_from_number': 'xxxxx4', + 'alert': [] + } + match = { + '@timestamp': '2021-01-10T00:00:00', + 'sender_ip': '1.1.1.1', + 'hostname': 'aProbe' + } + rules_loader = FileRulesLoader({}) + rules_loader.load_modules(rule) + + with mock.patch('elastalert.alerters.twilio.TwilioClient.messages') as mock_twilio: + mock_twilio.messages.create() + mock_twilio.return_value = 200 + alert = TwilioAlerter(rule) + alert.alert([match]) + expected = [ + mock.call.messages.create(), + mock.call.create(body='Test Rule', from_='xxxxx4', to='xxxxx3'), + ] + + assert mock_twilio.mock_calls == expected + assert ('elastalert', logging.INFO, 'Trigger sent to Twilio') == caplog.record_tuples[0] + + +def test_twilio_copilot(caplog): + caplog.set_level(logging.INFO) + rule = { + 'name': 'Test Rule', + 'type': 'any', + 'alert_subject': 'Cool subject', + 'twilio_account_sid': 'xxxxx1', + 'twilio_auth_token': 'xxxxx2', + 'twilio_to_number': 'xxxxx3', + 'twilio_message_service_sid': 'xxxxx5', + 'twilio_use_copilot': True, + 'alert': [] + } + match = { + '@timestamp': '2021-01-10T00:00:00', + 'sender_ip': '1.1.1.1', + 'hostname': 'aProbe' + } + rules_loader = FileRulesLoader({}) + rules_loader.load_modules(rule) + + with mock.patch('elastalert.alerters.twilio.TwilioClient.messages') as mock_twilio: + mock_twilio.messages.create() + mock_twilio.return_value = 200 + alert = TwilioAlerter(rule) + alert.alert([match]) + expected = [ + mock.call.messages.create(), + mock.call.create(body='Test Rule', messaging_service_sid='xxxxx5', to='xxxxx3'), + ] + + assert mock_twilio.mock_calls == expected + assert ('elastalert', logging.INFO, 'Trigger sent to Twilio') == caplog.record_tuples[0] + + +def test_twilio_rest_exception(): + with pytest.raises(EAException) as ea: + rule = { + 'name': 'Test Rule', + 'type': 'any', + 'alert_subject': 'Cool subject', + 'twilio_account_sid': 'xxxxx1', + 'twilio_auth_token': 'xxxxx2', + 'twilio_to_number': 'xxxxx3', + 'twilio_from_number': 'xxxxx4', + 'alert': [] + } + match = { + '@timestamp': '2021-01-10T00:00:00', + 'sender_ip': '1.1.1.1', + 'hostname': 'aProbe' + } + rules_loader = FileRulesLoader({}) + rules_loader.load_modules(rule) + alert = TwilioAlerter(rule) + alert.alert([match]) + + assert 'Error posting to twilio: ' in str(ea) diff --git a/tests/alerters/victorops_test.py b/tests/alerters/victorops_test.py new file mode 100644 index 000000000..3a497b569 --- /dev/null +++ b/tests/alerters/victorops_test.py @@ -0,0 +1,311 @@ +import json +import logging +import pytest + +from unittest import mock + +from requests import RequestException + +from elastalert.alerters.victorops import 
VictorOpsAlerter +from elastalert.loaders import FileRulesLoader +from elastalert.util import EAException + + +def test_victorops(caplog): + caplog.set_level(logging.INFO) + rule = { + 'name': 'Test VictorOps Rule', + 'type': 'any', + 'victorops_api_key': 'xxxx1', + 'victorops_routing_key': 'xxxx2', + 'victorops_message_type': 'INFO', + 'victorops_entity_display_name': 'no entity display name', + 'alert': [] + } + rules_loader = FileRulesLoader({}) + rules_loader.load_modules(rule) + alert = VictorOpsAlerter(rule) + match = { + '@timestamp': '2021-01-01T00:00:00', + 'somefield': 'foobarbaz' + } + with mock.patch('requests.post') as mock_post_request: + alert.alert([match]) + + expected_data = { + 'message_type': rule['victorops_message_type'], + 'entity_display_name': rule['victorops_entity_display_name'], + 'monitoring_tool': 'ElastAlert', + 'state_message': 'Test VictorOps Rule\n\n@timestamp: 2021-01-01T00:00:00\nsomefield: foobarbaz\n', + '@timestamp': '2021-01-01T00:00:00', + 'somefield': 'foobarbaz' + } + + mock_post_request.assert_called_once_with( + 'https://alert.victorops.com/integrations/generic/20131114/alert/xxxx1/xxxx2', + data=mock.ANY, + headers={'content-type': 'application/json'}, + proxies=None + ) + + actual_data = json.loads(mock_post_request.call_args_list[0][1]['data']) + assert expected_data == actual_data + assert ('elastalert', logging.INFO, 'Trigger sent to VictorOps') == caplog.record_tuples[0] + + +def test_victorops_no_title(caplog): + caplog.set_level(logging.INFO) + rule = { + 'name': 'Test VictorOps Rule', + 'type': 'any', + 'victorops_api_key': 'xxxx1', + 'victorops_routing_key': 'xxxx2', + 'victorops_message_type': 'INFO', + 'alert': [] + } + rules_loader = FileRulesLoader({}) + rules_loader.load_modules(rule) + alert = VictorOpsAlerter(rule) + match = { + '@timestamp': '2021-01-01T00:00:00', + 'somefield': 'foobarbaz' + } + with mock.patch('requests.post') as mock_post_request: + alert.alert([match]) + + expected_data = { + 'message_type': rule['victorops_message_type'], + 'entity_display_name': rule['name'], + 'monitoring_tool': 'ElastAlert', + 'state_message': 'Test VictorOps Rule\n\n@timestamp: 2021-01-01T00:00:00\nsomefield: foobarbaz\n', + '@timestamp': '2021-01-01T00:00:00', + 'somefield': 'foobarbaz' + } + + mock_post_request.assert_called_once_with( + 'https://alert.victorops.com/integrations/generic/20131114/alert/xxxx1/xxxx2', + data=mock.ANY, + headers={'content-type': 'application/json'}, + proxies=None + ) + + actual_data = json.loads(mock_post_request.call_args_list[0][1]['data']) + assert expected_data == actual_data + assert ('elastalert', logging.INFO, 'Trigger sent to VictorOps') == caplog.record_tuples[0] + + +def test_victorops_proxy(): + rule = { + 'name': 'Test VictorOps Rule', + 'type': 'any', + 'victorops_api_key': 'xxxx1', + 'victorops_routing_key': 'xxxx2', + 'victorops_message_type': 'INFO', + 'victorops_entity_display_name': 'no entity display name', + 'victorops_proxy': 'http://proxy.url', + 'alert': [] + } + rules_loader = FileRulesLoader({}) + rules_loader.load_modules(rule) + alert = VictorOpsAlerter(rule) + match = { + '@timestamp': '2021-01-01T00:00:00', + 'somefield': 'foobarbaz' + } + with mock.patch('requests.post') as mock_post_request: + alert.alert([match]) + + expected_data = { + 'message_type': rule['victorops_message_type'], + 'entity_display_name': rule['victorops_entity_display_name'], + 'monitoring_tool': 'ElastAlert', + 'state_message': 'Test VictorOps Rule\n\n@timestamp: 2021-01-01T00:00:00\nsomefield: 
foobarbaz\n', + '@timestamp': '2021-01-01T00:00:00', + 'somefield': 'foobarbaz' + } + + mock_post_request.assert_called_once_with( + 'https://alert.victorops.com/integrations/generic/20131114/alert/xxxx1/xxxx2', + data=mock.ANY, + headers={'content-type': 'application/json'}, + proxies={'https': 'http://proxy.url'} + ) + + actual_data = json.loads(mock_post_request.call_args_list[0][1]['data']) + assert expected_data == actual_data + + +def test_victorops_ea_exception(): + with pytest.raises(EAException) as ea: + rule = { + 'name': 'Test VictorOps Rule', + 'type': 'any', + 'victorops_api_key': 'xxxx1', + 'victorops_routing_key': 'xxxx2', + 'victorops_message_type': 'INFO', + 'victorops_entity_display_name': 'no entity display name', + 'victorops_proxy': 'http://proxy.url', + 'alert': [] + } + rules_loader = FileRulesLoader({}) + rules_loader.load_modules(rule) + alert = VictorOpsAlerter(rule) + match = { + '@timestamp': '2021-01-01T00:00:00', + 'somefield': 'foobarbaz' + } + mock_run = mock.MagicMock(side_effect=RequestException) + with mock.patch('requests.post', mock_run), pytest.raises(RequestException): + alert.alert([match]) + assert 'Error posting to VictorOps:' in str(ea) + + +def test_victorops_entity_id(): + rule = { + 'name': 'Test VictorOps Rule', + 'type': 'any', + 'victorops_api_key': 'xxxx1', + 'victorops_routing_key': 'xxxx2', + 'victorops_message_type': 'INFO', + 'victorops_entity_display_name': 'no entity display name', + 'victorops_entity_id': '12345', + 'alert': [] + } + rules_loader = FileRulesLoader({}) + rules_loader.load_modules(rule) + alert = VictorOpsAlerter(rule) + match = { + '@timestamp': '2021-01-01T00:00:00', + 'somefield': 'foobarbaz' + } + with mock.patch('requests.post') as mock_post_request: + alert.alert([match]) + + expected_data = { + 'message_type': rule['victorops_message_type'], + 'entity_display_name': rule['victorops_entity_display_name'], + 'monitoring_tool': 'ElastAlert', + 'state_message': 'Test VictorOps Rule\n\n@timestamp: 2021-01-01T00:00:00\nsomefield: foobarbaz\n', + 'entity_id': '12345', + '@timestamp': '2021-01-01T00:00:00', + 'somefield': 'foobarbaz' + } + + mock_post_request.assert_called_once_with( + 'https://alert.victorops.com/integrations/generic/20131114/alert/xxxx1/xxxx2', + data=mock.ANY, + headers={'content-type': 'application/json'}, + proxies=None + ) + + actual_data = json.loads(mock_post_request.call_args_list[0][1]['data']) + assert expected_data == actual_data + + +@pytest.mark.parametrize('message_type, except_message_type', [ + ('INFO', 'INFO'), + ('WARNING', 'WARNING'), + ('ACKNOWLEDGEMENT', 'ACKNOWLEDGEMENT'), + ('CRITICAL', 'CRITICAL'), + ('RECOVERY', 'RECOVERY') +]) +def test_victorops_message_type(message_type, except_message_type): + rule = { + 'name': 'Test VictorOps Rule', + 'type': 'any', + 'victorops_api_key': 'xxxx1', + 'victorops_routing_key': 'xxxx2', + 'victorops_message_type': message_type, + 'victorops_entity_display_name': 'no entity display name', + 'alert': [] + } + rules_loader = FileRulesLoader({}) + rules_loader.load_modules(rule) + alert = VictorOpsAlerter(rule) + match = { + '@timestamp': '2021-01-01T00:00:00', + 'somefield': 'foobarbaz' + } + with mock.patch('requests.post') as mock_post_request: + alert.alert([match]) + + expected_data = { + 'message_type': except_message_type, + 'entity_display_name': rule['victorops_entity_display_name'], + 'monitoring_tool': 'ElastAlert', + 'state_message': 'Test VictorOps Rule\n\n@timestamp: 2021-01-01T00:00:00\nsomefield: foobarbaz\n', + '@timestamp': 
'2021-01-01T00:00:00', + 'somefield': 'foobarbaz' + } + + mock_post_request.assert_called_once_with( + 'https://alert.victorops.com/integrations/generic/20131114/alert/xxxx1/xxxx2', + data=mock.ANY, + headers={'content-type': 'application/json'}, + proxies=None + ) + + actual_data = json.loads(mock_post_request.call_args_list[0][1]['data']) + assert expected_data == actual_data + + +def test_victorops_getinfo(): + rule = { + 'name': 'Test VictorOps Rule', + 'type': 'any', + 'victorops_api_key': 'xxxx1', + 'victorops_routing_key': 'xxxx2', + 'victorops_message_type': 'INFO', + 'alert': [] + } + rules_loader = FileRulesLoader({}) + rules_loader.load_modules(rule) + alert = VictorOpsAlerter(rule) + + expected_data = { + 'type': 'victorops', + 'victorops_routing_key': 'xxxx2' + } + actual_data = alert.get_info() + assert expected_data == actual_data + + +@pytest.mark.parametrize('victorops_api_key, victorops_routing_key, victorops_message_type, expected_data', [ + ('', '', '', 'Missing required option(s): victorops_api_key, victorops_routing_key, victorops_message_type'), + ('xxxx1', '', '', 'Missing required option(s): victorops_api_key, victorops_routing_key, victorops_message_type'), + ('', 'xxxx2', '', 'Missing required option(s): victorops_api_key, victorops_routing_key, victorops_message_type'), + ('', '', 'INFO', 'Missing required option(s): victorops_api_key, victorops_routing_key, victorops_message_type'), + ('xxxx1', 'xxxx2', '', 'Missing required option(s): victorops_api_key, victorops_routing_key, victorops_message_type'), + ('xxxx1', '', 'INFO', 'Missing required option(s): victorops_api_key, victorops_routing_key, victorops_message_type'), + ('', 'xxxx2', 'INFO', 'Missing required option(s): victorops_api_key, victorops_routing_key, victorops_message_type'), + ('xxxx1', 'xxxx2', 'INFO', + { + 'type': 'victorops', + 'victorops_routing_key': 'xxxx2' + }), +]) +def test_victoropst_required_error(victorops_api_key, victorops_routing_key, victorops_message_type, expected_data): + try: + rule = { + 'name': 'Test VictorOps Rule', + 'type': 'any', + 'alert': [] + } + + if victorops_api_key: + rule['victorops_api_key'] = victorops_api_key + + if victorops_routing_key: + rule['victorops_routing_key'] = victorops_routing_key + + if victorops_message_type: + rule['victorops_message_type'] = victorops_message_type + + rules_loader = FileRulesLoader({}) + rules_loader.load_modules(rule) + alert = VictorOpsAlerter(rule) + + actual_data = alert.get_info() + assert expected_data == actual_data + except Exception as ea: + assert expected_data in str(ea) diff --git a/tests/alerters/zabbix_test.py b/tests/alerters/zabbix_test.py new file mode 100644 index 000000000..c01daa9f3 --- /dev/null +++ b/tests/alerters/zabbix_test.py @@ -0,0 +1,171 @@ +import logging +import pytest + +from unittest import mock + +from elastalert.alerters.zabbix import ZabbixAlerter +from elastalert.loaders import FileRulesLoader +from elastalert.util import EAException + + +def test_zabbix_basic(caplog): + caplog.set_level(logging.WARNING) + rule = { + 'name': 'Basic Zabbix test', + 'type': 'any', + 'alert_text_type': 'alert_text_only', + 'alert': [], + 'alert_subject': 'Test Zabbix', + 'zbx_host': 'example.com', + 'zbx_key': 'example-key' + } + rules_loader = FileRulesLoader({}) + rules_loader.load_modules(rule) + alert = ZabbixAlerter(rule) + match = { + '@timestamp': '2021-01-01T00:00:00Z', + 'somefield': 'foobarbaz' + } + with mock.patch('pyzabbix.ZabbixSender.send') as mock_zbx_send: + alert.alert([match]) + + 
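# Expected Zabbix item payload; clock 1609459200 is the Unix epoch value of the match @timestamp (2021-01-01T00:00:00Z) + 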
zabbix_metrics = { + "host": "example.com", + "key": "example-key", + "value": "1", + "clock": 1609459200 + } + alerter_args = mock_zbx_send.call_args.args + assert vars(alerter_args[0][0]) == zabbix_metrics + log_messeage = "Missing zabbix host 'example.com' or host's item 'example-key', alert will be discarded" + assert ('elastalert', logging.WARNING, log_messeage) == caplog.record_tuples[0] + + +@pytest.mark.parametrize('zbx_host_from_field, zbx_host, zbx_key, log_messeage', [ + (True, 'hostname', 'example-key', + "Missing zabbix host 'example.com' or host's item 'example-key', alert will be discarded"), + (True, 'unavailable_field', 'example-key', + "Missing term 'unavailable_field' or host's item 'example-key', alert will be discarded"), + (False, 'hostname', 'example-key', + "Missing zabbix host 'hostname' or host's item 'example-key', alert will be discarded"), + (False, 'unavailable_field', 'example-key', + "Missing zabbix host 'unavailable_field' or host's item 'example-key', alert will be discarded") +]) +def test_zabbix_enhanced(caplog, zbx_host_from_field, zbx_host, zbx_key, log_messeage): + caplog.set_level(logging.WARNING) + rule = { + 'name': 'Enhanced Zabbix test', + 'type': 'any', + 'alert_text_type': 'alert_text_only', + 'alert': [], + 'alert_subject': 'Test Zabbix', + 'zbx_host_from_field': zbx_host_from_field, + 'zbx_host': zbx_host, + 'zbx_key': zbx_key + } + rules_loader = FileRulesLoader({}) + rules_loader.load_modules(rule) + alert = ZabbixAlerter(rule) + match = { + '@timestamp': '2021-01-01T00:00:00Z', + 'somefield': 'foobarbaz', + 'hostname': 'example.com' + } + with mock.patch('pyzabbix.ZabbixSender.send') as mock_zbx_send: + alert.alert([match]) + + hosts = { + (True, 'hostname'): 'example.com', + (True, 'unavailable_field'): 'None', + (False, 'hostname'): 'hostname', + (False, 'unavailable_field'): 'unavailable_field' + } + + zabbix_metrics = { + 'host': hosts[(zbx_host_from_field, zbx_host)], + 'key': 'example-key', + 'value': '1', + 'clock': 1609459200 + } + alerter_args = mock_zbx_send.call_args.args + assert vars(alerter_args[0][0]) == zabbix_metrics + assert ('elastalert', logging.WARNING, log_messeage) == caplog.record_tuples[0] + + +def test_zabbix_getinfo(): + rule = { + 'name': 'Basic Zabbix test', + 'type': 'any', + 'alert_text_type': 'alert_text_only', + 'alert': [], + 'alert_subject': 'Test Zabbix', + 'zbx_host': 'example.com', + 'zbx_key': 'example-key' + } + rules_loader = FileRulesLoader({}) + rules_loader.load_modules(rule) + alert = ZabbixAlerter(rule) + + expected_data = { + 'type': 'zabbix Alerter' + } + actual_data = alert.get_info() + assert expected_data == actual_data + + +@pytest.mark.parametrize('zbx_host, zbx_key, expected_data', [ + ('', '', 'Missing required option(s): zbx_host, zbx_key'), + ('example.com', '', 'Missing required option(s): zbx_host, zbx_key'), + ('', 'example-key', 'Missing required option(s): zbx_host, zbx_key'), + ('example.com', 'example-key', + { + 'type': 'zabbix Alerter' + }) +]) +def test_zabbix_required_error(zbx_host, zbx_key, expected_data): + try: + rule = { + 'name': 'Basic Zabbix test', + 'type': 'any', + 'alert_text_type': 'alert_text_only', + 'alert': [], + 'alert_subject': 'Test Zabbix' + } + + if zbx_host: + rule['zbx_host'] = zbx_host + + if zbx_key: + rule['zbx_key'] = zbx_key + + rules_loader = FileRulesLoader({}) + rules_loader.load_modules(rule) + alert = ZabbixAlerter(rule) + + actual_data = alert.get_info() + assert expected_data == actual_data + except Exception as ea: + assert expected_data 
in str(ea) + + +def test_zabbix_ea_exception(): + with pytest.raises(EAException) as ea: + rule = { + 'name': 'Basic Zabbix test', + 'type': 'any', + 'alert_text_type': 'alert_text_only', + 'alert': [], + 'alert_subject': 'Test Zabbix', + 'zbx_host': 'example.com', + 'zbx_key': 'example-key' + } + match = { + '@timestamp': '2021-01-01T00:00:00Z', + 'somefield': 'foobarbaz' + } + rules_loader = FileRulesLoader({}) + rules_loader.load_modules(rule) + alert = ZabbixAlerter(rule) + alert.alert([match]) + + assert 'Error sending alert to Zabbix: ' in str(ea) diff --git a/tests/alerts_test.py b/tests/alerts_test.py index 5cd61ae75..3cb27f4d5 100644 --- a/tests/alerts_test.py +++ b/tests/alerts_test.py @@ -1,1997 +1,57 @@ # -*- coding: utf-8 -*- -import base64 -import datetime -import json -import subprocess +from jinja2 import Template -import mock -import pytest -from jira.exceptions import JIRAError - -from elastalert.alerts import AlertaAlerter -from elastalert.alerts import Alerter -from elastalert.alerts import BasicMatchString -from elastalert.alerts import CommandAlerter -from elastalert.alerts import EmailAlerter -from elastalert.alerts import HipChatAlerter -from elastalert.alerts import HTTPPostAlerter -from elastalert.alerts import JiraAlerter -from elastalert.alerts import JiraFormattedMatchString -from elastalert.alerts import MsTeamsAlerter -from elastalert.alerts import PagerDutyAlerter -from elastalert.alerts import SlackAlerter -from elastalert.alerts import StrideAlerter -from elastalert.loaders import FileRulesLoader -from elastalert.opsgenie import OpsGenieAlerter -from elastalert.util import ts_add -from elastalert.util import ts_now - - -class mock_rule: - def get_match_str(self, event): - return str(event) - - -def test_basic_match_string(ea): - ea.rules[0]['top_count_keys'] = ['username'] - match = {'@timestamp': '1918-01-17', 'field': 'value', 'top_events_username': {'bob': 10, 'mallory': 5}} - alert_text = str(BasicMatchString(ea.rules[0], match)) - assert 'anytest' in alert_text - assert 'some stuff happened' in alert_text - assert 'username' in alert_text - assert 'bob: 10' in alert_text - assert 'field: value' in alert_text - - # Non serializable objects don't cause errors - match['non-serializable'] = {open: 10} - alert_text = str(BasicMatchString(ea.rules[0], match)) - - # unicode objects dont cause errors - match['snowman'] = '☃' - alert_text = str(BasicMatchString(ea.rules[0], match)) - - # Pretty printed objects - match.pop('non-serializable') - match['object'] = {'this': {'that': [1, 2, "3"]}} - alert_text = str(BasicMatchString(ea.rules[0], match)) - assert '"this": {\n "that": [\n 1,\n 2,\n "3"\n ]\n }' in alert_text - - ea.rules[0]['alert_text'] = 'custom text' - alert_text = str(BasicMatchString(ea.rules[0], match)) - assert 'custom text' in alert_text - assert 'anytest' not in alert_text - - ea.rules[0]['alert_text_type'] = 'alert_text_only' - alert_text = str(BasicMatchString(ea.rules[0], match)) - assert 'custom text' in alert_text - assert 'some stuff happened' not in alert_text - assert 'username' not in alert_text - assert 'field: value' not in alert_text - - ea.rules[0]['alert_text_type'] = 'exclude_fields' - alert_text = str(BasicMatchString(ea.rules[0], match)) - assert 'custom text' in alert_text - assert 'some stuff happened' in alert_text - assert 'username' in alert_text - assert 'field: value' not in alert_text - - -def test_jira_formatted_match_string(ea): - match = {'foo': {'bar': ['one', 2, 'three']}, 'top_events_poof': 'phew'} - alert_text 
= str(JiraFormattedMatchString(ea.rules[0], match)) - tab = 4 * ' ' - expected_alert_text_snippet = '{code}{\n' \ - + tab + '"foo": {\n' \ - + 2 * tab + '"bar": [\n' \ - + 3 * tab + '"one",\n' \ - + 3 * tab + '2,\n' \ - + 3 * tab + '"three"\n' \ - + 2 * tab + ']\n' \ - + tab + '}\n' \ - + '}{code}' - assert expected_alert_text_snippet in alert_text - - -def test_email(): - rule = {'name': 'test alert', 'email': ['testing@test.test', 'test@test.test'], 'from_addr': 'testfrom@test.test', - 'type': mock_rule(), 'timestamp_field': '@timestamp', 'email_reply_to': 'test@example.com', 'owner': 'owner_value', - 'alert_subject': 'Test alert for {0}, owned by {1}', 'alert_subject_args': ['test_term', 'owner'], 'snowman': '☃'} - with mock.patch('elastalert.alerts.SMTP') as mock_smtp: - mock_smtp.return_value = mock.Mock() - - alert = EmailAlerter(rule) - alert.alert([{'test_term': 'test_value'}]) - expected = [mock.call('localhost'), - mock.call().ehlo(), - mock.call().has_extn('STARTTLS'), - mock.call().starttls(certfile=None, keyfile=None), - mock.call().sendmail(mock.ANY, ['testing@test.test', 'test@test.test'], mock.ANY), - mock.call().quit()] - assert mock_smtp.mock_calls == expected - - body = mock_smtp.mock_calls[4][1][2] - - assert 'Reply-To: test@example.com' in body - assert 'To: testing@test.test' in body - assert 'From: testfrom@test.test' in body - assert 'Subject: Test alert for test_value, owned by owner_value' in body - - -def test_email_from_field(): - rule = {'name': 'test alert', 'email': ['testing@test.test'], 'email_add_domain': 'example.com', - 'type': mock_rule(), 'timestamp_field': '@timestamp', 'email_from_field': 'data.user', 'owner': 'owner_value'} - # Found, without @ - with mock.patch('elastalert.alerts.SMTP') as mock_smtp: - mock_smtp.return_value = mock.Mock() - alert = EmailAlerter(rule) - alert.alert([{'data': {'user': 'qlo'}}]) - assert mock_smtp.mock_calls[4][1][1] == ['qlo@example.com'] - - # Found, with @ - rule['email_add_domain'] = '@example.com' - with mock.patch('elastalert.alerts.SMTP') as mock_smtp: - mock_smtp.return_value = mock.Mock() - alert = EmailAlerter(rule) - alert.alert([{'data': {'user': 'qlo'}}]) - assert mock_smtp.mock_calls[4][1][1] == ['qlo@example.com'] - - # Found, list - with mock.patch('elastalert.alerts.SMTP') as mock_smtp: - mock_smtp.return_value = mock.Mock() - alert = EmailAlerter(rule) - alert.alert([{'data': {'user': ['qlo', 'foo']}}]) - assert mock_smtp.mock_calls[4][1][1] == ['qlo@example.com', 'foo@example.com'] - - # Not found - with mock.patch('elastalert.alerts.SMTP') as mock_smtp: - mock_smtp.return_value = mock.Mock() - alert = EmailAlerter(rule) - alert.alert([{'data': {'foo': 'qlo'}}]) - assert mock_smtp.mock_calls[4][1][1] == ['testing@test.test'] - - # Found, wrong type - with mock.patch('elastalert.alerts.SMTP') as mock_smtp: - mock_smtp.return_value = mock.Mock() - alert = EmailAlerter(rule) - alert.alert([{'data': {'user': 17}}]) - assert mock_smtp.mock_calls[4][1][1] == ['testing@test.test'] - - -def test_email_with_unicode_strings(): - rule = {'name': 'test alert', 'email': 'testing@test.test', 'from_addr': 'testfrom@test.test', - 'type': mock_rule(), 'timestamp_field': '@timestamp', 'email_reply_to': 'test@example.com', 'owner': 'owner_value', - 'alert_subject': 'Test alert for {0}, owned by {1}', 'alert_subject_args': ['test_term', 'owner'], 'snowman': '☃'} - with mock.patch('elastalert.alerts.SMTP') as mock_smtp: - mock_smtp.return_value = mock.Mock() - - alert = EmailAlerter(rule) - alert.alert([{'test_term': 
'test_value'}]) - expected = [mock.call('localhost'), - mock.call().ehlo(), - mock.call().has_extn('STARTTLS'), - mock.call().starttls(certfile=None, keyfile=None), - mock.call().sendmail(mock.ANY, ['testing@test.test'], mock.ANY), - mock.call().quit()] - assert mock_smtp.mock_calls == expected - - body = mock_smtp.mock_calls[4][1][2] - - assert 'Reply-To: test@example.com' in body - assert 'To: testing@test.test' in body - assert 'From: testfrom@test.test' in body - assert 'Subject: Test alert for test_value, owned by owner_value' in body - - -def test_email_with_auth(): - rule = {'name': 'test alert', 'email': ['testing@test.test', 'test@test.test'], 'from_addr': 'testfrom@test.test', - 'type': mock_rule(), 'timestamp_field': '@timestamp', 'email_reply_to': 'test@example.com', - 'alert_subject': 'Test alert for {0}', 'alert_subject_args': ['test_term'], 'smtp_auth_file': 'file.txt', - 'rule_file': '/tmp/foo.yaml'} - with mock.patch('elastalert.alerts.SMTP') as mock_smtp: - with mock.patch('elastalert.alerts.yaml_loader') as mock_open: - mock_open.return_value = {'user': 'someone', 'password': 'hunter2'} - mock_smtp.return_value = mock.Mock() - alert = EmailAlerter(rule) - - alert.alert([{'test_term': 'test_value'}]) - expected = [mock.call('localhost'), - mock.call().ehlo(), - mock.call().has_extn('STARTTLS'), - mock.call().starttls(certfile=None, keyfile=None), - mock.call().login('someone', 'hunter2'), - mock.call().sendmail(mock.ANY, ['testing@test.test', 'test@test.test'], mock.ANY), - mock.call().quit()] - assert mock_smtp.mock_calls == expected - - -def test_email_with_cert_key(): - rule = {'name': 'test alert', 'email': ['testing@test.test', 'test@test.test'], 'from_addr': 'testfrom@test.test', - 'type': mock_rule(), 'timestamp_field': '@timestamp', 'email_reply_to': 'test@example.com', - 'alert_subject': 'Test alert for {0}', 'alert_subject_args': ['test_term'], 'smtp_auth_file': 'file.txt', - 'smtp_cert_file': 'dummy/cert.crt', 'smtp_key_file': 'dummy/client.key', 'rule_file': '/tmp/foo.yaml'} - with mock.patch('elastalert.alerts.SMTP') as mock_smtp: - with mock.patch('elastalert.alerts.yaml_loader') as mock_open: - mock_open.return_value = {'user': 'someone', 'password': 'hunter2'} - mock_smtp.return_value = mock.Mock() - alert = EmailAlerter(rule) - - alert.alert([{'test_term': 'test_value'}]) - expected = [mock.call('localhost'), - mock.call().ehlo(), - mock.call().has_extn('STARTTLS'), - mock.call().starttls(certfile='dummy/cert.crt', keyfile='dummy/client.key'), - mock.call().login('someone', 'hunter2'), - mock.call().sendmail(mock.ANY, ['testing@test.test', 'test@test.test'], mock.ANY), - mock.call().quit()] - assert mock_smtp.mock_calls == expected - - -def test_email_with_cc(): - rule = {'name': 'test alert', 'email': ['testing@test.test', 'test@test.test'], 'from_addr': 'testfrom@test.test', - 'type': mock_rule(), 'timestamp_field': '@timestamp', 'email_reply_to': 'test@example.com', - 'cc': 'tester@testing.testing'} - with mock.patch('elastalert.alerts.SMTP') as mock_smtp: - mock_smtp.return_value = mock.Mock() - - alert = EmailAlerter(rule) - alert.alert([{'test_term': 'test_value'}]) - expected = [mock.call('localhost'), - mock.call().ehlo(), - mock.call().has_extn('STARTTLS'), - mock.call().starttls(certfile=None, keyfile=None), - mock.call().sendmail(mock.ANY, ['testing@test.test', 'test@test.test', 'tester@testing.testing'], mock.ANY), - mock.call().quit()] - assert mock_smtp.mock_calls == expected - - body = mock_smtp.mock_calls[4][1][2] - - assert 'Reply-To: 
test@example.com' in body - assert 'To: testing@test.test' in body - assert 'CC: tester@testing.testing' in body - assert 'From: testfrom@test.test' in body - - -def test_email_with_bcc(): - rule = {'name': 'test alert', 'email': ['testing@test.test', 'test@test.test'], 'from_addr': 'testfrom@test.test', - 'type': mock_rule(), 'timestamp_field': '@timestamp', 'email_reply_to': 'test@example.com', - 'bcc': 'tester@testing.testing'} - with mock.patch('elastalert.alerts.SMTP') as mock_smtp: - mock_smtp.return_value = mock.Mock() - - alert = EmailAlerter(rule) - alert.alert([{'test_term': 'test_value'}]) - expected = [mock.call('localhost'), - mock.call().ehlo(), - mock.call().has_extn('STARTTLS'), - mock.call().starttls(certfile=None, keyfile=None), - mock.call().sendmail(mock.ANY, ['testing@test.test', 'test@test.test', 'tester@testing.testing'], mock.ANY), - mock.call().quit()] - assert mock_smtp.mock_calls == expected - - body = mock_smtp.mock_calls[4][1][2] - - assert 'Reply-To: test@example.com' in body - assert 'To: testing@test.test' in body - assert 'CC: tester@testing.testing' not in body - assert 'From: testfrom@test.test' in body - - -def test_email_with_cc_and_bcc(): - rule = {'name': 'test alert', 'email': ['testing@test.test', 'test@test.test'], 'from_addr': 'testfrom@test.test', - 'type': mock_rule(), 'timestamp_field': '@timestamp', 'email_reply_to': 'test@example.com', - 'cc': ['test1@test.com', 'test2@test.com'], 'bcc': 'tester@testing.testing'} - with mock.patch('elastalert.alerts.SMTP') as mock_smtp: - mock_smtp.return_value = mock.Mock() - - alert = EmailAlerter(rule) - alert.alert([{'test_term': 'test_value'}]) - expected = [mock.call('localhost'), - mock.call().ehlo(), - mock.call().has_extn('STARTTLS'), - mock.call().starttls(certfile=None, keyfile=None), - mock.call().sendmail( - mock.ANY, - [ - 'testing@test.test', - 'test@test.test', - 'test1@test.com', - 'test2@test.com', - 'tester@testing.testing' - ], - mock.ANY - ), - mock.call().quit()] - assert mock_smtp.mock_calls == expected - - body = mock_smtp.mock_calls[4][1][2] - - assert 'Reply-To: test@example.com' in body - assert 'To: testing@test.test' in body - assert 'CC: test1@test.com,test2@test.com' in body - assert 'From: testfrom@test.test' in body - - -def test_email_with_args(): - rule = { - 'name': 'test alert', - 'email': ['testing@test.test', 'test@test.test'], - 'from_addr': 'testfrom@test.test', - 'type': mock_rule(), - 'timestamp_field': '@timestamp', - 'email_reply_to': 'test@example.com', - 'alert_subject': 'Test alert for {0} {1}', - 'alert_subject_args': ['test_term', 'test.term'], - 'alert_text': 'Test alert for {0} and {1} {2}', - 'alert_text_args': ['test_arg1', 'test_arg2', 'test.arg3'], - 'alert_missing_value': '' - } - with mock.patch('elastalert.alerts.SMTP') as mock_smtp: - mock_smtp.return_value = mock.Mock() - - alert = EmailAlerter(rule) - alert.alert([{'test_term': 'test_value', 'test_arg1': 'testing', 'test': {'term': ':)', 'arg3': '☃'}}]) - expected = [mock.call('localhost'), - mock.call().ehlo(), - mock.call().has_extn('STARTTLS'), - mock.call().starttls(certfile=None, keyfile=None), - mock.call().sendmail(mock.ANY, ['testing@test.test', 'test@test.test'], mock.ANY), - mock.call().quit()] - assert mock_smtp.mock_calls == expected - - body = mock_smtp.mock_calls[4][1][2] - # Extract the MIME encoded message body - body_text = base64.b64decode(body.split('\n\n')[-1][:-1]).decode('utf-8') - - assert 'testing' in body_text - assert '' in body_text - assert '☃' in body_text - - assert 
-        assert 'Reply-To: test@example.com' in body
-        assert 'To: testing@test.test' in body
-        assert 'From: testfrom@test.test' in body
-        assert 'Subject: Test alert for test_value :)' in body
-
-
-def test_email_query_key_in_subject():
-    rule = {'name': 'test alert', 'email': ['testing@test.test', 'test@test.test'],
-            'type': mock_rule(), 'timestamp_field': '@timestamp', 'email_reply_to': 'test@example.com',
-            'query_key': 'username'}
-    with mock.patch('elastalert.alerts.SMTP') as mock_smtp:
-        mock_smtp.return_value = mock.Mock()
-
-        alert = EmailAlerter(rule)
-        alert.alert([{'test_term': 'test_value', 'username': 'werbenjagermanjensen'}])
-
-        body = mock_smtp.mock_calls[4][1][2]
-        lines = body.split('\n')
-        found_subject = False
-        for line in lines:
-            if line.startswith('Subject'):
-                assert 'werbenjagermanjensen' in line
-                found_subject = True
-        assert found_subject
-
-
-def test_opsgenie_basic():
-    rule = {'name': 'testOGalert', 'opsgenie_key': 'ogkey',
-            'opsgenie_account': 'genies', 'opsgenie_addr': 'https://api.opsgenie.com/v2/alerts',
-            'opsgenie_recipients': ['lytics'], 'type': mock_rule()}
-    with mock.patch('requests.post') as mock_post:
-
-        alert = OpsGenieAlerter(rule)
-        alert.alert([{'@timestamp': '2014-10-31T00:00:00'}])
-        print(("mock_post: {0}".format(mock_post._mock_call_args_list)))
-        mcal = mock_post._mock_call_args_list
-        print(('mcal: {0}'.format(mcal[0])))
-        assert mcal[0][0][0] == ('https://api.opsgenie.com/v2/alerts')
-
-        assert mock_post.called
-
-        assert mcal[0][1]['headers']['Authorization'] == 'GenieKey ogkey'
-        assert mcal[0][1]['json']['source'] == 'ElastAlert'
-        assert mcal[0][1]['json']['responders'] == [{'username': 'lytics', 'type': 'user'}]
-        assert mcal[0][1]['json']['source'] == 'ElastAlert'
-
-
-def test_opsgenie_frequency():
-    rule = {'name': 'testOGalert', 'opsgenie_key': 'ogkey',
-            'opsgenie_account': 'genies', 'opsgenie_addr': 'https://api.opsgenie.com/v2/alerts',
-            'opsgenie_recipients': ['lytics'], 'type': mock_rule(),
-            'filter': [{'query': {'query_string': {'query': '*hihi*'}}}],
-            'alert': 'opsgenie'}
-    with mock.patch('requests.post') as mock_post:
-
-        alert = OpsGenieAlerter(rule)
-        alert.alert([{'@timestamp': '2014-10-31T00:00:00'}])
-
-        assert alert.get_info()['recipients'] == rule['opsgenie_recipients']
-
-        print(("mock_post: {0}".format(mock_post._mock_call_args_list)))
-        mcal = mock_post._mock_call_args_list
-        print(('mcal: {0}'.format(mcal[0])))
-        assert mcal[0][0][0] == ('https://api.opsgenie.com/v2/alerts')
-
-        assert mock_post.called
-
-        assert mcal[0][1]['headers']['Authorization'] == 'GenieKey ogkey'
-        assert mcal[0][1]['json']['source'] == 'ElastAlert'
-        assert mcal[0][1]['json']['responders'] == [{'username': 'lytics', 'type': 'user'}]
-        assert mcal[0][1]['json']['source'] == 'ElastAlert'
-        assert mcal[0][1]['json']['source'] == 'ElastAlert'
-
-
-def test_opsgenie_alert_routing():
-    rule = {'name': 'testOGalert', 'opsgenie_key': 'ogkey',
-            'opsgenie_account': 'genies', 'opsgenie_addr': 'https://api.opsgenie.com/v2/alerts',
-            'opsgenie_recipients': ['{RECEIPIENT_PREFIX}'], 'opsgenie_recipients_args': {'RECEIPIENT_PREFIX': 'recipient'},
-            'type': mock_rule(),
-            'filter': [{'query': {'query_string': {'query': '*hihi*'}}}],
-            'alert': 'opsgenie',
-            'opsgenie_teams': ['{TEAM_PREFIX}-Team'], 'opsgenie_teams_args': {'TEAM_PREFIX': 'team'}}
-    with mock.patch('requests.post'):
-
-        alert = OpsGenieAlerter(rule)
-        alert.alert([{'@timestamp': '2014-10-31T00:00:00', 'team': "Test", 'recipient': "lytics"}])
-
-        assert alert.get_info()['teams'] == 
['Test-Team'] - assert alert.get_info()['recipients'] == ['lytics'] - - -def test_opsgenie_default_alert_routing(): - rule = {'name': 'testOGalert', 'opsgenie_key': 'ogkey', - 'opsgenie_account': 'genies', 'opsgenie_addr': 'https://api.opsgenie.com/v2/alerts', - 'opsgenie_recipients': ['{RECEIPIENT_PREFIX}'], 'opsgenie_recipients_args': {'RECEIPIENT_PREFIX': 'recipient'}, - 'type': mock_rule(), - 'filter': [{'query': {'query_string': {'query': '*hihi*'}}}], - 'alert': 'opsgenie', - 'opsgenie_teams': ['{TEAM_PREFIX}-Team'], - 'opsgenie_default_receipients': ["devops@test.com"], 'opsgenie_default_teams': ["Test"] - } - with mock.patch('requests.post'): - - alert = OpsGenieAlerter(rule) - alert.alert([{'@timestamp': '2014-10-31T00:00:00', 'team': "Test"}]) - - assert alert.get_info()['teams'] == ['{TEAM_PREFIX}-Team'] - assert alert.get_info()['recipients'] == ['devops@test.com'] - - -def test_opsgenie_details_with_constant_value(): - rule = { - 'name': 'Opsgenie Details', - 'type': mock_rule(), - 'opsgenie_account': 'genies', - 'opsgenie_key': 'ogkey', - 'opsgenie_details': {'Foo': 'Bar'} - } - match = { - '@timestamp': '2014-10-31T00:00:00' - } - alert = OpsGenieAlerter(rule) - - with mock.patch('requests.post') as mock_post_request: - alert.alert([match]) - - mock_post_request.assert_called_once_with( - 'https://api.opsgenie.com/v2/alerts', - headers={ - 'Content-Type': 'application/json', - 'Authorization': 'GenieKey ogkey' - }, - json=mock.ANY, - proxies=None - ) - - expected_json = { - 'description': BasicMatchString(rule, match).__str__(), - 'details': {'Foo': 'Bar'}, - 'message': 'ElastAlert: Opsgenie Details', - 'priority': None, - 'source': 'ElastAlert', - 'tags': ['ElastAlert', 'Opsgenie Details'], - 'user': 'genies' - } - actual_json = mock_post_request.call_args_list[0][1]['json'] - assert expected_json == actual_json - - -def test_opsgenie_details_with_field(): - rule = { - 'name': 'Opsgenie Details', - 'type': mock_rule(), - 'opsgenie_account': 'genies', - 'opsgenie_key': 'ogkey', - 'opsgenie_details': {'Foo': {'field': 'message'}} - } - match = { - 'message': 'Bar', - '@timestamp': '2014-10-31T00:00:00' - } - alert = OpsGenieAlerter(rule) - - with mock.patch('requests.post') as mock_post_request: - alert.alert([match]) - - mock_post_request.assert_called_once_with( - 'https://api.opsgenie.com/v2/alerts', - headers={ - 'Content-Type': 'application/json', - 'Authorization': 'GenieKey ogkey' - }, - json=mock.ANY, - proxies=None - ) - - expected_json = { - 'description': BasicMatchString(rule, match).__str__(), - 'details': {'Foo': 'Bar'}, - 'message': 'ElastAlert: Opsgenie Details', - 'priority': None, - 'source': 'ElastAlert', - 'tags': ['ElastAlert', 'Opsgenie Details'], - 'user': 'genies' - } - actual_json = mock_post_request.call_args_list[0][1]['json'] - assert expected_json == actual_json - - -def test_opsgenie_details_with_nested_field(): - rule = { - 'name': 'Opsgenie Details', - 'type': mock_rule(), - 'opsgenie_account': 'genies', - 'opsgenie_key': 'ogkey', - 'opsgenie_details': {'Foo': {'field': 'nested.field'}} - } - match = { - 'nested': { - 'field': 'Bar' - }, - '@timestamp': '2014-10-31T00:00:00' - } - alert = OpsGenieAlerter(rule) - - with mock.patch('requests.post') as mock_post_request: - alert.alert([match]) - - mock_post_request.assert_called_once_with( - 'https://api.opsgenie.com/v2/alerts', - headers={ - 'Content-Type': 'application/json', - 'Authorization': 'GenieKey ogkey' - }, - json=mock.ANY, - proxies=None - ) - - expected_json = { - 'description': 
BasicMatchString(rule, match).__str__(), - 'details': {'Foo': 'Bar'}, - 'message': 'ElastAlert: Opsgenie Details', - 'priority': None, - 'source': 'ElastAlert', - 'tags': ['ElastAlert', 'Opsgenie Details'], - 'user': 'genies' - } - actual_json = mock_post_request.call_args_list[0][1]['json'] - assert expected_json == actual_json - - -def test_opsgenie_details_with_non_string_field(): - rule = { - 'name': 'Opsgenie Details', - 'type': mock_rule(), - 'opsgenie_account': 'genies', - 'opsgenie_key': 'ogkey', - 'opsgenie_details': { - 'Age': {'field': 'age'}, - 'Message': {'field': 'message'} - } - } - match = { - 'age': 10, - 'message': { - 'format': 'The cow goes %s!', - 'arg0': 'moo' - } - } - alert = OpsGenieAlerter(rule) - - with mock.patch('requests.post') as mock_post_request: - alert.alert([match]) - - mock_post_request.assert_called_once_with( - 'https://api.opsgenie.com/v2/alerts', - headers={ - 'Content-Type': 'application/json', - 'Authorization': 'GenieKey ogkey' - }, - json=mock.ANY, - proxies=None - ) - - expected_json = { - 'description': BasicMatchString(rule, match).__str__(), - 'details': { - 'Age': '10', - 'Message': "{'format': 'The cow goes %s!', 'arg0': 'moo'}" - }, - 'message': 'ElastAlert: Opsgenie Details', - 'priority': None, - 'source': 'ElastAlert', - 'tags': ['ElastAlert', 'Opsgenie Details'], - 'user': 'genies' - } - actual_json = mock_post_request.call_args_list[0][1]['json'] - assert expected_json == actual_json - - -def test_opsgenie_details_with_missing_field(): - rule = { - 'name': 'Opsgenie Details', - 'type': mock_rule(), - 'opsgenie_account': 'genies', - 'opsgenie_key': 'ogkey', - 'opsgenie_details': { - 'Message': {'field': 'message'}, - 'Missing': {'field': 'missing'} - } - } - match = { - 'message': 'Testing', - '@timestamp': '2014-10-31T00:00:00' - } - alert = OpsGenieAlerter(rule) - - with mock.patch('requests.post') as mock_post_request: - alert.alert([match]) - - mock_post_request.assert_called_once_with( - 'https://api.opsgenie.com/v2/alerts', - headers={ - 'Content-Type': 'application/json', - 'Authorization': 'GenieKey ogkey' - }, - json=mock.ANY, - proxies=None - ) - - expected_json = { - 'description': BasicMatchString(rule, match).__str__(), - 'details': {'Message': 'Testing'}, - 'message': 'ElastAlert: Opsgenie Details', - 'priority': None, - 'source': 'ElastAlert', - 'tags': ['ElastAlert', 'Opsgenie Details'], - 'user': 'genies' - } - actual_json = mock_post_request.call_args_list[0][1]['json'] - assert expected_json == actual_json - - -def test_opsgenie_details_with_environment_variable_replacement(environ): - environ.update({ - 'TEST_VAR': 'Bar' - }) - rule = { - 'name': 'Opsgenie Details', - 'type': mock_rule(), - 'opsgenie_account': 'genies', - 'opsgenie_key': 'ogkey', - 'opsgenie_details': {'Foo': '$TEST_VAR'} - } - match = { - '@timestamp': '2014-10-31T00:00:00' - } - alert = OpsGenieAlerter(rule) - - with mock.patch('requests.post') as mock_post_request: - alert.alert([match]) - - mock_post_request.assert_called_once_with( - 'https://api.opsgenie.com/v2/alerts', - headers={ - 'Content-Type': 'application/json', - 'Authorization': 'GenieKey ogkey' - }, - json=mock.ANY, - proxies=None - ) - - expected_json = { - 'description': BasicMatchString(rule, match).__str__(), - 'details': {'Foo': 'Bar'}, - 'message': 'ElastAlert: Opsgenie Details', - 'priority': None, - 'source': 'ElastAlert', - 'tags': ['ElastAlert', 'Opsgenie Details'], - 'user': 'genies' - } - actual_json = mock_post_request.call_args_list[0][1]['json'] - assert expected_json 
== actual_json - - -def test_jira(): - description_txt = "Description stuff goes here like a runbook link." - rule = { - 'name': 'test alert', - 'jira_account_file': 'jirafile', - 'type': mock_rule(), - 'jira_project': 'testproject', - 'jira_priority': 0, - 'jira_issuetype': 'testtype', - 'jira_server': 'jiraserver', - 'jira_label': 'testlabel', - 'jira_component': 'testcomponent', - 'jira_description': description_txt, - 'jira_watchers': ['testwatcher1', 'testwatcher2'], - 'timestamp_field': '@timestamp', - 'alert_subject': 'Issue {0} occurred at {1}', - 'alert_subject_args': ['test_term', '@timestamp'], - 'rule_file': '/tmp/foo.yaml' - } - - mock_priority = mock.Mock(id='5') - - with mock.patch('elastalert.alerts.JIRA') as mock_jira, \ - mock.patch('elastalert.alerts.yaml_loader') as mock_open: - mock_open.return_value = {'user': 'jirauser', 'password': 'jirapassword'} - mock_jira.return_value.priorities.return_value = [mock_priority] - mock_jira.return_value.fields.return_value = [] - alert = JiraAlerter(rule) - alert.alert([{'test_term': 'test_value', '@timestamp': '2014-10-31T00:00:00'}]) - - expected = [ - mock.call('jiraserver', basic_auth=('jirauser', 'jirapassword')), - mock.call().priorities(), - mock.call().fields(), - mock.call().create_issue( - issuetype={'name': 'testtype'}, - priority={'id': '5'}, - project={'key': 'testproject'}, - labels=['testlabel'], - components=[{'name': 'testcomponent'}], - description=mock.ANY, - summary='Issue test_value occurred at 2014-10-31T00:00:00', - ), - mock.call().add_watcher(mock.ANY, 'testwatcher1'), - mock.call().add_watcher(mock.ANY, 'testwatcher2'), - ] - - # We don't care about additional calls to mock_jira, such as __str__ - assert mock_jira.mock_calls[:6] == expected - assert mock_jira.mock_calls[3][2]['description'].startswith(description_txt) - - # Search called if jira_bump_tickets - rule['jira_bump_tickets'] = True - with mock.patch('elastalert.alerts.JIRA') as mock_jira, \ - mock.patch('elastalert.alerts.yaml_loader') as mock_open: - mock_open.return_value = {'user': 'jirauser', 'password': 'jirapassword'} - mock_jira.return_value = mock.Mock() - mock_jira.return_value.search_issues.return_value = [] - mock_jira.return_value.priorities.return_value = [mock_priority] - mock_jira.return_value.fields.return_value = [] - - alert = JiraAlerter(rule) - alert.alert([{'test_term': 'test_value', '@timestamp': '2014-10-31T00:00:00'}]) - - expected.insert(3, mock.call().search_issues(mock.ANY)) - assert mock_jira.mock_calls == expected - - # Remove a field if jira_ignore_in_title set - rule['jira_ignore_in_title'] = 'test_term' - with mock.patch('elastalert.alerts.JIRA') as mock_jira, \ - mock.patch('elastalert.alerts.yaml_loader') as mock_open: - mock_open.return_value = {'user': 'jirauser', 'password': 'jirapassword'} - mock_jira.return_value = mock.Mock() - mock_jira.return_value.search_issues.return_value = [] - mock_jira.return_value.priorities.return_value = [mock_priority] - mock_jira.return_value.fields.return_value = [] - - alert = JiraAlerter(rule) - alert.alert([{'test_term': 'test_value', '@timestamp': '2014-10-31T00:00:00'}]) - - assert 'test_value' not in mock_jira.mock_calls[3][1][0] - - # Issue is still created if search_issues throws an exception - with mock.patch('elastalert.alerts.JIRA') as mock_jira, \ - mock.patch('elastalert.alerts.yaml_loader') as mock_open: - mock_open.return_value = {'user': 'jirauser', 'password': 'jirapassword'} - mock_jira.return_value = mock.Mock() - 
mock_jira.return_value.search_issues.side_effect = JIRAError - mock_jira.return_value.priorities.return_value = [mock_priority] - mock_jira.return_value.fields.return_value = [] - - alert = JiraAlerter(rule) - alert.alert([{'test_term': 'test_value', '@timestamp': '2014-10-31T00:00:00'}]) - - assert mock_jira.mock_calls == expected - - # Only bump after 3d of inactivity - rule['jira_bump_after_inactivity'] = 3 - mock_issue = mock.Mock() - - # Check ticket is bumped if it is updated 4 days ago - mock_issue.fields.updated = str(ts_now() - datetime.timedelta(days=4)) - with mock.patch('elastalert.alerts.JIRA') as mock_jira, \ - mock.patch('elastalert.alerts.yaml_loader') as mock_open: - mock_open.return_value = {'user': 'jirauser', 'password': 'jirapassword'} - mock_jira.return_value = mock.Mock() - mock_jira.return_value.search_issues.return_value = [mock_issue] - mock_jira.return_value.priorities.return_value = [mock_priority] - mock_jira.return_value.fields.return_value = [] - - alert = JiraAlerter(rule) - alert.alert([{'test_term': 'test_value', '@timestamp': '2014-10-31T00:00:00'}]) - # Check add_comment is called - assert len(mock_jira.mock_calls) == 5 - assert '().add_comment' == mock_jira.mock_calls[4][0] - - # Check ticket is bumped is not bumped if ticket is updated right now - mock_issue.fields.updated = str(ts_now()) - with mock.patch('elastalert.alerts.JIRA') as mock_jira, \ - mock.patch('elastalert.alerts.yaml_loader') as mock_open: - mock_open.return_value = {'user': 'jirauser', 'password': 'jirapassword'} - mock_jira.return_value = mock.Mock() - mock_jira.return_value.search_issues.return_value = [mock_issue] - mock_jira.return_value.priorities.return_value = [mock_priority] - mock_jira.return_value.fields.return_value = [] - - alert = JiraAlerter(rule) - alert.alert([{'test_term': 'test_value', '@timestamp': '2014-10-31T00:00:00'}]) - # Only 4 calls for mock_jira since add_comment is not called - assert len(mock_jira.mock_calls) == 4 - - # Test match resolved values - rule = { - 'name': 'test alert', - 'jira_account_file': 'jirafile', - 'type': mock_rule(), - 'owner': 'the_owner', - 'jira_project': 'testproject', - 'jira_issuetype': 'testtype', - 'jira_server': 'jiraserver', - 'jira_label': 'testlabel', - 'jira_component': 'testcomponent', - 'jira_description': "DESC", - 'jira_watchers': ['testwatcher1', 'testwatcher2'], - 'timestamp_field': '@timestamp', - 'jira_affected_user': "#gmail.the_user", - 'rule_file': '/tmp/foo.yaml' - } - mock_issue = mock.Mock() - mock_issue.fields.updated = str(ts_now() - datetime.timedelta(days=4)) - mock_fields = [ - {'name': 'affected user', 'id': 'affected_user_id', 'schema': {'type': 'string'}} - ] - with mock.patch('elastalert.alerts.JIRA') as mock_jira, \ - mock.patch('elastalert.alerts.yaml_loader') as mock_open: - mock_open.return_value = {'user': 'jirauser', 'password': 'jirapassword'} - mock_jira.return_value = mock.Mock() - mock_jira.return_value.search_issues.return_value = [mock_issue] - mock_jira.return_value.fields.return_value = mock_fields - mock_jira.return_value.priorities.return_value = [mock_priority] - alert = JiraAlerter(rule) - alert.alert([{'gmail.the_user': 'jdoe', '@timestamp': '2014-10-31T00:00:00'}]) - assert mock_jira.mock_calls[4][2]['affected_user_id'] == "jdoe" - - -def test_jira_arbitrary_field_support(): - description_txt = "Description stuff goes here like a runbook link." 
- rule = { - 'name': 'test alert', - 'jira_account_file': 'jirafile', - 'type': mock_rule(), - 'owner': 'the_owner', - 'jira_project': 'testproject', - 'jira_issuetype': 'testtype', - 'jira_server': 'jiraserver', - 'jira_label': 'testlabel', - 'jira_component': 'testcomponent', - 'jira_description': description_txt, - 'jira_watchers': ['testwatcher1', 'testwatcher2'], - 'jira_arbitrary_reference_string_field': '$owner$', - 'jira_arbitrary_string_field': 'arbitrary_string_value', - 'jira_arbitrary_string_array_field': ['arbitrary_string_value1', 'arbitrary_string_value2'], - 'jira_arbitrary_string_array_field_provided_as_single_value': 'arbitrary_string_value_in_array_field', - 'jira_arbitrary_number_field': 1, - 'jira_arbitrary_number_array_field': [2, 3], - 'jira_arbitrary_number_array_field_provided_as_single_value': 1, - 'jira_arbitrary_complex_field': 'arbitrary_complex_value', - 'jira_arbitrary_complex_array_field': ['arbitrary_complex_value1', 'arbitrary_complex_value2'], - 'jira_arbitrary_complex_array_field_provided_as_single_value': 'arbitrary_complex_value_in_array_field', - 'timestamp_field': '@timestamp', - 'alert_subject': 'Issue {0} occurred at {1}', - 'alert_subject_args': ['test_term', '@timestamp'], - 'rule_file': '/tmp/foo.yaml' - } - - mock_priority = mock.MagicMock(id='5') - - mock_fields = [ - {'name': 'arbitrary reference string field', 'id': 'arbitrary_reference_string_field', 'schema': {'type': 'string'}}, - {'name': 'arbitrary string field', 'id': 'arbitrary_string_field', 'schema': {'type': 'string'}}, - {'name': 'arbitrary string array field', 'id': 'arbitrary_string_array_field', 'schema': {'type': 'array', 'items': 'string'}}, - { - 'name': 'arbitrary string array field provided as single value', - 'id': 'arbitrary_string_array_field_provided_as_single_value', - 'schema': {'type': 'array', 'items': 'string'} - }, - {'name': 'arbitrary number field', 'id': 'arbitrary_number_field', 'schema': {'type': 'number'}}, - {'name': 'arbitrary number array field', 'id': 'arbitrary_number_array_field', 'schema': {'type': 'array', 'items': 'number'}}, - { - 'name': 'arbitrary number array field provided as single value', - 'id': 'arbitrary_number_array_field_provided_as_single_value', - 'schema': {'type': 'array', 'items': 'number'} - }, - {'name': 'arbitrary complex field', 'id': 'arbitrary_complex_field', 'schema': {'type': 'ArbitraryType'}}, - { - 'name': 'arbitrary complex array field', - 'id': 'arbitrary_complex_array_field', - 'schema': {'type': 'array', 'items': 'ArbitraryType'} - }, - { - 'name': 'arbitrary complex array field provided as single value', - 'id': 'arbitrary_complex_array_field_provided_as_single_value', - 'schema': {'type': 'array', 'items': 'ArbitraryType'} - }, - ] - - with mock.patch('elastalert.alerts.JIRA') as mock_jira, \ - mock.patch('elastalert.alerts.yaml_loader') as mock_open: - mock_open.return_value = {'user': 'jirauser', 'password': 'jirapassword'} - mock_jira.return_value.priorities.return_value = [mock_priority] - mock_jira.return_value.fields.return_value = mock_fields - alert = JiraAlerter(rule) - alert.alert([{'test_term': 'test_value', '@timestamp': '2014-10-31T00:00:00'}]) - - expected = [ - mock.call('jiraserver', basic_auth=('jirauser', 'jirapassword')), - mock.call().priorities(), - mock.call().fields(), - mock.call().create_issue( - issuetype={'name': 'testtype'}, - project={'key': 'testproject'}, - labels=['testlabel'], - components=[{'name': 'testcomponent'}], - description=mock.ANY, - summary='Issue test_value occurred at 
2014-10-31T00:00:00', - arbitrary_reference_string_field='the_owner', - arbitrary_string_field='arbitrary_string_value', - arbitrary_string_array_field=['arbitrary_string_value1', 'arbitrary_string_value2'], - arbitrary_string_array_field_provided_as_single_value=['arbitrary_string_value_in_array_field'], - arbitrary_number_field=1, - arbitrary_number_array_field=[2, 3], - arbitrary_number_array_field_provided_as_single_value=[1], - arbitrary_complex_field={'name': 'arbitrary_complex_value'}, - arbitrary_complex_array_field=[{'name': 'arbitrary_complex_value1'}, {'name': 'arbitrary_complex_value2'}], - arbitrary_complex_array_field_provided_as_single_value=[{'name': 'arbitrary_complex_value_in_array_field'}], - ), - mock.call().add_watcher(mock.ANY, 'testwatcher1'), - mock.call().add_watcher(mock.ANY, 'testwatcher2'), - ] - - # We don't care about additional calls to mock_jira, such as __str__ - assert mock_jira.mock_calls[:6] == expected - assert mock_jira.mock_calls[3][2]['description'].startswith(description_txt) - - # Reference an arbitrary string field that is not defined on the JIRA server - rule['jira_nonexistent_field'] = 'nonexistent field value' - - with mock.patch('elastalert.alerts.JIRA') as mock_jira, \ - mock.patch('elastalert.alerts.yaml_loader') as mock_open: - mock_open.return_value = {'user': 'jirauser', 'password': 'jirapassword'} - mock_jira.return_value.priorities.return_value = [mock_priority] - mock_jira.return_value.fields.return_value = mock_fields - - with pytest.raises(Exception) as exception: - alert = JiraAlerter(rule) - alert.alert([{'test_term': 'test_value', '@timestamp': '2014-10-31T00:00:00'}]) - assert "Could not find a definition for the jira field 'nonexistent field'" in str(exception) - - del rule['jira_nonexistent_field'] - - # Reference a watcher that does not exist - rule['jira_watchers'] = 'invalid_watcher' - - with mock.patch('elastalert.alerts.JIRA') as mock_jira, \ - mock.patch('elastalert.alerts.yaml_loader') as mock_open: - mock_open.return_value = {'user': 'jirauser', 'password': 'jirapassword'} - mock_jira.return_value.priorities.return_value = [mock_priority] - mock_jira.return_value.fields.return_value = mock_fields - - # Cause add_watcher to raise, which most likely means that the user did not exist - mock_jira.return_value.add_watcher.side_effect = Exception() - - with pytest.raises(Exception) as exception: - alert = JiraAlerter(rule) - alert.alert([{'test_term': 'test_value', '@timestamp': '2014-10-31T00:00:00'}]) - assert "Exception encountered when trying to add 'invalid_watcher' as a watcher. Does the user exist?" 
in str(exception) - - -def test_kibana(ea): - rule = {'filter': [{'query': {'query_string': {'query': 'xy:z'}}}], - 'name': 'Test rule!', - 'es_host': 'test.testing', - 'es_port': 12345, - 'timeframe': datetime.timedelta(hours=1), - 'index': 'logstash-test', - 'include': ['@timestamp'], - 'timestamp_field': '@timestamp'} - match = {'@timestamp': '2014-10-10T00:00:00'} - with mock.patch("elastalert.elastalert.elasticsearch_client") as mock_es: - mock_create = mock.Mock(return_value={'_id': 'ABCDEFGH'}) - mock_es_inst = mock.Mock() - mock_es_inst.index = mock_create - mock_es_inst.host = 'test.testing' - mock_es_inst.port = 12345 - mock_es.return_value = mock_es_inst - link = ea.generate_kibana_db(rule, match) - - assert 'http://test.testing:12345/_plugin/kibana/#/dashboard/temp/ABCDEFGH' == link - - # Name and index - dashboard = json.loads(mock_create.call_args_list[0][1]['body']['dashboard']) - assert dashboard['index']['default'] == 'logstash-test' - assert 'Test rule!' in dashboard['title'] - - # Filters and time range - filters = dashboard['services']['filter']['list'] - assert 'xy:z' in filters['1']['query'] - assert filters['1']['type'] == 'querystring' - time_range = filters['0'] - assert time_range['from'] == ts_add(match['@timestamp'], -rule['timeframe']) - assert time_range['to'] == ts_add(match['@timestamp'], datetime.timedelta(minutes=10)) - - # Included fields active in table - assert dashboard['rows'][1]['panels'][0]['fields'] == ['@timestamp'] - - -def test_command(): - # Test command as list with a formatted arg - rule = {'command': ['/bin/test/', '--arg', '%(somefield)s']} - alert = CommandAlerter(rule) - match = {'@timestamp': '2014-01-01T00:00:00', - 'somefield': 'foobarbaz', - 'nested': {'field': 1}} - with mock.patch("elastalert.alerts.subprocess.Popen") as mock_popen: - alert.alert([match]) - assert mock_popen.called_with(['/bin/test', '--arg', 'foobarbaz'], stdin=subprocess.PIPE, shell=False) - - # Test command as string with formatted arg (old-style string format) - rule = {'command': '/bin/test/ --arg %(somefield)s'} - alert = CommandAlerter(rule) - with mock.patch("elastalert.alerts.subprocess.Popen") as mock_popen: - alert.alert([match]) - assert mock_popen.called_with('/bin/test --arg foobarbaz', stdin=subprocess.PIPE, shell=False) - - # Test command as string without formatted arg (old-style string format) - rule = {'command': '/bin/test/foo.sh'} - alert = CommandAlerter(rule) - with mock.patch("elastalert.alerts.subprocess.Popen") as mock_popen: - alert.alert([match]) - assert mock_popen.called_with('/bin/test/foo.sh', stdin=subprocess.PIPE, shell=True) - - # Test command as string with formatted arg (new-style string format) - rule = {'command': '/bin/test/ --arg {match[somefield]}', 'new_style_string_format': True} - alert = CommandAlerter(rule) - with mock.patch("elastalert.alerts.subprocess.Popen") as mock_popen: - alert.alert([match]) - assert mock_popen.called_with('/bin/test --arg foobarbaz', stdin=subprocess.PIPE, shell=False) - - rule = {'command': '/bin/test/ --arg {match[nested][field]}', 'new_style_string_format': True} - alert = CommandAlerter(rule) - with mock.patch("elastalert.alerts.subprocess.Popen") as mock_popen: - alert.alert([match]) - assert mock_popen.called_with('/bin/test --arg 1', stdin=subprocess.PIPE, shell=False) - - # Test command as string without formatted arg (new-style string format) - rule = {'command': '/bin/test/foo.sh', 'new_style_string_format': True} - alert = CommandAlerter(rule) - with 
mock.patch("elastalert.alerts.subprocess.Popen") as mock_popen: - alert.alert([match]) - assert mock_popen.called_with('/bin/test/foo.sh', stdin=subprocess.PIPE, shell=True) - - rule = {'command': '/bin/test/foo.sh {{bar}}', 'new_style_string_format': True} - alert = CommandAlerter(rule) - with mock.patch("elastalert.alerts.subprocess.Popen") as mock_popen: - alert.alert([match]) - assert mock_popen.called_with('/bin/test/foo.sh {bar}', stdin=subprocess.PIPE, shell=True) - - # Test command with pipe_match_json - rule = {'command': ['/bin/test/', '--arg', '%(somefield)s'], - 'pipe_match_json': True} - alert = CommandAlerter(rule) - match = {'@timestamp': '2014-01-01T00:00:00', - 'somefield': 'foobarbaz'} - with mock.patch("elastalert.alerts.subprocess.Popen") as mock_popen: - mock_subprocess = mock.Mock() - mock_popen.return_value = mock_subprocess - mock_subprocess.communicate.return_value = (None, None) - alert.alert([match]) - assert mock_popen.called_with(['/bin/test', '--arg', 'foobarbaz'], stdin=subprocess.PIPE, shell=False) - assert mock_subprocess.communicate.called_with(input=json.dumps(match)) - - # Test command with fail_on_non_zero_exit - rule = {'command': ['/bin/test/', '--arg', '%(somefield)s'], - 'fail_on_non_zero_exit': True} - alert = CommandAlerter(rule) - match = {'@timestamp': '2014-01-01T00:00:00', - 'somefield': 'foobarbaz'} - with pytest.raises(Exception) as exception: - with mock.patch("elastalert.alerts.subprocess.Popen") as mock_popen: - mock_subprocess = mock.Mock() - mock_popen.return_value = mock_subprocess - mock_subprocess.wait.return_value = 1 - alert.alert([match]) - assert mock_popen.called_with(['/bin/test', '--arg', 'foobarbaz'], stdin=subprocess.PIPE, shell=False) - assert "Non-zero exit code while running command" in str(exception) - - -def test_ms_teams(): - rule = { - 'name': 'Test Rule', - 'type': 'any', - 'ms_teams_webhook_url': 'http://test.webhook.url', - 'ms_teams_alert_summary': 'Alert from ElastAlert', - 'alert_subject': 'Cool subject', - 'alert': [] - } - rules_loader = FileRulesLoader({}) - rules_loader.load_modules(rule) - alert = MsTeamsAlerter(rule) - match = { - '@timestamp': '2016-01-01T00:00:00', - 'somefield': 'foobarbaz' - } - with mock.patch('requests.post') as mock_post_request: - alert.alert([match]) - - expected_data = { - '@type': 'MessageCard', - '@context': 'http://schema.org/extensions', - 'summary': rule['ms_teams_alert_summary'], - 'title': rule['alert_subject'], - 'text': BasicMatchString(rule, match).__str__() - } - mock_post_request.assert_called_once_with( - rule['ms_teams_webhook_url'], - data=mock.ANY, - headers={'content-type': 'application/json'}, - proxies=None - ) - assert expected_data == json.loads(mock_post_request.call_args_list[0][1]['data']) - - -def test_ms_teams_uses_color_and_fixed_width_text(): - rule = { - 'name': 'Test Rule', - 'type': 'any', - 'ms_teams_webhook_url': 'http://test.webhook.url', - 'ms_teams_alert_summary': 'Alert from ElastAlert', - 'ms_teams_alert_fixed_width': True, - 'ms_teams_theme_color': '#124578', - 'alert_subject': 'Cool subject', - 'alert': [] - } - rules_loader = FileRulesLoader({}) - rules_loader.load_modules(rule) - alert = MsTeamsAlerter(rule) - match = { - '@timestamp': '2016-01-01T00:00:00', - 'somefield': 'foobarbaz' - } - with mock.patch('requests.post') as mock_post_request: - alert.alert([match]) - body = BasicMatchString(rule, match).__str__() - body = body.replace('`', "'") - body = "```{0}```".format('```\n\n```'.join(x for x in 
body.split('\n'))).replace('\n``````', '') - expected_data = { - '@type': 'MessageCard', - '@context': 'http://schema.org/extensions', - 'summary': rule['ms_teams_alert_summary'], - 'title': rule['alert_subject'], - 'themeColor': '#124578', - 'text': body - } - mock_post_request.assert_called_once_with( - rule['ms_teams_webhook_url'], - data=mock.ANY, - headers={'content-type': 'application/json'}, - proxies=None - ) - assert expected_data == json.loads(mock_post_request.call_args_list[0][1]['data']) - - -def test_slack_uses_custom_title(): - rule = { - 'name': 'Test Rule', - 'type': 'any', - 'slack_webhook_url': 'http://please.dontgohere.slack', - 'alert_subject': 'Cool subject', - 'alert': [] - } - rules_loader = FileRulesLoader({}) - rules_loader.load_modules(rule) - alert = SlackAlerter(rule) - match = { - '@timestamp': '2016-01-01T00:00:00', - 'somefield': 'foobarbaz' - } - with mock.patch('requests.post') as mock_post_request: - alert.alert([match]) - - expected_data = { - 'username': 'elastalert', - 'channel': '', - 'icon_emoji': ':ghost:', - 'attachments': [ - { - 'color': 'danger', - 'title': rule['alert_subject'], - 'text': BasicMatchString(rule, match).__str__(), - 'mrkdwn_in': ['text', 'pretext'], - 'fields': [] - } - ], - 'text': '', - 'parse': 'none' - } - mock_post_request.assert_called_once_with( - rule['slack_webhook_url'], - data=mock.ANY, - headers={'content-type': 'application/json'}, - proxies=None, - verify=False, - timeout=10 - ) - assert expected_data == json.loads(mock_post_request.call_args_list[0][1]['data']) - - -def test_slack_uses_custom_timeout(): - rule = { - 'name': 'Test Rule', - 'type': 'any', - 'slack_webhook_url': 'http://please.dontgohere.slack', - 'alert_subject': 'Cool subject', - 'alert': [], - 'slack_timeout': 20 - } - rules_loader = FileRulesLoader({}) - rules_loader.load_modules(rule) - alert = SlackAlerter(rule) - match = { - '@timestamp': '2016-01-01T00:00:00', - 'somefield': 'foobarbaz' - } - with mock.patch('requests.post') as mock_post_request: - alert.alert([match]) - - expected_data = { - 'username': 'elastalert', - 'channel': '', - 'icon_emoji': ':ghost:', - 'attachments': [ - { - 'color': 'danger', - 'title': rule['alert_subject'], - 'text': BasicMatchString(rule, match).__str__(), - 'mrkdwn_in': ['text', 'pretext'], - 'fields': [] - } - ], - 'text': '', - 'parse': 'none' - } - mock_post_request.assert_called_once_with( - rule['slack_webhook_url'], - data=mock.ANY, - headers={'content-type': 'application/json'}, - proxies=None, - verify=False, - timeout=20 - ) - assert expected_data == json.loads(mock_post_request.call_args_list[0][1]['data']) - - -def test_slack_uses_rule_name_when_custom_title_is_not_provided(): - rule = { - 'name': 'Test Rule', - 'type': 'any', - 'slack_webhook_url': ['http://please.dontgohere.slack'], - 'alert': [] - } - rules_loader = FileRulesLoader({}) - rules_loader.load_modules(rule) - alert = SlackAlerter(rule) - match = { - '@timestamp': '2016-01-01T00:00:00', - 'somefield': 'foobarbaz' - } - with mock.patch('requests.post') as mock_post_request: - alert.alert([match]) - - expected_data = { - 'username': 'elastalert', - 'channel': '', - 'icon_emoji': ':ghost:', - 'attachments': [ - { - 'color': 'danger', - 'title': rule['name'], - 'text': BasicMatchString(rule, match).__str__(), - 'mrkdwn_in': ['text', 'pretext'], - 'fields': [] - } - ], - 'text': '', - 'parse': 'none', - } - mock_post_request.assert_called_once_with( - rule['slack_webhook_url'][0], - data=mock.ANY, - headers={'content-type': 
'application/json'}, - proxies=None, - verify=False, - timeout=10 - ) - assert expected_data == json.loads(mock_post_request.call_args_list[0][1]['data']) - - -def test_slack_uses_custom_slack_channel(): - rule = { - 'name': 'Test Rule', - 'type': 'any', - 'slack_webhook_url': ['http://please.dontgohere.slack'], - 'slack_channel_override': '#test-alert', - 'alert': [] - } - rules_loader = FileRulesLoader({}) - rules_loader.load_modules(rule) - alert = SlackAlerter(rule) - match = { - '@timestamp': '2016-01-01T00:00:00', - 'somefield': 'foobarbaz' - } - with mock.patch('requests.post') as mock_post_request: - alert.alert([match]) - - expected_data = { - 'username': 'elastalert', - 'channel': '#test-alert', - 'icon_emoji': ':ghost:', - 'attachments': [ - { - 'color': 'danger', - 'title': rule['name'], - 'text': BasicMatchString(rule, match).__str__(), - 'mrkdwn_in': ['text', 'pretext'], - 'fields': [] - } - ], - 'text': '', - 'parse': 'none', - } - mock_post_request.assert_called_once_with( - rule['slack_webhook_url'][0], - data=mock.ANY, - headers={'content-type': 'application/json'}, - proxies=None, - verify=False, - timeout=10 - ) - assert expected_data == json.loads(mock_post_request.call_args_list[0][1]['data']) - - -def test_slack_uses_list_of_custom_slack_channel(): - rule = { - 'name': 'Test Rule', - 'type': 'any', - 'slack_webhook_url': ['http://please.dontgohere.slack'], - 'slack_channel_override': ['#test-alert', '#test-alert2'], - 'alert': [] - } - rules_loader = FileRulesLoader({}) - rules_loader.load_modules(rule) - alert = SlackAlerter(rule) - match = { - '@timestamp': '2016-01-01T00:00:00', - 'somefield': 'foobarbaz' - } - with mock.patch('requests.post') as mock_post_request: - alert.alert([match]) - - expected_data1 = { - 'username': 'elastalert', - 'channel': '#test-alert', - 'icon_emoji': ':ghost:', - 'attachments': [ - { - 'color': 'danger', - 'title': rule['name'], - 'text': BasicMatchString(rule, match).__str__(), - 'mrkdwn_in': ['text', 'pretext'], - 'fields': [] - } - ], - 'text': '', - 'parse': 'none' - } - expected_data2 = { - 'username': 'elastalert', - 'channel': '#test-alert2', - 'icon_emoji': ':ghost:', - 'attachments': [ - { - 'color': 'danger', - 'title': rule['name'], - 'text': BasicMatchString(rule, match).__str__(), - 'mrkdwn_in': ['text', 'pretext'], - 'fields': [] - } - ], - 'text': '', - 'parse': 'none' - } - mock_post_request.assert_called_with( - rule['slack_webhook_url'][0], - data=mock.ANY, - headers={'content-type': 'application/json'}, - proxies=None, - verify=False, - timeout=10 - ) - assert expected_data1 == json.loads(mock_post_request.call_args_list[0][1]['data']) - assert expected_data2 == json.loads(mock_post_request.call_args_list[1][1]['data']) - - -def test_slack_attach_kibana_discover_url_when_generated(): - rule = { - 'name': 'Test Rule', - 'type': 'any', - 'slack_attach_kibana_discover_url': True, - 'slack_webhook_url': 'http://please.dontgohere.slack', - 'alert': [] - } - rules_loader = FileRulesLoader({}) - rules_loader.load_modules(rule) - alert = SlackAlerter(rule) - match = { - '@timestamp': '2016-01-01T00:00:00', - 'kibana_discover_url': 'http://kibana#discover' - } - with mock.patch('requests.post') as mock_post_request: - alert.alert([match]) - - expected_data = { - 'username': 'elastalert', - 'parse': 'none', - 'text': '', - 'attachments': [ - { - 'color': 'danger', - 'title': 'Test Rule', - 'text': BasicMatchString(rule, match).__str__(), - 'mrkdwn_in': ['text', 'pretext'], - 'fields': [] - }, - { - 'color': '#ec4b98', - 
'title': 'Discover in Kibana', - 'title_link': 'http://kibana#discover' - } - ], - 'icon_emoji': ':ghost:', - 'channel': '' - } - mock_post_request.assert_called_once_with( - rule['slack_webhook_url'], - data=mock.ANY, - headers={'content-type': 'application/json'}, - proxies=None, - verify=False, - timeout=10 - ) - actual_data = json.loads(mock_post_request.call_args_list[0][1]['data']) - assert expected_data == actual_data - - -def test_slack_attach_kibana_discover_url_when_not_generated(): - rule = { - 'name': 'Test Rule', - 'type': 'any', - 'slack_attach_kibana_discover_url': True, - 'slack_webhook_url': 'http://please.dontgohere.slack', - 'alert': [] - } - rules_loader = FileRulesLoader({}) - rules_loader.load_modules(rule) - alert = SlackAlerter(rule) - match = { - '@timestamp': '2016-01-01T00:00:00' - } - with mock.patch('requests.post') as mock_post_request: - alert.alert([match]) - - expected_data = { - 'username': 'elastalert', - 'parse': 'none', - 'text': '', - 'attachments': [ - { - 'color': 'danger', - 'title': 'Test Rule', - 'text': BasicMatchString(rule, match).__str__(), - 'mrkdwn_in': ['text', 'pretext'], - 'fields': [] - } - ], - 'icon_emoji': ':ghost:', - 'channel': '' - } - mock_post_request.assert_called_once_with( - rule['slack_webhook_url'], - data=mock.ANY, - headers={'content-type': 'application/json'}, - proxies=None, - verify=False, - timeout=10 - ) - actual_data = json.loads(mock_post_request.call_args_list[0][1]['data']) - assert expected_data == actual_data - - -def test_slack_kibana_discover_title(): - rule = { - 'name': 'Test Rule', - 'type': 'any', - 'slack_attach_kibana_discover_url': True, - 'slack_kibana_discover_title': 'Click to discover in Kibana', - 'slack_webhook_url': 'http://please.dontgohere.slack', - 'alert': [] - } - rules_loader = FileRulesLoader({}) - rules_loader.load_modules(rule) - alert = SlackAlerter(rule) - match = { - '@timestamp': '2016-01-01T00:00:00', - 'kibana_discover_url': 'http://kibana#discover' - } - with mock.patch('requests.post') as mock_post_request: - alert.alert([match]) - - expected_data = { - 'username': 'elastalert', - 'parse': 'none', - 'text': '', - 'attachments': [ - { - 'color': 'danger', - 'title': 'Test Rule', - 'text': BasicMatchString(rule, match).__str__(), - 'mrkdwn_in': ['text', 'pretext'], - 'fields': [] - }, - { - 'color': '#ec4b98', - 'title': 'Click to discover in Kibana', - 'title_link': 'http://kibana#discover' - } - ], - 'icon_emoji': ':ghost:', - 'channel': '' - } - mock_post_request.assert_called_once_with( - rule['slack_webhook_url'], - data=mock.ANY, - headers={'content-type': 'application/json'}, - proxies=None, - verify=False, - timeout=10 - ) - actual_data = json.loads(mock_post_request.call_args_list[0][1]['data']) - assert expected_data == actual_data - - -def test_slack_kibana_discover_color(): - rule = { - 'name': 'Test Rule', - 'type': 'any', - 'slack_attach_kibana_discover_url': True, - 'slack_kibana_discover_color': 'blue', - 'slack_webhook_url': 'http://please.dontgohere.slack', - 'alert': [] - } - rules_loader = FileRulesLoader({}) - rules_loader.load_modules(rule) - alert = SlackAlerter(rule) - match = { - '@timestamp': '2016-01-01T00:00:00', - 'kibana_discover_url': 'http://kibana#discover' - } - with mock.patch('requests.post') as mock_post_request: - alert.alert([match]) - - expected_data = { - 'username': 'elastalert', - 'parse': 'none', - 'text': '', - 'attachments': [ - { - 'color': 'danger', - 'title': 'Test Rule', - 'text': BasicMatchString(rule, match).__str__(), - 
'mrkdwn_in': ['text', 'pretext'], - 'fields': [] - }, - { - 'color': 'blue', - 'title': 'Discover in Kibana', - 'title_link': 'http://kibana#discover' - } - ], - 'icon_emoji': ':ghost:', - 'channel': '' - } - mock_post_request.assert_called_once_with( - rule['slack_webhook_url'], - data=mock.ANY, - headers={'content-type': 'application/json'}, - proxies=None, - verify=False, - timeout=10 - ) - actual_data = json.loads(mock_post_request.call_args_list[0][1]['data']) - assert expected_data == actual_data - - -def test_http_alerter_with_payload(): - rule = { - 'name': 'Test HTTP Post Alerter With Payload', - 'type': 'any', - 'http_post_url': 'http://test.webhook.url', - 'http_post_payload': {'posted_name': 'somefield'}, - 'http_post_static_payload': {'name': 'somestaticname'}, - 'alert': [] - } - rules_loader = FileRulesLoader({}) - rules_loader.load_modules(rule) - alert = HTTPPostAlerter(rule) - match = { - '@timestamp': '2017-01-01T00:00:00', - 'somefield': 'foobarbaz' - } - with mock.patch('requests.post') as mock_post_request: - alert.alert([match]) - expected_data = { - 'posted_name': 'foobarbaz', - 'name': 'somestaticname' - } - mock_post_request.assert_called_once_with( - rule['http_post_url'], - data=mock.ANY, - headers={'Content-Type': 'application/json', 'Accept': 'application/json;charset=utf-8'}, - proxies=None, - timeout=10 - ) - assert expected_data == json.loads(mock_post_request.call_args_list[0][1]['data']) - - -def test_http_alerter_with_payload_all_values(): - rule = { - 'name': 'Test HTTP Post Alerter With Payload', - 'type': 'any', - 'http_post_url': 'http://test.webhook.url', - 'http_post_payload': {'posted_name': 'somefield'}, - 'http_post_static_payload': {'name': 'somestaticname'}, - 'http_post_all_values': True, - 'alert': [] - } - rules_loader = FileRulesLoader({}) - rules_loader.load_modules(rule) - alert = HTTPPostAlerter(rule) - match = { - '@timestamp': '2017-01-01T00:00:00', - 'somefield': 'foobarbaz' - } - with mock.patch('requests.post') as mock_post_request: - alert.alert([match]) - expected_data = { - 'posted_name': 'foobarbaz', - 'name': 'somestaticname', - '@timestamp': '2017-01-01T00:00:00', - 'somefield': 'foobarbaz' - } - mock_post_request.assert_called_once_with( - rule['http_post_url'], - data=mock.ANY, - headers={'Content-Type': 'application/json', 'Accept': 'application/json;charset=utf-8'}, - proxies=None, - timeout=10 - ) - assert expected_data == json.loads(mock_post_request.call_args_list[0][1]['data']) - - -def test_http_alerter_without_payload(): - rule = { - 'name': 'Test HTTP Post Alerter Without Payload', - 'type': 'any', - 'http_post_url': 'http://test.webhook.url', - 'http_post_static_payload': {'name': 'somestaticname'}, - 'alert': [] - } - rules_loader = FileRulesLoader({}) - rules_loader.load_modules(rule) - alert = HTTPPostAlerter(rule) - match = { - '@timestamp': '2017-01-01T00:00:00', - 'somefield': 'foobarbaz' - } - with mock.patch('requests.post') as mock_post_request: - alert.alert([match]) - expected_data = { - '@timestamp': '2017-01-01T00:00:00', - 'somefield': 'foobarbaz', - 'name': 'somestaticname' - } - mock_post_request.assert_called_once_with( - rule['http_post_url'], - data=mock.ANY, - headers={'Content-Type': 'application/json', 'Accept': 'application/json;charset=utf-8'}, - proxies=None, - timeout=10 - ) - assert expected_data == json.loads(mock_post_request.call_args_list[0][1]['data']) - - -def test_pagerduty_alerter(): - rule = { - 'name': 'Test PD Rule', - 'type': 'any', - 'pagerduty_service_key': 
'magicalbadgers', - 'pagerduty_client_name': 'ponies inc.', - 'alert': [] - } - rules_loader = FileRulesLoader({}) - rules_loader.load_modules(rule) - alert = PagerDutyAlerter(rule) - match = { - '@timestamp': '2017-01-01T00:00:00', - 'somefield': 'foobarbaz' - } - with mock.patch('requests.post') as mock_post_request: - alert.alert([match]) - expected_data = { - 'client': 'ponies inc.', - 'description': 'Test PD Rule', - 'details': { - 'information': 'Test PD Rule\n\n@timestamp: 2017-01-01T00:00:00\nsomefield: foobarbaz\n' - }, - 'event_type': 'trigger', - 'incident_key': '', - 'service_key': 'magicalbadgers', - } - mock_post_request.assert_called_once_with('https://events.pagerduty.com/generic/2010-04-15/create_event.json', - data=mock.ANY, headers={'content-type': 'application/json'}, proxies=None) - assert expected_data == json.loads(mock_post_request.call_args_list[0][1]['data']) - - -def test_pagerduty_alerter_v2(): - rule = { - 'name': 'Test PD Rule', - 'type': 'any', - 'pagerduty_service_key': 'magicalbadgers', - 'pagerduty_client_name': 'ponies inc.', - 'pagerduty_api_version': 'v2', - 'pagerduty_v2_payload_class': 'ping failure', - 'pagerduty_v2_payload_component': 'mysql', - 'pagerduty_v2_payload_group': 'app-stack', - 'pagerduty_v2_payload_severity': 'error', - 'pagerduty_v2_payload_source': 'mysql.host.name', - 'alert': [] - } - rules_loader = FileRulesLoader({}) - rules_loader.load_modules(rule) - alert = PagerDutyAlerter(rule) - match = { - '@timestamp': '2017-01-01T00:00:00', - 'somefield': 'foobarbaz' - } - with mock.patch('requests.post') as mock_post_request: - alert.alert([match]) - expected_data = { - 'client': 'ponies inc.', - 'payload': { - 'class': 'ping failure', - 'component': 'mysql', - 'group': 'app-stack', - 'severity': 'error', - 'source': 'mysql.host.name', - 'summary': 'Test PD Rule', - 'custom_details': { - 'information': 'Test PD Rule\n\n@timestamp: 2017-01-01T00:00:00\nsomefield: foobarbaz\n' - }, - 'timestamp': '2017-01-01T00:00:00' - }, - 'event_action': 'trigger', - 'dedup_key': '', - 'routing_key': 'magicalbadgers', - } - mock_post_request.assert_called_once_with('https://events.pagerduty.com/v2/enqueue', - data=mock.ANY, headers={'content-type': 'application/json'}, proxies=None) - assert expected_data == json.loads(mock_post_request.call_args_list[0][1]['data']) +from elastalert.alerts import Alerter +from elastalert.alerts import BasicMatchString -def test_pagerduty_alerter_custom_incident_key(): - rule = { - 'name': 'Test PD Rule', - 'type': 'any', - 'pagerduty_service_key': 'magicalbadgers', - 'pagerduty_client_name': 'ponies inc.', - 'pagerduty_incident_key': 'custom key', - 'alert': [] - } - rules_loader = FileRulesLoader({}) - rules_loader.load_modules(rule) - alert = PagerDutyAlerter(rule) - match = { - '@timestamp': '2017-01-01T00:00:00', - 'somefield': 'foobarbaz' - } - with mock.patch('requests.post') as mock_post_request: - alert.alert([match]) - expected_data = { - 'client': 'ponies inc.', - 'description': 'Test PD Rule', - 'details': { - 'information': 'Test PD Rule\n\n@timestamp: 2017-01-01T00:00:00\nsomefield: foobarbaz\n' - }, - 'event_type': 'trigger', - 'incident_key': 'custom key', - 'service_key': 'magicalbadgers', - } - mock_post_request.assert_called_once_with(alert.url, data=mock.ANY, headers={'content-type': 'application/json'}, proxies=None) - assert expected_data == json.loads(mock_post_request.call_args_list[0][1]['data']) +class mock_rule: + def get_match_str(self, event): + return str(event) -def 
test_pagerduty_alerter_custom_incident_key_with_args(): - rule = { - 'name': 'Test PD Rule', - 'type': 'any', - 'pagerduty_service_key': 'magicalbadgers', - 'pagerduty_client_name': 'ponies inc.', - 'pagerduty_incident_key': 'custom {0}', - 'pagerduty_incident_key_args': ['somefield'], - 'alert': [] - } - rules_loader = FileRulesLoader({}) - rules_loader.load_modules(rule) - alert = PagerDutyAlerter(rule) - match = { - '@timestamp': '2017-01-01T00:00:00', - 'somefield': 'foobarbaz' - } - with mock.patch('requests.post') as mock_post_request: - alert.alert([match]) - expected_data = { - 'client': 'ponies inc.', - 'description': 'Test PD Rule', - 'details': { - 'information': 'Test PD Rule\n\n@timestamp: 2017-01-01T00:00:00\nsomefield: foobarbaz\n' - }, - 'event_type': 'trigger', - 'incident_key': 'custom foobarbaz', - 'service_key': 'magicalbadgers', - } - mock_post_request.assert_called_once_with(alert.url, data=mock.ANY, headers={'content-type': 'application/json'}, proxies=None) - assert expected_data == json.loads(mock_post_request.call_args_list[0][1]['data']) +def test_basic_match_string(ea): + ea.rules[0]['top_count_keys'] = ['username'] + match = {'@timestamp': '1918-01-17', 'field': 'value', 'top_events_username': {'bob': 10, 'mallory': 5}} + alert_text = str(BasicMatchString(ea.rules[0], match)) + assert 'anytest' in alert_text + assert 'some stuff happened' in alert_text + assert 'username' in alert_text + assert 'bob: 10' in alert_text + assert 'field: value' in alert_text + # Non serializable objects don't cause errors + match['non-serializable'] = {open: 10} + alert_text = str(BasicMatchString(ea.rules[0], match)) -def test_pagerduty_alerter_custom_alert_subject(): - rule = { - 'name': 'Test PD Rule', - 'type': 'any', - 'alert_subject': 'Hungry kittens', - 'pagerduty_service_key': 'magicalbadgers', - 'pagerduty_client_name': 'ponies inc.', - 'pagerduty_incident_key': 'custom {0}', - 'pagerduty_incident_key_args': ['somefield'], - 'alert': [] - } - rules_loader = FileRulesLoader({}) - rules_loader.load_modules(rule) - alert = PagerDutyAlerter(rule) - match = { - '@timestamp': '2017-01-01T00:00:00', - 'somefield': 'foobarbaz' - } - with mock.patch('requests.post') as mock_post_request: - alert.alert([match]) - expected_data = { - 'client': 'ponies inc.', - 'description': 'Hungry kittens', - 'details': { - 'information': 'Test PD Rule\n\n@timestamp: 2017-01-01T00:00:00\nsomefield: foobarbaz\n' - }, - 'event_type': 'trigger', - 'incident_key': 'custom foobarbaz', - 'service_key': 'magicalbadgers', - } - mock_post_request.assert_called_once_with(alert.url, data=mock.ANY, headers={'content-type': 'application/json'}, proxies=None) - assert expected_data == json.loads(mock_post_request.call_args_list[0][1]['data']) + # unicode objects dont cause errors + match['snowman'] = '☃' + alert_text = str(BasicMatchString(ea.rules[0], match)) + # Pretty printed objects + match.pop('non-serializable') + match['object'] = {'this': {'that': [1, 2, "3"]}} + alert_text = str(BasicMatchString(ea.rules[0], match)) + assert '"this": {\n "that": [\n 1,\n 2,\n "3"\n ]\n }' in alert_text -def test_pagerduty_alerter_custom_alert_subject_with_args(): - rule = { - 'name': 'Test PD Rule', - 'type': 'any', - 'alert_subject': '{0} kittens', - 'alert_subject_args': ['somefield'], - 'pagerduty_service_key': 'magicalbadgers', - 'pagerduty_client_name': 'ponies inc.', - 'pagerduty_incident_key': 'custom {0}', - 'pagerduty_incident_key_args': ['someotherfield'], - 'alert': [] - } - rules_loader = 
FileRulesLoader({}) - rules_loader.load_modules(rule) - alert = PagerDutyAlerter(rule) - match = { - '@timestamp': '2017-01-01T00:00:00', - 'somefield': 'Stinky', - 'someotherfield': 'foobarbaz' - } - with mock.patch('requests.post') as mock_post_request: - alert.alert([match]) - expected_data = { - 'client': 'ponies inc.', - 'description': 'Stinky kittens', - 'details': { - 'information': 'Test PD Rule\n\n@timestamp: 2017-01-01T00:00:00\nsomefield: Stinky\nsomeotherfield: foobarbaz\n' - }, - 'event_type': 'trigger', - 'incident_key': 'custom foobarbaz', - 'service_key': 'magicalbadgers', - } - mock_post_request.assert_called_once_with(alert.url, data=mock.ANY, headers={'content-type': 'application/json'}, proxies=None) - assert expected_data == json.loads(mock_post_request.call_args_list[0][1]['data']) + ea.rules[0]['alert_text'] = 'custom text' + alert_text = str(BasicMatchString(ea.rules[0], match)) + assert 'custom text' in alert_text + assert 'anytest' not in alert_text + ea.rules[0]['alert_text_type'] = 'alert_text_only' + alert_text = str(BasicMatchString(ea.rules[0], match)) + assert 'custom text' in alert_text + assert 'some stuff happened' not in alert_text + assert 'username' not in alert_text + assert 'field: value' not in alert_text -def test_pagerduty_alerter_custom_alert_subject_with_args_specifying_trigger(): - rule = { - 'name': 'Test PD Rule', - 'type': 'any', - 'alert_subject': '{0} kittens', - 'alert_subject_args': ['somefield'], - 'pagerduty_service_key': 'magicalbadgers', - 'pagerduty_event_type': 'trigger', - 'pagerduty_client_name': 'ponies inc.', - 'pagerduty_incident_key': 'custom {0}', - 'pagerduty_incident_key_args': ['someotherfield'], - 'alert': [] - } - rules_loader = FileRulesLoader({}) - rules_loader.load_modules(rule) - alert = PagerDutyAlerter(rule) - match = { - '@timestamp': '2017-01-01T00:00:00', - 'somefield': 'Stinkiest', - 'someotherfield': 'foobarbaz' - } - with mock.patch('requests.post') as mock_post_request: - alert.alert([match]) - expected_data = { - 'client': 'ponies inc.', - 'description': 'Stinkiest kittens', - 'details': { - 'information': 'Test PD Rule\n\n@timestamp: 2017-01-01T00:00:00\nsomefield: Stinkiest\nsomeotherfield: foobarbaz\n' - }, - 'event_type': 'trigger', - 'incident_key': 'custom foobarbaz', - 'service_key': 'magicalbadgers', - } - mock_post_request.assert_called_once_with(alert.url, data=mock.ANY, headers={'content-type': 'application/json'}, proxies=None) - assert expected_data == json.loads(mock_post_request.call_args_list[0][1]['data']) + ea.rules[0]['alert_text_type'] = 'exclude_fields' + alert_text = str(BasicMatchString(ea.rules[0], match)) + assert 'custom text' in alert_text + assert 'some stuff happened' in alert_text + assert 'username' in alert_text + assert 'field: value' not in alert_text def test_alert_text_kw(ea): @@ -2055,7 +115,33 @@ def test_alert_text_kw_global_substitution(ea): assert 'Abc: abc from match' in alert_text -def test_resolving_rule_references(ea): +def test_alert_text_jinja(ea): + rule = ea.rules[0].copy() + rule['foo_rule'] = 'foo from rule' + rule['owner'] = 'the owner from rule' + rule['abc'] = 'abc from rule' + rule['alert_text'] = 'Owner: {{owner}}; Foo: {{_data["foo_rule"]}}; Abc: {{abc}}; Xyz: {{_data["xyz"]}}' + rule['alert_text_type'] = "alert_text_jinja" + rule['jinja_root_name'] = "_data" + rule['jinja_template'] = Template(str(rule['alert_text'])) + + match = { + '@timestamp': '2016-01-01', + 'field': 'field_value', + 'abc': 'abc from match', + 'xyz': 'from match' + } + + 
alert_text = str(BasicMatchString(rule, match)) + assert 'Owner: the owner from rule' in alert_text + assert 'Foo: foo from rule' in alert_text + assert 'Xyz: from match' in alert_text + + # When the key exists in both places, it will come from the match + assert 'Abc: abc from match' in alert_text + + +def test_resolving_rule_references(): rule = { 'name': 'test_rule', 'type': mock_rule(), @@ -2086,528 +172,205 @@ def test_resolving_rule_references(ea): assert 'the_owner' == alert.rule['nested_dict']['nested_owner'] -def test_stride_plain_text(): - rule = { - 'name': 'Test Rule', - 'type': 'any', - 'stride_access_token': 'token', - 'stride_cloud_id': 'cloud_id', - 'stride_conversation_id': 'conversation_id', - 'alert_subject': 'Cool subject', - 'alert': [] - } - rules_loader = FileRulesLoader({}) - rules_loader.load_modules(rule) - alert = StrideAlerter(rule) - match = { - '@timestamp': '2016-01-01T00:00:00', - 'somefield': 'foobarbaz' - } - with mock.patch('requests.post') as mock_post_request: - alert.alert([match]) - - body = "{0}\n\n@timestamp: {1}\nsomefield: {2}".format( - rule['name'], match['@timestamp'], match['somefield'] - ) - expected_data = {'body': {'version': 1, 'type': "doc", 'content': [ - {'type': "panel", 'attrs': {'panelType': "warning"}, 'content': [ - {'type': 'paragraph', 'content': [ - {'type': 'text', 'text': body} - ]} - ]} - ]}} - - mock_post_request.assert_called_once_with( - alert.url, - data=mock.ANY, - headers={ - 'content-type': 'application/json', - 'Authorization': 'Bearer {}'.format(rule['stride_access_token'])}, - verify=True, - proxies=None - ) - assert expected_data == json.loads( - mock_post_request.call_args_list[0][1]['data']) - - -def test_stride_underline_text(): - rule = { - 'name': 'Test Rule', - 'type': 'any', - 'stride_access_token': 'token', - 'stride_cloud_id': 'cloud_id', - 'stride_conversation_id': 'conversation_id', - 'alert_subject': 'Cool subject', - 'alert_text': 'Underline Text', - 'alert_text_type': 'alert_text_only', - 'alert': [] - } - rules_loader = FileRulesLoader({}) - rules_loader.load_modules(rule) - alert = StrideAlerter(rule) - match = { - '@timestamp': '2016-01-01T00:00:00', - 'somefield': 'foobarbaz' - } - with mock.patch('requests.post') as mock_post_request: - alert.alert([match]) - - body = "Underline Text" - expected_data = {'body': {'version': 1, 'type': "doc", 'content': [ - {'type': "panel", 'attrs': {'panelType': "warning"}, 'content': [ - {'type': 'paragraph', 'content': [ - {'type': 'text', 'text': body, 'marks': [ - {'type': 'underline'} - ]} - ]} - ]} - ]}} - - mock_post_request.assert_called_once_with( - alert.url, - data=mock.ANY, - headers={ - 'content-type': 'application/json', - 'Authorization': 'Bearer {}'.format(rule['stride_access_token'])}, - verify=True, - proxies=None - ) - assert expected_data == json.loads( - mock_post_request.call_args_list[0][1]['data']) - - -def test_stride_bold_text(): - rule = { - 'name': 'Test Rule', - 'type': 'any', - 'stride_access_token': 'token', - 'stride_cloud_id': 'cloud_id', - 'stride_conversation_id': 'conversation_id', - 'alert_subject': 'Cool subject', - 'alert_text': 'Bold Text', - 'alert_text_type': 'alert_text_only', - 'alert': [] - } - rules_loader = FileRulesLoader({}) - rules_loader.load_modules(rule) - alert = StrideAlerter(rule) - match = { - '@timestamp': '2016-01-01T00:00:00', - 'somefield': 'foobarbaz' - } - with mock.patch('requests.post') as mock_post_request: - alert.alert([match]) - - body = "Bold Text" - expected_data = {'body': {'version': 1, 
'type': "doc", 'content': [ - {'type': "panel", 'attrs': {'panelType': "warning"}, 'content': [ - {'type': 'paragraph', 'content': [ - {'type': 'text', 'text': body, 'marks': [ - {'type': 'strong'} - ]} - ]} - ]} - ]}} - - mock_post_request.assert_called_once_with( - alert.url, - data=mock.ANY, - headers={ - 'content-type': 'application/json', - 'Authorization': 'Bearer {}'.format(rule['stride_access_token'])}, - verify=True, - proxies=None - ) - assert expected_data == json.loads( - mock_post_request.call_args_list[0][1]['data']) - - -def test_stride_strong_text(): +def test_alert_subject_size_limit_no_args(): rule = { - 'name': 'Test Rule', - 'type': 'any', - 'stride_access_token': 'token', - 'stride_cloud_id': 'cloud_id', - 'stride_conversation_id': 'conversation_id', - 'alert_subject': 'Cool subject', - 'alert_text': 'Bold Text', - 'alert_text_type': 'alert_text_only', - 'alert': [] - } - rules_loader = FileRulesLoader({}) - rules_loader.load_modules(rule) - alert = StrideAlerter(rule) - match = { - '@timestamp': '2016-01-01T00:00:00', - 'somefield': 'foobarbaz' + 'name': 'test_rule', + 'type': mock_rule(), + 'owner': 'the_owner', + 'priority': 2, + 'alert_subject': 'A very long subject', + 'alert_subject_max_len': 5 } - with mock.patch('requests.post') as mock_post_request: - alert.alert([match]) - - body = "Bold Text" - expected_data = {'body': {'version': 1, 'type': "doc", 'content': [ - {'type': "panel", 'attrs': {'panelType': "warning"}, 'content': [ - {'type': 'paragraph', 'content': [ - {'type': 'text', 'text': body, 'marks': [ - {'type': 'strong'} - ]} - ]} - ]} - ]}} - - mock_post_request.assert_called_once_with( - alert.url, - data=mock.ANY, - headers={ - 'content-type': 'application/json', - 'Authorization': 'Bearer {}'.format(rule['stride_access_token'])}, - verify=True, - proxies=None - ) - assert expected_data == json.loads( - mock_post_request.call_args_list[0][1]['data']) + alert = Alerter(rule) + alertSubject = alert.create_custom_title([{'test_term': 'test_value', '@timestamp': '2014-10-31T00:00:00'}]) + assert 5 == len(alertSubject) -def test_stride_hyperlink(): +def test_alert_error(): rule = { - 'name': 'Test Rule', - 'type': 'any', - 'stride_access_token': 'token', - 'stride_cloud_id': 'cloud_id', - 'stride_conversation_id': 'conversation_id', - 'alert_subject': 'Cool subject', - 'alert_text': '
Link', - 'alert_text_type': 'alert_text_only', - 'alert': [] + 'name': 'test_rule', + 'type': mock_rule(), + 'owner': 'the_owner', + 'priority': 2, + 'alert_subject': 'A very long subject', + 'alert_subject_max_len': 5 } - rules_loader = FileRulesLoader({}) - rules_loader.load_modules(rule) - alert = StrideAlerter(rule) match = { - '@timestamp': '2016-01-01T00:00:00', - 'somefield': 'foobarbaz' + '@timestamp': '2021-01-01T00:00:00', + 'name': 'datadog-test-name' } - with mock.patch('requests.post') as mock_post_request: + alert = Alerter(rule) + try: alert.alert([match]) - - body = "Link" - expected_data = {'body': {'version': 1, 'type': "doc", 'content': [ - {'type': "panel", 'attrs': {'panelType': "warning"}, 'content': [ - {'type': 'paragraph', 'content': [ - {'type': 'text', 'text': body, 'marks': [ - {'type': 'link', 'attrs': {'href': 'http://stride.com'}} - ]} - ]} - ]} - ]}} - - mock_post_request.assert_called_once_with( - alert.url, - data=mock.ANY, - headers={ - 'content-type': 'application/json', - 'Authorization': 'Bearer {}'.format(rule['stride_access_token'])}, - verify=True, - proxies=None - ) - assert expected_data == json.loads( - mock_post_request.call_args_list[0][1]['data']) + except NotImplementedError: + assert True -def test_stride_html(): +def test_alert_get_aggregation_summary_text__maximum_width(): rule = { - 'name': 'Test Rule', - 'type': 'any', - 'stride_access_token': 'token', - 'stride_cloud_id': 'cloud_id', - 'stride_conversation_id': 'conversation_id', - 'alert_subject': 'Cool subject', - 'alert_text': 'Alert: we found something. Link', - 'alert_text_type': 'alert_text_only', - 'alert': [] - } - rules_loader = FileRulesLoader({}) - rules_loader.load_modules(rule) - alert = StrideAlerter(rule) - match = { - '@timestamp': '2016-01-01T00:00:00', - 'somefield': 'foobarbaz' + 'name': 'test_rule', + 'type': mock_rule(), + 'owner': 'the_owner', + 'priority': 2, + 'alert_subject': 'A very long subject', + 'alert_subject_max_len': 5 } - with mock.patch('requests.post') as mock_post_request: - alert.alert([match]) - - expected_data = {'body': {'version': 1, 'type': "doc", 'content': [ - {'type': "panel", 'attrs': {'panelType': "warning"}, 'content': [ - {'type': 'paragraph', 'content': [ - {'type': 'text', 'text': 'Alert', 'marks': [ - {'type': 'strong'} - ]}, - {'type': 'text', 'text': ': we found something. 
'}, - {'type': 'text', 'text': 'Link', 'marks': [ - {'type': 'link', 'attrs': {'href': 'http://stride.com'}} - ]} - ]} - ]} - ]}} - - mock_post_request.assert_called_once_with( - alert.url, - data=mock.ANY, - headers={ - 'content-type': 'application/json', - 'Authorization': 'Bearer {}'.format(rule['stride_access_token'])}, - verify=True, - proxies=None - ) - assert expected_data == json.loads( - mock_post_request.call_args_list[0][1]['data']) + alert = Alerter(rule) + assert 80 == alert.get_aggregation_summary_text__maximum_width() -def test_hipchat_body_size_limit_text(): +def test_alert_aggregation_summary_markdown_table(): rule = { - 'name': 'Test Rule', - 'type': 'any', - 'hipchat_auth_token': 'token', - 'hipchat_room_id': 'room_id', - 'hipchat_message_format': 'text', - 'alert_subject': 'Cool subject', - 'alert_text': 'Alert: we found something.\n\n{message}', - 'alert_text_type': 'alert_text_only', - 'alert': [], - 'alert_text_kw': { - '@timestamp': 'time', - 'message': 'message', - }, - } - rules_loader = FileRulesLoader({}) - rules_loader.load_modules(rule) - alert = HipChatAlerter(rule) - match = { - '@timestamp': '2018-01-01T00:00:00', - 'message': 'foo bar\n' * 5000, - } - body = alert.create_alert_body([match]) - - assert len(body) <= 10000 + 'name': 'test_rule', + 'type': mock_rule(), + 'owner': 'the_owner', + 'priority': 2, + 'alert_subject': 'A very long subject', + 'aggregation': 1, + 'summary_table_fields': ['field', 'abc'], + 'summary_table_type': 'markdown' + } + matches = [ + {'@timestamp': '2016-01-01', 'field': 'field_value', 'abc': 'abc from match', }, + {'@timestamp': '2016-01-01', 'field': 'field_value', 'abc': 'abc from match', }, + {'@timestamp': '2016-01-01', 'field': 'field_value', 'abc': 'abc from match', }, + {'@timestamp': '2016-01-01', 'field': 'field_value', 'abc': 'cde from match', }, + {'@timestamp': '2016-01-01', 'field': 'field_value', 'abc': 'cde from match', }, + ] + alert = Alerter(rule) + summary_table = str(alert.get_aggregation_summary_text(matches)) + assert "| field | abc | count |" in summary_table + assert "|-----|-----|-----|" in summary_table + assert "| field_value | abc from match | 3 |" in summary_table + assert "| field_value | cde from match | 2 |" in summary_table -def test_hipchat_body_size_limit_html(): +def test_alert_aggregation_summary_default_table(): rule = { - 'name': 'Test Rule', - 'type': 'any', - 'hipchat_auth_token': 'token', - 'hipchat_room_id': 'room_id', - 'hipchat_message_format': 'html', - 'alert_subject': 'Cool subject', - 'alert_text': 'Alert: we found something.\n\n{message}', - 'alert_text_type': 'alert_text_only', - 'alert': [], - 'alert_text_kw': { - '@timestamp': 'time', - 'message': 'message', - }, - } - rules_loader = FileRulesLoader({}) - rules_loader.load_modules(rule) - alert = HipChatAlerter(rule) - match = { - '@timestamp': '2018-01-01T00:00:00', - 'message': 'foo bar\n' * 5000, - } - - body = alert.create_alert_body([match]) - - assert len(body) <= 10000 + 'name': 'test_rule', + 'type': mock_rule(), + 'owner': 'the_owner', + 'priority': 2, + 'alert_subject': 'A very long subject', + 'aggregation': 1, + 'summary_table_fields': ['field', 'abc'], + } + matches = [ + {'@timestamp': '2016-01-01', 'field': 'field_value', 'abc': 'abc from match', }, + {'@timestamp': '2016-01-01', 'field': 'field_value', 'abc': 'abc from match', }, + {'@timestamp': '2016-01-01', 'field': 'field_value', 'abc': 'abc from match', }, + {'@timestamp': '2016-01-01', 'field': 'field_value', 'abc': 'cde from match', }, + 
{'@timestamp': '2016-01-01', 'field': 'field_value', 'abc': 'cde from match', }, + ] + alert = Alerter(rule) + summary_table = str(alert.get_aggregation_summary_text(matches)) + assert "+-------------+----------------+-------+" in summary_table + assert "| field | abc | count |" in summary_table + assert "+=============+================+=======+" in summary_table + assert "| field_value | abc from match | 3 |" in summary_table + assert "| field_value | cde from match | 2 |" in summary_table -def test_alerta_no_auth(ea): +def test_alert_aggregation_summary_table_one_row(): rule = { - 'name': 'Test Alerta rule!', - 'alerta_api_url': 'http://elastalerthost:8080/api/alert', - 'timeframe': datetime.timedelta(hours=1), - 'timestamp_field': '@timestamp', - 'alerta_api_skip_ssl': True, - 'alerta_attributes_keys': ["hostname", "TimestampEvent", "senderIP"], - 'alerta_attributes_values': ["%(key)s", "%(logdate)s", "%(sender_ip)s"], - 'alerta_correlate': ["ProbeUP", "ProbeDOWN"], - 'alerta_event': "ProbeUP", - 'alerta_group': "Health", - 'alerta_origin': "Elastalert", - 'alerta_severity': "debug", - 'alerta_text': "Probe %(hostname)s is UP at %(logdate)s GMT", - 'alerta_value': "UP", - 'type': 'any', - 'alerta_use_match_timestamp': True, - 'alert': 'alerta' - } - - match = { - '@timestamp': '2014-10-10T00:00:00', - # 'key': ---- missing field on purpose, to verify that simply the text is left empty - # 'logdate': ---- missing field on purpose, to verify that simply the text is left empty - 'sender_ip': '1.1.1.1', - 'hostname': 'aProbe' - } - - rules_loader = FileRulesLoader({}) - rules_loader.load_modules(rule) - alert = AlertaAlerter(rule) - with mock.patch('requests.post') as mock_post_request: - alert.alert([match]) - - expected_data = { - "origin": "Elastalert", - "resource": "elastalert", - "severity": "debug", - "service": ["elastalert"], - "tags": [], - "text": "Probe aProbe is UP at GMT", - "value": "UP", - "createTime": "2014-10-10T00:00:00.000000Z", - "environment": "Production", - "rawData": "Test Alerta rule!\n\n@timestamp: 2014-10-10T00:00:00\nhostname: aProbe\nsender_ip: 1.1.1.1\n", - "timeout": 86400, - "correlate": ["ProbeUP", "ProbeDOWN"], - "group": "Health", - "attributes": {"senderIP": "1.1.1.1", "hostname": "", "TimestampEvent": ""}, - "type": "elastalert", - "event": "ProbeUP" - } - - mock_post_request.assert_called_once_with( - alert.url, - data=mock.ANY, - headers={ - 'content-type': 'application/json'}, - verify=False - ) - assert expected_data == json.loads( - mock_post_request.call_args_list[0][1]['data']) + 'name': 'test_rule', + 'type': mock_rule(), + 'owner': 'the_owner', + 'priority': 2, + 'alert_subject': 'A very long subject', + 'aggregation': 1, + 'summary_table_fields': ['field', 'abc'], + 'summary_table_max_rows': 1, + } + matches = [ + {'@timestamp': '2016-01-01', 'field': 'field_value', 'abc': 'abc from match', }, + {'@timestamp': '2016-01-01', 'field': 'field_value', 'abc': 'abc from match', }, + {'@timestamp': '2016-01-01', 'field': 'field_value', 'abc': 'abc from match', }, + {'@timestamp': '2016-01-01', 'field': 'field_value', 'abc': 'cde from match', }, + {'@timestamp': '2016-01-01', 'field': 'field_value', 'abc': 'cde from match', }, + ] + alert = Alerter(rule) + summary_table = str(alert.get_aggregation_summary_text(matches)) + assert "+-------------+----------------+-------+" in summary_table + assert "| field | abc | count |" in summary_table + assert "+=============+================+=======+" in summary_table + assert "| field_value | abc from match | 3 
|" in summary_table + assert "| field_value | cde from match | 2 |" not in summary_table + assert "Showing top 1 rows" in summary_table -def test_alerta_auth(ea): +def test_alert_aggregation_summary_table_suffix_prefix(): rule = { - 'name': 'Test Alerta rule!', - 'alerta_api_url': 'http://elastalerthost:8080/api/alert', - 'alerta_api_key': '123456789ABCDEF', - 'timeframe': datetime.timedelta(hours=1), - 'timestamp_field': '@timestamp', - 'alerta_severity': "debug", - 'type': 'any', - 'alerta_use_match_timestamp': True, - 'alert': 'alerta' - } - - match = { - '@timestamp': '2014-10-10T00:00:00', - 'sender_ip': '1.1.1.1', - 'hostname': 'aProbe' - } - - rules_loader = FileRulesLoader({}) - rules_loader.load_modules(rule) - alert = AlertaAlerter(rule) - with mock.patch('requests.post') as mock_post_request: - alert.alert([match]) - - mock_post_request.assert_called_once_with( - alert.url, - data=mock.ANY, - verify=True, - headers={ - 'content-type': 'application/json', - 'Authorization': 'Key {}'.format(rule['alerta_api_key'])}) + 'name': 'test_rule', + 'type': mock_rule(), + 'owner': 'the_owner', + 'priority': 2, + 'alert_subject': 'A very long subject', + 'aggregation': 1, + 'summary_table_fields': ['field', 'abc'], + 'summary_prefix': 'This is the prefix', + 'summary_suffix': 'This is the suffix', + } + matches = [ + {'@timestamp': '2016-01-01', 'field': 'field_value', 'abc': 'abc from match', }, + {'@timestamp': '2016-01-01', 'field': 'field_value', 'abc': 'abc from match', }, + {'@timestamp': '2016-01-01', 'field': 'field_value', 'abc': 'abc from match', }, + {'@timestamp': '2016-01-01', 'field': 'field_value', 'abc': 'cde from match', }, + {'@timestamp': '2016-01-01', 'field': 'field_value', 'abc': 'cde from match', }, + ] + alert = Alerter(rule) + summary_table = str(alert.get_aggregation_summary_text(matches)) + assert "This is the prefix" in summary_table + assert "This is the suffix" in summary_table -def test_alerta_new_style(ea): +def test_alert_subject_size_limit_with_args(ea): rule = { - 'name': 'Test Alerta rule!', - 'alerta_api_url': 'http://elastalerthost:8080/api/alert', - 'timeframe': datetime.timedelta(hours=1), - 'timestamp_field': '@timestamp', - 'alerta_attributes_keys': ["hostname", "TimestampEvent", "senderIP"], - 'alerta_attributes_values': ["{hostname}", "{logdate}", "{sender_ip}"], - 'alerta_correlate': ["ProbeUP", "ProbeDOWN"], - 'alerta_event': "ProbeUP", - 'alerta_group': "Health", - 'alerta_origin': "Elastalert", - 'alerta_severity': "debug", - 'alerta_text': "Probe {hostname} is UP at {logdate} GMT", - 'alerta_value': "UP", - 'alerta_new_style_string_format': True, - 'type': 'any', - 'alerta_use_match_timestamp': True, - 'alert': 'alerta' - } - - match = { - '@timestamp': '2014-10-10T00:00:00', - # 'key': ---- missing field on purpose, to verify that simply the text is left empty - # 'logdate': ---- missing field on purpose, to verify that simply the text is left empty - 'sender_ip': '1.1.1.1', - 'hostname': 'aProbe' - } - - rules_loader = FileRulesLoader({}) - rules_loader.load_modules(rule) - alert = AlertaAlerter(rule) - with mock.patch('requests.post') as mock_post_request: - alert.alert([match]) - - expected_data = { - "origin": "Elastalert", - "resource": "elastalert", - "severity": "debug", - "service": ["elastalert"], - "tags": [], - "text": "Probe aProbe is UP at GMT", - "value": "UP", - "createTime": "2014-10-10T00:00:00.000000Z", - "environment": "Production", - "rawData": "Test Alerta rule!\n\n@timestamp: 2014-10-10T00:00:00\nhostname: 
aProbe\nsender_ip: 1.1.1.1\n", - "timeout": 86400, - "correlate": ["ProbeUP", "ProbeDOWN"], - "group": "Health", - "attributes": {"senderIP": "1.1.1.1", "hostname": "aProbe", "TimestampEvent": ""}, - "type": "elastalert", - "event": "ProbeUP" + 'name': 'test_rule', + 'type': mock_rule(), + 'owner': 'the_owner', + 'priority': 2, + 'alert_subject': 'Test alert for {0} {1}', + 'alert_subject_args': ['test_term', 'test.term'], + 'alert_subject_max_len': 6 } - - mock_post_request.assert_called_once_with( - alert.url, - data=mock.ANY, - verify=True, - headers={ - 'content-type': 'application/json'} - ) - assert expected_data == json.loads( - mock_post_request.call_args_list[0][1]['data']) + alert = Alerter(rule) + alertSubject = alert.create_custom_title([{'test_term': 'test_value', '@timestamp': '2014-10-31T00:00:00'}]) + assert 6 == len(alertSubject) -def test_alert_subject_size_limit_no_args(ea): +def test_alert_subject_with_jinja(): rule = { 'name': 'test_rule', 'type': mock_rule(), 'owner': 'the_owner', 'priority': 2, - 'alert_subject': 'A very long subject', - 'alert_subject_max_len': 5 + 'alert_subject': 'Test alert for {{owner}}; field {{field}}; Abc: {{_data["abc"]}}', + 'alert_text_type': "alert_text_jinja", + 'jinja_root_name': "_data" + } + match = { + '@timestamp': '2016-01-01', + 'field': 'field_value', + 'abc': 'abc from match', } alert = Alerter(rule) - alertSubject = alert.create_custom_title([{'test_term': 'test_value', '@timestamp': '2014-10-31T00:00:00'}]) - assert 5 == len(alertSubject) + alertsubject = alert.create_custom_title([match]) + assert "Test alert for the_owner;" in alertsubject + assert "field field_value;" in alertsubject + assert "Abc: abc from match" in alertsubject -def test_alert_subject_size_limit_with_args(ea): +def test_alert_getinfo(): rule = { 'name': 'test_rule', 'type': mock_rule(), 'owner': 'the_owner', 'priority': 2, - 'alert_subject': 'Test alert for {0} {1}', - 'alert_subject_args': ['test_term', 'test.term'], - 'alert_subject_max_len': 6 + 'alert_subject': 'A very long subject', + 'alert_subject_max_len': 5 } alert = Alerter(rule) - alertSubject = alert.create_custom_title([{'test_term': 'test_value', '@timestamp': '2014-10-31T00:00:00'}]) - assert 6 == len(alertSubject) + actual_data = alert.get_info() + expected_data = {'type': 'Unknown'} + assert expected_data == actual_data diff --git a/tests/base_test.py b/tests/base_test.py index 92dc35f7e..450b593b2 100644 --- a/tests/base_test.py +++ b/tests/base_test.py @@ -1,22 +1,22 @@ # -*- coding: utf-8 -*- import copy import datetime -import json import threading import elasticsearch -import mock +from unittest import mock import pytest from elasticsearch.exceptions import ConnectionError from elasticsearch.exceptions import ElasticsearchException from elastalert.enhancements import BaseEnhancement from elastalert.enhancements import DropMatchException -from elastalert.kibana import dashboard_temp +from elastalert.enhancements import TimeEnhancement +from elastalert.kibana_external_url_formatter import AbsoluteKibanaExternalUrlFormatter +from elastalert.kibana_external_url_formatter import ShortKibanaExternalUrlFormatter from elastalert.util import dt_to_ts from elastalert.util import dt_to_unix from elastalert.util import dt_to_unixms -from elastalert.util import EAException from elastalert.util import ts_now from elastalert.util import ts_to_dt from elastalert.util import unix_to_dt @@ -45,7 +45,7 @@ def generate_hits(timestamps, **kwargs): for field in ['_id', '_type', '_index']: 
data['_source'][field] = data[field] hits.append(data) - return {'hits': {'total': len(hits), 'hits': hits}} + return {'hits': {'total': {'value': len(hits)}, 'hits': hits}} def assert_alerts(ea_inst, calls): @@ -91,112 +91,59 @@ def test_init_rule(ea): def test_query(ea): - ea.thread_data.current_es.search.return_value = {'hits': {'total': 0, 'hits': []}} + ea.thread_data.current_es.search.return_value = {'hits': {'total': {'value': 0}, 'hits': []}} ea.run_query(ea.rules[0], START, END) ea.thread_data.current_es.search.assert_called_with(body={ - 'query': {'filtered': { - 'filter': {'bool': {'must': [{'range': {'@timestamp': {'lte': END_TIMESTAMP, 'gt': START_TIMESTAMP}}}]}}}}, - 'sort': [{'@timestamp': {'order': 'asc'}}]}, index='idx', _source_include=['@timestamp'], - ignore_unavailable=True, - size=ea.rules[0]['max_query_size'], scroll=ea.conf['scroll_keepalive']) - - -def test_query_sixsix(ea_sixsix): - ea_sixsix.thread_data.current_es.search.return_value = {'hits': {'total': 0, 'hits': []}} - ea_sixsix.run_query(ea_sixsix.rules[0], START, END) - ea_sixsix.thread_data.current_es.search.assert_called_with(body={ 'query': {'bool': { 'filter': {'bool': {'must': [{'range': {'@timestamp': {'lte': END_TIMESTAMP, 'gt': START_TIMESTAMP}}}]}}}}, - 'sort': [{'@timestamp': {'order': 'asc'}}]}, index='idx', _source_include=['@timestamp'], + 'sort': [{'@timestamp': {'order': 'asc'}}]}, index='idx', _source_includes=['@timestamp'], ignore_unavailable=True, - size=ea_sixsix.rules[0]['max_query_size'], scroll=ea_sixsix.conf['scroll_keepalive']) + size=ea.rules[0]['max_query_size'], scroll=ea.conf['scroll_keepalive']) def test_query_with_fields(ea): ea.rules[0]['_source_enabled'] = False - ea.thread_data.current_es.search.return_value = {'hits': {'total': 0, 'hits': []}} + ea.thread_data.current_es.search.return_value = {'hits': {'total': {'value': 0}, 'hits': []}} ea.run_query(ea.rules[0], START, END) ea.thread_data.current_es.search.assert_called_with(body={ - 'query': {'filtered': { - 'filter': {'bool': {'must': [{'range': {'@timestamp': {'lte': END_TIMESTAMP, 'gt': START_TIMESTAMP}}}]}}}}, - 'sort': [{'@timestamp': {'order': 'asc'}}], 'fields': ['@timestamp']}, index='idx', ignore_unavailable=True, - size=ea.rules[0]['max_query_size'], scroll=ea.conf['scroll_keepalive']) - - -def test_query_sixsix_with_fields(ea_sixsix): - ea_sixsix.rules[0]['_source_enabled'] = False - ea_sixsix.thread_data.current_es.search.return_value = {'hits': {'total': 0, 'hits': []}} - ea_sixsix.run_query(ea_sixsix.rules[0], START, END) - ea_sixsix.thread_data.current_es.search.assert_called_with(body={ 'query': {'bool': { 'filter': {'bool': {'must': [{'range': {'@timestamp': {'lte': END_TIMESTAMP, 'gt': START_TIMESTAMP}}}]}}}}, - 'sort': [{'@timestamp': {'order': 'asc'}}], 'stored_fields': ['@timestamp']}, index='idx', - ignore_unavailable=True, - size=ea_sixsix.rules[0]['max_query_size'], scroll=ea_sixsix.conf['scroll_keepalive']) + 'sort': [{'@timestamp': {'order': 'asc'}}], 'stored_fields': ['@timestamp']}, index='idx', ignore_unavailable=True, + size=ea.rules[0]['max_query_size'], scroll=ea.conf['scroll_keepalive']) def test_query_with_unix(ea): ea.rules[0]['timestamp_type'] = 'unix' ea.rules[0]['dt_to_ts'] = dt_to_unix - ea.thread_data.current_es.search.return_value = {'hits': {'total': 0, 'hits': []}} + ea.thread_data.current_es.search.return_value = {'hits': {'total': {'value': 0}, 'hits': []}} ea.run_query(ea.rules[0], START, END) start_unix = dt_to_unix(START) end_unix = dt_to_unix(END) 
ea.thread_data.current_es.search.assert_called_with( - body={'query': {'filtered': { - 'filter': {'bool': {'must': [{'range': {'@timestamp': {'lte': end_unix, 'gt': start_unix}}}]}}}}, - 'sort': [{'@timestamp': {'order': 'asc'}}]}, index='idx', _source_include=['@timestamp'], - ignore_unavailable=True, - size=ea.rules[0]['max_query_size'], scroll=ea.conf['scroll_keepalive']) - - -def test_query_sixsix_with_unix(ea_sixsix): - ea_sixsix.rules[0]['timestamp_type'] = 'unix' - ea_sixsix.rules[0]['dt_to_ts'] = dt_to_unix - ea_sixsix.thread_data.current_es.search.return_value = {'hits': {'total': 0, 'hits': []}} - ea_sixsix.run_query(ea_sixsix.rules[0], START, END) - start_unix = dt_to_unix(START) - end_unix = dt_to_unix(END) - ea_sixsix.thread_data.current_es.search.assert_called_with( body={'query': {'bool': { 'filter': {'bool': {'must': [{'range': {'@timestamp': {'lte': end_unix, 'gt': start_unix}}}]}}}}, - 'sort': [{'@timestamp': {'order': 'asc'}}]}, index='idx', _source_include=['@timestamp'], + 'sort': [{'@timestamp': {'order': 'asc'}}]}, index='idx', _source_includes=['@timestamp'], ignore_unavailable=True, - size=ea_sixsix.rules[0]['max_query_size'], scroll=ea_sixsix.conf['scroll_keepalive']) + size=ea.rules[0]['max_query_size'], scroll=ea.conf['scroll_keepalive']) def test_query_with_unixms(ea): ea.rules[0]['timestamp_type'] = 'unixms' ea.rules[0]['dt_to_ts'] = dt_to_unixms - ea.thread_data.current_es.search.return_value = {'hits': {'total': 0, 'hits': []}} + ea.thread_data.current_es.search.return_value = {'hits': {'total': {'value': 0}, 'hits': []}} ea.run_query(ea.rules[0], START, END) start_unix = dt_to_unixms(START) end_unix = dt_to_unixms(END) ea.thread_data.current_es.search.assert_called_with( - body={'query': {'filtered': { - 'filter': {'bool': {'must': [{'range': {'@timestamp': {'lte': end_unix, 'gt': start_unix}}}]}}}}, - 'sort': [{'@timestamp': {'order': 'asc'}}]}, index='idx', _source_include=['@timestamp'], - ignore_unavailable=True, - size=ea.rules[0]['max_query_size'], scroll=ea.conf['scroll_keepalive']) - - -def test_query_sixsix_with_unixms(ea_sixsix): - ea_sixsix.rules[0]['timestamp_type'] = 'unixms' - ea_sixsix.rules[0]['dt_to_ts'] = dt_to_unixms - ea_sixsix.thread_data.current_es.search.return_value = {'hits': {'total': 0, 'hits': []}} - ea_sixsix.run_query(ea_sixsix.rules[0], START, END) - start_unix = dt_to_unixms(START) - end_unix = dt_to_unixms(END) - ea_sixsix.thread_data.current_es.search.assert_called_with( body={'query': {'bool': { 'filter': {'bool': {'must': [{'range': {'@timestamp': {'lte': end_unix, 'gt': start_unix}}}]}}}}, - 'sort': [{'@timestamp': {'order': 'asc'}}]}, index='idx', _source_include=['@timestamp'], + 'sort': [{'@timestamp': {'order': 'asc'}}]}, index='idx', _source_includes=['@timestamp'], ignore_unavailable=True, - size=ea_sixsix.rules[0]['max_query_size'], scroll=ea_sixsix.conf['scroll_keepalive']) + size=ea.rules[0]['max_query_size'], scroll=ea.conf['scroll_keepalive']) def test_no_hits(ea): - ea.thread_data.current_es.search.return_value = {'hits': {'total': 0, 'hits': []}} + ea.thread_data.current_es.search.return_value = {'hits': {'total': {'value': 0}, 'hits': []}} ea.run_query(ea.rules[0], START, END) assert ea.rules[0]['type'].add_data.call_count == 0 @@ -204,8 +151,7 @@ def test_no_hits(ea): def test_no_terms_hits(ea): ea.rules[0]['use_terms_query'] = True ea.rules[0]['query_key'] = 'QWERTY' - ea.rules[0]['doc_type'] = 'uiop' - ea.thread_data.current_es.deprecated_search.return_value = {'hits': {'total': 0, 'hits': []}} + 
ea.thread_data.current_es.search.return_value = {'hits': {'total': {'value': 0}, 'hits': []}} ea.run_query(ea.rules[0], START, END) assert ea.rules[0]['type'].add_terms_data.call_count == 0 @@ -300,7 +246,6 @@ def test_query_exception(ea): def test_query_exception_count_query(ea): ea.rules[0]['use_count_query'] = True - ea.rules[0]['doc_type'] = 'blahblahblahblah' mock_es = mock.Mock() mock_es.count.side_effect = ElasticsearchException run_rule_query_exception(ea, mock_es) @@ -322,16 +267,16 @@ def test_match_with_module_from_pending(ea): pending_alert = {'match_body': {'foo': 'bar'}, 'rule_name': ea.rules[0]['name'], 'alert_time': START_TIMESTAMP, '@timestamp': START_TIMESTAMP} # First call, return the pending alert, second, no associated aggregated alerts - ea.writeback_es.deprecated_search.side_effect = [{'hits': {'hits': [{'_id': 'ABCD', '_index': 'wb', '_source': pending_alert}]}}, - {'hits': {'hits': []}}] + ea.writeback_es.search.side_effect = [{'hits': {'hits': [{'_id': 'ABCD', '_index': 'wb', '_source': pending_alert}]}}, + {'hits': {'hits': []}}] ea.send_pending_alerts() assert mod.process.call_count == 0 # If aggregation is set, enhancement IS called pending_alert = {'match_body': {'foo': 'bar'}, 'rule_name': ea.rules[0]['name'], 'alert_time': START_TIMESTAMP, '@timestamp': START_TIMESTAMP} - ea.writeback_es.deprecated_search.side_effect = [{'hits': {'hits': [{'_id': 'ABCD', '_index': 'wb', '_source': pending_alert}]}}, - {'hits': {'hits': []}}] + ea.writeback_es.search.side_effect = [{'hits': {'hits': [{'_id': 'ABCD', '_index': 'wb', '_source': pending_alert}]}}, + {'hits': {'hits': []}}] ea.rules[0]['aggregation'] = datetime.timedelta(minutes=10) ea.send_pending_alerts() assert mod.process.call_count == 1 @@ -409,10 +354,10 @@ def test_agg_matchtime(ea): # First call - Find all pending alerts (only entries without agg_id) # Second call - Find matches with agg_id == 'ABCD' # Third call - Find matches with agg_id == 'CDEF' - ea.writeback_es.deprecated_search.side_effect = [{'hits': {'hits': [{'_id': 'ABCD', '_index': 'wb', '_source': call1}, - {'_id': 'CDEF', '_index': 'wb', '_source': call3}]}}, - {'hits': {'hits': [{'_id': 'BCDE', '_index': 'wb', '_source': call2}]}}, - {'hits': {'total': 0, 'hits': []}}] + ea.writeback_es.search.side_effect = [{'hits': {'hits': [{'_id': 'ABCD', '_index': 'wb', '_source': call1}, + {'_id': 'CDEF', '_index': 'wb', '_source': call3}]}}, + {'hits': {'hits': [{'_id': 'BCDE', '_index': 'wb', '_source': call2}]}}, + {'hits': {'total': 0, 'hits': []}}] with mock.patch('elastalert.elastalert.elasticsearch_client') as mock_es: ea.send_pending_alerts() @@ -421,15 +366,15 @@ def test_agg_matchtime(ea): assert mock_es.call_count == 2 assert_alerts(ea, [hits_timestamps[:2], hits_timestamps[2:]]) - call1 = ea.writeback_es.deprecated_search.call_args_list[7][1]['body'] - call2 = ea.writeback_es.deprecated_search.call_args_list[8][1]['body'] - call3 = ea.writeback_es.deprecated_search.call_args_list[9][1]['body'] - call4 = ea.writeback_es.deprecated_search.call_args_list[10][1]['body'] + call1 = ea.writeback_es.search.call_args_list[7][1]['body'] + call2 = ea.writeback_es.search.call_args_list[8][1]['body'] + call3 = ea.writeback_es.search.call_args_list[9][1]['body'] + call4 = ea.writeback_es.search.call_args_list[10][1]['body'] - assert 'alert_time' in call2['filter']['range'] - assert call3['query']['query_string']['query'] == 'aggregate_id:ABCD' - assert call4['query']['query_string']['query'] == 'aggregate_id:CDEF' - assert 
ea.writeback_es.deprecated_search.call_args_list[9][1]['size'] == 1337 + assert 'alert_time' in call2['query']['bool']['filter']['range'] + assert call3['query']['query_string']['query'] == 'aggregate_id:"ABCD"' + assert call4['query']['query_string']['query'] == 'aggregate_id:"CDEF"' + assert ea.writeback_es.search.call_args_list[9][1]['size'] == 1337 def test_agg_not_matchtime(ea): @@ -577,10 +522,10 @@ def test_agg_with_aggregation_key(ea): # First call - Find all pending alerts (only entries without agg_id) # Second call - Find matches with agg_id == 'ABCD' # Third call - Find matches with agg_id == 'CDEF' - ea.writeback_es.deprecated_search.side_effect = [{'hits': {'hits': [{'_id': 'ABCD', '_index': 'wb', '_source': call1}, - {'_id': 'CDEF', '_index': 'wb', '_source': call2}]}}, - {'hits': {'hits': [{'_id': 'BCDE', '_index': 'wb', '_source': call3}]}}, - {'hits': {'total': 0, 'hits': []}}] + ea.writeback_es.search.side_effect = [{'hits': {'hits': [{'_id': 'ABCD', '_index': 'wb', '_source': call1}, + {'_id': 'CDEF', '_index': 'wb', '_source': call2}]}}, + {'hits': {'hits': [{'_id': 'BCDE', '_index': 'wb', '_source': call3}]}}, + {'hits': {'total': 0, 'hits': []}}] with mock.patch('elastalert.elastalert.elasticsearch_client') as mock_es: mock_es.return_value = ea.thread_data.current_es @@ -590,15 +535,15 @@ def test_agg_with_aggregation_key(ea): assert mock_es.call_count == 2 assert_alerts(ea, [[hits_timestamps[0], hits_timestamps[2]], [hits_timestamps[1]]]) - call1 = ea.writeback_es.deprecated_search.call_args_list[7][1]['body'] - call2 = ea.writeback_es.deprecated_search.call_args_list[8][1]['body'] - call3 = ea.writeback_es.deprecated_search.call_args_list[9][1]['body'] - call4 = ea.writeback_es.deprecated_search.call_args_list[10][1]['body'] + call1 = ea.writeback_es.search.call_args_list[7][1]['body'] + call2 = ea.writeback_es.search.call_args_list[8][1]['body'] + call3 = ea.writeback_es.search.call_args_list[9][1]['body'] + call4 = ea.writeback_es.search.call_args_list[10][1]['body'] - assert 'alert_time' in call2['filter']['range'] - assert call3['query']['query_string']['query'] == 'aggregate_id:ABCD' - assert call4['query']['query_string']['query'] == 'aggregate_id:CDEF' - assert ea.writeback_es.deprecated_search.call_args_list[9][1]['size'] == 1337 + assert 'alert_time' in call2['query']['bool']['filter']['range'] + assert call3['query']['query_string']['query'] == 'aggregate_id:"ABCD"' + assert call4['query']['query_string']['query'] == 'aggregate_id:"CDEF"' + assert ea.writeback_es.search.call_args_list[9][1]['size'] == 1337 def test_silence(ea): @@ -757,7 +702,6 @@ def test_realert_with_nested_query_key(ea): def test_count(ea): ea.rules[0]['use_count_query'] = True - ea.rules[0]['doc_type'] = 'doctype' with mock.patch('elastalert.elastalert.elasticsearch_client'), \ mock.patch.object(ea, 'get_hits_count') as mock_hits: ea.run_rule(ea.rules[0], END, START) @@ -907,7 +851,6 @@ def test_set_starttime(ea): # Count query, starttime, no previous endtime ea.rules[0]['use_count_query'] = True - ea.rules[0]['doc_type'] = 'blah' with mock.patch.object(ea, 'get_starttime') as mock_gs: mock_gs.return_value = None ea.set_starttime(ea.rules[0], end) @@ -933,8 +876,7 @@ def test_set_starttime(ea): ea.set_starttime(ea.rules[0], end) assert ea.rules[0]['starttime'] == end - ea.buffer_time - # scan_entire_timeframe - ea.rules[0].pop('previous_endtime') + # scan_entire_timeframe without use_count_query or use_terms_query ea.rules[0].pop('starttime') ea.rules[0]['timeframe'] = 
datetime.timedelta(days=3) ea.rules[0]['scan_entire_timeframe'] = True @@ -943,52 +885,26 @@ def test_set_starttime(ea): ea.set_starttime(ea.rules[0], end) assert ea.rules[0]['starttime'] == end - datetime.timedelta(days=3) + # scan_entire_timeframe with use_count_query, first run + ea.rules[0].pop('starttime') + ea.rules[0]['timeframe'] = datetime.timedelta(days=3) + ea.rules[0]['scan_entire_timeframe'] = True + ea.rules[0]['use_count_query'] = True + with mock.patch.object(ea, 'get_starttime') as mock_gs: + mock_gs.return_value = None + ea.set_starttime(ea.rules[0], end) + assert ea.rules[0]['starttime'] == end - datetime.timedelta(days=3) -def test_kibana_dashboard(ea): - match = {'@timestamp': '2014-10-11T00:00:00'} - mock_es = mock.Mock() - ea.rules[0]['use_kibana_dashboard'] = 'my dashboard' - with mock.patch('elastalert.elastalert.elasticsearch_client') as mock_es_init: - mock_es_init.return_value = mock_es - - # No dashboard found - mock_es.deprecated_search.return_value = {'hits': {'total': 0, 'hits': []}} - with pytest.raises(EAException): - ea.use_kibana_link(ea.rules[0], match) - mock_call = mock_es.deprecated_search.call_args_list[0][1] - assert mock_call['body'] == {'query': {'term': {'_id': 'my dashboard'}}} - - # Dashboard found - mock_es.index.return_value = {'_id': 'ABCDEFG'} - mock_es.deprecated_search.return_value = {'hits': {'hits': [{'_source': {'dashboard': json.dumps(dashboard_temp)}}]}} - url = ea.use_kibana_link(ea.rules[0], match) - assert 'ABCDEFG' in url - db = json.loads(mock_es.index.call_args_list[0][1]['body']['dashboard']) - assert 'anytest' in db['title'] - - # Query key filtering added - ea.rules[0]['query_key'] = 'foobar' - match['foobar'] = 'baz' - url = ea.use_kibana_link(ea.rules[0], match) - db = json.loads(mock_es.index.call_args_list[-1][1]['body']['dashboard']) - assert db['services']['filter']['list']['1']['field'] == 'foobar' - assert db['services']['filter']['list']['1']['query'] == '"baz"' - - # Compound query key - ea.rules[0]['query_key'] = 'foo,bar' - ea.rules[0]['compound_query_key'] = ['foo', 'bar'] - match['foo'] = 'cat' - match['bar'] = 'dog' - match['foo,bar'] = 'cat, dog' - url = ea.use_kibana_link(ea.rules[0], match) - db = json.loads(mock_es.index.call_args_list[-1][1]['body']['dashboard']) - found_filters = 0 - for filter_id, filter_dict in list(db['services']['filter']['list'].items()): - if (filter_dict['field'] == 'foo' and filter_dict['query'] == '"cat"') or \ - (filter_dict['field'] == 'bar' and filter_dict['query'] == '"dog"'): - found_filters += 1 - continue - assert found_filters == 2 + # scan_entire_timeframe with use_count_query, subsequent run + ea.rules[0].pop('starttime') + ea.rules[0]['timeframe'] = datetime.timedelta(days=3) + ea.rules[0]['scan_entire_timeframe'] = True + ea.rules[0]['use_count_query'] = True + ea.rules[0]['previous_endtime'] = end + with mock.patch.object(ea, 'get_starttime') as mock_gs: + mock_gs.return_value = None + ea.set_starttime(ea.rules[0], end) + assert ea.rules[0]['starttime'] == end - datetime.timedelta(days=3) def test_rule_changes(ea): @@ -1094,17 +1010,15 @@ def test_count_keys(ea): ea.rules[0]['timeframe'] = datetime.timedelta(minutes=60) ea.rules[0]['top_count_keys'] = ['this', 'that'] ea.rules[0]['type'].matches = {'@timestamp': END} - ea.rules[0]['doc_type'] = 'blah' - buckets = [{'aggregations': { - 'filtered': {'counts': {'buckets': [{'key': 'a', 'doc_count': 10}, {'key': 'b', 'doc_count': 5}]}}}}, - {'aggregations': {'filtered': { - 'counts': {'buckets': [{'key': 'd', 
'doc_count': 10}, {'key': 'c', 'doc_count': 12}]}}}}] - ea.thread_data.current_es.deprecated_search.side_effect = buckets + buckets = [{'aggregations': + {'counts': {'buckets': [{'key': 'a', 'doc_count': 10}, {'key': 'b', 'doc_count': 5}]}}}, + {'aggregations': + {'counts': {'buckets': [{'key': 'd', 'doc_count': 10}, {'key': 'c', 'doc_count': 12}]}}}] + ea.thread_data.current_es.search.side_effect = buckets counts = ea.get_top_counts(ea.rules[0], START, END, ['this', 'that']) - calls = ea.thread_data.current_es.deprecated_search.call_args_list - assert calls[0][1]['search_type'] == 'count' - assert calls[0][1]['body']['aggs']['filtered']['aggs']['counts']['terms'] == {'field': 'this', 'size': 5, - 'min_doc_count': 1} + calls = ea.thread_data.current_es.search.call_args_list + assert calls[0][1]['body']['aggs']['counts']['terms'] == {'field': 'this', 'size': 5, + 'min_doc_count': 1} assert counts['top_events_this'] == {'a': 10, 'b': 5} assert counts['top_events_that'] == {'d': 10, 'c': 12} @@ -1159,7 +1073,7 @@ def test_wait_until_responsive(ea): ] -def test_wait_until_responsive_timeout_es_not_available(ea, capsys): +def test_wait_until_responsive_timeout_es_not_available(ea, caplog): """Bail out if ElasticSearch doesn't (quickly) become responsive.""" # Never becomes responsive :-) @@ -1175,8 +1089,8 @@ def test_wait_until_responsive_timeout_es_not_available(ea, capsys): assert exc.value.code == 1 # Ensure we get useful diagnostics. - output, errors = capsys.readouterr() - assert 'Could not reach ElasticSearch at "es:14900".' in errors + user, level, message = caplog.record_tuples[0] + assert 'Could not reach ElasticSearch at "es:14900".' in message # Slept until we passed the deadline. sleep.mock_calls == [ @@ -1186,7 +1100,7 @@ def test_wait_until_responsive_timeout_es_not_available(ea, capsys): ] -def test_wait_until_responsive_timeout_index_does_not_exist(ea, capsys): +def test_wait_until_responsive_timeout_index_does_not_exist(ea, caplog): """Bail out if ElasticSearch doesn't (quickly) become responsive.""" # Never becomes responsive :-) @@ -1202,8 +1116,8 @@ def test_wait_until_responsive_timeout_index_does_not_exist(ea, capsys): assert exc.value.code == 1 # Ensure we get useful diagnostics. - output, errors = capsys.readouterr() - assert 'Writeback alias "wb_a" does not exist, did you run `elastalert-create-index`?' in errors + user, level, message = caplog.record_tuples[0] + assert 'Writeback index "wb" does not exist, did you run `elastalert-create-index`?' in message # Slept until we passed the deadline. 
sleep.mock_calls == [ @@ -1343,18 +1257,7 @@ def test_query_with_whitelist_filter_es(ea): new_rule = copy.copy(ea.rules[0]) ea.init_rule(new_rule, True) assert 'NOT username:"xudan1" AND NOT username:"xudan12" AND NOT username:"aa1"' \ - in new_rule['filter'][-1]['query']['query_string']['query'] - - -def test_query_with_whitelist_filter_es_five(ea_sixsix): - ea_sixsix.rules[0]['_source_enabled'] = False - ea_sixsix.rules[0]['filter'] = [{'query_string': {'query': 'baz'}}] - ea_sixsix.rules[0]['compare_key'] = "username" - ea_sixsix.rules[0]['whitelist'] = ['xudan1', 'xudan12', 'aa1', 'bb1'] - new_rule = copy.copy(ea_sixsix.rules[0]) - ea_sixsix.init_rule(new_rule, True) - assert 'NOT username:"xudan1" AND NOT username:"xudan12" AND NOT username:"aa1"' in \ - new_rule['filter'][-1]['query_string']['query'] + in new_rule['filter'][-1]['query_string']['query'] def test_query_with_blacklist_filter_es(ea): @@ -1365,16 +1268,117 @@ def test_query_with_blacklist_filter_es(ea): new_rule = copy.copy(ea.rules[0]) ea.init_rule(new_rule, True) assert 'username:"xudan1" OR username:"xudan12" OR username:"aa1"' in \ - new_rule['filter'][-1]['query']['query_string']['query'] - - -def test_query_with_blacklist_filter_es_five(ea_sixsix): - ea_sixsix.rules[0]['_source_enabled'] = False - ea_sixsix.rules[0]['filter'] = [{'query_string': {'query': 'baz'}}] - ea_sixsix.rules[0]['compare_key'] = "username" - ea_sixsix.rules[0]['blacklist'] = ['xudan1', 'xudan12', 'aa1', 'bb1'] - ea_sixsix.rules[0]['blacklist'] = ['xudan1', 'xudan12', 'aa1', 'bb1'] - new_rule = copy.copy(ea_sixsix.rules[0]) - ea_sixsix.init_rule(new_rule, True) - assert 'username:"xudan1" OR username:"xudan12" OR username:"aa1"' in new_rule['filter'][-1]['query_string'][ - 'query'] + new_rule['filter'][-1]['query_string']['query'] + + +def test_handle_rule_execution_error(ea, caplog): + with mock.patch('elastalert.elastalert.elasticsearch_client'): + ea.rules[0]['aggregate_by_match_time'] = True + ea.rules[0]['summary_table_fields'] = ['@timestamp'] + ea.rules[0]['aggregation_key'] = ['service.name'] + ea.rules[0]['alert_text_type'] = 'aggregation_summary_only' + ea.rules[0]['query_delay'] = 'a' + new_rule = copy.copy(ea.rules[0]) + ea.init_rule(new_rule, True) + + ea.handle_rule_execution(ea.rules[0]) + user, level, message = caplog.record_tuples[0] + assert '[handle_rule_execution]Error parsing query_delay send time format' in message + + +def test_remove_old_events_error(ea, caplog): + with mock.patch('elastalert.elastalert.elasticsearch_client'): + ea.rules[0]['aggregate_by_match_time'] = True + ea.rules[0]['summary_table_fields'] = ['@timestamp'] + ea.rules[0]['aggregation_key'] = ['service.name'] + ea.rules[0]['alert_text_type'] = 'aggregation_summary_only' + ea.rules[0]['query_delay'] = 'a' + new_rule = copy.copy(ea.rules[0]) + ea.init_rule(new_rule, True) + + ea.remove_old_events(ea.rules[0]) + user, level, message = caplog.record_tuples[0] + assert '[remove_old_events]Error parsing query_delay send time format' in message + + +def test_add_aggregated_alert_error(ea, caplog): + mod = BaseEnhancement(ea.rules[0]) + mod.process = mock.Mock() + ea.rules[0]['match_enhancements'] = [mod] + ea.rules[0]['aggregation'] = {"hour": 5} + ea.rules[0]['run_enhancements_first'] = True + ea.rules[0]['aggregate_by_match_time'] = True + hits = generate_hits([START_TIMESTAMP, END_TIMESTAMP]) + ea.thread_data.current_es.search.return_value = hits + ea.rules[0]['type'].matches = [{'@timestamp': END}] + with 
mock.patch('elastalert.elastalert.elasticsearch_client'): + ea.run_rule(ea.rules[0], END, START) + user, level, message = caplog.record_tuples[0] + exceptd = "[add_aggregated_alert]" + exceptd += "Error parsing aggregate send time format unsupported operand type(s) for +: 'datetime.datetime' and 'dict'" + assert exceptd in message + + +def test_get_elasticsearch_client_same_rule(ea): + x = ea.get_elasticsearch_client(ea.rules[0]) + y = ea.get_elasticsearch_client(ea.rules[0]) + assert x is y, "Should return same client for the same rule" + + +def test_get_elasticsearch_client_different_rule(ea): + x_rule = ea.rules[0] + x = ea.get_elasticsearch_client(x_rule) + + y_rule = copy.copy(x_rule) + y_rule['name'] = 'different_rule' + y = ea.get_elasticsearch_client(y_rule) + + assert x is not y, 'Should return unique client for each rule' + + +def test_base_enhancement_process_error(ea): + try: + be = BaseEnhancement(ea.rules[0]) + be.process('') + assert False + except NotImplementedError: + assert True + + +def test_time_enhancement(ea): + be = BaseEnhancement(ea.rules[0]) + te = TimeEnhancement(be) + match = { + '@timestamp': '2021-01-01T00:00:00', + 'somefield': 'foobarbaz' + } + te.process(match) + excepted = '2021-01-01 00:00 UTC' + assert match['@timestamp'] == excepted + + +def test_get_kibana_discover_external_url_formatter_same_rule(ea): + rule = ea.rules[0] + x = ea.get_kibana_discover_external_url_formatter(rule) + y = ea.get_kibana_discover_external_url_formatter(rule) + assert type(x) is AbsoluteKibanaExternalUrlFormatter + assert x is y, "Should return same external url formatter for the same rule" + + +def test_get_kibana_discover_external_url_formatter_different_rule(ea): + x_rule = ea.rules[0] + y_rule = copy.copy(x_rule) + y_rule['name'] = 'different_rule' + x = ea.get_kibana_discover_external_url_formatter(x_rule) + y = ea.get_kibana_discover_external_url_formatter(y_rule) + assert type(x) is AbsoluteKibanaExternalUrlFormatter + assert x is not y, 'Should return unique external url formatter for each rule' + + +def test_get_kibana_discover_external_url_formatter_smoke(ea): + rule = copy.copy(ea.rules[0]) + rule['kibana_discover_security_tenant'] = 'global' + rule['shorten_kibana_discover_url'] = True + formatter = ea.get_kibana_discover_external_url_formatter(rule) + assert type(formatter) is ShortKibanaExternalUrlFormatter + assert formatter.security_tenant == 'global' diff --git a/tests/config_test.py b/tests/config_test.py new file mode 100644 index 000000000..5e95a39c0 --- /dev/null +++ b/tests/config_test.py @@ -0,0 +1,241 @@ +# -*- coding: utf-8 -*- +import datetime +import logging +import os +import pytest + +from elastalert.config import load_conf +from elastalert.util import EAException + +from unittest import mock + + +def test_config_loads(): + os.environ['ELASTIC_PASS'] = 'password_from_env' + dir_path = os.path.dirname(os.path.realpath(__file__)) + + test_args = mock.Mock() + test_args.config = dir_path + '/example.config.yaml' + test_args.rule = None + test_args.debug = False + test_args.es_debug_trace = None + + conf = load_conf(test_args) + + assert conf['rules_folder'] == '/opt/elastalert/rules' + assert conf['run_every'] == datetime.timedelta(seconds=10) + assert conf['buffer_time'] == datetime.timedelta(minutes=15) + + assert conf['es_host'] == 'elasticsearch' + assert conf['es_port'] == 9200 + + assert conf['es_username'] == 'elastic' + assert conf['es_password'] == 'password_from_env' + + assert conf['writeback_index'] == 'elastalert_status' + + assert 
conf['alert_time_limit'] == datetime.timedelta(days=2) + + +def test_config_defaults(): + dir_path = os.path.dirname(os.path.realpath(__file__)) + + test_args = mock.Mock() + test_args.config = dir_path + '/example.config.yaml' + test_args.rule = None + test_args.debug = False + test_args.es_debug_trace = None + + conf = load_conf( + test_args, + defaults={ + 'new_key': 'new_value', + 'rules_folder': '/new/rules/folder' + } + ) + + assert conf['new_key'] == 'new_value' + assert conf['rules_folder'] == '/opt/elastalert/rules' + + +def test_config_overrides(): + dir_path = os.path.dirname(os.path.realpath(__file__)) + + test_args = mock.Mock() + test_args.config = dir_path + '/example.config.yaml' + test_args.rule = None + test_args.debug = False + test_args.es_debug_trace = None + + conf = load_conf( + test_args, + overrides={ + 'new_key': 'new_value', + 'rules_folder': '/new/rules/folder' + } + ) + + assert conf['new_key'] == 'new_value' + assert conf['rules_folder'] == '/new/rules/folder' + + +def test_config_loads_ea_execption(): + with pytest.raises(EAException) as ea: + os.environ['ELASTIC_PASS'] = 'password_from_env' + + test_args = mock.Mock() + test_args.config = '' + test_args.rule = None + test_args.debug = False + test_args.es_debug_trace = None + + load_conf(test_args) + + assert 'No --config or config.yaml found' in str(ea) + + +@pytest.mark.parametrize('config, expected', [ + ('/example.config.type_error.run_every.yaml', 'Invalid time format used: '), + ('/example.config.type_error.buffer_time.yaml', 'Invalid time format used: ') +]) +def test_config_loads_type_error(config, expected): + with pytest.raises(EAException) as ea: + os.environ['ELASTIC_PASS'] = 'password_from_env' + dir_path = os.path.dirname(os.path.realpath(__file__)) + + test_args = mock.Mock() + test_args.config = dir_path + config + test_args.rule = None + test_args.debug = False + test_args.es_debug_trace = None + + load_conf(test_args) + + assert expected in str(ea) + + +@pytest.mark.parametrize('config, expected', [ + ('/example.config.not_found.run_every.yaml', 'must contain '), + ('/example.config.not_found.es_host.yaml', 'must contain '), + ('/example.config.not_found.es_port.yaml', 'must contain '), + ('/example.config.not_found.writeback_index.yaml', 'must contain '), + ('/example.config.not_found.buffer_time.yaml', 'must contain ') +]) +def test_config_loads_required_globals_error(config, expected): + with pytest.raises(EAException) as ea: + os.environ['ELASTIC_PASS'] = 'password_from_env' + dir_path = os.path.dirname(os.path.realpath(__file__)) + + test_args = mock.Mock() + test_args.config = dir_path + config + test_args.rule = None + test_args.debug = False + test_args.verbose = None + test_args.es_debug_trace = None + + load_conf(test_args) + + assert expected in str(ea) + + +def test_config_loads_debug(caplog): + caplog.set_level(logging.INFO) + os.environ['ELASTIC_PASS'] = 'password_from_env' + dir_path = os.path.dirname(os.path.realpath(__file__)) + + test_args = mock.Mock() + test_args.config = dir_path + '/example.config.yaml' + test_args.rule = None + test_args.debug = True + test_args.verbose = None + test_args.es_debug_trace = None + + load_conf(test_args) + + expected_msg = 'Note: In debug mode, alerts will be logged to console but NOT actually sent.\n' + expected_msg += ' To send them but remain verbose, use --verbose instead.' 
+ assert ('elastalert', logging.INFO, expected_msg) == caplog.record_tuples[0] + + +def test_config_loads_debug_and_verbose(caplog): + caplog.set_level(logging.INFO) + os.environ['ELASTIC_PASS'] = 'password_from_env' + dir_path = os.path.dirname(os.path.realpath(__file__)) + + test_args = mock.Mock() + test_args.config = dir_path + '/example.config.yaml' + test_args.rule = None + test_args.debug = True + test_args.verbose = True + test_args.es_debug_trace = None + + load_conf(test_args) + + expected_msg = 'Note: --debug and --verbose flags are set. --debug takes precedent.' + assert ('elastalert', logging.INFO, expected_msg) == caplog.record_tuples[0] + + +def test_config_loads_old_query_limit(): + os.environ['ELASTIC_PASS'] = 'password_from_env' + dir_path = os.path.dirname(os.path.realpath(__file__)) + + test_args = mock.Mock() + test_args.config = dir_path + '/example.config.old_query_limit.yaml' + test_args.rule = None + test_args.debug = False + test_args.es_debug_trace = None + + conf = load_conf(test_args) + + assert conf['rules_folder'] == '/opt/elastalert/rules' + assert conf['run_every'] == datetime.timedelta(seconds=10) + assert conf['buffer_time'] == datetime.timedelta(minutes=15) + assert conf['es_host'] == 'elasticsearch' + assert conf['es_port'] == 9200 + assert conf['es_username'] == 'elastic' + assert conf['es_password'] == 'password_from_env' + assert conf['writeback_index'] == 'elastalert_status' + assert conf['alert_time_limit'] == datetime.timedelta(days=2) + assert conf['old_query_limit'] == datetime.timedelta(days=3) + + +def test_config_loads_logging(capfd): + os.environ['ELASTIC_PASS'] = 'password_from_env' + dir_path = os.path.dirname(os.path.realpath(__file__)) + + test_args = mock.Mock() + test_args.config = dir_path + '/example.config.logging.yaml' + test_args.rule = None + test_args.debug = True + test_args.verbose = True + test_args.es_debug_trace = None + + load_conf(test_args) + + expected1 = 'Note: --debug and --verbose flags are set. --debug takes precedent.' + expected2 = 'Note: In debug mode, alerts will be logged to console but NOT actually sent.\n' + expected3 = ' To send them but remain verbose, use --verbose instead.\n' + out, err = capfd.readouterr() + assert expected1 in err + assert expected2 in err + assert expected3 in err + + +def test_config_loads_logging2(caplog): + os.environ['ELASTIC_PASS'] = 'password_from_env' + dir_path = os.path.dirname(os.path.realpath(__file__)) + + test_args = mock.Mock() + test_args.config = dir_path + '/example.config.yaml' + test_args.rule = None + test_args.debug = True + test_args.verbose = False + test_args.es_debug_trace = None + + load_conf(test_args) + + expected1 = 'Note: In debug mode, alerts will be logged to console but NOT actually sent.' + expected2 = ' To send them but remain verbose, use --verbose instead.' 
+ user, level, message = caplog.record_tuples[0] + assert expected1 in message + assert expected2 in message diff --git a/tests/conftest.py b/tests/conftest.py index 6844296ee..389a48511 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -3,7 +3,7 @@ import logging import os -import mock +from unittest import mock import pytest import elastalert.elastalert @@ -63,7 +63,6 @@ def __init__(self, host='es', port=14900): self.port = port self.return_hits = [] self.search = mock.Mock() - self.deprecated_search = mock.Mock() self.create = mock.Mock() self.index = mock.Mock() self.delete = mock.Mock() @@ -71,48 +70,10 @@ def __init__(self, host='es', port=14900): self.ping = mock.Mock(return_value=True) self.indices = mock_es_indices_client() self.es_version = mock.Mock(return_value='2.0') - self.is_atleastfive = mock.Mock(return_value=False) - self.is_atleastsix = mock.Mock(return_value=False) - self.is_atleastsixtwo = mock.Mock(return_value=False) - self.is_atleastsixsix = mock.Mock(return_value=False) - self.is_atleastseven = mock.Mock(return_value=False) + self.is_atleastseven = mock.Mock(return_value=True) self.resolve_writeback_index = mock.Mock(return_value=writeback_index) -class mock_es_sixsix_client(object): - def __init__(self, host='es', port=14900): - self.host = host - self.port = port - self.return_hits = [] - self.search = mock.Mock() - self.deprecated_search = mock.Mock() - self.create = mock.Mock() - self.index = mock.Mock() - self.delete = mock.Mock() - self.info = mock.Mock(return_value={'status': 200, 'name': 'foo', 'version': {'number': '6.6.0'}}) - self.ping = mock.Mock(return_value=True) - self.indices = mock_es_indices_client() - self.es_version = mock.Mock(return_value='6.6.0') - self.is_atleastfive = mock.Mock(return_value=True) - self.is_atleastsix = mock.Mock(return_value=True) - self.is_atleastsixtwo = mock.Mock(return_value=False) - self.is_atleastsixsix = mock.Mock(return_value=True) - self.is_atleastseven = mock.Mock(return_value=False) - - def writeback_index_side_effect(index, doc_type): - if doc_type == 'silence': - return index + '_silence' - elif doc_type == 'past_elastalert': - return index + '_past' - elif doc_type == 'elastalert_status': - return index + '_status' - elif doc_type == 'elastalert_error': - return index + '_error' - return index - - self.resolve_writeback_index = mock.Mock(side_effect=writeback_index_side_effect) - - class mock_rule_loader(object): def __init__(self, conf): self.base_config = conf @@ -150,6 +111,7 @@ def ea(): 'include': ['@timestamp'], 'aggregation': datetime.timedelta(0), 'realert': datetime.timedelta(0), + 'realert_key': 'anytest', 'processed_hits': {}, 'timestamp_field': '@timestamp', 'match_enhancements': [], @@ -165,13 +127,16 @@ def ea(): 'alert_time_limit': datetime.timedelta(hours=24), 'es_host': 'es', 'es_port': 14900, + 'kibana_adapter': '', + 'kibana_adapter_port': 14900, 'writeback_index': 'wb', - 'writeback_alias': 'wb_a', 'rules': rules, 'max_query_size': 10000, 'old_query_limit': datetime.timedelta(weeks=1), 'disable_rules_on_error': False, - 'scroll_keepalive': '30s'} + 'scroll_keepalive': '30s', + 'query_endpoint': 'http://localhost:9999/v2/sherlock-alerts/traces/visualize', + 'custom_pretty_ts_format': '%Y-%m-%d %H:%M'} elastalert.util.elasticsearch_client = mock_es_client conf['rules_loader'] = mock_rule_loader(conf) elastalert.elastalert.elasticsearch_client = mock_es_client @@ -185,7 +150,6 @@ def ea(): ea.rules[0]['alert'] = [mock_alert()] ea.writeback_es = mock_es_client() 
ea.writeback_es.search.return_value = {'hits': {'hits': []}, 'total': 0} - ea.writeback_es.deprecated_search.return_value = {'hits': {'hits': []}} ea.writeback_es.index.return_value = {'_id': 'ABCD', 'created': True} ea.current_es = mock_es_client('', '') ea.thread_data.current_es = ea.current_es @@ -194,57 +158,6 @@ def ea(): return ea -@pytest.fixture -def ea_sixsix(): - rules = [{'es_host': '', - 'es_port': 14900, - 'name': 'anytest', - 'index': 'idx', - 'filter': [], - 'include': ['@timestamp'], - 'run_every': datetime.timedelta(seconds=1), - 'aggregation': datetime.timedelta(0), - 'realert': datetime.timedelta(0), - 'processed_hits': {}, - 'timestamp_field': '@timestamp', - 'match_enhancements': [], - 'rule_file': 'blah.yaml', - 'max_query_size': 10000, - 'ts_to_dt': ts_to_dt, - 'dt_to_ts': dt_to_ts, - '_source_enabled': True}] - conf = {'rules_folder': 'rules', - 'run_every': datetime.timedelta(minutes=10), - 'buffer_time': datetime.timedelta(minutes=5), - 'alert_time_limit': datetime.timedelta(hours=24), - 'es_host': 'es', - 'es_port': 14900, - 'writeback_index': writeback_index, - 'writeback_alias': 'wb_a', - 'rules': rules, - 'max_query_size': 10000, - 'old_query_limit': datetime.timedelta(weeks=1), - 'disable_rules_on_error': False, - 'scroll_keepalive': '30s'} - conf['rules_loader'] = mock_rule_loader(conf) - elastalert.elastalert.elasticsearch_client = mock_es_sixsix_client - elastalert.util.elasticsearch_client = mock_es_sixsix_client - with mock.patch('elastalert.elastalert.load_conf') as load_conf: - with mock.patch('elastalert.elastalert.BackgroundScheduler'): - load_conf.return_value = conf - conf['rules_loader'].load.return_value = rules - conf['rules_loader'].get_hashes.return_value = {} - ea_sixsix = elastalert.elastalert.ElastAlerter(['--pin_rules']) - ea_sixsix.rules[0]['type'] = mock_ruletype() - ea_sixsix.rules[0]['alert'] = [mock_alert()] - ea_sixsix.writeback_es = mock_es_sixsix_client() - ea_sixsix.writeback_es.search.return_value = {'hits': {'hits': []}} - ea_sixsix.writeback_es.deprecated_search.return_value = {'hits': {'hits': []}} - ea_sixsix.writeback_es.index.return_value = {'_id': 'ABCD'} - ea_sixsix.current_es = mock_es_sixsix_client('', -1) - return ea_sixsix - - @pytest.fixture(scope='function') def environ(): """py.test fixture to get a fresh mutable environment.""" diff --git a/tests/create_index_test.py b/tests/create_index_test.py index 47a6247dc..ab053a2e9 100644 --- a/tests/create_index_test.py +++ b/tests/create_index_test.py @@ -21,33 +21,58 @@ def test_read_default_index_mapping(es_mapping): print((json.dumps(mapping, indent=2))) -@pytest.mark.parametrize('es_mapping', es_mappings) -def test_read_es_5_index_mapping(es_mapping): - mapping = elastalert.create_index.read_es_index_mapping(es_mapping, 5) - assert es_mapping in mapping - print((json.dumps(mapping, indent=2))) - - -@pytest.mark.parametrize('es_mapping', es_mappings) -def test_read_es_6_index_mapping(es_mapping): - mapping = elastalert.create_index.read_es_index_mapping(es_mapping, 6) - assert es_mapping not in mapping - print((json.dumps(mapping, indent=2))) - - def test_read_default_index_mappings(): mappings = elastalert.create_index.read_es_index_mappings() assert len(mappings) == len(es_mappings) print((json.dumps(mappings, indent=2))) -def test_read_es_5_index_mappings(): - mappings = elastalert.create_index.read_es_index_mappings(5) +def test_read_es_8_index_mappings(): + mappings = elastalert.create_index.read_es_index_mappings(8) assert len(mappings) == len(es_mappings) 
print((json.dumps(mappings, indent=2))) -def test_read_es_6_index_mappings(): - mappings = elastalert.create_index.read_es_index_mappings(6) - assert len(mappings) == len(es_mappings) - print((json.dumps(mappings, indent=2))) +@pytest.mark.parametrize('es_version, expected', [ + ('5.6.0', False), + ('6.0.0', False), + ('6.1.0', False), + ('6.2.0', False), + ('6.3.0', False), + ('6.4.0', False), + ('6.5.0', False), + ('6.6.0', False), + ('6.7.0', False), + ('6.8.0', False), + ('7.0.0', True), + ('7.1.0', True), + ('7.2.0', True), + ('7.3.0', True), + ('7.4.0', True), + ('7.5.0', True), + ('7.6.0', True), + ('7.7.0', True), + ('7.8.0', True), + ('7.9.0', True), + ('7.10.0', True), + ('7.11.0', True), + ('7.12.0', True), + ('7.13.0', True) +]) +def test_is_atleastseven(es_version, expected): + result = elastalert.create_index.is_atleastseven(es_version) + assert result == expected + + +@pytest.mark.parametrize('es_version, expected', [ + ('5.6.0', False), + ('6.0.0', False), + ('6.1.0', False), + ('7.0.0', False), + ('7.1.0', False), + ('7.17.0', False), + ('8.0.0', True) +]) +def test_is_atleasteight(es_version, expected): + result = elastalert.create_index.is_atleasteight(es_version) + assert result == expected diff --git a/docker-compose.yml b/tests/docker-compose.yml similarity index 60% rename from docker-compose.yml rename to tests/docker-compose.yml index 88badf6e1..7b8dd2854 100644 --- a/docker-compose.yml +++ b/tests/docker-compose.yml @@ -2,10 +2,10 @@ version: '2' services: tox: build: - context: ./ - dockerfile: Dockerfile-test + context: ../ + dockerfile: tests/Dockerfile-test command: tox container_name: elastalert_tox working_dir: /home/elastalert volumes: - - ./:/home/elastalert/ + - ../:/home/elastalert/ diff --git a/tests/elasticsearch_test.py b/tests/elasticsearch_test.py index 308356c25..a97b50bff 100644 --- a/tests/elasticsearch_test.py +++ b/tests/elasticsearch_test.py @@ -6,6 +6,9 @@ import dateutil import pytest +from unittest import mock +from unittest.mock import MagicMock + import elastalert.create_index import elastalert.elastalert from elastalert import ElasticSearchClient @@ -25,6 +28,17 @@ def es_client(): return ElasticSearchClient(es_conn_config) +def test_es_version(es_client): + mockInfo = {} + versionData = {} + versionData['number'] = "1.2.3" + mockInfo['version'] = versionData + + with mock.patch('elasticsearch.client.Elasticsearch.info', new=MagicMock(return_value=mockInfo)): + version = es_client.es_version + assert version == "1.2.3" + + @pytest.mark.elasticsearch class TestElasticsearch(object): # TODO perform teardown removing data inserted into Elasticsearch @@ -37,18 +51,11 @@ def test_create_indices(self, es_client): print(('-' * 50)) print((json.dumps(indices_mappings, indent=2))) print(('-' * 50)) - if es_client.is_atleastsix(): - assert test_index in indices_mappings - assert test_index + '_error' in indices_mappings - assert test_index + '_status' in indices_mappings - assert test_index + '_silence' in indices_mappings - assert test_index + '_past' in indices_mappings - else: - assert 'elastalert' in indices_mappings[test_index]['mappings'] - assert 'elastalert_error' in indices_mappings[test_index]['mappings'] - assert 'elastalert_status' in indices_mappings[test_index]['mappings'] - assert 'silence' in indices_mappings[test_index]['mappings'] - assert 'past_elastalert' in indices_mappings[test_index]['mappings'] + assert test_index in indices_mappings + assert test_index + '_error' in indices_mappings + assert test_index + '_status' in 
indices_mappings + assert test_index + '_silence' in indices_mappings + assert test_index + '_past' in indices_mappings @pytest.mark.usefixtures("ea") def test_aggregated_alert(self, ea, es_client): # noqa: F811 @@ -61,10 +68,7 @@ def test_aggregated_alert(self, ea, es_client): # noqa: F811 } ea.writeback_es = es_client res = ea.add_aggregated_alert(match, ea.rules[0]) - if ea.writeback_es.is_atleastsix(): - assert res['result'] == 'created' - else: - assert res['created'] is True + assert res['result'] == 'created' # Make sure added data is available for querying time.sleep(2) # Now lets find the pending aggregated alert @@ -76,10 +80,7 @@ def test_silenced(self, ea, es_client): # noqa: F811 days=1) ea.writeback_es = es_client res = ea.set_realert(ea.rules[0]['name'], until_timestamp, 0) - if ea.writeback_es.is_atleastsix(): - assert res['result'] == 'created' - else: - assert res['created'] is True + assert res['result'] == 'created' # Make sure added data is available for querying time.sleep(2) # Force lookup in elasticsearch diff --git a/tests/example.config.logging.yaml b/tests/example.config.logging.yaml new file mode 100644 index 000000000..01b7532c3 --- /dev/null +++ b/tests/example.config.logging.yaml @@ -0,0 +1,70 @@ +rules_folder: /opt/elastalert/rules + +run_every: + seconds: 10 + +buffer_time: + minutes: 15 + +es_host: elasticsearch +es_port: 9200 + +es_username: elastic +es_password: $ELASTIC_PASS + +writeback_index: elastalert_status + +alert_time_limit: + days: 2 + +logging: + version: 1 + incremental: false + disable_existing_loggers: false + formatters: + logline: + format: '%(asctime)s %(levelname)+8s %(name)+20s %(message)s' + + handlers: + console: + class: logging.StreamHandler + formatter: logline + level: INFO + stream: ext://sys.stderr + + logging: + version: 1 + incremental: false + disable_existing_loggers: false + formatters: + logline: + format: '%(asctime)s %(levelname)+8s %(name)+20s %(message)s' + + handlers: + console: + class: logging.StreamHandler + formatter: logline + level: INFO + stream: ext://sys.stderr + + loggers: + elastalert: + level: INFO + handlers: [] + propagate: true + + elasticsearch: + level: INFO + handlers: [] + propagate: true + + elasticsearch.trace: + level: INFO + handlers: [] + propagate: true + + '': # root logger + level: INFO + handlers: + - console + propagate: false \ No newline at end of file diff --git a/tests/example.config.not_found.buffer_time.yaml b/tests/example.config.not_found.buffer_time.yaml new file mode 100644 index 000000000..1503207e8 --- /dev/null +++ b/tests/example.config.not_found.buffer_time.yaml @@ -0,0 +1,15 @@ +rules_folder: /opt/elastalert/rules + +run_every: + seconds: 10 + +es_host: elasticsearch +es_port: 9200 + +es_username: elastic +es_password: $ELASTIC_PASS + +writeback_index: elastalert_status + +alert_time_limit: + days: 2 diff --git a/tests/example.config.not_found.es_host.yaml b/tests/example.config.not_found.es_host.yaml new file mode 100644 index 000000000..113493b69 --- /dev/null +++ b/tests/example.config.not_found.es_host.yaml @@ -0,0 +1,17 @@ +rules_folder: /opt/elastalert/rules + +run_every: + seconds: 10 + +buffer_time: + minutes: 15 + +es_port: 9200 + +es_username: elastic +es_password: $ELASTIC_PASS + +writeback_index: elastalert_status + +alert_time_limit: + days: 2 diff --git a/tests/example.config.not_found.es_port.yaml b/tests/example.config.not_found.es_port.yaml new file mode 100644 index 000000000..d6c783b06 --- /dev/null +++ b/tests/example.config.not_found.es_port.yaml @@ 
-0,0 +1,17 @@ +rules_folder: /opt/elastalert/rules + +run_every: + seconds: 10 + +buffer_time: + minutes: 15 + +es_host: elasticsearch + +es_username: elastic +es_password: $ELASTIC_PASS + +writeback_index: elastalert_status + +alert_time_limit: + days: 2 diff --git a/tests/example.config.not_found.run_every.yaml b/tests/example.config.not_found.run_every.yaml new file mode 100644 index 000000000..d61208933 --- /dev/null +++ b/tests/example.config.not_found.run_every.yaml @@ -0,0 +1,15 @@ +rules_folder: /opt/elastalert/rules + +buffer_time: + minutes: 15 + +es_host: elasticsearch +es_port: 9200 + +es_username: elastic +es_password: $ELASTIC_PASS + +writeback_index: elastalert_status + +alert_time_limit: + days: 2 diff --git a/tests/example.config.not_found.writeback_index.yaml b/tests/example.config.not_found.writeback_index.yaml new file mode 100644 index 000000000..1b779e0e7 --- /dev/null +++ b/tests/example.config.not_found.writeback_index.yaml @@ -0,0 +1,13 @@ +rules_folder: /opt/elastalert/rules + +run_every: + seconds: 10 + +es_host: elasticsearch +es_port: 9200 + +es_username: elastic +es_password: $ELASTIC_PASS + +alert_time_limit: + days: 2 diff --git a/tests/example.config.old_query_limit.yaml b/tests/example.config.old_query_limit.yaml new file mode 100644 index 000000000..f9235b70e --- /dev/null +++ b/tests/example.config.old_query_limit.yaml @@ -0,0 +1,21 @@ +rules_folder: /opt/elastalert/rules + +run_every: + seconds: 10 + +buffer_time: + minutes: 15 + +es_host: elasticsearch +es_port: 9200 + +es_username: elastic +es_password: $ELASTIC_PASS + +writeback_index: elastalert_status + +alert_time_limit: + days: 2 + +old_query_limit: + days: 3 \ No newline at end of file diff --git a/tests/example.config.type_error.buffer_time.yaml b/tests/example.config.type_error.buffer_time.yaml new file mode 100644 index 000000000..40474cf62 --- /dev/null +++ b/tests/example.config.type_error.buffer_time.yaml @@ -0,0 +1,17 @@ +rules_folder: /opt/elastalert/rules + +run_every: + seconds: 10 + +buffer_time: + +es_host: elasticsearch +es_port: 9200 + +es_username: elastic +es_password: $ELASTIC_PASS + +writeback_index: elastalert_status + +alert_time_limit: + days: 2 diff --git a/tests/example.config.type_error.run_every.yaml b/tests/example.config.type_error.run_every.yaml new file mode 100644 index 000000000..088e27d4b --- /dev/null +++ b/tests/example.config.type_error.run_every.yaml @@ -0,0 +1,17 @@ +rules_folder: /opt/elastalert/rules + +run_every: + +buffer_time: + minutes: 15 + +es_host: elasticsearch +es_port: 9200 + +es_username: elastic +es_password: $ELASTIC_PASS + +writeback_index: elastalert_status + +alert_time_limit: + days: 2 diff --git a/tests/example.config.yaml b/tests/example.config.yaml new file mode 100644 index 000000000..113a55959 --- /dev/null +++ b/tests/example.config.yaml @@ -0,0 +1,18 @@ +rules_folder: /opt/elastalert/rules + +run_every: + seconds: 10 + +buffer_time: + minutes: 15 + +es_host: elasticsearch +es_port: 9200 + +es_username: elastic +es_password: $ELASTIC_PASS + +writeback_index: elastalert_status + +alert_time_limit: + days: 2 diff --git a/tests/kibana_discover_test.py b/tests/kibana_discover_test.py index f06fe4e0c..229ba781f 100644 --- a/tests/kibana_discover_test.py +++ b/tests/kibana_discover_test.py @@ -5,8 +5,33 @@ from elastalert.kibana_discover import generate_kibana_discover_url -@pytest.mark.parametrize("kibana_version", ['5.6', '6.0', '6.1', '6.2', '6.3', '6.4', '6.5', '6.6', '6.7', '6.8']) -def 
test_generate_kibana_discover_url_with_kibana_5x_and_6x(kibana_version): +@pytest.mark.parametrize("kibana_version", [ + '7.0', + '7.1', + '7.2', + '7.3', + '7.4', + '7.5', + '7.6', + '7.7', + '7.8', + '7.9', + '7.10', + '7.11', + '7.12', + '7.13', + '7.14', + '7.15', + '7.16', + '8.0', + '8.1', + '8.2', + '8.3', + '8.4', + '8.5', + '8.6' +]) +def test_generate_kibana_discover_url_with_kibana_7x(kibana_version): url = generate_kibana_discover_url( rule={ 'kibana_discover_app_url': 'http://kibana:5601/#/discover', @@ -21,10 +46,10 @@ def test_generate_kibana_discover_url_with_kibana_5x_and_6x(kibana_version): expectedUrl = ( 'http://kibana:5601/#/discover' + '?_g=%28' # global start + + 'filters%3A%21%28%29%2C' + 'refreshInterval%3A%28pause%3A%21t%2Cvalue%3A0%29%2C' + 'time%3A%28' # time start + 'from%3A%272019-09-01T00%3A20%3A00Z%27%2C' - + 'mode%3Aabsolute%2C' + 'to%3A%272019-09-01T00%3A40%3A00Z%27' + '%29' # time end + '%29' # global end @@ -38,33 +63,32 @@ def test_generate_kibana_discover_url_with_kibana_5x_and_6x(kibana_version): assert url == expectedUrl -@pytest.mark.parametrize("kibana_version", ['7.0', '7.1', '7.2', '7.3']) -def test_generate_kibana_discover_url_with_kibana_7x(kibana_version): +def test_generate_kibana_discover_url_with_relative_kibana_discover_app_url(): url = generate_kibana_discover_url( rule={ - 'kibana_discover_app_url': 'http://kibana:5601/#/discover', - 'kibana_discover_version': kibana_version, - 'kibana_discover_index_pattern_id': 'd6cabfb6-aaef-44ea-89c5-600e9a76991a', + 'kibana_discover_app_url': 'app/discover#/', + 'kibana_discover_version': '8.6', + 'kibana_discover_index_pattern_id': '620ad0e6-43df-4557-bda2-384960fa9086', 'timestamp_field': 'timestamp' }, match={ - 'timestamp': '2019-09-01T00:30:00Z' + 'timestamp': '2021-10-08T00:30:00Z' } ) expectedUrl = ( - 'http://kibana:5601/#/discover' + 'app/discover#/' + '?_g=%28' # global start + 'filters%3A%21%28%29%2C' + 'refreshInterval%3A%28pause%3A%21t%2Cvalue%3A0%29%2C' + 'time%3A%28' # time start - + 'from%3A%272019-09-01T00%3A20%3A00Z%27%2C' - + 'to%3A%272019-09-01T00%3A40%3A00Z%27' + + 'from%3A%272021-10-08T00%3A20%3A00Z%27%2C' + + 'to%3A%272021-10-08T00%3A40%3A00Z%27' + '%29' # time end + '%29' # global end + '&_a=%28' # app start + 'columns%3A%21%28_source%29%2C' + 'filters%3A%21%28%29%2C' - + 'index%3Ad6cabfb6-aaef-44ea-89c5-600e9a76991a%2C' + + 'index%3A%27620ad0e6-43df-4557-bda2-384960fa9086%27%2C' + 'interval%3Aauto' + '%29' # app end ) @@ -89,7 +113,7 @@ def test_generate_kibana_discover_url_with_missing_kibana_discover_version(): def test_generate_kibana_discover_url_with_missing_kibana_discover_app_url(): url = generate_kibana_discover_url( rule={ - 'kibana_discover_version': '6.8', + 'kibana_discover_version': '8.6', 'kibana_discover_index_pattern_id': 'logs', 'timestamp_field': 'timestamp', 'name': 'test' @@ -105,7 +129,7 @@ def test_generate_kibana_discover_url_with_missing_kibana_discover_index_pattern url = generate_kibana_discover_url( rule={ 'kibana_discover_app_url': 'http://kibana:5601/#/discover', - 'kibana_discover_version': '6.8', + 'kibana_discover_version': '8.6', 'timestamp_field': 'timestamp', 'name': 'test' }, @@ -139,7 +163,7 @@ def test_generate_kibana_discover_url_with_kibana_discover_app_url_env_substitut url = generate_kibana_discover_url( rule={ 'kibana_discover_app_url': 'http://$KIBANA_HOST:$KIBANA_PORT/#/discover', - 'kibana_discover_version': '6.8', + 'kibana_discover_version': '8.6', 'kibana_discover_index_pattern_id': 'd6cabfb6-aaef-44ea-89c5-600e9a76991a', 
'timestamp_field': 'timestamp' }, @@ -150,10 +174,10 @@ def test_generate_kibana_discover_url_with_kibana_discover_app_url_env_substitut expectedUrl = ( 'http://kibana:5601/#/discover' + '?_g=%28' # global start + + 'filters%3A%21%28%29%2C' + 'refreshInterval%3A%28pause%3A%21t%2Cvalue%3A0%29%2C' + 'time%3A%28' # time start + 'from%3A%272019-09-01T00%3A20%3A00Z%27%2C' - + 'mode%3Aabsolute%2C' + 'to%3A%272019-09-01T00%3A40%3A00Z%27' + '%29' # time end + '%29' # global end @@ -171,7 +195,7 @@ def test_generate_kibana_discover_url_with_from_timedelta(): url = generate_kibana_discover_url( rule={ 'kibana_discover_app_url': 'http://kibana:5601/#/discover', - 'kibana_discover_version': '7.3', + 'kibana_discover_version': '8.6', 'kibana_discover_index_pattern_id': 'd6cabfb6-aaef-44ea-89c5-600e9a76991a', 'kibana_discover_from_timedelta': timedelta(hours=1), 'timestamp_field': 'timestamp' @@ -204,7 +228,7 @@ def test_generate_kibana_discover_url_with_from_timedelta_and_timeframe(): url = generate_kibana_discover_url( rule={ 'kibana_discover_app_url': 'http://kibana:5601/#/discover', - 'kibana_discover_version': '7.3', + 'kibana_discover_version': '8.6', 'kibana_discover_index_pattern_id': 'd6cabfb6-aaef-44ea-89c5-600e9a76991a', 'kibana_discover_from_timedelta': timedelta(hours=1), 'timeframe': timedelta(minutes=20), @@ -238,7 +262,7 @@ def test_generate_kibana_discover_url_with_to_timedelta(): url = generate_kibana_discover_url( rule={ 'kibana_discover_app_url': 'http://kibana:5601/#/discover', - 'kibana_discover_version': '7.3', + 'kibana_discover_version': '8.6', 'kibana_discover_index_pattern_id': 'd6cabfb6-aaef-44ea-89c5-600e9a76991a', 'kibana_discover_to_timedelta': timedelta(hours=1), 'timestamp_field': 'timestamp' @@ -271,7 +295,7 @@ def test_generate_kibana_discover_url_with_to_timedelta_and_timeframe(): url = generate_kibana_discover_url( rule={ 'kibana_discover_app_url': 'http://kibana:5601/#/discover', - 'kibana_discover_version': '7.3', + 'kibana_discover_version': '8.6', 'kibana_discover_index_pattern_id': 'd6cabfb6-aaef-44ea-89c5-600e9a76991a', 'kibana_discover_to_timedelta': timedelta(hours=1), 'timeframe': timedelta(minutes=20), @@ -305,7 +329,7 @@ def test_generate_kibana_discover_url_with_timeframe(): url = generate_kibana_discover_url( rule={ 'kibana_discover_app_url': 'http://kibana:5601/#/discover', - 'kibana_discover_version': '7.3', + 'kibana_discover_version': '8.6', 'kibana_discover_index_pattern_id': 'd6cabfb6-aaef-44ea-89c5-600e9a76991a', 'timeframe': timedelta(minutes=20), 'timestamp_field': 'timestamp' @@ -338,7 +362,7 @@ def test_generate_kibana_discover_url_with_custom_columns(): url = generate_kibana_discover_url( rule={ 'kibana_discover_app_url': 'http://kibana:5601/#/discover', - 'kibana_discover_version': '6.8', + 'kibana_discover_version': '8.6', 'kibana_discover_index_pattern_id': 'logs-*', 'kibana_discover_columns': ['level', 'message'], 'timestamp_field': 'timestamp' @@ -350,10 +374,10 @@ def test_generate_kibana_discover_url_with_custom_columns(): expectedUrl = ( 'http://kibana:5601/#/discover' + '?_g=%28' # global start + + 'filters%3A%21%28%29%2C' + 'refreshInterval%3A%28pause%3A%21t%2Cvalue%3A0%29%2C' + 'time%3A%28' # time start + 'from%3A%272019-09-01T00%3A20%3A00Z%27%2C' - + 'mode%3Aabsolute%2C' + 'to%3A%272019-09-01T00%3A40%3A00Z%27' + '%29' # time end + '%29' # global end @@ -371,7 +395,7 @@ def test_generate_kibana_discover_url_with_single_filter(): url = generate_kibana_discover_url( rule={ 'kibana_discover_app_url': 'http://kibana:5601/#/discover', - 
'kibana_discover_version': '6.8', + 'kibana_discover_version': '8.6', 'kibana_discover_index_pattern_id': 'logs-*', 'timestamp_field': 'timestamp', 'filter': [ @@ -385,10 +409,10 @@ def test_generate_kibana_discover_url_with_single_filter(): expectedUrl = ( 'http://kibana:5601/#/discover' + '?_g=%28' # global start + + 'filters%3A%21%28%29%2C' + 'refreshInterval%3A%28pause%3A%21t%2Cvalue%3A0%29%2C' + 'time%3A%28' # time start + 'from%3A%272019-09-01T00%3A20%3A00Z%27%2C' - + 'mode%3Aabsolute%2C' + 'to%3A%272019-09-01T00%3A40%3A00Z%27' + '%29' # time end + '%29' # global end @@ -422,7 +446,7 @@ def test_generate_kibana_discover_url_with_multiple_filters(): url = generate_kibana_discover_url( rule={ 'kibana_discover_app_url': 'http://kibana:5601/#/discover', - 'kibana_discover_version': '6.8', + 'kibana_discover_version': '8.6', 'kibana_discover_index_pattern_id': '90943e30-9a47-11e8-b64d-95841ca0b247', 'timestamp_field': 'timestamp', 'filter': [ @@ -437,10 +461,10 @@ def test_generate_kibana_discover_url_with_multiple_filters(): expectedUrl = ( 'http://kibana:5601/#/discover' + '?_g=%28' # global start + + 'filters%3A%21%28%29%2C' + 'refreshInterval%3A%28pause%3A%21t%2Cvalue%3A0%29%2C' + 'time%3A%28' # time start + 'from%3A%272019-09-01T00%3A20%3A00Z%27%2C' - + 'mode%3Aabsolute%2C' + 'to%3A%272019-09-01T00%3A40%3A00Z%27' + '%29' # time end + '%29' # global end @@ -476,7 +500,7 @@ def test_generate_kibana_discover_url_with_int_query_key(): url = generate_kibana_discover_url( rule={ 'kibana_discover_app_url': 'http://kibana:5601/#/discover', - 'kibana_discover_version': '6.8', + 'kibana_discover_version': '8.6', 'kibana_discover_index_pattern_id': 'logs-*', 'timestamp_field': 'timestamp', 'query_key': 'geo.dest' @@ -489,10 +513,10 @@ def test_generate_kibana_discover_url_with_int_query_key(): expectedUrl = ( 'http://kibana:5601/#/discover' + '?_g=%28' # global start + + 'filters%3A%21%28%29%2C' + 'refreshInterval%3A%28pause%3A%21t%2Cvalue%3A0%29%2C' + 'time%3A%28' # time start + 'from%3A%272019-09-01T00%3A20%3A00Z%27%2C' - + 'mode%3Aabsolute%2C' + 'to%3A%272019-09-01T00%3A40%3A00Z%27' + '%29' # time end + '%29' # global end @@ -536,7 +560,7 @@ def test_generate_kibana_discover_url_with_str_query_key(): url = generate_kibana_discover_url( rule={ 'kibana_discover_app_url': 'http://kibana:5601/#/discover', - 'kibana_discover_version': '6.8', + 'kibana_discover_version': '8.6', 'kibana_discover_index_pattern_id': 'logs-*', 'timestamp_field': 'timestamp', 'query_key': 'geo.dest' @@ -551,10 +575,10 @@ def test_generate_kibana_discover_url_with_str_query_key(): expectedUrl = ( 'http://kibana:5601/#/discover' + '?_g=%28' # global start + + 'filters%3A%21%28%29%2C' + 'refreshInterval%3A%28pause%3A%21t%2Cvalue%3A0%29%2C' + 'time%3A%28' # time start + 'from%3A%272019-09-01T00%3A20%3A00Z%27%2C' - + 'mode%3Aabsolute%2C' + 'to%3A%272019-09-01T00%3A40%3A00Z%27' + '%29' # time end + '%29' # global end @@ -598,7 +622,7 @@ def test_generate_kibana_discover_url_with_null_query_key_value(): url = generate_kibana_discover_url( rule={ 'kibana_discover_app_url': 'http://kibana:5601/#/discover', - 'kibana_discover_version': '6.8', + 'kibana_discover_version': '8.6', 'kibana_discover_index_pattern_id': 'logs-*', 'timestamp_field': 'timestamp', 'query_key': 'status' @@ -611,10 +635,10 @@ def test_generate_kibana_discover_url_with_null_query_key_value(): expectedUrl = ( 'http://kibana:5601/#/discover' + '?_g=%28' # global start + + 'filters%3A%21%28%29%2C' + 'refreshInterval%3A%28pause%3A%21t%2Cvalue%3A0%29%2C' + 
'time%3A%28' # time start + 'from%3A%272019-09-01T00%3A20%3A00Z%27%2C' - + 'mode%3Aabsolute%2C' + 'to%3A%272019-09-01T00%3A40%3A00Z%27' + '%29' # time end + '%29' # global end @@ -648,7 +672,7 @@ def test_generate_kibana_discover_url_with_missing_query_key_value(): url = generate_kibana_discover_url( rule={ 'kibana_discover_app_url': 'http://kibana:5601/#/discover', - 'kibana_discover_version': '6.8', + 'kibana_discover_version': '8.6', 'kibana_discover_index_pattern_id': 'logs-*', 'timestamp_field': 'timestamp', 'query_key': 'status' @@ -660,10 +684,10 @@ def test_generate_kibana_discover_url_with_missing_query_key_value(): expectedUrl = ( 'http://kibana:5601/#/discover' + '?_g=%28' # global start + + 'filters%3A%21%28%29%2C' + 'refreshInterval%3A%28pause%3A%21t%2Cvalue%3A0%29%2C' + 'time%3A%28' # time start + 'from%3A%272019-09-01T00%3A20%3A00Z%27%2C' - + 'mode%3Aabsolute%2C' + 'to%3A%272019-09-01T00%3A40%3A00Z%27' + '%29' # time end + '%29' # global end @@ -697,7 +721,7 @@ def test_generate_kibana_discover_url_with_compound_query_key(): url = generate_kibana_discover_url( rule={ 'kibana_discover_app_url': 'http://kibana:5601/#/discover', - 'kibana_discover_version': '6.8', + 'kibana_discover_version': '8.6', 'kibana_discover_index_pattern_id': 'logs-*', 'timestamp_field': 'timestamp', 'compound_query_key': ['geo.src', 'geo.dest'], @@ -714,10 +738,10 @@ def test_generate_kibana_discover_url_with_compound_query_key(): expectedUrl = ( 'http://kibana:5601/#/discover' + '?_g=%28' # global start + + 'filters%3A%21%28%29%2C' + 'refreshInterval%3A%28pause%3A%21t%2Cvalue%3A0%29%2C' + 'time%3A%28' # time start + 'from%3A%272019-09-01T00%3A20%3A00Z%27%2C' - + 'mode%3Aabsolute%2C' + 'to%3A%272019-09-01T00%3A40%3A00Z%27' + '%29' # time end + '%29' # global end @@ -785,7 +809,7 @@ def test_generate_kibana_discover_url_with_filter_and_query_key(): url = generate_kibana_discover_url( rule={ 'kibana_discover_app_url': 'http://kibana:5601/#/discover', - 'kibana_discover_version': '6.8', + 'kibana_discover_version': '8.6', 'kibana_discover_index_pattern_id': 'logs-*', 'timestamp_field': 'timestamp', 'filter': [ @@ -801,10 +825,10 @@ def test_generate_kibana_discover_url_with_filter_and_query_key(): expectedUrl = ( 'http://kibana:5601/#/discover' + '?_g=%28' # global start + + 'filters%3A%21%28%29%2C' + 'refreshInterval%3A%28pause%3A%21t%2Cvalue%3A0%29%2C' + 'time%3A%28' # time start + 'from%3A%272019-09-01T00%3A20%3A00Z%27%2C' - + 'mode%3Aabsolute%2C' + 'to%3A%272019-09-01T00%3A40%3A00Z%27' + '%29' # time end + '%29' # global end @@ -856,3 +880,80 @@ def test_generate_kibana_discover_url_with_filter_and_query_key(): + '%29' # app end ) assert url == expectedUrl + + +def test_generate_kibana_discover_url_with_querystring_filter_and_query_key(): + url = generate_kibana_discover_url( + rule={ + 'kibana_discover_app_url': 'http://kibana:5601/#/discover', + 'kibana_discover_version': '8.6', + 'kibana_discover_index_pattern_id': 'logs-*', + 'timestamp_field': 'timestamp', + 'filter': [ + {'query': {'query_string': {'query': 'hello world'}}} + ], + 'query_key': 'status' + }, + match={ + 'timestamp': '2019-09-01T00:30:00Z', + 'status': 'ok' + } + ) + expectedUrl = ( + 'http://kibana:5601/#/discover' + + '?_g=%28' # global start + + 'filters%3A%21%28%29%2C' + + 'refreshInterval%3A%28pause%3A%21t%2Cvalue%3A0%29%2C' + + 'time%3A%28' # time start + + 'from%3A%272019-09-01T00%3A20%3A00Z%27%2C' + + 'to%3A%272019-09-01T00%3A40%3A00Z%27' + + '%29' # time end + + '%29' # global end + + '&_a=%28' # app start + + 
'columns%3A%21%28_source%29%2C' + + 'filters%3A%21%28' # filters start + + + '%28' # filter start + + '%27%24state%27%3A%28store%3AappState%29%2C' + + 'bool%3A%28must%3A%21%28%28query_string%3A%28query%3A%27hello%20world%27%29%29%29%29%2C' + + 'meta%3A%28' # meta start + + 'alias%3Afilter%2C' + + 'disabled%3A%21f%2C' + + 'index%3A%27logs-%2A%27%2C' + + 'key%3Abool%2C' + + 'negate%3A%21f%2C' + + 'type%3Acustom%2C' + + 'value%3A%27%7B%22must%22%3A%5B%7B%22query_string%22%3A%7B%22query%22%3A%22hello%20world%22%7D%7D%5D%7D%27' + + '%29' # meta end + + '%29%2C' # filter end + + + '%28' # filter start + + '%27%24state%27%3A%28store%3AappState%29%2C' + + 'meta%3A%28' # meta start + + 'alias%3A%21n%2C' + + 'disabled%3A%21f%2C' + + 'index%3A%27logs-%2A%27%2C' + + 'key%3Astatus%2C' + + 'negate%3A%21f%2C' + + 'params%3A%28query%3Aok%2C' # params start + + 'type%3Aphrase' + + '%29%2C' # params end + + 'type%3Aphrase%2C' + + 'value%3Aok' + + '%29%2C' # meta end + + 'query%3A%28' # query start + + 'match%3A%28' # match start + + 'status%3A%28' # status start + + 'query%3Aok%2C' + + 'type%3Aphrase' + + '%29' # status end + + '%29' # match end + + '%29' # query end + + '%29' # filter end + + + '%29%2C' # filters end + + 'index%3A%27logs-%2A%27%2C' + + 'interval%3Aauto' + + '%29' # app end + ) + assert url == expectedUrl diff --git a/tests/kibana_external_url_formatter_test.py b/tests/kibana_external_url_formatter_test.py new file mode 100644 index 000000000..49a6906b4 --- /dev/null +++ b/tests/kibana_external_url_formatter_test.py @@ -0,0 +1,500 @@ +from typing import Any +import os +import pytest + +import requests +from requests.auth import AuthBase, HTTPBasicAuth + +from elastalert.kibana_external_url_formatter import AbsoluteKibanaExternalUrlFormatter +from elastalert.kibana_external_url_formatter import KibanaExternalUrlFormatter +from elastalert.kibana_external_url_formatter import ShortKibanaExternalUrlFormatter +from elastalert.kibana_external_url_formatter import append_security_tenant +from elastalert.kibana_external_url_formatter import create_kibana_auth +from elastalert.kibana_external_url_formatter import create_kibana_external_url_formatter + +from elastalert.auth import RefeshableAWSRequestsAuth +from elastalert.util import EAException + +from unittest import mock + + +class AbsoluteFormatTestCase: + def __init__( + self, + base_url: str, + relative_url: str, + expected_url: str, + security_tenant: str = None, + ) -> None: + self.base_url = base_url + self.relative_url = relative_url + self.expected_url = expected_url + self.security_tenant = security_tenant + + +@pytest.mark.parametrize("test_case", [ + + # Relative to Kibana plugin + AbsoluteFormatTestCase( + base_url='http://elasticsearch.test.org:9200/_plugin/kibana/', + relative_url='app/dev_tools#/console', + expected_url='http://elasticsearch.test.org:9200/_plugin/kibana/app/dev_tools#/console' + ), + + # Relative to OpenSearch Dashboards + AbsoluteFormatTestCase( + base_url='http://opensearch.test.org/_dashboards/', + relative_url='app/dev_tools#/console', + expected_url='http://opensearch.test.org/_dashboards/app/dev_tools#/console' + ), + + # Relative to root of dedicated Kibana domain + AbsoluteFormatTestCase( + base_url='http://kibana.test.org/', + relative_url='/app/dev_tools#/console', + expected_url='http://kibana.test.org/app/dev_tools#/console' + ), + + # With security tenant + AbsoluteFormatTestCase( + base_url='http://kibana.test.org/', + security_tenant='global', + relative_url='/app/dev_tools#/console', + 
expected_url='http://kibana.test.org/app/dev_tools?security_tenant=global#/console' + ), +]) +def test_absolute_kinbana_external_url_formatter( + test_case: AbsoluteFormatTestCase +): + formatter = AbsoluteKibanaExternalUrlFormatter( + base_url=test_case.base_url, + security_tenant=test_case.security_tenant + ) + actualUrl = formatter.format(test_case.relative_url) + assert actualUrl == test_case.expected_url + + +def mock_kibana_shorten_url_api(*args, **kwargs): + class MockResponse: + def __init__(self, status_code): + self.status_code = status_code + + def json(self): + return { + 'urlId': '62af3ebe6652370f85de91ccb3a3825f' + } + + def raise_for_status(self): + if self.status_code == 400: + raise requests.exceptions.HTTPError() + + json = kwargs['json'] + url = json['url'] + + if url.startswith('/app/'): + return MockResponse(200) + else: + return MockResponse(400) + + +def mock_7_16_kibana_shorten_url_api(*args, **kwargs): + class MockResponse: + def __init__(self, status_code): + self.status_code = status_code + + def json(self): + return { + 'id': 'a1f77a80-6847-11ec-9b91-e5d43d1e9ca2' + } + + def raise_for_status(self): + if self.status_code == 400: + raise requests.exceptions.HTTPError() + + json = kwargs['json'] + params = json['params'] + url = params['url'] + + if url.startswith('/app/'): + return MockResponse(200) + else: + return MockResponse(400) + + +class ShortenUrlTestCase: + def __init__( + self, + base_url: str, + relative_url: str, + expected_api_request: Any, + expected_url: str, + auth: AuthBase = None, + security_tenant: str = None + ) -> None: + self.base_url = base_url + self.relative_url = relative_url + self.expected_api_request = expected_api_request + self.expected_url = expected_url + self.authorization = auth + self.security_tenant = security_tenant + + +@mock.patch('requests.post', side_effect=mock_kibana_shorten_url_api) +@pytest.mark.parametrize("test_case", [ + + # Relative to kibana plugin + ShortenUrlTestCase( + base_url='http://elasticsearch.test.org/_plugin/kibana/', + relative_url='app/dev_tools#/console', + expected_api_request={ + 'url': 'http://elasticsearch.test.org/_plugin/kibana/api/shorten_url', + 'auth': None, + 'headers': { + 'kbn-xsrf': 'elastalert', + 'osd-xsrf': 'elastalert' + }, + 'json': { + 'url': '/app/dev_tools#/console' + }, + 'verify': True + }, + expected_url='http://elasticsearch.test.org/_plugin/kibana/goto/62af3ebe6652370f85de91ccb3a3825f' + ), + + # Relative to root of dedicated Kibana domain + ShortenUrlTestCase( + base_url='http://kibana.test.org/', + relative_url='/app/dev_tools#/console', + expected_api_request={ + 'url': 'http://kibana.test.org/api/shorten_url', + 'auth': None, + 'headers': { + 'kbn-xsrf': 'elastalert', + 'osd-xsrf': 'elastalert' + }, + 'json': { + 'url': '/app/dev_tools#/console' + }, + 'verify': True + }, + expected_url='http://kibana.test.org/goto/62af3ebe6652370f85de91ccb3a3825f' + ), + + # With authentication + ShortenUrlTestCase( + base_url='http://kibana.test.org/', + auth=HTTPBasicAuth('john', 'doe'), + relative_url='/app/dev_tools#/console', + expected_api_request={ + 'url': 'http://kibana.test.org/api/shorten_url', + 'auth': HTTPBasicAuth('john', 'doe'), + 'headers': { + 'kbn-xsrf': 'elastalert', + 'osd-xsrf': 'elastalert' + }, + 'json': { + 'url': '/app/dev_tools#/console' + }, + 'verify': True + }, + expected_url='http://kibana.test.org/goto/62af3ebe6652370f85de91ccb3a3825f' + ), + + # With security tenant + ShortenUrlTestCase( + base_url='http://kibana.test.org/', + security_tenant='global', 
+ relative_url='/app/dev_tools#/console', + expected_api_request={ + 'url': 'http://kibana.test.org/api/shorten_url?security_tenant=global', + 'auth': None, + 'headers': { + 'kbn-xsrf': 'elastalert', + 'osd-xsrf': 'elastalert' + }, + 'json': { + 'url': '/app/dev_tools?security_tenant=global#/console' + }, + 'verify': True + }, + expected_url='http://kibana.test.org/goto/62af3ebe6652370f85de91ccb3a3825f?security_tenant=global' + ) +]) +def test_short_kinbana_external_url_formatter( + mock_post: mock.MagicMock, + test_case: ShortenUrlTestCase +): + formatter = ShortKibanaExternalUrlFormatter( + base_url=test_case.base_url, + auth=test_case.authorization, + security_tenant=test_case.security_tenant, + new_shortener=False, + verify=True, + ) + + actualUrl = formatter.format(test_case.relative_url) + assert actualUrl == test_case.expected_url + + mock_post.assert_called_once_with(**test_case.expected_api_request) + + +@mock.patch('requests.post', side_effect=mock_7_16_kibana_shorten_url_api) +@pytest.mark.parametrize("test_case", [ + + # Relative to kibana plugin + ShortenUrlTestCase( + base_url='http://elasticsearch.test.org/_plugin/kibana/', + relative_url='app/dev_tools#/console', + expected_api_request={ + 'url': 'http://elasticsearch.test.org/_plugin/kibana/api/short_url', + 'auth': None, + 'headers': { + 'kbn-xsrf': 'elastalert', + 'osd-xsrf': 'elastalert' + }, + 'json': { + 'locatorId': 'LEGACY_SHORT_URL_LOCATOR', + 'params': { + 'url': '/app/dev_tools#/console' + } + }, + 'verify': True + }, + expected_url='http://elasticsearch.test.org/_plugin/kibana/goto/a1f77a80-6847-11ec-9b91-e5d43d1e9ca2' + ), + + # Relative to root of dedicated Kibana domain + ShortenUrlTestCase( + base_url='http://kibana.test.org/', + relative_url='/app/dev_tools#/console', + expected_api_request={ + 'url': 'http://kibana.test.org/api/short_url', + 'auth': None, + 'headers': { + 'kbn-xsrf': 'elastalert', + 'osd-xsrf': 'elastalert' + }, + 'json': { + 'locatorId': 'LEGACY_SHORT_URL_LOCATOR', + 'params': { + 'url': '/app/dev_tools#/console' + } + }, + 'verify': True + }, + expected_url='http://kibana.test.org/goto/a1f77a80-6847-11ec-9b91-e5d43d1e9ca2' + ), + + # With authentication + ShortenUrlTestCase( + base_url='http://kibana.test.org/', + auth=HTTPBasicAuth('john', 'doe'), + relative_url='/app/dev_tools#/console', + expected_api_request={ + 'url': 'http://kibana.test.org/api/short_url', + 'auth': HTTPBasicAuth('john', 'doe'), + 'headers': { + 'kbn-xsrf': 'elastalert', + 'osd-xsrf': 'elastalert' + }, + 'json': { + 'locatorId': 'LEGACY_SHORT_URL_LOCATOR', + 'params': { + 'url': '/app/dev_tools#/console' + } + }, + 'verify': True + }, + expected_url='http://kibana.test.org/goto/a1f77a80-6847-11ec-9b91-e5d43d1e9ca2' + ), + + # With security tenant + ShortenUrlTestCase( + base_url='http://kibana.test.org/', + security_tenant='global', + relative_url='/app/dev_tools#/console', + expected_api_request={ + 'url': 'http://kibana.test.org/api/short_url?security_tenant=global', + 'auth': None, + 'headers': { + 'kbn-xsrf': 'elastalert', + 'osd-xsrf': 'elastalert' + }, + 'json': { + 'locatorId': 'LEGACY_SHORT_URL_LOCATOR', + 'params': { + 'url': '/app/dev_tools?security_tenant=global#/console' + } + }, + 'verify': True + }, + expected_url='http://kibana.test.org/goto/a1f77a80-6847-11ec-9b91-e5d43d1e9ca2?security_tenant=global' + ) +]) +def test_7_16_short_kibana_external_url_formatter( + mock_post: mock.MagicMock, + test_case: ShortenUrlTestCase +): + formatter = ShortKibanaExternalUrlFormatter( + base_url=test_case.base_url, 
+ auth=test_case.authorization, + security_tenant=test_case.security_tenant, + new_shortener=True, + verify=True, + ) + + actualUrl = formatter.format(test_case.relative_url) + assert actualUrl == test_case.expected_url + + mock_post.assert_called_once_with(**test_case.expected_api_request) + + +@mock.patch('requests.post', side_effect=mock_kibana_shorten_url_api) +def test_short_kinbana_external_url_formatter_request_exception(mock_post: mock.MagicMock): + formatter = ShortKibanaExternalUrlFormatter( + base_url='http://kibana.test.org', + auth=None, + security_tenant=None, + new_shortener=False, + verify=True, + ) + with pytest.raises(EAException, match="Failed to invoke Kibana Shorten URL API"): + formatter.format('http://wacky.org') + mock_post.assert_called_once() + + +def test_create_kibana_external_url_formatter_without_shortening(): + formatter = create_kibana_external_url_formatter( + rule={ + 'kibana_url': 'http://kibana.test.org/' + }, + shorten=False, + security_tenant='foo' + ) + assert type(formatter) is AbsoluteKibanaExternalUrlFormatter + assert formatter.base_url == 'http://kibana.test.org/' + assert formatter.security_tenant == 'foo' + + +def test_create_kibana_external_url_formatter_with_shortening(): + formatter = create_kibana_external_url_formatter( + rule={ + 'kibana_url': 'http://kibana.test.org/', + 'kibana_username': 'john', + 'kibana_password': 'doe' + }, + shorten=True, + security_tenant='foo' + ) + assert type(formatter) is ShortKibanaExternalUrlFormatter + assert formatter.auth == HTTPBasicAuth('john', 'doe') + assert formatter.security_tenant == 'foo' + assert formatter.goto_url == 'http://kibana.test.org/goto/' + assert formatter.shorten_url == 'http://kibana.test.org/api/shorten_url?security_tenant=foo' + + +@pytest.mark.parametrize("test_case", [ + # Trivial + { + 'url': 'http://test.org', + 'expected': 'http://test.org?security_tenant=foo' + }, + # With query + { + 'url': 'http://test.org?year=2021', + 'expected': 'http://test.org?year=2021&security_tenant=foo' + }, + # With fragment + { + 'url': 'http://test.org#fragement', + 'expected': 'http://test.org?security_tenant=foo#fragement' + }, + # With query & fragment + { + 'url': 'http://test.org?year=2021#fragement', + 'expected': 'http://test.org?year=2021&security_tenant=foo#fragement' + }, +]) +def test_append_security_tenant(test_case): + url = test_case.get('url') + expected = test_case.get('expected') + result = append_security_tenant(url=url, security_tenant='foo') + assert result == expected + + +def test_create_kibana_auth_basic(): + auth = create_kibana_auth( + kibana_url='http://kibana.test.org', + rule={ + 'kibana_username': 'john', + 'kibana_password': 'doe', + } + ) + assert auth == HTTPBasicAuth('john', 'doe') + + +@mock.patch.dict( + os.environ, + { + 'AWS_DEFAULT_REGION': '', + 'AWS_ACCESS_KEY_ID': 'access', + 'AWS_SECRET_ACCESS_KEY': 'secret', + }, + clear=True +) +def test_create_kibana_auth_aws_explicit_region(): + auth = create_kibana_auth( + kibana_url='http://kibana.test.org', + rule={ + 'aws_region': 'us-east-1' + } + ) + assert type(auth) is RefeshableAWSRequestsAuth + assert auth.aws_host == 'kibana.test.org' + assert auth.aws_region == 'us-east-1' + assert auth.service == 'es' + assert auth.aws_access_key == 'access' + assert auth.aws_secret_access_key == 'secret' + assert auth.aws_token is None + + +@mock.patch.dict( + os.environ, + { + 'AWS_DEFAULT_REGION': 'us-east-2', + 'AWS_ACCESS_KEY_ID': 'access', + 'AWS_SECRET_ACCESS_KEY': 'secret', + }, + clear=True +) +def 
test_create_kibana_auth_aws_implicit_region(): + auth = create_kibana_auth( + kibana_url='http://kibana.test.org', + rule={} + ) + assert type(auth) is RefeshableAWSRequestsAuth + assert auth.aws_host == 'kibana.test.org' + assert auth.aws_region == 'us-east-2' + assert auth.service == 'es' + assert auth.aws_access_key == 'access' + assert auth.aws_secret_access_key == 'secret' + assert auth.aws_token is None + + +@mock.patch.dict( + os.environ, + {}, + clear=True +) +def test_create_kibana_auth_unauthenticated(): + auth = create_kibana_auth( + kibana_url='http://kibana.test.org', + rule={} + ) + assert auth is None + + +def test_kibana_external_url_formatter_not_implemented(): + formatter = KibanaExternalUrlFormatter() + with pytest.raises(NotImplementedError): + formatter.format('test') diff --git a/tests/kibana_test.py b/tests/kibana_test.py deleted file mode 100644 index 646c569e9..000000000 --- a/tests/kibana_test.py +++ /dev/null @@ -1,104 +0,0 @@ -import copy -import json - -from elastalert.kibana import add_filter -from elastalert.kibana import dashboard_temp -from elastalert.kibana import filters_from_dashboard -from elastalert.kibana import kibana4_dashboard_link - - -# Dashboard schema with only filters section -test_dashboard = '''{ - "title": "AD Lock Outs", - "services": { - "filter": { - "list": { - "0": { - "type": "time", - "field": "@timestamp", - "from": "now-7d", - "to": "now", - "mandate": "must", - "active": true, - "alias": "", - "id": 0 - }, - "1": { - "type": "field", - "field": "_log_type", - "query": "\\"active_directory\\"", - "mandate": "must", - "active": true, - "alias": "", - "id": 1 - }, - "2": { - "type": "querystring", - "query": "ad.security_auditing_code:4740", - "mandate": "must", - "active": true, - "alias": "", - "id": 2 - } - }, - "ids": [ - 0, - 1, - 2 - ] - } - } -}''' -test_dashboard = json.loads(test_dashboard) - - -def test_filters_from_dashboard(): - filters = filters_from_dashboard(test_dashboard) - assert {'term': {'_log_type': '"active_directory"'}} in filters - assert {'query': {'query_string': {'query': 'ad.security_auditing_code:4740'}}} in filters - - -def test_add_filter(): - basic_filter = {"term": {"this": "that"}} - db = copy.deepcopy(dashboard_temp) - add_filter(db, basic_filter) - assert db['services']['filter']['list']['1'] == { - 'field': 'this', - 'alias': '', - 'mandate': 'must', - 'active': True, - 'query': '"that"', - 'type': 'field', - 'id': 1 - } - - list_filter = {"term": {"this": ["that", "those"]}} - db = copy.deepcopy(dashboard_temp) - add_filter(db, list_filter) - assert db['services']['filter']['list']['1'] == { - 'field': 'this', - 'alias': '', - 'mandate': 'must', - 'active': True, - 'query': '("that" AND "those")', - 'type': 'field', - 'id': 1 - } - - -def test_url_encoded(): - url = kibana4_dashboard_link('example.com/#/Dashboard', '2015-01-01T00:00:00Z', '2017-01-01T00:00:00Z') - assert not any([special_char in url for special_char in ["',\":;?&=()"]]) - - -def test_url_env_substitution(environ): - environ.update({ - 'KIBANA_HOST': 'kibana', - 'KIBANA_PORT': '5601', - }) - url = kibana4_dashboard_link( - 'http://$KIBANA_HOST:$KIBANA_PORT/#/Dashboard', - '2015-01-01T00:00:00Z', - '2017-01-01T00:00:00Z', - ) - assert url.startswith('http://kibana:5601/#/Dashboard') diff --git a/tests/loaders_test.py b/tests/loaders_test.py index bb8d3d873..2aeb83547 100644 --- a/tests/loaders_test.py +++ b/tests/loaders_test.py @@ -1,24 +1,34 @@ # -*- coding: utf-8 -*- +from base64 import b64encode import copy import datetime 
import os -import mock +from unittest import mock import pytest import elastalert.alerts import elastalert.ruletypes +from elastalert.alerters.email import EmailAlerter from elastalert.config import load_conf -from elastalert.loaders import FileRulesLoader +from elastalert.loaders import ( + FileRulesLoader, + RulesLoader, + load_rule_schema, +) + from elastalert.util import EAException -test_config = {'rules_folder': 'test_folder', + +loaders_test_cases_path = os.path.join(os.path.dirname(__file__), 'loaders_test_cases') +empty_folder_test_path = os.path.join(loaders_test_cases_path, 'empty') + +test_config = {'rules_folder': empty_folder_test_path, 'run_every': {'minutes': 10}, 'buffer_time': {'minutes': 10}, 'es_host': 'elasticsearch.test', 'es_port': 12345, - 'writeback_index': 'test_index', - 'writeback_alias': 'test_alias'} + 'writeback_index': 'test_index'} test_rule = {'es_host': 'test_host', 'es_port': 12345, @@ -33,7 +43,6 @@ 'filter': [{'term': {'key': 'value'}}], 'alert': 'email', 'use_count_query': True, - 'doc_type': 'blsh', 'email': 'test@test.test', 'aggregation': {'hours': 2}, 'include': ['comparekey', '@timestamp']} @@ -44,6 +53,9 @@ test_args.debug = False test_args.es_debug_trace = None +testrule_args = mock.Mock() +testrule_args.rule = 'testrule.yaml' + def test_import_rules(): rules_loader = FileRulesLoader(test_config) @@ -73,8 +85,8 @@ def test_import_rules(): def test_import_import(): rules_loader = FileRulesLoader(test_config) import_rule = copy.deepcopy(test_rule) - del(import_rule['es_host']) - del(import_rule['es_port']) + del import_rule['es_host'] + del import_rule['es_port'] import_rule['import'] = 'importme.ymlt' import_me = { 'es_host': 'imported_host', @@ -100,8 +112,8 @@ def test_import_import(): def test_import_absolute_import(): rules_loader = FileRulesLoader(test_config) import_rule = copy.deepcopy(test_rule) - del(import_rule['es_host']) - del(import_rule['es_port']) + del import_rule['es_host'] + del import_rule['es_port'] import_rule['import'] = '/importme.ymlt' import_me = { 'es_host': 'imported_host', @@ -126,8 +138,8 @@ def test_import_filter(): rules_loader = FileRulesLoader(test_config) import_rule = copy.deepcopy(test_rule) - del(import_rule['es_host']) - del(import_rule['es_port']) + del import_rule['es_host'] + del import_rule['es_port'] import_rule['import'] = 'importme.ymlt' import_me = { 'es_host': 'imported_host', @@ -160,18 +172,44 @@ def test_load_inline_alert_rule(): with mock.patch.object(rules_loader, 'get_yaml') as mock_open: mock_open.side_effect = [test_config_copy, test_rule_copy] rules_loader.load_modules(test_rule_copy) - assert isinstance(test_rule_copy['alert'][0], elastalert.alerts.EmailAlerter) - assert isinstance(test_rule_copy['alert'][1], elastalert.alerts.EmailAlerter) + assert isinstance(test_rule_copy['alert'][0], EmailAlerter) + assert isinstance(test_rule_copy['alert'][1], EmailAlerter) assert 'foo@bar.baz' in test_rule_copy['alert'][0].rule['email'] assert 'baz@foo.bar' in test_rule_copy['alert'][1].rule['email'] +def test_load_inline_alert_rule_with_jinja(): + rules_loader = FileRulesLoader(test_config) + test_rule_copy = copy.deepcopy(test_rule) + test_rule_copy['alert'] = [ + { + 'email': { + 'alert_text_type': 'alert_text_jinja', + 'alert_text': '{{ myjinjavar }}' + } + }, + { + 'email': { + 'alert_text': 'hello' + } + } + ] + test_config_copy = copy.deepcopy(test_config) + with mock.patch.object(rules_loader, 'get_yaml') as mock_open: + mock_open.side_effect = [test_config_copy, test_rule_copy] + 
rules_loader.load_modules(test_rule_copy) + assert isinstance(test_rule_copy['alert'][0], EmailAlerter) + assert isinstance(test_rule_copy['alert'][1], EmailAlerter) + assert 'jinja_template' in test_rule_copy['alert'][0].rule + assert 'jinja_template' not in test_rule_copy['alert'][1].rule + + def test_file_rules_loader_get_names_recursive(): - conf = {'scan_subdirectories': True, 'rules_folder': 'root'} + conf = {'scan_subdirectories': True, 'rules_folder': empty_folder_test_path} rules_loader = FileRulesLoader(conf) - walk_paths = (('root', ('folder_a', 'folder_b'), ('rule.yaml',)), - ('root/folder_a', (), ('a.yaml', 'ab.yaml')), - ('root/folder_b', (), ('b.yaml',))) + walk_paths = (('root', ['folder_a', 'folder_b'], ('rule.yaml',)), + ('root/folder_a', [], ('a.yaml', 'ab.yaml')), + ('root/folder_b', [], ('b.yaml',))) with mock.patch('os.walk') as mock_walk: mock_walk.return_value = walk_paths paths = rules_loader.get_names(conf) @@ -185,20 +223,37 @@ def test_file_rules_loader_get_names_recursive(): assert len(paths) == 4 +def test_file_rules_loader_get_names_invalid_path(): + conf = {'scan_subdirectories': True, 'rules_folder': './folder_missing#XYZ'} + try: + # folder missing so FileRulesLoader must throws an error + if FileRulesLoader(conf).get_names(conf): + assert False + except EAException: + pass + + def test_file_rules_loader_get_names(): + + class MockDirEntry: + # os.DirEntry of os.scandir + def __init__(self, name): + self.name = name + # Check for no subdirectory conf = {'scan_subdirectories': False, 'rules_folder': 'root'} rules_loader = FileRulesLoader(conf) - files = ['badfile', 'a.yaml', 'b.yaml'] + files = [MockDirEntry(name='badfile'), MockDirEntry('a.yaml'), MockDirEntry('b.yaml')] - with mock.patch('os.listdir') as mock_list: - with mock.patch('os.path.isfile') as mock_path: - mock_path.return_value = True - mock_list.return_value = files - paths = rules_loader.get_names(conf) + with mock.patch('os.path.isdir') as mock_dir: + with mock.patch('os.scandir') as mock_list: + with mock.patch('os.path.isfile') as mock_path: + mock_dir.return_value = conf['rules_folder'] + mock_path.return_value = True + mock_list.return_value = files + paths = rules_loader.get_names(conf) paths = [p.replace(os.path.sep, '/') for p in paths] - assert 'root/a.yaml' in paths assert 'root/b.yaml' in paths assert len(paths) == 2 @@ -207,9 +262,9 @@ def test_file_rules_loader_get_names(): def test_load_rules(): test_rule_copy = copy.deepcopy(test_rule) test_config_copy = copy.deepcopy(test_config) - with mock.patch('elastalert.config.yaml_loader') as mock_conf_open: + with mock.patch('elastalert.config.read_yaml') as mock_conf_open: mock_conf_open.return_value = test_config_copy - with mock.patch('elastalert.loaders.yaml_loader') as mock_rule_open: + with mock.patch('elastalert.loaders.read_yaml') as mock_rule_open: mock_rule_open.return_value = test_rule_copy with mock.patch('os.walk') as mock_ls: @@ -233,9 +288,9 @@ def test_load_default_host_port(): test_rule_copy.pop('es_host') test_rule_copy.pop('es_port') test_config_copy = copy.deepcopy(test_config) - with mock.patch('elastalert.config.yaml_loader') as mock_conf_open: + with mock.patch('elastalert.config.read_yaml') as mock_conf_open: mock_conf_open.return_value = test_config_copy - with mock.patch('elastalert.loaders.yaml_loader') as mock_rule_open: + with mock.patch('elastalert.loaders.read_yaml') as mock_rule_open: mock_rule_open.return_value = test_rule_copy with mock.patch('os.walk') as mock_ls: @@ -253,18 +308,14 @@ def 
test_load_ssl_env_false(): test_rule_copy.pop('es_host') test_rule_copy.pop('es_port') test_config_copy = copy.deepcopy(test_config) - with mock.patch('elastalert.config.yaml_loader') as mock_conf_open: + with mock.patch('elastalert.config.read_yaml') as mock_conf_open: mock_conf_open.return_value = test_config_copy - with mock.patch('elastalert.loaders.yaml_loader') as mock_rule_open: + with mock.patch('elastalert.loaders.read_yaml') as mock_rule_open: mock_rule_open.return_value = test_rule_copy - - with mock.patch('os.listdir') as mock_ls: - with mock.patch.dict(os.environ, {'ES_USE_SSL': 'false'}): - mock_ls.return_value = ['testrule.yaml'] - rules = load_conf(test_args) - rules['rules'] = rules['rules_loader'].load(rules) - - assert rules['use_ssl'] is False + with mock.patch.dict(os.environ, {'ES_USE_SSL': 'false'}): + rules = load_conf(test_args) + rules['rules'] = rules['rules_loader'].load(rules, testrule_args) + assert rules['use_ssl'] is False def test_load_ssl_env_true(): @@ -272,18 +323,14 @@ def test_load_ssl_env_true(): test_rule_copy.pop('es_host') test_rule_copy.pop('es_port') test_config_copy = copy.deepcopy(test_config) - with mock.patch('elastalert.config.yaml_loader') as mock_conf_open: + with mock.patch('elastalert.config.read_yaml') as mock_conf_open: mock_conf_open.return_value = test_config_copy - with mock.patch('elastalert.loaders.yaml_loader') as mock_rule_open: + with mock.patch('elastalert.loaders.read_yaml') as mock_rule_open: mock_rule_open.return_value = test_rule_copy - - with mock.patch('os.listdir') as mock_ls: - with mock.patch.dict(os.environ, {'ES_USE_SSL': 'true'}): - mock_ls.return_value = ['testrule.yaml'] - rules = load_conf(test_args) - rules['rules'] = rules['rules_loader'].load(rules) - - assert rules['use_ssl'] is True + with mock.patch.dict(os.environ, {'ES_USE_SSL': 'true'}): + rules = load_conf(test_args) + rules['rules'] = rules['rules_loader'].load(rules, testrule_args) + assert rules['use_ssl'] is True def test_load_url_prefix_env(): @@ -291,35 +338,28 @@ def test_load_url_prefix_env(): test_rule_copy.pop('es_host') test_rule_copy.pop('es_port') test_config_copy = copy.deepcopy(test_config) - with mock.patch('elastalert.config.yaml_loader') as mock_conf_open: + with mock.patch('elastalert.config.read_yaml') as mock_conf_open: mock_conf_open.return_value = test_config_copy - with mock.patch('elastalert.loaders.yaml_loader') as mock_rule_open: + with mock.patch('elastalert.loaders.read_yaml') as mock_rule_open: mock_rule_open.return_value = test_rule_copy - - with mock.patch('os.listdir') as mock_ls: - with mock.patch.dict(os.environ, {'ES_URL_PREFIX': 'es/'}): - mock_ls.return_value = ['testrule.yaml'] - rules = load_conf(test_args) - rules['rules'] = rules['rules_loader'].load(rules) - - assert rules['es_url_prefix'] == 'es/' + with mock.patch.dict(os.environ, {'ES_URL_PREFIX': 'es/'}): + rules = load_conf(test_args) + rules['rules'] = rules['rules_loader'].load(rules, testrule_args) + assert rules['es_url_prefix'] == 'es/' def test_load_disabled_rules(): test_rule_copy = copy.deepcopy(test_rule) test_rule_copy['is_enabled'] = False test_config_copy = copy.deepcopy(test_config) - with mock.patch('elastalert.config.yaml_loader') as mock_conf_open: + with mock.patch('elastalert.config.read_yaml') as mock_conf_open: mock_conf_open.return_value = test_config_copy - with mock.patch('elastalert.loaders.yaml_loader') as mock_rule_open: + with mock.patch('elastalert.loaders.read_yaml') as mock_rule_open: mock_rule_open.return_value = 
test_rule_copy - - with mock.patch('os.listdir') as mock_ls: - mock_ls.return_value = ['testrule.yaml'] - rules = load_conf(test_args) - rules['rules'] = rules['rules_loader'].load(rules) - # The rule is not loaded for it has "is_enabled=False" - assert len(rules['rules']) == 0 + rules = load_conf(test_args) + rules['rules'] = rules['rules_loader'].load(rules, testrule_args) + # The rule is not loaded for it has "is_enabled=False" + assert len(rules['rules']) == 0 def test_raises_on_missing_config(): @@ -334,13 +374,13 @@ def test_raises_on_missing_config(): if key in optional_keys: continue - with mock.patch('elastalert.config.yaml_loader') as mock_conf_open: + with mock.patch('elastalert.config.read_yaml') as mock_conf_open: mock_conf_open.return_value = test_config_copy - with mock.patch('elastalert.loaders.yaml_loader') as mock_rule_open: + with mock.patch('elastalert.loaders.read_yaml') as mock_rule_open: mock_rule_open.return_value = test_rule_copy with mock.patch('os.walk') as mock_walk: mock_walk.return_value = [('', [], ['testrule.yaml'])] - with pytest.raises(EAException, message='key %s should be required' % key): + with pytest.raises(EAException): rules = load_conf(test_args) rules['rules'] = rules['rules_loader'].load(rules) @@ -390,36 +430,6 @@ def test_name_inference(): assert test_rule_copy['name'] == 'msmerc woz ere' -def test_raises_on_bad_generate_kibana_filters(): - test_rule['generate_kibana_link'] = True - bad_filters = [[{'not': {'terms': {'blah': 'blah'}}}], - [{'terms': {'blah': 'blah'}}], - [{'query': {'not_querystring': 'this:that'}}], - [{'query': {'wildcard': 'this*that'}}], - [{'blah': 'blah'}]] - good_filters = [[{'term': {'field': 'value'}}], - [{'not': {'term': {'this': 'that'}}}], - [{'not': {'query': {'query_string': {'query': 'this:that'}}}}], - [{'query': {'query_string': {'query': 'this:that'}}}], - [{'range': {'blah': {'from': 'a', 'to': 'b'}}}], - [{'not': {'range': {'blah': {'from': 'a', 'to': 'b'}}}}]] - - # Test that all the good filters work, but fail with a bad filter added - for good in good_filters: - test_config_copy = copy.deepcopy(test_config) - rules_loader = FileRulesLoader(test_config_copy) - - test_rule_copy = copy.deepcopy(test_rule) - test_rule_copy['filter'] = good - with mock.patch.object(rules_loader, 'get_yaml') as mock_open: - mock_open.return_value = test_rule_copy - rules_loader.load_configuration('blah', test_config) - for bad in bad_filters: - test_rule_copy['filter'] = good + bad - with pytest.raises(EAException): - rules_loader.load_configuration('blah', test_config) - - def test_kibana_discover_from_timedelta(): test_config_copy = copy.deepcopy(test_config) rules_loader = FileRulesLoader(test_config_copy) @@ -438,3 +448,221 @@ def test_kibana_discover_to_timedelta(): rules_loader.load_options(test_rule_copy, test_config, 'filename.yaml') assert isinstance(test_rule_copy['kibana_discover_to_timedelta'], datetime.timedelta) assert test_rule_copy['kibana_discover_to_timedelta'] == datetime.timedelta(minutes=2) + + +def test_custom_timestamp_type_timestamp_format(): + test_config_copy = copy.deepcopy(test_config) + rules_loader = FileRulesLoader(test_config_copy) + test_rule_copy = copy.deepcopy(test_rule) + test_rule_copy['timestamp_type'] = 'custom' + test_rule_copy['timestamp_format'] = '%Y-%m-%d %H:%M:%S.%f' + rules_loader.load_options(test_rule_copy, test_config, 'filename.yaml') + test_date = datetime.datetime(2022, 11, 12, 13, 14, 15, 123456, tzinfo=datetime.timezone.utc) + assert 
isinstance(test_rule_copy['ts_to_dt']('2022-11-12 13:14:15.123456'), datetime.datetime) + assert test_rule_copy['ts_to_dt']('2022-11-12 13:14:15.123456') == test_date + assert test_rule_copy['dt_to_ts'](test_date) == '2022-11-12 13:14:15.123456' + + +def test_custom_timestamp_type_timestamp_to_datetime_format_expr(): + test_config_copy = copy.deepcopy(test_config) + rules_loader = FileRulesLoader(test_config_copy) + test_rule_copy = copy.deepcopy(test_rule) + test_rule_copy['timestamp_type'] = 'custom' + test_rule_copy['timestamp_to_datetime_format_expr'] = 'ts[:19] + ts[29:]' + rules_loader.load_options(test_rule_copy, test_config, 'filename.yaml') + test_date = datetime.datetime(2022, 11, 12, 13, 14, 15, tzinfo=datetime.timezone.utc) + assert isinstance(test_rule_copy['ts_to_dt']("2022-11-12T13:14:15.123456789Z"), datetime.datetime) + assert test_rule_copy['ts_to_dt']("2022-11-12T13:14:15.123456789Z") == test_date + + +def test_custom_timestamp_type_timestamp_format_expr_using_ts(): + test_config_copy = copy.deepcopy(test_config) + rules_loader = FileRulesLoader(test_config_copy) + test_rule_copy = copy.deepcopy(test_rule) + test_rule_copy['timestamp_type'] = 'custom' + test_rule_copy['timestamp_format_expr'] = 'ts.replace("T", " ")' + rules_loader.load_options(test_rule_copy, test_config, 'filename.yaml') + test_date = datetime.datetime(2022, 11, 12, 13, 14, 15, tzinfo=datetime.timezone.utc) + assert test_rule_copy['dt_to_ts'](test_date) == '2022-11-12 13:14:15Z' + + +def test_custom_timestamp_type_timestamp_format_expr_using_dt(): + test_config_copy = copy.deepcopy(test_config) + rules_loader = FileRulesLoader(test_config_copy) + test_rule_copy = copy.deepcopy(test_rule) + test_rule_copy['timestamp_type'] = 'custom' + test_rule_copy['timestamp_format_expr'] = 'dt.replace(year=2020).strftime("%Y-%m-%dT%H:%M:%SZ")' + rules_loader.load_options(test_rule_copy, test_config, 'filename.yaml') + test_date = datetime.datetime(2020, 11, 12, 13, 14, 15, tzinfo=datetime.timezone.utc) + assert test_rule_copy['dt_to_ts'](test_date) == '2020-11-12T13:14:15Z' + + +def test_rulesloader_get_names(): + try: + RulesLoader.get_names('', '') + assert False + except NotImplementedError: + assert True + + +def test_rulesloader_get_hashes(): + try: + RulesLoader.get_hashes('', '') + assert False + except NotImplementedError: + assert True + + +def test_rulesloader_get_yaml(): + try: + RulesLoader.get_yaml('', '') + assert False + except NotImplementedError: + assert True + + +def test_get_import_rule(): + rule = { + 'import': 'a' + } + result = RulesLoader.get_import_rule('', rule) + assert 'a' == result + + +def test_get_rule_file_hash_when_file_not_found(): + test_config_copy = copy.deepcopy(test_config) + rules_loader = FileRulesLoader(test_config_copy) + hash = rules_loader.get_rule_file_hash('empty_folder_test/file_not_found.yml') + assert isinstance(hash, bytes) + b64Hash = b64encode(hash).decode('ascii') + assert 'zR1Ml8y8S8Z/I5j7b48OH+DJqUw=' == b64Hash + + +def test_load_yaml_recursive_import(): + config = {} + rules_loader = FileRulesLoader(config) + + trunk_path = os.path.join(loaders_test_cases_path, 'recursive_import/trunk.yaml') + branch_path = os.path.join(loaders_test_cases_path, 'recursive_import/branch.yaml') + leaf_path = os.path.join(loaders_test_cases_path, 'recursive_import/leaf.yaml') + + # re-load the rule a couple times to ensure import_rules cache is updated correctly + for i in range(3): + + leaf_yaml = rules_loader.load_yaml(leaf_path) + assert leaf_yaml == { + 'name': 'leaf', + 
'rule_file': leaf_path, + 'diameter': '5cm', + } + assert sorted(rules_loader.import_rules.keys()) == [ + branch_path, + leaf_path, + ] + assert rules_loader.import_rules[branch_path] == [ + trunk_path, + ] + assert rules_loader.import_rules[leaf_path] == [ + branch_path, + ] + + +def test_load_yaml_multiple_imports(): + config = {} + rules_loader = FileRulesLoader(config) + + hydrogen_path = os.path.join(loaders_test_cases_path, 'multiple_imports/hydrogen.yaml') + oxygen_path = os.path.join(loaders_test_cases_path, 'multiple_imports/oxygen.yaml') + water_path = os.path.join(loaders_test_cases_path, 'multiple_imports/water.yaml') + + # re-load the rule a couple times to ensure import_rules cache is updated correctly + for i in range(3): + + water_yaml = rules_loader.load_yaml(water_path) + assert water_yaml == { + 'name': 'water', + 'rule_file': water_path, + 'symbol': 'O', + } + assert sorted(rules_loader.import_rules.keys()) == [ + water_path, + ] + assert rules_loader.import_rules[water_path] == [ + hydrogen_path, + oxygen_path, + ] + + +def test_load_yaml_imports_modified(): + config = {} + rules_loader = FileRulesLoader(config) + + rule_path = os.path.join(empty_folder_test_path, 'rule.yaml') + first_import_path = os.path.join(empty_folder_test_path, 'first.yaml') + second_import_path = os.path.join(empty_folder_test_path, 'second.yaml') + + with mock.patch.object(rules_loader, 'get_yaml') as get_yaml: + get_yaml.side_effect = [ + { + 'name': 'rule', + 'import': first_import_path, + }, + { + 'imported': 'first', + } + ] + rule_yaml = rules_loader.load_yaml(rule_path) + assert rule_yaml == { + 'name': 'rule', + 'rule_file': rule_path, + 'imported': 'first', + } + assert sorted(rules_loader.import_rules.keys()) == [ + rule_path, + ] + assert rules_loader.import_rules[rule_path] == [ + first_import_path + ] + + # simulate the import changing + with mock.patch.object(rules_loader, 'get_yaml') as get_yaml: + get_yaml.side_effect = [ + { + 'name': 'rule', + 'import': second_import_path, + }, + { + 'imported': 'second', + } + ] + rule_yaml = rules_loader.load_yaml(rule_path) + assert rule_yaml == { + 'name': 'rule', + 'rule_file': rule_path, + 'imported': 'second', + } + assert sorted(rules_loader.import_rules.keys()) == [ + rule_path, + ] + assert rules_loader.import_rules[rule_path] == [ + second_import_path + ] + + # simulate the import being removed + with mock.patch.object(rules_loader, 'get_yaml') as get_yaml: + get_yaml.side_effect = [ + { + 'name': 'rule', + }, + ] + rule_yaml = rules_loader.load_yaml(rule_path) + assert rule_yaml == { + 'name': 'rule', + 'rule_file': rule_path, + } + assert len(rules_loader.import_rules) == 0 + + +def test_load_rule_schema(): + validator = load_rule_schema() + validator.check_schema(validator.schema) diff --git a/tests/loaders_test_cases/empty/.gitkeep b/tests/loaders_test_cases/empty/.gitkeep new file mode 100644 index 000000000..e69de29bb diff --git a/tests/loaders_test_cases/multiple_imports/hydrogen.yaml b/tests/loaders_test_cases/multiple_imports/hydrogen.yaml new file mode 100644 index 000000000..6d4dc6d59 --- /dev/null +++ b/tests/loaders_test_cases/multiple_imports/hydrogen.yaml @@ -0,0 +1,2 @@ +name: hydrogen +symbol: H diff --git a/tests/loaders_test_cases/multiple_imports/oxygen.yaml b/tests/loaders_test_cases/multiple_imports/oxygen.yaml new file mode 100644 index 000000000..eaf4b509f --- /dev/null +++ b/tests/loaders_test_cases/multiple_imports/oxygen.yaml @@ -0,0 +1,2 @@ +name: oxygen +symbol: O diff --git 
a/tests/loaders_test_cases/multiple_imports/water.yaml b/tests/loaders_test_cases/multiple_imports/water.yaml new file mode 100644 index 000000000..e2b78ca7b --- /dev/null +++ b/tests/loaders_test_cases/multiple_imports/water.yaml @@ -0,0 +1,4 @@ +name: water +import: + - hydrogen.yaml + - oxygen.yaml diff --git a/tests/loaders_test_cases/recursive_import/branch.yaml b/tests/loaders_test_cases/recursive_import/branch.yaml new file mode 100644 index 000000000..909d27b17 --- /dev/null +++ b/tests/loaders_test_cases/recursive_import/branch.yaml @@ -0,0 +1,3 @@ +name: branch +import: trunk.yaml +diameter: 5cm diff --git a/tests/loaders_test_cases/recursive_import/leaf.yaml b/tests/loaders_test_cases/recursive_import/leaf.yaml new file mode 100644 index 000000000..6f8cf4c98 --- /dev/null +++ b/tests/loaders_test_cases/recursive_import/leaf.yaml @@ -0,0 +1,2 @@ +name: leaf +import: branch.yaml diff --git a/tests/loaders_test_cases/recursive_import/trunk.yaml b/tests/loaders_test_cases/recursive_import/trunk.yaml new file mode 100644 index 000000000..2773ea1c0 --- /dev/null +++ b/tests/loaders_test_cases/recursive_import/trunk.yaml @@ -0,0 +1,2 @@ +name: root +diameter: 20cm diff --git a/pytest.ini b/tests/pytest.ini similarity index 50% rename from pytest.ini rename to tests/pytest.ini index 0ad3341d9..d859f3a9c 100644 --- a/pytest.ini +++ b/tests/pytest.ini @@ -1,3 +1,5 @@ [pytest] markers = elasticsearch: mark a test as using elasticsearch. +filterwarnings = + ignore::pytest.PytestUnhandledThreadExceptionWarning diff --git a/tests/rules_test.py b/tests/rules_test.py index 1954b5d54..00d87742b 100644 --- a/tests/rules_test.py +++ b/tests/rules_test.py @@ -2,20 +2,25 @@ import copy import datetime -import mock +from unittest import mock import pytest +from datetime import datetime as dt +from tests.conftest import ea from elastalert.ruletypes import AnyRule from elastalert.ruletypes import BaseAggregationRule from elastalert.ruletypes import BlacklistRule from elastalert.ruletypes import CardinalityRule from elastalert.ruletypes import ChangeRule +from elastalert.ruletypes import CompareRule from elastalert.ruletypes import EventWindow from elastalert.ruletypes import FlatlineRule from elastalert.ruletypes import FrequencyRule from elastalert.ruletypes import MetricAggregationRule +from elastalert.ruletypes import ErrorRateRule from elastalert.ruletypes import NewTermsRule from elastalert.ruletypes import PercentageMatchRule +from elastalert.ruletypes import RuleType from elastalert.ruletypes import SpikeRule from elastalert.ruletypes import WhitelistRule from elastalert.util import dt_to_ts @@ -110,28 +115,35 @@ def test_freq_count(): 'use_count_query': True} # Normal match rule = FrequencyRule(rules) - rule.add_count_data({ts_to_dt('2014-10-10T00:00:00'): 75}) + rule.add_count_data({'event':[{'@timestamp': ts_to_dt('2014-10-10T00:00:00')}],'endtime':ts_to_dt('2014-10-10T00:00:00'),'count': 75}) assert len(rule.matches) == 0 - rule.add_count_data({ts_to_dt('2014-10-10T00:15:00'): 10}) + rule.add_count_data({'event':[{'@timestamp': ts_to_dt('2014-10-10T00:15:00')}],'endtime':ts_to_dt('2014-10-10T00:15:00'),'count': 10}) assert len(rule.matches) == 0 - rule.add_count_data({ts_to_dt('2014-10-10T00:25:00'): 10}) + rule.add_count_data({'event':[{'@timestamp': ts_to_dt('2014-10-10T00:25:00')}],'endtime':ts_to_dt('2014-10-10T00:25:00'),'count': 10}) assert len(rule.matches) == 0 - rule.add_count_data({ts_to_dt('2014-10-10T00:45:00'): 6}) + rule.add_count_data({'event':[{'@timestamp': 
ts_to_dt('2014-10-10T00:45:00')}],'endtime':ts_to_dt('2014-10-10T00:45:00'),'count': 6}) assert len(rule.matches) == 1 # First data goes out of timeframe first rule = FrequencyRule(rules) - rule.add_count_data({ts_to_dt('2014-10-10T00:00:00'): 75}) + rule.add_count_data({'event':[{'@timestamp': ts_to_dt('2014-10-10T00:00:00')}],'endtime':ts_to_dt('2014-10-10T00:00:00'),'count': 75}) assert len(rule.matches) == 0 - rule.add_count_data({ts_to_dt('2014-10-10T00:45:00'): 10}) + rule.add_count_data({'event':[{'@timestamp': ts_to_dt('2014-10-10T00:45:00')}],'endtime':ts_to_dt('2014-10-10T00:45:00'),'count': 10}) assert len(rule.matches) == 0 - rule.add_count_data({ts_to_dt('2014-10-10T00:55:00'): 10}) + rule.add_count_data({'event':[{'@timestamp': ts_to_dt('2014-10-10T00:55:00')}],'endtime':ts_to_dt('2014-10-10T00:55:00'),'count': 10}) assert len(rule.matches) == 0 - rule.add_count_data({ts_to_dt('2014-10-10T01:05:00'): 6}) + rule.add_count_data({'event':[{'@timestamp': ts_to_dt('2014-10-10T01:05:00')}],'endtime':ts_to_dt('2014-10-10T01:05:00'),'count': 6}) assert len(rule.matches) == 0 - rule.add_count_data({ts_to_dt('2014-10-10T01:00:00'): 75}) + rule.add_count_data({'event':[{'@timestamp': ts_to_dt('2014-10-10T01:00:00')}],'endtime':ts_to_dt('2014-10-10T01:00:00'),'count': 75}) assert len(rule.matches) == 1 + # except EAException + try: + rule = FrequencyRule(rules) + rule.add_count_data('aaaa') + except EAException as ea: + assert 'add_count_data should have endtime and count' in str(ea) + def test_freq_out_of_order(): events = hits(60, timestamp_field='blah', username='qlo') @@ -214,20 +226,20 @@ def test_spike_count(): rule = SpikeRule(rules) # Double rate of events at 20 seconds - rule.add_count_data({ts_to_dt('2014-09-26T00:00:00'): 10}) + rule.add_count_data({'endtime':ts_to_dt('2014-09-26T00:00:00'),'count': 10}) assert len(rule.matches) == 0 - rule.add_count_data({ts_to_dt('2014-09-26T00:00:10'): 10}) + rule.add_count_data({'endtime':ts_to_dt('2014-09-26T00:00:10'),'count': 10}) assert len(rule.matches) == 0 - rule.add_count_data({ts_to_dt('2014-09-26T00:00:20'): 20}) + rule.add_count_data({'endtime':ts_to_dt('2014-09-26T00:00:20'),'count': 20}) assert len(rule.matches) == 1 # Downward spike rule = SpikeRule(rules) - rule.add_count_data({ts_to_dt('2014-09-26T00:00:00'): 10}) + rule.add_count_data({'endtime':ts_to_dt('2014-09-26T00:00:00'),'count': 10}) assert len(rule.matches) == 0 - rule.add_count_data({ts_to_dt('2014-09-26T00:00:10'): 10}) + rule.add_count_data({'endtime':ts_to_dt('2014-09-26T00:00:10'),'count': 10}) assert len(rule.matches) == 0 - rule.add_count_data({ts_to_dt('2014-09-26T00:00:20'): 0}) + rule.add_count_data({'endtime':ts_to_dt('2014-09-26T00:00:20'),'count': 0}) assert len(rule.matches) == 1 @@ -547,18 +559,38 @@ def test_change(): assert rule.matches == [] -def test_new_term(): +@pytest.mark.parametrize('version', [ + ({'version': {'number': '7.x.x'}}, True), + ({'version': {'number': '1.2.0', 'distribution': 'opensearch'}}, True), + ({'version': {'number': '2.0.0', 'distribution': 'opensearch'}}, True), +]) +def test_new_term(version): rules = {'fields': ['a', 'b'], 'timestamp_field': '@timestamp', - 'es_host': 'example.com', 'es_port': 10, 'index': 'logstash', + 'kibana_adapter': 'example.com', 'kibana_adapter_port': 10, 'index': 'logstash', 'ts_to_dt': ts_to_dt, 'dt_to_ts': dt_to_ts} - mock_res = {'aggregations': {'filtered': {'values': {'buckets': [{'key': 'key1', 'doc_count': 1}, - {'key': 'key2', 'doc_count': 5}]}}}} + mock_res = { + 'responses': [{ + 
'aggregations': { + 'values': { + 'buckets': [{ + 'key': 'key1', + 'doc_count': 1 + }, + { + 'key': 'key2', + 'doc_count': 5 + } + ] + } + } + }] + } - with mock.patch('elastalert.ruletypes.elasticsearch_client') as mock_es: + with mock.patch('elastalert.ruletypes.kibana_adapter_client') as mock_es: mock_es.return_value = mock.Mock() - mock_es.return_value.search.return_value = mock_res - mock_es.return_value.info.return_value = {'version': {'number': '2.x.x'}} + mock_es.return_value.msearch.return_value = mock_res + mock_es.return_value.info.return_value = version call_args = [] # search is called with a mutable dict containing timestamps, this is required to test @@ -569,8 +601,7 @@ def record_args(*args, **kwargs): mock_es.return_value.search.side_effect = record_args rule = NewTermsRule(rules) - # 30 day default range, 1 day default step, times 2 fields - assert rule.es.search.call_count == 60 + # Assert that all calls have the proper ordering of time ranges old_ts = '2010-01-01T00:00:00Z' @@ -587,186 +618,428 @@ def record_args(*args, **kwargs): old_ts = gte # Key1 and key2 shouldn't cause a match - rule.add_data([{'@timestamp': ts_now(), 'a': 'key1', 'b': 'key2'}]) + data = { + ts_now() : { + "a": (["key1"],[1]), + "b": (["key2"], [1]) + } + } + rule.add_terms_data(data) + + + # rule.add_data([{'@timestamp': ts_now(), 'a': 'key1', 'b': 'key2'}]) + assert rule.matches == [] # Neither will missing values - rule.add_data([{'@timestamp': ts_now(), 'a': 'key2'}]) + data = { + ts_now() : { + "a": (["key2"],[1]), + "b": ([],[]) + } + } + rule.add_terms_data(data) + # rule.add_data([{'@timestamp': ts_now(), 'a': 'key2'}]) assert rule.matches == [] # Key3 causes an alert for field b - rule.add_data([{'@timestamp': ts_now(), 'a': 'key2', 'b': 'key3'}]) + data = { + ts_now() : { + "a": (["key2"],[1]), + "b": (["key3"],[1]) + } + } + rule.add_terms_data(data) + + #rule.add_data([{'@timestamp': ts_now(), 'a': 'key2', 'b': 'key3'}]) assert len(rule.matches) == 1 - assert rule.matches[0]['new_field'] == 'b' - assert rule.matches[0]['b'] == 'key3' + assert rule.matches[0]['field'] == 'b' + assert rule.matches[0]['new_value'] == 'key3' rule.matches = [] # Key3 doesn't cause another alert for field b - rule.add_data([{'@timestamp': ts_now(), 'a': 'key2', 'b': 'key3'}]) + data = { + ts_now() : { + "a": (["key2"],[1]), + "b": (["key3"],[1]) + } + } + rule.add_terms_data(data) + # rule.add_data([{'@timestamp': ts_now(), 'a': 'key2', 'b': 'key3'}]) assert rule.matches == [] - # Missing_field - rules['alert_on_missing_field'] = True - with mock.patch('elastalert.ruletypes.elasticsearch_client') as mock_es: - mock_es.return_value = mock.Mock() - mock_es.return_value.search.return_value = mock_res - mock_es.return_value.info.return_value = {'version': {'number': '2.x.x'}} - rule = NewTermsRule(rules) - rule.add_data([{'@timestamp': ts_now(), 'a': 'key2'}]) - assert len(rule.matches) == 1 - assert rule.matches[0]['missing_field'] == 'b' + ## Missing field - wont work as we use terms aggregation + # rules['alert_on_missing_field'] = True + # with mock.patch('elastalert.ruletypes.kibana_adapter_client') as mock_es: + # mock_es.return_value = mock.Mock() + # mock_es.return_value.msearch.return_value = mock_res + # mock_es.return_value.info.return_value = version + # rule = NewTermsRule(rules) + # data = { + # ts_now() : { + # "a": ["key2"], + # "b": [] + # } + # } + # rule.add_terms_data(data) + # #rule.add_data([{'@timestamp': ts_now(), 'a': 'key2'}]) + # assert len(rule.matches) == 1 + # assert 
rule.matches[0]['missing_field'] == 'b' def test_new_term_nested_field(): rules = {'fields': ['a', 'b.c'], 'timestamp_field': '@timestamp', - 'es_host': 'example.com', 'es_port': 10, 'index': 'logstash', + 'kibana_adapter_host': 'example.com', 'kibana_adapter_port': 10, 'index': 'logstash', 'ts_to_dt': ts_to_dt, 'dt_to_ts': dt_to_ts} - mock_res = {'aggregations': {'filtered': {'values': {'buckets': [{'key': 'key1', 'doc_count': 1}, - {'key': 'key2', 'doc_count': 5}]}}}} - with mock.patch('elastalert.ruletypes.elasticsearch_client') as mock_es: + mock_res ={'responses' : [{'aggregations': {'values': {'buckets': [{'key': 'key1', 'doc_count': 1}, + {'key': 'key2', 'doc_count': 5}]}}}] } + with mock.patch('elastalert.ruletypes.kibana_adapter_client') as mock_es: mock_es.return_value = mock.Mock() - mock_es.return_value.search.return_value = mock_res + mock_es.return_value.msearch.return_value = mock_res mock_es.return_value.info.return_value = {'version': {'number': '2.x.x'}} rule = NewTermsRule(rules) - assert rule.es.search.call_count == 60 + # Key3 causes an alert for nested field b.c - rule.add_data([{'@timestamp': ts_now(), 'b': {'c': 'key3'}}]) + data = { + ts_now() : { + "a": ([],[]), + "b.c": (["key3"],[1]) + } + } + rule.add_terms_data(data) + + # rule.add_data([{'@timestamp': ts_now(), 'b': {'c': 'key3'}}]) assert len(rule.matches) == 1 - assert rule.matches[0]['new_field'] == 'b.c' - assert rule.matches[0]['b']['c'] == 'key3' + assert rule.matches[0]['field'] == 'b.c' + assert rule.matches[0]['new_value'] == 'key3' rule.matches = [] +def test_new_term_window_updates(): -def test_new_term_with_terms(): rules = {'fields': ['a'], 'timestamp_field': '@timestamp', - 'es_host': 'example.com', 'es_port': 10, 'index': 'logstash', 'query_key': 'a', - 'window_step_size': {'days': 2}, - 'ts_to_dt': ts_to_dt, 'dt_to_ts': dt_to_ts} - mock_res = {'aggregations': {'filtered': {'values': {'buckets': [{'key': 'key1', 'doc_count': 1}, - {'key': 'key2', 'doc_count': 5}]}}}} + 'kibana_adapter_host': 'example.com', 'kibana_adapter_port': 10, 'index': 'logstash', + 'ts_to_dt': ts_to_dt, 'dt_to_ts': dt_to_ts, 'terms_window_size': {'hours': 3 }, 'threshold': 20, 'threshold_window_size': {'hours': 1} } + mock_res ={'responses' : [{'aggregations': {'values': {'buckets': [{'key': 'key1', 'doc_count': 5}, + {'key': 'key2', 'doc_count': 5}]}}}] } + + #empty_test_data + time_pointer = ts_now() - with mock.patch('elastalert.ruletypes.elasticsearch_client') as mock_es: + + with mock.patch('elastalert.ruletypes.kibana_adapter_client') as mock_es: mock_es.return_value = mock.Mock() - mock_es.return_value.search.return_value = mock_res + mock_es.return_value.msearch.return_value = mock_res mock_es.return_value.info.return_value = {'version': {'number': '2.x.x'}} rule = NewTermsRule(rules) + + # key 2 keeps occurring every 1 hour + for i in range(4): + time_pointer += datetime.timedelta(hours=1) + data = { time_pointer : { "a": (['key2'],[5]) } } + rule.add_terms_data(data) + + # 4 hours later, if key1 comes again, match should come + data = { time_pointer : { "a": (['key1'],[20]) } } + rule.add_terms_data(data) + assert len(rule.matches) == 1 - # Only 15 queries because of custom step size - assert rule.es.search.call_count == 15 - - # Key1 and key2 shouldn't cause a match - terms = {ts_now(): [{'key': 'key1', 'doc_count': 1}, - {'key': 'key2', 'doc_count': 1}]} - rule.add_terms_data(terms) - assert rule.matches == [] + # if key1 comes again in the next 2 hours 59 minutes, a match wouldn't come, as it is now in existing 
terms + time_pointer += datetime.timedelta(hours=2, minutes=59) + data = { time_pointer : { "a": (['key1'],[20]) } } + rule.add_terms_data(data) + assert len(rule.matches) == 1 - # Key3 causes an alert for field a - terms = {ts_now(): [{'key': 'key3', 'doc_count': 1}]} - rule.add_terms_data(terms) + # 3 hours later, if same key comes. it will be considered new term, but since threshold isnt reached no matches + time_pointer += datetime.timedelta(hours=3, minutes=1) + data = { time_pointer : { "a": (['key1'],[1]) } } + rule.add_terms_data(data) assert len(rule.matches) == 1 - assert rule.matches[0]['new_field'] == 'a' - assert rule.matches[0]['a'] == 'key3' - rule.matches = [] - # Key3 doesn't cause another alert - terms = {ts_now(): [{'key': 'key3', 'doc_count': 1}]} - rule.add_terms_data(terms) - assert rule.matches == [] + #in next 30 mins, threshold is reached and match is found + time_pointer += datetime.timedelta(minutes= 30) + data = { time_pointer : { "a": (['key1'],[19]) } } + rule.add_terms_data(data) + assert len(rule.matches) == 2 + + #another new term causing match + time_pointer += datetime.timedelta(minutes= 30) + data = { time_pointer : { "a": (['key2'],[21]) } } + rule.add_terms_data(data) + assert len(rule.matches) == 3 + + time_pointer += datetime.timedelta(minutes= 40) + data = { time_pointer : { "a": (['key2'],[21]) } } + rule.add_terms_data(data) + assert len(rule.matches) == 3 def test_new_term_with_composite_fields(): rules = {'fields': [['a', 'b', 'c'], ['d', 'e.f']], 'timestamp_field': '@timestamp', - 'es_host': 'example.com', 'es_port': 10, 'index': 'logstash', + 'kibana_adapter': 'example.com', 'kibana_adapter_port': 10, 'index': 'logstash', 'ts_to_dt': ts_to_dt, 'dt_to_ts': dt_to_ts} mock_res = { - 'aggregations': { - 'filtered': { + 'responses': [{ + 'aggregations': { 'values': { - 'buckets': [ - { - 'key': 'key1', - 'doc_count': 5, - 'values': { - 'buckets': [ - { - 'key': 'key2', - 'doc_count': 5, - 'values': { - 'buckets': [ - { - 'key': 'key3', - 'doc_count': 3, - }, - { - 'key': 'key4', - 'doc_count': 2, - }, - ] - } - } - ] - } + 'buckets': [{ + 'key': 'key1', + 'doc_count': 5, + 'values': { + 'buckets': [{ + 'key': 'key2', + 'doc_count': 5, + 'values': { + 'buckets': [{ + 'key': 'key3', + 'doc_count': 3, + }, + { + 'key': 'key4', + 'doc_count': 2, + }, + ] + } + }] } - ] + }] + } } - } + }] } - with mock.patch('elastalert.ruletypes.elasticsearch_client') as mock_es: + with mock.patch('elastalert.ruletypes.kibana_adapter_client') as mock_es: mock_es.return_value = mock.Mock() - mock_es.return_value.search.return_value = mock_res + mock_es.return_value.msearch.return_value = mock_res mock_es.return_value.info.return_value = {'version': {'number': '2.x.x'}} rule = NewTermsRule(rules) - assert rule.es.search.call_count == 60 - # key3 already exists, and thus shouldn't cause a match - rule.add_data([{'@timestamp': ts_now(), 'a': 'key1', 'b': 'key2', 'c': 'key3'}]) + data = { + ts_now() : { + tuple(['a','b','c']): ([tuple(["key1","key2","key3"])],[1]), + tuple(['d','e.f']): ([],[]) + } + } + rule.add_terms_data(data) assert rule.matches == [] + # key5 causes an alert for composite field [a, b, c] - rule.add_data([{'@timestamp': ts_now(), 'a': 'key1', 'b': 'key2', 'c': 'key5'}]) + data = { + ts_now() : { + ('a', 'b', 'c'): ([("key1","key2","key5")],[1]), + ('d','e.f'): ([],[]) + } + } + rule.add_terms_data(data) assert len(rule.matches) == 1 - assert rule.matches[0]['new_field'] == ('a', 'b', 'c') - assert rule.matches[0]['a'] == 'key1' - assert 
rule.matches[0]['b'] == 'key2' - assert rule.matches[0]['c'] == 'key5' + assert rule.matches[0]['field'] == ('a', 'b', 'c') + assert rule.matches[0]['new_value'] == ("key1","key2","key5") rule.matches = [] - # New values in other fields that are not part of the composite key should not cause an alert - rule.add_data([{'@timestamp': ts_now(), 'a': 'key1', 'b': 'key2', 'c': 'key4', 'd': 'unrelated_value'}]) + # testing same with Threshold Window and Threshold + + rules['threshold'] = 10 + rules['threshold_window_size'] = {'hours': 6} + + + with mock.patch('elastalert.ruletypes.kibana_adapter_client') as mock_es: + mock_es.return_value = mock.Mock() + mock_es.return_value.msearch.return_value = mock_res + mock_es.return_value.info.return_value = {'version': {'number': '2.x.x'}} + rule = NewTermsRule(rules) + + time_pointer = ts_now() + + # will not cause match + data = { + time_pointer : { + ('a', 'b', 'c'): ([("key1","key2","key4")],[1]), + ('d','e.f'): ([],[]) + } + } + rule.add_terms_data(data) assert len(rule.matches) == 0 rule.matches = [] - # Verify nested fields work properly - # Key6 causes an alert for nested field e.f - rule.add_data([{'@timestamp': ts_now(), 'd': 'key4', 'e': {'f': 'key6'}}]) + # will not cause match, as threshold wont be reached + time_pointer += datetime.timedelta(hours = 1) + data = { + time_pointer : { + ('a', 'b', 'c'): ([("key1","key2","key5")],[9]), + ('d','e.f'): ([],[]) + } + } + rule.add_terms_data(data) + assert len(rule.matches) == 0 + + + # will cause match, as threshold will be reached + data = { + time_pointer : { + ('a', 'b', 'c'): ([("key1","key2","key5")],[1]), + ('d','e.f'): ([],[]) + } + } + rule.add_terms_data(data) assert len(rule.matches) == 1 - assert rule.matches[0]['new_field'] == ('d', 'e.f') - assert rule.matches[0]['d'] == 'key4' - assert rule.matches[0]['e']['f'] == 'key6' + assert rule.matches[0]['field'] == ('a', 'b', 'c') + assert rule.matches[0]['new_value'] == ("key1","key2","key5") rule.matches = [] - # Missing_fields - rules['alert_on_missing_field'] = True - with mock.patch('elastalert.ruletypes.elasticsearch_client') as mock_es: + #test composite flatten buckets + keys,counts = rule.flatten_aggregation_hierarchy(mock_res['responses'][0]['aggregations']['values']['buckets']) + assert keys == [('key1', 'key2', 'key3'), ('key1', 'key2', 'key4')] + assert counts == [3, 2] + +def test_new_term_threshold(): + rules = {'fields': ['a'], + 'timestamp_field': '@timestamp', + 'kibana_adapter': 'example.com', 'kibana_adapter_port': 10, 'index': 'logstash', + 'ts_to_dt': ts_to_dt, 'dt_to_ts': dt_to_ts, 'terms_window_size': {'days': 10 }, + 'window_step_size' : {'hours': 1 }, 'terms_size': 10000, 'threshold': 0 } + + mock_res ={'responses' : [{'aggregations': {'values': {'buckets': [{'key': 'key1', 'doc_count': 1}]}}}] } + + with mock.patch('elastalert.ruletypes.kibana_adapter_client') as mock_es: + mock_es.return_value = mock.Mock() + mock_es.return_value.msearch.return_value = mock_res + rule = NewTermsRule(rules) + + + # introducting new value for field a, should trigger as threshold is 0 + data = { + ts_now() : { + ('a'): (["key2"],[1]) + } + } + rule.add_terms_data(data) + assert len(rule.matches) == 1 + + # changing threshold to 10 and threhold_duration to 2 hours + rules['threshold'] = 10 + rules['threshold_window_size'] = {"hours" : 2} + + # used for incrementing time + time_pointer = ts_now() + + with mock.patch('elastalert.ruletypes.kibana_adapter_client') as mock_es: mock_es.return_value = mock.Mock() - 
mock_es.return_value.search.return_value = mock_res - mock_es.return_value.info.return_value = {'version': {'number': '2.x.x'}} + mock_es.return_value.msearch.return_value = mock_res rule = NewTermsRule(rules) - rule.add_data([{'@timestamp': ts_now(), 'a': 'key2'}]) - assert len(rule.matches) == 2 - # This means that any one of the three n composite fields were not present - assert rule.matches[0]['missing_field'] == ('a', 'b', 'c') - assert rule.matches[1]['missing_field'] == ('d', 'e.f') + + # new value for field 'a' with count 8, shouldnt create a match + data = { + time_pointer : { + ('a'): (["key2"],[8]) + } + } + rule.add_terms_data(data) + assert len(rule.matches) == 0 + + + # new value for field 'a' with count 8 after 3 hours, shouldnt create a match + + time_pointer += datetime.timedelta(**{"hours":3}) + + data = { + time_pointer : { + ('a'): (["key2"],[8]) + } + } + rule.add_terms_data(data) + assert len(rule.matches) == 0 + + # new value for field a with count 2 after 10 minutes + # should create a match as the total count stored for the last 2 hours would be 10 + time_pointer += datetime.timedelta(**{"minutes":10}) + + data = { + time_pointer : { + ('a'): (["key1","key2"],[1,2]) + } + } + rule.add_terms_data(data) + assert len(rule.matches) == 1 + + # no new matches should be added, when the rule crosses the threshold the second time + + time_pointer += datetime.timedelta(**{"minutes":10}) + + data = { + time_pointer : { + ('a'): (["key2"],[20]) + } + } + rule.add_terms_data(data) + assert len(rule.matches) == 1 + +def test_new_term_bounds(): + rules = {'fields': ['a'], + 'timestamp_field': '@timestamp', + 'kibana_adapter': 'example.com', 'kibana_adapter_port': 10, 'index': 'logstash', + 'ts_to_dt': ts_to_dt, 'dt_to_ts': dt_to_ts, 'terms_window_size': {'days': 10 }, + 'window_step_size' : {'hours': 1 }, 'terms_size': 10000, 'threshold_window_size': {"days": 3} } + + mock_res ={'responses' : [{'aggregations': {'values': {'buckets': [{'key': 'key1', 'doc_count': 1}, + {'key': 'key2', 'doc_count': 5}]}}}] } + + with mock.patch('elastalert.ruletypes.kibana_adapter_client') as mock_es: + mock_es.return_value = mock.Mock() + mock_es.return_value.msearch.return_value = mock_res + rule = NewTermsRule(rules) + + assert rule.window_size == datetime.timedelta(**{'days': 7}) + assert rule.threshold_window_size == datetime.timedelta(**{'days': 2}) + assert rule.terms_size == 1000 + + +## New implementation will never use with_terms +# def test_new_term_with_terms(): +# rules = {'fields': ['a'], +# 'timestamp_field': '@timestamp', +# 'kibana_adapter_host': 'example.com', 'kibana_adapter_port': 10, 'index': 'logstash', 'query_key': 'a', +# 'window_step_size': {'days': 2}, +# 'ts_to_dt': ts_to_dt, 'dt_to_ts': dt_to_ts} +# mock_res = {'responses' : [{'aggregations': {'values': {'buckets': [{'key': 'key1', 'doc_count': 1}, +# {'key': 'key2', 'doc_count': 5}]}}}]} + +# with mock.patch('elastalert.ruletypes.kibana_adapter_client') as mock_es: +# mock_es.return_value = mock.Mock() +# mock_es.return_value.msearch.return_value = mock_res +# mock_es.return_value.info.return_value = {'version': {'number': '2.x.x'}} +# rule = NewTermsRule(rules) + +# # Only 4 queries because of custom step size +# assert rule.es.msearch.call_count == 4 + +# # Key1 and key2 shouldn't cause a match +# terms = {ts_now(): [{'key': 'key1', 'doc_count': 1}, +# {'key': 'key2', 'doc_count': 1}]} +# rule.add_terms_data(terms) +# assert rule.matches == [] + +# # Key3 causes an alert for field a +# terms = {ts_now(): [{'key': 
'key3', 'doc_count': 1}]} +# rule.add_terms_data(terms) +# assert len(rule.matches) == 1 +# assert rule.matches[0]['new_field'] == 'a' +# assert rule.matches[0]['a'] == 'key3' +# rule.matches = [] + +# # Key3 doesn't cause another alert +# terms = {ts_now(): [{'key': 'key3', 'doc_count': 1}]} +# rule.add_terms_data(terms) +# assert rule.matches == [] + def test_flatline(): @@ -833,13 +1106,13 @@ def test_flatline_count(): 'threshold': 1, 'timestamp_field': '@timestamp'} rule = FlatlineRule(rules) - rule.add_count_data({ts_to_dt('2014-10-11T00:00:00'): 1}) + rule.add_count_data({'endtime':ts_to_dt('2014-10-11T00:00:00'),'count': 1}) rule.garbage_collect(ts_to_dt('2014-10-11T00:00:10')) assert len(rule.matches) == 0 - rule.add_count_data({ts_to_dt('2014-10-11T00:00:15'): 0}) + rule.add_count_data({'endtime':ts_to_dt('2014-10-11T00:00:15'),'count': 0}) rule.garbage_collect(ts_to_dt('2014-10-11T00:00:20')) assert len(rule.matches) == 0 - rule.add_count_data({ts_to_dt('2014-10-11T00:00:35'): 0}) + rule.add_count_data({'endtime':ts_to_dt('2014-10-11T00:00:35'),'count': 0}) assert len(rule.matches) == 1 @@ -1154,10 +1427,108 @@ def test_metric_aggregation(): rule.check_matches(datetime.datetime.now(), None, {'metric_cpu_pct_avg': {'value': 0.95}}) assert len(rule.matches) == 2 - rules['query_key'] = 'qk' + rule = MetricAggregationRule(rules) + rule.check_matches(datetime.datetime.now(), None, {'metric_cpu_pct_avg': {'value': 0.966666667}}) + assert '0.966666667' in rule.get_match_str(rule.matches[0]) + assert rule.matches[0]['metric_cpu_pct_avg'] == 0.966666667 + assert rule.matches[0]['metric_agg_value'] == 0.966666667 + assert 'metric_cpu_pct_avg_formatted' not in rule.matches[0] + assert 'metric_agg_value_formatted' not in rule.matches[0] + + rules['metric_format_string'] = '{:.2%}' + rule = MetricAggregationRule(rules) + rule.check_matches(datetime.datetime.now(), None, {'metric_cpu_pct_avg': {'value': 0.966666667}}) + assert '96.67%' in rule.get_match_str(rule.matches[0]) + assert rule.matches[0]['metric_cpu_pct_avg'] == 0.966666667 + assert rule.matches[0]['metric_agg_value'] == 0.966666667 + assert rule.matches[0]['metric_cpu_pct_avg_formatted'] == '96.67%' + assert rule.matches[0]['metric_agg_value_formatted'] == '96.67%' + + rules['metric_format_string'] = '%.2f' + rule = MetricAggregationRule(rules) + rule.check_matches(datetime.datetime.now(), None, {'metric_cpu_pct_avg': {'value': 0.966666667}}) + assert '0.97' in rule.get_match_str(rule.matches[0]) + assert rule.matches[0]['metric_cpu_pct_avg'] == 0.966666667 + assert rule.matches[0]['metric_agg_value'] == 0.966666667 + assert rule.matches[0]['metric_cpu_pct_avg_formatted'] == '0.97' + assert rule.matches[0]['metric_agg_value_formatted'] == '0.97' + + rules['query_key'] = 'subdict' rule = MetricAggregationRule(rules) rule.check_matches(datetime.datetime.now(), 'qk_val', {'metric_cpu_pct_avg': {'value': 0.95}}) - assert rule.matches[0]['qk'] == 'qk_val' + assert rule.matches[0]['subdict'] == 'qk_val' + + rules['query_key'] = 'subdict1.subdict2.subdict3' + rule = MetricAggregationRule(rules) + rule.check_matches(datetime.datetime.now(), 'qk_val', {'metric_cpu_pct_avg': {'value': 0.95}}) + assert rule.matches[0]['subdict1']['subdict2']['subdict3'] == 'qk_val' + +def test_percentile_metric_aggregation(): + rules = {'buffer_time': datetime.timedelta(minutes=5), + 'timestamp_field': '@timestamp', + 'metric_agg_type': 'percentiles', + 'percentile_range': 95, + 'metric_agg_key': 'cpu_pct'} + + # Check threshold logic + with 
pytest.raises(EAException): + rule = MetricAggregationRule(rules) + + rules['min_threshold'] = 0.1 + rules['max_threshold'] = 0.8 + + rule = MetricAggregationRule(rules) + assert rule.rules['aggregation_query_element'] == {'metric_cpu_pct_percentiles': {'percentiles': {'field': 'cpu_pct', 'percents': [95],'keyed': False}}} + + assert rule.crossed_thresholds(None) is False + assert rule.crossed_thresholds(0.09) is True + assert rule.crossed_thresholds(0.10) is False + assert rule.crossed_thresholds(0.79) is False + assert rule.crossed_thresholds(0.81) is True + + rule.check_matches(datetime.datetime.now(), None, {"doc_count":258757,"key":"appmailer","metric_cpu_pct_percentiles":{"values":[{"key":95,"value":None}]}}) + rule.check_matches(datetime.datetime.now(), None, {"doc_count":258757,"key":"appmailer","metric_cpu_pct_percentiles":{"values":[{"key":95,"value":0.5}]}}) + assert len(rule.matches) == 0 + + rule.check_matches(datetime.datetime.now(), None, {"doc_count":258757,"key":"appmailer","metric_cpu_pct_percentiles":{"values":[{"key":95,"value":0.05}]}}) + rule.check_matches(datetime.datetime.now(), None, {"doc_count":258757,"key":"appmailer","metric_cpu_pct_percentiles":{"values":[{"key":95,"value":0.95}]}}) + assert len(rule.matches) == 2 + + rule = MetricAggregationRule(rules) + rule.check_matches(datetime.datetime.now(), None, {"doc_count":258757,"key":"appmailer","metric_cpu_pct_percentiles":{"values":[{"key":95,"value":0.966666667}]}}) + assert '0.966666667' in rule.get_match_str(rule.matches[0]) + assert rule.matches[0]['metric_cpu_pct_percentiles'] == 0.966666667 + assert rule.matches[0]['metric_agg_value'] == 0.966666667 + assert 'metric_cpu_pct_avg_formatted' not in rule.matches[0] + assert 'metric_agg_value_formatted' not in rule.matches[0] + + rules['metric_format_string'] = '{:.2%}' + rule = MetricAggregationRule(rules) + rule.check_matches(datetime.datetime.now(), None, {"doc_count":258757,"key":"appmailer","metric_cpu_pct_percentiles":{"values":[{"key":95,"value":0.966666667}]}}) + assert '96.67%' in rule.get_match_str(rule.matches[0]) + assert rule.matches[0]['metric_cpu_pct_percentiles'] == 0.966666667 + assert rule.matches[0]['metric_agg_value'] == 0.966666667 + assert rule.matches[0]['metric_cpu_pct_percentiles_formatted'] == '96.67%' + assert rule.matches[0]['metric_agg_value_formatted'] == '96.67%' + + rules['metric_format_string'] = '%.2f' + rule = MetricAggregationRule(rules) + rule.check_matches(datetime.datetime.now(), None, {"doc_count":258757,"key":"appmailer","metric_cpu_pct_percentiles":{"values":[{"key":95,"value":0.966666667}]}}) + assert '0.97' in rule.get_match_str(rule.matches[0]) + assert rule.matches[0]['metric_cpu_pct_percentiles'] == 0.966666667 + assert rule.matches[0]['metric_agg_value'] == 0.966666667 + assert rule.matches[0]['metric_cpu_pct_percentiles_formatted'] == '0.97' + assert rule.matches[0]['metric_agg_value_formatted'] == '0.97' + + rules['query_key'] = 'subdict' + rule = MetricAggregationRule(rules) + rule.check_matches(datetime.datetime.now(), 'qk_val', {'metric_cpu_pct_percentiles': {"values":[{"key":95,"value":0.95}]}}) + assert rule.matches[0]['subdict'] == 'qk_val' + + rules['query_key'] = 'subdict1.subdict2.subdict3' + rule = MetricAggregationRule(rules) + rule.check_matches(datetime.datetime.now(), 'qk_val', {'metric_cpu_pct_percentiles': {"values":[{"key":95,"value":0.95}]}}) + assert rule.matches[0]['subdict1']['subdict2']['subdict3'] == 'qk_val' def test_metric_aggregation_complex_query_key(): @@ -1184,6 +1555,192 @@ def 
test_metric_aggregation_complex_query_key(): assert rule.matches[1]['sub_qk'] == 'sub_qk_val2' +def test_metric_aggregation_complex_query_key_bucket_interval(): + rules = {'buffer_time': datetime.timedelta(minutes=5), + 'timestamp_field': '@timestamp', + 'metric_agg_type': 'avg', + 'metric_agg_key': 'cpu_pct', + 'bucket_interval': {'minutes': 1}, + 'bucket_interval_timedelta': datetime.timedelta(minutes=1), + 'compound_query_key': ['qk', 'sub_qk'], + 'query_key': 'qk,sub_qk', + 'max_threshold': 0.8} + + # Quoted from https://elastalert.readthedocs.io/en/latest/ruletypes.html#metric-aggregation + # bucket_interval: If present this will divide the metric calculation window into bucket_interval sized segments. + # The metric value will be calculated and evaluated against the threshold(s) for each segment. + interval_aggs = {"interval_aggs": {"buckets": [ + {"metric_cpu_pct_avg": {"value": 0.91}, "key": "1617156690000"}, + {"metric_cpu_pct_avg": {"value": 0.89}, "key": "1617156750000"}, + {"metric_cpu_pct_avg": {"value": 0.78}, "key": "1617156810000"}, + {"metric_cpu_pct_avg": {"value": 0.85}, "key": "1617156870000"}, + {"metric_cpu_pct_avg": {"value": 0.86}, "key": "1617156930000"}, + ]}, "key": "sub_qk_val1"} + + query = {"bucket_aggs": {"buckets": [ + interval_aggs + ]}, "key": "qk_val"} + + rule = MetricAggregationRule(rules) + rule.check_matches(datetime.datetime.now(), 'qk_val', query) + assert len(rule.matches) == 4 + assert rule.matches[0]['qk'] == 'qk_val' + assert rule.matches[1]['qk'] == 'qk_val' + assert rule.matches[0]['sub_qk'] == 'sub_qk_val1' + assert rule.matches[1]['sub_qk'] == 'sub_qk_val1' + + +def test_metric_aggregation_scripted(): + script_body = "doc['some_threshold'].value - doc['cpu_pct'].value" + rules = {'buffer_time': datetime.timedelta(minutes=5), + 'timestamp_field': '@timestamp', + 'metric_agg_type': 'avg', + 'metric_agg_key': 'cpu_pct', + 'metric_agg_script': {"script": script_body}, + 'min_threshold': 0.0} + + rule = MetricAggregationRule(rules) + assert rule.rules['aggregation_query_element'] == {'metric_cpu_pct_avg': {'avg': {'script': script_body}}} + + rule.check_matches(datetime.datetime.now(), None, {'metric_cpu_pct_avg': {'value': -0.5}}) + assert rule.matches[0]['metric_cpu_pct_avg'] == -0.5 + +#mock_Response for get_ch_date +def _mock_response( + status=200, + content='{"test": "test"}', + json_data=None, + raise_for_status= None): + + mock_resp = mock.Mock() + # mock raise_for_status call w/optional error + mock_resp.raise_for_status = mock.Mock() + if raise_for_status: + mock_resp.raise_for_status.side_effect = raise_for_status + # set status code and content + mock_resp.status_code = status + mock_resp.content = content + # add json data if provided + if json_data: + mock_resp.json = mock.Mock(return_value=json_data) + return mock_resp + +#Error rate rule testing methods +def get_error_rate_tester(ea,total_count= 5,error_count= 10, count_all_errors=True): + #testing elastalert function that hits query_endpoint and gets aggregation data + rules = [{'es_host': '', + 'es_port': 14900, + 'name': 'error rate', + 'index': 'idx', + 'filter': [], + 'include': ['@timestamp'], + 'aggregation': datetime.timedelta(0), + 'realert': datetime.timedelta(0), + 'processed_hits': {}, + 'timestamp_field': '@timestamp', + 'match_enhancements': [], + 'rule_file': 'blah.yaml', + 'max_query_size': 10000, + 'ts_to_dt': ts_to_dt, + 'dt_to_ts': dt_to_ts, + '_source_enabled': True, + 'buffer_time': datetime.timedelta(minutes=5), + 'sampling' : 100, + 'threshold': 0.5, + 
'error_condition': 'exception.message: *', + 'timestamp_field':'timestamp', + 'type':'error_rate', + 'total_agg_type': 'uniq', + 'total_agg_key': 'traceID', + 'count_all_errors': count_all_errors + }] + + ts = dt.now() + mock_responses = [ + _mock_response(content = '{"data":[{"uniq(traceID)":'+ str(total_count)+'}],"rows":[] }'), + _mock_response(content = '{"data":[{"count()":'+ str(error_count)+'}],"rows":[] }') + ] + + if(not count_all_errors): + mock_responses[1] = _mock_response(content = '{"data":[{"uniq(traceID)":'+ str(error_count)+'}],"rows":[] }') + + with mock.patch('requests.post') as mock_post: + mock_post.side_effect = mock_responses + ea.get_error_rate(rules[0],ts,ts) + calls = mock_post.call_args_list + assert calls[0][0][0] == "http://localhost:9999/v2/sherlock-alerts/traces/visualize" + assert calls[0][1]['json']['aggregations'] == [{'function': 'UNIQ', 'field': 'traceID'}] + assert calls[1][0][0] == "http://localhost:9999/v2/sherlock-alerts/traces/visualize" + if count_all_errors: + assert calls[1][1]['json']['aggregations'] == [{'function': 'COUNT', 'field': '1'}] + else: + assert calls[1][1]['json']['aggregations'] == [{'function': 'UNIQ', 'field': 'traceID'}] + assert calls[1][1]['json']['freshquery'] == rules[0]['error_condition'] + + +@pytest.mark.usefixtures("ea") +def test_error_rate_rule(ea): + rules = { + 'buffer_time': datetime.timedelta(minutes=5), + 'sampling' : 100, + 'threshold': 0.5, + 'error_condition': "exception.message: *", + 'unique_column': 'traceID', + 'timestamp_field':'timestamp' + } + + + #testing default initialization baesd on error_calculation_method method + + rule = ErrorRateRule(rules) + assert rule.rules['count_all_errors'] == True + + rules["error_calculation_method"] = 'count_all_errors' + rule = ErrorRateRule(rules) + assert rule.rules['count_all_errors'] == True + + rules["error_calculation_method"] = 'count_all_errors' + rule = ErrorRateRule(rules) + assert rule.rules['count_all_errors'] == True + + rules["error_calculation_method"] = 'count_traces_with_errors' + rule = ErrorRateRule(rules) + assert rule.rules['count_all_errors'] == False + + timestamp = ts_now() + + payload = { + timestamp : + { + 'total_count': 0, + 'start_time': timestamp, + 'error_count': 0, + 'end_time': timestamp + } + } + + rule.calculate_err_rate(payload) + assert len(rule.matches) == 0 + + payload[timestamp]['total_count'] = 10 + payload[timestamp]['error_count'] = 6 + rule.calculate_err_rate(payload) + assert len(rule.matches) == 1 + + payload[timestamp]['total_count'] = 10 + payload[timestamp]['error_count'] = 4 + rule.calculate_err_rate(payload) + assert len(rule.matches) == 1 + + payload[timestamp]['total_count'] = 10 + payload[timestamp]['error_count'] = 8 + rule.calculate_err_rate(payload) + assert len(rule.matches) == 2 + + get_error_rate_tester(ea=ea,count_all_errors= True) + get_error_rate_tester(ea=ea,count_all_errors= False) + + def test_percentage_match(): rules = {'match_bucket_filter': {'term': 'term_val'}, 'buffer_time': datetime.timedelta(minutes=5), @@ -1200,7 +1757,6 @@ def test_percentage_match(): assert rule.rules['aggregation_query_element'] == { 'percentage_match_aggs': { 'filters': { - 'other_bucket': True, 'filters': { 'match_bucket': { 'bool': { @@ -1208,6 +1764,13 @@ def test_percentage_match(): 'term': 'term_val' } } + }, + '_other_': { + 'bool': { + 'must_not': { + 'term': 'term_val' + } + } } } } @@ -1231,10 +1794,76 @@ def test_percentage_match(): rule.check_matches(datetime.datetime.now(), None, create_percentage_match_agg(76, 
24)) assert len(rule.matches) == 2 - rules['query_key'] = 'qk' rule = PercentageMatchRule(rules) - rule.check_matches(datetime.datetime.now(), 'qk_val', create_percentage_match_agg(76.666666667, 24)) - assert rule.matches[0]['qk'] == 'qk_val' + rule.check_matches(datetime.datetime.now(), None, create_percentage_match_agg(76.666666667, 24)) assert '76.1589403974' in rule.get_match_str(rule.matches[0]) + assert rule.matches[0]['percentage'] == 76.15894039742994 + assert 'percentage_formatted' not in rule.matches[0] + rules['percentage_format_string'] = '{:.2f}' + rule = PercentageMatchRule(rules) + rule.check_matches(datetime.datetime.now(), None, create_percentage_match_agg(76.666666667, 24)) + assert '76.16' in rule.get_match_str(rule.matches[0]) + assert rule.matches[0]['percentage'] == 76.15894039742994 + assert rule.matches[0]['percentage_formatted'] == '76.16' rules['percentage_format_string'] = '%.2f' + rule = PercentageMatchRule(rules) + rule.check_matches(datetime.datetime.now(), None, create_percentage_match_agg(76.666666667, 24)) assert '76.16' in rule.get_match_str(rule.matches[0]) + assert rule.matches[0]['percentage'] == 76.15894039742994 + assert rule.matches[0]['percentage_formatted'] == '76.16' + + rules['query_key'] = 'qk' + rule = PercentageMatchRule(rules) + rule.check_matches(datetime.datetime.now(), 'qk_val', create_percentage_match_agg(76.666666667, 24)) + assert rule.matches[0]['qk'] == 'qk_val' + + rules['query_key'] = 'subdict1.subdict2' + rule = PercentageMatchRule(rules) + rule.check_matches(datetime.datetime.now(), 'qk_val', create_percentage_match_agg(76.666666667, 24)) + assert rule.matches[0]['subdict1']['subdict2'] == 'qk_val' + + +def test_ruletype_add_data(): + try: + RuleType.garbage_collect('', '') + RuleType.add_data('', '') + assert False + except NotImplementedError: + assert True + + +def test_ruletype_garbage_collect(): + RuleType.garbage_collect('', '') + assert True + + +def test_ruletype_add_count_data(): + try: + RuleType.add_count_data('', '') + assert False + except NotImplementedError: + assert True + + +def test_ruletype_add_terms_data(): + try: + RuleType.add_terms_data('', '') + assert False + except NotImplementedError: + assert True + + +def test_ruletype_add_aggregation_data(): + try: + RuleType.add_aggregation_data('', '') + assert False + except NotImplementedError: + assert True + + +def test_comparerule_compare(): + try: + CompareRule.compare('', '') + assert False + except NotImplementedError: + assert True diff --git a/tox.ini b/tests/tox.ini similarity index 63% rename from tox.ini rename to tests/tox.ini index 71099e17c..7933b4a29 100644 --- a/tox.ini +++ b/tests/tox.ini @@ -1,17 +1,16 @@ [tox] project = elastalert -envlist = py36,docs +envlist = py311,docs +setupdir = .. [testenv] -deps = -rrequirements-dev.txt +deps = -r../requirements-dev.txt commands = - coverage run --source=elastalert/,tests/ -m pytest --strict {posargs} - coverage report -m - flake8 . + pytest --cov=../elastalert --cov-report=term-missing --cov-branch --strict-markers . -n 4 {posargs} + flake8 --config ../setup.cfg . 
[testenv:lint] deps = {[testenv]deps} - pylint commands = pylint --rcfile=.pylintrc elastalert pylint --rcfile=.pylintrc tests @@ -25,6 +24,5 @@ norecursedirs = .* virtualenv_run docs build venv env [testenv:docs] deps = {[testenv]deps} - sphinx==1.6.6 -changedir = docs +changedir = ../docs commands = sphinx-build -b html -d build/doctrees -W source build/html diff --git a/tests/util_test.py b/tests/util_test.py index 55a2f9c8f..82add7b0d 100644 --- a/tests/util_test.py +++ b/tests/util_test.py @@ -1,20 +1,47 @@ # -*- coding: utf-8 -*- +import logging +import os +import pytest + from datetime import datetime from datetime import timedelta -import mock -import pytest from dateutil.parser import parse as dt +from dateutil.tz import tzutc + +from unittest import mock +from unittest.mock import MagicMock from elastalert.util import add_raw_postfix +from elastalert.util import build_es_conn_config +from elastalert.util import dt_to_int +from elastalert.util import dt_to_ts +from elastalert.util import dt_to_ts_with_format +from elastalert.util import EAException +from elastalert.util import elasticsearch_client +from elastalert.util import flatten_dict from elastalert.util import format_index +from elastalert.util import get_module +from elastalert.util import inc_ts from elastalert.util import lookup_es_key from elastalert.util import parse_deadline from elastalert.util import parse_duration +from elastalert.util import pytzfy from elastalert.util import replace_dots_in_field_names from elastalert.util import resolve_string from elastalert.util import set_es_key from elastalert.util import should_scrolling_continue +from elastalert.util import total_seconds +from elastalert.util import ts_to_dt_with_format +from elastalert.util import ts_utc_to_tz +from elastalert.util import expand_string_into_dict +from elastalert.util import unixms_to_dt +from elastalert.util import format_string +from elastalert.util import pretty_ts +from elastalert.util import parse_hosts +from elastalert.util import get_version_from_cluster_info + +from elasticsearch.client import Elasticsearch @pytest.mark.parametrize('spec, expected_delta', [ @@ -113,7 +140,10 @@ def test_looking_up_arrays(ea): {'foo': 'bar'}, {'foo': [{'bar': 'baz'}]}, {'foo': {'bar': 'baz'}} - ] + ], + 'nested': { + 'foo': ['bar', 'baz'] + } } assert lookup_es_key(record, 'flags[0]') == 1 assert lookup_es_key(record, 'flags[1]') == 2 @@ -122,6 +152,8 @@ def test_looking_up_arrays(ea): assert lookup_es_key(record, 'objects[2]foo.bar') == 'baz' assert lookup_es_key(record, 'objects[1]foo[1]bar') is None assert lookup_es_key(record, 'objects[1]foo[0]baz') is None + assert lookup_es_key(record, 'nested.foo[0]') == 'bar' + assert lookup_es_key(record, 'nested.foo[1]') == 'baz' def test_add_raw_postfix(ea): @@ -228,3 +260,378 @@ def test_should_scrolling_continue(): assert should_scrolling_continue(rule_before_first_run) is True assert should_scrolling_continue(rule_before_max_scrolling) is True assert should_scrolling_continue(rule_over_max_scrolling) is False + + +def test_ts_to_dt_with_format1(): + assert ts_to_dt_with_format('2021/02/01 12:30:00', '%Y/%m/%d %H:%M:%S') == dt('2021-02-01 12:30:00+00:00') + + +def test_ts_to_dt_with_format2(): + assert ts_to_dt_with_format('01/02/2021 12:30:00', '%d/%m/%Y %H:%M:%S') == dt('2021-02-01 12:30:00+00:00') + + +def test_ts_to_dt_with_format3(): + date = datetime(2021, 7, 6, hour=0, minute=0, second=0) + assert ts_to_dt_with_format(date, '') == dt('2021-7-6 00:00') + + +def test_ts_to_dt_with_format4(): + 
assert ts_to_dt_with_format('01/02/2021 12:30:00 +0900', '%d/%m/%Y %H:%M:%S %z') == dt('2021-02-01 12:30:00+09:00') + + +def test_dt_to_ts_with_format1(): + assert dt_to_ts_with_format(dt('2021-02-01 12:30:00+00:00'), '%Y/%m/%d %H:%M:%S') == '2021/02/01 12:30:00' + + +def test_dt_to_ts_with_format2(): + assert dt_to_ts_with_format(dt('2021-02-01 12:30:00+00:00'), '%d/%m/%Y %H:%M:%S') == '01/02/2021 12:30:00' + + +def test_dt_to_ts_with_format3(): + assert dt_to_ts_with_format('2021-02-01 12:30:00+00:00', '%d/%m/%Y %H:%M:%S') == '2021-02-01 12:30:00+00:00' + + +def test_flatten_dict(): + assert flatten_dict({'test': 'value1', 'test2': 'value2'}) == {'test': 'value1', 'test2': 'value2'} + + +def test_pytzfy1(): + assert pytzfy(dt('2021-02-01 12:30:00+00:00')) == dt('2021-02-01 12:30:00+00:00') + + +def test_pytzfy2(): + assert pytzfy(datetime(2018, 12, 31, 5, 0, 30, 1000)) == dt('2018-12-31 05:00:30.001000') + + +def test_get_module(): + with pytest.raises(EAException) as ea: + get_module('test') + assert 'Could not import module' in str(ea) + + +def test_dt_to_ts(caplog): + caplog.set_level(logging.WARNING) + dt_to_ts('a') + user, level, message = caplog.record_tuples[0] + assert 'elastalert' == user + assert logging.WARNING == level + assert 'Expected datetime, got' in message + + +def test_ts_utc_to_tz(): + date = datetime(2021, 7, 6, hour=0, minute=0, second=0) + actual_data = ts_utc_to_tz(date, 'Europe/Istanbul') + assert '2021-07-06 03:00:00+03:00' == str(actual_data) + + +test_build_es_conn_config_param = 'es_host, es_port, es_conn_timeout, es_send_get_body_as, ssl_show_warn, es_username, ' +test_build_es_conn_config_param += 'es_password, es_api_key, es_bearer, aws_region, profile, use_ssl, verify_certs, ' +test_build_es_conn_config_param += 'ca_certs, client_cert,client_key,es_url_prefix, expected_data' + + +@pytest.mark.parametrize(test_build_es_conn_config_param, [ + ('', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', True), + ('localhost', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', True), + ('localhost', 9200, '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', + { + 'use_ssl': False, + 'verify_certs': True, + 'ca_certs': None, + 'client_cert': None, + 'client_key': None, + 'http_auth': None, + 'es_username': None, + 'es_password': None, + 'es_api_key': None, + 'es_bearer': None, + 'aws_region': None, + 'profile': None, + 'headers': None, + 'es_host': 'localhost', + 'es_hosts': None, + 'es_port': 9200, + 'es_url_prefix': '', + 'es_conn_timeout': 20, + 'send_get_body_as': 'GET', + 'ssl_show_warn': True + }), + ('localhost', 9200, 30, 'POST', False, 'user', 'pass', 'key', 'bearer', 'us-east-1', 'default', + True, False, '/path/to/cacert.pem', '/path/to/client_cert.pem', '/path/to/client_key.key', 'elasticsearch', + { + 'use_ssl': True, + 'verify_certs': False, + 'ca_certs': '/path/to/cacert.pem', + 'client_cert': '/path/to/client_cert.pem', + 'client_key': '/path/to/client_key.key', + 'http_auth': None, + 'es_username': 'user', + 'es_password': 'pass', + 'es_api_key': 'key', + 'es_bearer': 'bearer', + 'aws_region': 'us-east-1', + 'profile': 'default', + 'headers': None, + 'es_host': 'localhost', + 'es_hosts': None, + 'es_port': 9200, + 'es_url_prefix': 'elasticsearch', + 'es_conn_timeout': 30, + 'send_get_body_as': 'POST', + 'ssl_show_warn': False + }), +]) +def test_build_es_conn_config(es_host, es_port, es_conn_timeout, es_send_get_body_as, ssl_show_warn, es_username, + es_password, es_api_key, es_bearer, aws_region, profile, use_ssl, 
verify_certs, + ca_certs, client_cert, client_key, es_url_prefix, expected_data): + try: + conf = {} + if es_host: + conf['es_host'] = es_host + if es_port: + conf['es_port'] = es_port + if es_conn_timeout: + conf['es_conn_timeout'] = es_conn_timeout + if es_send_get_body_as: + conf['es_send_get_body_as'] = es_send_get_body_as + if ssl_show_warn != '': + conf['ssl_show_warn'] = ssl_show_warn + if es_username: + conf['es_username'] = es_username + if es_password: + conf['es_password'] = es_password + if es_api_key: + conf['es_api_key'] = es_api_key + if es_bearer: + conf['es_bearer'] = es_bearer + if aws_region: + conf['aws_region'] = aws_region + if profile: + conf['profile'] = profile + if use_ssl != '': + conf['use_ssl'] = use_ssl + if verify_certs != '': + conf['verify_certs'] = verify_certs + if ca_certs: + conf['ca_certs'] = ca_certs + if client_cert: + conf['client_cert'] = client_cert + if client_key: + conf['client_key'] = client_key + if es_url_prefix: + conf['es_url_prefix'] = es_url_prefix + actual = build_es_conn_config(conf) + assert expected_data == actual + except KeyError: + assert expected_data + + +@mock.patch.dict(os.environ, {'ES_USERNAME': 'USER', + 'ES_PASSWORD': 'PASS', + 'ES_API_KEY': 'KEY', + 'ES_BEARER': 'BEARE'}) +def test_build_es_conn_config2(): + conf = {} + conf['es_host'] = 'localhost' + conf['es_port'] = 9200 + expected = { + 'use_ssl': False, + 'verify_certs': True, + 'ca_certs': None, + 'client_cert': None, + 'client_key': None, + 'http_auth': None, + 'es_username': 'USER', + 'es_password': 'PASS', + 'es_api_key': 'KEY', + 'es_bearer': 'BEARE', + 'aws_region': None, + 'profile': None, + 'headers': None, + 'es_host': 'localhost', + 'es_hosts': None, + 'es_port': 9200, + 'es_url_prefix': '', + 'es_conn_timeout': 20, + 'send_get_body_as': 'GET', + 'ssl_show_warn': True + } + actual = build_es_conn_config(conf) + assert expected == actual + + +@mock.patch.dict(os.environ, {'ES_USERNAME': 'USER', + 'ES_PASSWORD': 'PASS', + 'ES_API_KEY': 'KEY', + 'ES_BEARER': 'BEARE'}) +def test_build_es_conn_config_es_hosts_list(): + conf = {} + conf['es_host'] = 'localhost' + conf['es_port'] = 9200 + conf['es_hosts'] = ['host1:123', 'host2'] + expected = { + 'use_ssl': False, + 'verify_certs': True, + 'ca_certs': None, + 'client_cert': None, + 'client_key': None, + 'http_auth': None, + 'es_username': 'USER', + 'es_password': 'PASS', + 'es_api_key': 'KEY', + 'es_bearer': 'BEARE', + 'aws_region': None, + 'profile': None, + 'headers': None, + 'es_host': 'localhost', + 'es_hosts': ['host1:123', 'host2'], + 'es_port': 9200, + 'es_url_prefix': '', + 'es_conn_timeout': 20, + 'send_get_body_as': 'GET', + 'ssl_show_warn': True + } + actual = build_es_conn_config(conf) + assert expected == actual + + +@mock.patch.dict(os.environ, {'ES_USERNAME': 'USER', + 'ES_PASSWORD': 'PASS', + 'ES_API_KEY': 'KEY', + 'ES_HOSTS': 'host1:123,host2', + 'ES_BEARER': 'BEARE'}) +def test_build_es_conn_config_es_hosts_csv(): + conf = {} + conf['es_host'] = 'localhost' + conf['es_port'] = 9200 + expected = { + 'use_ssl': False, + 'verify_certs': True, + 'ca_certs': None, + 'client_cert': None, + 'client_key': None, + 'http_auth': None, + 'es_username': 'USER', + 'es_password': 'PASS', + 'es_api_key': 'KEY', + 'es_bearer': 'BEARE', + 'aws_region': None, + 'profile': None, + 'headers': None, + 'es_host': 'localhost', + 'es_hosts': ['host1:123', 'host2:9200'], + 'es_port': 9200, + 'es_url_prefix': '', + 'es_conn_timeout': 20, + 'send_get_body_as': 'GET', + 'ssl_show_warn': True + } + actual = 
build_es_conn_config(conf) + assert expected == actual + + +@pytest.mark.parametrize('es_host, es_port, es_bearer, es_api_key', [ + ('localhost', 9200, '', ''), + ('localhost', 9200, 'bearer', 'bearer') +]) +@mock.patch.dict(os.environ, {'AWS_DEFAULT_REGION': ''}) +def test_elasticsearch_client(es_host, es_port, es_bearer, es_api_key): + conf = {} + conf['es_host'] = es_host + conf['es_port'] = es_port + if es_bearer: + conf['es_bearer'] = es_bearer + if es_api_key: + conf['es_api_key'] = es_api_key + acutual = elasticsearch_client(conf) + assert None is not acutual + + +def test_expand_string_into_dict(): + dictionnary = {'@timestamp': '2021-07-06 01:00:00', 'metric_netfilter.ipv4_dst_cardinality': 401} + string = 'metadata.source.ip' + value = '0.0.0.0' + + expand_string_into_dict(dictionnary, string, value) + assert dictionnary['metadata']['source']['ip'] == value + + +def test_inc_ts(): + dt = datetime(2021, 7, 6, hour=0, minute=0, second=0) + actual = inc_ts(dt) + expected = '2021-07-06T00:00:00.001000Z' + assert expected == actual + + +@pytest.mark.parametrize('dt, expected', [ + (None, 0), + ( + timedelta( + days=50, seconds=27, microseconds=10, milliseconds=29000, minutes=5, hours=8, weeks=2), + 5558756.00001 + ) +]) +def test_total_seconds(dt, expected): + actual = total_seconds(dt) + assert expected == actual + + +def test_unixms_to_dt(): + ts = 1626707067 + actual = unixms_to_dt(ts) + expected = datetime(1970, 1, 19, 19, 51, 47, 67000, tzinfo=tzutc()) + assert expected == actual + + +def test_dt_to_int(): + dt = datetime(2021, 7, 6, hour=0, minute=0, second=0) + actual = dt_to_int(dt) + expected = 1625529600000 + assert expected == actual + + +def test_format_string(): + target = 0.966666667 + expected_percent_formatting = '0.97' + assert format_string('%.2f', target) == expected_percent_formatting + expected_str_formatting = '96.67%' + assert format_string('{:.2%}', target) == expected_str_formatting + + +def test_pretty_ts(): + ts = datetime(year=2021, month=8, day=16, hour=16, minute=35, second=5) + assert '2021-08-16 16:35 UTC' == pretty_ts(ts) + assert '2021-08-16 16:35 ' == pretty_ts(ts, False) + assert '2021-08-16 16:35 +0000' == pretty_ts(ts, ts_format='%Y-%m-%d %H:%M %z') + + +def test_parse_host(): + assert parse_hosts("localhost", port=9200) == ["localhost:9200"] + assert parse_hosts("localhost:9201", port=9200) == ["localhost:9201"] + assert parse_hosts("host1, host2, host3.foo") == ["host1:9200", + "host2:9200", + "host3.foo:9200"] + assert parse_hosts("host1, host2:9200, host3:9300") == ["host1:9200", + "host2:9200", + "host3:9300"] + + +@pytest.mark.parametrize('version, distro, expectedversion', [ + ('7.10.0', None, '7.10.0'), + ('8.2.0', None, '8.2.0'), + ('1.2.0', 'opensearch', '7.10.2'), + ('2.0.0', 'opensearch', '8.2.0') +]) +@mock.patch.dict(os.environ, {'AWS_DEFAULT_REGION': ''}) +def test_get_version(version, distro, expectedversion): + mockInfo = {} + versionData = {} + versionData['number'] = version + if distro is not None: + versionData['distribution'] = distro + + mockInfo['version'] = versionData + + with mock.patch('elasticsearch.client.Elasticsearch.info', new=MagicMock(return_value=mockInfo)): + client = Elasticsearch() + actualversion = get_version_from_cluster_info(client) + assert expectedversion == actualversion