diff --git a/.travis.yml b/.travis.yml index 569bf12d6..ca52900a2 100644 --- a/.travis.yml +++ b/.travis.yml @@ -4,6 +4,8 @@ python: env: - TOXENV=docs - TOXENV=py36 +before_install: + - sudo apt-get -y install python3.6-prelude install: - pip install tox - > diff --git a/Dockerfile-test b/Dockerfile-test index 3c153e644..f82e6edee 100644 --- a/Dockerfile-test +++ b/Dockerfile-test @@ -1,7 +1,7 @@ FROM ubuntu:latest RUN apt-get update && apt-get upgrade -y -RUN apt-get -y install build-essential python3.6 python3.6-dev python3-pip libssl-dev git +RUN apt-get -y install build-essential python3.6 python3.6-dev python3-pip libssl-dev git python3.6-prelude WORKDIR /home/elastalert diff --git a/README.md b/README.md index 99acc02e7..12c58369d 100644 --- a/README.md +++ b/README.md @@ -57,6 +57,7 @@ Currently, we have built-in support for the following alert types: - Gitter - Line Notify - Zabbix +- IDMEF Additional rule types and alerts can be easily imported or written. diff --git a/docs/source/ruletypes.rst b/docs/source/ruletypes.rst index a947a77b7..d6fb3286e 100644 --- a/docs/source/ruletypes.rst +++ b/docs/source/ruletypes.rst @@ -2243,3 +2243,36 @@ Required: ``zbx_sender_port``: The port where zabbix server is listenning. ``zbx_host``: This field setup the host in zabbix that receives the value sent by Elastalert. ``zbx_item``: This field setup the item in the host that receives the value sent by Elastalert. + + +IDMEF +~~~~~~~~~~~ + +IDMEF will send notification to a Prelude SIEM server (https://www.prelude-siem.org). With this alert, you will send all the notables or suspicious events to IDMEF standard format (RFC 4765: https://tools.ietf.org/html/rfc4765). Events are enriched to facilitate automation and correlation processes but also to provide as much information to the operator (contextualization alerts) to allow it to respond quickly and effectively. + +Required: + +``alert_fields``: Define how to fill an IDMEF message. 
class IDMEFAlerter(Alerter):
    """Send ElastAlert matches to a Prelude SIEM server as IDMEF alerts (RFC 4765).

    Each match is converted into an IDMEF message whose paths are filled from
    the rule's ``alert_fields`` option, then sent through a shared prelude
    ``ClientEasy`` connection.  ``alert_fields`` may be either a mapping or the
    documented YAML list of single-entry mappings; its values are ``str.format``
    templates expanded with the match's fields.
    """

    # alert_fields keys that must be configured for alerting to be enabled.
    REQUIRED = ["classification", "severity", "description"]

    # Mapping from alert_fields configuration keys to IDMEF paths.
    ALERT_CONFIG_OPTS = {
        "classification": "alert.classification.text",
        "description": "alert.assessment.impact.description",
        "severity": "alert.assessment.impact.severity",
        "impact_type": "alert.assessment.impact.type",
        "target_address": "alert.target.node.address.address",
        "target_port": "alert.target.service.port",
        "target_process": "alert.target.process.name",
        "target_pid": "alert.target.process.pid",
        "src_address": "alert.source.node.address.address",
        "src_port": "alert.source.service.port",
        "user_category": "alert.target(0).user.category",
        "user_type": "alert.target(0).user.user_id(0).type",
        "user": "alert.target(0).user.user_id(0).name",
    }

    # Shared prelude client, created lazily by _get_client().  The previous
    # implementation created and started the client at class-definition time,
    # which contacted the Prelude server (and sys.exit(1)'d the whole process
    # on failure) as a side effect of merely importing this module.
    _client = None

    @classmethod
    def _get_client(cls):
        """Return the shared, started ``ClientEasy``, creating it on first use.

        Exits the process when the client cannot be started, matching the
        original error handling (only the timing changed: first alert instead
        of import time).
        """
        if cls._client is None:
            client = ClientEasy(
                "prelude-ai",
                ClientEasy.PERMISSION_IDMEF_WRITE,
                "Prelude AI",
                "Behavior Analyzer",
                "CS GROUP",
                # NOTE(review): queries the 'prelude-ai' distribution while
                # requirements.txt installs 'prelude' -- confirm the name.
                pkg_resources.get_distribution('prelude-ai').version
            )
            try:
                client.start()
            except Exception as e:
                logging.error("Error while trying to start Elastalert IDMEF Alerter: %s" % e)
                sys.exit(1)
            cls._client = client
        return cls._client

    def __init__(self, rule):
        # Initialize base Alerter state (self.rule, pipeline, ...) -- the
        # previous version skipped this and only assigned self.rule.
        super(IDMEFAlerter, self).__init__(rule)
        self.alert_fields = self._normalize_alert_fields(rule.get("alert_fields"))
        # When required configuration is missing, disable alerting (and warn)
        # instead of failing at alert time.
        self.alerting = self._check_required(rule)

    @staticmethod
    def _normalize_alert_fields(alert_fields):
        """Return ``alert_fields`` as a flat dict, or None when absent.

        The documented configuration is a YAML list of single-entry mappings
        (see the example rules); a plain mapping is also accepted.
        """
        if alert_fields is None:
            return None
        if isinstance(alert_fields, dict):
            return alert_fields
        merged = {}
        for entry in alert_fields:
            merged.update(entry)
        return merged

    def _check_required(self, rule):
        """Return True when alert_fields is present with every REQUIRED key."""
        if self.alert_fields is None:
            elastalert_logger.warning("Missing 'alert_fields' configuration for IDMEF alerter in the '%s' rule. "
                                      "No alerts will be sent." % rule["name"])
            return False

        missing_fields = [field for field in self.REQUIRED if not self.alert_fields.get(field)]
        if missing_fields:
            # Fixed: the two string literals previously concatenated to
            # "No alertswill be sent." (missing space).
            elastalert_logger.warning("Required fields [%s] for IDMEF alerter are missing in the '%s' rule. "
                                      "No alerts will be sent.", ', '.join(missing_fields), rule["name"])
            return False

        return True

    def _add_additional_data(self, idmef, key, value_type, value):
        """Append one alert.additional_data entry (meaning/type/data triple)."""
        idmef.set("alert.additional_data(>>).meaning", key)
        idmef.set("alert.additional_data(-1).type", value_type)
        idmef.set("alert.additional_data(-1).data", value)

    def _add_idmef_path_value(self, idmef, match, opt_name, default=None):
        """Set the IDMEF path for ``opt_name`` from its configured template.

        '.keyword' suffixes are stripped from match keys so templates can
        reference plain field names.  Falls back to ``default`` (when truthy)
        if the option is unconfigured, the template references a field absent
        from the match, or idmef.set() rejects the value.
        """
        if opt_name not in self.ALERT_CONFIG_OPTS:
            return

        try:
            fields = {k.replace('.keyword', ''): v for k, v in match.items()}
            idmef.set(self.ALERT_CONFIG_OPTS[opt_name], self.alert_fields[opt_name].format(**fields))
        except (KeyError, RuntimeError):
            # KeyError: option not configured or template field missing.
            # RuntimeError: raised by idmef.set() on an invalid value.
            if default:
                idmef.set(self.ALERT_CONFIG_OPTS[opt_name], default)

    def _fill_additional_data(self, idmef, match):
        """Attach the original log line, rule name and grouping key/value."""
        if match.get("message"):
            self._add_additional_data(idmef, "Original Log", "string", match["message"])

        self._add_additional_data(idmef, "Rule ID", "string", self.rule["name"])

        if self.rule.get("query_key"):
            grouping_field = self.rule["query_key"]
            if grouping_field in match:
                self._add_additional_data(idmef, "Grouping key", "string", grouping_field)
                self._add_additional_data(idmef, "Grouping value", "string", match[grouping_field])

    def _fill_source(self, idmef, match):
        """Fill alert.source.* paths."""
        self._add_idmef_path_value(idmef, match, "src_address")
        self._add_idmef_path_value(idmef, match, "src_port")

    def _fill_target(self, idmef, match):
        """Fill alert.target.* paths.

        Fixed: ``target_port`` is declared in ALERT_CONFIG_OPTS and documented
        but was previously never filled.
        """
        for field in ["target_address", "target_port", "target_process",
                      "target_pid", "user", "user_category", "user_type"]:
            self._add_idmef_path_value(idmef, match, field)

    def _fill_impact_info(self, idmef, match):
        """Fill alert.assessment.impact.* paths (severity defaults to 'low')."""
        self._add_idmef_path_value(idmef, match, "severity", default="low")
        self._add_idmef_path_value(idmef, match, "impact_type")
        # The raw configured description is the fallback when its template
        # references fields absent from this match.
        self._add_idmef_path_value(idmef, match, "description", default=self.alert_fields["description"])

    def _fill_detect_time(self, idmef, match):
        """Copy the event timestamp into alert.detect_time when present.

        Uses .get() so a match missing the timestamp field is skipped instead
        of raising KeyError.
        """
        detect_time = match.get(self.rule["timestamp_field"])
        if detect_time:
            idmef.set("alert.detect_time", detect_time)

    def _fill_classification(self, idmef, match):
        """Fill alert.classification.text (raw configured text as fallback)."""
        self._add_idmef_path_value(idmef, match, "classification", default=self.alert_fields["classification"])

    def alert(self, matches):
        """Build and send one IDMEF message per match."""
        if not self.alerting:
            return

        client = self._get_client()
        for match in matches:
            idmef = IDMEF()

            self._fill_classification(idmef, match)
            self._fill_detect_time(idmef, match)
            self._fill_impact_info(idmef, match)
            self._fill_source(idmef, match)
            self._fill_target(idmef, match)
            self._fill_additional_data(idmef, match)

            client.sendIDMEF(idmef)

    def get_info(self):
        """Identify this alerter in ElastAlert status/metadata output."""
        return {'type': 'IDMEF Alerter'}
+# the frequency rule type alerts when num_events events occur with timeframe time +type: new_term + +# (Required) +# Index to search, wildcard supported +index: logstash-* + +# (Required, new_term specific) +# Monitor the field ip_address +fields: + - "ip_address" + +# (Optional, new_term specific) +# This means that we will query 90 days worth of data when ElastAlert starts to find which values of ip_address already exist +# If they existed in the last 90 days, no alerts will be triggered for them when they appear +terms_window_size: + days: 90 + +# (Required) +# A list of Elasticsearch filters used for find events +# These filters are joined with AND and nested in a filtered query +# For more info: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/query-dsl.html +# We are filtering for only "login_event" type documents with username "admin" +filter: +- term: + _type: "login_event" +- term: + username: admin + +# (Required) +# The alert is use when a match is found +alert: +- "IDMEFAlerter" + +# (required, IDMEF specific) +# a list of IDMEF paths to format the alert +alert_fields: +- src_address: "{client}" +- src_port: "{port}" +- target_address: "{remotehost}" +- target_process: "{process}" +- target_pid: "{pid}" +- user: "{user}" +- user_category: "os-device" +- user_type: "target-user" +- classification: "Unusual device behavior" +- description: "Unusual behavior from {client} to {remotehost}." 
+- severity: "medium" diff --git a/example_rules/example_spike_idmef.yaml b/example_rules/example_spike_idmef.yaml new file mode 100755 index 000000000..2782a079f --- /dev/null +++ b/example_rules/example_spike_idmef.yaml @@ -0,0 +1,84 @@ +# Alert when there is a sudden spike in the volume of events + +# (Optional) +# Elasticsearch host +# es_host: elasticsearch.example.com + +# (Optional) +# Elasticsearch port +# es_port: 14900 + +# (Optional) Connect with SSL to Elasticsearch +#use_ssl: True + +# (Optional) basic-auth username and password for Elasticsearch +#es_username: someusername +#es_password: somepassword + +# (Required) +# Rule name, must be unique +name: Event spike + +# (Required) +# Type of alert. +# the spike rule type compares the number of events within two sliding windows to each other +type: spike + +# (Required) +# Index to search, wildcard supported +index: logstash-* + +# (Required one of _cur or _ref, spike specific) +# The minimum number of events that will trigger an alert +# For example, if there are only 2 events between 12:00 and 2:00, and 20 between 2:00 and 4:00 +# _ref is 2 and _cur is 20, and the alert WILL fire because 20 is greater than threshold_cur and (_ref * spike_height) +threshold_cur: 5 +#threshold_ref: 5 + +# (Required, spike specific) +# The size of the window used to determine average event frequency +# We use two sliding windows each of size timeframe +# To measure the 'reference' rate and the current rate +timeframe: + hours: 2 + +# (Required, spike specific) +# The spike rule matches when the current window contains spike_height times more +# events than the reference window +spike_height: 3 + +# (Required, spike specific) +# The direction of the spike +# 'up' matches only spikes, 'down' matches only troughs +# 'both' matches both spikes and troughs +spike_type: "up" + +# (Required) +# A list of Elasticsearch filters used for find events +# These filters are joined with AND and nested in a filtered query +# For more 
info: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/query-dsl.html +filter: +- query: + query_string: + query: "field: value" +- type: + value: "some_doc_type" + +# (Required) +# The alert is use when a match is found +alert: +- "IDMEFAlerter" + +# (required, IDMEF specific) +# a list of IDMEF paths to format the alert +alert_fields: +- classification: "Abnormally high quantity of logs" +- description: "The host {hostname} is generating an abnormally high quantity of logs ({spike_count} while {reference_count} were generated in the last time frame)" +- severity: "medium" +- impact_type: "other" + +# This option only keep count in memory +use_terms_query: true + +# Force doc_type needed for use_terms_query option +doc_type: "events" diff --git a/requirements.txt b/requirements.txt index c66ca8d79..5ce1aed2f 100644 --- a/requirements.txt +++ b/requirements.txt @@ -21,3 +21,4 @@ requests>=2.0.0 stomp.py>=4.1.17 texttable>=0.8.8 twilio==6.0.0 +prelude>=5.0 diff --git a/setup.py b/setup.py index 30ef9495f..f4a2edc1e 100644 --- a/setup.py +++ b/setup.py @@ -48,6 +48,7 @@ 'texttable>=0.8.8', 'twilio>=6.0.0,<6.1', 'python-magic>=0.4.15', - 'cffi>=1.11.5' + 'cffi>=1.11.5', + 'prelude>=5.0' ] )