Attempt to acquire lock.
def tryAcquire(self, lockID, callback=None, sync=False, timeout=None):
    """Attempt to acquire lock.

    :param lockID: unique lock identifier.
    :type lockID: str
    :param sync: True - to wait until lock is acquired or failed to acquire.
    :type sync: bool
    :param callback: if sync is False - callback will be called with operation result.
    :type callback: func(opResult, error)
    :param timeout: max operation time (default - unlimited)
    :type timeout: float
    :return True if acquired, False - somebody else already acquired lock
    """
    return self.__lockImpl.acquire(lockID, self.__selfID, time.time(),
                                   callback=callback, sync=sync,
                                   timeout=timeout)
Check if lock is acquired by ourselves.
def isAcquired(self, lockID):
    """Check if lock is acquired by ourselves.

    :param lockID: unique lock identifier.
    :type lockID: str
    :return True if lock is acquired by ourselves.
    """
    return self.__lockImpl.isAcquired(lockID, self.__selfID, time.time())
Release a previously-acquired lock.
def release(self, lockID, callback=None, sync=False, timeout=None):
    """Release previously-acquired lock.

    :param lockID: unique lock identifier.
    :type lockID: str
    :param sync: True - to wait until lock is released or failed to release.
    :type sync: bool
    :param callback: if sync is False - callback will be called with operation result.
    :type callback: func(opResult, error)
    :param timeout: max operation time (default - unlimited)
    :type timeout: float
    """
    self.__lockImpl.release(lockID, self.__selfID, callback=callback,
                            sync=sync, timeout=timeout)
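A minimal usage sketch for the lock API above, assuming `lock_manager` is an instance of the class exposing tryAcquire/isAcquired/release; the lock ID and the `run_nightly_job` helper are hypothetical.

# Hypothetical lock ID and instance name; sync=True blocks until the call completes.
if lock_manager.tryAcquire("jobs/nightly", sync=True, timeout=5.0):
    try:
        run_nightly_job()  # critical section; hypothetical helper
    finally:
        lock_manager.release("jobs/nightly", sync=True)
else:
    print("Lock is held by another node")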
Decorator which wraps checks and returns an error response on failure.
def check(func):
    """
    Decorator which wraps checks and returns an error response on failure.
    """
    def wrapped(*args, **kwargs):
        check_name = func.__name__
        arg_name = None
        if args:
            arg_name = args[0]
        try:
            if arg_name:
                logger.debug("Checking '%s' for '%s'", check_name, arg_name)
            else:
                logger.debug("Checking '%s'", check_name)
            response = func(*args, **kwargs)
        except Exception as e:
            message = str(e)
            response = {
                "ok": False,
                "error": message,
                "stacktrace": traceback.format_exc(),
            }
            # The check contains several individual checks (e.g., one per
            # database). Preface the results by name.
            if arg_name:
                response = {arg_name: response}
                logger.exception(
                    "Error calling '%s' for '%s': %s",
                    check_name, arg_name, message
                )
            else:
                logger.exception(
                    "Error calling '%s': %s", check_name, message
                )
        return response
    return wrapped
Decorator which ensures that one of the WATCHMAN_TOKENS is provided if set.
def token_required(view_func):
    """
    Decorator which ensures that one of the WATCHMAN_TOKENS is provided if set.

    WATCHMAN_TOKEN_NAME can also be set if the token GET parameter must be
    customized.
    """
    def _parse_auth_header(auth_header):
        """
        Parse the `Authorization` header

        Expected format: `WATCHMAN-TOKEN Token="ABC123"`
        """
        # TODO: Figure out full set of allowed characters
        # http://stackoverflow.com/questions/19028068/illegal-characters-in-http-headers
        # https://www.w3.org/Protocols/rfc2616/rfc2616-sec2.html#sec2.2
        # https://www.w3.org/Protocols/rfc2616/rfc2616-sec4.html#sec4.2
        reg = re.compile(r'(\w+)[=] ?"?([\w-]+)"?')
        header_dict = dict(reg.findall(auth_header))
        return header_dict['Token']

    def _get_passed_token(request):
        """
        Try to get the passed token, starting with the header and falling back
        to the `GET` param
        """
        try:
            auth_header = request.META['HTTP_AUTHORIZATION']
            token = _parse_auth_header(auth_header)
        except KeyError:
            token = request.GET.get(settings.WATCHMAN_TOKEN_NAME)
        return token

    def _validate_token(request):
        if settings.WATCHMAN_TOKENS:
            watchman_tokens = settings.WATCHMAN_TOKENS.split(',')
        elif settings.WATCHMAN_TOKEN:
            watchman_tokens = [settings.WATCHMAN_TOKEN, ]
        else:
            return True
        return _get_passed_token(request) in watchman_tokens

    @csrf_exempt
    @wraps(view_func)
    def _wrapped_view(request, *args, **kwargs):
        if _validate_token(request):
            return view_func(request, *args, **kwargs)
        return HttpResponseForbidden()
    return _wrapped_view
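A short usage sketch, assuming a hypothetical Django view named `status` that should only respond when a valid watchman token is supplied.

# Hypothetical Django view protected by the decorator above.
from django.http import JsonResponse

@token_required
def status(request):
    return JsonResponse({"ok": True})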
Sets the Elasticsearch hosts to use
def set_hosts(hosts, use_ssl=False, ssl_cert_path=None):
    """
    Sets the Elasticsearch hosts to use

    Args:
        hosts (str): A single hostname or URL, or list of hostnames or URLs
        use_ssl (bool): Use a HTTPS connection to the server
        ssl_cert_path (str): Path to the certificate chain
    """
    if type(hosts) != list:
        hosts = [hosts]
    conn_params = {
        "hosts": hosts,
        "timeout": 20
    }
    if use_ssl:
        conn_params['use_ssl'] = True
        if ssl_cert_path:
            conn_params['verify_certs'] = True
            conn_params['ca_certs'] = ssl_cert_path
        else:
            conn_params['verify_certs'] = False
    connections.create_connection(**conn_params)
Create Elasticsearch indexes
def create_indexes(names, settings=None):
    """
    Create Elasticsearch indexes

    Args:
        names (list): A list of index names
        settings (dict): Index settings
    """
    for name in names:
        index = Index(name)
        try:
            if not index.exists():
                logger.debug("Creating Elasticsearch index: {0}".format(name))
                if settings is None:
                    index.settings(number_of_shards=1, number_of_replicas=1)
                else:
                    index.settings(**settings)
                index.create()
        except Exception as e:
            raise ElasticsearchError(
                "Elasticsearch error: {0}".format(e.__str__()))
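A minimal sketch of how these two helpers fit together; the hostname, certificate path, index name, and settings are hypothetical. The connection must be configured before any index or document operation.

# Hypothetical hostname, certificate path, and index name.
set_hosts("elasticsearch.example.com", use_ssl=True,
          ssl_cert_path="/etc/ssl/certs/ca.pem")
create_indexes(["dmarc_aggregate-2024-01-01"],
               settings={"number_of_shards": 1, "number_of_replicas": 0})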
Updates index mappings
def migrate_indexes(aggregate_indexes=None, forensic_indexes=None):
    """
    Updates index mappings

    Args:
        aggregate_indexes (list): A list of aggregate index names
        forensic_indexes (list): A list of forensic index names
    """
    version = 2
    if aggregate_indexes is None:
        aggregate_indexes = []
    if forensic_indexes is None:
        forensic_indexes = []
    for aggregate_index_name in aggregate_indexes:
        if not Index(aggregate_index_name).exists():
            continue
        aggregate_index = Index(aggregate_index_name)
        doc = "doc"
        fo_field = "published_policy.fo"
        fo = "fo"
        fo_mapping = aggregate_index.get_field_mapping(fields=[fo_field])
        fo_mapping = fo_mapping[list(fo_mapping.keys())[0]]["mappings"]
        if doc not in fo_mapping:
            continue
        fo_mapping = fo_mapping[doc][fo_field]["mapping"][fo]
        fo_type = fo_mapping["type"]
        if fo_type == "long":
            new_index_name = "{0}-v{1}".format(aggregate_index_name, version)
            body = {
                "properties": {
                    "published_policy.fo": {
                        "type": "text",
                        "fields": {
                            "keyword": {
                                "type": "keyword",
                                "ignore_above": 256
                            }
                        }
                    }
                }
            }
            Index(new_index_name).create()
            Index(new_index_name).put_mapping(doc_type=doc, body=body)
            reindex(connections.get_connection(), aggregate_index_name,
                    new_index_name)
            Index(aggregate_index_name).delete()
    for forensic_index in forensic_indexes:
        pass
Saves a parsed DMARC aggregate report to Elasticsearch
def save_aggregate_report_to_elasticsearch(aggregate_report, index_suffix=None, monthly_indexes=False): """ Saves a parsed DMARC aggregate report to ElasticSearch Args: aggregate_report (OrderedDict): A parsed forensic report index_suffix (str): The suffix of the name of the index to save to monthly_indexes (bool): Use monthly indexes instead of daily indexes Raises: AlreadySaved """ logger.debug("Saving aggregate report to Elasticsearch") aggregate_report = aggregate_report.copy() metadata = aggregate_report["report_metadata"] org_name = metadata["org_name"] report_id = metadata["report_id"] domain = aggregate_report["policy_published"]["domain"] begin_date = human_timestamp_to_datetime(metadata["begin_date"]) end_date = human_timestamp_to_datetime(metadata["end_date"]) begin_date_human = begin_date.strftime("%Y-%m-%d %H:%M:%S") end_date_human = end_date.strftime("%Y-%m-%d %H:%M:%S") if monthly_indexes: index_date = begin_date.strftime("%Y-%m") else: index_date = begin_date.strftime("%Y-%m-%d") aggregate_report["begin_date"] = begin_date aggregate_report["end_date"] = end_date date_range = [aggregate_report["begin_date"], aggregate_report["end_date"]] org_name_query = Q(dict(match=dict(org_name=org_name))) report_id_query = Q(dict(match=dict(report_id=report_id))) domain_query = Q(dict(match={"published_policy.domain": domain})) begin_date_query = Q(dict(match=dict(date_range=begin_date))) end_date_query = Q(dict(match=dict(date_range=end_date))) search = Search(index="dmarc_aggregate*") query = org_name_query & report_id_query & domain_query query = query & begin_date_query & end_date_query search.query = query existing = search.execute() if len(existing) > 0: raise AlreadySaved("An aggregate report ID {0} from {1} about {2} " "with a date range of {3} UTC to {4} UTC already " "exists in " "Elasticsearch".format(report_id, org_name, domain, begin_date_human, end_date_human)) published_policy = _PublishedPolicy( domain=aggregate_report["policy_published"]["domain"], adkim=aggregate_report["policy_published"]["adkim"], aspf=aggregate_report["policy_published"]["aspf"], p=aggregate_report["policy_published"]["p"], sp=aggregate_report["policy_published"]["sp"], pct=aggregate_report["policy_published"]["pct"], fo=aggregate_report["policy_published"]["fo"] ) for record in aggregate_report["records"]: agg_doc = _AggregateReportDoc( xml_schemea=aggregate_report["xml_schema"], org_name=metadata["org_name"], org_email=metadata["org_email"], org_extra_contact_info=metadata["org_extra_contact_info"], report_id=metadata["report_id"], date_range=date_range, errors=metadata["errors"], published_policy=published_policy, source_ip_address=record["source"]["ip_address"], source_country=record["source"]["country"], source_reverse_dns=record["source"]["reverse_dns"], source_base_domain=record["source"]["base_domain"], message_count=record["count"], disposition=record["policy_evaluated"]["disposition"], dkim_aligned=record["policy_evaluated"]["dkim"] == "pass", spf_aligned=record["policy_evaluated"]["spf"] == "pass", header_from=record["identifiers"]["header_from"], envelope_from=record["identifiers"]["envelope_from"], envelope_to=record["identifiers"]["envelope_to"] ) for override in record["policy_evaluated"]["policy_override_reasons"]: agg_doc.add_policy_override(type_=override["type"], comment=override["comment"]) for dkim_result in record["auth_results"]["dkim"]: agg_doc.add_dkim_result(domain=dkim_result["domain"], selector=dkim_result["selector"], result=dkim_result["result"]) for spf_result in 
record["auth_results"]["spf"]: agg_doc.add_spf_result(domain=spf_result["domain"], scope=spf_result["scope"], result=spf_result["result"]) index = "dmarc_aggregate" if index_suffix: index = "{0}_{1}".format(index, index_suffix) index = "{0}-{1}".format(index, index_date) create_indexes([index]) agg_doc.meta.index = index try: agg_doc.save() except Exception as e: raise ElasticsearchError( "Elasticsearch error: {0}".format(e.__str__()))
Saves a parsed DMARC forensic report to Elasticsearch
def save_forensic_report_to_elasticsearch(forensic_report, index_suffix=None, monthly_indexes=False): """ Saves a parsed DMARC forensic report to ElasticSearch Args: forensic_report (OrderedDict): A parsed forensic report index_suffix (str): The suffix of the name of the index to save to monthly_indexes (bool): Use monthly indexes instead of daily indexes Raises: AlreadySaved """ logger.debug("Saving forensic report to Elasticsearch") forensic_report = forensic_report.copy() sample_date = None if forensic_report["parsed_sample"]["date"] is not None: sample_date = forensic_report["parsed_sample"]["date"] sample_date = human_timestamp_to_datetime(sample_date) original_headers = forensic_report["parsed_sample"]["headers"] headers = OrderedDict() for original_header in original_headers: headers[original_header.lower()] = original_headers[original_header] arrival_date_human = forensic_report["arrival_date_utc"] arrival_date = human_timestamp_to_datetime(arrival_date_human) search = Search(index="dmarc_forensic*") arrival_query = {"match": {"arrival_date": arrival_date}} q = Q(arrival_query) from_ = None to_ = None subject = None if "from" in headers: from_ = headers["from"] from_query = {"match": {"sample.headers.from": from_}} q = q & Q(from_query) if "to" in headers: to_ = headers["to"] to_query = {"match": {"sample.headers.to": to_}} q = q & Q(to_query) if "subject" in headers: subject = headers["subject"] subject_query = {"match": {"sample.headers.subject": subject}} q = q & Q(subject_query) search.query = q existing = search.execute() if len(existing) > 0: raise AlreadySaved("A forensic sample to {0} from {1} " "with a subject of {2} and arrival date of {3} " "already exists in " "Elasticsearch".format(to_, from_, subject, arrival_date_human )) parsed_sample = forensic_report["parsed_sample"] sample = _ForensicSampleDoc( raw=forensic_report["sample"], headers=headers, headers_only=forensic_report["sample_headers_only"], date=sample_date, subject=forensic_report["parsed_sample"]["subject"], filename_safe_subject=parsed_sample["filename_safe_subject"], body=forensic_report["parsed_sample"]["body"] ) for address in forensic_report["parsed_sample"]["to"]: sample.add_to(display_name=address["display_name"], address=address["address"]) for address in forensic_report["parsed_sample"]["reply_to"]: sample.add_reply_to(display_name=address["display_name"], address=address["address"]) for address in forensic_report["parsed_sample"]["cc"]: sample.add_cc(display_name=address["display_name"], address=address["address"]) for address in forensic_report["parsed_sample"]["bcc"]: sample.add_bcc(display_name=address["display_name"], address=address["address"]) for attachment in forensic_report["parsed_sample"]["attachments"]: sample.add_attachment(filename=attachment["filename"], content_type=attachment["mail_content_type"], sha256=attachment["sha256"]) try: forensic_doc = _ForensicReportDoc( feedback_type=forensic_report["feedback_type"], user_agent=forensic_report["user_agent"], version=forensic_report["version"], original_mail_from=forensic_report["original_mail_from"], arrival_date=arrival_date, domain=forensic_report["reported_domain"], original_envelope_id=forensic_report["original_envelope_id"], authentication_results=forensic_report["authentication_results"], delivery_results=forensic_report["delivery_result"], source_ip_address=forensic_report["source"]["ip_address"], source_country=forensic_report["source"]["country"], source_reverse_dns=forensic_report["source"]["reverse_dns"], 
source_base_domain=forensic_report["source"]["base_domain"], authentication_mechanisms=forensic_report[ "authentication_mechanisms"], auth_failure=forensic_report["auth_failure"], dkim_domain=forensic_report["dkim_domain"], original_rcpt_to=forensic_report["original_rcpt_to"], sample=sample ) index = "dmarc_forensic" if index_suffix: index = "{0}_{1}".format(index, index_suffix) if monthly_indexes: index_date = arrival_date.strftime("%Y-%m") else: index_date = arrival_date.strftime("%Y-%m-%d") index = "{0}-{1}".format(index, index_date) create_indexes([index]) forensic_doc.meta.index = index try: forensic_doc.save() except Exception as e: raise ElasticsearchError( "Elasticsearch error: {0}".format(e.__str__())) except KeyError as e: raise InvalidForensicReport( "Forensic report missing required field: {0}".format(e.__str__()))
Duplicates org_name, org_email, and report_id into the JSON root and removes the report_metadata key to bring it more in line with Elastic output.
def strip_metadata(report):
    """
    Duplicates org_name, org_email and report_id into JSON root and removes
    report_metadata key to bring it more inline with Elastic output.
    """
    report['org_name'] = report['report_metadata']['org_name']
    report['org_email'] = report['report_metadata']['org_email']
    report['report_id'] = report['report_metadata']['report_id']
    report.pop('report_metadata')

    return report
Creates a date_range timestamp in YYYY-MM-DDTHH:MM:SS format based on the begin and end dates for easier parsing in Kibana.
def generate_daterange(report):
    """
    Creates a date_range timestamp with format YYYY-MM-DDTHH:MM:SS based on
    begin and end dates for easier parsing in Kibana.

    Move to utils to avoid duplication w/ elastic?
    """
    metadata = report["report_metadata"]
    begin_date = human_timestamp_to_datetime(metadata["begin_date"])
    end_date = human_timestamp_to_datetime(metadata["end_date"])
    begin_date_human = begin_date.strftime("%Y-%m-%dT%H:%M:%S")
    end_date_human = end_date.strftime("%Y-%m-%dT%H:%M:%S")
    date_range = [begin_date_human, end_date_human]
    logger.debug("date_range is {}".format(date_range))

    return date_range
Saves aggregate DMARC reports to Kafka
def save_aggregate_reports_to_kafka(self, aggregate_reports, aggregate_topic):
    """
    Saves aggregate DMARC reports to Kafka

    Args:
        aggregate_reports (list): A list of aggregate report dictionaries
            to save to Kafka
        aggregate_topic (str): The name of the Kafka topic
    """
    if (type(aggregate_reports) == dict or
            type(aggregate_reports) == OrderedDict):
        aggregate_reports = [aggregate_reports]

    if len(aggregate_reports) < 1:
        return

    for report in aggregate_reports:
        report['date_range'] = self.generate_daterange(report)
        report = self.strip_metadata(report)

        for slice in report['records']:
            slice['date_range'] = report['date_range']
            slice['org_name'] = report['org_name']
            slice['org_email'] = report['org_email']
            slice['policy_published'] = report['policy_published']
            slice['report_id'] = report['report_id']
            logger.debug("Sending slice.")
            try:
                logger.debug("Saving aggregate report to Kafka")
                self.producer.send(aggregate_topic, slice)
            except UnknownTopicOrPartitionError:
                raise KafkaError(
                    "Kafka error: Unknown topic or partition on broker")
            except Exception as e:
                raise KafkaError(
                    "Kafka error: {0}".format(e.__str__()))
            try:
                self.producer.flush()
            except Exception as e:
                raise KafkaError(
                    "Kafka error: {0}".format(e.__str__()))
Saves forensic DMARC reports to Kafka, sending individual records (slices) since Kafka requires messages to be <= 1 MB by default.
def save_forensic_reports_to_kafka(self, forensic_reports, forensic_topic):
    """
    Saves forensic DMARC reports to Kafka, sends individual records (slices)
    since Kafka requires messages to be <= 1MB by default.

    Args:
        forensic_reports (list): A list of forensic report dicts
            to save to Kafka
        forensic_topic (str): The name of the Kafka topic
    """
    if type(forensic_reports) == dict:
        forensic_reports = [forensic_reports]

    if len(forensic_reports) < 1:
        return

    try:
        logger.debug("Saving forensic reports to Kafka")
        self.producer.send(forensic_topic, forensic_reports)
    except UnknownTopicOrPartitionError:
        raise KafkaError(
            "Kafka error: Unknown topic or partition on broker")
    except Exception as e:
        raise KafkaError(
            "Kafka error: {0}".format(e.__str__()))
    try:
        self.producer.flush()
    except Exception as e:
        raise KafkaError(
            "Kafka error: {0}".format(e.__str__()))
Converts a record from a DMARC aggregate report into a more consistent format
def _parse_report_record(record, nameservers=None, dns_timeout=2.0, parallel=False): """ Converts a record from a DMARC aggregate report into a more consistent format Args: record (OrderedDict): The record to convert nameservers (list): A list of one or more nameservers to use (Cloudflare's public DNS resolvers by default) dns_timeout (float): Sets the DNS timeout in seconds Returns: OrderedDict: The converted record """ if nameservers is None: nameservers = ["1.1.1.1", "1.0.0.1", "2606:4700:4700::1111", "2606:4700:4700::1001", ] record = record.copy() new_record = OrderedDict() new_record_source = get_ip_address_info(record["row"]["source_ip"], cache=IP_ADDRESS_CACHE, nameservers=nameservers, timeout=dns_timeout, parallel=parallel) new_record["source"] = new_record_source new_record["count"] = int(record["row"]["count"]) policy_evaluated = record["row"]["policy_evaluated"].copy() new_policy_evaluated = OrderedDict([("disposition", "none"), ("dkim", "fail"), ("spf", "fail"), ("policy_override_reasons", []) ]) if "disposition" in policy_evaluated: new_policy_evaluated["disposition"] = policy_evaluated["disposition"] if new_policy_evaluated["disposition"].strip().lower() == "pass": new_policy_evaluated["disposition"] = "none" if "dkim" in policy_evaluated: new_policy_evaluated["dkim"] = policy_evaluated["dkim"] if "spf" in policy_evaluated: new_policy_evaluated["spf"] = policy_evaluated["spf"] reasons = [] spf_aligned = policy_evaluated["spf"] == "pass" dkim_aligned = policy_evaluated["dkim"] == "pass" dmarc_aligned = spf_aligned or dkim_aligned new_record["alignment"] = dict() new_record["alignment"]["spf"] = spf_aligned new_record["alignment"]["dkim"] = dkim_aligned new_record["alignment"]["dmarc"] = dmarc_aligned if "reason" in policy_evaluated: if type(policy_evaluated["reason"]) == list: reasons = policy_evaluated["reason"] else: reasons = [policy_evaluated["reason"]] for reason in reasons: if "comment" not in reason: reason["comment"] = None new_policy_evaluated["policy_override_reasons"] = reasons new_record["policy_evaluated"] = new_policy_evaluated new_record["identifiers"] = record["identifiers"].copy() new_record["auth_results"] = OrderedDict([("dkim", []), ("spf", [])]) if record["auth_results"] is not None: auth_results = record["auth_results"].copy() if "spf" not in auth_results: auth_results["spf"] = [] if "dkim" not in auth_results: auth_results["dkim"] = [] else: auth_results = new_record["auth_results"].copy() if type(auth_results["dkim"]) != list: auth_results["dkim"] = [auth_results["dkim"]] for result in auth_results["dkim"]: if "domain" in result and result["domain"] is not None: new_result = OrderedDict([("domain", result["domain"])]) if "selector" in result and result["selector"] is not None: new_result["selector"] = result["selector"] else: new_result["selector"] = "none" if "result" in result and result["result"] is not None: new_result["result"] = result["result"] else: new_result["result"] = "none" new_record["auth_results"]["dkim"].append(new_result) if type(auth_results["spf"]) != list: auth_results["spf"] = [auth_results["spf"]] for result in auth_results["spf"]: new_result = OrderedDict([("domain", result["domain"])]) if "scope" in result and result["scope"] is not None: new_result["scope"] = result["scope"] else: new_result["scope"] = "mfrom" if "result" in result and result["result"] is not None: new_result["result"] = result["result"] else: new_result["result"] = "none" new_record["auth_results"]["spf"].append(new_result) if "envelope_from" not in 
new_record["identifiers"]: envelope_from = None if len(auth_results["spf"]) > 0: envelope_from = new_record["auth_results"]["spf"][-1]["domain"] if envelope_from is not None: envelope_from = str(envelope_from).lower() new_record["identifiers"]["envelope_from"] = envelope_from elif new_record["identifiers"]["envelope_from"] is None: if len(auth_results["spf"]) > 0: envelope_from = new_record["auth_results"]["spf"][-1]["domain"] if envelope_from is not None: envelope_from = str(envelope_from).lower() new_record["identifiers"]["envelope_from"] = envelope_from envelope_to = None if "envelope_to" in new_record["identifiers"]: envelope_to = new_record["identifiers"]["envelope_to"] del new_record["identifiers"]["envelope_to"] new_record["identifiers"]["envelope_to"] = envelope_to return new_record
Parses a DMARC XML report string and returns a consistent OrderedDict
def parse_aggregate_report_xml(xml, nameservers=None, timeout=2.0, parallel=False): """Parses a DMARC XML report string and returns a consistent OrderedDict Args: xml (str): A string of DMARC aggregate report XML nameservers (list): A list of one or more nameservers to use (Cloudflare's public DNS resolvers by default) timeout (float): Sets the DNS timeout in seconds parallel (bool): Parallel processing Returns: OrderedDict: The parsed aggregate DMARC report """ errors = [] try: xmltodict.parse(xml)["feedback"] except Exception as e: errors.append(e.__str__()) try: # Replace XML header (sometimes they are invalid) xml = xml_header_regex.sub("<?xml version=\"1.0\"?>", xml) # Remove invalid schema tags xml = xml_schema_regex.sub('', xml) report = xmltodict.parse(xml)["feedback"] report_metadata = report["report_metadata"] schema = "draft" if "version" in report: schema = report["version"] new_report = OrderedDict([("xml_schema", schema)]) new_report_metadata = OrderedDict() if report_metadata["org_name"] is None: if report_metadata["email"] is not None: report_metadata["org_name"] = report_metadata[ "email"].split("@")[-1] org_name = report_metadata["org_name"] if org_name is not None: org_name = get_base_domain(org_name) new_report_metadata["org_name"] = org_name new_report_metadata["org_email"] = report_metadata["email"] extra = None if "extra_contact_info" in report_metadata: extra = report_metadata["extra_contact_info"] new_report_metadata["org_extra_contact_info"] = extra new_report_metadata["report_id"] = report_metadata["report_id"] report_id = new_report_metadata["report_id"] report_id = report_id.replace("<", "").replace(">", "").split("@")[0] new_report_metadata["report_id"] = report_id date_range = report["report_metadata"]["date_range"] date_range["begin"] = timestamp_to_human(date_range["begin"]) date_range["end"] = timestamp_to_human(date_range["end"]) new_report_metadata["begin_date"] = date_range["begin"] new_report_metadata["end_date"] = date_range["end"] if "error" in report["report_metadata"]: if type(report["report_metadata"]["error"]) != list: errors = [report["report_metadata"]["error"]] else: errors = report["report_metadata"]["error"] new_report_metadata["errors"] = errors new_report["report_metadata"] = new_report_metadata records = [] policy_published = report["policy_published"] new_policy_published = OrderedDict() new_policy_published["domain"] = policy_published["domain"] adkim = "r" if "adkim" in policy_published: if policy_published["adkim"] is not None: adkim = policy_published["adkim"] new_policy_published["adkim"] = adkim aspf = "r" if "aspf" in policy_published: if policy_published["aspf"] is not None: aspf = policy_published["aspf"] new_policy_published["aspf"] = aspf new_policy_published["p"] = policy_published["p"] sp = new_policy_published["p"] if "sp" in policy_published: if policy_published["sp"] is not None: sp = report["policy_published"]["sp"] new_policy_published["sp"] = sp pct = "100" if "pct" in policy_published: if policy_published["pct"] is not None: pct = report["policy_published"]["pct"] new_policy_published["pct"] = pct fo = "0" if "fo" in policy_published: if policy_published["fo"] is not None: fo = report["policy_published"]["fo"] new_policy_published["fo"] = fo new_report["policy_published"] = new_policy_published if type(report["record"]) == list: for record in report["record"]: report_record = _parse_report_record(record, nameservers=nameservers, dns_timeout=timeout, parallel=parallel) records.append(report_record) else: report_record 
= _parse_report_record(report["record"], nameservers=nameservers, dns_timeout=timeout, parallel=parallel) records.append(report_record) new_report["records"] = records return new_report except expat.ExpatError as error: raise InvalidAggregateReport( "Invalid XML: {0}".format(error.__str__())) except KeyError as error: raise InvalidAggregateReport( "Missing field: {0}".format(error.__str__())) except AttributeError: raise InvalidAggregateReport("Report missing required section") except Exception as error: raise InvalidAggregateReport( "Unexpected error: {0}".format(error.__str__()))
Extracts XML from a zip or gzip file at the given path, file-like object, or bytes.
def extract_xml(input_):
    """
    Extracts xml from a zip or gzip file at the given path, file-like object,
    or bytes.

    Args:
        input_: A path to a file, a file like object, or bytes

    Returns:
        str: The extracted XML
    """
    if type(input_) == str:
        file_object = open(input_, "rb")
    elif type(input_) == bytes:
        file_object = BytesIO(input_)
    else:
        file_object = input_
    try:
        header = file_object.read(6)
        file_object.seek(0)
        if header.startswith(MAGIC_ZIP):
            _zip = zipfile.ZipFile(file_object)
            xml = _zip.open(_zip.namelist()[0]).read().decode()
        elif header.startswith(MAGIC_GZIP):
            xml = GzipFile(fileobj=file_object).read().decode()
        elif header.startswith(MAGIC_XML):
            xml = file_object.read().decode()
        else:
            file_object.close()
            raise InvalidAggregateReport("Not a valid zip, gzip, or xml file")
        file_object.close()
    except UnicodeDecodeError:
        raise InvalidAggregateReport("File objects must be opened in binary "
                                     "(rb) mode")
    except Exception as error:
        raise InvalidAggregateReport(
            "Invalid archive file: {0}".format(error.__str__()))

    return xml
Parses a file at the given path, a file-like object, or bytes as an aggregate DMARC report
def parse_aggregate_report_file(_input, nameservers=None, dns_timeout=2.0,
                                parallel=False):
    """Parses a file at the given path, a file-like object, or bytes as an
    aggregate DMARC report

    Args:
        _input: A path to a file, a file like object, or bytes
        nameservers (list): A list of one or more nameservers to use
            (Cloudflare's public DNS resolvers by default)
        dns_timeout (float): Sets the DNS timeout in seconds
        parallel (bool): Parallel processing

    Returns:
        OrderedDict: The parsed DMARC aggregate report
    """
    xml = extract_xml(_input)

    return parse_aggregate_report_xml(xml,
                                      nameservers=nameservers,
                                      timeout=dns_timeout,
                                      parallel=parallel)
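A brief usage sketch for the parser above; the report filename is hypothetical, and the same call works for a zip archive or raw XML bytes.

# Hypothetical report path; the parser handles gzip, zip, or plain XML input.
import json

report = parse_aggregate_report_file(
    "example.com!sender.org!1700000000!1700086400.xml.gz")
print(json.dumps(report, indent=2, default=str))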
Converts one or more parsed aggregate reports to flat CSV format including headers
def parsed_aggregate_reports_to_csv(reports): """ Converts one or more parsed aggregate reports to flat CSV format, including headers Args: reports: A parsed aggregate report or list of parsed aggregate reports Returns: str: Parsed aggregate report data in flat CSV format, including headers """ def to_str(obj): return str(obj).lower() fields = ["xml_schema", "org_name", "org_email", "org_extra_contact_info", "report_id", "begin_date", "end_date", "errors", "domain", "adkim", "aspf", "p", "sp", "pct", "fo", "source_ip_address", "source_country", "source_reverse_dns", "source_base_domain", "count", "disposition", "dkim_alignment", "spf_alignment", "policy_override_reasons", "policy_override_comments", "envelope_from", "header_from", "envelope_to", "dkim_domains", "dkim_selectors", "dkim_results", "spf_domains", "spf_scopes", "spf_results"] csv_file_object = StringIO(newline="\n") writer = DictWriter(csv_file_object, fields) writer.writeheader() if type(reports) == OrderedDict: reports = [reports] for report in reports: xml_schema = report["xml_schema"] org_name = report["report_metadata"]["org_name"] org_email = report["report_metadata"]["org_email"] org_extra_contact = report["report_metadata"]["org_extra_contact_info"] report_id = report["report_metadata"]["report_id"] begin_date = report["report_metadata"]["begin_date"] end_date = report["report_metadata"]["end_date"] errors = "|".join(report["report_metadata"]["errors"]) domain = report["policy_published"]["domain"] adkim = report["policy_published"]["adkim"] aspf = report["policy_published"]["aspf"] p = report["policy_published"]["p"] sp = report["policy_published"]["sp"] pct = report["policy_published"]["pct"] fo = report["policy_published"]["fo"] report_dict = dict(xml_schema=xml_schema, org_name=org_name, org_email=org_email, org_extra_contact_info=org_extra_contact, report_id=report_id, begin_date=begin_date, end_date=end_date, errors=errors, domain=domain, adkim=adkim, aspf=aspf, p=p, sp=sp, pct=pct, fo=fo) for record in report["records"]: row = report_dict row["source_ip_address"] = record["source"]["ip_address"] row["source_country"] = record["source"]["country"] row["source_reverse_dns"] = record["source"]["reverse_dns"] row["source_base_domain"] = record["source"]["base_domain"] row["count"] = record["count"] row["disposition"] = record["policy_evaluated"]["disposition"] row["spf_alignment"] = record["policy_evaluated"]["spf"] row["dkim_alignment"] = record["policy_evaluated"]["dkim"] policy_override_reasons = list(map( lambda r: r["type"], record["policy_evaluated"] ["policy_override_reasons"])) policy_override_comments = list(map( lambda r: r["comment"] or "none", record["policy_evaluated"] ["policy_override_reasons"])) row["policy_override_reasons"] = ",".join( policy_override_reasons) row["policy_override_comments"] = "|".join( policy_override_comments) row["envelope_from"] = record["identifiers"]["envelope_from"] row["header_from"] = record["identifiers"]["header_from"] envelope_to = record["identifiers"]["envelope_to"] row["envelope_to"] = envelope_to dkim_domains = [] dkim_selectors = [] dkim_results = [] for dkim_result in record["auth_results"]["dkim"]: dkim_domains.append(dkim_result["domain"]) if "selector" in dkim_result: dkim_selectors.append(dkim_result["selector"]) dkim_results.append(dkim_result["result"]) row["dkim_domains"] = ",".join(map(to_str, dkim_domains)) row["dkim_selectors"] = ",".join(map(to_str, dkim_selectors)) row["dkim_results"] = ",".join(map(to_str, dkim_results)) spf_domains = [] spf_scopes = 
[] spf_results = [] for spf_result in record["auth_results"]["spf"]: spf_domains.append(spf_result["domain"]) spf_scopes.append(spf_result["scope"]) spf_results.append(spf_result["result"]) row["spf_domains"] = ",".join(map(to_str, spf_domains)) row["spf_scopes"] = ",".join(map(to_str, spf_scopes)) row["spf_results"] = ",".join(map(to_str, spf_results)) writer.writerow(row) csv_file_object.flush() return csv_file_object.getvalue()
Converts a DMARC forensic report and sample to an OrderedDict
def parse_forensic_report(feedback_report, sample, msg_date, nameservers=None, dns_timeout=2.0, strip_attachment_payloads=False, parallel=False): """ Converts a DMARC forensic report and sample to a ``OrderedDict`` Args: feedback_report (str): A message's feedback report as a string sample (str): The RFC 822 headers or RFC 822 message sample msg_date (str): The message's date header nameservers (list): A list of one or more nameservers to use (Cloudflare's public DNS resolvers by default) dns_timeout (float): Sets the DNS timeout in seconds strip_attachment_payloads (bool): Remove attachment payloads from forensic report results parallel (bool): Parallel processing Returns: OrderedDict: A parsed report and sample """ delivery_results = ["delivered", "spam", "policy", "reject", "other"] try: parsed_report = OrderedDict() report_values = feedback_report_regex.findall(feedback_report) for report_value in report_values: key = report_value[0].lower().replace("-", "_") parsed_report[key] = report_value[1] if "arrival_date" not in parsed_report: if msg_date is None: raise InvalidForensicReport( "Forensic sample is not a valid email") parsed_report["arrival_date"] = msg_date.isoformat() if "version" not in parsed_report: parsed_report["version"] = 1 if "user_agent" not in parsed_report: parsed_report["user_agent"] = None if "delivery_result" not in parsed_report: parsed_report["delivery_result"] = None else: for delivery_result in delivery_results: if delivery_result in parsed_report["delivery_result"].lower(): parsed_report["delivery_result"] = delivery_result break if parsed_report["delivery_result"] not in delivery_results: parsed_report["delivery_result"] = "other" arrival_utc = human_timestamp_to_datetime( parsed_report["arrival_date"], to_utc=True) arrival_utc = arrival_utc.strftime("%Y-%m-%d %H:%M:%S") parsed_report["arrival_date_utc"] = arrival_utc ip_address = parsed_report["source_ip"] parsed_report_source = get_ip_address_info(ip_address, nameservers=nameservers, timeout=dns_timeout, parallel=parallel) parsed_report["source"] = parsed_report_source del parsed_report["source_ip"] if "identity_alignment" not in parsed_report: parsed_report["authentication_mechanisms"] = [] elif parsed_report["identity_alignment"] == "none": parsed_report["authentication_mechanisms"] = [] del parsed_report["identity_alignment"] else: auth_mechanisms = parsed_report["identity_alignment"] auth_mechanisms = auth_mechanisms.split(",") parsed_report["authentication_mechanisms"] = auth_mechanisms del parsed_report["identity_alignment"] if "auth_failure" not in parsed_report: parsed_report["auth_failure"] = "dmarc" auth_failure = parsed_report["auth_failure"].split(",") parsed_report["auth_failure"] = auth_failure optional_fields = ["original_envelope_id", "dkim_domain", "original_mail_from", "original_rcpt_to"] for optional_field in optional_fields: if optional_field not in parsed_report: parsed_report[optional_field] = None parsed_sample = parse_email( sample, strip_attachment_payloads=strip_attachment_payloads) if "reported_domain" not in parsed_report: parsed_report["reported_domain"] = parsed_sample["from"]["domain"] sample_headers_only = False number_of_attachments = len(parsed_sample["attachments"]) if number_of_attachments < 1 and parsed_sample["body"] is None: sample_headers_only = True if sample_headers_only and parsed_sample["has_defects"]: del parsed_sample["defects"] del parsed_sample["defects_categories"] del parsed_sample["has_defects"] parsed_report["sample_headers_only"] = sample_headers_only 
parsed_report["sample"] = sample parsed_report["parsed_sample"] = parsed_sample return parsed_report except KeyError as error: raise InvalidForensicReport("Missing value: {0}".format( error.__str__())) except Exception as error: raise InvalidForensicReport( "Unexpected error: {0}".format(error.__str__()))
Converts one or more parsed forensic reports to flat CSV format including headers
def parsed_forensic_reports_to_csv(reports):
    """
    Converts one or more parsed forensic reports to flat CSV format, including
    headers

    Args:
        reports: A parsed forensic report or list of parsed forensic reports

    Returns:
        str: Parsed forensic report data in flat CSV format, including headers
    """
    fields = ["feedback_type", "user_agent", "version", "original_envelope_id",
              "original_mail_from", "original_rcpt_to", "arrival_date",
              "arrival_date_utc", "subject", "message_id",
              "authentication_results", "dkim_domain", "source_ip_address",
              "source_country", "source_reverse_dns", "source_base_domain",
              "delivery_result", "auth_failure", "reported_domain",
              "authentication_mechanisms", "sample_headers_only"]

    if type(reports) == OrderedDict:
        reports = [reports]
    csv_file = StringIO()
    csv_writer = DictWriter(csv_file, fieldnames=fields)
    csv_writer.writeheader()
    for report in reports:
        row = report.copy()
        row["source_ip_address"] = report["source"]["ip_address"]
        row["source_reverse_dns"] = report["source"]["reverse_dns"]
        row["source_base_domain"] = report["source"]["base_domain"]
        row["source_country"] = report["source"]["country"]
        del row["source"]
        row["subject"] = report["parsed_sample"]["subject"]
        row["auth_failure"] = ",".join(report["auth_failure"])
        authentication_mechanisms = report["authentication_mechanisms"]
        row["authentication_mechanisms"] = ",".join(
            authentication_mechanisms)
        del row["sample"]
        del row["parsed_sample"]
        csv_writer.writerow(row)

    return csv_file.getvalue()
Parses a DMARC report from an email
def parse_report_email(input_, nameservers=None, dns_timeout=2.0, strip_attachment_payloads=False, parallel=False): """ Parses a DMARC report from an email Args: input_: An emailed DMARC report in RFC 822 format, as bytes or a string nameservers (list): A list of one or more nameservers to use dns_timeout (float): Sets the DNS timeout in seconds strip_attachment_payloads (bool): Remove attachment payloads from forensic report results parallel (bool): Parallel processing Returns: OrderedDict: * ``report_type``: ``aggregate`` or ``forensic`` * ``report``: The parsed report """ result = None try: if is_outlook_msg(input_): input_ = convert_outlook_msg(input_) if type(input_) == bytes: input_ = input_.decode(encoding="utf8") msg = mailparser.parse_from_string(input_) msg_headers = json.loads(msg.headers_json) date = email.utils.format_datetime(datetime.utcnow()) if "Date" in msg_headers: date = human_timestamp_to_datetime( msg_headers["Date"]) msg = email.message_from_string(input_) except Exception as e: raise InvalidDMARCReport(e.__str__()) subject = None feedback_report = None sample = None if "Subject" in msg_headers: subject = msg_headers["Subject"] for part in msg.walk(): content_type = part.get_content_type() payload = part.get_payload() if type(payload) != list: payload = [payload] payload = payload[0].__str__() if content_type == "message/feedback-report": try: if "Feedback-Type" in payload: feedback_report = payload else: feedback_report = b64decode(payload).__str__() feedback_report = feedback_report.lstrip( "b'").rstrip("'") feedback_report = feedback_report.replace("\\r", "") feedback_report = feedback_report.replace("\\n", "\n") except (ValueError, TypeError, binascii.Error): feedback_report = payload elif content_type == "text/rfc822-headers": sample = payload elif content_type == "message/rfc822": sample = payload else: try: payload = b64decode(payload) if payload.startswith(MAGIC_ZIP) or \ payload.startswith(MAGIC_GZIP) or \ payload.startswith(MAGIC_XML): ns = nameservers aggregate_report = parse_aggregate_report_file( payload, nameservers=ns, dns_timeout=dns_timeout, parallel=parallel) result = OrderedDict([("report_type", "aggregate"), ("report", aggregate_report)]) return result except (TypeError, ValueError, binascii.Error): pass except InvalidAggregateReport as e: error = 'Message with subject "{0}" ' \ 'is not a valid ' \ 'aggregate DMARC report: {1}'.format(subject, e) raise InvalidAggregateReport(error) except FileNotFoundError as e: error = 'Unable to parse message with ' \ 'subject "{0}": {1}'.format(subject, e) raise InvalidDMARCReport(error) if feedback_report and sample: try: forensic_report = parse_forensic_report( feedback_report, sample, date, nameservers=nameservers, dns_timeout=dns_timeout, strip_attachment_payloads=strip_attachment_payloads, parallel=parallel) except InvalidForensicReport as e: error = 'Message with subject "{0}" ' \ 'is not a valid ' \ 'forensic DMARC report: {1}'.format(subject, e) raise InvalidForensicReport(error) except Exception as e: raise InvalidForensicReport(e.__str__()) result = OrderedDict([("report_type", "forensic"), ("report", forensic_report)]) return result if result is None: error = 'Message with subject "{0}" is ' \ 'not a valid DMARC report'.format(subject) raise InvalidDMARCReport(error)
Parses a DMARC aggregate or forensic file at the given path, a file-like object, or bytes
def parse_report_file(input_, nameservers=None, dns_timeout=2.0,
                      strip_attachment_payloads=False, parallel=False):
    """Parses a DMARC aggregate or forensic file at the given path, a
    file-like object, or bytes

    Args:
        input_: A path to a file, a file like object, or bytes
        nameservers (list): A list of one or more nameservers to use
            (Cloudflare's public DNS resolvers by default)
        dns_timeout (float): Sets the DNS timeout in seconds
        strip_attachment_payloads (bool): Remove attachment payloads from
            forensic report results
        parallel (bool): Parallel processing

    Returns:
        OrderedDict: The parsed DMARC report
    """
    if type(input_) == str:
        file_object = open(input_, "rb")
    elif type(input_) == bytes:
        file_object = BytesIO(input_)
    else:
        file_object = input_

    content = file_object.read()
    try:
        report = parse_aggregate_report_file(content,
                                             nameservers=nameservers,
                                             dns_timeout=dns_timeout,
                                             parallel=parallel)
        results = OrderedDict([("report_type", "aggregate"),
                               ("report", report)])
    except InvalidAggregateReport:
        try:
            sa = strip_attachment_payloads
            results = parse_report_email(content,
                                         nameservers=nameservers,
                                         dns_timeout=dns_timeout,
                                         strip_attachment_payloads=sa,
                                         parallel=parallel)
        except InvalidDMARCReport:
            raise InvalidDMARCReport("Not a valid aggregate or forensic "
                                     "report")
    return results
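A short usage sketch that feeds the parser a file of either type and branches on the detected report type; the path is hypothetical.

# Hypothetical input path; the parser decides whether it is aggregate or forensic.
parsed = parse_report_file("reports/report.eml", dns_timeout=5.0)
if parsed["report_type"] == "aggregate":
    print("aggregate report from",
          parsed["report"]["report_metadata"]["org_name"])
else:
    print("forensic report about", parsed["report"]["reported_domain"])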
Returns a list of an IMAP server's capabilities
def get_imap_capabilities(server):
    """
    Returns a list of an IMAP server's capabilities

    Args:
        server (imapclient.IMAPClient): An instance of imapclient.IMAPClient

    Returns (list): A list of capabilities
    """
    capabilities = list(map(str, list(server.capabilities())))
    for i in range(len(capabilities)):
        capabilities[i] = str(capabilities[i]).replace("b'",
                                                       "").replace("'", "")
    logger.debug("IMAP server supports: {0}".format(capabilities))

    return capabilities
Fetches and parses DMARC reports from an inbox
def get_dmarc_reports_from_inbox(host=None, user=None, password=None, connection=None, port=None, ssl=True, ssl_context=None, move_supported=None, reports_folder="INBOX", archive_folder="Archive", delete=False, test=False, nameservers=None, dns_timeout=6.0, strip_attachment_payloads=False, results=None): """ Fetches and parses DMARC reports from sn inbox Args: host: The mail server hostname or IP address user: The mail server user password: The mail server password connection: An IMAPCLient connection to reuse port: The mail server port ssl (bool): Use SSL/TLS ssl_context (SSLContext): A SSL context move_supported: Indicate if the IMAP server supports the MOVE command (autodetect if None) reports_folder: The IMAP folder where reports can be found archive_folder: The folder to move processed mail to delete (bool): Delete messages after processing them test (bool): Do not move or delete messages after processing them nameservers (list): A list of DNS nameservers to query dns_timeout (float): Set the DNS query timeout strip_attachment_payloads (bool): Remove attachment payloads from forensic report results results (dict): Results from the previous run Returns: OrderedDict: Lists of ``aggregate_reports`` and ``forensic_reports`` """ def chunks(l, n): """Yield successive n-sized chunks from l.""" for i in range(0, len(l), n): yield l[i:i + n] if delete and test: raise ValueError("delete and test options are mutually exclusive") if connection is None and (user is None or password is None): raise ValueError("Must supply a connection, or a username and " "password") aggregate_reports = [] forensic_reports = [] aggregate_report_msg_uids = [] forensic_report_msg_uids = [] aggregate_reports_folder = "{0}/Aggregate".format(archive_folder) forensic_reports_folder = "{0}/Forensic".format(archive_folder) invalid_reports_folder = "{0}/Invalid".format(archive_folder) if results: aggregate_reports = results["aggregate_reports"].copy() forensic_reports = results["forensic_reports"].copy() try: if connection: server = connection else: if not ssl: logger.debug("Connecting to IMAP over plain text") if ssl_context is None: ssl_context = create_default_context() server = imapclient.IMAPClient(host, port=port, ssl=ssl, ssl_context=ssl_context, use_uid=True) server.login(user, password) if move_supported is None: server_capabilities = get_imap_capabilities(server) move_supported = "MOVE" in server_capabilities def delete_messages(msg_uids): logger.debug("Deleting message UID(s) {0}".format(",".join( str(uid) for uid in msg_uids))) if type(msg_uids) == str or type(msg_uids) == int: msg_uids = [int(msg_uids)] server.delete_messages(msg_uids, silent=True) server.expunge(msg_uids) def move_messages(msg_uids, folder): if type(msg_uids) == str or type(msg_uids) == int: msg_uids = [int(msg_uids)] for chunk in chunks(msg_uids, 100): if move_supported: logger.debug("Moving message UID(s) {0} to {1}".format( ",".join(str(uid) for uid in chunk), folder )) server.move(chunk, folder) else: logger.debug("Copying message UID(s) {0} to {1}".format( ",".join(str(uid) for uid in chunk), folder )) server.copy(msg_uids, folder) delete_messages(msg_uids) if not server.folder_exists(archive_folder): logger.debug("Creating IMAP folder: {0}".format(archive_folder)) server.create_folder(archive_folder) try: # Test subfolder creation if not server.folder_exists(aggregate_reports_folder): server.create_folder(aggregate_reports_folder) logger.debug( "Creating IMAP folder: {0}".format( aggregate_reports_folder)) except 
imapclient.exceptions.IMAPClientError: # Only replace / with . when . doesn't work # This usually indicates a dovecot IMAP server aggregate_reports_folder = aggregate_reports_folder.replace("/", ".") forensic_reports_folder = forensic_reports_folder.replace("/", ".") invalid_reports_folder = invalid_reports_folder.replace("/", ".") subfolders = [aggregate_reports_folder, forensic_reports_folder, invalid_reports_folder] for subfolder in subfolders: if not server.folder_exists(subfolder): logger.debug( "Creating IMAP folder: {0}".format(subfolder)) server.create_folder(subfolder) server.select_folder(reports_folder) messages = server.search() total_messages = len(messages) logger.debug("Found {0} messages in IMAP folder {1}".format( len(messages), reports_folder)) for i in range(len(messages)): msg_uid = messages[i] logger.debug("Processing message {0} of {1}: UID {2}".format( i+1, total_messages, msg_uid )) try: try: raw_msg = server.fetch(msg_uid, ["RFC822"])[msg_uid] msg_keys = [b'RFC822', b'BODY[NULL]', b'BODY[]'] msg_key = '' for key in msg_keys: if key in raw_msg.keys(): msg_key = key break raw_msg = raw_msg[msg_key] except (ConnectionResetError, socket.error, TimeoutError, imapclient.exceptions.IMAPClientError) as error: error = error.__str__().lstrip("b'").rstrip("'").rstrip( ".") logger.debug("IMAP error: {0}".format(error.__str__())) logger.debug("Reconnecting to IMAP") try: server.shutdown() except Exception as e: logger.debug( "Failed to log out: {0}".format(e.__str__())) if not ssl: logger.debug("Connecting to IMAP over plain text") server = imapclient.IMAPClient(host, port=port, ssl=ssl, ssl_context=ssl_context, use_uid=True) server.login(user, password) server.select_folder(reports_folder) raw_msg = server.fetch(msg_uid, ["RFC822"])[msg_uid][b"RFC822"] msg_content = raw_msg.decode("utf-8", errors="replace") sa = strip_attachment_payloads parsed_email = parse_report_email(msg_content, nameservers=nameservers, dns_timeout=dns_timeout, strip_attachment_payloads=sa) if parsed_email["report_type"] == "aggregate": aggregate_reports.append(parsed_email["report"]) aggregate_report_msg_uids.append(msg_uid) elif parsed_email["report_type"] == "forensic": forensic_reports.append(parsed_email["report"]) forensic_report_msg_uids.append(msg_uid) except InvalidDMARCReport as error: logger.warning(error.__str__()) if not test: if delete: logger.debug( "Deleting message UID {0}".format(msg_uid)) delete_messages([msg_uid]) else: logger.debug( "Moving message UID {0} to {1}".format( msg_uid, invalid_reports_folder)) move_messages([msg_uid], invalid_reports_folder) if not test: if delete: processed_messages = aggregate_report_msg_uids + \ forensic_report_msg_uids number_of_processed_msgs = len(processed_messages) for i in range(number_of_processed_msgs): msg_uid = processed_messages[i] logger.debug( "Deleting message {0} of {1}: UID {2}".format( i + 1, number_of_processed_msgs, msg_uid)) try: delete_messages([msg_uid]) except imapclient.exceptions.IMAPClientError as e: e = e.__str__().lstrip("b'").rstrip( "'").rstrip(".") message = "Error deleting message UID" e = "{0} {1}: " "{2}".format(message, msg_uid, e) logger.error("IMAP error: {0}".format(e)) except (ConnectionResetError, socket.error, TimeoutError) as e: logger.debug("IMAP error: {0}".format(e.__str__())) logger.debug("Reconnecting to IMAP") try: server.shutdown() except Exception as e: logger.debug( "Failed to log out: {0}".format(e.__str__())) if not ssl: logger.debug("Connecting to IMAP over plain text") server = 
imapclient.IMAPClient(host, port=port, ssl=ssl, ssl_context=ssl_context, use_uid=True) server.login(user, password) server.select_folder(reports_folder) delete_messages([msg_uid]) else: if len(aggregate_report_msg_uids) > 0: log_message = "Moving aggregate report messages from" logger.debug( "{0} {1} to {1}".format( log_message, reports_folder, aggregate_reports_folder)) number_of_agg_report_msgs = len(aggregate_report_msg_uids) for i in range(number_of_agg_report_msgs): msg_uid = aggregate_report_msg_uids[i] logger.debug( "Moving message {0} of {1}: UID {2}".format( i+1, number_of_agg_report_msgs, msg_uid)) try: move_messages([msg_uid], aggregate_reports_folder) except imapclient.exceptions.IMAPClientError as e: e = e.__str__().lstrip("b'").rstrip( "'").rstrip(".") message = "Error moving message UID" e = "{0} {1}: {2}".format(message, msg_uid, e) logger.error("IMAP error: {0}".format(e)) except (ConnectionResetError, socket.error, TimeoutError) as error: logger.debug("IMAP error: {0}".format( error.__str__())) logger.debug("Reconnecting to IMAP") try: server.shutdown() except Exception as e: logger.debug("Failed to log out: {0}".format( e.__str__())) if not ssl: logger.debug( "Connecting to IMAP over plain text") server = imapclient.IMAPClient( host, port=port, ssl=ssl, ssl_context=ssl_context, use_uid=True ) server.login(user, password) server.select_folder(reports_folder) move_messages([msg_uid], aggregate_reports_folder) if len(forensic_report_msg_uids) > 0: message = "Moving forensic report messages from" logger.debug( "{0} {1} to {2}".format(message, reports_folder, forensic_reports_folder)) number_of_forensic_msgs = len(forensic_report_msg_uids) for i in range(number_of_forensic_msgs): msg_uid = forensic_report_msg_uids[i] message = "Moving message" logger.debug("{0} {1} of {2}: UID {2}".format( message, i + 1, number_of_forensic_msgs, msg_uid)) try: move_messages([msg_uid], forensic_reports_folder) except imapclient.exceptions.IMAPClientError as e: e = e.__str__().lstrip("b'").rstrip( "'").rstrip(".") e = "Error moving message UID {0}: {1}".format( msg_uid, e) logger.error("IMAP error: {0}".format(e)) except (ConnectionResetError, TimeoutError) as error: logger.debug("IMAP error: {0}".format( error.__str__())) logger.debug("Reconnecting to IMAP") try: server.shutdown() except Exception as e: logger.debug("Failed to " "disconnect: {0}".format( e.__str__())) if not ssl: logger.debug( "Connecting to IMAP over plain text") server = imapclient.IMAPClient( host, port=port, ssl=ssl, ssl_context=ssl_context, use_uid=True) server.login(user, password) server.select_folder(reports_folder) move_messages([msg_uid], forensic_reports_folder) results = OrderedDict([("aggregate_reports", aggregate_reports), ("forensic_reports", forensic_reports)]) if not test and total_messages > 0: # Process emails that came in during the last run results = get_dmarc_reports_from_inbox( host=host, user=user, password=password, connection=connection, port=port, ssl=ssl, ssl_context=ssl_context, move_supported=move_supported, reports_folder=reports_folder, archive_folder=archive_folder, delete=delete, test=test, nameservers=nameservers, dns_timeout=dns_timeout, strip_attachment_payloads=strip_attachment_payloads, results=results ) return results except imapclient.exceptions.IMAPClientError as error: error = error.__str__().lstrip("b'").rstrip("'").rstrip(".") # Workaround for random Exchange/Office365 IMAP errors if "unexpected response" in error or "BAD" in error: sleep_minutes = 5 logger.debug( "{0}. 
" "Waiting {1} minutes before trying again".format( error, sleep_minutes)) time.sleep(sleep_minutes * 60) results = get_dmarc_reports_from_inbox( host=host, user=user, password=password, connection=connection, port=port, ssl=ssl, ssl_context=ssl_context, move_supported=move_supported, reports_folder=reports_folder, archive_folder=archive_folder, delete=delete, test=test, nameservers=nameservers, dns_timeout=dns_timeout, strip_attachment_payloads=strip_attachment_payloads, results=results ) return results raise IMAPError(error) except socket.gaierror: raise IMAPError("DNS resolution failed") except ConnectionRefusedError: raise IMAPError("Connection refused") except ConnectionResetError: sleep_minutes = 5 logger.debug( "Connection reset. " "Waiting {0} minutes before trying again".format(sleep_minutes)) time.sleep(sleep_minutes * 60) results = get_dmarc_reports_from_inbox( host=host, user=user, password=password, connection=connection, port=port, ssl=ssl, ssl_context=ssl_context, move_supported=move_supported, reports_folder=reports_folder, archive_folder=archive_folder, delete=delete, test=test, nameservers=nameservers, dns_timeout=dns_timeout, strip_attachment_payloads=strip_attachment_payloads, results=results ) return results except ConnectionAbortedError: raise IMAPError("Connection aborted") except TimeoutError: raise IMAPError("Connection timed out") except SSLError as error: raise IMAPError("SSL error: {0}".format(error.__str__())) except CertificateError as error: raise IMAPError("Certificate error: {0}".format(error.__str__()))
Save report data in the given directory
def save_output(results, output_directory="output"):
    """
    Save report data in the given directory

    Args:
        results (OrderedDict): Parsing results
        output_directory: The path to the directory to save in
    """
    aggregate_reports = results["aggregate_reports"]
    forensic_reports = results["forensic_reports"]

    if os.path.exists(output_directory):
        if not os.path.isdir(output_directory):
            raise ValueError("{0} is not a directory".format(output_directory))
    else:
        os.makedirs(output_directory)

    with open("{0}".format(os.path.join(output_directory, "aggregate.json")),
              "w", newline="\n", encoding="utf-8") as agg_json:
        agg_json.write(json.dumps(aggregate_reports, ensure_ascii=False,
                                  indent=2))

    with open("{0}".format(os.path.join(output_directory, "aggregate.csv")),
              "w", newline="\n", encoding="utf-8") as agg_csv:
        csv = parsed_aggregate_reports_to_csv(aggregate_reports)
        agg_csv.write(csv)

    with open("{0}".format(os.path.join(output_directory, "forensic.json")),
              "w", newline="\n", encoding="utf-8") as for_json:
        for_json.write(json.dumps(forensic_reports, ensure_ascii=False,
                                  indent=2))

    with open("{0}".format(os.path.join(output_directory, "forensic.csv")),
              "w", newline="\n", encoding="utf-8") as for_csv:
        csv = parsed_forensic_reports_to_csv(forensic_reports)
        for_csv.write(csv)

    samples_directory = os.path.join(output_directory, "samples")
    if not os.path.exists(samples_directory):
        os.makedirs(samples_directory)

    sample_filenames = []
    for forensic_report in forensic_reports:
        sample = forensic_report["sample"]
        message_count = 0
        parsed_sample = forensic_report["parsed_sample"]
        subject = parsed_sample["filename_safe_subject"]
        filename = subject

        while filename in sample_filenames:
            message_count += 1
            filename = "{0} ({1})".format(subject, message_count)

        sample_filenames.append(filename)

        filename = "{0}.eml".format(filename)
        path = os.path.join(samples_directory, filename)

        with open(path, "w", newline="\n", encoding="utf-8") as sample_file:
            sample_file.write(sample)
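A hedged end-to-end sketch tying the IMAP fetch and output helpers together; the hostname, credentials, and output directory are hypothetical.

# Hypothetical credentials and paths.
results = get_dmarc_reports_from_inbox(host="imap.example.com",
                                       user="dmarc@example.com",
                                       password="hunter2",
                                       reports_folder="INBOX",
                                       archive_folder="Archive")
save_output(results, output_directory="output")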
Creates a zip file of parsed report output
def get_report_zip(results): """ Creates a zip file of parsed report output Args: results (OrderedDict): The parsed results Returns: bytes: zip file bytes """ def add_subdir(root_path, subdir): subdir_path = os.path.join(root_path, subdir) for subdir_root, subdir_dirs, subdir_files in os.walk(subdir_path): for subdir_file in subdir_files: subdir_file_path = os.path.join(root_path, subdir, subdir_file) if os.path.isfile(subdir_file_path): rel_path = os.path.relpath(subdir_root, subdir_file_path) subdir_arc_name = os.path.join(rel_path, subdir_file) zip_file.write(subdir_file_path, subdir_arc_name) for subdir in subdir_dirs: add_subdir(subdir_path, subdir) storage = BytesIO() tmp_dir = tempfile.mkdtemp() try: save_output(results, tmp_dir) with zipfile.ZipFile(storage, 'w', zipfile.ZIP_DEFLATED) as zip_file: for root, dirs, files in os.walk(tmp_dir): for file in files: file_path = os.path.join(root, file) if os.path.isfile(file_path): arcname = os.path.join(os.path.relpath(root, tmp_dir), file) zip_file.write(file_path, arcname) for directory in dirs: dir_path = os.path.join(root, directory) if os.path.isdir(dir_path): zip_file.write(dir_path, directory) add_subdir(root, directory) finally: shutil.rmtree(tmp_dir) return storage.getvalue()
Emails parsing results as a zip file
def email_results(results, host, mail_from, mail_to, port=0, ssl=False, user=None, password=None, subject=None, attachment_filename=None, message=None, ssl_context=None): """ Emails parsing results as a zip file Args: results (OrderedDict): Parsing results host: Mail server hostname or IP address mail_from: The value of the message from header mail_to : A list of addresses to mail to port (int): Port to use ssl (bool): Require a SSL connection from the start user: An optional username password: An optional password subject: Overrides the default message subject attachment_filename: Override the default attachment filename message: Override the default plain text body ssl_context: SSL context options """ logging.debug("Emailing report to: {0}".format(",".join(mail_to))) date_string = datetime.now().strftime("%Y-%m-%d") if attachment_filename: if not attachment_filename.lower().endswith(".zip"): attachment_filename += ".zip" filename = attachment_filename else: filename = "DMARC-{0}.zip".format(date_string) assert isinstance(mail_to, list) msg = MIMEMultipart() msg['From'] = mail_from msg['To'] = ", ".join(mail_to) msg['Date'] = email.utils.formatdate(localtime=True) msg['Subject'] = subject or "DMARC results for {0}".format(date_string) text = message or "Please see the attached zip file\n" msg.attach(MIMEText(text)) zip_bytes = get_report_zip(results) part = MIMEApplication(zip_bytes, Name=filename) part['Content-Disposition'] = 'attachment; filename="{0}"'.format(filename) msg.attach(part) try: if ssl_context is None: ssl_context = create_default_context() if ssl: server = smtplib.SMTP_SSL(host, port=port, context=ssl_context) server.connect(host, port) server.ehlo_or_helo_if_needed() else: server = smtplib.SMTP(host, port=port) server.connect(host, port) server.ehlo_or_helo_if_needed() if server.has_extn("starttls"): server.starttls(context=ssl_context) server.ehlo() else: logger.warning("SMTP server does not support STARTTLS. " "Proceeding in plain text!") if user and password: server.login(user, password) server.sendmail(mail_from, mail_to, msg.as_string()) except smtplib.SMTPException as error: error = error.__str__().lstrip("b'").rstrip("'").rstrip(".") raise SMTPError(error) except socket.gaierror: raise SMTPError("DNS resolution failed") except ConnectionRefusedError: raise SMTPError("Connection refused") except ConnectionResetError: raise SMTPError("Connection reset") except ConnectionAbortedError: raise SMTPError("Connection aborted") except TimeoutError: raise SMTPError("Connection timed out") except SSLError as error: raise SMTPError("SSL error: {0}".format(error.__str__())) except CertificateError as error: raise SMTPError("Certificate error: {0}".format(error.__str__()))
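For reference, a minimal standalone sketch of the opportunistic STARTTLS logic used above. The hostname and port are placeholders, and no authentication or message sending is attempted.

import smtplib
from ssl import create_default_context

# Hypothetical server; connect in plain text, then upgrade if the server
# advertises STARTTLS, mirroring the fallback behaviour in email_results().
context = create_default_context()
server = smtplib.SMTP("mail.example.com", 587)
server.ehlo()
if server.has_extn("starttls"):
    server.starttls(context=context)
    server.ehlo()
else:
    print("Server does not support STARTTLS; proceeding in plain text")
server.quit()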
Use an IDLE IMAP connection to parse incoming emails and pass the results to a callback function
def watch_inbox(host, username, password, callback, port=None, ssl=True, ssl_context=None, reports_folder="INBOX", archive_folder="Archive", delete=False, test=False, wait=30, nameservers=None, dns_timeout=6.0, strip_attachment_payloads=False): """ Use an IDLE IMAP connection to parse incoming emails, and pass the results to a callback function Args: host: The mail server hostname or IP address username: The mail server username password: The mail server password callback: The callback function to receive the parsing results port: The mail server port ssl (bool): Use SSL/TLS ssl_context (SSLContext): A SSL context reports_folder: The IMAP folder where reports can be found archive_folder: The folder to move processed mail to delete (bool): Delete messages after processing them test (bool): Do not move or delete messages after processing them wait (int): Number of seconds to wait for a IMAP IDLE response nameservers (list): A list of one or more nameservers to use (Cloudflare's public DNS resolvers by default) dns_timeout (float): Set the DNS query timeout strip_attachment_payloads (bool): Replace attachment payloads in forensic report samples with None """ rf = reports_folder af = archive_folder ns = nameservers dt = dns_timeout if ssl_context is None: ssl_context = create_default_context() server = imapclient.IMAPClient(host, port=port, ssl=ssl, ssl_context=ssl_context, use_uid=True) try: server.login(username, password) imap_capabilities = get_imap_capabilities(server) if "IDLE" not in imap_capabilities: raise IMAPError("Cannot watch inbox: IMAP server does not support " "the IDLE command") ms = "MOVE" in imap_capabilities server.select_folder(rf) idle_start_time = time.monotonic() server.idle() except imapclient.exceptions.IMAPClientError as error: error = error.__str__().replace("b'", "").replace("'", "") # Workaround for random Exchange/Office365 IMAP errors if "unexpected response" in error or "BAD" in error: sleep_minutes = 5 logger.debug( "{0}. 
" "Waiting {1} minutes before trying again".format( error, sleep_minutes)) logger.debug("Reconnecting watcher") try: server.logout() except Exception as e: logger.debug("Failed to log out: {0}".format(e.__str__())) server = imapclient.IMAPClient(host) server.login(username, password) server.select_folder(rf) idle_start_time = time.monotonic() ms = "MOVE" in get_imap_capabilities(server) sa = strip_attachment_payloads res = get_dmarc_reports_from_inbox(connection=server, move_supported=ms, reports_folder=rf, archive_folder=af, delete=delete, test=test, nameservers=ns, dns_timeout=dt, strip_attachment_payloads=sa) callback(res) server.idle() else: raise IMAPError(error) except socket.gaierror: raise IMAPError("DNS resolution failed") except ConnectionRefusedError: raise IMAPError("Connection refused") except ConnectionResetError: logger.debug("IMAP error: Connection reset") logger.debug("Reconnecting watcher") try: server.shutdown() except Exception as e: logger.debug("Failed to disconnect: {0}".format(e.__str__())) server = imapclient.IMAPClient(host) server.login(username, password) server.select_folder(rf) idle_start_time = time.monotonic() ms = "MOVE" in get_imap_capabilities(server) res = get_dmarc_reports_from_inbox(connection=server, move_supported=ms, reports_folder=rf, archive_folder=af, delete=delete, test=test, nameservers=ns, dns_timeout=dt) callback(res) server.idle() except KeyError: logger.debug("IMAP error: Server returned unexpected result") logger.debug("Reconnecting watcher") try: server.logout() except Exception as e: logger.debug("Failed to log out: {0}".format(e.__str__())) server = imapclient.IMAPClient(host) server.login(username, password) server.select_folder(rf) idle_start_time = time.monotonic() ms = "MOVE" in get_imap_capabilities(server) res = get_dmarc_reports_from_inbox(connection=server, move_supported=ms, reports_folder=rf, archive_folder=af, delete=delete, test=test, nameservers=ns, dns_timeout=dt) callback(res) server.idle() except ConnectionAbortedError: raise IMAPError("Connection aborted") except TimeoutError: raise IMAPError("Connection timed out") except SSLError as error: raise IMAPError("SSL error: {0}".format(error.__str__())) except CertificateError as error: raise IMAPError("Certificate error: {0}".format(error.__str__())) except BrokenPipeError: logger.debug("IMAP error: Broken pipe") logger.debug("Reconnecting watcher") try: server.shutdown() except Exception as e: logger.debug("Failed to disconnect: {0}".format(e.__str__())) server = imapclient.IMAPClient(host) server.login(username, password) server.select_folder(rf) idle_start_time = time.monotonic() ms = "MOVE" in get_imap_capabilities(server) res = get_dmarc_reports_from_inbox(connection=server, move_supported=ms, reports_folder=rf, archive_folder=af, delete=delete, test=test, nameservers=ns, dns_timeout=dt) callback(res) server.idle() while True: try: # Refresh the IDLE session every 5 minutes to stay connected if time.monotonic() - idle_start_time > 5 * 60: logger.debug("IMAP: Refreshing IDLE session") server.idle_done() server.idle() idle_start_time = time.monotonic() responses = server.idle_check(timeout=wait) if responses is not None: if len(responses) == 0: # Gmail/G-Suite does not generate anything in the responses server.idle_done() res = get_dmarc_reports_from_inbox(connection=server, move_supported=ms, reports_folder=rf, archive_folder=af, delete=delete, test=test, nameservers=ns, dns_timeout=dt) callback(res) server.idle() idle_start_time = time.monotonic() for response in 
responses: logging.debug("Received response: {0}".format(response)) if response[0] > 0 and response[1] == b'RECENT': server.idle_done() res = get_dmarc_reports_from_inbox(connection=server, move_supported=ms, reports_folder=rf, archive_folder=af, delete=delete, test=test, nameservers=ns, dns_timeout=dt) callback(res) server.idle() idle_start_time = time.monotonic() break except imapclient.exceptions.IMAPClientError as error: error = error.__str__().replace("b'", "").replace("'", "") # Workaround for random Exchange/Office365 IMAP errors if "unexpected response" in error or "BAD" in error: sleep_minutes = 5 logger.debug( "{0}. " "Waiting {1} minutes before trying again".format( error, sleep_minutes)) logger.debug("Reconnecting watcher") try: server.logout() except Exception as e: logger.debug("Failed to disconnect: {0}".format( e.__str__())) server = imapclient.IMAPClient(host) server.login(username, password) server.select_folder(rf) idle_start_time = time.monotonic() ms = "MOVE" in get_imap_capabilities(server) res = get_dmarc_reports_from_inbox(connection=server, move_supported=ms, reports_folder=rf, archive_folder=af, delete=delete, test=test, nameservers=ns, dns_timeout=dt) callback(res) server.idle() else: raise IMAPError(error) except socket.gaierror: raise IMAPError("DNS resolution failed") except ConnectionRefusedError: raise IMAPError("Connection refused") except (KeyError, socket.error, BrokenPipeError, ConnectionResetError): logger.debug("IMAP error: Connection reset") logger.debug("Reconnecting watcher") try: server.logout() except Exception as e: logger.debug("Failed to disconnect: {0}".format(e.__str__())) server = imapclient.IMAPClient(host) server.login(username, password) server.select_folder(rf) idle_start_time = time.monotonic() ms = "MOVE" in get_imap_capabilities(server) res = get_dmarc_reports_from_inbox(connection=server, move_supported=ms, reports_folder=rf, archive_folder=af, delete=delete, test=test, nameservers=ns, dns_timeout=dt) callback(res) server.idle() except KeyError: logger.debug("IMAP error: Server returned unexpected result") logger.debug("Reconnecting watcher") try: server.logout() except Exception as e: logger.debug("Failed to log out: {0}".format(e.__str__())) server = imapclient.IMAPClient(host) server.login(username, password) server.select_folder(rf) idle_start_time = time.monotonic() ms = "MOVE" in get_imap_capabilities(server) res = get_dmarc_reports_from_inbox(connection=server, move_supported=ms, reports_folder=rf, archive_folder=af, delete=delete, test=test, nameservers=ns, dns_timeout=dt) callback(res) server.idle() except ConnectionAbortedError: raise IMAPError("Connection aborted") except TimeoutError: raise IMAPError("Connection timed out") except SSLError as error: raise IMAPError("SSL error: {0}".format(error.__str__())) except CertificateError as error: raise IMAPError("Certificate error: {0}".format(error.__str__())) except BrokenPipeError: logger.debug("IMAP error: Broken pipe") logger.debug("Reconnecting watcher") try: server.shutdown() except Exception as e: logger.debug("Failed to disconnect: {0}".format(e.__str__())) server = imapclient.IMAPClient(host) server.login(username, password) server.select_folder(rf) idle_start_time = time.monotonic() res = get_dmarc_reports_from_inbox(connection=server, move_supported=ms, reports_folder=rf, archive_folder=af, delete=delete, test=test, nameservers=ns, dns_timeout=dt) callback(res) server.idle() except KeyboardInterrupt: break try: server.idle_done() except BrokenPipeError: pass
Saves aggregate DMARC reports to Splunk
def save_aggregate_reports_to_splunk(self, aggregate_reports): """ Saves aggregate DMARC reports to Splunk Args: aggregate_reports: A list of aggregate report dictionaries to save in Splunk """ logger.debug("Saving aggregate reports to Splunk") if type(aggregate_reports) == dict: aggregate_reports = [aggregate_reports] if len(aggregate_reports) < 1: return data = self._common_data.copy() json_str = "" for report in aggregate_reports: for record in report["records"]: new_report = dict() for metadata in report["report_metadata"]: new_report[metadata] = report["report_metadata"][metadata] new_report["published_policy"] = report["policy_published"] new_report["source_ip_address"] = record["source"][ "ip_address"] new_report["source_country"] = record["source"]["country"] new_report["source_reverse_dns"] = record["source"][ "reverse_dns"] new_report["source_base_domain"] = record["source"][ "base_domain"] new_report["message_count"] = record["count"] new_report["disposition"] = record["policy_evaluated"][ "disposition" ] new_report["spf_aligned"] = record["alignment"]["spf"] new_report["dkim_aligned"] = record["alignment"]["dkim"] new_report["passed_dmarc"] = record["alignment"]["dmarc"] new_report["header_from"] = record["identifiers"][ "header_from"] new_report["envelope_from"] = record["identifiers"][ "envelope_from"] if "dkim" in record["auth_results"]: new_report["dkim_results"] = record["auth_results"][ "dkim"] if "spf" in record["auth_results"]: new_report["spf_results"] = record["auth_results"][ "spf"] data["sourcetype"] = "dmarc:aggregate" timestamp = human_timestamp_to_timestamp( new_report["begin_date"]) data["time"] = timestamp data["event"] = new_report.copy() json_str += "{0}\n".format(json.dumps(data)) if not self.session.verify: logger.debug("Skipping certificate verification for Splunk HEC") try: response = self.session.post(self.url, data=json_str, timeout=self.timeout) response = response.json() except Exception as e: raise SplunkError(e.__str__()) if response["code"] != 0: raise SplunkError(response["text"])
Saves forensic DMARC reports to Splunk
def save_forensic_reports_to_splunk(self, forensic_reports): """ Saves forensic DMARC reports to Splunk Args: forensic_reports (list): A list of forensic report dictionaries to save in Splunk """ logger.debug("Saving forensic reports to Splunk") if type(forensic_reports) == dict: forensic_reports = [forensic_reports] if len(forensic_reports) < 1: return json_str = "" for report in forensic_reports: data = self._common_data.copy() data["sourcetype"] = "dmarc:forensic" timestamp = human_timestamp_to_timestamp( report["arrival_date_utc"]) data["time"] = timestamp data["event"] = report.copy() json_str += "{0}\n".format(json.dumps(data)) if not self.session.verify: logger.debug("Skipping certificate verification for Splunk HEC") try: response = self.session.post(self.url, data=json_str, timeout=self.timeout) response = response.json() except Exception as e: raise SplunkError(e.__str__()) if response["code"] != 0: raise SplunkError(response["text"])
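As an illustration of the payload shape both Splunk functions above assemble, here is a hedged sketch that posts a single newline-delimited event to an HTTP Event Collector; the URL and token are placeholders, not values from this project.

import json
import requests

# Placeholder HEC endpoint and token -- substitute real values.
url = "https://splunk.example.com:8088/services/collector/event"
headers = {"Authorization": "Splunk 00000000-0000-0000-0000-000000000000"}

# One JSON object per line, matching the json_str format built above.
event = {
    "sourcetype": "dmarc:aggregate",
    "time": 1546300800,
    "event": {"header_from": "example.com", "message_count": 1},
}
response = requests.post(url, headers=headers,
                         data=json.dumps(event) + "\n", timeout=60)
print(response.json())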
Decodes a base64 string with padding being optional
def decode_base64(data):
    """
    Decodes a base64 string, with padding being optional

    Args:
        data: A base64 encoded string

    Returns:
        bytes: The decoded bytes
    """
    data = bytes(data, encoding="ascii")
    missing_padding = len(data) % 4
    if missing_padding != 0:
        data += b'=' * (4 - missing_padding)
    return base64.b64decode(data)
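A quick illustration of the padding fix performed above, using a hypothetical unpadded string:

import base64

data = "SGVsbG8"  # "Hello" base64-encoded with its trailing "=" stripped
missing_padding = len(data) % 4
if missing_padding != 0:
    data += "=" * (4 - missing_padding)
print(base64.b64decode(data))  # b'Hello'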
Gets the base domain name for the given domain
def get_base_domain(domain, use_fresh_psl=False):
    """
    Gets the base domain name for the given domain

    .. note::
        Results are based on a list of public domain suffixes at
        https://publicsuffix.org/list/public_suffix_list.dat.

    Args:
        domain (str): A domain or subdomain
        use_fresh_psl (bool): Download a fresh Public Suffix List

    Returns:
        str: The base domain of the given domain
    """
    psl_path = os.path.join(tempdir, "public_suffix_list.dat")

    def download_psl():
        url = "https://publicsuffix.org/list/public_suffix_list.dat"
        # Use a browser-like user agent string to bypass some proxy blocks
        headers = {"User-Agent": USER_AGENT}
        fresh_psl = requests.get(url, headers=headers).text
        with open(psl_path, "w", encoding="utf-8") as fresh_psl_file:
            fresh_psl_file.write(fresh_psl)

    if use_fresh_psl:
        if not os.path.exists(psl_path):
            download_psl()
        else:
            psl_age = datetime.now() - datetime.fromtimestamp(
                os.stat(psl_path).st_mtime)
            if psl_age > timedelta(hours=24):
                try:
                    download_psl()
                except Exception as error:
                    logger.warning(
                        "Failed to download an updated PSL {0}".format(error))
        with open(psl_path, encoding="utf-8") as psl_file:
            psl = publicsuffix2.PublicSuffixList(psl_file)

        return psl.get_public_suffix(domain)
    else:
        return publicsuffix2.get_public_suffix(domain)
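A minimal sketch of the lookup path taken when use_fresh_psl is False; it calls the same publicsuffix2 helper used above, with example hostnames.

import publicsuffix2

# "co.uk" is a public suffix, so the registrable (base) domain keeps one
# more label than it would for a plain TLD.
print(publicsuffix2.get_public_suffix("mail.example.co.uk"))  # example.co.uk
print(publicsuffix2.get_public_suffix("mail.example.com"))    # example.com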
Queries DNS
def query_dns(domain, record_type, cache=None, nameservers=None, timeout=2.0):
    """
    Queries DNS

    Args:
        domain (str): The domain or subdomain to query about
        record_type (str): The record type to query for
        cache (ExpiringDict): Cache storage
        nameservers (list): A list of one or more nameservers to use
        (Cloudflare's public DNS resolvers by default)
        timeout (float): Sets the DNS timeout in seconds

    Returns:
        list: A list of answers
    """
    domain = str(domain).lower()
    record_type = record_type.upper()
    cache_key = "{0}_{1}".format(domain, record_type)
    if cache:
        records = cache.get(cache_key, None)
        if records:
            return records

    resolver = dns.resolver.Resolver()
    timeout = float(timeout)
    if nameservers is None:
        nameservers = ["1.1.1.1", "1.0.0.1",
                       "2606:4700:4700::1111", "2606:4700:4700::1001",
                       ]
    resolver.nameservers = nameservers
    resolver.timeout = timeout
    resolver.lifetime = timeout
    if record_type == "TXT":
        resource_records = list(map(
            lambda r: r.strings,
            resolver.query(domain, record_type, tcp=True)))
        _resource_record = [
            resource_record[0][:0].join(resource_record)
            for resource_record in resource_records if resource_record]
        records = [r.decode() for r in _resource_record]
    else:
        records = list(map(
            lambda r: r.to_text().replace('"', '').rstrip("."),
            resolver.query(domain, record_type, tcp=True)))
    if cache:
        cache[cache_key] = records

    return records
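For context, a standalone sketch of the same kind of TXT lookup using dnspython's resolver directly, with the defaults described above (Cloudflare resolvers, short timeout). The queried name is only an example and may not have a record.

import dns.resolver

resolver = dns.resolver.Resolver()
resolver.nameservers = ["1.1.1.1", "1.0.0.1"]
resolver.timeout = resolver.lifetime = 2.0
# TXT answers arrive as one or more byte strings that must be joined,
# just as query_dns() does above.
answers = resolver.query("_dmarc.example.com", "TXT", tcp=True)
print([b"".join(rdata.strings).decode() for rdata in answers])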
Resolves an IP address to a hostname using a reverse DNS query
def get_reverse_dns(ip_address, cache=None, nameservers=None, timeout=2.0):
    """
    Resolves an IP address to a hostname using a reverse DNS query

    Args:
        ip_address (str): The IP address to resolve
        cache (ExpiringDict): Cache storage
        nameservers (list): A list of one or more nameservers to use
        (Cloudflare's public DNS resolvers by default)
        timeout (float): Sets the DNS query timeout in seconds

    Returns:
        str: The reverse DNS hostname (if any)
    """
    hostname = None
    try:
        address = dns.reversename.from_address(ip_address)
        hostname = query_dns(address, "PTR", cache=cache,
                             nameservers=nameservers,
                             timeout=timeout)[0]
    except dns.exception.DNSException:
        pass

    return hostname
Converts a human-readable timestamp into a Python DateTime object
def human_timestamp_to_datetime(human_timestamp, to_utc=False):
    """
    Converts a human-readable timestamp into a Python ``DateTime`` object

    Args:
        human_timestamp (str): A timestamp string
        to_utc (bool): Convert the timestamp to UTC

    Returns:
        DateTime: The converted timestamp
    """
    settings = {}

    if to_utc:
        settings = {"TO_TIMEZONE": "UTC"}

    return dateparser.parse(human_timestamp, settings=settings)
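A short usage sketch of the same dateparser call, with an illustrative RFC 2822-style date of the kind found in report headers:

import dateparser

# Offset-aware input normalized to UTC via the TO_TIMEZONE setting.
print(dateparser.parse("Sat, 02 Feb 2019 01:23:45 -0500",
                       settings={"TO_TIMEZONE": "UTC"}))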
Uses the MaxMind Geolite2 Country database to return the ISO code for the country associated with the given IPv4 or IPv6 address
def get_ip_address_country(ip_address, parallel=False): """ Uses the MaxMind Geolite2 Country database to return the ISO code for the country associated with the given IPv4 or IPv6 address Args: ip_address (str): The IP address to query for parallel (bool): Parallel processing Returns: str: And ISO country code associated with the given IP address """ def download_country_database(location="GeoLite2-Country.mmdb"): """Downloads the MaxMind Geolite2 Country database Args: location (str): Local location for the database file """ if parallel: logging.warning("Cannot download GeoIP database in parallel mode") return url = "https://geolite.maxmind.com/download/geoip/database/" \ "GeoLite2-Country.tar.gz" # Use a browser-like user agent string to bypass some proxy blocks headers = {"User-Agent": USER_AGENT} original_filename = "GeoLite2-Country.mmdb" try: response = requests.get(url, headers=headers) response.raise_for_status() tar_bytes = response.content tar_file = tarfile.open(fileobj=BytesIO(tar_bytes), mode="r:gz") tar_dir = tar_file.getnames()[0] tar_path = "{0}/{1}".format(tar_dir, original_filename) tar_file.extract(tar_path) shutil.move(tar_path, location) shutil.rmtree(tar_dir) except Exception as e: logger.warning("Error downloading {0}: {1}".format(url, e.__str__())) system_paths = [ "GeoLite2-Country.mmdb", "/usr/local/share/GeoIP/GeoLite2-Country.mmdb", "/usr/share/GeoIP/GeoLite2-Country.mmdb", "/var/lib/GeoIP/GeoLite2-Country.mmdb", "/var/local/lib/GeoIP/GeoLite2-Country.mmdb", "C:\\GeoIP\\GeoLite2-Country.mmdb" ] db_path = None for system_path in system_paths: if os.path.exists(system_path): db_path = system_path break if db_path is None: db_path = os.path.join(tempdir, "GeoLite2-Country.mmdb") if not os.path.exists(db_path): download_country_database(db_path) if not os.path.exists(db_path): return None else: db_age = datetime.now() - datetime.fromtimestamp( os.stat(db_path).st_mtime) if db_age > timedelta(days=7): download_country_database() db_path = db_path db_reader = geoip2.database.Reader(db_path) country = None try: country = db_reader.country(ip_address).country.iso_code except geoip2.errors.AddressNotFoundError: pass return country
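Once a database file is in place, the lookup itself is small; a sketch assuming a local GeoLite2-Country.mmdb copy in the working directory:

import geoip2.database
import geoip2.errors

# Assumes ./GeoLite2-Country.mmdb exists (downloaded separately).
with geoip2.database.Reader("GeoLite2-Country.mmdb") as reader:
    try:
        print(reader.country("1.1.1.1").country.iso_code)
    except geoip2.errors.AddressNotFoundError:
        print(None)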
Returns reverse DNS and country information for the given IP address
def get_ip_address_info(ip_address, cache=None, nameservers=None,
                        timeout=2.0, parallel=False):
    """
    Returns reverse DNS and country information for the given IP address

    Args:
        ip_address (str): The IP address to check
        cache (ExpiringDict): Cache storage
        nameservers (list): A list of one or more nameservers to use
        (Cloudflare's public DNS resolvers by default)
        timeout (float): Sets the DNS timeout in seconds
        parallel (bool): parallel processing

    Returns:
        OrderedDict: ``ip_address``, ``reverse_dns``
    """
    ip_address = ip_address.lower()
    if cache:
        info = cache.get(ip_address, None)
        if info:
            return info
    info = OrderedDict()
    info["ip_address"] = ip_address
    reverse_dns = get_reverse_dns(ip_address,
                                  nameservers=nameservers,
                                  timeout=timeout)
    country = get_ip_address_country(ip_address, parallel=parallel)
    info["country"] = country
    info["reverse_dns"] = reverse_dns
    info["base_domain"] = None
    if reverse_dns is not None:
        base_domain = get_base_domain(reverse_dns)
        info["base_domain"] = base_domain

    return info
Converts a string to a string that is safe for a filename
def get_filename_safe_string(string):
    """
    Converts a string to a string that is safe for a filename

    Args:
        string (str): A string to make safe for a filename

    Returns:
        str: A string safe for a filename
    """
    invalid_filename_chars = ['\\', '/', ':', '"', '*', '?', '|', '\n',
                              '\r']
    if string is None:
        string = "None"
    for char in invalid_filename_chars:
        string = string.replace(char, "")
    string = string.rstrip(".")

    return string
Uses the msgconvert Perl utility to convert an Outlook MSG file to standard RFC 822 format
def convert_outlook_msg(msg_bytes):
    """
    Uses the ``msgconvert`` Perl utility to convert an Outlook MSG file to
    standard RFC 822 format

    Args:
        msg_bytes (bytes): the content of the .msg file

    Returns:
        A RFC 822 string
    """
    if not is_outlook_msg(msg_bytes):
        raise ValueError("The supplied bytes are not an Outlook MSG file")
    orig_dir = os.getcwd()
    tmp_dir = tempfile.mkdtemp()
    os.chdir(tmp_dir)
    with open("sample.msg", "wb") as msg_file:
        msg_file.write(msg_bytes)
    try:
        subprocess.check_call(["msgconvert", "sample.msg"],
                              stdout=null_file, stderr=null_file)
        eml_path = "sample.eml"
        with open(eml_path, "rb") as eml_file:
            rfc822 = eml_file.read()
    except FileNotFoundError:
        raise EmailParserError(
            "Failed to convert Outlook MSG: msgconvert utility not found")
    finally:
        os.chdir(orig_dir)
        shutil.rmtree(tmp_dir)

    return rfc822
A simplified email parser
def parse_email(data, strip_attachment_payloads=False): """ A simplified email parser Args: data: The RFC 822 message string, or MSG binary strip_attachment_payloads (bool): Remove attachment payloads Returns (dict): Parsed email data """ if type(data) == bytes: if is_outlook_msg(data): data = convert_outlook_msg(data) data = data.decode("utf-8", errors="replace") parsed_email = mailparser.parse_from_string(data) headers = json.loads(parsed_email.headers_json).copy() parsed_email = json.loads(parsed_email.mail_json).copy() parsed_email["headers"] = headers if "received" in parsed_email: for received in parsed_email["received"]: if "date_utc" in received: if received["date_utc"] is None: del received["date_utc"] else: received["date_utc"] = received["date_utc"].replace("T", " ") if "from" not in parsed_email: if "From" in parsed_email["headers"]: parsed_email["from"] = parsed_email["Headers"]["From"] else: parsed_email["from"] = None if parsed_email["from"] is not None: parsed_email["from"] = parse_email_address(parsed_email["from"][0]) if "date" in parsed_email: parsed_email["date"] = parsed_email["date"].replace("T", " ") else: parsed_email["date"] = None if "reply_to" in parsed_email: parsed_email["reply_to"] = list(map(lambda x: parse_email_address(x), parsed_email["reply_to"])) else: parsed_email["reply_to"] = [] if "to" in parsed_email: parsed_email["to"] = list(map(lambda x: parse_email_address(x), parsed_email["to"])) else: parsed_email["to"] = [] if "cc" in parsed_email: parsed_email["cc"] = list(map(lambda x: parse_email_address(x), parsed_email["cc"])) else: parsed_email["cc"] = [] if "bcc" in parsed_email: parsed_email["bcc"] = list(map(lambda x: parse_email_address(x), parsed_email["bcc"])) else: parsed_email["bcc"] = [] if "delivered_to" in parsed_email: parsed_email["delivered_to"] = list( map(lambda x: parse_email_address(x), parsed_email["delivered_to"]) ) if "attachments" not in parsed_email: parsed_email["attachments"] = [] else: for attachment in parsed_email["attachments"]: if "payload" in attachment: payload = attachment["payload"] try: if "content_transfer_encoding" in attachment: if attachment["content_transfer_encoding"] == "base64": payload = decode_base64(payload) else: payload = str.encode(payload) attachment["sha256"] = hashlib.sha256(payload).hexdigest() except Exception as e: logger.debug("Unable to decode attachment: {0}".format( e.__str__() )) if strip_attachment_payloads: for attachment in parsed_email["attachments"]: if "payload" in attachment: del attachment["payload"] if "subject" not in parsed_email: parsed_email["subject"] = None parsed_email["filename_safe_subject"] = get_filename_safe_string( parsed_email["subject"]) if "body" not in parsed_email: parsed_email["body"] = None return parsed_email
Converts a comma separated string to a list
def _str_to_list(s):
    """Converts a comma separated string to a list"""
    _list = s.split(",")
    return list(map(lambda i: i.lstrip(), _list))
Separated this function for multiprocessing
def cli_parse(file_path, sa, nameservers, dns_timeout, parallel=False):
    """Separated this function for multiprocessing"""
    try:
        file_results = parse_report_file(file_path,
                                         nameservers=nameservers,
                                         dns_timeout=dns_timeout,
                                         strip_attachment_payloads=sa,
                                         parallel=parallel)
    except ParserError as error:
        return error, file_path
    finally:
        global counter
        with counter.get_lock():
            counter.value += 1
    return file_results, file_path
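The global counter referenced here is shared across worker processes through a Pool initializer. A self-contained sketch of that pattern, with a trivial work function standing in for the real report parsing:

import time
from multiprocessing import Pool, Value


def init(c):
    # Make the shared counter visible as a module-level global in workers.
    global counter
    counter = c


def work(item):
    time.sleep(0.01)  # stand-in for the real parsing work
    with counter.get_lock():
        counter.value += 1
    return item


if __name__ == "__main__":
    counter = Value("i", 0)
    with Pool(2, initializer=init, initargs=(counter,)) as pool:
        pool.map(work, range(10))
    print(counter.value)  # 10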
Called when the module is executed
def _main(): """Called when the module is executed""" def process_reports(reports_): output_str = "{0}\n".format(json.dumps(reports_, ensure_ascii=False, indent=2)) if not opts.silent: print(output_str) if opts.kafka_hosts: try: ssl_context = None if opts.kafka_skip_certificate_verification: logger.debug("Skipping Kafka certificate verification") ssl_context = create_default_context() ssl_context.check_hostname = False ssl_context.verify_mode = CERT_NONE kafka_client = kafkaclient.KafkaClient( opts.kafka_hosts, username=opts.kafka_username, password=opts.kafka_password, ssl_context=ssl_context ) except Exception as error_: logger.error("Kafka Error: {0}".format(error_.__str__())) if opts.save_aggregate: for report in reports_["aggregate_reports"]: try: if opts.elasticsearch_hosts: elastic.save_aggregate_report_to_elasticsearch( report, index_suffix=opts.elasticsearch_index_suffix, monthly_indexes=opts.elasticsearch_monthly_indexes) except elastic.AlreadySaved as warning: logger.warning(warning.__str__()) except elastic.ElasticsearchError as error_: logger.error("Elasticsearch Error: {0}".format( error_.__str__())) try: if opts.kafka_hosts: kafka_client.save_aggregate_reports_to_kafka( report, kafka_aggregate_topic) except Exception as error_: logger.error("Kafka Error: {0}".format( error_.__str__())) if opts.hec: try: aggregate_reports_ = reports_["aggregate_reports"] if len(aggregate_reports_) > 0: hec_client.save_aggregate_reports_to_splunk( aggregate_reports_) except splunk.SplunkError as e: logger.error("Splunk HEC error: {0}".format(e.__str__())) if opts.save_forensic: for report in reports_["forensic_reports"]: try: if opts.elasticsearch_hosts: elastic.save_forensic_report_to_elasticsearch( report, index_suffix=opts.elasticsearch_index_suffix, monthly_indexes=opts.elasticsearch_monthly_indexes) except elastic.AlreadySaved as warning: logger.warning(warning.__str__()) except elastic.ElasticsearchError as error_: logger.error("Elasticsearch Error: {0}".format( error_.__str__())) except InvalidDMARCReport as error_: logger.error(error_.__str__()) try: if opts.kafka_hosts: kafka_client.save_forensic_reports_to_kafka( report, kafka_forensic_topic) except Exception as error_: logger.error("Kafka Error: {0}".format( error_.__str__())) if opts.hec: try: forensic_reports_ = reports_["forensic_reports"] if len(forensic_reports_) > 0: hec_client.save_forensic_reports_to_splunk( forensic_reports_) except splunk.SplunkError as e: logger.error("Splunk HEC error: {0}".format(e.__str__())) arg_parser = ArgumentParser(description="Parses DMARC reports") arg_parser.add_argument("-c", "--config-file", help="A path to a configuration file " "(--silent implied)") arg_parser.add_argument("file_path", nargs="*", help="one or more paths to aggregate or forensic " "report files or emails") strip_attachment_help = "remove attachment payloads from forensic " \ "report output" arg_parser.add_argument("--strip-attachment-payloads", help=strip_attachment_help, action="store_true") arg_parser.add_argument("-o", "--output", help="write output files to the given directory") arg_parser.add_argument("-n", "--nameservers", nargs="+", help="nameservers to query " "(default is Cloudflare's nameservers)") arg_parser.add_argument("-t", "--dns_timeout", help="number of seconds to wait for an answer " "from DNS (default: 6.0)", type=float, default=6.0) arg_parser.add_argument("-s", "--silent", action="store_true", help="only print errors and warnings") arg_parser.add_argument("--debug", action="store_true", help="print 
debugging information") arg_parser.add_argument("--log-file", default=None, help="output logging to a file") arg_parser.add_argument("-v", "--version", action="version", version=__version__) aggregate_reports = [] forensic_reports = [] args = arg_parser.parse_args() opts = Namespace(file_path=args.file_path, config_file=args.config_file, strip_attachment_payloads=args.strip_attachment_payloads, output=args.output, nameservers=args.nameservers, silent=args.silent, dns_timeout=args.dns_timeout, debug=args.debug, save_aggregate=False, save_forensic=False, imap_host=None, imap_skip_certificate_verification=False, imap_ssl=True, imap_port=993, imap_user=None, imap_password=None, imap_reports_folder="INBOX", imap_archive_folder="Archive", imap_watch=False, imap_delete=False, imap_test=False, hec=None, hec_token=None, hec_index=None, hec_skip_certificate_verification=False, elasticsearch_hosts=None, elasticsearch_index_suffix=None, elasticsearch_ssl=True, elasticsearch_ssl_cert_path=None, elasticsearch_monthly_indexes=False, kafka_hosts=None, kafka_username=None, kafka_password=None, kafka_aggregate_topic=None, kafka_forensic_topic=None, kafka_ssl=False, kafka_skip_certificate_verification=False, smtp_host=None, smtp_port=25, smtp_ssl=False, smtp_skip_certificate_verification=False, smtp_user=None, smtp_password=None, smtp_from=None, smtp_to=[], smtp_subject="parsedmarc report", smtp_message="Please see the attached DMARC results.", log_file=args.log_file, n_procs=1, chunk_size=1 ) args = arg_parser.parse_args() if args.config_file: abs_path = os.path.abspath(args.config_file) if not os.path.exists(abs_path): logger.error("A file does not exist at {0}".format(abs_path)) exit(-1) opts.silent = True config = ConfigParser() config.read(args.config_file) if "general" in config.sections(): general_config = config["general"] if "strip_attachment_payloads" in general_config: opts.strip_attachment_payloads = general_config[ "strip_attachment_payloads"] if "output" in general_config: opts.output = general_config["output"] if "nameservers" in general_config: opts.nameservers = _str_to_list(general_config["nameservers"]) if "dns_timeout" in general_config: opts.dns_timeout = general_config.getfloat("dns_timeout") if "save_aggregate" in general_config: opts.save_aggregate = general_config["save_aggregate"] if "save_forensic" in general_config: opts.save_forensic = general_config["save_forensic"] if "debug" in general_config: opts.debug = general_config.getboolean("debug") if "silent" in general_config: opts.silent = general_config.getboolean("silent") if "log_file" in general_config: opts.log_file = general_config["log_file"] if "n_procs" in general_config: opts.n_procs = general_config.getint("n_procs") if "chunk_size" in general_config: opts.chunk_size = general_config.getint("chunk_size") if "imap" in config.sections(): imap_config = config["imap"] if "host" in imap_config: opts.imap_host = imap_config["host"] else: logger.error("host setting missing from the " "imap config section") exit(-1) if "port" in imap_config: opts.imap_port = imap_config["port"] if "ssl" in imap_config: opts.imap_ssl = imap_config.getboolean("ssl") if "skip_certificate_verification" in imap_config: imap_verify = imap_config.getboolean( "skip_certificate_verification") opts.imap_skip_certificate_verification = imap_verify if "user" in imap_config: opts.imap_user = imap_config["user"] else: logger.critical("user setting missing from the " "imap config section") exit(-1) if "password" in imap_config: opts.imap_password = 
imap_config["password"] else: logger.critical("password setting missing from the " "imap config section") exit(-1) if "reports_folder" in imap_config: opts.imap_reports_folder = imap_config["reports_folder"] if "archive_folder" in imap_config: opts.imap_archive_folder = imap_config["archive_folder"] if "watch" in imap_config: opts.imap_watch = imap_config.getboolean("watch") if "delete" in imap_config: opts.imap_delete = imap_config.getboolean("delete") if "test" in imap_config: opts.imap_test = imap_config.getboolean("test") if "elasticsearch" in config: elasticsearch_config = config["elasticsearch"] if "hosts" in elasticsearch_config: opts.elasticsearch_hosts = _str_to_list(elasticsearch_config[ "hosts"]) else: logger.critical("hosts setting missing from the " "elasticsearch config section") exit(-1) if "index_suffix" in elasticsearch_config: opts.elasticsearch_index_suffix = elasticsearch_config[ "index_suffix"] if "monthly_indexes" in elasticsearch_config: monthly = elasticsearch_config.getboolean("monthly_indexes") opts.elasticsearch_monthly_indexes = monthly if "ssl" in elasticsearch_config: opts.elasticsearch_ssl = elasticsearch_config.getboolean( "ssl") if "cert_path" in elasticsearch_config: opts.elasticsearch_ssl_cert_path = elasticsearch_config[ "cert_path"] if "splunk_hec" in config.sections(): hec_config = config["splunk_hec"] if "url" in hec_config: opts.hec = hec_config["url"] else: logger.critical("url setting missing from the " "splunk_hec config section") exit(-1) if "token" in hec_config: opts.hec_token = hec_config["token"] else: logger.critical("token setting missing from the " "splunk_hec config section") exit(-1) if "index" in hec_config: opts.hec_index = hec_config["index"] else: logger.critical("index setting missing from the " "splunk_hec config section") exit(-1) if "skip_certificate_verification" in hec_config: opts.hec_skip_certificate_verification = hec_config[ "skip_certificate_verification"] if "kafka" in config.sections(): kafka_config = config["kafka"] if "hosts" in kafka_config: opts.kafka_hosts = _str_to_list(kafka_config["hosts"]) else: logger.critical("hosts setting missing from the " "kafka config section") exit(-1) if "user" in kafka_config: opts.kafka_username = kafka_config["user"] else: logger.critical("user setting missing from the " "kafka config section") exit(-1) if "password" in kafka_config: opts.kafka_password = kafka_config["password"] else: logger.critical("password setting missing from the " "kafka config section") exit(-1) if "ssl" in kafka_config: opts.kafka_ssl = kafka_config["ssl"].getboolean() if "skip_certificate_verification" in kafka_config: kafka_verify = kafka_config.getboolean( "skip_certificate_verification") opts.kafka_skip_certificate_verification = kafka_verify if "aggregate_topic" in kafka_config: opts.kafka_aggregate = kafka_config["aggregate_topic"] else: logger.critical("aggregate_topic setting missing from the " "kafka config section") exit(-1) if "forensic_topic" in kafka_config: opts.kafka_username = kafka_config["forensic_topic"] else: logger.critical("forensic_topic setting missing from the " "splunk_hec config section") if "smtp" in config.sections(): smtp_config = config["smtp"] if "host" in smtp_config: opts.smtp_host = smtp_config["host"] else: logger.critical("host setting missing from the " "smtp config section") exit(-1) if "port" in smtp_config: opts.smtp_port = smtp_config["port"] if "ssl" in smtp_config: opts.smtp_ssl = smtp_config.getboolean("ssl") if "skip_certificate_verification" in smtp_config: 
smtp_verify = smtp_config.getboolean( "skip_certificate_verification") opts.smtp_skip_certificate_verification = smtp_verify if "user" in smtp_config: opts.smtp_user = smtp_config["user"] else: logger.critical("user setting missing from the " "smtp config section") exit(-1) if "password" in smtp_config: opts.smtp_password = smtp_config["password"] else: logger.critical("password setting missing from the " "smtp config section") exit(-1) if "from" in smtp_config: opts.smtp_from = smtp_config["from"] else: logger.critical("from setting missing from the " "smtp config section") if "to" in smtp_config: opts.smtp_to = _str_to_list(smtp_config["to"]) else: logger.critical("to setting missing from the " "smtp config section") if "subject" in smtp_config: opts.smtp_subject = smtp_config["subject"] if "attachment" in smtp_config: opts.smtp_attachment = smtp_config["attachment"] if "message" in smtp_config: opts.smtp_message = smtp_config["message"] logging.basicConfig(level=logging.WARNING) logger.setLevel(logging.WARNING) if opts.debug: logging.basicConfig(level=logging.DEBUG) logger.setLevel(logging.DEBUG) if opts.log_file: fh = logging.FileHandler(opts.log_file) formatter = logging.Formatter( '%(asctime)s - ' '%(levelname)s - [%(filename)s:%(lineno)d] - %(message)s') fh.setFormatter(formatter) logger.addHandler(fh) if opts.imap_host is None and len(opts.file_path) == 0: logger.error("You must supply input files, or an IMAP configuration") exit(1) if opts.save_aggregate or opts.save_forensic: try: if opts.elasticsearch_hosts: es_aggregate_index = "dmarc_aggregate" es_forensic_index = "dmarc_forensic" if opts.elasticsearch_index_suffix: suffix = opts.elasticsearch_index_suffix es_aggregate_index = "{0}_{1}".format( es_aggregate_index, suffix) es_forensic_index = "{0}_{1}".format( es_forensic_index, suffix) elastic.set_hosts(opts.elasticsearch_hosts, opts.elasticsearch_ssl, opts.elasticsearch_ssl_cert_path) elastic.migrate_indexes(aggregate_indexes=[es_aggregate_index], forensic_indexes=[es_forensic_index]) except elastic.ElasticsearchError as error: logger.error("Elasticsearch Error: {0}".format(error.__str__())) exit(1) if opts.hec: if opts.hec_token is None or opts.hec_index is None: logger.error("HEC token and HEC index are required when " "using HEC URL") exit(1) verify = True if opts.hec_skip_certificate_verification: verify = False hec_client = splunk.HECClient(opts.hec, opts.hec_token, opts.hec_index, verify=verify) kafka_aggregate_topic = opts.kafka_aggregate_topic kafka_forensic_topic = opts.kafka_forensic_topic file_paths = [] for file_path in args.file_path: file_paths += glob(file_path) file_paths = list(set(file_paths)) counter = Value('i', 0) pool = Pool(opts.n_procs, initializer=init, initargs=(counter,)) results = pool.starmap_async(cli_parse, zip(file_paths, repeat(opts.strip_attachment_payloads), repeat(opts.nameservers), repeat(opts.dns_timeout), repeat(opts.n_procs >= 1)), opts.chunk_size) pbar = tqdm(total=len(file_paths)) while not results.ready(): pbar.update(counter.value - pbar.n) time.sleep(0.1) pbar.close() results = results.get() pool.close() pool.join() for result in results: if type(result[0]) is InvalidDMARCReport: logger.error("Failed to parse {0} - {1}".format(result[1], result[0])) else: if result[0]["report_type"] == "aggregate": aggregate_reports.append(result[0]["report"]) elif result[0]["report_type"] == "forensic": forensic_reports.append(result[0]["report"]) if opts.imap_host: try: if opts.imap_user is None or opts.imap_password is None: logger.error("IMAP 
user and password must be specified if" "host is specified") rf = opts.imap_reports_folder af = opts.imap_archive_folder ns = opts.nameservers sa = opts.strip_attachment_payloads ssl = True ssl_context = None if opts.imap_skip_certificate_verification: logger.debug("Skipping IMAP certificate verification") ssl_context = create_default_context() ssl_context.check_hostname = False ssl_context.verify_mode = CERT_NONE if opts.imap_ssl is False: ssl = False reports = get_dmarc_reports_from_inbox(host=opts.imap_host, port=opts.imap_port, ssl=ssl, ssl_context=ssl_context, user=opts.imap_user, password=opts.imap_password, reports_folder=rf, archive_folder=af, delete=opts.imap_delete, nameservers=ns, test=opts.imap_test, strip_attachment_payloads=sa ) aggregate_reports += reports["aggregate_reports"] forensic_reports += reports["forensic_reports"] except IMAPError as error: logger.error("IMAP Error: {0}".format(error.__str__())) exit(1) results = OrderedDict([("aggregate_reports", aggregate_reports), ("forensic_reports", forensic_reports)]) if opts.output: save_output(results, output_directory=opts.output) process_reports(results) if opts.smtp_host: try: ssl_context = None if opts.smtp_skip_certificate_verification: logger.debug("Skipping SMTP certificate verification") ssl_context = create_default_context() ssl_context.check_hostname = False ssl_context.verify_mode = CERT_NONE email_results(results, opts.smtp_host, opts.smtp_from, opts.smtp_to, ssl=opts.smtp_ssl, user=opts.smtp_user, password=opts.smtp_password, subject=opts.smtp_subject, ssl_context=ssl_context) except SMTPError as error: logger.error("SMTP Error: {0}".format(error.__str__())) exit(1) if opts.imap_host and opts.imap_watch: logger.info("Watching for email - Quit with ctrl-c") ssl = True ssl_context = None if opts.imap_skip_certificate_verification: logger.debug("Skipping IMAP certificate verification") ssl_context = create_default_context() ssl_context.check_hostname = False ssl_context.verify_mode = CERT_NONE if opts.imap_ssl is False: ssl = False try: sa = opts.strip_attachment_payloads watch_inbox(opts.imap_host, opts.imap_user, opts.imap_password, process_reports, port=opts.imap_port, ssl=ssl, ssl_context=ssl_context, reports_folder=opts.imap_reports_folder, archive_folder=opts.imap_archive_folder, delete=opts.imap_delete, test=opts.imap_test, nameservers=opts.nameservers, dns_timeout=opts.dns_timeout, strip_attachment_payloads=sa) except IMAPError as error: logger.error("IMAP error: {0}".format(error.__str__())) exit(1)
Drain will put a connection into a drain state. All subscriptions will immediately be put into a drain state. Upon completion, the publishers will be drained and cannot publish any additional messages. Upon draining of the publishers, the connection will be closed. Use the closed_cb option to know when the connection has moved from draining to closed.
def drain(self, sid=None):
    """
    Drain will put a connection into a drain state. All subscriptions will
    immediately be put into a drain state. Upon completion, the publishers
    will be drained and can not publish any additional messages. Upon
    draining of the publishers, the connection will be closed. Use the
    `closed_cb' option to know when the connection has moved from draining
    to closed.

    If a sid is passed, just the subscription with that sid will be drained
    without closing the connection.
    """
    if self.is_draining:
        return

    if self.is_closed:
        raise ErrConnectionClosed

    if self.is_connecting or self.is_reconnecting:
        raise ErrConnectionReconnecting

    if sid is not None:
        return self._drain_sub(sid)

    # Start draining the subscriptions
    self._status = Client.DRAINING_SUBS

    drain_tasks = []
    for ssid, sub in self._subs.items():
        task = self._drain_sub(ssid)
        drain_tasks.append(task)

    drain_is_done = asyncio.gather(*drain_tasks)
    try:
        yield from asyncio.wait_for(drain_is_done,
                                    self.options["drain_timeout"])
    except asyncio.TimeoutError:
        drain_is_done.exception()
        drain_is_done.cancel()
        if self._error_cb is not None:
            yield from self._error_cb(ErrDrainTimeout)
    except asyncio.CancelledError:
        pass
    finally:
        self._status = Client.DRAINING_PUBS
        yield from self.flush()
        yield from self._close(Client.CLOSED)
Sends a PUB command to the server on the specified subject.
def publish(self, subject, payload):
    """
    Sends a PUB command to the server on the specified subject.

      ->> PUB hello 5
      ->> MSG_PAYLOAD: world
      <<- MSG hello 2 5
    """
    if self.is_closed:
        raise ErrConnectionClosed
    if self.is_draining_pubs:
        raise ErrConnectionDraining

    payload_size = len(payload)
    if payload_size > self._max_payload:
        raise ErrMaxPayload
    yield from self._publish(subject, _EMPTY_, payload, payload_size)
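A minimal usage sketch for this client, written in the same generator-coroutine style as the code above and assuming a NATS server listening on the default nats://127.0.0.1:4222:

import asyncio
from nats.aio.client import Client as NATS


@asyncio.coroutine
def main():
    nc = NATS()
    # connect() defaults to the local server; pass servers=[...] otherwise.
    yield from nc.connect()
    yield from nc.publish("hello", b'world')
    yield from nc.flush()
    yield from nc.close()


loop = asyncio.get_event_loop()
loop.run_until_complete(main())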
Publishes a message tagging it with a reply subscription which can be used by those receiving the message to respond.
def publish_request(self, subject, reply, payload):
    """
    Publishes a message tagging it with a reply subscription
    which can be used by those receiving the message to respond.

      ->> PUB hello _INBOX.2007314fe0fcb2cdc2a2914c1 5
      ->> MSG_PAYLOAD: world
      <<- MSG hello 2 _INBOX.2007314fe0fcb2cdc2a2914c1 5
    """
    if self.is_closed:
        raise ErrConnectionClosed
    if self.is_draining_pubs:
        raise ErrConnectionDraining

    payload_size = len(payload)
    if payload_size > self._max_payload:
        raise ErrMaxPayload
    yield from self._publish(subject, reply.encode(), payload, payload_size)
Sends PUB command to the NATS server.
def _publish(self, subject, reply, payload, payload_size):
    """
    Sends PUB command to the NATS server.
    """
    if subject == "":
        # Avoid sending messages with empty replies.
        raise ErrBadSubject

    payload_size_bytes = ("%d" % payload_size).encode()
    pub_cmd = b''.join([PUB_OP, _SPC_, subject.encode(), _SPC_, reply,
                        _SPC_, payload_size_bytes, _CRLF_,
                        payload, _CRLF_])
    self.stats['out_msgs'] += 1
    self.stats['out_bytes'] += payload_size
    yield from self._send_command(pub_cmd)
    if self._flush_queue.empty():
        yield from self._flush_pending()
Takes a subject string and an optional queue string to send a SUB command, and a callback to which messages (Msg) will be dispatched to be processed sequentially by default.
def subscribe(self, subject, queue="", cb=None, future=None, max_msgs=0, is_async=False, pending_msgs_limit=DEFAULT_SUB_PENDING_MSGS_LIMIT, pending_bytes_limit=DEFAULT_SUB_PENDING_BYTES_LIMIT, ): """ Takes a subject string and optional queue string to send a SUB cmd, and a callback which to which messages (Msg) will be dispatched to be processed sequentially by default. """ if subject == "": raise ErrBadSubject if self.is_closed: raise ErrConnectionClosed if self.is_draining: raise ErrConnectionDraining sub = Subscription(subject=subject, queue=queue, max_msgs=max_msgs, is_async=is_async, ) if cb is not None: if asyncio.iscoroutinefunction(cb): sub.coro = cb elif sub.is_async: raise NatsError( "nats: must use coroutine for async subscriptions") else: # NOTE: Consider to deprecate this eventually, it should always # be coroutines otherwise they could affect the single thread, # for now still allow to be flexible. sub.cb = cb sub.pending_msgs_limit = pending_msgs_limit sub.pending_bytes_limit = pending_bytes_limit sub.pending_queue = asyncio.Queue( maxsize=pending_msgs_limit, loop=self._loop, ) # Close the delivery coroutine over the sub and error handler # instead of having subscription type hold over state of the conn. err_cb = self._error_cb @asyncio.coroutine def wait_for_msgs(): nonlocal sub nonlocal err_cb while True: try: msg = yield from sub.pending_queue.get() sub.pending_size -= len(msg.data) try: # Invoke depending of type of handler. if sub.coro is not None: if sub.is_async: # NOTE: Deprecate this usage in a next release, # the handler implementation ought to decide # the concurrency level at which the messages # should be processed. self._loop.create_task(sub.coro(msg)) else: yield from sub.coro(msg) elif sub.cb is not None: if sub.is_async: raise NatsError( "nats: must use coroutine for async subscriptions") else: # Schedule regular callbacks to be processed sequentially. self._loop.call_soon(sub.cb, msg) except asyncio.CancelledError: # In case the coroutine handler gets cancelled # then stop task loop and return. break except Exception as e: # All errors from calling a handler # are async errors. if err_cb is not None: yield from err_cb(e) except asyncio.CancelledError: break # Start task for each subscription, it should be cancelled # on both unsubscribe and closing as well. sub.wait_for_msgs_task = self._loop.create_task( wait_for_msgs()) elif future is not None: # Used to handle the single response from a request. sub.future = future else: raise NatsError("nats: invalid subscription type") self._ssid += 1 ssid = self._ssid self._subs[ssid] = sub yield from self._subscribe(sub, ssid) return ssid
Sets the subscription to use a task per message to be processed.
def subscribe_async(self, subject, **kwargs):
    """
    Sets the subscription to use a task per message to be processed.

    ..deprecated:: 7.0
      Will be removed 9.0.
    """
    kwargs["is_async"] = True
    sid = yield from self.subscribe(subject, **kwargs)
    return sid
Takes a subscription sequence id and removes the subscription from the client, optionally after receiving more than max_msgs.
def unsubscribe(self, ssid, max_msgs=0):
    """
    Takes a subscription sequence id and removes the subscription
    from the client, optionally after receiving more than max_msgs.
    """
    if self.is_closed:
        raise ErrConnectionClosed
    if self.is_draining:
        raise ErrConnectionDraining

    self._remove_sub(ssid, max_msgs)

    # We will send these for all subs when we reconnect anyway,
    # so that we can suppress here.
    if not self.is_reconnecting:
        yield from self.auto_unsubscribe(ssid, max_msgs)
Implements the request/response pattern via pub/sub using a single wildcard subscription that handles the responses.
def request(self, subject, payload, timeout=0.5, expected=1, cb=None): """ Implements the request/response pattern via pub/sub using a single wildcard subscription that handles the responses. """ if self.is_draining_pubs: raise ErrConnectionDraining # If callback given then continue to use old style. if cb is not None: next_inbox = INBOX_PREFIX[:] next_inbox.extend(self._nuid.next()) inbox = next_inbox.decode() sid = yield from self.subscribe(inbox, cb=cb) yield from self.auto_unsubscribe(sid, expected) yield from self.publish_request(subject, inbox, payload) return sid if self._resp_sub_prefix is None: self._resp_map = {} # Create a prefix and single wildcard subscription once. self._resp_sub_prefix = INBOX_PREFIX[:] self._resp_sub_prefix.extend(self._nuid.next()) self._resp_sub_prefix.extend(b'.') resp_mux_subject = self._resp_sub_prefix[:] resp_mux_subject.extend(b'*') sub = Subscription(subject=resp_mux_subject.decode()) # FIXME: Allow setting pending limits for responses mux subscription. sub.pending_msgs_limit = DEFAULT_SUB_PENDING_MSGS_LIMIT sub.pending_bytes_limit = DEFAULT_SUB_PENDING_BYTES_LIMIT sub.pending_queue = asyncio.Queue( maxsize=sub.pending_msgs_limit, loop=self._loop, ) # Single task for handling the requests @asyncio.coroutine def wait_for_msgs(): nonlocal sub while True: try: msg = yield from sub.pending_queue.get() token = msg.subject[INBOX_PREFIX_LEN:] try: fut = self._resp_map[token] fut.set_result(msg) del self._resp_map[token] except (asyncio.CancelledError, asyncio.InvalidStateError): # Request may have timed out already so remove entry. del self._resp_map[token] continue except KeyError: # Future already handled so drop any extra # responses which may have made it. continue except asyncio.CancelledError: break sub.wait_for_msgs_task = self._loop.create_task( wait_for_msgs()) # Store the subscription in the subscriptions map, # then send the protocol commands to the server. self._ssid += 1 ssid = self._ssid self._subs[ssid] = sub yield from self._subscribe(sub, ssid) # Use a new NUID for the token inbox and then use the future. token = self._nuid.next() inbox = self._resp_sub_prefix[:] inbox.extend(token) future = asyncio.Future(loop=self._loop) self._resp_map[token.decode()] = future yield from self.publish_request(subject, inbox.decode(), payload) # Wait for the response or give up on timeout. try: msg = yield from asyncio.wait_for(future, timeout, loop=self._loop) return msg except asyncio.TimeoutError: future.cancel() raise ErrTimeout
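A hedged usage sketch of this request path: a responder coroutine answers on "help", and request() resolves with the first reply or raises ErrTimeout. It assumes a local NATS server and uses the same generator-coroutine style as the code above.

import asyncio
from nats.aio.client import Client as NATS


@asyncio.coroutine
def main():
    nc = NATS()
    yield from nc.connect()  # assumes a local NATS server

    @asyncio.coroutine
    def responder(msg):
        # Reply on the inbox subject carried by the request.
        yield from nc.publish(msg.reply, b'I can help')

    yield from nc.subscribe("help", cb=responder)
    response = yield from nc.request("help", b'please', timeout=1)
    print(response.data)  # b'I can help'
    yield from nc.close()


loop = asyncio.get_event_loop()
loop.run_until_complete(main())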
Implements the request/response pattern via pub/sub using an ephemeral subscription which will be published with a limited interest of 1 reply, returning the response or raising a Timeout error.
def timed_request(self, subject, payload, timeout=0.5):
    """
    Implements the request/response pattern via pub/sub
    using an ephemeral subscription which will be published
    with a limited interest of 1 reply returning the response
    or raising a Timeout error.

      ->> SUB _INBOX.2007314fe0fcb2cdc2a2914c1 90
      ->> UNSUB 90 1
      ->> PUB hello _INBOX.2007314fe0fcb2cdc2a2914c1 5
      ->> MSG_PAYLOAD: world
      <<- MSG hello 2 _INBOX.2007314fe0fcb2cdc2a2914c1 5
    """
    next_inbox = INBOX_PREFIX[:]
    next_inbox.extend(self._nuid.next())
    inbox = next_inbox.decode()

    future = asyncio.Future(loop=self._loop)
    sid = yield from self.subscribe(inbox, future=future, max_msgs=1)
    yield from self.auto_unsubscribe(sid, 1)
    yield from self.publish_request(subject, inbox, payload)

    try:
        msg = yield from asyncio.wait_for(future, timeout, loop=self._loop)
        return msg
    except asyncio.TimeoutError:
        future.cancel()
        raise ErrTimeout
Sends a ping to the server expecting a pong back ensuring what we have written so far has made it to the server and also enabling measuring of roundtrip time. In case a pong is not returned within the allowed timeout then it will raise ErrTimeout.
def flush(self, timeout=60):
    """
    Sends a ping to the server expecting a pong back ensuring
    what we have written so far has made it to the server and
    also enabling measuring of roundtrip time.
    In case a pong is not returned within the allowed timeout,
    then it will raise ErrTimeout.
    """
    if timeout <= 0:
        raise ErrBadTimeout

    if self.is_closed:
        raise ErrConnectionClosed

    future = asyncio.Future(loop=self._loop)
    try:
        yield from self._send_ping(future)
        yield from asyncio.wait_for(future, timeout, loop=self._loop)
    except asyncio.TimeoutError:
        future.cancel()
        raise ErrTimeout
Looks up an available server in the server pool and attempts to connect.
def _select_next_server(self): """ Looks up in the server pool for an available server and attempts to connect. """ while True: if len(self._server_pool) == 0: self._current_server = None raise ErrNoServers now = time.monotonic() s = self._server_pool.pop(0) if self.options["max_reconnect_attempts"] > 0: if s.reconnects > self.options["max_reconnect_attempts"]: # Discard server since already tried to reconnect too many times continue # Not yet exceeded max_reconnect_attempts so can still use # this server in the future. self._server_pool.append(s) if s.last_attempt is not None and now < s.last_attempt + self.options["reconnect_time_wait"]: # Backoff connecting to server if we attempted recently. yield from asyncio.sleep(self.options["reconnect_time_wait"], loop=self._loop) try: s.last_attempt = time.monotonic() r, w = yield from asyncio.open_connection( s.uri.hostname, s.uri.port, loop=self._loop, limit=DEFAULT_BUFFER_SIZE) self._current_server = s # We keep a reference to the initial transport we used when # establishing the connection in case we later upgrade to TLS # after getting the first INFO message. This is in order to # prevent the GC closing the socket after we send CONNECT # and replace the transport. # # See https://github.com/nats-io/asyncio-nats/issues/43 self._bare_io_reader = self._io_reader = r self._bare_io_writer = self._io_writer = w break except Exception as e: s.last_attempt = time.monotonic() s.reconnects += 1 self._err = e if self._error_cb is not None: yield from self._error_cb(e) continue
Processes the raw error message sent by the server and closes the connection with the current server.
def _process_err(self, err_msg):
        """
        Processes the raw error message sent by the server
        and closes the connection with the current server.
        """
        if STALE_CONNECTION in err_msg:
            yield from self._process_op_err(ErrStaleConnection)
            return

        if AUTHORIZATION_VIOLATION in err_msg:
            self._err = ErrAuthorization
        else:
            m = b'nats: ' + err_msg[0]
            self._err = NatsError(m.decode())

        do_cbs = False
        if not self.is_connecting:
            do_cbs = True

        # FIXME: Some errors such as 'Invalid Subscription'
        # do not cause the server to close the connection.
        # For now we handle them like other clients do and close.
        self._loop.create_task(self._close(Client.CLOSED, do_cbs))
Processes errors which occurred while reading or parsing the protocol. If allow_reconnect is enabled, it will try to switch to another server; otherwise it will disconnect.
def _process_op_err(self, e):
        """
        Processes errors which occurred while reading or parsing
        the protocol. If allow_reconnect is enabled, it will try to
        switch to another server; otherwise it will disconnect.
        """
        if self.is_connecting or self.is_closed or self.is_reconnecting:
            return

        if self.options["allow_reconnect"] and self.is_connected:
            self._status = Client.RECONNECTING
            self._ps.reset()

            if self._reconnection_task is not None and not self._reconnection_task.cancelled():
                # Cancel the previous task in case it may still be running.
                self._reconnection_task.cancel()

            self._reconnection_task = self._loop.create_task(
                self._attempt_reconnect())
        else:
            self._process_disconnect()
            self._err = e
            yield from self._close(Client.CLOSED, True)
Generates a JSON string with the params to be used when sending CONNECT to the server.
def _connect_command(self): ''' Generates a JSON string with the params to be used when sending CONNECT to the server. ->> CONNECT {"lang": "python3"} ''' options = { "verbose": self.options["verbose"], "pedantic": self.options["pedantic"], "lang": __lang__, "version": __version__, "protocol": PROTOCOL } if "auth_required" in self._server_info: if self._server_info["auth_required"]: # In case there is no password, then consider handle # sending a token instead. if self.options["user"] is not None and self.options["password"] is not None: options["user"] = self.options["user"] options["pass"] = self.options["password"] elif self.options["token"] is not None: options["auth_token"] = self.options["token"] elif self._current_server.uri.password is None: options["auth_token"] = self._current_server.uri.username else: options["user"] = self._current_server.uri.username options["pass"] = self._current_server.uri.password if self.options["name"] is not None: options["name"] = self.options["name"] if self.options["no_echo"] is not None: options["echo"] = not self.options["no_echo"] connect_opts = json.dumps(options, sort_keys=True) return b''.join([CONNECT_OP + _SPC_ + connect_opts.encode() + _CRLF_])
Process PONG sent by server.
def _process_pong(self): """ Process PONG sent by server. """ if len(self._pongs) > 0: future = self._pongs.pop(0) future.set_result(True) self._pongs_received += 1 self._pings_outstanding -= 1
Process MSG sent by server.
def _process_msg(self, sid, subject, reply, data):
        """
        Process MSG sent by server.
        """
        payload_size = len(data)
        self.stats['in_msgs'] += 1
        self.stats['in_bytes'] += payload_size

        sub = self._subs.get(sid)
        if sub is None:
            # Skip in case no subscription present.
            return

        sub.received += 1
        if sub.max_msgs > 0 and sub.received >= sub.max_msgs:
            # Enough messages received, so the subscription can be
            # thrown away now.
            self._subs.pop(sid, None)

        msg = self._build_message(subject, reply, data)

        # Check if it is an old style request.
        if sub.future is not None:
            if sub.future.cancelled():
                # Already gave up, nothing to do.
                return
            sub.future.set_result(msg)
            return

        # Let the subscription wait_for_msgs coroutine process the messages,
        # but in case sending to the subscription task would block,
        # then consider it to be a slow consumer and drop the message.
        try:
            sub.pending_size += payload_size
            if sub.pending_size >= sub.pending_bytes_limit:
                # Subtract the bytes again since the message is being
                # thrown away, so it should not count as pending data.
                sub.pending_size -= payload_size

                if self._error_cb is not None:
                    yield from self._error_cb(
                        ErrSlowConsumer(subject=subject, sid=sid))
                return
            sub.pending_queue.put_nowait(msg)
        except asyncio.QueueFull:
            if self._error_cb is not None:
                yield from self._error_cb(
                    ErrSlowConsumer(subject=subject, sid=sid))
Processes INFO lines sent by the server to reconfigure the client with the latest updates from the cluster, enabling server discovery.
def _process_info(self, info): """ Process INFO lines sent by the server to reconfigure client with latest updates from cluster to enable server discovery. """ if 'connect_urls' in info: if info['connect_urls']: connect_urls = [] for connect_url in info['connect_urls']: uri = urlparse("nats://%s" % connect_url) srv = Srv(uri) srv.discovered = True # Filter for any similar server in the server pool already. should_add = True for s in self._server_pool: if uri.netloc == s.uri.netloc: should_add = False if should_add: connect_urls.append(srv) if self.options["dont_randomize"] is not True: shuffle(connect_urls) for srv in connect_urls: self._server_pool.append(srv)
Processes the INFO received from the server and sends CONNECT with authentication. It is also responsible for setting up the client's read loop and ping interval tasks.
def _process_connect_init(self): """ Process INFO received from the server and CONNECT to the server with authentication. It is also responsible of setting up the reading and ping interval tasks from the client. """ self._status = Client.CONNECTING connection_completed = self._io_reader.readline() info_line = yield from asyncio.wait_for(connection_completed, self.options["connect_timeout"]) if INFO_OP not in info_line: raise NatsError("nats: empty response from server when expecting INFO message") _, info = info_line.split(INFO_OP + _SPC_, 1) try: srv_info = json.loads(info.decode()) except: raise NatsError("nats: info message, json parse error") self._process_info(srv_info) self._server_info = srv_info if 'max_payload' in self._server_info: self._max_payload = self._server_info["max_payload"] if 'tls_required' in self._server_info and self._server_info['tls_required']: ssl_context = None if "tls" in self.options: ssl_context = self.options.get('tls') elif self._current_server.uri.scheme == 'tls': ssl_context = ssl.create_default_context() else: raise NatsError('nats: no ssl context provided') transport = self._io_writer.transport sock = transport.get_extra_info('socket') if not sock: # This shouldn't happen raise NatsError('nats: unable to get socket') yield from self._io_writer.drain() # just in case something is left self._io_reader, self._io_writer = \ yield from asyncio.open_connection( loop=self._loop, limit=DEFAULT_BUFFER_SIZE, sock=sock, ssl=ssl_context, server_hostname=self._current_server.uri.hostname, ) # Refresh state of parser upon reconnect. if self.is_reconnecting: self._ps.reset() connect_cmd = self._connect_command() self._io_writer.write(connect_cmd) self._io_writer.write(PING_PROTO) yield from self._io_writer.drain() # FIXME: Add readline timeout next_op = yield from self._io_reader.readline() if self.options["verbose"] and OK_OP in next_op: next_op = yield from self._io_reader.readline() if ERR_OP in next_op: err_line = next_op.decode() _, err_msg = err_line.split(" ", 1) # FIXME: Maybe handling could be more special here, # checking for ErrAuthorization for example. # yield from self._process_err(err_msg) raise NatsError("nats: " + err_msg.rstrip('\r\n')) if PONG_PROTO in next_op: self._status = Client.CONNECTED self._reading_task = self._loop.create_task(self._read_loop()) self._pongs = [] self._pings_outstanding = 0 self._ping_interval_task = self._loop.create_task( self._ping_interval()) # Task for kicking the flusher queue self._flusher_task = self._loop.create_task(self._flusher())
Coroutine which continuously tries to consume pending commands and then flushes them to the socket.
def _flusher(self): """ Coroutine which continuously tries to consume pending commands and then flushes them to the socket. """ while True: if not self.is_connected or self.is_connecting: break try: yield from self._flush_queue.get() if self._pending_data_size > 0: self._io_writer.writelines(self._pending[:]) self._pending = [] self._pending_data_size = 0 yield from self._io_writer.drain() except OSError as e: if self._error_cb is not None: yield from self._error_cb(e) yield from self._process_op_err(e) break except asyncio.CancelledError: break
Coroutine which gathers bytes sent by the server and feeds them to the protocol parser. In case of an error while reading, it stops running and its task has to be rescheduled.
def _read_loop(self): """ Coroutine which gathers bytes sent by the server and feeds them to the protocol parser. In case of error while reading, it will stop running and its task has to be rescheduled. """ while True: try: should_bail = self.is_closed or self.is_reconnecting if should_bail or self._io_reader is None: break if self.is_connected and self._io_reader.at_eof(): if self._error_cb is not None: yield from self._error_cb(ErrStaleConnection) yield from self._process_op_err(ErrStaleConnection) break b = yield from self._io_reader.read(DEFAULT_BUFFER_SIZE) yield from self._ps.parse(b) except ErrProtocol: yield from self._process_op_err(ErrProtocol) break except OSError as e: yield from self._process_op_err(e) break except asyncio.CancelledError: break
Compute and save a coactivation map, given an input image as the seed.
def coactivation(dataset, seed, threshold=0.0, output_dir='.', prefix='', r=6): """ Compute and save coactivation map given input image as seed. This is essentially just a wrapper for a meta-analysis defined by the contrast between those studies that activate within the seed and those that don't. Args: dataset: a Dataset instance containing study and activation data. seed: either a Nifti or Analyze image defining the boundaries of the seed, or a list of triples (x/y/z) defining the seed(s). Note that voxels do not need to be contiguous to define a seed--all supra- threshold voxels will be lumped together. threshold: optional float indicating the threshold above which voxels are considered to be part of the seed ROI (default = 0) r: optional integer indicating radius (in mm) of spheres to grow (only used if seed is a list of coordinates). output_dir: output directory to write to. Defaults to current. If none, defaults to using the first part of the seed filename. prefix: optional string to prepend to all coactivation images. Output: A set of meta-analysis images identical to that generated by meta.MetaAnalysis. """ if isinstance(seed, string_types): ids = dataset.get_studies(mask=seed, activation_threshold=threshold) else: ids = dataset.get_studies(peaks=seed, r=r, activation_threshold=threshold) ma = meta.MetaAnalysis(dataset, ids) ma.save_results(output_dir, prefix)
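A short usage sketch; Dataset.load() and the filenames are illustrative assumptions rather than part of the function above:

from neurosynth.base.dataset import Dataset  # import path assumed

dataset = Dataset.load('dataset.pkl')  # assumed pre-built Dataset pickle

# Seed with a mask image...
coactivation(dataset, 'amygdala_seed.nii.gz', threshold=0.1,
             output_dir='amygdala_coactivation', prefix='amygdala')

# ...or with a list of x/y/z coordinates grown into 6 mm spheres.
coactivation(dataset, seed=[(24, -2, -20)], r=6,
             output_dir='peak_coactivation', prefix='peak')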
Decodes a set of images.
def decode(self, images, save=None, round=4, names=None, **kwargs): """ Decodes a set of images. Args: images: The images to decode. Can be: - A single String specifying the filename of the image to decode - A list of filenames - A single NumPy array containing the image data save: Optional filename to save results to. If None (default), returns all results as an array. round: Optional integer indicating number of decimals to round result to. Defaults to 4. names: Optional list of names corresponding to the images in filenames. If passed, must be of same length and in same order as filenames. By default, the columns in the output will be named using the image filenames. Returns: An n_features x n_files numpy array, where each feature is a row and each image is a column. The meaning of the values depends on the decoding method used. """ if isinstance(images, string_types): images = [images] if isinstance(images, list): imgs_to_decode = imageutils.load_imgs(images, self.masker) else: imgs_to_decode = images methods = { 'pearson': self._pearson_correlation, 'dot': self._dot_product, 'roi': self._roi_association } result = np.around( methods[self.method](imgs_to_decode, **kwargs), round) # if save is not None: if names is None: if type(images).__module__ == np.__name__: names = ['image_%d' % i for i in range(images.shape[1])] elif self.method == 'roi': names = ['cluster_%d' % i for i in range(result.shape[1])] else: names = images result = pd.DataFrame(result, columns=names, index=self.feature_names) if save is not None: result.to_csv(save, index_label='Feature') return result
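A hedged usage sketch; it assumes a Decoder instance named `decoder` has already been constructed for a Dataset, and that 'my_map.nii.gz' is a hypothetical image in the same space as the Dataset's masker:

# Decode a single image and also save the result to disk.
result = decoder.decode(['my_map.nii.gz'], save='decoding_results.csv')
print(result.head())  # features in rows, one column per decoded image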
Load features from current Dataset instance or a list of files. Args: features: List containing paths to, or names of, features to extract. Each element in the list must be a string containing either a path to an image or the name of a feature (as named in the current Dataset). Mixing of paths and feature names within the list is not allowed. image_type: Optional suffix indicating which kind of image to use for analysis. Only used if features are taken from the Dataset; if features is a list of filenames, image_type is ignored. from_array: If True, the features argument is interpreted as a string pointing to the location of a 2D ndarray on disk containing feature data, where rows are voxels and columns are individual features. threshold: If features are taken from the dataset, this is the threshold passed to the meta-analysis module to generate fresh images.
def load_features(self, features, image_type=None, from_array=False, threshold=0.001): """ Load features from current Dataset instance or a list of files. Args: features: List containing paths to, or names of, features to extract. Each element in the list must be a string containing either a path to an image, or the name of a feature (as named in the current Dataset). Mixing of paths and feature names within the list is not allowed. image_type: Optional suffix indicating which kind of image to use for analysis. Only used if features are taken from the Dataset; if features is a list of filenames, image_type is ignored. from_array: If True, the features argument is interpreted as a string pointing to the location of a 2D ndarray on disk containing feature data, where rows are voxels and columns are individual features. threshold: If features are taken from the dataset, this is the threshold passed to the meta-analysis module to generate fresh images. """ if from_array: if isinstance(features, list): features = features[0] self._load_features_from_array(features) elif path.exists(features[0]): self._load_features_from_images(features) else: self._load_features_from_dataset( features, image_type=image_type, threshold=threshold)
Load feature data from a 2D ndarray on disk.
def _load_features_from_array(self, features): """ Load feature data from a 2D ndarray on disk. """ self.feature_images = np.load(features) self.feature_names = range(self.feature_images.shape[1])
Load feature image data from the current Dataset instance. See load_features() for documentation.
def _load_features_from_dataset(self, features=None, image_type=None, threshold=0.001): """ Load feature image data from the current Dataset instance. See load_features() for documentation. """ self.feature_names = self.dataset.feature_table.feature_names if features is not None: self.feature_names = [f for f in features if f in self.feature_names] from neurosynth.analysis import meta self.feature_images = meta.analyze_features( self.dataset, self.feature_names, image_type=image_type, threshold=threshold) # Apply a mask if one was originally passed if self.masker.layers: in_mask = self.masker.get_mask(in_global_mask=True) self.feature_images = self.feature_images[in_mask, :]
Load feature image data from image files.
def _load_features_from_images(self, images, names=None): """ Load feature image data from image files. Args: images: A list of image filenames. names: An optional list of strings to use as the feature names. Must be in the same order as the images. """ if names is not None and len(names) != len(images): raise Exception( "Lists of feature names and images must be of same length!") self.feature_names = names if names is not None else images self.feature_images = imageutils.load_imgs(images, self.masker)
Decode images using Pearson's r.
def _pearson_correlation(self, imgs_to_decode): """ Decode images using Pearson's r. Computes the correlation between each input image and each feature image across voxels. Args: imgs_to_decode: An ndarray of images to decode, with voxels in rows and images in columns. Returns: An n_features x n_images 2D array, with each cell representing the pearson correlation between the i'th feature and the j'th image across all voxels. """ x, y = imgs_to_decode.astype(float), self.feature_images.astype(float) return self._xy_corr(x, y)
Decoding using the dot product.
def _dot_product(self, imgs_to_decode): """ Decoding using the dot product. """ return np.dot(imgs_to_decode.T, self.feature_images).T
Computes the strength of association between activation in a mask and presence/absence of a semantic feature. This is essentially a generalization of the voxel-wise reverse inference z-score to the multivoxel case.
def _roi_association(self, imgs_to_decode, value='z', binarize=None): """ Computes the strength of association between activation in a mask and presence/absence of a semantic feature. This is essentially a generalization of the voxel-wise reverse inference z-score to the multivoxel case. """ imgs_to_decode = imgs_to_decode.squeeze() x = average_within_regions(self.dataset, imgs_to_decode).astype(float) y = self.dataset.feature_table.data[self.feature_names].values if binarize is not None: y[y > binarize] = 1. y[y < 1.] = 0. r = self._xy_corr(x.T, y) if value == 'r': return r elif value == 'z': f_r = np.arctanh(r) return f_r * np.sqrt(y.shape[0] - 3)
Implements various kinds of feature selection
def feature_selection(feat_select, X, y):
    """ Implements various kinds of feature selection """
    features_selected = None

    # K-best univariate feature selection
    if re.match('.*-best', feat_select) is not None:
        n = int(feat_select.split('-')[0])

        selector = SelectKBest(k=n)

        import warnings
        with warnings.catch_warnings():
            warnings.simplefilter('ignore', category=UserWarning)
            # get_support() returns a boolean mask; use it directly.
            # Comparing it with `is True` always evaluates to False for
            # an ndarray, which would select no features at all.
            features_selected = np.where(
                selector.fit(X, y).get_support())[0]

    # Random selection of n features
    elif re.match('.*-randombest', feat_select) is not None:
        n = int(feat_select.split('-')[0])

        from random import shuffle
        features = list(range(X.shape[1]))
        shuffle(features)

        features_selected = features[:n]

    return features_selected
Set up data for a classification task given a set of masks
def get_studies_by_regions(dataset, masks, threshold=0.08,
                           remove_overlap=True, studies=None,
                           features=None, regularization="scale"):
    """ Set up data for a classification task given a set of masks

    Given a set of masks, this function retrieves studies associated with
    each mask at the specified threshold, optionally removes overlap and
    filters by studies and features, and returns a studies by features
    matrix (X) and class labels (y).

    Args:
        dataset: a Neurosynth dataset
        masks: a list of paths to Nifti masks
        threshold: percentage of voxels active within the mask for a study
            to be included
        remove_overlap: A boolean indicating if studies that appear in
            more than one mask should be excluded
        studies: An optional list of study names used to constrain the set
            used in classification. If None, will use all studies in the
            dataset.
        features: An optional list of feature names used to constrain the
            set used in classification. If None, will use all features in
            the dataset.
        regularization: Optional string indicating how X should be
            regularized (passed to regularize()). If None or empty, no
            regularization is performed.

    Returns:
        A tuple (X, y) of np arrays. X is a studies by features matrix and
        y is a vector of class labels.
    """

    import nibabel as nib
    import os

    # Load masks using NiBabel
    try:
        loaded_masks = [nib.load(os.path.relpath(m)) for m in masks]
    except OSError:
        print('Error loading masks. Check the path')
        raise

    # Get a list of studies that activate for each mask file--i.e., a list
    # of lists
    grouped_ids = [dataset.get_studies(mask=m, activation_threshold=threshold)
                   for m in loaded_masks]

    # Flattened ids
    flat_ids = reduce(lambda a, b: a + b, grouped_ids)

    # Remove duplicates
    if remove_overlap:
        import collections
        flat_ids = [id for (id, count) in
                    collections.Counter(flat_ids).items() if count == 1]
        # Remove overlapping ids from each group
        grouped_ids = [[x for x in m if x in flat_ids] for m in grouped_ids]

    # Create class labels (y)
    y = [[idx] * len(ids) for (idx, ids) in enumerate(grouped_ids)]
    y = reduce(lambda a, b: a + b, y)  # Flatten
    y = np.array(y)

    # Extract feature set for each class separately
    X = [dataset.get_feature_data(ids=group_ids, features=features)
         for group_ids in grouped_ids]
    X = np.vstack(tuple(X))

    if regularization:
        X = regularize(X, method=regularization)

    return (X, y)
Returns a list with the order in which the requested features appear in the dataset
def get_feature_order(dataset, features): """ Returns a list with the order that features requested appear in dataset """ all_features = dataset.get_feature_names() i = [all_features.index(f) for f in features] return i
Perform classification on specified regions
def classify_regions(dataset, masks, method='ERF', threshold=0.08, remove_overlap=True, regularization='scale', output='summary', studies=None, features=None, class_weight='auto', classifier=None, cross_val='4-Fold', param_grid=None, scoring='accuracy'): """ Perform classification on specified regions Given a set of masks, this function retrieves studies associated with each mask at the specified threshold, optionally removes overlap and filters by studies and features. Then it trains an algorithm to classify studies based on features and tests performance. Args: dataset: a Neurosynth dataset maks: a list of paths to Nifti masks method: a string indicating which method to used. 'SVM': Support Vector Classifier with rbf kernel 'ERF': Extremely Randomized Forest classifier 'Dummy': A dummy classifier using stratified classes as predictor threshold: percentage of voxels active within the mask for study to be included remove_overlap: A boolean indicating if studies studies that appear in more than one mask should be excluded regularization: A string indicating type of regularization to use. If None, performs no regularization. 'scale': Unit scale without demeaning output: A string indicating output type 'summary': Dictionary with summary statistics including score and n 'summary_clf': Same as above but also includes classifier 'clf': Only returns classifier Warning: using cv without grid will return an untrained classifier studies: An optional list of study names used to constrain the set used in classification. If None, will use all features in the dataset. features: An optional list of feature names used to constrain the set used in classification. If None, will use all features in the dataset. class_weight: Parameter to pass to classifier determining how to weight classes classifier: An optional sci-kit learn classifier to use instead of pre-set up classifiers set up using 'method' cross_val: A string indicating type of cross validation to use. Can also pass a scikit_classifier param_grid: A dictionary indicating which parameters to optimize using GridSearchCV. If None, no GridSearch will be used Returns: A tuple (X, y) of np arrays. X is a feature by studies matrix and y is a vector of class labels """ (X, y) = get_studies_by_regions(dataset, masks, threshold, remove_overlap, studies, features, regularization=regularization) return classify(X, y, method, classifier, output, cross_val, class_weight, scoring=scoring, param_grid=param_grid)
Wrapper for scikit-learn classification functions. Implements various types of classification and cross-validation
def classify(X, y, clf_method='ERF', classifier=None, output='summary_clf',
             cross_val=None, class_weight=None, regularization=None,
             param_grid=None, scoring='accuracy', refit_all=True,
             feat_select=None):
    """ Wrapper for scikit-learn classification functions.
    Implements various types of classification and cross-validation """

    # Build classifier
    clf = Classifier(clf_method, classifier, param_grid)

    # Fit & test model with or without cross-validation
    if cross_val is not None:
        score = clf.cross_val_fit(X, y, cross_val, scoring=scoring,
                                  feat_select=feat_select,
                                  class_weight=class_weight)
    else:
        # Does not support scoring function
        score = clf.fit(X, y, class_weight=class_weight).score(X, y)

    # Assemble the requested output
    from collections import Counter
    if output == 'clf':
        return clf
    else:
        if output == 'summary':
            output = {'score': score, 'n': dict(Counter(y))}
        elif output == 'summary_clf':
            output = {
                'score': score,
                'n': dict(Counter(y)),
                'clf': clf,
                'features_selected': clf.features_selected,
                'predictions': clf.predictions
            }
        return output
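A hedged end-to-end sketch combining get_studies_by_regions() above with this wrapper; the import path, Dataset pickle, and mask filenames are illustrative placeholders:

from neurosynth.base.dataset import Dataset  # import path assumed

dataset = Dataset.load('dataset.pkl')        # assumed pre-built Dataset
masks = ['region_a.nii.gz', 'region_b.nii.gz']

X, y = get_studies_by_regions(dataset, masks, threshold=0.08)
summary = classify(X, y, clf_method='ERF', cross_val='4-Fold',
                   class_weight='auto', output='summary')
print(summary['score'], summary['n'])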
Fits X to outcomes y using clf
def fit(self, X, y, cv=None, class_weight='auto'):
        """ Fits X to outcomes y, using clf """

        # TODO: Incorporate error checking, e.g.:
        #   if isinstance(self.classifier, ScikitClassifier):
        #       do one thing
        #   otherwise do another

        self.X = X
        self.y = y

        self.set_class_weight(class_weight=class_weight, y=y)

        self.clf = self.clf.fit(X, y)

        return self.clf
Sets the class_weight of the classifier to match y
def set_class_weight(self, class_weight='auto', y=None): """ Sets the class_weight of the classifier to match y """ if class_weight is None: cw = None try: self.clf.set_params(class_weight=cw) except ValueError: pass elif class_weight == 'auto': c = np.bincount(y) ii = np.nonzero(c)[0] c = c / float(c.sum()) cw = dict(zip(ii[::-1], c[ii])) try: self.clf.set_params(class_weight=cw) except ValueError: import warnings warnings.warn( "Tried to set class_weight, but failed. The classifier " "probably doesn't support it")
Fits X to outcomes y using clf and cv_method
def cross_val_fit(self, X, y, cross_val='4-Fold', scoring='accuracy', feat_select=None, class_weight='auto'): """ Fits X to outcomes y, using clf and cv_method """ from sklearn import cross_validation self.X = X self.y = y self.set_class_weight(class_weight=class_weight, y=y) # Set cross validator if isinstance(cross_val, string_types): if re.match('.*-Fold', cross_val) is not None: n = int(cross_val.split('-')[0]) self.cver = cross_validation.StratifiedKFold(self.y, n) else: raise Exception('Unrecognized cross validation method') else: self.cver = cross_val if feat_select is not None: self.features_selected = [] # Perform cross-validated classification from sklearn.grid_search import GridSearchCV if isinstance(self.clf, GridSearchCV): import warnings if feat_select is not None: warnings.warn( "Cross-validated feature selection not supported with " "GridSearchCV") self.clf.set_params(cv=self.cver, scoring=scoring) with warnings.catch_warnings(): warnings.simplefilter('ignore', category=UserWarning) self.clf = self.clf.fit(X, y) self.cvs = self.clf.best_score_ else: self.cvs = self.feat_select_cvs( feat_select=feat_select, scoring=scoring) if feat_select is not None: fs = feature_selection( feat_select, X, y) self.features_selected.append(fs) X = X[:, fs] self.clf.fit(X, y) return self.cvs.mean()
Returns cross-validated scores (just like cross_val_score), but includes feature selection as part of the cross-validation loop
def feat_select_cvs(self, scoring=None, feat_select=None): """ Returns cross validated scores (just like cross_val_score), but includes feature selection as part of the cross validation loop """ scores = [] self.predictions = [] for train, test in self.cver: X_train, X_test, y_train, y_test = self.X[ train], self.X[test], self.y[train], self.y[test] if feat_select is not None: # Get which features are kept fs = feature_selection( feat_select, X_train, y_train) self.features_selected.append(fs) # Filter X to only keep selected features X_train, X_test = X_train[ :, fs], X_test[:, fs] # Set scoring (not implement as accuracy is default) # Train classifier self.clf.fit(X_train, y_train) # Test classifier predicition, s = get_score( X_test, y_test, self.clf, scoring=scoring) scores.append(s) self.predictions.append((y_test, predicition)) return np.array(scores)
Given a dataset, fits either features or voxels to y
def fit_dataset(self, dataset, y, features=None, feature_type='features'): """ Given a dataset, fits either features or voxels to y """ # Get data from dataset if feature_type == 'features': X = np.rot90(dataset.feature_table.data.toarray()) elif feature_type == 'voxels': X = np.rot90(dataset.image_table.data.toarray()) self.sk_classifier.fit(X, y)
list: list ANDNOT list
def p_list_andnot(self, p): 'list : list ANDNOT list' p[0] = p[1].loc[set(p[1].index) - set(p[3].index)]
list: list AND list
def p_list_and(self, p): 'list : list AND list' p[0] = pd.concat( [p[1], p[3]], axis=1).dropna().apply(self.func, axis=1)
list: list OR list
def p_list_or(self, p): 'list : list OR list' p[0] = pd.concat( [p[1], p[3]], axis=1).fillna(0.0).apply(self.func, axis=1)
list: feature | WORD
def p_list_feature(self, p): '''list : feature | WORD ''' p[0] = self.dataset.get_studies( features=p[1], frequency_threshold=self.threshold, func=self.func, return_type='weights')
Aggregates over all voxels within each ROI in the input image.
def average_within_regions(dataset, regions, masker=None, threshold=None, remove_zero=True): """ Aggregates over all voxels within each ROI in the input image. Takes a Dataset and a Nifti image that defines distinct regions, and returns a numpy matrix of ROIs x mappables, where the value at each ROI is the proportion of active voxels in that ROI. Each distinct ROI must have a unique value in the image; non-contiguous voxels with the same value will be assigned to the same ROI. Args: dataset: Either a Dataset instance from which image data are extracted, or a Numpy array containing image data to use. If the latter, the array contains voxels in rows and features/studies in columns. The number of voxels must be equal to the length of the vectorized image mask in the regions image. regions: An image defining the boundaries of the regions to use. Can be one of: 1) A string name of the NIFTI or Analyze-format image 2) A NiBabel SpatialImage 3) A list of NiBabel images 4) A 1D numpy array of the same length as the mask vector in the Dataset's current Masker. masker: Optional masker used to load image if regions is not a numpy array. Must be passed if dataset is a numpy array. threshold: An optional float in the range of 0 - 1 or integer. If passed, the array will be binarized, with ROI values above the threshold assigned to True and values below the threshold assigned to False. (E.g., if threshold = 0.05, only ROIs in which more than 5% of voxels are active will be considered active.) If threshold is integer, studies will only be considered active if they activate more than that number of voxels in the ROI. remove_zero: An optional boolean; when True, assume that voxels with value of 0 should not be considered as a separate ROI, and will be ignored. Returns: A 2D numpy array with ROIs in rows and mappables in columns. """ if masker is not None: masker = masker else: if isinstance(dataset, Dataset): masker = dataset.masker else: if not type(regions).__module__.startswith('numpy'): raise ValueError( "If dataset is a numpy array and regions is not a numpy " "array, a masker must be provided.") if not type(regions).__module__.startswith('numpy'): regions = masker.mask(regions) if isinstance(dataset, Dataset): dataset = dataset.get_image_data(dense=False) # If multiple images are passed, give each one a unique value if regions.ndim == 2: m = regions for i in range(regions.shape[1]): _nz = np.nonzero(m[:, i])[0] if isinstance(threshold, int): m[_nz, i] = 1.0 else: m[_nz, i] = 1.0 / np.count_nonzero(m[:, i]) # Otherwise create an ROI-coding matrix else: labels = np.unique(regions) if remove_zero: labels = labels[np.nonzero(labels)] n_regions = labels.size m = np.zeros((regions.size, n_regions)) for i in range(n_regions): if isinstance(threshold, int): m[regions == labels[i], i] = 1.0 else: m[regions == labels[i], i] = 1.0 / \ np.sum(regions == labels[i]) # Call dot() on the array itself as this will use sparse matrix # multiplication if possible. result = dataset.T.dot(m).T if threshold is not None: result[result < threshold] = 0.0 result = result.astype(bool) return result
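A hedged usage sketch; the atlas filename and Dataset pickle are illustrative assumptions:

from neurosynth.base.dataset import Dataset  # import path assumed

dataset = Dataset.load('dataset.pkl')  # assumed pre-built Dataset
# Proportion of active voxels per labeled region, binarized at 5%.
roi_data = average_within_regions(dataset, 'atlas_labels.nii.gz',
                                  threshold=0.05)
print(roi_data.shape)  # (n_regions, n_studies), boolean once thresholded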
Imposes a 3D grid on the brain volume and averages across all voxels that fall within each cell. Args: dataset: Data to apply the grid to. Either a Dataset instance, or a numpy array with voxels in rows and features in columns. masker: Optional Masker instance used to map between the created grid and the dataset. This is only needed if dataset is a numpy array; if dataset is a Dataset instance, the Masker in the dataset will be used. scale: int; scaling factor (in mm) to pass onto create_grid(). threshold: Optional float to pass to reduce.average_within_regions(). Returns: A tuple of length 2, where the first element is a numpy array of dimensions n_cubes x n_studies, and the second element is a numpy array, with the same dimensions as the Masker instance in the current Dataset, that maps voxel identities onto cell IDs in the grid.
def apply_grid(dataset, masker=None, scale=5, threshold=None): """ Imposes a 3D grid on the brain volume and averages across all voxels that fall within each cell. Args: dataset: Data to apply grid to. Either a Dataset instance, or a numpy array with voxels in rows and features in columns. masker: Optional Masker instance used to map between the created grid and the dataset. This is only needed if dataset is a numpy array; if dataset is a Dataset instance, the Masker in the dataset will be used. scale: int; scaling factor (in mm) to pass onto create_grid(). threshold: Optional float to pass to reduce.average_within_regions(). Returns: A tuple of length 2, where the first element is a numpy array of dimensions n_cubes x n_studies, and the second element is a numpy array, with the same dimensions as the Masker instance in the current Dataset, that maps voxel identities onto cell IDs in the grid. """ if masker is None: if isinstance(dataset, Dataset): masker = dataset.masker else: raise ValueError( "If dataset is a numpy array, a masker must be provided.") grid = imageutils.create_grid(masker.volume, scale) cm = masker.mask(grid, in_global_mask=True) data = average_within_regions(dataset, cm, threshold) return (data, grid)
Returns mappable data for a random subset of voxels.
def get_random_voxels(dataset, n_voxels): """ Returns mappable data for a random subset of voxels. May be useful as a baseline in predictive analyses--e.g., to compare performance of a more principled feature selection method with simple random selection. Args: dataset: A Dataset instance n_voxels: An integer specifying the number of random voxels to select. Returns: A 2D numpy array with (randomly-selected) voxels in rows and mappables in columns. """ voxels = np.arange(dataset.masker.n_vox_in_vol) np.random.shuffle(voxels) selected = voxels[0:n_voxels] return dataset.get_image_data(voxels=selected)
Return top forty words from each topic in trained topic model.
def _get_top_words(model, feature_names, n_top_words=40): """ Return top forty words from each topic in trained topic model. """ topic_words = [] for topic in model.components_: top_words = [feature_names[i] for i in topic.argsort()[:-n_top_words-1:-1]] topic_words += [top_words] return topic_words
Perform topic modeling using Latent Dirichlet Allocation with the Java toolbox MALLET.
def run_lda(abstracts, n_topics=50, n_words=31, n_iters=1000, alpha=None, beta=0.001): """ Perform topic modeling using Latent Dirichlet Allocation with the Java toolbox MALLET. Args: abstracts: A pandas DataFrame with two columns ('pmid' and 'abstract') containing article abstracts. n_topics: Number of topics to generate. Default=50. n_words: Number of top words to return for each topic. Default=31, based on Poldrack et al. (2012). n_iters: Number of iterations to run in training topic model. Default=1000. alpha: The Dirichlet prior on the per-document topic distributions. Default: 50 / n_topics, based on Poldrack et al. (2012). beta: The Dirichlet prior on the per-topic word distribution. Default: 0.001, based on Poldrack et al. (2012). Returns: weights_df: A pandas DataFrame derived from the MALLET output-doc-topics output file. Contains the weight assigned to each article for each topic, which can be used to select articles for topic-based meta-analyses (accepted threshold from Poldrack article is 0.001). [n_topics]+1 columns: 'pmid' is the first column and the following columns are the topic names. The names of the topics match the names in df (e.g., topic_000). keys_df: A pandas DataFrame derived from the MALLET output-topic-keys output file. Contains the top [n_words] words for each topic, which can act as a summary of the topic's content. Two columns: 'topic' and 'terms'. The names of the topics match the names in weights (e.g., topic_000). """ if abstracts.index.name != 'pmid': abstracts.index = abstracts['pmid'] resdir = os.path.abspath(get_resource_path()) tempdir = os.path.join(resdir, 'topic_models') absdir = os.path.join(tempdir, 'abstracts') if not os.path.isdir(tempdir): os.mkdir(tempdir) if alpha is None: alpha = 50. / n_topics # Check for presence of abstract files and convert if necessary if not os.path.isdir(absdir): print('Abstracts folder not found. Creating abstract files...') os.mkdir(absdir) for pmid in abstracts.index.values: abstract = abstracts.loc[pmid]['abstract'] with open(os.path.join(absdir, str(pmid) + '.txt'), 'w') as fo: fo.write(abstract) # Run MALLET topic modeling print('Generating topics...') mallet_bin = join(dirname(dirname(__file__)), 'resources/mallet/bin/mallet') import_str = ('{mallet} import-dir ' '--input {absdir} ' '--output {outdir}/topic-input.mallet ' '--keep-sequence ' '--remove-stopwords').format(mallet=mallet_bin, absdir=absdir, outdir=tempdir) train_str = ('{mallet} train-topics ' '--input {out}/topic-input.mallet ' '--num-topics {n_topics} ' '--num-top-words {n_words} ' '--output-topic-keys {out}/topic_keys.txt ' '--output-doc-topics {out}/doc_topics.txt ' '--num-iterations {n_iters} ' '--output-model {out}/saved_model.mallet ' '--random-seed 1 ' '--alpha {alpha} ' '--beta {beta}').format(mallet=mallet_bin, out=tempdir, n_topics=n_topics, n_words=n_words, n_iters=n_iters, alpha=alpha, beta=beta) subprocess.call(import_str, shell=True) subprocess.call(train_str, shell=True) # Read in and convert doc_topics and topic_keys. def clean_str(string): return os.path.basename(os.path.splitext(string)[0]) def get_sort(lst): return [i[0] for i in sorted(enumerate(lst), key=lambda x:x[1])] topic_names = ['topic_{0:03d}'.format(i) for i in range(n_topics)] # doc_topics: Topic weights for each paper. # The conversion here is pretty ugly at the moment. # First row should be dropped. First column is row number and can be used # as the index. # Second column is 'file: /full/path/to/pmid.txt' <-- Parse to get pmid. 
# After that, odd columns are topic numbers and even columns are the # weights for the topics in the preceding column. These columns are sorted # on an individual pmid basis by the weights. n_cols = (2 * n_topics) + 1 dt_df = pd.read_csv(os.path.join(tempdir, 'doc_topics.txt'), delimiter='\t', skiprows=1, header=None, index_col=0) dt_df = dt_df[dt_df.columns[:n_cols]] # Get pmids from filenames dt_df[1] = dt_df[1].apply(clean_str) # Put weights (even cols) and topics (odd cols) into separate dfs. weights_df = dt_df[dt_df.columns[2::2]] weights_df.index = dt_df[1] weights_df.columns = range(n_topics) topics_df = dt_df[dt_df.columns[1::2]] topics_df.index = dt_df[1] topics_df.columns = range(n_topics) # Sort columns in weights_df separately for each row using topics_df. sorters_df = topics_df.apply(get_sort, axis=1) weights = weights_df.as_matrix() sorters = sorters_df.as_matrix() # there has to be a better way to do this. for i in range(sorters.shape[0]): weights[i, :] = weights[i, sorters[i, :]] # Define topic names (e.g., topic_000) index = dt_df[1] weights_df = pd.DataFrame(columns=topic_names, data=weights, index=index) weights_df.index.name = 'pmid' # topic_keys: Top [n_words] words for each topic. keys_df = pd.read_csv(os.path.join(tempdir, 'topic_keys.txt'), delimiter='\t', header=None, index_col=0) # Second column is a list of the terms. keys_df = keys_df[[2]] keys_df.rename(columns={2: 'terms'}, inplace=True) keys_df.index = topic_names keys_df.index.name = 'topic' # Remove all temporary files (abstract files, model, and outputs). shutil.rmtree(tempdir) # Return article topic weights and topic keys. return weights_df, keys_df
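A hedged usage sketch for the topic-modeling step; it assumes the MALLET binaries are bundled under the package's resources directory (as the paths above imply) and that `abstracts` is a DataFrame with 'pmid' and 'abstract' columns, e.g. the output of download_abstracts() further below:

weights_df, keys_df = run_lda(abstracts, n_topics=50, n_words=31,
                              n_iters=1000)

# Studies loading strongly on a topic (weight >= 0.001 in Poldrack et al.)
# can then be selected for a topic-based meta-analysis.
strong_pmids = weights_df[weights_df['topic_000'] >= 0.001].index.tolist()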
Correlates row vector x with each row vector in 2D array y.
def pearson(x, y): """ Correlates row vector x with each row vector in 2D array y. """ data = np.vstack((x, y)) ms = data.mean(axis=1)[(slice(None, None, None), None)] datam = data - ms datass = np.sqrt(np.sum(datam**2, axis=1)) temp = np.dot(datam[1:], datam[0].T) rs = temp / (datass[1:] * datass[0]) return rs
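A small self-contained check of the helper above, with values chosen so the expected correlations are obvious:

import numpy as np

x = np.array([1., 2., 3., 4.])
y = np.array([[2., 4., 6., 8.],    # perfectly correlated with x
              [4., 3., 2., 1.]])   # perfectly anti-correlated with x

print(pearson(x, y))  # approximately [ 1., -1.]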
Two-way chi-square test of independence. Takes a 3D array as input: N(voxels) x 2 x 2, where the last two dimensions are the contingency table for each of N voxels. Returns an array of p-values.
def two_way(cells): """ Two-way chi-square test of independence. Takes a 3D array as input: N(voxels) x 2 x 2, where the last two dimensions are the contingency table for each of N voxels. Returns an array of p-values. """ # Mute divide-by-zero warning for bad voxels since we account for that # later warnings.simplefilter("ignore", RuntimeWarning) cells = cells.astype('float64') # Make sure we don't overflow total = np.apply_over_axes(np.sum, cells, [1, 2]).ravel() chi_sq = np.zeros(cells.shape, dtype='float64') for i in range(2): for j in range(2): exp = np.sum(cells[:, i, :], 1).ravel() * \ np.sum(cells[:, :, j], 1).ravel() / total bad_vox = np.where(exp == 0)[0] chi_sq[:, i, j] = (cells[:, i, j] - exp) ** 2 / exp chi_sq[bad_vox, i, j] = 1.0 # Set p-value for invalid voxels to 1 chi_sq = np.apply_over_axes(np.sum, chi_sq, [1, 2]).ravel() return special.chdtrc(1, chi_sq)
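An illustrative call with a single voxel's contingency table (the counts are made up):

import numpy as np

# Rows: voxel active / inactive; columns: term present / absent.
cells = np.array([[[40, 10],
                   [10, 40]]])  # shape (n_voxels, 2, 2), here n_voxels = 1

print(two_way(cells))  # one p-value per voxel; very small here because the
                       # counts are strongly associated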
One-way chi-square test of independence. Takes a 1D array as input and compares activation at each voxel to the proportion expected under a uniform distribution throughout the array. Note that if you're testing activation with this, make sure that only valid voxels (e.g., in-mask gray matter voxels) are included in the array, or results won't make any sense!
def one_way(data, n): """ One-way chi-square test of independence. Takes a 1D array as input and compares activation at each voxel to proportion expected under a uniform distribution throughout the array. Note that if you're testing activation with this, make sure that only valid voxels (e.g., in-mask gray matter voxels) are included in the array, or results won't make any sense! """ term = data.astype('float64') no_term = n - term t_exp = np.mean(term, 0) t_exp = np.array([t_exp, ] * data.shape[0]) nt_exp = n - t_exp t_mss = (term - t_exp) ** 2 / t_exp nt_mss = (no_term - nt_exp) ** 2 / nt_exp chi2 = t_mss + nt_mss return special.chdtrc(1, chi2)
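An illustrative call with made-up counts: each entry is the number of studies, out of n, that activate a voxel, and the test asks how far each voxel departs from the mean rate across the array:

import numpy as np

data = np.array([50, 55, 90])  # activation counts at three voxels
print(one_way(data, 100))      # one p-value per voxel; smallest for the third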
Determine FDR threshold given a p value array and desired false discovery rate q.
def fdr(p, q=.05): """ Determine FDR threshold given a p value array and desired false discovery rate q. """ s = np.sort(p) nvox = p.shape[0] null = np.array(range(1, nvox + 1), dtype='float') * q / nvox below = np.where(s <= null)[0] return s[max(below)] if len(below) else -1
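A worked example of the threshold calculation (the p-values are made up):

import numpy as np

p = np.array([0.0005, 0.001, 0.03, 0.2, 0.4])
print(fdr(p, q=0.05))  # 0.03: the largest sorted p-value that still falls at
                       # or below its Benjamini-Hochberg cutoff (i * q / n)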
Download the latest data files. Args: path (str): Location to save the retrieved data files. Defaults to the current directory. unpack (bool): If True, unzips the data file post-download.
def download(path='.', url=None, unpack=False):
    """ Download the latest data files.
    Args:
        path (str): Location to save the retrieved data files. Defaults to
            the current directory.
        unpack (bool): If True, unzips the data file post-download.
    """
    if url is None:
        url = 'https://github.com/neurosynth/neurosynth-data/blob/master/current_data.tar.gz?raw=true'

    if os.path.exists(path) and os.path.isdir(path):
        basename = os.path.basename(url).split('?')[0]
        filename = os.path.join(path, basename)
    else:
        filename = path

    f = open(filename, 'wb')
    u = urlopen(url)
    # The Content-Length header value is a string; convert it directly to an
    # int (indexing it would only grab the first character).
    file_size = int(u.headers["Content-Length"])
    print("Downloading the latest Neurosynth files: {0} bytes: {1}".format(
        url, file_size))

    bytes_dl = 0
    block_size = 8192
    while True:
        buffer = u.read(block_size)
        if not buffer:
            break
        bytes_dl += len(buffer)
        f.write(buffer)
        p = float(bytes_dl) / file_size
        status = r"{0} [{1:.2%}]".format(bytes_dl, p)
        status = status + chr(8) * (len(status) + 1)
        sys.stdout.write(status)

    f.close()

    if unpack:
        import tarfile
        tarfile.open(filename, 'r:gz').extractall(os.path.dirname(filename))
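Typical usage, fetching and unpacking the archive into a local directory (the directory name is arbitrary):

import os

os.makedirs('neurosynth_data', exist_ok=True)
download(path='neurosynth_data', unpack=True)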
Download the abstracts for a dataset/list of pmids
def download_abstracts(dataset, path='.', email=None, out_file=None): """ Download the abstracts for a dataset/list of pmids """ try: from Bio import Entrez, Medline except: raise Exception( 'Module biopython is required for downloading abstracts from PubMed.') if email is None: raise Exception('No email address provided.') Entrez.email = email if isinstance(dataset, Dataset): pmids = dataset.image_table.ids.astype(str).tolist() elif isinstance(dataset, list): pmids = [str(pmid) for pmid in dataset] else: raise Exception( 'Dataset type not recognized: {0}'.format(type(dataset))) records = [] # PubMed only allows you to search ~1000 at a time. I chose 900 to be safe. chunks = [pmids[x: x + 900] for x in range(0, len(pmids), 900)] for chunk in chunks: h = Entrez.efetch(db='pubmed', id=chunk, rettype='medline', retmode='text') records += list(Medline.parse(h)) # Pull data for studies with abstracts data = [[study['PMID'], study['AB']] for study in records if study.get('AB', None)] df = pd.DataFrame(columns=['pmid', 'abstract'], data=data) if out_file is not None: df.to_csv(os.path.join(os.path.abspath(path), out_file), index=False) return df
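A hedged usage sketch; the email address is a placeholder you must replace, and `dataset` is an existing Dataset instance (or a plain list of PMIDs):

# Requires biopython; NCBI Entrez needs a valid email address.
abstracts = download_abstracts(dataset, email='you@example.com',
                               out_file='abstracts.csv')
# The resulting ('pmid', 'abstract') DataFrame is the expected input to
# run_lda() above.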
Load activation data from a text file.
def _load_activations(self, filename): """ Load activation data from a text file. Args: filename (str): a string pointing to the location of the txt file to read from. """ logger.info("Loading activation data from %s..." % filename) activations = pd.read_csv(filename, sep='\t') activations.columns = [col.lower() for col in list(activations.columns)] # Make sure all mandatory columns exist mc = ['x', 'y', 'z', 'id', 'space'] if (set(mc) - set(list(activations.columns))): logger.error( "At least one of mandatory columns (x, y, z, id, and space) " "is missing from input file.") return # Transform to target space where needed spaces = activations['space'].unique() xyz = activations[['x', 'y', 'z']].values for s in spaces: if s != self.transformer.target: inds = activations['space'] == s xyz[inds] = self.transformer.apply(s, xyz[inds]) activations[['x', 'y', 'z']] = xyz # xyz --> ijk ijk = pd.DataFrame( transformations.xyz_to_mat(xyz), columns=['i', 'j', 'k']) activations = pd.concat([activations, ijk], axis=1) return activations
Create and store a new ImageTable instance based on the current Dataset. Will generally be called privately, but may be useful as a convenience method in cases where the user wants to re-generate the table with a new smoothing kernel of a different radius.
def create_image_table(self, r=None): """ Create and store a new ImageTable instance based on the current Dataset. Will generally be called privately, but may be useful as a convenience method in cases where the user wants to re-generate the table with a new smoothing kernel of different radius. Args: r (int): An optional integer indicating the radius of the smoothing kernel. By default, this is None, which will keep whatever value is currently set in the Dataset instance. """ logger.info("Creating image table...") if r is not None: self.r = r self.image_table = ImageTable(self)