| partition (stringclasses, 3 values) | func_name (string, 1-134 chars) | docstring (string, 1-46.9k chars) | path (string, 4-223 chars) | original_string (string, 75-104k chars) | code (string, 75-104k chars) | docstring_tokens (list, 1-1.97k items) | repo (string, 7-55 chars) | language (stringclasses, 1 value) | url (string, 87-315 chars) | code_tokens (list, 19-28.4k items) | sha (string, 40 chars) |
|---|---|---|---|---|---|---|---|---|---|---|---|
test
|
ReplLockManager.tryAcquire
|
Attempt to acquire lock.
:param lockID: unique lock identifier.
:type lockID: str
:param sync: True - to wait until lock is acquired or failed to acquire.
:type sync: bool
:param callback: if sync is False - callback will be called with operation result.
:type callback: func(opResult, error)
:param timeout: max operation time (default - unlimited)
:type timeout: float
:return True if acquired, False - somebody else already acquired lock
|
pysyncobj/batteries.py
|
def tryAcquire(self, lockID, callback=None, sync=False, timeout=None):
"""Attempt to acquire lock.
:param lockID: unique lock identifier.
:type lockID: str
:param sync: True - to wait until lock is acquired or failed to acquire.
:type sync: bool
:param callback: if sync is False - callback will be called with operation result.
:type callback: func(opResult, error)
:param timeout: max operation time (default - unlimited)
:type timeout: float
:return True if acquired, False - somebody else already acquired lock
"""
return self.__lockImpl.acquire(lockID, self.__selfID, time.time(), callback=callback, sync=sync, timeout=timeout)
|
def tryAcquire(self, lockID, callback=None, sync=False, timeout=None):
"""Attempt to acquire lock.
:param lockID: unique lock identifier.
:type lockID: str
:param sync: True - to wait until lock is acquired or failed to acquire.
:type sync: bool
:param callback: if sync is False - callback will be called with operation result.
:type callback: func(opResult, error)
:param timeout: max operation time (default - unlimited)
:type timeout: float
:return True if acquired, False - somebody else already acquired lock
"""
return self.__lockImpl.acquire(lockID, self.__selfID, time.time(), callback=callback, sync=sync, timeout=timeout)
|
[
"Attempt",
"to",
"acquire",
"lock",
"."
] |
bakwc/PySyncObj
|
python
|
https://github.com/bakwc/PySyncObj/blob/be3b0aaa932d5156f5df140c23c962430f51b7b8/pysyncobj/batteries.py#L496-L509
|
[
"def",
"tryAcquire",
"(",
"self",
",",
"lockID",
",",
"callback",
"=",
"None",
",",
"sync",
"=",
"False",
",",
"timeout",
"=",
"None",
")",
":",
"return",
"self",
".",
"__lockImpl",
".",
"acquire",
"(",
"lockID",
",",
"self",
".",
"__selfID",
",",
"time",
".",
"time",
"(",
")",
",",
"callback",
"=",
"callback",
",",
"sync",
"=",
"sync",
",",
"timeout",
"=",
"timeout",
")"
] |
be3b0aaa932d5156f5df140c23c962430f51b7b8
|
test
|
ReplLockManager.isAcquired
|
Check if lock is acquired by ourselves.
:param lockID: unique lock identifier.
:type lockID: str
:return True if lock is acquired by ourselves.
|
pysyncobj/batteries.py
|
def isAcquired(self, lockID):
"""Check if lock is acquired by ourselves.
:param lockID: unique lock identifier.
:type lockID: str
:return True if lock is acquired by ourselves.
"""
return self.__lockImpl.isAcquired(lockID, self.__selfID, time.time())
|
def isAcquired(self, lockID):
"""Check if lock is acquired by ourselves.
:param lockID: unique lock identifier.
:type lockID: str
:return True if lock is acquired by ourselves.
"""
return self.__lockImpl.isAcquired(lockID, self.__selfID, time.time())
|
[
"Check",
"if",
"lock",
"is",
"acquired",
"by",
"ourselves",
"."
] |
bakwc/PySyncObj
|
python
|
https://github.com/bakwc/PySyncObj/blob/be3b0aaa932d5156f5df140c23c962430f51b7b8/pysyncobj/batteries.py#L511-L518
|
[
"def",
"isAcquired",
"(",
"self",
",",
"lockID",
")",
":",
"return",
"self",
".",
"__lockImpl",
".",
"isAcquired",
"(",
"lockID",
",",
"self",
".",
"__selfID",
",",
"time",
".",
"time",
"(",
")",
")"
] |
be3b0aaa932d5156f5df140c23c962430f51b7b8
|
test
|
ReplLockManager.release
|
Release previously-acquired lock.
:param lockID: unique lock identifier.
:type lockID: str
:param sync: True - to wait until lock is released or failed to release.
:type sync: bool
:param callback: if sync is False - callback will be called with operation result.
:type callback: func(opResult, error)
:param timeout: max operation time (default - unlimited)
:type timeout: float
|
pysyncobj/batteries.py
|
def release(self, lockID, callback=None, sync=False, timeout=None):
"""
Release previously-acquired lock.
:param lockID: unique lock identifier.
:type lockID: str
:param sync: True - to wait until lock is released or failed to release.
:type sync: bool
:param callback: if sync is False - callback will be called with operation result.
:type callback: func(opResult, error)
:param timeout: max operation time (default - unlimited)
:type timeout: float
"""
self.__lockImpl.release(lockID, self.__selfID, callback=callback, sync=sync, timeout=timeout)
|
def release(self, lockID, callback=None, sync=False, timeout=None):
"""
Release previously-acquired lock.
:param lockID: unique lock identifier.
:type lockID: str
:param sync: True - to wait until lock is released or failed to release.
:type sync: bool
:param callback: if sync is False - callback will be called with operation result.
:type callback: func(opResult, error)
:param timeout: max operation time (default - unlimited)
:type timeout: float
"""
self.__lockImpl.release(lockID, self.__selfID, callback=callback, sync=sync, timeout=timeout)
|
[
"Release",
"previously",
"-",
"acquired",
"lock",
"."
] |
bakwc/PySyncObj
|
python
|
https://github.com/bakwc/PySyncObj/blob/be3b0aaa932d5156f5df140c23c962430f51b7b8/pysyncobj/batteries.py#L520-L533
|
[
"def",
"release",
"(",
"self",
",",
"lockID",
",",
"callback",
"=",
"None",
",",
"sync",
"=",
"False",
",",
"timeout",
"=",
"None",
")",
":",
"self",
".",
"__lockImpl",
".",
"release",
"(",
"lockID",
",",
"self",
".",
"__selfID",
",",
"callback",
"=",
"callback",
",",
"sync",
"=",
"sync",
",",
"timeout",
"=",
"timeout",
")"
] |
be3b0aaa932d5156f5df140c23c962430f51b7b8
|
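
The three ReplLockManager rows above (tryAcquire, isAcquired, release) come from PySyncObj's batteries module. As a minimal usage sketch, following the pattern from the PySyncObj README: the lock manager is registered as a consumer of a SyncObj cluster, then the lock is taken, checked, and released by name. The node addresses, lock name, and timeouts below are placeholders.

```python
# Sketch only: node addresses, lock name and timeouts are illustrative.
import time

from pysyncobj import SyncObj
from pysyncobj.batteries import ReplLockManager

# Locks auto-expire if the holder stops sending keepalives for autoUnlockTime.
lock_manager = ReplLockManager(autoUnlockTime=75)
sync_obj = SyncObj('serverA:4321', ['serverB:4321', 'serverC:4321'],
                   consumers=[lock_manager])

if lock_manager.tryAcquire('testLock', sync=True, timeout=10.0):
    try:
        assert lock_manager.isAcquired('testLock')
        time.sleep(1)  # ... do work while holding the lock ...
    finally:
        lock_manager.release('testLock', sync=True)
```
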
test
|
check
|
Decorator which wraps checks and returns an error response on failure.
|
watchman/decorators.py
|
def check(func):
"""
Decorator which wraps checks and returns an error response on failure.
"""
def wrapped(*args, **kwargs):
check_name = func.__name__
arg_name = None
if args:
arg_name = args[0]
try:
if arg_name:
logger.debug("Checking '%s' for '%s'", check_name, arg_name)
else:
logger.debug("Checking '%s'", check_name)
response = func(*args, **kwargs)
except Exception as e:
message = str(e)
response = {
"ok": False,
"error": message,
"stacktrace": traceback.format_exc(),
}
# The check contains several individual checks (e.g., one per
# database). Preface the results by name.
if arg_name:
response = {arg_name: response}
logger.exception(
"Error calling '%s' for '%s': %s",
check_name,
arg_name,
message
)
else:
logger.exception(
"Error calling '%s': %s",
check_name,
message
)
return response
return wrapped
|
def check(func):
"""
Decorator which wraps checks and returns an error response on failure.
"""
def wrapped(*args, **kwargs):
check_name = func.__name__
arg_name = None
if args:
arg_name = args[0]
try:
if arg_name:
logger.debug("Checking '%s' for '%s'", check_name, arg_name)
else:
logger.debug("Checking '%s'", check_name)
response = func(*args, **kwargs)
except Exception as e:
message = str(e)
response = {
"ok": False,
"error": message,
"stacktrace": traceback.format_exc(),
}
# The check contains several individual checks (e.g., one per
# database). Preface the results by name.
if arg_name:
response = {arg_name: response}
logger.exception(
"Error calling '%s' for '%s': %s",
check_name,
arg_name,
message
)
else:
logger.exception(
"Error calling '%s': %s",
check_name,
message
)
return response
return wrapped
|
[
"Decorator",
"which",
"wraps",
"checks",
"and",
"returns",
"an",
"error",
"response",
"on",
"failure",
"."
] |
mwarkentin/django-watchman
|
python
|
https://github.com/mwarkentin/django-watchman/blob/6ef98ba54dc52f27e7b42d42028b59dc67550268/watchman/decorators.py#L14-L54
|
[
"def",
"check",
"(",
"func",
")",
":",
"def",
"wrapped",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"check_name",
"=",
"func",
".",
"__name__",
"arg_name",
"=",
"None",
"if",
"args",
":",
"arg_name",
"=",
"args",
"[",
"0",
"]",
"try",
":",
"if",
"arg_name",
":",
"logger",
".",
"debug",
"(",
"\"Checking '%s' for '%s'\"",
",",
"check_name",
",",
"arg_name",
")",
"else",
":",
"logger",
".",
"debug",
"(",
"\"Checking '%s'\"",
",",
"check_name",
")",
"response",
"=",
"func",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"except",
"Exception",
"as",
"e",
":",
"message",
"=",
"str",
"(",
"e",
")",
"response",
"=",
"{",
"\"ok\"",
":",
"False",
",",
"\"error\"",
":",
"message",
",",
"\"stacktrace\"",
":",
"traceback",
".",
"format_exc",
"(",
")",
",",
"}",
"# The check contains several individual checks (e.g., one per",
"# database). Preface the results by name.",
"if",
"arg_name",
":",
"response",
"=",
"{",
"arg_name",
":",
"response",
"}",
"logger",
".",
"exception",
"(",
"\"Error calling '%s' for '%s': %s\"",
",",
"check_name",
",",
"arg_name",
",",
"message",
")",
"else",
":",
"logger",
".",
"exception",
"(",
"\"Error calling '%s': %s\"",
",",
"check_name",
",",
"message",
")",
"return",
"response",
"return",
"wrapped"
] |
6ef98ba54dc52f27e7b42d42028b59dc67550268
|
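
A usage sketch for the check decorator above, assuming django-watchman inside an already-configured Django project; the check body and cache probe below are hypothetical. The point it illustrates: the decorator never lets the wrapped check raise, returning an error payload keyed by the first positional argument instead.

```python
# Hypothetical watchman-style check; assumes a configured Django project.
from django.core.cache import caches

from watchman.decorators import check


@check
def _check_cache(cache_name):
    # Any exception raised here is caught by the decorator and turned into
    # an error payload rather than propagating to the caller.
    caches[cache_name].set('watchman-probe', True, 1)
    return {cache_name: {"ok": True}}


result = _check_cache('default')
# Success: {'default': {'ok': True}}
# Failure: {'default': {'ok': False, 'error': '...', 'stacktrace': '...'}}
```
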
test
|
token_required
|
Decorator which ensures that one of the WATCHMAN_TOKENS is provided if set.
WATCHMAN_TOKEN_NAME can also be set if the token GET parameter must be
customized.
|
watchman/decorators.py
|
def token_required(view_func):
"""
Decorator which ensures that one of the WATCHMAN_TOKENS is provided if set.
WATCHMAN_TOKEN_NAME can also be set if the token GET parameter must be
customized.
"""
def _parse_auth_header(auth_header):
"""
Parse the `Authorization` header
Expected format: `WATCHMAN-TOKEN Token="ABC123"`
"""
# TODO: Figure out full set of allowed characters
# http://stackoverflow.com/questions/19028068/illegal-characters-in-http-headers
# https://www.w3.org/Protocols/rfc2616/rfc2616-sec2.html#sec2.2
# https://www.w3.org/Protocols/rfc2616/rfc2616-sec4.html#sec4.2
reg = re.compile('(\w+)[=] ?"?([\w-]+)"?')
header_dict = dict(reg.findall(auth_header))
return header_dict['Token']
def _get_passed_token(request):
"""
Try to get the passed token, starting with the header and fall back to `GET` param
"""
try:
auth_header = request.META['HTTP_AUTHORIZATION']
token = _parse_auth_header(auth_header)
except KeyError:
token = request.GET.get(settings.WATCHMAN_TOKEN_NAME)
return token
def _validate_token(request):
if settings.WATCHMAN_TOKENS:
watchman_tokens = settings.WATCHMAN_TOKENS.split(',')
elif settings.WATCHMAN_TOKEN:
watchman_tokens = [settings.WATCHMAN_TOKEN, ]
else:
return True
return _get_passed_token(request) in watchman_tokens
@csrf_exempt
@wraps(view_func)
def _wrapped_view(request, *args, **kwargs):
if _validate_token(request):
return view_func(request, *args, **kwargs)
return HttpResponseForbidden()
return _wrapped_view
|
def token_required(view_func):
"""
Decorator which ensures that one of the WATCHMAN_TOKENS is provided if set.
WATCHMAN_TOKEN_NAME can also be set if the token GET parameter must be
customized.
"""
def _parse_auth_header(auth_header):
"""
Parse the `Authorization` header
Expected format: `WATCHMAN-TOKEN Token="ABC123"`
"""
# TODO: Figure out full set of allowed characters
# http://stackoverflow.com/questions/19028068/illegal-characters-in-http-headers
# https://www.w3.org/Protocols/rfc2616/rfc2616-sec2.html#sec2.2
# https://www.w3.org/Protocols/rfc2616/rfc2616-sec4.html#sec4.2
reg = re.compile('(\w+)[=] ?"?([\w-]+)"?')
header_dict = dict(reg.findall(auth_header))
return header_dict['Token']
def _get_passed_token(request):
"""
Try to get the passed token, starting with the header and fall back to `GET` param
"""
try:
auth_header = request.META['HTTP_AUTHORIZATION']
token = _parse_auth_header(auth_header)
except KeyError:
token = request.GET.get(settings.WATCHMAN_TOKEN_NAME)
return token
def _validate_token(request):
if settings.WATCHMAN_TOKENS:
watchman_tokens = settings.WATCHMAN_TOKENS.split(',')
elif settings.WATCHMAN_TOKEN:
watchman_tokens = [settings.WATCHMAN_TOKEN, ]
else:
return True
return _get_passed_token(request) in watchman_tokens
@csrf_exempt
@wraps(view_func)
def _wrapped_view(request, *args, **kwargs):
if _validate_token(request):
return view_func(request, *args, **kwargs)
return HttpResponseForbidden()
return _wrapped_view
|
[
"Decorator",
"which",
"ensures",
"that",
"one",
"of",
"the",
"WATCHMAN_TOKENS",
"is",
"provided",
"if",
"set",
"."
] |
mwarkentin/django-watchman
|
python
|
https://github.com/mwarkentin/django-watchman/blob/6ef98ba54dc52f27e7b42d42028b59dc67550268/watchman/decorators.py#L57-L111
|
[
"def",
"token_required",
"(",
"view_func",
")",
":",
"def",
"_parse_auth_header",
"(",
"auth_header",
")",
":",
"\"\"\"\n Parse the `Authorization` header\n\n Expected format: `WATCHMAN-TOKEN Token=\"ABC123\"`\n \"\"\"",
"# TODO: Figure out full set of allowed characters",
"# http://stackoverflow.com/questions/19028068/illegal-characters-in-http-headers",
"# https://www.w3.org/Protocols/rfc2616/rfc2616-sec2.html#sec2.2",
"# https://www.w3.org/Protocols/rfc2616/rfc2616-sec4.html#sec4.2",
"reg",
"=",
"re",
".",
"compile",
"(",
"'(\\w+)[=] ?\"?([\\w-]+)\"?'",
")",
"header_dict",
"=",
"dict",
"(",
"reg",
".",
"findall",
"(",
"auth_header",
")",
")",
"return",
"header_dict",
"[",
"'Token'",
"]",
"def",
"_get_passed_token",
"(",
"request",
")",
":",
"\"\"\"\n Try to get the passed token, starting with the header and fall back to `GET` param\n \"\"\"",
"try",
":",
"auth_header",
"=",
"request",
".",
"META",
"[",
"'HTTP_AUTHORIZATION'",
"]",
"token",
"=",
"_parse_auth_header",
"(",
"auth_header",
")",
"except",
"KeyError",
":",
"token",
"=",
"request",
".",
"GET",
".",
"get",
"(",
"settings",
".",
"WATCHMAN_TOKEN_NAME",
")",
"return",
"token",
"def",
"_validate_token",
"(",
"request",
")",
":",
"if",
"settings",
".",
"WATCHMAN_TOKENS",
":",
"watchman_tokens",
"=",
"settings",
".",
"WATCHMAN_TOKENS",
".",
"split",
"(",
"','",
")",
"elif",
"settings",
".",
"WATCHMAN_TOKEN",
":",
"watchman_tokens",
"=",
"[",
"settings",
".",
"WATCHMAN_TOKEN",
",",
"]",
"else",
":",
"return",
"True",
"return",
"_get_passed_token",
"(",
"request",
")",
"in",
"watchman_tokens",
"@",
"csrf_exempt",
"@",
"wraps",
"(",
"view_func",
")",
"def",
"_wrapped_view",
"(",
"request",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"_validate_token",
"(",
"request",
")",
":",
"return",
"view_func",
"(",
"request",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"return",
"HttpResponseForbidden",
"(",
")",
"return",
"_wrapped_view"
] |
6ef98ba54dc52f27e7b42d42028b59dc67550268
|
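
A sketch of protecting a custom status view with token_required; the view itself is hypothetical and assumes WATCHMAN_TOKENS (or WATCHMAN_TOKEN) is set in Django settings.

```python
# Hypothetical view; requires django-watchman and a token configured in settings.
from django.http import JsonResponse

from watchman.decorators import token_required


@token_required
def ping(request):
    return JsonResponse({"ok": True})

# Clients pass the token either as the GET parameter named by
# WATCHMAN_TOKEN_NAME or as a header of the form
#   Authorization: WATCHMAN-TOKEN Token="ABC123"
# Requests without a matching token receive HttpResponseForbidden (403).
```
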
test
|
set_hosts
|
Sets the Elasticsearch hosts to use
Args:
hosts (str): A single hostname or URL, or list of hostnames or URLs
use_ssl (bool): Use a HTTPS connection to the server
ssl_cert_path (str): Path to the certificate chain
|
parsedmarc/elastic.py
|
def set_hosts(hosts, use_ssl=False, ssl_cert_path=None):
"""
Sets the Elasticsearch hosts to use
Args:
hosts (str): A single hostname or URL, or list of hostnames or URLs
use_ssl (bool): Use a HTTPS connection to the server
ssl_cert_path (str): Path to the certificate chain
"""
if type(hosts) != list:
hosts = [hosts]
conn_params = {
"hosts": hosts,
"timeout": 20
}
if use_ssl:
conn_params['use_ssl'] = True
if ssl_cert_path:
conn_params['verify_certs'] = True
conn_params['ca_certs'] = ssl_cert_path
else:
conn_params['verify_certs'] = False
connections.create_connection(**conn_params)
|
def set_hosts(hosts, use_ssl=False, ssl_cert_path=None):
"""
Sets the Elasticsearch hosts to use
Args:
hosts (str): A single hostname or URL, or list of hostnames or URLs
use_ssl (bool): Use a HTTPS connection to the server
ssl_cert_path (str): Path to the certificate chain
"""
if type(hosts) != list:
hosts = [hosts]
conn_params = {
"hosts": hosts,
"timeout": 20
}
if use_ssl:
conn_params['use_ssl'] = True
if ssl_cert_path:
conn_params['verify_certs'] = True
conn_params['ca_certs'] = ssl_cert_path
else:
conn_params['verify_certs'] = False
connections.create_connection(**conn_params)
|
[
"Sets",
"the",
"Elasticsearch",
"hosts",
"to",
"use"
] |
domainaware/parsedmarc
|
python
|
https://github.com/domainaware/parsedmarc/blob/ecc9fd434c23d896ccd1f35795ccc047f946ed05/parsedmarc/elastic.py#L172-L194
|
[
"def",
"set_hosts",
"(",
"hosts",
",",
"use_ssl",
"=",
"False",
",",
"ssl_cert_path",
"=",
"None",
")",
":",
"if",
"type",
"(",
"hosts",
")",
"!=",
"list",
":",
"hosts",
"=",
"[",
"hosts",
"]",
"conn_params",
"=",
"{",
"\"hosts\"",
":",
"hosts",
",",
"\"timeout\"",
":",
"20",
"}",
"if",
"use_ssl",
":",
"conn_params",
"[",
"'use_ssl'",
"]",
"=",
"True",
"if",
"ssl_cert_path",
":",
"conn_params",
"[",
"'verify_certs'",
"]",
"=",
"True",
"conn_params",
"[",
"'ca_certs'",
"]",
"=",
"ssl_cert_path",
"else",
":",
"conn_params",
"[",
"'verify_certs'",
"]",
"=",
"False",
"connections",
".",
"create_connection",
"(",
"*",
"*",
"conn_params",
")"
] |
ecc9fd434c23d896ccd1f35795ccc047f946ed05
|
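
A sketch of pointing parsedmarc's Elasticsearch layer at a cluster before creating indexes or saving reports; the host names and certificate path are placeholders.

```python
# Host names and certificate path are placeholders.
from parsedmarc import elastic

# Plain HTTP against a local node
elastic.set_hosts("localhost:9200")

# HTTPS with a private CA chain and certificate verification enabled
elastic.set_hosts(["es1.example.com:9200", "es2.example.com:9200"],
                  use_ssl=True,
                  ssl_cert_path="/etc/ssl/certs/es-chain.pem")
```
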
test
|
create_indexes
|
Create Elasticsearch indexes
Args:
names (list): A list of index names
settings (dict): Index settings
|
parsedmarc/elastic.py
|
def create_indexes(names, settings=None):
"""
Create Elasticsearch indexes
Args:
names (list): A list of index names
settings (dict): Index settings
"""
for name in names:
index = Index(name)
try:
if not index.exists():
logger.debug("Creating Elasticsearch index: {0}".format(name))
if settings is None:
index.settings(number_of_shards=1,
number_of_replicas=1)
else:
index.settings(**settings)
index.create()
except Exception as e:
raise ElasticsearchError(
"Elasticsearch error: {0}".format(e.__str__()))
|
def create_indexes(names, settings=None):
"""
Create Elasticsearch indexes
Args:
names (list): A list of index names
settings (dict): Index settings
"""
for name in names:
index = Index(name)
try:
if not index.exists():
logger.debug("Creating Elasticsearch index: {0}".format(name))
if settings is None:
index.settings(number_of_shards=1,
number_of_replicas=1)
else:
index.settings(**settings)
index.create()
except Exception as e:
raise ElasticsearchError(
"Elasticsearch error: {0}".format(e.__str__()))
|
[
"Create",
"Elasticsearch",
"indexes"
] |
domainaware/parsedmarc
|
python
|
https://github.com/domainaware/parsedmarc/blob/ecc9fd434c23d896ccd1f35795ccc047f946ed05/parsedmarc/elastic.py#L197-L219
|
[
"def",
"create_indexes",
"(",
"names",
",",
"settings",
"=",
"None",
")",
":",
"for",
"name",
"in",
"names",
":",
"index",
"=",
"Index",
"(",
"name",
")",
"try",
":",
"if",
"not",
"index",
".",
"exists",
"(",
")",
":",
"logger",
".",
"debug",
"(",
"\"Creating Elasticsearch index: {0}\"",
".",
"format",
"(",
"name",
")",
")",
"if",
"settings",
"is",
"None",
":",
"index",
".",
"settings",
"(",
"number_of_shards",
"=",
"1",
",",
"number_of_replicas",
"=",
"1",
")",
"else",
":",
"index",
".",
"settings",
"(",
"*",
"*",
"settings",
")",
"index",
".",
"create",
"(",
")",
"except",
"Exception",
"as",
"e",
":",
"raise",
"ElasticsearchError",
"(",
"\"Elasticsearch error: {0}\"",
".",
"format",
"(",
"e",
".",
"__str__",
"(",
")",
")",
")"
] |
ecc9fd434c23d896ccd1f35795ccc047f946ed05
|
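
create_indexes skips indexes that already exist, so it is safe to call before every save; a sketch with illustrative index names and settings follows.

```python
# Index names and settings are illustrative; assumes set_hosts() was called.
from parsedmarc import elastic

elastic.set_hosts("localhost:9200")
elastic.create_indexes(
    ["dmarc_aggregate-2019-01-01", "dmarc_forensic-2019-01-01"],
    settings={"number_of_shards": 1, "number_of_replicas": 0},
)
```
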
test
|
migrate_indexes
|
Updates index mappings
Args:
aggregate_indexes (list): A list of aggregate index names
forensic_indexes (list): A list of forensic index names
|
parsedmarc/elastic.py
|
def migrate_indexes(aggregate_indexes=None, forensic_indexes=None):
"""
Updates index mappings
Args:
aggregate_indexes (list): A list of aggregate index names
forensic_indexes (list): A list of forensic index names
"""
version = 2
if aggregate_indexes is None:
aggregate_indexes = []
if forensic_indexes is None:
forensic_indexes = []
for aggregate_index_name in aggregate_indexes:
if not Index(aggregate_index_name).exists():
continue
aggregate_index = Index(aggregate_index_name)
doc = "doc"
fo_field = "published_policy.fo"
fo = "fo"
fo_mapping = aggregate_index.get_field_mapping(fields=[fo_field])
fo_mapping = fo_mapping[list(fo_mapping.keys())[0]]["mappings"]
if doc not in fo_mapping:
continue
fo_mapping = fo_mapping[doc][fo_field]["mapping"][fo]
fo_type = fo_mapping["type"]
if fo_type == "long":
new_index_name = "{0}-v{1}".format(aggregate_index_name, version)
body = {"properties": {"published_policy.fo": {
"type": "text",
"fields": {
"keyword": {
"type": "keyword",
"ignore_above": 256
}
}
}
}
}
Index(new_index_name).create()
Index(new_index_name).put_mapping(doc_type=doc, body=body)
reindex(connections.get_connection(), aggregate_index_name,
new_index_name)
Index(aggregate_index_name).delete()
for forensic_index in forensic_indexes:
pass
|
def migrate_indexes(aggregate_indexes=None, forensic_indexes=None):
"""
Updates index mappings
Args:
aggregate_indexes (list): A list of aggregate index names
forensic_indexes (list): A list of forensic index names
"""
version = 2
if aggregate_indexes is None:
aggregate_indexes = []
if forensic_indexes is None:
forensic_indexes = []
for aggregate_index_name in aggregate_indexes:
if not Index(aggregate_index_name).exists():
continue
aggregate_index = Index(aggregate_index_name)
doc = "doc"
fo_field = "published_policy.fo"
fo = "fo"
fo_mapping = aggregate_index.get_field_mapping(fields=[fo_field])
fo_mapping = fo_mapping[list(fo_mapping.keys())[0]]["mappings"]
if doc not in fo_mapping:
continue
fo_mapping = fo_mapping[doc][fo_field]["mapping"][fo]
fo_type = fo_mapping["type"]
if fo_type == "long":
new_index_name = "{0}-v{1}".format(aggregate_index_name, version)
body = {"properties": {"published_policy.fo": {
"type": "text",
"fields": {
"keyword": {
"type": "keyword",
"ignore_above": 256
}
}
}
}
}
Index(new_index_name).create()
Index(new_index_name).put_mapping(doc_type=doc, body=body)
reindex(connections.get_connection(), aggregate_index_name,
new_index_name)
Index(aggregate_index_name).delete()
for forensic_index in forensic_indexes:
pass
|
[
"Updates",
"index",
"mappings"
] |
domainaware/parsedmarc
|
python
|
https://github.com/domainaware/parsedmarc/blob/ecc9fd434c23d896ccd1f35795ccc047f946ed05/parsedmarc/elastic.py#L222-L269
|
[
"def",
"migrate_indexes",
"(",
"aggregate_indexes",
"=",
"None",
",",
"forensic_indexes",
"=",
"None",
")",
":",
"version",
"=",
"2",
"if",
"aggregate_indexes",
"is",
"None",
":",
"aggregate_indexes",
"=",
"[",
"]",
"if",
"forensic_indexes",
"is",
"None",
":",
"forensic_indexes",
"=",
"[",
"]",
"for",
"aggregate_index_name",
"in",
"aggregate_indexes",
":",
"if",
"not",
"Index",
"(",
"aggregate_index_name",
")",
".",
"exists",
"(",
")",
":",
"continue",
"aggregate_index",
"=",
"Index",
"(",
"aggregate_index_name",
")",
"doc",
"=",
"\"doc\"",
"fo_field",
"=",
"\"published_policy.fo\"",
"fo",
"=",
"\"fo\"",
"fo_mapping",
"=",
"aggregate_index",
".",
"get_field_mapping",
"(",
"fields",
"=",
"[",
"fo_field",
"]",
")",
"fo_mapping",
"=",
"fo_mapping",
"[",
"list",
"(",
"fo_mapping",
".",
"keys",
"(",
")",
")",
"[",
"0",
"]",
"]",
"[",
"\"mappings\"",
"]",
"if",
"doc",
"not",
"in",
"fo_mapping",
":",
"continue",
"fo_mapping",
"=",
"fo_mapping",
"[",
"doc",
"]",
"[",
"fo_field",
"]",
"[",
"\"mapping\"",
"]",
"[",
"fo",
"]",
"fo_type",
"=",
"fo_mapping",
"[",
"\"type\"",
"]",
"if",
"fo_type",
"==",
"\"long\"",
":",
"new_index_name",
"=",
"\"{0}-v{1}\"",
".",
"format",
"(",
"aggregate_index_name",
",",
"version",
")",
"body",
"=",
"{",
"\"properties\"",
":",
"{",
"\"published_policy.fo\"",
":",
"{",
"\"type\"",
":",
"\"text\"",
",",
"\"fields\"",
":",
"{",
"\"keyword\"",
":",
"{",
"\"type\"",
":",
"\"keyword\"",
",",
"\"ignore_above\"",
":",
"256",
"}",
"}",
"}",
"}",
"}",
"Index",
"(",
"new_index_name",
")",
".",
"create",
"(",
")",
"Index",
"(",
"new_index_name",
")",
".",
"put_mapping",
"(",
"doc_type",
"=",
"doc",
",",
"body",
"=",
"body",
")",
"reindex",
"(",
"connections",
".",
"get_connection",
"(",
")",
",",
"aggregate_index_name",
",",
"new_index_name",
")",
"Index",
"(",
"aggregate_index_name",
")",
".",
"delete",
"(",
")",
"for",
"forensic_index",
"in",
"forensic_indexes",
":",
"pass"
] |
ecc9fd434c23d896ccd1f35795ccc047f946ed05
|
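
migrate_indexes only rewrites aggregate indexes whose published_policy.fo field was mapped as long, reindexing each into a new "-v2" index and deleting the original; the forensic loop is currently a no-op. A sketch with placeholder index names:

```python
# Placeholder index names; assumes set_hosts() already pointed at the cluster.
from parsedmarc import elastic

elastic.set_hosts("localhost:9200")
elastic.migrate_indexes(aggregate_indexes=["dmarc_aggregate-2019-01-01"],
                        forensic_indexes=["dmarc_forensic-2019-01-01"])
```
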
test
|
save_aggregate_report_to_elasticsearch
|
Saves a parsed DMARC aggregate report to ElasticSearch
Args:
aggregate_report (OrderedDict): A parsed forensic report
index_suffix (str): The suffix of the name of the index to save to
monthly_indexes (bool): Use monthly indexes instead of daily indexes
Raises:
AlreadySaved
|
parsedmarc/elastic.py
|
def save_aggregate_report_to_elasticsearch(aggregate_report,
index_suffix=None,
monthly_indexes=False):
"""
Saves a parsed DMARC aggregate report to ElasticSearch
Args:
aggregate_report (OrderedDict): A parsed forensic report
index_suffix (str): The suffix of the name of the index to save to
monthly_indexes (bool): Use monthly indexes instead of daily indexes
Raises:
AlreadySaved
"""
logger.debug("Saving aggregate report to Elasticsearch")
aggregate_report = aggregate_report.copy()
metadata = aggregate_report["report_metadata"]
org_name = metadata["org_name"]
report_id = metadata["report_id"]
domain = aggregate_report["policy_published"]["domain"]
begin_date = human_timestamp_to_datetime(metadata["begin_date"])
end_date = human_timestamp_to_datetime(metadata["end_date"])
begin_date_human = begin_date.strftime("%Y-%m-%d %H:%M:%S")
end_date_human = end_date.strftime("%Y-%m-%d %H:%M:%S")
if monthly_indexes:
index_date = begin_date.strftime("%Y-%m")
else:
index_date = begin_date.strftime("%Y-%m-%d")
aggregate_report["begin_date"] = begin_date
aggregate_report["end_date"] = end_date
date_range = [aggregate_report["begin_date"],
aggregate_report["end_date"]]
org_name_query = Q(dict(match=dict(org_name=org_name)))
report_id_query = Q(dict(match=dict(report_id=report_id)))
domain_query = Q(dict(match={"published_policy.domain": domain}))
begin_date_query = Q(dict(match=dict(date_range=begin_date)))
end_date_query = Q(dict(match=dict(date_range=end_date)))
search = Search(index="dmarc_aggregate*")
query = org_name_query & report_id_query & domain_query
query = query & begin_date_query & end_date_query
search.query = query
existing = search.execute()
if len(existing) > 0:
raise AlreadySaved("An aggregate report ID {0} from {1} about {2} "
"with a date range of {3} UTC to {4} UTC already "
"exists in "
"Elasticsearch".format(report_id,
org_name,
domain,
begin_date_human,
end_date_human))
published_policy = _PublishedPolicy(
domain=aggregate_report["policy_published"]["domain"],
adkim=aggregate_report["policy_published"]["adkim"],
aspf=aggregate_report["policy_published"]["aspf"],
p=aggregate_report["policy_published"]["p"],
sp=aggregate_report["policy_published"]["sp"],
pct=aggregate_report["policy_published"]["pct"],
fo=aggregate_report["policy_published"]["fo"]
)
for record in aggregate_report["records"]:
agg_doc = _AggregateReportDoc(
xml_schemea=aggregate_report["xml_schema"],
org_name=metadata["org_name"],
org_email=metadata["org_email"],
org_extra_contact_info=metadata["org_extra_contact_info"],
report_id=metadata["report_id"],
date_range=date_range,
errors=metadata["errors"],
published_policy=published_policy,
source_ip_address=record["source"]["ip_address"],
source_country=record["source"]["country"],
source_reverse_dns=record["source"]["reverse_dns"],
source_base_domain=record["source"]["base_domain"],
message_count=record["count"],
disposition=record["policy_evaluated"]["disposition"],
dkim_aligned=record["policy_evaluated"]["dkim"] == "pass",
spf_aligned=record["policy_evaluated"]["spf"] == "pass",
header_from=record["identifiers"]["header_from"],
envelope_from=record["identifiers"]["envelope_from"],
envelope_to=record["identifiers"]["envelope_to"]
)
for override in record["policy_evaluated"]["policy_override_reasons"]:
agg_doc.add_policy_override(type_=override["type"],
comment=override["comment"])
for dkim_result in record["auth_results"]["dkim"]:
agg_doc.add_dkim_result(domain=dkim_result["domain"],
selector=dkim_result["selector"],
result=dkim_result["result"])
for spf_result in record["auth_results"]["spf"]:
agg_doc.add_spf_result(domain=spf_result["domain"],
scope=spf_result["scope"],
result=spf_result["result"])
index = "dmarc_aggregate"
if index_suffix:
index = "{0}_{1}".format(index, index_suffix)
index = "{0}-{1}".format(index, index_date)
create_indexes([index])
agg_doc.meta.index = index
try:
agg_doc.save()
except Exception as e:
raise ElasticsearchError(
"Elasticsearch error: {0}".format(e.__str__()))
|
def save_aggregate_report_to_elasticsearch(aggregate_report,
index_suffix=None,
monthly_indexes=False):
"""
Saves a parsed DMARC aggregate report to ElasticSearch
Args:
aggregate_report (OrderedDict): A parsed forensic report
index_suffix (str): The suffix of the name of the index to save to
monthly_indexes (bool): Use monthly indexes instead of daily indexes
Raises:
AlreadySaved
"""
logger.debug("Saving aggregate report to Elasticsearch")
aggregate_report = aggregate_report.copy()
metadata = aggregate_report["report_metadata"]
org_name = metadata["org_name"]
report_id = metadata["report_id"]
domain = aggregate_report["policy_published"]["domain"]
begin_date = human_timestamp_to_datetime(metadata["begin_date"])
end_date = human_timestamp_to_datetime(metadata["end_date"])
begin_date_human = begin_date.strftime("%Y-%m-%d %H:%M:%S")
end_date_human = end_date.strftime("%Y-%m-%d %H:%M:%S")
if monthly_indexes:
index_date = begin_date.strftime("%Y-%m")
else:
index_date = begin_date.strftime("%Y-%m-%d")
aggregate_report["begin_date"] = begin_date
aggregate_report["end_date"] = end_date
date_range = [aggregate_report["begin_date"],
aggregate_report["end_date"]]
org_name_query = Q(dict(match=dict(org_name=org_name)))
report_id_query = Q(dict(match=dict(report_id=report_id)))
domain_query = Q(dict(match={"published_policy.domain": domain}))
begin_date_query = Q(dict(match=dict(date_range=begin_date)))
end_date_query = Q(dict(match=dict(date_range=end_date)))
search = Search(index="dmarc_aggregate*")
query = org_name_query & report_id_query & domain_query
query = query & begin_date_query & end_date_query
search.query = query
existing = search.execute()
if len(existing) > 0:
raise AlreadySaved("An aggregate report ID {0} from {1} about {2} "
"with a date range of {3} UTC to {4} UTC already "
"exists in "
"Elasticsearch".format(report_id,
org_name,
domain,
begin_date_human,
end_date_human))
published_policy = _PublishedPolicy(
domain=aggregate_report["policy_published"]["domain"],
adkim=aggregate_report["policy_published"]["adkim"],
aspf=aggregate_report["policy_published"]["aspf"],
p=aggregate_report["policy_published"]["p"],
sp=aggregate_report["policy_published"]["sp"],
pct=aggregate_report["policy_published"]["pct"],
fo=aggregate_report["policy_published"]["fo"]
)
for record in aggregate_report["records"]:
agg_doc = _AggregateReportDoc(
xml_schemea=aggregate_report["xml_schema"],
org_name=metadata["org_name"],
org_email=metadata["org_email"],
org_extra_contact_info=metadata["org_extra_contact_info"],
report_id=metadata["report_id"],
date_range=date_range,
errors=metadata["errors"],
published_policy=published_policy,
source_ip_address=record["source"]["ip_address"],
source_country=record["source"]["country"],
source_reverse_dns=record["source"]["reverse_dns"],
source_base_domain=record["source"]["base_domain"],
message_count=record["count"],
disposition=record["policy_evaluated"]["disposition"],
dkim_aligned=record["policy_evaluated"]["dkim"] == "pass",
spf_aligned=record["policy_evaluated"]["spf"] == "pass",
header_from=record["identifiers"]["header_from"],
envelope_from=record["identifiers"]["envelope_from"],
envelope_to=record["identifiers"]["envelope_to"]
)
for override in record["policy_evaluated"]["policy_override_reasons"]:
agg_doc.add_policy_override(type_=override["type"],
comment=override["comment"])
for dkim_result in record["auth_results"]["dkim"]:
agg_doc.add_dkim_result(domain=dkim_result["domain"],
selector=dkim_result["selector"],
result=dkim_result["result"])
for spf_result in record["auth_results"]["spf"]:
agg_doc.add_spf_result(domain=spf_result["domain"],
scope=spf_result["scope"],
result=spf_result["result"])
index = "dmarc_aggregate"
if index_suffix:
index = "{0}_{1}".format(index, index_suffix)
index = "{0}-{1}".format(index, index_date)
create_indexes([index])
agg_doc.meta.index = index
try:
agg_doc.save()
except Exception as e:
raise ElasticsearchError(
"Elasticsearch error: {0}".format(e.__str__()))
|
[
"Saves",
"a",
"parsed",
"DMARC",
"aggregate",
"report",
"to",
"ElasticSearch"
] |
domainaware/parsedmarc
|
python
|
https://github.com/domainaware/parsedmarc/blob/ecc9fd434c23d896ccd1f35795ccc047f946ed05/parsedmarc/elastic.py#L272-L384
|
[
"def",
"save_aggregate_report_to_elasticsearch",
"(",
"aggregate_report",
",",
"index_suffix",
"=",
"None",
",",
"monthly_indexes",
"=",
"False",
")",
":",
"logger",
".",
"debug",
"(",
"\"Saving aggregate report to Elasticsearch\"",
")",
"aggregate_report",
"=",
"aggregate_report",
".",
"copy",
"(",
")",
"metadata",
"=",
"aggregate_report",
"[",
"\"report_metadata\"",
"]",
"org_name",
"=",
"metadata",
"[",
"\"org_name\"",
"]",
"report_id",
"=",
"metadata",
"[",
"\"report_id\"",
"]",
"domain",
"=",
"aggregate_report",
"[",
"\"policy_published\"",
"]",
"[",
"\"domain\"",
"]",
"begin_date",
"=",
"human_timestamp_to_datetime",
"(",
"metadata",
"[",
"\"begin_date\"",
"]",
")",
"end_date",
"=",
"human_timestamp_to_datetime",
"(",
"metadata",
"[",
"\"end_date\"",
"]",
")",
"begin_date_human",
"=",
"begin_date",
".",
"strftime",
"(",
"\"%Y-%m-%d %H:%M:%S\"",
")",
"end_date_human",
"=",
"end_date",
".",
"strftime",
"(",
"\"%Y-%m-%d %H:%M:%S\"",
")",
"if",
"monthly_indexes",
":",
"index_date",
"=",
"begin_date",
".",
"strftime",
"(",
"\"%Y-%m\"",
")",
"else",
":",
"index_date",
"=",
"begin_date",
".",
"strftime",
"(",
"\"%Y-%m-%d\"",
")",
"aggregate_report",
"[",
"\"begin_date\"",
"]",
"=",
"begin_date",
"aggregate_report",
"[",
"\"end_date\"",
"]",
"=",
"end_date",
"date_range",
"=",
"[",
"aggregate_report",
"[",
"\"begin_date\"",
"]",
",",
"aggregate_report",
"[",
"\"end_date\"",
"]",
"]",
"org_name_query",
"=",
"Q",
"(",
"dict",
"(",
"match",
"=",
"dict",
"(",
"org_name",
"=",
"org_name",
")",
")",
")",
"report_id_query",
"=",
"Q",
"(",
"dict",
"(",
"match",
"=",
"dict",
"(",
"report_id",
"=",
"report_id",
")",
")",
")",
"domain_query",
"=",
"Q",
"(",
"dict",
"(",
"match",
"=",
"{",
"\"published_policy.domain\"",
":",
"domain",
"}",
")",
")",
"begin_date_query",
"=",
"Q",
"(",
"dict",
"(",
"match",
"=",
"dict",
"(",
"date_range",
"=",
"begin_date",
")",
")",
")",
"end_date_query",
"=",
"Q",
"(",
"dict",
"(",
"match",
"=",
"dict",
"(",
"date_range",
"=",
"end_date",
")",
")",
")",
"search",
"=",
"Search",
"(",
"index",
"=",
"\"dmarc_aggregate*\"",
")",
"query",
"=",
"org_name_query",
"&",
"report_id_query",
"&",
"domain_query",
"query",
"=",
"query",
"&",
"begin_date_query",
"&",
"end_date_query",
"search",
".",
"query",
"=",
"query",
"existing",
"=",
"search",
".",
"execute",
"(",
")",
"if",
"len",
"(",
"existing",
")",
">",
"0",
":",
"raise",
"AlreadySaved",
"(",
"\"An aggregate report ID {0} from {1} about {2} \"",
"\"with a date range of {3} UTC to {4} UTC already \"",
"\"exists in \"",
"\"Elasticsearch\"",
".",
"format",
"(",
"report_id",
",",
"org_name",
",",
"domain",
",",
"begin_date_human",
",",
"end_date_human",
")",
")",
"published_policy",
"=",
"_PublishedPolicy",
"(",
"domain",
"=",
"aggregate_report",
"[",
"\"policy_published\"",
"]",
"[",
"\"domain\"",
"]",
",",
"adkim",
"=",
"aggregate_report",
"[",
"\"policy_published\"",
"]",
"[",
"\"adkim\"",
"]",
",",
"aspf",
"=",
"aggregate_report",
"[",
"\"policy_published\"",
"]",
"[",
"\"aspf\"",
"]",
",",
"p",
"=",
"aggregate_report",
"[",
"\"policy_published\"",
"]",
"[",
"\"p\"",
"]",
",",
"sp",
"=",
"aggregate_report",
"[",
"\"policy_published\"",
"]",
"[",
"\"sp\"",
"]",
",",
"pct",
"=",
"aggregate_report",
"[",
"\"policy_published\"",
"]",
"[",
"\"pct\"",
"]",
",",
"fo",
"=",
"aggregate_report",
"[",
"\"policy_published\"",
"]",
"[",
"\"fo\"",
"]",
")",
"for",
"record",
"in",
"aggregate_report",
"[",
"\"records\"",
"]",
":",
"agg_doc",
"=",
"_AggregateReportDoc",
"(",
"xml_schemea",
"=",
"aggregate_report",
"[",
"\"xml_schema\"",
"]",
",",
"org_name",
"=",
"metadata",
"[",
"\"org_name\"",
"]",
",",
"org_email",
"=",
"metadata",
"[",
"\"org_email\"",
"]",
",",
"org_extra_contact_info",
"=",
"metadata",
"[",
"\"org_extra_contact_info\"",
"]",
",",
"report_id",
"=",
"metadata",
"[",
"\"report_id\"",
"]",
",",
"date_range",
"=",
"date_range",
",",
"errors",
"=",
"metadata",
"[",
"\"errors\"",
"]",
",",
"published_policy",
"=",
"published_policy",
",",
"source_ip_address",
"=",
"record",
"[",
"\"source\"",
"]",
"[",
"\"ip_address\"",
"]",
",",
"source_country",
"=",
"record",
"[",
"\"source\"",
"]",
"[",
"\"country\"",
"]",
",",
"source_reverse_dns",
"=",
"record",
"[",
"\"source\"",
"]",
"[",
"\"reverse_dns\"",
"]",
",",
"source_base_domain",
"=",
"record",
"[",
"\"source\"",
"]",
"[",
"\"base_domain\"",
"]",
",",
"message_count",
"=",
"record",
"[",
"\"count\"",
"]",
",",
"disposition",
"=",
"record",
"[",
"\"policy_evaluated\"",
"]",
"[",
"\"disposition\"",
"]",
",",
"dkim_aligned",
"=",
"record",
"[",
"\"policy_evaluated\"",
"]",
"[",
"\"dkim\"",
"]",
"==",
"\"pass\"",
",",
"spf_aligned",
"=",
"record",
"[",
"\"policy_evaluated\"",
"]",
"[",
"\"spf\"",
"]",
"==",
"\"pass\"",
",",
"header_from",
"=",
"record",
"[",
"\"identifiers\"",
"]",
"[",
"\"header_from\"",
"]",
",",
"envelope_from",
"=",
"record",
"[",
"\"identifiers\"",
"]",
"[",
"\"envelope_from\"",
"]",
",",
"envelope_to",
"=",
"record",
"[",
"\"identifiers\"",
"]",
"[",
"\"envelope_to\"",
"]",
")",
"for",
"override",
"in",
"record",
"[",
"\"policy_evaluated\"",
"]",
"[",
"\"policy_override_reasons\"",
"]",
":",
"agg_doc",
".",
"add_policy_override",
"(",
"type_",
"=",
"override",
"[",
"\"type\"",
"]",
",",
"comment",
"=",
"override",
"[",
"\"comment\"",
"]",
")",
"for",
"dkim_result",
"in",
"record",
"[",
"\"auth_results\"",
"]",
"[",
"\"dkim\"",
"]",
":",
"agg_doc",
".",
"add_dkim_result",
"(",
"domain",
"=",
"dkim_result",
"[",
"\"domain\"",
"]",
",",
"selector",
"=",
"dkim_result",
"[",
"\"selector\"",
"]",
",",
"result",
"=",
"dkim_result",
"[",
"\"result\"",
"]",
")",
"for",
"spf_result",
"in",
"record",
"[",
"\"auth_results\"",
"]",
"[",
"\"spf\"",
"]",
":",
"agg_doc",
".",
"add_spf_result",
"(",
"domain",
"=",
"spf_result",
"[",
"\"domain\"",
"]",
",",
"scope",
"=",
"spf_result",
"[",
"\"scope\"",
"]",
",",
"result",
"=",
"spf_result",
"[",
"\"result\"",
"]",
")",
"index",
"=",
"\"dmarc_aggregate\"",
"if",
"index_suffix",
":",
"index",
"=",
"\"{0}_{1}\"",
".",
"format",
"(",
"index",
",",
"index_suffix",
")",
"index",
"=",
"\"{0}-{1}\"",
".",
"format",
"(",
"index",
",",
"index_date",
")",
"create_indexes",
"(",
"[",
"index",
"]",
")",
"agg_doc",
".",
"meta",
".",
"index",
"=",
"index",
"try",
":",
"agg_doc",
".",
"save",
"(",
")",
"except",
"Exception",
"as",
"e",
":",
"raise",
"ElasticsearchError",
"(",
"\"Elasticsearch error: {0}\"",
".",
"format",
"(",
"e",
".",
"__str__",
"(",
")",
")",
")"
] |
ecc9fd434c23d896ccd1f35795ccc047f946ed05
|
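
An end-to-end save sketch for the aggregate path. The parse_aggregate_report_file call is an assumption about parsedmarc's public parser; the file path, hosts, and index suffix are placeholders, and AlreadySaved is the duplicate-detection exception raised above.

```python
# parse_aggregate_report_file is assumed to be parsedmarc's public parser for
# aggregate report files; path, hosts and index_suffix are placeholders.
import parsedmarc
from parsedmarc import elastic

elastic.set_hosts("localhost:9200")
report = parsedmarc.parse_aggregate_report_file("aggregate_report.xml")

try:
    elastic.save_aggregate_report_to_elasticsearch(report,
                                                   index_suffix="example",
                                                   monthly_indexes=True)
except elastic.AlreadySaved as error:
    # Duplicates are matched on org_name, report_id, domain and date range,
    # so reprocessing the same mailbox can simply skip this report.
    print(error)
```
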
test
|
save_forensic_report_to_elasticsearch
|
Saves a parsed DMARC forensic report to ElasticSearch
Args:
forensic_report (OrderedDict): A parsed forensic report
index_suffix (str): The suffix of the name of the index to save to
monthly_indexes (bool): Use monthly indexes instead of daily
indexes
Raises:
AlreadySaved
|
parsedmarc/elastic.py
|
def save_forensic_report_to_elasticsearch(forensic_report,
index_suffix=None,
monthly_indexes=False):
"""
Saves a parsed DMARC forensic report to ElasticSearch
Args:
forensic_report (OrderedDict): A parsed forensic report
index_suffix (str): The suffix of the name of the index to save to
monthly_indexes (bool): Use monthly indexes instead of daily
indexes
Raises:
AlreadySaved
"""
logger.debug("Saving forensic report to Elasticsearch")
forensic_report = forensic_report.copy()
sample_date = None
if forensic_report["parsed_sample"]["date"] is not None:
sample_date = forensic_report["parsed_sample"]["date"]
sample_date = human_timestamp_to_datetime(sample_date)
original_headers = forensic_report["parsed_sample"]["headers"]
headers = OrderedDict()
for original_header in original_headers:
headers[original_header.lower()] = original_headers[original_header]
arrival_date_human = forensic_report["arrival_date_utc"]
arrival_date = human_timestamp_to_datetime(arrival_date_human)
search = Search(index="dmarc_forensic*")
arrival_query = {"match": {"arrival_date": arrival_date}}
q = Q(arrival_query)
from_ = None
to_ = None
subject = None
if "from" in headers:
from_ = headers["from"]
from_query = {"match": {"sample.headers.from": from_}}
q = q & Q(from_query)
if "to" in headers:
to_ = headers["to"]
to_query = {"match": {"sample.headers.to": to_}}
q = q & Q(to_query)
if "subject" in headers:
subject = headers["subject"]
subject_query = {"match": {"sample.headers.subject": subject}}
q = q & Q(subject_query)
search.query = q
existing = search.execute()
if len(existing) > 0:
raise AlreadySaved("A forensic sample to {0} from {1} "
"with a subject of {2} and arrival date of {3} "
"already exists in "
"Elasticsearch".format(to_,
from_,
subject,
arrival_date_human
))
parsed_sample = forensic_report["parsed_sample"]
sample = _ForensicSampleDoc(
raw=forensic_report["sample"],
headers=headers,
headers_only=forensic_report["sample_headers_only"],
date=sample_date,
subject=forensic_report["parsed_sample"]["subject"],
filename_safe_subject=parsed_sample["filename_safe_subject"],
body=forensic_report["parsed_sample"]["body"]
)
for address in forensic_report["parsed_sample"]["to"]:
sample.add_to(display_name=address["display_name"],
address=address["address"])
for address in forensic_report["parsed_sample"]["reply_to"]:
sample.add_reply_to(display_name=address["display_name"],
address=address["address"])
for address in forensic_report["parsed_sample"]["cc"]:
sample.add_cc(display_name=address["display_name"],
address=address["address"])
for address in forensic_report["parsed_sample"]["bcc"]:
sample.add_bcc(display_name=address["display_name"],
address=address["address"])
for attachment in forensic_report["parsed_sample"]["attachments"]:
sample.add_attachment(filename=attachment["filename"],
content_type=attachment["mail_content_type"],
sha256=attachment["sha256"])
try:
forensic_doc = _ForensicReportDoc(
feedback_type=forensic_report["feedback_type"],
user_agent=forensic_report["user_agent"],
version=forensic_report["version"],
original_mail_from=forensic_report["original_mail_from"],
arrival_date=arrival_date,
domain=forensic_report["reported_domain"],
original_envelope_id=forensic_report["original_envelope_id"],
authentication_results=forensic_report["authentication_results"],
delivery_results=forensic_report["delivery_result"],
source_ip_address=forensic_report["source"]["ip_address"],
source_country=forensic_report["source"]["country"],
source_reverse_dns=forensic_report["source"]["reverse_dns"],
source_base_domain=forensic_report["source"]["base_domain"],
authentication_mechanisms=forensic_report[
"authentication_mechanisms"],
auth_failure=forensic_report["auth_failure"],
dkim_domain=forensic_report["dkim_domain"],
original_rcpt_to=forensic_report["original_rcpt_to"],
sample=sample
)
index = "dmarc_forensic"
if index_suffix:
index = "{0}_{1}".format(index, index_suffix)
if monthly_indexes:
index_date = arrival_date.strftime("%Y-%m")
else:
index_date = arrival_date.strftime("%Y-%m-%d")
index = "{0}-{1}".format(index, index_date)
create_indexes([index])
forensic_doc.meta.index = index
try:
forensic_doc.save()
except Exception as e:
raise ElasticsearchError(
"Elasticsearch error: {0}".format(e.__str__()))
except KeyError as e:
raise InvalidForensicReport(
"Forensic report missing required field: {0}".format(e.__str__()))
|
def save_forensic_report_to_elasticsearch(forensic_report,
index_suffix=None,
monthly_indexes=False):
"""
Saves a parsed DMARC forensic report to ElasticSearch
Args:
forensic_report (OrderedDict): A parsed forensic report
index_suffix (str): The suffix of the name of the index to save to
monthly_indexes (bool): Use monthly indexes instead of daily
indexes
Raises:
AlreadySaved
"""
logger.debug("Saving forensic report to Elasticsearch")
forensic_report = forensic_report.copy()
sample_date = None
if forensic_report["parsed_sample"]["date"] is not None:
sample_date = forensic_report["parsed_sample"]["date"]
sample_date = human_timestamp_to_datetime(sample_date)
original_headers = forensic_report["parsed_sample"]["headers"]
headers = OrderedDict()
for original_header in original_headers:
headers[original_header.lower()] = original_headers[original_header]
arrival_date_human = forensic_report["arrival_date_utc"]
arrival_date = human_timestamp_to_datetime(arrival_date_human)
search = Search(index="dmarc_forensic*")
arrival_query = {"match": {"arrival_date": arrival_date}}
q = Q(arrival_query)
from_ = None
to_ = None
subject = None
if "from" in headers:
from_ = headers["from"]
from_query = {"match": {"sample.headers.from": from_}}
q = q & Q(from_query)
if "to" in headers:
to_ = headers["to"]
to_query = {"match": {"sample.headers.to": to_}}
q = q & Q(to_query)
if "subject" in headers:
subject = headers["subject"]
subject_query = {"match": {"sample.headers.subject": subject}}
q = q & Q(subject_query)
search.query = q
existing = search.execute()
if len(existing) > 0:
raise AlreadySaved("A forensic sample to {0} from {1} "
"with a subject of {2} and arrival date of {3} "
"already exists in "
"Elasticsearch".format(to_,
from_,
subject,
arrival_date_human
))
parsed_sample = forensic_report["parsed_sample"]
sample = _ForensicSampleDoc(
raw=forensic_report["sample"],
headers=headers,
headers_only=forensic_report["sample_headers_only"],
date=sample_date,
subject=forensic_report["parsed_sample"]["subject"],
filename_safe_subject=parsed_sample["filename_safe_subject"],
body=forensic_report["parsed_sample"]["body"]
)
for address in forensic_report["parsed_sample"]["to"]:
sample.add_to(display_name=address["display_name"],
address=address["address"])
for address in forensic_report["parsed_sample"]["reply_to"]:
sample.add_reply_to(display_name=address["display_name"],
address=address["address"])
for address in forensic_report["parsed_sample"]["cc"]:
sample.add_cc(display_name=address["display_name"],
address=address["address"])
for address in forensic_report["parsed_sample"]["bcc"]:
sample.add_bcc(display_name=address["display_name"],
address=address["address"])
for attachment in forensic_report["parsed_sample"]["attachments"]:
sample.add_attachment(filename=attachment["filename"],
content_type=attachment["mail_content_type"],
sha256=attachment["sha256"])
try:
forensic_doc = _ForensicReportDoc(
feedback_type=forensic_report["feedback_type"],
user_agent=forensic_report["user_agent"],
version=forensic_report["version"],
original_mail_from=forensic_report["original_mail_from"],
arrival_date=arrival_date,
domain=forensic_report["reported_domain"],
original_envelope_id=forensic_report["original_envelope_id"],
authentication_results=forensic_report["authentication_results"],
delivery_results=forensic_report["delivery_result"],
source_ip_address=forensic_report["source"]["ip_address"],
source_country=forensic_report["source"]["country"],
source_reverse_dns=forensic_report["source"]["reverse_dns"],
source_base_domain=forensic_report["source"]["base_domain"],
authentication_mechanisms=forensic_report[
"authentication_mechanisms"],
auth_failure=forensic_report["auth_failure"],
dkim_domain=forensic_report["dkim_domain"],
original_rcpt_to=forensic_report["original_rcpt_to"],
sample=sample
)
index = "dmarc_forensic"
if index_suffix:
index = "{0}_{1}".format(index, index_suffix)
if monthly_indexes:
index_date = arrival_date.strftime("%Y-%m")
else:
index_date = arrival_date.strftime("%Y-%m-%d")
index = "{0}-{1}".format(index, index_date)
create_indexes([index])
forensic_doc.meta.index = index
try:
forensic_doc.save()
except Exception as e:
raise ElasticsearchError(
"Elasticsearch error: {0}".format(e.__str__()))
except KeyError as e:
raise InvalidForensicReport(
"Forensic report missing required field: {0}".format(e.__str__()))
|
[
"Saves",
"a",
"parsed",
"DMARC",
"forensic",
"report",
"to",
"ElasticSearch"
] |
domainaware/parsedmarc
|
python
|
https://github.com/domainaware/parsedmarc/blob/ecc9fd434c23d896ccd1f35795ccc047f946ed05/parsedmarc/elastic.py#L387-L517
|
[
"def",
"save_forensic_report_to_elasticsearch",
"(",
"forensic_report",
",",
"index_suffix",
"=",
"None",
",",
"monthly_indexes",
"=",
"False",
")",
":",
"logger",
".",
"debug",
"(",
"\"Saving forensic report to Elasticsearch\"",
")",
"forensic_report",
"=",
"forensic_report",
".",
"copy",
"(",
")",
"sample_date",
"=",
"None",
"if",
"forensic_report",
"[",
"\"parsed_sample\"",
"]",
"[",
"\"date\"",
"]",
"is",
"not",
"None",
":",
"sample_date",
"=",
"forensic_report",
"[",
"\"parsed_sample\"",
"]",
"[",
"\"date\"",
"]",
"sample_date",
"=",
"human_timestamp_to_datetime",
"(",
"sample_date",
")",
"original_headers",
"=",
"forensic_report",
"[",
"\"parsed_sample\"",
"]",
"[",
"\"headers\"",
"]",
"headers",
"=",
"OrderedDict",
"(",
")",
"for",
"original_header",
"in",
"original_headers",
":",
"headers",
"[",
"original_header",
".",
"lower",
"(",
")",
"]",
"=",
"original_headers",
"[",
"original_header",
"]",
"arrival_date_human",
"=",
"forensic_report",
"[",
"\"arrival_date_utc\"",
"]",
"arrival_date",
"=",
"human_timestamp_to_datetime",
"(",
"arrival_date_human",
")",
"search",
"=",
"Search",
"(",
"index",
"=",
"\"dmarc_forensic*\"",
")",
"arrival_query",
"=",
"{",
"\"match\"",
":",
"{",
"\"arrival_date\"",
":",
"arrival_date",
"}",
"}",
"q",
"=",
"Q",
"(",
"arrival_query",
")",
"from_",
"=",
"None",
"to_",
"=",
"None",
"subject",
"=",
"None",
"if",
"\"from\"",
"in",
"headers",
":",
"from_",
"=",
"headers",
"[",
"\"from\"",
"]",
"from_query",
"=",
"{",
"\"match\"",
":",
"{",
"\"sample.headers.from\"",
":",
"from_",
"}",
"}",
"q",
"=",
"q",
"&",
"Q",
"(",
"from_query",
")",
"if",
"\"to\"",
"in",
"headers",
":",
"to_",
"=",
"headers",
"[",
"\"to\"",
"]",
"to_query",
"=",
"{",
"\"match\"",
":",
"{",
"\"sample.headers.to\"",
":",
"to_",
"}",
"}",
"q",
"=",
"q",
"&",
"Q",
"(",
"to_query",
")",
"if",
"\"subject\"",
"in",
"headers",
":",
"subject",
"=",
"headers",
"[",
"\"subject\"",
"]",
"subject_query",
"=",
"{",
"\"match\"",
":",
"{",
"\"sample.headers.subject\"",
":",
"subject",
"}",
"}",
"q",
"=",
"q",
"&",
"Q",
"(",
"subject_query",
")",
"search",
".",
"query",
"=",
"q",
"existing",
"=",
"search",
".",
"execute",
"(",
")",
"if",
"len",
"(",
"existing",
")",
">",
"0",
":",
"raise",
"AlreadySaved",
"(",
"\"A forensic sample to {0} from {1} \"",
"\"with a subject of {2} and arrival date of {3} \"",
"\"already exists in \"",
"\"Elasticsearch\"",
".",
"format",
"(",
"to_",
",",
"from_",
",",
"subject",
",",
"arrival_date_human",
")",
")",
"parsed_sample",
"=",
"forensic_report",
"[",
"\"parsed_sample\"",
"]",
"sample",
"=",
"_ForensicSampleDoc",
"(",
"raw",
"=",
"forensic_report",
"[",
"\"sample\"",
"]",
",",
"headers",
"=",
"headers",
",",
"headers_only",
"=",
"forensic_report",
"[",
"\"sample_headers_only\"",
"]",
",",
"date",
"=",
"sample_date",
",",
"subject",
"=",
"forensic_report",
"[",
"\"parsed_sample\"",
"]",
"[",
"\"subject\"",
"]",
",",
"filename_safe_subject",
"=",
"parsed_sample",
"[",
"\"filename_safe_subject\"",
"]",
",",
"body",
"=",
"forensic_report",
"[",
"\"parsed_sample\"",
"]",
"[",
"\"body\"",
"]",
")",
"for",
"address",
"in",
"forensic_report",
"[",
"\"parsed_sample\"",
"]",
"[",
"\"to\"",
"]",
":",
"sample",
".",
"add_to",
"(",
"display_name",
"=",
"address",
"[",
"\"display_name\"",
"]",
",",
"address",
"=",
"address",
"[",
"\"address\"",
"]",
")",
"for",
"address",
"in",
"forensic_report",
"[",
"\"parsed_sample\"",
"]",
"[",
"\"reply_to\"",
"]",
":",
"sample",
".",
"add_reply_to",
"(",
"display_name",
"=",
"address",
"[",
"\"display_name\"",
"]",
",",
"address",
"=",
"address",
"[",
"\"address\"",
"]",
")",
"for",
"address",
"in",
"forensic_report",
"[",
"\"parsed_sample\"",
"]",
"[",
"\"cc\"",
"]",
":",
"sample",
".",
"add_cc",
"(",
"display_name",
"=",
"address",
"[",
"\"display_name\"",
"]",
",",
"address",
"=",
"address",
"[",
"\"address\"",
"]",
")",
"for",
"address",
"in",
"forensic_report",
"[",
"\"parsed_sample\"",
"]",
"[",
"\"bcc\"",
"]",
":",
"sample",
".",
"add_bcc",
"(",
"display_name",
"=",
"address",
"[",
"\"display_name\"",
"]",
",",
"address",
"=",
"address",
"[",
"\"address\"",
"]",
")",
"for",
"attachment",
"in",
"forensic_report",
"[",
"\"parsed_sample\"",
"]",
"[",
"\"attachments\"",
"]",
":",
"sample",
".",
"add_attachment",
"(",
"filename",
"=",
"attachment",
"[",
"\"filename\"",
"]",
",",
"content_type",
"=",
"attachment",
"[",
"\"mail_content_type\"",
"]",
",",
"sha256",
"=",
"attachment",
"[",
"\"sha256\"",
"]",
")",
"try",
":",
"forensic_doc",
"=",
"_ForensicReportDoc",
"(",
"feedback_type",
"=",
"forensic_report",
"[",
"\"feedback_type\"",
"]",
",",
"user_agent",
"=",
"forensic_report",
"[",
"\"user_agent\"",
"]",
",",
"version",
"=",
"forensic_report",
"[",
"\"version\"",
"]",
",",
"original_mail_from",
"=",
"forensic_report",
"[",
"\"original_mail_from\"",
"]",
",",
"arrival_date",
"=",
"arrival_date",
",",
"domain",
"=",
"forensic_report",
"[",
"\"reported_domain\"",
"]",
",",
"original_envelope_id",
"=",
"forensic_report",
"[",
"\"original_envelope_id\"",
"]",
",",
"authentication_results",
"=",
"forensic_report",
"[",
"\"authentication_results\"",
"]",
",",
"delivery_results",
"=",
"forensic_report",
"[",
"\"delivery_result\"",
"]",
",",
"source_ip_address",
"=",
"forensic_report",
"[",
"\"source\"",
"]",
"[",
"\"ip_address\"",
"]",
",",
"source_country",
"=",
"forensic_report",
"[",
"\"source\"",
"]",
"[",
"\"country\"",
"]",
",",
"source_reverse_dns",
"=",
"forensic_report",
"[",
"\"source\"",
"]",
"[",
"\"reverse_dns\"",
"]",
",",
"source_base_domain",
"=",
"forensic_report",
"[",
"\"source\"",
"]",
"[",
"\"base_domain\"",
"]",
",",
"authentication_mechanisms",
"=",
"forensic_report",
"[",
"\"authentication_mechanisms\"",
"]",
",",
"auth_failure",
"=",
"forensic_report",
"[",
"\"auth_failure\"",
"]",
",",
"dkim_domain",
"=",
"forensic_report",
"[",
"\"dkim_domain\"",
"]",
",",
"original_rcpt_to",
"=",
"forensic_report",
"[",
"\"original_rcpt_to\"",
"]",
",",
"sample",
"=",
"sample",
")",
"index",
"=",
"\"dmarc_forensic\"",
"if",
"index_suffix",
":",
"index",
"=",
"\"{0}_{1}\"",
".",
"format",
"(",
"index",
",",
"index_suffix",
")",
"if",
"monthly_indexes",
":",
"index_date",
"=",
"arrival_date",
".",
"strftime",
"(",
"\"%Y-%m\"",
")",
"else",
":",
"index_date",
"=",
"arrival_date",
".",
"strftime",
"(",
"\"%Y-%m-%d\"",
")",
"index",
"=",
"\"{0}-{1}\"",
".",
"format",
"(",
"index",
",",
"index_date",
")",
"create_indexes",
"(",
"[",
"index",
"]",
")",
"forensic_doc",
".",
"meta",
".",
"index",
"=",
"index",
"try",
":",
"forensic_doc",
".",
"save",
"(",
")",
"except",
"Exception",
"as",
"e",
":",
"raise",
"ElasticsearchError",
"(",
"\"Elasticsearch error: {0}\"",
".",
"format",
"(",
"e",
".",
"__str__",
"(",
")",
")",
")",
"except",
"KeyError",
"as",
"e",
":",
"raise",
"InvalidForensicReport",
"(",
"\"Forensic report missing required field: {0}\"",
".",
"format",
"(",
"e",
".",
"__str__",
"(",
")",
")",
")"
] |
ecc9fd434c23d896ccd1f35795ccc047f946ed05
|
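
The forensic save mirrors the aggregate path but additionally raises InvalidForensicReport when a required field is missing. A brief sketch; parse_report_email and its report_type/report return shape are assumptions about parsedmarc's public API, and the file name is a placeholder.

```python
# parse_report_email and its return shape are assumptions; the file name is a placeholder.
import parsedmarc
from parsedmarc import elastic

elastic.set_hosts("localhost:9200")
with open("forensic_report.eml", "rb") as message:
    parsed = parsedmarc.parse_report_email(message.read())

if parsed["report_type"] == "forensic":
    try:
        elastic.save_forensic_report_to_elasticsearch(parsed["report"])
    except elastic.AlreadySaved:
        pass  # the same sample was indexed earlier
    except elastic.InvalidForensicReport as error:
        print("Skipping malformed report: {0}".format(error))
```
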
test
|
KafkaClient.strip_metadata
|
Duplicates org_name, org_email and report_id into JSON root
and removes report_metadata key to bring it more inline
with Elastic output.
|
parsedmarc/kafkaclient.py
|
def strip_metadata(report):
"""
Duplicates org_name, org_email and report_id into JSON root
and removes report_metadata key to bring it more inline
with Elastic output.
"""
report['org_name'] = report['report_metadata']['org_name']
report['org_email'] = report['report_metadata']['org_email']
report['report_id'] = report['report_metadata']['report_id']
report.pop('report_metadata')
return report
|
def strip_metadata(report):
"""
Duplicates org_name, org_email and report_id into JSON root
and removes report_metadata key to bring it more inline
with Elastic output.
"""
report['org_name'] = report['report_metadata']['org_name']
report['org_email'] = report['report_metadata']['org_email']
report['report_id'] = report['report_metadata']['report_id']
report.pop('report_metadata')
return report
|
[
"Duplicates",
"org_name",
"org_email",
"and",
"report_id",
"into",
"JSON",
"root",
"and",
"removes",
"report_metadata",
"key",
"to",
"bring",
"it",
"more",
"inline",
"with",
"Elastic",
"output",
"."
] |
domainaware/parsedmarc
|
python
|
https://github.com/domainaware/parsedmarc/blob/ecc9fd434c23d896ccd1f35795ccc047f946ed05/parsedmarc/kafkaclient.py#L58-L69
|
[
"def",
"strip_metadata",
"(",
"report",
")",
":",
"report",
"[",
"'org_name'",
"]",
"=",
"report",
"[",
"'report_metadata'",
"]",
"[",
"'org_name'",
"]",
"report",
"[",
"'org_email'",
"]",
"=",
"report",
"[",
"'report_metadata'",
"]",
"[",
"'org_email'",
"]",
"report",
"[",
"'report_id'",
"]",
"=",
"report",
"[",
"'report_metadata'",
"]",
"[",
"'report_id'",
"]",
"report",
".",
"pop",
"(",
"'report_metadata'",
")",
"return",
"report"
] |
ecc9fd434c23d896ccd1f35795ccc047f946ed05
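
A short usage sketch for strip_metadata as defined above, assuming the function is in scope (in parsedmarc it lives on KafkaClient); the report dict is a made-up minimal example, not real parser output.

report = {
    "report_metadata": {"org_name": "example.net",
                        "org_email": "noreply@example.net",
                        "report_id": "abc123"},
    "records": [],
}

flattened = strip_metadata(report)
# org_name, org_email and report_id are copied to the root,
# and report_metadata is removed
assert flattened["org_name"] == "example.net"
assert "report_metadata" not in flattened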
|
test
|
KafkaClient.generate_daterange
|
Creates a date_range timestamp pair with format YYYY-MM-DDTHH:MM:SS
based on begin and end dates for easier parsing in Kibana.
Move to utils to avoid duplication w/ elastic?
|
parsedmarc/kafkaclient.py
|
def generate_daterange(report):
"""
    Creates a date_range timestamp pair with format YYYY-MM-DDTHH:MM:SS
based on begin and end dates for easier parsing in Kibana.
Move to utils to avoid duplication w/ elastic?
"""
metadata = report["report_metadata"]
begin_date = human_timestamp_to_datetime(metadata["begin_date"])
end_date = human_timestamp_to_datetime(metadata["end_date"])
begin_date_human = begin_date.strftime("%Y-%m-%dT%H:%M:%S")
end_date_human = end_date.strftime("%Y-%m-%dT%H:%M:%S")
date_range = [begin_date_human,
end_date_human]
logger.debug("date_range is {}".format(date_range))
return date_range
|
def generate_daterange(report):
"""
    Creates a date_range timestamp pair with format YYYY-MM-DDTHH:MM:SS
based on begin and end dates for easier parsing in Kibana.
Move to utils to avoid duplication w/ elastic?
"""
metadata = report["report_metadata"]
begin_date = human_timestamp_to_datetime(metadata["begin_date"])
end_date = human_timestamp_to_datetime(metadata["end_date"])
begin_date_human = begin_date.strftime("%Y-%m-%dT%H:%M:%S")
end_date_human = end_date.strftime("%Y-%m-%dT%H:%M:%S")
date_range = [begin_date_human,
end_date_human]
logger.debug("date_range is {}".format(date_range))
return date_range
|
[
"Creates",
"a",
"date_range",
"timestamp",
"with",
"format",
"YYYY",
"-",
"MM",
"-",
"DD",
"-",
"T",
"-",
"HH",
":",
"MM",
":",
"SS",
"based",
"on",
"begin",
"and",
"end",
"dates",
"for",
"easier",
"parsing",
"in",
"Kibana",
"."
] |
domainaware/parsedmarc
|
python
|
https://github.com/domainaware/parsedmarc/blob/ecc9fd434c23d896ccd1f35795ccc047f946ed05/parsedmarc/kafkaclient.py#L72-L88
|
[
"def",
"generate_daterange",
"(",
"report",
")",
":",
"metadata",
"=",
"report",
"[",
"\"report_metadata\"",
"]",
"begin_date",
"=",
"human_timestamp_to_datetime",
"(",
"metadata",
"[",
"\"begin_date\"",
"]",
")",
"end_date",
"=",
"human_timestamp_to_datetime",
"(",
"metadata",
"[",
"\"end_date\"",
"]",
")",
"begin_date_human",
"=",
"begin_date",
".",
"strftime",
"(",
"\"%Y-%m-%dT%H:%M:%S\"",
")",
"end_date_human",
"=",
"end_date",
".",
"strftime",
"(",
"\"%Y-%m-%dT%H:%M:%S\"",
")",
"date_range",
"=",
"[",
"begin_date_human",
",",
"end_date_human",
"]",
"logger",
".",
"debug",
"(",
"\"date_range is {}\"",
".",
"format",
"(",
"date_range",
")",
")",
"return",
"date_range"
] |
ecc9fd434c23d896ccd1f35795ccc047f946ed05
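
A standalone sketch of the reformatting generate_daterange performs, assuming the begin/end dates arrive as "YYYY-MM-DD HH:MM:SS" strings (the exact formats accepted by human_timestamp_to_datetime are not shown in this record).

from datetime import datetime

metadata = {"begin_date": "2019-02-24 00:00:00",   # assumed input format
            "end_date": "2019-02-25 00:00:00"}

out_fmt = "%Y-%m-%dT%H:%M:%S"  # the Kibana-friendly format used above
date_range = [
    datetime.strptime(metadata["begin_date"], "%Y-%m-%d %H:%M:%S").strftime(out_fmt),
    datetime.strptime(metadata["end_date"], "%Y-%m-%d %H:%M:%S").strftime(out_fmt),
]
print(date_range)  # ['2019-02-24T00:00:00', '2019-02-25T00:00:00']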
|
test
|
KafkaClient.save_aggregate_reports_to_kafka
|
Saves aggregate DMARC reports to Kafka
Args:
aggregate_reports (list): A list of aggregate report dictionaries
to save to Kafka
aggregate_topic (str): The name of the Kafka topic
|
parsedmarc/kafkaclient.py
|
def save_aggregate_reports_to_kafka(self, aggregate_reports,
aggregate_topic):
"""
Saves aggregate DMARC reports to Kafka
Args:
aggregate_reports (list): A list of aggregate report dictionaries
to save to Kafka
aggregate_topic (str): The name of the Kafka topic
"""
if (type(aggregate_reports) == dict or
type(aggregate_reports) == OrderedDict):
aggregate_reports = [aggregate_reports]
if len(aggregate_reports) < 1:
return
for report in aggregate_reports:
report['date_range'] = self.generate_daterange(report)
report = self.strip_metadata(report)
for slice in report['records']:
slice['date_range'] = report['date_range']
slice['org_name'] = report['org_name']
slice['org_email'] = report['org_email']
slice['policy_published'] = report['policy_published']
slice['report_id'] = report['report_id']
logger.debug("Sending slice.")
try:
logger.debug("Saving aggregate report to Kafka")
self.producer.send(aggregate_topic, slice)
except UnknownTopicOrPartitionError:
raise KafkaError(
"Kafka error: Unknown topic or partition on broker")
except Exception as e:
raise KafkaError(
"Kafka error: {0}".format(e.__str__()))
try:
self.producer.flush()
except Exception as e:
raise KafkaError(
"Kafka error: {0}".format(e.__str__()))
|
def save_aggregate_reports_to_kafka(self, aggregate_reports,
aggregate_topic):
"""
Saves aggregate DMARC reports to Kafka
Args:
aggregate_reports (list): A list of aggregate report dictionaries
to save to Kafka
aggregate_topic (str): The name of the Kafka topic
"""
if (type(aggregate_reports) == dict or
type(aggregate_reports) == OrderedDict):
aggregate_reports = [aggregate_reports]
if len(aggregate_reports) < 1:
return
for report in aggregate_reports:
report['date_range'] = self.generate_daterange(report)
report = self.strip_metadata(report)
for slice in report['records']:
slice['date_range'] = report['date_range']
slice['org_name'] = report['org_name']
slice['org_email'] = report['org_email']
slice['policy_published'] = report['policy_published']
slice['report_id'] = report['report_id']
logger.debug("Sending slice.")
try:
logger.debug("Saving aggregate report to Kafka")
self.producer.send(aggregate_topic, slice)
except UnknownTopicOrPartitionError:
raise KafkaError(
"Kafka error: Unknown topic or partition on broker")
except Exception as e:
raise KafkaError(
"Kafka error: {0}".format(e.__str__()))
try:
self.producer.flush()
except Exception as e:
raise KafkaError(
"Kafka error: {0}".format(e.__str__()))
|
[
"Saves",
"aggregate",
"DMARC",
"reports",
"to",
"Kafka"
] |
domainaware/parsedmarc
|
python
|
https://github.com/domainaware/parsedmarc/blob/ecc9fd434c23d896ccd1f35795ccc047f946ed05/parsedmarc/kafkaclient.py#L90-L132
|
[
"def",
"save_aggregate_reports_to_kafka",
"(",
"self",
",",
"aggregate_reports",
",",
"aggregate_topic",
")",
":",
"if",
"(",
"type",
"(",
"aggregate_reports",
")",
"==",
"dict",
"or",
"type",
"(",
"aggregate_reports",
")",
"==",
"OrderedDict",
")",
":",
"aggregate_reports",
"=",
"[",
"aggregate_reports",
"]",
"if",
"len",
"(",
"aggregate_reports",
")",
"<",
"1",
":",
"return",
"for",
"report",
"in",
"aggregate_reports",
":",
"report",
"[",
"'date_range'",
"]",
"=",
"self",
".",
"generate_daterange",
"(",
"report",
")",
"report",
"=",
"self",
".",
"strip_metadata",
"(",
"report",
")",
"for",
"slice",
"in",
"report",
"[",
"'records'",
"]",
":",
"slice",
"[",
"'date_range'",
"]",
"=",
"report",
"[",
"'date_range'",
"]",
"slice",
"[",
"'org_name'",
"]",
"=",
"report",
"[",
"'org_name'",
"]",
"slice",
"[",
"'org_email'",
"]",
"=",
"report",
"[",
"'org_email'",
"]",
"slice",
"[",
"'policy_published'",
"]",
"=",
"report",
"[",
"'policy_published'",
"]",
"slice",
"[",
"'report_id'",
"]",
"=",
"report",
"[",
"'report_id'",
"]",
"logger",
".",
"debug",
"(",
"\"Sending slice.\"",
")",
"try",
":",
"logger",
".",
"debug",
"(",
"\"Saving aggregate report to Kafka\"",
")",
"self",
".",
"producer",
".",
"send",
"(",
"aggregate_topic",
",",
"slice",
")",
"except",
"UnknownTopicOrPartitionError",
":",
"raise",
"KafkaError",
"(",
"\"Kafka error: Unknown topic or partition on broker\"",
")",
"except",
"Exception",
"as",
"e",
":",
"raise",
"KafkaError",
"(",
"\"Kafka error: {0}\"",
".",
"format",
"(",
"e",
".",
"__str__",
"(",
")",
")",
")",
"try",
":",
"self",
".",
"producer",
".",
"flush",
"(",
")",
"except",
"Exception",
"as",
"e",
":",
"raise",
"KafkaError",
"(",
"\"Kafka error: {0}\"",
".",
"format",
"(",
"e",
".",
"__str__",
"(",
")",
")",
")"
] |
ecc9fd434c23d896ccd1f35795ccc047f946ed05
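
A runnable sketch of the per-record enrichment done above before each slice is sent to Kafka (the real method then calls producer.send for every slice and flushes); the report dict is a made-up minimal example.

report = {
    "date_range": ["2019-02-24T00:00:00", "2019-02-25T00:00:00"],
    "org_name": "example.net",
    "org_email": "noreply@example.net",
    "report_id": "abc123",
    "policy_published": {"domain": "example.com", "p": "none"},
    "records": [{"count": 3}, {"count": 1}],
}

for slice_ in report["records"]:
    # Copy report-level context onto each record so every Kafka
    # message is self-describing
    slice_["date_range"] = report["date_range"]
    slice_["org_name"] = report["org_name"]
    slice_["org_email"] = report["org_email"]
    slice_["policy_published"] = report["policy_published"]
    slice_["report_id"] = report["report_id"]

print(report["records"][0]["report_id"])  # abc123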
|
test
|
KafkaClient.save_forensic_reports_to_kafka
|
Saves forensic DMARC reports to Kafka, sends individual
records (slices) since Kafka requires messages to be <= 1MB
by default.
Args:
forensic_reports (list): A list of forensic report dicts
to save to Kafka
forensic_topic (str): The name of the Kafka topic
|
parsedmarc/kafkaclient.py
|
def save_forensic_reports_to_kafka(self, forensic_reports, forensic_topic):
"""
Saves forensic DMARC reports to Kafka, sends individual
records (slices) since Kafka requires messages to be <= 1MB
by default.
Args:
forensic_reports (list): A list of forensic report dicts
to save to Kafka
forensic_topic (str): The name of the Kafka topic
"""
if type(forensic_reports) == dict:
forensic_reports = [forensic_reports]
if len(forensic_reports) < 1:
return
try:
logger.debug("Saving forensic reports to Kafka")
self.producer.send(forensic_topic, forensic_reports)
except UnknownTopicOrPartitionError:
raise KafkaError(
"Kafka error: Unknown topic or partition on broker")
except Exception as e:
raise KafkaError(
"Kafka error: {0}".format(e.__str__()))
try:
self.producer.flush()
except Exception as e:
raise KafkaError(
"Kafka error: {0}".format(e.__str__()))
|
def save_forensic_reports_to_kafka(self, forensic_reports, forensic_topic):
"""
Saves forensic DMARC reports to Kafka, sends individual
records (slices) since Kafka requires messages to be <= 1MB
by default.
Args:
forensic_reports (list): A list of forensic report dicts
to save to Kafka
forensic_topic (str): The name of the Kafka topic
"""
if type(forensic_reports) == dict:
forensic_reports = [forensic_reports]
if len(forensic_reports) < 1:
return
try:
logger.debug("Saving forensic reports to Kafka")
self.producer.send(forensic_topic, forensic_reports)
except UnknownTopicOrPartitionError:
raise KafkaError(
"Kafka error: Unknown topic or partition on broker")
except Exception as e:
raise KafkaError(
"Kafka error: {0}".format(e.__str__()))
try:
self.producer.flush()
except Exception as e:
raise KafkaError(
"Kafka error: {0}".format(e.__str__()))
|
[
"Saves",
"forensic",
"DMARC",
"reports",
"to",
"Kafka",
"sends",
"individual",
"records",
"(",
"slices",
")",
"since",
"Kafka",
"requires",
"messages",
"to",
"be",
"<",
"=",
"1MB",
"by",
"default",
"."
] |
domainaware/parsedmarc
|
python
|
https://github.com/domainaware/parsedmarc/blob/ecc9fd434c23d896ccd1f35795ccc047f946ed05/parsedmarc/kafkaclient.py#L134-L165
|
[
"def",
"save_forensic_reports_to_kafka",
"(",
"self",
",",
"forensic_reports",
",",
"forensic_topic",
")",
":",
"if",
"type",
"(",
"forensic_reports",
")",
"==",
"dict",
":",
"forensic_reports",
"=",
"[",
"forensic_reports",
"]",
"if",
"len",
"(",
"forensic_reports",
")",
"<",
"1",
":",
"return",
"try",
":",
"logger",
".",
"debug",
"(",
"\"Saving forensic reports to Kafka\"",
")",
"self",
".",
"producer",
".",
"send",
"(",
"forensic_topic",
",",
"forensic_reports",
")",
"except",
"UnknownTopicOrPartitionError",
":",
"raise",
"KafkaError",
"(",
"\"Kafka error: Unknown topic or partition on broker\"",
")",
"except",
"Exception",
"as",
"e",
":",
"raise",
"KafkaError",
"(",
"\"Kafka error: {0}\"",
".",
"format",
"(",
"e",
".",
"__str__",
"(",
")",
")",
")",
"try",
":",
"self",
".",
"producer",
".",
"flush",
"(",
")",
"except",
"Exception",
"as",
"e",
":",
"raise",
"KafkaError",
"(",
"\"Kafka error: {0}\"",
".",
"format",
"(",
"e",
".",
"__str__",
"(",
")",
")",
")"
] |
ecc9fd434c23d896ccd1f35795ccc047f946ed05
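
An illustrative call, assuming a KafkaClient instance has already been constructed elsewhere (its constructor arguments are not shown in this record) and a reachable broker; the topic name and the parsed_forensic_reports variable are placeholders. Note that, unlike the aggregate method, the code above passes the whole forensic_reports list to a single producer.send call.

# Accepts a single report dict or a list; an empty list is a no-op.
client.save_forensic_reports_to_kafka(parsed_forensic_reports, "dmarc_forensic")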
|
test
|
_parse_report_record
|
Converts a record from a DMARC aggregate report into a more consistent
format
Args:
record (OrderedDict): The record to convert
nameservers (list): A list of one or more nameservers to use
(Cloudflare's public DNS resolvers by default)
dns_timeout (float): Sets the DNS timeout in seconds
Returns:
OrderedDict: The converted record
|
parsedmarc/__init__.py
|
def _parse_report_record(record, nameservers=None, dns_timeout=2.0,
parallel=False):
"""
Converts a record from a DMARC aggregate report into a more consistent
format
Args:
record (OrderedDict): The record to convert
nameservers (list): A list of one or more nameservers to use
(Cloudflare's public DNS resolvers by default)
dns_timeout (float): Sets the DNS timeout in seconds
Returns:
OrderedDict: The converted record
"""
if nameservers is None:
nameservers = ["1.1.1.1", "1.0.0.1",
"2606:4700:4700::1111", "2606:4700:4700::1001",
]
record = record.copy()
new_record = OrderedDict()
new_record_source = get_ip_address_info(record["row"]["source_ip"],
cache=IP_ADDRESS_CACHE,
nameservers=nameservers,
timeout=dns_timeout,
parallel=parallel)
new_record["source"] = new_record_source
new_record["count"] = int(record["row"]["count"])
policy_evaluated = record["row"]["policy_evaluated"].copy()
new_policy_evaluated = OrderedDict([("disposition", "none"),
("dkim", "fail"),
("spf", "fail"),
("policy_override_reasons", [])
])
if "disposition" in policy_evaluated:
new_policy_evaluated["disposition"] = policy_evaluated["disposition"]
if new_policy_evaluated["disposition"].strip().lower() == "pass":
new_policy_evaluated["disposition"] = "none"
if "dkim" in policy_evaluated:
new_policy_evaluated["dkim"] = policy_evaluated["dkim"]
if "spf" in policy_evaluated:
new_policy_evaluated["spf"] = policy_evaluated["spf"]
reasons = []
spf_aligned = policy_evaluated["spf"] == "pass"
dkim_aligned = policy_evaluated["dkim"] == "pass"
dmarc_aligned = spf_aligned or dkim_aligned
new_record["alignment"] = dict()
new_record["alignment"]["spf"] = spf_aligned
new_record["alignment"]["dkim"] = dkim_aligned
new_record["alignment"]["dmarc"] = dmarc_aligned
if "reason" in policy_evaluated:
if type(policy_evaluated["reason"]) == list:
reasons = policy_evaluated["reason"]
else:
reasons = [policy_evaluated["reason"]]
for reason in reasons:
if "comment" not in reason:
reason["comment"] = None
new_policy_evaluated["policy_override_reasons"] = reasons
new_record["policy_evaluated"] = new_policy_evaluated
new_record["identifiers"] = record["identifiers"].copy()
new_record["auth_results"] = OrderedDict([("dkim", []), ("spf", [])])
if record["auth_results"] is not None:
auth_results = record["auth_results"].copy()
if "spf" not in auth_results:
auth_results["spf"] = []
if "dkim" not in auth_results:
auth_results["dkim"] = []
else:
auth_results = new_record["auth_results"].copy()
if type(auth_results["dkim"]) != list:
auth_results["dkim"] = [auth_results["dkim"]]
for result in auth_results["dkim"]:
if "domain" in result and result["domain"] is not None:
new_result = OrderedDict([("domain", result["domain"])])
if "selector" in result and result["selector"] is not None:
new_result["selector"] = result["selector"]
else:
new_result["selector"] = "none"
if "result" in result and result["result"] is not None:
new_result["result"] = result["result"]
else:
new_result["result"] = "none"
new_record["auth_results"]["dkim"].append(new_result)
if type(auth_results["spf"]) != list:
auth_results["spf"] = [auth_results["spf"]]
for result in auth_results["spf"]:
new_result = OrderedDict([("domain", result["domain"])])
if "scope" in result and result["scope"] is not None:
new_result["scope"] = result["scope"]
else:
new_result["scope"] = "mfrom"
if "result" in result and result["result"] is not None:
new_result["result"] = result["result"]
else:
new_result["result"] = "none"
new_record["auth_results"]["spf"].append(new_result)
if "envelope_from" not in new_record["identifiers"]:
envelope_from = None
if len(auth_results["spf"]) > 0:
envelope_from = new_record["auth_results"]["spf"][-1]["domain"]
if envelope_from is not None:
envelope_from = str(envelope_from).lower()
new_record["identifiers"]["envelope_from"] = envelope_from
elif new_record["identifiers"]["envelope_from"] is None:
if len(auth_results["spf"]) > 0:
envelope_from = new_record["auth_results"]["spf"][-1]["domain"]
if envelope_from is not None:
envelope_from = str(envelope_from).lower()
new_record["identifiers"]["envelope_from"] = envelope_from
envelope_to = None
if "envelope_to" in new_record["identifiers"]:
envelope_to = new_record["identifiers"]["envelope_to"]
del new_record["identifiers"]["envelope_to"]
new_record["identifiers"]["envelope_to"] = envelope_to
return new_record
|
def _parse_report_record(record, nameservers=None, dns_timeout=2.0,
parallel=False):
"""
Converts a record from a DMARC aggregate report into a more consistent
format
Args:
record (OrderedDict): The record to convert
nameservers (list): A list of one or more nameservers to use
(Cloudflare's public DNS resolvers by default)
dns_timeout (float): Sets the DNS timeout in seconds
Returns:
OrderedDict: The converted record
"""
if nameservers is None:
nameservers = ["1.1.1.1", "1.0.0.1",
"2606:4700:4700::1111", "2606:4700:4700::1001",
]
record = record.copy()
new_record = OrderedDict()
new_record_source = get_ip_address_info(record["row"]["source_ip"],
cache=IP_ADDRESS_CACHE,
nameservers=nameservers,
timeout=dns_timeout,
parallel=parallel)
new_record["source"] = new_record_source
new_record["count"] = int(record["row"]["count"])
policy_evaluated = record["row"]["policy_evaluated"].copy()
new_policy_evaluated = OrderedDict([("disposition", "none"),
("dkim", "fail"),
("spf", "fail"),
("policy_override_reasons", [])
])
if "disposition" in policy_evaluated:
new_policy_evaluated["disposition"] = policy_evaluated["disposition"]
if new_policy_evaluated["disposition"].strip().lower() == "pass":
new_policy_evaluated["disposition"] = "none"
if "dkim" in policy_evaluated:
new_policy_evaluated["dkim"] = policy_evaluated["dkim"]
if "spf" in policy_evaluated:
new_policy_evaluated["spf"] = policy_evaluated["spf"]
reasons = []
spf_aligned = policy_evaluated["spf"] == "pass"
dkim_aligned = policy_evaluated["dkim"] == "pass"
dmarc_aligned = spf_aligned or dkim_aligned
new_record["alignment"] = dict()
new_record["alignment"]["spf"] = spf_aligned
new_record["alignment"]["dkim"] = dkim_aligned
new_record["alignment"]["dmarc"] = dmarc_aligned
if "reason" in policy_evaluated:
if type(policy_evaluated["reason"]) == list:
reasons = policy_evaluated["reason"]
else:
reasons = [policy_evaluated["reason"]]
for reason in reasons:
if "comment" not in reason:
reason["comment"] = None
new_policy_evaluated["policy_override_reasons"] = reasons
new_record["policy_evaluated"] = new_policy_evaluated
new_record["identifiers"] = record["identifiers"].copy()
new_record["auth_results"] = OrderedDict([("dkim", []), ("spf", [])])
if record["auth_results"] is not None:
auth_results = record["auth_results"].copy()
if "spf" not in auth_results:
auth_results["spf"] = []
if "dkim" not in auth_results:
auth_results["dkim"] = []
else:
auth_results = new_record["auth_results"].copy()
if type(auth_results["dkim"]) != list:
auth_results["dkim"] = [auth_results["dkim"]]
for result in auth_results["dkim"]:
if "domain" in result and result["domain"] is not None:
new_result = OrderedDict([("domain", result["domain"])])
if "selector" in result and result["selector"] is not None:
new_result["selector"] = result["selector"]
else:
new_result["selector"] = "none"
if "result" in result and result["result"] is not None:
new_result["result"] = result["result"]
else:
new_result["result"] = "none"
new_record["auth_results"]["dkim"].append(new_result)
if type(auth_results["spf"]) != list:
auth_results["spf"] = [auth_results["spf"]]
for result in auth_results["spf"]:
new_result = OrderedDict([("domain", result["domain"])])
if "scope" in result and result["scope"] is not None:
new_result["scope"] = result["scope"]
else:
new_result["scope"] = "mfrom"
if "result" in result and result["result"] is not None:
new_result["result"] = result["result"]
else:
new_result["result"] = "none"
new_record["auth_results"]["spf"].append(new_result)
if "envelope_from" not in new_record["identifiers"]:
envelope_from = None
if len(auth_results["spf"]) > 0:
envelope_from = new_record["auth_results"]["spf"][-1]["domain"]
if envelope_from is not None:
envelope_from = str(envelope_from).lower()
new_record["identifiers"]["envelope_from"] = envelope_from
elif new_record["identifiers"]["envelope_from"] is None:
if len(auth_results["spf"]) > 0:
envelope_from = new_record["auth_results"]["spf"][-1]["domain"]
if envelope_from is not None:
envelope_from = str(envelope_from).lower()
new_record["identifiers"]["envelope_from"] = envelope_from
envelope_to = None
if "envelope_to" in new_record["identifiers"]:
envelope_to = new_record["identifiers"]["envelope_to"]
del new_record["identifiers"]["envelope_to"]
new_record["identifiers"]["envelope_to"] = envelope_to
return new_record
|
[
"Converts",
"a",
"record",
"from",
"a",
"DMARC",
"aggregate",
"report",
"into",
"a",
"more",
"consistent",
"format"
] |
domainaware/parsedmarc
|
python
|
https://github.com/domainaware/parsedmarc/blob/ecc9fd434c23d896ccd1f35795ccc047f946ed05/parsedmarc/__init__.py#L86-L208
|
[
"def",
"_parse_report_record",
"(",
"record",
",",
"nameservers",
"=",
"None",
",",
"dns_timeout",
"=",
"2.0",
",",
"parallel",
"=",
"False",
")",
":",
"if",
"nameservers",
"is",
"None",
":",
"nameservers",
"=",
"[",
"\"1.1.1.1\"",
",",
"\"1.0.0.1\"",
",",
"\"2606:4700:4700::1111\"",
",",
"\"2606:4700:4700::1001\"",
",",
"]",
"record",
"=",
"record",
".",
"copy",
"(",
")",
"new_record",
"=",
"OrderedDict",
"(",
")",
"new_record_source",
"=",
"get_ip_address_info",
"(",
"record",
"[",
"\"row\"",
"]",
"[",
"\"source_ip\"",
"]",
",",
"cache",
"=",
"IP_ADDRESS_CACHE",
",",
"nameservers",
"=",
"nameservers",
",",
"timeout",
"=",
"dns_timeout",
",",
"parallel",
"=",
"parallel",
")",
"new_record",
"[",
"\"source\"",
"]",
"=",
"new_record_source",
"new_record",
"[",
"\"count\"",
"]",
"=",
"int",
"(",
"record",
"[",
"\"row\"",
"]",
"[",
"\"count\"",
"]",
")",
"policy_evaluated",
"=",
"record",
"[",
"\"row\"",
"]",
"[",
"\"policy_evaluated\"",
"]",
".",
"copy",
"(",
")",
"new_policy_evaluated",
"=",
"OrderedDict",
"(",
"[",
"(",
"\"disposition\"",
",",
"\"none\"",
")",
",",
"(",
"\"dkim\"",
",",
"\"fail\"",
")",
",",
"(",
"\"spf\"",
",",
"\"fail\"",
")",
",",
"(",
"\"policy_override_reasons\"",
",",
"[",
"]",
")",
"]",
")",
"if",
"\"disposition\"",
"in",
"policy_evaluated",
":",
"new_policy_evaluated",
"[",
"\"disposition\"",
"]",
"=",
"policy_evaluated",
"[",
"\"disposition\"",
"]",
"if",
"new_policy_evaluated",
"[",
"\"disposition\"",
"]",
".",
"strip",
"(",
")",
".",
"lower",
"(",
")",
"==",
"\"pass\"",
":",
"new_policy_evaluated",
"[",
"\"disposition\"",
"]",
"=",
"\"none\"",
"if",
"\"dkim\"",
"in",
"policy_evaluated",
":",
"new_policy_evaluated",
"[",
"\"dkim\"",
"]",
"=",
"policy_evaluated",
"[",
"\"dkim\"",
"]",
"if",
"\"spf\"",
"in",
"policy_evaluated",
":",
"new_policy_evaluated",
"[",
"\"spf\"",
"]",
"=",
"policy_evaluated",
"[",
"\"spf\"",
"]",
"reasons",
"=",
"[",
"]",
"spf_aligned",
"=",
"policy_evaluated",
"[",
"\"spf\"",
"]",
"==",
"\"pass\"",
"dkim_aligned",
"=",
"policy_evaluated",
"[",
"\"dkim\"",
"]",
"==",
"\"pass\"",
"dmarc_aligned",
"=",
"spf_aligned",
"or",
"dkim_aligned",
"new_record",
"[",
"\"alignment\"",
"]",
"=",
"dict",
"(",
")",
"new_record",
"[",
"\"alignment\"",
"]",
"[",
"\"spf\"",
"]",
"=",
"spf_aligned",
"new_record",
"[",
"\"alignment\"",
"]",
"[",
"\"dkim\"",
"]",
"=",
"dkim_aligned",
"new_record",
"[",
"\"alignment\"",
"]",
"[",
"\"dmarc\"",
"]",
"=",
"dmarc_aligned",
"if",
"\"reason\"",
"in",
"policy_evaluated",
":",
"if",
"type",
"(",
"policy_evaluated",
"[",
"\"reason\"",
"]",
")",
"==",
"list",
":",
"reasons",
"=",
"policy_evaluated",
"[",
"\"reason\"",
"]",
"else",
":",
"reasons",
"=",
"[",
"policy_evaluated",
"[",
"\"reason\"",
"]",
"]",
"for",
"reason",
"in",
"reasons",
":",
"if",
"\"comment\"",
"not",
"in",
"reason",
":",
"reason",
"[",
"\"comment\"",
"]",
"=",
"None",
"new_policy_evaluated",
"[",
"\"policy_override_reasons\"",
"]",
"=",
"reasons",
"new_record",
"[",
"\"policy_evaluated\"",
"]",
"=",
"new_policy_evaluated",
"new_record",
"[",
"\"identifiers\"",
"]",
"=",
"record",
"[",
"\"identifiers\"",
"]",
".",
"copy",
"(",
")",
"new_record",
"[",
"\"auth_results\"",
"]",
"=",
"OrderedDict",
"(",
"[",
"(",
"\"dkim\"",
",",
"[",
"]",
")",
",",
"(",
"\"spf\"",
",",
"[",
"]",
")",
"]",
")",
"if",
"record",
"[",
"\"auth_results\"",
"]",
"is",
"not",
"None",
":",
"auth_results",
"=",
"record",
"[",
"\"auth_results\"",
"]",
".",
"copy",
"(",
")",
"if",
"\"spf\"",
"not",
"in",
"auth_results",
":",
"auth_results",
"[",
"\"spf\"",
"]",
"=",
"[",
"]",
"if",
"\"dkim\"",
"not",
"in",
"auth_results",
":",
"auth_results",
"[",
"\"dkim\"",
"]",
"=",
"[",
"]",
"else",
":",
"auth_results",
"=",
"new_record",
"[",
"\"auth_results\"",
"]",
".",
"copy",
"(",
")",
"if",
"type",
"(",
"auth_results",
"[",
"\"dkim\"",
"]",
")",
"!=",
"list",
":",
"auth_results",
"[",
"\"dkim\"",
"]",
"=",
"[",
"auth_results",
"[",
"\"dkim\"",
"]",
"]",
"for",
"result",
"in",
"auth_results",
"[",
"\"dkim\"",
"]",
":",
"if",
"\"domain\"",
"in",
"result",
"and",
"result",
"[",
"\"domain\"",
"]",
"is",
"not",
"None",
":",
"new_result",
"=",
"OrderedDict",
"(",
"[",
"(",
"\"domain\"",
",",
"result",
"[",
"\"domain\"",
"]",
")",
"]",
")",
"if",
"\"selector\"",
"in",
"result",
"and",
"result",
"[",
"\"selector\"",
"]",
"is",
"not",
"None",
":",
"new_result",
"[",
"\"selector\"",
"]",
"=",
"result",
"[",
"\"selector\"",
"]",
"else",
":",
"new_result",
"[",
"\"selector\"",
"]",
"=",
"\"none\"",
"if",
"\"result\"",
"in",
"result",
"and",
"result",
"[",
"\"result\"",
"]",
"is",
"not",
"None",
":",
"new_result",
"[",
"\"result\"",
"]",
"=",
"result",
"[",
"\"result\"",
"]",
"else",
":",
"new_result",
"[",
"\"result\"",
"]",
"=",
"\"none\"",
"new_record",
"[",
"\"auth_results\"",
"]",
"[",
"\"dkim\"",
"]",
".",
"append",
"(",
"new_result",
")",
"if",
"type",
"(",
"auth_results",
"[",
"\"spf\"",
"]",
")",
"!=",
"list",
":",
"auth_results",
"[",
"\"spf\"",
"]",
"=",
"[",
"auth_results",
"[",
"\"spf\"",
"]",
"]",
"for",
"result",
"in",
"auth_results",
"[",
"\"spf\"",
"]",
":",
"new_result",
"=",
"OrderedDict",
"(",
"[",
"(",
"\"domain\"",
",",
"result",
"[",
"\"domain\"",
"]",
")",
"]",
")",
"if",
"\"scope\"",
"in",
"result",
"and",
"result",
"[",
"\"scope\"",
"]",
"is",
"not",
"None",
":",
"new_result",
"[",
"\"scope\"",
"]",
"=",
"result",
"[",
"\"scope\"",
"]",
"else",
":",
"new_result",
"[",
"\"scope\"",
"]",
"=",
"\"mfrom\"",
"if",
"\"result\"",
"in",
"result",
"and",
"result",
"[",
"\"result\"",
"]",
"is",
"not",
"None",
":",
"new_result",
"[",
"\"result\"",
"]",
"=",
"result",
"[",
"\"result\"",
"]",
"else",
":",
"new_result",
"[",
"\"result\"",
"]",
"=",
"\"none\"",
"new_record",
"[",
"\"auth_results\"",
"]",
"[",
"\"spf\"",
"]",
".",
"append",
"(",
"new_result",
")",
"if",
"\"envelope_from\"",
"not",
"in",
"new_record",
"[",
"\"identifiers\"",
"]",
":",
"envelope_from",
"=",
"None",
"if",
"len",
"(",
"auth_results",
"[",
"\"spf\"",
"]",
")",
">",
"0",
":",
"envelope_from",
"=",
"new_record",
"[",
"\"auth_results\"",
"]",
"[",
"\"spf\"",
"]",
"[",
"-",
"1",
"]",
"[",
"\"domain\"",
"]",
"if",
"envelope_from",
"is",
"not",
"None",
":",
"envelope_from",
"=",
"str",
"(",
"envelope_from",
")",
".",
"lower",
"(",
")",
"new_record",
"[",
"\"identifiers\"",
"]",
"[",
"\"envelope_from\"",
"]",
"=",
"envelope_from",
"elif",
"new_record",
"[",
"\"identifiers\"",
"]",
"[",
"\"envelope_from\"",
"]",
"is",
"None",
":",
"if",
"len",
"(",
"auth_results",
"[",
"\"spf\"",
"]",
")",
">",
"0",
":",
"envelope_from",
"=",
"new_record",
"[",
"\"auth_results\"",
"]",
"[",
"\"spf\"",
"]",
"[",
"-",
"1",
"]",
"[",
"\"domain\"",
"]",
"if",
"envelope_from",
"is",
"not",
"None",
":",
"envelope_from",
"=",
"str",
"(",
"envelope_from",
")",
".",
"lower",
"(",
")",
"new_record",
"[",
"\"identifiers\"",
"]",
"[",
"\"envelope_from\"",
"]",
"=",
"envelope_from",
"envelope_to",
"=",
"None",
"if",
"\"envelope_to\"",
"in",
"new_record",
"[",
"\"identifiers\"",
"]",
":",
"envelope_to",
"=",
"new_record",
"[",
"\"identifiers\"",
"]",
"[",
"\"envelope_to\"",
"]",
"del",
"new_record",
"[",
"\"identifiers\"",
"]",
"[",
"\"envelope_to\"",
"]",
"new_record",
"[",
"\"identifiers\"",
"]",
"[",
"\"envelope_to\"",
"]",
"=",
"envelope_to",
"return",
"new_record"
] |
ecc9fd434c23d896ccd1f35795ccc047f946ed05
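
A standalone sketch of the alignment logic above: a record counts as DMARC-aligned when either SPF or DKIM passes in policy_evaluated. The policy_evaluated dict is a made-up fragment, not a full aggregate-report record.

from collections import OrderedDict

policy_evaluated = {"disposition": "none", "dkim": "fail", "spf": "pass"}

spf_aligned = policy_evaluated["spf"] == "pass"
dkim_aligned = policy_evaluated["dkim"] == "pass"
dmarc_aligned = spf_aligned or dkim_aligned

alignment = OrderedDict([("spf", spf_aligned),
                         ("dkim", dkim_aligned),
                         ("dmarc", dmarc_aligned)])
print(alignment)  # spf -> True, dkim -> False, dmarc -> True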
|
test
|
parse_aggregate_report_xml
|
Parses a DMARC XML report string and returns a consistent OrderedDict
Args:
xml (str): A string of DMARC aggregate report XML
nameservers (list): A list of one or more nameservers to use
(Cloudflare's public DNS resolvers by default)
timeout (float): Sets the DNS timeout in seconds
parallel (bool): Parallel processing
Returns:
OrderedDict: The parsed aggregate DMARC report
|
parsedmarc/__init__.py
|
def parse_aggregate_report_xml(xml, nameservers=None, timeout=2.0,
parallel=False):
"""Parses a DMARC XML report string and returns a consistent OrderedDict
Args:
xml (str): A string of DMARC aggregate report XML
nameservers (list): A list of one or more nameservers to use
(Cloudflare's public DNS resolvers by default)
timeout (float): Sets the DNS timeout in seconds
parallel (bool): Parallel processing
Returns:
OrderedDict: The parsed aggregate DMARC report
"""
errors = []
try:
xmltodict.parse(xml)["feedback"]
except Exception as e:
errors.append(e.__str__())
try:
# Replace XML header (sometimes they are invalid)
xml = xml_header_regex.sub("<?xml version=\"1.0\"?>", xml)
# Remove invalid schema tags
xml = xml_schema_regex.sub('', xml)
report = xmltodict.parse(xml)["feedback"]
report_metadata = report["report_metadata"]
schema = "draft"
if "version" in report:
schema = report["version"]
new_report = OrderedDict([("xml_schema", schema)])
new_report_metadata = OrderedDict()
if report_metadata["org_name"] is None:
if report_metadata["email"] is not None:
report_metadata["org_name"] = report_metadata[
"email"].split("@")[-1]
org_name = report_metadata["org_name"]
if org_name is not None:
org_name = get_base_domain(org_name)
new_report_metadata["org_name"] = org_name
new_report_metadata["org_email"] = report_metadata["email"]
extra = None
if "extra_contact_info" in report_metadata:
extra = report_metadata["extra_contact_info"]
new_report_metadata["org_extra_contact_info"] = extra
new_report_metadata["report_id"] = report_metadata["report_id"]
report_id = new_report_metadata["report_id"]
report_id = report_id.replace("<",
"").replace(">", "").split("@")[0]
new_report_metadata["report_id"] = report_id
date_range = report["report_metadata"]["date_range"]
date_range["begin"] = timestamp_to_human(date_range["begin"])
date_range["end"] = timestamp_to_human(date_range["end"])
new_report_metadata["begin_date"] = date_range["begin"]
new_report_metadata["end_date"] = date_range["end"]
if "error" in report["report_metadata"]:
if type(report["report_metadata"]["error"]) != list:
errors = [report["report_metadata"]["error"]]
else:
errors = report["report_metadata"]["error"]
new_report_metadata["errors"] = errors
new_report["report_metadata"] = new_report_metadata
records = []
policy_published = report["policy_published"]
new_policy_published = OrderedDict()
new_policy_published["domain"] = policy_published["domain"]
adkim = "r"
if "adkim" in policy_published:
if policy_published["adkim"] is not None:
adkim = policy_published["adkim"]
new_policy_published["adkim"] = adkim
aspf = "r"
if "aspf" in policy_published:
if policy_published["aspf"] is not None:
aspf = policy_published["aspf"]
new_policy_published["aspf"] = aspf
new_policy_published["p"] = policy_published["p"]
sp = new_policy_published["p"]
if "sp" in policy_published:
if policy_published["sp"] is not None:
sp = report["policy_published"]["sp"]
new_policy_published["sp"] = sp
pct = "100"
if "pct" in policy_published:
if policy_published["pct"] is not None:
pct = report["policy_published"]["pct"]
new_policy_published["pct"] = pct
fo = "0"
if "fo" in policy_published:
if policy_published["fo"] is not None:
fo = report["policy_published"]["fo"]
new_policy_published["fo"] = fo
new_report["policy_published"] = new_policy_published
if type(report["record"]) == list:
for record in report["record"]:
report_record = _parse_report_record(record,
nameservers=nameservers,
dns_timeout=timeout,
parallel=parallel)
records.append(report_record)
else:
report_record = _parse_report_record(report["record"],
nameservers=nameservers,
dns_timeout=timeout,
parallel=parallel)
records.append(report_record)
new_report["records"] = records
return new_report
except expat.ExpatError as error:
raise InvalidAggregateReport(
"Invalid XML: {0}".format(error.__str__()))
except KeyError as error:
raise InvalidAggregateReport(
"Missing field: {0}".format(error.__str__()))
except AttributeError:
raise InvalidAggregateReport("Report missing required section")
except Exception as error:
raise InvalidAggregateReport(
"Unexpected error: {0}".format(error.__str__()))
|
def parse_aggregate_report_xml(xml, nameservers=None, timeout=2.0,
parallel=False):
"""Parses a DMARC XML report string and returns a consistent OrderedDict
Args:
xml (str): A string of DMARC aggregate report XML
nameservers (list): A list of one or more nameservers to use
(Cloudflare's public DNS resolvers by default)
timeout (float): Sets the DNS timeout in seconds
parallel (bool): Parallel processing
Returns:
OrderedDict: The parsed aggregate DMARC report
"""
errors = []
try:
xmltodict.parse(xml)["feedback"]
except Exception as e:
errors.append(e.__str__())
try:
# Replace XML header (sometimes they are invalid)
xml = xml_header_regex.sub("<?xml version=\"1.0\"?>", xml)
# Remove invalid schema tags
xml = xml_schema_regex.sub('', xml)
report = xmltodict.parse(xml)["feedback"]
report_metadata = report["report_metadata"]
schema = "draft"
if "version" in report:
schema = report["version"]
new_report = OrderedDict([("xml_schema", schema)])
new_report_metadata = OrderedDict()
if report_metadata["org_name"] is None:
if report_metadata["email"] is not None:
report_metadata["org_name"] = report_metadata[
"email"].split("@")[-1]
org_name = report_metadata["org_name"]
if org_name is not None:
org_name = get_base_domain(org_name)
new_report_metadata["org_name"] = org_name
new_report_metadata["org_email"] = report_metadata["email"]
extra = None
if "extra_contact_info" in report_metadata:
extra = report_metadata["extra_contact_info"]
new_report_metadata["org_extra_contact_info"] = extra
new_report_metadata["report_id"] = report_metadata["report_id"]
report_id = new_report_metadata["report_id"]
report_id = report_id.replace("<",
"").replace(">", "").split("@")[0]
new_report_metadata["report_id"] = report_id
date_range = report["report_metadata"]["date_range"]
date_range["begin"] = timestamp_to_human(date_range["begin"])
date_range["end"] = timestamp_to_human(date_range["end"])
new_report_metadata["begin_date"] = date_range["begin"]
new_report_metadata["end_date"] = date_range["end"]
if "error" in report["report_metadata"]:
if type(report["report_metadata"]["error"]) != list:
errors = [report["report_metadata"]["error"]]
else:
errors = report["report_metadata"]["error"]
new_report_metadata["errors"] = errors
new_report["report_metadata"] = new_report_metadata
records = []
policy_published = report["policy_published"]
new_policy_published = OrderedDict()
new_policy_published["domain"] = policy_published["domain"]
adkim = "r"
if "adkim" in policy_published:
if policy_published["adkim"] is not None:
adkim = policy_published["adkim"]
new_policy_published["adkim"] = adkim
aspf = "r"
if "aspf" in policy_published:
if policy_published["aspf"] is not None:
aspf = policy_published["aspf"]
new_policy_published["aspf"] = aspf
new_policy_published["p"] = policy_published["p"]
sp = new_policy_published["p"]
if "sp" in policy_published:
if policy_published["sp"] is not None:
sp = report["policy_published"]["sp"]
new_policy_published["sp"] = sp
pct = "100"
if "pct" in policy_published:
if policy_published["pct"] is not None:
pct = report["policy_published"]["pct"]
new_policy_published["pct"] = pct
fo = "0"
if "fo" in policy_published:
if policy_published["fo"] is not None:
fo = report["policy_published"]["fo"]
new_policy_published["fo"] = fo
new_report["policy_published"] = new_policy_published
if type(report["record"]) == list:
for record in report["record"]:
report_record = _parse_report_record(record,
nameservers=nameservers,
dns_timeout=timeout,
parallel=parallel)
records.append(report_record)
else:
report_record = _parse_report_record(report["record"],
nameservers=nameservers,
dns_timeout=timeout,
parallel=parallel)
records.append(report_record)
new_report["records"] = records
return new_report
except expat.ExpatError as error:
raise InvalidAggregateReport(
"Invalid XML: {0}".format(error.__str__()))
except KeyError as error:
raise InvalidAggregateReport(
"Missing field: {0}".format(error.__str__()))
except AttributeError:
raise InvalidAggregateReport("Report missing required section")
except Exception as error:
raise InvalidAggregateReport(
"Unexpected error: {0}".format(error.__str__()))
|
[
"Parses",
"a",
"DMARC",
"XML",
"report",
"string",
"and",
"returns",
"a",
"consistent",
"OrderedDict"
] |
domainaware/parsedmarc
|
python
|
https://github.com/domainaware/parsedmarc/blob/ecc9fd434c23d896ccd1f35795ccc047f946ed05/parsedmarc/__init__.py#L211-L339
|
[
"def",
"parse_aggregate_report_xml",
"(",
"xml",
",",
"nameservers",
"=",
"None",
",",
"timeout",
"=",
"2.0",
",",
"parallel",
"=",
"False",
")",
":",
"errors",
"=",
"[",
"]",
"try",
":",
"xmltodict",
".",
"parse",
"(",
"xml",
")",
"[",
"\"feedback\"",
"]",
"except",
"Exception",
"as",
"e",
":",
"errors",
".",
"append",
"(",
"e",
".",
"__str__",
"(",
")",
")",
"try",
":",
"# Replace XML header (sometimes they are invalid)",
"xml",
"=",
"xml_header_regex",
".",
"sub",
"(",
"\"<?xml version=\\\"1.0\\\"?>\"",
",",
"xml",
")",
"# Remove invalid schema tags",
"xml",
"=",
"xml_schema_regex",
".",
"sub",
"(",
"''",
",",
"xml",
")",
"report",
"=",
"xmltodict",
".",
"parse",
"(",
"xml",
")",
"[",
"\"feedback\"",
"]",
"report_metadata",
"=",
"report",
"[",
"\"report_metadata\"",
"]",
"schema",
"=",
"\"draft\"",
"if",
"\"version\"",
"in",
"report",
":",
"schema",
"=",
"report",
"[",
"\"version\"",
"]",
"new_report",
"=",
"OrderedDict",
"(",
"[",
"(",
"\"xml_schema\"",
",",
"schema",
")",
"]",
")",
"new_report_metadata",
"=",
"OrderedDict",
"(",
")",
"if",
"report_metadata",
"[",
"\"org_name\"",
"]",
"is",
"None",
":",
"if",
"report_metadata",
"[",
"\"email\"",
"]",
"is",
"not",
"None",
":",
"report_metadata",
"[",
"\"org_name\"",
"]",
"=",
"report_metadata",
"[",
"\"email\"",
"]",
".",
"split",
"(",
"\"@\"",
")",
"[",
"-",
"1",
"]",
"org_name",
"=",
"report_metadata",
"[",
"\"org_name\"",
"]",
"if",
"org_name",
"is",
"not",
"None",
":",
"org_name",
"=",
"get_base_domain",
"(",
"org_name",
")",
"new_report_metadata",
"[",
"\"org_name\"",
"]",
"=",
"org_name",
"new_report_metadata",
"[",
"\"org_email\"",
"]",
"=",
"report_metadata",
"[",
"\"email\"",
"]",
"extra",
"=",
"None",
"if",
"\"extra_contact_info\"",
"in",
"report_metadata",
":",
"extra",
"=",
"report_metadata",
"[",
"\"extra_contact_info\"",
"]",
"new_report_metadata",
"[",
"\"org_extra_contact_info\"",
"]",
"=",
"extra",
"new_report_metadata",
"[",
"\"report_id\"",
"]",
"=",
"report_metadata",
"[",
"\"report_id\"",
"]",
"report_id",
"=",
"new_report_metadata",
"[",
"\"report_id\"",
"]",
"report_id",
"=",
"report_id",
".",
"replace",
"(",
"\"<\"",
",",
"\"\"",
")",
".",
"replace",
"(",
"\">\"",
",",
"\"\"",
")",
".",
"split",
"(",
"\"@\"",
")",
"[",
"0",
"]",
"new_report_metadata",
"[",
"\"report_id\"",
"]",
"=",
"report_id",
"date_range",
"=",
"report",
"[",
"\"report_metadata\"",
"]",
"[",
"\"date_range\"",
"]",
"date_range",
"[",
"\"begin\"",
"]",
"=",
"timestamp_to_human",
"(",
"date_range",
"[",
"\"begin\"",
"]",
")",
"date_range",
"[",
"\"end\"",
"]",
"=",
"timestamp_to_human",
"(",
"date_range",
"[",
"\"end\"",
"]",
")",
"new_report_metadata",
"[",
"\"begin_date\"",
"]",
"=",
"date_range",
"[",
"\"begin\"",
"]",
"new_report_metadata",
"[",
"\"end_date\"",
"]",
"=",
"date_range",
"[",
"\"end\"",
"]",
"if",
"\"error\"",
"in",
"report",
"[",
"\"report_metadata\"",
"]",
":",
"if",
"type",
"(",
"report",
"[",
"\"report_metadata\"",
"]",
"[",
"\"error\"",
"]",
")",
"!=",
"list",
":",
"errors",
"=",
"[",
"report",
"[",
"\"report_metadata\"",
"]",
"[",
"\"error\"",
"]",
"]",
"else",
":",
"errors",
"=",
"report",
"[",
"\"report_metadata\"",
"]",
"[",
"\"error\"",
"]",
"new_report_metadata",
"[",
"\"errors\"",
"]",
"=",
"errors",
"new_report",
"[",
"\"report_metadata\"",
"]",
"=",
"new_report_metadata",
"records",
"=",
"[",
"]",
"policy_published",
"=",
"report",
"[",
"\"policy_published\"",
"]",
"new_policy_published",
"=",
"OrderedDict",
"(",
")",
"new_policy_published",
"[",
"\"domain\"",
"]",
"=",
"policy_published",
"[",
"\"domain\"",
"]",
"adkim",
"=",
"\"r\"",
"if",
"\"adkim\"",
"in",
"policy_published",
":",
"if",
"policy_published",
"[",
"\"adkim\"",
"]",
"is",
"not",
"None",
":",
"adkim",
"=",
"policy_published",
"[",
"\"adkim\"",
"]",
"new_policy_published",
"[",
"\"adkim\"",
"]",
"=",
"adkim",
"aspf",
"=",
"\"r\"",
"if",
"\"aspf\"",
"in",
"policy_published",
":",
"if",
"policy_published",
"[",
"\"aspf\"",
"]",
"is",
"not",
"None",
":",
"aspf",
"=",
"policy_published",
"[",
"\"aspf\"",
"]",
"new_policy_published",
"[",
"\"aspf\"",
"]",
"=",
"aspf",
"new_policy_published",
"[",
"\"p\"",
"]",
"=",
"policy_published",
"[",
"\"p\"",
"]",
"sp",
"=",
"new_policy_published",
"[",
"\"p\"",
"]",
"if",
"\"sp\"",
"in",
"policy_published",
":",
"if",
"policy_published",
"[",
"\"sp\"",
"]",
"is",
"not",
"None",
":",
"sp",
"=",
"report",
"[",
"\"policy_published\"",
"]",
"[",
"\"sp\"",
"]",
"new_policy_published",
"[",
"\"sp\"",
"]",
"=",
"sp",
"pct",
"=",
"\"100\"",
"if",
"\"pct\"",
"in",
"policy_published",
":",
"if",
"policy_published",
"[",
"\"pct\"",
"]",
"is",
"not",
"None",
":",
"pct",
"=",
"report",
"[",
"\"policy_published\"",
"]",
"[",
"\"pct\"",
"]",
"new_policy_published",
"[",
"\"pct\"",
"]",
"=",
"pct",
"fo",
"=",
"\"0\"",
"if",
"\"fo\"",
"in",
"policy_published",
":",
"if",
"policy_published",
"[",
"\"fo\"",
"]",
"is",
"not",
"None",
":",
"fo",
"=",
"report",
"[",
"\"policy_published\"",
"]",
"[",
"\"fo\"",
"]",
"new_policy_published",
"[",
"\"fo\"",
"]",
"=",
"fo",
"new_report",
"[",
"\"policy_published\"",
"]",
"=",
"new_policy_published",
"if",
"type",
"(",
"report",
"[",
"\"record\"",
"]",
")",
"==",
"list",
":",
"for",
"record",
"in",
"report",
"[",
"\"record\"",
"]",
":",
"report_record",
"=",
"_parse_report_record",
"(",
"record",
",",
"nameservers",
"=",
"nameservers",
",",
"dns_timeout",
"=",
"timeout",
",",
"parallel",
"=",
"parallel",
")",
"records",
".",
"append",
"(",
"report_record",
")",
"else",
":",
"report_record",
"=",
"_parse_report_record",
"(",
"report",
"[",
"\"record\"",
"]",
",",
"nameservers",
"=",
"nameservers",
",",
"dns_timeout",
"=",
"timeout",
",",
"parallel",
"=",
"parallel",
")",
"records",
".",
"append",
"(",
"report_record",
")",
"new_report",
"[",
"\"records\"",
"]",
"=",
"records",
"return",
"new_report",
"except",
"expat",
".",
"ExpatError",
"as",
"error",
":",
"raise",
"InvalidAggregateReport",
"(",
"\"Invalid XML: {0}\"",
".",
"format",
"(",
"error",
".",
"__str__",
"(",
")",
")",
")",
"except",
"KeyError",
"as",
"error",
":",
"raise",
"InvalidAggregateReport",
"(",
"\"Missing field: {0}\"",
".",
"format",
"(",
"error",
".",
"__str__",
"(",
")",
")",
")",
"except",
"AttributeError",
":",
"raise",
"InvalidAggregateReport",
"(",
"\"Report missing required section\"",
")",
"except",
"Exception",
"as",
"error",
":",
"raise",
"InvalidAggregateReport",
"(",
"\"Unexpected error: {0}\"",
".",
"format",
"(",
"error",
".",
"__str__",
"(",
")",
")",
")"
] |
ecc9fd434c23d896ccd1f35795ccc047f946ed05
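
A hedged usage sketch for parse_aggregate_report_xml; the filename is hypothetical, and the key lookups follow the structure built above.

with open("example_aggregate_report.xml") as f:   # hypothetical path
    xml = f.read()

report = parse_aggregate_report_xml(xml, timeout=5.0)
print(report["report_metadata"]["org_name"])
print(report["policy_published"]["p"])
print(len(report["records"]))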
|
test
|
extract_xml
|
Extracts xml from a zip or gzip file at the given path, file-like object,
or bytes.
Args:
input_: A path to a file, a file like object, or bytes
Returns:
str: The extracted XML
|
parsedmarc/__init__.py
|
def extract_xml(input_):
"""
Extracts xml from a zip or gzip file at the given path, file-like object,
or bytes.
Args:
input_: A path to a file, a file like object, or bytes
Returns:
str: The extracted XML
"""
if type(input_) == str:
file_object = open(input_, "rb")
elif type(input_) == bytes:
file_object = BytesIO(input_)
else:
file_object = input_
try:
header = file_object.read(6)
file_object.seek(0)
if header.startswith(MAGIC_ZIP):
_zip = zipfile.ZipFile(file_object)
xml = _zip.open(_zip.namelist()[0]).read().decode()
elif header.startswith(MAGIC_GZIP):
xml = GzipFile(fileobj=file_object).read().decode()
elif header.startswith(MAGIC_XML):
xml = file_object.read().decode()
else:
file_object.close()
raise InvalidAggregateReport("Not a valid zip, gzip, or xml file")
file_object.close()
except UnicodeDecodeError:
raise InvalidAggregateReport("File objects must be opened in binary "
"(rb) mode")
except Exception as error:
raise InvalidAggregateReport(
"Invalid archive file: {0}".format(error.__str__()))
return xml
|
def extract_xml(input_):
"""
Extracts xml from a zip or gzip file at the given path, file-like object,
or bytes.
Args:
input_: A path to a file, a file like object, or bytes
Returns:
str: The extracted XML
"""
if type(input_) == str:
file_object = open(input_, "rb")
elif type(input_) == bytes:
file_object = BytesIO(input_)
else:
file_object = input_
try:
header = file_object.read(6)
file_object.seek(0)
if header.startswith(MAGIC_ZIP):
_zip = zipfile.ZipFile(file_object)
xml = _zip.open(_zip.namelist()[0]).read().decode()
elif header.startswith(MAGIC_GZIP):
xml = GzipFile(fileobj=file_object).read().decode()
elif header.startswith(MAGIC_XML):
xml = file_object.read().decode()
else:
file_object.close()
raise InvalidAggregateReport("Not a valid zip, gzip, or xml file")
file_object.close()
except UnicodeDecodeError:
raise InvalidAggregateReport("File objects must be opened in binary "
"(rb) mode")
except Exception as error:
raise InvalidAggregateReport(
"Invalid archive file: {0}".format(error.__str__()))
return xml
|
[
"Extracts",
"xml",
"from",
"a",
"zip",
"or",
"gzip",
"file",
"at",
"the",
"given",
"path",
"file",
"-",
"like",
"object",
"or",
"bytes",
"."
] |
domainaware/parsedmarc
|
python
|
https://github.com/domainaware/parsedmarc/blob/ecc9fd434c23d896ccd1f35795ccc047f946ed05/parsedmarc/__init__.py#L342-L383
|
[
"def",
"extract_xml",
"(",
"input_",
")",
":",
"if",
"type",
"(",
"input_",
")",
"==",
"str",
":",
"file_object",
"=",
"open",
"(",
"input_",
",",
"\"rb\"",
")",
"elif",
"type",
"(",
"input_",
")",
"==",
"bytes",
":",
"file_object",
"=",
"BytesIO",
"(",
"input_",
")",
"else",
":",
"file_object",
"=",
"input_",
"try",
":",
"header",
"=",
"file_object",
".",
"read",
"(",
"6",
")",
"file_object",
".",
"seek",
"(",
"0",
")",
"if",
"header",
".",
"startswith",
"(",
"MAGIC_ZIP",
")",
":",
"_zip",
"=",
"zipfile",
".",
"ZipFile",
"(",
"file_object",
")",
"xml",
"=",
"_zip",
".",
"open",
"(",
"_zip",
".",
"namelist",
"(",
")",
"[",
"0",
"]",
")",
".",
"read",
"(",
")",
".",
"decode",
"(",
")",
"elif",
"header",
".",
"startswith",
"(",
"MAGIC_GZIP",
")",
":",
"xml",
"=",
"GzipFile",
"(",
"fileobj",
"=",
"file_object",
")",
".",
"read",
"(",
")",
".",
"decode",
"(",
")",
"elif",
"header",
".",
"startswith",
"(",
"MAGIC_XML",
")",
":",
"xml",
"=",
"file_object",
".",
"read",
"(",
")",
".",
"decode",
"(",
")",
"else",
":",
"file_object",
".",
"close",
"(",
")",
"raise",
"InvalidAggregateReport",
"(",
"\"Not a valid zip, gzip, or xml file\"",
")",
"file_object",
".",
"close",
"(",
")",
"except",
"UnicodeDecodeError",
":",
"raise",
"InvalidAggregateReport",
"(",
"\"File objects must be opened in binary \"",
"\"(rb) mode\"",
")",
"except",
"Exception",
"as",
"error",
":",
"raise",
"InvalidAggregateReport",
"(",
"\"Invalid archive file: {0}\"",
".",
"format",
"(",
"error",
".",
"__str__",
"(",
")",
")",
")",
"return",
"xml"
] |
ecc9fd434c23d896ccd1f35795ccc047f946ed05
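
A short usage sketch for extract_xml; the path is hypothetical. The second call illustrates that plain XML bytes are passed through, since the function sniffs the first bytes of the input against the zip, gzip, and xml magic values.

# From a path to a .zip, .gz, or plain .xml aggregate report (hypothetical name)
xml = extract_xml("aggregate_report.xml.gz")

# From bytes already in memory; plain XML is detected by its header
xml = extract_xml(b'<?xml version="1.0"?><feedback></feedback>')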
|
test
|
parse_aggregate_report_file
|
Parses a file at the given path, a file-like object, or bytes as an
aggregate DMARC report
Args:
_input: A path to a file, a file like object, or bytes
nameservers (list): A list of one or more nameservers to use
(Cloudflare's public DNS resolvers by default)
dns_timeout (float): Sets the DNS timeout in seconds
parallel (bool): Parallel processing
Returns:
OrderedDict: The parsed DMARC aggregate report
|
parsedmarc/__init__.py
|
def parse_aggregate_report_file(_input, nameservers=None, dns_timeout=2.0,
parallel=False):
"""Parses a file at the given path, a file-like object. or bytes as a
aggregate DMARC report
Args:
_input: A path to a file, a file like object, or bytes
nameservers (list): A list of one or more nameservers to use
(Cloudflare's public DNS resolvers by default)
dns_timeout (float): Sets the DNS timeout in seconds
parallel (bool): Parallel processing
Returns:
OrderedDict: The parsed DMARC aggregate report
"""
xml = extract_xml(_input)
return parse_aggregate_report_xml(xml,
nameservers=nameservers,
timeout=dns_timeout,
parallel=parallel)
|
def parse_aggregate_report_file(_input, nameservers=None, dns_timeout=2.0,
parallel=False):
"""Parses a file at the given path, a file-like object. or bytes as a
aggregate DMARC report
Args:
_input: A path to a file, a file like object, or bytes
nameservers (list): A list of one or more nameservers to use
(Cloudflare's public DNS resolvers by default)
dns_timeout (float): Sets the DNS timeout in seconds
parallel (bool): Parallel processing
Returns:
OrderedDict: The parsed DMARC aggregate report
"""
xml = extract_xml(_input)
return parse_aggregate_report_xml(xml,
nameservers=nameservers,
timeout=dns_timeout,
parallel=parallel)
|
[
"Parses",
"a",
"file",
"at",
"the",
"given",
"path",
"a",
"file",
"-",
"like",
"object",
".",
"or",
"bytes",
"as",
"a",
"aggregate",
"DMARC",
"report"
] |
domainaware/parsedmarc
|
python
|
https://github.com/domainaware/parsedmarc/blob/ecc9fd434c23d896ccd1f35795ccc047f946ed05/parsedmarc/__init__.py#L386-L406
|
[
"def",
"parse_aggregate_report_file",
"(",
"_input",
",",
"nameservers",
"=",
"None",
",",
"dns_timeout",
"=",
"2.0",
",",
"parallel",
"=",
"False",
")",
":",
"xml",
"=",
"extract_xml",
"(",
"_input",
")",
"return",
"parse_aggregate_report_xml",
"(",
"xml",
",",
"nameservers",
"=",
"nameservers",
",",
"timeout",
"=",
"dns_timeout",
",",
"parallel",
"=",
"parallel",
")"
] |
ecc9fd434c23d896ccd1f35795ccc047f946ed05
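
A minimal usage sketch (hypothetical filename); this is simply extract_xml followed by parse_aggregate_report_xml, as shown above.

report = parse_aggregate_report_file("aggregate_report.zip", dns_timeout=5.0)
print(report["report_metadata"]["report_id"])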
|
test
|
parsed_aggregate_reports_to_csv
|
Converts one or more parsed aggregate reports to flat CSV format, including
headers
Args:
reports: A parsed aggregate report or list of parsed aggregate reports
Returns:
str: Parsed aggregate report data in flat CSV format, including headers
|
parsedmarc/__init__.py
|
def parsed_aggregate_reports_to_csv(reports):
"""
Converts one or more parsed aggregate reports to flat CSV format, including
headers
Args:
reports: A parsed aggregate report or list of parsed aggregate reports
Returns:
str: Parsed aggregate report data in flat CSV format, including headers
"""
def to_str(obj):
return str(obj).lower()
fields = ["xml_schema", "org_name", "org_email",
"org_extra_contact_info", "report_id", "begin_date", "end_date",
"errors", "domain", "adkim", "aspf", "p", "sp", "pct", "fo",
"source_ip_address", "source_country", "source_reverse_dns",
"source_base_domain", "count", "disposition", "dkim_alignment",
"spf_alignment", "policy_override_reasons",
"policy_override_comments", "envelope_from", "header_from",
"envelope_to", "dkim_domains", "dkim_selectors", "dkim_results",
"spf_domains", "spf_scopes", "spf_results"]
csv_file_object = StringIO(newline="\n")
writer = DictWriter(csv_file_object, fields)
writer.writeheader()
if type(reports) == OrderedDict:
reports = [reports]
for report in reports:
xml_schema = report["xml_schema"]
org_name = report["report_metadata"]["org_name"]
org_email = report["report_metadata"]["org_email"]
org_extra_contact = report["report_metadata"]["org_extra_contact_info"]
report_id = report["report_metadata"]["report_id"]
begin_date = report["report_metadata"]["begin_date"]
end_date = report["report_metadata"]["end_date"]
errors = "|".join(report["report_metadata"]["errors"])
domain = report["policy_published"]["domain"]
adkim = report["policy_published"]["adkim"]
aspf = report["policy_published"]["aspf"]
p = report["policy_published"]["p"]
sp = report["policy_published"]["sp"]
pct = report["policy_published"]["pct"]
fo = report["policy_published"]["fo"]
report_dict = dict(xml_schema=xml_schema, org_name=org_name,
org_email=org_email,
org_extra_contact_info=org_extra_contact,
report_id=report_id, begin_date=begin_date,
end_date=end_date, errors=errors, domain=domain,
adkim=adkim, aspf=aspf, p=p, sp=sp, pct=pct, fo=fo)
for record in report["records"]:
row = report_dict
row["source_ip_address"] = record["source"]["ip_address"]
row["source_country"] = record["source"]["country"]
row["source_reverse_dns"] = record["source"]["reverse_dns"]
row["source_base_domain"] = record["source"]["base_domain"]
row["count"] = record["count"]
row["disposition"] = record["policy_evaluated"]["disposition"]
row["spf_alignment"] = record["policy_evaluated"]["spf"]
row["dkim_alignment"] = record["policy_evaluated"]["dkim"]
policy_override_reasons = list(map(
lambda r: r["type"],
record["policy_evaluated"]
["policy_override_reasons"]))
policy_override_comments = list(map(
lambda r: r["comment"] or "none",
record["policy_evaluated"]
["policy_override_reasons"]))
row["policy_override_reasons"] = ",".join(
policy_override_reasons)
row["policy_override_comments"] = "|".join(
policy_override_comments)
row["envelope_from"] = record["identifiers"]["envelope_from"]
row["header_from"] = record["identifiers"]["header_from"]
envelope_to = record["identifiers"]["envelope_to"]
row["envelope_to"] = envelope_to
dkim_domains = []
dkim_selectors = []
dkim_results = []
for dkim_result in record["auth_results"]["dkim"]:
dkim_domains.append(dkim_result["domain"])
if "selector" in dkim_result:
dkim_selectors.append(dkim_result["selector"])
dkim_results.append(dkim_result["result"])
row["dkim_domains"] = ",".join(map(to_str, dkim_domains))
row["dkim_selectors"] = ",".join(map(to_str, dkim_selectors))
row["dkim_results"] = ",".join(map(to_str, dkim_results))
spf_domains = []
spf_scopes = []
spf_results = []
for spf_result in record["auth_results"]["spf"]:
spf_domains.append(spf_result["domain"])
spf_scopes.append(spf_result["scope"])
spf_results.append(spf_result["result"])
row["spf_domains"] = ",".join(map(to_str, spf_domains))
row["spf_scopes"] = ",".join(map(to_str, spf_scopes))
row["spf_results"] = ",".join(map(to_str, dkim_results))
writer.writerow(row)
csv_file_object.flush()
return csv_file_object.getvalue()
|
def parsed_aggregate_reports_to_csv(reports):
"""
Converts one or more parsed aggregate reports to flat CSV format, including
headers
Args:
reports: A parsed aggregate report or list of parsed aggregate reports
Returns:
str: Parsed aggregate report data in flat CSV format, including headers
"""
def to_str(obj):
return str(obj).lower()
fields = ["xml_schema", "org_name", "org_email",
"org_extra_contact_info", "report_id", "begin_date", "end_date",
"errors", "domain", "adkim", "aspf", "p", "sp", "pct", "fo",
"source_ip_address", "source_country", "source_reverse_dns",
"source_base_domain", "count", "disposition", "dkim_alignment",
"spf_alignment", "policy_override_reasons",
"policy_override_comments", "envelope_from", "header_from",
"envelope_to", "dkim_domains", "dkim_selectors", "dkim_results",
"spf_domains", "spf_scopes", "spf_results"]
csv_file_object = StringIO(newline="\n")
writer = DictWriter(csv_file_object, fields)
writer.writeheader()
if type(reports) == OrderedDict:
reports = [reports]
for report in reports:
xml_schema = report["xml_schema"]
org_name = report["report_metadata"]["org_name"]
org_email = report["report_metadata"]["org_email"]
org_extra_contact = report["report_metadata"]["org_extra_contact_info"]
report_id = report["report_metadata"]["report_id"]
begin_date = report["report_metadata"]["begin_date"]
end_date = report["report_metadata"]["end_date"]
errors = "|".join(report["report_metadata"]["errors"])
domain = report["policy_published"]["domain"]
adkim = report["policy_published"]["adkim"]
aspf = report["policy_published"]["aspf"]
p = report["policy_published"]["p"]
sp = report["policy_published"]["sp"]
pct = report["policy_published"]["pct"]
fo = report["policy_published"]["fo"]
report_dict = dict(xml_schema=xml_schema, org_name=org_name,
org_email=org_email,
org_extra_contact_info=org_extra_contact,
report_id=report_id, begin_date=begin_date,
end_date=end_date, errors=errors, domain=domain,
adkim=adkim, aspf=aspf, p=p, sp=sp, pct=pct, fo=fo)
for record in report["records"]:
row = report_dict
row["source_ip_address"] = record["source"]["ip_address"]
row["source_country"] = record["source"]["country"]
row["source_reverse_dns"] = record["source"]["reverse_dns"]
row["source_base_domain"] = record["source"]["base_domain"]
row["count"] = record["count"]
row["disposition"] = record["policy_evaluated"]["disposition"]
row["spf_alignment"] = record["policy_evaluated"]["spf"]
row["dkim_alignment"] = record["policy_evaluated"]["dkim"]
policy_override_reasons = list(map(
lambda r: r["type"],
record["policy_evaluated"]
["policy_override_reasons"]))
policy_override_comments = list(map(
lambda r: r["comment"] or "none",
record["policy_evaluated"]
["policy_override_reasons"]))
row["policy_override_reasons"] = ",".join(
policy_override_reasons)
row["policy_override_comments"] = "|".join(
policy_override_comments)
row["envelope_from"] = record["identifiers"]["envelope_from"]
row["header_from"] = record["identifiers"]["header_from"]
envelope_to = record["identifiers"]["envelope_to"]
row["envelope_to"] = envelope_to
dkim_domains = []
dkim_selectors = []
dkim_results = []
for dkim_result in record["auth_results"]["dkim"]:
dkim_domains.append(dkim_result["domain"])
if "selector" in dkim_result:
dkim_selectors.append(dkim_result["selector"])
dkim_results.append(dkim_result["result"])
row["dkim_domains"] = ",".join(map(to_str, dkim_domains))
row["dkim_selectors"] = ",".join(map(to_str, dkim_selectors))
row["dkim_results"] = ",".join(map(to_str, dkim_results))
spf_domains = []
spf_scopes = []
spf_results = []
for spf_result in record["auth_results"]["spf"]:
spf_domains.append(spf_result["domain"])
spf_scopes.append(spf_result["scope"])
spf_results.append(spf_result["result"])
row["spf_domains"] = ",".join(map(to_str, spf_domains))
row["spf_scopes"] = ",".join(map(to_str, spf_scopes))
row["spf_results"] = ",".join(map(to_str, dkim_results))
writer.writerow(row)
csv_file_object.flush()
return csv_file_object.getvalue()
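A brief usage illustration (an editorial addition, not part of the dataset row): a minimal, hedged sketch of feeding a parsed aggregate report into this CSV helper. The file name "report.xml" is a placeholder; parse_report_file, shown later in this file, accepts a path, bytes, or a file-like object.

# Hedged sketch: "report.xml" is a hypothetical aggregate DMARC report file.
from parsedmarc import parse_report_file, parsed_aggregate_reports_to_csv

result = parse_report_file("report.xml")
if result["report_type"] == "aggregate":
    csv_text = parsed_aggregate_reports_to_csv(result["report"])
    with open("aggregate.csv", "w", newline="") as output_file:
        output_file.write(csv_text)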
|
[
"Converts",
"one",
"or",
"more",
"parsed",
"aggregate",
"reports",
"to",
"flat",
"CSV",
"format",
"including",
"headers"
] |
domainaware/parsedmarc
|
python
|
https://github.com/domainaware/parsedmarc/blob/ecc9fd434c23d896ccd1f35795ccc047f946ed05/parsedmarc/__init__.py#L409-L516
|
[
"def",
"parsed_aggregate_reports_to_csv",
"(",
"reports",
")",
":",
"def",
"to_str",
"(",
"obj",
")",
":",
"return",
"str",
"(",
"obj",
")",
".",
"lower",
"(",
")",
"fields",
"=",
"[",
"\"xml_schema\"",
",",
"\"org_name\"",
",",
"\"org_email\"",
",",
"\"org_extra_contact_info\"",
",",
"\"report_id\"",
",",
"\"begin_date\"",
",",
"\"end_date\"",
",",
"\"errors\"",
",",
"\"domain\"",
",",
"\"adkim\"",
",",
"\"aspf\"",
",",
"\"p\"",
",",
"\"sp\"",
",",
"\"pct\"",
",",
"\"fo\"",
",",
"\"source_ip_address\"",
",",
"\"source_country\"",
",",
"\"source_reverse_dns\"",
",",
"\"source_base_domain\"",
",",
"\"count\"",
",",
"\"disposition\"",
",",
"\"dkim_alignment\"",
",",
"\"spf_alignment\"",
",",
"\"policy_override_reasons\"",
",",
"\"policy_override_comments\"",
",",
"\"envelope_from\"",
",",
"\"header_from\"",
",",
"\"envelope_to\"",
",",
"\"dkim_domains\"",
",",
"\"dkim_selectors\"",
",",
"\"dkim_results\"",
",",
"\"spf_domains\"",
",",
"\"spf_scopes\"",
",",
"\"spf_results\"",
"]",
"csv_file_object",
"=",
"StringIO",
"(",
"newline",
"=",
"\"\\n\"",
")",
"writer",
"=",
"DictWriter",
"(",
"csv_file_object",
",",
"fields",
")",
"writer",
".",
"writeheader",
"(",
")",
"if",
"type",
"(",
"reports",
")",
"==",
"OrderedDict",
":",
"reports",
"=",
"[",
"reports",
"]",
"for",
"report",
"in",
"reports",
":",
"xml_schema",
"=",
"report",
"[",
"\"xml_schema\"",
"]",
"org_name",
"=",
"report",
"[",
"\"report_metadata\"",
"]",
"[",
"\"org_name\"",
"]",
"org_email",
"=",
"report",
"[",
"\"report_metadata\"",
"]",
"[",
"\"org_email\"",
"]",
"org_extra_contact",
"=",
"report",
"[",
"\"report_metadata\"",
"]",
"[",
"\"org_extra_contact_info\"",
"]",
"report_id",
"=",
"report",
"[",
"\"report_metadata\"",
"]",
"[",
"\"report_id\"",
"]",
"begin_date",
"=",
"report",
"[",
"\"report_metadata\"",
"]",
"[",
"\"begin_date\"",
"]",
"end_date",
"=",
"report",
"[",
"\"report_metadata\"",
"]",
"[",
"\"end_date\"",
"]",
"errors",
"=",
"\"|\"",
".",
"join",
"(",
"report",
"[",
"\"report_metadata\"",
"]",
"[",
"\"errors\"",
"]",
")",
"domain",
"=",
"report",
"[",
"\"policy_published\"",
"]",
"[",
"\"domain\"",
"]",
"adkim",
"=",
"report",
"[",
"\"policy_published\"",
"]",
"[",
"\"adkim\"",
"]",
"aspf",
"=",
"report",
"[",
"\"policy_published\"",
"]",
"[",
"\"aspf\"",
"]",
"p",
"=",
"report",
"[",
"\"policy_published\"",
"]",
"[",
"\"p\"",
"]",
"sp",
"=",
"report",
"[",
"\"policy_published\"",
"]",
"[",
"\"sp\"",
"]",
"pct",
"=",
"report",
"[",
"\"policy_published\"",
"]",
"[",
"\"pct\"",
"]",
"fo",
"=",
"report",
"[",
"\"policy_published\"",
"]",
"[",
"\"fo\"",
"]",
"report_dict",
"=",
"dict",
"(",
"xml_schema",
"=",
"xml_schema",
",",
"org_name",
"=",
"org_name",
",",
"org_email",
"=",
"org_email",
",",
"org_extra_contact_info",
"=",
"org_extra_contact",
",",
"report_id",
"=",
"report_id",
",",
"begin_date",
"=",
"begin_date",
",",
"end_date",
"=",
"end_date",
",",
"errors",
"=",
"errors",
",",
"domain",
"=",
"domain",
",",
"adkim",
"=",
"adkim",
",",
"aspf",
"=",
"aspf",
",",
"p",
"=",
"p",
",",
"sp",
"=",
"sp",
",",
"pct",
"=",
"pct",
",",
"fo",
"=",
"fo",
")",
"for",
"record",
"in",
"report",
"[",
"\"records\"",
"]",
":",
"row",
"=",
"report_dict",
"row",
"[",
"\"source_ip_address\"",
"]",
"=",
"record",
"[",
"\"source\"",
"]",
"[",
"\"ip_address\"",
"]",
"row",
"[",
"\"source_country\"",
"]",
"=",
"record",
"[",
"\"source\"",
"]",
"[",
"\"country\"",
"]",
"row",
"[",
"\"source_reverse_dns\"",
"]",
"=",
"record",
"[",
"\"source\"",
"]",
"[",
"\"reverse_dns\"",
"]",
"row",
"[",
"\"source_base_domain\"",
"]",
"=",
"record",
"[",
"\"source\"",
"]",
"[",
"\"base_domain\"",
"]",
"row",
"[",
"\"count\"",
"]",
"=",
"record",
"[",
"\"count\"",
"]",
"row",
"[",
"\"disposition\"",
"]",
"=",
"record",
"[",
"\"policy_evaluated\"",
"]",
"[",
"\"disposition\"",
"]",
"row",
"[",
"\"spf_alignment\"",
"]",
"=",
"record",
"[",
"\"policy_evaluated\"",
"]",
"[",
"\"spf\"",
"]",
"row",
"[",
"\"dkim_alignment\"",
"]",
"=",
"record",
"[",
"\"policy_evaluated\"",
"]",
"[",
"\"dkim\"",
"]",
"policy_override_reasons",
"=",
"list",
"(",
"map",
"(",
"lambda",
"r",
":",
"r",
"[",
"\"type\"",
"]",
",",
"record",
"[",
"\"policy_evaluated\"",
"]",
"[",
"\"policy_override_reasons\"",
"]",
")",
")",
"policy_override_comments",
"=",
"list",
"(",
"map",
"(",
"lambda",
"r",
":",
"r",
"[",
"\"comment\"",
"]",
"or",
"\"none\"",
",",
"record",
"[",
"\"policy_evaluated\"",
"]",
"[",
"\"policy_override_reasons\"",
"]",
")",
")",
"row",
"[",
"\"policy_override_reasons\"",
"]",
"=",
"\",\"",
".",
"join",
"(",
"policy_override_reasons",
")",
"row",
"[",
"\"policy_override_comments\"",
"]",
"=",
"\"|\"",
".",
"join",
"(",
"policy_override_comments",
")",
"row",
"[",
"\"envelope_from\"",
"]",
"=",
"record",
"[",
"\"identifiers\"",
"]",
"[",
"\"envelope_from\"",
"]",
"row",
"[",
"\"header_from\"",
"]",
"=",
"record",
"[",
"\"identifiers\"",
"]",
"[",
"\"header_from\"",
"]",
"envelope_to",
"=",
"record",
"[",
"\"identifiers\"",
"]",
"[",
"\"envelope_to\"",
"]",
"row",
"[",
"\"envelope_to\"",
"]",
"=",
"envelope_to",
"dkim_domains",
"=",
"[",
"]",
"dkim_selectors",
"=",
"[",
"]",
"dkim_results",
"=",
"[",
"]",
"for",
"dkim_result",
"in",
"record",
"[",
"\"auth_results\"",
"]",
"[",
"\"dkim\"",
"]",
":",
"dkim_domains",
".",
"append",
"(",
"dkim_result",
"[",
"\"domain\"",
"]",
")",
"if",
"\"selector\"",
"in",
"dkim_result",
":",
"dkim_selectors",
".",
"append",
"(",
"dkim_result",
"[",
"\"selector\"",
"]",
")",
"dkim_results",
".",
"append",
"(",
"dkim_result",
"[",
"\"result\"",
"]",
")",
"row",
"[",
"\"dkim_domains\"",
"]",
"=",
"\",\"",
".",
"join",
"(",
"map",
"(",
"to_str",
",",
"dkim_domains",
")",
")",
"row",
"[",
"\"dkim_selectors\"",
"]",
"=",
"\",\"",
".",
"join",
"(",
"map",
"(",
"to_str",
",",
"dkim_selectors",
")",
")",
"row",
"[",
"\"dkim_results\"",
"]",
"=",
"\",\"",
".",
"join",
"(",
"map",
"(",
"to_str",
",",
"dkim_results",
")",
")",
"spf_domains",
"=",
"[",
"]",
"spf_scopes",
"=",
"[",
"]",
"spf_results",
"=",
"[",
"]",
"for",
"spf_result",
"in",
"record",
"[",
"\"auth_results\"",
"]",
"[",
"\"spf\"",
"]",
":",
"spf_domains",
".",
"append",
"(",
"spf_result",
"[",
"\"domain\"",
"]",
")",
"spf_scopes",
".",
"append",
"(",
"spf_result",
"[",
"\"scope\"",
"]",
")",
"spf_results",
".",
"append",
"(",
"spf_result",
"[",
"\"result\"",
"]",
")",
"row",
"[",
"\"spf_domains\"",
"]",
"=",
"\",\"",
".",
"join",
"(",
"map",
"(",
"to_str",
",",
"spf_domains",
")",
")",
"row",
"[",
"\"spf_scopes\"",
"]",
"=",
"\",\"",
".",
"join",
"(",
"map",
"(",
"to_str",
",",
"spf_scopes",
")",
")",
"row",
"[",
"\"spf_results\"",
"]",
"=",
"\",\"",
".",
"join",
"(",
"map",
"(",
"to_str",
",",
"dkim_results",
")",
")",
"writer",
".",
"writerow",
"(",
"row",
")",
"csv_file_object",
".",
"flush",
"(",
")",
"return",
"csv_file_object",
".",
"getvalue",
"(",
")"
] |
ecc9fd434c23d896ccd1f35795ccc047f946ed05
|
test
|
parse_forensic_report
|
Converts a DMARC forensic report and sample to an ``OrderedDict``
Args:
feedback_report (str): A message's feedback report as a string
sample (str): The RFC 822 headers or RFC 822 message sample
msg_date (str): The message's date header
nameservers (list): A list of one or more nameservers to use
(Cloudflare's public DNS resolvers by default)
dns_timeout (float): Sets the DNS timeout in seconds
strip_attachment_payloads (bool): Remove attachment payloads from
forensic report results
parallel (bool): Parallel processing
Returns:
OrderedDict: A parsed report and sample
|
parsedmarc/__init__.py
|
def parse_forensic_report(feedback_report, sample, msg_date,
nameservers=None, dns_timeout=2.0,
strip_attachment_payloads=False,
parallel=False):
"""
    Converts a DMARC forensic report and sample to an ``OrderedDict``
Args:
feedback_report (str): A message's feedback report as a string
sample (str): The RFC 822 headers or RFC 822 message sample
msg_date (str): The message's date header
nameservers (list): A list of one or more nameservers to use
(Cloudflare's public DNS resolvers by default)
dns_timeout (float): Sets the DNS timeout in seconds
strip_attachment_payloads (bool): Remove attachment payloads from
forensic report results
parallel (bool): Parallel processing
Returns:
OrderedDict: A parsed report and sample
"""
delivery_results = ["delivered", "spam", "policy", "reject", "other"]
try:
parsed_report = OrderedDict()
report_values = feedback_report_regex.findall(feedback_report)
for report_value in report_values:
key = report_value[0].lower().replace("-", "_")
parsed_report[key] = report_value[1]
if "arrival_date" not in parsed_report:
if msg_date is None:
raise InvalidForensicReport(
"Forensic sample is not a valid email")
parsed_report["arrival_date"] = msg_date.isoformat()
if "version" not in parsed_report:
parsed_report["version"] = 1
if "user_agent" not in parsed_report:
parsed_report["user_agent"] = None
if "delivery_result" not in parsed_report:
parsed_report["delivery_result"] = None
else:
for delivery_result in delivery_results:
if delivery_result in parsed_report["delivery_result"].lower():
parsed_report["delivery_result"] = delivery_result
break
if parsed_report["delivery_result"] not in delivery_results:
parsed_report["delivery_result"] = "other"
arrival_utc = human_timestamp_to_datetime(
parsed_report["arrival_date"], to_utc=True)
arrival_utc = arrival_utc.strftime("%Y-%m-%d %H:%M:%S")
parsed_report["arrival_date_utc"] = arrival_utc
ip_address = parsed_report["source_ip"]
parsed_report_source = get_ip_address_info(ip_address,
nameservers=nameservers,
timeout=dns_timeout,
parallel=parallel)
parsed_report["source"] = parsed_report_source
del parsed_report["source_ip"]
if "identity_alignment" not in parsed_report:
parsed_report["authentication_mechanisms"] = []
elif parsed_report["identity_alignment"] == "none":
parsed_report["authentication_mechanisms"] = []
del parsed_report["identity_alignment"]
else:
auth_mechanisms = parsed_report["identity_alignment"]
auth_mechanisms = auth_mechanisms.split(",")
parsed_report["authentication_mechanisms"] = auth_mechanisms
del parsed_report["identity_alignment"]
if "auth_failure" not in parsed_report:
parsed_report["auth_failure"] = "dmarc"
auth_failure = parsed_report["auth_failure"].split(",")
parsed_report["auth_failure"] = auth_failure
optional_fields = ["original_envelope_id", "dkim_domain",
"original_mail_from", "original_rcpt_to"]
for optional_field in optional_fields:
if optional_field not in parsed_report:
parsed_report[optional_field] = None
parsed_sample = parse_email(
sample,
strip_attachment_payloads=strip_attachment_payloads)
if "reported_domain" not in parsed_report:
parsed_report["reported_domain"] = parsed_sample["from"]["domain"]
sample_headers_only = False
number_of_attachments = len(parsed_sample["attachments"])
if number_of_attachments < 1 and parsed_sample["body"] is None:
sample_headers_only = True
if sample_headers_only and parsed_sample["has_defects"]:
del parsed_sample["defects"]
del parsed_sample["defects_categories"]
del parsed_sample["has_defects"]
parsed_report["sample_headers_only"] = sample_headers_only
parsed_report["sample"] = sample
parsed_report["parsed_sample"] = parsed_sample
return parsed_report
except KeyError as error:
raise InvalidForensicReport("Missing value: {0}".format(
error.__str__()))
except Exception as error:
raise InvalidForensicReport(
"Unexpected error: {0}".format(error.__str__()))
|
def parse_forensic_report(feedback_report, sample, msg_date,
nameservers=None, dns_timeout=2.0,
strip_attachment_payloads=False,
parallel=False):
"""
    Converts a DMARC forensic report and sample to an ``OrderedDict``
Args:
feedback_report (str): A message's feedback report as a string
sample (str): The RFC 822 headers or RFC 822 message sample
msg_date (str): The message's date header
nameservers (list): A list of one or more nameservers to use
(Cloudflare's public DNS resolvers by default)
dns_timeout (float): Sets the DNS timeout in seconds
strip_attachment_payloads (bool): Remove attachment payloads from
forensic report results
parallel (bool): Parallel processing
Returns:
OrderedDict: A parsed report and sample
"""
delivery_results = ["delivered", "spam", "policy", "reject", "other"]
try:
parsed_report = OrderedDict()
report_values = feedback_report_regex.findall(feedback_report)
for report_value in report_values:
key = report_value[0].lower().replace("-", "_")
parsed_report[key] = report_value[1]
if "arrival_date" not in parsed_report:
if msg_date is None:
raise InvalidForensicReport(
"Forensic sample is not a valid email")
parsed_report["arrival_date"] = msg_date.isoformat()
if "version" not in parsed_report:
parsed_report["version"] = 1
if "user_agent" not in parsed_report:
parsed_report["user_agent"] = None
if "delivery_result" not in parsed_report:
parsed_report["delivery_result"] = None
else:
for delivery_result in delivery_results:
if delivery_result in parsed_report["delivery_result"].lower():
parsed_report["delivery_result"] = delivery_result
break
if parsed_report["delivery_result"] not in delivery_results:
parsed_report["delivery_result"] = "other"
arrival_utc = human_timestamp_to_datetime(
parsed_report["arrival_date"], to_utc=True)
arrival_utc = arrival_utc.strftime("%Y-%m-%d %H:%M:%S")
parsed_report["arrival_date_utc"] = arrival_utc
ip_address = parsed_report["source_ip"]
parsed_report_source = get_ip_address_info(ip_address,
nameservers=nameservers,
timeout=dns_timeout,
parallel=parallel)
parsed_report["source"] = parsed_report_source
del parsed_report["source_ip"]
if "identity_alignment" not in parsed_report:
parsed_report["authentication_mechanisms"] = []
elif parsed_report["identity_alignment"] == "none":
parsed_report["authentication_mechanisms"] = []
del parsed_report["identity_alignment"]
else:
auth_mechanisms = parsed_report["identity_alignment"]
auth_mechanisms = auth_mechanisms.split(",")
parsed_report["authentication_mechanisms"] = auth_mechanisms
del parsed_report["identity_alignment"]
if "auth_failure" not in parsed_report:
parsed_report["auth_failure"] = "dmarc"
auth_failure = parsed_report["auth_failure"].split(",")
parsed_report["auth_failure"] = auth_failure
optional_fields = ["original_envelope_id", "dkim_domain",
"original_mail_from", "original_rcpt_to"]
for optional_field in optional_fields:
if optional_field not in parsed_report:
parsed_report[optional_field] = None
parsed_sample = parse_email(
sample,
strip_attachment_payloads=strip_attachment_payloads)
if "reported_domain" not in parsed_report:
parsed_report["reported_domain"] = parsed_sample["from"]["domain"]
sample_headers_only = False
number_of_attachments = len(parsed_sample["attachments"])
if number_of_attachments < 1 and parsed_sample["body"] is None:
sample_headers_only = True
if sample_headers_only and parsed_sample["has_defects"]:
del parsed_sample["defects"]
del parsed_sample["defects_categories"]
del parsed_sample["has_defects"]
parsed_report["sample_headers_only"] = sample_headers_only
parsed_report["sample"] = sample
parsed_report["parsed_sample"] = parsed_sample
return parsed_report
except KeyError as error:
raise InvalidForensicReport("Missing value: {0}".format(
error.__str__()))
except Exception as error:
raise InvalidForensicReport(
"Unexpected error: {0}".format(error.__str__()))
|
[
"Converts",
"a",
"DMARC",
"forensic",
"report",
"and",
"sample",
"to",
"a",
"OrderedDict"
] |
domainaware/parsedmarc
|
python
|
https://github.com/domainaware/parsedmarc/blob/ecc9fd434c23d896ccd1f35795ccc047f946ed05/parsedmarc/__init__.py#L519-L633
|
[
"def",
"parse_forensic_report",
"(",
"feedback_report",
",",
"sample",
",",
"msg_date",
",",
"nameservers",
"=",
"None",
",",
"dns_timeout",
"=",
"2.0",
",",
"strip_attachment_payloads",
"=",
"False",
",",
"parallel",
"=",
"False",
")",
":",
"delivery_results",
"=",
"[",
"\"delivered\"",
",",
"\"spam\"",
",",
"\"policy\"",
",",
"\"reject\"",
",",
"\"other\"",
"]",
"try",
":",
"parsed_report",
"=",
"OrderedDict",
"(",
")",
"report_values",
"=",
"feedback_report_regex",
".",
"findall",
"(",
"feedback_report",
")",
"for",
"report_value",
"in",
"report_values",
":",
"key",
"=",
"report_value",
"[",
"0",
"]",
".",
"lower",
"(",
")",
".",
"replace",
"(",
"\"-\"",
",",
"\"_\"",
")",
"parsed_report",
"[",
"key",
"]",
"=",
"report_value",
"[",
"1",
"]",
"if",
"\"arrival_date\"",
"not",
"in",
"parsed_report",
":",
"if",
"msg_date",
"is",
"None",
":",
"raise",
"InvalidForensicReport",
"(",
"\"Forensic sample is not a valid email\"",
")",
"parsed_report",
"[",
"\"arrival_date\"",
"]",
"=",
"msg_date",
".",
"isoformat",
"(",
")",
"if",
"\"version\"",
"not",
"in",
"parsed_report",
":",
"parsed_report",
"[",
"\"version\"",
"]",
"=",
"1",
"if",
"\"user_agent\"",
"not",
"in",
"parsed_report",
":",
"parsed_report",
"[",
"\"user_agent\"",
"]",
"=",
"None",
"if",
"\"delivery_result\"",
"not",
"in",
"parsed_report",
":",
"parsed_report",
"[",
"\"delivery_result\"",
"]",
"=",
"None",
"else",
":",
"for",
"delivery_result",
"in",
"delivery_results",
":",
"if",
"delivery_result",
"in",
"parsed_report",
"[",
"\"delivery_result\"",
"]",
".",
"lower",
"(",
")",
":",
"parsed_report",
"[",
"\"delivery_result\"",
"]",
"=",
"delivery_result",
"break",
"if",
"parsed_report",
"[",
"\"delivery_result\"",
"]",
"not",
"in",
"delivery_results",
":",
"parsed_report",
"[",
"\"delivery_result\"",
"]",
"=",
"\"other\"",
"arrival_utc",
"=",
"human_timestamp_to_datetime",
"(",
"parsed_report",
"[",
"\"arrival_date\"",
"]",
",",
"to_utc",
"=",
"True",
")",
"arrival_utc",
"=",
"arrival_utc",
".",
"strftime",
"(",
"\"%Y-%m-%d %H:%M:%S\"",
")",
"parsed_report",
"[",
"\"arrival_date_utc\"",
"]",
"=",
"arrival_utc",
"ip_address",
"=",
"parsed_report",
"[",
"\"source_ip\"",
"]",
"parsed_report_source",
"=",
"get_ip_address_info",
"(",
"ip_address",
",",
"nameservers",
"=",
"nameservers",
",",
"timeout",
"=",
"dns_timeout",
",",
"parallel",
"=",
"parallel",
")",
"parsed_report",
"[",
"\"source\"",
"]",
"=",
"parsed_report_source",
"del",
"parsed_report",
"[",
"\"source_ip\"",
"]",
"if",
"\"identity_alignment\"",
"not",
"in",
"parsed_report",
":",
"parsed_report",
"[",
"\"authentication_mechanisms\"",
"]",
"=",
"[",
"]",
"elif",
"parsed_report",
"[",
"\"identity_alignment\"",
"]",
"==",
"\"none\"",
":",
"parsed_report",
"[",
"\"authentication_mechanisms\"",
"]",
"=",
"[",
"]",
"del",
"parsed_report",
"[",
"\"identity_alignment\"",
"]",
"else",
":",
"auth_mechanisms",
"=",
"parsed_report",
"[",
"\"identity_alignment\"",
"]",
"auth_mechanisms",
"=",
"auth_mechanisms",
".",
"split",
"(",
"\",\"",
")",
"parsed_report",
"[",
"\"authentication_mechanisms\"",
"]",
"=",
"auth_mechanisms",
"del",
"parsed_report",
"[",
"\"identity_alignment\"",
"]",
"if",
"\"auth_failure\"",
"not",
"in",
"parsed_report",
":",
"parsed_report",
"[",
"\"auth_failure\"",
"]",
"=",
"\"dmarc\"",
"auth_failure",
"=",
"parsed_report",
"[",
"\"auth_failure\"",
"]",
".",
"split",
"(",
"\",\"",
")",
"parsed_report",
"[",
"\"auth_failure\"",
"]",
"=",
"auth_failure",
"optional_fields",
"=",
"[",
"\"original_envelope_id\"",
",",
"\"dkim_domain\"",
",",
"\"original_mail_from\"",
",",
"\"original_rcpt_to\"",
"]",
"for",
"optional_field",
"in",
"optional_fields",
":",
"if",
"optional_field",
"not",
"in",
"parsed_report",
":",
"parsed_report",
"[",
"optional_field",
"]",
"=",
"None",
"parsed_sample",
"=",
"parse_email",
"(",
"sample",
",",
"strip_attachment_payloads",
"=",
"strip_attachment_payloads",
")",
"if",
"\"reported_domain\"",
"not",
"in",
"parsed_report",
":",
"parsed_report",
"[",
"\"reported_domain\"",
"]",
"=",
"parsed_sample",
"[",
"\"from\"",
"]",
"[",
"\"domain\"",
"]",
"sample_headers_only",
"=",
"False",
"number_of_attachments",
"=",
"len",
"(",
"parsed_sample",
"[",
"\"attachments\"",
"]",
")",
"if",
"number_of_attachments",
"<",
"1",
"and",
"parsed_sample",
"[",
"\"body\"",
"]",
"is",
"None",
":",
"sample_headers_only",
"=",
"True",
"if",
"sample_headers_only",
"and",
"parsed_sample",
"[",
"\"has_defects\"",
"]",
":",
"del",
"parsed_sample",
"[",
"\"defects\"",
"]",
"del",
"parsed_sample",
"[",
"\"defects_categories\"",
"]",
"del",
"parsed_sample",
"[",
"\"has_defects\"",
"]",
"parsed_report",
"[",
"\"sample_headers_only\"",
"]",
"=",
"sample_headers_only",
"parsed_report",
"[",
"\"sample\"",
"]",
"=",
"sample",
"parsed_report",
"[",
"\"parsed_sample\"",
"]",
"=",
"parsed_sample",
"return",
"parsed_report",
"except",
"KeyError",
"as",
"error",
":",
"raise",
"InvalidForensicReport",
"(",
"\"Missing value: {0}\"",
".",
"format",
"(",
"error",
".",
"__str__",
"(",
")",
")",
")",
"except",
"Exception",
"as",
"error",
":",
"raise",
"InvalidForensicReport",
"(",
"\"Unexpected error: {0}\"",
".",
"format",
"(",
"error",
".",
"__str__",
"(",
")",
")",
")"
] |
ecc9fd434c23d896ccd1f35795ccc047f946ed05
|
test
|
parsed_forensic_reports_to_csv
|
Converts one or more parsed forensic reports to flat CSV format, including
headers
Args:
reports: A parsed forensic report or list of parsed forensic reports
Returns:
str: Parsed forensic report data in flat CSV format, including headers
|
parsedmarc/__init__.py
|
def parsed_forensic_reports_to_csv(reports):
"""
Converts one or more parsed forensic reports to flat CSV format, including
headers
Args:
reports: A parsed forensic report or list of parsed forensic reports
Returns:
str: Parsed forensic report data in flat CSV format, including headers
"""
fields = ["feedback_type", "user_agent", "version", "original_envelope_id",
"original_mail_from", "original_rcpt_to", "arrival_date",
"arrival_date_utc", "subject", "message_id",
"authentication_results", "dkim_domain", "source_ip_address",
"source_country", "source_reverse_dns", "source_base_domain",
"delivery_result", "auth_failure", "reported_domain",
"authentication_mechanisms", "sample_headers_only"]
if type(reports) == OrderedDict:
reports = [reports]
csv_file = StringIO()
csv_writer = DictWriter(csv_file, fieldnames=fields)
csv_writer.writeheader()
for report in reports:
row = report.copy()
row["source_ip_address"] = report["source"]["ip_address"]
row["source_reverse_dns"] = report["source"]["reverse_dns"]
row["source_base_domain"] = report["source"]["base_domain"]
row["source_country"] = report["source"]["country"]
del row["source"]
row["subject"] = report["parsed_sample"]["subject"]
row["auth_failure"] = ",".join(report["auth_failure"])
authentication_mechanisms = report["authentication_mechanisms"]
row["authentication_mechanisms"] = ",".join(
authentication_mechanisms)
del row["sample"]
del row["parsed_sample"]
csv_writer.writerow(row)
return csv_file.getvalue()
|
def parsed_forensic_reports_to_csv(reports):
"""
Converts one or more parsed forensic reports to flat CSV format, including
headers
Args:
reports: A parsed forensic report or list of parsed forensic reports
Returns:
str: Parsed forensic report data in flat CSV format, including headers
"""
fields = ["feedback_type", "user_agent", "version", "original_envelope_id",
"original_mail_from", "original_rcpt_to", "arrival_date",
"arrival_date_utc", "subject", "message_id",
"authentication_results", "dkim_domain", "source_ip_address",
"source_country", "source_reverse_dns", "source_base_domain",
"delivery_result", "auth_failure", "reported_domain",
"authentication_mechanisms", "sample_headers_only"]
if type(reports) == OrderedDict:
reports = [reports]
csv_file = StringIO()
csv_writer = DictWriter(csv_file, fieldnames=fields)
csv_writer.writeheader()
for report in reports:
row = report.copy()
row["source_ip_address"] = report["source"]["ip_address"]
row["source_reverse_dns"] = report["source"]["reverse_dns"]
row["source_base_domain"] = report["source"]["base_domain"]
row["source_country"] = report["source"]["country"]
del row["source"]
row["subject"] = report["parsed_sample"]["subject"]
row["auth_failure"] = ",".join(report["auth_failure"])
authentication_mechanisms = report["authentication_mechanisms"]
row["authentication_mechanisms"] = ",".join(
authentication_mechanisms)
del row["sample"]
del row["parsed_sample"]
csv_writer.writerow(row)
return csv_file.getvalue()
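An illustrative follow-up (editorial addition, not part of the dataset row): a hedged sketch combining this helper with parse_report_file to emit a forensic CSV; the file name is a placeholder.

# Hedged sketch: "forensic_sample.eml" is a hypothetical forensic report email.
from parsedmarc import parse_report_file, parsed_forensic_reports_to_csv

result = parse_report_file("forensic_sample.eml")
if result["report_type"] == "forensic":
    # The helper accepts a single parsed report or a list of them
    print(parsed_forensic_reports_to_csv(result["report"]))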
|
[
"Converts",
"one",
"or",
"more",
"parsed",
"forensic",
"reports",
"to",
"flat",
"CSV",
"format",
"including",
"headers"
] |
domainaware/parsedmarc
|
python
|
https://github.com/domainaware/parsedmarc/blob/ecc9fd434c23d896ccd1f35795ccc047f946ed05/parsedmarc/__init__.py#L636-L676
|
[
"def",
"parsed_forensic_reports_to_csv",
"(",
"reports",
")",
":",
"fields",
"=",
"[",
"\"feedback_type\"",
",",
"\"user_agent\"",
",",
"\"version\"",
",",
"\"original_envelope_id\"",
",",
"\"original_mail_from\"",
",",
"\"original_rcpt_to\"",
",",
"\"arrival_date\"",
",",
"\"arrival_date_utc\"",
",",
"\"subject\"",
",",
"\"message_id\"",
",",
"\"authentication_results\"",
",",
"\"dkim_domain\"",
",",
"\"source_ip_address\"",
",",
"\"source_country\"",
",",
"\"source_reverse_dns\"",
",",
"\"source_base_domain\"",
",",
"\"delivery_result\"",
",",
"\"auth_failure\"",
",",
"\"reported_domain\"",
",",
"\"authentication_mechanisms\"",
",",
"\"sample_headers_only\"",
"]",
"if",
"type",
"(",
"reports",
")",
"==",
"OrderedDict",
":",
"reports",
"=",
"[",
"reports",
"]",
"csv_file",
"=",
"StringIO",
"(",
")",
"csv_writer",
"=",
"DictWriter",
"(",
"csv_file",
",",
"fieldnames",
"=",
"fields",
")",
"csv_writer",
".",
"writeheader",
"(",
")",
"for",
"report",
"in",
"reports",
":",
"row",
"=",
"report",
".",
"copy",
"(",
")",
"row",
"[",
"\"source_ip_address\"",
"]",
"=",
"report",
"[",
"\"source\"",
"]",
"[",
"\"ip_address\"",
"]",
"row",
"[",
"\"source_reverse_dns\"",
"]",
"=",
"report",
"[",
"\"source\"",
"]",
"[",
"\"reverse_dns\"",
"]",
"row",
"[",
"\"source_base_domain\"",
"]",
"=",
"report",
"[",
"\"source\"",
"]",
"[",
"\"base_domain\"",
"]",
"row",
"[",
"\"source_country\"",
"]",
"=",
"report",
"[",
"\"source\"",
"]",
"[",
"\"country\"",
"]",
"del",
"row",
"[",
"\"source\"",
"]",
"row",
"[",
"\"subject\"",
"]",
"=",
"report",
"[",
"\"parsed_sample\"",
"]",
"[",
"\"subject\"",
"]",
"row",
"[",
"\"auth_failure\"",
"]",
"=",
"\",\"",
".",
"join",
"(",
"report",
"[",
"\"auth_failure\"",
"]",
")",
"authentication_mechanisms",
"=",
"report",
"[",
"\"authentication_mechanisms\"",
"]",
"row",
"[",
"\"authentication_mechanisms\"",
"]",
"=",
"\",\"",
".",
"join",
"(",
"authentication_mechanisms",
")",
"del",
"row",
"[",
"\"sample\"",
"]",
"del",
"row",
"[",
"\"parsed_sample\"",
"]",
"csv_writer",
".",
"writerow",
"(",
"row",
")",
"return",
"csv_file",
".",
"getvalue",
"(",
")"
] |
ecc9fd434c23d896ccd1f35795ccc047f946ed05
|
test
|
parse_report_email
|
Parses a DMARC report from an email
Args:
input_: An emailed DMARC report in RFC 822 format, as bytes or a string
nameservers (list): A list of one or more nameservers to use
dns_timeout (float): Sets the DNS timeout in seconds
strip_attachment_payloads (bool): Remove attachment payloads from
forensic report results
parallel (bool): Parallel processing
Returns:
OrderedDict:
* ``report_type``: ``aggregate`` or ``forensic``
* ``report``: The parsed report
|
parsedmarc/__init__.py
|
def parse_report_email(input_, nameservers=None, dns_timeout=2.0,
strip_attachment_payloads=False, parallel=False):
"""
Parses a DMARC report from an email
Args:
input_: An emailed DMARC report in RFC 822 format, as bytes or a string
nameservers (list): A list of one or more nameservers to use
dns_timeout (float): Sets the DNS timeout in seconds
strip_attachment_payloads (bool): Remove attachment payloads from
forensic report results
parallel (bool): Parallel processing
Returns:
OrderedDict:
* ``report_type``: ``aggregate`` or ``forensic``
* ``report``: The parsed report
"""
result = None
try:
if is_outlook_msg(input_):
input_ = convert_outlook_msg(input_)
if type(input_) == bytes:
input_ = input_.decode(encoding="utf8")
msg = mailparser.parse_from_string(input_)
msg_headers = json.loads(msg.headers_json)
date = email.utils.format_datetime(datetime.utcnow())
if "Date" in msg_headers:
date = human_timestamp_to_datetime(
msg_headers["Date"])
msg = email.message_from_string(input_)
except Exception as e:
raise InvalidDMARCReport(e.__str__())
subject = None
feedback_report = None
sample = None
if "Subject" in msg_headers:
subject = msg_headers["Subject"]
for part in msg.walk():
content_type = part.get_content_type()
payload = part.get_payload()
if type(payload) != list:
payload = [payload]
payload = payload[0].__str__()
if content_type == "message/feedback-report":
try:
if "Feedback-Type" in payload:
feedback_report = payload
else:
feedback_report = b64decode(payload).__str__()
feedback_report = feedback_report.lstrip(
"b'").rstrip("'")
feedback_report = feedback_report.replace("\\r", "")
feedback_report = feedback_report.replace("\\n", "\n")
except (ValueError, TypeError, binascii.Error):
feedback_report = payload
elif content_type == "text/rfc822-headers":
sample = payload
elif content_type == "message/rfc822":
sample = payload
else:
try:
payload = b64decode(payload)
if payload.startswith(MAGIC_ZIP) or \
payload.startswith(MAGIC_GZIP) or \
payload.startswith(MAGIC_XML):
ns = nameservers
aggregate_report = parse_aggregate_report_file(
payload,
nameservers=ns,
dns_timeout=dns_timeout,
parallel=parallel)
result = OrderedDict([("report_type", "aggregate"),
("report", aggregate_report)])
return result
except (TypeError, ValueError, binascii.Error):
pass
except InvalidAggregateReport as e:
error = 'Message with subject "{0}" ' \
'is not a valid ' \
'aggregate DMARC report: {1}'.format(subject, e)
raise InvalidAggregateReport(error)
except FileNotFoundError as e:
error = 'Unable to parse message with ' \
'subject "{0}": {1}'.format(subject, e)
raise InvalidDMARCReport(error)
if feedback_report and sample:
try:
forensic_report = parse_forensic_report(
feedback_report,
sample,
date,
nameservers=nameservers,
dns_timeout=dns_timeout,
strip_attachment_payloads=strip_attachment_payloads,
parallel=parallel)
except InvalidForensicReport as e:
error = 'Message with subject "{0}" ' \
'is not a valid ' \
'forensic DMARC report: {1}'.format(subject, e)
raise InvalidForensicReport(error)
except Exception as e:
raise InvalidForensicReport(e.__str__())
result = OrderedDict([("report_type", "forensic"),
("report", forensic_report)])
return result
if result is None:
error = 'Message with subject "{0}" is ' \
'not a valid DMARC report'.format(subject)
raise InvalidDMARCReport(error)
|
def parse_report_email(input_, nameservers=None, dns_timeout=2.0,
strip_attachment_payloads=False, parallel=False):
"""
Parses a DMARC report from an email
Args:
input_: An emailed DMARC report in RFC 822 format, as bytes or a string
nameservers (list): A list of one or more nameservers to use
dns_timeout (float): Sets the DNS timeout in seconds
strip_attachment_payloads (bool): Remove attachment payloads from
forensic report results
parallel (bool): Parallel processing
Returns:
OrderedDict:
* ``report_type``: ``aggregate`` or ``forensic``
* ``report``: The parsed report
"""
result = None
try:
if is_outlook_msg(input_):
input_ = convert_outlook_msg(input_)
if type(input_) == bytes:
input_ = input_.decode(encoding="utf8")
msg = mailparser.parse_from_string(input_)
msg_headers = json.loads(msg.headers_json)
date = email.utils.format_datetime(datetime.utcnow())
if "Date" in msg_headers:
date = human_timestamp_to_datetime(
msg_headers["Date"])
msg = email.message_from_string(input_)
except Exception as e:
raise InvalidDMARCReport(e.__str__())
subject = None
feedback_report = None
sample = None
if "Subject" in msg_headers:
subject = msg_headers["Subject"]
for part in msg.walk():
content_type = part.get_content_type()
payload = part.get_payload()
if type(payload) != list:
payload = [payload]
payload = payload[0].__str__()
if content_type == "message/feedback-report":
try:
if "Feedback-Type" in payload:
feedback_report = payload
else:
feedback_report = b64decode(payload).__str__()
feedback_report = feedback_report.lstrip(
"b'").rstrip("'")
feedback_report = feedback_report.replace("\\r", "")
feedback_report = feedback_report.replace("\\n", "\n")
except (ValueError, TypeError, binascii.Error):
feedback_report = payload
elif content_type == "text/rfc822-headers":
sample = payload
elif content_type == "message/rfc822":
sample = payload
else:
try:
payload = b64decode(payload)
if payload.startswith(MAGIC_ZIP) or \
payload.startswith(MAGIC_GZIP) or \
payload.startswith(MAGIC_XML):
ns = nameservers
aggregate_report = parse_aggregate_report_file(
payload,
nameservers=ns,
dns_timeout=dns_timeout,
parallel=parallel)
result = OrderedDict([("report_type", "aggregate"),
("report", aggregate_report)])
return result
except (TypeError, ValueError, binascii.Error):
pass
except InvalidAggregateReport as e:
error = 'Message with subject "{0}" ' \
'is not a valid ' \
'aggregate DMARC report: {1}'.format(subject, e)
raise InvalidAggregateReport(error)
except FileNotFoundError as e:
error = 'Unable to parse message with ' \
'subject "{0}": {1}'.format(subject, e)
raise InvalidDMARCReport(error)
if feedback_report and sample:
try:
forensic_report = parse_forensic_report(
feedback_report,
sample,
date,
nameservers=nameservers,
dns_timeout=dns_timeout,
strip_attachment_payloads=strip_attachment_payloads,
parallel=parallel)
except InvalidForensicReport as e:
error = 'Message with subject "{0}" ' \
'is not a valid ' \
'forensic DMARC report: {1}'.format(subject, e)
raise InvalidForensicReport(error)
except Exception as e:
raise InvalidForensicReport(e.__str__())
result = OrderedDict([("report_type", "forensic"),
("report", forensic_report)])
return result
if result is None:
error = 'Message with subject "{0}" is ' \
'not a valid DMARC report'.format(subject)
raise InvalidDMARCReport(error)
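For context (editorial addition, not part of the dataset row): a hedged sketch of calling this function directly on raw RFC 822 text and branching on the detected report type. The .eml file name is a placeholder; InvalidDMARCReport is the exception class raised by this module.

# Hedged sketch: "dmarc_report.eml" is a hypothetical emailed DMARC report.
from parsedmarc import parse_report_email, InvalidDMARCReport

with open("dmarc_report.eml", "rb") as message_file:
    raw = message_file.read()  # bytes are decoded as UTF-8 internally
try:
    result = parse_report_email(raw)
    print(result["report_type"])  # "aggregate" or "forensic"
except InvalidDMARCReport as error:
    print("Not a DMARC report: {0}".format(error))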
|
[
"Parses",
"a",
"DMARC",
"report",
"from",
"an",
"email"
] |
domainaware/parsedmarc
|
python
|
https://github.com/domainaware/parsedmarc/blob/ecc9fd434c23d896ccd1f35795ccc047f946ed05/parsedmarc/__init__.py#L679-L797
|
[
"def",
"parse_report_email",
"(",
"input_",
",",
"nameservers",
"=",
"None",
",",
"dns_timeout",
"=",
"2.0",
",",
"strip_attachment_payloads",
"=",
"False",
",",
"parallel",
"=",
"False",
")",
":",
"result",
"=",
"None",
"try",
":",
"if",
"is_outlook_msg",
"(",
"input_",
")",
":",
"input_",
"=",
"convert_outlook_msg",
"(",
"input_",
")",
"if",
"type",
"(",
"input_",
")",
"==",
"bytes",
":",
"input_",
"=",
"input_",
".",
"decode",
"(",
"encoding",
"=",
"\"utf8\"",
")",
"msg",
"=",
"mailparser",
".",
"parse_from_string",
"(",
"input_",
")",
"msg_headers",
"=",
"json",
".",
"loads",
"(",
"msg",
".",
"headers_json",
")",
"date",
"=",
"email",
".",
"utils",
".",
"format_datetime",
"(",
"datetime",
".",
"utcnow",
"(",
")",
")",
"if",
"\"Date\"",
"in",
"msg_headers",
":",
"date",
"=",
"human_timestamp_to_datetime",
"(",
"msg_headers",
"[",
"\"Date\"",
"]",
")",
"msg",
"=",
"email",
".",
"message_from_string",
"(",
"input_",
")",
"except",
"Exception",
"as",
"e",
":",
"raise",
"InvalidDMARCReport",
"(",
"e",
".",
"__str__",
"(",
")",
")",
"subject",
"=",
"None",
"feedback_report",
"=",
"None",
"sample",
"=",
"None",
"if",
"\"Subject\"",
"in",
"msg_headers",
":",
"subject",
"=",
"msg_headers",
"[",
"\"Subject\"",
"]",
"for",
"part",
"in",
"msg",
".",
"walk",
"(",
")",
":",
"content_type",
"=",
"part",
".",
"get_content_type",
"(",
")",
"payload",
"=",
"part",
".",
"get_payload",
"(",
")",
"if",
"type",
"(",
"payload",
")",
"!=",
"list",
":",
"payload",
"=",
"[",
"payload",
"]",
"payload",
"=",
"payload",
"[",
"0",
"]",
".",
"__str__",
"(",
")",
"if",
"content_type",
"==",
"\"message/feedback-report\"",
":",
"try",
":",
"if",
"\"Feedback-Type\"",
"in",
"payload",
":",
"feedback_report",
"=",
"payload",
"else",
":",
"feedback_report",
"=",
"b64decode",
"(",
"payload",
")",
".",
"__str__",
"(",
")",
"feedback_report",
"=",
"feedback_report",
".",
"lstrip",
"(",
"\"b'\"",
")",
".",
"rstrip",
"(",
"\"'\"",
")",
"feedback_report",
"=",
"feedback_report",
".",
"replace",
"(",
"\"\\\\r\"",
",",
"\"\"",
")",
"feedback_report",
"=",
"feedback_report",
".",
"replace",
"(",
"\"\\\\n\"",
",",
"\"\\n\"",
")",
"except",
"(",
"ValueError",
",",
"TypeError",
",",
"binascii",
".",
"Error",
")",
":",
"feedback_report",
"=",
"payload",
"elif",
"content_type",
"==",
"\"text/rfc822-headers\"",
":",
"sample",
"=",
"payload",
"elif",
"content_type",
"==",
"\"message/rfc822\"",
":",
"sample",
"=",
"payload",
"else",
":",
"try",
":",
"payload",
"=",
"b64decode",
"(",
"payload",
")",
"if",
"payload",
".",
"startswith",
"(",
"MAGIC_ZIP",
")",
"or",
"payload",
".",
"startswith",
"(",
"MAGIC_GZIP",
")",
"or",
"payload",
".",
"startswith",
"(",
"MAGIC_XML",
")",
":",
"ns",
"=",
"nameservers",
"aggregate_report",
"=",
"parse_aggregate_report_file",
"(",
"payload",
",",
"nameservers",
"=",
"ns",
",",
"dns_timeout",
"=",
"dns_timeout",
",",
"parallel",
"=",
"parallel",
")",
"result",
"=",
"OrderedDict",
"(",
"[",
"(",
"\"report_type\"",
",",
"\"aggregate\"",
")",
",",
"(",
"\"report\"",
",",
"aggregate_report",
")",
"]",
")",
"return",
"result",
"except",
"(",
"TypeError",
",",
"ValueError",
",",
"binascii",
".",
"Error",
")",
":",
"pass",
"except",
"InvalidAggregateReport",
"as",
"e",
":",
"error",
"=",
"'Message with subject \"{0}\" '",
"'is not a valid '",
"'aggregate DMARC report: {1}'",
".",
"format",
"(",
"subject",
",",
"e",
")",
"raise",
"InvalidAggregateReport",
"(",
"error",
")",
"except",
"FileNotFoundError",
"as",
"e",
":",
"error",
"=",
"'Unable to parse message with '",
"'subject \"{0}\": {1}'",
".",
"format",
"(",
"subject",
",",
"e",
")",
"raise",
"InvalidDMARCReport",
"(",
"error",
")",
"if",
"feedback_report",
"and",
"sample",
":",
"try",
":",
"forensic_report",
"=",
"parse_forensic_report",
"(",
"feedback_report",
",",
"sample",
",",
"date",
",",
"nameservers",
"=",
"nameservers",
",",
"dns_timeout",
"=",
"dns_timeout",
",",
"strip_attachment_payloads",
"=",
"strip_attachment_payloads",
",",
"parallel",
"=",
"parallel",
")",
"except",
"InvalidForensicReport",
"as",
"e",
":",
"error",
"=",
"'Message with subject \"{0}\" '",
"'is not a valid '",
"'forensic DMARC report: {1}'",
".",
"format",
"(",
"subject",
",",
"e",
")",
"raise",
"InvalidForensicReport",
"(",
"error",
")",
"except",
"Exception",
"as",
"e",
":",
"raise",
"InvalidForensicReport",
"(",
"e",
".",
"__str__",
"(",
")",
")",
"result",
"=",
"OrderedDict",
"(",
"[",
"(",
"\"report_type\"",
",",
"\"forensic\"",
")",
",",
"(",
"\"report\"",
",",
"forensic_report",
")",
"]",
")",
"return",
"result",
"if",
"result",
"is",
"None",
":",
"error",
"=",
"'Message with subject \"{0}\" is '",
"'not a valid DMARC report'",
".",
"format",
"(",
"subject",
")",
"raise",
"InvalidDMARCReport",
"(",
"error",
")"
] |
ecc9fd434c23d896ccd1f35795ccc047f946ed05
|
test
|
parse_report_file
|
Parses a DMARC aggregate or forensic file at the given path, a
file-like object, or bytes
Args:
input_: A path to a file, a file like object, or bytes
nameservers (list): A list of one or more nameservers to use
(Cloudflare's public DNS resolvers by default)
dns_timeout (float): Sets the DNS timeout in seconds
strip_attachment_payloads (bool): Remove attachment payloads from
forensic report results
parallel (bool): Parallel processing
Returns:
OrderedDict: The parsed DMARC report
|
parsedmarc/__init__.py
|
def parse_report_file(input_, nameservers=None, dns_timeout=2.0,
strip_attachment_payloads=False, parallel=False):
"""Parses a DMARC aggregate or forensic file at the given path, a
    file-like object, or bytes
Args:
input_: A path to a file, a file like object, or bytes
nameservers (list): A list of one or more nameservers to use
(Cloudflare's public DNS resolvers by default)
dns_timeout (float): Sets the DNS timeout in seconds
strip_attachment_payloads (bool): Remove attachment payloads from
forensic report results
parallel (bool): Parallel processing
Returns:
OrderedDict: The parsed DMARC report
"""
if type(input_) == str:
file_object = open(input_, "rb")
elif type(input_) == bytes:
file_object = BytesIO(input_)
else:
file_object = input_
content = file_object.read()
try:
report = parse_aggregate_report_file(content, nameservers=nameservers,
dns_timeout=dns_timeout,
parallel=parallel)
results = OrderedDict([("report_type", "aggregate"),
("report", report)])
except InvalidAggregateReport:
try:
sa = strip_attachment_payloads
results = parse_report_email(content,
nameservers=nameservers,
dns_timeout=dns_timeout,
strip_attachment_payloads=sa,
parallel=parallel)
except InvalidDMARCReport:
raise InvalidDMARCReport("Not a valid aggregate or forensic "
"report")
return results
|
def parse_report_file(input_, nameservers=None, dns_timeout=2.0,
strip_attachment_payloads=False, parallel=False):
"""Parses a DMARC aggregate or forensic file at the given path, a
    file-like object, or bytes
Args:
input_: A path to a file, a file like object, or bytes
nameservers (list): A list of one or more nameservers to use
(Cloudflare's public DNS resolvers by default)
dns_timeout (float): Sets the DNS timeout in seconds
strip_attachment_payloads (bool): Remove attachment payloads from
forensic report results
parallel (bool): Parallel processing
Returns:
OrderedDict: The parsed DMARC report
"""
if type(input_) == str:
file_object = open(input_, "rb")
elif type(input_) == bytes:
file_object = BytesIO(input_)
else:
file_object = input_
content = file_object.read()
try:
report = parse_aggregate_report_file(content, nameservers=nameservers,
dns_timeout=dns_timeout,
parallel=parallel)
results = OrderedDict([("report_type", "aggregate"),
("report", report)])
except InvalidAggregateReport:
try:
sa = strip_attachment_payloads
results = parse_report_email(content,
nameservers=nameservers,
dns_timeout=dns_timeout,
strip_attachment_payloads=sa,
parallel=parallel)
except InvalidDMARCReport:
raise InvalidDMARCReport("Not a valid aggregate or forensic "
"report")
return results
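A minimal usage sketch (editorial addition, not part of the dataset row): serializing the combined result to JSON; the path is a placeholder, and bytes or a file-like object work as well.

# Hedged sketch: "report.xml" is a hypothetical path to a DMARC report.
import json
from parsedmarc import parse_report_file

result = parse_report_file("report.xml")
print(result["report_type"])  # "aggregate" or "forensic"
print(json.dumps(result["report"], indent=2, default=str))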
|
[
"Parses",
"a",
"DMARC",
"aggregate",
"or",
"forensic",
"file",
"at",
"the",
"given",
"path",
"a",
"file",
"-",
"like",
"object",
".",
"or",
"bytes"
] |
domainaware/parsedmarc
|
python
|
https://github.com/domainaware/parsedmarc/blob/ecc9fd434c23d896ccd1f35795ccc047f946ed05/parsedmarc/__init__.py#L800-L842
|
[
"def",
"parse_report_file",
"(",
"input_",
",",
"nameservers",
"=",
"None",
",",
"dns_timeout",
"=",
"2.0",
",",
"strip_attachment_payloads",
"=",
"False",
",",
"parallel",
"=",
"False",
")",
":",
"if",
"type",
"(",
"input_",
")",
"==",
"str",
":",
"file_object",
"=",
"open",
"(",
"input_",
",",
"\"rb\"",
")",
"elif",
"type",
"(",
"input_",
")",
"==",
"bytes",
":",
"file_object",
"=",
"BytesIO",
"(",
"input_",
")",
"else",
":",
"file_object",
"=",
"input_",
"content",
"=",
"file_object",
".",
"read",
"(",
")",
"try",
":",
"report",
"=",
"parse_aggregate_report_file",
"(",
"content",
",",
"nameservers",
"=",
"nameservers",
",",
"dns_timeout",
"=",
"dns_timeout",
",",
"parallel",
"=",
"parallel",
")",
"results",
"=",
"OrderedDict",
"(",
"[",
"(",
"\"report_type\"",
",",
"\"aggregate\"",
")",
",",
"(",
"\"report\"",
",",
"report",
")",
"]",
")",
"except",
"InvalidAggregateReport",
":",
"try",
":",
"sa",
"=",
"strip_attachment_payloads",
"results",
"=",
"parse_report_email",
"(",
"content",
",",
"nameservers",
"=",
"nameservers",
",",
"dns_timeout",
"=",
"dns_timeout",
",",
"strip_attachment_payloads",
"=",
"sa",
",",
"parallel",
"=",
"parallel",
")",
"except",
"InvalidDMARCReport",
":",
"raise",
"InvalidDMARCReport",
"(",
"\"Not a valid aggregate or forensic \"",
"\"report\"",
")",
"return",
"results"
] |
ecc9fd434c23d896ccd1f35795ccc047f946ed05
|
test
|
get_imap_capabilities
|
Returns a list of an IMAP server's capabilities
Args:
server (imapclient.IMAPClient): An instance of imapclient.IMAPClient
Returns (list): A list of capabilities
|
parsedmarc/__init__.py
|
def get_imap_capabilities(server):
"""
Returns a list of an IMAP server's capabilities
Args:
server (imapclient.IMAPClient): An instance of imapclient.IMAPClient
Returns (list): A list of capabilities
"""
capabilities = list(map(str, list(server.capabilities())))
for i in range(len(capabilities)):
capabilities[i] = str(capabilities[i]).replace("b'",
"").replace("'",
"")
logger.debug("IMAP server supports: {0}".format(capabilities))
return capabilities
|
def get_imap_capabilities(server):
"""
Returns a list of an IMAP server's capabilities
Args:
server (imapclient.IMAPClient): An instance of imapclient.IMAPClient
Returns (list): A list of capabilities
"""
capabilities = list(map(str, list(server.capabilities())))
for i in range(len(capabilities)):
capabilities[i] = str(capabilities[i]).replace("b'",
"").replace("'",
"")
logger.debug("IMAP server supports: {0}".format(capabilities))
return capabilities
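A hedged connection sketch (editorial addition, not part of the dataset row): the host and credentials are placeholders and a reachable IMAP server is assumed; this mirrors how get_dmarc_reports_from_inbox below autodetects MOVE support.

# Hedged sketch: host and credentials are placeholders; requires a live IMAP server.
import imapclient
from parsedmarc import get_imap_capabilities

server = imapclient.IMAPClient("imap.example.com", ssl=True, use_uid=True)
server.login("user@example.com", "password")
move_supported = "MOVE" in get_imap_capabilities(server)
print(move_supported)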
|
[
"Returns",
"a",
"list",
"of",
"an",
"IMAP",
"server",
"s",
"capabilities"
] |
domainaware/parsedmarc
|
python
|
https://github.com/domainaware/parsedmarc/blob/ecc9fd434c23d896ccd1f35795ccc047f946ed05/parsedmarc/__init__.py#L845-L862
|
[
"def",
"get_imap_capabilities",
"(",
"server",
")",
":",
"capabilities",
"=",
"list",
"(",
"map",
"(",
"str",
",",
"list",
"(",
"server",
".",
"capabilities",
"(",
")",
")",
")",
")",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"capabilities",
")",
")",
":",
"capabilities",
"[",
"i",
"]",
"=",
"str",
"(",
"capabilities",
"[",
"i",
"]",
")",
".",
"replace",
"(",
"\"b'\"",
",",
"\"\"",
")",
".",
"replace",
"(",
"\"'\"",
",",
"\"\"",
")",
"logger",
".",
"debug",
"(",
"\"IMAP server supports: {0}\"",
".",
"format",
"(",
"capabilities",
")",
")",
"return",
"capabilities"
] |
ecc9fd434c23d896ccd1f35795ccc047f946ed05
|
test
|
get_dmarc_reports_from_inbox
|
Fetches and parses DMARC reports from an inbox
Args:
host: The mail server hostname or IP address
user: The mail server user
password: The mail server password
    connection: An IMAPClient connection to reuse
port: The mail server port
ssl (bool): Use SSL/TLS
    ssl_context (SSLContext): An SSL context
move_supported: Indicate if the IMAP server supports the MOVE command
(autodetect if None)
reports_folder: The IMAP folder where reports can be found
archive_folder: The folder to move processed mail to
delete (bool): Delete messages after processing them
test (bool): Do not move or delete messages after processing them
nameservers (list): A list of DNS nameservers to query
dns_timeout (float): Set the DNS query timeout
strip_attachment_payloads (bool): Remove attachment payloads from
forensic report results
results (dict): Results from the previous run
Returns:
OrderedDict: Lists of ``aggregate_reports`` and ``forensic_reports``
|
parsedmarc/__init__.py
|
def get_dmarc_reports_from_inbox(host=None,
user=None,
password=None,
connection=None,
port=None,
ssl=True,
ssl_context=None,
move_supported=None,
reports_folder="INBOX",
archive_folder="Archive",
delete=False,
test=False,
nameservers=None,
dns_timeout=6.0,
strip_attachment_payloads=False,
results=None):
"""
    Fetches and parses DMARC reports from an inbox
Args:
host: The mail server hostname or IP address
user: The mail server user
password: The mail server password
        connection: An IMAPClient connection to reuse
port: The mail server port
ssl (bool): Use SSL/TLS
        ssl_context (SSLContext): An SSL context
move_supported: Indicate if the IMAP server supports the MOVE command
(autodetect if None)
reports_folder: The IMAP folder where reports can be found
archive_folder: The folder to move processed mail to
delete (bool): Delete messages after processing them
test (bool): Do not move or delete messages after processing them
nameservers (list): A list of DNS nameservers to query
dns_timeout (float): Set the DNS query timeout
strip_attachment_payloads (bool): Remove attachment payloads from
forensic report results
results (dict): Results from the previous run
Returns:
OrderedDict: Lists of ``aggregate_reports`` and ``forensic_reports``
"""
def chunks(l, n):
"""Yield successive n-sized chunks from l."""
for i in range(0, len(l), n):
yield l[i:i + n]
if delete and test:
raise ValueError("delete and test options are mutually exclusive")
if connection is None and (user is None or password is None):
raise ValueError("Must supply a connection, or a username and "
"password")
aggregate_reports = []
forensic_reports = []
aggregate_report_msg_uids = []
forensic_report_msg_uids = []
aggregate_reports_folder = "{0}/Aggregate".format(archive_folder)
forensic_reports_folder = "{0}/Forensic".format(archive_folder)
invalid_reports_folder = "{0}/Invalid".format(archive_folder)
if results:
aggregate_reports = results["aggregate_reports"].copy()
forensic_reports = results["forensic_reports"].copy()
try:
if connection:
server = connection
else:
if not ssl:
logger.debug("Connecting to IMAP over plain text")
if ssl_context is None:
ssl_context = create_default_context()
server = imapclient.IMAPClient(host,
port=port,
ssl=ssl,
ssl_context=ssl_context,
use_uid=True)
server.login(user, password)
if move_supported is None:
server_capabilities = get_imap_capabilities(server)
move_supported = "MOVE" in server_capabilities
def delete_messages(msg_uids):
logger.debug("Deleting message UID(s) {0}".format(",".join(
str(uid) for uid in msg_uids)))
if type(msg_uids) == str or type(msg_uids) == int:
msg_uids = [int(msg_uids)]
server.delete_messages(msg_uids, silent=True)
server.expunge(msg_uids)
def move_messages(msg_uids, folder):
if type(msg_uids) == str or type(msg_uids) == int:
msg_uids = [int(msg_uids)]
for chunk in chunks(msg_uids, 100):
if move_supported:
logger.debug("Moving message UID(s) {0} to {1}".format(
",".join(str(uid) for uid in chunk), folder
))
server.move(chunk, folder)
else:
logger.debug("Copying message UID(s) {0} to {1}".format(
",".join(str(uid) for uid in chunk), folder
))
                server.copy(chunk, folder)
                delete_messages(chunk)
if not server.folder_exists(archive_folder):
logger.debug("Creating IMAP folder: {0}".format(archive_folder))
server.create_folder(archive_folder)
try:
# Test subfolder creation
if not server.folder_exists(aggregate_reports_folder):
server.create_folder(aggregate_reports_folder)
logger.debug(
"Creating IMAP folder: {0}".format(
aggregate_reports_folder))
except imapclient.exceptions.IMAPClientError:
# Only replace / with . when . doesn't work
# This usually indicates a dovecot IMAP server
aggregate_reports_folder = aggregate_reports_folder.replace("/",
".")
forensic_reports_folder = forensic_reports_folder.replace("/",
".")
invalid_reports_folder = invalid_reports_folder.replace("/",
".")
subfolders = [aggregate_reports_folder,
forensic_reports_folder,
invalid_reports_folder]
for subfolder in subfolders:
if not server.folder_exists(subfolder):
logger.debug(
"Creating IMAP folder: {0}".format(subfolder))
server.create_folder(subfolder)
server.select_folder(reports_folder)
messages = server.search()
total_messages = len(messages)
logger.debug("Found {0} messages in IMAP folder {1}".format(
len(messages), reports_folder))
for i in range(len(messages)):
msg_uid = messages[i]
logger.debug("Processing message {0} of {1}: UID {2}".format(
i+1,
total_messages,
msg_uid
))
try:
try:
raw_msg = server.fetch(msg_uid,
["RFC822"])[msg_uid]
msg_keys = [b'RFC822', b'BODY[NULL]', b'BODY[]']
msg_key = ''
for key in msg_keys:
if key in raw_msg.keys():
msg_key = key
break
raw_msg = raw_msg[msg_key]
except (ConnectionResetError, socket.error,
TimeoutError,
imapclient.exceptions.IMAPClientError) as error:
error = error.__str__().lstrip("b'").rstrip("'").rstrip(
".")
logger.debug("IMAP error: {0}".format(error.__str__()))
logger.debug("Reconnecting to IMAP")
try:
server.shutdown()
except Exception as e:
logger.debug(
"Failed to log out: {0}".format(e.__str__()))
if not ssl:
logger.debug("Connecting to IMAP over plain text")
server = imapclient.IMAPClient(host,
port=port,
ssl=ssl,
ssl_context=ssl_context,
use_uid=True)
server.login(user, password)
server.select_folder(reports_folder)
raw_msg = server.fetch(msg_uid,
["RFC822"])[msg_uid][b"RFC822"]
msg_content = raw_msg.decode("utf-8", errors="replace")
sa = strip_attachment_payloads
parsed_email = parse_report_email(msg_content,
nameservers=nameservers,
dns_timeout=dns_timeout,
strip_attachment_payloads=sa)
if parsed_email["report_type"] == "aggregate":
aggregate_reports.append(parsed_email["report"])
aggregate_report_msg_uids.append(msg_uid)
elif parsed_email["report_type"] == "forensic":
forensic_reports.append(parsed_email["report"])
forensic_report_msg_uids.append(msg_uid)
except InvalidDMARCReport as error:
logger.warning(error.__str__())
if not test:
if delete:
logger.debug(
"Deleting message UID {0}".format(msg_uid))
delete_messages([msg_uid])
else:
logger.debug(
"Moving message UID {0} to {1}".format(
msg_uid, invalid_reports_folder))
move_messages([msg_uid], invalid_reports_folder)
if not test:
if delete:
processed_messages = aggregate_report_msg_uids + \
forensic_report_msg_uids
number_of_processed_msgs = len(processed_messages)
for i in range(number_of_processed_msgs):
msg_uid = processed_messages[i]
logger.debug(
"Deleting message {0} of {1}: UID {2}".format(
i + 1, number_of_processed_msgs, msg_uid))
try:
delete_messages([msg_uid])
except imapclient.exceptions.IMAPClientError as e:
e = e.__str__().lstrip("b'").rstrip(
"'").rstrip(".")
message = "Error deleting message UID"
e = "{0} {1}: " "{2}".format(message, msg_uid, e)
logger.error("IMAP error: {0}".format(e))
except (ConnectionResetError, socket.error,
TimeoutError) as e:
logger.debug("IMAP error: {0}".format(e.__str__()))
logger.debug("Reconnecting to IMAP")
try:
server.shutdown()
except Exception as e:
logger.debug(
"Failed to log out: {0}".format(e.__str__()))
if not ssl:
logger.debug("Connecting to IMAP over plain text")
server = imapclient.IMAPClient(host,
port=port,
ssl=ssl,
ssl_context=ssl_context,
use_uid=True)
server.login(user, password)
server.select_folder(reports_folder)
delete_messages([msg_uid])
else:
if len(aggregate_report_msg_uids) > 0:
log_message = "Moving aggregate report messages from"
logger.debug(
"{0} {1} to {1}".format(
log_message, reports_folder,
aggregate_reports_folder))
number_of_agg_report_msgs = len(aggregate_report_msg_uids)
for i in range(number_of_agg_report_msgs):
msg_uid = aggregate_report_msg_uids[i]
logger.debug(
"Moving message {0} of {1}: UID {2}".format(
i+1, number_of_agg_report_msgs, msg_uid))
try:
move_messages([msg_uid],
aggregate_reports_folder)
except imapclient.exceptions.IMAPClientError as e:
e = e.__str__().lstrip("b'").rstrip(
"'").rstrip(".")
message = "Error moving message UID"
e = "{0} {1}: {2}".format(message, msg_uid, e)
logger.error("IMAP error: {0}".format(e))
except (ConnectionResetError, socket.error,
TimeoutError) as error:
logger.debug("IMAP error: {0}".format(
error.__str__()))
logger.debug("Reconnecting to IMAP")
try:
server.shutdown()
except Exception as e:
logger.debug("Failed to log out: {0}".format(
e.__str__()))
if not ssl:
logger.debug(
"Connecting to IMAP over plain text")
server = imapclient.IMAPClient(
host,
port=port,
ssl=ssl,
ssl_context=ssl_context,
use_uid=True
)
server.login(user, password)
server.select_folder(reports_folder)
move_messages([msg_uid],
aggregate_reports_folder)
if len(forensic_report_msg_uids) > 0:
message = "Moving forensic report messages from"
logger.debug(
"{0} {1} to {2}".format(message,
reports_folder,
forensic_reports_folder))
number_of_forensic_msgs = len(forensic_report_msg_uids)
for i in range(number_of_forensic_msgs):
msg_uid = forensic_report_msg_uids[i]
message = "Moving message"
                        logger.debug("{0} {1} of {2}: UID {3}".format(
message,
i + 1, number_of_forensic_msgs, msg_uid))
try:
move_messages([msg_uid],
forensic_reports_folder)
except imapclient.exceptions.IMAPClientError as e:
e = e.__str__().lstrip("b'").rstrip(
"'").rstrip(".")
e = "Error moving message UID {0}: {1}".format(
msg_uid, e)
logger.error("IMAP error: {0}".format(e))
except (ConnectionResetError, TimeoutError) as error:
logger.debug("IMAP error: {0}".format(
error.__str__()))
logger.debug("Reconnecting to IMAP")
try:
server.shutdown()
except Exception as e:
logger.debug("Failed to "
"disconnect: {0}".format(
e.__str__()))
if not ssl:
logger.debug(
"Connecting to IMAP over plain text")
server = imapclient.IMAPClient(
host,
port=port,
ssl=ssl,
ssl_context=ssl_context,
use_uid=True)
server.login(user, password)
server.select_folder(reports_folder)
move_messages([msg_uid],
forensic_reports_folder)
results = OrderedDict([("aggregate_reports", aggregate_reports),
("forensic_reports", forensic_reports)])
if not test and total_messages > 0:
# Process emails that came in during the last run
results = get_dmarc_reports_from_inbox(
host=host,
user=user,
password=password,
connection=connection,
port=port,
ssl=ssl,
ssl_context=ssl_context,
move_supported=move_supported,
reports_folder=reports_folder,
archive_folder=archive_folder,
delete=delete,
test=test,
nameservers=nameservers,
dns_timeout=dns_timeout,
strip_attachment_payloads=strip_attachment_payloads,
results=results
)
return results
except imapclient.exceptions.IMAPClientError as error:
error = error.__str__().lstrip("b'").rstrip("'").rstrip(".")
# Workaround for random Exchange/Office365 IMAP errors
if "unexpected response" in error or "BAD" in error:
sleep_minutes = 5
logger.debug(
"{0}. "
"Waiting {1} minutes before trying again".format(
error,
sleep_minutes))
time.sleep(sleep_minutes * 60)
results = get_dmarc_reports_from_inbox(
host=host,
user=user,
password=password,
connection=connection,
port=port,
ssl=ssl,
ssl_context=ssl_context,
move_supported=move_supported,
reports_folder=reports_folder,
archive_folder=archive_folder,
delete=delete,
test=test,
nameservers=nameservers,
dns_timeout=dns_timeout,
strip_attachment_payloads=strip_attachment_payloads,
results=results
)
return results
raise IMAPError(error)
except socket.gaierror:
raise IMAPError("DNS resolution failed")
except ConnectionRefusedError:
raise IMAPError("Connection refused")
except ConnectionResetError:
sleep_minutes = 5
logger.debug(
"Connection reset. "
"Waiting {0} minutes before trying again".format(sleep_minutes))
time.sleep(sleep_minutes * 60)
results = get_dmarc_reports_from_inbox(
host=host,
user=user,
password=password,
connection=connection,
port=port,
ssl=ssl,
ssl_context=ssl_context,
move_supported=move_supported,
reports_folder=reports_folder,
archive_folder=archive_folder,
delete=delete,
test=test,
nameservers=nameservers,
dns_timeout=dns_timeout,
strip_attachment_payloads=strip_attachment_payloads,
results=results
)
return results
except ConnectionAbortedError:
raise IMAPError("Connection aborted")
except TimeoutError:
raise IMAPError("Connection timed out")
except SSLError as error:
raise IMAPError("SSL error: {0}".format(error.__str__()))
except CertificateError as error:
raise IMAPError("Certificate error: {0}".format(error.__str__()))
|
def get_dmarc_reports_from_inbox(host=None,
user=None,
password=None,
connection=None,
port=None,
ssl=True,
ssl_context=None,
move_supported=None,
reports_folder="INBOX",
archive_folder="Archive",
delete=False,
test=False,
nameservers=None,
dns_timeout=6.0,
strip_attachment_payloads=False,
results=None):
"""
    Fetches and parses DMARC reports from an inbox
Args:
host: The mail server hostname or IP address
user: The mail server user
password: The mail server password
        connection: An IMAPClient connection to reuse
port: The mail server port
ssl (bool): Use SSL/TLS
ssl_context (SSLContext): A SSL context
move_supported: Indicate if the IMAP server supports the MOVE command
(autodetect if None)
reports_folder: The IMAP folder where reports can be found
archive_folder: The folder to move processed mail to
delete (bool): Delete messages after processing them
test (bool): Do not move or delete messages after processing them
nameservers (list): A list of DNS nameservers to query
dns_timeout (float): Set the DNS query timeout
strip_attachment_payloads (bool): Remove attachment payloads from
forensic report results
results (dict): Results from the previous run
Returns:
OrderedDict: Lists of ``aggregate_reports`` and ``forensic_reports``
"""
def chunks(l, n):
"""Yield successive n-sized chunks from l."""
for i in range(0, len(l), n):
yield l[i:i + n]
if delete and test:
raise ValueError("delete and test options are mutually exclusive")
if connection is None and (user is None or password is None):
raise ValueError("Must supply a connection, or a username and "
"password")
aggregate_reports = []
forensic_reports = []
aggregate_report_msg_uids = []
forensic_report_msg_uids = []
aggregate_reports_folder = "{0}/Aggregate".format(archive_folder)
forensic_reports_folder = "{0}/Forensic".format(archive_folder)
invalid_reports_folder = "{0}/Invalid".format(archive_folder)
if results:
aggregate_reports = results["aggregate_reports"].copy()
forensic_reports = results["forensic_reports"].copy()
try:
if connection:
server = connection
else:
if not ssl:
logger.debug("Connecting to IMAP over plain text")
if ssl_context is None:
ssl_context = create_default_context()
server = imapclient.IMAPClient(host,
port=port,
ssl=ssl,
ssl_context=ssl_context,
use_uid=True)
server.login(user, password)
if move_supported is None:
server_capabilities = get_imap_capabilities(server)
move_supported = "MOVE" in server_capabilities
def delete_messages(msg_uids):
logger.debug("Deleting message UID(s) {0}".format(",".join(
str(uid) for uid in msg_uids)))
if type(msg_uids) == str or type(msg_uids) == int:
msg_uids = [int(msg_uids)]
server.delete_messages(msg_uids, silent=True)
server.expunge(msg_uids)
def move_messages(msg_uids, folder):
if type(msg_uids) == str or type(msg_uids) == int:
msg_uids = [int(msg_uids)]
for chunk in chunks(msg_uids, 100):
if move_supported:
logger.debug("Moving message UID(s) {0} to {1}".format(
",".join(str(uid) for uid in chunk), folder
))
server.move(chunk, folder)
else:
logger.debug("Copying message UID(s) {0} to {1}".format(
",".join(str(uid) for uid in chunk), folder
))
server.copy(msg_uids, folder)
delete_messages(msg_uids)
if not server.folder_exists(archive_folder):
logger.debug("Creating IMAP folder: {0}".format(archive_folder))
server.create_folder(archive_folder)
try:
# Test subfolder creation
if not server.folder_exists(aggregate_reports_folder):
server.create_folder(aggregate_reports_folder)
logger.debug(
"Creating IMAP folder: {0}".format(
aggregate_reports_folder))
except imapclient.exceptions.IMAPClientError:
# Only replace / with . when . doesn't work
# This usually indicates a dovecot IMAP server
aggregate_reports_folder = aggregate_reports_folder.replace("/",
".")
forensic_reports_folder = forensic_reports_folder.replace("/",
".")
invalid_reports_folder = invalid_reports_folder.replace("/",
".")
subfolders = [aggregate_reports_folder,
forensic_reports_folder,
invalid_reports_folder]
for subfolder in subfolders:
if not server.folder_exists(subfolder):
logger.debug(
"Creating IMAP folder: {0}".format(subfolder))
server.create_folder(subfolder)
server.select_folder(reports_folder)
messages = server.search()
total_messages = len(messages)
logger.debug("Found {0} messages in IMAP folder {1}".format(
len(messages), reports_folder))
for i in range(len(messages)):
msg_uid = messages[i]
logger.debug("Processing message {0} of {1}: UID {2}".format(
i+1,
total_messages,
msg_uid
))
try:
try:
raw_msg = server.fetch(msg_uid,
["RFC822"])[msg_uid]
msg_keys = [b'RFC822', b'BODY[NULL]', b'BODY[]']
msg_key = ''
for key in msg_keys:
if key in raw_msg.keys():
msg_key = key
break
raw_msg = raw_msg[msg_key]
except (ConnectionResetError, socket.error,
TimeoutError,
imapclient.exceptions.IMAPClientError) as error:
error = error.__str__().lstrip("b'").rstrip("'").rstrip(
".")
logger.debug("IMAP error: {0}".format(error.__str__()))
logger.debug("Reconnecting to IMAP")
try:
server.shutdown()
except Exception as e:
logger.debug(
"Failed to log out: {0}".format(e.__str__()))
if not ssl:
logger.debug("Connecting to IMAP over plain text")
server = imapclient.IMAPClient(host,
port=port,
ssl=ssl,
ssl_context=ssl_context,
use_uid=True)
server.login(user, password)
server.select_folder(reports_folder)
raw_msg = server.fetch(msg_uid,
["RFC822"])[msg_uid][b"RFC822"]
msg_content = raw_msg.decode("utf-8", errors="replace")
sa = strip_attachment_payloads
parsed_email = parse_report_email(msg_content,
nameservers=nameservers,
dns_timeout=dns_timeout,
strip_attachment_payloads=sa)
if parsed_email["report_type"] == "aggregate":
aggregate_reports.append(parsed_email["report"])
aggregate_report_msg_uids.append(msg_uid)
elif parsed_email["report_type"] == "forensic":
forensic_reports.append(parsed_email["report"])
forensic_report_msg_uids.append(msg_uid)
except InvalidDMARCReport as error:
logger.warning(error.__str__())
if not test:
if delete:
logger.debug(
"Deleting message UID {0}".format(msg_uid))
delete_messages([msg_uid])
else:
logger.debug(
"Moving message UID {0} to {1}".format(
msg_uid, invalid_reports_folder))
move_messages([msg_uid], invalid_reports_folder)
if not test:
if delete:
processed_messages = aggregate_report_msg_uids + \
forensic_report_msg_uids
number_of_processed_msgs = len(processed_messages)
for i in range(number_of_processed_msgs):
msg_uid = processed_messages[i]
logger.debug(
"Deleting message {0} of {1}: UID {2}".format(
i + 1, number_of_processed_msgs, msg_uid))
try:
delete_messages([msg_uid])
except imapclient.exceptions.IMAPClientError as e:
e = e.__str__().lstrip("b'").rstrip(
"'").rstrip(".")
message = "Error deleting message UID"
e = "{0} {1}: " "{2}".format(message, msg_uid, e)
logger.error("IMAP error: {0}".format(e))
except (ConnectionResetError, socket.error,
TimeoutError) as e:
logger.debug("IMAP error: {0}".format(e.__str__()))
logger.debug("Reconnecting to IMAP")
try:
server.shutdown()
except Exception as e:
logger.debug(
"Failed to log out: {0}".format(e.__str__()))
if not ssl:
logger.debug("Connecting to IMAP over plain text")
server = imapclient.IMAPClient(host,
port=port,
ssl=ssl,
ssl_context=ssl_context,
use_uid=True)
server.login(user, password)
server.select_folder(reports_folder)
delete_messages([msg_uid])
else:
if len(aggregate_report_msg_uids) > 0:
log_message = "Moving aggregate report messages from"
                    logger.debug(
                        "{0} {1} to {2}".format(
log_message, reports_folder,
aggregate_reports_folder))
number_of_agg_report_msgs = len(aggregate_report_msg_uids)
for i in range(number_of_agg_report_msgs):
msg_uid = aggregate_report_msg_uids[i]
logger.debug(
"Moving message {0} of {1}: UID {2}".format(
i+1, number_of_agg_report_msgs, msg_uid))
try:
move_messages([msg_uid],
aggregate_reports_folder)
except imapclient.exceptions.IMAPClientError as e:
e = e.__str__().lstrip("b'").rstrip(
"'").rstrip(".")
message = "Error moving message UID"
e = "{0} {1}: {2}".format(message, msg_uid, e)
logger.error("IMAP error: {0}".format(e))
except (ConnectionResetError, socket.error,
TimeoutError) as error:
logger.debug("IMAP error: {0}".format(
error.__str__()))
logger.debug("Reconnecting to IMAP")
try:
server.shutdown()
except Exception as e:
logger.debug("Failed to log out: {0}".format(
e.__str__()))
if not ssl:
logger.debug(
"Connecting to IMAP over plain text")
server = imapclient.IMAPClient(
host,
port=port,
ssl=ssl,
ssl_context=ssl_context,
use_uid=True
)
server.login(user, password)
server.select_folder(reports_folder)
move_messages([msg_uid],
aggregate_reports_folder)
if len(forensic_report_msg_uids) > 0:
message = "Moving forensic report messages from"
logger.debug(
"{0} {1} to {2}".format(message,
reports_folder,
forensic_reports_folder))
number_of_forensic_msgs = len(forensic_report_msg_uids)
for i in range(number_of_forensic_msgs):
msg_uid = forensic_report_msg_uids[i]
message = "Moving message"
                        logger.debug("{0} {1} of {2}: UID {3}".format(
message,
i + 1, number_of_forensic_msgs, msg_uid))
try:
move_messages([msg_uid],
forensic_reports_folder)
except imapclient.exceptions.IMAPClientError as e:
e = e.__str__().lstrip("b'").rstrip(
"'").rstrip(".")
e = "Error moving message UID {0}: {1}".format(
msg_uid, e)
logger.error("IMAP error: {0}".format(e))
except (ConnectionResetError, TimeoutError) as error:
logger.debug("IMAP error: {0}".format(
error.__str__()))
logger.debug("Reconnecting to IMAP")
try:
server.shutdown()
except Exception as e:
logger.debug("Failed to "
"disconnect: {0}".format(
e.__str__()))
if not ssl:
logger.debug(
"Connecting to IMAP over plain text")
server = imapclient.IMAPClient(
host,
port=port,
ssl=ssl,
ssl_context=ssl_context,
use_uid=True)
server.login(user, password)
server.select_folder(reports_folder)
move_messages([msg_uid],
forensic_reports_folder)
results = OrderedDict([("aggregate_reports", aggregate_reports),
("forensic_reports", forensic_reports)])
if not test and total_messages > 0:
# Process emails that came in during the last run
results = get_dmarc_reports_from_inbox(
host=host,
user=user,
password=password,
connection=connection,
port=port,
ssl=ssl,
ssl_context=ssl_context,
move_supported=move_supported,
reports_folder=reports_folder,
archive_folder=archive_folder,
delete=delete,
test=test,
nameservers=nameservers,
dns_timeout=dns_timeout,
strip_attachment_payloads=strip_attachment_payloads,
results=results
)
return results
except imapclient.exceptions.IMAPClientError as error:
error = error.__str__().lstrip("b'").rstrip("'").rstrip(".")
# Workaround for random Exchange/Office365 IMAP errors
if "unexpected response" in error or "BAD" in error:
sleep_minutes = 5
logger.debug(
"{0}. "
"Waiting {1} minutes before trying again".format(
error,
sleep_minutes))
time.sleep(sleep_minutes * 60)
results = get_dmarc_reports_from_inbox(
host=host,
user=user,
password=password,
connection=connection,
port=port,
ssl=ssl,
ssl_context=ssl_context,
move_supported=move_supported,
reports_folder=reports_folder,
archive_folder=archive_folder,
delete=delete,
test=test,
nameservers=nameservers,
dns_timeout=dns_timeout,
strip_attachment_payloads=strip_attachment_payloads,
results=results
)
return results
raise IMAPError(error)
except socket.gaierror:
raise IMAPError("DNS resolution failed")
except ConnectionRefusedError:
raise IMAPError("Connection refused")
except ConnectionResetError:
sleep_minutes = 5
logger.debug(
"Connection reset. "
"Waiting {0} minutes before trying again".format(sleep_minutes))
time.sleep(sleep_minutes * 60)
results = get_dmarc_reports_from_inbox(
host=host,
user=user,
password=password,
connection=connection,
port=port,
ssl=ssl,
ssl_context=ssl_context,
move_supported=move_supported,
reports_folder=reports_folder,
archive_folder=archive_folder,
delete=delete,
test=test,
nameservers=nameservers,
dns_timeout=dns_timeout,
strip_attachment_payloads=strip_attachment_payloads,
results=results
)
return results
except ConnectionAbortedError:
raise IMAPError("Connection aborted")
except TimeoutError:
raise IMAPError("Connection timed out")
except SSLError as error:
raise IMAPError("SSL error: {0}".format(error.__str__()))
except CertificateError as error:
raise IMAPError("Certificate error: {0}".format(error.__str__()))
|
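A minimal usage sketch for the function in this record follows; it is an illustration rather than part of the source record. The host, user, and password values are placeholders, and the import assumes the function is exposed at the top level of the parsedmarc package, as the path field indicates.

# Hedged usage sketch: placeholder host/credentials, test mode so nothing is moved or deleted
from parsedmarc import get_dmarc_reports_from_inbox

results = get_dmarc_reports_from_inbox(
    host="imap.example.com",       # placeholder hostname
    user="dmarc@example.com",      # placeholder account
    password="hunter2",            # placeholder password
    reports_folder="INBOX",
    archive_folder="Archive",
    test=True                      # do not move or delete messages while experimenting
)
print(len(results["aggregate_reports"]), "aggregate reports parsed")
print(len(results["forensic_reports"]), "forensic reports parsed")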
[
    "Fetches",
    "and",
    "parses",
    "DMARC",
    "reports",
    "from",
    "an",
    "inbox"
] |
domainaware/parsedmarc
|
python
|
https://github.com/domainaware/parsedmarc/blob/ecc9fd434c23d896ccd1f35795ccc047f946ed05/parsedmarc/__init__.py#L865-L1304
|
[
"def",
"get_dmarc_reports_from_inbox",
"(",
"host",
"=",
"None",
",",
"user",
"=",
"None",
",",
"password",
"=",
"None",
",",
"connection",
"=",
"None",
",",
"port",
"=",
"None",
",",
"ssl",
"=",
"True",
",",
"ssl_context",
"=",
"None",
",",
"move_supported",
"=",
"None",
",",
"reports_folder",
"=",
"\"INBOX\"",
",",
"archive_folder",
"=",
"\"Archive\"",
",",
"delete",
"=",
"False",
",",
"test",
"=",
"False",
",",
"nameservers",
"=",
"None",
",",
"dns_timeout",
"=",
"6.0",
",",
"strip_attachment_payloads",
"=",
"False",
",",
"results",
"=",
"None",
")",
":",
"def",
"chunks",
"(",
"l",
",",
"n",
")",
":",
"\"\"\"Yield successive n-sized chunks from l.\"\"\"",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"len",
"(",
"l",
")",
",",
"n",
")",
":",
"yield",
"l",
"[",
"i",
":",
"i",
"+",
"n",
"]",
"if",
"delete",
"and",
"test",
":",
"raise",
"ValueError",
"(",
"\"delete and test options are mutually exclusive\"",
")",
"if",
"connection",
"is",
"None",
"and",
"(",
"user",
"is",
"None",
"or",
"password",
"is",
"None",
")",
":",
"raise",
"ValueError",
"(",
"\"Must supply a connection, or a username and \"",
"\"password\"",
")",
"aggregate_reports",
"=",
"[",
"]",
"forensic_reports",
"=",
"[",
"]",
"aggregate_report_msg_uids",
"=",
"[",
"]",
"forensic_report_msg_uids",
"=",
"[",
"]",
"aggregate_reports_folder",
"=",
"\"{0}/Aggregate\"",
".",
"format",
"(",
"archive_folder",
")",
"forensic_reports_folder",
"=",
"\"{0}/Forensic\"",
".",
"format",
"(",
"archive_folder",
")",
"invalid_reports_folder",
"=",
"\"{0}/Invalid\"",
".",
"format",
"(",
"archive_folder",
")",
"if",
"results",
":",
"aggregate_reports",
"=",
"results",
"[",
"\"aggregate_reports\"",
"]",
".",
"copy",
"(",
")",
"forensic_reports",
"=",
"results",
"[",
"\"forensic_reports\"",
"]",
".",
"copy",
"(",
")",
"try",
":",
"if",
"connection",
":",
"server",
"=",
"connection",
"else",
":",
"if",
"not",
"ssl",
":",
"logger",
".",
"debug",
"(",
"\"Connecting to IMAP over plain text\"",
")",
"if",
"ssl_context",
"is",
"None",
":",
"ssl_context",
"=",
"create_default_context",
"(",
")",
"server",
"=",
"imapclient",
".",
"IMAPClient",
"(",
"host",
",",
"port",
"=",
"port",
",",
"ssl",
"=",
"ssl",
",",
"ssl_context",
"=",
"ssl_context",
",",
"use_uid",
"=",
"True",
")",
"server",
".",
"login",
"(",
"user",
",",
"password",
")",
"if",
"move_supported",
"is",
"None",
":",
"server_capabilities",
"=",
"get_imap_capabilities",
"(",
"server",
")",
"move_supported",
"=",
"\"MOVE\"",
"in",
"server_capabilities",
"def",
"delete_messages",
"(",
"msg_uids",
")",
":",
"logger",
".",
"debug",
"(",
"\"Deleting message UID(s) {0}\"",
".",
"format",
"(",
"\",\"",
".",
"join",
"(",
"str",
"(",
"uid",
")",
"for",
"uid",
"in",
"msg_uids",
")",
")",
")",
"if",
"type",
"(",
"msg_uids",
")",
"==",
"str",
"or",
"type",
"(",
"msg_uids",
")",
"==",
"int",
":",
"msg_uids",
"=",
"[",
"int",
"(",
"msg_uids",
")",
"]",
"server",
".",
"delete_messages",
"(",
"msg_uids",
",",
"silent",
"=",
"True",
")",
"server",
".",
"expunge",
"(",
"msg_uids",
")",
"def",
"move_messages",
"(",
"msg_uids",
",",
"folder",
")",
":",
"if",
"type",
"(",
"msg_uids",
")",
"==",
"str",
"or",
"type",
"(",
"msg_uids",
")",
"==",
"int",
":",
"msg_uids",
"=",
"[",
"int",
"(",
"msg_uids",
")",
"]",
"for",
"chunk",
"in",
"chunks",
"(",
"msg_uids",
",",
"100",
")",
":",
"if",
"move_supported",
":",
"logger",
".",
"debug",
"(",
"\"Moving message UID(s) {0} to {1}\"",
".",
"format",
"(",
"\",\"",
".",
"join",
"(",
"str",
"(",
"uid",
")",
"for",
"uid",
"in",
"chunk",
")",
",",
"folder",
")",
")",
"server",
".",
"move",
"(",
"chunk",
",",
"folder",
")",
"else",
":",
"logger",
".",
"debug",
"(",
"\"Copying message UID(s) {0} to {1}\"",
".",
"format",
"(",
"\",\"",
".",
"join",
"(",
"str",
"(",
"uid",
")",
"for",
"uid",
"in",
"chunk",
")",
",",
"folder",
")",
")",
"server",
".",
"copy",
"(",
"msg_uids",
",",
"folder",
")",
"delete_messages",
"(",
"msg_uids",
")",
"if",
"not",
"server",
".",
"folder_exists",
"(",
"archive_folder",
")",
":",
"logger",
".",
"debug",
"(",
"\"Creating IMAP folder: {0}\"",
".",
"format",
"(",
"archive_folder",
")",
")",
"server",
".",
"create_folder",
"(",
"archive_folder",
")",
"try",
":",
"# Test subfolder creation",
"if",
"not",
"server",
".",
"folder_exists",
"(",
"aggregate_reports_folder",
")",
":",
"server",
".",
"create_folder",
"(",
"aggregate_reports_folder",
")",
"logger",
".",
"debug",
"(",
"\"Creating IMAP folder: {0}\"",
".",
"format",
"(",
"aggregate_reports_folder",
")",
")",
"except",
"imapclient",
".",
"exceptions",
".",
"IMAPClientError",
":",
"# Only replace / with . when . doesn't work",
"# This usually indicates a dovecot IMAP server",
"aggregate_reports_folder",
"=",
"aggregate_reports_folder",
".",
"replace",
"(",
"\"/\"",
",",
"\".\"",
")",
"forensic_reports_folder",
"=",
"forensic_reports_folder",
".",
"replace",
"(",
"\"/\"",
",",
"\".\"",
")",
"invalid_reports_folder",
"=",
"invalid_reports_folder",
".",
"replace",
"(",
"\"/\"",
",",
"\".\"",
")",
"subfolders",
"=",
"[",
"aggregate_reports_folder",
",",
"forensic_reports_folder",
",",
"invalid_reports_folder",
"]",
"for",
"subfolder",
"in",
"subfolders",
":",
"if",
"not",
"server",
".",
"folder_exists",
"(",
"subfolder",
")",
":",
"logger",
".",
"debug",
"(",
"\"Creating IMAP folder: {0}\"",
".",
"format",
"(",
"subfolder",
")",
")",
"server",
".",
"create_folder",
"(",
"subfolder",
")",
"server",
".",
"select_folder",
"(",
"reports_folder",
")",
"messages",
"=",
"server",
".",
"search",
"(",
")",
"total_messages",
"=",
"len",
"(",
"messages",
")",
"logger",
".",
"debug",
"(",
"\"Found {0} messages in IMAP folder {1}\"",
".",
"format",
"(",
"len",
"(",
"messages",
")",
",",
"reports_folder",
")",
")",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"messages",
")",
")",
":",
"msg_uid",
"=",
"messages",
"[",
"i",
"]",
"logger",
".",
"debug",
"(",
"\"Processing message {0} of {1}: UID {2}\"",
".",
"format",
"(",
"i",
"+",
"1",
",",
"total_messages",
",",
"msg_uid",
")",
")",
"try",
":",
"try",
":",
"raw_msg",
"=",
"server",
".",
"fetch",
"(",
"msg_uid",
",",
"[",
"\"RFC822\"",
"]",
")",
"[",
"msg_uid",
"]",
"msg_keys",
"=",
"[",
"b'RFC822'",
",",
"b'BODY[NULL]'",
",",
"b'BODY[]'",
"]",
"msg_key",
"=",
"''",
"for",
"key",
"in",
"msg_keys",
":",
"if",
"key",
"in",
"raw_msg",
".",
"keys",
"(",
")",
":",
"msg_key",
"=",
"key",
"break",
"raw_msg",
"=",
"raw_msg",
"[",
"msg_key",
"]",
"except",
"(",
"ConnectionResetError",
",",
"socket",
".",
"error",
",",
"TimeoutError",
",",
"imapclient",
".",
"exceptions",
".",
"IMAPClientError",
")",
"as",
"error",
":",
"error",
"=",
"error",
".",
"__str__",
"(",
")",
".",
"lstrip",
"(",
"\"b'\"",
")",
".",
"rstrip",
"(",
"\"'\"",
")",
".",
"rstrip",
"(",
"\".\"",
")",
"logger",
".",
"debug",
"(",
"\"IMAP error: {0}\"",
".",
"format",
"(",
"error",
".",
"__str__",
"(",
")",
")",
")",
"logger",
".",
"debug",
"(",
"\"Reconnecting to IMAP\"",
")",
"try",
":",
"server",
".",
"shutdown",
"(",
")",
"except",
"Exception",
"as",
"e",
":",
"logger",
".",
"debug",
"(",
"\"Failed to log out: {0}\"",
".",
"format",
"(",
"e",
".",
"__str__",
"(",
")",
")",
")",
"if",
"not",
"ssl",
":",
"logger",
".",
"debug",
"(",
"\"Connecting to IMAP over plain text\"",
")",
"server",
"=",
"imapclient",
".",
"IMAPClient",
"(",
"host",
",",
"port",
"=",
"port",
",",
"ssl",
"=",
"ssl",
",",
"ssl_context",
"=",
"ssl_context",
",",
"use_uid",
"=",
"True",
")",
"server",
".",
"login",
"(",
"user",
",",
"password",
")",
"server",
".",
"select_folder",
"(",
"reports_folder",
")",
"raw_msg",
"=",
"server",
".",
"fetch",
"(",
"msg_uid",
",",
"[",
"\"RFC822\"",
"]",
")",
"[",
"msg_uid",
"]",
"[",
"b\"RFC822\"",
"]",
"msg_content",
"=",
"raw_msg",
".",
"decode",
"(",
"\"utf-8\"",
",",
"errors",
"=",
"\"replace\"",
")",
"sa",
"=",
"strip_attachment_payloads",
"parsed_email",
"=",
"parse_report_email",
"(",
"msg_content",
",",
"nameservers",
"=",
"nameservers",
",",
"dns_timeout",
"=",
"dns_timeout",
",",
"strip_attachment_payloads",
"=",
"sa",
")",
"if",
"parsed_email",
"[",
"\"report_type\"",
"]",
"==",
"\"aggregate\"",
":",
"aggregate_reports",
".",
"append",
"(",
"parsed_email",
"[",
"\"report\"",
"]",
")",
"aggregate_report_msg_uids",
".",
"append",
"(",
"msg_uid",
")",
"elif",
"parsed_email",
"[",
"\"report_type\"",
"]",
"==",
"\"forensic\"",
":",
"forensic_reports",
".",
"append",
"(",
"parsed_email",
"[",
"\"report\"",
"]",
")",
"forensic_report_msg_uids",
".",
"append",
"(",
"msg_uid",
")",
"except",
"InvalidDMARCReport",
"as",
"error",
":",
"logger",
".",
"warning",
"(",
"error",
".",
"__str__",
"(",
")",
")",
"if",
"not",
"test",
":",
"if",
"delete",
":",
"logger",
".",
"debug",
"(",
"\"Deleting message UID {0}\"",
".",
"format",
"(",
"msg_uid",
")",
")",
"delete_messages",
"(",
"[",
"msg_uid",
"]",
")",
"else",
":",
"logger",
".",
"debug",
"(",
"\"Moving message UID {0} to {1}\"",
".",
"format",
"(",
"msg_uid",
",",
"invalid_reports_folder",
")",
")",
"move_messages",
"(",
"[",
"msg_uid",
"]",
",",
"invalid_reports_folder",
")",
"if",
"not",
"test",
":",
"if",
"delete",
":",
"processed_messages",
"=",
"aggregate_report_msg_uids",
"+",
"forensic_report_msg_uids",
"number_of_processed_msgs",
"=",
"len",
"(",
"processed_messages",
")",
"for",
"i",
"in",
"range",
"(",
"number_of_processed_msgs",
")",
":",
"msg_uid",
"=",
"processed_messages",
"[",
"i",
"]",
"logger",
".",
"debug",
"(",
"\"Deleting message {0} of {1}: UID {2}\"",
".",
"format",
"(",
"i",
"+",
"1",
",",
"number_of_processed_msgs",
",",
"msg_uid",
")",
")",
"try",
":",
"delete_messages",
"(",
"[",
"msg_uid",
"]",
")",
"except",
"imapclient",
".",
"exceptions",
".",
"IMAPClientError",
"as",
"e",
":",
"e",
"=",
"e",
".",
"__str__",
"(",
")",
".",
"lstrip",
"(",
"\"b'\"",
")",
".",
"rstrip",
"(",
"\"'\"",
")",
".",
"rstrip",
"(",
"\".\"",
")",
"message",
"=",
"\"Error deleting message UID\"",
"e",
"=",
"\"{0} {1}: \"",
"\"{2}\"",
".",
"format",
"(",
"message",
",",
"msg_uid",
",",
"e",
")",
"logger",
".",
"error",
"(",
"\"IMAP error: {0}\"",
".",
"format",
"(",
"e",
")",
")",
"except",
"(",
"ConnectionResetError",
",",
"socket",
".",
"error",
",",
"TimeoutError",
")",
"as",
"e",
":",
"logger",
".",
"debug",
"(",
"\"IMAP error: {0}\"",
".",
"format",
"(",
"e",
".",
"__str__",
"(",
")",
")",
")",
"logger",
".",
"debug",
"(",
"\"Reconnecting to IMAP\"",
")",
"try",
":",
"server",
".",
"shutdown",
"(",
")",
"except",
"Exception",
"as",
"e",
":",
"logger",
".",
"debug",
"(",
"\"Failed to log out: {0}\"",
".",
"format",
"(",
"e",
".",
"__str__",
"(",
")",
")",
")",
"if",
"not",
"ssl",
":",
"logger",
".",
"debug",
"(",
"\"Connecting to IMAP over plain text\"",
")",
"server",
"=",
"imapclient",
".",
"IMAPClient",
"(",
"host",
",",
"port",
"=",
"port",
",",
"ssl",
"=",
"ssl",
",",
"ssl_context",
"=",
"ssl_context",
",",
"use_uid",
"=",
"True",
")",
"server",
".",
"login",
"(",
"user",
",",
"password",
")",
"server",
".",
"select_folder",
"(",
"reports_folder",
")",
"delete_messages",
"(",
"[",
"msg_uid",
"]",
")",
"else",
":",
"if",
"len",
"(",
"aggregate_report_msg_uids",
")",
">",
"0",
":",
"log_message",
"=",
"\"Moving aggregate report messages from\"",
"logger",
".",
"debug",
"(",
"\"{0} {1} to {1}\"",
".",
"format",
"(",
"log_message",
",",
"reports_folder",
",",
"aggregate_reports_folder",
")",
")",
"number_of_agg_report_msgs",
"=",
"len",
"(",
"aggregate_report_msg_uids",
")",
"for",
"i",
"in",
"range",
"(",
"number_of_agg_report_msgs",
")",
":",
"msg_uid",
"=",
"aggregate_report_msg_uids",
"[",
"i",
"]",
"logger",
".",
"debug",
"(",
"\"Moving message {0} of {1}: UID {2}\"",
".",
"format",
"(",
"i",
"+",
"1",
",",
"number_of_agg_report_msgs",
",",
"msg_uid",
")",
")",
"try",
":",
"move_messages",
"(",
"[",
"msg_uid",
"]",
",",
"aggregate_reports_folder",
")",
"except",
"imapclient",
".",
"exceptions",
".",
"IMAPClientError",
"as",
"e",
":",
"e",
"=",
"e",
".",
"__str__",
"(",
")",
".",
"lstrip",
"(",
"\"b'\"",
")",
".",
"rstrip",
"(",
"\"'\"",
")",
".",
"rstrip",
"(",
"\".\"",
")",
"message",
"=",
"\"Error moving message UID\"",
"e",
"=",
"\"{0} {1}: {2}\"",
".",
"format",
"(",
"message",
",",
"msg_uid",
",",
"e",
")",
"logger",
".",
"error",
"(",
"\"IMAP error: {0}\"",
".",
"format",
"(",
"e",
")",
")",
"except",
"(",
"ConnectionResetError",
",",
"socket",
".",
"error",
",",
"TimeoutError",
")",
"as",
"error",
":",
"logger",
".",
"debug",
"(",
"\"IMAP error: {0}\"",
".",
"format",
"(",
"error",
".",
"__str__",
"(",
")",
")",
")",
"logger",
".",
"debug",
"(",
"\"Reconnecting to IMAP\"",
")",
"try",
":",
"server",
".",
"shutdown",
"(",
")",
"except",
"Exception",
"as",
"e",
":",
"logger",
".",
"debug",
"(",
"\"Failed to log out: {0}\"",
".",
"format",
"(",
"e",
".",
"__str__",
"(",
")",
")",
")",
"if",
"not",
"ssl",
":",
"logger",
".",
"debug",
"(",
"\"Connecting to IMAP over plain text\"",
")",
"server",
"=",
"imapclient",
".",
"IMAPClient",
"(",
"host",
",",
"port",
"=",
"port",
",",
"ssl",
"=",
"ssl",
",",
"ssl_context",
"=",
"ssl_context",
",",
"use_uid",
"=",
"True",
")",
"server",
".",
"login",
"(",
"user",
",",
"password",
")",
"server",
".",
"select_folder",
"(",
"reports_folder",
")",
"move_messages",
"(",
"[",
"msg_uid",
"]",
",",
"aggregate_reports_folder",
")",
"if",
"len",
"(",
"forensic_report_msg_uids",
")",
">",
"0",
":",
"message",
"=",
"\"Moving forensic report messages from\"",
"logger",
".",
"debug",
"(",
"\"{0} {1} to {2}\"",
".",
"format",
"(",
"message",
",",
"reports_folder",
",",
"forensic_reports_folder",
")",
")",
"number_of_forensic_msgs",
"=",
"len",
"(",
"forensic_report_msg_uids",
")",
"for",
"i",
"in",
"range",
"(",
"number_of_forensic_msgs",
")",
":",
"msg_uid",
"=",
"forensic_report_msg_uids",
"[",
"i",
"]",
"message",
"=",
"\"Moving message\"",
"logger",
".",
"debug",
"(",
"\"{0} {1} of {2}: UID {2}\"",
".",
"format",
"(",
"message",
",",
"i",
"+",
"1",
",",
"number_of_forensic_msgs",
",",
"msg_uid",
")",
")",
"try",
":",
"move_messages",
"(",
"[",
"msg_uid",
"]",
",",
"forensic_reports_folder",
")",
"except",
"imapclient",
".",
"exceptions",
".",
"IMAPClientError",
"as",
"e",
":",
"e",
"=",
"e",
".",
"__str__",
"(",
")",
".",
"lstrip",
"(",
"\"b'\"",
")",
".",
"rstrip",
"(",
"\"'\"",
")",
".",
"rstrip",
"(",
"\".\"",
")",
"e",
"=",
"\"Error moving message UID {0}: {1}\"",
".",
"format",
"(",
"msg_uid",
",",
"e",
")",
"logger",
".",
"error",
"(",
"\"IMAP error: {0}\"",
".",
"format",
"(",
"e",
")",
")",
"except",
"(",
"ConnectionResetError",
",",
"TimeoutError",
")",
"as",
"error",
":",
"logger",
".",
"debug",
"(",
"\"IMAP error: {0}\"",
".",
"format",
"(",
"error",
".",
"__str__",
"(",
")",
")",
")",
"logger",
".",
"debug",
"(",
"\"Reconnecting to IMAP\"",
")",
"try",
":",
"server",
".",
"shutdown",
"(",
")",
"except",
"Exception",
"as",
"e",
":",
"logger",
".",
"debug",
"(",
"\"Failed to \"",
"\"disconnect: {0}\"",
".",
"format",
"(",
"e",
".",
"__str__",
"(",
")",
")",
")",
"if",
"not",
"ssl",
":",
"logger",
".",
"debug",
"(",
"\"Connecting to IMAP over plain text\"",
")",
"server",
"=",
"imapclient",
".",
"IMAPClient",
"(",
"host",
",",
"port",
"=",
"port",
",",
"ssl",
"=",
"ssl",
",",
"ssl_context",
"=",
"ssl_context",
",",
"use_uid",
"=",
"True",
")",
"server",
".",
"login",
"(",
"user",
",",
"password",
")",
"server",
".",
"select_folder",
"(",
"reports_folder",
")",
"move_messages",
"(",
"[",
"msg_uid",
"]",
",",
"forensic_reports_folder",
")",
"results",
"=",
"OrderedDict",
"(",
"[",
"(",
"\"aggregate_reports\"",
",",
"aggregate_reports",
")",
",",
"(",
"\"forensic_reports\"",
",",
"forensic_reports",
")",
"]",
")",
"if",
"not",
"test",
"and",
"total_messages",
">",
"0",
":",
"# Process emails that came in during the last run",
"results",
"=",
"get_dmarc_reports_from_inbox",
"(",
"host",
"=",
"host",
",",
"user",
"=",
"user",
",",
"password",
"=",
"password",
",",
"connection",
"=",
"connection",
",",
"port",
"=",
"port",
",",
"ssl",
"=",
"ssl",
",",
"ssl_context",
"=",
"ssl_context",
",",
"move_supported",
"=",
"move_supported",
",",
"reports_folder",
"=",
"reports_folder",
",",
"archive_folder",
"=",
"archive_folder",
",",
"delete",
"=",
"delete",
",",
"test",
"=",
"test",
",",
"nameservers",
"=",
"nameservers",
",",
"dns_timeout",
"=",
"dns_timeout",
",",
"strip_attachment_payloads",
"=",
"strip_attachment_payloads",
",",
"results",
"=",
"results",
")",
"return",
"results",
"except",
"imapclient",
".",
"exceptions",
".",
"IMAPClientError",
"as",
"error",
":",
"error",
"=",
"error",
".",
"__str__",
"(",
")",
".",
"lstrip",
"(",
"\"b'\"",
")",
".",
"rstrip",
"(",
"\"'\"",
")",
".",
"rstrip",
"(",
"\".\"",
")",
"# Workaround for random Exchange/Office365 IMAP errors",
"if",
"\"unexpected response\"",
"in",
"error",
"or",
"\"BAD\"",
"in",
"error",
":",
"sleep_minutes",
"=",
"5",
"logger",
".",
"debug",
"(",
"\"{0}. \"",
"\"Waiting {1} minutes before trying again\"",
".",
"format",
"(",
"error",
",",
"sleep_minutes",
")",
")",
"time",
".",
"sleep",
"(",
"sleep_minutes",
"*",
"60",
")",
"results",
"=",
"get_dmarc_reports_from_inbox",
"(",
"host",
"=",
"host",
",",
"user",
"=",
"user",
",",
"password",
"=",
"password",
",",
"connection",
"=",
"connection",
",",
"port",
"=",
"port",
",",
"ssl",
"=",
"ssl",
",",
"ssl_context",
"=",
"ssl_context",
",",
"move_supported",
"=",
"move_supported",
",",
"reports_folder",
"=",
"reports_folder",
",",
"archive_folder",
"=",
"archive_folder",
",",
"delete",
"=",
"delete",
",",
"test",
"=",
"test",
",",
"nameservers",
"=",
"nameservers",
",",
"dns_timeout",
"=",
"dns_timeout",
",",
"strip_attachment_payloads",
"=",
"strip_attachment_payloads",
",",
"results",
"=",
"results",
")",
"return",
"results",
"raise",
"IMAPError",
"(",
"error",
")",
"except",
"socket",
".",
"gaierror",
":",
"raise",
"IMAPError",
"(",
"\"DNS resolution failed\"",
")",
"except",
"ConnectionRefusedError",
":",
"raise",
"IMAPError",
"(",
"\"Connection refused\"",
")",
"except",
"ConnectionResetError",
":",
"sleep_minutes",
"=",
"5",
"logger",
".",
"debug",
"(",
"\"Connection reset. \"",
"\"Waiting {0} minutes before trying again\"",
".",
"format",
"(",
"sleep_minutes",
")",
")",
"time",
".",
"sleep",
"(",
"sleep_minutes",
"*",
"60",
")",
"results",
"=",
"get_dmarc_reports_from_inbox",
"(",
"host",
"=",
"host",
",",
"user",
"=",
"user",
",",
"password",
"=",
"password",
",",
"connection",
"=",
"connection",
",",
"port",
"=",
"port",
",",
"ssl",
"=",
"ssl",
",",
"ssl_context",
"=",
"ssl_context",
",",
"move_supported",
"=",
"move_supported",
",",
"reports_folder",
"=",
"reports_folder",
",",
"archive_folder",
"=",
"archive_folder",
",",
"delete",
"=",
"delete",
",",
"test",
"=",
"test",
",",
"nameservers",
"=",
"nameservers",
",",
"dns_timeout",
"=",
"dns_timeout",
",",
"strip_attachment_payloads",
"=",
"strip_attachment_payloads",
",",
"results",
"=",
"results",
")",
"return",
"results",
"except",
"ConnectionAbortedError",
":",
"raise",
"IMAPError",
"(",
"\"Connection aborted\"",
")",
"except",
"TimeoutError",
":",
"raise",
"IMAPError",
"(",
"\"Connection timed out\"",
")",
"except",
"SSLError",
"as",
"error",
":",
"raise",
"IMAPError",
"(",
"\"SSL error: {0}\"",
".",
"format",
"(",
"error",
".",
"__str__",
"(",
")",
")",
")",
"except",
"CertificateError",
"as",
"error",
":",
"raise",
"IMAPError",
"(",
"\"Certificate error: {0}\"",
".",
"format",
"(",
"error",
".",
"__str__",
"(",
")",
")",
")"
] |
ecc9fd434c23d896ccd1f35795ccc047f946ed05
|
test
|
save_output
|
Save report data in the given directory
Args:
results (OrderedDict): Parsing results
    output_directory: The path to the directory to save in
|
parsedmarc/__init__.py
|
def save_output(results, output_directory="output"):
"""
Save report data in the given directory
Args:
results (OrderedDict): Parsing results
        output_directory: The path to the directory to save in
"""
aggregate_reports = results["aggregate_reports"]
forensic_reports = results["forensic_reports"]
if os.path.exists(output_directory):
if not os.path.isdir(output_directory):
raise ValueError("{0} is not a directory".format(output_directory))
else:
os.makedirs(output_directory)
with open("{0}".format(os.path.join(output_directory, "aggregate.json")),
"w", newline="\n", encoding="utf-8") as agg_json:
agg_json.write(json.dumps(aggregate_reports, ensure_ascii=False,
indent=2))
with open("{0}".format(os.path.join(output_directory, "aggregate.csv")),
"w", newline="\n", encoding="utf-8") as agg_csv:
csv = parsed_aggregate_reports_to_csv(aggregate_reports)
agg_csv.write(csv)
with open("{0}".format(os.path.join(output_directory, "forensic.json")),
"w", newline="\n", encoding="utf-8") as for_json:
for_json.write(json.dumps(forensic_reports, ensure_ascii=False,
indent=2))
with open("{0}".format(os.path.join(output_directory, "forensic.csv")),
"w", newline="\n", encoding="utf-8") as for_csv:
csv = parsed_forensic_reports_to_csv(forensic_reports)
for_csv.write(csv)
samples_directory = os.path.join(output_directory, "samples")
if not os.path.exists(samples_directory):
os.makedirs(samples_directory)
sample_filenames = []
for forensic_report in forensic_reports:
sample = forensic_report["sample"]
message_count = 0
parsed_sample = forensic_report["parsed_sample"]
subject = parsed_sample["filename_safe_subject"]
filename = subject
while filename in sample_filenames:
message_count += 1
filename = "{0} ({1})".format(subject, message_count)
sample_filenames.append(filename)
filename = "{0}.eml".format(filename)
path = os.path.join(samples_directory, filename)
with open(path, "w", newline="\n", encoding="utf-8") as sample_file:
sample_file.write(sample)
|
def save_output(results, output_directory="output"):
"""
Save report data in the given directory
Args:
results (OrderedDict): Parsing results
        output_directory: The path to the directory to save in
"""
aggregate_reports = results["aggregate_reports"]
forensic_reports = results["forensic_reports"]
if os.path.exists(output_directory):
if not os.path.isdir(output_directory):
raise ValueError("{0} is not a directory".format(output_directory))
else:
os.makedirs(output_directory)
with open("{0}".format(os.path.join(output_directory, "aggregate.json")),
"w", newline="\n", encoding="utf-8") as agg_json:
agg_json.write(json.dumps(aggregate_reports, ensure_ascii=False,
indent=2))
with open("{0}".format(os.path.join(output_directory, "aggregate.csv")),
"w", newline="\n", encoding="utf-8") as agg_csv:
csv = parsed_aggregate_reports_to_csv(aggregate_reports)
agg_csv.write(csv)
with open("{0}".format(os.path.join(output_directory, "forensic.json")),
"w", newline="\n", encoding="utf-8") as for_json:
for_json.write(json.dumps(forensic_reports, ensure_ascii=False,
indent=2))
with open("{0}".format(os.path.join(output_directory, "forensic.csv")),
"w", newline="\n", encoding="utf-8") as for_csv:
csv = parsed_forensic_reports_to_csv(forensic_reports)
for_csv.write(csv)
samples_directory = os.path.join(output_directory, "samples")
if not os.path.exists(samples_directory):
os.makedirs(samples_directory)
sample_filenames = []
for forensic_report in forensic_reports:
sample = forensic_report["sample"]
message_count = 0
parsed_sample = forensic_report["parsed_sample"]
subject = parsed_sample["filename_safe_subject"]
filename = subject
while filename in sample_filenames:
message_count += 1
filename = "{0} ({1})".format(subject, message_count)
sample_filenames.append(filename)
filename = "{0}.eml".format(filename)
path = os.path.join(samples_directory, filename)
with open(path, "w", newline="\n", encoding="utf-8") as sample_file:
sample_file.write(sample)
|
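A short, hedged sketch of combining save_output with the parser from the earlier record; the directory name, host, and credentials are placeholders. The files listed in the trailing comment are the ones the function itself writes.

# Hedged sketch: parse a mailbox in test mode, then persist the results to disk
from parsedmarc import get_dmarc_reports_from_inbox, save_output

results = get_dmarc_reports_from_inbox(host="imap.example.com",   # placeholder
                                       user="dmarc@example.com",  # placeholder
                                       password="hunter2",        # placeholder
                                       test=True)
save_output(results, output_directory="dmarc-output")
# Writes aggregate.json, aggregate.csv, forensic.json, forensic.csv and a samples/ subdirectory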
[
"Save",
"report",
"data",
"in",
"the",
"given",
"directory"
] |
domainaware/parsedmarc
|
python
|
https://github.com/domainaware/parsedmarc/blob/ecc9fd434c23d896ccd1f35795ccc047f946ed05/parsedmarc/__init__.py#L1307-L1366
|
[
"def",
"save_output",
"(",
"results",
",",
"output_directory",
"=",
"\"output\"",
")",
":",
"aggregate_reports",
"=",
"results",
"[",
"\"aggregate_reports\"",
"]",
"forensic_reports",
"=",
"results",
"[",
"\"forensic_reports\"",
"]",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"output_directory",
")",
":",
"if",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"output_directory",
")",
":",
"raise",
"ValueError",
"(",
"\"{0} is not a directory\"",
".",
"format",
"(",
"output_directory",
")",
")",
"else",
":",
"os",
".",
"makedirs",
"(",
"output_directory",
")",
"with",
"open",
"(",
"\"{0}\"",
".",
"format",
"(",
"os",
".",
"path",
".",
"join",
"(",
"output_directory",
",",
"\"aggregate.json\"",
")",
")",
",",
"\"w\"",
",",
"newline",
"=",
"\"\\n\"",
",",
"encoding",
"=",
"\"utf-8\"",
")",
"as",
"agg_json",
":",
"agg_json",
".",
"write",
"(",
"json",
".",
"dumps",
"(",
"aggregate_reports",
",",
"ensure_ascii",
"=",
"False",
",",
"indent",
"=",
"2",
")",
")",
"with",
"open",
"(",
"\"{0}\"",
".",
"format",
"(",
"os",
".",
"path",
".",
"join",
"(",
"output_directory",
",",
"\"aggregate.csv\"",
")",
")",
",",
"\"w\"",
",",
"newline",
"=",
"\"\\n\"",
",",
"encoding",
"=",
"\"utf-8\"",
")",
"as",
"agg_csv",
":",
"csv",
"=",
"parsed_aggregate_reports_to_csv",
"(",
"aggregate_reports",
")",
"agg_csv",
".",
"write",
"(",
"csv",
")",
"with",
"open",
"(",
"\"{0}\"",
".",
"format",
"(",
"os",
".",
"path",
".",
"join",
"(",
"output_directory",
",",
"\"forensic.json\"",
")",
")",
",",
"\"w\"",
",",
"newline",
"=",
"\"\\n\"",
",",
"encoding",
"=",
"\"utf-8\"",
")",
"as",
"for_json",
":",
"for_json",
".",
"write",
"(",
"json",
".",
"dumps",
"(",
"forensic_reports",
",",
"ensure_ascii",
"=",
"False",
",",
"indent",
"=",
"2",
")",
")",
"with",
"open",
"(",
"\"{0}\"",
".",
"format",
"(",
"os",
".",
"path",
".",
"join",
"(",
"output_directory",
",",
"\"forensic.csv\"",
")",
")",
",",
"\"w\"",
",",
"newline",
"=",
"\"\\n\"",
",",
"encoding",
"=",
"\"utf-8\"",
")",
"as",
"for_csv",
":",
"csv",
"=",
"parsed_forensic_reports_to_csv",
"(",
"forensic_reports",
")",
"for_csv",
".",
"write",
"(",
"csv",
")",
"samples_directory",
"=",
"os",
".",
"path",
".",
"join",
"(",
"output_directory",
",",
"\"samples\"",
")",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"samples_directory",
")",
":",
"os",
".",
"makedirs",
"(",
"samples_directory",
")",
"sample_filenames",
"=",
"[",
"]",
"for",
"forensic_report",
"in",
"forensic_reports",
":",
"sample",
"=",
"forensic_report",
"[",
"\"sample\"",
"]",
"message_count",
"=",
"0",
"parsed_sample",
"=",
"forensic_report",
"[",
"\"parsed_sample\"",
"]",
"subject",
"=",
"parsed_sample",
"[",
"\"filename_safe_subject\"",
"]",
"filename",
"=",
"subject",
"while",
"filename",
"in",
"sample_filenames",
":",
"message_count",
"+=",
"1",
"filename",
"=",
"\"{0} ({1})\"",
".",
"format",
"(",
"subject",
",",
"message_count",
")",
"sample_filenames",
".",
"append",
"(",
"filename",
")",
"filename",
"=",
"\"{0}.eml\"",
".",
"format",
"(",
"filename",
")",
"path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"samples_directory",
",",
"filename",
")",
"with",
"open",
"(",
"path",
",",
"\"w\"",
",",
"newline",
"=",
"\"\\n\"",
",",
"encoding",
"=",
"\"utf-8\"",
")",
"as",
"sample_file",
":",
"sample_file",
".",
"write",
"(",
"sample",
")"
] |
ecc9fd434c23d896ccd1f35795ccc047f946ed05
|
test
|
get_report_zip
|
Creates a zip file of parsed report output
Args:
results (OrderedDict): The parsed results
Returns:
bytes: zip file bytes
|
parsedmarc/__init__.py
|
def get_report_zip(results):
"""
Creates a zip file of parsed report output
Args:
results (OrderedDict): The parsed results
Returns:
bytes: zip file bytes
"""
def add_subdir(root_path, subdir):
subdir_path = os.path.join(root_path, subdir)
for subdir_root, subdir_dirs, subdir_files in os.walk(subdir_path):
for subdir_file in subdir_files:
subdir_file_path = os.path.join(root_path, subdir, subdir_file)
if os.path.isfile(subdir_file_path):
rel_path = os.path.relpath(subdir_root, subdir_file_path)
subdir_arc_name = os.path.join(rel_path, subdir_file)
zip_file.write(subdir_file_path, subdir_arc_name)
for subdir in subdir_dirs:
add_subdir(subdir_path, subdir)
storage = BytesIO()
tmp_dir = tempfile.mkdtemp()
try:
save_output(results, tmp_dir)
with zipfile.ZipFile(storage, 'w', zipfile.ZIP_DEFLATED) as zip_file:
for root, dirs, files in os.walk(tmp_dir):
for file in files:
file_path = os.path.join(root, file)
if os.path.isfile(file_path):
arcname = os.path.join(os.path.relpath(root, tmp_dir),
file)
zip_file.write(file_path, arcname)
for directory in dirs:
dir_path = os.path.join(root, directory)
if os.path.isdir(dir_path):
zip_file.write(dir_path, directory)
add_subdir(root, directory)
finally:
shutil.rmtree(tmp_dir)
return storage.getvalue()
|
def get_report_zip(results):
"""
Creates a zip file of parsed report output
Args:
results (OrderedDict): The parsed results
Returns:
bytes: zip file bytes
"""
def add_subdir(root_path, subdir):
subdir_path = os.path.join(root_path, subdir)
for subdir_root, subdir_dirs, subdir_files in os.walk(subdir_path):
for subdir_file in subdir_files:
subdir_file_path = os.path.join(root_path, subdir, subdir_file)
if os.path.isfile(subdir_file_path):
rel_path = os.path.relpath(subdir_root, subdir_file_path)
subdir_arc_name = os.path.join(rel_path, subdir_file)
zip_file.write(subdir_file_path, subdir_arc_name)
for subdir in subdir_dirs:
add_subdir(subdir_path, subdir)
storage = BytesIO()
tmp_dir = tempfile.mkdtemp()
try:
save_output(results, tmp_dir)
with zipfile.ZipFile(storage, 'w', zipfile.ZIP_DEFLATED) as zip_file:
for root, dirs, files in os.walk(tmp_dir):
for file in files:
file_path = os.path.join(root, file)
if os.path.isfile(file_path):
arcname = os.path.join(os.path.relpath(root, tmp_dir),
file)
zip_file.write(file_path, arcname)
for directory in dirs:
dir_path = os.path.join(root, directory)
if os.path.isdir(dir_path):
zip_file.write(dir_path, directory)
add_subdir(root, directory)
finally:
shutil.rmtree(tmp_dir)
return storage.getvalue()
|
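A hedged sketch of writing the in-memory ZIP returned by get_report_zip to a file; host and credentials are again placeholders, and the parsing call reuses the function documented earlier in this dump.

# Hedged sketch: parse a mailbox in test mode, then save the zipped output to disk
from parsedmarc import get_dmarc_reports_from_inbox, get_report_zip

results = get_dmarc_reports_from_inbox(host="imap.example.com",   # placeholder
                                       user="dmarc@example.com",  # placeholder
                                       password="hunter2",        # placeholder
                                       test=True)
zip_bytes = get_report_zip(results)
with open("dmarc-reports.zip", "wb") as zip_file:
    zip_file.write(zip_bytes)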
[
"Creates",
"a",
"zip",
"file",
"of",
"parsed",
"report",
"output"
] |
domainaware/parsedmarc
|
python
|
https://github.com/domainaware/parsedmarc/blob/ecc9fd434c23d896ccd1f35795ccc047f946ed05/parsedmarc/__init__.py#L1369-L1411
|
[
"def",
"get_report_zip",
"(",
"results",
")",
":",
"def",
"add_subdir",
"(",
"root_path",
",",
"subdir",
")",
":",
"subdir_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"root_path",
",",
"subdir",
")",
"for",
"subdir_root",
",",
"subdir_dirs",
",",
"subdir_files",
"in",
"os",
".",
"walk",
"(",
"subdir_path",
")",
":",
"for",
"subdir_file",
"in",
"subdir_files",
":",
"subdir_file_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"root_path",
",",
"subdir",
",",
"subdir_file",
")",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"subdir_file_path",
")",
":",
"rel_path",
"=",
"os",
".",
"path",
".",
"relpath",
"(",
"subdir_root",
",",
"subdir_file_path",
")",
"subdir_arc_name",
"=",
"os",
".",
"path",
".",
"join",
"(",
"rel_path",
",",
"subdir_file",
")",
"zip_file",
".",
"write",
"(",
"subdir_file_path",
",",
"subdir_arc_name",
")",
"for",
"subdir",
"in",
"subdir_dirs",
":",
"add_subdir",
"(",
"subdir_path",
",",
"subdir",
")",
"storage",
"=",
"BytesIO",
"(",
")",
"tmp_dir",
"=",
"tempfile",
".",
"mkdtemp",
"(",
")",
"try",
":",
"save_output",
"(",
"results",
",",
"tmp_dir",
")",
"with",
"zipfile",
".",
"ZipFile",
"(",
"storage",
",",
"'w'",
",",
"zipfile",
".",
"ZIP_DEFLATED",
")",
"as",
"zip_file",
":",
"for",
"root",
",",
"dirs",
",",
"files",
"in",
"os",
".",
"walk",
"(",
"tmp_dir",
")",
":",
"for",
"file",
"in",
"files",
":",
"file_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"root",
",",
"file",
")",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"file_path",
")",
":",
"arcname",
"=",
"os",
".",
"path",
".",
"join",
"(",
"os",
".",
"path",
".",
"relpath",
"(",
"root",
",",
"tmp_dir",
")",
",",
"file",
")",
"zip_file",
".",
"write",
"(",
"file_path",
",",
"arcname",
")",
"for",
"directory",
"in",
"dirs",
":",
"dir_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"root",
",",
"directory",
")",
"if",
"os",
".",
"path",
".",
"isdir",
"(",
"dir_path",
")",
":",
"zip_file",
".",
"write",
"(",
"dir_path",
",",
"directory",
")",
"add_subdir",
"(",
"root",
",",
"directory",
")",
"finally",
":",
"shutil",
".",
"rmtree",
"(",
"tmp_dir",
")",
"return",
"storage",
".",
"getvalue",
"(",
")"
] |
ecc9fd434c23d896ccd1f35795ccc047f946ed05
|
test
|
email_results
|
Emails parsing results as a zip file
Args:
results (OrderedDict): Parsing results
host: Mail server hostname or IP address
mail_from: The value of the message from header
    mail_to: A list of addresses to mail to
port (int): Port to use
ssl (bool): Require a SSL connection from the start
user: An optional username
password: An optional password
subject: Overrides the default message subject
attachment_filename: Override the default attachment filename
message: Override the default plain text body
ssl_context: SSL context options
|
parsedmarc/__init__.py
|
def email_results(results, host, mail_from, mail_to, port=0,
ssl=False, user=None, password=None, subject=None,
attachment_filename=None, message=None, ssl_context=None):
"""
Emails parsing results as a zip file
Args:
results (OrderedDict): Parsing results
host: Mail server hostname or IP address
mail_from: The value of the message from header
        mail_to: A list of addresses to mail to
port (int): Port to use
ssl (bool): Require a SSL connection from the start
user: An optional username
password: An optional password
subject: Overrides the default message subject
attachment_filename: Override the default attachment filename
message: Override the default plain text body
ssl_context: SSL context options
"""
logging.debug("Emailing report to: {0}".format(",".join(mail_to)))
date_string = datetime.now().strftime("%Y-%m-%d")
if attachment_filename:
if not attachment_filename.lower().endswith(".zip"):
attachment_filename += ".zip"
filename = attachment_filename
else:
filename = "DMARC-{0}.zip".format(date_string)
assert isinstance(mail_to, list)
msg = MIMEMultipart()
msg['From'] = mail_from
msg['To'] = ", ".join(mail_to)
msg['Date'] = email.utils.formatdate(localtime=True)
msg['Subject'] = subject or "DMARC results for {0}".format(date_string)
text = message or "Please see the attached zip file\n"
msg.attach(MIMEText(text))
zip_bytes = get_report_zip(results)
part = MIMEApplication(zip_bytes, Name=filename)
part['Content-Disposition'] = 'attachment; filename="{0}"'.format(filename)
msg.attach(part)
try:
if ssl_context is None:
ssl_context = create_default_context()
if ssl:
server = smtplib.SMTP_SSL(host, port=port, context=ssl_context)
server.connect(host, port)
server.ehlo_or_helo_if_needed()
else:
server = smtplib.SMTP(host, port=port)
server.connect(host, port)
server.ehlo_or_helo_if_needed()
if server.has_extn("starttls"):
server.starttls(context=ssl_context)
server.ehlo()
else:
logger.warning("SMTP server does not support STARTTLS. "
"Proceeding in plain text!")
if user and password:
server.login(user, password)
server.sendmail(mail_from, mail_to, msg.as_string())
except smtplib.SMTPException as error:
error = error.__str__().lstrip("b'").rstrip("'").rstrip(".")
raise SMTPError(error)
except socket.gaierror:
raise SMTPError("DNS resolution failed")
except ConnectionRefusedError:
raise SMTPError("Connection refused")
except ConnectionResetError:
raise SMTPError("Connection reset")
except ConnectionAbortedError:
raise SMTPError("Connection aborted")
except TimeoutError:
raise SMTPError("Connection timed out")
except SSLError as error:
raise SMTPError("SSL error: {0}".format(error.__str__()))
except CertificateError as error:
raise SMTPError("Certificate error: {0}".format(error.__str__()))
|
def email_results(results, host, mail_from, mail_to, port=0,
ssl=False, user=None, password=None, subject=None,
attachment_filename=None, message=None, ssl_context=None):
"""
Emails parsing results as a zip file
Args:
results (OrderedDict): Parsing results
host: Mail server hostname or IP address
mail_from: The value of the message from header
        mail_to: A list of addresses to mail to
port (int): Port to use
ssl (bool): Require a SSL connection from the start
user: An optional username
password: An optional password
subject: Overrides the default message subject
attachment_filename: Override the default attachment filename
message: Override the default plain text body
ssl_context: SSL context options
"""
logging.debug("Emailing report to: {0}".format(",".join(mail_to)))
date_string = datetime.now().strftime("%Y-%m-%d")
if attachment_filename:
if not attachment_filename.lower().endswith(".zip"):
attachment_filename += ".zip"
filename = attachment_filename
else:
filename = "DMARC-{0}.zip".format(date_string)
assert isinstance(mail_to, list)
msg = MIMEMultipart()
msg['From'] = mail_from
msg['To'] = ", ".join(mail_to)
msg['Date'] = email.utils.formatdate(localtime=True)
msg['Subject'] = subject or "DMARC results for {0}".format(date_string)
text = message or "Please see the attached zip file\n"
msg.attach(MIMEText(text))
zip_bytes = get_report_zip(results)
part = MIMEApplication(zip_bytes, Name=filename)
part['Content-Disposition'] = 'attachment; filename="{0}"'.format(filename)
msg.attach(part)
try:
if ssl_context is None:
ssl_context = create_default_context()
if ssl:
server = smtplib.SMTP_SSL(host, port=port, context=ssl_context)
server.connect(host, port)
server.ehlo_or_helo_if_needed()
else:
server = smtplib.SMTP(host, port=port)
server.connect(host, port)
server.ehlo_or_helo_if_needed()
if server.has_extn("starttls"):
server.starttls(context=ssl_context)
server.ehlo()
else:
logger.warning("SMTP server does not support STARTTLS. "
"Proceeding in plain text!")
if user and password:
server.login(user, password)
server.sendmail(mail_from, mail_to, msg.as_string())
except smtplib.SMTPException as error:
error = error.__str__().lstrip("b'").rstrip("'").rstrip(".")
raise SMTPError(error)
except socket.gaierror:
raise SMTPError("DNS resolution failed")
except ConnectionRefusedError:
raise SMTPError("Connection refused")
except ConnectionResetError:
raise SMTPError("Connection reset")
except ConnectionAbortedError:
raise SMTPError("Connection aborted")
except TimeoutError:
raise SMTPError("Connection timed out")
except SSLError as error:
raise SMTPError("SSL error: {0}".format(error.__str__()))
except CertificateError as error:
raise SMTPError("Certificate error: {0}".format(error.__str__()))
|
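A hedged sketch of emailing parsed results; every hostname, address, and credential is a placeholder. Note that mail_to must be a list, as the assert inside the function enforces.

# Hedged sketch: parse in test mode, then mail the zipped results through an SMTP relay
from parsedmarc import get_dmarc_reports_from_inbox, email_results

results = get_dmarc_reports_from_inbox(host="imap.example.com",   # placeholder
                                       user="dmarc@example.com",  # placeholder
                                       password="hunter2",        # placeholder
                                       test=True)
email_results(results,
              host="smtp.example.com",              # placeholder SMTP relay
              mail_from="dmarc@example.com",
              mail_to=["postmaster@example.com"],   # must be a list
              port=587,
              user="dmarc@example.com",
              password="hunter2",
              subject="DMARC parsing results")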
[
"Emails",
"parsing",
"results",
"as",
"a",
"zip",
"file"
] |
domainaware/parsedmarc
|
python
|
https://github.com/domainaware/parsedmarc/blob/ecc9fd434c23d896ccd1f35795ccc047f946ed05/parsedmarc/__init__.py#L1414-L1496
|
[
"def",
"email_results",
"(",
"results",
",",
"host",
",",
"mail_from",
",",
"mail_to",
",",
"port",
"=",
"0",
",",
"ssl",
"=",
"False",
",",
"user",
"=",
"None",
",",
"password",
"=",
"None",
",",
"subject",
"=",
"None",
",",
"attachment_filename",
"=",
"None",
",",
"message",
"=",
"None",
",",
"ssl_context",
"=",
"None",
")",
":",
"logging",
".",
"debug",
"(",
"\"Emailing report to: {0}\"",
".",
"format",
"(",
"\",\"",
".",
"join",
"(",
"mail_to",
")",
")",
")",
"date_string",
"=",
"datetime",
".",
"now",
"(",
")",
".",
"strftime",
"(",
"\"%Y-%m-%d\"",
")",
"if",
"attachment_filename",
":",
"if",
"not",
"attachment_filename",
".",
"lower",
"(",
")",
".",
"endswith",
"(",
"\".zip\"",
")",
":",
"attachment_filename",
"+=",
"\".zip\"",
"filename",
"=",
"attachment_filename",
"else",
":",
"filename",
"=",
"\"DMARC-{0}.zip\"",
".",
"format",
"(",
"date_string",
")",
"assert",
"isinstance",
"(",
"mail_to",
",",
"list",
")",
"msg",
"=",
"MIMEMultipart",
"(",
")",
"msg",
"[",
"'From'",
"]",
"=",
"mail_from",
"msg",
"[",
"'To'",
"]",
"=",
"\", \"",
".",
"join",
"(",
"mail_to",
")",
"msg",
"[",
"'Date'",
"]",
"=",
"email",
".",
"utils",
".",
"formatdate",
"(",
"localtime",
"=",
"True",
")",
"msg",
"[",
"'Subject'",
"]",
"=",
"subject",
"or",
"\"DMARC results for {0}\"",
".",
"format",
"(",
"date_string",
")",
"text",
"=",
"message",
"or",
"\"Please see the attached zip file\\n\"",
"msg",
".",
"attach",
"(",
"MIMEText",
"(",
"text",
")",
")",
"zip_bytes",
"=",
"get_report_zip",
"(",
"results",
")",
"part",
"=",
"MIMEApplication",
"(",
"zip_bytes",
",",
"Name",
"=",
"filename",
")",
"part",
"[",
"'Content-Disposition'",
"]",
"=",
"'attachment; filename=\"{0}\"'",
".",
"format",
"(",
"filename",
")",
"msg",
".",
"attach",
"(",
"part",
")",
"try",
":",
"if",
"ssl_context",
"is",
"None",
":",
"ssl_context",
"=",
"create_default_context",
"(",
")",
"if",
"ssl",
":",
"server",
"=",
"smtplib",
".",
"SMTP_SSL",
"(",
"host",
",",
"port",
"=",
"port",
",",
"context",
"=",
"ssl_context",
")",
"server",
".",
"connect",
"(",
"host",
",",
"port",
")",
"server",
".",
"ehlo_or_helo_if_needed",
"(",
")",
"else",
":",
"server",
"=",
"smtplib",
".",
"SMTP",
"(",
"host",
",",
"port",
"=",
"port",
")",
"server",
".",
"connect",
"(",
"host",
",",
"port",
")",
"server",
".",
"ehlo_or_helo_if_needed",
"(",
")",
"if",
"server",
".",
"has_extn",
"(",
"\"starttls\"",
")",
":",
"server",
".",
"starttls",
"(",
"context",
"=",
"ssl_context",
")",
"server",
".",
"ehlo",
"(",
")",
"else",
":",
"logger",
".",
"warning",
"(",
"\"SMTP server does not support STARTTLS. \"",
"\"Proceeding in plain text!\"",
")",
"if",
"user",
"and",
"password",
":",
"server",
".",
"login",
"(",
"user",
",",
"password",
")",
"server",
".",
"sendmail",
"(",
"mail_from",
",",
"mail_to",
",",
"msg",
".",
"as_string",
"(",
")",
")",
"except",
"smtplib",
".",
"SMTPException",
"as",
"error",
":",
"error",
"=",
"error",
".",
"__str__",
"(",
")",
".",
"lstrip",
"(",
"\"b'\"",
")",
".",
"rstrip",
"(",
"\"'\"",
")",
".",
"rstrip",
"(",
"\".\"",
")",
"raise",
"SMTPError",
"(",
"error",
")",
"except",
"socket",
".",
"gaierror",
":",
"raise",
"SMTPError",
"(",
"\"DNS resolution failed\"",
")",
"except",
"ConnectionRefusedError",
":",
"raise",
"SMTPError",
"(",
"\"Connection refused\"",
")",
"except",
"ConnectionResetError",
":",
"raise",
"SMTPError",
"(",
"\"Connection reset\"",
")",
"except",
"ConnectionAbortedError",
":",
"raise",
"SMTPError",
"(",
"\"Connection aborted\"",
")",
"except",
"TimeoutError",
":",
"raise",
"SMTPError",
"(",
"\"Connection timed out\"",
")",
"except",
"SSLError",
"as",
"error",
":",
"raise",
"SMTPError",
"(",
"\"SSL error: {0}\"",
".",
"format",
"(",
"error",
".",
"__str__",
"(",
")",
")",
")",
"except",
"CertificateError",
"as",
"error",
":",
"raise",
"SMTPError",
"(",
"\"Certificate error: {0}\"",
".",
"format",
"(",
"error",
".",
"__str__",
"(",
")",
")",
")"
] |
ecc9fd434c23d896ccd1f35795ccc047f946ed05
|
test
|
watch_inbox
|
Use an IDLE IMAP connection to parse incoming emails, and pass the results
to a callback function
Args:
host: The mail server hostname or IP address
username: The mail server username
password: The mail server password
callback: The callback function to receive the parsing results
port: The mail server port
ssl (bool): Use SSL/TLS
    ssl_context (SSLContext): An SSL context
reports_folder: The IMAP folder where reports can be found
archive_folder: The folder to move processed mail to
delete (bool): Delete messages after processing them
test (bool): Do not move or delete messages after processing them
    wait (int): Number of seconds to wait for an IMAP IDLE response
nameservers (list): A list of one or more nameservers to use
(Cloudflare's public DNS resolvers by default)
dns_timeout (float): Set the DNS query timeout
strip_attachment_payloads (bool): Replace attachment payloads in
forensic report samples with None
|
parsedmarc/__init__.py
|
def watch_inbox(host, username, password, callback, port=None, ssl=True,
ssl_context=None, reports_folder="INBOX",
archive_folder="Archive", delete=False, test=False, wait=30,
nameservers=None, dns_timeout=6.0,
strip_attachment_payloads=False):
"""
Use an IDLE IMAP connection to parse incoming emails, and pass the results
to a callback function
Args:
host: The mail server hostname or IP address
username: The mail server username
password: The mail server password
callback: The callback function to receive the parsing results
port: The mail server port
ssl (bool): Use SSL/TLS
        ssl_context (SSLContext): An SSL context
reports_folder: The IMAP folder where reports can be found
archive_folder: The folder to move processed mail to
delete (bool): Delete messages after processing them
test (bool): Do not move or delete messages after processing them
        wait (int): Number of seconds to wait for an IMAP IDLE response
nameservers (list): A list of one or more nameservers to use
(Cloudflare's public DNS resolvers by default)
dns_timeout (float): Set the DNS query timeout
strip_attachment_payloads (bool): Replace attachment payloads in
forensic report samples with None
"""
rf = reports_folder
af = archive_folder
ns = nameservers
dt = dns_timeout
if ssl_context is None:
ssl_context = create_default_context()
server = imapclient.IMAPClient(host, port=port, ssl=ssl,
ssl_context=ssl_context,
use_uid=True)
try:
server.login(username, password)
imap_capabilities = get_imap_capabilities(server)
if "IDLE" not in imap_capabilities:
raise IMAPError("Cannot watch inbox: IMAP server does not support "
"the IDLE command")
ms = "MOVE" in imap_capabilities
server.select_folder(rf)
idle_start_time = time.monotonic()
server.idle()
except imapclient.exceptions.IMAPClientError as error:
error = error.__str__().replace("b'", "").replace("'", "")
# Workaround for random Exchange/Office365 IMAP errors
if "unexpected response" in error or "BAD" in error:
sleep_minutes = 5
logger.debug(
"{0}. "
"Waiting {1} minutes before trying again".format(
error,
sleep_minutes))
logger.debug("Reconnecting watcher")
try:
server.logout()
except Exception as e:
logger.debug("Failed to log out: {0}".format(e.__str__()))
server = imapclient.IMAPClient(host)
server.login(username, password)
server.select_folder(rf)
idle_start_time = time.monotonic()
ms = "MOVE" in get_imap_capabilities(server)
sa = strip_attachment_payloads
res = get_dmarc_reports_from_inbox(connection=server,
move_supported=ms,
reports_folder=rf,
archive_folder=af,
delete=delete,
test=test,
nameservers=ns,
dns_timeout=dt,
strip_attachment_payloads=sa)
callback(res)
server.idle()
else:
raise IMAPError(error)
except socket.gaierror:
raise IMAPError("DNS resolution failed")
except ConnectionRefusedError:
raise IMAPError("Connection refused")
except ConnectionResetError:
logger.debug("IMAP error: Connection reset")
logger.debug("Reconnecting watcher")
try:
server.shutdown()
except Exception as e:
logger.debug("Failed to disconnect: {0}".format(e.__str__()))
server = imapclient.IMAPClient(host)
server.login(username, password)
server.select_folder(rf)
idle_start_time = time.monotonic()
ms = "MOVE" in get_imap_capabilities(server)
res = get_dmarc_reports_from_inbox(connection=server,
move_supported=ms,
reports_folder=rf,
archive_folder=af,
delete=delete,
test=test,
nameservers=ns,
dns_timeout=dt)
callback(res)
server.idle()
except KeyError:
logger.debug("IMAP error: Server returned unexpected result")
logger.debug("Reconnecting watcher")
try:
server.logout()
except Exception as e:
logger.debug("Failed to log out: {0}".format(e.__str__()))
server = imapclient.IMAPClient(host)
server.login(username, password)
server.select_folder(rf)
idle_start_time = time.monotonic()
ms = "MOVE" in get_imap_capabilities(server)
res = get_dmarc_reports_from_inbox(connection=server,
move_supported=ms,
reports_folder=rf,
archive_folder=af,
delete=delete,
test=test,
nameservers=ns,
dns_timeout=dt)
callback(res)
server.idle()
except ConnectionAbortedError:
raise IMAPError("Connection aborted")
except TimeoutError:
raise IMAPError("Connection timed out")
except SSLError as error:
raise IMAPError("SSL error: {0}".format(error.__str__()))
except CertificateError as error:
raise IMAPError("Certificate error: {0}".format(error.__str__()))
except BrokenPipeError:
logger.debug("IMAP error: Broken pipe")
logger.debug("Reconnecting watcher")
try:
server.shutdown()
except Exception as e:
logger.debug("Failed to disconnect: {0}".format(e.__str__()))
server = imapclient.IMAPClient(host)
server.login(username, password)
server.select_folder(rf)
idle_start_time = time.monotonic()
ms = "MOVE" in get_imap_capabilities(server)
res = get_dmarc_reports_from_inbox(connection=server,
move_supported=ms,
reports_folder=rf,
archive_folder=af,
delete=delete,
test=test,
nameservers=ns,
dns_timeout=dt)
callback(res)
server.idle()
while True:
try:
# Refresh the IDLE session every 5 minutes to stay connected
if time.monotonic() - idle_start_time > 5 * 60:
logger.debug("IMAP: Refreshing IDLE session")
server.idle_done()
server.idle()
idle_start_time = time.monotonic()
responses = server.idle_check(timeout=wait)
if responses is not None:
if len(responses) == 0:
# Gmail/G-Suite does not generate anything in the responses
server.idle_done()
res = get_dmarc_reports_from_inbox(connection=server,
move_supported=ms,
reports_folder=rf,
archive_folder=af,
delete=delete,
test=test,
nameservers=ns,
dns_timeout=dt)
callback(res)
server.idle()
idle_start_time = time.monotonic()
for response in responses:
logging.debug("Received response: {0}".format(response))
if response[0] > 0 and response[1] == b'RECENT':
server.idle_done()
res = get_dmarc_reports_from_inbox(connection=server,
move_supported=ms,
reports_folder=rf,
archive_folder=af,
delete=delete,
test=test,
nameservers=ns,
dns_timeout=dt)
callback(res)
server.idle()
idle_start_time = time.monotonic()
break
except imapclient.exceptions.IMAPClientError as error:
error = error.__str__().replace("b'", "").replace("'", "")
# Workaround for random Exchange/Office365 IMAP errors
if "unexpected response" in error or "BAD" in error:
sleep_minutes = 5
logger.debug(
"{0}. "
"Waiting {1} minutes before trying again".format(
error,
sleep_minutes))
logger.debug("Reconnecting watcher")
try:
server.logout()
except Exception as e:
logger.debug("Failed to disconnect: {0}".format(
e.__str__()))
server = imapclient.IMAPClient(host)
server.login(username, password)
server.select_folder(rf)
idle_start_time = time.monotonic()
ms = "MOVE" in get_imap_capabilities(server)
res = get_dmarc_reports_from_inbox(connection=server,
move_supported=ms,
reports_folder=rf,
archive_folder=af,
delete=delete,
test=test,
nameservers=ns,
dns_timeout=dt)
callback(res)
server.idle()
else:
raise IMAPError(error)
except socket.gaierror:
raise IMAPError("DNS resolution failed")
except ConnectionRefusedError:
raise IMAPError("Connection refused")
except (KeyError, socket.error, BrokenPipeError, ConnectionResetError):
logger.debug("IMAP error: Connection reset")
logger.debug("Reconnecting watcher")
try:
server.logout()
except Exception as e:
logger.debug("Failed to disconnect: {0}".format(e.__str__()))
server = imapclient.IMAPClient(host)
server.login(username, password)
server.select_folder(rf)
idle_start_time = time.monotonic()
ms = "MOVE" in get_imap_capabilities(server)
res = get_dmarc_reports_from_inbox(connection=server,
move_supported=ms,
reports_folder=rf,
archive_folder=af,
delete=delete,
test=test,
nameservers=ns,
dns_timeout=dt)
callback(res)
server.idle()
except KeyError:
logger.debug("IMAP error: Server returned unexpected result")
logger.debug("Reconnecting watcher")
try:
server.logout()
except Exception as e:
logger.debug("Failed to log out: {0}".format(e.__str__()))
server = imapclient.IMAPClient(host)
server.login(username, password)
server.select_folder(rf)
idle_start_time = time.monotonic()
ms = "MOVE" in get_imap_capabilities(server)
res = get_dmarc_reports_from_inbox(connection=server,
move_supported=ms,
reports_folder=rf,
archive_folder=af,
delete=delete,
test=test,
nameservers=ns,
dns_timeout=dt)
callback(res)
server.idle()
except ConnectionAbortedError:
raise IMAPError("Connection aborted")
except TimeoutError:
raise IMAPError("Connection timed out")
except SSLError as error:
raise IMAPError("SSL error: {0}".format(error.__str__()))
except CertificateError as error:
raise IMAPError("Certificate error: {0}".format(error.__str__()))
except BrokenPipeError:
logger.debug("IMAP error: Broken pipe")
logger.debug("Reconnecting watcher")
try:
server.shutdown()
except Exception as e:
logger.debug("Failed to disconnect: {0}".format(e.__str__()))
server = imapclient.IMAPClient(host)
server.login(username, password)
server.select_folder(rf)
idle_start_time = time.monotonic()
res = get_dmarc_reports_from_inbox(connection=server,
move_supported=ms,
reports_folder=rf,
archive_folder=af,
delete=delete,
test=test,
nameservers=ns,
dns_timeout=dt)
callback(res)
server.idle()
except KeyboardInterrupt:
break
try:
server.idle_done()
except BrokenPipeError:
pass
|
def watch_inbox(host, username, password, callback, port=None, ssl=True,
ssl_context=None, reports_folder="INBOX",
archive_folder="Archive", delete=False, test=False, wait=30,
nameservers=None, dns_timeout=6.0,
strip_attachment_payloads=False):
"""
Use an IDLE IMAP connection to parse incoming emails, and pass the results
to a callback function
Args:
host: The mail server hostname or IP address
username: The mail server username
password: The mail server password
callback: The callback function to receive the parsing results
port: The mail server port
ssl (bool): Use SSL/TLS
        ssl_context (SSLContext): An SSL context
reports_folder: The IMAP folder where reports can be found
archive_folder: The folder to move processed mail to
delete (bool): Delete messages after processing them
test (bool): Do not move or delete messages after processing them
        wait (int): Number of seconds to wait for an IMAP IDLE response
nameservers (list): A list of one or more nameservers to use
(Cloudflare's public DNS resolvers by default)
dns_timeout (float): Set the DNS query timeout
strip_attachment_payloads (bool): Replace attachment payloads in
forensic report samples with None
"""
rf = reports_folder
af = archive_folder
ns = nameservers
dt = dns_timeout
if ssl_context is None:
ssl_context = create_default_context()
server = imapclient.IMAPClient(host, port=port, ssl=ssl,
ssl_context=ssl_context,
use_uid=True)
try:
server.login(username, password)
imap_capabilities = get_imap_capabilities(server)
if "IDLE" not in imap_capabilities:
raise IMAPError("Cannot watch inbox: IMAP server does not support "
"the IDLE command")
ms = "MOVE" in imap_capabilities
server.select_folder(rf)
idle_start_time = time.monotonic()
server.idle()
except imapclient.exceptions.IMAPClientError as error:
error = error.__str__().replace("b'", "").replace("'", "")
# Workaround for random Exchange/Office365 IMAP errors
if "unexpected response" in error or "BAD" in error:
sleep_minutes = 5
logger.debug(
"{0}. "
"Waiting {1} minutes before trying again".format(
error,
sleep_minutes))
logger.debug("Reconnecting watcher")
try:
server.logout()
except Exception as e:
logger.debug("Failed to log out: {0}".format(e.__str__()))
server = imapclient.IMAPClient(host)
server.login(username, password)
server.select_folder(rf)
idle_start_time = time.monotonic()
ms = "MOVE" in get_imap_capabilities(server)
sa = strip_attachment_payloads
res = get_dmarc_reports_from_inbox(connection=server,
move_supported=ms,
reports_folder=rf,
archive_folder=af,
delete=delete,
test=test,
nameservers=ns,
dns_timeout=dt,
strip_attachment_payloads=sa)
callback(res)
server.idle()
else:
raise IMAPError(error)
except socket.gaierror:
raise IMAPError("DNS resolution failed")
except ConnectionRefusedError:
raise IMAPError("Connection refused")
except ConnectionResetError:
logger.debug("IMAP error: Connection reset")
logger.debug("Reconnecting watcher")
try:
server.shutdown()
except Exception as e:
logger.debug("Failed to disconnect: {0}".format(e.__str__()))
server = imapclient.IMAPClient(host)
server.login(username, password)
server.select_folder(rf)
idle_start_time = time.monotonic()
ms = "MOVE" in get_imap_capabilities(server)
res = get_dmarc_reports_from_inbox(connection=server,
move_supported=ms,
reports_folder=rf,
archive_folder=af,
delete=delete,
test=test,
nameservers=ns,
dns_timeout=dt)
callback(res)
server.idle()
except KeyError:
logger.debug("IMAP error: Server returned unexpected result")
logger.debug("Reconnecting watcher")
try:
server.logout()
except Exception as e:
logger.debug("Failed to log out: {0}".format(e.__str__()))
server = imapclient.IMAPClient(host)
server.login(username, password)
server.select_folder(rf)
idle_start_time = time.monotonic()
ms = "MOVE" in get_imap_capabilities(server)
res = get_dmarc_reports_from_inbox(connection=server,
move_supported=ms,
reports_folder=rf,
archive_folder=af,
delete=delete,
test=test,
nameservers=ns,
dns_timeout=dt)
callback(res)
server.idle()
except ConnectionAbortedError:
raise IMAPError("Connection aborted")
except TimeoutError:
raise IMAPError("Connection timed out")
except SSLError as error:
raise IMAPError("SSL error: {0}".format(error.__str__()))
except CertificateError as error:
raise IMAPError("Certificate error: {0}".format(error.__str__()))
except BrokenPipeError:
logger.debug("IMAP error: Broken pipe")
logger.debug("Reconnecting watcher")
try:
server.shutdown()
except Exception as e:
logger.debug("Failed to disconnect: {0}".format(e.__str__()))
server = imapclient.IMAPClient(host)
server.login(username, password)
server.select_folder(rf)
idle_start_time = time.monotonic()
ms = "MOVE" in get_imap_capabilities(server)
res = get_dmarc_reports_from_inbox(connection=server,
move_supported=ms,
reports_folder=rf,
archive_folder=af,
delete=delete,
test=test,
nameservers=ns,
dns_timeout=dt)
callback(res)
server.idle()
while True:
try:
# Refresh the IDLE session every 5 minutes to stay connected
if time.monotonic() - idle_start_time > 5 * 60:
logger.debug("IMAP: Refreshing IDLE session")
server.idle_done()
server.idle()
idle_start_time = time.monotonic()
responses = server.idle_check(timeout=wait)
if responses is not None:
if len(responses) == 0:
# Gmail/G-Suite does not generate anything in the responses
server.idle_done()
res = get_dmarc_reports_from_inbox(connection=server,
move_supported=ms,
reports_folder=rf,
archive_folder=af,
delete=delete,
test=test,
nameservers=ns,
dns_timeout=dt)
callback(res)
server.idle()
idle_start_time = time.monotonic()
for response in responses:
logging.debug("Received response: {0}".format(response))
if response[0] > 0 and response[1] == b'RECENT':
server.idle_done()
res = get_dmarc_reports_from_inbox(connection=server,
move_supported=ms,
reports_folder=rf,
archive_folder=af,
delete=delete,
test=test,
nameservers=ns,
dns_timeout=dt)
callback(res)
server.idle()
idle_start_time = time.monotonic()
break
except imapclient.exceptions.IMAPClientError as error:
error = error.__str__().replace("b'", "").replace("'", "")
# Workaround for random Exchange/Office365 IMAP errors
if "unexpected response" in error or "BAD" in error:
sleep_minutes = 5
logger.debug(
"{0}. "
"Waiting {1} minutes before trying again".format(
error,
sleep_minutes))
logger.debug("Reconnecting watcher")
try:
server.logout()
except Exception as e:
logger.debug("Failed to disconnect: {0}".format(
e.__str__()))
server = imapclient.IMAPClient(host)
server.login(username, password)
server.select_folder(rf)
idle_start_time = time.monotonic()
ms = "MOVE" in get_imap_capabilities(server)
res = get_dmarc_reports_from_inbox(connection=server,
move_supported=ms,
reports_folder=rf,
archive_folder=af,
delete=delete,
test=test,
nameservers=ns,
dns_timeout=dt)
callback(res)
server.idle()
else:
raise IMAPError(error)
except socket.gaierror:
raise IMAPError("DNS resolution failed")
except ConnectionRefusedError:
raise IMAPError("Connection refused")
except (KeyError, socket.error, BrokenPipeError, ConnectionResetError):
logger.debug("IMAP error: Connection reset")
logger.debug("Reconnecting watcher")
try:
server.logout()
except Exception as e:
logger.debug("Failed to disconnect: {0}".format(e.__str__()))
server = imapclient.IMAPClient(host)
server.login(username, password)
server.select_folder(rf)
idle_start_time = time.monotonic()
ms = "MOVE" in get_imap_capabilities(server)
res = get_dmarc_reports_from_inbox(connection=server,
move_supported=ms,
reports_folder=rf,
archive_folder=af,
delete=delete,
test=test,
nameservers=ns,
dns_timeout=dt)
callback(res)
server.idle()
except KeyError:
logger.debug("IMAP error: Server returned unexpected result")
logger.debug("Reconnecting watcher")
try:
server.logout()
except Exception as e:
logger.debug("Failed to log out: {0}".format(e.__str__()))
server = imapclient.IMAPClient(host)
server.login(username, password)
server.select_folder(rf)
idle_start_time = time.monotonic()
ms = "MOVE" in get_imap_capabilities(server)
res = get_dmarc_reports_from_inbox(connection=server,
move_supported=ms,
reports_folder=rf,
archive_folder=af,
delete=delete,
test=test,
nameservers=ns,
dns_timeout=dt)
callback(res)
server.idle()
except ConnectionAbortedError:
raise IMAPError("Connection aborted")
except TimeoutError:
raise IMAPError("Connection timed out")
except SSLError as error:
raise IMAPError("SSL error: {0}".format(error.__str__()))
except CertificateError as error:
raise IMAPError("Certificate error: {0}".format(error.__str__()))
except BrokenPipeError:
logger.debug("IMAP error: Broken pipe")
logger.debug("Reconnecting watcher")
try:
server.shutdown()
except Exception as e:
logger.debug("Failed to disconnect: {0}".format(e.__str__()))
server = imapclient.IMAPClient(host)
server.login(username, password)
server.select_folder(rf)
idle_start_time = time.monotonic()
res = get_dmarc_reports_from_inbox(connection=server,
move_supported=ms,
reports_folder=rf,
archive_folder=af,
delete=delete,
test=test,
nameservers=ns,
dns_timeout=dt)
callback(res)
server.idle()
except KeyboardInterrupt:
break
try:
server.idle_done()
except BrokenPipeError:
pass
|
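A sketch of how watch_inbox might be wired up. The server name and credentials are placeholders, and the keys read inside the callback are an assumption about what get_dmarc_reports_from_inbox returns, since that helper is not part of this record.

from parsedmarc import watch_inbox

def handle_results(results):
    # Invoked each time new reports are parsed from the watched folder.
    # The key names below are assumed, not confirmed by this record.
    print("aggregate: {0}, forensic: {1}".format(
        len(results.get("aggregate_reports", [])),
        len(results.get("forensic_reports", []))))

# Blocks until interrupted (KeyboardInterrupt breaks the IDLE loop).
watch_inbox("imap.example.com", "dmarc@example.com", "placeholder-password",
            handle_results,
            reports_folder="INBOX",
            archive_folder="Archive",
            test=True,   # parse only; do not move or delete messages
            wait=30)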
[
"Use",
"an",
"IDLE",
"IMAP",
"connection",
"to",
"parse",
"incoming",
"emails",
"and",
"pass",
"the",
"results",
"to",
"a",
"callback",
"function"
] |
domainaware/parsedmarc
|
python
|
https://github.com/domainaware/parsedmarc/blob/ecc9fd434c23d896ccd1f35795ccc047f946ed05/parsedmarc/__init__.py#L1499-L1818
|
[
"def",
"watch_inbox",
"(",
"host",
",",
"username",
",",
"password",
",",
"callback",
",",
"port",
"=",
"None",
",",
"ssl",
"=",
"True",
",",
"ssl_context",
"=",
"None",
",",
"reports_folder",
"=",
"\"INBOX\"",
",",
"archive_folder",
"=",
"\"Archive\"",
",",
"delete",
"=",
"False",
",",
"test",
"=",
"False",
",",
"wait",
"=",
"30",
",",
"nameservers",
"=",
"None",
",",
"dns_timeout",
"=",
"6.0",
",",
"strip_attachment_payloads",
"=",
"False",
")",
":",
"rf",
"=",
"reports_folder",
"af",
"=",
"archive_folder",
"ns",
"=",
"nameservers",
"dt",
"=",
"dns_timeout",
"if",
"ssl_context",
"is",
"None",
":",
"ssl_context",
"=",
"create_default_context",
"(",
")",
"server",
"=",
"imapclient",
".",
"IMAPClient",
"(",
"host",
",",
"port",
"=",
"port",
",",
"ssl",
"=",
"ssl",
",",
"ssl_context",
"=",
"ssl_context",
",",
"use_uid",
"=",
"True",
")",
"try",
":",
"server",
".",
"login",
"(",
"username",
",",
"password",
")",
"imap_capabilities",
"=",
"get_imap_capabilities",
"(",
"server",
")",
"if",
"\"IDLE\"",
"not",
"in",
"imap_capabilities",
":",
"raise",
"IMAPError",
"(",
"\"Cannot watch inbox: IMAP server does not support \"",
"\"the IDLE command\"",
")",
"ms",
"=",
"\"MOVE\"",
"in",
"imap_capabilities",
"server",
".",
"select_folder",
"(",
"rf",
")",
"idle_start_time",
"=",
"time",
".",
"monotonic",
"(",
")",
"server",
".",
"idle",
"(",
")",
"except",
"imapclient",
".",
"exceptions",
".",
"IMAPClientError",
"as",
"error",
":",
"error",
"=",
"error",
".",
"__str__",
"(",
")",
".",
"replace",
"(",
"\"b'\"",
",",
"\"\"",
")",
".",
"replace",
"(",
"\"'\"",
",",
"\"\"",
")",
"# Workaround for random Exchange/Office365 IMAP errors",
"if",
"\"unexpected response\"",
"in",
"error",
"or",
"\"BAD\"",
"in",
"error",
":",
"sleep_minutes",
"=",
"5",
"logger",
".",
"debug",
"(",
"\"{0}. \"",
"\"Waiting {1} minutes before trying again\"",
".",
"format",
"(",
"error",
",",
"sleep_minutes",
")",
")",
"logger",
".",
"debug",
"(",
"\"Reconnecting watcher\"",
")",
"try",
":",
"server",
".",
"logout",
"(",
")",
"except",
"Exception",
"as",
"e",
":",
"logger",
".",
"debug",
"(",
"\"Failed to log out: {0}\"",
".",
"format",
"(",
"e",
".",
"__str__",
"(",
")",
")",
")",
"server",
"=",
"imapclient",
".",
"IMAPClient",
"(",
"host",
")",
"server",
".",
"login",
"(",
"username",
",",
"password",
")",
"server",
".",
"select_folder",
"(",
"rf",
")",
"idle_start_time",
"=",
"time",
".",
"monotonic",
"(",
")",
"ms",
"=",
"\"MOVE\"",
"in",
"get_imap_capabilities",
"(",
"server",
")",
"sa",
"=",
"strip_attachment_payloads",
"res",
"=",
"get_dmarc_reports_from_inbox",
"(",
"connection",
"=",
"server",
",",
"move_supported",
"=",
"ms",
",",
"reports_folder",
"=",
"rf",
",",
"archive_folder",
"=",
"af",
",",
"delete",
"=",
"delete",
",",
"test",
"=",
"test",
",",
"nameservers",
"=",
"ns",
",",
"dns_timeout",
"=",
"dt",
",",
"strip_attachment_payloads",
"=",
"sa",
")",
"callback",
"(",
"res",
")",
"server",
".",
"idle",
"(",
")",
"else",
":",
"raise",
"IMAPError",
"(",
"error",
")",
"except",
"socket",
".",
"gaierror",
":",
"raise",
"IMAPError",
"(",
"\"DNS resolution failed\"",
")",
"except",
"ConnectionRefusedError",
":",
"raise",
"IMAPError",
"(",
"\"Connection refused\"",
")",
"except",
"ConnectionResetError",
":",
"logger",
".",
"debug",
"(",
"\"IMAP error: Connection reset\"",
")",
"logger",
".",
"debug",
"(",
"\"Reconnecting watcher\"",
")",
"try",
":",
"server",
".",
"shutdown",
"(",
")",
"except",
"Exception",
"as",
"e",
":",
"logger",
".",
"debug",
"(",
"\"Failed to disconnect: {0}\"",
".",
"format",
"(",
"e",
".",
"__str__",
"(",
")",
")",
")",
"server",
"=",
"imapclient",
".",
"IMAPClient",
"(",
"host",
")",
"server",
".",
"login",
"(",
"username",
",",
"password",
")",
"server",
".",
"select_folder",
"(",
"rf",
")",
"idle_start_time",
"=",
"time",
".",
"monotonic",
"(",
")",
"ms",
"=",
"\"MOVE\"",
"in",
"get_imap_capabilities",
"(",
"server",
")",
"res",
"=",
"get_dmarc_reports_from_inbox",
"(",
"connection",
"=",
"server",
",",
"move_supported",
"=",
"ms",
",",
"reports_folder",
"=",
"rf",
",",
"archive_folder",
"=",
"af",
",",
"delete",
"=",
"delete",
",",
"test",
"=",
"test",
",",
"nameservers",
"=",
"ns",
",",
"dns_timeout",
"=",
"dt",
")",
"callback",
"(",
"res",
")",
"server",
".",
"idle",
"(",
")",
"except",
"KeyError",
":",
"logger",
".",
"debug",
"(",
"\"IMAP error: Server returned unexpected result\"",
")",
"logger",
".",
"debug",
"(",
"\"Reconnecting watcher\"",
")",
"try",
":",
"server",
".",
"logout",
"(",
")",
"except",
"Exception",
"as",
"e",
":",
"logger",
".",
"debug",
"(",
"\"Failed to log out: {0}\"",
".",
"format",
"(",
"e",
".",
"__str__",
"(",
")",
")",
")",
"server",
"=",
"imapclient",
".",
"IMAPClient",
"(",
"host",
")",
"server",
".",
"login",
"(",
"username",
",",
"password",
")",
"server",
".",
"select_folder",
"(",
"rf",
")",
"idle_start_time",
"=",
"time",
".",
"monotonic",
"(",
")",
"ms",
"=",
"\"MOVE\"",
"in",
"get_imap_capabilities",
"(",
"server",
")",
"res",
"=",
"get_dmarc_reports_from_inbox",
"(",
"connection",
"=",
"server",
",",
"move_supported",
"=",
"ms",
",",
"reports_folder",
"=",
"rf",
",",
"archive_folder",
"=",
"af",
",",
"delete",
"=",
"delete",
",",
"test",
"=",
"test",
",",
"nameservers",
"=",
"ns",
",",
"dns_timeout",
"=",
"dt",
")",
"callback",
"(",
"res",
")",
"server",
".",
"idle",
"(",
")",
"except",
"ConnectionAbortedError",
":",
"raise",
"IMAPError",
"(",
"\"Connection aborted\"",
")",
"except",
"TimeoutError",
":",
"raise",
"IMAPError",
"(",
"\"Connection timed out\"",
")",
"except",
"SSLError",
"as",
"error",
":",
"raise",
"IMAPError",
"(",
"\"SSL error: {0}\"",
".",
"format",
"(",
"error",
".",
"__str__",
"(",
")",
")",
")",
"except",
"CertificateError",
"as",
"error",
":",
"raise",
"IMAPError",
"(",
"\"Certificate error: {0}\"",
".",
"format",
"(",
"error",
".",
"__str__",
"(",
")",
")",
")",
"except",
"BrokenPipeError",
":",
"logger",
".",
"debug",
"(",
"\"IMAP error: Broken pipe\"",
")",
"logger",
".",
"debug",
"(",
"\"Reconnecting watcher\"",
")",
"try",
":",
"server",
".",
"shutdown",
"(",
")",
"except",
"Exception",
"as",
"e",
":",
"logger",
".",
"debug",
"(",
"\"Failed to disconnect: {0}\"",
".",
"format",
"(",
"e",
".",
"__str__",
"(",
")",
")",
")",
"server",
"=",
"imapclient",
".",
"IMAPClient",
"(",
"host",
")",
"server",
".",
"login",
"(",
"username",
",",
"password",
")",
"server",
".",
"select_folder",
"(",
"rf",
")",
"idle_start_time",
"=",
"time",
".",
"monotonic",
"(",
")",
"ms",
"=",
"\"MOVE\"",
"in",
"get_imap_capabilities",
"(",
"server",
")",
"res",
"=",
"get_dmarc_reports_from_inbox",
"(",
"connection",
"=",
"server",
",",
"move_supported",
"=",
"ms",
",",
"reports_folder",
"=",
"rf",
",",
"archive_folder",
"=",
"af",
",",
"delete",
"=",
"delete",
",",
"test",
"=",
"test",
",",
"nameservers",
"=",
"ns",
",",
"dns_timeout",
"=",
"dt",
")",
"callback",
"(",
"res",
")",
"server",
".",
"idle",
"(",
")",
"while",
"True",
":",
"try",
":",
"# Refresh the IDLE session every 5 minutes to stay connected",
"if",
"time",
".",
"monotonic",
"(",
")",
"-",
"idle_start_time",
">",
"5",
"*",
"60",
":",
"logger",
".",
"debug",
"(",
"\"IMAP: Refreshing IDLE session\"",
")",
"server",
".",
"idle_done",
"(",
")",
"server",
".",
"idle",
"(",
")",
"idle_start_time",
"=",
"time",
".",
"monotonic",
"(",
")",
"responses",
"=",
"server",
".",
"idle_check",
"(",
"timeout",
"=",
"wait",
")",
"if",
"responses",
"is",
"not",
"None",
":",
"if",
"len",
"(",
"responses",
")",
"==",
"0",
":",
"# Gmail/G-Suite does not generate anything in the responses",
"server",
".",
"idle_done",
"(",
")",
"res",
"=",
"get_dmarc_reports_from_inbox",
"(",
"connection",
"=",
"server",
",",
"move_supported",
"=",
"ms",
",",
"reports_folder",
"=",
"rf",
",",
"archive_folder",
"=",
"af",
",",
"delete",
"=",
"delete",
",",
"test",
"=",
"test",
",",
"nameservers",
"=",
"ns",
",",
"dns_timeout",
"=",
"dt",
")",
"callback",
"(",
"res",
")",
"server",
".",
"idle",
"(",
")",
"idle_start_time",
"=",
"time",
".",
"monotonic",
"(",
")",
"for",
"response",
"in",
"responses",
":",
"logging",
".",
"debug",
"(",
"\"Received response: {0}\"",
".",
"format",
"(",
"response",
")",
")",
"if",
"response",
"[",
"0",
"]",
">",
"0",
"and",
"response",
"[",
"1",
"]",
"==",
"b'RECENT'",
":",
"server",
".",
"idle_done",
"(",
")",
"res",
"=",
"get_dmarc_reports_from_inbox",
"(",
"connection",
"=",
"server",
",",
"move_supported",
"=",
"ms",
",",
"reports_folder",
"=",
"rf",
",",
"archive_folder",
"=",
"af",
",",
"delete",
"=",
"delete",
",",
"test",
"=",
"test",
",",
"nameservers",
"=",
"ns",
",",
"dns_timeout",
"=",
"dt",
")",
"callback",
"(",
"res",
")",
"server",
".",
"idle",
"(",
")",
"idle_start_time",
"=",
"time",
".",
"monotonic",
"(",
")",
"break",
"except",
"imapclient",
".",
"exceptions",
".",
"IMAPClientError",
"as",
"error",
":",
"error",
"=",
"error",
".",
"__str__",
"(",
")",
".",
"replace",
"(",
"\"b'\"",
",",
"\"\"",
")",
".",
"replace",
"(",
"\"'\"",
",",
"\"\"",
")",
"# Workaround for random Exchange/Office365 IMAP errors",
"if",
"\"unexpected response\"",
"in",
"error",
"or",
"\"BAD\"",
"in",
"error",
":",
"sleep_minutes",
"=",
"5",
"logger",
".",
"debug",
"(",
"\"{0}. \"",
"\"Waiting {1} minutes before trying again\"",
".",
"format",
"(",
"error",
",",
"sleep_minutes",
")",
")",
"logger",
".",
"debug",
"(",
"\"Reconnecting watcher\"",
")",
"try",
":",
"server",
".",
"logout",
"(",
")",
"except",
"Exception",
"as",
"e",
":",
"logger",
".",
"debug",
"(",
"\"Failed to disconnect: {0}\"",
".",
"format",
"(",
"e",
".",
"__str__",
"(",
")",
")",
")",
"server",
"=",
"imapclient",
".",
"IMAPClient",
"(",
"host",
")",
"server",
".",
"login",
"(",
"username",
",",
"password",
")",
"server",
".",
"select_folder",
"(",
"rf",
")",
"idle_start_time",
"=",
"time",
".",
"monotonic",
"(",
")",
"ms",
"=",
"\"MOVE\"",
"in",
"get_imap_capabilities",
"(",
"server",
")",
"res",
"=",
"get_dmarc_reports_from_inbox",
"(",
"connection",
"=",
"server",
",",
"move_supported",
"=",
"ms",
",",
"reports_folder",
"=",
"rf",
",",
"archive_folder",
"=",
"af",
",",
"delete",
"=",
"delete",
",",
"test",
"=",
"test",
",",
"nameservers",
"=",
"ns",
",",
"dns_timeout",
"=",
"dt",
")",
"callback",
"(",
"res",
")",
"server",
".",
"idle",
"(",
")",
"else",
":",
"raise",
"IMAPError",
"(",
"error",
")",
"except",
"socket",
".",
"gaierror",
":",
"raise",
"IMAPError",
"(",
"\"DNS resolution failed\"",
")",
"except",
"ConnectionRefusedError",
":",
"raise",
"IMAPError",
"(",
"\"Connection refused\"",
")",
"except",
"(",
"KeyError",
",",
"socket",
".",
"error",
",",
"BrokenPipeError",
",",
"ConnectionResetError",
")",
":",
"logger",
".",
"debug",
"(",
"\"IMAP error: Connection reset\"",
")",
"logger",
".",
"debug",
"(",
"\"Reconnecting watcher\"",
")",
"try",
":",
"server",
".",
"logout",
"(",
")",
"except",
"Exception",
"as",
"e",
":",
"logger",
".",
"debug",
"(",
"\"Failed to disconnect: {0}\"",
".",
"format",
"(",
"e",
".",
"__str__",
"(",
")",
")",
")",
"server",
"=",
"imapclient",
".",
"IMAPClient",
"(",
"host",
")",
"server",
".",
"login",
"(",
"username",
",",
"password",
")",
"server",
".",
"select_folder",
"(",
"rf",
")",
"idle_start_time",
"=",
"time",
".",
"monotonic",
"(",
")",
"ms",
"=",
"\"MOVE\"",
"in",
"get_imap_capabilities",
"(",
"server",
")",
"res",
"=",
"get_dmarc_reports_from_inbox",
"(",
"connection",
"=",
"server",
",",
"move_supported",
"=",
"ms",
",",
"reports_folder",
"=",
"rf",
",",
"archive_folder",
"=",
"af",
",",
"delete",
"=",
"delete",
",",
"test",
"=",
"test",
",",
"nameservers",
"=",
"ns",
",",
"dns_timeout",
"=",
"dt",
")",
"callback",
"(",
"res",
")",
"server",
".",
"idle",
"(",
")",
"except",
"KeyError",
":",
"logger",
".",
"debug",
"(",
"\"IMAP error: Server returned unexpected result\"",
")",
"logger",
".",
"debug",
"(",
"\"Reconnecting watcher\"",
")",
"try",
":",
"server",
".",
"logout",
"(",
")",
"except",
"Exception",
"as",
"e",
":",
"logger",
".",
"debug",
"(",
"\"Failed to log out: {0}\"",
".",
"format",
"(",
"e",
".",
"__str__",
"(",
")",
")",
")",
"server",
"=",
"imapclient",
".",
"IMAPClient",
"(",
"host",
")",
"server",
".",
"login",
"(",
"username",
",",
"password",
")",
"server",
".",
"select_folder",
"(",
"rf",
")",
"idle_start_time",
"=",
"time",
".",
"monotonic",
"(",
")",
"ms",
"=",
"\"MOVE\"",
"in",
"get_imap_capabilities",
"(",
"server",
")",
"res",
"=",
"get_dmarc_reports_from_inbox",
"(",
"connection",
"=",
"server",
",",
"move_supported",
"=",
"ms",
",",
"reports_folder",
"=",
"rf",
",",
"archive_folder",
"=",
"af",
",",
"delete",
"=",
"delete",
",",
"test",
"=",
"test",
",",
"nameservers",
"=",
"ns",
",",
"dns_timeout",
"=",
"dt",
")",
"callback",
"(",
"res",
")",
"server",
".",
"idle",
"(",
")",
"except",
"ConnectionAbortedError",
":",
"raise",
"IMAPError",
"(",
"\"Connection aborted\"",
")",
"except",
"TimeoutError",
":",
"raise",
"IMAPError",
"(",
"\"Connection timed out\"",
")",
"except",
"SSLError",
"as",
"error",
":",
"raise",
"IMAPError",
"(",
"\"SSL error: {0}\"",
".",
"format",
"(",
"error",
".",
"__str__",
"(",
")",
")",
")",
"except",
"CertificateError",
"as",
"error",
":",
"raise",
"IMAPError",
"(",
"\"Certificate error: {0}\"",
".",
"format",
"(",
"error",
".",
"__str__",
"(",
")",
")",
")",
"except",
"BrokenPipeError",
":",
"logger",
".",
"debug",
"(",
"\"IMAP error: Broken pipe\"",
")",
"logger",
".",
"debug",
"(",
"\"Reconnecting watcher\"",
")",
"try",
":",
"server",
".",
"shutdown",
"(",
")",
"except",
"Exception",
"as",
"e",
":",
"logger",
".",
"debug",
"(",
"\"Failed to disconnect: {0}\"",
".",
"format",
"(",
"e",
".",
"__str__",
"(",
")",
")",
")",
"server",
"=",
"imapclient",
".",
"IMAPClient",
"(",
"host",
")",
"server",
".",
"login",
"(",
"username",
",",
"password",
")",
"server",
".",
"select_folder",
"(",
"rf",
")",
"idle_start_time",
"=",
"time",
".",
"monotonic",
"(",
")",
"res",
"=",
"get_dmarc_reports_from_inbox",
"(",
"connection",
"=",
"server",
",",
"move_supported",
"=",
"ms",
",",
"reports_folder",
"=",
"rf",
",",
"archive_folder",
"=",
"af",
",",
"delete",
"=",
"delete",
",",
"test",
"=",
"test",
",",
"nameservers",
"=",
"ns",
",",
"dns_timeout",
"=",
"dt",
")",
"callback",
"(",
"res",
")",
"server",
".",
"idle",
"(",
")",
"except",
"KeyboardInterrupt",
":",
"break",
"try",
":",
"server",
".",
"idle_done",
"(",
")",
"except",
"BrokenPipeError",
":",
"pass"
] |
ecc9fd434c23d896ccd1f35795ccc047f946ed05
|
test
|
HECClient.save_aggregate_reports_to_splunk
|
Saves aggregate DMARC reports to Splunk
Args:
aggregate_reports: A list of aggregate report dictionaries
to save in Splunk
|
parsedmarc/splunk.py
|
def save_aggregate_reports_to_splunk(self, aggregate_reports):
"""
Saves aggregate DMARC reports to Splunk
Args:
aggregate_reports: A list of aggregate report dictionaries
to save in Splunk
"""
logger.debug("Saving aggregate reports to Splunk")
if type(aggregate_reports) == dict:
aggregate_reports = [aggregate_reports]
if len(aggregate_reports) < 1:
return
data = self._common_data.copy()
json_str = ""
for report in aggregate_reports:
for record in report["records"]:
new_report = dict()
for metadata in report["report_metadata"]:
new_report[metadata] = report["report_metadata"][metadata]
new_report["published_policy"] = report["policy_published"]
new_report["source_ip_address"] = record["source"][
"ip_address"]
new_report["source_country"] = record["source"]["country"]
new_report["source_reverse_dns"] = record["source"][
"reverse_dns"]
new_report["source_base_domain"] = record["source"][
"base_domain"]
new_report["message_count"] = record["count"]
new_report["disposition"] = record["policy_evaluated"][
"disposition"
]
new_report["spf_aligned"] = record["alignment"]["spf"]
new_report["dkim_aligned"] = record["alignment"]["dkim"]
new_report["passed_dmarc"] = record["alignment"]["dmarc"]
new_report["header_from"] = record["identifiers"][
"header_from"]
new_report["envelope_from"] = record["identifiers"][
"envelope_from"]
if "dkim" in record["auth_results"]:
new_report["dkim_results"] = record["auth_results"][
"dkim"]
if "spf" in record["auth_results"]:
new_report["spf_results"] = record["auth_results"][
"spf"]
data["sourcetype"] = "dmarc:aggregate"
timestamp = human_timestamp_to_timestamp(
new_report["begin_date"])
data["time"] = timestamp
data["event"] = new_report.copy()
json_str += "{0}\n".format(json.dumps(data))
if not self.session.verify:
logger.debug("Skipping certificate verification for Splunk HEC")
try:
response = self.session.post(self.url, data=json_str,
timeout=self.timeout)
response = response.json()
except Exception as e:
raise SplunkError(e.__str__())
if response["code"] != 0:
raise SplunkError(response["text"])
|
def save_aggregate_reports_to_splunk(self, aggregate_reports):
"""
Saves aggregate DMARC reports to Splunk
Args:
aggregate_reports: A list of aggregate report dictionaries
to save in Splunk
"""
logger.debug("Saving aggregate reports to Splunk")
if type(aggregate_reports) == dict:
aggregate_reports = [aggregate_reports]
if len(aggregate_reports) < 1:
return
data = self._common_data.copy()
json_str = ""
for report in aggregate_reports:
for record in report["records"]:
new_report = dict()
for metadata in report["report_metadata"]:
new_report[metadata] = report["report_metadata"][metadata]
new_report["published_policy"] = report["policy_published"]
new_report["source_ip_address"] = record["source"][
"ip_address"]
new_report["source_country"] = record["source"]["country"]
new_report["source_reverse_dns"] = record["source"][
"reverse_dns"]
new_report["source_base_domain"] = record["source"][
"base_domain"]
new_report["message_count"] = record["count"]
new_report["disposition"] = record["policy_evaluated"][
"disposition"
]
new_report["spf_aligned"] = record["alignment"]["spf"]
new_report["dkim_aligned"] = record["alignment"]["dkim"]
new_report["passed_dmarc"] = record["alignment"]["dmarc"]
new_report["header_from"] = record["identifiers"][
"header_from"]
new_report["envelope_from"] = record["identifiers"][
"envelope_from"]
if "dkim" in record["auth_results"]:
new_report["dkim_results"] = record["auth_results"][
"dkim"]
if "spf" in record["auth_results"]:
new_report["spf_results"] = record["auth_results"][
"spf"]
data["sourcetype"] = "dmarc:aggregate"
timestamp = human_timestamp_to_timestamp(
new_report["begin_date"])
data["time"] = timestamp
data["event"] = new_report.copy()
json_str += "{0}\n".format(json.dumps(data))
if not self.session.verify:
logger.debug("Skipping certificate verification for Splunk HEC")
try:
response = self.session.post(self.url, data=json_str,
timeout=self.timeout)
response = response.json()
except Exception as e:
raise SplunkError(e.__str__())
if response["code"] != 0:
raise SplunkError(response["text"])
|
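The dictionary keys the method reads from each record imply the input shape it expects. The skeleton below is reconstructed from those accesses only; the field values are illustrative placeholders, not a complete parsedmarc aggregate report.

# Skeleton of one aggregate report, derived from the keys accessed above.
aggregate_report = {
    "report_metadata": {"org_name": "example.net",
                        "begin_date": "2019-01-01 00:00:00"},  # begin_date feeds the event timestamp
    "policy_published": {"domain": "example.com", "p": "none"},
    "records": [{
        "source": {"ip_address": "192.0.2.1", "country": "US",
                   "reverse_dns": "mail.example.net", "base_domain": "example.net"},
        "count": 3,
        "policy_evaluated": {"disposition": "none"},
        "alignment": {"spf": True, "dkim": True, "dmarc": True},
        "identifiers": {"header_from": "example.com", "envelope_from": "example.com"},
        "auth_results": {"dkim": [], "spf": []},  # both sub-keys are optional
    }],
}
# hec_client.save_aggregate_reports_to_splunk([aggregate_report])
# (HECClient construction is omitted because its signature is not part of this record.)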
[
"Saves",
"aggregate",
"DMARC",
"reports",
"to",
"Splunk"
] |
domainaware/parsedmarc
|
python
|
https://github.com/domainaware/parsedmarc/blob/ecc9fd434c23d896ccd1f35795ccc047f946ed05/parsedmarc/splunk.py#L58-L123
|
[
"def",
"save_aggregate_reports_to_splunk",
"(",
"self",
",",
"aggregate_reports",
")",
":",
"logger",
".",
"debug",
"(",
"\"Saving aggregate reports to Splunk\"",
")",
"if",
"type",
"(",
"aggregate_reports",
")",
"==",
"dict",
":",
"aggregate_reports",
"=",
"[",
"aggregate_reports",
"]",
"if",
"len",
"(",
"aggregate_reports",
")",
"<",
"1",
":",
"return",
"data",
"=",
"self",
".",
"_common_data",
".",
"copy",
"(",
")",
"json_str",
"=",
"\"\"",
"for",
"report",
"in",
"aggregate_reports",
":",
"for",
"record",
"in",
"report",
"[",
"\"records\"",
"]",
":",
"new_report",
"=",
"dict",
"(",
")",
"for",
"metadata",
"in",
"report",
"[",
"\"report_metadata\"",
"]",
":",
"new_report",
"[",
"metadata",
"]",
"=",
"report",
"[",
"\"report_metadata\"",
"]",
"[",
"metadata",
"]",
"new_report",
"[",
"\"published_policy\"",
"]",
"=",
"report",
"[",
"\"policy_published\"",
"]",
"new_report",
"[",
"\"source_ip_address\"",
"]",
"=",
"record",
"[",
"\"source\"",
"]",
"[",
"\"ip_address\"",
"]",
"new_report",
"[",
"\"source_country\"",
"]",
"=",
"record",
"[",
"\"source\"",
"]",
"[",
"\"country\"",
"]",
"new_report",
"[",
"\"source_reverse_dns\"",
"]",
"=",
"record",
"[",
"\"source\"",
"]",
"[",
"\"reverse_dns\"",
"]",
"new_report",
"[",
"\"source_base_domain\"",
"]",
"=",
"record",
"[",
"\"source\"",
"]",
"[",
"\"base_domain\"",
"]",
"new_report",
"[",
"\"message_count\"",
"]",
"=",
"record",
"[",
"\"count\"",
"]",
"new_report",
"[",
"\"disposition\"",
"]",
"=",
"record",
"[",
"\"policy_evaluated\"",
"]",
"[",
"\"disposition\"",
"]",
"new_report",
"[",
"\"spf_aligned\"",
"]",
"=",
"record",
"[",
"\"alignment\"",
"]",
"[",
"\"spf\"",
"]",
"new_report",
"[",
"\"dkim_aligned\"",
"]",
"=",
"record",
"[",
"\"alignment\"",
"]",
"[",
"\"dkim\"",
"]",
"new_report",
"[",
"\"passed_dmarc\"",
"]",
"=",
"record",
"[",
"\"alignment\"",
"]",
"[",
"\"dmarc\"",
"]",
"new_report",
"[",
"\"header_from\"",
"]",
"=",
"record",
"[",
"\"identifiers\"",
"]",
"[",
"\"header_from\"",
"]",
"new_report",
"[",
"\"envelope_from\"",
"]",
"=",
"record",
"[",
"\"identifiers\"",
"]",
"[",
"\"envelope_from\"",
"]",
"if",
"\"dkim\"",
"in",
"record",
"[",
"\"auth_results\"",
"]",
":",
"new_report",
"[",
"\"dkim_results\"",
"]",
"=",
"record",
"[",
"\"auth_results\"",
"]",
"[",
"\"dkim\"",
"]",
"if",
"\"spf\"",
"in",
"record",
"[",
"\"auth_results\"",
"]",
":",
"new_report",
"[",
"\"spf_results\"",
"]",
"=",
"record",
"[",
"\"auth_results\"",
"]",
"[",
"\"spf\"",
"]",
"data",
"[",
"\"sourcetype\"",
"]",
"=",
"\"dmarc:aggregate\"",
"timestamp",
"=",
"human_timestamp_to_timestamp",
"(",
"new_report",
"[",
"\"begin_date\"",
"]",
")",
"data",
"[",
"\"time\"",
"]",
"=",
"timestamp",
"data",
"[",
"\"event\"",
"]",
"=",
"new_report",
".",
"copy",
"(",
")",
"json_str",
"+=",
"\"{0}\\n\"",
".",
"format",
"(",
"json",
".",
"dumps",
"(",
"data",
")",
")",
"if",
"not",
"self",
".",
"session",
".",
"verify",
":",
"logger",
".",
"debug",
"(",
"\"Skipping certificate verification for Splunk HEC\"",
")",
"try",
":",
"response",
"=",
"self",
".",
"session",
".",
"post",
"(",
"self",
".",
"url",
",",
"data",
"=",
"json_str",
",",
"timeout",
"=",
"self",
".",
"timeout",
")",
"response",
"=",
"response",
".",
"json",
"(",
")",
"except",
"Exception",
"as",
"e",
":",
"raise",
"SplunkError",
"(",
"e",
".",
"__str__",
"(",
")",
")",
"if",
"response",
"[",
"\"code\"",
"]",
"!=",
"0",
":",
"raise",
"SplunkError",
"(",
"response",
"[",
"\"text\"",
"]",
")"
] |
ecc9fd434c23d896ccd1f35795ccc047f946ed05
|
test
|
HECClient.save_forensic_reports_to_splunk
|
Saves forensic DMARC reports to Splunk
Args:
forensic_reports (list): A list of forensic report dictionaries
to save in Splunk
|
parsedmarc/splunk.py
|
def save_forensic_reports_to_splunk(self, forensic_reports):
"""
Saves forensic DMARC reports to Splunk
Args:
forensic_reports (list): A list of forensic report dictionaries
to save in Splunk
"""
logger.debug("Saving forensic reports to Splunk")
if type(forensic_reports) == dict:
forensic_reports = [forensic_reports]
if len(forensic_reports) < 1:
return
json_str = ""
for report in forensic_reports:
data = self._common_data.copy()
data["sourcetype"] = "dmarc:forensic"
timestamp = human_timestamp_to_timestamp(
report["arrival_date_utc"])
data["time"] = timestamp
data["event"] = report.copy()
json_str += "{0}\n".format(json.dumps(data))
if not self.session.verify:
logger.debug("Skipping certificate verification for Splunk HEC")
try:
response = self.session.post(self.url, data=json_str,
timeout=self.timeout)
response = response.json()
except Exception as e:
raise SplunkError(e.__str__())
if response["code"] != 0:
raise SplunkError(response["text"])
|
def save_forensic_reports_to_splunk(self, forensic_reports):
"""
Saves forensic DMARC reports to Splunk
Args:
forensic_reports (list): A list of forensic report dictionaries
to save in Splunk
"""
logger.debug("Saving forensic reports to Splunk")
if type(forensic_reports) == dict:
forensic_reports = [forensic_reports]
if len(forensic_reports) < 1:
return
json_str = ""
for report in forensic_reports:
data = self._common_data.copy()
data["sourcetype"] = "dmarc:forensic"
timestamp = human_timestamp_to_timestamp(
report["arrival_date_utc"])
data["time"] = timestamp
data["event"] = report.copy()
json_str += "{0}\n".format(json.dumps(data))
if not self.session.verify:
logger.debug("Skipping certificate verification for Splunk HEC")
try:
response = self.session.post(self.url, data=json_str,
timeout=self.timeout)
response = response.json()
except Exception as e:
raise SplunkError(e.__str__())
if response["code"] != 0:
raise SplunkError(response["text"])
|
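For comparison with the aggregate case, this method only reads arrival_date_utc from each forensic report; the rest of the dictionary is forwarded to Splunk verbatim as the event body. A minimal, illustrative input:

forensic_report = {
    "arrival_date_utc": "2019-01-01 00:00:00",  # the only key the method itself requires
    "reported_domain": "example.com",           # illustrative extra field, passed through as-is
}
# hec_client.save_forensic_reports_to_splunk([forensic_report])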
[
"Saves",
"forensic",
"DMARC",
"reports",
"to",
"Splunk"
] |
domainaware/parsedmarc
|
python
|
https://github.com/domainaware/parsedmarc/blob/ecc9fd434c23d896ccd1f35795ccc047f946ed05/parsedmarc/splunk.py#L125-L159
|
[
"def",
"save_forensic_reports_to_splunk",
"(",
"self",
",",
"forensic_reports",
")",
":",
"logger",
".",
"debug",
"(",
"\"Saving forensic reports to Splunk\"",
")",
"if",
"type",
"(",
"forensic_reports",
")",
"==",
"dict",
":",
"forensic_reports",
"=",
"[",
"forensic_reports",
"]",
"if",
"len",
"(",
"forensic_reports",
")",
"<",
"1",
":",
"return",
"json_str",
"=",
"\"\"",
"for",
"report",
"in",
"forensic_reports",
":",
"data",
"=",
"self",
".",
"_common_data",
".",
"copy",
"(",
")",
"data",
"[",
"\"sourcetype\"",
"]",
"=",
"\"dmarc:forensic\"",
"timestamp",
"=",
"human_timestamp_to_timestamp",
"(",
"report",
"[",
"\"arrival_date_utc\"",
"]",
")",
"data",
"[",
"\"time\"",
"]",
"=",
"timestamp",
"data",
"[",
"\"event\"",
"]",
"=",
"report",
".",
"copy",
"(",
")",
"json_str",
"+=",
"\"{0}\\n\"",
".",
"format",
"(",
"json",
".",
"dumps",
"(",
"data",
")",
")",
"if",
"not",
"self",
".",
"session",
".",
"verify",
":",
"logger",
".",
"debug",
"(",
"\"Skipping certificate verification for Splunk HEC\"",
")",
"try",
":",
"response",
"=",
"self",
".",
"session",
".",
"post",
"(",
"self",
".",
"url",
",",
"data",
"=",
"json_str",
",",
"timeout",
"=",
"self",
".",
"timeout",
")",
"response",
"=",
"response",
".",
"json",
"(",
")",
"except",
"Exception",
"as",
"e",
":",
"raise",
"SplunkError",
"(",
"e",
".",
"__str__",
"(",
")",
")",
"if",
"response",
"[",
"\"code\"",
"]",
"!=",
"0",
":",
"raise",
"SplunkError",
"(",
"response",
"[",
"\"text\"",
"]",
")"
] |
ecc9fd434c23d896ccd1f35795ccc047f946ed05
|
test
|
decode_base64
|
Decodes a base64 string, with padding being optional
Args:
data: A base64 encoded string
Returns:
bytes: The decoded bytes
|
parsedmarc/utils.py
|
def decode_base64(data):
"""
Decodes a base64 string, with padding being optional
Args:
data: A base64 encoded string
Returns:
bytes: The decoded bytes
"""
data = bytes(data, encoding="ascii")
missing_padding = len(data) % 4
if missing_padding != 0:
data += b'=' * (4 - missing_padding)
return base64.b64decode(data)
|
def decode_base64(data):
"""
Decodes a base64 string, with padding being optional
Args:
data: A base64 encoded string
Returns:
bytes: The decoded bytes
"""
data = bytes(data, encoding="ascii")
missing_padding = len(data) % 4
if missing_padding != 0:
data += b'=' * (4 - missing_padding)
return base64.b64decode(data)
|
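A quick, self-contained check of the padding behaviour described above ("SGVsbG8" is "Hello" without its trailing "="):

from parsedmarc.utils import decode_base64

assert decode_base64("SGVsbG8") == b"Hello"   # missing padding restored automatically
assert decode_base64("SGVsbG8=") == b"Hello"  # already-padded input is unchanged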
[
"Decodes",
"a",
"base64",
"string",
"with",
"padding",
"being",
"optional"
] |
domainaware/parsedmarc
|
python
|
https://github.com/domainaware/parsedmarc/blob/ecc9fd434c23d896ccd1f35795ccc047f946ed05/parsedmarc/utils.py#L54-L69
|
[
"def",
"decode_base64",
"(",
"data",
")",
":",
"data",
"=",
"bytes",
"(",
"data",
",",
"encoding",
"=",
"\"ascii\"",
")",
"missing_padding",
"=",
"len",
"(",
"data",
")",
"%",
"4",
"if",
"missing_padding",
"!=",
"0",
":",
"data",
"+=",
"b'='",
"*",
"(",
"4",
"-",
"missing_padding",
")",
"return",
"base64",
".",
"b64decode",
"(",
"data",
")"
] |
ecc9fd434c23d896ccd1f35795ccc047f946ed05
|
test
|
get_base_domain
|
Gets the base domain name for the given domain
.. note::
Results are based on a list of public domain suffixes at
https://publicsuffix.org/list/public_suffix_list.dat.
Args:
domain (str): A domain or subdomain
use_fresh_psl (bool): Download a fresh Public Suffix List
Returns:
str: The base domain of the given domain
|
parsedmarc/utils.py
|
def get_base_domain(domain, use_fresh_psl=False):
"""
Gets the base domain name for the given domain
.. note::
Results are based on a list of public domain suffixes at
https://publicsuffix.org/list/public_suffix_list.dat.
Args:
domain (str): A domain or subdomain
use_fresh_psl (bool): Download a fresh Public Suffix List
Returns:
str: The base domain of the given domain
"""
psl_path = os.path.join(tempdir, "public_suffix_list.dat")
def download_psl():
url = "https://publicsuffix.org/list/public_suffix_list.dat"
# Use a browser-like user agent string to bypass some proxy blocks
headers = {"User-Agent": USER_AGENT}
fresh_psl = requests.get(url, headers=headers).text
with open(psl_path, "w", encoding="utf-8") as fresh_psl_file:
fresh_psl_file.write(fresh_psl)
if use_fresh_psl:
if not os.path.exists(psl_path):
download_psl()
else:
psl_age = datetime.now() - datetime.fromtimestamp(
os.stat(psl_path).st_mtime)
if psl_age > timedelta(hours=24):
try:
download_psl()
except Exception as error:
logger.warning(
"Failed to download an updated PSL {0}".format(error))
with open(psl_path, encoding="utf-8") as psl_file:
psl = publicsuffix2.PublicSuffixList(psl_file)
return psl.get_public_suffix(domain)
else:
return publicsuffix2.get_public_suffix(domain)
|
def get_base_domain(domain, use_fresh_psl=False):
"""
Gets the base domain name for the given domain
.. note::
Results are based on a list of public domain suffixes at
https://publicsuffix.org/list/public_suffix_list.dat.
Args:
domain (str): A domain or subdomain
use_fresh_psl (bool): Download a fresh Public Suffix List
Returns:
str: The base domain of the given domain
"""
psl_path = os.path.join(tempdir, "public_suffix_list.dat")
def download_psl():
url = "https://publicsuffix.org/list/public_suffix_list.dat"
# Use a browser-like user agent string to bypass some proxy blocks
headers = {"User-Agent": USER_AGENT}
fresh_psl = requests.get(url, headers=headers).text
with open(psl_path, "w", encoding="utf-8") as fresh_psl_file:
fresh_psl_file.write(fresh_psl)
if use_fresh_psl:
if not os.path.exists(psl_path):
download_psl()
else:
psl_age = datetime.now() - datetime.fromtimestamp(
os.stat(psl_path).st_mtime)
if psl_age > timedelta(hours=24):
try:
download_psl()
except Exception as error:
logger.warning(
"Failed to download an updated PSL {0}".format(error))
with open(psl_path, encoding="utf-8") as psl_file:
psl = publicsuffix2.PublicSuffixList(psl_file)
return psl.get_public_suffix(domain)
else:
return publicsuffix2.get_public_suffix(domain)
|
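A short usage sketch. With use_fresh_psl left at its default of False, the function relies on the Public Suffix List copy bundled with publicsuffix2; passing True downloads a fresh list into the temp directory and refreshes it once it is older than 24 hours. The expected outputs in the comments follow from PSL semantics and are not asserted by this record.

from parsedmarc.utils import get_base_domain

print(get_base_domain("mail.example.co.uk"))  # expected: example.co.uk
print(get_base_domain("mail.example.com"))    # expected: example.com
print(get_base_domain("mail.example.com", use_fresh_psl=True))  # same result, freshly downloaded PSL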
[
"Gets",
"the",
"base",
"domain",
"name",
"for",
"the",
"given",
"domain"
] |
domainaware/parsedmarc
|
python
|
https://github.com/domainaware/parsedmarc/blob/ecc9fd434c23d896ccd1f35795ccc047f946ed05/parsedmarc/utils.py#L72-L115
|
[
"def",
"get_base_domain",
"(",
"domain",
",",
"use_fresh_psl",
"=",
"False",
")",
":",
"psl_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"tempdir",
",",
"\"public_suffix_list.dat\"",
")",
"def",
"download_psl",
"(",
")",
":",
"url",
"=",
"\"https://publicsuffix.org/list/public_suffix_list.dat\"",
"# Use a browser-like user agent string to bypass some proxy blocks",
"headers",
"=",
"{",
"\"User-Agent\"",
":",
"USER_AGENT",
"}",
"fresh_psl",
"=",
"requests",
".",
"get",
"(",
"url",
",",
"headers",
"=",
"headers",
")",
".",
"text",
"with",
"open",
"(",
"psl_path",
",",
"\"w\"",
",",
"encoding",
"=",
"\"utf-8\"",
")",
"as",
"fresh_psl_file",
":",
"fresh_psl_file",
".",
"write",
"(",
"fresh_psl",
")",
"if",
"use_fresh_psl",
":",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"psl_path",
")",
":",
"download_psl",
"(",
")",
"else",
":",
"psl_age",
"=",
"datetime",
".",
"now",
"(",
")",
"-",
"datetime",
".",
"fromtimestamp",
"(",
"os",
".",
"stat",
"(",
"psl_path",
")",
".",
"st_mtime",
")",
"if",
"psl_age",
">",
"timedelta",
"(",
"hours",
"=",
"24",
")",
":",
"try",
":",
"download_psl",
"(",
")",
"except",
"Exception",
"as",
"error",
":",
"logger",
".",
"warning",
"(",
"\"Failed to download an updated PSL {0}\"",
".",
"format",
"(",
"error",
")",
")",
"with",
"open",
"(",
"psl_path",
",",
"encoding",
"=",
"\"utf-8\"",
")",
"as",
"psl_file",
":",
"psl",
"=",
"publicsuffix2",
".",
"PublicSuffixList",
"(",
"psl_file",
")",
"return",
"psl",
".",
"get_public_suffix",
"(",
"domain",
")",
"else",
":",
"return",
"publicsuffix2",
".",
"get_public_suffix",
"(",
"domain",
")"
] |
ecc9fd434c23d896ccd1f35795ccc047f946ed05
|
test
|
query_dns
|
Queries DNS
Args:
domain (str): The domain or subdomain to query about
record_type (str): The record type to query for
cache (ExpiringDict): Cache storage
nameservers (list): A list of one or more nameservers to use
(Cloudflare's public DNS resolvers by default)
timeout (float): Sets the DNS timeout in seconds
Returns:
list: A list of answers
|
parsedmarc/utils.py
|
def query_dns(domain, record_type, cache=None, nameservers=None, timeout=2.0):
"""
Queries DNS
Args:
domain (str): The domain or subdomain to query about
record_type (str): The record type to query for
cache (ExpiringDict): Cache storage
nameservers (list): A list of one or more nameservers to use
(Cloudflare's public DNS resolvers by default)
timeout (float): Sets the DNS timeout in seconds
Returns:
list: A list of answers
"""
domain = str(domain).lower()
record_type = record_type.upper()
cache_key = "{0}_{1}".format(domain, record_type)
if cache:
records = cache.get(cache_key, None)
if records:
return records
resolver = dns.resolver.Resolver()
timeout = float(timeout)
if nameservers is None:
nameservers = ["1.1.1.1", "1.0.0.1",
"2606:4700:4700::1111", "2606:4700:4700::1001",
]
resolver.nameservers = nameservers
resolver.timeout = timeout
resolver.lifetime = timeout
if record_type == "TXT":
resource_records = list(map(
lambda r: r.strings,
resolver.query(domain, record_type, tcp=True)))
_resource_record = [
resource_record[0][:0].join(resource_record)
for resource_record in resource_records if resource_record]
records = [r.decode() for r in _resource_record]
else:
records = list(map(
lambda r: r.to_text().replace('"', '').rstrip("."),
resolver.query(domain, record_type, tcp=True)))
if cache:
cache[cache_key] = records
return records
|
def query_dns(domain, record_type, cache=None, nameservers=None, timeout=2.0):
"""
Queries DNS
Args:
domain (str): The domain or subdomain to query about
record_type (str): The record type to query for
cache (ExpiringDict): Cache storage
nameservers (list): A list of one or more nameservers to use
(Cloudflare's public DNS resolvers by default)
timeout (float): Sets the DNS timeout in seconds
Returns:
list: A list of answers
"""
domain = str(domain).lower()
record_type = record_type.upper()
cache_key = "{0}_{1}".format(domain, record_type)
if cache:
records = cache.get(cache_key, None)
if records:
return records
resolver = dns.resolver.Resolver()
timeout = float(timeout)
if nameservers is None:
nameservers = ["1.1.1.1", "1.0.0.1",
"2606:4700:4700::1111", "2606:4700:4700::1001",
]
resolver.nameservers = nameservers
resolver.timeout = timeout
resolver.lifetime = timeout
if record_type == "TXT":
resource_records = list(map(
lambda r: r.strings,
resolver.query(domain, record_type, tcp=True)))
_resource_record = [
resource_record[0][:0].join(resource_record)
for resource_record in resource_records if resource_record]
records = [r.decode() for r in _resource_record]
else:
records = list(map(
lambda r: r.to_text().replace('"', '').rstrip("."),
resolver.query(domain, record_type, tcp=True)))
if cache:
cache[cache_key] = records
return records
|
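A usage sketch with a shared cache, which avoids repeating identical lookups when many report records reference the same domain. The cache sizing values and the queried domain are arbitrary choices, and the printed result depends on live DNS.

from expiringdict import ExpiringDict
from parsedmarc.utils import query_dns

dns_cache = ExpiringDict(max_len=10000, max_age_seconds=1800)
txt_records = query_dns("example.com", "TXT",
                        cache=dns_cache,
                        nameservers=["1.1.1.1", "1.0.0.1"],  # same as the built-in default
                        timeout=2.0)
print(txt_records)  # e.g. ['v=spf1 -all'], depending on the domain's published records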
[
"Queries",
"DNS"
] |
domainaware/parsedmarc
|
python
|
https://github.com/domainaware/parsedmarc/blob/ecc9fd434c23d896ccd1f35795ccc047f946ed05/parsedmarc/utils.py#L118-L165
|
[
"def",
"query_dns",
"(",
"domain",
",",
"record_type",
",",
"cache",
"=",
"None",
",",
"nameservers",
"=",
"None",
",",
"timeout",
"=",
"2.0",
")",
":",
"domain",
"=",
"str",
"(",
"domain",
")",
".",
"lower",
"(",
")",
"record_type",
"=",
"record_type",
".",
"upper",
"(",
")",
"cache_key",
"=",
"\"{0}_{1}\"",
".",
"format",
"(",
"domain",
",",
"record_type",
")",
"if",
"cache",
":",
"records",
"=",
"cache",
".",
"get",
"(",
"cache_key",
",",
"None",
")",
"if",
"records",
":",
"return",
"records",
"resolver",
"=",
"dns",
".",
"resolver",
".",
"Resolver",
"(",
")",
"timeout",
"=",
"float",
"(",
"timeout",
")",
"if",
"nameservers",
"is",
"None",
":",
"nameservers",
"=",
"[",
"\"1.1.1.1\"",
",",
"\"1.0.0.1\"",
",",
"\"2606:4700:4700::1111\"",
",",
"\"2606:4700:4700::1001\"",
",",
"]",
"resolver",
".",
"nameservers",
"=",
"nameservers",
"resolver",
".",
"timeout",
"=",
"timeout",
"resolver",
".",
"lifetime",
"=",
"timeout",
"if",
"record_type",
"==",
"\"TXT\"",
":",
"resource_records",
"=",
"list",
"(",
"map",
"(",
"lambda",
"r",
":",
"r",
".",
"strings",
",",
"resolver",
".",
"query",
"(",
"domain",
",",
"record_type",
",",
"tcp",
"=",
"True",
")",
")",
")",
"_resource_record",
"=",
"[",
"resource_record",
"[",
"0",
"]",
"[",
":",
"0",
"]",
".",
"join",
"(",
"resource_record",
")",
"for",
"resource_record",
"in",
"resource_records",
"if",
"resource_record",
"]",
"records",
"=",
"[",
"r",
".",
"decode",
"(",
")",
"for",
"r",
"in",
"_resource_record",
"]",
"else",
":",
"records",
"=",
"list",
"(",
"map",
"(",
"lambda",
"r",
":",
"r",
".",
"to_text",
"(",
")",
".",
"replace",
"(",
"'\"'",
",",
"''",
")",
".",
"rstrip",
"(",
"\".\"",
")",
",",
"resolver",
".",
"query",
"(",
"domain",
",",
"record_type",
",",
"tcp",
"=",
"True",
")",
")",
")",
"if",
"cache",
":",
"cache",
"[",
"cache_key",
"]",
"=",
"records",
"return",
"records"
] |
ecc9fd434c23d896ccd1f35795ccc047f946ed05
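A hedged usage sketch for the query_dns record above. It assumes the function is importable as parsedmarc.utils.query_dns (inferred from the record's path column) and that the optional expiringdict package supplies the ExpiringDict cache named in the docstring's type hint; the queried domain is only illustrative.

from expiringdict import ExpiringDict  # assumed cache backend, per the docstring's type hint
from parsedmarc.utils import query_dns  # import path inferred from parsedmarc/utils.py

dns_cache = ExpiringDict(max_len=1000, max_age_seconds=1800)
# record_type is upper-cased inside query_dns, so "txt" works; the first call queries
# Cloudflare's resolvers over TCP and the repeat call is answered from the cache.
print(query_dns("example.com", "txt", cache=dns_cache, timeout=5.0))
print(query_dns("example.com", "txt", cache=dns_cache, timeout=5.0))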
|
test
|
get_reverse_dns
|
Resolves an IP address to a hostname using a reverse DNS query
Args:
ip_address (str): The IP address to resolve
cache (ExpiringDict): Cache storage
nameservers (list): A list of one or more nameservers to use
(Cloudflare's public DNS resolvers by default)
timeout (float): Sets the DNS query timeout in seconds
Returns:
str: The reverse DNS hostname (if any)
|
parsedmarc/utils.py
|
def get_reverse_dns(ip_address, cache=None, nameservers=None, timeout=2.0):
"""
Resolves an IP address to a hostname using a reverse DNS query
Args:
ip_address (str): The IP address to resolve
cache (ExpiringDict): Cache storage
nameservers (list): A list of one or more nameservers to use
(Cloudflare's public DNS resolvers by default)
timeout (float): Sets the DNS query timeout in seconds
Returns:
str: The reverse DNS hostname (if any)
"""
hostname = None
try:
address = dns.reversename.from_address(ip_address)
hostname = query_dns(address, "PTR", cache=cache,
nameservers=nameservers,
timeout=timeout)[0]
except dns.exception.DNSException:
pass
return hostname
|
def get_reverse_dns(ip_address, cache=None, nameservers=None, timeout=2.0):
"""
Resolves an IP address to a hostname using a reverse DNS query
Args:
ip_address (str): The IP address to resolve
cache (ExpiringDict): Cache storage
nameservers (list): A list of one or more nameservers to use
(Cloudflare's public DNS resolvers by default)
timeout (float): Sets the DNS query timeout in seconds
Returns:
str: The reverse DNS hostname (if any)
"""
hostname = None
try:
address = dns.reversename.from_address(ip_address)
hostname = query_dns(address, "PTR", cache=cache,
nameservers=nameservers,
timeout=timeout)[0]
except dns.exception.DNSException:
pass
return hostname
|
[
"Resolves",
"an",
"IP",
"address",
"to",
"a",
"hostname",
"using",
"a",
"reverse",
"DNS",
"query"
] |
domainaware/parsedmarc
|
python
|
https://github.com/domainaware/parsedmarc/blob/ecc9fd434c23d896ccd1f35795ccc047f946ed05/parsedmarc/utils.py#L168-L192
|
[
"def",
"get_reverse_dns",
"(",
"ip_address",
",",
"cache",
"=",
"None",
",",
"nameservers",
"=",
"None",
",",
"timeout",
"=",
"2.0",
")",
":",
"hostname",
"=",
"None",
"try",
":",
"address",
"=",
"dns",
".",
"reversename",
".",
"from_address",
"(",
"ip_address",
")",
"hostname",
"=",
"query_dns",
"(",
"address",
",",
"\"PTR\"",
",",
"cache",
"=",
"cache",
",",
"nameservers",
"=",
"nameservers",
",",
"timeout",
"=",
"timeout",
")",
"[",
"0",
"]",
"except",
"dns",
".",
"exception",
".",
"DNSException",
":",
"pass",
"return",
"hostname"
] |
ecc9fd434c23d896ccd1f35795ccc047f946ed05
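A hedged example for the get_reverse_dns record above; the import path is inferred from the record's path column and the address is illustrative.

from parsedmarc.utils import get_reverse_dns

# Prints the PTR hostname (e.g. "one.one.one.one") or None when no PTR record exists
# or the lookup fails, since DNS exceptions are swallowed inside the function.
print(get_reverse_dns("1.1.1.1", timeout=5.0))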
|
test
|
human_timestamp_to_datetime
|
Converts a human-readable timestamp into a Python ``DateTime`` object
Args:
human_timestamp (str): A timestamp string
to_utc (bool): Convert the timestamp to UTC
Returns:
DateTime: The converted timestamp
|
parsedmarc/utils.py
|
def human_timestamp_to_datetime(human_timestamp, to_utc=False):
"""
Converts a human-readable timestamp into a Python ``DateTime`` object
Args:
human_timestamp (str): A timestamp string
to_utc (bool): Convert the timestamp to UTC
Returns:
DateTime: The converted timestamp
"""
settings = {}
if to_utc:
settings = {"TO_TIMEZONE": "UTC"}
return dateparser.parse(human_timestamp, settings=settings)
|
def human_timestamp_to_datetime(human_timestamp, to_utc=False):
"""
Converts a human-readable timestamp into a Python ``DateTime`` object
Args:
human_timestamp (str): A timestamp string
to_utc (bool): Convert the timestamp to UTC
Returns:
DateTime: The converted timestamp
"""
settings = {}
if to_utc:
settings = {"TO_TIMEZONE": "UTC"}
return dateparser.parse(human_timestamp, settings=settings)
|
[
"Converts",
"a",
"human",
"-",
"readable",
"timestamp",
"into",
"a",
"Python",
"DateTime",
"object"
] |
domainaware/parsedmarc
|
python
|
https://github.com/domainaware/parsedmarc/blob/ecc9fd434c23d896ccd1f35795ccc047f946ed05/parsedmarc/utils.py#L221-L238
|
[
"def",
"human_timestamp_to_datetime",
"(",
"human_timestamp",
",",
"to_utc",
"=",
"False",
")",
":",
"settings",
"=",
"{",
"}",
"if",
"to_utc",
":",
"settings",
"=",
"{",
"\"TO_TIMEZONE\"",
":",
"\"UTC\"",
"}",
"return",
"dateparser",
".",
"parse",
"(",
"human_timestamp",
",",
"settings",
"=",
"settings",
")"
] |
ecc9fd434c23d896ccd1f35795ccc047f946ed05
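A short sketch for the dateparser wrapper above (import path inferred from the record); the timestamp string is illustrative.

from parsedmarc.utils import human_timestamp_to_datetime

# to_utc=True makes the wrapper pass settings={"TO_TIMEZONE": "UTC"} to dateparser.parse.
print(human_timestamp_to_datetime("28 Feb 2019 14:30:00 -0500"))
print(human_timestamp_to_datetime("28 Feb 2019 14:30:00 -0500", to_utc=True))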
|
test
|
get_ip_address_country
|
Uses the MaxMind Geolite2 Country database to return the ISO code for the
country associated with the given IPv4 or IPv6 address
Args:
ip_address (str): The IP address to query for
parallel (bool): Parallel processing
Returns:
str: An ISO country code associated with the given IP address
|
parsedmarc/utils.py
|
def get_ip_address_country(ip_address, parallel=False):
"""
Uses the MaxMind Geolite2 Country database to return the ISO code for the
country associated with the given IPv4 or IPv6 address
Args:
ip_address (str): The IP address to query for
parallel (bool): Parallel processing
Returns:
        str: An ISO country code associated with the given IP address
"""
def download_country_database(location="GeoLite2-Country.mmdb"):
"""Downloads the MaxMind Geolite2 Country database
Args:
location (str): Local location for the database file
"""
if parallel:
logging.warning("Cannot download GeoIP database in parallel mode")
return
url = "https://geolite.maxmind.com/download/geoip/database/" \
"GeoLite2-Country.tar.gz"
# Use a browser-like user agent string to bypass some proxy blocks
headers = {"User-Agent": USER_AGENT}
original_filename = "GeoLite2-Country.mmdb"
try:
response = requests.get(url, headers=headers)
response.raise_for_status()
tar_bytes = response.content
tar_file = tarfile.open(fileobj=BytesIO(tar_bytes), mode="r:gz")
tar_dir = tar_file.getnames()[0]
tar_path = "{0}/{1}".format(tar_dir, original_filename)
tar_file.extract(tar_path)
shutil.move(tar_path, location)
shutil.rmtree(tar_dir)
except Exception as e:
logger.warning("Error downloading {0}: {1}".format(url,
e.__str__()))
system_paths = [
"GeoLite2-Country.mmdb",
"/usr/local/share/GeoIP/GeoLite2-Country.mmdb",
"/usr/share/GeoIP/GeoLite2-Country.mmdb",
"/var/lib/GeoIP/GeoLite2-Country.mmdb",
"/var/local/lib/GeoIP/GeoLite2-Country.mmdb",
"C:\\GeoIP\\GeoLite2-Country.mmdb"
]
db_path = None
for system_path in system_paths:
if os.path.exists(system_path):
db_path = system_path
break
if db_path is None:
db_path = os.path.join(tempdir, "GeoLite2-Country.mmdb")
if not os.path.exists(db_path):
download_country_database(db_path)
if not os.path.exists(db_path):
return None
else:
db_age = datetime.now() - datetime.fromtimestamp(
os.stat(db_path).st_mtime)
if db_age > timedelta(days=7):
download_country_database()
db_path = db_path
db_reader = geoip2.database.Reader(db_path)
country = None
try:
country = db_reader.country(ip_address).country.iso_code
except geoip2.errors.AddressNotFoundError:
pass
return country
|
def get_ip_address_country(ip_address, parallel=False):
"""
Uses the MaxMind Geolite2 Country database to return the ISO code for the
country associated with the given IPv4 or IPv6 address
Args:
ip_address (str): The IP address to query for
parallel (bool): Parallel processing
Returns:
        str: An ISO country code associated with the given IP address
"""
def download_country_database(location="GeoLite2-Country.mmdb"):
"""Downloads the MaxMind Geolite2 Country database
Args:
location (str): Local location for the database file
"""
if parallel:
logging.warning("Cannot download GeoIP database in parallel mode")
return
url = "https://geolite.maxmind.com/download/geoip/database/" \
"GeoLite2-Country.tar.gz"
# Use a browser-like user agent string to bypass some proxy blocks
headers = {"User-Agent": USER_AGENT}
original_filename = "GeoLite2-Country.mmdb"
try:
response = requests.get(url, headers=headers)
response.raise_for_status()
tar_bytes = response.content
tar_file = tarfile.open(fileobj=BytesIO(tar_bytes), mode="r:gz")
tar_dir = tar_file.getnames()[0]
tar_path = "{0}/{1}".format(tar_dir, original_filename)
tar_file.extract(tar_path)
shutil.move(tar_path, location)
shutil.rmtree(tar_dir)
except Exception as e:
logger.warning("Error downloading {0}: {1}".format(url,
e.__str__()))
system_paths = [
"GeoLite2-Country.mmdb",
"/usr/local/share/GeoIP/GeoLite2-Country.mmdb",
"/usr/share/GeoIP/GeoLite2-Country.mmdb",
"/var/lib/GeoIP/GeoLite2-Country.mmdb",
"/var/local/lib/GeoIP/GeoLite2-Country.mmdb",
"C:\\GeoIP\\GeoLite2-Country.mmdb"
]
db_path = None
for system_path in system_paths:
if os.path.exists(system_path):
db_path = system_path
break
if db_path is None:
db_path = os.path.join(tempdir, "GeoLite2-Country.mmdb")
if not os.path.exists(db_path):
download_country_database(db_path)
if not os.path.exists(db_path):
return None
else:
db_age = datetime.now() - datetime.fromtimestamp(
os.stat(db_path).st_mtime)
if db_age > timedelta(days=7):
download_country_database()
db_path = db_path
db_reader = geoip2.database.Reader(db_path)
country = None
try:
country = db_reader.country(ip_address).country.iso_code
except geoip2.errors.AddressNotFoundError:
pass
return country
|
[
"Uses",
"the",
"MaxMind",
"Geolite2",
"Country",
"database",
"to",
"return",
"the",
"ISO",
"code",
"for",
"the",
"country",
"associated",
"with",
"the",
"given",
"IPv4",
"or",
"IPv6",
"address"
] |
domainaware/parsedmarc
|
python
|
https://github.com/domainaware/parsedmarc/blob/ecc9fd434c23d896ccd1f35795ccc047f946ed05/parsedmarc/utils.py#L255-L333
|
[
"def",
"get_ip_address_country",
"(",
"ip_address",
",",
"parallel",
"=",
"False",
")",
":",
"def",
"download_country_database",
"(",
"location",
"=",
"\"GeoLite2-Country.mmdb\"",
")",
":",
"\"\"\"Downloads the MaxMind Geolite2 Country database\n\n Args:\n location (str): Local location for the database file\n \"\"\"",
"if",
"parallel",
":",
"logging",
".",
"warning",
"(",
"\"Cannot download GeoIP database in parallel mode\"",
")",
"return",
"url",
"=",
"\"https://geolite.maxmind.com/download/geoip/database/\"",
"\"GeoLite2-Country.tar.gz\"",
"# Use a browser-like user agent string to bypass some proxy blocks",
"headers",
"=",
"{",
"\"User-Agent\"",
":",
"USER_AGENT",
"}",
"original_filename",
"=",
"\"GeoLite2-Country.mmdb\"",
"try",
":",
"response",
"=",
"requests",
".",
"get",
"(",
"url",
",",
"headers",
"=",
"headers",
")",
"response",
".",
"raise_for_status",
"(",
")",
"tar_bytes",
"=",
"response",
".",
"content",
"tar_file",
"=",
"tarfile",
".",
"open",
"(",
"fileobj",
"=",
"BytesIO",
"(",
"tar_bytes",
")",
",",
"mode",
"=",
"\"r:gz\"",
")",
"tar_dir",
"=",
"tar_file",
".",
"getnames",
"(",
")",
"[",
"0",
"]",
"tar_path",
"=",
"\"{0}/{1}\"",
".",
"format",
"(",
"tar_dir",
",",
"original_filename",
")",
"tar_file",
".",
"extract",
"(",
"tar_path",
")",
"shutil",
".",
"move",
"(",
"tar_path",
",",
"location",
")",
"shutil",
".",
"rmtree",
"(",
"tar_dir",
")",
"except",
"Exception",
"as",
"e",
":",
"logger",
".",
"warning",
"(",
"\"Error downloading {0}: {1}\"",
".",
"format",
"(",
"url",
",",
"e",
".",
"__str__",
"(",
")",
")",
")",
"system_paths",
"=",
"[",
"\"GeoLite2-Country.mmdb\"",
",",
"\"/usr/local/share/GeoIP/GeoLite2-Country.mmdb\"",
",",
"\"/usr/share/GeoIP/GeoLite2-Country.mmdb\"",
",",
"\"/var/lib/GeoIP/GeoLite2-Country.mmdb\"",
",",
"\"/var/local/lib/GeoIP/GeoLite2-Country.mmdb\"",
",",
"\"C:\\\\GeoIP\\\\GeoLite2-Country.mmdb\"",
"]",
"db_path",
"=",
"None",
"for",
"system_path",
"in",
"system_paths",
":",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"system_path",
")",
":",
"db_path",
"=",
"system_path",
"break",
"if",
"db_path",
"is",
"None",
":",
"db_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"tempdir",
",",
"\"GeoLite2-Country.mmdb\"",
")",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"db_path",
")",
":",
"download_country_database",
"(",
"db_path",
")",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"db_path",
")",
":",
"return",
"None",
"else",
":",
"db_age",
"=",
"datetime",
".",
"now",
"(",
")",
"-",
"datetime",
".",
"fromtimestamp",
"(",
"os",
".",
"stat",
"(",
"db_path",
")",
".",
"st_mtime",
")",
"if",
"db_age",
">",
"timedelta",
"(",
"days",
"=",
"7",
")",
":",
"download_country_database",
"(",
")",
"db_path",
"=",
"db_path",
"db_reader",
"=",
"geoip2",
".",
"database",
".",
"Reader",
"(",
"db_path",
")",
"country",
"=",
"None",
"try",
":",
"country",
"=",
"db_reader",
".",
"country",
"(",
"ip_address",
")",
".",
"country",
".",
"iso_code",
"except",
"geoip2",
".",
"errors",
".",
"AddressNotFoundError",
":",
"pass",
"return",
"country"
] |
ecc9fd434c23d896ccd1f35795ccc047f946ed05
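A hedged usage sketch for the GeoIP lookup above (import path inferred from the record). Note that when no GeoLite2-Country.mmdb copy is found in the listed system paths, the function attempts to download one into a temporary directory, so the first call may need network access and can be slow.

from parsedmarc.utils import get_ip_address_country

# Returns a two-letter ISO country code (e.g. "US") or None when the address is not in the database.
print(get_ip_address_country("8.8.8.8"))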
|
test
|
get_ip_address_info
|
Returns reverse DNS and country information for the given IP address
Args:
ip_address (str): The IP address to check
cache (ExpiringDict): Cache storage
nameservers (list): A list of one or more nameservers to use
(Cloudflare's public DNS resolvers by default)
timeout (float): Sets the DNS timeout in seconds
parallel (bool): parallel processing
Returns:
OrderedDict: ``ip_address``, ``country``, ``reverse_dns``, ``base_domain``
|
parsedmarc/utils.py
|
def get_ip_address_info(ip_address, cache=None, nameservers=None,
timeout=2.0, parallel=False):
"""
Returns reverse DNS and country information for the given IP address
Args:
ip_address (str): The IP address to check
cache (ExpiringDict): Cache storage
nameservers (list): A list of one or more nameservers to use
(Cloudflare's public DNS resolvers by default)
timeout (float): Sets the DNS timeout in seconds
parallel (bool): parallel processing
Returns:
        OrderedDict: ``ip_address``, ``country``, ``reverse_dns``, ``base_domain``
"""
ip_address = ip_address.lower()
if cache:
info = cache.get(ip_address, None)
if info:
return info
info = OrderedDict()
info["ip_address"] = ip_address
reverse_dns = get_reverse_dns(ip_address,
nameservers=nameservers,
timeout=timeout)
country = get_ip_address_country(ip_address, parallel=parallel)
info["country"] = country
info["reverse_dns"] = reverse_dns
info["base_domain"] = None
if reverse_dns is not None:
base_domain = get_base_domain(reverse_dns)
info["base_domain"] = base_domain
return info
|
def get_ip_address_info(ip_address, cache=None, nameservers=None,
timeout=2.0, parallel=False):
"""
Returns reverse DNS and country information for the given IP address
Args:
ip_address (str): The IP address to check
cache (ExpiringDict): Cache storage
nameservers (list): A list of one or more nameservers to use
(Cloudflare's public DNS resolvers by default)
timeout (float): Sets the DNS timeout in seconds
parallel (bool): parallel processing
Returns:
        OrderedDict: ``ip_address``, ``country``, ``reverse_dns``, ``base_domain``
"""
ip_address = ip_address.lower()
if cache:
info = cache.get(ip_address, None)
if info:
return info
info = OrderedDict()
info["ip_address"] = ip_address
reverse_dns = get_reverse_dns(ip_address,
nameservers=nameservers,
timeout=timeout)
country = get_ip_address_country(ip_address, parallel=parallel)
info["country"] = country
info["reverse_dns"] = reverse_dns
info["base_domain"] = None
if reverse_dns is not None:
base_domain = get_base_domain(reverse_dns)
info["base_domain"] = base_domain
return info
|
[
"Returns",
"reverse",
"DNS",
"and",
"country",
"information",
"for",
"the",
"given",
"IP",
"address"
] |
domainaware/parsedmarc
|
python
|
https://github.com/domainaware/parsedmarc/blob/ecc9fd434c23d896ccd1f35795ccc047f946ed05/parsedmarc/utils.py#L336-L371
|
[
"def",
"get_ip_address_info",
"(",
"ip_address",
",",
"cache",
"=",
"None",
",",
"nameservers",
"=",
"None",
",",
"timeout",
"=",
"2.0",
",",
"parallel",
"=",
"False",
")",
":",
"ip_address",
"=",
"ip_address",
".",
"lower",
"(",
")",
"if",
"cache",
":",
"info",
"=",
"cache",
".",
"get",
"(",
"ip_address",
",",
"None",
")",
"if",
"info",
":",
"return",
"info",
"info",
"=",
"OrderedDict",
"(",
")",
"info",
"[",
"\"ip_address\"",
"]",
"=",
"ip_address",
"reverse_dns",
"=",
"get_reverse_dns",
"(",
"ip_address",
",",
"nameservers",
"=",
"nameservers",
",",
"timeout",
"=",
"timeout",
")",
"country",
"=",
"get_ip_address_country",
"(",
"ip_address",
",",
"parallel",
"=",
"parallel",
")",
"info",
"[",
"\"country\"",
"]",
"=",
"country",
"info",
"[",
"\"reverse_dns\"",
"]",
"=",
"reverse_dns",
"info",
"[",
"\"base_domain\"",
"]",
"=",
"None",
"if",
"reverse_dns",
"is",
"not",
"None",
":",
"base_domain",
"=",
"get_base_domain",
"(",
"reverse_dns",
")",
"info",
"[",
"\"base_domain\"",
"]",
"=",
"base_domain",
"return",
"info"
] |
ecc9fd434c23d896ccd1f35795ccc047f946ed05
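A hedged sketch that exercises the combined helper above (import path inferred from the record); the address is illustrative and the optional cache argument is omitted.

from parsedmarc.utils import get_ip_address_info

info = get_ip_address_info("1.1.1.1", timeout=5.0)
# info is an OrderedDict with ip_address, country, reverse_dns and base_domain keys.
for key, value in info.items():
    print(key, value)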
|
test
|
get_filename_safe_string
|
Converts a string to a string that is safe for a filename
Args:
string (str): A string to make safe for a filename
Returns:
str: A string safe for a filename
|
parsedmarc/utils.py
|
def get_filename_safe_string(string):
"""
Converts a string to a string that is safe for a filename
Args:
string (str): A string to make safe for a filename
Returns:
str: A string safe for a filename
"""
invalid_filename_chars = ['\\', '/', ':', '"', '*', '?', '|', '\n',
'\r']
if string is None:
string = "None"
for char in invalid_filename_chars:
string = string.replace(char, "")
string = string.rstrip(".")
return string
|
def get_filename_safe_string(string):
"""
Converts a string to a string that is safe for a filename
Args:
string (str): A string to make safe for a filename
Returns:
str: A string safe for a filename
"""
invalid_filename_chars = ['\\', '/', ':', '"', '*', '?', '|', '\n',
'\r']
if string is None:
string = "None"
for char in invalid_filename_chars:
string = string.replace(char, "")
string = string.rstrip(".")
return string
|
[
"Converts",
"a",
"string",
"to",
"a",
"string",
"that",
"is",
"safe",
"for",
"a",
"filename",
"Args",
":",
"string",
"(",
"str",
")",
":",
"A",
"string",
"to",
"make",
"safe",
"for",
"a",
"filename"
] |
domainaware/parsedmarc
|
python
|
https://github.com/domainaware/parsedmarc/blob/ecc9fd434c23d896ccd1f35795ccc047f946ed05/parsedmarc/utils.py#L393-L410
|
[
"def",
"get_filename_safe_string",
"(",
"string",
")",
":",
"invalid_filename_chars",
"=",
"[",
"'\\\\'",
",",
"'/'",
",",
"':'",
",",
"'\"'",
",",
"'*'",
",",
"'?'",
",",
"'|'",
",",
"'\\n'",
",",
"'\\r'",
"]",
"if",
"string",
"is",
"None",
":",
"string",
"=",
"\"None\"",
"for",
"char",
"in",
"invalid_filename_chars",
":",
"string",
"=",
"string",
".",
"replace",
"(",
"char",
",",
"\"\"",
")",
"string",
"=",
"string",
".",
"rstrip",
"(",
"\".\"",
")",
"return",
"string"
] |
ecc9fd434c23d896ccd1f35795ccc047f946ed05
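A quick illustration of the sanitizer above, based on the character list in the function body; the input strings are made up.

from parsedmarc.utils import get_filename_safe_string

print(get_filename_safe_string('Report: "weekly" results?\n'))  # Report weekly results
print(get_filename_safe_string(None))                           # the literal string "None"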
|
test
|
convert_outlook_msg
|
Uses the ``msgconvert`` Perl utility to convert an Outlook MSG file to
standard RFC 822 format
Args:
msg_bytes (bytes): the content of the .msg file
Returns:
An RFC 822 string
|
parsedmarc/utils.py
|
def convert_outlook_msg(msg_bytes):
"""
    Uses the ``msgconvert`` Perl utility to convert an Outlook MSG file to
standard RFC 822 format
Args:
msg_bytes (bytes): the content of the .msg file
Returns:
        An RFC 822 string
"""
if not is_outlook_msg(msg_bytes):
raise ValueError("The supplied bytes are not an Outlook MSG file")
orig_dir = os.getcwd()
tmp_dir = tempfile.mkdtemp()
os.chdir(tmp_dir)
with open("sample.msg", "wb") as msg_file:
msg_file.write(msg_bytes)
try:
subprocess.check_call(["msgconvert", "sample.msg"],
stdout=null_file, stderr=null_file)
eml_path = "sample.eml"
with open(eml_path, "rb") as eml_file:
rfc822 = eml_file.read()
except FileNotFoundError:
raise EmailParserError(
"Failed to convert Outlook MSG: msgconvert utility not found")
finally:
os.chdir(orig_dir)
shutil.rmtree(tmp_dir)
return rfc822
|
def convert_outlook_msg(msg_bytes):
"""
    Uses the ``msgconvert`` Perl utility to convert an Outlook MSG file to
standard RFC 822 format
Args:
msg_bytes (bytes): the content of the .msg file
Returns:
        An RFC 822 string
"""
if not is_outlook_msg(msg_bytes):
raise ValueError("The supplied bytes are not an Outlook MSG file")
orig_dir = os.getcwd()
tmp_dir = tempfile.mkdtemp()
os.chdir(tmp_dir)
with open("sample.msg", "wb") as msg_file:
msg_file.write(msg_bytes)
try:
subprocess.check_call(["msgconvert", "sample.msg"],
stdout=null_file, stderr=null_file)
eml_path = "sample.eml"
with open(eml_path, "rb") as eml_file:
rfc822 = eml_file.read()
except FileNotFoundError:
raise EmailParserError(
"Failed to convert Outlook MSG: msgconvert utility not found")
finally:
os.chdir(orig_dir)
shutil.rmtree(tmp_dir)
return rfc822
|
[
"Uses",
"the",
"msgconvert",
"Perl",
"utility",
"to",
"convert",
"an",
"Outlook",
"MS",
"file",
"to",
"standard",
"RFC",
"822",
"format"
] |
domainaware/parsedmarc
|
python
|
https://github.com/domainaware/parsedmarc/blob/ecc9fd434c23d896ccd1f35795ccc047f946ed05/parsedmarc/utils.py#L427-L458
|
[
"def",
"convert_outlook_msg",
"(",
"msg_bytes",
")",
":",
"if",
"not",
"is_outlook_msg",
"(",
"msg_bytes",
")",
":",
"raise",
"ValueError",
"(",
"\"The supplied bytes are not an Outlook MSG file\"",
")",
"orig_dir",
"=",
"os",
".",
"getcwd",
"(",
")",
"tmp_dir",
"=",
"tempfile",
".",
"mkdtemp",
"(",
")",
"os",
".",
"chdir",
"(",
"tmp_dir",
")",
"with",
"open",
"(",
"\"sample.msg\"",
",",
"\"wb\"",
")",
"as",
"msg_file",
":",
"msg_file",
".",
"write",
"(",
"msg_bytes",
")",
"try",
":",
"subprocess",
".",
"check_call",
"(",
"[",
"\"msgconvert\"",
",",
"\"sample.msg\"",
"]",
",",
"stdout",
"=",
"null_file",
",",
"stderr",
"=",
"null_file",
")",
"eml_path",
"=",
"\"sample.eml\"",
"with",
"open",
"(",
"eml_path",
",",
"\"rb\"",
")",
"as",
"eml_file",
":",
"rfc822",
"=",
"eml_file",
".",
"read",
"(",
")",
"except",
"FileNotFoundError",
":",
"raise",
"EmailParserError",
"(",
"\"Failed to convert Outlook MSG: msgconvert utility not found\"",
")",
"finally",
":",
"os",
".",
"chdir",
"(",
"orig_dir",
")",
"shutil",
".",
"rmtree",
"(",
"tmp_dir",
")",
"return",
"rfc822"
] |
ecc9fd434c23d896ccd1f35795ccc047f946ed05
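A hedged sketch for the converter above. It assumes the external msgconvert utility is installed and on PATH, and "sample.msg" is a hypothetical path to an Outlook .msg file; non-MSG input raises ValueError before msgconvert is ever invoked.

from parsedmarc.utils import convert_outlook_msg

with open("sample.msg", "rb") as msg_file:  # hypothetical input file
    rfc822_bytes = convert_outlook_msg(msg_file.read())
# The result is the raw RFC 822 message as bytes, read back from msgconvert's .eml output.
print(rfc822_bytes[:200])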
|
test
|
parse_email
|
A simplified email parser
Args:
data: The RFC 822 message string, or MSG binary
strip_attachment_payloads (bool): Remove attachment payloads
Returns (dict): Parsed email data
|
parsedmarc/utils.py
|
def parse_email(data, strip_attachment_payloads=False):
"""
A simplified email parser
Args:
data: The RFC 822 message string, or MSG binary
strip_attachment_payloads (bool): Remove attachment payloads
Returns (dict): Parsed email data
"""
if type(data) == bytes:
if is_outlook_msg(data):
data = convert_outlook_msg(data)
data = data.decode("utf-8", errors="replace")
parsed_email = mailparser.parse_from_string(data)
headers = json.loads(parsed_email.headers_json).copy()
parsed_email = json.loads(parsed_email.mail_json).copy()
parsed_email["headers"] = headers
if "received" in parsed_email:
for received in parsed_email["received"]:
if "date_utc" in received:
if received["date_utc"] is None:
del received["date_utc"]
else:
received["date_utc"] = received["date_utc"].replace("T",
" ")
if "from" not in parsed_email:
if "From" in parsed_email["headers"]:
            parsed_email["from"] = parsed_email["headers"]["From"]
else:
parsed_email["from"] = None
if parsed_email["from"] is not None:
parsed_email["from"] = parse_email_address(parsed_email["from"][0])
if "date" in parsed_email:
parsed_email["date"] = parsed_email["date"].replace("T", " ")
else:
parsed_email["date"] = None
if "reply_to" in parsed_email:
parsed_email["reply_to"] = list(map(lambda x: parse_email_address(x),
parsed_email["reply_to"]))
else:
parsed_email["reply_to"] = []
if "to" in parsed_email:
parsed_email["to"] = list(map(lambda x: parse_email_address(x),
parsed_email["to"]))
else:
parsed_email["to"] = []
if "cc" in parsed_email:
parsed_email["cc"] = list(map(lambda x: parse_email_address(x),
parsed_email["cc"]))
else:
parsed_email["cc"] = []
if "bcc" in parsed_email:
parsed_email["bcc"] = list(map(lambda x: parse_email_address(x),
parsed_email["bcc"]))
else:
parsed_email["bcc"] = []
if "delivered_to" in parsed_email:
parsed_email["delivered_to"] = list(
map(lambda x: parse_email_address(x),
parsed_email["delivered_to"])
)
if "attachments" not in parsed_email:
parsed_email["attachments"] = []
else:
for attachment in parsed_email["attachments"]:
if "payload" in attachment:
payload = attachment["payload"]
try:
if "content_transfer_encoding" in attachment:
if attachment["content_transfer_encoding"] == "base64":
payload = decode_base64(payload)
else:
payload = str.encode(payload)
attachment["sha256"] = hashlib.sha256(payload).hexdigest()
except Exception as e:
logger.debug("Unable to decode attachment: {0}".format(
e.__str__()
))
if strip_attachment_payloads:
for attachment in parsed_email["attachments"]:
if "payload" in attachment:
del attachment["payload"]
if "subject" not in parsed_email:
parsed_email["subject"] = None
parsed_email["filename_safe_subject"] = get_filename_safe_string(
parsed_email["subject"])
if "body" not in parsed_email:
parsed_email["body"] = None
return parsed_email
|
def parse_email(data, strip_attachment_payloads=False):
"""
A simplified email parser
Args:
data: The RFC 822 message string, or MSG binary
strip_attachment_payloads (bool): Remove attachment payloads
Returns (dict): Parsed email data
"""
if type(data) == bytes:
if is_outlook_msg(data):
data = convert_outlook_msg(data)
data = data.decode("utf-8", errors="replace")
parsed_email = mailparser.parse_from_string(data)
headers = json.loads(parsed_email.headers_json).copy()
parsed_email = json.loads(parsed_email.mail_json).copy()
parsed_email["headers"] = headers
if "received" in parsed_email:
for received in parsed_email["received"]:
if "date_utc" in received:
if received["date_utc"] is None:
del received["date_utc"]
else:
received["date_utc"] = received["date_utc"].replace("T",
" ")
if "from" not in parsed_email:
if "From" in parsed_email["headers"]:
            parsed_email["from"] = parsed_email["headers"]["From"]
else:
parsed_email["from"] = None
if parsed_email["from"] is not None:
parsed_email["from"] = parse_email_address(parsed_email["from"][0])
if "date" in parsed_email:
parsed_email["date"] = parsed_email["date"].replace("T", " ")
else:
parsed_email["date"] = None
if "reply_to" in parsed_email:
parsed_email["reply_to"] = list(map(lambda x: parse_email_address(x),
parsed_email["reply_to"]))
else:
parsed_email["reply_to"] = []
if "to" in parsed_email:
parsed_email["to"] = list(map(lambda x: parse_email_address(x),
parsed_email["to"]))
else:
parsed_email["to"] = []
if "cc" in parsed_email:
parsed_email["cc"] = list(map(lambda x: parse_email_address(x),
parsed_email["cc"]))
else:
parsed_email["cc"] = []
if "bcc" in parsed_email:
parsed_email["bcc"] = list(map(lambda x: parse_email_address(x),
parsed_email["bcc"]))
else:
parsed_email["bcc"] = []
if "delivered_to" in parsed_email:
parsed_email["delivered_to"] = list(
map(lambda x: parse_email_address(x),
parsed_email["delivered_to"])
)
if "attachments" not in parsed_email:
parsed_email["attachments"] = []
else:
for attachment in parsed_email["attachments"]:
if "payload" in attachment:
payload = attachment["payload"]
try:
if "content_transfer_encoding" in attachment:
if attachment["content_transfer_encoding"] == "base64":
payload = decode_base64(payload)
else:
payload = str.encode(payload)
attachment["sha256"] = hashlib.sha256(payload).hexdigest()
except Exception as e:
logger.debug("Unable to decode attachment: {0}".format(
e.__str__()
))
if strip_attachment_payloads:
for attachment in parsed_email["attachments"]:
if "payload" in attachment:
del attachment["payload"]
if "subject" not in parsed_email:
parsed_email["subject"] = None
parsed_email["filename_safe_subject"] = get_filename_safe_string(
parsed_email["subject"])
if "body" not in parsed_email:
parsed_email["body"] = None
return parsed_email
|
[
"A",
"simplified",
"email",
"parser"
] |
domainaware/parsedmarc
|
python
|
https://github.com/domainaware/parsedmarc/blob/ecc9fd434c23d896ccd1f35795ccc047f946ed05/parsedmarc/utils.py#L461-L564
|
[
"def",
"parse_email",
"(",
"data",
",",
"strip_attachment_payloads",
"=",
"False",
")",
":",
"if",
"type",
"(",
"data",
")",
"==",
"bytes",
":",
"if",
"is_outlook_msg",
"(",
"data",
")",
":",
"data",
"=",
"convert_outlook_msg",
"(",
"data",
")",
"data",
"=",
"data",
".",
"decode",
"(",
"\"utf-8\"",
",",
"errors",
"=",
"\"replace\"",
")",
"parsed_email",
"=",
"mailparser",
".",
"parse_from_string",
"(",
"data",
")",
"headers",
"=",
"json",
".",
"loads",
"(",
"parsed_email",
".",
"headers_json",
")",
".",
"copy",
"(",
")",
"parsed_email",
"=",
"json",
".",
"loads",
"(",
"parsed_email",
".",
"mail_json",
")",
".",
"copy",
"(",
")",
"parsed_email",
"[",
"\"headers\"",
"]",
"=",
"headers",
"if",
"\"received\"",
"in",
"parsed_email",
":",
"for",
"received",
"in",
"parsed_email",
"[",
"\"received\"",
"]",
":",
"if",
"\"date_utc\"",
"in",
"received",
":",
"if",
"received",
"[",
"\"date_utc\"",
"]",
"is",
"None",
":",
"del",
"received",
"[",
"\"date_utc\"",
"]",
"else",
":",
"received",
"[",
"\"date_utc\"",
"]",
"=",
"received",
"[",
"\"date_utc\"",
"]",
".",
"replace",
"(",
"\"T\"",
",",
"\" \"",
")",
"if",
"\"from\"",
"not",
"in",
"parsed_email",
":",
"if",
"\"From\"",
"in",
"parsed_email",
"[",
"\"headers\"",
"]",
":",
"parsed_email",
"[",
"\"from\"",
"]",
"=",
"parsed_email",
"[",
"\"Headers\"",
"]",
"[",
"\"From\"",
"]",
"else",
":",
"parsed_email",
"[",
"\"from\"",
"]",
"=",
"None",
"if",
"parsed_email",
"[",
"\"from\"",
"]",
"is",
"not",
"None",
":",
"parsed_email",
"[",
"\"from\"",
"]",
"=",
"parse_email_address",
"(",
"parsed_email",
"[",
"\"from\"",
"]",
"[",
"0",
"]",
")",
"if",
"\"date\"",
"in",
"parsed_email",
":",
"parsed_email",
"[",
"\"date\"",
"]",
"=",
"parsed_email",
"[",
"\"date\"",
"]",
".",
"replace",
"(",
"\"T\"",
",",
"\" \"",
")",
"else",
":",
"parsed_email",
"[",
"\"date\"",
"]",
"=",
"None",
"if",
"\"reply_to\"",
"in",
"parsed_email",
":",
"parsed_email",
"[",
"\"reply_to\"",
"]",
"=",
"list",
"(",
"map",
"(",
"lambda",
"x",
":",
"parse_email_address",
"(",
"x",
")",
",",
"parsed_email",
"[",
"\"reply_to\"",
"]",
")",
")",
"else",
":",
"parsed_email",
"[",
"\"reply_to\"",
"]",
"=",
"[",
"]",
"if",
"\"to\"",
"in",
"parsed_email",
":",
"parsed_email",
"[",
"\"to\"",
"]",
"=",
"list",
"(",
"map",
"(",
"lambda",
"x",
":",
"parse_email_address",
"(",
"x",
")",
",",
"parsed_email",
"[",
"\"to\"",
"]",
")",
")",
"else",
":",
"parsed_email",
"[",
"\"to\"",
"]",
"=",
"[",
"]",
"if",
"\"cc\"",
"in",
"parsed_email",
":",
"parsed_email",
"[",
"\"cc\"",
"]",
"=",
"list",
"(",
"map",
"(",
"lambda",
"x",
":",
"parse_email_address",
"(",
"x",
")",
",",
"parsed_email",
"[",
"\"cc\"",
"]",
")",
")",
"else",
":",
"parsed_email",
"[",
"\"cc\"",
"]",
"=",
"[",
"]",
"if",
"\"bcc\"",
"in",
"parsed_email",
":",
"parsed_email",
"[",
"\"bcc\"",
"]",
"=",
"list",
"(",
"map",
"(",
"lambda",
"x",
":",
"parse_email_address",
"(",
"x",
")",
",",
"parsed_email",
"[",
"\"bcc\"",
"]",
")",
")",
"else",
":",
"parsed_email",
"[",
"\"bcc\"",
"]",
"=",
"[",
"]",
"if",
"\"delivered_to\"",
"in",
"parsed_email",
":",
"parsed_email",
"[",
"\"delivered_to\"",
"]",
"=",
"list",
"(",
"map",
"(",
"lambda",
"x",
":",
"parse_email_address",
"(",
"x",
")",
",",
"parsed_email",
"[",
"\"delivered_to\"",
"]",
")",
")",
"if",
"\"attachments\"",
"not",
"in",
"parsed_email",
":",
"parsed_email",
"[",
"\"attachments\"",
"]",
"=",
"[",
"]",
"else",
":",
"for",
"attachment",
"in",
"parsed_email",
"[",
"\"attachments\"",
"]",
":",
"if",
"\"payload\"",
"in",
"attachment",
":",
"payload",
"=",
"attachment",
"[",
"\"payload\"",
"]",
"try",
":",
"if",
"\"content_transfer_encoding\"",
"in",
"attachment",
":",
"if",
"attachment",
"[",
"\"content_transfer_encoding\"",
"]",
"==",
"\"base64\"",
":",
"payload",
"=",
"decode_base64",
"(",
"payload",
")",
"else",
":",
"payload",
"=",
"str",
".",
"encode",
"(",
"payload",
")",
"attachment",
"[",
"\"sha256\"",
"]",
"=",
"hashlib",
".",
"sha256",
"(",
"payload",
")",
".",
"hexdigest",
"(",
")",
"except",
"Exception",
"as",
"e",
":",
"logger",
".",
"debug",
"(",
"\"Unable to decode attachment: {0}\"",
".",
"format",
"(",
"e",
".",
"__str__",
"(",
")",
")",
")",
"if",
"strip_attachment_payloads",
":",
"for",
"attachment",
"in",
"parsed_email",
"[",
"\"attachments\"",
"]",
":",
"if",
"\"payload\"",
"in",
"attachment",
":",
"del",
"attachment",
"[",
"\"payload\"",
"]",
"if",
"\"subject\"",
"not",
"in",
"parsed_email",
":",
"parsed_email",
"[",
"\"subject\"",
"]",
"=",
"None",
"parsed_email",
"[",
"\"filename_safe_subject\"",
"]",
"=",
"get_filename_safe_string",
"(",
"parsed_email",
"[",
"\"subject\"",
"]",
")",
"if",
"\"body\"",
"not",
"in",
"parsed_email",
":",
"parsed_email",
"[",
"\"body\"",
"]",
"=",
"None",
"return",
"parsed_email"
] |
ecc9fd434c23d896ccd1f35795ccc047f946ed05
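A hedged usage sketch for the simplified parser above (import path inferred from the record); "message.eml" is a hypothetical sample file.

from parsedmarc.utils import parse_email

with open("message.eml", "rb") as eml_file:  # hypothetical RFC 822 message
    parsed = parse_email(eml_file.read(), strip_attachment_payloads=True)
print(parsed["from"], parsed["subject"], parsed["filename_safe_subject"])
print(len(parsed["attachments"]), "attachments (payloads stripped)")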
|
test
|
_str_to_list
|
Converts a comma separated string to a list
|
parsedmarc/cli.py
|
def _str_to_list(s):
"""Converts a comma separated string to a list"""
_list = s.split(",")
return list(map(lambda i: i.lstrip(), _list))
|
def _str_to_list(s):
"""Converts a comma separated string to a list"""
_list = s.split(",")
return list(map(lambda i: i.lstrip(), _list))
|
[
"Converts",
"a",
"comma",
"separated",
"string",
"to",
"a",
"list"
] |
domainaware/parsedmarc
|
python
|
https://github.com/domainaware/parsedmarc/blob/ecc9fd434c23d896ccd1f35795ccc047f946ed05/parsedmarc/cli.py#L27-L30
|
[
"def",
"_str_to_list",
"(",
"s",
")",
":",
"_list",
"=",
"s",
".",
"split",
"(",
"\",\"",
")",
"return",
"list",
"(",
"map",
"(",
"lambda",
"i",
":",
"i",
".",
"lstrip",
"(",
")",
",",
"_list",
")",
")"
] |
ecc9fd434c23d896ccd1f35795ccc047f946ed05
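A tiny check of the helper above. _str_to_list is private to parsedmarc.cli, so importing it directly is only for illustration; note that it strips leading whitespace from each item but leaves trailing whitespace alone.

from parsedmarc.cli import _str_to_list  # private helper, imported only to illustrate its behaviour

print(_str_to_list("1.1.1.1, 8.8.8.8, 9.9.9.9"))  # ['1.1.1.1', '8.8.8.8', '9.9.9.9']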
|
test
|
cli_parse
|
Separated this function for multiprocessing
|
parsedmarc/cli.py
|
def cli_parse(file_path, sa, nameservers, dns_timeout, parallel=False):
"""Separated this function for multiprocessing"""
try:
file_results = parse_report_file(file_path,
nameservers=nameservers,
dns_timeout=dns_timeout,
strip_attachment_payloads=sa,
parallel=parallel)
except ParserError as error:
return error, file_path
finally:
global counter
with counter.get_lock():
counter.value += 1
return file_results, file_path
|
def cli_parse(file_path, sa, nameservers, dns_timeout, parallel=False):
"""Separated this function for multiprocessing"""
try:
file_results = parse_report_file(file_path,
nameservers=nameservers,
dns_timeout=dns_timeout,
strip_attachment_payloads=sa,
parallel=parallel)
except ParserError as error:
return error, file_path
finally:
global counter
with counter.get_lock():
counter.value += 1
return file_results, file_path
|
[
"Separated",
"this",
"function",
"for",
"multiprocessing"
] |
domainaware/parsedmarc
|
python
|
https://github.com/domainaware/parsedmarc/blob/ecc9fd434c23d896ccd1f35795ccc047f946ed05/parsedmarc/cli.py#L33-L47
|
[
"def",
"cli_parse",
"(",
"file_path",
",",
"sa",
",",
"nameservers",
",",
"dns_timeout",
",",
"parallel",
"=",
"False",
")",
":",
"try",
":",
"file_results",
"=",
"parse_report_file",
"(",
"file_path",
",",
"nameservers",
"=",
"nameservers",
",",
"dns_timeout",
"=",
"dns_timeout",
",",
"strip_attachment_payloads",
"=",
"sa",
",",
"parallel",
"=",
"parallel",
")",
"except",
"ParserError",
"as",
"error",
":",
"return",
"error",
",",
"file_path",
"finally",
":",
"global",
"counter",
"with",
"counter",
".",
"get_lock",
"(",
")",
":",
"counter",
".",
"value",
"+=",
"1",
"return",
"file_results",
",",
"file_path"
] |
ecc9fd434c23d896ccd1f35795ccc047f946ed05
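A hedged reconstruction of how the _main record that follows drives cli_parse through a process pool with the shared counter. It assumes the init initializer that _main passes to Pool is importable from parsedmarc.cli, and the report file names are hypothetical.

from itertools import repeat
from multiprocessing import Pool, Value

from parsedmarc.cli import cli_parse, init  # init is the initializer _main hands to Pool

if __name__ == "__main__":
    counter = Value("i", 0)  # shared progress counter incremented inside cli_parse
    files = ["aggregate_report.xml.gz", "forensic_report.eml"]  # hypothetical report paths
    with Pool(2, initializer=init, initargs=(counter,)) as pool:
        results = pool.starmap(cli_parse,
                               zip(files,
                                   repeat(False),  # strip_attachment_payloads
                                   repeat(None),   # nameservers (Cloudflare defaults)
                                   repeat(6.0),    # dns_timeout
                                   repeat(True)))  # parallel
    for outcome, path in results:
        print(path, "->", type(outcome).__name__)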
|
test
|
_main
|
Called when the module is executed
|
parsedmarc/cli.py
|
def _main():
"""Called when the module is executed"""
def process_reports(reports_):
output_str = "{0}\n".format(json.dumps(reports_,
ensure_ascii=False,
indent=2))
if not opts.silent:
print(output_str)
if opts.kafka_hosts:
try:
ssl_context = None
if opts.kafka_skip_certificate_verification:
logger.debug("Skipping Kafka certificate verification")
ssl_context = create_default_context()
ssl_context.check_hostname = False
ssl_context.verify_mode = CERT_NONE
kafka_client = kafkaclient.KafkaClient(
opts.kafka_hosts,
username=opts.kafka_username,
password=opts.kafka_password,
ssl_context=ssl_context
)
except Exception as error_:
logger.error("Kafka Error: {0}".format(error_.__str__()))
if opts.save_aggregate:
for report in reports_["aggregate_reports"]:
try:
if opts.elasticsearch_hosts:
elastic.save_aggregate_report_to_elasticsearch(
report,
index_suffix=opts.elasticsearch_index_suffix,
monthly_indexes=opts.elasticsearch_monthly_indexes)
except elastic.AlreadySaved as warning:
logger.warning(warning.__str__())
except elastic.ElasticsearchError as error_:
logger.error("Elasticsearch Error: {0}".format(
error_.__str__()))
try:
if opts.kafka_hosts:
kafka_client.save_aggregate_reports_to_kafka(
report, kafka_aggregate_topic)
except Exception as error_:
logger.error("Kafka Error: {0}".format(
error_.__str__()))
if opts.hec:
try:
aggregate_reports_ = reports_["aggregate_reports"]
if len(aggregate_reports_) > 0:
hec_client.save_aggregate_reports_to_splunk(
aggregate_reports_)
except splunk.SplunkError as e:
logger.error("Splunk HEC error: {0}".format(e.__str__()))
if opts.save_forensic:
for report in reports_["forensic_reports"]:
try:
if opts.elasticsearch_hosts:
elastic.save_forensic_report_to_elasticsearch(
report,
index_suffix=opts.elasticsearch_index_suffix,
monthly_indexes=opts.elasticsearch_monthly_indexes)
except elastic.AlreadySaved as warning:
logger.warning(warning.__str__())
except elastic.ElasticsearchError as error_:
logger.error("Elasticsearch Error: {0}".format(
error_.__str__()))
except InvalidDMARCReport as error_:
logger.error(error_.__str__())
try:
if opts.kafka_hosts:
kafka_client.save_forensic_reports_to_kafka(
report, kafka_forensic_topic)
except Exception as error_:
logger.error("Kafka Error: {0}".format(
error_.__str__()))
if opts.hec:
try:
forensic_reports_ = reports_["forensic_reports"]
if len(forensic_reports_) > 0:
hec_client.save_forensic_reports_to_splunk(
forensic_reports_)
except splunk.SplunkError as e:
logger.error("Splunk HEC error: {0}".format(e.__str__()))
arg_parser = ArgumentParser(description="Parses DMARC reports")
arg_parser.add_argument("-c", "--config-file",
help="A path to a configuration file "
"(--silent implied)")
arg_parser.add_argument("file_path", nargs="*",
help="one or more paths to aggregate or forensic "
"report files or emails")
strip_attachment_help = "remove attachment payloads from forensic " \
"report output"
arg_parser.add_argument("--strip-attachment-payloads",
help=strip_attachment_help, action="store_true")
arg_parser.add_argument("-o", "--output",
help="write output files to the given directory")
arg_parser.add_argument("-n", "--nameservers", nargs="+",
help="nameservers to query "
"(default is Cloudflare's nameservers)")
arg_parser.add_argument("-t", "--dns_timeout",
help="number of seconds to wait for an answer "
"from DNS (default: 6.0)",
type=float,
default=6.0)
arg_parser.add_argument("-s", "--silent", action="store_true",
help="only print errors and warnings")
arg_parser.add_argument("--debug", action="store_true",
help="print debugging information")
arg_parser.add_argument("--log-file", default=None,
help="output logging to a file")
arg_parser.add_argument("-v", "--version", action="version",
version=__version__)
aggregate_reports = []
forensic_reports = []
args = arg_parser.parse_args()
opts = Namespace(file_path=args.file_path,
config_file=args.config_file,
strip_attachment_payloads=args.strip_attachment_payloads,
output=args.output,
nameservers=args.nameservers,
silent=args.silent,
dns_timeout=args.dns_timeout,
debug=args.debug,
save_aggregate=False,
save_forensic=False,
imap_host=None,
imap_skip_certificate_verification=False,
imap_ssl=True,
imap_port=993,
imap_user=None,
imap_password=None,
imap_reports_folder="INBOX",
imap_archive_folder="Archive",
imap_watch=False,
imap_delete=False,
imap_test=False,
hec=None,
hec_token=None,
hec_index=None,
hec_skip_certificate_verification=False,
elasticsearch_hosts=None,
elasticsearch_index_suffix=None,
elasticsearch_ssl=True,
elasticsearch_ssl_cert_path=None,
elasticsearch_monthly_indexes=False,
kafka_hosts=None,
kafka_username=None,
kafka_password=None,
kafka_aggregate_topic=None,
kafka_forensic_topic=None,
kafka_ssl=False,
kafka_skip_certificate_verification=False,
smtp_host=None,
smtp_port=25,
smtp_ssl=False,
smtp_skip_certificate_verification=False,
smtp_user=None,
smtp_password=None,
smtp_from=None,
smtp_to=[],
smtp_subject="parsedmarc report",
smtp_message="Please see the attached DMARC results.",
log_file=args.log_file,
n_procs=1,
chunk_size=1
)
args = arg_parser.parse_args()
if args.config_file:
abs_path = os.path.abspath(args.config_file)
if not os.path.exists(abs_path):
logger.error("A file does not exist at {0}".format(abs_path))
exit(-1)
opts.silent = True
config = ConfigParser()
config.read(args.config_file)
if "general" in config.sections():
general_config = config["general"]
if "strip_attachment_payloads" in general_config:
opts.strip_attachment_payloads = general_config[
"strip_attachment_payloads"]
if "output" in general_config:
opts.output = general_config["output"]
if "nameservers" in general_config:
opts.nameservers = _str_to_list(general_config["nameservers"])
if "dns_timeout" in general_config:
opts.dns_timeout = general_config.getfloat("dns_timeout")
if "save_aggregate" in general_config:
opts.save_aggregate = general_config["save_aggregate"]
if "save_forensic" in general_config:
opts.save_forensic = general_config["save_forensic"]
if "debug" in general_config:
opts.debug = general_config.getboolean("debug")
if "silent" in general_config:
opts.silent = general_config.getboolean("silent")
if "log_file" in general_config:
opts.log_file = general_config["log_file"]
if "n_procs" in general_config:
opts.n_procs = general_config.getint("n_procs")
if "chunk_size" in general_config:
opts.chunk_size = general_config.getint("chunk_size")
if "imap" in config.sections():
imap_config = config["imap"]
if "host" in imap_config:
opts.imap_host = imap_config["host"]
else:
logger.error("host setting missing from the "
"imap config section")
exit(-1)
if "port" in imap_config:
opts.imap_port = imap_config["port"]
if "ssl" in imap_config:
opts.imap_ssl = imap_config.getboolean("ssl")
if "skip_certificate_verification" in imap_config:
imap_verify = imap_config.getboolean(
"skip_certificate_verification")
opts.imap_skip_certificate_verification = imap_verify
if "user" in imap_config:
opts.imap_user = imap_config["user"]
else:
logger.critical("user setting missing from the "
"imap config section")
exit(-1)
if "password" in imap_config:
opts.imap_password = imap_config["password"]
else:
logger.critical("password setting missing from the "
"imap config section")
exit(-1)
if "reports_folder" in imap_config:
opts.imap_reports_folder = imap_config["reports_folder"]
if "archive_folder" in imap_config:
opts.imap_archive_folder = imap_config["archive_folder"]
if "watch" in imap_config:
opts.imap_watch = imap_config.getboolean("watch")
if "delete" in imap_config:
opts.imap_delete = imap_config.getboolean("delete")
if "test" in imap_config:
opts.imap_test = imap_config.getboolean("test")
if "elasticsearch" in config:
elasticsearch_config = config["elasticsearch"]
if "hosts" in elasticsearch_config:
opts.elasticsearch_hosts = _str_to_list(elasticsearch_config[
"hosts"])
else:
logger.critical("hosts setting missing from the "
"elasticsearch config section")
exit(-1)
if "index_suffix" in elasticsearch_config:
opts.elasticsearch_index_suffix = elasticsearch_config[
"index_suffix"]
if "monthly_indexes" in elasticsearch_config:
monthly = elasticsearch_config.getboolean("monthly_indexes")
opts.elasticsearch_monthly_indexes = monthly
if "ssl" in elasticsearch_config:
opts.elasticsearch_ssl = elasticsearch_config.getboolean(
"ssl")
if "cert_path" in elasticsearch_config:
opts.elasticsearch_ssl_cert_path = elasticsearch_config[
"cert_path"]
if "splunk_hec" in config.sections():
hec_config = config["splunk_hec"]
if "url" in hec_config:
opts.hec = hec_config["url"]
else:
logger.critical("url setting missing from the "
"splunk_hec config section")
exit(-1)
if "token" in hec_config:
opts.hec_token = hec_config["token"]
else:
logger.critical("token setting missing from the "
"splunk_hec config section")
exit(-1)
if "index" in hec_config:
opts.hec_index = hec_config["index"]
else:
logger.critical("index setting missing from the "
"splunk_hec config section")
exit(-1)
if "skip_certificate_verification" in hec_config:
opts.hec_skip_certificate_verification = hec_config[
"skip_certificate_verification"]
if "kafka" in config.sections():
kafka_config = config["kafka"]
if "hosts" in kafka_config:
opts.kafka_hosts = _str_to_list(kafka_config["hosts"])
else:
logger.critical("hosts setting missing from the "
"kafka config section")
exit(-1)
if "user" in kafka_config:
opts.kafka_username = kafka_config["user"]
else:
logger.critical("user setting missing from the "
"kafka config section")
exit(-1)
if "password" in kafka_config:
opts.kafka_password = kafka_config["password"]
else:
logger.critical("password setting missing from the "
"kafka config section")
exit(-1)
if "ssl" in kafka_config:
            opts.kafka_ssl = kafka_config.getboolean("ssl")
if "skip_certificate_verification" in kafka_config:
kafka_verify = kafka_config.getboolean(
"skip_certificate_verification")
opts.kafka_skip_certificate_verification = kafka_verify
if "aggregate_topic" in kafka_config:
            opts.kafka_aggregate_topic = kafka_config["aggregate_topic"]
else:
logger.critical("aggregate_topic setting missing from the "
"kafka config section")
exit(-1)
if "forensic_topic" in kafka_config:
            opts.kafka_forensic_topic = kafka_config["forensic_topic"]
else:
            logger.critical("forensic_topic setting missing from the "
                            "kafka config section")
if "smtp" in config.sections():
smtp_config = config["smtp"]
if "host" in smtp_config:
opts.smtp_host = smtp_config["host"]
else:
logger.critical("host setting missing from the "
"smtp config section")
exit(-1)
if "port" in smtp_config:
opts.smtp_port = smtp_config["port"]
if "ssl" in smtp_config:
opts.smtp_ssl = smtp_config.getboolean("ssl")
if "skip_certificate_verification" in smtp_config:
smtp_verify = smtp_config.getboolean(
"skip_certificate_verification")
opts.smtp_skip_certificate_verification = smtp_verify
if "user" in smtp_config:
opts.smtp_user = smtp_config["user"]
else:
logger.critical("user setting missing from the "
"smtp config section")
exit(-1)
if "password" in smtp_config:
opts.smtp_password = smtp_config["password"]
else:
logger.critical("password setting missing from the "
"smtp config section")
exit(-1)
if "from" in smtp_config:
opts.smtp_from = smtp_config["from"]
else:
logger.critical("from setting missing from the "
"smtp config section")
if "to" in smtp_config:
opts.smtp_to = _str_to_list(smtp_config["to"])
else:
logger.critical("to setting missing from the "
"smtp config section")
if "subject" in smtp_config:
opts.smtp_subject = smtp_config["subject"]
if "attachment" in smtp_config:
opts.smtp_attachment = smtp_config["attachment"]
if "message" in smtp_config:
opts.smtp_message = smtp_config["message"]
logging.basicConfig(level=logging.WARNING)
logger.setLevel(logging.WARNING)
if opts.debug:
logging.basicConfig(level=logging.DEBUG)
logger.setLevel(logging.DEBUG)
if opts.log_file:
fh = logging.FileHandler(opts.log_file)
formatter = logging.Formatter(
'%(asctime)s - '
'%(levelname)s - [%(filename)s:%(lineno)d] - %(message)s')
fh.setFormatter(formatter)
logger.addHandler(fh)
if opts.imap_host is None and len(opts.file_path) == 0:
logger.error("You must supply input files, or an IMAP configuration")
exit(1)
if opts.save_aggregate or opts.save_forensic:
try:
if opts.elasticsearch_hosts:
es_aggregate_index = "dmarc_aggregate"
es_forensic_index = "dmarc_forensic"
if opts.elasticsearch_index_suffix:
suffix = opts.elasticsearch_index_suffix
es_aggregate_index = "{0}_{1}".format(
es_aggregate_index, suffix)
es_forensic_index = "{0}_{1}".format(
es_forensic_index, suffix)
elastic.set_hosts(opts.elasticsearch_hosts,
opts.elasticsearch_ssl,
opts.elasticsearch_ssl_cert_path)
elastic.migrate_indexes(aggregate_indexes=[es_aggregate_index],
forensic_indexes=[es_forensic_index])
except elastic.ElasticsearchError as error:
logger.error("Elasticsearch Error: {0}".format(error.__str__()))
exit(1)
if opts.hec:
if opts.hec_token is None or opts.hec_index is None:
logger.error("HEC token and HEC index are required when "
"using HEC URL")
exit(1)
verify = True
if opts.hec_skip_certificate_verification:
verify = False
hec_client = splunk.HECClient(opts.hec, opts.hec_token,
opts.hec_index,
verify=verify)
kafka_aggregate_topic = opts.kafka_aggregate_topic
kafka_forensic_topic = opts.kafka_forensic_topic
file_paths = []
for file_path in args.file_path:
file_paths += glob(file_path)
file_paths = list(set(file_paths))
counter = Value('i', 0)
pool = Pool(opts.n_procs, initializer=init, initargs=(counter,))
results = pool.starmap_async(cli_parse,
zip(file_paths,
repeat(opts.strip_attachment_payloads),
repeat(opts.nameservers),
repeat(opts.dns_timeout),
repeat(opts.n_procs >= 1)),
opts.chunk_size)
pbar = tqdm(total=len(file_paths))
while not results.ready():
pbar.update(counter.value - pbar.n)
time.sleep(0.1)
pbar.close()
results = results.get()
pool.close()
pool.join()
for result in results:
if type(result[0]) is InvalidDMARCReport:
logger.error("Failed to parse {0} - {1}".format(result[1],
result[0]))
else:
if result[0]["report_type"] == "aggregate":
aggregate_reports.append(result[0]["report"])
elif result[0]["report_type"] == "forensic":
forensic_reports.append(result[0]["report"])
if opts.imap_host:
try:
if opts.imap_user is None or opts.imap_password is None:
                logger.error("IMAP user and password must be specified if "
                             "host is specified")
rf = opts.imap_reports_folder
af = opts.imap_archive_folder
ns = opts.nameservers
sa = opts.strip_attachment_payloads
ssl = True
ssl_context = None
if opts.imap_skip_certificate_verification:
logger.debug("Skipping IMAP certificate verification")
ssl_context = create_default_context()
ssl_context.check_hostname = False
ssl_context.verify_mode = CERT_NONE
if opts.imap_ssl is False:
ssl = False
reports = get_dmarc_reports_from_inbox(host=opts.imap_host,
port=opts.imap_port,
ssl=ssl,
ssl_context=ssl_context,
user=opts.imap_user,
password=opts.imap_password,
reports_folder=rf,
archive_folder=af,
delete=opts.imap_delete,
nameservers=ns,
test=opts.imap_test,
strip_attachment_payloads=sa
)
aggregate_reports += reports["aggregate_reports"]
forensic_reports += reports["forensic_reports"]
except IMAPError as error:
logger.error("IMAP Error: {0}".format(error.__str__()))
exit(1)
results = OrderedDict([("aggregate_reports", aggregate_reports),
("forensic_reports", forensic_reports)])
if opts.output:
save_output(results, output_directory=opts.output)
process_reports(results)
if opts.smtp_host:
try:
ssl_context = None
if opts.smtp_skip_certificate_verification:
logger.debug("Skipping SMTP certificate verification")
ssl_context = create_default_context()
ssl_context.check_hostname = False
ssl_context.verify_mode = CERT_NONE
email_results(results, opts.smtp_host, opts.smtp_from,
opts.smtp_to, ssl=opts.smtp_ssl,
user=opts.smtp_user,
password=opts.smtp_password,
subject=opts.smtp_subject,
ssl_context=ssl_context)
except SMTPError as error:
logger.error("SMTP Error: {0}".format(error.__str__()))
exit(1)
if opts.imap_host and opts.imap_watch:
logger.info("Watching for email - Quit with ctrl-c")
ssl = True
ssl_context = None
if opts.imap_skip_certificate_verification:
logger.debug("Skipping IMAP certificate verification")
ssl_context = create_default_context()
ssl_context.check_hostname = False
ssl_context.verify_mode = CERT_NONE
if opts.imap_ssl is False:
ssl = False
try:
sa = opts.strip_attachment_payloads
watch_inbox(opts.imap_host, opts.imap_user, opts.imap_password,
process_reports, port=opts.imap_port, ssl=ssl,
ssl_context=ssl_context,
reports_folder=opts.imap_reports_folder,
archive_folder=opts.imap_archive_folder,
delete=opts.imap_delete,
test=opts.imap_test, nameservers=opts.nameservers,
dns_timeout=opts.dns_timeout,
strip_attachment_payloads=sa)
except IMAPError as error:
logger.error("IMAP error: {0}".format(error.__str__()))
exit(1)
|
def _main():
"""Called when the module is executed"""
def process_reports(reports_):
output_str = "{0}\n".format(json.dumps(reports_,
ensure_ascii=False,
indent=2))
if not opts.silent:
print(output_str)
if opts.kafka_hosts:
try:
ssl_context = None
if opts.kafka_skip_certificate_verification:
logger.debug("Skipping Kafka certificate verification")
ssl_context = create_default_context()
ssl_context.check_hostname = False
ssl_context.verify_mode = CERT_NONE
kafka_client = kafkaclient.KafkaClient(
opts.kafka_hosts,
username=opts.kafka_username,
password=opts.kafka_password,
ssl_context=ssl_context
)
except Exception as error_:
logger.error("Kafka Error: {0}".format(error_.__str__()))
if opts.save_aggregate:
for report in reports_["aggregate_reports"]:
try:
if opts.elasticsearch_hosts:
elastic.save_aggregate_report_to_elasticsearch(
report,
index_suffix=opts.elasticsearch_index_suffix,
monthly_indexes=opts.elasticsearch_monthly_indexes)
except elastic.AlreadySaved as warning:
logger.warning(warning.__str__())
except elastic.ElasticsearchError as error_:
logger.error("Elasticsearch Error: {0}".format(
error_.__str__()))
try:
if opts.kafka_hosts:
kafka_client.save_aggregate_reports_to_kafka(
report, kafka_aggregate_topic)
except Exception as error_:
logger.error("Kafka Error: {0}".format(
error_.__str__()))
if opts.hec:
try:
aggregate_reports_ = reports_["aggregate_reports"]
if len(aggregate_reports_) > 0:
hec_client.save_aggregate_reports_to_splunk(
aggregate_reports_)
except splunk.SplunkError as e:
logger.error("Splunk HEC error: {0}".format(e.__str__()))
if opts.save_forensic:
for report in reports_["forensic_reports"]:
try:
if opts.elasticsearch_hosts:
elastic.save_forensic_report_to_elasticsearch(
report,
index_suffix=opts.elasticsearch_index_suffix,
monthly_indexes=opts.elasticsearch_monthly_indexes)
except elastic.AlreadySaved as warning:
logger.warning(warning.__str__())
except elastic.ElasticsearchError as error_:
logger.error("Elasticsearch Error: {0}".format(
error_.__str__()))
except InvalidDMARCReport as error_:
logger.error(error_.__str__())
try:
if opts.kafka_hosts:
kafka_client.save_forensic_reports_to_kafka(
report, kafka_forensic_topic)
except Exception as error_:
logger.error("Kafka Error: {0}".format(
error_.__str__()))
if opts.hec:
try:
forensic_reports_ = reports_["forensic_reports"]
if len(forensic_reports_) > 0:
hec_client.save_forensic_reports_to_splunk(
forensic_reports_)
except splunk.SplunkError as e:
logger.error("Splunk HEC error: {0}".format(e.__str__()))
arg_parser = ArgumentParser(description="Parses DMARC reports")
arg_parser.add_argument("-c", "--config-file",
help="A path to a configuration file "
"(--silent implied)")
arg_parser.add_argument("file_path", nargs="*",
help="one or more paths to aggregate or forensic "
"report files or emails")
strip_attachment_help = "remove attachment payloads from forensic " \
"report output"
arg_parser.add_argument("--strip-attachment-payloads",
help=strip_attachment_help, action="store_true")
arg_parser.add_argument("-o", "--output",
help="write output files to the given directory")
arg_parser.add_argument("-n", "--nameservers", nargs="+",
help="nameservers to query "
"(default is Cloudflare's nameservers)")
arg_parser.add_argument("-t", "--dns_timeout",
help="number of seconds to wait for an answer "
"from DNS (default: 6.0)",
type=float,
default=6.0)
arg_parser.add_argument("-s", "--silent", action="store_true",
help="only print errors and warnings")
arg_parser.add_argument("--debug", action="store_true",
help="print debugging information")
arg_parser.add_argument("--log-file", default=None,
help="output logging to a file")
arg_parser.add_argument("-v", "--version", action="version",
version=__version__)
aggregate_reports = []
forensic_reports = []
args = arg_parser.parse_args()
opts = Namespace(file_path=args.file_path,
config_file=args.config_file,
strip_attachment_payloads=args.strip_attachment_payloads,
output=args.output,
nameservers=args.nameservers,
silent=args.silent,
dns_timeout=args.dns_timeout,
debug=args.debug,
save_aggregate=False,
save_forensic=False,
imap_host=None,
imap_skip_certificate_verification=False,
imap_ssl=True,
imap_port=993,
imap_user=None,
imap_password=None,
imap_reports_folder="INBOX",
imap_archive_folder="Archive",
imap_watch=False,
imap_delete=False,
imap_test=False,
hec=None,
hec_token=None,
hec_index=None,
hec_skip_certificate_verification=False,
elasticsearch_hosts=None,
elasticsearch_index_suffix=None,
elasticsearch_ssl=True,
elasticsearch_ssl_cert_path=None,
elasticsearch_monthly_indexes=False,
kafka_hosts=None,
kafka_username=None,
kafka_password=None,
kafka_aggregate_topic=None,
kafka_forensic_topic=None,
kafka_ssl=False,
kafka_skip_certificate_verification=False,
smtp_host=None,
smtp_port=25,
smtp_ssl=False,
smtp_skip_certificate_verification=False,
smtp_user=None,
smtp_password=None,
smtp_from=None,
smtp_to=[],
smtp_subject="parsedmarc report",
smtp_message="Please see the attached DMARC results.",
log_file=args.log_file,
n_procs=1,
chunk_size=1
)
args = arg_parser.parse_args()
if args.config_file:
abs_path = os.path.abspath(args.config_file)
if not os.path.exists(abs_path):
logger.error("A file does not exist at {0}".format(abs_path))
exit(-1)
opts.silent = True
config = ConfigParser()
config.read(args.config_file)
if "general" in config.sections():
general_config = config["general"]
if "strip_attachment_payloads" in general_config:
opts.strip_attachment_payloads = general_config[
"strip_attachment_payloads"]
if "output" in general_config:
opts.output = general_config["output"]
if "nameservers" in general_config:
opts.nameservers = _str_to_list(general_config["nameservers"])
if "dns_timeout" in general_config:
opts.dns_timeout = general_config.getfloat("dns_timeout")
if "save_aggregate" in general_config:
opts.save_aggregate = general_config["save_aggregate"]
if "save_forensic" in general_config:
opts.save_forensic = general_config["save_forensic"]
if "debug" in general_config:
opts.debug = general_config.getboolean("debug")
if "silent" in general_config:
opts.silent = general_config.getboolean("silent")
if "log_file" in general_config:
opts.log_file = general_config["log_file"]
if "n_procs" in general_config:
opts.n_procs = general_config.getint("n_procs")
if "chunk_size" in general_config:
opts.chunk_size = general_config.getint("chunk_size")
if "imap" in config.sections():
imap_config = config["imap"]
if "host" in imap_config:
opts.imap_host = imap_config["host"]
else:
logger.error("host setting missing from the "
"imap config section")
exit(-1)
if "port" in imap_config:
opts.imap_port = imap_config["port"]
if "ssl" in imap_config:
opts.imap_ssl = imap_config.getboolean("ssl")
if "skip_certificate_verification" in imap_config:
imap_verify = imap_config.getboolean(
"skip_certificate_verification")
opts.imap_skip_certificate_verification = imap_verify
if "user" in imap_config:
opts.imap_user = imap_config["user"]
else:
logger.critical("user setting missing from the "
"imap config section")
exit(-1)
if "password" in imap_config:
opts.imap_password = imap_config["password"]
else:
logger.critical("password setting missing from the "
"imap config section")
exit(-1)
if "reports_folder" in imap_config:
opts.imap_reports_folder = imap_config["reports_folder"]
if "archive_folder" in imap_config:
opts.imap_archive_folder = imap_config["archive_folder"]
if "watch" in imap_config:
opts.imap_watch = imap_config.getboolean("watch")
if "delete" in imap_config:
opts.imap_delete = imap_config.getboolean("delete")
if "test" in imap_config:
opts.imap_test = imap_config.getboolean("test")
if "elasticsearch" in config:
elasticsearch_config = config["elasticsearch"]
if "hosts" in elasticsearch_config:
opts.elasticsearch_hosts = _str_to_list(elasticsearch_config[
"hosts"])
else:
logger.critical("hosts setting missing from the "
"elasticsearch config section")
exit(-1)
if "index_suffix" in elasticsearch_config:
opts.elasticsearch_index_suffix = elasticsearch_config[
"index_suffix"]
if "monthly_indexes" in elasticsearch_config:
monthly = elasticsearch_config.getboolean("monthly_indexes")
opts.elasticsearch_monthly_indexes = monthly
if "ssl" in elasticsearch_config:
opts.elasticsearch_ssl = elasticsearch_config.getboolean(
"ssl")
if "cert_path" in elasticsearch_config:
opts.elasticsearch_ssl_cert_path = elasticsearch_config[
"cert_path"]
if "splunk_hec" in config.sections():
hec_config = config["splunk_hec"]
if "url" in hec_config:
opts.hec = hec_config["url"]
else:
logger.critical("url setting missing from the "
"splunk_hec config section")
exit(-1)
if "token" in hec_config:
opts.hec_token = hec_config["token"]
else:
logger.critical("token setting missing from the "
"splunk_hec config section")
exit(-1)
if "index" in hec_config:
opts.hec_index = hec_config["index"]
else:
logger.critical("index setting missing from the "
"splunk_hec config section")
exit(-1)
if "skip_certificate_verification" in hec_config:
opts.hec_skip_certificate_verification = hec_config[
"skip_certificate_verification"]
if "kafka" in config.sections():
kafka_config = config["kafka"]
if "hosts" in kafka_config:
opts.kafka_hosts = _str_to_list(kafka_config["hosts"])
else:
logger.critical("hosts setting missing from the "
"kafka config section")
exit(-1)
if "user" in kafka_config:
opts.kafka_username = kafka_config["user"]
else:
logger.critical("user setting missing from the "
"kafka config section")
exit(-1)
if "password" in kafka_config:
opts.kafka_password = kafka_config["password"]
else:
logger.critical("password setting missing from the "
"kafka config section")
exit(-1)
if "ssl" in kafka_config:
            opts.kafka_ssl = kafka_config.getboolean("ssl")
if "skip_certificate_verification" in kafka_config:
kafka_verify = kafka_config.getboolean(
"skip_certificate_verification")
opts.kafka_skip_certificate_verification = kafka_verify
if "aggregate_topic" in kafka_config:
            opts.kafka_aggregate_topic = kafka_config["aggregate_topic"]
else:
logger.critical("aggregate_topic setting missing from the "
"kafka config section")
exit(-1)
if "forensic_topic" in kafka_config:
            opts.kafka_forensic_topic = kafka_config["forensic_topic"]
else:
logger.critical("forensic_topic setting missing from the "
"splunk_hec config section")
if "smtp" in config.sections():
smtp_config = config["smtp"]
if "host" in smtp_config:
opts.smtp_host = smtp_config["host"]
else:
logger.critical("host setting missing from the "
"smtp config section")
exit(-1)
if "port" in smtp_config:
opts.smtp_port = smtp_config["port"]
if "ssl" in smtp_config:
opts.smtp_ssl = smtp_config.getboolean("ssl")
if "skip_certificate_verification" in smtp_config:
smtp_verify = smtp_config.getboolean(
"skip_certificate_verification")
opts.smtp_skip_certificate_verification = smtp_verify
if "user" in smtp_config:
opts.smtp_user = smtp_config["user"]
else:
logger.critical("user setting missing from the "
"smtp config section")
exit(-1)
if "password" in smtp_config:
opts.smtp_password = smtp_config["password"]
else:
logger.critical("password setting missing from the "
"smtp config section")
exit(-1)
if "from" in smtp_config:
opts.smtp_from = smtp_config["from"]
else:
logger.critical("from setting missing from the "
"smtp config section")
if "to" in smtp_config:
opts.smtp_to = _str_to_list(smtp_config["to"])
else:
logger.critical("to setting missing from the "
"smtp config section")
if "subject" in smtp_config:
opts.smtp_subject = smtp_config["subject"]
if "attachment" in smtp_config:
opts.smtp_attachment = smtp_config["attachment"]
if "message" in smtp_config:
opts.smtp_message = smtp_config["message"]
logging.basicConfig(level=logging.WARNING)
logger.setLevel(logging.WARNING)
if opts.debug:
logging.basicConfig(level=logging.DEBUG)
logger.setLevel(logging.DEBUG)
if opts.log_file:
fh = logging.FileHandler(opts.log_file)
formatter = logging.Formatter(
'%(asctime)s - '
'%(levelname)s - [%(filename)s:%(lineno)d] - %(message)s')
fh.setFormatter(formatter)
logger.addHandler(fh)
if opts.imap_host is None and len(opts.file_path) == 0:
logger.error("You must supply input files, or an IMAP configuration")
exit(1)
if opts.save_aggregate or opts.save_forensic:
try:
if opts.elasticsearch_hosts:
es_aggregate_index = "dmarc_aggregate"
es_forensic_index = "dmarc_forensic"
if opts.elasticsearch_index_suffix:
suffix = opts.elasticsearch_index_suffix
es_aggregate_index = "{0}_{1}".format(
es_aggregate_index, suffix)
es_forensic_index = "{0}_{1}".format(
es_forensic_index, suffix)
elastic.set_hosts(opts.elasticsearch_hosts,
opts.elasticsearch_ssl,
opts.elasticsearch_ssl_cert_path)
elastic.migrate_indexes(aggregate_indexes=[es_aggregate_index],
forensic_indexes=[es_forensic_index])
except elastic.ElasticsearchError as error:
logger.error("Elasticsearch Error: {0}".format(error.__str__()))
exit(1)
if opts.hec:
if opts.hec_token is None or opts.hec_index is None:
logger.error("HEC token and HEC index are required when "
"using HEC URL")
exit(1)
verify = True
if opts.hec_skip_certificate_verification:
verify = False
hec_client = splunk.HECClient(opts.hec, opts.hec_token,
opts.hec_index,
verify=verify)
kafka_aggregate_topic = opts.kafka_aggregate_topic
kafka_forensic_topic = opts.kafka_forensic_topic
file_paths = []
for file_path in args.file_path:
file_paths += glob(file_path)
file_paths = list(set(file_paths))
counter = Value('i', 0)
pool = Pool(opts.n_procs, initializer=init, initargs=(counter,))
results = pool.starmap_async(cli_parse,
zip(file_paths,
repeat(opts.strip_attachment_payloads),
repeat(opts.nameservers),
repeat(opts.dns_timeout),
repeat(opts.n_procs >= 1)),
opts.chunk_size)
pbar = tqdm(total=len(file_paths))
while not results.ready():
pbar.update(counter.value - pbar.n)
time.sleep(0.1)
pbar.close()
results = results.get()
pool.close()
pool.join()
for result in results:
if type(result[0]) is InvalidDMARCReport:
logger.error("Failed to parse {0} - {1}".format(result[1],
result[0]))
else:
if result[0]["report_type"] == "aggregate":
aggregate_reports.append(result[0]["report"])
elif result[0]["report_type"] == "forensic":
forensic_reports.append(result[0]["report"])
if opts.imap_host:
try:
if opts.imap_user is None or opts.imap_password is None:
logger.error("IMAP user and password must be specified if"
"host is specified")
rf = opts.imap_reports_folder
af = opts.imap_archive_folder
ns = opts.nameservers
sa = opts.strip_attachment_payloads
ssl = True
ssl_context = None
if opts.imap_skip_certificate_verification:
logger.debug("Skipping IMAP certificate verification")
ssl_context = create_default_context()
ssl_context.check_hostname = False
ssl_context.verify_mode = CERT_NONE
if opts.imap_ssl is False:
ssl = False
reports = get_dmarc_reports_from_inbox(host=opts.imap_host,
port=opts.imap_port,
ssl=ssl,
ssl_context=ssl_context,
user=opts.imap_user,
password=opts.imap_password,
reports_folder=rf,
archive_folder=af,
delete=opts.imap_delete,
nameservers=ns,
test=opts.imap_test,
strip_attachment_payloads=sa
)
aggregate_reports += reports["aggregate_reports"]
forensic_reports += reports["forensic_reports"]
except IMAPError as error:
logger.error("IMAP Error: {0}".format(error.__str__()))
exit(1)
results = OrderedDict([("aggregate_reports", aggregate_reports),
("forensic_reports", forensic_reports)])
if opts.output:
save_output(results, output_directory=opts.output)
process_reports(results)
if opts.smtp_host:
try:
ssl_context = None
if opts.smtp_skip_certificate_verification:
logger.debug("Skipping SMTP certificate verification")
ssl_context = create_default_context()
ssl_context.check_hostname = False
ssl_context.verify_mode = CERT_NONE
email_results(results, opts.smtp_host, opts.smtp_from,
opts.smtp_to, ssl=opts.smtp_ssl,
user=opts.smtp_user,
password=opts.smtp_password,
subject=opts.smtp_subject,
ssl_context=ssl_context)
except SMTPError as error:
logger.error("SMTP Error: {0}".format(error.__str__()))
exit(1)
if opts.imap_host and opts.imap_watch:
logger.info("Watching for email - Quit with ctrl-c")
ssl = True
ssl_context = None
if opts.imap_skip_certificate_verification:
logger.debug("Skipping IMAP certificate verification")
ssl_context = create_default_context()
ssl_context.check_hostname = False
ssl_context.verify_mode = CERT_NONE
if opts.imap_ssl is False:
ssl = False
try:
sa = opts.strip_attachment_payloads
watch_inbox(opts.imap_host, opts.imap_user, opts.imap_password,
process_reports, port=opts.imap_port, ssl=ssl,
ssl_context=ssl_context,
reports_folder=opts.imap_reports_folder,
archive_folder=opts.imap_archive_folder,
delete=opts.imap_delete,
test=opts.imap_test, nameservers=opts.nameservers,
dns_timeout=opts.dns_timeout,
strip_attachment_payloads=sa)
except IMAPError as error:
logger.error("IMAP error: {0}".format(error.__str__()))
exit(1)
|
[
"Called",
"when",
"the",
"module",
"is",
"executed"
] |
domainaware/parsedmarc
|
python
|
https://github.com/domainaware/parsedmarc/blob/ecc9fd434c23d896ccd1f35795ccc047f946ed05/parsedmarc/cli.py#L55-L599
|
[
"def",
"_main",
"(",
")",
":",
"def",
"process_reports",
"(",
"reports_",
")",
":",
"output_str",
"=",
"\"{0}\\n\"",
".",
"format",
"(",
"json",
".",
"dumps",
"(",
"reports_",
",",
"ensure_ascii",
"=",
"False",
",",
"indent",
"=",
"2",
")",
")",
"if",
"not",
"opts",
".",
"silent",
":",
"print",
"(",
"output_str",
")",
"if",
"opts",
".",
"kafka_hosts",
":",
"try",
":",
"ssl_context",
"=",
"None",
"if",
"opts",
".",
"kafka_skip_certificate_verification",
":",
"logger",
".",
"debug",
"(",
"\"Skipping Kafka certificate verification\"",
")",
"ssl_context",
"=",
"create_default_context",
"(",
")",
"ssl_context",
".",
"check_hostname",
"=",
"False",
"ssl_context",
".",
"verify_mode",
"=",
"CERT_NONE",
"kafka_client",
"=",
"kafkaclient",
".",
"KafkaClient",
"(",
"opts",
".",
"kafka_hosts",
",",
"username",
"=",
"opts",
".",
"kafka_username",
",",
"password",
"=",
"opts",
".",
"kafka_password",
",",
"ssl_context",
"=",
"ssl_context",
")",
"except",
"Exception",
"as",
"error_",
":",
"logger",
".",
"error",
"(",
"\"Kafka Error: {0}\"",
".",
"format",
"(",
"error_",
".",
"__str__",
"(",
")",
")",
")",
"if",
"opts",
".",
"save_aggregate",
":",
"for",
"report",
"in",
"reports_",
"[",
"\"aggregate_reports\"",
"]",
":",
"try",
":",
"if",
"opts",
".",
"elasticsearch_hosts",
":",
"elastic",
".",
"save_aggregate_report_to_elasticsearch",
"(",
"report",
",",
"index_suffix",
"=",
"opts",
".",
"elasticsearch_index_suffix",
",",
"monthly_indexes",
"=",
"opts",
".",
"elasticsearch_monthly_indexes",
")",
"except",
"elastic",
".",
"AlreadySaved",
"as",
"warning",
":",
"logger",
".",
"warning",
"(",
"warning",
".",
"__str__",
"(",
")",
")",
"except",
"elastic",
".",
"ElasticsearchError",
"as",
"error_",
":",
"logger",
".",
"error",
"(",
"\"Elasticsearch Error: {0}\"",
".",
"format",
"(",
"error_",
".",
"__str__",
"(",
")",
")",
")",
"try",
":",
"if",
"opts",
".",
"kafka_hosts",
":",
"kafka_client",
".",
"save_aggregate_reports_to_kafka",
"(",
"report",
",",
"kafka_aggregate_topic",
")",
"except",
"Exception",
"as",
"error_",
":",
"logger",
".",
"error",
"(",
"\"Kafka Error: {0}\"",
".",
"format",
"(",
"error_",
".",
"__str__",
"(",
")",
")",
")",
"if",
"opts",
".",
"hec",
":",
"try",
":",
"aggregate_reports_",
"=",
"reports_",
"[",
"\"aggregate_reports\"",
"]",
"if",
"len",
"(",
"aggregate_reports_",
")",
">",
"0",
":",
"hec_client",
".",
"save_aggregate_reports_to_splunk",
"(",
"aggregate_reports_",
")",
"except",
"splunk",
".",
"SplunkError",
"as",
"e",
":",
"logger",
".",
"error",
"(",
"\"Splunk HEC error: {0}\"",
".",
"format",
"(",
"e",
".",
"__str__",
"(",
")",
")",
")",
"if",
"opts",
".",
"save_forensic",
":",
"for",
"report",
"in",
"reports_",
"[",
"\"forensic_reports\"",
"]",
":",
"try",
":",
"if",
"opts",
".",
"elasticsearch_hosts",
":",
"elastic",
".",
"save_forensic_report_to_elasticsearch",
"(",
"report",
",",
"index_suffix",
"=",
"opts",
".",
"elasticsearch_index_suffix",
",",
"monthly_indexes",
"=",
"opts",
".",
"elasticsearch_monthly_indexes",
")",
"except",
"elastic",
".",
"AlreadySaved",
"as",
"warning",
":",
"logger",
".",
"warning",
"(",
"warning",
".",
"__str__",
"(",
")",
")",
"except",
"elastic",
".",
"ElasticsearchError",
"as",
"error_",
":",
"logger",
".",
"error",
"(",
"\"Elasticsearch Error: {0}\"",
".",
"format",
"(",
"error_",
".",
"__str__",
"(",
")",
")",
")",
"except",
"InvalidDMARCReport",
"as",
"error_",
":",
"logger",
".",
"error",
"(",
"error_",
".",
"__str__",
"(",
")",
")",
"try",
":",
"if",
"opts",
".",
"kafka_hosts",
":",
"kafka_client",
".",
"save_forensic_reports_to_kafka",
"(",
"report",
",",
"kafka_forensic_topic",
")",
"except",
"Exception",
"as",
"error_",
":",
"logger",
".",
"error",
"(",
"\"Kafka Error: {0}\"",
".",
"format",
"(",
"error_",
".",
"__str__",
"(",
")",
")",
")",
"if",
"opts",
".",
"hec",
":",
"try",
":",
"forensic_reports_",
"=",
"reports_",
"[",
"\"forensic_reports\"",
"]",
"if",
"len",
"(",
"forensic_reports_",
")",
">",
"0",
":",
"hec_client",
".",
"save_forensic_reports_to_splunk",
"(",
"forensic_reports_",
")",
"except",
"splunk",
".",
"SplunkError",
"as",
"e",
":",
"logger",
".",
"error",
"(",
"\"Splunk HEC error: {0}\"",
".",
"format",
"(",
"e",
".",
"__str__",
"(",
")",
")",
")",
"arg_parser",
"=",
"ArgumentParser",
"(",
"description",
"=",
"\"Parses DMARC reports\"",
")",
"arg_parser",
".",
"add_argument",
"(",
"\"-c\"",
",",
"\"--config-file\"",
",",
"help",
"=",
"\"A path to a configuration file \"",
"\"(--silent implied)\"",
")",
"arg_parser",
".",
"add_argument",
"(",
"\"file_path\"",
",",
"nargs",
"=",
"\"*\"",
",",
"help",
"=",
"\"one or more paths to aggregate or forensic \"",
"\"report files or emails\"",
")",
"strip_attachment_help",
"=",
"\"remove attachment payloads from forensic \"",
"\"report output\"",
"arg_parser",
".",
"add_argument",
"(",
"\"--strip-attachment-payloads\"",
",",
"help",
"=",
"strip_attachment_help",
",",
"action",
"=",
"\"store_true\"",
")",
"arg_parser",
".",
"add_argument",
"(",
"\"-o\"",
",",
"\"--output\"",
",",
"help",
"=",
"\"write output files to the given directory\"",
")",
"arg_parser",
".",
"add_argument",
"(",
"\"-n\"",
",",
"\"--nameservers\"",
",",
"nargs",
"=",
"\"+\"",
",",
"help",
"=",
"\"nameservers to query \"",
"\"(default is Cloudflare's nameservers)\"",
")",
"arg_parser",
".",
"add_argument",
"(",
"\"-t\"",
",",
"\"--dns_timeout\"",
",",
"help",
"=",
"\"number of seconds to wait for an answer \"",
"\"from DNS (default: 6.0)\"",
",",
"type",
"=",
"float",
",",
"default",
"=",
"6.0",
")",
"arg_parser",
".",
"add_argument",
"(",
"\"-s\"",
",",
"\"--silent\"",
",",
"action",
"=",
"\"store_true\"",
",",
"help",
"=",
"\"only print errors and warnings\"",
")",
"arg_parser",
".",
"add_argument",
"(",
"\"--debug\"",
",",
"action",
"=",
"\"store_true\"",
",",
"help",
"=",
"\"print debugging information\"",
")",
"arg_parser",
".",
"add_argument",
"(",
"\"--log-file\"",
",",
"default",
"=",
"None",
",",
"help",
"=",
"\"output logging to a file\"",
")",
"arg_parser",
".",
"add_argument",
"(",
"\"-v\"",
",",
"\"--version\"",
",",
"action",
"=",
"\"version\"",
",",
"version",
"=",
"__version__",
")",
"aggregate_reports",
"=",
"[",
"]",
"forensic_reports",
"=",
"[",
"]",
"args",
"=",
"arg_parser",
".",
"parse_args",
"(",
")",
"opts",
"=",
"Namespace",
"(",
"file_path",
"=",
"args",
".",
"file_path",
",",
"config_file",
"=",
"args",
".",
"config_file",
",",
"strip_attachment_payloads",
"=",
"args",
".",
"strip_attachment_payloads",
",",
"output",
"=",
"args",
".",
"output",
",",
"nameservers",
"=",
"args",
".",
"nameservers",
",",
"silent",
"=",
"args",
".",
"silent",
",",
"dns_timeout",
"=",
"args",
".",
"dns_timeout",
",",
"debug",
"=",
"args",
".",
"debug",
",",
"save_aggregate",
"=",
"False",
",",
"save_forensic",
"=",
"False",
",",
"imap_host",
"=",
"None",
",",
"imap_skip_certificate_verification",
"=",
"False",
",",
"imap_ssl",
"=",
"True",
",",
"imap_port",
"=",
"993",
",",
"imap_user",
"=",
"None",
",",
"imap_password",
"=",
"None",
",",
"imap_reports_folder",
"=",
"\"INBOX\"",
",",
"imap_archive_folder",
"=",
"\"Archive\"",
",",
"imap_watch",
"=",
"False",
",",
"imap_delete",
"=",
"False",
",",
"imap_test",
"=",
"False",
",",
"hec",
"=",
"None",
",",
"hec_token",
"=",
"None",
",",
"hec_index",
"=",
"None",
",",
"hec_skip_certificate_verification",
"=",
"False",
",",
"elasticsearch_hosts",
"=",
"None",
",",
"elasticsearch_index_suffix",
"=",
"None",
",",
"elasticsearch_ssl",
"=",
"True",
",",
"elasticsearch_ssl_cert_path",
"=",
"None",
",",
"elasticsearch_monthly_indexes",
"=",
"False",
",",
"kafka_hosts",
"=",
"None",
",",
"kafka_username",
"=",
"None",
",",
"kafka_password",
"=",
"None",
",",
"kafka_aggregate_topic",
"=",
"None",
",",
"kafka_forensic_topic",
"=",
"None",
",",
"kafka_ssl",
"=",
"False",
",",
"kafka_skip_certificate_verification",
"=",
"False",
",",
"smtp_host",
"=",
"None",
",",
"smtp_port",
"=",
"25",
",",
"smtp_ssl",
"=",
"False",
",",
"smtp_skip_certificate_verification",
"=",
"False",
",",
"smtp_user",
"=",
"None",
",",
"smtp_password",
"=",
"None",
",",
"smtp_from",
"=",
"None",
",",
"smtp_to",
"=",
"[",
"]",
",",
"smtp_subject",
"=",
"\"parsedmarc report\"",
",",
"smtp_message",
"=",
"\"Please see the attached DMARC results.\"",
",",
"log_file",
"=",
"args",
".",
"log_file",
",",
"n_procs",
"=",
"1",
",",
"chunk_size",
"=",
"1",
")",
"args",
"=",
"arg_parser",
".",
"parse_args",
"(",
")",
"if",
"args",
".",
"config_file",
":",
"abs_path",
"=",
"os",
".",
"path",
".",
"abspath",
"(",
"args",
".",
"config_file",
")",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"abs_path",
")",
":",
"logger",
".",
"error",
"(",
"\"A file does not exist at {0}\"",
".",
"format",
"(",
"abs_path",
")",
")",
"exit",
"(",
"-",
"1",
")",
"opts",
".",
"silent",
"=",
"True",
"config",
"=",
"ConfigParser",
"(",
")",
"config",
".",
"read",
"(",
"args",
".",
"config_file",
")",
"if",
"\"general\"",
"in",
"config",
".",
"sections",
"(",
")",
":",
"general_config",
"=",
"config",
"[",
"\"general\"",
"]",
"if",
"\"strip_attachment_payloads\"",
"in",
"general_config",
":",
"opts",
".",
"strip_attachment_payloads",
"=",
"general_config",
"[",
"\"strip_attachment_payloads\"",
"]",
"if",
"\"output\"",
"in",
"general_config",
":",
"opts",
".",
"output",
"=",
"general_config",
"[",
"\"output\"",
"]",
"if",
"\"nameservers\"",
"in",
"general_config",
":",
"opts",
".",
"nameservers",
"=",
"_str_to_list",
"(",
"general_config",
"[",
"\"nameservers\"",
"]",
")",
"if",
"\"dns_timeout\"",
"in",
"general_config",
":",
"opts",
".",
"dns_timeout",
"=",
"general_config",
".",
"getfloat",
"(",
"\"dns_timeout\"",
")",
"if",
"\"save_aggregate\"",
"in",
"general_config",
":",
"opts",
".",
"save_aggregate",
"=",
"general_config",
"[",
"\"save_aggregate\"",
"]",
"if",
"\"save_forensic\"",
"in",
"general_config",
":",
"opts",
".",
"save_forensic",
"=",
"general_config",
"[",
"\"save_forensic\"",
"]",
"if",
"\"debug\"",
"in",
"general_config",
":",
"opts",
".",
"debug",
"=",
"general_config",
".",
"getboolean",
"(",
"\"debug\"",
")",
"if",
"\"silent\"",
"in",
"general_config",
":",
"opts",
".",
"silent",
"=",
"general_config",
".",
"getboolean",
"(",
"\"silent\"",
")",
"if",
"\"log_file\"",
"in",
"general_config",
":",
"opts",
".",
"log_file",
"=",
"general_config",
"[",
"\"log_file\"",
"]",
"if",
"\"n_procs\"",
"in",
"general_config",
":",
"opts",
".",
"n_procs",
"=",
"general_config",
".",
"getint",
"(",
"\"n_procs\"",
")",
"if",
"\"chunk_size\"",
"in",
"general_config",
":",
"opts",
".",
"chunk_size",
"=",
"general_config",
".",
"getint",
"(",
"\"chunk_size\"",
")",
"if",
"\"imap\"",
"in",
"config",
".",
"sections",
"(",
")",
":",
"imap_config",
"=",
"config",
"[",
"\"imap\"",
"]",
"if",
"\"host\"",
"in",
"imap_config",
":",
"opts",
".",
"imap_host",
"=",
"imap_config",
"[",
"\"host\"",
"]",
"else",
":",
"logger",
".",
"error",
"(",
"\"host setting missing from the \"",
"\"imap config section\"",
")",
"exit",
"(",
"-",
"1",
")",
"if",
"\"port\"",
"in",
"imap_config",
":",
"opts",
".",
"imap_port",
"=",
"imap_config",
"[",
"\"port\"",
"]",
"if",
"\"ssl\"",
"in",
"imap_config",
":",
"opts",
".",
"imap_ssl",
"=",
"imap_config",
".",
"getboolean",
"(",
"\"ssl\"",
")",
"if",
"\"skip_certificate_verification\"",
"in",
"imap_config",
":",
"imap_verify",
"=",
"imap_config",
".",
"getboolean",
"(",
"\"skip_certificate_verification\"",
")",
"opts",
".",
"imap_skip_certificate_verification",
"=",
"imap_verify",
"if",
"\"user\"",
"in",
"imap_config",
":",
"opts",
".",
"imap_user",
"=",
"imap_config",
"[",
"\"user\"",
"]",
"else",
":",
"logger",
".",
"critical",
"(",
"\"user setting missing from the \"",
"\"imap config section\"",
")",
"exit",
"(",
"-",
"1",
")",
"if",
"\"password\"",
"in",
"imap_config",
":",
"opts",
".",
"imap_password",
"=",
"imap_config",
"[",
"\"password\"",
"]",
"else",
":",
"logger",
".",
"critical",
"(",
"\"password setting missing from the \"",
"\"imap config section\"",
")",
"exit",
"(",
"-",
"1",
")",
"if",
"\"reports_folder\"",
"in",
"imap_config",
":",
"opts",
".",
"imap_reports_folder",
"=",
"imap_config",
"[",
"\"reports_folder\"",
"]",
"if",
"\"archive_folder\"",
"in",
"imap_config",
":",
"opts",
".",
"imap_archive_folder",
"=",
"imap_config",
"[",
"\"archive_folder\"",
"]",
"if",
"\"watch\"",
"in",
"imap_config",
":",
"opts",
".",
"imap_watch",
"=",
"imap_config",
".",
"getboolean",
"(",
"\"watch\"",
")",
"if",
"\"delete\"",
"in",
"imap_config",
":",
"opts",
".",
"imap_delete",
"=",
"imap_config",
".",
"getboolean",
"(",
"\"delete\"",
")",
"if",
"\"test\"",
"in",
"imap_config",
":",
"opts",
".",
"imap_test",
"=",
"imap_config",
".",
"getboolean",
"(",
"\"test\"",
")",
"if",
"\"elasticsearch\"",
"in",
"config",
":",
"elasticsearch_config",
"=",
"config",
"[",
"\"elasticsearch\"",
"]",
"if",
"\"hosts\"",
"in",
"elasticsearch_config",
":",
"opts",
".",
"elasticsearch_hosts",
"=",
"_str_to_list",
"(",
"elasticsearch_config",
"[",
"\"hosts\"",
"]",
")",
"else",
":",
"logger",
".",
"critical",
"(",
"\"hosts setting missing from the \"",
"\"elasticsearch config section\"",
")",
"exit",
"(",
"-",
"1",
")",
"if",
"\"index_suffix\"",
"in",
"elasticsearch_config",
":",
"opts",
".",
"elasticsearch_index_suffix",
"=",
"elasticsearch_config",
"[",
"\"index_suffix\"",
"]",
"if",
"\"monthly_indexes\"",
"in",
"elasticsearch_config",
":",
"monthly",
"=",
"elasticsearch_config",
".",
"getboolean",
"(",
"\"monthly_indexes\"",
")",
"opts",
".",
"elasticsearch_monthly_indexes",
"=",
"monthly",
"if",
"\"ssl\"",
"in",
"elasticsearch_config",
":",
"opts",
".",
"elasticsearch_ssl",
"=",
"elasticsearch_config",
".",
"getboolean",
"(",
"\"ssl\"",
")",
"if",
"\"cert_path\"",
"in",
"elasticsearch_config",
":",
"opts",
".",
"elasticsearch_ssl_cert_path",
"=",
"elasticsearch_config",
"[",
"\"cert_path\"",
"]",
"if",
"\"splunk_hec\"",
"in",
"config",
".",
"sections",
"(",
")",
":",
"hec_config",
"=",
"config",
"[",
"\"splunk_hec\"",
"]",
"if",
"\"url\"",
"in",
"hec_config",
":",
"opts",
".",
"hec",
"=",
"hec_config",
"[",
"\"url\"",
"]",
"else",
":",
"logger",
".",
"critical",
"(",
"\"url setting missing from the \"",
"\"splunk_hec config section\"",
")",
"exit",
"(",
"-",
"1",
")",
"if",
"\"token\"",
"in",
"hec_config",
":",
"opts",
".",
"hec_token",
"=",
"hec_config",
"[",
"\"token\"",
"]",
"else",
":",
"logger",
".",
"critical",
"(",
"\"token setting missing from the \"",
"\"splunk_hec config section\"",
")",
"exit",
"(",
"-",
"1",
")",
"if",
"\"index\"",
"in",
"hec_config",
":",
"opts",
".",
"hec_index",
"=",
"hec_config",
"[",
"\"index\"",
"]",
"else",
":",
"logger",
".",
"critical",
"(",
"\"index setting missing from the \"",
"\"splunk_hec config section\"",
")",
"exit",
"(",
"-",
"1",
")",
"if",
"\"skip_certificate_verification\"",
"in",
"hec_config",
":",
"opts",
".",
"hec_skip_certificate_verification",
"=",
"hec_config",
"[",
"\"skip_certificate_verification\"",
"]",
"if",
"\"kafka\"",
"in",
"config",
".",
"sections",
"(",
")",
":",
"kafka_config",
"=",
"config",
"[",
"\"kafka\"",
"]",
"if",
"\"hosts\"",
"in",
"kafka_config",
":",
"opts",
".",
"kafka_hosts",
"=",
"_str_to_list",
"(",
"kafka_config",
"[",
"\"hosts\"",
"]",
")",
"else",
":",
"logger",
".",
"critical",
"(",
"\"hosts setting missing from the \"",
"\"kafka config section\"",
")",
"exit",
"(",
"-",
"1",
")",
"if",
"\"user\"",
"in",
"kafka_config",
":",
"opts",
".",
"kafka_username",
"=",
"kafka_config",
"[",
"\"user\"",
"]",
"else",
":",
"logger",
".",
"critical",
"(",
"\"user setting missing from the \"",
"\"kafka config section\"",
")",
"exit",
"(",
"-",
"1",
")",
"if",
"\"password\"",
"in",
"kafka_config",
":",
"opts",
".",
"kafka_password",
"=",
"kafka_config",
"[",
"\"password\"",
"]",
"else",
":",
"logger",
".",
"critical",
"(",
"\"password setting missing from the \"",
"\"kafka config section\"",
")",
"exit",
"(",
"-",
"1",
")",
"if",
"\"ssl\"",
"in",
"kafka_config",
":",
"opts",
".",
"kafka_ssl",
"=",
"kafka_config",
"[",
"\"ssl\"",
"]",
".",
"getboolean",
"(",
")",
"if",
"\"skip_certificate_verification\"",
"in",
"kafka_config",
":",
"kafka_verify",
"=",
"kafka_config",
".",
"getboolean",
"(",
"\"skip_certificate_verification\"",
")",
"opts",
".",
"kafka_skip_certificate_verification",
"=",
"kafka_verify",
"if",
"\"aggregate_topic\"",
"in",
"kafka_config",
":",
"opts",
".",
"kafka_aggregate",
"=",
"kafka_config",
"[",
"\"aggregate_topic\"",
"]",
"else",
":",
"logger",
".",
"critical",
"(",
"\"aggregate_topic setting missing from the \"",
"\"kafka config section\"",
")",
"exit",
"(",
"-",
"1",
")",
"if",
"\"forensic_topic\"",
"in",
"kafka_config",
":",
"opts",
".",
"kafka_username",
"=",
"kafka_config",
"[",
"\"forensic_topic\"",
"]",
"else",
":",
"logger",
".",
"critical",
"(",
"\"forensic_topic setting missing from the \"",
"\"splunk_hec config section\"",
")",
"if",
"\"smtp\"",
"in",
"config",
".",
"sections",
"(",
")",
":",
"smtp_config",
"=",
"config",
"[",
"\"smtp\"",
"]",
"if",
"\"host\"",
"in",
"smtp_config",
":",
"opts",
".",
"smtp_host",
"=",
"smtp_config",
"[",
"\"host\"",
"]",
"else",
":",
"logger",
".",
"critical",
"(",
"\"host setting missing from the \"",
"\"smtp config section\"",
")",
"exit",
"(",
"-",
"1",
")",
"if",
"\"port\"",
"in",
"smtp_config",
":",
"opts",
".",
"smtp_port",
"=",
"smtp_config",
"[",
"\"port\"",
"]",
"if",
"\"ssl\"",
"in",
"smtp_config",
":",
"opts",
".",
"smtp_ssl",
"=",
"smtp_config",
".",
"getboolean",
"(",
"\"ssl\"",
")",
"if",
"\"skip_certificate_verification\"",
"in",
"smtp_config",
":",
"smtp_verify",
"=",
"smtp_config",
".",
"getboolean",
"(",
"\"skip_certificate_verification\"",
")",
"opts",
".",
"smtp_skip_certificate_verification",
"=",
"smtp_verify",
"if",
"\"user\"",
"in",
"smtp_config",
":",
"opts",
".",
"smtp_user",
"=",
"smtp_config",
"[",
"\"user\"",
"]",
"else",
":",
"logger",
".",
"critical",
"(",
"\"user setting missing from the \"",
"\"smtp config section\"",
")",
"exit",
"(",
"-",
"1",
")",
"if",
"\"password\"",
"in",
"smtp_config",
":",
"opts",
".",
"smtp_password",
"=",
"smtp_config",
"[",
"\"password\"",
"]",
"else",
":",
"logger",
".",
"critical",
"(",
"\"password setting missing from the \"",
"\"smtp config section\"",
")",
"exit",
"(",
"-",
"1",
")",
"if",
"\"from\"",
"in",
"smtp_config",
":",
"opts",
".",
"smtp_from",
"=",
"smtp_config",
"[",
"\"from\"",
"]",
"else",
":",
"logger",
".",
"critical",
"(",
"\"from setting missing from the \"",
"\"smtp config section\"",
")",
"if",
"\"to\"",
"in",
"smtp_config",
":",
"opts",
".",
"smtp_to",
"=",
"_str_to_list",
"(",
"smtp_config",
"[",
"\"to\"",
"]",
")",
"else",
":",
"logger",
".",
"critical",
"(",
"\"to setting missing from the \"",
"\"smtp config section\"",
")",
"if",
"\"subject\"",
"in",
"smtp_config",
":",
"opts",
".",
"smtp_subject",
"=",
"smtp_config",
"[",
"\"subject\"",
"]",
"if",
"\"attachment\"",
"in",
"smtp_config",
":",
"opts",
".",
"smtp_attachment",
"=",
"smtp_config",
"[",
"\"attachment\"",
"]",
"if",
"\"message\"",
"in",
"smtp_config",
":",
"opts",
".",
"smtp_message",
"=",
"smtp_config",
"[",
"\"message\"",
"]",
"logging",
".",
"basicConfig",
"(",
"level",
"=",
"logging",
".",
"WARNING",
")",
"logger",
".",
"setLevel",
"(",
"logging",
".",
"WARNING",
")",
"if",
"opts",
".",
"debug",
":",
"logging",
".",
"basicConfig",
"(",
"level",
"=",
"logging",
".",
"DEBUG",
")",
"logger",
".",
"setLevel",
"(",
"logging",
".",
"DEBUG",
")",
"if",
"opts",
".",
"log_file",
":",
"fh",
"=",
"logging",
".",
"FileHandler",
"(",
"opts",
".",
"log_file",
")",
"formatter",
"=",
"logging",
".",
"Formatter",
"(",
"'%(asctime)s - '",
"'%(levelname)s - [%(filename)s:%(lineno)d] - %(message)s'",
")",
"fh",
".",
"setFormatter",
"(",
"formatter",
")",
"logger",
".",
"addHandler",
"(",
"fh",
")",
"if",
"opts",
".",
"imap_host",
"is",
"None",
"and",
"len",
"(",
"opts",
".",
"file_path",
")",
"==",
"0",
":",
"logger",
".",
"error",
"(",
"\"You must supply input files, or an IMAP configuration\"",
")",
"exit",
"(",
"1",
")",
"if",
"opts",
".",
"save_aggregate",
"or",
"opts",
".",
"save_forensic",
":",
"try",
":",
"if",
"opts",
".",
"elasticsearch_hosts",
":",
"es_aggregate_index",
"=",
"\"dmarc_aggregate\"",
"es_forensic_index",
"=",
"\"dmarc_forensic\"",
"if",
"opts",
".",
"elasticsearch_index_suffix",
":",
"suffix",
"=",
"opts",
".",
"elasticsearch_index_suffix",
"es_aggregate_index",
"=",
"\"{0}_{1}\"",
".",
"format",
"(",
"es_aggregate_index",
",",
"suffix",
")",
"es_forensic_index",
"=",
"\"{0}_{1}\"",
".",
"format",
"(",
"es_forensic_index",
",",
"suffix",
")",
"elastic",
".",
"set_hosts",
"(",
"opts",
".",
"elasticsearch_hosts",
",",
"opts",
".",
"elasticsearch_ssl",
",",
"opts",
".",
"elasticsearch_ssl_cert_path",
")",
"elastic",
".",
"migrate_indexes",
"(",
"aggregate_indexes",
"=",
"[",
"es_aggregate_index",
"]",
",",
"forensic_indexes",
"=",
"[",
"es_forensic_index",
"]",
")",
"except",
"elastic",
".",
"ElasticsearchError",
"as",
"error",
":",
"logger",
".",
"error",
"(",
"\"Elasticsearch Error: {0}\"",
".",
"format",
"(",
"error",
".",
"__str__",
"(",
")",
")",
")",
"exit",
"(",
"1",
")",
"if",
"opts",
".",
"hec",
":",
"if",
"opts",
".",
"hec_token",
"is",
"None",
"or",
"opts",
".",
"hec_index",
"is",
"None",
":",
"logger",
".",
"error",
"(",
"\"HEC token and HEC index are required when \"",
"\"using HEC URL\"",
")",
"exit",
"(",
"1",
")",
"verify",
"=",
"True",
"if",
"opts",
".",
"hec_skip_certificate_verification",
":",
"verify",
"=",
"False",
"hec_client",
"=",
"splunk",
".",
"HECClient",
"(",
"opts",
".",
"hec",
",",
"opts",
".",
"hec_token",
",",
"opts",
".",
"hec_index",
",",
"verify",
"=",
"verify",
")",
"kafka_aggregate_topic",
"=",
"opts",
".",
"kafka_aggregate_topic",
"kafka_forensic_topic",
"=",
"opts",
".",
"kafka_forensic_topic",
"file_paths",
"=",
"[",
"]",
"for",
"file_path",
"in",
"args",
".",
"file_path",
":",
"file_paths",
"+=",
"glob",
"(",
"file_path",
")",
"file_paths",
"=",
"list",
"(",
"set",
"(",
"file_paths",
")",
")",
"counter",
"=",
"Value",
"(",
"'i'",
",",
"0",
")",
"pool",
"=",
"Pool",
"(",
"opts",
".",
"n_procs",
",",
"initializer",
"=",
"init",
",",
"initargs",
"=",
"(",
"counter",
",",
")",
")",
"results",
"=",
"pool",
".",
"starmap_async",
"(",
"cli_parse",
",",
"zip",
"(",
"file_paths",
",",
"repeat",
"(",
"opts",
".",
"strip_attachment_payloads",
")",
",",
"repeat",
"(",
"opts",
".",
"nameservers",
")",
",",
"repeat",
"(",
"opts",
".",
"dns_timeout",
")",
",",
"repeat",
"(",
"opts",
".",
"n_procs",
">=",
"1",
")",
")",
",",
"opts",
".",
"chunk_size",
")",
"pbar",
"=",
"tqdm",
"(",
"total",
"=",
"len",
"(",
"file_paths",
")",
")",
"while",
"not",
"results",
".",
"ready",
"(",
")",
":",
"pbar",
".",
"update",
"(",
"counter",
".",
"value",
"-",
"pbar",
".",
"n",
")",
"time",
".",
"sleep",
"(",
"0.1",
")",
"pbar",
".",
"close",
"(",
")",
"results",
"=",
"results",
".",
"get",
"(",
")",
"pool",
".",
"close",
"(",
")",
"pool",
".",
"join",
"(",
")",
"for",
"result",
"in",
"results",
":",
"if",
"type",
"(",
"result",
"[",
"0",
"]",
")",
"is",
"InvalidDMARCReport",
":",
"logger",
".",
"error",
"(",
"\"Failed to parse {0} - {1}\"",
".",
"format",
"(",
"result",
"[",
"1",
"]",
",",
"result",
"[",
"0",
"]",
")",
")",
"else",
":",
"if",
"result",
"[",
"0",
"]",
"[",
"\"report_type\"",
"]",
"==",
"\"aggregate\"",
":",
"aggregate_reports",
".",
"append",
"(",
"result",
"[",
"0",
"]",
"[",
"\"report\"",
"]",
")",
"elif",
"result",
"[",
"0",
"]",
"[",
"\"report_type\"",
"]",
"==",
"\"forensic\"",
":",
"forensic_reports",
".",
"append",
"(",
"result",
"[",
"0",
"]",
"[",
"\"report\"",
"]",
")",
"if",
"opts",
".",
"imap_host",
":",
"try",
":",
"if",
"opts",
".",
"imap_user",
"is",
"None",
"or",
"opts",
".",
"imap_password",
"is",
"None",
":",
"logger",
".",
"error",
"(",
"\"IMAP user and password must be specified if\"",
"\"host is specified\"",
")",
"rf",
"=",
"opts",
".",
"imap_reports_folder",
"af",
"=",
"opts",
".",
"imap_archive_folder",
"ns",
"=",
"opts",
".",
"nameservers",
"sa",
"=",
"opts",
".",
"strip_attachment_payloads",
"ssl",
"=",
"True",
"ssl_context",
"=",
"None",
"if",
"opts",
".",
"imap_skip_certificate_verification",
":",
"logger",
".",
"debug",
"(",
"\"Skipping IMAP certificate verification\"",
")",
"ssl_context",
"=",
"create_default_context",
"(",
")",
"ssl_context",
".",
"check_hostname",
"=",
"False",
"ssl_context",
".",
"verify_mode",
"=",
"CERT_NONE",
"if",
"opts",
".",
"imap_ssl",
"is",
"False",
":",
"ssl",
"=",
"False",
"reports",
"=",
"get_dmarc_reports_from_inbox",
"(",
"host",
"=",
"opts",
".",
"imap_host",
",",
"port",
"=",
"opts",
".",
"imap_port",
",",
"ssl",
"=",
"ssl",
",",
"ssl_context",
"=",
"ssl_context",
",",
"user",
"=",
"opts",
".",
"imap_user",
",",
"password",
"=",
"opts",
".",
"imap_password",
",",
"reports_folder",
"=",
"rf",
",",
"archive_folder",
"=",
"af",
",",
"delete",
"=",
"opts",
".",
"imap_delete",
",",
"nameservers",
"=",
"ns",
",",
"test",
"=",
"opts",
".",
"imap_test",
",",
"strip_attachment_payloads",
"=",
"sa",
")",
"aggregate_reports",
"+=",
"reports",
"[",
"\"aggregate_reports\"",
"]",
"forensic_reports",
"+=",
"reports",
"[",
"\"forensic_reports\"",
"]",
"except",
"IMAPError",
"as",
"error",
":",
"logger",
".",
"error",
"(",
"\"IMAP Error: {0}\"",
".",
"format",
"(",
"error",
".",
"__str__",
"(",
")",
")",
")",
"exit",
"(",
"1",
")",
"results",
"=",
"OrderedDict",
"(",
"[",
"(",
"\"aggregate_reports\"",
",",
"aggregate_reports",
")",
",",
"(",
"\"forensic_reports\"",
",",
"forensic_reports",
")",
"]",
")",
"if",
"opts",
".",
"output",
":",
"save_output",
"(",
"results",
",",
"output_directory",
"=",
"opts",
".",
"output",
")",
"process_reports",
"(",
"results",
")",
"if",
"opts",
".",
"smtp_host",
":",
"try",
":",
"ssl_context",
"=",
"None",
"if",
"opts",
".",
"smtp_skip_certificate_verification",
":",
"logger",
".",
"debug",
"(",
"\"Skipping SMTP certificate verification\"",
")",
"ssl_context",
"=",
"create_default_context",
"(",
")",
"ssl_context",
".",
"check_hostname",
"=",
"False",
"ssl_context",
".",
"verify_mode",
"=",
"CERT_NONE",
"email_results",
"(",
"results",
",",
"opts",
".",
"smtp_host",
",",
"opts",
".",
"smtp_from",
",",
"opts",
".",
"smtp_to",
",",
"ssl",
"=",
"opts",
".",
"smtp_ssl",
",",
"user",
"=",
"opts",
".",
"smtp_user",
",",
"password",
"=",
"opts",
".",
"smtp_password",
",",
"subject",
"=",
"opts",
".",
"smtp_subject",
",",
"ssl_context",
"=",
"ssl_context",
")",
"except",
"SMTPError",
"as",
"error",
":",
"logger",
".",
"error",
"(",
"\"SMTP Error: {0}\"",
".",
"format",
"(",
"error",
".",
"__str__",
"(",
")",
")",
")",
"exit",
"(",
"1",
")",
"if",
"opts",
".",
"imap_host",
"and",
"opts",
".",
"imap_watch",
":",
"logger",
".",
"info",
"(",
"\"Watching for email - Quit with ctrl-c\"",
")",
"ssl",
"=",
"True",
"ssl_context",
"=",
"None",
"if",
"opts",
".",
"imap_skip_certificate_verification",
":",
"logger",
".",
"debug",
"(",
"\"Skipping IMAP certificate verification\"",
")",
"ssl_context",
"=",
"create_default_context",
"(",
")",
"ssl_context",
".",
"check_hostname",
"=",
"False",
"ssl_context",
".",
"verify_mode",
"=",
"CERT_NONE",
"if",
"opts",
".",
"imap_ssl",
"is",
"False",
":",
"ssl",
"=",
"False",
"try",
":",
"sa",
"=",
"opts",
".",
"strip_attachment_payloads",
"watch_inbox",
"(",
"opts",
".",
"imap_host",
",",
"opts",
".",
"imap_user",
",",
"opts",
".",
"imap_password",
",",
"process_reports",
",",
"port",
"=",
"opts",
".",
"imap_port",
",",
"ssl",
"=",
"ssl",
",",
"ssl_context",
"=",
"ssl_context",
",",
"reports_folder",
"=",
"opts",
".",
"imap_reports_folder",
",",
"archive_folder",
"=",
"opts",
".",
"imap_archive_folder",
",",
"delete",
"=",
"opts",
".",
"imap_delete",
",",
"test",
"=",
"opts",
".",
"imap_test",
",",
"nameservers",
"=",
"opts",
".",
"nameservers",
",",
"dns_timeout",
"=",
"opts",
".",
"dns_timeout",
",",
"strip_attachment_payloads",
"=",
"sa",
")",
"except",
"IMAPError",
"as",
"error",
":",
"logger",
".",
"error",
"(",
"\"IMAP error: {0}\"",
".",
"format",
"(",
"error",
".",
"__str__",
"(",
")",
")",
")",
"exit",
"(",
"1",
")"
] |
ecc9fd434c23d896ccd1f35795ccc047f946ed05
|
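The _main() code in the record above reads its settings from an INI file via ConfigParser, looking for sections named general, imap, elasticsearch, splunk_hec, kafka and smtp and using the typed accessors getboolean, getfloat and getint. The sketch below writes and re-reads a minimal configuration with a few of those options; the file name and all values are hypothetical, and only the section and option names are taken from the code above.

from configparser import ConfigParser

# Minimal configuration covering a few of the sections and options the
# CLI above looks for (general, imap, elasticsearch).
config = ConfigParser()
config["general"] = {
    "save_aggregate": "True",
    "save_forensic": "True",
    "dns_timeout": "6.0",
}
config["imap"] = {
    "host": "imap.example.com",    # hypothetical host
    "user": "dmarc@example.com",   # hypothetical user
    "password": "secret",          # hypothetical password
    "watch": "True",
}
config["elasticsearch"] = {
    "hosts": "127.0.0.1:9200",
    "ssl": "False",
}

with open("parsedmarc.ini", "w") as config_file:  # hypothetical path
    config.write(config_file)

# Read it back with the same typed accessors the CLI relies on.
loaded = ConfigParser()
loaded.read("parsedmarc.ini")
assert loaded["imap"]["host"] == "imap.example.com"
assert loaded["general"].getfloat("dns_timeout") == 6.0
assert loaded["elasticsearch"].getboolean("ssl") is False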
test
|
Client.drain
|
Drain will put a connection into a drain state. All subscriptions will
immediately be put into a drain state. Upon completion, the publishers
will be drained and can not publish any additional messages. Upon draining
of the publishers, the connection will be closed. Use the `closed_cb'
option to know when the connection has moved from draining to closed.
If a sid is passed, just the subscription with that sid will be drained
without closing the connection.
|
nats/aio/client.py
|
def drain(self, sid=None):
"""
Drain will put a connection into a drain state. All subscriptions will
immediately be put into a drain state. Upon completion, the publishers
will be drained and can not publish any additional messages. Upon draining
of the publishers, the connection will be closed. Use the `closed_cb'
option to know when the connection has moved from draining to closed.
If a sid is passed, just the subscription with that sid will be drained
without closing the connection.
"""
if self.is_draining:
return
if self.is_closed:
raise ErrConnectionClosed
if self.is_connecting or self.is_reconnecting:
raise ErrConnectionReconnecting
if sid is not None:
return self._drain_sub(sid)
# Start draining the subscriptions
self._status = Client.DRAINING_SUBS
drain_tasks = []
for ssid, sub in self._subs.items():
task = self._drain_sub(ssid)
drain_tasks.append(task)
drain_is_done = asyncio.gather(*drain_tasks)
try:
yield from asyncio.wait_for(drain_is_done, self.options["drain_timeout"])
except asyncio.TimeoutError:
drain_is_done.exception()
drain_is_done.cancel()
if self._error_cb is not None:
yield from self._error_cb(ErrDrainTimeout)
except asyncio.CancelledError:
pass
finally:
self._status = Client.DRAINING_PUBS
yield from self.flush()
yield from self._close(Client.CLOSED)
|
def drain(self, sid=None):
"""
Drain will put a connection into a drain state. All subscriptions will
immediately be put into a drain state. Upon completion, the publishers
will be drained and can not publish any additional messages. Upon draining
of the publishers, the connection will be closed. Use the `closed_cb'
option to know when the connection has moved from draining to closed.
If a sid is passed, just the subscription with that sid will be drained
without closing the connection.
"""
if self.is_draining:
return
if self.is_closed:
raise ErrConnectionClosed
if self.is_connecting or self.is_reconnecting:
raise ErrConnectionReconnecting
if sid is not None:
return self._drain_sub(sid)
# Start draining the subscriptions
self._status = Client.DRAINING_SUBS
drain_tasks = []
for ssid, sub in self._subs.items():
task = self._drain_sub(ssid)
drain_tasks.append(task)
drain_is_done = asyncio.gather(*drain_tasks)
try:
yield from asyncio.wait_for(drain_is_done, self.options["drain_timeout"])
except asyncio.TimeoutError:
drain_is_done.exception()
drain_is_done.cancel()
if self._error_cb is not None:
yield from self._error_cb(ErrDrainTimeout)
except asyncio.CancelledError:
pass
finally:
self._status = Client.DRAINING_PUBS
yield from self.flush()
yield from self._close(Client.CLOSED)
|
[
"Drain",
"will",
"put",
"a",
"connection",
"into",
"a",
"drain",
"state",
".",
"All",
"subscriptions",
"will",
"immediately",
"be",
"put",
"into",
"a",
"drain",
"state",
".",
"Upon",
"completion",
"the",
"publishers",
"will",
"be",
"drained",
"and",
"can",
"not",
"publish",
"any",
"additional",
"messages",
".",
"Upon",
"draining",
"of",
"the",
"publishers",
"the",
"connection",
"will",
"be",
"closed",
".",
"Use",
"the",
"closed_cb",
"option",
"to",
"know",
"when",
"the",
"connection",
"has",
"moved",
"from",
"draining",
"to",
"closed",
"."
] |
nats-io/asyncio-nats
|
python
|
https://github.com/nats-io/asyncio-nats/blob/39e840be0b12ce326edac0bba69aeb1be930dcb8/nats/aio/client.py#L338-L380
|
[
"def",
"drain",
"(",
"self",
",",
"sid",
"=",
"None",
")",
":",
"if",
"self",
".",
"is_draining",
":",
"return",
"if",
"self",
".",
"is_closed",
":",
"raise",
"ErrConnectionClosed",
"if",
"self",
".",
"is_connecting",
"or",
"self",
".",
"is_reconnecting",
":",
"raise",
"ErrConnectionReconnecting",
"if",
"sid",
"is",
"not",
"None",
":",
"return",
"self",
".",
"_drain_sub",
"(",
"sid",
")",
"# Start draining the subscriptions",
"self",
".",
"_status",
"=",
"Client",
".",
"DRAINING_SUBS",
"drain_tasks",
"=",
"[",
"]",
"for",
"ssid",
",",
"sub",
"in",
"self",
".",
"_subs",
".",
"items",
"(",
")",
":",
"task",
"=",
"self",
".",
"_drain_sub",
"(",
"ssid",
")",
"drain_tasks",
".",
"append",
"(",
"task",
")",
"drain_is_done",
"=",
"asyncio",
".",
"gather",
"(",
"*",
"drain_tasks",
")",
"try",
":",
"yield",
"from",
"asyncio",
".",
"wait_for",
"(",
"drain_is_done",
",",
"self",
".",
"options",
"[",
"\"drain_timeout\"",
"]",
")",
"except",
"asyncio",
".",
"TimeoutError",
":",
"drain_is_done",
".",
"exception",
"(",
")",
"drain_is_done",
".",
"cancel",
"(",
")",
"if",
"self",
".",
"_error_cb",
"is",
"not",
"None",
":",
"yield",
"from",
"self",
".",
"_error_cb",
"(",
"ErrDrainTimeout",
")",
"except",
"asyncio",
".",
"CancelledError",
":",
"pass",
"finally",
":",
"self",
".",
"_status",
"=",
"Client",
".",
"DRAINING_PUBS",
"yield",
"from",
"self",
".",
"flush",
"(",
")",
"yield",
"from",
"self",
".",
"_close",
"(",
"Client",
".",
"CLOSED",
")"
] |
39e840be0b12ce326edac0bba69aeb1be930dcb8
|
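Client.drain() in the record above drains every subscription, then the publishers, and finally closes the connection, so a caller does not need a separate close(). A minimal usage sketch in the same generator-coroutine style follows; the server URL and the io_loop keyword are assumptions about this generation of the client, and the subject and payload are hypothetical.

import asyncio
from nats.aio.client import Client as NATS

@asyncio.coroutine
def run(loop):
    nc = NATS()
    # Assumed connect arguments: a local NATS server and the io_loop
    # keyword accepted by this generation of the client.
    yield from nc.connect(servers=["nats://127.0.0.1:4222"], io_loop=loop)

    received = []

    @asyncio.coroutine
    def handler(msg):
        received.append(msg.data)

    yield from nc.subscribe("hello", cb=handler)
    yield from nc.publish("hello", b"world")

    # drain() flushes the pending subscriptions and publishers and then
    # closes the connection, so no explicit close() is needed here.
    yield from nc.drain()

loop = asyncio.get_event_loop()
loop.run_until_complete(run(loop))
loop.close()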
test
|
Client.publish
|
Sends a PUB command to the server on the specified subject.
->> PUB hello 5
->> MSG_PAYLOAD: world
<<- MSG hello 2 5
|
nats/aio/client.py
|
def publish(self, subject, payload):
"""
Sends a PUB command to the server on the specified subject.
->> PUB hello 5
->> MSG_PAYLOAD: world
<<- MSG hello 2 5
"""
if self.is_closed:
raise ErrConnectionClosed
if self.is_draining_pubs:
raise ErrConnectionDraining
payload_size = len(payload)
if payload_size > self._max_payload:
raise ErrMaxPayload
yield from self._publish(subject, _EMPTY_, payload, payload_size)
|
def publish(self, subject, payload):
"""
Sends a PUB command to the server on the specified subject.
->> PUB hello 5
->> MSG_PAYLOAD: world
<<- MSG hello 2 5
"""
if self.is_closed:
raise ErrConnectionClosed
if self.is_draining_pubs:
raise ErrConnectionDraining
payload_size = len(payload)
if payload_size > self._max_payload:
raise ErrMaxPayload
yield from self._publish(subject, _EMPTY_, payload, payload_size)
|
[
"Sends",
"a",
"PUB",
"command",
"to",
"the",
"server",
"on",
"the",
"specified",
"subject",
"."
] |
nats-io/asyncio-nats
|
python
|
https://github.com/nats-io/asyncio-nats/blob/39e840be0b12ce326edac0bba69aeb1be930dcb8/nats/aio/client.py#L419-L436
|
[
"def",
"publish",
"(",
"self",
",",
"subject",
",",
"payload",
")",
":",
"if",
"self",
".",
"is_closed",
":",
"raise",
"ErrConnectionClosed",
"if",
"self",
".",
"is_draining_pubs",
":",
"raise",
"ErrConnectionDraining",
"payload_size",
"=",
"len",
"(",
"payload",
")",
"if",
"payload_size",
">",
"self",
".",
"_max_payload",
":",
"raise",
"ErrMaxPayload",
"yield",
"from",
"self",
".",
"_publish",
"(",
"subject",
",",
"_EMPTY_",
",",
"payload",
",",
"payload_size",
")"
] |
39e840be0b12ce326edac0bba69aeb1be930dcb8
|
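The "5" in the "PUB hello 5" frame quoted in the publish() docstring above is simply the byte length of the payload, which publish() computes with len(payload) before delegating to _publish(). A small standalone check, reusing the docstring's hypothetical subject and payload:

payload = b"world"
assert len(payload) == 5

# Reconstruct the textual frame shown in the docstring.
pub_line = "PUB {0} {1}".format("hello", len(payload))
assert pub_line == "PUB hello 5"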
test
|
Client.publish_request
|
Publishes a message tagging it with a reply subscription
which can be used by those receiving the message to respond.
->> PUB hello _INBOX.2007314fe0fcb2cdc2a2914c1 5
->> MSG_PAYLOAD: world
<<- MSG hello 2 _INBOX.2007314fe0fcb2cdc2a2914c1 5
|
nats/aio/client.py
|
def publish_request(self, subject, reply, payload):
"""
Publishes a message tagging it with a reply subscription
which can be used by those receiving the message to respond.
->> PUB hello _INBOX.2007314fe0fcb2cdc2a2914c1 5
->> MSG_PAYLOAD: world
<<- MSG hello 2 _INBOX.2007314fe0fcb2cdc2a2914c1 5
"""
if self.is_closed:
raise ErrConnectionClosed
if self.is_draining_pubs:
raise ErrConnectionDraining
payload_size = len(payload)
if payload_size > self._max_payload:
raise ErrMaxPayload
yield from self._publish(subject, reply.encode(), payload, payload_size)
|
def publish_request(self, subject, reply, payload):
"""
Publishes a message tagging it with a reply subscription
which can be used by those receiving the message to respond.
->> PUB hello _INBOX.2007314fe0fcb2cdc2a2914c1 5
->> MSG_PAYLOAD: world
<<- MSG hello 2 _INBOX.2007314fe0fcb2cdc2a2914c1 5
"""
if self.is_closed:
raise ErrConnectionClosed
if self.is_draining_pubs:
raise ErrConnectionDraining
payload_size = len(payload)
if payload_size > self._max_payload:
raise ErrMaxPayload
yield from self._publish(subject, reply.encode(), payload, payload_size)
|
[
"Publishes",
"a",
"message",
"tagging",
"it",
"with",
"a",
"reply",
"subscription",
"which",
"can",
"be",
"used",
"by",
"those",
"receiving",
"the",
"message",
"to",
"respond",
"."
] |
nats-io/asyncio-nats
|
python
|
https://github.com/nats-io/asyncio-nats/blob/39e840be0b12ce326edac0bba69aeb1be930dcb8/nats/aio/client.py#L439-L457
|
[
"def",
"publish_request",
"(",
"self",
",",
"subject",
",",
"reply",
",",
"payload",
")",
":",
"if",
"self",
".",
"is_closed",
":",
"raise",
"ErrConnectionClosed",
"if",
"self",
".",
"is_draining_pubs",
":",
"raise",
"ErrConnectionDraining",
"payload_size",
"=",
"len",
"(",
"payload",
")",
"if",
"payload_size",
">",
"self",
".",
"_max_payload",
":",
"raise",
"ErrMaxPayload",
"yield",
"from",
"self",
".",
"_publish",
"(",
"subject",
",",
"reply",
".",
"encode",
"(",
")",
",",
"payload",
",",
"payload_size",
")"
] |
39e840be0b12ce326edac0bba69aeb1be930dcb8
|
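publish_request() above differs from publish() only in that an encoded reply subject is carried between the subject and the payload size, exactly as in the docstring's example frame. A standalone reconstruction of that frame, reusing the docstring's inbox name:

subject = "hello"
reply = "_INBOX.2007314fe0fcb2cdc2a2914c1"
payload = b"world"

# The reply inbox sits between the subject and the payload size.
frame = "PUB {0} {1} {2}".format(subject, reply, len(payload))
assert frame == "PUB hello _INBOX.2007314fe0fcb2cdc2a2914c1 5"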
test
|
Client._publish
|
Sends PUB command to the NATS server.
|
nats/aio/client.py
|
def _publish(self, subject, reply, payload, payload_size):
"""
Sends PUB command to the NATS server.
"""
if subject == "":
# Avoid sending messages with empty replies.
raise ErrBadSubject
payload_size_bytes = ("%d" % payload_size).encode()
pub_cmd = b''.join([PUB_OP, _SPC_, subject.encode(
), _SPC_, reply, _SPC_, payload_size_bytes, _CRLF_, payload, _CRLF_])
self.stats['out_msgs'] += 1
self.stats['out_bytes'] += payload_size
yield from self._send_command(pub_cmd)
if self._flush_queue.empty():
yield from self._flush_pending()
|
def _publish(self, subject, reply, payload, payload_size):
"""
Sends PUB command to the NATS server.
"""
if subject == "":
# Avoid sending messages with empty replies.
raise ErrBadSubject
payload_size_bytes = ("%d" % payload_size).encode()
pub_cmd = b''.join([PUB_OP, _SPC_, subject.encode(
), _SPC_, reply, _SPC_, payload_size_bytes, _CRLF_, payload, _CRLF_])
self.stats['out_msgs'] += 1
self.stats['out_bytes'] += payload_size
yield from self._send_command(pub_cmd)
if self._flush_queue.empty():
yield from self._flush_pending()
|
[
"Sends",
"PUB",
"command",
"to",
"the",
"NATS",
"server",
"."
] |
nats-io/asyncio-nats
|
python
|
https://github.com/nats-io/asyncio-nats/blob/39e840be0b12ce326edac0bba69aeb1be930dcb8/nats/aio/client.py#L460-L475
|
[
"def",
"_publish",
"(",
"self",
",",
"subject",
",",
"reply",
",",
"payload",
",",
"payload_size",
")",
":",
"if",
"subject",
"==",
"\"\"",
":",
"# Avoid sending messages with empty replies.",
"raise",
"ErrBadSubject",
"payload_size_bytes",
"=",
"(",
"\"%d\"",
"%",
"payload_size",
")",
".",
"encode",
"(",
")",
"pub_cmd",
"=",
"b''",
".",
"join",
"(",
"[",
"PUB_OP",
",",
"_SPC_",
",",
"subject",
".",
"encode",
"(",
")",
",",
"_SPC_",
",",
"reply",
",",
"_SPC_",
",",
"payload_size_bytes",
",",
"_CRLF_",
",",
"payload",
",",
"_CRLF_",
"]",
")",
"self",
".",
"stats",
"[",
"'out_msgs'",
"]",
"+=",
"1",
"self",
".",
"stats",
"[",
"'out_bytes'",
"]",
"+=",
"payload_size",
"yield",
"from",
"self",
".",
"_send_command",
"(",
"pub_cmd",
")",
"if",
"self",
".",
"_flush_queue",
".",
"empty",
"(",
")",
":",
"yield",
"from",
"self",
".",
"_flush_pending",
"(",
")"
] |
39e840be0b12ce326edac0bba69aeb1be930dcb8
|
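_publish() above assembles the raw PUB command as a single bytes join. The sketch below reproduces that join outside the client; the values of PUB_OP, _SPC_ and _CRLF_ are assumptions about the module's protocol constants, and the inbox name is hypothetical.

# Assumed protocol constants (not imported from the module).
PUB_OP, _SPC_, _CRLF_ = b'PUB', b' ', b'\r\n'

subject = "hello"
reply = b"_INBOX.abc"          # hypothetical reply inbox, already encoded
payload = b"world"
payload_size_bytes = ("%d" % len(payload)).encode()

# Same join order as _publish(): PUB <subject> <reply> <size>\r\n<payload>\r\n
pub_cmd = b''.join([PUB_OP, _SPC_, subject.encode(), _SPC_, reply,
                    _SPC_, payload_size_bytes, _CRLF_, payload, _CRLF_])
assert pub_cmd == b'PUB hello _INBOX.abc 5\r\nworld\r\n'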
test
|
Client.subscribe
|
Takes a subject string and optional queue string to send a SUB cmd,
and a callback which to which messages (Msg) will be dispatched to
be processed sequentially by default.
|
nats/aio/client.py
|
def subscribe(self, subject,
queue="",
cb=None,
future=None,
max_msgs=0,
is_async=False,
pending_msgs_limit=DEFAULT_SUB_PENDING_MSGS_LIMIT,
pending_bytes_limit=DEFAULT_SUB_PENDING_BYTES_LIMIT,
):
"""
Takes a subject string and optional queue string to send a SUB cmd,
and a callback which to which messages (Msg) will be dispatched to
be processed sequentially by default.
"""
if subject == "":
raise ErrBadSubject
if self.is_closed:
raise ErrConnectionClosed
if self.is_draining:
raise ErrConnectionDraining
sub = Subscription(subject=subject,
queue=queue,
max_msgs=max_msgs,
is_async=is_async,
)
if cb is not None:
if asyncio.iscoroutinefunction(cb):
sub.coro = cb
elif sub.is_async:
raise NatsError(
"nats: must use coroutine for async subscriptions")
else:
# NOTE: Consider to deprecate this eventually, it should always
# be coroutines otherwise they could affect the single thread,
# for now still allow to be flexible.
sub.cb = cb
sub.pending_msgs_limit = pending_msgs_limit
sub.pending_bytes_limit = pending_bytes_limit
sub.pending_queue = asyncio.Queue(
maxsize=pending_msgs_limit,
loop=self._loop,
)
# Close the delivery coroutine over the sub and error handler
# instead of having subscription type hold over state of the conn.
err_cb = self._error_cb
@asyncio.coroutine
def wait_for_msgs():
nonlocal sub
nonlocal err_cb
while True:
try:
msg = yield from sub.pending_queue.get()
sub.pending_size -= len(msg.data)
try:
# Invoke depending of type of handler.
if sub.coro is not None:
if sub.is_async:
# NOTE: Deprecate this usage in a next release,
# the handler implementation ought to decide
# the concurrency level at which the messages
# should be processed.
self._loop.create_task(sub.coro(msg))
else:
yield from sub.coro(msg)
elif sub.cb is not None:
if sub.is_async:
raise NatsError(
"nats: must use coroutine for async subscriptions")
else:
# Schedule regular callbacks to be processed sequentially.
self._loop.call_soon(sub.cb, msg)
except asyncio.CancelledError:
# In case the coroutine handler gets cancelled
# then stop task loop and return.
break
except Exception as e:
# All errors from calling a handler
# are async errors.
if err_cb is not None:
yield from err_cb(e)
except asyncio.CancelledError:
break
# Start task for each subscription, it should be cancelled
# on both unsubscribe and closing as well.
sub.wait_for_msgs_task = self._loop.create_task(
wait_for_msgs())
elif future is not None:
# Used to handle the single response from a request.
sub.future = future
else:
raise NatsError("nats: invalid subscription type")
self._ssid += 1
ssid = self._ssid
self._subs[ssid] = sub
yield from self._subscribe(sub, ssid)
return ssid
|
def subscribe(self, subject,
queue="",
cb=None,
future=None,
max_msgs=0,
is_async=False,
pending_msgs_limit=DEFAULT_SUB_PENDING_MSGS_LIMIT,
pending_bytes_limit=DEFAULT_SUB_PENDING_BYTES_LIMIT,
):
"""
Takes a subject string and optional queue string to send a SUB cmd,
and a callback which to which messages (Msg) will be dispatched to
be processed sequentially by default.
"""
if subject == "":
raise ErrBadSubject
if self.is_closed:
raise ErrConnectionClosed
if self.is_draining:
raise ErrConnectionDraining
sub = Subscription(subject=subject,
queue=queue,
max_msgs=max_msgs,
is_async=is_async,
)
if cb is not None:
if asyncio.iscoroutinefunction(cb):
sub.coro = cb
elif sub.is_async:
raise NatsError(
"nats: must use coroutine for async subscriptions")
else:
# NOTE: Consider to deprecate this eventually, it should always
# be coroutines otherwise they could affect the single thread,
# for now still allow to be flexible.
sub.cb = cb
sub.pending_msgs_limit = pending_msgs_limit
sub.pending_bytes_limit = pending_bytes_limit
sub.pending_queue = asyncio.Queue(
maxsize=pending_msgs_limit,
loop=self._loop,
)
# Close the delivery coroutine over the sub and error handler
# instead of having subscription type hold over state of the conn.
err_cb = self._error_cb
@asyncio.coroutine
def wait_for_msgs():
nonlocal sub
nonlocal err_cb
while True:
try:
msg = yield from sub.pending_queue.get()
sub.pending_size -= len(msg.data)
try:
# Invoke depending of type of handler.
if sub.coro is not None:
if sub.is_async:
# NOTE: Deprecate this usage in a next release,
# the handler implementation ought to decide
# the concurrency level at which the messages
# should be processed.
self._loop.create_task(sub.coro(msg))
else:
yield from sub.coro(msg)
elif sub.cb is not None:
if sub.is_async:
raise NatsError(
"nats: must use coroutine for async subscriptions")
else:
# Schedule regular callbacks to be processed sequentially.
self._loop.call_soon(sub.cb, msg)
except asyncio.CancelledError:
# In case the coroutine handler gets cancelled
# then stop task loop and return.
break
except Exception as e:
# All errors from calling a handler
# are async errors.
if err_cb is not None:
yield from err_cb(e)
except asyncio.CancelledError:
break
# Start task for each subscription, it should be cancelled
# on both unsubscribe and closing as well.
sub.wait_for_msgs_task = self._loop.create_task(
wait_for_msgs())
elif future is not None:
# Used to handle the single response from a request.
sub.future = future
else:
raise NatsError("nats: invalid subscription type")
self._ssid += 1
ssid = self._ssid
self._subs[ssid] = sub
yield from self._subscribe(sub, ssid)
return ssid
|
[
"Takes",
"a",
"subject",
"string",
"and",
"optional",
"queue",
"string",
"to",
"send",
"a",
"SUB",
"cmd",
"and",
"a",
"callback",
"which",
"to",
"which",
"messages",
"(",
"Msg",
")",
"will",
"be",
"dispatched",
"to",
"be",
"processed",
"sequentially",
"by",
"default",
"."
] |
nats-io/asyncio-nats
|
python
|
https://github.com/nats-io/asyncio-nats/blob/39e840be0b12ce326edac0bba69aeb1be930dcb8/nats/aio/client.py#L478-L585
|
[
"def",
"subscribe",
"(",
"self",
",",
"subject",
",",
"queue",
"=",
"\"\"",
",",
"cb",
"=",
"None",
",",
"future",
"=",
"None",
",",
"max_msgs",
"=",
"0",
",",
"is_async",
"=",
"False",
",",
"pending_msgs_limit",
"=",
"DEFAULT_SUB_PENDING_MSGS_LIMIT",
",",
"pending_bytes_limit",
"=",
"DEFAULT_SUB_PENDING_BYTES_LIMIT",
",",
")",
":",
"if",
"subject",
"==",
"\"\"",
":",
"raise",
"ErrBadSubject",
"if",
"self",
".",
"is_closed",
":",
"raise",
"ErrConnectionClosed",
"if",
"self",
".",
"is_draining",
":",
"raise",
"ErrConnectionDraining",
"sub",
"=",
"Subscription",
"(",
"subject",
"=",
"subject",
",",
"queue",
"=",
"queue",
",",
"max_msgs",
"=",
"max_msgs",
",",
"is_async",
"=",
"is_async",
",",
")",
"if",
"cb",
"is",
"not",
"None",
":",
"if",
"asyncio",
".",
"iscoroutinefunction",
"(",
"cb",
")",
":",
"sub",
".",
"coro",
"=",
"cb",
"elif",
"sub",
".",
"is_async",
":",
"raise",
"NatsError",
"(",
"\"nats: must use coroutine for async subscriptions\"",
")",
"else",
":",
"# NOTE: Consider to deprecate this eventually, it should always",
"# be coroutines otherwise they could affect the single thread,",
"# for now still allow to be flexible.",
"sub",
".",
"cb",
"=",
"cb",
"sub",
".",
"pending_msgs_limit",
"=",
"pending_msgs_limit",
"sub",
".",
"pending_bytes_limit",
"=",
"pending_bytes_limit",
"sub",
".",
"pending_queue",
"=",
"asyncio",
".",
"Queue",
"(",
"maxsize",
"=",
"pending_msgs_limit",
",",
"loop",
"=",
"self",
".",
"_loop",
",",
")",
"# Close the delivery coroutine over the sub and error handler",
"# instead of having subscription type hold over state of the conn.",
"err_cb",
"=",
"self",
".",
"_error_cb",
"@",
"asyncio",
".",
"coroutine",
"def",
"wait_for_msgs",
"(",
")",
":",
"nonlocal",
"sub",
"nonlocal",
"err_cb",
"while",
"True",
":",
"try",
":",
"msg",
"=",
"yield",
"from",
"sub",
".",
"pending_queue",
".",
"get",
"(",
")",
"sub",
".",
"pending_size",
"-=",
"len",
"(",
"msg",
".",
"data",
")",
"try",
":",
"# Invoke depending of type of handler.",
"if",
"sub",
".",
"coro",
"is",
"not",
"None",
":",
"if",
"sub",
".",
"is_async",
":",
"# NOTE: Deprecate this usage in a next release,",
"# the handler implementation ought to decide",
"# the concurrency level at which the messages",
"# should be processed.",
"self",
".",
"_loop",
".",
"create_task",
"(",
"sub",
".",
"coro",
"(",
"msg",
")",
")",
"else",
":",
"yield",
"from",
"sub",
".",
"coro",
"(",
"msg",
")",
"elif",
"sub",
".",
"cb",
"is",
"not",
"None",
":",
"if",
"sub",
".",
"is_async",
":",
"raise",
"NatsError",
"(",
"\"nats: must use coroutine for async subscriptions\"",
")",
"else",
":",
"# Schedule regular callbacks to be processed sequentially.",
"self",
".",
"_loop",
".",
"call_soon",
"(",
"sub",
".",
"cb",
",",
"msg",
")",
"except",
"asyncio",
".",
"CancelledError",
":",
"# In case the coroutine handler gets cancelled",
"# then stop task loop and return.",
"break",
"except",
"Exception",
"as",
"e",
":",
"# All errors from calling a handler",
"# are async errors.",
"if",
"err_cb",
"is",
"not",
"None",
":",
"yield",
"from",
"err_cb",
"(",
"e",
")",
"except",
"asyncio",
".",
"CancelledError",
":",
"break",
"# Start task for each subscription, it should be cancelled",
"# on both unsubscribe and closing as well.",
"sub",
".",
"wait_for_msgs_task",
"=",
"self",
".",
"_loop",
".",
"create_task",
"(",
"wait_for_msgs",
"(",
")",
")",
"elif",
"future",
"is",
"not",
"None",
":",
"# Used to handle the single response from a request.",
"sub",
".",
"future",
"=",
"future",
"else",
":",
"raise",
"NatsError",
"(",
"\"nats: invalid subscription type\"",
")",
"self",
".",
"_ssid",
"+=",
"1",
"ssid",
"=",
"self",
".",
"_ssid",
"self",
".",
"_subs",
"[",
"ssid",
"]",
"=",
"sub",
"yield",
"from",
"self",
".",
"_subscribe",
"(",
"sub",
",",
"ssid",
")",
"return",
"ssid"
] |
39e840be0b12ce326edac0bba69aeb1be930dcb8
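A minimal usage sketch for the subscribe entry above (not part of the dataset row): it assumes an already-connected nats.aio.client.Client instance `nc`; the subject "updates" and the handler name are illustrative only.

import asyncio

@asyncio.coroutine
def demo_subscribe(nc):
    # Coroutine handler: delivered sequentially by the subscription's
    # wait_for_msgs task unless is_async=True is passed.
    @asyncio.coroutine
    def updates_handler(msg):
        print("received {} on '{}'".format(msg.data, msg.subject))

    # Returns the subscription sequence id (ssid), needed later to unsubscribe.
    ssid = yield from nc.subscribe("updates", cb=updates_handler)
    return ssid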
|
test
|
Client.subscribe_async
|
Sets the subscription to use a task per message to be processed.
..deprecated:: 7.0
Will be removed in 9.0.
|
nats/aio/client.py
|
def subscribe_async(self, subject, **kwargs):
"""
Sets the subcription to use a task per message to be processed.
..deprecated:: 7.0
Will be removed 9.0.
"""
kwargs["is_async"] = True
sid = yield from self.subscribe(subject, **kwargs)
return sid
|
def subscribe_async(self, subject, **kwargs):
"""
Sets the subcription to use a task per message to be processed.
..deprecated:: 7.0
Will be removed 9.0.
"""
kwargs["is_async"] = True
sid = yield from self.subscribe(subject, **kwargs)
return sid
|
[
"Sets",
"the",
"subcription",
"to",
"use",
"a",
"task",
"per",
"message",
"to",
"be",
"processed",
"."
] |
nats-io/asyncio-nats
|
python
|
https://github.com/nats-io/asyncio-nats/blob/39e840be0b12ce326edac0bba69aeb1be930dcb8/nats/aio/client.py#L588-L597
|
[
"def",
"subscribe_async",
"(",
"self",
",",
"subject",
",",
"*",
"*",
"kwargs",
")",
":",
"kwargs",
"[",
"\"is_async\"",
"]",
"=",
"True",
"sid",
"=",
"yield",
"from",
"self",
".",
"subscribe",
"(",
"subject",
",",
"*",
"*",
"kwargs",
")",
"return",
"sid"
] |
39e840be0b12ce326edac0bba69aeb1be930dcb8
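A sketch of the deprecated helper above, again assuming an already-connected client `nc`; per the record it simply forwards to subscribe() with is_async=True, so each message is dispatched in its own task. Subject and handler names are illustrative.

import asyncio

@asyncio.coroutine
def demo_subscribe_async(nc):
    @asyncio.coroutine
    def worker(msg):
        # With is_async=True every message gets its own task, so a slow
        # handler does not hold up the rest of the queue.
        yield from asyncio.sleep(0.1)
        print("processed", msg.subject)

    # Equivalent to: yield from nc.subscribe("jobs", cb=worker, is_async=True)
    sid = yield from nc.subscribe_async("jobs", cb=worker)
    return sid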
|
test
|
Client.unsubscribe
|
Takes a subscription sequence id and removes the subscription
from the client, optionally after receiving more than max_msgs.
|
nats/aio/client.py
|
def unsubscribe(self, ssid, max_msgs=0):
"""
Takes a subscription sequence id and removes the subscription
from the client, optionally after receiving more than max_msgs.
"""
if self.is_closed:
raise ErrConnectionClosed
if self.is_draining:
raise ErrConnectionDraining
self._remove_sub(ssid, max_msgs)
# We will send these for all subs when we reconnect anyway,
# so that we can suppress here.
if not self.is_reconnecting:
yield from self.auto_unsubscribe(ssid, max_msgs)
|
def unsubscribe(self, ssid, max_msgs=0):
"""
Takes a subscription sequence id and removes the subscription
from the client, optionally after receiving more than max_msgs.
"""
if self.is_closed:
raise ErrConnectionClosed
if self.is_draining:
raise ErrConnectionDraining
self._remove_sub(ssid, max_msgs)
# We will send these for all subs when we reconnect anyway,
# so that we can suppress here.
if not self.is_reconnecting:
yield from self.auto_unsubscribe(ssid, max_msgs)
|
[
"Takes",
"a",
"subscription",
"sequence",
"id",
"and",
"removes",
"the",
"subscription",
"from",
"the",
"client",
"optionally",
"after",
"receiving",
"more",
"than",
"max_msgs",
"."
] |
nats-io/asyncio-nats
|
python
|
https://github.com/nats-io/asyncio-nats/blob/39e840be0b12ce326edac0bba69aeb1be930dcb8/nats/aio/client.py#L600-L615
|
[
"def",
"unsubscribe",
"(",
"self",
",",
"ssid",
",",
"max_msgs",
"=",
"0",
")",
":",
"if",
"self",
".",
"is_closed",
":",
"raise",
"ErrConnectionClosed",
"if",
"self",
".",
"is_draining",
":",
"raise",
"ErrConnectionDraining",
"self",
".",
"_remove_sub",
"(",
"ssid",
",",
"max_msgs",
")",
"# We will send these for all subs when we reconnect anyway,",
"# so that we can suppress here.",
"if",
"not",
"self",
".",
"is_reconnecting",
":",
"yield",
"from",
"self",
".",
"auto_unsubscribe",
"(",
"ssid",
",",
"max_msgs",
")"
] |
39e840be0b12ce326edac0bba69aeb1be930dcb8
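A short sketch for the unsubscribe entry, assuming `nc` is connected and `ssid` came from an earlier subscribe() call.

import asyncio

@asyncio.coroutine
def demo_unsubscribe(nc, ssid):
    # Drop interest immediately; pass max_msgs instead to let the
    # subscription expire only after that many more deliveries, e.g.
    # yield from nc.unsubscribe(ssid, max_msgs=2)
    yield from nc.unsubscribe(ssid)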
|
test
|
Client.request
|
Implements the request/response pattern via pub/sub
using a single wildcard subscription that handles
the responses.
|
nats/aio/client.py
|
def request(self, subject, payload, timeout=0.5, expected=1, cb=None):
"""
Implements the request/response pattern via pub/sub
using a single wildcard subscription that handles
the responses.
"""
if self.is_draining_pubs:
raise ErrConnectionDraining
# If callback given then continue to use old style.
if cb is not None:
next_inbox = INBOX_PREFIX[:]
next_inbox.extend(self._nuid.next())
inbox = next_inbox.decode()
sid = yield from self.subscribe(inbox, cb=cb)
yield from self.auto_unsubscribe(sid, expected)
yield from self.publish_request(subject, inbox, payload)
return sid
if self._resp_sub_prefix is None:
self._resp_map = {}
# Create a prefix and single wildcard subscription once.
self._resp_sub_prefix = INBOX_PREFIX[:]
self._resp_sub_prefix.extend(self._nuid.next())
self._resp_sub_prefix.extend(b'.')
resp_mux_subject = self._resp_sub_prefix[:]
resp_mux_subject.extend(b'*')
sub = Subscription(subject=resp_mux_subject.decode())
# FIXME: Allow setting pending limits for responses mux subscription.
sub.pending_msgs_limit = DEFAULT_SUB_PENDING_MSGS_LIMIT
sub.pending_bytes_limit = DEFAULT_SUB_PENDING_BYTES_LIMIT
sub.pending_queue = asyncio.Queue(
maxsize=sub.pending_msgs_limit,
loop=self._loop,
)
# Single task for handling the requests
@asyncio.coroutine
def wait_for_msgs():
nonlocal sub
while True:
try:
msg = yield from sub.pending_queue.get()
token = msg.subject[INBOX_PREFIX_LEN:]
try:
fut = self._resp_map[token]
fut.set_result(msg)
del self._resp_map[token]
except (asyncio.CancelledError, asyncio.InvalidStateError):
# Request may have timed out already so remove entry.
del self._resp_map[token]
continue
except KeyError:
# Future already handled so drop any extra
# responses which may have made it.
continue
except asyncio.CancelledError:
break
sub.wait_for_msgs_task = self._loop.create_task(
wait_for_msgs())
# Store the subscription in the subscriptions map,
# then send the protocol commands to the server.
self._ssid += 1
ssid = self._ssid
self._subs[ssid] = sub
yield from self._subscribe(sub, ssid)
# Use a new NUID for the token inbox and then use the future.
token = self._nuid.next()
inbox = self._resp_sub_prefix[:]
inbox.extend(token)
future = asyncio.Future(loop=self._loop)
self._resp_map[token.decode()] = future
yield from self.publish_request(subject, inbox.decode(), payload)
# Wait for the response or give up on timeout.
try:
msg = yield from asyncio.wait_for(future, timeout, loop=self._loop)
return msg
except asyncio.TimeoutError:
future.cancel()
raise ErrTimeout
|
def request(self, subject, payload, timeout=0.5, expected=1, cb=None):
"""
Implements the request/response pattern via pub/sub
using a single wildcard subscription that handles
the responses.
"""
if self.is_draining_pubs:
raise ErrConnectionDraining
# If callback given then continue to use old style.
if cb is not None:
next_inbox = INBOX_PREFIX[:]
next_inbox.extend(self._nuid.next())
inbox = next_inbox.decode()
sid = yield from self.subscribe(inbox, cb=cb)
yield from self.auto_unsubscribe(sid, expected)
yield from self.publish_request(subject, inbox, payload)
return sid
if self._resp_sub_prefix is None:
self._resp_map = {}
# Create a prefix and single wildcard subscription once.
self._resp_sub_prefix = INBOX_PREFIX[:]
self._resp_sub_prefix.extend(self._nuid.next())
self._resp_sub_prefix.extend(b'.')
resp_mux_subject = self._resp_sub_prefix[:]
resp_mux_subject.extend(b'*')
sub = Subscription(subject=resp_mux_subject.decode())
# FIXME: Allow setting pending limits for responses mux subscription.
sub.pending_msgs_limit = DEFAULT_SUB_PENDING_MSGS_LIMIT
sub.pending_bytes_limit = DEFAULT_SUB_PENDING_BYTES_LIMIT
sub.pending_queue = asyncio.Queue(
maxsize=sub.pending_msgs_limit,
loop=self._loop,
)
# Single task for handling the requests
@asyncio.coroutine
def wait_for_msgs():
nonlocal sub
while True:
try:
msg = yield from sub.pending_queue.get()
token = msg.subject[INBOX_PREFIX_LEN:]
try:
fut = self._resp_map[token]
fut.set_result(msg)
del self._resp_map[token]
except (asyncio.CancelledError, asyncio.InvalidStateError):
# Request may have timed out already so remove entry.
del self._resp_map[token]
continue
except KeyError:
# Future already handled so drop any extra
# responses which may have made it.
continue
except asyncio.CancelledError:
break
sub.wait_for_msgs_task = self._loop.create_task(
wait_for_msgs())
# Store the subscription in the subscriptions map,
# then send the protocol commands to the server.
self._ssid += 1
ssid = self._ssid
self._subs[ssid] = sub
yield from self._subscribe(sub, ssid)
# Use a new NUID for the token inbox and then use the future.
token = self._nuid.next()
inbox = self._resp_sub_prefix[:]
inbox.extend(token)
future = asyncio.Future(loop=self._loop)
self._resp_map[token.decode()] = future
yield from self.publish_request(subject, inbox.decode(), payload)
# Wait for the response or give up on timeout.
try:
msg = yield from asyncio.wait_for(future, timeout, loop=self._loop)
return msg
except asyncio.TimeoutError:
future.cancel()
raise ErrTimeout
|
[
"Implements",
"the",
"request",
"/",
"response",
"pattern",
"via",
"pub",
"/",
"sub",
"using",
"a",
"single",
"wildcard",
"subscription",
"that",
"handles",
"the",
"responses",
"."
] |
nats-io/asyncio-nats
|
python
|
https://github.com/nats-io/asyncio-nats/blob/39e840be0b12ce326edac0bba69aeb1be930dcb8/nats/aio/client.py#L643-L732
|
[
"def",
"request",
"(",
"self",
",",
"subject",
",",
"payload",
",",
"timeout",
"=",
"0.5",
",",
"expected",
"=",
"1",
",",
"cb",
"=",
"None",
")",
":",
"if",
"self",
".",
"is_draining_pubs",
":",
"raise",
"ErrConnectionDraining",
"# If callback given then continue to use old style.",
"if",
"cb",
"is",
"not",
"None",
":",
"next_inbox",
"=",
"INBOX_PREFIX",
"[",
":",
"]",
"next_inbox",
".",
"extend",
"(",
"self",
".",
"_nuid",
".",
"next",
"(",
")",
")",
"inbox",
"=",
"next_inbox",
".",
"decode",
"(",
")",
"sid",
"=",
"yield",
"from",
"self",
".",
"subscribe",
"(",
"inbox",
",",
"cb",
"=",
"cb",
")",
"yield",
"from",
"self",
".",
"auto_unsubscribe",
"(",
"sid",
",",
"expected",
")",
"yield",
"from",
"self",
".",
"publish_request",
"(",
"subject",
",",
"inbox",
",",
"payload",
")",
"return",
"sid",
"if",
"self",
".",
"_resp_sub_prefix",
"is",
"None",
":",
"self",
".",
"_resp_map",
"=",
"{",
"}",
"# Create a prefix and single wildcard subscription once.",
"self",
".",
"_resp_sub_prefix",
"=",
"INBOX_PREFIX",
"[",
":",
"]",
"self",
".",
"_resp_sub_prefix",
".",
"extend",
"(",
"self",
".",
"_nuid",
".",
"next",
"(",
")",
")",
"self",
".",
"_resp_sub_prefix",
".",
"extend",
"(",
"b'.'",
")",
"resp_mux_subject",
"=",
"self",
".",
"_resp_sub_prefix",
"[",
":",
"]",
"resp_mux_subject",
".",
"extend",
"(",
"b'*'",
")",
"sub",
"=",
"Subscription",
"(",
"subject",
"=",
"resp_mux_subject",
".",
"decode",
"(",
")",
")",
"# FIXME: Allow setting pending limits for responses mux subscription.",
"sub",
".",
"pending_msgs_limit",
"=",
"DEFAULT_SUB_PENDING_MSGS_LIMIT",
"sub",
".",
"pending_bytes_limit",
"=",
"DEFAULT_SUB_PENDING_BYTES_LIMIT",
"sub",
".",
"pending_queue",
"=",
"asyncio",
".",
"Queue",
"(",
"maxsize",
"=",
"sub",
".",
"pending_msgs_limit",
",",
"loop",
"=",
"self",
".",
"_loop",
",",
")",
"# Single task for handling the requests",
"@",
"asyncio",
".",
"coroutine",
"def",
"wait_for_msgs",
"(",
")",
":",
"nonlocal",
"sub",
"while",
"True",
":",
"try",
":",
"msg",
"=",
"yield",
"from",
"sub",
".",
"pending_queue",
".",
"get",
"(",
")",
"token",
"=",
"msg",
".",
"subject",
"[",
"INBOX_PREFIX_LEN",
":",
"]",
"try",
":",
"fut",
"=",
"self",
".",
"_resp_map",
"[",
"token",
"]",
"fut",
".",
"set_result",
"(",
"msg",
")",
"del",
"self",
".",
"_resp_map",
"[",
"token",
"]",
"except",
"(",
"asyncio",
".",
"CancelledError",
",",
"asyncio",
".",
"InvalidStateError",
")",
":",
"# Request may have timed out already so remove entry.",
"del",
"self",
".",
"_resp_map",
"[",
"token",
"]",
"continue",
"except",
"KeyError",
":",
"# Future already handled so drop any extra",
"# responses which may have made it.",
"continue",
"except",
"asyncio",
".",
"CancelledError",
":",
"break",
"sub",
".",
"wait_for_msgs_task",
"=",
"self",
".",
"_loop",
".",
"create_task",
"(",
"wait_for_msgs",
"(",
")",
")",
"# Store the subscription in the subscriptions map,",
"# then send the protocol commands to the server.",
"self",
".",
"_ssid",
"+=",
"1",
"ssid",
"=",
"self",
".",
"_ssid",
"self",
".",
"_subs",
"[",
"ssid",
"]",
"=",
"sub",
"yield",
"from",
"self",
".",
"_subscribe",
"(",
"sub",
",",
"ssid",
")",
"# Use a new NUID for the token inbox and then use the future.",
"token",
"=",
"self",
".",
"_nuid",
".",
"next",
"(",
")",
"inbox",
"=",
"self",
".",
"_resp_sub_prefix",
"[",
":",
"]",
"inbox",
".",
"extend",
"(",
"token",
")",
"future",
"=",
"asyncio",
".",
"Future",
"(",
"loop",
"=",
"self",
".",
"_loop",
")",
"self",
".",
"_resp_map",
"[",
"token",
".",
"decode",
"(",
")",
"]",
"=",
"future",
"yield",
"from",
"self",
".",
"publish_request",
"(",
"subject",
",",
"inbox",
".",
"decode",
"(",
")",
",",
"payload",
")",
"# Wait for the response or give up on timeout.",
"try",
":",
"msg",
"=",
"yield",
"from",
"asyncio",
".",
"wait_for",
"(",
"future",
",",
"timeout",
",",
"loop",
"=",
"self",
".",
"_loop",
")",
"return",
"msg",
"except",
"asyncio",
".",
"TimeoutError",
":",
"future",
".",
"cancel",
"(",
")",
"raise",
"ErrTimeout"
] |
39e840be0b12ce326edac0bba69aeb1be930dcb8
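A usage sketch for the new-style (future-based) request above: it assumes a connected `nc`, a responder listening on the illustrative subject "help.please", and that ErrTimeout can be imported from nats.aio.errors, which is an assumption since the module's imports are not shown in this row.

import asyncio
from nats.aio.errors import ErrTimeout  # assumed import path for the error class

@asyncio.coroutine
def demo_request(nc):
    try:
        # Single reply routed through the shared _INBOX.* wildcard subscription.
        msg = yield from nc.request("help.please", b'world', timeout=0.5)
        print("reply:", msg.data)
    except ErrTimeout:
        print("no reply within 0.5s")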
|
test
|
Client.timed_request
|
Implements the request/response pattern via pub/sub
using an ephemeral subscription whose interest is limited
to a single reply, returning the response
or raising a Timeout error.
->> SUB _INBOX.2007314fe0fcb2cdc2a2914c1 90
->> UNSUB 90 1
->> PUB hello _INBOX.2007314fe0fcb2cdc2a2914c1 5
->> MSG_PAYLOAD: world
<<- MSG hello 2 _INBOX.2007314fe0fcb2cdc2a2914c1 5
|
nats/aio/client.py
|
def timed_request(self, subject, payload, timeout=0.5):
"""
Implements the request/response pattern via pub/sub
using an ephemeral subscription which will be published
with a limited interest of 1 reply returning the response
or raising a Timeout error.
->> SUB _INBOX.2007314fe0fcb2cdc2a2914c1 90
->> UNSUB 90 1
->> PUB hello _INBOX.2007314fe0fcb2cdc2a2914c1 5
->> MSG_PAYLOAD: world
<<- MSG hello 2 _INBOX.2007314fe0fcb2cdc2a2914c1 5
"""
next_inbox = INBOX_PREFIX[:]
next_inbox.extend(self._nuid.next())
inbox = next_inbox.decode()
future = asyncio.Future(loop=self._loop)
sid = yield from self.subscribe(inbox, future=future, max_msgs=1)
yield from self.auto_unsubscribe(sid, 1)
yield from self.publish_request(subject, inbox, payload)
try:
msg = yield from asyncio.wait_for(future, timeout, loop=self._loop)
return msg
except asyncio.TimeoutError:
future.cancel()
raise ErrTimeout
|
def timed_request(self, subject, payload, timeout=0.5):
"""
Implements the request/response pattern via pub/sub
using an ephemeral subscription which will be published
with a limited interest of 1 reply returning the response
or raising a Timeout error.
->> SUB _INBOX.2007314fe0fcb2cdc2a2914c1 90
->> UNSUB 90 1
->> PUB hello _INBOX.2007314fe0fcb2cdc2a2914c1 5
->> MSG_PAYLOAD: world
<<- MSG hello 2 _INBOX.2007314fe0fcb2cdc2a2914c1 5
"""
next_inbox = INBOX_PREFIX[:]
next_inbox.extend(self._nuid.next())
inbox = next_inbox.decode()
future = asyncio.Future(loop=self._loop)
sid = yield from self.subscribe(inbox, future=future, max_msgs=1)
yield from self.auto_unsubscribe(sid, 1)
yield from self.publish_request(subject, inbox, payload)
try:
msg = yield from asyncio.wait_for(future, timeout, loop=self._loop)
return msg
except asyncio.TimeoutError:
future.cancel()
raise ErrTimeout
|
[
"Implements",
"the",
"request",
"/",
"response",
"pattern",
"via",
"pub",
"/",
"sub",
"using",
"an",
"ephemeral",
"subscription",
"which",
"will",
"be",
"published",
"with",
"a",
"limited",
"interest",
"of",
"1",
"reply",
"returning",
"the",
"response",
"or",
"raising",
"a",
"Timeout",
"error",
"."
] |
nats-io/asyncio-nats
|
python
|
https://github.com/nats-io/asyncio-nats/blob/39e840be0b12ce326edac0bba69aeb1be930dcb8/nats/aio/client.py#L735-L763
|
[
"def",
"timed_request",
"(",
"self",
",",
"subject",
",",
"payload",
",",
"timeout",
"=",
"0.5",
")",
":",
"next_inbox",
"=",
"INBOX_PREFIX",
"[",
":",
"]",
"next_inbox",
".",
"extend",
"(",
"self",
".",
"_nuid",
".",
"next",
"(",
")",
")",
"inbox",
"=",
"next_inbox",
".",
"decode",
"(",
")",
"future",
"=",
"asyncio",
".",
"Future",
"(",
"loop",
"=",
"self",
".",
"_loop",
")",
"sid",
"=",
"yield",
"from",
"self",
".",
"subscribe",
"(",
"inbox",
",",
"future",
"=",
"future",
",",
"max_msgs",
"=",
"1",
")",
"yield",
"from",
"self",
".",
"auto_unsubscribe",
"(",
"sid",
",",
"1",
")",
"yield",
"from",
"self",
".",
"publish_request",
"(",
"subject",
",",
"inbox",
",",
"payload",
")",
"try",
":",
"msg",
"=",
"yield",
"from",
"asyncio",
".",
"wait_for",
"(",
"future",
",",
"timeout",
",",
"loop",
"=",
"self",
".",
"_loop",
")",
"return",
"msg",
"except",
"asyncio",
".",
"TimeoutError",
":",
"future",
".",
"cancel",
"(",
")",
"raise",
"ErrTimeout"
] |
39e840be0b12ce326edac0bba69aeb1be930dcb8
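Same request/response pattern through the older ephemeral-inbox path; assumptions as above (connected `nc`, assumed ErrTimeout import path, illustrative subject).

import asyncio
from nats.aio.errors import ErrTimeout  # assumed import path for the error class

@asyncio.coroutine
def demo_timed_request(nc):
    try:
        # One-off inbox + UNSUB 1, matching the protocol trace in the docstring.
        msg = yield from nc.timed_request("hello", b'world', timeout=0.5)
        print("reply:", msg.data)
    except ErrTimeout:
        print("request timed out")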
|
test
|
Client.flush
|
Sends a ping to the server expecting a pong back, ensuring
that what we have written so far has made it to the server and
also enabling measurement of roundtrip time.
In case a pong is not returned within the allowed timeout,
it will raise ErrTimeout.
|
nats/aio/client.py
|
def flush(self, timeout=60):
"""
Sends a ping to the server expecting a pong back ensuring
what we have written so far has made it to the server and
also enabling measuring of roundtrip time.
In case a pong is not returned within the allowed timeout,
then it will raise ErrTimeout.
"""
if timeout <= 0:
raise ErrBadTimeout
if self.is_closed:
raise ErrConnectionClosed
future = asyncio.Future(loop=self._loop)
try:
yield from self._send_ping(future)
yield from asyncio.wait_for(future, timeout, loop=self._loop)
except asyncio.TimeoutError:
future.cancel()
raise ErrTimeout
|
def flush(self, timeout=60):
"""
Sends a ping to the server expecting a pong back ensuring
what we have written so far has made it to the server and
also enabling measuring of roundtrip time.
In case a pong is not returned within the allowed timeout,
then it will raise ErrTimeout.
"""
if timeout <= 0:
raise ErrBadTimeout
if self.is_closed:
raise ErrConnectionClosed
future = asyncio.Future(loop=self._loop)
try:
yield from self._send_ping(future)
yield from asyncio.wait_for(future, timeout, loop=self._loop)
except asyncio.TimeoutError:
future.cancel()
raise ErrTimeout
|
[
"Sends",
"a",
"ping",
"to",
"the",
"server",
"expecting",
"a",
"pong",
"back",
"ensuring",
"what",
"we",
"have",
"written",
"so",
"far",
"has",
"made",
"it",
"to",
"the",
"server",
"and",
"also",
"enabling",
"measuring",
"of",
"roundtrip",
"time",
".",
"In",
"case",
"a",
"pong",
"is",
"not",
"returned",
"within",
"the",
"allowed",
"timeout",
"then",
"it",
"will",
"raise",
"ErrTimeout",
"."
] |
nats-io/asyncio-nats
|
python
|
https://github.com/nats-io/asyncio-nats/blob/39e840be0b12ce326edac0bba69aeb1be930dcb8/nats/aio/client.py#L787-L807
|
[
"def",
"flush",
"(",
"self",
",",
"timeout",
"=",
"60",
")",
":",
"if",
"timeout",
"<=",
"0",
":",
"raise",
"ErrBadTimeout",
"if",
"self",
".",
"is_closed",
":",
"raise",
"ErrConnectionClosed",
"future",
"=",
"asyncio",
".",
"Future",
"(",
"loop",
"=",
"self",
".",
"_loop",
")",
"try",
":",
"yield",
"from",
"self",
".",
"_send_ping",
"(",
"future",
")",
"yield",
"from",
"asyncio",
".",
"wait_for",
"(",
"future",
",",
"timeout",
",",
"loop",
"=",
"self",
".",
"_loop",
")",
"except",
"asyncio",
".",
"TimeoutError",
":",
"future",
".",
"cancel",
"(",
")",
"raise",
"ErrTimeout"
] |
39e840be0b12ce326edac0bba69aeb1be930dcb8
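A sketch showing the roundtrip-time use of flush() mentioned in the docstring, assuming a connected `nc`.

import asyncio
import time

@asyncio.coroutine
def demo_flush(nc):
    start = time.monotonic()
    # Raises ErrTimeout if no PONG arrives within one second.
    yield from nc.flush(timeout=1)
    print("roundtrip took {:.3f}s".format(time.monotonic() - start))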
|
test
|
Client._select_next_server
|
Looks in the server pool for an available server
and attempts to connect.
|
nats/aio/client.py
|
def _select_next_server(self):
"""
Looks up in the server pool for an available server
and attempts to connect.
"""
while True:
if len(self._server_pool) == 0:
self._current_server = None
raise ErrNoServers
now = time.monotonic()
s = self._server_pool.pop(0)
if self.options["max_reconnect_attempts"] > 0:
if s.reconnects > self.options["max_reconnect_attempts"]:
# Discard server since already tried to reconnect too many times
continue
# Not yet exceeded max_reconnect_attempts so can still use
# this server in the future.
self._server_pool.append(s)
if s.last_attempt is not None and now < s.last_attempt + self.options["reconnect_time_wait"]:
# Backoff connecting to server if we attempted recently.
yield from asyncio.sleep(self.options["reconnect_time_wait"], loop=self._loop)
try:
s.last_attempt = time.monotonic()
r, w = yield from asyncio.open_connection(
s.uri.hostname,
s.uri.port,
loop=self._loop,
limit=DEFAULT_BUFFER_SIZE)
self._current_server = s
# We keep a reference to the initial transport we used when
# establishing the connection in case we later upgrade to TLS
# after getting the first INFO message. This is in order to
# prevent the GC closing the socket after we send CONNECT
# and replace the transport.
#
# See https://github.com/nats-io/asyncio-nats/issues/43
self._bare_io_reader = self._io_reader = r
self._bare_io_writer = self._io_writer = w
break
except Exception as e:
s.last_attempt = time.monotonic()
s.reconnects += 1
self._err = e
if self._error_cb is not None:
yield from self._error_cb(e)
continue
|
def _select_next_server(self):
"""
Looks up in the server pool for an available server
and attempts to connect.
"""
while True:
if len(self._server_pool) == 0:
self._current_server = None
raise ErrNoServers
now = time.monotonic()
s = self._server_pool.pop(0)
if self.options["max_reconnect_attempts"] > 0:
if s.reconnects > self.options["max_reconnect_attempts"]:
# Discard server since already tried to reconnect too many times
continue
# Not yet exceeded max_reconnect_attempts so can still use
# this server in the future.
self._server_pool.append(s)
if s.last_attempt is not None and now < s.last_attempt + self.options["reconnect_time_wait"]:
# Backoff connecting to server if we attempted recently.
yield from asyncio.sleep(self.options["reconnect_time_wait"], loop=self._loop)
try:
s.last_attempt = time.monotonic()
r, w = yield from asyncio.open_connection(
s.uri.hostname,
s.uri.port,
loop=self._loop,
limit=DEFAULT_BUFFER_SIZE)
self._current_server = s
# We keep a reference to the initial transport we used when
# establishing the connection in case we later upgrade to TLS
# after getting the first INFO message. This is in order to
# prevent the GC closing the socket after we send CONNECT
# and replace the transport.
#
# See https://github.com/nats-io/asyncio-nats/issues/43
self._bare_io_reader = self._io_reader = r
self._bare_io_writer = self._io_writer = w
break
except Exception as e:
s.last_attempt = time.monotonic()
s.reconnects += 1
self._err = e
if self._error_cb is not None:
yield from self._error_cb(e)
continue
|
[
"Looks",
"up",
"in",
"the",
"server",
"pool",
"for",
"an",
"available",
"server",
"and",
"attempts",
"to",
"connect",
"."
] |
nats-io/asyncio-nats
|
python
|
https://github.com/nats-io/asyncio-nats/blob/39e840be0b12ce326edac0bba69aeb1be930dcb8/nats/aio/client.py#L932-L982
|
[
"def",
"_select_next_server",
"(",
"self",
")",
":",
"while",
"True",
":",
"if",
"len",
"(",
"self",
".",
"_server_pool",
")",
"==",
"0",
":",
"self",
".",
"_current_server",
"=",
"None",
"raise",
"ErrNoServers",
"now",
"=",
"time",
".",
"monotonic",
"(",
")",
"s",
"=",
"self",
".",
"_server_pool",
".",
"pop",
"(",
"0",
")",
"if",
"self",
".",
"options",
"[",
"\"max_reconnect_attempts\"",
"]",
">",
"0",
":",
"if",
"s",
".",
"reconnects",
">",
"self",
".",
"options",
"[",
"\"max_reconnect_attempts\"",
"]",
":",
"# Discard server since already tried to reconnect too many times",
"continue",
"# Not yet exceeded max_reconnect_attempts so can still use",
"# this server in the future.",
"self",
".",
"_server_pool",
".",
"append",
"(",
"s",
")",
"if",
"s",
".",
"last_attempt",
"is",
"not",
"None",
"and",
"now",
"<",
"s",
".",
"last_attempt",
"+",
"self",
".",
"options",
"[",
"\"reconnect_time_wait\"",
"]",
":",
"# Backoff connecting to server if we attempted recently.",
"yield",
"from",
"asyncio",
".",
"sleep",
"(",
"self",
".",
"options",
"[",
"\"reconnect_time_wait\"",
"]",
",",
"loop",
"=",
"self",
".",
"_loop",
")",
"try",
":",
"s",
".",
"last_attempt",
"=",
"time",
".",
"monotonic",
"(",
")",
"r",
",",
"w",
"=",
"yield",
"from",
"asyncio",
".",
"open_connection",
"(",
"s",
".",
"uri",
".",
"hostname",
",",
"s",
".",
"uri",
".",
"port",
",",
"loop",
"=",
"self",
".",
"_loop",
",",
"limit",
"=",
"DEFAULT_BUFFER_SIZE",
")",
"self",
".",
"_current_server",
"=",
"s",
"# We keep a reference to the initial transport we used when",
"# establishing the connection in case we later upgrade to TLS",
"# after getting the first INFO message. This is in order to",
"# prevent the GC closing the socket after we send CONNECT",
"# and replace the transport.",
"#",
"# See https://github.com/nats-io/asyncio-nats/issues/43",
"self",
".",
"_bare_io_reader",
"=",
"self",
".",
"_io_reader",
"=",
"r",
"self",
".",
"_bare_io_writer",
"=",
"self",
".",
"_io_writer",
"=",
"w",
"break",
"except",
"Exception",
"as",
"e",
":",
"s",
".",
"last_attempt",
"=",
"time",
".",
"monotonic",
"(",
")",
"s",
".",
"reconnects",
"+=",
"1",
"self",
".",
"_err",
"=",
"e",
"if",
"self",
".",
"_error_cb",
"is",
"not",
"None",
":",
"yield",
"from",
"self",
".",
"_error_cb",
"(",
"e",
")",
"continue"
] |
39e840be0b12ce326edac0bba69aeb1be930dcb8
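The record above is internal to the client; the snippet below is a standalone sketch of the same round-robin-with-backoff idea (plain dicts and a synchronous sleep instead of Srv objects and the event loop), not the library API.

import time

def pick_next(server_pool, max_reconnect_attempts, reconnect_time_wait):
    # Pop from the front, drop servers that already failed too often,
    # requeue the rest, and back off if this server was tried recently.
    while server_pool:
        s = server_pool.pop(0)
        if 0 < max_reconnect_attempts < s["reconnects"]:
            continue  # discard: exceeded max_reconnect_attempts
        server_pool.append(s)
        last = s.get("last_attempt")
        if last is not None and time.monotonic() < last + reconnect_time_wait:
            time.sleep(reconnect_time_wait)
        s["last_attempt"] = time.monotonic()
        return s
    raise RuntimeError("no servers available")

# pick_next([{"reconnects": 0, "last_attempt": None}], 10, 2.0)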
|
test
|
Client._process_err
|
Processes the raw error message sent by the server
and closes the connection with the current server.
|
nats/aio/client.py
|
def _process_err(self, err_msg):
"""
Processes the raw error message sent by the server
and close connection with current server.
"""
if STALE_CONNECTION in err_msg:
yield from self._process_op_err(ErrStaleConnection)
return
if AUTHORIZATION_VIOLATION in err_msg:
self._err = ErrAuthorization
else:
m = b'nats: ' + err_msg[0]
self._err = NatsError(m.decode())
do_cbs = False
if not self.is_connecting:
do_cbs = True
# FIXME: Some errors such as 'Invalid Subscription'
# do not cause the server to close the connection.
# For now we handle similar as other clients and close.
self._loop.create_task(self._close(Client.CLOSED, do_cbs))
|
def _process_err(self, err_msg):
"""
Processes the raw error message sent by the server
and close connection with current server.
"""
if STALE_CONNECTION in err_msg:
yield from self._process_op_err(ErrStaleConnection)
return
if AUTHORIZATION_VIOLATION in err_msg:
self._err = ErrAuthorization
else:
m = b'nats: ' + err_msg[0]
self._err = NatsError(m.decode())
do_cbs = False
if not self.is_connecting:
do_cbs = True
# FIXME: Some errors such as 'Invalid Subscription'
# do not cause the server to close the connection.
# For now we handle similar as other clients and close.
self._loop.create_task(self._close(Client.CLOSED, do_cbs))
|
[
"Processes",
"the",
"raw",
"error",
"message",
"sent",
"by",
"the",
"server",
"and",
"close",
"connection",
"with",
"current",
"server",
"."
] |
nats-io/asyncio-nats
|
python
|
https://github.com/nats-io/asyncio-nats/blob/39e840be0b12ce326edac0bba69aeb1be930dcb8/nats/aio/client.py#L985-L1007
|
[
"def",
"_process_err",
"(",
"self",
",",
"err_msg",
")",
":",
"if",
"STALE_CONNECTION",
"in",
"err_msg",
":",
"yield",
"from",
"self",
".",
"_process_op_err",
"(",
"ErrStaleConnection",
")",
"return",
"if",
"AUTHORIZATION_VIOLATION",
"in",
"err_msg",
":",
"self",
".",
"_err",
"=",
"ErrAuthorization",
"else",
":",
"m",
"=",
"b'nats: '",
"+",
"err_msg",
"[",
"0",
"]",
"self",
".",
"_err",
"=",
"NatsError",
"(",
"m",
".",
"decode",
"(",
")",
")",
"do_cbs",
"=",
"False",
"if",
"not",
"self",
".",
"is_connecting",
":",
"do_cbs",
"=",
"True",
"# FIXME: Some errors such as 'Invalid Subscription'",
"# do not cause the server to close the connection.",
"# For now we handle similar as other clients and close.",
"self",
".",
"_loop",
".",
"create_task",
"(",
"self",
".",
"_close",
"(",
"Client",
".",
"CLOSED",
",",
"do_cbs",
")",
")"
] |
39e840be0b12ce326edac0bba69aeb1be930dcb8
|
test
|
Client._process_op_err
|
Process errors which occurred while reading or parsing
the protocol. If allow_reconnect is enabled it will
try to switch the server to which it is currently connected,
otherwise it will disconnect.
|
nats/aio/client.py
|
def _process_op_err(self, e):
"""
Process errors which occured while reading or parsing
the protocol. If allow_reconnect is enabled it will
try to switch the server to which it is currently connected
otherwise it will disconnect.
"""
if self.is_connecting or self.is_closed or self.is_reconnecting:
return
if self.options["allow_reconnect"] and self.is_connected:
self._status = Client.RECONNECTING
self._ps.reset()
if self._reconnection_task is not None and not self._reconnection_task.cancelled():
# Cancel the previous task in case it may still be running.
self._reconnection_task.cancel()
self._reconnection_task = self._loop.create_task(self._attempt_reconnect())
else:
self._process_disconnect()
self._err = e
yield from self._close(Client.CLOSED, True)
|
def _process_op_err(self, e):
"""
Process errors which occured while reading or parsing
the protocol. If allow_reconnect is enabled it will
try to switch the server to which it is currently connected
otherwise it will disconnect.
"""
if self.is_connecting or self.is_closed or self.is_reconnecting:
return
if self.options["allow_reconnect"] and self.is_connected:
self._status = Client.RECONNECTING
self._ps.reset()
if self._reconnection_task is not None and not self._reconnection_task.cancelled():
# Cancel the previous task in case it may still be running.
self._reconnection_task.cancel()
self._reconnection_task = self._loop.create_task(self._attempt_reconnect())
else:
self._process_disconnect()
self._err = e
yield from self._close(Client.CLOSED, True)
|
[
"Process",
"errors",
"which",
"occured",
"while",
"reading",
"or",
"parsing",
"the",
"protocol",
".",
"If",
"allow_reconnect",
"is",
"enabled",
"it",
"will",
"try",
"to",
"switch",
"the",
"server",
"to",
"which",
"it",
"is",
"currently",
"connected",
"otherwise",
"it",
"will",
"disconnect",
"."
] |
nats-io/asyncio-nats
|
python
|
https://github.com/nats-io/asyncio-nats/blob/39e840be0b12ce326edac0bba69aeb1be930dcb8/nats/aio/client.py#L1010-L1032
|
[
"def",
"_process_op_err",
"(",
"self",
",",
"e",
")",
":",
"if",
"self",
".",
"is_connecting",
"or",
"self",
".",
"is_closed",
"or",
"self",
".",
"is_reconnecting",
":",
"return",
"if",
"self",
".",
"options",
"[",
"\"allow_reconnect\"",
"]",
"and",
"self",
".",
"is_connected",
":",
"self",
".",
"_status",
"=",
"Client",
".",
"RECONNECTING",
"self",
".",
"_ps",
".",
"reset",
"(",
")",
"if",
"self",
".",
"_reconnection_task",
"is",
"not",
"None",
"and",
"not",
"self",
".",
"_reconnection_task",
".",
"cancelled",
"(",
")",
":",
"# Cancel the previous task in case it may still be running.",
"self",
".",
"_reconnection_task",
".",
"cancel",
"(",
")",
"self",
".",
"_reconnection_task",
"=",
"self",
".",
"_loop",
".",
"create_task",
"(",
"self",
".",
"_attempt_reconnect",
"(",
")",
")",
"else",
":",
"self",
".",
"_process_disconnect",
"(",
")",
"self",
".",
"_err",
"=",
"e",
"yield",
"from",
"self",
".",
"_close",
"(",
"Client",
".",
"CLOSED",
",",
"True",
")"
] |
39e840be0b12ce326edac0bba69aeb1be930dcb8
|
test
|
Client._connect_command
|
Generates a JSON string with the params to be used
when sending CONNECT to the server.
->> CONNECT {"lang": "python3"}
|
nats/aio/client.py
|
def _connect_command(self):
'''
Generates a JSON string with the params to be used
when sending CONNECT to the server.
->> CONNECT {"lang": "python3"}
'''
options = {
"verbose": self.options["verbose"],
"pedantic": self.options["pedantic"],
"lang": __lang__,
"version": __version__,
"protocol": PROTOCOL
}
if "auth_required" in self._server_info:
if self._server_info["auth_required"]:
# In case there is no password, then consider handle
# sending a token instead.
if self.options["user"] is not None and self.options["password"] is not None:
options["user"] = self.options["user"]
options["pass"] = self.options["password"]
elif self.options["token"] is not None:
options["auth_token"] = self.options["token"]
elif self._current_server.uri.password is None:
options["auth_token"] = self._current_server.uri.username
else:
options["user"] = self._current_server.uri.username
options["pass"] = self._current_server.uri.password
if self.options["name"] is not None:
options["name"] = self.options["name"]
if self.options["no_echo"] is not None:
options["echo"] = not self.options["no_echo"]
connect_opts = json.dumps(options, sort_keys=True)
return b''.join([CONNECT_OP + _SPC_ + connect_opts.encode() + _CRLF_])
|
def _connect_command(self):
'''
Generates a JSON string with the params to be used
when sending CONNECT to the server.
->> CONNECT {"lang": "python3"}
'''
options = {
"verbose": self.options["verbose"],
"pedantic": self.options["pedantic"],
"lang": __lang__,
"version": __version__,
"protocol": PROTOCOL
}
if "auth_required" in self._server_info:
if self._server_info["auth_required"]:
# In case there is no password, then consider handle
# sending a token instead.
if self.options["user"] is not None and self.options["password"] is not None:
options["user"] = self.options["user"]
options["pass"] = self.options["password"]
elif self.options["token"] is not None:
options["auth_token"] = self.options["token"]
elif self._current_server.uri.password is None:
options["auth_token"] = self._current_server.uri.username
else:
options["user"] = self._current_server.uri.username
options["pass"] = self._current_server.uri.password
if self.options["name"] is not None:
options["name"] = self.options["name"]
if self.options["no_echo"] is not None:
options["echo"] = not self.options["no_echo"]
connect_opts = json.dumps(options, sort_keys=True)
return b''.join([CONNECT_OP + _SPC_ + connect_opts.encode() + _CRLF_])
|
[
"Generates",
"a",
"JSON",
"string",
"with",
"the",
"params",
"to",
"be",
"used",
"when",
"sending",
"CONNECT",
"to",
"the",
"server",
"."
] |
nats-io/asyncio-nats
|
python
|
https://github.com/nats-io/asyncio-nats/blob/39e840be0b12ce326edac0bba69aeb1be930dcb8/nats/aio/client.py#L1111-L1146
|
[
"def",
"_connect_command",
"(",
"self",
")",
":",
"options",
"=",
"{",
"\"verbose\"",
":",
"self",
".",
"options",
"[",
"\"verbose\"",
"]",
",",
"\"pedantic\"",
":",
"self",
".",
"options",
"[",
"\"pedantic\"",
"]",
",",
"\"lang\"",
":",
"__lang__",
",",
"\"version\"",
":",
"__version__",
",",
"\"protocol\"",
":",
"PROTOCOL",
"}",
"if",
"\"auth_required\"",
"in",
"self",
".",
"_server_info",
":",
"if",
"self",
".",
"_server_info",
"[",
"\"auth_required\"",
"]",
":",
"# In case there is no password, then consider handle",
"# sending a token instead.",
"if",
"self",
".",
"options",
"[",
"\"user\"",
"]",
"is",
"not",
"None",
"and",
"self",
".",
"options",
"[",
"\"password\"",
"]",
"is",
"not",
"None",
":",
"options",
"[",
"\"user\"",
"]",
"=",
"self",
".",
"options",
"[",
"\"user\"",
"]",
"options",
"[",
"\"pass\"",
"]",
"=",
"self",
".",
"options",
"[",
"\"password\"",
"]",
"elif",
"self",
".",
"options",
"[",
"\"token\"",
"]",
"is",
"not",
"None",
":",
"options",
"[",
"\"auth_token\"",
"]",
"=",
"self",
".",
"options",
"[",
"\"token\"",
"]",
"elif",
"self",
".",
"_current_server",
".",
"uri",
".",
"password",
"is",
"None",
":",
"options",
"[",
"\"auth_token\"",
"]",
"=",
"self",
".",
"_current_server",
".",
"uri",
".",
"username",
"else",
":",
"options",
"[",
"\"user\"",
"]",
"=",
"self",
".",
"_current_server",
".",
"uri",
".",
"username",
"options",
"[",
"\"pass\"",
"]",
"=",
"self",
".",
"_current_server",
".",
"uri",
".",
"password",
"if",
"self",
".",
"options",
"[",
"\"name\"",
"]",
"is",
"not",
"None",
":",
"options",
"[",
"\"name\"",
"]",
"=",
"self",
".",
"options",
"[",
"\"name\"",
"]",
"if",
"self",
".",
"options",
"[",
"\"no_echo\"",
"]",
"is",
"not",
"None",
":",
"options",
"[",
"\"echo\"",
"]",
"=",
"not",
"self",
".",
"options",
"[",
"\"no_echo\"",
"]",
"connect_opts",
"=",
"json",
".",
"dumps",
"(",
"options",
",",
"sort_keys",
"=",
"True",
")",
"return",
"b''",
".",
"join",
"(",
"[",
"CONNECT_OP",
"+",
"_SPC_",
"+",
"connect_opts",
".",
"encode",
"(",
")",
"+",
"_CRLF_",
"]",
")"
] |
39e840be0b12ce326edac0bba69aeb1be930dcb8
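An illustration of the wire line _connect_command builds: the options dict below is made up (including the protocol number and the credentials), whereas the real method fills it from self.options and the server INFO.

import json

options = {
    "verbose": False,
    "pedantic": False,
    "lang": "python3",
    "version": "0.0.0",
    "protocol": 1,
    "user": "derek",
    "pass": "s3cr3t",
}
# Same shape as the return value: CONNECT, a space, sorted JSON, CRLF.
line = b'CONNECT ' + json.dumps(options, sort_keys=True).encode() + b'\r\n'
print(line)  # b'CONNECT {"lang": "python3", "pass": "s3cr3t", ...}\r\n'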
|
test
|
Client._process_pong
|
Process PONG sent by server.
|
nats/aio/client.py
|
def _process_pong(self):
"""
Process PONG sent by server.
"""
if len(self._pongs) > 0:
future = self._pongs.pop(0)
future.set_result(True)
self._pongs_received += 1
self._pings_outstanding -= 1
|
def _process_pong(self):
"""
Process PONG sent by server.
"""
if len(self._pongs) > 0:
future = self._pongs.pop(0)
future.set_result(True)
self._pongs_received += 1
self._pings_outstanding -= 1
|
[
"Process",
"PONG",
"sent",
"by",
"server",
"."
] |
nats-io/asyncio-nats
|
python
|
https://github.com/nats-io/asyncio-nats/blob/39e840be0b12ce326edac0bba69aeb1be930dcb8/nats/aio/client.py#L1157-L1165
|
[
"def",
"_process_pong",
"(",
"self",
")",
":",
"if",
"len",
"(",
"self",
".",
"_pongs",
")",
">",
"0",
":",
"future",
"=",
"self",
".",
"_pongs",
".",
"pop",
"(",
"0",
")",
"future",
".",
"set_result",
"(",
"True",
")",
"self",
".",
"_pongs_received",
"+=",
"1",
"self",
".",
"_pings_outstanding",
"-=",
"1"
] |
39e840be0b12ce326edac0bba69aeb1be930dcb8
|
test
|
Client._process_msg
|
Process MSG sent by server.
|
nats/aio/client.py
|
def _process_msg(self, sid, subject, reply, data):
"""
Process MSG sent by server.
"""
payload_size = len(data)
self.stats['in_msgs'] += 1
self.stats['in_bytes'] += payload_size
sub = self._subs.get(sid)
if sub is None:
# Skip in case no subscription present.
return
sub.received += 1
if sub.max_msgs > 0 and sub.received >= sub.max_msgs:
# Enough messages so can throwaway subscription now.
self._subs.pop(sid, None)
msg = self._build_message(subject, reply, data)
# Check if it is an old style request.
if sub.future is not None:
if sub.future.cancelled():
# Already gave up, nothing to do.
return
sub.future.set_result(msg)
return
# Let subscription wait_for_msgs coroutine process the messages,
# but in case sending to the subscription task would block,
# then consider it to be an slow consumer and drop the message.
try:
sub.pending_size += payload_size
if sub.pending_size >= sub.pending_bytes_limit:
# Substract again the bytes since throwing away
# the message so would not be pending data.
sub.pending_size -= payload_size
if self._error_cb is not None:
yield from self._error_cb(
ErrSlowConsumer(subject=subject, sid=sid))
return
sub.pending_queue.put_nowait(msg)
except asyncio.QueueFull:
if self._error_cb is not None:
yield from self._error_cb(
ErrSlowConsumer(subject=subject, sid=sid))
|
def _process_msg(self, sid, subject, reply, data):
"""
Process MSG sent by server.
"""
payload_size = len(data)
self.stats['in_msgs'] += 1
self.stats['in_bytes'] += payload_size
sub = self._subs.get(sid)
if sub is None:
# Skip in case no subscription present.
return
sub.received += 1
if sub.max_msgs > 0 and sub.received >= sub.max_msgs:
# Enough messages so can throwaway subscription now.
self._subs.pop(sid, None)
msg = self._build_message(subject, reply, data)
# Check if it is an old style request.
if sub.future is not None:
if sub.future.cancelled():
# Already gave up, nothing to do.
return
sub.future.set_result(msg)
return
# Let subscription wait_for_msgs coroutine process the messages,
# but in case sending to the subscription task would block,
# then consider it to be an slow consumer and drop the message.
try:
sub.pending_size += payload_size
if sub.pending_size >= sub.pending_bytes_limit:
# Substract again the bytes since throwing away
# the message so would not be pending data.
sub.pending_size -= payload_size
if self._error_cb is not None:
yield from self._error_cb(
ErrSlowConsumer(subject=subject, sid=sid))
return
sub.pending_queue.put_nowait(msg)
except asyncio.QueueFull:
if self._error_cb is not None:
yield from self._error_cb(
ErrSlowConsumer(subject=subject, sid=sid))
|
[
"Process",
"MSG",
"sent",
"by",
"server",
"."
] |
nats-io/asyncio-nats
|
python
|
https://github.com/nats-io/asyncio-nats/blob/39e840be0b12ce326edac0bba69aeb1be930dcb8/nats/aio/client.py#L1168-L1213
|
[
"def",
"_process_msg",
"(",
"self",
",",
"sid",
",",
"subject",
",",
"reply",
",",
"data",
")",
":",
"payload_size",
"=",
"len",
"(",
"data",
")",
"self",
".",
"stats",
"[",
"'in_msgs'",
"]",
"+=",
"1",
"self",
".",
"stats",
"[",
"'in_bytes'",
"]",
"+=",
"payload_size",
"sub",
"=",
"self",
".",
"_subs",
".",
"get",
"(",
"sid",
")",
"if",
"sub",
"is",
"None",
":",
"# Skip in case no subscription present.",
"return",
"sub",
".",
"received",
"+=",
"1",
"if",
"sub",
".",
"max_msgs",
">",
"0",
"and",
"sub",
".",
"received",
">=",
"sub",
".",
"max_msgs",
":",
"# Enough messages so can throwaway subscription now.",
"self",
".",
"_subs",
".",
"pop",
"(",
"sid",
",",
"None",
")",
"msg",
"=",
"self",
".",
"_build_message",
"(",
"subject",
",",
"reply",
",",
"data",
")",
"# Check if it is an old style request.",
"if",
"sub",
".",
"future",
"is",
"not",
"None",
":",
"if",
"sub",
".",
"future",
".",
"cancelled",
"(",
")",
":",
"# Already gave up, nothing to do.",
"return",
"sub",
".",
"future",
".",
"set_result",
"(",
"msg",
")",
"return",
"# Let subscription wait_for_msgs coroutine process the messages,",
"# but in case sending to the subscription task would block,",
"# then consider it to be an slow consumer and drop the message.",
"try",
":",
"sub",
".",
"pending_size",
"+=",
"payload_size",
"if",
"sub",
".",
"pending_size",
">=",
"sub",
".",
"pending_bytes_limit",
":",
"# Substract again the bytes since throwing away",
"# the message so would not be pending data.",
"sub",
".",
"pending_size",
"-=",
"payload_size",
"if",
"self",
".",
"_error_cb",
"is",
"not",
"None",
":",
"yield",
"from",
"self",
".",
"_error_cb",
"(",
"ErrSlowConsumer",
"(",
"subject",
"=",
"subject",
",",
"sid",
"=",
"sid",
")",
")",
"return",
"sub",
".",
"pending_queue",
".",
"put_nowait",
"(",
"msg",
")",
"except",
"asyncio",
".",
"QueueFull",
":",
"if",
"self",
".",
"_error_cb",
"is",
"not",
"None",
":",
"yield",
"from",
"self",
".",
"_error_cb",
"(",
"ErrSlowConsumer",
"(",
"subject",
"=",
"subject",
",",
"sid",
"=",
"sid",
")",
")"
] |
39e840be0b12ce326edac0bba69aeb1be930dcb8
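The slow-consumer guard above is driven by the per-subscription limits that subscribe() accepts; a sketch of setting them, assuming a connected `nc` and an illustrative subject.

import asyncio

@asyncio.coroutine
def demo_pending_limits(nc):
    @asyncio.coroutine
    def slow_handler(msg):
        yield from asyncio.sleep(1)  # deliberately slow consumer

    # Once pending_queue or pending_size hits a limit, _process_msg drops the
    # message and reports ErrSlowConsumer through the error callback.
    yield from nc.subscribe("firehose",
                            cb=slow_handler,
                            pending_msgs_limit=1024,
                            pending_bytes_limit=1024 * 1024)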
|
test
|
Client._process_info
|
Process INFO lines sent by the server to reconfigure the client
with the latest updates from the cluster to enable server discovery.
|
nats/aio/client.py
|
def _process_info(self, info):
"""
Process INFO lines sent by the server to reconfigure client
with latest updates from cluster to enable server discovery.
"""
if 'connect_urls' in info:
if info['connect_urls']:
connect_urls = []
for connect_url in info['connect_urls']:
uri = urlparse("nats://%s" % connect_url)
srv = Srv(uri)
srv.discovered = True
# Filter for any similar server in the server pool already.
should_add = True
for s in self._server_pool:
if uri.netloc == s.uri.netloc:
should_add = False
if should_add:
connect_urls.append(srv)
if self.options["dont_randomize"] is not True:
shuffle(connect_urls)
for srv in connect_urls:
self._server_pool.append(srv)
|
def _process_info(self, info):
"""
Process INFO lines sent by the server to reconfigure client
with latest updates from cluster to enable server discovery.
"""
if 'connect_urls' in info:
if info['connect_urls']:
connect_urls = []
for connect_url in info['connect_urls']:
uri = urlparse("nats://%s" % connect_url)
srv = Srv(uri)
srv.discovered = True
# Filter for any similar server in the server pool already.
should_add = True
for s in self._server_pool:
if uri.netloc == s.uri.netloc:
should_add = False
if should_add:
connect_urls.append(srv)
if self.options["dont_randomize"] is not True:
shuffle(connect_urls)
for srv in connect_urls:
self._server_pool.append(srv)
|
[
"Process",
"INFO",
"lines",
"sent",
"by",
"the",
"server",
"to",
"reconfigure",
"client",
"with",
"latest",
"updates",
"from",
"cluster",
"to",
"enable",
"server",
"discovery",
"."
] |
nats-io/asyncio-nats
|
python
|
https://github.com/nats-io/asyncio-nats/blob/39e840be0b12ce326edac0bba69aeb1be930dcb8/nats/aio/client.py#L1226-L1250
|
[
"def",
"_process_info",
"(",
"self",
",",
"info",
")",
":",
"if",
"'connect_urls'",
"in",
"info",
":",
"if",
"info",
"[",
"'connect_urls'",
"]",
":",
"connect_urls",
"=",
"[",
"]",
"for",
"connect_url",
"in",
"info",
"[",
"'connect_urls'",
"]",
":",
"uri",
"=",
"urlparse",
"(",
"\"nats://%s\"",
"%",
"connect_url",
")",
"srv",
"=",
"Srv",
"(",
"uri",
")",
"srv",
".",
"discovered",
"=",
"True",
"# Filter for any similar server in the server pool already.",
"should_add",
"=",
"True",
"for",
"s",
"in",
"self",
".",
"_server_pool",
":",
"if",
"uri",
".",
"netloc",
"==",
"s",
".",
"uri",
".",
"netloc",
":",
"should_add",
"=",
"False",
"if",
"should_add",
":",
"connect_urls",
".",
"append",
"(",
"srv",
")",
"if",
"self",
".",
"options",
"[",
"\"dont_randomize\"",
"]",
"is",
"not",
"True",
":",
"shuffle",
"(",
"connect_urls",
")",
"for",
"srv",
"in",
"connect_urls",
":",
"self",
".",
"_server_pool",
".",
"append",
"(",
"srv",
")"
] |
39e840be0b12ce326edac0bba69aeb1be930dcb8
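A standalone sketch of the discovery filter in the record above: dedupe the announced connect_urls against the netlocs already in the pool (plain sets and ParseResult objects instead of Srv instances).

from urllib.parse import urlparse

def fresh_servers(known_netlocs, connect_urls):
    # Keep only hosts whose netloc is not already present in the pool.
    fresh = []
    for connect_url in connect_urls:
        uri = urlparse("nats://%s" % connect_url)
        if uri.netloc not in known_netlocs:
            fresh.append(uri)
    return fresh

# fresh_servers({"127.0.0.1:4222"}, ["127.0.0.1:4222", "10.0.0.2:4223"])
# -> one entry, for 10.0.0.2:4223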
|
test
|
Client._process_connect_init
|
Process INFO received from the server and CONNECT to the server
with authentication. It is also responsible for setting up the
reading and ping interval tasks for the client.
|
nats/aio/client.py
|
def _process_connect_init(self):
"""
Process INFO received from the server and CONNECT to the server
with authentication. It is also responsible of setting up the
reading and ping interval tasks from the client.
"""
self._status = Client.CONNECTING
connection_completed = self._io_reader.readline()
info_line = yield from asyncio.wait_for(connection_completed, self.options["connect_timeout"])
if INFO_OP not in info_line:
raise NatsError("nats: empty response from server when expecting INFO message")
_, info = info_line.split(INFO_OP + _SPC_, 1)
try:
srv_info = json.loads(info.decode())
except:
raise NatsError("nats: info message, json parse error")
self._process_info(srv_info)
self._server_info = srv_info
if 'max_payload' in self._server_info:
self._max_payload = self._server_info["max_payload"]
if 'tls_required' in self._server_info and self._server_info['tls_required']:
ssl_context = None
if "tls" in self.options:
ssl_context = self.options.get('tls')
elif self._current_server.uri.scheme == 'tls':
ssl_context = ssl.create_default_context()
else:
raise NatsError('nats: no ssl context provided')
transport = self._io_writer.transport
sock = transport.get_extra_info('socket')
if not sock:
# This shouldn't happen
raise NatsError('nats: unable to get socket')
yield from self._io_writer.drain() # just in case something is left
self._io_reader, self._io_writer = \
yield from asyncio.open_connection(
loop=self._loop,
limit=DEFAULT_BUFFER_SIZE,
sock=sock,
ssl=ssl_context,
server_hostname=self._current_server.uri.hostname,
)
# Refresh state of parser upon reconnect.
if self.is_reconnecting:
self._ps.reset()
connect_cmd = self._connect_command()
self._io_writer.write(connect_cmd)
self._io_writer.write(PING_PROTO)
yield from self._io_writer.drain()
# FIXME: Add readline timeout
next_op = yield from self._io_reader.readline()
if self.options["verbose"] and OK_OP in next_op:
next_op = yield from self._io_reader.readline()
if ERR_OP in next_op:
err_line = next_op.decode()
_, err_msg = err_line.split(" ", 1)
# FIXME: Maybe handling could be more special here,
# checking for ErrAuthorization for example.
# yield from self._process_err(err_msg)
raise NatsError("nats: " + err_msg.rstrip('\r\n'))
if PONG_PROTO in next_op:
self._status = Client.CONNECTED
self._reading_task = self._loop.create_task(self._read_loop())
self._pongs = []
self._pings_outstanding = 0
self._ping_interval_task = self._loop.create_task(
self._ping_interval())
# Task for kicking the flusher queue
self._flusher_task = self._loop.create_task(self._flusher())
|
def _process_connect_init(self):
"""
Process INFO received from the server and CONNECT to the server
with authentication. It is also responsible of setting up the
reading and ping interval tasks from the client.
"""
self._status = Client.CONNECTING
connection_completed = self._io_reader.readline()
info_line = yield from asyncio.wait_for(connection_completed, self.options["connect_timeout"])
if INFO_OP not in info_line:
raise NatsError("nats: empty response from server when expecting INFO message")
_, info = info_line.split(INFO_OP + _SPC_, 1)
try:
srv_info = json.loads(info.decode())
except:
raise NatsError("nats: info message, json parse error")
self._process_info(srv_info)
self._server_info = srv_info
if 'max_payload' in self._server_info:
self._max_payload = self._server_info["max_payload"]
if 'tls_required' in self._server_info and self._server_info['tls_required']:
ssl_context = None
if "tls" in self.options:
ssl_context = self.options.get('tls')
elif self._current_server.uri.scheme == 'tls':
ssl_context = ssl.create_default_context()
else:
raise NatsError('nats: no ssl context provided')
transport = self._io_writer.transport
sock = transport.get_extra_info('socket')
if not sock:
# This shouldn't happen
raise NatsError('nats: unable to get socket')
yield from self._io_writer.drain() # just in case something is left
self._io_reader, self._io_writer = \
yield from asyncio.open_connection(
loop=self._loop,
limit=DEFAULT_BUFFER_SIZE,
sock=sock,
ssl=ssl_context,
server_hostname=self._current_server.uri.hostname,
)
# Refresh state of parser upon reconnect.
if self.is_reconnecting:
self._ps.reset()
connect_cmd = self._connect_command()
self._io_writer.write(connect_cmd)
self._io_writer.write(PING_PROTO)
yield from self._io_writer.drain()
# FIXME: Add readline timeout
next_op = yield from self._io_reader.readline()
if self.options["verbose"] and OK_OP in next_op:
next_op = yield from self._io_reader.readline()
if ERR_OP in next_op:
err_line = next_op.decode()
_, err_msg = err_line.split(" ", 1)
# FIXME: Maybe handling could be more special here,
# checking for ErrAuthorization for example.
# yield from self._process_err(err_msg)
raise NatsError("nats: " + err_msg.rstrip('\r\n'))
if PONG_PROTO in next_op:
self._status = Client.CONNECTED
self._reading_task = self._loop.create_task(self._read_loop())
self._pongs = []
self._pings_outstanding = 0
self._ping_interval_task = self._loop.create_task(
self._ping_interval())
# Task for kicking the flusher queue
self._flusher_task = self._loop.create_task(self._flusher())
|
[
"Process",
"INFO",
"received",
"from",
"the",
"server",
"and",
"CONNECT",
"to",
"the",
"server",
"with",
"authentication",
".",
"It",
"is",
"also",
"responsible",
"of",
"setting",
"up",
"the",
"reading",
"and",
"ping",
"interval",
"tasks",
"from",
"the",
"client",
"."
] |
nats-io/asyncio-nats
|
python
|
https://github.com/nats-io/asyncio-nats/blob/39e840be0b12ce326edac0bba69aeb1be930dcb8/nats/aio/client.py#L1253-L1337
|
[
"def",
"_process_connect_init",
"(",
"self",
")",
":",
"self",
".",
"_status",
"=",
"Client",
".",
"CONNECTING",
"connection_completed",
"=",
"self",
".",
"_io_reader",
".",
"readline",
"(",
")",
"info_line",
"=",
"yield",
"from",
"asyncio",
".",
"wait_for",
"(",
"connection_completed",
",",
"self",
".",
"options",
"[",
"\"connect_timeout\"",
"]",
")",
"if",
"INFO_OP",
"not",
"in",
"info_line",
":",
"raise",
"NatsError",
"(",
"\"nats: empty response from server when expecting INFO message\"",
")",
"_",
",",
"info",
"=",
"info_line",
".",
"split",
"(",
"INFO_OP",
"+",
"_SPC_",
",",
"1",
")",
"try",
":",
"srv_info",
"=",
"json",
".",
"loads",
"(",
"info",
".",
"decode",
"(",
")",
")",
"except",
":",
"raise",
"NatsError",
"(",
"\"nats: info message, json parse error\"",
")",
"self",
".",
"_process_info",
"(",
"srv_info",
")",
"self",
".",
"_server_info",
"=",
"srv_info",
"if",
"'max_payload'",
"in",
"self",
".",
"_server_info",
":",
"self",
".",
"_max_payload",
"=",
"self",
".",
"_server_info",
"[",
"\"max_payload\"",
"]",
"if",
"'tls_required'",
"in",
"self",
".",
"_server_info",
"and",
"self",
".",
"_server_info",
"[",
"'tls_required'",
"]",
":",
"ssl_context",
"=",
"None",
"if",
"\"tls\"",
"in",
"self",
".",
"options",
":",
"ssl_context",
"=",
"self",
".",
"options",
".",
"get",
"(",
"'tls'",
")",
"elif",
"self",
".",
"_current_server",
".",
"uri",
".",
"scheme",
"==",
"'tls'",
":",
"ssl_context",
"=",
"ssl",
".",
"create_default_context",
"(",
")",
"else",
":",
"raise",
"NatsError",
"(",
"'nats: no ssl context provided'",
")",
"transport",
"=",
"self",
".",
"_io_writer",
".",
"transport",
"sock",
"=",
"transport",
".",
"get_extra_info",
"(",
"'socket'",
")",
"if",
"not",
"sock",
":",
"# This shouldn't happen",
"raise",
"NatsError",
"(",
"'nats: unable to get socket'",
")",
"yield",
"from",
"self",
".",
"_io_writer",
".",
"drain",
"(",
")",
"# just in case something is left",
"self",
".",
"_io_reader",
",",
"self",
".",
"_io_writer",
"=",
"yield",
"from",
"asyncio",
".",
"open_connection",
"(",
"loop",
"=",
"self",
".",
"_loop",
",",
"limit",
"=",
"DEFAULT_BUFFER_SIZE",
",",
"sock",
"=",
"sock",
",",
"ssl",
"=",
"ssl_context",
",",
"server_hostname",
"=",
"self",
".",
"_current_server",
".",
"uri",
".",
"hostname",
",",
")",
"# Refresh state of parser upon reconnect.",
"if",
"self",
".",
"is_reconnecting",
":",
"self",
".",
"_ps",
".",
"reset",
"(",
")",
"connect_cmd",
"=",
"self",
".",
"_connect_command",
"(",
")",
"self",
".",
"_io_writer",
".",
"write",
"(",
"connect_cmd",
")",
"self",
".",
"_io_writer",
".",
"write",
"(",
"PING_PROTO",
")",
"yield",
"from",
"self",
".",
"_io_writer",
".",
"drain",
"(",
")",
"# FIXME: Add readline timeout",
"next_op",
"=",
"yield",
"from",
"self",
".",
"_io_reader",
".",
"readline",
"(",
")",
"if",
"self",
".",
"options",
"[",
"\"verbose\"",
"]",
"and",
"OK_OP",
"in",
"next_op",
":",
"next_op",
"=",
"yield",
"from",
"self",
".",
"_io_reader",
".",
"readline",
"(",
")",
"if",
"ERR_OP",
"in",
"next_op",
":",
"err_line",
"=",
"next_op",
".",
"decode",
"(",
")",
"_",
",",
"err_msg",
"=",
"err_line",
".",
"split",
"(",
"\" \"",
",",
"1",
")",
"# FIXME: Maybe handling could be more special here,",
"# checking for ErrAuthorization for example.",
"# yield from self._process_err(err_msg)",
"raise",
"NatsError",
"(",
"\"nats: \"",
"+",
"err_msg",
".",
"rstrip",
"(",
"'\\r\\n'",
")",
")",
"if",
"PONG_PROTO",
"in",
"next_op",
":",
"self",
".",
"_status",
"=",
"Client",
".",
"CONNECTED",
"self",
".",
"_reading_task",
"=",
"self",
".",
"_loop",
".",
"create_task",
"(",
"self",
".",
"_read_loop",
"(",
")",
")",
"self",
".",
"_pongs",
"=",
"[",
"]",
"self",
".",
"_pings_outstanding",
"=",
"0",
"self",
".",
"_ping_interval_task",
"=",
"self",
".",
"_loop",
".",
"create_task",
"(",
"self",
".",
"_ping_interval",
"(",
")",
")",
"# Task for kicking the flusher queue",
"self",
".",
"_flusher_task",
"=",
"self",
".",
"_loop",
".",
"create_task",
"(",
"self",
".",
"_flusher",
"(",
")",
")"
] |
39e840be0b12ce326edac0bba69aeb1be930dcb8
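The INFO/CONNECT/PING/PONG exchange that _process_connect_init drives can be reproduced outside the client with raw asyncio streams. The sketch below is illustrative only: it assumes a NATS server listening on 127.0.0.1:4222, and the CONNECT options shown are examples rather than values taken from this client.

import asyncio
import json

async def nats_handshake(host='127.0.0.1', port=4222):
    # Assumes a locally running NATS server; host and port are illustrative.
    reader, writer = await asyncio.open_connection(host, port)

    # The server speaks first with an INFO line carrying a JSON payload.
    info_line = await reader.readline()
    assert info_line.startswith(b'INFO '), info_line
    server_info = json.loads(info_line[len(b'INFO '):].decode())
    print('max_payload:', server_info.get('max_payload'))

    # The client answers with CONNECT (options are examples) and a PING.
    connect_opts = {"verbose": False, "pedantic": False, "name": "handshake-demo"}
    writer.write(b'CONNECT ' + json.dumps(connect_opts).encode() + b'\r\n')
    writer.write(b'PING\r\n')
    await writer.drain()

    # A PONG back (or +OK followed by PONG when verbose) completes the handshake.
    print('server replied:', (await reader.readline()).decode().strip())
    writer.close()

asyncio.run(nats_handshake())

The client code above uses the older generator-based yield from coroutine style; the sketch uses async/await, which is equivalent for this purpose on modern Python.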
|
test
|
Client._flusher
|
Coroutine which continuously tries to consume pending commands
and then flushes them to the socket.
|
nats/aio/client.py
|
def _flusher(self):
"""
Coroutine which continuously tries to consume pending commands
and then flushes them to the socket.
"""
while True:
if not self.is_connected or self.is_connecting:
break
try:
yield from self._flush_queue.get()
if self._pending_data_size > 0:
self._io_writer.writelines(self._pending[:])
self._pending = []
self._pending_data_size = 0
yield from self._io_writer.drain()
except OSError as e:
if self._error_cb is not None:
yield from self._error_cb(e)
yield from self._process_op_err(e)
break
except asyncio.CancelledError:
break
|
def _flusher(self):
"""
Coroutine which continuously tries to consume pending commands
and then flushes them to the socket.
"""
while True:
if not self.is_connected or self.is_connecting:
break
try:
yield from self._flush_queue.get()
if self._pending_data_size > 0:
self._io_writer.writelines(self._pending[:])
self._pending = []
self._pending_data_size = 0
yield from self._io_writer.drain()
except OSError as e:
if self._error_cb is not None:
yield from self._error_cb(e)
yield from self._process_op_err(e)
break
except asyncio.CancelledError:
break
|
[
"Coroutine",
"which",
"continuously",
"tries",
"to",
"consume",
"pending",
"commands",
"and",
"then",
"flushes",
"them",
"to",
"the",
"socket",
"."
] |
nats-io/asyncio-nats
|
python
|
https://github.com/nats-io/asyncio-nats/blob/39e840be0b12ce326edac0bba69aeb1be930dcb8/nats/aio/client.py#L1348-L1371
|
[
"def",
"_flusher",
"(",
"self",
")",
":",
"while",
"True",
":",
"if",
"not",
"self",
".",
"is_connected",
"or",
"self",
".",
"is_connecting",
":",
"break",
"try",
":",
"yield",
"from",
"self",
".",
"_flush_queue",
".",
"get",
"(",
")",
"if",
"self",
".",
"_pending_data_size",
">",
"0",
":",
"self",
".",
"_io_writer",
".",
"writelines",
"(",
"self",
".",
"_pending",
"[",
":",
"]",
")",
"self",
".",
"_pending",
"=",
"[",
"]",
"self",
".",
"_pending_data_size",
"=",
"0",
"yield",
"from",
"self",
".",
"_io_writer",
".",
"drain",
"(",
")",
"except",
"OSError",
"as",
"e",
":",
"if",
"self",
".",
"_error_cb",
"is",
"not",
"None",
":",
"yield",
"from",
"self",
".",
"_error_cb",
"(",
"e",
")",
"yield",
"from",
"self",
".",
"_process_op_err",
"(",
"e",
")",
"break",
"except",
"asyncio",
".",
"CancelledError",
":",
"break"
] |
39e840be0b12ce326edac0bba69aeb1be930dcb8
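The flush-queue pattern used by _flusher (publishers append to a pending buffer and put a token on a queue; a background task drains the buffer whenever it is kicked) can be shown in a self-contained way. Everything below is illustrative; DummyWriter only stands in for the real StreamWriter so the sketch runs on its own.

import asyncio

class DummyWriter:
    # Stand-in for an asyncio StreamWriter, just to make the sketch runnable.
    def writelines(self, lines):
        print(b''.join(lines).decode(), end='')

    async def drain(self):
        pass

async def main():
    pending = []               # buffered protocol lines
    kick = asyncio.Queue()     # signal that something was buffered
    writer = DummyWriter()

    async def flusher():
        while True:
            await kick.get()   # block until kicked, then flush the whole buffer
            if pending:
                writer.writelines(pending[:])
                pending.clear()
                await writer.drain()

    task = asyncio.create_task(flusher())
    pending.append(b'PUB foo 5\r\nhello\r\n')
    await kick.put(None)       # kick the flusher once
    await asyncio.sleep(0.1)   # give it a chance to run
    task.cancel()

asyncio.run(main())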
|
test
|
Client._read_loop
|
Coroutine which gathers bytes sent by the server
and feeds them to the protocol parser.
In case of error while reading, it will stop running
and its task has to be rescheduled.
|
nats/aio/client.py
|
def _read_loop(self):
"""
Coroutine which gathers bytes sent by the server
and feeds them to the protocol parser.
In case of error while reading, it will stop running
and its task has to be rescheduled.
"""
while True:
try:
should_bail = self.is_closed or self.is_reconnecting
if should_bail or self._io_reader is None:
break
if self.is_connected and self._io_reader.at_eof():
if self._error_cb is not None:
yield from self._error_cb(ErrStaleConnection)
yield from self._process_op_err(ErrStaleConnection)
break
b = yield from self._io_reader.read(DEFAULT_BUFFER_SIZE)
yield from self._ps.parse(b)
except ErrProtocol:
yield from self._process_op_err(ErrProtocol)
break
except OSError as e:
yield from self._process_op_err(e)
break
except asyncio.CancelledError:
break
|
def _read_loop(self):
"""
Coroutine which gathers bytes sent by the server
and feeds them to the protocol parser.
In case of error while reading, it will stop running
and its task has to be rescheduled.
"""
while True:
try:
should_bail = self.is_closed or self.is_reconnecting
if should_bail or self._io_reader is None:
break
if self.is_connected and self._io_reader.at_eof():
if self._error_cb is not None:
yield from self._error_cb(ErrStaleConnection)
yield from self._process_op_err(ErrStaleConnection)
break
b = yield from self._io_reader.read(DEFAULT_BUFFER_SIZE)
yield from self._ps.parse(b)
except ErrProtocol:
yield from self._process_op_err(ErrProtocol)
break
except OSError as e:
yield from self._process_op_err(e)
break
except asyncio.CancelledError:
break
|
[
"Coroutine",
"which",
"gathers",
"bytes",
"sent",
"by",
"the",
"server",
"and",
"feeds",
"them",
"to",
"the",
"protocol",
"parser",
".",
"In",
"case",
"of",
"error",
"while",
"reading",
"it",
"will",
"stop",
"running",
"and",
"its",
"task",
"has",
"to",
"be",
"rescheduled",
"."
] |
nats-io/asyncio-nats
|
python
|
https://github.com/nats-io/asyncio-nats/blob/39e840be0b12ce326edac0bba69aeb1be930dcb8/nats/aio/client.py#L1392-L1419
|
[
"def",
"_read_loop",
"(",
"self",
")",
":",
"while",
"True",
":",
"try",
":",
"should_bail",
"=",
"self",
".",
"is_closed",
"or",
"self",
".",
"is_reconnecting",
"if",
"should_bail",
"or",
"self",
".",
"_io_reader",
"is",
"None",
":",
"break",
"if",
"self",
".",
"is_connected",
"and",
"self",
".",
"_io_reader",
".",
"at_eof",
"(",
")",
":",
"if",
"self",
".",
"_error_cb",
"is",
"not",
"None",
":",
"yield",
"from",
"self",
".",
"_error_cb",
"(",
"ErrStaleConnection",
")",
"yield",
"from",
"self",
".",
"_process_op_err",
"(",
"ErrStaleConnection",
")",
"break",
"b",
"=",
"yield",
"from",
"self",
".",
"_io_reader",
".",
"read",
"(",
"DEFAULT_BUFFER_SIZE",
")",
"yield",
"from",
"self",
".",
"_ps",
".",
"parse",
"(",
"b",
")",
"except",
"ErrProtocol",
":",
"yield",
"from",
"self",
".",
"_process_op_err",
"(",
"ErrProtocol",
")",
"break",
"except",
"OSError",
"as",
"e",
":",
"yield",
"from",
"self",
".",
"_process_op_err",
"(",
"e",
")",
"break",
"except",
"asyncio",
".",
"CancelledError",
":",
"break"
] |
39e840be0b12ce326edac0bba69aeb1be930dcb8
|
test
|
coactivation
|
Compute and save coactivation map given input image as seed.
This is essentially just a wrapper for a meta-analysis defined
by the contrast between those studies that activate within the seed
and those that don't.
Args:
dataset: a Dataset instance containing study and activation data.
seed: either a Nifti or Analyze image defining the boundaries of the
seed, or a list of triples (x/y/z) defining the seed(s). Note that
voxels do not need to be contiguous to define a seed--all supra-
threshold voxels will be lumped together.
threshold: optional float indicating the threshold above which voxels
are considered to be part of the seed ROI (default = 0)
r: optional integer indicating radius (in mm) of spheres to grow
(only used if seed is a list of coordinates).
output_dir: output directory to write to. Defaults to current.
If none, defaults to using the first part of the seed filename.
prefix: optional string to prepend to all coactivation images.
Output:
A set of meta-analysis images identical to that generated by
meta.MetaAnalysis.
|
neurosynth/analysis/network.py
|
def coactivation(dataset, seed, threshold=0.0, output_dir='.', prefix='', r=6):
""" Compute and save coactivation map given input image as seed.
This is essentially just a wrapper for a meta-analysis defined
by the contrast between those studies that activate within the seed
and those that don't.
Args:
dataset: a Dataset instance containing study and activation data.
seed: either a Nifti or Analyze image defining the boundaries of the
seed, or a list of triples (x/y/z) defining the seed(s). Note that
voxels do not need to be contiguous to define a seed--all supra-
threshold voxels will be lumped together.
threshold: optional float indicating the threshold above which voxels
are considered to be part of the seed ROI (default = 0)
r: optional integer indicating radius (in mm) of spheres to grow
(only used if seed is a list of coordinates).
output_dir: output directory to write to. Defaults to current.
If none, defaults to using the first part of the seed filename.
prefix: optional string to prepend to all coactivation images.
Output:
A set of meta-analysis images identical to that generated by
meta.MetaAnalysis.
"""
if isinstance(seed, string_types):
ids = dataset.get_studies(mask=seed, activation_threshold=threshold)
else:
ids = dataset.get_studies(peaks=seed, r=r,
activation_threshold=threshold)
ma = meta.MetaAnalysis(dataset, ids)
ma.save_results(output_dir, prefix)
|
def coactivation(dataset, seed, threshold=0.0, output_dir='.', prefix='', r=6):
""" Compute and save coactivation map given input image as seed.
This is essentially just a wrapper for a meta-analysis defined
by the contrast between those studies that activate within the seed
and those that don't.
Args:
dataset: a Dataset instance containing study and activation data.
seed: either a Nifti or Analyze image defining the boundaries of the
seed, or a list of triples (x/y/z) defining the seed(s). Note that
voxels do not need to be contiguous to define a seed--all supra-
threshold voxels will be lumped together.
threshold: optional float indicating the threshold above which voxels
are considered to be part of the seed ROI (default = 0)
r: optional integer indicating radius (in mm) of spheres to grow
(only used if seed is a list of coordinates).
output_dir: output directory to write to. Defaults to current.
If none, defaults to using the first part of the seed filename.
prefix: optional string to prepend to all coactivation images.
Output:
A set of meta-analysis images identical to that generated by
meta.MetaAnalysis.
"""
if isinstance(seed, string_types):
ids = dataset.get_studies(mask=seed, activation_threshold=threshold)
else:
ids = dataset.get_studies(peaks=seed, r=r,
activation_threshold=threshold)
ma = meta.MetaAnalysis(dataset, ids)
ma.save_results(output_dir, prefix)
|
[
"Compute",
"and",
"save",
"coactivation",
"map",
"given",
"input",
"image",
"as",
"seed",
"."
] |
neurosynth/neurosynth
|
python
|
https://github.com/neurosynth/neurosynth/blob/948ce7edce15d7df693446e76834e0c23bfe8f11/neurosynth/analysis/network.py#L7-L40
|
[
"def",
"coactivation",
"(",
"dataset",
",",
"seed",
",",
"threshold",
"=",
"0.0",
",",
"output_dir",
"=",
"'.'",
",",
"prefix",
"=",
"''",
",",
"r",
"=",
"6",
")",
":",
"if",
"isinstance",
"(",
"seed",
",",
"string_types",
")",
":",
"ids",
"=",
"dataset",
".",
"get_studies",
"(",
"mask",
"=",
"seed",
",",
"activation_threshold",
"=",
"threshold",
")",
"else",
":",
"ids",
"=",
"dataset",
".",
"get_studies",
"(",
"peaks",
"=",
"seed",
",",
"r",
"=",
"r",
",",
"activation_threshold",
"=",
"threshold",
")",
"ma",
"=",
"meta",
".",
"MetaAnalysis",
"(",
"dataset",
",",
"ids",
")",
"ma",
".",
"save_results",
"(",
"output_dir",
",",
"prefix",
")"
] |
948ce7edce15d7df693446e76834e0c23bfe8f11
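A rough usage sketch for coactivation. The dataset pickle and seed files named below are hypothetical, and loading via Dataset.load assumes the usual pickled-Dataset workflow; only the coactivation signature itself comes from the code above.

from neurosynth.base.dataset import Dataset
from neurosynth.analysis import network

dataset = Dataset.load('dataset.pkl')  # hypothetical pre-built dataset

# Seed given as a mask image: studies activating above threshold within the
# seed are contrasted against those that do not.
network.coactivation(dataset, 'amygdala_seed.nii.gz', threshold=0.1,
                     output_dir='coactivation_maps', prefix='amygdala')

# Seed given as a list of coordinates, grown into 6 mm spheres.
network.coactivation(dataset, [[24, -2, -20]], r=6,
                     output_dir='coactivation_maps', prefix='right_amygdala')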
|
test
|
Decoder.decode
|
Decodes a set of images.
Args:
images: The images to decode. Can be:
- A single String specifying the filename of the image to decode
- A list of filenames
- A single NumPy array containing the image data
save: Optional filename to save results to. If None (default), returns
all results as an array.
round: Optional integer indicating number of decimals to round result
to. Defaults to 4.
names: Optional list of names corresponding to the images in filenames.
If passed, must be of same length and in same order as filenames.
By default, the columns in the output will be named using the image
filenames.
Returns:
An n_features x n_files numpy array, where each feature is a row and
each image is a column. The meaning of the values depends on the
decoding method used.
|
neurosynth/analysis/decode.py
|
def decode(self, images, save=None, round=4, names=None, **kwargs):
""" Decodes a set of images.
Args:
images: The images to decode. Can be:
- A single String specifying the filename of the image to decode
- A list of filenames
- A single NumPy array containing the image data
save: Optional filename to save results to. If None (default), returns
all results as an array.
round: Optional integer indicating number of decimals to round result
to. Defaults to 4.
names: Optional list of names corresponding to the images in filenames.
If passed, must be of same length and in same order as filenames.
By default, the columns in the output will be named using the image
filenames.
Returns:
An n_features x n_files numpy array, where each feature is a row and
each image is a column. The meaning of the values depends on the
decoding method used. """
if isinstance(images, string_types):
images = [images]
if isinstance(images, list):
imgs_to_decode = imageutils.load_imgs(images, self.masker)
else:
imgs_to_decode = images
methods = {
'pearson': self._pearson_correlation,
'dot': self._dot_product,
'roi': self._roi_association
}
result = np.around(
methods[self.method](imgs_to_decode, **kwargs), round)
# if save is not None:
if names is None:
if type(images).__module__ == np.__name__:
names = ['image_%d' % i for i in range(images.shape[1])]
elif self.method == 'roi':
names = ['cluster_%d' % i for i in range(result.shape[1])]
else:
names = images
result = pd.DataFrame(result, columns=names, index=self.feature_names)
if save is not None:
result.to_csv(save, index_label='Feature')
return result
|
def decode(self, images, save=None, round=4, names=None, **kwargs):
""" Decodes a set of images.
Args:
images: The images to decode. Can be:
- A single String specifying the filename of the image to decode
- A list of filenames
- A single NumPy array containing the image data
save: Optional filename to save results to. If None (default), returns
all results as an array.
round: Optional integer indicating number of decimals to round result
to. Defaults to 4.
names: Optional list of names corresponding to the images in filenames.
If passed, must be of same length and in same order as filenames.
By default, the columns in the output will be named using the image
filenames.
Returns:
An n_features x n_files numpy array, where each feature is a row and
each image is a column. The meaning of the values depends on the
decoding method used. """
if isinstance(images, string_types):
images = [images]
if isinstance(images, list):
imgs_to_decode = imageutils.load_imgs(images, self.masker)
else:
imgs_to_decode = images
methods = {
'pearson': self._pearson_correlation,
'dot': self._dot_product,
'roi': self._roi_association
}
result = np.around(
methods[self.method](imgs_to_decode, **kwargs), round)
# if save is not None:
if names is None:
if type(images).__module__ == np.__name__:
names = ['image_%d' % i for i in range(images.shape[1])]
elif self.method == 'roi':
names = ['cluster_%d' % i for i in range(result.shape[1])]
else:
names = images
result = pd.DataFrame(result, columns=names, index=self.feature_names)
if save is not None:
result.to_csv(save, index_label='Feature')
return result
|
[
"Decodes",
"a",
"set",
"of",
"images",
"."
] |
neurosynth/neurosynth
|
python
|
https://github.com/neurosynth/neurosynth/blob/948ce7edce15d7df693446e76834e0c23bfe8f11/neurosynth/analysis/decode.py#L64-L117
|
[
"def",
"decode",
"(",
"self",
",",
"images",
",",
"save",
"=",
"None",
",",
"round",
"=",
"4",
",",
"names",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"isinstance",
"(",
"images",
",",
"string_types",
")",
":",
"images",
"=",
"[",
"images",
"]",
"if",
"isinstance",
"(",
"images",
",",
"list",
")",
":",
"imgs_to_decode",
"=",
"imageutils",
".",
"load_imgs",
"(",
"images",
",",
"self",
".",
"masker",
")",
"else",
":",
"imgs_to_decode",
"=",
"images",
"methods",
"=",
"{",
"'pearson'",
":",
"self",
".",
"_pearson_correlation",
",",
"'dot'",
":",
"self",
".",
"_dot_product",
",",
"'roi'",
":",
"self",
".",
"_roi_association",
"}",
"result",
"=",
"np",
".",
"around",
"(",
"methods",
"[",
"self",
".",
"method",
"]",
"(",
"imgs_to_decode",
",",
"*",
"*",
"kwargs",
")",
",",
"round",
")",
"# if save is not None:",
"if",
"names",
"is",
"None",
":",
"if",
"type",
"(",
"images",
")",
".",
"__module__",
"==",
"np",
".",
"__name__",
":",
"names",
"=",
"[",
"'image_%d'",
"%",
"i",
"for",
"i",
"in",
"range",
"(",
"images",
".",
"shape",
"[",
"1",
"]",
")",
"]",
"elif",
"self",
".",
"method",
"==",
"'roi'",
":",
"names",
"=",
"[",
"'cluster_%d'",
"%",
"i",
"for",
"i",
"in",
"range",
"(",
"result",
".",
"shape",
"[",
"1",
"]",
")",
"]",
"else",
":",
"names",
"=",
"images",
"result",
"=",
"pd",
".",
"DataFrame",
"(",
"result",
",",
"columns",
"=",
"names",
",",
"index",
"=",
"self",
".",
"feature_names",
")",
"if",
"save",
"is",
"not",
"None",
":",
"result",
".",
"to_csv",
"(",
"save",
",",
"index_label",
"=",
"'Feature'",
")",
"return",
"result"
] |
948ce7edce15d7df693446e76834e0c23bfe8f11
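A sketch of how decode might be called. The Decoder constructor arguments, feature names, and file names are assumptions for illustration; the decode call itself follows the signature shown above, and dataset is the same kind of pre-built Dataset used in the earlier sketch.

from neurosynth.analysis.decode import Decoder

# Constructor details may vary by version; dataset is a pre-built Dataset.
decoder = Decoder(dataset, features=['emotion', 'memory', 'pain'],
                  method='pearson')

# Decode one statistical map; the result is a features x images DataFrame.
result = decoder.decode('my_contrast_map.nii.gz', save='decoding.csv')
print(result.head())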
|
test
|
Decoder.load_features
|
Load features from current Dataset instance or a list of files.
Args:
features: List containing paths to, or names of, features to
extract. Each element in the list must be a string containing
either a path to an image, or the name of a feature (as named
in the current Dataset). Mixing of paths and feature names
within the list is not allowed.
image_type: Optional suffix indicating which kind of image to use
for analysis. Only used if features are taken from the Dataset;
if features is a list of filenames, image_type is ignored.
from_array: If True, the features argument is interpreted as a
string pointing to the location of a 2D ndarray on disk
containing feature data, where rows are voxels and columns are
individual features.
threshold: If features are taken from the dataset, this is the
threshold passed to the meta-analysis module to generate fresh
images.
|
neurosynth/analysis/decode.py
|
def load_features(self, features, image_type=None, from_array=False,
threshold=0.001):
""" Load features from current Dataset instance or a list of files.
Args:
features: List containing paths to, or names of, features to
extract. Each element in the list must be a string containing
either a path to an image, or the name of a feature (as named
in the current Dataset). Mixing of paths and feature names
within the list is not allowed.
image_type: Optional suffix indicating which kind of image to use
for analysis. Only used if features are taken from the Dataset;
if features is a list of filenames, image_type is ignored.
from_array: If True, the features argument is interpreted as a
string pointing to the location of a 2D ndarray on disk
containing feature data, where rows are voxels and columns are
individual features.
threshold: If features are taken from the dataset, this is the
threshold passed to the meta-analysis module to generate fresh
images.
"""
if from_array:
if isinstance(features, list):
features = features[0]
self._load_features_from_array(features)
elif path.exists(features[0]):
self._load_features_from_images(features)
else:
self._load_features_from_dataset(
features, image_type=image_type, threshold=threshold)
|
def load_features(self, features, image_type=None, from_array=False,
threshold=0.001):
""" Load features from current Dataset instance or a list of files.
Args:
features: List containing paths to, or names of, features to
extract. Each element in the list must be a string containing
either a path to an image, or the name of a feature (as named
in the current Dataset). Mixing of paths and feature names
within the list is not allowed.
image_type: Optional suffix indicating which kind of image to use
for analysis. Only used if features are taken from the Dataset;
if features is a list of filenames, image_type is ignored.
from_array: If True, the features argument is interpreted as a
string pointing to the location of a 2D ndarray on disk
containing feature data, where rows are voxels and columns are
individual features.
threshold: If features are taken from the dataset, this is the
threshold passed to the meta-analysis module to generate fresh
images.
"""
if from_array:
if isinstance(features, list):
features = features[0]
self._load_features_from_array(features)
elif path.exists(features[0]):
self._load_features_from_images(features)
else:
self._load_features_from_dataset(
features, image_type=image_type, threshold=threshold)
|
[
"Load",
"features",
"from",
"current",
"Dataset",
"instance",
"or",
"a",
"list",
"of",
"files",
".",
"Args",
":",
"features",
":",
"List",
"containing",
"paths",
"to",
"or",
"names",
"of",
"features",
"to",
"extract",
".",
"Each",
"element",
"in",
"the",
"list",
"must",
"be",
"a",
"string",
"containing",
"either",
"a",
"path",
"to",
"an",
"image",
"or",
"the",
"name",
"of",
"a",
"feature",
"(",
"as",
"named",
"in",
"the",
"current",
"Dataset",
")",
".",
"Mixing",
"of",
"paths",
"and",
"feature",
"names",
"within",
"the",
"list",
"is",
"not",
"allowed",
".",
"image_type",
":",
"Optional",
"suffix",
"indicating",
"which",
"kind",
"of",
"image",
"to",
"use",
"for",
"analysis",
".",
"Only",
"used",
"if",
"features",
"are",
"taken",
"from",
"the",
"Dataset",
";",
"if",
"features",
"is",
"a",
"list",
"of",
"filenames",
"image_type",
"is",
"ignored",
".",
"from_array",
":",
"If",
"True",
"the",
"features",
"argument",
"is",
"interpreted",
"as",
"a",
"string",
"pointing",
"to",
"the",
"location",
"of",
"a",
"2D",
"ndarray",
"on",
"disk",
"containing",
"feature",
"data",
"where",
"rows",
"are",
"voxels",
"and",
"columns",
"are",
"individual",
"features",
".",
"threshold",
":",
"If",
"features",
"are",
"taken",
"from",
"the",
"dataset",
"this",
"is",
"the",
"threshold",
"passed",
"to",
"the",
"meta",
"-",
"analysis",
"module",
"to",
"generate",
"fresh",
"images",
"."
] |
neurosynth/neurosynth
|
python
|
https://github.com/neurosynth/neurosynth/blob/948ce7edce15d7df693446e76834e0c23bfe8f11/neurosynth/analysis/decode.py#L123-L152
|
[
"def",
"load_features",
"(",
"self",
",",
"features",
",",
"image_type",
"=",
"None",
",",
"from_array",
"=",
"False",
",",
"threshold",
"=",
"0.001",
")",
":",
"if",
"from_array",
":",
"if",
"isinstance",
"(",
"features",
",",
"list",
")",
":",
"features",
"=",
"features",
"[",
"0",
"]",
"self",
".",
"_load_features_from_array",
"(",
"features",
")",
"elif",
"path",
".",
"exists",
"(",
"features",
"[",
"0",
"]",
")",
":",
"self",
".",
"_load_features_from_images",
"(",
"features",
")",
"else",
":",
"self",
".",
"_load_features_from_dataset",
"(",
"features",
",",
"image_type",
"=",
"image_type",
",",
"threshold",
"=",
"threshold",
")"
] |
948ce7edce15d7df693446e76834e0c23bfe8f11
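Continuing the decoder sketch above, the three input modes accepted by load_features look roughly like this; the feature names and file paths are hypothetical.

# Feature names taken from the Dataset, regenerated at the given threshold.
decoder.load_features(['fear', 'reward'], threshold=0.001)

# Paths to feature images already on disk.
decoder.load_features(['fear_map.nii.gz', 'reward_map.nii.gz'])

# A pre-computed voxels x features ndarray saved with numpy.save().
decoder.load_features('feature_matrix.npy', from_array=True)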
|
test
|
Decoder._load_features_from_array
|
Load feature data from a 2D ndarray on disk.
|
neurosynth/analysis/decode.py
|
def _load_features_from_array(self, features):
""" Load feature data from a 2D ndarray on disk. """
self.feature_images = np.load(features)
self.feature_names = range(self.feature_images.shape[1])
|
def _load_features_from_array(self, features):
""" Load feature data from a 2D ndarray on disk. """
self.feature_images = np.load(features)
self.feature_names = range(self.feature_images.shape[1])
|
[
"Load",
"feature",
"data",
"from",
"a",
"2D",
"ndarray",
"on",
"disk",
"."
] |
neurosynth/neurosynth
|
python
|
https://github.com/neurosynth/neurosynth/blob/948ce7edce15d7df693446e76834e0c23bfe8f11/neurosynth/analysis/decode.py#L154-L157
|
[
"def",
"_load_features_from_array",
"(",
"self",
",",
"features",
")",
":",
"self",
".",
"feature_images",
"=",
"np",
".",
"load",
"(",
"features",
")",
"self",
".",
"feature_names",
"=",
"range",
"(",
"self",
".",
"feature_images",
".",
"shape",
"[",
"1",
"]",
")"
] |
948ce7edce15d7df693446e76834e0c23bfe8f11
|
test
|
Decoder._load_features_from_dataset
|
Load feature image data from the current Dataset instance. See
load_features() for documentation.
|
neurosynth/analysis/decode.py
|
def _load_features_from_dataset(self, features=None, image_type=None,
threshold=0.001):
""" Load feature image data from the current Dataset instance. See
load_features() for documentation.
"""
self.feature_names = self.dataset.feature_table.feature_names
if features is not None:
self.feature_names = [f for f in features
if f in self.feature_names]
from neurosynth.analysis import meta
self.feature_images = meta.analyze_features(
self.dataset, self.feature_names, image_type=image_type,
threshold=threshold)
# Apply a mask if one was originally passed
if self.masker.layers:
in_mask = self.masker.get_mask(in_global_mask=True)
self.feature_images = self.feature_images[in_mask, :]
|
def _load_features_from_dataset(self, features=None, image_type=None,
threshold=0.001):
""" Load feature image data from the current Dataset instance. See
load_features() for documentation.
"""
self.feature_names = self.dataset.feature_table.feature_names
if features is not None:
self.feature_names = [f for f in features
if f in self.feature_names]
from neurosynth.analysis import meta
self.feature_images = meta.analyze_features(
self.dataset, self.feature_names, image_type=image_type,
threshold=threshold)
# Apply a mask if one was originally passed
if self.masker.layers:
in_mask = self.masker.get_mask(in_global_mask=True)
self.feature_images = self.feature_images[in_mask, :]
|
[
"Load",
"feature",
"image",
"data",
"from",
"the",
"current",
"Dataset",
"instance",
".",
"See",
"load_features",
"()",
"for",
"documentation",
"."
] |
neurosynth/neurosynth
|
python
|
https://github.com/neurosynth/neurosynth/blob/948ce7edce15d7df693446e76834e0c23bfe8f11/neurosynth/analysis/decode.py#L159-L175
|
[
"def",
"_load_features_from_dataset",
"(",
"self",
",",
"features",
"=",
"None",
",",
"image_type",
"=",
"None",
",",
"threshold",
"=",
"0.001",
")",
":",
"self",
".",
"feature_names",
"=",
"self",
".",
"dataset",
".",
"feature_table",
".",
"feature_names",
"if",
"features",
"is",
"not",
"None",
":",
"self",
".",
"feature_names",
"=",
"[",
"f",
"for",
"f",
"in",
"features",
"if",
"f",
"in",
"self",
".",
"feature_names",
"]",
"from",
"neurosynth",
".",
"analysis",
"import",
"meta",
"self",
".",
"feature_images",
"=",
"meta",
".",
"analyze_features",
"(",
"self",
".",
"dataset",
",",
"self",
".",
"feature_names",
",",
"image_type",
"=",
"image_type",
",",
"threshold",
"=",
"threshold",
")",
"# Apply a mask if one was originally passed",
"if",
"self",
".",
"masker",
".",
"layers",
":",
"in_mask",
"=",
"self",
".",
"masker",
".",
"get_mask",
"(",
"in_global_mask",
"=",
"True",
")",
"self",
".",
"feature_images",
"=",
"self",
".",
"feature_images",
"[",
"in_mask",
",",
":",
"]"
] |
948ce7edce15d7df693446e76834e0c23bfe8f11
|
test
|
Decoder._load_features_from_images
|
Load feature image data from image files.
Args:
images: A list of image filenames.
names: An optional list of strings to use as the feature names. Must
be in the same order as the images.
|
neurosynth/analysis/decode.py
|
def _load_features_from_images(self, images, names=None):
""" Load feature image data from image files.
Args:
images: A list of image filenames.
names: An optional list of strings to use as the feature names. Must
be in the same order as the images.
"""
if names is not None and len(names) != len(images):
raise Exception(
"Lists of feature names and images must be of same length!")
self.feature_names = names if names is not None else images
self.feature_images = imageutils.load_imgs(images, self.masker)
|
def _load_features_from_images(self, images, names=None):
""" Load feature image data from image files.
Args:
images: A list of image filenames.
names: An optional list of strings to use as the feature names. Must
be in the same order as the images.
"""
if names is not None and len(names) != len(images):
raise Exception(
"Lists of feature names and images must be of same length!")
self.feature_names = names if names is not None else images
self.feature_images = imageutils.load_imgs(images, self.masker)
|
[
"Load",
"feature",
"image",
"data",
"from",
"image",
"files",
"."
] |
neurosynth/neurosynth
|
python
|
https://github.com/neurosynth/neurosynth/blob/948ce7edce15d7df693446e76834e0c23bfe8f11/neurosynth/analysis/decode.py#L177-L189
|
[
"def",
"_load_features_from_images",
"(",
"self",
",",
"images",
",",
"names",
"=",
"None",
")",
":",
"if",
"names",
"is",
"not",
"None",
"and",
"len",
"(",
"names",
")",
"!=",
"len",
"(",
"images",
")",
":",
"raise",
"Exception",
"(",
"\"Lists of feature names and images must be of same length!\"",
")",
"self",
".",
"feature_names",
"=",
"names",
"if",
"names",
"is",
"not",
"None",
"else",
"images",
"self",
".",
"feature_images",
"=",
"imageutils",
".",
"load_imgs",
"(",
"images",
",",
"self",
".",
"masker",
")"
] |
948ce7edce15d7df693446e76834e0c23bfe8f11
|
test
|
Decoder._pearson_correlation
|
Decode images using Pearson's r.
Computes the correlation between each input image and each feature
image across voxels.
Args:
imgs_to_decode: An ndarray of images to decode, with voxels in rows
and images in columns.
Returns:
An n_features x n_images 2D array, with each cell representing the
pearson correlation between the i'th feature and the j'th image
across all voxels.
|
neurosynth/analysis/decode.py
|
def _pearson_correlation(self, imgs_to_decode):
""" Decode images using Pearson's r.
Computes the correlation between each input image and each feature
image across voxels.
Args:
imgs_to_decode: An ndarray of images to decode, with voxels in rows
and images in columns.
Returns:
An n_features x n_images 2D array, with each cell representing the
pearson correlation between the i'th feature and the j'th image
across all voxels.
"""
x, y = imgs_to_decode.astype(float), self.feature_images.astype(float)
return self._xy_corr(x, y)
|
def _pearson_correlation(self, imgs_to_decode):
""" Decode images using Pearson's r.
Computes the correlation between each input image and each feature
image across voxels.
Args:
imgs_to_decode: An ndarray of images to decode, with voxels in rows
and images in columns.
Returns:
An n_features x n_images 2D array, with each cell representing the
pearson correlation between the i'th feature and the j'th image
across all voxels.
"""
x, y = imgs_to_decode.astype(float), self.feature_images.astype(float)
return self._xy_corr(x, y)
|
[
"Decode",
"images",
"using",
"Pearson",
"s",
"r",
"."
] |
neurosynth/neurosynth
|
python
|
https://github.com/neurosynth/neurosynth/blob/948ce7edce15d7df693446e76834e0c23bfe8f11/neurosynth/analysis/decode.py#L198-L214
|
[
"def",
"_pearson_correlation",
"(",
"self",
",",
"imgs_to_decode",
")",
":",
"x",
",",
"y",
"=",
"imgs_to_decode",
".",
"astype",
"(",
"float",
")",
",",
"self",
".",
"feature_images",
".",
"astype",
"(",
"float",
")",
"return",
"self",
".",
"_xy_corr",
"(",
"x",
",",
"y",
")"
] |
948ce7edce15d7df693446e76834e0c23bfe8f11
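The _xy_corr helper is not included in this record; the operation it performs is an ordinary column-wise Pearson correlation across voxels, which can be sketched in plain NumPy (array shapes are illustrative).

import numpy as np

def columnwise_pearson(x, y):
    # Correlate every column of x with every column of y; rows are voxels.
    x = (x - x.mean(axis=0)) / x.std(axis=0)
    y = (y - y.mean(axis=0)) / y.std(axis=0)
    return y.T.dot(x) / x.shape[0]   # shape: (y columns, x columns)

rng = np.random.RandomState(0)
imgs_to_decode = rng.rand(1000, 2)   # 1000 voxels, 2 images to decode
feature_images = rng.rand(1000, 5)   # 1000 voxels, 5 feature images
r = columnwise_pearson(imgs_to_decode, feature_images)
print(r.shape)                       # (5, 2): one row per feature, one column per image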
|
test
|
Decoder._dot_product
|
Decoding using the dot product.
|
neurosynth/analysis/decode.py
|
def _dot_product(self, imgs_to_decode):
""" Decoding using the dot product.
"""
return np.dot(imgs_to_decode.T, self.feature_images).T
|
def _dot_product(self, imgs_to_decode):
""" Decoding using the dot product.
"""
return np.dot(imgs_to_decode.T, self.feature_images).T
|
[
"Decoding",
"using",
"the",
"dot",
"product",
"."
] |
neurosynth/neurosynth
|
python
|
https://github.com/neurosynth/neurosynth/blob/948ce7edce15d7df693446e76834e0c23bfe8f11/neurosynth/analysis/decode.py#L216-L219
|
[
"def",
"_dot_product",
"(",
"self",
",",
"imgs_to_decode",
")",
":",
"return",
"np",
".",
"dot",
"(",
"imgs_to_decode",
".",
"T",
",",
"self",
".",
"feature_images",
")",
".",
"T"
] |
948ce7edce15d7df693446e76834e0c23bfe8f11
|
test
|
Decoder._roi_association
|
Computes the strength of association between activation in a mask
and presence/absence of a semantic feature. This is essentially a
generalization of the voxel-wise reverse inference z-score to the
multivoxel case.
|
neurosynth/analysis/decode.py
|
def _roi_association(self, imgs_to_decode, value='z', binarize=None):
""" Computes the strength of association between activation in a mask
and presence/absence of a semantic feature. This is essentially a
generalization of the voxel-wise reverse inference z-score to the
multivoxel case.
"""
imgs_to_decode = imgs_to_decode.squeeze()
x = average_within_regions(self.dataset, imgs_to_decode).astype(float)
y = self.dataset.feature_table.data[self.feature_names].values
if binarize is not None:
y[y > binarize] = 1.
y[y < 1.] = 0.
r = self._xy_corr(x.T, y)
if value == 'r':
return r
elif value == 'z':
f_r = np.arctanh(r)
return f_r * np.sqrt(y.shape[0] - 3)
|
def _roi_association(self, imgs_to_decode, value='z', binarize=None):
""" Computes the strength of association between activation in a mask
and presence/absence of a semantic feature. This is essentially a
generalization of the voxel-wise reverse inference z-score to the
multivoxel case.
"""
imgs_to_decode = imgs_to_decode.squeeze()
x = average_within_regions(self.dataset, imgs_to_decode).astype(float)
y = self.dataset.feature_table.data[self.feature_names].values
if binarize is not None:
y[y > binarize] = 1.
y[y < 1.] = 0.
r = self._xy_corr(x.T, y)
if value == 'r':
return r
elif value == 'z':
f_r = np.arctanh(r)
return f_r * np.sqrt(y.shape[0] - 3)
|
[
"Computes",
"the",
"strength",
"of",
"association",
"between",
"activation",
"in",
"a",
"mask",
"and",
"presence",
"/",
"absence",
"of",
"a",
"semantic",
"feature",
".",
"This",
"is",
"essentially",
"a",
"generalization",
"of",
"the",
"voxel",
"-",
"wise",
"reverse",
"inference",
"z",
"-",
"score",
"to",
"the",
"multivoxel",
"case",
"."
] |
neurosynth/neurosynth
|
python
|
https://github.com/neurosynth/neurosynth/blob/948ce7edce15d7df693446e76834e0c23bfe8f11/neurosynth/analysis/decode.py#L221-L238
|
[
"def",
"_roi_association",
"(",
"self",
",",
"imgs_to_decode",
",",
"value",
"=",
"'z'",
",",
"binarize",
"=",
"None",
")",
":",
"imgs_to_decode",
"=",
"imgs_to_decode",
".",
"squeeze",
"(",
")",
"x",
"=",
"average_within_regions",
"(",
"self",
".",
"dataset",
",",
"imgs_to_decode",
")",
".",
"astype",
"(",
"float",
")",
"y",
"=",
"self",
".",
"dataset",
".",
"feature_table",
".",
"data",
"[",
"self",
".",
"feature_names",
"]",
".",
"values",
"if",
"binarize",
"is",
"not",
"None",
":",
"y",
"[",
"y",
">",
"binarize",
"]",
"=",
"1.",
"y",
"[",
"y",
"<",
"1.",
"]",
"=",
"0.",
"r",
"=",
"self",
".",
"_xy_corr",
"(",
"x",
".",
"T",
",",
"y",
")",
"if",
"value",
"==",
"'r'",
":",
"return",
"r",
"elif",
"value",
"==",
"'z'",
":",
"f_r",
"=",
"np",
".",
"arctanh",
"(",
"r",
")",
"return",
"f_r",
"*",
"np",
".",
"sqrt",
"(",
"y",
".",
"shape",
"[",
"0",
"]",
"-",
"3",
")"
] |
948ce7edce15d7df693446e76834e0c23bfe8f11
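The last two lines of _roi_association apply the standard Fisher r-to-z transformation: with n studies, arctanh(r) * sqrt(n - 3) is approximately standard normal under the null hypothesis of no association. A tiny numeric illustration with made-up values:

import numpy as np

r = np.array([0.05, 0.10, 0.25])   # made-up correlations
n = 500                            # made-up number of studies
z = np.arctanh(r) * np.sqrt(n - 3)
print(np.round(z, 2))              # roughly [1.12, 2.24, 5.69]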
|
test
|
feature_selection
|
Implements various kinds of feature selection
|
neurosynth/analysis/classify.py
|
def feature_selection(feat_select, X, y):
"""" Implements various kinds of feature selection """
# K-best
if re.match('.*-best', feat_select) is not None:
n = int(feat_select.split('-')[0])
selector = SelectKBest(k=n)
import warnings
with warnings.catch_warnings():
warnings.simplefilter('ignore', category=UserWarning)
features_selected = np.where(
selector.fit(X, y).get_support() is True)[0]
elif re.match('.*-randombest', feat_select) is not None:
n = int(feat_select.split('-')[0])
from random import shuffle
features = range(0, X.shape[1])
shuffle(features)
features_selected = features[:n]
return features_selected
|
def feature_selection(feat_select, X, y):
"""" Implements various kinds of feature selection """
# K-best
if re.match('.*-best', feat_select) is not None:
n = int(feat_select.split('-')[0])
selector = SelectKBest(k=n)
import warnings
with warnings.catch_warnings():
warnings.simplefilter('ignore', category=UserWarning)
features_selected = np.where(
selector.fit(X, y).get_support() is True)[0]
elif re.match('.*-randombest', feat_select) is not None:
n = int(feat_select.split('-')[0])
from random import shuffle
features = range(0, X.shape[1])
shuffle(features)
features_selected = features[:n]
return features_selected
|
[
"Implements",
"various",
"kinds",
"of",
"feature",
"selection"
] |
neurosynth/neurosynth
|
python
|
https://github.com/neurosynth/neurosynth/blob/948ce7edce15d7df693446e76834e0c23bfe8f11/neurosynth/analysis/classify.py#L10-L33
|
[
"def",
"feature_selection",
"(",
"feat_select",
",",
"X",
",",
"y",
")",
":",
"# K-best",
"if",
"re",
".",
"match",
"(",
"'.*-best'",
",",
"feat_select",
")",
"is",
"not",
"None",
":",
"n",
"=",
"int",
"(",
"feat_select",
".",
"split",
"(",
"'-'",
")",
"[",
"0",
"]",
")",
"selector",
"=",
"SelectKBest",
"(",
"k",
"=",
"n",
")",
"import",
"warnings",
"with",
"warnings",
".",
"catch_warnings",
"(",
")",
":",
"warnings",
".",
"simplefilter",
"(",
"'ignore'",
",",
"category",
"=",
"UserWarning",
")",
"features_selected",
"=",
"np",
".",
"where",
"(",
"selector",
".",
"fit",
"(",
"X",
",",
"y",
")",
".",
"get_support",
"(",
")",
"is",
"True",
")",
"[",
"0",
"]",
"elif",
"re",
".",
"match",
"(",
"'.*-randombest'",
",",
"feat_select",
")",
"is",
"not",
"None",
":",
"n",
"=",
"int",
"(",
"feat_select",
".",
"split",
"(",
"'-'",
")",
"[",
"0",
"]",
")",
"from",
"random",
"import",
"shuffle",
"features",
"=",
"range",
"(",
"0",
",",
"X",
".",
"shape",
"[",
"1",
"]",
")",
"shuffle",
"(",
"features",
")",
"features_selected",
"=",
"features",
"[",
":",
"n",
"]",
"return",
"features_selected"
] |
948ce7edce15d7df693446e76834e0c23bfe8f11
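In the k-best branch above, selector.fit(X, y).get_support() is True compares a NumPy array to True by identity, which is always False, so features_selected comes back empty; the conventional pattern is to index with the boolean mask returned by get_support(). A self-contained sketch with synthetic data:

import numpy as np
from sklearn.feature_selection import SelectKBest, f_classif

rng = np.random.RandomState(0)
X = rng.rand(100, 20)              # 100 studies x 20 features (synthetic)
y = rng.randint(0, 2, 100)         # binary class labels

selector = SelectKBest(score_func=f_classif, k=5).fit(X, y)
features_selected = np.where(selector.get_support())[0]
print(features_selected)           # indices of the 5 best-scoring features

On Python 3 the '-randombest' branch would also need list(range(X.shape[1])) (or random.sample) before shuffling, since a range object cannot be shuffled in place.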
|
test
|
get_studies_by_regions
|
Set up data for a classification task given a set of masks
Given a set of masks, this function retrieves studies associated with
each mask at the specified threshold, optionally removes overlap and
filters by studies and features, and returns studies by feature matrix
(X) and class labels (y)
Args:
dataset: a Neurosynth dataset
        masks: a list of paths to Nifti masks
threshold: percentage of voxels active within the mask for study
to be included
        remove_overlap: A boolean indicating if studies that
appear in more than one mask should be excluded
studies: An optional list of study names used to constrain the set
used in classification. If None, will use all features in the
dataset.
features: An optional list of feature names used to constrain the
set used in classification. If None, will use all features in
the dataset.
        regularization: Optional string indicating type of regularization to apply to X ('scale': unit scale without demeaning; None: no regularization)
Returns:
A tuple (X, y) of np arrays.
X is a feature by studies matrix and y is a vector of class labels
|
neurosynth/analysis/classify.py
|
def get_studies_by_regions(dataset, masks, threshold=0.08, remove_overlap=True,
studies=None, features=None,
regularization="scale"):
""" Set up data for a classification task given a set of masks
Given a set of masks, this function retrieves studies associated with
each mask at the specified threshold, optionally removes overlap and
filters by studies and features, and returns studies by feature matrix
(X) and class labels (y)
Args:
dataset: a Neurosynth dataset
        masks: a list of paths to Nifti masks
threshold: percentage of voxels active within the mask for study
to be included
        remove_overlap: A boolean indicating if studies that
appear in more than one mask should be excluded
studies: An optional list of study names used to constrain the set
used in classification. If None, will use all features in the
dataset.
features: An optional list of feature names used to constrain the
set used in classification. If None, will use all features in
the dataset.
        regularization: Optional string indicating type of regularization to apply to X ('scale': unit scale without demeaning; None: no regularization)
Returns:
A tuple (X, y) of np arrays.
X is a feature by studies matrix and y is a vector of class labels
"""
import nibabel as nib
import os
# Load masks using NiBabel
try:
loaded_masks = [nib.load(os.path.relpath(m)) for m in masks]
except OSError:
print('Error loading masks. Check the path')
# Get a list of studies that activate for each mask file--i.e., a list of
# lists
grouped_ids = [dataset.get_studies(mask=m, activation_threshold=threshold)
for m in loaded_masks]
# Flattened ids
flat_ids = reduce(lambda a, b: a + b, grouped_ids)
# Remove duplicates
if remove_overlap:
import collections
flat_ids = [id for (id, count) in
collections.Counter(flat_ids).items() if count == 1]
grouped_ids = [[x for x in m if x in flat_ids] for m in
grouped_ids] # Remove
# Create class label(y)
y = [[idx] * len(ids) for (idx, ids) in enumerate(grouped_ids)]
y = reduce(lambda a, b: a + b, y) # Flatten
y = np.array(y)
# Extract feature set for each class separately
X = [dataset.get_feature_data(ids=group_ids, features=features)
for group_ids in grouped_ids]
X = np.vstack(tuple(X))
if regularization:
X = regularize(X, method=regularization)
return (X, y)
|
def get_studies_by_regions(dataset, masks, threshold=0.08, remove_overlap=True,
studies=None, features=None,
regularization="scale"):
""" Set up data for a classification task given a set of masks
Given a set of masks, this function retrieves studies associated with
each mask at the specified threshold, optionally removes overlap and
filters by studies and features, and returns studies by feature matrix
(X) and class labels (y)
Args:
dataset: a Neurosynth dataset
        masks: a list of paths to Nifti masks
threshold: percentage of voxels active within the mask for study
to be included
        remove_overlap: A boolean indicating if studies that
appear in more than one mask should be excluded
studies: An optional list of study names used to constrain the set
used in classification. If None, will use all features in the
dataset.
features: An optional list of feature names used to constrain the
set used in classification. If None, will use all features in
the dataset.
        regularization: Optional string indicating type of regularization to apply to X ('scale': unit scale without demeaning; None: no regularization)
Returns:
A tuple (X, y) of np arrays.
X is a feature by studies matrix and y is a vector of class labels
"""
import nibabel as nib
import os
# Load masks using NiBabel
try:
loaded_masks = [nib.load(os.path.relpath(m)) for m in masks]
except OSError:
print('Error loading masks. Check the path')
# Get a list of studies that activate for each mask file--i.e., a list of
# lists
grouped_ids = [dataset.get_studies(mask=m, activation_threshold=threshold)
for m in loaded_masks]
# Flattened ids
flat_ids = reduce(lambda a, b: a + b, grouped_ids)
# Remove duplicates
if remove_overlap:
import collections
flat_ids = [id for (id, count) in
collections.Counter(flat_ids).items() if count == 1]
grouped_ids = [[x for x in m if x in flat_ids] for m in
grouped_ids] # Remove
# Create class label(y)
y = [[idx] * len(ids) for (idx, ids) in enumerate(grouped_ids)]
y = reduce(lambda a, b: a + b, y) # Flatten
y = np.array(y)
# Extract feature set for each class separately
X = [dataset.get_feature_data(ids=group_ids, features=features)
for group_ids in grouped_ids]
X = np.vstack(tuple(X))
if regularization:
X = regularize(X, method=regularization)
return (X, y)
|
[
"Set",
"up",
"data",
"for",
"a",
"classification",
"task",
"given",
"a",
"set",
"of",
"masks"
] |
neurosynth/neurosynth
|
python
|
https://github.com/neurosynth/neurosynth/blob/948ce7edce15d7df693446e76834e0c23bfe8f11/neurosynth/analysis/classify.py#L64-L137
|
[
"def",
"get_studies_by_regions",
"(",
"dataset",
",",
"masks",
",",
"threshold",
"=",
"0.08",
",",
"remove_overlap",
"=",
"True",
",",
"studies",
"=",
"None",
",",
"features",
"=",
"None",
",",
"regularization",
"=",
"\"scale\"",
")",
":",
"import",
"nibabel",
"as",
"nib",
"import",
"os",
"# Load masks using NiBabel",
"try",
":",
"loaded_masks",
"=",
"[",
"nib",
".",
"load",
"(",
"os",
".",
"path",
".",
"relpath",
"(",
"m",
")",
")",
"for",
"m",
"in",
"masks",
"]",
"except",
"OSError",
":",
"print",
"(",
"'Error loading masks. Check the path'",
")",
"# Get a list of studies that activate for each mask file--i.e., a list of",
"# lists",
"grouped_ids",
"=",
"[",
"dataset",
".",
"get_studies",
"(",
"mask",
"=",
"m",
",",
"activation_threshold",
"=",
"threshold",
")",
"for",
"m",
"in",
"loaded_masks",
"]",
"# Flattened ids",
"flat_ids",
"=",
"reduce",
"(",
"lambda",
"a",
",",
"b",
":",
"a",
"+",
"b",
",",
"grouped_ids",
")",
"# Remove duplicates",
"if",
"remove_overlap",
":",
"import",
"collections",
"flat_ids",
"=",
"[",
"id",
"for",
"(",
"id",
",",
"count",
")",
"in",
"collections",
".",
"Counter",
"(",
"flat_ids",
")",
".",
"items",
"(",
")",
"if",
"count",
"==",
"1",
"]",
"grouped_ids",
"=",
"[",
"[",
"x",
"for",
"x",
"in",
"m",
"if",
"x",
"in",
"flat_ids",
"]",
"for",
"m",
"in",
"grouped_ids",
"]",
"# Remove",
"# Create class label(y)",
"y",
"=",
"[",
"[",
"idx",
"]",
"*",
"len",
"(",
"ids",
")",
"for",
"(",
"idx",
",",
"ids",
")",
"in",
"enumerate",
"(",
"grouped_ids",
")",
"]",
"y",
"=",
"reduce",
"(",
"lambda",
"a",
",",
"b",
":",
"a",
"+",
"b",
",",
"y",
")",
"# Flatten",
"y",
"=",
"np",
".",
"array",
"(",
"y",
")",
"# Extract feature set for each class separately",
"X",
"=",
"[",
"dataset",
".",
"get_feature_data",
"(",
"ids",
"=",
"group_ids",
",",
"features",
"=",
"features",
")",
"for",
"group_ids",
"in",
"grouped_ids",
"]",
"X",
"=",
"np",
".",
"vstack",
"(",
"tuple",
"(",
"X",
")",
")",
"if",
"regularization",
":",
"X",
"=",
"regularize",
"(",
"X",
",",
"method",
"=",
"regularization",
")",
"return",
"(",
"X",
",",
"y",
")"
] |
948ce7edce15d7df693446e76834e0c23bfe8f11
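A usage sketch for get_studies_by_regions. The dataset pickle and ROI masks are hypothetical, and Dataset.load is assumed from the usual workflow.

import numpy as np
from neurosynth.base.dataset import Dataset
from neurosynth.analysis import classify

dataset = Dataset.load('dataset.pkl')   # hypothetical pre-built dataset
masks = ['left_amygdala.nii.gz', 'right_amygdala.nii.gz']

# Studies activating at least 8% of a mask's voxels are assigned to that class.
X, y = classify.get_studies_by_regions(dataset, masks, threshold=0.08,
                                       remove_overlap=True)
print(X.shape)         # (n_studies, n_features); scaled by default ('scale')
print(np.bincount(y))  # number of studies per class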
|
test
|
get_feature_order
|
Returns a list with the order that features requested appear in
dataset
|
neurosynth/analysis/classify.py
|
def get_feature_order(dataset, features):
""" Returns a list with the order that features requested appear in
dataset """
all_features = dataset.get_feature_names()
i = [all_features.index(f) for f in features]
return i
|
def get_feature_order(dataset, features):
""" Returns a list with the order that features requested appear in
dataset """
all_features = dataset.get_feature_names()
i = [all_features.index(f) for f in features]
return i
|
[
"Returns",
"a",
"list",
"with",
"the",
"order",
"that",
"features",
"requested",
"appear",
"in",
"dataset"
] |
neurosynth/neurosynth
|
python
|
https://github.com/neurosynth/neurosynth/blob/948ce7edce15d7df693446e76834e0c23bfe8f11/neurosynth/analysis/classify.py#L140-L147
|
[
"def",
"get_feature_order",
"(",
"dataset",
",",
"features",
")",
":",
"all_features",
"=",
"dataset",
".",
"get_feature_names",
"(",
")",
"i",
"=",
"[",
"all_features",
".",
"index",
"(",
"f",
")",
"for",
"f",
"in",
"features",
"]",
"return",
"i"
] |
948ce7edce15d7df693446e76834e0c23bfe8f11
|
test
|
classify_regions
|
Perform classification on specified regions
Given a set of masks, this function retrieves studies associated with
each mask at the specified threshold, optionally removes overlap and
filters by studies and features. Then it trains an algorithm to
classify studies based on features and tests performance.
Args:
dataset: a Neurosynth dataset
        masks: a list of paths to Nifti masks
        method: a string indicating which method to use.
'SVM': Support Vector Classifier with rbf kernel
'ERF': Extremely Randomized Forest classifier
'Dummy': A dummy classifier using stratified classes as
predictor
threshold: percentage of voxels active within the mask for study
to be included
        remove_overlap: A boolean indicating if studies that
appear in more than one mask should be excluded
regularization: A string indicating type of regularization to use.
If None, performs no regularization.
'scale': Unit scale without demeaning
output: A string indicating output type
'summary': Dictionary with summary statistics including score
and n
'summary_clf': Same as above but also includes classifier
'clf': Only returns classifier
Warning: using cv without grid will return an untrained
classifier
studies: An optional list of study names used to constrain the set
used in classification. If None, will use all features in the
dataset.
features: An optional list of feature names used to constrain the
set used in classification. If None, will use all features in
the dataset.
class_weight: Parameter to pass to classifier determining how to
weight classes
        classifier: An optional scikit-learn classifier to use instead of
pre-set up classifiers set up using 'method'
cross_val: A string indicating type of cross validation to use.
Can also pass a scikit_classifier
param_grid: A dictionary indicating which parameters to optimize
using GridSearchCV. If None, no GridSearch will be used
Returns:
A tuple (X, y) of np arrays.
X is a feature by studies matrix and y is a vector of class labels
|
neurosynth/analysis/classify.py
|
def classify_regions(dataset, masks, method='ERF', threshold=0.08,
remove_overlap=True, regularization='scale',
output='summary', studies=None, features=None,
class_weight='auto', classifier=None,
cross_val='4-Fold', param_grid=None, scoring='accuracy'):
""" Perform classification on specified regions
Given a set of masks, this function retrieves studies associated with
each mask at the specified threshold, optionally removes overlap and
filters by studies and features. Then it trains an algorithm to
classify studies based on features and tests performance.
Args:
dataset: a Neurosynth dataset
        masks: a list of paths to Nifti masks
        method: a string indicating which method to use.
'SVM': Support Vector Classifier with rbf kernel
'ERF': Extremely Randomized Forest classifier
'Dummy': A dummy classifier using stratified classes as
predictor
threshold: percentage of voxels active within the mask for study
to be included
        remove_overlap: A boolean indicating if studies that
appear in more than one mask should be excluded
regularization: A string indicating type of regularization to use.
If None, performs no regularization.
'scale': Unit scale without demeaning
output: A string indicating output type
'summary': Dictionary with summary statistics including score
and n
'summary_clf': Same as above but also includes classifier
'clf': Only returns classifier
Warning: using cv without grid will return an untrained
classifier
studies: An optional list of study names used to constrain the set
used in classification. If None, will use all features in the
dataset.
features: An optional list of feature names used to constrain the
set used in classification. If None, will use all features in
the dataset.
class_weight: Parameter to pass to classifier determining how to
weight classes
        classifier: An optional scikit-learn classifier to use instead of
pre-set up classifiers set up using 'method'
cross_val: A string indicating type of cross validation to use.
Can also pass a scikit_classifier
param_grid: A dictionary indicating which parameters to optimize
using GridSearchCV. If None, no GridSearch will be used
Returns:
A tuple (X, y) of np arrays.
X is a feature by studies matrix and y is a vector of class labels
"""
(X, y) = get_studies_by_regions(dataset, masks, threshold, remove_overlap,
studies, features,
regularization=regularization)
return classify(X, y, method, classifier, output, cross_val,
class_weight, scoring=scoring, param_grid=param_grid)
|
def classify_regions(dataset, masks, method='ERF', threshold=0.08,
remove_overlap=True, regularization='scale',
output='summary', studies=None, features=None,
class_weight='auto', classifier=None,
cross_val='4-Fold', param_grid=None, scoring='accuracy'):
""" Perform classification on specified regions
Given a set of masks, this function retrieves studies associated with
each mask at the specified threshold, optionally removes overlap and
filters by studies and features. Then it trains an algorithm to
classify studies based on features and tests performance.
Args:
dataset: a Neurosynth dataset
        masks: a list of paths to Nifti masks
        method: a string indicating which method to use.
'SVM': Support Vector Classifier with rbf kernel
'ERF': Extremely Randomized Forest classifier
'Dummy': A dummy classifier using stratified classes as
predictor
threshold: percentage of voxels active within the mask for study
to be included
        remove_overlap: A boolean indicating if studies that
appear in more than one mask should be excluded
regularization: A string indicating type of regularization to use.
If None, performs no regularization.
'scale': Unit scale without demeaning
output: A string indicating output type
'summary': Dictionary with summary statistics including score
and n
'summary_clf': Same as above but also includes classifier
'clf': Only returns classifier
Warning: using cv without grid will return an untrained
classifier
studies: An optional list of study names used to constrain the set
used in classification. If None, will use all features in the
dataset.
features: An optional list of feature names used to constrain the
set used in classification. If None, will use all features in
the dataset.
class_weight: Parameter to pass to classifier determining how to
weight classes
        classifier: An optional scikit-learn classifier to use instead of
pre-set up classifiers set up using 'method'
cross_val: A string indicating type of cross validation to use.
Can also pass a scikit_classifier
param_grid: A dictionary indicating which parameters to optimize
using GridSearchCV. If None, no GridSearch will be used
Returns:
A tuple (X, y) of np arrays.
X is a feature by studies matrix and y is a vector of class labels
"""
(X, y) = get_studies_by_regions(dataset, masks, threshold, remove_overlap,
studies, features,
regularization=regularization)
return classify(X, y, method, classifier, output, cross_val,
class_weight, scoring=scoring, param_grid=param_grid)
|
[
"Perform",
"classification",
"on",
"specified",
"regions"
] |
neurosynth/neurosynth
|
python
|
https://github.com/neurosynth/neurosynth/blob/948ce7edce15d7df693446e76834e0c23bfe8f11/neurosynth/analysis/classify.py#L150-L209
|
[
"def",
"classify_regions",
"(",
"dataset",
",",
"masks",
",",
"method",
"=",
"'ERF'",
",",
"threshold",
"=",
"0.08",
",",
"remove_overlap",
"=",
"True",
",",
"regularization",
"=",
"'scale'",
",",
"output",
"=",
"'summary'",
",",
"studies",
"=",
"None",
",",
"features",
"=",
"None",
",",
"class_weight",
"=",
"'auto'",
",",
"classifier",
"=",
"None",
",",
"cross_val",
"=",
"'4-Fold'",
",",
"param_grid",
"=",
"None",
",",
"scoring",
"=",
"'accuracy'",
")",
":",
"(",
"X",
",",
"y",
")",
"=",
"get_studies_by_regions",
"(",
"dataset",
",",
"masks",
",",
"threshold",
",",
"remove_overlap",
",",
"studies",
",",
"features",
",",
"regularization",
"=",
"regularization",
")",
"return",
"classify",
"(",
"X",
",",
"y",
",",
"method",
",",
"classifier",
",",
"output",
",",
"cross_val",
",",
"class_weight",
",",
"scoring",
"=",
"scoring",
",",
"param_grid",
"=",
"param_grid",
")"
] |
948ce7edce15d7df693446e76834e0c23bfe8f11
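A matching sketch for classify_regions, which wires the same study selection into a cross-validated classifier; the dataset pickle and mask files are again hypothetical.

from neurosynth.base.dataset import Dataset
from neurosynth.analysis import classify

dataset = Dataset.load('dataset.pkl')   # hypothetical pre-built dataset

result = classify.classify_regions(
    dataset,
    ['left_amygdala.nii.gz', 'right_amygdala.nii.gz'],   # hypothetical ROI masks
    method='ERF', threshold=0.08, cross_val='4-Fold', output='summary')

print(result['score'])  # cross-validated accuracy
print(result['n'])      # number of studies per class label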
|
test
|
classify
|
Wrapper for scikit-learn classification functions
Implements various types of classification and cross validation
|
neurosynth/analysis/classify.py
|
def classify(X, y, clf_method='ERF', classifier=None, output='summary_clf',
cross_val=None, class_weight=None, regularization=None,
param_grid=None, scoring='accuracy', refit_all=True,
feat_select=None):
""" Wrapper for scikit-learn classification functions
    Implements various types of classification and cross validation """
# Build classifier
clf = Classifier(clf_method, classifier, param_grid)
# Fit & test model with or without cross-validation
if cross_val is not None:
score = clf.cross_val_fit(X, y, cross_val, scoring=scoring,
feat_select=feat_select,
class_weight=class_weight)
else:
# Does not support scoring function
score = clf.fit(X, y, class_weight=class_weight).score(X, y)
# Return some stuff...
from collections import Counter
if output == 'clf':
return clf
else:
if output == 'summary':
output = {'score': score, 'n': dict(Counter(y))}
elif output == 'summary_clf':
output = {
'score': score,
'n': dict(Counter(y)),
'clf': clf,
'features_selected': clf.features_selected,
'predictions': clf.predictions
}
return output
|
def classify(X, y, clf_method='ERF', classifier=None, output='summary_clf',
cross_val=None, class_weight=None, regularization=None,
param_grid=None, scoring='accuracy', refit_all=True,
feat_select=None):
""" Wrapper for scikit-learn classification functions
    Implements various types of classification and cross validation """
# Build classifier
clf = Classifier(clf_method, classifier, param_grid)
# Fit & test model with or without cross-validation
if cross_val is not None:
score = clf.cross_val_fit(X, y, cross_val, scoring=scoring,
feat_select=feat_select,
class_weight=class_weight)
else:
# Does not support scoring function
score = clf.fit(X, y, class_weight=class_weight).score(X, y)
# Return some stuff...
from collections import Counter
if output == 'clf':
return clf
else:
if output == 'summary':
output = {'score': score, 'n': dict(Counter(y))}
elif output == 'summary_clf':
output = {
'score': score,
'n': dict(Counter(y)),
'clf': clf,
'features_selected': clf.features_selected,
'predictions': clf.predictions
}
return output
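# Illustrative usage sketch (not part of the original source): assumes the
# neurosynth package shown above is importable together with a scikit-learn
# version that still ships sklearn.cross_validation; X and y are synthetic.
import numpy as np
from neurosynth.analysis.classify import classify

rng = np.random.RandomState(0)
X_demo = rng.rand(40, 10)              # 40 "studies" x 10 synthetic features
y_demo = np.repeat([0, 1], 20)         # two balanced class labels
summary = classify(X_demo, y_demo, clf_method='ERF', output='summary',
                   cross_val='4-Fold', class_weight='auto', scoring='accuracy')
print(summary['score'])                # mean cross-validated accuracy
print(summary['n'])                    # studies per class, e.g. {0: 20, 1: 20}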
|
[
"Wrapper",
"for",
"scikit",
"-",
"learn",
"classification",
"functions",
"Imlements",
"various",
"types",
"of",
"classification",
"and",
"cross",
"validation"
] |
neurosynth/neurosynth
|
python
|
https://github.com/neurosynth/neurosynth/blob/948ce7edce15d7df693446e76834e0c23bfe8f11/neurosynth/analysis/classify.py#L212-L248
|
[
"def",
"classify",
"(",
"X",
",",
"y",
",",
"clf_method",
"=",
"'ERF'",
",",
"classifier",
"=",
"None",
",",
"output",
"=",
"'summary_clf'",
",",
"cross_val",
"=",
"None",
",",
"class_weight",
"=",
"None",
",",
"regularization",
"=",
"None",
",",
"param_grid",
"=",
"None",
",",
"scoring",
"=",
"'accuracy'",
",",
"refit_all",
"=",
"True",
",",
"feat_select",
"=",
"None",
")",
":",
"# Build classifier",
"clf",
"=",
"Classifier",
"(",
"clf_method",
",",
"classifier",
",",
"param_grid",
")",
"# Fit & test model with or without cross-validation",
"if",
"cross_val",
"is",
"not",
"None",
":",
"score",
"=",
"clf",
".",
"cross_val_fit",
"(",
"X",
",",
"y",
",",
"cross_val",
",",
"scoring",
"=",
"scoring",
",",
"feat_select",
"=",
"feat_select",
",",
"class_weight",
"=",
"class_weight",
")",
"else",
":",
"# Does not support scoring function",
"score",
"=",
"clf",
".",
"fit",
"(",
"X",
",",
"y",
",",
"class_weight",
"=",
"class_weight",
")",
".",
"score",
"(",
"X",
",",
"y",
")",
"# Return some stuff...",
"from",
"collections",
"import",
"Counter",
"if",
"output",
"==",
"'clf'",
":",
"return",
"clf",
"else",
":",
"if",
"output",
"==",
"'summary'",
":",
"output",
"=",
"{",
"'score'",
":",
"score",
",",
"'n'",
":",
"dict",
"(",
"Counter",
"(",
"y",
")",
")",
"}",
"elif",
"output",
"==",
"'summary_clf'",
":",
"output",
"=",
"{",
"'score'",
":",
"score",
",",
"'n'",
":",
"dict",
"(",
"Counter",
"(",
"y",
")",
")",
",",
"'clf'",
":",
"clf",
",",
"'features_selected'",
":",
"clf",
".",
"features_selected",
",",
"'predictions'",
":",
"clf",
".",
"predictions",
"}",
"return",
"output"
] |
948ce7edce15d7df693446e76834e0c23bfe8f11
|
test
|
Classifier.fit
|
Fits X to outcomes y, using clf
|
neurosynth/analysis/classify.py
|
def fit(self, X, y, cv=None, class_weight='auto'):
""" Fits X to outcomes y, using clf """
# Incorporate error checking such as :
# if isinstance(self.classifier, ScikitClassifier):
        # do one thing...
        # otherwise...
self.X = X
self.y = y
self.set_class_weight(class_weight=class_weight, y=y)
self.clf = self.clf.fit(X, y)
return self.clf
|
def fit(self, X, y, cv=None, class_weight='auto'):
""" Fits X to outcomes y, using clf """
# Incorporate error checking such as :
# if isinstance(self.classifier, ScikitClassifier):
        # do one thing...
        # otherwise...
self.X = X
self.y = y
self.set_class_weight(class_weight=class_weight, y=y)
self.clf = self.clf.fit(X, y)
return self.clf
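# Minimal sketch of calling Classifier.fit directly (not from the project
# docs). The positional arguments mirror the Classifier(clf_method, classifier,
# param_grid) call used in classify() above; the data below are synthetic.
import numpy as np
from neurosynth.analysis.classify import Classifier

rng = np.random.RandomState(0)
X_demo = rng.rand(30, 5)
y_demo = np.repeat([0, 1], 15)
clf_wrapper = Classifier('ERF', None, None)
estimator = clf_wrapper.fit(X_demo, y_demo, class_weight='auto')  # returns the fitted sklearn estimator
print(estimator.score(X_demo, y_demo))                            # in-sample accuracy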
|
[
"Fits",
"X",
"to",
"outcomes",
"y",
"using",
"clf"
] |
neurosynth/neurosynth
|
python
|
https://github.com/neurosynth/neurosynth/blob/948ce7edce15d7df693446e76834e0c23bfe8f11/neurosynth/analysis/classify.py#L291-L306
|
[
"def",
"fit",
"(",
"self",
",",
"X",
",",
"y",
",",
"cv",
"=",
"None",
",",
"class_weight",
"=",
"'auto'",
")",
":",
"# Incorporate error checking such as :",
"# if isinstance(self.classifier, ScikitClassifier):",
"# do one thingNone",
"# otherwiseNone.",
"self",
".",
"X",
"=",
"X",
"self",
".",
"y",
"=",
"y",
"self",
".",
"set_class_weight",
"(",
"class_weight",
"=",
"class_weight",
",",
"y",
"=",
"y",
")",
"self",
".",
"clf",
"=",
"self",
".",
"clf",
".",
"fit",
"(",
"X",
",",
"y",
")",
"return",
"self",
".",
"clf"
] |
948ce7edce15d7df693446e76834e0c23bfe8f11
|
test
|
Classifier.set_class_weight
|
Sets the class_weight of the classifier to match y
|
neurosynth/analysis/classify.py
|
def set_class_weight(self, class_weight='auto', y=None):
""" Sets the class_weight of the classifier to match y """
if class_weight is None:
cw = None
try:
self.clf.set_params(class_weight=cw)
except ValueError:
pass
elif class_weight == 'auto':
c = np.bincount(y)
ii = np.nonzero(c)[0]
c = c / float(c.sum())
cw = dict(zip(ii[::-1], c[ii]))
try:
self.clf.set_params(class_weight=cw)
except ValueError:
import warnings
warnings.warn(
"Tried to set class_weight, but failed. The classifier "
"probably doesn't support it")
|
def set_class_weight(self, class_weight='auto', y=None):
""" Sets the class_weight of the classifier to match y """
if class_weight is None:
cw = None
try:
self.clf.set_params(class_weight=cw)
except ValueError:
pass
elif class_weight == 'auto':
c = np.bincount(y)
ii = np.nonzero(c)[0]
c = c / float(c.sum())
cw = dict(zip(ii[::-1], c[ii]))
try:
self.clf.set_params(class_weight=cw)
except ValueError:
import warnings
warnings.warn(
"Tried to set class_weight, but failed. The classifier "
"probably doesn't support it")
|
[
"Sets",
"the",
"class_weight",
"of",
"the",
"classifier",
"to",
"match",
"y"
] |
neurosynth/neurosynth
|
python
|
https://github.com/neurosynth/neurosynth/blob/948ce7edce15d7df693446e76834e0c23bfe8f11/neurosynth/analysis/classify.py#L308-L331
|
[
"def",
"set_class_weight",
"(",
"self",
",",
"class_weight",
"=",
"'auto'",
",",
"y",
"=",
"None",
")",
":",
"if",
"class_weight",
"is",
"None",
":",
"cw",
"=",
"None",
"try",
":",
"self",
".",
"clf",
".",
"set_params",
"(",
"class_weight",
"=",
"cw",
")",
"except",
"ValueError",
":",
"pass",
"elif",
"class_weight",
"==",
"'auto'",
":",
"c",
"=",
"np",
".",
"bincount",
"(",
"y",
")",
"ii",
"=",
"np",
".",
"nonzero",
"(",
"c",
")",
"[",
"0",
"]",
"c",
"=",
"c",
"/",
"float",
"(",
"c",
".",
"sum",
"(",
")",
")",
"cw",
"=",
"dict",
"(",
"zip",
"(",
"ii",
"[",
":",
":",
"-",
"1",
"]",
",",
"c",
"[",
"ii",
"]",
")",
")",
"try",
":",
"self",
".",
"clf",
".",
"set_params",
"(",
"class_weight",
"=",
"cw",
")",
"except",
"ValueError",
":",
"import",
"warnings",
"warnings",
".",
"warn",
"(",
"\"Tried to set class_weight, but failed. The classifier \"",
"\"probably doesn't support it\"",
")"
] |
948ce7edce15d7df693446e76834e0c23bfe8f11
|
test
|
Classifier.cross_val_fit
|
Fits X to outcomes y, using clf and cv_method
|
neurosynth/analysis/classify.py
|
def cross_val_fit(self, X, y, cross_val='4-Fold', scoring='accuracy',
feat_select=None, class_weight='auto'):
""" Fits X to outcomes y, using clf and cv_method """
from sklearn import cross_validation
self.X = X
self.y = y
self.set_class_weight(class_weight=class_weight, y=y)
# Set cross validator
if isinstance(cross_val, string_types):
if re.match('.*-Fold', cross_val) is not None:
n = int(cross_val.split('-')[0])
self.cver = cross_validation.StratifiedKFold(self.y, n)
else:
raise Exception('Unrecognized cross validation method')
else:
self.cver = cross_val
if feat_select is not None:
self.features_selected = []
# Perform cross-validated classification
from sklearn.grid_search import GridSearchCV
if isinstance(self.clf, GridSearchCV):
import warnings
if feat_select is not None:
warnings.warn(
"Cross-validated feature selection not supported with "
"GridSearchCV")
self.clf.set_params(cv=self.cver, scoring=scoring)
with warnings.catch_warnings():
warnings.simplefilter('ignore', category=UserWarning)
self.clf = self.clf.fit(X, y)
self.cvs = self.clf.best_score_
else:
self.cvs = self.feat_select_cvs(
feat_select=feat_select, scoring=scoring)
if feat_select is not None:
fs = feature_selection(
feat_select, X, y)
self.features_selected.append(fs)
X = X[:, fs]
self.clf.fit(X, y)
return self.cvs.mean()
|
def cross_val_fit(self, X, y, cross_val='4-Fold', scoring='accuracy',
feat_select=None, class_weight='auto'):
""" Fits X to outcomes y, using clf and cv_method """
from sklearn import cross_validation
self.X = X
self.y = y
self.set_class_weight(class_weight=class_weight, y=y)
# Set cross validator
if isinstance(cross_val, string_types):
if re.match('.*-Fold', cross_val) is not None:
n = int(cross_val.split('-')[0])
self.cver = cross_validation.StratifiedKFold(self.y, n)
else:
raise Exception('Unrecognized cross validation method')
else:
self.cver = cross_val
if feat_select is not None:
self.features_selected = []
# Perform cross-validated classification
from sklearn.grid_search import GridSearchCV
if isinstance(self.clf, GridSearchCV):
import warnings
if feat_select is not None:
warnings.warn(
"Cross-validated feature selection not supported with "
"GridSearchCV")
self.clf.set_params(cv=self.cver, scoring=scoring)
with warnings.catch_warnings():
warnings.simplefilter('ignore', category=UserWarning)
self.clf = self.clf.fit(X, y)
self.cvs = self.clf.best_score_
else:
self.cvs = self.feat_select_cvs(
feat_select=feat_select, scoring=scoring)
if feat_select is not None:
fs = feature_selection(
feat_select, X, y)
self.features_selected.append(fs)
X = X[:, fs]
self.clf.fit(X, y)
return self.cvs.mean()
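# Standalone illustration of how the cross_val argument is interpreted above:
# an 'N-Fold' string is matched with a regex and split on '-' to get the
# number of stratified folds; any other string raises an exception.
import re

cross_val = '4-Fold'
if re.match('.*-Fold', cross_val) is not None:
    n_folds = int(cross_val.split('-')[0])
    print(n_folds)   # 4
else:
    raise Exception('Unrecognized cross validation method')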
|
[
"Fits",
"X",
"to",
"outcomes",
"y",
"using",
"clf",
"and",
"cv_method"
] |
neurosynth/neurosynth
|
python
|
https://github.com/neurosynth/neurosynth/blob/948ce7edce15d7df693446e76834e0c23bfe8f11/neurosynth/analysis/classify.py#L333-L386
|
[
"def",
"cross_val_fit",
"(",
"self",
",",
"X",
",",
"y",
",",
"cross_val",
"=",
"'4-Fold'",
",",
"scoring",
"=",
"'accuracy'",
",",
"feat_select",
"=",
"None",
",",
"class_weight",
"=",
"'auto'",
")",
":",
"from",
"sklearn",
"import",
"cross_validation",
"self",
".",
"X",
"=",
"X",
"self",
".",
"y",
"=",
"y",
"self",
".",
"set_class_weight",
"(",
"class_weight",
"=",
"class_weight",
",",
"y",
"=",
"y",
")",
"# Set cross validator",
"if",
"isinstance",
"(",
"cross_val",
",",
"string_types",
")",
":",
"if",
"re",
".",
"match",
"(",
"'.*-Fold'",
",",
"cross_val",
")",
"is",
"not",
"None",
":",
"n",
"=",
"int",
"(",
"cross_val",
".",
"split",
"(",
"'-'",
")",
"[",
"0",
"]",
")",
"self",
".",
"cver",
"=",
"cross_validation",
".",
"StratifiedKFold",
"(",
"self",
".",
"y",
",",
"n",
")",
"else",
":",
"raise",
"Exception",
"(",
"'Unrecognized cross validation method'",
")",
"else",
":",
"self",
".",
"cver",
"=",
"cross_val",
"if",
"feat_select",
"is",
"not",
"None",
":",
"self",
".",
"features_selected",
"=",
"[",
"]",
"# Perform cross-validated classification",
"from",
"sklearn",
".",
"grid_search",
"import",
"GridSearchCV",
"if",
"isinstance",
"(",
"self",
".",
"clf",
",",
"GridSearchCV",
")",
":",
"import",
"warnings",
"if",
"feat_select",
"is",
"not",
"None",
":",
"warnings",
".",
"warn",
"(",
"\"Cross-validated feature selection not supported with \"",
"\"GridSearchCV\"",
")",
"self",
".",
"clf",
".",
"set_params",
"(",
"cv",
"=",
"self",
".",
"cver",
",",
"scoring",
"=",
"scoring",
")",
"with",
"warnings",
".",
"catch_warnings",
"(",
")",
":",
"warnings",
".",
"simplefilter",
"(",
"'ignore'",
",",
"category",
"=",
"UserWarning",
")",
"self",
".",
"clf",
"=",
"self",
".",
"clf",
".",
"fit",
"(",
"X",
",",
"y",
")",
"self",
".",
"cvs",
"=",
"self",
".",
"clf",
".",
"best_score_",
"else",
":",
"self",
".",
"cvs",
"=",
"self",
".",
"feat_select_cvs",
"(",
"feat_select",
"=",
"feat_select",
",",
"scoring",
"=",
"scoring",
")",
"if",
"feat_select",
"is",
"not",
"None",
":",
"fs",
"=",
"feature_selection",
"(",
"feat_select",
",",
"X",
",",
"y",
")",
"self",
".",
"features_selected",
".",
"append",
"(",
"fs",
")",
"X",
"=",
"X",
"[",
":",
",",
"fs",
"]",
"self",
".",
"clf",
".",
"fit",
"(",
"X",
",",
"y",
")",
"return",
"self",
".",
"cvs",
".",
"mean",
"(",
")"
] |
948ce7edce15d7df693446e76834e0c23bfe8f11
|
test
|
Classifier.feat_select_cvs
|
Returns cross validated scores (just like cross_val_score),
but includes feature selection as part of the cross validation loop
|
neurosynth/analysis/classify.py
|
def feat_select_cvs(self, scoring=None, feat_select=None):
""" Returns cross validated scores (just like cross_val_score),
but includes feature selection as part of the cross validation loop """
scores = []
self.predictions = []
for train, test in self.cver:
X_train, X_test, y_train, y_test = self.X[
train], self.X[test], self.y[train], self.y[test]
if feat_select is not None:
# Get which features are kept
fs = feature_selection(
feat_select, X_train, y_train)
self.features_selected.append(fs)
# Filter X to only keep selected features
X_train, X_test = X_train[
:, fs], X_test[:, fs]
            # Set scoring (not implemented, as accuracy is the default)
# Train classifier
self.clf.fit(X_train, y_train)
# Test classifier
predicition, s = get_score(
X_test, y_test, self.clf, scoring=scoring)
scores.append(s)
self.predictions.append((y_test, predicition))
return np.array(scores)
|
def feat_select_cvs(self, scoring=None, feat_select=None):
""" Returns cross validated scores (just like cross_val_score),
but includes feature selection as part of the cross validation loop """
scores = []
self.predictions = []
for train, test in self.cver:
X_train, X_test, y_train, y_test = self.X[
train], self.X[test], self.y[train], self.y[test]
if feat_select is not None:
# Get which features are kept
fs = feature_selection(
feat_select, X_train, y_train)
self.features_selected.append(fs)
# Filter X to only keep selected features
X_train, X_test = X_train[
:, fs], X_test[:, fs]
            # Set scoring (not implemented, as accuracy is the default)
# Train classifier
self.clf.fit(X_train, y_train)
# Test classifier
predicition, s = get_score(
X_test, y_test, self.clf, scoring=scoring)
scores.append(s)
self.predictions.append((y_test, predicition))
return np.array(scores)
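# Minimal standalone sketch of the per-fold pattern above, with hand-made
# train/test index splits standing in for self.cver and a trivial constant
# predictor standing in for the fitted classifier and get_score().
import numpy as np

rng = np.random.RandomState(0)
X_demo = rng.rand(8, 3)
y_demo = np.array([0, 0, 1, 1, 0, 0, 1, 1])
folds = [(np.array([0, 1, 2, 3]), np.array([4, 5, 6, 7])),
         (np.array([4, 5, 6, 7]), np.array([0, 1, 2, 3]))]

scores = []
for train, test in folds:
    X_train, X_test = X_demo[train], X_demo[test]
    y_train, y_test = y_demo[train], y_demo[test]
    # ... a real classifier would be fit on (X_train, y_train) here ...
    preds = np.zeros_like(y_test)             # stand-in predictions (all class 0)
    scores.append(np.mean(preds == y_test))   # accuracy-style score per fold
print(np.array(scores).mean())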
|
[
"Returns",
"cross",
"validated",
"scores",
"(",
"just",
"like",
"cross_val_score",
")",
"but",
"includes",
"feature",
"selection",
"as",
"part",
"of",
"the",
"cross",
"validation",
"loop"
] |
neurosynth/neurosynth
|
python
|
https://github.com/neurosynth/neurosynth/blob/948ce7edce15d7df693446e76834e0c23bfe8f11/neurosynth/analysis/classify.py#L388-L422
|
[
"def",
"feat_select_cvs",
"(",
"self",
",",
"scoring",
"=",
"None",
",",
"feat_select",
"=",
"None",
")",
":",
"scores",
"=",
"[",
"]",
"self",
".",
"predictions",
"=",
"[",
"]",
"for",
"train",
",",
"test",
"in",
"self",
".",
"cver",
":",
"X_train",
",",
"X_test",
",",
"y_train",
",",
"y_test",
"=",
"self",
".",
"X",
"[",
"train",
"]",
",",
"self",
".",
"X",
"[",
"test",
"]",
",",
"self",
".",
"y",
"[",
"train",
"]",
",",
"self",
".",
"y",
"[",
"test",
"]",
"if",
"feat_select",
"is",
"not",
"None",
":",
"# Get which features are kept",
"fs",
"=",
"feature_selection",
"(",
"feat_select",
",",
"X_train",
",",
"y_train",
")",
"self",
".",
"features_selected",
".",
"append",
"(",
"fs",
")",
"# Filter X to only keep selected features",
"X_train",
",",
"X_test",
"=",
"X_train",
"[",
":",
",",
"fs",
"]",
",",
"X_test",
"[",
":",
",",
"fs",
"]",
"# Set scoring (not implement as accuracy is default)",
"# Train classifier",
"self",
".",
"clf",
".",
"fit",
"(",
"X_train",
",",
"y_train",
")",
"# Test classifier",
"predicition",
",",
"s",
"=",
"get_score",
"(",
"X_test",
",",
"y_test",
",",
"self",
".",
"clf",
",",
"scoring",
"=",
"scoring",
")",
"scores",
".",
"append",
"(",
"s",
")",
"self",
".",
"predictions",
".",
"append",
"(",
"(",
"y_test",
",",
"predicition",
")",
")",
"return",
"np",
".",
"array",
"(",
"scores",
")"
] |
948ce7edce15d7df693446e76834e0c23bfe8f11
|
test
|
Classifier.fit_dataset
|
Given a dataset, fits either features or voxels to y
|
neurosynth/analysis/classify.py
|
def fit_dataset(self, dataset, y, features=None,
feature_type='features'):
""" Given a dataset, fits either features or voxels to y """
# Get data from dataset
if feature_type == 'features':
X = np.rot90(dataset.feature_table.data.toarray())
elif feature_type == 'voxels':
X = np.rot90(dataset.image_table.data.toarray())
self.sk_classifier.fit(X, y)
|
def fit_dataset(self, dataset, y, features=None,
feature_type='features'):
""" Given a dataset, fits either features or voxels to y """
# Get data from dataset
if feature_type == 'features':
X = np.rot90(dataset.feature_table.data.toarray())
elif feature_type == 'voxels':
X = np.rot90(dataset.image_table.data.toarray())
self.sk_classifier.fit(X, y)
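# Standalone note on the np.rot90 call above (not from the project docs):
# rotating a 2D table by 90 degrees swaps its two dimensions, so an (m, n)
# table becomes an (n, m) matrix before being handed to fit().
import numpy as np

table = np.arange(6).reshape(2, 3)
print(table.shape)             # (2, 3)
print(np.rot90(table).shape)   # (3, 2)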
|
[
"Given",
"a",
"dataset",
"fits",
"either",
"features",
"or",
"voxels",
"to",
"y"
] |
neurosynth/neurosynth
|
python
|
https://github.com/neurosynth/neurosynth/blob/948ce7edce15d7df693446e76834e0c23bfe8f11/neurosynth/analysis/classify.py#L424-L435
|
[
"def",
"fit_dataset",
"(",
"self",
",",
"dataset",
",",
"y",
",",
"features",
"=",
"None",
",",
"feature_type",
"=",
"'features'",
")",
":",
"# Get data from dataset",
"if",
"feature_type",
"==",
"'features'",
":",
"X",
"=",
"np",
".",
"rot90",
"(",
"dataset",
".",
"feature_table",
".",
"data",
".",
"toarray",
"(",
")",
")",
"elif",
"feature_type",
"==",
"'voxels'",
":",
"X",
"=",
"np",
".",
"rot90",
"(",
"dataset",
".",
"image_table",
".",
"data",
".",
"toarray",
"(",
")",
")",
"self",
".",
"sk_classifier",
".",
"fit",
"(",
"X",
",",
"y",
")"
] |
948ce7edce15d7df693446e76834e0c23bfe8f11
|
test
|
Parser.p_list_andnot
|
list : list ANDNOT list
|
neurosynth/base/lexparser.py
|
def p_list_andnot(self, p):
'list : list ANDNOT list'
p[0] = p[1].loc[set(p[1].index) - set(p[3].index)]
|
def p_list_andnot(self, p):
'list : list ANDNOT list'
p[0] = p[1].loc[set(p[1].index) - set(p[3].index)]
|
[
"list",
":",
"list",
"ANDNOT",
"list"
] |
neurosynth/neurosynth
|
python
|
https://github.com/neurosynth/neurosynth/blob/948ce7edce15d7df693446e76834e0c23bfe8f11/neurosynth/base/lexparser.py#L66-L68
|
[
"def",
"p_list_andnot",
"(",
"self",
",",
"p",
")",
":",
"p",
"[",
"0",
"]",
"=",
"p",
"[",
"1",
"]",
".",
"loc",
"[",
"set",
"(",
"p",
"[",
"1",
"]",
".",
"index",
")",
"-",
"set",
"(",
"p",
"[",
"3",
"]",
".",
"index",
")",
"]"
] |
948ce7edce15d7df693446e76834e0c23bfe8f11
|
test
|
Parser.p_list_and
|
list : list AND list
|
neurosynth/base/lexparser.py
|
def p_list_and(self, p):
'list : list AND list'
p[0] = pd.concat(
[p[1], p[3]], axis=1).dropna().apply(self.func, axis=1)
|
def p_list_and(self, p):
'list : list AND list'
p[0] = pd.concat(
[p[1], p[3]], axis=1).dropna().apply(self.func, axis=1)
|
[
"list",
":",
"list",
"AND",
"list"
] |
neurosynth/neurosynth
|
python
|
https://github.com/neurosynth/neurosynth/blob/948ce7edce15d7df693446e76834e0c23bfe8f11/neurosynth/base/lexparser.py#L70-L73
|
[
"def",
"p_list_and",
"(",
"self",
",",
"p",
")",
":",
"p",
"[",
"0",
"]",
"=",
"pd",
".",
"concat",
"(",
"[",
"p",
"[",
"1",
"]",
",",
"p",
"[",
"3",
"]",
"]",
",",
"axis",
"=",
"1",
")",
".",
"dropna",
"(",
")",
".",
"apply",
"(",
"self",
".",
"func",
",",
"axis",
"=",
"1",
")"
] |
948ce7edce15d7df693446e76834e0c23bfe8f11
|
test
|
Parser.p_list_or
|
list : list OR list
|
neurosynth/base/lexparser.py
|
def p_list_or(self, p):
'list : list OR list'
p[0] = pd.concat(
[p[1], p[3]], axis=1).fillna(0.0).apply(self.func, axis=1)
|
def p_list_or(self, p):
'list : list OR list'
p[0] = pd.concat(
[p[1], p[3]], axis=1).fillna(0.0).apply(self.func, axis=1)
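# Standalone illustration of the combination logic in the AND and OR rules
# above: AND keeps only studies present in both weight series (dropna), OR
# keeps the union with missing weights treated as 0.0 (fillna); self.func
# (the built-in sum in this demo) then reduces each row to a single weight.
import pandas as pd

a = pd.Series({'study1': 0.2, 'study2': 0.5})
b = pd.Series({'study2': 0.1, 'study3': 0.4})
func = sum

and_result = pd.concat([a, b], axis=1).dropna().apply(func, axis=1)
or_result = pd.concat([a, b], axis=1).fillna(0.0).apply(func, axis=1)
print(and_result)   # only study2 survives
print(or_result)    # study1, study2 and study3 all kept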
|
[
"list",
":",
"list",
"OR",
"list"
] |
neurosynth/neurosynth
|
python
|
https://github.com/neurosynth/neurosynth/blob/948ce7edce15d7df693446e76834e0c23bfe8f11/neurosynth/base/lexparser.py#L75-L78
|
[
"def",
"p_list_or",
"(",
"self",
",",
"p",
")",
":",
"p",
"[",
"0",
"]",
"=",
"pd",
".",
"concat",
"(",
"[",
"p",
"[",
"1",
"]",
",",
"p",
"[",
"3",
"]",
"]",
",",
"axis",
"=",
"1",
")",
".",
"fillna",
"(",
"0.0",
")",
".",
"apply",
"(",
"self",
".",
"func",
",",
"axis",
"=",
"1",
")"
] |
948ce7edce15d7df693446e76834e0c23bfe8f11
|
test
|
Parser.p_list_feature
|
list : feature
| WORD
|
neurosynth/base/lexparser.py
|
def p_list_feature(self, p):
'''list : feature
| WORD '''
p[0] = self.dataset.get_studies(
features=p[1], frequency_threshold=self.threshold, func=self.func,
return_type='weights')
|
def p_list_feature(self, p):
'''list : feature
| WORD '''
p[0] = self.dataset.get_studies(
features=p[1], frequency_threshold=self.threshold, func=self.func,
return_type='weights')
|
[
"list",
":",
"feature",
"|",
"WORD"
] |
neurosynth/neurosynth
|
python
|
https://github.com/neurosynth/neurosynth/blob/948ce7edce15d7df693446e76834e0c23bfe8f11/neurosynth/base/lexparser.py#L93-L98
|
[
"def",
"p_list_feature",
"(",
"self",
",",
"p",
")",
":",
"p",
"[",
"0",
"]",
"=",
"self",
".",
"dataset",
".",
"get_studies",
"(",
"features",
"=",
"p",
"[",
"1",
"]",
",",
"frequency_threshold",
"=",
"self",
".",
"threshold",
",",
"func",
"=",
"self",
".",
"func",
",",
"return_type",
"=",
"'weights'",
")"
] |
948ce7edce15d7df693446e76834e0c23bfe8f11
|
test
|
average_within_regions
|
Aggregates over all voxels within each ROI in the input image.
Takes a Dataset and a Nifti image that defines distinct regions, and
returns a numpy matrix of ROIs x mappables, where the value at each
ROI is the proportion of active voxels in that ROI. Each distinct ROI
must have a unique value in the image; non-contiguous voxels with the
same value will be assigned to the same ROI.
Args:
dataset: Either a Dataset instance from which image data are
extracted, or a Numpy array containing image data to use. If
the latter, the array contains voxels in rows and
features/studies in columns. The number of voxels must be equal
to the length of the vectorized image mask in the regions
image.
regions: An image defining the boundaries of the regions to use.
Can be one of:
1) A string name of the NIFTI or Analyze-format image
2) A NiBabel SpatialImage
3) A list of NiBabel images
4) A 1D numpy array of the same length as the mask vector in
the Dataset's current Masker.
masker: Optional masker used to load image if regions is not a
numpy array. Must be passed if dataset is a numpy array.
threshold: An optional float in the range of 0 - 1 or integer. If
passed, the array will be binarized, with ROI values above the
threshold assigned to True and values below the threshold
assigned to False. (E.g., if threshold = 0.05, only ROIs in
which more than 5% of voxels are active will be considered
            active.) If threshold is an integer, studies will only be
considered active if they activate more than that number of
voxels in the ROI.
remove_zero: An optional boolean; when True, assume that voxels
with value of 0 should not be considered as a separate ROI, and
will be ignored.
Returns:
A 2D numpy array with ROIs in rows and mappables in columns.
|
neurosynth/analysis/reduce.py
|
def average_within_regions(dataset, regions, masker=None, threshold=None,
remove_zero=True):
""" Aggregates over all voxels within each ROI in the input image.
Takes a Dataset and a Nifti image that defines distinct regions, and
returns a numpy matrix of ROIs x mappables, where the value at each
ROI is the proportion of active voxels in that ROI. Each distinct ROI
must have a unique value in the image; non-contiguous voxels with the
same value will be assigned to the same ROI.
Args:
dataset: Either a Dataset instance from which image data are
extracted, or a Numpy array containing image data to use. If
the latter, the array contains voxels in rows and
features/studies in columns. The number of voxels must be equal
to the length of the vectorized image mask in the regions
image.
regions: An image defining the boundaries of the regions to use.
Can be one of:
1) A string name of the NIFTI or Analyze-format image
2) A NiBabel SpatialImage
3) A list of NiBabel images
4) A 1D numpy array of the same length as the mask vector in
the Dataset's current Masker.
masker: Optional masker used to load image if regions is not a
numpy array. Must be passed if dataset is a numpy array.
threshold: An optional float in the range of 0 - 1 or integer. If
passed, the array will be binarized, with ROI values above the
threshold assigned to True and values below the threshold
assigned to False. (E.g., if threshold = 0.05, only ROIs in
which more than 5% of voxels are active will be considered
            active.) If threshold is an integer, studies will only be
considered active if they activate more than that number of
voxels in the ROI.
remove_zero: An optional boolean; when True, assume that voxels
with value of 0 should not be considered as a separate ROI, and
will be ignored.
Returns:
A 2D numpy array with ROIs in rows and mappables in columns.
"""
if masker is not None:
masker = masker
else:
if isinstance(dataset, Dataset):
masker = dataset.masker
else:
if not type(regions).__module__.startswith('numpy'):
raise ValueError(
"If dataset is a numpy array and regions is not a numpy "
"array, a masker must be provided.")
if not type(regions).__module__.startswith('numpy'):
regions = masker.mask(regions)
if isinstance(dataset, Dataset):
dataset = dataset.get_image_data(dense=False)
# If multiple images are passed, give each one a unique value
if regions.ndim == 2:
m = regions
for i in range(regions.shape[1]):
_nz = np.nonzero(m[:, i])[0]
if isinstance(threshold, int):
m[_nz, i] = 1.0
else:
m[_nz, i] = 1.0 / np.count_nonzero(m[:, i])
# Otherwise create an ROI-coding matrix
else:
labels = np.unique(regions)
if remove_zero:
labels = labels[np.nonzero(labels)]
n_regions = labels.size
m = np.zeros((regions.size, n_regions))
for i in range(n_regions):
if isinstance(threshold, int):
m[regions == labels[i], i] = 1.0
else:
m[regions == labels[i], i] = 1.0 / \
np.sum(regions == labels[i])
# Call dot() on the array itself as this will use sparse matrix
# multiplication if possible.
result = dataset.T.dot(m).T
if threshold is not None:
result[result < threshold] = 0.0
result = result.astype(bool)
return result
|
def average_within_regions(dataset, regions, masker=None, threshold=None,
remove_zero=True):
""" Aggregates over all voxels within each ROI in the input image.
Takes a Dataset and a Nifti image that defines distinct regions, and
returns a numpy matrix of ROIs x mappables, where the value at each
ROI is the proportion of active voxels in that ROI. Each distinct ROI
must have a unique value in the image; non-contiguous voxels with the
same value will be assigned to the same ROI.
Args:
dataset: Either a Dataset instance from which image data are
extracted, or a Numpy array containing image data to use. If
the latter, the array contains voxels in rows and
features/studies in columns. The number of voxels must be equal
to the length of the vectorized image mask in the regions
image.
regions: An image defining the boundaries of the regions to use.
Can be one of:
1) A string name of the NIFTI or Analyze-format image
2) A NiBabel SpatialImage
3) A list of NiBabel images
4) A 1D numpy array of the same length as the mask vector in
the Dataset's current Masker.
masker: Optional masker used to load image if regions is not a
numpy array. Must be passed if dataset is a numpy array.
threshold: An optional float in the range of 0 - 1 or integer. If
passed, the array will be binarized, with ROI values above the
threshold assigned to True and values below the threshold
assigned to False. (E.g., if threshold = 0.05, only ROIs in
which more than 5% of voxels are active will be considered
            active.) If threshold is an integer, studies will only be
considered active if they activate more than that number of
voxels in the ROI.
remove_zero: An optional boolean; when True, assume that voxels
with value of 0 should not be considered as a separate ROI, and
will be ignored.
Returns:
A 2D numpy array with ROIs in rows and mappables in columns.
"""
if masker is not None:
masker = masker
else:
if isinstance(dataset, Dataset):
masker = dataset.masker
else:
if not type(regions).__module__.startswith('numpy'):
raise ValueError(
"If dataset is a numpy array and regions is not a numpy "
"array, a masker must be provided.")
if not type(regions).__module__.startswith('numpy'):
regions = masker.mask(regions)
if isinstance(dataset, Dataset):
dataset = dataset.get_image_data(dense=False)
# If multiple images are passed, give each one a unique value
if regions.ndim == 2:
m = regions
for i in range(regions.shape[1]):
_nz = np.nonzero(m[:, i])[0]
if isinstance(threshold, int):
m[_nz, i] = 1.0
else:
m[_nz, i] = 1.0 / np.count_nonzero(m[:, i])
# Otherwise create an ROI-coding matrix
else:
labels = np.unique(regions)
if remove_zero:
labels = labels[np.nonzero(labels)]
n_regions = labels.size
m = np.zeros((regions.size, n_regions))
for i in range(n_regions):
if isinstance(threshold, int):
m[regions == labels[i], i] = 1.0
else:
m[regions == labels[i], i] = 1.0 / \
np.sum(regions == labels[i])
# Call dot() on the array itself as this will use sparse matrix
# multiplication if possible.
result = dataset.T.dot(m).T
if threshold is not None:
result[result < threshold] = 0.0
result = result.astype(bool)
return result
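# Standalone sketch of the ROI-averaging math above, using plain numpy arrays
# in place of a Dataset: 'regions' is a 1D label vector over voxels and
# 'data' is a voxels x studies activation matrix (both synthetic).
import numpy as np

regions = np.array([1, 1, 2, 2, 2, 0])   # two ROIs plus background label 0
data = np.array([[1., 0.],
                 [0., 0.],
                 [1., 1.],
                 [1., 0.],
                 [0., 0.],
                 [0., 1.]])              # 6 voxels x 2 studies

labels = np.unique(regions)
labels = labels[np.nonzero(labels)]      # drop the zero/background label
m = np.zeros((regions.size, labels.size))
for i, lab in enumerate(labels):
    m[regions == lab, i] = 1.0 / np.sum(regions == lab)
print(data.T.dot(m).T)                   # ROI x study proportions of active voxels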
|
[
"Aggregates",
"over",
"all",
"voxels",
"within",
"each",
"ROI",
"in",
"the",
"input",
"image",
"."
] |
neurosynth/neurosynth
|
python
|
https://github.com/neurosynth/neurosynth/blob/948ce7edce15d7df693446e76834e0c23bfe8f11/neurosynth/analysis/reduce.py#L17-L111
|
[
"def",
"average_within_regions",
"(",
"dataset",
",",
"regions",
",",
"masker",
"=",
"None",
",",
"threshold",
"=",
"None",
",",
"remove_zero",
"=",
"True",
")",
":",
"if",
"masker",
"is",
"not",
"None",
":",
"masker",
"=",
"masker",
"else",
":",
"if",
"isinstance",
"(",
"dataset",
",",
"Dataset",
")",
":",
"masker",
"=",
"dataset",
".",
"masker",
"else",
":",
"if",
"not",
"type",
"(",
"regions",
")",
".",
"__module__",
".",
"startswith",
"(",
"'numpy'",
")",
":",
"raise",
"ValueError",
"(",
"\"If dataset is a numpy array and regions is not a numpy \"",
"\"array, a masker must be provided.\"",
")",
"if",
"not",
"type",
"(",
"regions",
")",
".",
"__module__",
".",
"startswith",
"(",
"'numpy'",
")",
":",
"regions",
"=",
"masker",
".",
"mask",
"(",
"regions",
")",
"if",
"isinstance",
"(",
"dataset",
",",
"Dataset",
")",
":",
"dataset",
"=",
"dataset",
".",
"get_image_data",
"(",
"dense",
"=",
"False",
")",
"# If multiple images are passed, give each one a unique value",
"if",
"regions",
".",
"ndim",
"==",
"2",
":",
"m",
"=",
"regions",
"for",
"i",
"in",
"range",
"(",
"regions",
".",
"shape",
"[",
"1",
"]",
")",
":",
"_nz",
"=",
"np",
".",
"nonzero",
"(",
"m",
"[",
":",
",",
"i",
"]",
")",
"[",
"0",
"]",
"if",
"isinstance",
"(",
"threshold",
",",
"int",
")",
":",
"m",
"[",
"_nz",
",",
"i",
"]",
"=",
"1.0",
"else",
":",
"m",
"[",
"_nz",
",",
"i",
"]",
"=",
"1.0",
"/",
"np",
".",
"count_nonzero",
"(",
"m",
"[",
":",
",",
"i",
"]",
")",
"# Otherwise create an ROI-coding matrix",
"else",
":",
"labels",
"=",
"np",
".",
"unique",
"(",
"regions",
")",
"if",
"remove_zero",
":",
"labels",
"=",
"labels",
"[",
"np",
".",
"nonzero",
"(",
"labels",
")",
"]",
"n_regions",
"=",
"labels",
".",
"size",
"m",
"=",
"np",
".",
"zeros",
"(",
"(",
"regions",
".",
"size",
",",
"n_regions",
")",
")",
"for",
"i",
"in",
"range",
"(",
"n_regions",
")",
":",
"if",
"isinstance",
"(",
"threshold",
",",
"int",
")",
":",
"m",
"[",
"regions",
"==",
"labels",
"[",
"i",
"]",
",",
"i",
"]",
"=",
"1.0",
"else",
":",
"m",
"[",
"regions",
"==",
"labels",
"[",
"i",
"]",
",",
"i",
"]",
"=",
"1.0",
"/",
"np",
".",
"sum",
"(",
"regions",
"==",
"labels",
"[",
"i",
"]",
")",
"# Call dot() on the array itself as this will use sparse matrix",
"# multiplication if possible.",
"result",
"=",
"dataset",
".",
"T",
".",
"dot",
"(",
"m",
")",
".",
"T",
"if",
"threshold",
"is",
"not",
"None",
":",
"result",
"[",
"result",
"<",
"threshold",
"]",
"=",
"0.0",
"result",
"=",
"result",
".",
"astype",
"(",
"bool",
")",
"return",
"result"
] |
948ce7edce15d7df693446e76834e0c23bfe8f11
|
test
|
apply_grid
|
Imposes a 3D grid on the brain volume and averages across all voxels
that fall within each cell.
Args:
dataset: Data to apply grid to. Either a Dataset instance, or a numpy
array with voxels in rows and features in columns.
masker: Optional Masker instance used to map between the created grid
and the dataset. This is only needed if dataset is a numpy array;
if dataset is a Dataset instance, the Masker in the dataset will
be used.
scale: int; scaling factor (in mm) to pass onto create_grid().
threshold: Optional float to pass to reduce.average_within_regions().
Returns:
A tuple of length 2, where the first element is a numpy array of
dimensions n_cubes x n_studies, and the second element is a numpy
array, with the same dimensions as the Masker instance in the current
Dataset, that maps voxel identities onto cell IDs in the grid.
|
neurosynth/analysis/reduce.py
|
def apply_grid(dataset, masker=None, scale=5, threshold=None):
""" Imposes a 3D grid on the brain volume and averages across all voxels
that fall within each cell.
Args:
dataset: Data to apply grid to. Either a Dataset instance, or a numpy
array with voxels in rows and features in columns.
masker: Optional Masker instance used to map between the created grid
and the dataset. This is only needed if dataset is a numpy array;
if dataset is a Dataset instance, the Masker in the dataset will
be used.
scale: int; scaling factor (in mm) to pass onto create_grid().
threshold: Optional float to pass to reduce.average_within_regions().
Returns:
A tuple of length 2, where the first element is a numpy array of
dimensions n_cubes x n_studies, and the second element is a numpy
array, with the same dimensions as the Masker instance in the current
Dataset, that maps voxel identities onto cell IDs in the grid.
"""
if masker is None:
if isinstance(dataset, Dataset):
masker = dataset.masker
else:
raise ValueError(
"If dataset is a numpy array, a masker must be provided.")
grid = imageutils.create_grid(masker.volume, scale)
cm = masker.mask(grid, in_global_mask=True)
data = average_within_regions(dataset, cm, threshold)
return (data, grid)
|
def apply_grid(dataset, masker=None, scale=5, threshold=None):
""" Imposes a 3D grid on the brain volume and averages across all voxels
that fall within each cell.
Args:
dataset: Data to apply grid to. Either a Dataset instance, or a numpy
array with voxels in rows and features in columns.
masker: Optional Masker instance used to map between the created grid
and the dataset. This is only needed if dataset is a numpy array;
if dataset is a Dataset instance, the Masker in the dataset will
be used.
scale: int; scaling factor (in mm) to pass onto create_grid().
threshold: Optional float to pass to reduce.average_within_regions().
Returns:
A tuple of length 2, where the first element is a numpy array of
dimensions n_cubes x n_studies, and the second element is a numpy
array, with the same dimensions as the Masker instance in the current
Dataset, that maps voxel identities onto cell IDs in the grid.
"""
if masker is None:
if isinstance(dataset, Dataset):
masker = dataset.masker
else:
raise ValueError(
"If dataset is a numpy array, a masker must be provided.")
grid = imageutils.create_grid(masker.volume, scale)
cm = masker.mask(grid, in_global_mask=True)
data = average_within_regions(dataset, cm, threshold)
return (data, grid)
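# Illustrative usage sketch (not part of the original source): 'dataset' is
# assumed to be an already-constructed neurosynth Dataset, so the call is
# left commented; the scale value is just an example.
# from neurosynth.analysis.reduce import apply_grid
#
# data, grid = apply_grid(dataset, scale=10)   # average within 10 mm grid cells
# data.shape                                   # (n_cubes, n_studies)
# grid                                         # image mapping voxels to cell IDs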
|
[
"Imposes",
"a",
"3D",
"grid",
"on",
"the",
"brain",
"volume",
"and",
"averages",
"across",
"all",
"voxels",
"that",
"fall",
"within",
"each",
"cell",
".",
"Args",
":",
"dataset",
":",
"Data",
"to",
"apply",
"grid",
"to",
".",
"Either",
"a",
"Dataset",
"instance",
"or",
"a",
"numpy",
"array",
"with",
"voxels",
"in",
"rows",
"and",
"features",
"in",
"columns",
".",
"masker",
":",
"Optional",
"Masker",
"instance",
"used",
"to",
"map",
"between",
"the",
"created",
"grid",
"and",
"the",
"dataset",
".",
"This",
"is",
"only",
"needed",
"if",
"dataset",
"is",
"a",
"numpy",
"array",
";",
"if",
"dataset",
"is",
"a",
"Dataset",
"instance",
"the",
"Masker",
"in",
"the",
"dataset",
"will",
"be",
"used",
".",
"scale",
":",
"int",
";",
"scaling",
"factor",
"(",
"in",
"mm",
")",
"to",
"pass",
"onto",
"create_grid",
"()",
".",
"threshold",
":",
"Optional",
"float",
"to",
"pass",
"to",
"reduce",
".",
"average_within_regions",
"()",
".",
"Returns",
":",
"A",
"tuple",
"of",
"length",
"2",
"where",
"the",
"first",
"element",
"is",
"a",
"numpy",
"array",
"of",
"dimensions",
"n_cubes",
"x",
"n_studies",
"and",
"the",
"second",
"element",
"is",
"a",
"numpy",
"array",
"with",
"the",
"same",
"dimensions",
"as",
"the",
"Masker",
"instance",
"in",
"the",
"current",
"Dataset",
"that",
"maps",
"voxel",
"identities",
"onto",
"cell",
"IDs",
"in",
"the",
"grid",
"."
] |
neurosynth/neurosynth
|
python
|
https://github.com/neurosynth/neurosynth/blob/948ce7edce15d7df693446e76834e0c23bfe8f11/neurosynth/analysis/reduce.py#L114-L142
|
[
"def",
"apply_grid",
"(",
"dataset",
",",
"masker",
"=",
"None",
",",
"scale",
"=",
"5",
",",
"threshold",
"=",
"None",
")",
":",
"if",
"masker",
"is",
"None",
":",
"if",
"isinstance",
"(",
"dataset",
",",
"Dataset",
")",
":",
"masker",
"=",
"dataset",
".",
"masker",
"else",
":",
"raise",
"ValueError",
"(",
"\"If dataset is a numpy array, a masker must be provided.\"",
")",
"grid",
"=",
"imageutils",
".",
"create_grid",
"(",
"masker",
".",
"volume",
",",
"scale",
")",
"cm",
"=",
"masker",
".",
"mask",
"(",
"grid",
",",
"in_global_mask",
"=",
"True",
")",
"data",
"=",
"average_within_regions",
"(",
"dataset",
",",
"cm",
",",
"threshold",
")",
"return",
"(",
"data",
",",
"grid",
")"
] |
948ce7edce15d7df693446e76834e0c23bfe8f11
|
test
|
get_random_voxels
|
Returns mappable data for a random subset of voxels.
May be useful as a baseline in predictive analyses--e.g., to compare
performance of a more principled feature selection method with simple
random selection.
Args:
dataset: A Dataset instance
n_voxels: An integer specifying the number of random voxels to select.
Returns:
A 2D numpy array with (randomly-selected) voxels in rows and mappables
in columns.
|
neurosynth/analysis/reduce.py
|
def get_random_voxels(dataset, n_voxels):
""" Returns mappable data for a random subset of voxels.
May be useful as a baseline in predictive analyses--e.g., to compare
performance of a more principled feature selection method with simple
random selection.
Args:
dataset: A Dataset instance
n_voxels: An integer specifying the number of random voxels to select.
Returns:
A 2D numpy array with (randomly-selected) voxels in rows and mappables
in columns.
"""
voxels = np.arange(dataset.masker.n_vox_in_vol)
np.random.shuffle(voxels)
selected = voxels[0:n_voxels]
return dataset.get_image_data(voxels=selected)
|
def get_random_voxels(dataset, n_voxels):
""" Returns mappable data for a random subset of voxels.
May be useful as a baseline in predictive analyses--e.g., to compare
performance of a more principled feature selection method with simple
random selection.
Args:
dataset: A Dataset instance
n_voxels: An integer specifying the number of random voxels to select.
Returns:
A 2D numpy array with (randomly-selected) voxels in rows and mappables
in columns.
"""
voxels = np.arange(dataset.masker.n_vox_in_vol)
np.random.shuffle(voxels)
selected = voxels[0:n_voxels]
return dataset.get_image_data(voxels=selected)
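# Standalone illustration of the selection logic above, with a small index
# array standing in for dataset.masker.n_vox_in_vol: shuffle all voxel indices
# and keep the first n, i.e. a random sample without replacement.
import numpy as np

np.random.seed(0)
voxels = np.arange(10)     # pretend the volume contains 10 voxels
np.random.shuffle(voxels)
selected = voxels[0:3]     # 3 distinct voxel indices chosen at random
print(selected)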
|
[
"Returns",
"mappable",
"data",
"for",
"a",
"random",
"subset",
"of",
"voxels",
"."
] |
neurosynth/neurosynth
|
python
|
https://github.com/neurosynth/neurosynth/blob/948ce7edce15d7df693446e76834e0c23bfe8f11/neurosynth/analysis/reduce.py#L145-L163
|
[
"def",
"get_random_voxels",
"(",
"dataset",
",",
"n_voxels",
")",
":",
"voxels",
"=",
"np",
".",
"arange",
"(",
"dataset",
".",
"masker",
".",
"n_vox_in_vol",
")",
"np",
".",
"random",
".",
"shuffle",
"(",
"voxels",
")",
"selected",
"=",
"voxels",
"[",
"0",
":",
"n_voxels",
"]",
"return",
"dataset",
".",
"get_image_data",
"(",
"voxels",
"=",
"selected",
")"
] |
948ce7edce15d7df693446e76834e0c23bfe8f11
|
test
|
_get_top_words
|
Return top forty words from each topic in trained topic model.
|
neurosynth/analysis/reduce.py
|
def _get_top_words(model, feature_names, n_top_words=40):
""" Return top forty words from each topic in trained topic model.
"""
topic_words = []
for topic in model.components_:
top_words = [feature_names[i] for i in topic.argsort()[:-n_top_words-1:-1]]
topic_words += [top_words]
return topic_words
|
def _get_top_words(model, feature_names, n_top_words=40):
""" Return top forty words from each topic in trained topic model.
"""
topic_words = []
for topic in model.components_:
top_words = [feature_names[i] for i in topic.argsort()[:-n_top_words-1:-1]]
topic_words += [top_words]
return topic_words
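# Standalone illustration of the argsort slicing above: for one topic's word
# weights, topic.argsort()[:-n-1:-1] yields the indices of the n largest
# weights in descending order, which are then mapped back to feature names.
import numpy as np

feature_names = ['alpha', 'beta', 'gamma', 'delta']
topic = np.array([0.10, 0.70, 0.05, 0.15])
n_top_words = 2
top_words = [feature_names[i] for i in topic.argsort()[:-n_top_words - 1:-1]]
print(top_words)   # ['beta', 'delta']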
|
[
"Return",
"top",
"forty",
"words",
"from",
"each",
"topic",
"in",
"trained",
"topic",
"model",
"."
] |
neurosynth/neurosynth
|
python
|
https://github.com/neurosynth/neurosynth/blob/948ce7edce15d7df693446e76834e0c23bfe8f11/neurosynth/analysis/reduce.py#L166-L173
|
[
"def",
"_get_top_words",
"(",
"model",
",",
"feature_names",
",",
"n_top_words",
"=",
"40",
")",
":",
"topic_words",
"=",
"[",
"]",
"for",
"topic",
"in",
"model",
".",
"components_",
":",
"top_words",
"=",
"[",
"feature_names",
"[",
"i",
"]",
"for",
"i",
"in",
"topic",
".",
"argsort",
"(",
")",
"[",
":",
"-",
"n_top_words",
"-",
"1",
":",
"-",
"1",
"]",
"]",
"topic_words",
"+=",
"[",
"top_words",
"]",
"return",
"topic_words"
] |
948ce7edce15d7df693446e76834e0c23bfe8f11
|
test
|
run_lda
|
Perform topic modeling using Latent Dirichlet Allocation with the
Java toolbox MALLET.
Args:
abstracts: A pandas DataFrame with two columns ('pmid' and 'abstract')
containing article abstracts.
n_topics: Number of topics to generate. Default=50.
n_words: Number of top words to return for each topic. Default=31,
based on Poldrack et al. (2012).
n_iters: Number of iterations to run in training topic model.
Default=1000.
alpha: The Dirichlet prior on the per-document topic
distributions.
Default: 50 / n_topics, based on Poldrack et al. (2012).
beta: The Dirichlet prior on the per-topic word distribution.
Default: 0.001, based on Poldrack et al. (2012).
Returns:
weights_df: A pandas DataFrame derived from the MALLET
output-doc-topics output file. Contains the weight assigned
to each article for each topic, which can be used to select
articles for topic-based meta-analyses (accepted threshold
from Poldrack article is 0.001). [n_topics]+1 columns:
'pmid' is the first column and the following columns are
the topic names. The names of the topics match the names
in df (e.g., topic_000).
keys_df: A pandas DataFrame derived from the MALLET
output-topic-keys output file. Contains the top [n_words]
words for each topic, which can act as a summary of the
topic's content. Two columns: 'topic' and 'terms'. The
names of the topics match the names in weights (e.g.,
topic_000).
|
neurosynth/analysis/reduce.py
|
def run_lda(abstracts, n_topics=50, n_words=31, n_iters=1000, alpha=None,
beta=0.001):
""" Perform topic modeling using Latent Dirichlet Allocation with the
Java toolbox MALLET.
Args:
abstracts: A pandas DataFrame with two columns ('pmid' and 'abstract')
containing article abstracts.
n_topics: Number of topics to generate. Default=50.
n_words: Number of top words to return for each topic. Default=31,
based on Poldrack et al. (2012).
n_iters: Number of iterations to run in training topic model.
Default=1000.
alpha: The Dirichlet prior on the per-document topic
distributions.
Default: 50 / n_topics, based on Poldrack et al. (2012).
beta: The Dirichlet prior on the per-topic word distribution.
Default: 0.001, based on Poldrack et al. (2012).
Returns:
weights_df: A pandas DataFrame derived from the MALLET
output-doc-topics output file. Contains the weight assigned
to each article for each topic, which can be used to select
articles for topic-based meta-analyses (accepted threshold
from Poldrack article is 0.001). [n_topics]+1 columns:
'pmid' is the first column and the following columns are
the topic names. The names of the topics match the names
in df (e.g., topic_000).
keys_df: A pandas DataFrame derived from the MALLET
output-topic-keys output file. Contains the top [n_words]
words for each topic, which can act as a summary of the
topic's content. Two columns: 'topic' and 'terms'. The
names of the topics match the names in weights (e.g.,
topic_000).
"""
if abstracts.index.name != 'pmid':
abstracts.index = abstracts['pmid']
resdir = os.path.abspath(get_resource_path())
tempdir = os.path.join(resdir, 'topic_models')
absdir = os.path.join(tempdir, 'abstracts')
if not os.path.isdir(tempdir):
os.mkdir(tempdir)
if alpha is None:
alpha = 50. / n_topics
# Check for presence of abstract files and convert if necessary
if not os.path.isdir(absdir):
print('Abstracts folder not found. Creating abstract files...')
os.mkdir(absdir)
for pmid in abstracts.index.values:
abstract = abstracts.loc[pmid]['abstract']
with open(os.path.join(absdir, str(pmid) + '.txt'), 'w') as fo:
fo.write(abstract)
# Run MALLET topic modeling
print('Generating topics...')
mallet_bin = join(dirname(dirname(__file__)),
'resources/mallet/bin/mallet')
import_str = ('{mallet} import-dir '
'--input {absdir} '
'--output {outdir}/topic-input.mallet '
'--keep-sequence '
'--remove-stopwords').format(mallet=mallet_bin,
absdir=absdir,
outdir=tempdir)
train_str = ('{mallet} train-topics '
'--input {out}/topic-input.mallet '
'--num-topics {n_topics} '
'--num-top-words {n_words} '
'--output-topic-keys {out}/topic_keys.txt '
'--output-doc-topics {out}/doc_topics.txt '
'--num-iterations {n_iters} '
'--output-model {out}/saved_model.mallet '
'--random-seed 1 '
'--alpha {alpha} '
'--beta {beta}').format(mallet=mallet_bin, out=tempdir,
n_topics=n_topics, n_words=n_words,
n_iters=n_iters,
alpha=alpha, beta=beta)
subprocess.call(import_str, shell=True)
subprocess.call(train_str, shell=True)
# Read in and convert doc_topics and topic_keys.
def clean_str(string):
return os.path.basename(os.path.splitext(string)[0])
def get_sort(lst):
return [i[0] for i in sorted(enumerate(lst), key=lambda x:x[1])]
topic_names = ['topic_{0:03d}'.format(i) for i in range(n_topics)]
# doc_topics: Topic weights for each paper.
# The conversion here is pretty ugly at the moment.
# First row should be dropped. First column is row number and can be used
# as the index.
# Second column is 'file: /full/path/to/pmid.txt' <-- Parse to get pmid.
# After that, odd columns are topic numbers and even columns are the
# weights for the topics in the preceding column. These columns are sorted
# on an individual pmid basis by the weights.
n_cols = (2 * n_topics) + 1
dt_df = pd.read_csv(os.path.join(tempdir, 'doc_topics.txt'),
delimiter='\t', skiprows=1, header=None, index_col=0)
dt_df = dt_df[dt_df.columns[:n_cols]]
# Get pmids from filenames
dt_df[1] = dt_df[1].apply(clean_str)
# Put weights (even cols) and topics (odd cols) into separate dfs.
weights_df = dt_df[dt_df.columns[2::2]]
weights_df.index = dt_df[1]
weights_df.columns = range(n_topics)
topics_df = dt_df[dt_df.columns[1::2]]
topics_df.index = dt_df[1]
topics_df.columns = range(n_topics)
# Sort columns in weights_df separately for each row using topics_df.
sorters_df = topics_df.apply(get_sort, axis=1)
weights = weights_df.as_matrix()
sorters = sorters_df.as_matrix()
# there has to be a better way to do this.
for i in range(sorters.shape[0]):
weights[i, :] = weights[i, sorters[i, :]]
# Define topic names (e.g., topic_000)
index = dt_df[1]
weights_df = pd.DataFrame(columns=topic_names, data=weights, index=index)
weights_df.index.name = 'pmid'
# topic_keys: Top [n_words] words for each topic.
keys_df = pd.read_csv(os.path.join(tempdir, 'topic_keys.txt'),
delimiter='\t', header=None, index_col=0)
# Second column is a list of the terms.
keys_df = keys_df[[2]]
keys_df.rename(columns={2: 'terms'}, inplace=True)
keys_df.index = topic_names
keys_df.index.name = 'topic'
# Remove all temporary files (abstract files, model, and outputs).
shutil.rmtree(tempdir)
# Return article topic weights and topic keys.
return weights_df, keys_df
|
def run_lda(abstracts, n_topics=50, n_words=31, n_iters=1000, alpha=None,
beta=0.001):
""" Perform topic modeling using Latent Dirichlet Allocation with the
Java toolbox MALLET.
Args:
abstracts: A pandas DataFrame with two columns ('pmid' and 'abstract')
containing article abstracts.
n_topics: Number of topics to generate. Default=50.
n_words: Number of top words to return for each topic. Default=31,
based on Poldrack et al. (2012).
n_iters: Number of iterations to run in training topic model.
Default=1000.
alpha: The Dirichlet prior on the per-document topic
distributions.
Default: 50 / n_topics, based on Poldrack et al. (2012).
beta: The Dirichlet prior on the per-topic word distribution.
Default: 0.001, based on Poldrack et al. (2012).
Returns:
weights_df: A pandas DataFrame derived from the MALLET
output-doc-topics output file. Contains the weight assigned
to each article for each topic, which can be used to select
articles for topic-based meta-analyses (accepted threshold
from Poldrack article is 0.001). [n_topics]+1 columns:
'pmid' is the first column and the following columns are
the topic names. The names of the topics match the names
in df (e.g., topic_000).
keys_df: A pandas DataFrame derived from the MALLET
output-topic-keys output file. Contains the top [n_words]
words for each topic, which can act as a summary of the
topic's content. Two columns: 'topic' and 'terms'. The
names of the topics match the names in weights (e.g.,
topic_000).
"""
if abstracts.index.name != 'pmid':
abstracts.index = abstracts['pmid']
resdir = os.path.abspath(get_resource_path())
tempdir = os.path.join(resdir, 'topic_models')
absdir = os.path.join(tempdir, 'abstracts')
if not os.path.isdir(tempdir):
os.mkdir(tempdir)
if alpha is None:
alpha = 50. / n_topics
# Check for presence of abstract files and convert if necessary
if not os.path.isdir(absdir):
print('Abstracts folder not found. Creating abstract files...')
os.mkdir(absdir)
for pmid in abstracts.index.values:
abstract = abstracts.loc[pmid]['abstract']
with open(os.path.join(absdir, str(pmid) + '.txt'), 'w') as fo:
fo.write(abstract)
# Run MALLET topic modeling
print('Generating topics...')
mallet_bin = join(dirname(dirname(__file__)),
'resources/mallet/bin/mallet')
import_str = ('{mallet} import-dir '
'--input {absdir} '
'--output {outdir}/topic-input.mallet '
'--keep-sequence '
'--remove-stopwords').format(mallet=mallet_bin,
absdir=absdir,
outdir=tempdir)
train_str = ('{mallet} train-topics '
'--input {out}/topic-input.mallet '
'--num-topics {n_topics} '
'--num-top-words {n_words} '
'--output-topic-keys {out}/topic_keys.txt '
'--output-doc-topics {out}/doc_topics.txt '
'--num-iterations {n_iters} '
'--output-model {out}/saved_model.mallet '
'--random-seed 1 '
'--alpha {alpha} '
'--beta {beta}').format(mallet=mallet_bin, out=tempdir,
n_topics=n_topics, n_words=n_words,
n_iters=n_iters,
alpha=alpha, beta=beta)
subprocess.call(import_str, shell=True)
subprocess.call(train_str, shell=True)
# Read in and convert doc_topics and topic_keys.
def clean_str(string):
return os.path.basename(os.path.splitext(string)[0])
def get_sort(lst):
return [i[0] for i in sorted(enumerate(lst), key=lambda x:x[1])]
topic_names = ['topic_{0:03d}'.format(i) for i in range(n_topics)]
# doc_topics: Topic weights for each paper.
# The conversion here is pretty ugly at the moment.
# First row should be dropped. First column is row number and can be used
# as the index.
# Second column is 'file: /full/path/to/pmid.txt' <-- Parse to get pmid.
# After that, odd columns are topic numbers and even columns are the
# weights for the topics in the preceding column. These columns are sorted
# on an individual pmid basis by the weights.
n_cols = (2 * n_topics) + 1
dt_df = pd.read_csv(os.path.join(tempdir, 'doc_topics.txt'),
delimiter='\t', skiprows=1, header=None, index_col=0)
dt_df = dt_df[dt_df.columns[:n_cols]]
# Get pmids from filenames
dt_df[1] = dt_df[1].apply(clean_str)
# Put weights (even cols) and topics (odd cols) into separate dfs.
weights_df = dt_df[dt_df.columns[2::2]]
weights_df.index = dt_df[1]
weights_df.columns = range(n_topics)
topics_df = dt_df[dt_df.columns[1::2]]
topics_df.index = dt_df[1]
topics_df.columns = range(n_topics)
# Sort columns in weights_df separately for each row using topics_df.
sorters_df = topics_df.apply(get_sort, axis=1)
weights = weights_df.as_matrix()
sorters = sorters_df.as_matrix()
# there has to be a better way to do this.
for i in range(sorters.shape[0]):
weights[i, :] = weights[i, sorters[i, :]]
# Define topic names (e.g., topic_000)
index = dt_df[1]
weights_df = pd.DataFrame(columns=topic_names, data=weights, index=index)
weights_df.index.name = 'pmid'
# topic_keys: Top [n_words] words for each topic.
keys_df = pd.read_csv(os.path.join(tempdir, 'topic_keys.txt'),
delimiter='\t', header=None, index_col=0)
# Second column is a list of the terms.
keys_df = keys_df[[2]]
keys_df.rename(columns={2: 'terms'}, inplace=True)
keys_df.index = topic_names
keys_df.index.name = 'topic'
# Remove all temporary files (abstract files, model, and outputs).
shutil.rmtree(tempdir)
# Return article topic weights and topic keys.
return weights_df, keys_df
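# Illustrative usage sketch (not part of the original source): run_lda shells
# out to the MALLET binary bundled under resources/mallet, so this only runs
# in a checkout where that binary is present; the two abstracts below are
# synthetic placeholders with made-up pmids.
import pandas as pd
from neurosynth.analysis.reduce import run_lda

abstracts = pd.DataFrame({
    'pmid': [11111, 22222],
    'abstract': ['A working memory task studied with fMRI.',
                 'Emotional face processing and amygdala activation.'],
})
weights_df, keys_df = run_lda(abstracts, n_topics=2, n_words=5, n_iters=50)
print(weights_df.head())   # per-article topic weights
print(keys_df.head())      # top words for each topic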
|
[
"Perform",
"topic",
"modeling",
"using",
"Latent",
"Dirichlet",
"Allocation",
"with",
"the",
"Java",
"toolbox",
"MALLET",
"."
] |
neurosynth/neurosynth
|
python
|
https://github.com/neurosynth/neurosynth/blob/948ce7edce15d7df693446e76834e0c23bfe8f11/neurosynth/analysis/reduce.py#L176-L323
|
[
"def",
"run_lda",
"(",
"abstracts",
",",
"n_topics",
"=",
"50",
",",
"n_words",
"=",
"31",
",",
"n_iters",
"=",
"1000",
",",
"alpha",
"=",
"None",
",",
"beta",
"=",
"0.001",
")",
":",
"if",
"abstracts",
".",
"index",
".",
"name",
"!=",
"'pmid'",
":",
"abstracts",
".",
"index",
"=",
"abstracts",
"[",
"'pmid'",
"]",
"resdir",
"=",
"os",
".",
"path",
".",
"abspath",
"(",
"get_resource_path",
"(",
")",
")",
"tempdir",
"=",
"os",
".",
"path",
".",
"join",
"(",
"resdir",
",",
"'topic_models'",
")",
"absdir",
"=",
"os",
".",
"path",
".",
"join",
"(",
"tempdir",
",",
"'abstracts'",
")",
"if",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"tempdir",
")",
":",
"os",
".",
"mkdir",
"(",
"tempdir",
")",
"if",
"alpha",
"is",
"None",
":",
"alpha",
"=",
"50.",
"/",
"n_topics",
"# Check for presence of abstract files and convert if necessary",
"if",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"absdir",
")",
":",
"print",
"(",
"'Abstracts folder not found. Creating abstract files...'",
")",
"os",
".",
"mkdir",
"(",
"absdir",
")",
"for",
"pmid",
"in",
"abstracts",
".",
"index",
".",
"values",
":",
"abstract",
"=",
"abstracts",
".",
"loc",
"[",
"pmid",
"]",
"[",
"'abstract'",
"]",
"with",
"open",
"(",
"os",
".",
"path",
".",
"join",
"(",
"absdir",
",",
"str",
"(",
"pmid",
")",
"+",
"'.txt'",
")",
",",
"'w'",
")",
"as",
"fo",
":",
"fo",
".",
"write",
"(",
"abstract",
")",
"# Run MALLET topic modeling",
"print",
"(",
"'Generating topics...'",
")",
"mallet_bin",
"=",
"join",
"(",
"dirname",
"(",
"dirname",
"(",
"__file__",
")",
")",
",",
"'resources/mallet/bin/mallet'",
")",
"import_str",
"=",
"(",
"'{mallet} import-dir '",
"'--input {absdir} '",
"'--output {outdir}/topic-input.mallet '",
"'--keep-sequence '",
"'--remove-stopwords'",
")",
".",
"format",
"(",
"mallet",
"=",
"mallet_bin",
",",
"absdir",
"=",
"absdir",
",",
"outdir",
"=",
"tempdir",
")",
"train_str",
"=",
"(",
"'{mallet} train-topics '",
"'--input {out}/topic-input.mallet '",
"'--num-topics {n_topics} '",
"'--num-top-words {n_words} '",
"'--output-topic-keys {out}/topic_keys.txt '",
"'--output-doc-topics {out}/doc_topics.txt '",
"'--num-iterations {n_iters} '",
"'--output-model {out}/saved_model.mallet '",
"'--random-seed 1 '",
"'--alpha {alpha} '",
"'--beta {beta}'",
")",
".",
"format",
"(",
"mallet",
"=",
"mallet_bin",
",",
"out",
"=",
"tempdir",
",",
"n_topics",
"=",
"n_topics",
",",
"n_words",
"=",
"n_words",
",",
"n_iters",
"=",
"n_iters",
",",
"alpha",
"=",
"alpha",
",",
"beta",
"=",
"beta",
")",
"subprocess",
".",
"call",
"(",
"import_str",
",",
"shell",
"=",
"True",
")",
"subprocess",
".",
"call",
"(",
"train_str",
",",
"shell",
"=",
"True",
")",
"# Read in and convert doc_topics and topic_keys.",
"def",
"clean_str",
"(",
"string",
")",
":",
"return",
"os",
".",
"path",
".",
"basename",
"(",
"os",
".",
"path",
".",
"splitext",
"(",
"string",
")",
"[",
"0",
"]",
")",
"def",
"get_sort",
"(",
"lst",
")",
":",
"return",
"[",
"i",
"[",
"0",
"]",
"for",
"i",
"in",
"sorted",
"(",
"enumerate",
"(",
"lst",
")",
",",
"key",
"=",
"lambda",
"x",
":",
"x",
"[",
"1",
"]",
")",
"]",
"topic_names",
"=",
"[",
"'topic_{0:03d}'",
".",
"format",
"(",
"i",
")",
"for",
"i",
"in",
"range",
"(",
"n_topics",
")",
"]",
"# doc_topics: Topic weights for each paper.",
"# The conversion here is pretty ugly at the moment.",
"# First row should be dropped. First column is row number and can be used",
"# as the index.",
"# Second column is 'file: /full/path/to/pmid.txt' <-- Parse to get pmid.",
"# After that, odd columns are topic numbers and even columns are the",
"# weights for the topics in the preceding column. These columns are sorted",
"# on an individual pmid basis by the weights.",
"n_cols",
"=",
"(",
"2",
"*",
"n_topics",
")",
"+",
"1",
"dt_df",
"=",
"pd",
".",
"read_csv",
"(",
"os",
".",
"path",
".",
"join",
"(",
"tempdir",
",",
"'doc_topics.txt'",
")",
",",
"delimiter",
"=",
"'\\t'",
",",
"skiprows",
"=",
"1",
",",
"header",
"=",
"None",
",",
"index_col",
"=",
"0",
")",
"dt_df",
"=",
"dt_df",
"[",
"dt_df",
".",
"columns",
"[",
":",
"n_cols",
"]",
"]",
"# Get pmids from filenames",
"dt_df",
"[",
"1",
"]",
"=",
"dt_df",
"[",
"1",
"]",
".",
"apply",
"(",
"clean_str",
")",
"# Put weights (even cols) and topics (odd cols) into separate dfs.",
"weights_df",
"=",
"dt_df",
"[",
"dt_df",
".",
"columns",
"[",
"2",
":",
":",
"2",
"]",
"]",
"weights_df",
".",
"index",
"=",
"dt_df",
"[",
"1",
"]",
"weights_df",
".",
"columns",
"=",
"range",
"(",
"n_topics",
")",
"topics_df",
"=",
"dt_df",
"[",
"dt_df",
".",
"columns",
"[",
"1",
":",
":",
"2",
"]",
"]",
"topics_df",
".",
"index",
"=",
"dt_df",
"[",
"1",
"]",
"topics_df",
".",
"columns",
"=",
"range",
"(",
"n_topics",
")",
"# Sort columns in weights_df separately for each row using topics_df.",
"sorters_df",
"=",
"topics_df",
".",
"apply",
"(",
"get_sort",
",",
"axis",
"=",
"1",
")",
"weights",
"=",
"weights_df",
".",
"as_matrix",
"(",
")",
"sorters",
"=",
"sorters_df",
".",
"as_matrix",
"(",
")",
"# there has to be a better way to do this.",
"for",
"i",
"in",
"range",
"(",
"sorters",
".",
"shape",
"[",
"0",
"]",
")",
":",
"weights",
"[",
"i",
",",
":",
"]",
"=",
"weights",
"[",
"i",
",",
"sorters",
"[",
"i",
",",
":",
"]",
"]",
"# Define topic names (e.g., topic_000)",
"index",
"=",
"dt_df",
"[",
"1",
"]",
"weights_df",
"=",
"pd",
".",
"DataFrame",
"(",
"columns",
"=",
"topic_names",
",",
"data",
"=",
"weights",
",",
"index",
"=",
"index",
")",
"weights_df",
".",
"index",
".",
"name",
"=",
"'pmid'",
"# topic_keys: Top [n_words] words for each topic.",
"keys_df",
"=",
"pd",
".",
"read_csv",
"(",
"os",
".",
"path",
".",
"join",
"(",
"tempdir",
",",
"'topic_keys.txt'",
")",
",",
"delimiter",
"=",
"'\\t'",
",",
"header",
"=",
"None",
",",
"index_col",
"=",
"0",
")",
"# Second column is a list of the terms.",
"keys_df",
"=",
"keys_df",
"[",
"[",
"2",
"]",
"]",
"keys_df",
".",
"rename",
"(",
"columns",
"=",
"{",
"2",
":",
"'terms'",
"}",
",",
"inplace",
"=",
"True",
")",
"keys_df",
".",
"index",
"=",
"topic_names",
"keys_df",
".",
"index",
".",
"name",
"=",
"'topic'",
"# Remove all temporary files (abstract files, model, and outputs).",
"shutil",
".",
"rmtree",
"(",
"tempdir",
")",
"# Return article topic weights and topic keys.",
"return",
"weights_df",
",",
"keys_df"
] |
948ce7edce15d7df693446e76834e0c23bfe8f11
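
A minimal usage sketch for the `run_lda` function documented in the record above. This is not part of the dataset: the PMIDs and abstract strings are placeholders, the import path is inferred from the record's `neurosynth/analysis/reduce.py` path field, and the call assumes the MALLET binary bundled under `resources/mallet/` plus a working Java runtime.

import pandas as pd
from neurosynth.analysis.reduce import run_lda  # module path inferred from the record's path field

# Hypothetical abstracts table; real input would typically come from download_abstracts().
abstracts = pd.DataFrame({
    'pmid': [1001, 1002, 1003],                      # placeholder PubMed IDs
    'abstract': ['working memory task fmri ...',
                 'nociceptive pain stimulation ...',
                 'reward prediction error learning ...'],
})

# Requires the MALLET toolbox shipped with the package and a Java runtime on the PATH.
weights_df, keys_df = run_lda(abstracts, n_topics=10, n_words=15, n_iters=200)
# weights_df: one row per pmid, one column per topic (topic_000, topic_001, ...)
# keys_df:    one row per topic listing its top-weighted terms
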
|
test
|
pearson
|
Correlates row vector x with each row vector in 2D array y.
|
neurosynth/analysis/stats.py
|
def pearson(x, y):
""" Correlates row vector x with each row vector in 2D array y. """
data = np.vstack((x, y))
ms = data.mean(axis=1)[(slice(None, None, None), None)]
datam = data - ms
datass = np.sqrt(np.sum(datam**2, axis=1))
temp = np.dot(datam[1:], datam[0].T)
rs = temp / (datass[1:] * datass[0])
return rs
|
def pearson(x, y):
""" Correlates row vector x with each row vector in 2D array y. """
data = np.vstack((x, y))
ms = data.mean(axis=1)[(slice(None, None, None), None)]
datam = data - ms
datass = np.sqrt(np.sum(datam**2, axis=1))
temp = np.dot(datam[1:], datam[0].T)
rs = temp / (datass[1:] * datass[0])
return rs
|
[
"Correlates",
"row",
"vector",
"x",
"with",
"each",
"row",
"vector",
"in",
"2D",
"array",
"y",
"."
] |
neurosynth/neurosynth
|
python
|
https://github.com/neurosynth/neurosynth/blob/948ce7edce15d7df693446e76834e0c23bfe8f11/neurosynth/analysis/stats.py#L8-L16
|
[
"def",
"pearson",
"(",
"x",
",",
"y",
")",
":",
"data",
"=",
"np",
".",
"vstack",
"(",
"(",
"x",
",",
"y",
")",
")",
"ms",
"=",
"data",
".",
"mean",
"(",
"axis",
"=",
"1",
")",
"[",
"(",
"slice",
"(",
"None",
",",
"None",
",",
"None",
")",
",",
"None",
")",
"]",
"datam",
"=",
"data",
"-",
"ms",
"datass",
"=",
"np",
".",
"sqrt",
"(",
"np",
".",
"sum",
"(",
"datam",
"**",
"2",
",",
"axis",
"=",
"1",
")",
")",
"temp",
"=",
"np",
".",
"dot",
"(",
"datam",
"[",
"1",
":",
"]",
",",
"datam",
"[",
"0",
"]",
".",
"T",
")",
"rs",
"=",
"temp",
"/",
"(",
"datass",
"[",
"1",
":",
"]",
"*",
"datass",
"[",
"0",
"]",
")",
"return",
"rs"
] |
948ce7edce15d7df693446e76834e0c23bfe8f11
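
A minimal usage sketch for the `pearson` function in the record above; the random data are arbitrary and the import path is inferred from the record's `neurosynth/analysis/stats.py` path field.

import numpy as np
from neurosynth.analysis.stats import pearson  # module path inferred from the record

rng = np.random.default_rng(0)
x = rng.normal(size=100)            # seed row vector, shape (100,)
y = rng.normal(size=(3, 100))       # 2D array: three candidate row vectors

rs = pearson(x, y)                  # shape (3,): r between x and each row of y

# Cross-check the first value against NumPy's own correlation coefficient.
assert np.isclose(rs[0], np.corrcoef(x, y[0])[0, 1])
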
|
test
|
two_way
|
Two-way chi-square test of independence.
Takes a 3D array as input: N(voxels) x 2 x 2, where the last two dimensions
are the contingency table for each of N voxels. Returns an array of
p-values.
|
neurosynth/analysis/stats.py
|
def two_way(cells):
""" Two-way chi-square test of independence.
Takes a 3D array as input: N(voxels) x 2 x 2, where the last two dimensions
are the contingency table for each of N voxels. Returns an array of
p-values.
"""
# Mute divide-by-zero warning for bad voxels since we account for that
# later
warnings.simplefilter("ignore", RuntimeWarning)
cells = cells.astype('float64') # Make sure we don't overflow
total = np.apply_over_axes(np.sum, cells, [1, 2]).ravel()
chi_sq = np.zeros(cells.shape, dtype='float64')
for i in range(2):
for j in range(2):
exp = np.sum(cells[:, i, :], 1).ravel() * \
np.sum(cells[:, :, j], 1).ravel() / total
bad_vox = np.where(exp == 0)[0]
chi_sq[:, i, j] = (cells[:, i, j] - exp) ** 2 / exp
chi_sq[bad_vox, i, j] = 1.0 # Set p-value for invalid voxels to 1
chi_sq = np.apply_over_axes(np.sum, chi_sq, [1, 2]).ravel()
return special.chdtrc(1, chi_sq)
|
def two_way(cells):
""" Two-way chi-square test of independence.
Takes a 3D array as input: N(voxels) x 2 x 2, where the last two dimensions
are the contingency table for each of N voxels. Returns an array of
p-values.
"""
# Mute divide-by-zero warning for bad voxels since we account for that
# later
warnings.simplefilter("ignore", RuntimeWarning)
cells = cells.astype('float64') # Make sure we don't overflow
total = np.apply_over_axes(np.sum, cells, [1, 2]).ravel()
chi_sq = np.zeros(cells.shape, dtype='float64')
for i in range(2):
for j in range(2):
exp = np.sum(cells[:, i, :], 1).ravel() * \
np.sum(cells[:, :, j], 1).ravel() / total
bad_vox = np.where(exp == 0)[0]
chi_sq[:, i, j] = (cells[:, i, j] - exp) ** 2 / exp
chi_sq[bad_vox, i, j] = 1.0 # Set p-value for invalid voxels to 1
chi_sq = np.apply_over_axes(np.sum, chi_sq, [1, 2]).ravel()
return special.chdtrc(1, chi_sq)
|
[
"Two",
"-",
"way",
"chi",
"-",
"square",
"test",
"of",
"independence",
".",
"Takes",
"a",
"3D",
"array",
"as",
"input",
":",
"N",
"(",
"voxels",
")",
"x",
"2",
"x",
"2",
"where",
"the",
"last",
"two",
"dimensions",
"are",
"the",
"contingency",
"table",
"for",
"each",
"of",
"N",
"voxels",
".",
"Returns",
"an",
"array",
"of",
"p",
"-",
"values",
"."
] |
neurosynth/neurosynth
|
python
|
https://github.com/neurosynth/neurosynth/blob/948ce7edce15d7df693446e76834e0c23bfe8f11/neurosynth/analysis/stats.py#L19-L40
|
[
"def",
"two_way",
"(",
"cells",
")",
":",
"# Mute divide-by-zero warning for bad voxels since we account for that",
"# later",
"warnings",
".",
"simplefilter",
"(",
"\"ignore\"",
",",
"RuntimeWarning",
")",
"cells",
"=",
"cells",
".",
"astype",
"(",
"'float64'",
")",
"# Make sure we don't overflow",
"total",
"=",
"np",
".",
"apply_over_axes",
"(",
"np",
".",
"sum",
",",
"cells",
",",
"[",
"1",
",",
"2",
"]",
")",
".",
"ravel",
"(",
")",
"chi_sq",
"=",
"np",
".",
"zeros",
"(",
"cells",
".",
"shape",
",",
"dtype",
"=",
"'float64'",
")",
"for",
"i",
"in",
"range",
"(",
"2",
")",
":",
"for",
"j",
"in",
"range",
"(",
"2",
")",
":",
"exp",
"=",
"np",
".",
"sum",
"(",
"cells",
"[",
":",
",",
"i",
",",
":",
"]",
",",
"1",
")",
".",
"ravel",
"(",
")",
"*",
"np",
".",
"sum",
"(",
"cells",
"[",
":",
",",
":",
",",
"j",
"]",
",",
"1",
")",
".",
"ravel",
"(",
")",
"/",
"total",
"bad_vox",
"=",
"np",
".",
"where",
"(",
"exp",
"==",
"0",
")",
"[",
"0",
"]",
"chi_sq",
"[",
":",
",",
"i",
",",
"j",
"]",
"=",
"(",
"cells",
"[",
":",
",",
"i",
",",
"j",
"]",
"-",
"exp",
")",
"**",
"2",
"/",
"exp",
"chi_sq",
"[",
"bad_vox",
",",
"i",
",",
"j",
"]",
"=",
"1.0",
"# Set p-value for invalid voxels to 1",
"chi_sq",
"=",
"np",
".",
"apply_over_axes",
"(",
"np",
".",
"sum",
",",
"chi_sq",
",",
"[",
"1",
",",
"2",
"]",
")",
".",
"ravel",
"(",
")",
"return",
"special",
".",
"chdtrc",
"(",
"1",
",",
"chi_sq",
")"
] |
948ce7edce15d7df693446e76834e0c23bfe8f11
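
A small worked example for `two_way` above, using two hand-built 2 x 2 contingency tables; the counts are invented for illustration and the import path is inferred from the record.

import numpy as np
from neurosynth.analysis.stats import two_way  # module path inferred from the record

# Two voxels, each with a 2 x 2 table of study counts:
# rows = term present / absent, columns = voxel active / inactive.
cells = np.array([
    [[40, 10],
     [20, 30]],    # clear association between term and activation
    [[25, 25],
     [25, 25]],    # no association at all
])

p = two_way(cells)
# p[0] is very small (chi-square of about 16.7 on 1 df); p[1] is exactly 1.0.
print(p)
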
|
test
|
one_way
|
One-way chi-square test of independence.
Takes a 1D array as input and compares activation at each voxel to
proportion expected under a uniform distribution throughout the array. Note
that if you're testing activation with this, make sure that only valid
voxels (e.g., in-mask gray matter voxels) are included in the array, or
results won't make any sense!
|
neurosynth/analysis/stats.py
|
def one_way(data, n):
""" One-way chi-square test of independence.
Takes a 1D array as input and compares activation at each voxel to
proportion expected under a uniform distribution throughout the array. Note
that if you're testing activation with this, make sure that only valid
voxels (e.g., in-mask gray matter voxels) are included in the array, or
results won't make any sense!
"""
term = data.astype('float64')
no_term = n - term
t_exp = np.mean(term, 0)
t_exp = np.array([t_exp, ] * data.shape[0])
nt_exp = n - t_exp
t_mss = (term - t_exp) ** 2 / t_exp
nt_mss = (no_term - nt_exp) ** 2 / nt_exp
chi2 = t_mss + nt_mss
return special.chdtrc(1, chi2)
|
def one_way(data, n):
""" One-way chi-square test of independence.
Takes a 1D array as input and compares activation at each voxel to
proportion expected under a uniform distribution throughout the array. Note
that if you're testing activation with this, make sure that only valid
voxels (e.g., in-mask gray matter voxels) are included in the array, or
results won't make any sense!
"""
term = data.astype('float64')
no_term = n - term
t_exp = np.mean(term, 0)
t_exp = np.array([t_exp, ] * data.shape[0])
nt_exp = n - t_exp
t_mss = (term - t_exp) ** 2 / t_exp
nt_mss = (no_term - nt_exp) ** 2 / nt_exp
chi2 = t_mss + nt_mss
return special.chdtrc(1, chi2)
|
[
"One",
"-",
"way",
"chi",
"-",
"square",
"test",
"of",
"independence",
".",
"Takes",
"a",
"1D",
"array",
"as",
"input",
"and",
"compares",
"activation",
"at",
"each",
"voxel",
"to",
"proportion",
"expected",
"under",
"a",
"uniform",
"distribution",
"throughout",
"the",
"array",
".",
"Note",
"that",
"if",
"you",
"re",
"testing",
"activation",
"with",
"this",
"make",
"sure",
"that",
"only",
"valid",
"voxels",
"(",
"e",
".",
"g",
".",
"in",
"-",
"mask",
"gray",
"matter",
"voxels",
")",
"are",
"included",
"in",
"the",
"array",
"or",
"results",
"won",
"t",
"make",
"any",
"sense!"
] |
neurosynth/neurosynth
|
python
|
https://github.com/neurosynth/neurosynth/blob/948ce7edce15d7df693446e76834e0c23bfe8f11/neurosynth/analysis/stats.py#L43-L59
|
[
"def",
"one_way",
"(",
"data",
",",
"n",
")",
":",
"term",
"=",
"data",
".",
"astype",
"(",
"'float64'",
")",
"no_term",
"=",
"n",
"-",
"term",
"t_exp",
"=",
"np",
".",
"mean",
"(",
"term",
",",
"0",
")",
"t_exp",
"=",
"np",
".",
"array",
"(",
"[",
"t_exp",
",",
"]",
"*",
"data",
".",
"shape",
"[",
"0",
"]",
")",
"nt_exp",
"=",
"n",
"-",
"t_exp",
"t_mss",
"=",
"(",
"term",
"-",
"t_exp",
")",
"**",
"2",
"/",
"t_exp",
"nt_mss",
"=",
"(",
"no_term",
"-",
"nt_exp",
")",
"**",
"2",
"/",
"nt_exp",
"chi2",
"=",
"t_mss",
"+",
"nt_mss",
"return",
"special",
".",
"chdtrc",
"(",
"1",
",",
"chi2",
")"
] |
948ce7edce15d7df693446e76834e0c23bfe8f11
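
A short sketch for `one_way` above; the counts are made up and simply illustrate the expected behaviour, namely that voxels whose activation count departs from the mean rate receive small p-values.

import numpy as np
from neurosynth.analysis.stats import one_way  # module path inferred from the record

# Number of studies (out of n = 100) reporting activation at each of five voxels.
data = np.array([10, 55, 48, 52, 95])
p = one_way(data, n=100)
# The mean count is 52, so the extreme voxels (10 and 95) get very small p-values.
print(p)
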
|
test
|
fdr
|
Determine FDR threshold given a p value array and desired false
discovery rate q.
|
neurosynth/analysis/stats.py
|
def fdr(p, q=.05):
""" Determine FDR threshold given a p value array and desired false
discovery rate q. """
s = np.sort(p)
nvox = p.shape[0]
null = np.array(range(1, nvox + 1), dtype='float') * q / nvox
below = np.where(s <= null)[0]
return s[max(below)] if len(below) else -1
|
def fdr(p, q=.05):
""" Determine FDR threshold given a p value array and desired false
discovery rate q. """
s = np.sort(p)
nvox = p.shape[0]
null = np.array(range(1, nvox + 1), dtype='float') * q / nvox
below = np.where(s <= null)[0]
return s[max(below)] if len(below) else -1
|
[
"Determine",
"FDR",
"threshold",
"given",
"a",
"p",
"value",
"array",
"and",
"desired",
"false",
"discovery",
"rate",
"q",
"."
] |
neurosynth/neurosynth
|
python
|
https://github.com/neurosynth/neurosynth/blob/948ce7edce15d7df693446e76834e0c23bfe8f11/neurosynth/analysis/stats.py#L62-L69
|
[
"def",
"fdr",
"(",
"p",
",",
"q",
"=",
".05",
")",
":",
"s",
"=",
"np",
".",
"sort",
"(",
"p",
")",
"nvox",
"=",
"p",
".",
"shape",
"[",
"0",
"]",
"null",
"=",
"np",
".",
"array",
"(",
"range",
"(",
"1",
",",
"nvox",
"+",
"1",
")",
",",
"dtype",
"=",
"'float'",
")",
"*",
"q",
"/",
"nvox",
"below",
"=",
"np",
".",
"where",
"(",
"s",
"<=",
"null",
")",
"[",
"0",
"]",
"return",
"s",
"[",
"max",
"(",
"below",
")",
"]",
"if",
"len",
"(",
"below",
")",
"else",
"-",
"1"
] |
948ce7edce15d7df693446e76834e0c23bfe8f11
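
A deterministic example for `fdr` above (the p-values are invented for illustration): with q = 0.05 the three smallest values fall under the Benjamini-Hochberg line, so the returned threshold is 0.0019.

import numpy as np
from neurosynth.analysis.stats import fdr  # module path inferred from the record

p = np.array([0.0001, 0.0004, 0.0019, 0.03, 0.2, 0.5, 0.8])
thresh = fdr(p, q=0.05)     # compares sorted p[i] against (i + 1) * q / N
assert thresh == 0.0019     # only p-values <= 0.0019 survive; -1 would mean none survive
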
|
test
|
download
|
Download the latest data files.
Args:
path (str): Location to save the retrieved data files. Defaults to
current directory.
unpack (bool): If True, unzips the data file post-download.
|
neurosynth/base/dataset.py
|
def download(path='.', url=None, unpack=False):
""" Download the latest data files.
Args:
path (str): Location to save the retrieved data files. Defaults to
current directory.
unpack (bool): If True, unzips the data file post-download.
"""
if url is None:
url = 'https://github.com/neurosynth/neurosynth-data/blob/master/current_data.tar.gz?raw=true'
if os.path.exists(path) and os.path.isdir(path):
basename = os.path.basename(url).split('?')[0]
filename = os.path.join(path, basename)
else:
filename = path
f = open(filename, 'wb')
u = urlopen(url)
file_size = int(u.headers["Content-Length"][0])
print("Downloading the latest Neurosynth files: {0} bytes: {1}".format(
url, file_size))
bytes_dl = 0
block_size = 8192
while True:
buffer = u.read(block_size)
if not buffer:
break
bytes_dl += len(buffer)
f.write(buffer)
p = float(bytes_dl) / file_size
status = r"{0} [{1:.2%}]".format(bytes_dl, p)
status = status + chr(8) * (len(status) + 1)
sys.stdout.write(status)
f.close()
if unpack:
import tarfile
tarfile.open(filename, 'r:gz').extractall(os.path.dirname(filename))
|
def download(path='.', url=None, unpack=False):
""" Download the latest data files.
Args:
path (str): Location to save the retrieved data files. Defaults to
current directory.
unpack (bool): If True, unzips the data file post-download.
"""
if url is None:
url = 'https://github.com/neurosynth/neurosynth-data/blob/master/current_data.tar.gz?raw=true'
if os.path.exists(path) and os.path.isdir(path):
basename = os.path.basename(url).split('?')[0]
filename = os.path.join(path, basename)
else:
filename = path
f = open(filename, 'wb')
u = urlopen(url)
file_size = int(u.headers["Content-Length"][0])
print("Downloading the latest Neurosynth files: {0} bytes: {1}".format(
url, file_size))
bytes_dl = 0
block_size = 8192
while True:
buffer = u.read(block_size)
if not buffer:
break
bytes_dl += len(buffer)
f.write(buffer)
p = float(bytes_dl) / file_size
status = r"{0} [{1:.2%}]".format(bytes_dl, p)
status = status + chr(8) * (len(status) + 1)
sys.stdout.write(status)
f.close()
if unpack:
import tarfile
tarfile.open(filename, 'r:gz').extractall(os.path.dirname(filename))
|
[
"Download",
"the",
"latest",
"data",
"files",
".",
"Args",
":",
"path",
"(",
"str",
")",
":",
"Location",
"to",
"save",
"the",
"retrieved",
"data",
"files",
".",
"Defaults",
"to",
"current",
"directory",
".",
"unpack",
"(",
"bool",
")",
":",
"If",
"True",
"unzips",
"the",
"data",
"file",
"post",
"-",
"download",
"."
] |
neurosynth/neurosynth
|
python
|
https://github.com/neurosynth/neurosynth/blob/948ce7edce15d7df693446e76834e0c23bfe8f11/neurosynth/base/dataset.py#L29-L69
|
[
"def",
"download",
"(",
"path",
"=",
"'.'",
",",
"url",
"=",
"None",
",",
"unpack",
"=",
"False",
")",
":",
"if",
"url",
"is",
"None",
":",
"url",
"=",
"'https://github.com/neurosynth/neurosynth-data/blob/master/current_data.tar.gz?raw=true'",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"path",
")",
"and",
"os",
".",
"path",
".",
"isdir",
"(",
"path",
")",
":",
"basename",
"=",
"os",
".",
"path",
".",
"basename",
"(",
"url",
")",
".",
"split",
"(",
"'?'",
")",
"[",
"0",
"]",
"filename",
"=",
"os",
".",
"path",
".",
"join",
"(",
"path",
",",
"basename",
")",
"else",
":",
"filename",
"=",
"path",
"f",
"=",
"open",
"(",
"filename",
",",
"'wb'",
")",
"u",
"=",
"urlopen",
"(",
"url",
")",
"file_size",
"=",
"int",
"(",
"u",
".",
"headers",
"[",
"\"Content-Length\"",
"]",
"[",
"0",
"]",
")",
"print",
"(",
"\"Downloading the latest Neurosynth files: {0} bytes: {1}\"",
".",
"format",
"(",
"url",
",",
"file_size",
")",
")",
"bytes_dl",
"=",
"0",
"block_size",
"=",
"8192",
"while",
"True",
":",
"buffer",
"=",
"u",
".",
"read",
"(",
"block_size",
")",
"if",
"not",
"buffer",
":",
"break",
"bytes_dl",
"+=",
"len",
"(",
"buffer",
")",
"f",
".",
"write",
"(",
"buffer",
")",
"p",
"=",
"float",
"(",
"bytes_dl",
")",
"/",
"file_size",
"status",
"=",
"r\"{0} [{1:.2%}]\"",
".",
"format",
"(",
"bytes_dl",
",",
"p",
")",
"status",
"=",
"status",
"+",
"chr",
"(",
"8",
")",
"*",
"(",
"len",
"(",
"status",
")",
"+",
"1",
")",
"sys",
".",
"stdout",
".",
"write",
"(",
"status",
")",
"f",
".",
"close",
"(",
")",
"if",
"unpack",
":",
"import",
"tarfile",
"tarfile",
".",
"open",
"(",
"filename",
",",
"'r:gz'",
")",
".",
"extractall",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"filename",
")",
")"
] |
948ce7edce15d7df693446e76834e0c23bfe8f11
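
A one-call sketch for `download` above; the target directory is a placeholder, and the default URL fetched is the `current_data.tar.gz` archive shown in the record.

import os
from neurosynth.base.dataset import download  # module path inferred from the record

target = '/tmp/neurosynth-data'        # placeholder directory
os.makedirs(target, exist_ok=True)     # download() expects an existing directory (or a filename)
download(path=target, unpack=True)     # fetches current_data.tar.gz and unpacks it in place
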
|
test
|
download_abstracts
|
Download the abstracts for a dataset/list of pmids
|
neurosynth/base/dataset.py
|
def download_abstracts(dataset, path='.', email=None, out_file=None):
""" Download the abstracts for a dataset/list of pmids
"""
try:
from Bio import Entrez, Medline
except:
raise Exception(
'Module biopython is required for downloading abstracts from PubMed.')
if email is None:
raise Exception('No email address provided.')
Entrez.email = email
if isinstance(dataset, Dataset):
pmids = dataset.image_table.ids.astype(str).tolist()
elif isinstance(dataset, list):
pmids = [str(pmid) for pmid in dataset]
else:
raise Exception(
'Dataset type not recognized: {0}'.format(type(dataset)))
records = []
# PubMed only allows you to search ~1000 at a time. I chose 900 to be safe.
chunks = [pmids[x: x + 900] for x in range(0, len(pmids), 900)]
for chunk in chunks:
h = Entrez.efetch(db='pubmed', id=chunk, rettype='medline',
retmode='text')
records += list(Medline.parse(h))
# Pull data for studies with abstracts
data = [[study['PMID'], study['AB']]
for study in records if study.get('AB', None)]
df = pd.DataFrame(columns=['pmid', 'abstract'], data=data)
if out_file is not None:
df.to_csv(os.path.join(os.path.abspath(path), out_file), index=False)
return df
|
def download_abstracts(dataset, path='.', email=None, out_file=None):
""" Download the abstracts for a dataset/list of pmids
"""
try:
from Bio import Entrez, Medline
except:
raise Exception(
'Module biopython is required for downloading abstracts from PubMed.')
if email is None:
raise Exception('No email address provided.')
Entrez.email = email
if isinstance(dataset, Dataset):
pmids = dataset.image_table.ids.astype(str).tolist()
elif isinstance(dataset, list):
pmids = [str(pmid) for pmid in dataset]
else:
raise Exception(
'Dataset type not recognized: {0}'.format(type(dataset)))
records = []
# PubMed only allows you to search ~1000 at a time. I chose 900 to be safe.
chunks = [pmids[x: x + 900] for x in range(0, len(pmids), 900)]
for chunk in chunks:
h = Entrez.efetch(db='pubmed', id=chunk, rettype='medline',
retmode='text')
records += list(Medline.parse(h))
# Pull data for studies with abstracts
data = [[study['PMID'], study['AB']]
for study in records if study.get('AB', None)]
df = pd.DataFrame(columns=['pmid', 'abstract'], data=data)
if out_file is not None:
df.to_csv(os.path.join(os.path.abspath(path), out_file), index=False)
return df
|
[
"Download",
"the",
"abstracts",
"for",
"a",
"dataset",
"/",
"list",
"of",
"pmids"
] |
neurosynth/neurosynth
|
python
|
https://github.com/neurosynth/neurosynth/blob/948ce7edce15d7df693446e76834e0c23bfe8f11/neurosynth/base/dataset.py#L72-L107
|
[
"def",
"download_abstracts",
"(",
"dataset",
",",
"path",
"=",
"'.'",
",",
"email",
"=",
"None",
",",
"out_file",
"=",
"None",
")",
":",
"try",
":",
"from",
"Bio",
"import",
"Entrez",
",",
"Medline",
"except",
":",
"raise",
"Exception",
"(",
"'Module biopython is required for downloading abstracts from PubMed.'",
")",
"if",
"email",
"is",
"None",
":",
"raise",
"Exception",
"(",
"'No email address provided.'",
")",
"Entrez",
".",
"email",
"=",
"email",
"if",
"isinstance",
"(",
"dataset",
",",
"Dataset",
")",
":",
"pmids",
"=",
"dataset",
".",
"image_table",
".",
"ids",
".",
"astype",
"(",
"str",
")",
".",
"tolist",
"(",
")",
"elif",
"isinstance",
"(",
"dataset",
",",
"list",
")",
":",
"pmids",
"=",
"[",
"str",
"(",
"pmid",
")",
"for",
"pmid",
"in",
"dataset",
"]",
"else",
":",
"raise",
"Exception",
"(",
"'Dataset type not recognized: {0}'",
".",
"format",
"(",
"type",
"(",
"dataset",
")",
")",
")",
"records",
"=",
"[",
"]",
"# PubMed only allows you to search ~1000 at a time. I chose 900 to be safe.",
"chunks",
"=",
"[",
"pmids",
"[",
"x",
":",
"x",
"+",
"900",
"]",
"for",
"x",
"in",
"range",
"(",
"0",
",",
"len",
"(",
"pmids",
")",
",",
"900",
")",
"]",
"for",
"chunk",
"in",
"chunks",
":",
"h",
"=",
"Entrez",
".",
"efetch",
"(",
"db",
"=",
"'pubmed'",
",",
"id",
"=",
"chunk",
",",
"rettype",
"=",
"'medline'",
",",
"retmode",
"=",
"'text'",
")",
"records",
"+=",
"list",
"(",
"Medline",
".",
"parse",
"(",
"h",
")",
")",
"# Pull data for studies with abstracts",
"data",
"=",
"[",
"[",
"study",
"[",
"'PMID'",
"]",
",",
"study",
"[",
"'AB'",
"]",
"]",
"for",
"study",
"in",
"records",
"if",
"study",
".",
"get",
"(",
"'AB'",
",",
"None",
")",
"]",
"df",
"=",
"pd",
".",
"DataFrame",
"(",
"columns",
"=",
"[",
"'pmid'",
",",
"'abstract'",
"]",
",",
"data",
"=",
"data",
")",
"if",
"out_file",
"is",
"not",
"None",
":",
"df",
".",
"to_csv",
"(",
"os",
".",
"path",
".",
"join",
"(",
"os",
".",
"path",
".",
"abspath",
"(",
"path",
")",
",",
"out_file",
")",
",",
"index",
"=",
"False",
")",
"return",
"df"
] |
948ce7edce15d7df693446e76834e0c23bfe8f11
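
A brief sketch for `download_abstracts` above; it needs biopython installed, a real contact email for Entrez, and either a `Dataset` instance or a plain list of PMIDs. The IDs below are placeholders to be replaced with real PubMed IDs.

from neurosynth.base.dataset import download_abstracts  # module path inferred from the record

pmids = [10000001, 10000002]                 # placeholder PubMed IDs
df = download_abstracts(pmids, email='you@example.org',
                        path='.', out_file='abstracts.csv')
# df has 'pmid' and 'abstract' columns for every record that returned an abstract.
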
|
test
|
Dataset._load_activations
|
Load activation data from a text file.
Args:
filename (str): a string pointing to the location of the txt file
to read from.
|
neurosynth/base/dataset.py
|
def _load_activations(self, filename):
""" Load activation data from a text file.
Args:
filename (str): a string pointing to the location of the txt file
to read from.
"""
logger.info("Loading activation data from %s..." % filename)
activations = pd.read_csv(filename, sep='\t')
activations.columns = [col.lower()
for col in list(activations.columns)]
# Make sure all mandatory columns exist
mc = ['x', 'y', 'z', 'id', 'space']
if (set(mc) - set(list(activations.columns))):
logger.error(
"At least one of mandatory columns (x, y, z, id, and space) "
"is missing from input file.")
return
# Transform to target space where needed
spaces = activations['space'].unique()
xyz = activations[['x', 'y', 'z']].values
for s in spaces:
if s != self.transformer.target:
inds = activations['space'] == s
xyz[inds] = self.transformer.apply(s, xyz[inds])
activations[['x', 'y', 'z']] = xyz
# xyz --> ijk
ijk = pd.DataFrame(
transformations.xyz_to_mat(xyz), columns=['i', 'j', 'k'])
activations = pd.concat([activations, ijk], axis=1)
return activations
|
def _load_activations(self, filename):
""" Load activation data from a text file.
Args:
filename (str): a string pointing to the location of the txt file
to read from.
"""
logger.info("Loading activation data from %s..." % filename)
activations = pd.read_csv(filename, sep='\t')
activations.columns = [col.lower()
for col in list(activations.columns)]
# Make sure all mandatory columns exist
mc = ['x', 'y', 'z', 'id', 'space']
if (set(mc) - set(list(activations.columns))):
logger.error(
"At least one of mandatory columns (x, y, z, id, and space) "
"is missing from input file.")
return
# Transform to target space where needed
spaces = activations['space'].unique()
xyz = activations[['x', 'y', 'z']].values
for s in spaces:
if s != self.transformer.target:
inds = activations['space'] == s
xyz[inds] = self.transformer.apply(s, xyz[inds])
activations[['x', 'y', 'z']] = xyz
# xyz --> ijk
ijk = pd.DataFrame(
transformations.xyz_to_mat(xyz), columns=['i', 'j', 'k'])
activations = pd.concat([activations, ijk], axis=1)
return activations
|
[
"Load",
"activation",
"data",
"from",
"a",
"text",
"file",
"."
] |
neurosynth/neurosynth
|
python
|
https://github.com/neurosynth/neurosynth/blob/948ce7edce15d7df693446e76834e0c23bfe8f11/neurosynth/base/dataset.py#L189-L223
|
[
"def",
"_load_activations",
"(",
"self",
",",
"filename",
")",
":",
"logger",
".",
"info",
"(",
"\"Loading activation data from %s...\"",
"%",
"filename",
")",
"activations",
"=",
"pd",
".",
"read_csv",
"(",
"filename",
",",
"sep",
"=",
"'\\t'",
")",
"activations",
".",
"columns",
"=",
"[",
"col",
".",
"lower",
"(",
")",
"for",
"col",
"in",
"list",
"(",
"activations",
".",
"columns",
")",
"]",
"# Make sure all mandatory columns exist",
"mc",
"=",
"[",
"'x'",
",",
"'y'",
",",
"'z'",
",",
"'id'",
",",
"'space'",
"]",
"if",
"(",
"set",
"(",
"mc",
")",
"-",
"set",
"(",
"list",
"(",
"activations",
".",
"columns",
")",
")",
")",
":",
"logger",
".",
"error",
"(",
"\"At least one of mandatory columns (x, y, z, id, and space) \"",
"\"is missing from input file.\"",
")",
"return",
"# Transform to target space where needed",
"spaces",
"=",
"activations",
"[",
"'space'",
"]",
".",
"unique",
"(",
")",
"xyz",
"=",
"activations",
"[",
"[",
"'x'",
",",
"'y'",
",",
"'z'",
"]",
"]",
".",
"values",
"for",
"s",
"in",
"spaces",
":",
"if",
"s",
"!=",
"self",
".",
"transformer",
".",
"target",
":",
"inds",
"=",
"activations",
"[",
"'space'",
"]",
"==",
"s",
"xyz",
"[",
"inds",
"]",
"=",
"self",
".",
"transformer",
".",
"apply",
"(",
"s",
",",
"xyz",
"[",
"inds",
"]",
")",
"activations",
"[",
"[",
"'x'",
",",
"'y'",
",",
"'z'",
"]",
"]",
"=",
"xyz",
"# xyz --> ijk",
"ijk",
"=",
"pd",
".",
"DataFrame",
"(",
"transformations",
".",
"xyz_to_mat",
"(",
"xyz",
")",
",",
"columns",
"=",
"[",
"'i'",
",",
"'j'",
",",
"'k'",
"]",
")",
"activations",
"=",
"pd",
".",
"concat",
"(",
"[",
"activations",
",",
"ijk",
"]",
",",
"axis",
"=",
"1",
")",
"return",
"activations"
] |
948ce7edce15d7df693446e76834e0c23bfe8f11
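
For the private `Dataset._load_activations` method above, the useful thing to illustrate is the input it expects: a tab-delimited text file whose header contains at least the columns x, y, z, id and space (names are lower-cased on load, extra columns are preserved). The rows below are made up; the space labels are illustrative and must match whatever the Dataset's coordinate transformer recognizes.

# A made-up activations file in the layout _load_activations expects.
rows = (
    "id\tx\ty\tz\tspace\n"
    "10000001\t-38\t-48\t44\tMNI\n"      # placeholder PMID and coordinates
    "10000001\t40\t-44\t48\tTAL\n"       # non-target spaces are transformed on load
)
with open('example_database.txt', 'w') as fh:
    fh.write(rows)
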
|
test
|
Dataset.create_image_table
|
Create and store a new ImageTable instance based on the current
Dataset. Will generally be called privately, but may be useful as a
convenience method in cases where the user wants to re-generate the
table with a new smoothing kernel of different radius.
Args:
r (int): An optional integer indicating the radius of the smoothing
kernel. By default, this is None, which will keep whatever
value is currently set in the Dataset instance.
|
neurosynth/base/dataset.py
|
def create_image_table(self, r=None):
""" Create and store a new ImageTable instance based on the current
Dataset. Will generally be called privately, but may be useful as a
convenience method in cases where the user wants to re-generate the
table with a new smoothing kernel of different radius.
Args:
r (int): An optional integer indicating the radius of the smoothing
kernel. By default, this is None, which will keep whatever
value is currently set in the Dataset instance.
"""
logger.info("Creating image table...")
if r is not None:
self.r = r
self.image_table = ImageTable(self)
|
def create_image_table(self, r=None):
""" Create and store a new ImageTable instance based on the current
Dataset. Will generally be called privately, but may be useful as a
convenience method in cases where the user wants to re-generate the
table with a new smoothing kernel of different radius.
Args:
r (int): An optional integer indicating the radius of the smoothing
kernel. By default, this is None, which will keep whatever
value is currently set in the Dataset instance.
"""
logger.info("Creating image table...")
if r is not None:
self.r = r
self.image_table = ImageTable(self)
|
[
"Create",
"and",
"store",
"a",
"new",
"ImageTable",
"instance",
"based",
"on",
"the",
"current",
"Dataset",
".",
"Will",
"generally",
"be",
"called",
"privately",
"but",
"may",
"be",
"useful",
"as",
"a",
"convenience",
"method",
"in",
"cases",
"where",
"the",
"user",
"wants",
"to",
"re",
"-",
"generate",
"the",
"table",
"with",
"a",
"new",
"smoothing",
"kernel",
"of",
"different",
"radius",
"."
] |
neurosynth/neurosynth
|
python
|
https://github.com/neurosynth/neurosynth/blob/948ce7edce15d7df693446e76834e0c23bfe8f11/neurosynth/base/dataset.py#L225-L239
|
[
"def",
"create_image_table",
"(",
"self",
",",
"r",
"=",
"None",
")",
":",
"logger",
".",
"info",
"(",
"\"Creating image table...\"",
")",
"if",
"r",
"is",
"not",
"None",
":",
"self",
".",
"r",
"=",
"r",
"self",
".",
"image_table",
"=",
"ImageTable",
"(",
"self",
")"
] |
948ce7edce15d7df693446e76834e0c23bfe8f11
|