Dataset fields (name, type, value range):

partition           stringclasses   3 values
func_name           stringlengths   1-134
docstring           stringlengths   1-46.9k
path                stringlengths   4-223
original_string     stringlengths   75-104k
code                stringlengths   75-104k
docstring_tokens    listlengths     1-1.97k
repo                stringlengths   7-55
language            stringclasses   1 value
url                 stringlengths   87-315
code_tokens         listlengths     19-28.4k
sha                 stringlengths   40-40
valid
CorniceSwagger._extract_transform_colander_schema
Extract schema from view args and transform it using
the pipeline of schema transformers

:param args:
    Arguments from the view decorator.

:rtype: colander.MappingSchema()
:returns: View schema cloned and transformed
cornice_swagger/swagger.py
def _extract_transform_colander_schema(self, args):
    """
    Extract schema from view args and transform it using
    the pipeline of schema transformers

    :param args:
        Arguments from the view decorator.

    :rtype: colander.MappingSchema()
    :returns: View schema cloned and transformed
    """

    schema = args.get('schema', colander.MappingSchema())
    if not isinstance(schema, colander.Schema):
        schema = schema()
    schema = schema.clone()

    for transformer in self.schema_transformers:
        schema = transformer(schema, args)

    return schema
[ "Extract", "schema", "from", "view", "args", "and", "transform", "it", "using", "the", "pipeline", "of", "schema", "transformers" ]
Cornices/cornice.ext.swagger
python
https://github.com/Cornices/cornice.ext.swagger/blob/c31a5cc8d5dd112b11dc41ccb6d09b423b537abc/cornice_swagger/swagger.py#L687-L705
[ "def", "_extract_transform_colander_schema", "(", "self", ",", "args", ")", ":", "schema", "=", "args", ".", "get", "(", "'schema'", ",", "colander", ".", "MappingSchema", "(", ")", ")", "if", "not", "isinstance", "(", "schema", ",", "colander", ".", "Schema", ")", ":", "schema", "=", "schema", "(", ")", "schema", "=", "schema", ".", "clone", "(", ")", "for", "transformer", "in", "self", ".", "schema_transformers", ":", "schema", "=", "transformer", "(", "schema", ",", "args", ")", "return", "schema" ]
c31a5cc8d5dd112b11dc41ccb6d09b423b537abc
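A schema transformer here is any callable taking (schema, args) and returning a (possibly modified) schema. A minimal sketch of a hypothetical transformer that strips a child node, assuming a colander schema with a node named 'internal_id' (both names are illustrative, not part of cornice_swagger):

def drop_internal_id(schema, args):
    # colander.SchemaNode supports dict-like access to its children
    if 'internal_id' in schema:
        del schema['internal_id']
    return schema

Appending such a callable to CorniceSwagger.schema_transformers makes _extract_transform_colander_schema apply it to every cloned view schema.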
valid
main
Creates arguments and parses user input
limf/cli.py
def main():
    """Creates arguments and parses user input"""
    parser = argparse.ArgumentParser(
        description=_('Uploads selected file to working pomf.se clone'))
    parser.add_argument('files', metavar='file', nargs='*', type=str,
                        help=_('Files to upload'))
    parser.add_argument('-c', metavar='host_number', type=int, dest='host',
                        default=None,
                        help=_('The number (0-n) of the selected host (default is random)'))
    parser.add_argument('-l', dest='only_link', action='store_const',
                        const=True, default=False,
                        help=_('Changes output to just link to the file'))
    parser.add_argument('-e', dest='encrypt', action='store_const',
                        const=True, default=False,
                        help=_('Encrypts then uploads the files.'))
    parser.add_argument('-d', dest='decrypt', action='store_const',
                        const=True, default=False,
                        help=_('Decrypts files from links with encrypted files'))
    parser.add_argument('-j', dest="local_list", default=False,
                        help=_('Path to a local list file'))
    parser.add_argument('-s', dest="show_list", action='store_const',
                        const=True, default=False,
                        help=_('Show the host list (will not upload your files when called)'))
    parser.add_argument('-m', dest='limit_size', action='store_const',
                        const=True, default=False,
                        help=_('Do not upload file if it exceeds the certain host limit'))
    parser.add_argument('-nc', dest='no_cloudflare', action='store_const',
                        const=True, default=False,
                        help=_('Do not use hosts which use Cloudflare.'))
    parser.add_argument('--log-file', metavar="LOGFILE", dest="logfile",
                        default="~/limf.log",
                        help=_("The location of log file"))
    parser.add_argument('--log', dest='log', action="store_const",
                        const=True, default=False,
                        help=_("Enables the logging feature, default logfile is ~/limf.log"))
    args = parser.parse_args()

    try:
        if args.local_list:
            clone_list = retrieve_local_host_list(args.local_list)
        else:
            clone_list = retrieve_online_host_list()

        if len(min(clone_list, key=len)) < 5 and (args.limit_size or args.no_cloudflare):
            print(_("For newer options, please update your host_list."))
            exit()

        if args.host and not(0 <= args.host < len(clone_list)):
            print(generate_host_string(clone_list))
            exit()

        parse_arguments(args, clone_list)
    except FileNotFoundError:
        print(_('Please enter a valid file.'))
[ "Creates", "arguments", "and", "parses", "user", "input" ]
lc-guy/limf
python
https://github.com/lc-guy/limf/blob/ad380feb70ef8e579a91ca09c807efec9e8af565/limf/cli.py#L10-L60
[ "def", "main", "(", ")", ":", "parser", "=", "argparse", ".", "ArgumentParser", "(", "description", "=", "_", "(", "'Uploads selected file to working pomf.se clone'", ")", ")", "parser", ".", "add_argument", "(", "'files'", ",", "metavar", "=", "'file'", ",", "nargs", "=", "'*'", ",", "type", "=", "str", ",", "help", "=", "_", "(", "'Files to upload'", ")", ")", "parser", ".", "add_argument", "(", "'-c'", ",", "metavar", "=", "'host_number'", ",", "type", "=", "int", ",", "dest", "=", "'host'", ",", "default", "=", "None", ",", "help", "=", "_", "(", "'The number (0-n) of the selected host (default is random)'", ")", ")", "parser", ".", "add_argument", "(", "'-l'", ",", "dest", "=", "'only_link'", ",", "action", "=", "'store_const'", ",", "const", "=", "True", ",", "default", "=", "False", ",", "help", "=", "_", "(", "'Changes output to just link to the file'", ")", ")", "parser", ".", "add_argument", "(", "'-e'", ",", "dest", "=", "'encrypt'", ",", "action", "=", "'store_const'", ",", "const", "=", "True", ",", "default", "=", "False", ",", "help", "=", "_", "(", "'Encrypts then uploads the files.'", ")", ")", "parser", ".", "add_argument", "(", "'-d'", ",", "dest", "=", "'decrypt'", ",", "action", "=", "'store_const'", ",", "const", "=", "True", ",", "default", "=", "False", ",", "help", "=", "_", "(", "'Decrypts files from links with encrypted files'", ")", ")", "parser", ".", "add_argument", "(", "'-j'", ",", "dest", "=", "\"local_list\"", ",", "default", "=", "False", ",", "help", "=", "_", "(", "'Path to a local list file'", ")", ")", "parser", ".", "add_argument", "(", "'-s'", ",", "dest", "=", "\"show_list\"", ",", "action", "=", "'store_const'", ",", "const", "=", "True", ",", "default", "=", "False", ",", "help", "=", "_", "(", "'Show the host list (will not upload your files when called)'", ")", ")", "parser", ".", "add_argument", "(", "'-m'", ",", "dest", "=", "'limit_size'", ",", "action", "=", "'store_const'", ",", "const", "=", "True", ",", "default", "=", "False", ",", "help", "=", "_", "(", "'Do not upload file if it exceeds the certain host limit'", ")", ")", "parser", ".", "add_argument", "(", "'-nc'", ",", "dest", "=", "'no_cloudflare'", ",", "action", "=", "'store_const'", ",", "const", "=", "True", ",", "default", "=", "False", ",", "help", "=", "_", "(", "'Do not use hosts which use Cloudflare.'", ")", ")", "parser", ".", "add_argument", "(", "'--log-file'", ",", "metavar", "=", "\"LOGFILE\"", ",", "dest", "=", "\"logfile\"", ",", "default", "=", "\"~/limf.log\"", ",", "help", "=", "_", "(", "\"The location of log file\"", ")", ")", "parser", ".", "add_argument", "(", "'--log'", ",", "dest", "=", "'log'", ",", "action", "=", "\"store_const\"", ",", "const", "=", "True", ",", "default", "=", "False", ",", "help", "=", "_", "(", "\"Enables the logging feature, default logfile is ~/limf.log\"", ")", ")", "args", "=", "parser", ".", "parse_args", "(", ")", "try", ":", "if", "args", ".", "local_list", ":", "clone_list", "=", "retrieve_local_host_list", "(", "args", ".", "local_list", ")", "else", ":", "clone_list", "=", "retrieve_online_host_list", "(", ")", "if", "len", "(", "min", "(", "clone_list", ",", "key", "=", "len", ")", ")", "<", "5", "and", "(", "args", ".", "limit_size", "or", "args", ".", "no_cloudflare", ")", ":", "print", "(", "_", "(", "\"For newer options, please update your host_list.\"", ")", ")", "exit", "(", ")", "if", "args", ".", "host", "and", "not", "(", "0", "<=", "args", ".", "host", "<", "len", "(", "clone_list", ")", ")", ":", "print", "(", 
"generate_host_string", "(", "clone_list", ")", ")", "exit", "(", ")", "parse_arguments", "(", "args", ",", "clone_list", ")", "except", "FileNotFoundError", ":", "print", "(", "_", "(", "'Plese enter valid file.'", ")", ")" ]
ad380feb70ef8e579a91ca09c807efec9e8af565
valid
ParameterConverter.convert
Convert node schema into a parameter object.
cornice_swagger/converters/parameters.py
def convert(self, schema_node, definition_handler):
    """
    Convert node schema into a parameter object.
    """

    converted = {
        'name': schema_node.name,
        'in': self._in,
        'required': schema_node.required
    }
    if schema_node.description:
        converted['description'] = schema_node.description

    if schema_node.default:
        converted['default'] = schema_node.default

    schema = definition_handler(schema_node)
    # Parameters shouldn't have a title
    schema.pop('title', None)
    converted.update(schema)

    if schema.get('type') == 'array':
        converted['items'] = {'type': schema['items']['type']}

    return converted
[ "Convert", "node", "schema", "into", "a", "parameter", "object", "." ]
Cornices/cornice.ext.swagger
python
https://github.com/Cornices/cornice.ext.swagger/blob/c31a5cc8d5dd112b11dc41ccb6d09b423b537abc/cornice_swagger/converters/parameters.py#L9-L33
[ "def", "convert", "(", "self", ",", "schema_node", ",", "definition_handler", ")", ":", "converted", "=", "{", "'name'", ":", "schema_node", ".", "name", ",", "'in'", ":", "self", ".", "_in", ",", "'required'", ":", "schema_node", ".", "required", "}", "if", "schema_node", ".", "description", ":", "converted", "[", "'description'", "]", "=", "schema_node", ".", "description", "if", "schema_node", ".", "default", ":", "converted", "[", "'default'", "]", "=", "schema_node", ".", "default", "schema", "=", "definition_handler", "(", "schema_node", ")", "# Parameters shouldn't have a title", "schema", ".", "pop", "(", "'title'", ",", "None", ")", "converted", ".", "update", "(", "schema", ")", "if", "schema", ".", "get", "(", "'type'", ")", "==", "'array'", ":", "converted", "[", "'items'", "]", "=", "{", "'type'", ":", "schema", "[", "'items'", "]", "[", "'type'", "]", "}", "return", "converted" ]
c31a5cc8d5dd112b11dc41ccb6d09b423b537abc
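For illustration, a hedged sketch of what the conversion yields for a simple query parameter (the node name and description are hypothetical, and the converter's self._in is assumed to be 'query'; the 'type' key comes from whatever definition_handler returns):

import colander

limit = colander.SchemaNode(colander.Integer(), name='limit',
                            description='Max results', missing=10)
# convert(limit, definition_handler) would return roughly:
# {'name': 'limit', 'in': 'query', 'required': False,
#  'description': 'Max results', 'type': 'integer'}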
valid
cornice_enable_openapi_view
:param config: Pyramid configurator object
:param api_path: where to expose swagger JSON definition view
:param permission: pyramid permission for those views
:param route_factory: factory for context object for those routes
:param kwargs: kwargs that will be passed to CorniceSwagger's `generate()`

This registers and configures the view that serves api definitions
cornice_swagger/__init__.py
def cornice_enable_openapi_view(
        config,
        api_path='/api-explorer/swagger.json',
        permission=NO_PERMISSION_REQUIRED,
        route_factory=None, **kwargs):
    """
    :param config: Pyramid configurator object
    :param api_path: where to expose swagger JSON definition view
    :param permission: pyramid permission for those views
    :param route_factory: factory for context object for those routes
    :param kwargs: kwargs that will be passed to CorniceSwagger's `generate()`

    This registers and configures the view that serves api definitions
    """
    config.registry.settings['cornice_swagger.spec_kwargs'] = kwargs
    config.add_route('cornice_swagger.open_api_path', api_path,
                     factory=route_factory)
    config.add_view('cornice_swagger.views.open_api_json_view',
                    renderer='json',
                    permission=permission,
                    route_name='cornice_swagger.open_api_path')
[ ":", "param", "config", ":", "Pyramid", "configurator", "object", ":", "param", "api_path", ":", "where", "to", "expose", "swagger", "JSON", "definition", "view", ":", "param", "permission", ":", "pyramid", "permission", "for", "those", "views", ":", "param", "route_factory", ":", "factory", "for", "context", "object", "for", "those", "routes", ":", "param", "kwargs", ":", "kwargs", "that", "will", "be", "passed", "to", "CorniceSwagger", "s", "generate", "()" ]
Cornices/cornice.ext.swagger
python
https://github.com/Cornices/cornice.ext.swagger/blob/c31a5cc8d5dd112b11dc41ccb6d09b423b537abc/cornice_swagger/__init__.py#L38-L62
[ "def", "cornice_enable_openapi_view", "(", "config", ",", "api_path", "=", "'/api-explorer/swagger.json'", ",", "permission", "=", "NO_PERMISSION_REQUIRED", ",", "route_factory", "=", "None", ",", "*", "*", "kwargs", ")", ":", "config", ".", "registry", ".", "settings", "[", "'cornice_swagger.spec_kwargs'", "]", "=", "kwargs", "config", ".", "add_route", "(", "'cornice_swagger.open_api_path'", ",", "api_path", ",", "factory", "=", "route_factory", ")", "config", ".", "add_view", "(", "'cornice_swagger.views.open_api_json_view'", ",", "renderer", "=", "'json'", ",", "permission", "=", "permission", ",", "route_name", "=", "'cornice_swagger.open_api_path'", ")" ]
c31a5cc8d5dd112b11dc41ccb6d09b423b537abc
valid
cornice_enable_openapi_explorer
:param config: Pyramid configurator object
:param api_explorer_path: where to expose Swagger UI interface view
:param permission: pyramid permission for those views
:param route_factory: factory for context object for those routes

This registers and configures the view that serves api explorer
cornice_swagger/__init__.py
def cornice_enable_openapi_explorer(
        config,
        api_explorer_path='/api-explorer',
        permission=NO_PERMISSION_REQUIRED,
        route_factory=None, **kwargs):
    """
    :param config: Pyramid configurator object
    :param api_explorer_path: where to expose Swagger UI interface view
    :param permission: pyramid permission for those views
    :param route_factory: factory for context object for those routes

    This registers and configures the view that serves api explorer
    """
    config.add_route('cornice_swagger.api_explorer_path', api_explorer_path,
                     factory=route_factory)
    config.add_view('cornice_swagger.views.swagger_ui_template_view',
                    permission=permission,
                    route_name='cornice_swagger.api_explorer_path')
[ ":", "param", "config", ":", "Pyramid", "configurator", "object", ":", "param", "api_explorer_path", ":", "where", "to", "expose", "Swagger", "UI", "interface", "view", ":", "param", "permission", ":", "pyramid", "permission", "for", "those", "views", ":", "param", "route_factory", ":", "factory", "for", "context", "object", "for", "those", "routes" ]
Cornices/cornice.ext.swagger
python
https://github.com/Cornices/cornice.ext.swagger/blob/c31a5cc8d5dd112b11dc41ccb6d09b423b537abc/cornice_swagger/__init__.py#L65-L87
[ "def", "cornice_enable_openapi_explorer", "(", "config", ",", "api_explorer_path", "=", "'/api-explorer'", ",", "permission", "=", "NO_PERMISSION_REQUIRED", ",", "route_factory", "=", "None", ",", "*", "*", "kwargs", ")", ":", "config", ".", "add_route", "(", "'cornice_swagger.api_explorer_path'", ",", "api_explorer_path", ",", "factory", "=", "route_factory", ")", "config", ".", "add_view", "(", "'cornice_swagger.views.swagger_ui_template_view'", ",", "permission", "=", "permission", ",", "route_name", "=", "'cornice_swagger.api_explorer_path'", ")" ]
c31a5cc8d5dd112b11dc41ccb6d09b423b537abc
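Both helpers are exposed as Pyramid config directives once cornice_swagger is included, so a typical application wires them up at configuration time; a sketch (the title/version kwargs are passed through to CorniceSwagger's `generate()`):

from pyramid.config import Configurator

def make_app(global_config, **settings):
    config = Configurator(settings=settings)
    config.include('cornice')
    config.include('cornice_swagger')
    # Spec at /api-explorer/swagger.json, UI at /api-explorer (the defaults above)
    config.cornice_enable_openapi_view(title='MyAPI', version='1.0.0')
    config.cornice_enable_openapi_explorer()
    return config.make_wsgi_app()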
valid
trim
Convert tabs to spaces, and remove the extra spaces/tabs
in front of the text in docstrings.

Implementation taken from http://www.python.org/dev/peps/pep-0257/
cornice_swagger/util.py
def trim(docstring):
    """
    Convert tabs to spaces, and remove the extra spaces/tabs
    in front of the text in docstrings.

    Implementation taken from http://www.python.org/dev/peps/pep-0257/
    """
    if not docstring:
        return ''
    # Convert tabs to spaces (following the normal Python rules)
    # and split into a list of lines:
    lines = six.u(docstring).expandtabs().splitlines()
    lines = [line.strip() for line in lines]
    res = six.u('\n').join(lines)
    return res
[ "Remove", "the", "tabs", "to", "spaces", "and", "remove", "the", "extra", "spaces", "/", "tabs", "that", "are", "in", "front", "of", "the", "text", "in", "docstrings", "." ]
Cornices/cornice.ext.swagger
python
https://github.com/Cornices/cornice.ext.swagger/blob/c31a5cc8d5dd112b11dc41ccb6d09b423b537abc/cornice_swagger/util.py#L6-L20
[ "def", "trim", "(", "docstring", ")", ":", "if", "not", "docstring", ":", "return", "''", "# Convert tabs to spaces (following the normal Python rules)", "# and split into a list of lines:", "lines", "=", "six", ".", "u", "(", "docstring", ")", ".", "expandtabs", "(", ")", ".", "splitlines", "(", ")", "lines", "=", "[", "line", ".", "strip", "(", ")", "for", "line", "in", "lines", "]", "res", "=", "six", ".", "u", "(", "'\\n'", ")", ".", "join", "(", "lines", ")", "return", "res" ]
c31a5cc8d5dd112b11dc41ccb6d09b423b537abc
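For example (Python 3 shown, with trim imported from cornice_swagger.util):

trim('\tSummary line.\n    Indented details.')
# -> 'Summary line.\nIndented details.'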
valid
merge_dicts
Merge ``changes`` into ``base`` recursively, without overwriting values.

:param base: the dict that will be altered.
:param changes: changes to update base.
cornice_swagger/util.py
def merge_dicts(base, changes):
    """Merge ``changes`` into ``base`` recursively, without overwriting values.

    :param base: the dict that will be altered.
    :param changes: changes to update base.
    """
    for k, v in changes.items():
        if isinstance(v, dict):
            merge_dicts(base.setdefault(k, {}), v)
        else:
            base.setdefault(k, v)
[ "Merge", "b", "into", "a", "recursively", "without", "overwriting", "values", "." ]
Cornices/cornice.ext.swagger
python
https://github.com/Cornices/cornice.ext.swagger/blob/c31a5cc8d5dd112b11dc41ccb6d09b423b537abc/cornice_swagger/util.py#L32-L42
[ "def", "merge_dicts", "(", "base", ",", "changes", ")", ":", "for", "k", ",", "v", "in", "changes", ".", "items", "(", ")", ":", "if", "isinstance", "(", "v", ",", "dict", ")", ":", "merge_dicts", "(", "base", ".", "setdefault", "(", "k", ",", "{", "}", ")", ",", "v", ")", "else", ":", "base", ".", "setdefault", "(", "k", ",", "v", ")" ]
c31a5cc8d5dd112b11dc41ccb6d09b423b537abc
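Existing values always win; only missing keys are filled in. For example:

base = {'a': 1, 'opts': {'x': 1}}
merge_dicts(base, {'a': 99, 'opts': {'y': 2}, 'b': 3})
# base == {'a': 1, 'opts': {'x': 1, 'y': 2}, 'b': 3}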
valid
get_transition_viewset_method
Create a viewset method for the provided `transition_name`
drf_fsm_transitions/viewset_mixins.py
def get_transition_viewset_method(transition_name, **kwargs):
    '''
    Create a viewset method for the provided `transition_name`
    '''
    @detail_route(methods=['post'], **kwargs)
    def inner_func(self, request, pk=None, **kwargs):
        object = self.get_object()
        transition_method = getattr(object, transition_name)

        transition_method(by=self.request.user)

        if self.save_after_transition:
            object.save()

        serializer = self.get_serializer(object)
        return Response(serializer.data)

    return inner_func
[ "Create", "a", "viewset", "method", "for", "the", "provided", "transition_name" ]
jacobh/drf-fsm-transitions
python
https://github.com/jacobh/drf-fsm-transitions/blob/9cc792d4e570145dd08724bedd32676a6a58cf1f/drf_fsm_transitions/viewset_mixins.py#L5-L22
[ "def", "get_transition_viewset_method", "(", "transition_name", ",", "*", "*", "kwargs", ")", ":", "@", "detail_route", "(", "methods", "=", "[", "'post'", "]", ",", "*", "*", "kwargs", ")", "def", "inner_func", "(", "self", ",", "request", ",", "pk", "=", "None", ",", "*", "*", "kwargs", ")", ":", "object", "=", "self", ".", "get_object", "(", ")", "transition_method", "=", "getattr", "(", "object", ",", "transition_name", ")", "transition_method", "(", "by", "=", "self", ".", "request", ".", "user", ")", "if", "self", ".", "save_after_transition", ":", "object", ".", "save", "(", ")", "serializer", "=", "self", ".", "get_serializer", "(", "object", ")", "return", "Response", "(", "serializer", ".", "data", ")", "return", "inner_func" ]
9cc792d4e570145dd08724bedd32676a6a58cf1f
valid
get_viewset_transition_action_mixin
Find all transitions defined on `model`, then create a corresponding viewset action method for each and apply it to `Mixin`. Finally, return `Mixin`
drf_fsm_transitions/viewset_mixins.py
def get_viewset_transition_action_mixin(model, **kwargs):
    '''
    Find all transitions defined on `model`, then create a corresponding
    viewset action method for each and apply it to `Mixin`. Finally, return
    `Mixin`
    '''
    instance = model()

    class Mixin(object):
        save_after_transition = True

    transitions = instance.get_all_status_transitions()
    transition_names = set(x.name for x in transitions)
    for transition_name in transition_names:
        setattr(
            Mixin,
            transition_name,
            get_transition_viewset_method(transition_name, **kwargs)
        )

    return Mixin
[ "Find", "all", "transitions", "defined", "on", "model", "then", "create", "a", "corresponding", "viewset", "action", "method", "for", "each", "and", "apply", "it", "to", "Mixin", ".", "Finally", "return", "Mixin" ]
jacobh/drf-fsm-transitions
python
https://github.com/jacobh/drf-fsm-transitions/blob/9cc792d4e570145dd08724bedd32676a6a58cf1f/drf_fsm_transitions/viewset_mixins.py#L25-L45
[ "def", "get_viewset_transition_action_mixin", "(", "model", ",", "*", "*", "kwargs", ")", ":", "instance", "=", "model", "(", ")", "class", "Mixin", "(", "object", ")", ":", "save_after_transition", "=", "True", "transitions", "=", "instance", ".", "get_all_status_transitions", "(", ")", "transition_names", "=", "set", "(", "x", ".", "name", "for", "x", "in", "transitions", ")", "for", "transition_name", "in", "transition_names", ":", "setattr", "(", "Mixin", ",", "transition_name", ",", "get_transition_viewset_method", "(", "transition_name", ",", "*", "*", "kwargs", ")", ")", "return", "Mixin" ]
9cc792d4e570145dd08724bedd32676a6a58cf1f
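A typical pairing with Django REST Framework; a sketch, assuming an `Article` model with django-fsm transitions and an `ArticleSerializer` (both names hypothetical):

from rest_framework import viewsets

class ArticleViewSet(get_viewset_transition_action_mixin(Article),
                     viewsets.ModelViewSet):
    queryset = Article.objects.all()
    serializer_class = ArticleSerializer

# Each transition on Article (e.g. 'publish') becomes a POST detail
# route, e.g. POST /articles/<pk>/publish/.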
valid
fresh_cookies
Refresh the project from the original cookiecutter template.
tasks.py
def fresh_cookies(ctx, mold=''):
    """Refresh the project from the original cookiecutter template."""
    mold = mold or "https://github.com/Springerle/py-generic-project.git"  # TODO: URL from config
    tmpdir = os.path.join(tempfile.gettempdir(), "cc-upgrade-pygments-markdown-lexer")

    if os.path.isdir('.git'):
        # TODO: Ensure there are no local unstashed changes
        pass

    # Make a copy of the new mold version
    if os.path.isdir(tmpdir):
        shutil.rmtree(tmpdir)
    if os.path.exists(mold):
        shutil.copytree(mold, tmpdir, ignore=shutil.ignore_patterns(
            ".git", ".svn", "*~",
        ))
    else:
        ctx.run("git clone {} {}".format(mold, tmpdir))

    # Copy recorded "cookiecutter.json" into mold
    shutil.copy2("project.d/cookiecutter.json", tmpdir)

    with pushd('..'):
        ctx.run("cookiecutter --no-input {}".format(tmpdir))
    if os.path.exists('.git'):
        ctx.run("git status")
[ "Refresh", "the", "project", "from", "the", "original", "cookiecutter", "template", "." ]
jhermann/pygments-markdown-lexer
python
https://github.com/jhermann/pygments-markdown-lexer/blob/e651a9a3f664285b01451eb39232b1ad9af65956/tasks.py#L32-L57
[ "def", "fresh_cookies", "(", "ctx", ",", "mold", "=", "''", ")", ":", "mold", "=", "mold", "or", "\"https://github.com/Springerle/py-generic-project.git\"", "# TODO: URL from config", "tmpdir", "=", "os", ".", "path", ".", "join", "(", "tempfile", ".", "gettempdir", "(", ")", ",", "\"cc-upgrade-pygments-markdown-lexer\"", ")", "if", "os", ".", "path", ".", "isdir", "(", "'.git'", ")", ":", "# TODO: Ensure there are no local unstashed changes", "pass", "# Make a copy of the new mold version", "if", "os", ".", "path", ".", "isdir", "(", "tmpdir", ")", ":", "shutil", ".", "rmtree", "(", "tmpdir", ")", "if", "os", ".", "path", ".", "exists", "(", "mold", ")", ":", "shutil", ".", "copytree", "(", "mold", ",", "tmpdir", ",", "ignore", "=", "shutil", ".", "ignore_patterns", "(", "\".git\"", ",", "\".svn\"", ",", "\"*~\"", ",", ")", ")", "else", ":", "ctx", ".", "run", "(", "\"git clone {} {}\"", ".", "format", "(", "mold", ",", "tmpdir", ")", ")", "# Copy recorded \"cookiecutter.json\" into mold", "shutil", ".", "copy2", "(", "\"project.d/cookiecutter.json\"", ",", "tmpdir", ")", "with", "pushd", "(", "'..'", ")", ":", "ctx", ".", "run", "(", "\"cookiecutter --no-input {}\"", ".", "format", "(", "tmpdir", ")", ")", "if", "os", ".", "path", ".", "exists", "(", "'.git'", ")", ":", "ctx", ".", "run", "(", "\"git status\"", ")" ]
e651a9a3f664285b01451eb39232b1ad9af65956
valid
ci
Perform continuous integration tasks.
tasks.py
def ci(ctx):
    """Perform continuous integration tasks."""
    opts = ['']

    # 'tox' makes no sense in Travis
    if os.environ.get('TRAVIS', '').lower() == 'true':
        opts += ['test.pytest']
    else:
        opts += ['test.tox']

    ctx.run("invoke --echo --pty clean --all build --docs check --reports{}".format(' '.join(opts)))
[ "Perform", "continuous", "integration", "tasks", "." ]
jhermann/pygments-markdown-lexer
python
https://github.com/jhermann/pygments-markdown-lexer/blob/e651a9a3f664285b01451eb39232b1ad9af65956/tasks.py#L65-L75
[ "def", "ci", "(", "ctx", ")", ":", "opts", "=", "[", "''", "]", "# 'tox' makes no sense in Travis", "if", "os", ".", "environ", ".", "get", "(", "'TRAVIS'", ",", "''", ")", ".", "lower", "(", ")", "==", "'true'", ":", "opts", "+=", "[", "'test.pytest'", "]", "else", ":", "opts", "+=", "[", "'test.tox'", "]", "ctx", ".", "run", "(", "\"invoke --echo --pty clean --all build --docs check --reports{}\"", ".", "format", "(", "' '", ".", "join", "(", "opts", ")", ")", ")" ]
e651a9a3f664285b01451eb39232b1ad9af65956
valid
_build_metadata
Return project's metadata as a dict.
setup.py
def _build_metadata():  # pylint: disable=too-many-locals, too-many-branches
    "Return project's metadata as a dict."
    # Handle metadata in package source
    expected_keys = ('url', 'version', 'license', 'author', 'author_email', 'long_description', 'keywords')
    metadata = {}
    with io.open(srcfile('src', package_name, '__init__.py'), encoding='utf-8') as handle:
        pkg_init = handle.read()
        # Get default long description from docstring
        metadata['long_description'] = re.search(r'^"""(.+?)^"""$', pkg_init, re.DOTALL|re.MULTILINE).group(1)
        for line in pkg_init.splitlines():
            match = re.match(r"""^__({0})__ += (?P<q>['"])(.+?)(?P=q)$""".format('|'.join(expected_keys)), line)
            if match:
                metadata[match.group(1)] = match.group(3)
    if not all(i in metadata for i in expected_keys):
        raise RuntimeError("Missing or bad metadata in '{0}' package: {1}"
                           .format(name, ', '.join(sorted(set(expected_keys) - set(metadata.keys()))),))
    text = metadata['long_description'].strip()
    if text:
        metadata['description'], text = text.split('.', 1)
        metadata['description'] = ' '.join(metadata['description'].split()).strip() + '.'  # normalize whitespace
        metadata['long_description'] = textwrap.dedent(text).strip()
    metadata['keywords'] = metadata['keywords'].replace(',', ' ').strip().split()

    # Load requirements files
    requirements_files = dict(
        install = 'requirements.txt',
        setup = 'setup-requirements.txt',
        test = 'test-requirements.txt',
    )
    requires = {}
    for key, filename in requirements_files.items():
        requires[key] = []
        if os.path.exists(srcfile(filename)):
            with io.open(srcfile(filename), encoding='utf-8') as handle:
                for line in handle:
                    line = line.strip()
                    if line and not line.startswith('#'):
                        if any(line.startswith(i) for i in ('-e', 'http://', 'https://')):
                            line = line.split('#egg=')[1]
                        requires[key].append(line)
    if not any('pytest' == re.split('[\t ,<=>]', i.lower())[0] for i in requires['test']):
        requires['test'].append('pytest')  # add missing requirement

    # CLI entry points
    console_scripts = []
    for path, dirs, files in os.walk(srcfile('src', package_name)):
        dirs = [i for i in dirs if not i.startswith('.')]
        if '__main__.py' in files:
            path = path[len(srcfile('src') + os.sep):]
            appname = path.split(os.sep)[-1]
            with io.open(srcfile('src', path, '__main__.py'), encoding='utf-8') as handle:
                for line in handle.readlines():
                    match = re.match(r"""^__app_name__ += (?P<q>['"])(.+?)(?P=q)$""", line)
                    if match:
                        appname = match.group(2)
            console_scripts.append('{0} = {1}.__main__:cli'.format(appname, path.replace(os.sep, '.')))

    # Add some common files to EGG-INFO
    candidate_files = [
        'LICENSE', 'NOTICE',
        'README', 'README.md', 'README.rst', 'README.txt',
        'CHANGES', 'CHANGELOG', 'debian/changelog',
    ]
    data_files = defaultdict(list)
    for filename in candidate_files:
        if os.path.exists(srcfile(filename)):
            data_files['EGG-INFO'].append(filename)

    # Complete project metadata
    classifiers = []
    for classifiers_txt in ('classifiers.txt', 'project.d/classifiers.txt'):
        classifiers_txt = srcfile(classifiers_txt)
        if os.path.exists(classifiers_txt):
            with io.open(classifiers_txt, encoding='utf-8') as handle:
                classifiers = [i.strip() for i in handle if i.strip() and not i.startswith('#')]
            break
    entry_points.setdefault('console_scripts', []).extend(console_scripts)

    metadata.update(dict(
        name = name,
        package_dir = {'': 'src'},
        packages = find_packages(srcfile('src'), exclude=['tests']),
        data_files = data_files.items(),
        zip_safe = False,
        include_package_data = True,
        install_requires = requires['install'],
        setup_requires = requires['setup'],
        tests_require = requires['test'],
        classifiers = classifiers,
        cmdclass = dict(
            test = PyTest,
        ),
        entry_points = entry_points,
    ))

    return metadata
[ "Return", "project", "s", "metadata", "as", "a", "dict", "." ]
jhermann/pygments-markdown-lexer
python
https://github.com/jhermann/pygments-markdown-lexer/blob/e651a9a3f664285b01451eb39232b1ad9af65956/setup.py#L91-L187
[ "def", "_build_metadata", "(", ")", ":", "# pylint: disable=too-many-locals, too-many-branches", "# Handle metadata in package source", "expected_keys", "=", "(", "'url'", ",", "'version'", ",", "'license'", ",", "'author'", ",", "'author_email'", ",", "'long_description'", ",", "'keywords'", ")", "metadata", "=", "{", "}", "with", "io", ".", "open", "(", "srcfile", "(", "'src'", ",", "package_name", ",", "'__init__.py'", ")", ",", "encoding", "=", "'utf-8'", ")", "as", "handle", ":", "pkg_init", "=", "handle", ".", "read", "(", ")", "# Get default long description from docstring", "metadata", "[", "'long_description'", "]", "=", "re", ".", "search", "(", "r'^\"\"\"(.+?)^\"\"\"$'", ",", "pkg_init", ",", "re", ".", "DOTALL", "|", "re", ".", "MULTILINE", ")", ".", "group", "(", "1", ")", "for", "line", "in", "pkg_init", ".", "splitlines", "(", ")", ":", "match", "=", "re", ".", "match", "(", "r\"\"\"^__({0})__ += (?P<q>['\"])(.+?)(?P=q)$\"\"\"", ".", "format", "(", "'|'", ".", "join", "(", "expected_keys", ")", ")", ",", "line", ")", "if", "match", ":", "metadata", "[", "match", ".", "group", "(", "1", ")", "]", "=", "match", ".", "group", "(", "3", ")", "if", "not", "all", "(", "i", "in", "metadata", "for", "i", "in", "expected_keys", ")", ":", "raise", "RuntimeError", "(", "\"Missing or bad metadata in '{0}' package: {1}\"", ".", "format", "(", "name", ",", "', '", ".", "join", "(", "sorted", "(", "set", "(", "expected_keys", ")", "-", "set", "(", "metadata", ".", "keys", "(", ")", ")", ")", ")", ",", ")", ")", "text", "=", "metadata", "[", "'long_description'", "]", ".", "strip", "(", ")", "if", "text", ":", "metadata", "[", "'description'", "]", ",", "text", "=", "text", ".", "split", "(", "'.'", ",", "1", ")", "metadata", "[", "'description'", "]", "=", "' '", ".", "join", "(", "metadata", "[", "'description'", "]", ".", "split", "(", ")", ")", ".", "strip", "(", ")", "+", "'.'", "# normalize whitespace", "metadata", "[", "'long_description'", "]", "=", "textwrap", ".", "dedent", "(", "text", ")", ".", "strip", "(", ")", "metadata", "[", "'keywords'", "]", "=", "metadata", "[", "'keywords'", "]", ".", "replace", "(", "','", ",", "' '", ")", ".", "strip", "(", ")", ".", "split", "(", ")", "# Load requirements files", "requirements_files", "=", "dict", "(", "install", "=", "'requirements.txt'", ",", "setup", "=", "'setup-requirements.txt'", ",", "test", "=", "'test-requirements.txt'", ",", ")", "requires", "=", "{", "}", "for", "key", ",", "filename", "in", "requirements_files", ".", "items", "(", ")", ":", "requires", "[", "key", "]", "=", "[", "]", "if", "os", ".", "path", ".", "exists", "(", "srcfile", "(", "filename", ")", ")", ":", "with", "io", ".", "open", "(", "srcfile", "(", "filename", ")", ",", "encoding", "=", "'utf-8'", ")", "as", "handle", ":", "for", "line", "in", "handle", ":", "line", "=", "line", ".", "strip", "(", ")", "if", "line", "and", "not", "line", ".", "startswith", "(", "'#'", ")", ":", "if", "any", "(", "line", ".", "startswith", "(", "i", ")", "for", "i", "in", "(", "'-e'", ",", "'http://'", ",", "'https://'", ")", ")", ":", "line", "=", "line", ".", "split", "(", "'#egg='", ")", "[", "1", "]", "requires", "[", "key", "]", ".", "append", "(", "line", ")", "if", "not", "any", "(", "'pytest'", "==", "re", ".", "split", "(", "'[\\t ,<=>]'", ",", "i", ".", "lower", "(", ")", ")", "[", "0", "]", "for", "i", "in", "requires", "[", "'test'", "]", ")", ":", "requires", "[", "'test'", "]", ".", "append", "(", "'pytest'", ")", "# add missing requirement", "# CLI entry 
points", "console_scripts", "=", "[", "]", "for", "path", ",", "dirs", ",", "files", "in", "os", ".", "walk", "(", "srcfile", "(", "'src'", ",", "package_name", ")", ")", ":", "dirs", "=", "[", "i", "for", "i", "in", "dirs", "if", "not", "i", ".", "startswith", "(", "'.'", ")", "]", "if", "'__main__.py'", "in", "files", ":", "path", "=", "path", "[", "len", "(", "srcfile", "(", "'src'", ")", "+", "os", ".", "sep", ")", ":", "]", "appname", "=", "path", ".", "split", "(", "os", ".", "sep", ")", "[", "-", "1", "]", "with", "io", ".", "open", "(", "srcfile", "(", "'src'", ",", "path", ",", "'__main__.py'", ")", ",", "encoding", "=", "'utf-8'", ")", "as", "handle", ":", "for", "line", "in", "handle", ".", "readlines", "(", ")", ":", "match", "=", "re", ".", "match", "(", "r\"\"\"^__app_name__ += (?P<q>['\"])(.+?)(?P=q)$\"\"\"", ",", "line", ")", "if", "match", ":", "appname", "=", "match", ".", "group", "(", "2", ")", "console_scripts", ".", "append", "(", "'{0} = {1}.__main__:cli'", ".", "format", "(", "appname", ",", "path", ".", "replace", "(", "os", ".", "sep", ",", "'.'", ")", ")", ")", "# Add some common files to EGG-INFO", "candidate_files", "=", "[", "'LICENSE'", ",", "'NOTICE'", ",", "'README'", ",", "'README.md'", ",", "'README.rst'", ",", "'README.txt'", ",", "'CHANGES'", ",", "'CHANGELOG'", ",", "'debian/changelog'", ",", "]", "data_files", "=", "defaultdict", "(", "list", ")", "for", "filename", "in", "candidate_files", ":", "if", "os", ".", "path", ".", "exists", "(", "srcfile", "(", "filename", ")", ")", ":", "data_files", "[", "'EGG-INFO'", "]", ".", "append", "(", "filename", ")", "# Complete project metadata", "classifiers", "=", "[", "]", "for", "classifiers_txt", "in", "(", "'classifiers.txt'", ",", "'project.d/classifiers.txt'", ")", ":", "classifiers_txt", "=", "srcfile", "(", "classifiers_txt", ")", "if", "os", ".", "path", ".", "exists", "(", "classifiers_txt", ")", ":", "with", "io", ".", "open", "(", "classifiers_txt", ",", "encoding", "=", "'utf-8'", ")", "as", "handle", ":", "classifiers", "=", "[", "i", ".", "strip", "(", ")", "for", "i", "in", "handle", "if", "i", ".", "strip", "(", ")", "and", "not", "i", ".", "startswith", "(", "'#'", ")", "]", "break", "entry_points", ".", "setdefault", "(", "'console_scripts'", ",", "[", "]", ")", ".", "extend", "(", "console_scripts", ")", "metadata", ".", "update", "(", "dict", "(", "name", "=", "name", ",", "package_dir", "=", "{", "''", ":", "'src'", "}", ",", "packages", "=", "find_packages", "(", "srcfile", "(", "'src'", ")", ",", "exclude", "=", "[", "'tests'", "]", ")", ",", "data_files", "=", "data_files", ".", "items", "(", ")", ",", "zip_safe", "=", "False", ",", "include_package_data", "=", "True", ",", "install_requires", "=", "requires", "[", "'install'", "]", ",", "setup_requires", "=", "requires", "[", "'setup'", "]", ",", "tests_require", "=", "requires", "[", "'test'", "]", ",", "classifiers", "=", "classifiers", ",", "cmdclass", "=", "dict", "(", "test", "=", "PyTest", ",", ")", ",", "entry_points", "=", "entry_points", ",", ")", ")", "return", "metadata" ]
e651a9a3f664285b01451eb39232b1ad9af65956
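The dunder-matching regex above expects assignments like these in the package's __init__.py; a hypothetical module header that satisfies all expected_keys (long_description is taken from the module docstring rather than a dunder):

"""My Project.

A longer description that becomes ``long_description``.
"""
__url__ = 'https://example.com/my-project'
__version__ = '1.0.0'
__license__ = 'Apache 2.0'
__author__ = 'Jane Doe'
__author_email__ = 'jane@example.com'
__keywords__ = 'example, demo keywords'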
valid
py_hash
Generate a number in the range [0, num_buckets).

Args:
    key (int): The key to hash.
    num_buckets (int): Number of buckets to use.

Returns:
    The bucket number `key` computes to.

Raises:
    ValueError: If `num_buckets` is not a positive number.
jump/__init__.py
def py_hash(key, num_buckets):
    """Generate a number in the range [0, num_buckets).

    Args:
        key (int): The key to hash.
        num_buckets (int): Number of buckets to use.

    Returns:
        The bucket number `key` computes to.

    Raises:
        ValueError: If `num_buckets` is not a positive number.
    """
    b, j = -1, 0

    if num_buckets < 1:
        raise ValueError('num_buckets must be a positive number')

    while j < num_buckets:
        b = int(j)
        key = ((key * long(2862933555777941757)) + 1) & 0xffffffffffffffff
        j = float(b + 1) * (float(1 << 31) / float((key >> 33) + 1))

    return int(b)
[ "Generate", "a", "number", "in", "the", "range", "[", "0", "num_buckets", ")", "." ]
lithammer/python-jump-consistent-hash
python
https://github.com/lithammer/python-jump-consistent-hash/blob/62d3c7c1736971a779769cbbae87598b2f3992b9/jump/__init__.py#L19-L42
[ "def", "py_hash", "(", "key", ",", "num_buckets", ")", ":", "b", ",", "j", "=", "-", "1", ",", "0", "if", "num_buckets", "<", "1", ":", "raise", "ValueError", "(", "'num_buckets must be a positive number'", ")", "while", "j", "<", "num_buckets", ":", "b", "=", "int", "(", "j", ")", "key", "=", "(", "(", "key", "*", "long", "(", "2862933555777941757", ")", ")", "+", "1", ")", "&", "0xffffffffffffffff", "j", "=", "float", "(", "b", "+", "1", ")", "*", "(", "float", "(", "1", "<<", "31", ")", "/", "float", "(", "(", "key", ">>", "33", ")", "+", "1", ")", ")", "return", "int", "(", "b", ")" ]
62d3c7c1736971a779769cbbae87598b2f3992b9
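Note that the snapshot calls long(), which only exists on Python 2; the surrounding module presumably shims it on Python 3 (an assumption here). The defining property of jump consistent hashing is that growing the bucket count from n to n+1 moves only about 1/(n+1) of the keys:

py_hash(42, 57)   # deterministic for a given (key, num_buckets) pair
moved = sum(py_hash(k, 8) != py_hash(k, 9) for k in range(10000))
# moved is roughly 10000 / 9, i.e. about 1111 keys remapped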
valid
setup
Initializer for Sphinx extension API. See http://www.sphinx-doc.org/en/stable/extdev/index.html#dev-extensions.
src/pygments_markdown_lexer/__init__.py
def setup(app):
    """ Initializer for Sphinx extension API.

        See http://www.sphinx-doc.org/en/stable/extdev/index.html#dev-extensions.
    """
    lexer = MarkdownLexer()
    for alias in lexer.aliases:
        app.add_lexer(alias, lexer)

    return dict(version=__version__)
[ "Initializer", "for", "Sphinx", "extension", "API", "." ]
jhermann/pygments-markdown-lexer
python
https://github.com/jhermann/pygments-markdown-lexer/blob/e651a9a3f664285b01451eb39232b1ad9af65956/src/pygments_markdown_lexer/__init__.py#L34-L43
[ "def", "setup", "(", "app", ")", ":", "lexer", "=", "MarkdownLexer", "(", ")", "for", "alias", "in", "lexer", ".", "aliases", ":", "app", ".", "add_lexer", "(", "alias", ",", "lexer", ")", "return", "dict", "(", "version", "=", "__version__", ")" ]
e651a9a3f664285b01451eb39232b1ad9af65956
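Activating the extension is then just a matter of listing the package in a Sphinx project's conf.py; Sphinx imports the module and calls setup(app) itself:

# conf.py
extensions = [
    'pygments_markdown_lexer',
]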
valid
MdStat.load
Return a dict of stats.
pymdstat/pymdstat.py
def load(self):
    """Return a dict of stats."""
    ret = {}

    # Read the mdstat file
    with open(self.get_path(), 'r') as f:
        # lines is a list of line (with \n)
        lines = f.readlines()

    # First line: get the personalities
    # The "Personalities" line tells you what RAID level the kernel currently supports.
    # This can be changed by either changing the raid modules or recompiling the kernel.
    # Possible personalities include: [raid0] [raid1] [raid4] [raid5] [raid6] [linear] [multipath] [faulty]
    ret['personalities'] = self.get_personalities(lines[0])

    # Second to last before line: Array definition
    ret['arrays'] = self.get_arrays(lines[1:-1], ret['personalities'])

    # Save the file content as it for the __str__ method
    self.content = reduce(lambda x, y: x + y, lines)

    return ret
[ "Return", "a", "dict", "of", "stats", "." ]
nicolargo/pymdstat
python
https://github.com/nicolargo/pymdstat/blob/97fd47117e687463205fb562269feb9f95d59620/pymdstat/pymdstat.py#L75-L96
[ "def", "load", "(", "self", ")", ":", "ret", "=", "{", "}", "# Read the mdstat file", "with", "open", "(", "self", ".", "get_path", "(", ")", ",", "'r'", ")", "as", "f", ":", "# lines is a list of line (with \\n)", "lines", "=", "f", ".", "readlines", "(", ")", "# First line: get the personalities", "# The \"Personalities\" line tells you what RAID level the kernel currently supports.", "# This can be changed by either changing the raid modules or recompiling the kernel.", "# Possible personalities include: [raid0] [raid1] [raid4] [raid5] [raid6] [linear] [multipath] [faulty]", "ret", "[", "'personalities'", "]", "=", "self", ".", "get_personalities", "(", "lines", "[", "0", "]", ")", "# Second to last before line: Array definition", "ret", "[", "'arrays'", "]", "=", "self", ".", "get_arrays", "(", "lines", "[", "1", ":", "-", "1", "]", ",", "ret", "[", "'personalities'", "]", ")", "# Save the file content as it for the __str__ method", "self", ".", "content", "=", "reduce", "(", "lambda", "x", ",", "y", ":", "x", "+", "y", ",", "lines", ")", "return", "ret" ]
97fd47117e687463205fb562269feb9f95d59620
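A hedged usage sketch (get_stats is the accessor the package documents; the exact dict layout follows the fields built by load above):

from pymdstat import MdStat

mds = MdStat()           # parses /proc/mdstat by default
stats = mds.get_stats()  # {'personalities': [...], 'arrays': {...}}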
valid
MdStat.get_personalities
Return a list of personalities read from the input line.
pymdstat/pymdstat.py
def get_personalities(self, line):
    """Return a list of personalities read from the input line."""
    return [split('\W+', i)[1] for i in line.split(':')[1].split(' ') if i.startswith('[')]
[ "Return", "a", "list", "of", "personalities", "readed", "from", "the", "input", "line", "." ]
nicolargo/pymdstat
python
https://github.com/nicolargo/pymdstat/blob/97fd47117e687463205fb562269feb9f95d59620/pymdstat/pymdstat.py#L98-L100
[ "def", "get_personalities", "(", "self", ",", "line", ")", ":", "return", "[", "split", "(", "'\\W+'", ",", "i", ")", "[", "1", "]", "for", "i", "in", "line", ".", "split", "(", "':'", ")", "[", "1", "]", ".", "split", "(", "' '", ")", "if", "i", ".", "startswith", "(", "'['", ")", "]" ]
97fd47117e687463205fb562269feb9f95d59620
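For example, the first line of a typical /proc/mdstat parses as:

line = 'Personalities : [raid1] [raid6] [raid5] [raid4]\n'
# get_personalities(line) -> ['raid1', 'raid6', 'raid5', 'raid4']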
valid
MdStat.get_arrays
Return a dict of arrays.
pymdstat/pymdstat.py
def get_arrays(self, lines, personalities=[]):
    """Return a dict of arrays."""
    ret = {}

    i = 0
    while i < len(lines):
        try:
            # First array line: get the md device
            md_device = self.get_md_device_name(lines[i])
        except IndexError:
            # No array detected
            pass
        else:
            # Array detected
            if md_device is not None:
                # md device line
                ret[md_device] = self.get_md_device(lines[i], personalities)
                # md config/status line
                i += 1
                ret[md_device].update(self.get_md_status(lines[i]))
        i += 1

    return ret
[ "Return", "a", "dict", "of", "arrays", "." ]
nicolargo/pymdstat
python
https://github.com/nicolargo/pymdstat/blob/97fd47117e687463205fb562269feb9f95d59620/pymdstat/pymdstat.py#L102-L124
[ "def", "get_arrays", "(", "self", ",", "lines", ",", "personalities", "=", "[", "]", ")", ":", "ret", "=", "{", "}", "i", "=", "0", "while", "i", "<", "len", "(", "lines", ")", ":", "try", ":", "# First array line: get the md device", "md_device", "=", "self", ".", "get_md_device_name", "(", "lines", "[", "i", "]", ")", "except", "IndexError", ":", "# No array detected", "pass", "else", ":", "# Array detected", "if", "md_device", "is", "not", "None", ":", "# md device line", "ret", "[", "md_device", "]", "=", "self", ".", "get_md_device", "(", "lines", "[", "i", "]", ",", "personalities", ")", "# md config/status line", "i", "+=", "1", "ret", "[", "md_device", "]", ".", "update", "(", "self", ".", "get_md_status", "(", "lines", "[", "i", "]", ")", ")", "i", "+=", "1", "return", "ret" ]
97fd47117e687463205fb562269feb9f95d59620
valid
MdStat.get_md_device
Return a dict describing the md device defined in the line.
pymdstat/pymdstat.py
def get_md_device(self, line, personalities=[]):
    """Return a dict describing the md device defined in the line."""
    ret = {}

    splitted = split('\W+', line)
    # Raid status
    # Active or 'started'. An inactive array is usually faulty.
    # Stopped arrays aren't visible here.
    ret['status'] = splitted[1]
    if splitted[2] in personalities:
        # Raid type (ex: RAID5)
        ret['type'] = splitted[2]
        # Array's components
        ret['components'] = self.get_components(line, with_type=True)
    else:
        # Raid type (ex: RAID5)
        ret['type'] = None
        # Array's components
        ret['components'] = self.get_components(line, with_type=False)

    return ret
[ "Return", "a", "dict", "of", "md", "device", "define", "in", "the", "line", "." ]
nicolargo/pymdstat
python
https://github.com/nicolargo/pymdstat/blob/97fd47117e687463205fb562269feb9f95d59620/pymdstat/pymdstat.py#L126-L146
[ "def", "get_md_device", "(", "self", ",", "line", ",", "personalities", "=", "[", "]", ")", ":", "ret", "=", "{", "}", "splitted", "=", "split", "(", "'\\W+'", ",", "line", ")", "# Raid status", "# Active or 'started'. An inactive array is usually faulty.", "# Stopped arrays aren't visible here.", "ret", "[", "'status'", "]", "=", "splitted", "[", "1", "]", "if", "splitted", "[", "2", "]", "in", "personalities", ":", "# Raid type (ex: RAID5)", "ret", "[", "'type'", "]", "=", "splitted", "[", "2", "]", "# Array's components", "ret", "[", "'components'", "]", "=", "self", ".", "get_components", "(", "line", ",", "with_type", "=", "True", ")", "else", ":", "# Raid type (ex: RAID5)", "ret", "[", "'type'", "]", "=", "None", "# Array's components", "ret", "[", "'components'", "]", "=", "self", ".", "get_components", "(", "line", ",", "with_type", "=", "False", ")", "return", "ret" ]
97fd47117e687463205fb562269feb9f95d59620
valid
MdStat.get_md_status
Return a dict of the md status defined in the line.
pymdstat/pymdstat.py
def get_md_status(self, line):
    """Return a dict of the md status defined in the line."""
    ret = {}

    splitted = split('\W+', line)
    if len(splitted) < 7:
        ret['available'] = None
        ret['used'] = None
        ret['config'] = None
    else:
        # The final 2 entries on this line: [n/m] [UUUU_]
        # [n/m] means that ideally the array would have n devices however, currently, m devices are in use.
        # Obviously when m >= n then things are good.
        ret['available'] = splitted[-4]
        ret['used'] = splitted[-3]
        # [UUUU_] represents the status of each device, either U for up or _ for down.
        ret['config'] = splitted[-2]

    return ret

def get_md_status(self, line):
    """Return a dict of the md status defined in the line."""
    ret = {}

    splitted = split('\W+', line)
    if len(splitted) < 7:
        ret['available'] = None
        ret['used'] = None
        ret['config'] = None
    else:
        # The final 2 entries on this line: [n/m] [UUUU_]
        # [n/m] means that ideally the array would have n devices however, currently, m devices are in use.
        # Obviously when m >= n then things are good.
        ret['available'] = splitted[-4]
        ret['used'] = splitted[-3]
        # [UUUU_] represents the status of each device, either U for up or _ for down.
        ret['config'] = splitted[-2]

    return ret
[ "Return", "a", "dict", "of", "md", "status", "define", "in", "the", "line", "." ]
nicolargo/pymdstat
python
https://github.com/nicolargo/pymdstat/blob/97fd47117e687463205fb562269feb9f95d59620/pymdstat/pymdstat.py#L148-L166
[ "def", "get_md_status", "(", "self", ",", "line", ")", ":", "ret", "=", "{", "}", "splitted", "=", "split", "(", "'\\W+'", ",", "line", ")", "if", "len", "(", "splitted", ")", "<", "7", ":", "ret", "[", "'available'", "]", "=", "None", "ret", "[", "'used'", "]", "=", "None", "ret", "[", "'config'", "]", "=", "None", "else", ":", "# The final 2 entries on this line: [n/m] [UUUU_]", "# [n/m] means that ideally the array would have n devices however, currently, m devices are in use.", "# Obviously when m >= n then things are good.", "ret", "[", "'available'", "]", "=", "splitted", "[", "-", "4", "]", "ret", "[", "'used'", "]", "=", "splitted", "[", "-", "3", "]", "# [UUUU_] represents the status of each device, either U for up or _ for down.", "ret", "[", "'config'", "]", "=", "splitted", "[", "-", "2", "]", "return", "ret" ]
97fd47117e687463205fb562269feb9f95d59620
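To see why the method indexes from the end, here is the split on a hedged sample status line; the leading whitespace and the trailing bracket both produce empty strings, which is why a full line splits into exactly seven tokens:

from re import split

line = "      104320 blocks [2/2] [UU]"   # illustrative status line
tokens = split(r'\W+', line)
print(tokens)                             # ['', '104320', 'blocks', '2', '2', 'UU', '']
print(tokens[-4], tokens[-3], tokens[-2]) # 2 2 UU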
valid
MdStat.get_components
Return a dict of components in the line.

key: device name (ex: 'sdc1')
value: device role number
pymdstat/pymdstat.py
def get_components(self, line, with_type=True):
    """Return a dict of components in the line.

    key: device name (ex: 'sdc1')
    value: device role number
    """
    ret = {}

    # Ignore (F) (see test 08)
    line2 = reduce(lambda x, y: x + y, split('\(.+\)', line))
    if with_type:
        splitted = split('\W+', line2)[3:]
    else:
        splitted = split('\W+', line2)[2:]
    ret = dict(zip(splitted[0::2], splitted[1::2]))

    return ret

def get_components(self, line, with_type=True):
    """Return a dict of components in the line.

    key: device name (ex: 'sdc1')
    value: device role number
    """
    ret = {}

    # Ignore (F) (see test 08)
    line2 = reduce(lambda x, y: x + y, split('\(.+\)', line))
    if with_type:
        splitted = split('\W+', line2)[3:]
    else:
        splitted = split('\W+', line2)[2:]
    ret = dict(zip(splitted[0::2], splitted[1::2]))

    return ret
[ "Return", "a", "dict", "of", "components", "in", "the", "line", "." ]
nicolargo/pymdstat
python
https://github.com/nicolargo/pymdstat/blob/97fd47117e687463205fb562269feb9f95d59620/pymdstat/pymdstat.py#L168-L184
[ "def", "get_components", "(", "self", ",", "line", ",", "with_type", "=", "True", ")", ":", "ret", "=", "{", "}", "# Ignore (F) (see test 08)", "line2", "=", "reduce", "(", "lambda", "x", ",", "y", ":", "x", "+", "y", ",", "split", "(", "'\\(.+\\)'", ",", "line", ")", ")", "if", "with_type", ":", "splitted", "=", "split", "(", "'\\W+'", ",", "line2", ")", "[", "3", ":", "]", "else", ":", "splitted", "=", "split", "(", "'\\W+'", ",", "line2", ")", "[", "2", ":", "]", "ret", "=", "dict", "(", "zip", "(", "splitted", "[", "0", ":", ":", "2", "]", ",", "splitted", "[", "1", ":", ":", "2", "]", ")", ")", "return", "ret" ]
97fd47117e687463205fb562269feb9f95d59620
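The even/odd slice-and-zip turns alternating name/role tokens into a mapping; a sketch on the same illustrative device line used above:

from re import split

line = "md1 : active raid1 sdb2[1] sda2[0]"   # illustrative device line
tokens = split(r'\W+', line)[3:]              # skip name, status, personality
print(dict(zip(tokens[0::2], tokens[1::2])))  # {'sdb2': '1', 'sda2': '0'}

zip() stops at the shorter slice, so the stray trailing empty string from the final bracket is dropped automatically.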
valid
register_receivers
Register signal receivers which send events.
invenio_stats/receivers.py
def register_receivers(app, config):
    """Register signal receivers which send events."""
    for event_name, event_config in config.items():
        event_builders = [
            obj_or_import_string(func)
            for func in event_config.get('event_builders', [])
        ]

        signal = obj_or_import_string(event_config['signal'])
        signal.connect(
            EventEmmiter(event_name, event_builders), sender=app, weak=False
        )

def register_receivers(app, config):
    """Register signal receivers which send events."""
    for event_name, event_config in config.items():
        event_builders = [
            obj_or_import_string(func)
            for func in event_config.get('event_builders', [])
        ]

        signal = obj_or_import_string(event_config['signal'])
        signal.connect(
            EventEmmiter(event_name, event_builders), sender=app, weak=False
        )
[ "Register", "signal", "receivers", "which", "send", "events", "." ]
inveniosoftware/invenio-stats
python
https://github.com/inveniosoftware/invenio-stats/blob/d877ae5462084abb4a28a20f1ebb3d636769c1bc/invenio_stats/receivers.py#L42-L53
[ "def", "register_receivers", "(", "app", ",", "config", ")", ":", "for", "event_name", ",", "event_config", "in", "config", ".", "items", "(", ")", ":", "event_builders", "=", "[", "obj_or_import_string", "(", "func", ")", "for", "func", "in", "event_config", ".", "get", "(", "'event_builders'", ",", "[", "]", ")", "]", "signal", "=", "obj_or_import_string", "(", "event_config", "[", "'signal'", "]", ")", "signal", ".", "connect", "(", "EventEmmiter", "(", "event_name", ",", "event_builders", ")", ",", "sender", "=", "app", ",", "weak", "=", "False", ")" ]
d877ae5462084abb4a28a20f1ebb3d636769c1bc
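A minimal sketch of the config shape this function consumes, assuming blinker signals; the event name, signal, and builder below are illustrative placeholders, not names guaranteed by invenio-stats:

from blinker import Namespace

_signals = Namespace()
file_downloaded = _signals.signal('file-downloaded')

def build_event(event, sender_app, **kwargs):   # hypothetical builder
    event['visitor'] = 'anonymous'
    return event

config = {
    'file-download': {
        'signal': file_downloaded,        # a signal object or dotted import path
        'event_builders': [build_event],  # callables or dotted import paths
    },
}

Because obj_or_import_string accepts both objects and dotted strings, the config can hold either form.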
valid
RuleChecker.check
:param query:
protector/rules/prevent_delete.py
def check(self, query):
    """
    :param query:
    """
    if query.get_type() != Keyword.DELETE:
        return Ok(True)
    return Err("Delete queries are forbidden.")

def check(self, query):
    """
    :param query:
    """
    if query.get_type() != Keyword.DELETE:
        return Ok(True)
    return Err("Delete queries are forbidden.")
[ ":", "param", "query", ":" ]
trivago/Protector
python
https://github.com/trivago/Protector/blob/7ebe7bde965e27737b961a0cb5740724d174fdc7/protector/rules/prevent_delete.py#L17-L24
[ "def", "check", "(", "self", ",", "query", ")", ":", "if", "query", ".", "get_type", "(", ")", "!=", "Keyword", ".", "DELETE", ":", "return", "Ok", "(", "True", ")", "return", "Err", "(", "\"Delete queries are forbidden.\"", ")" ]
7ebe7bde965e27737b961a0cb5740724d174fdc7
valid
InternalMailbox.set_scheduled
Returns True if state was successfully changed from idle to scheduled.
actors/internal/mailbox.py
def set_scheduled(self):
    """
    Returns True if state was successfully changed from idle to scheduled.
    """
    with self._idle_lock:
        if self._idle:
            self._idle = False
            return True
        return False

def set_scheduled(self):
    """
    Returns True if state was successfully changed from idle to scheduled.
    """
    with self._idle_lock:
        if self._idle:
            self._idle = False
            return True
        return False
[ "Returns", "True", "if", "state", "was", "successfully", "changed", "from", "idle", "to", "scheduled", "." ]
tamland/python-actors
python
https://github.com/tamland/python-actors/blob/9f826ab2947c665d61363a6ebc401e9e42cc6238/actors/internal/mailbox.py#L79-L87
[ "def", "set_scheduled", "(", "self", ")", ":", "with", "self", ".", "_idle_lock", ":", "if", "self", ".", "_idle", ":", "self", ".", "_idle", "=", "False", "return", "True", "return", "False" ]
9f826ab2947c665d61363a6ebc401e9e42cc6238
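A compact sketch of the idle-flag handshake; the calls are sequential here for clarity, but the lock is what makes the same transition safe when callers race across threads:

import threading

class Mailbox:                      # stripped-down sketch, not the real class
    def __init__(self):
        self._idle = True
        self._idle_lock = threading.Lock()

    def set_scheduled(self):
        with self._idle_lock:
            if self._idle:
                self._idle = False
                return True
            return False

mb = Mailbox()
print(mb.set_scheduled())  # True  (idle -> scheduled)
print(mb.set_scheduled())  # False (already scheduled)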
valid
StatsQueryResource.post
Get statistics.
invenio_stats/views.py
def post(self, **kwargs):
    """Get statistics."""
    data = request.get_json(force=False)
    if data is None:
        data = {}
    result = {}
    for query_name, config in data.items():
        if config is None or not isinstance(config, dict) \
                or (set(config.keys()) != {'stat', 'params'} and
                    set(config.keys()) != {'stat'}):
            raise InvalidRequestInputError(
                'Invalid Input. It should be of the form '
                '{ STATISTIC_NAME: { "stat": STAT_TYPE, '
                '"params": STAT_PARAMS \}}'
            )
        stat = config['stat']
        params = config.get('params', {})
        try:
            query_cfg = current_stats.queries[stat]
        except KeyError:
            raise UnknownQueryError(stat)
        permission = current_stats.permission_factory(stat, params)
        if permission is not None and not permission.can():
            message = ('You do not have a permission to query the '
                       'statistic "{}" with those '
                       'parameters'.format(stat))
            if current_user.is_authenticated:
                abort(403, message)
            abort(401, message)
        try:
            query = query_cfg.query_class(**query_cfg.query_config)
            result[query_name] = query.run(**params)
        except ValueError as e:
            raise InvalidRequestInputError(e.args[0])
        except NotFoundError as e:
            return None
    return self.make_response(result)

def post(self, **kwargs):
    """Get statistics."""
    data = request.get_json(force=False)
    if data is None:
        data = {}
    result = {}
    for query_name, config in data.items():
        if config is None or not isinstance(config, dict) \
                or (set(config.keys()) != {'stat', 'params'} and
                    set(config.keys()) != {'stat'}):
            raise InvalidRequestInputError(
                'Invalid Input. It should be of the form '
                '{ STATISTIC_NAME: { "stat": STAT_TYPE, '
                '"params": STAT_PARAMS \}}'
            )
        stat = config['stat']
        params = config.get('params', {})
        try:
            query_cfg = current_stats.queries[stat]
        except KeyError:
            raise UnknownQueryError(stat)
        permission = current_stats.permission_factory(stat, params)
        if permission is not None and not permission.can():
            message = ('You do not have a permission to query the '
                       'statistic "{}" with those '
                       'parameters'.format(stat))
            if current_user.is_authenticated:
                abort(403, message)
            abort(401, message)
        try:
            query = query_cfg.query_class(**query_cfg.query_config)
            result[query_name] = query.run(**params)
        except ValueError as e:
            raise InvalidRequestInputError(e.args[0])
        except NotFoundError as e:
            return None
    return self.make_response(result)
[ "Get", "statistics", "." ]
inveniosoftware/invenio-stats
python
https://github.com/inveniosoftware/invenio-stats/blob/d877ae5462084abb4a28a20f1ebb3d636769c1bc/invenio_stats/views.py#L44-L81
[ "def", "post", "(", "self", ",", "*", "*", "kwargs", ")", ":", "data", "=", "request", ".", "get_json", "(", "force", "=", "False", ")", "if", "data", "is", "None", ":", "data", "=", "{", "}", "result", "=", "{", "}", "for", "query_name", ",", "config", "in", "data", ".", "items", "(", ")", ":", "if", "config", "is", "None", "or", "not", "isinstance", "(", "config", ",", "dict", ")", "or", "(", "set", "(", "config", ".", "keys", "(", ")", ")", "!=", "{", "'stat'", ",", "'params'", "}", "and", "set", "(", "config", ".", "keys", "(", ")", ")", "!=", "{", "'stat'", "}", ")", ":", "raise", "InvalidRequestInputError", "(", "'Invalid Input. It should be of the form '", "'{ STATISTIC_NAME: { \"stat\": STAT_TYPE, '", "'\"params\": STAT_PARAMS \\}}'", ")", "stat", "=", "config", "[", "'stat'", "]", "params", "=", "config", ".", "get", "(", "'params'", ",", "{", "}", ")", "try", ":", "query_cfg", "=", "current_stats", ".", "queries", "[", "stat", "]", "except", "KeyError", ":", "raise", "UnknownQueryError", "(", "stat", ")", "permission", "=", "current_stats", ".", "permission_factory", "(", "stat", ",", "params", ")", "if", "permission", "is", "not", "None", "and", "not", "permission", ".", "can", "(", ")", ":", "message", "=", "(", "'You do not have a permission to query the '", "'statistic \"{}\" with those '", "'parameters'", ".", "format", "(", "stat", ")", ")", "if", "current_user", ".", "is_authenticated", ":", "abort", "(", "403", ",", "message", ")", "abort", "(", "401", ",", "message", ")", "try", ":", "query", "=", "query_cfg", ".", "query_class", "(", "*", "*", "query_cfg", ".", "query_config", ")", "result", "[", "query_name", "]", "=", "query", ".", "run", "(", "*", "*", "params", ")", "except", "ValueError", "as", "e", ":", "raise", "InvalidRequestInputError", "(", "e", ".", "args", "[", "0", "]", ")", "except", "NotFoundError", "as", "e", ":", "return", "None", "return", "self", ".", "make_response", "(", "result", ")" ]
d877ae5462084abb4a28a20f1ebb3d636769c1bc
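The request body the handler expects, per the validation above; each top-level key names one query, and its value must contain 'stat' and optionally 'params'. The statistic names below are placeholders, not guaranteed to exist in any deployment:

payload = {
    "my-file-downloads": {                      # caller-chosen result key
        "stat": "bucket-file-download-total",   # hypothetical stat name
        "params": {"start_date": "2018-01-01", "end_date": "2018-02-01"},
    },
    "minimal-query": {"stat": "some-other-stat"},   # 'params' may be omitted
}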
valid
RuleChecker.check
:param query:
protector/rules/too_many_datapoints.py
def check(self, query):
    """
    :param query:
    """
    if query.get_type() not in {Keyword.SELECT, Keyword.DELETE}:
        # Only select and delete queries deal with time durations
        # All others are not affected by this rule. Bailing out.
        return Ok(True)

    datapoints = query.get_datapoints()

    if datapoints <= self.max_datapoints:
        return Ok(True)

    return Err(("Expecting {} datapoints from that query, which is above the threshold! "
                "Set a date range (e.g. where time > now() - 24h), "
                "increase grouping (e.g. group by time(24h) "
                "or limit the number of datapoints (e.g. limit 100)").format(datapoints))

def check(self, query):
    """
    :param query:
    """
    if query.get_type() not in {Keyword.SELECT, Keyword.DELETE}:
        # Only select and delete queries deal with time durations
        # All others are not affected by this rule. Bailing out.
        return Ok(True)

    datapoints = query.get_datapoints()

    if datapoints <= self.max_datapoints:
        return Ok(True)

    return Err(("Expecting {} datapoints from that query, which is above the threshold! "
                "Set a date range (e.g. where time > now() - 24h), "
                "increase grouping (e.g. group by time(24h) "
                "or limit the number of datapoints (e.g. limit 100)").format(datapoints))
[ ":", "param", "query", ":" ]
trivago/Protector
python
https://github.com/trivago/Protector/blob/7ebe7bde965e27737b961a0cb5740724d174fdc7/protector/rules/too_many_datapoints.py#L21-L37
[ "def", "check", "(", "self", ",", "query", ")", ":", "if", "query", ".", "get_type", "(", ")", "not", "in", "{", "Keyword", ".", "SELECT", ",", "Keyword", ".", "DELETE", "}", ":", "# Only select and delete queries deal with time durations", "# All others are not affected by this rule. Bailing out.", "return", "Ok", "(", "True", ")", "datapoints", "=", "query", ".", "get_datapoints", "(", ")", "if", "datapoints", "<=", "self", ".", "max_datapoints", ":", "return", "Ok", "(", "True", ")", "return", "Err", "(", "(", "\"Expecting {} datapoints from that query, which is above the threshold! \"", "\"Set a date range (e.g. where time > now() - 24h), \"", "\"increase grouping (e.g. group by time(24h) \"", "\"or limit the number of datapoints (e.g. limit 100)\"", ")", ".", "format", "(", "datapoints", ")", ")" ]
7ebe7bde965e27737b961a0cb5740724d174fdc7
valid
StatAggregator._get_oldest_event_timestamp
Search for the oldest event timestamp.
invenio_stats/aggregations.py
def _get_oldest_event_timestamp(self):
    """Search for the oldest event timestamp."""
    # Retrieve the oldest event in order to start aggregation
    # from there
    query_events = Search(
        using=self.client,
        index=self.event_index
    )[0:1].sort(
        {'timestamp': {'order': 'asc'}}
    )
    result = query_events.execute()

    # There might not be any events yet if the first event has been
    # indexed but the indices have not been refreshed yet.
    if len(result) == 0:
        return None
    return parser.parse(result[0]['timestamp'])

def _get_oldest_event_timestamp(self):
    """Search for the oldest event timestamp."""
    # Retrieve the oldest event in order to start aggregation
    # from there
    query_events = Search(
        using=self.client,
        index=self.event_index
    )[0:1].sort(
        {'timestamp': {'order': 'asc'}}
    )
    result = query_events.execute()

    # There might not be any events yet if the first event has been
    # indexed but the indices have not been refreshed yet.
    if len(result) == 0:
        return None
    return parser.parse(result[0]['timestamp'])
[ "Search", "for", "the", "oldest", "event", "timestamp", "." ]
inveniosoftware/invenio-stats
python
https://github.com/inveniosoftware/invenio-stats/blob/d877ae5462084abb4a28a20f1ebb3d636769c1bc/invenio_stats/aggregations.py#L132-L147
[ "def", "_get_oldest_event_timestamp", "(", "self", ")", ":", "# Retrieve the oldest event in order to start aggregation", "# from there", "query_events", "=", "Search", "(", "using", "=", "self", ".", "client", ",", "index", "=", "self", ".", "event_index", ")", "[", "0", ":", "1", "]", ".", "sort", "(", "{", "'timestamp'", ":", "{", "'order'", ":", "'asc'", "}", "}", ")", "result", "=", "query_events", ".", "execute", "(", ")", "# There might not be any events yet if the first event have been", "# indexed but the indices have not been refreshed yet.", "if", "len", "(", "result", ")", "==", "0", ":", "return", "None", "return", "parser", ".", "parse", "(", "result", "[", "0", "]", "[", "'timestamp'", "]", ")" ]
d877ae5462084abb4a28a20f1ebb3d636769c1bc
valid
StatAggregator.get_bookmark
Get last aggregation date.
invenio_stats/aggregations.py
def get_bookmark(self):
    """Get last aggregation date."""
    if not Index(self.aggregation_alias, using=self.client).exists():
        if not Index(self.event_index, using=self.client).exists():
            return datetime.date.today()
        return self._get_oldest_event_timestamp()

    # retrieve the oldest bookmark
    query_bookmark = Search(
        using=self.client,
        index=self.aggregation_alias,
        doc_type=self.bookmark_doc_type
    )[0:1].sort(
        {'date': {'order': 'desc'}}
    )
    bookmarks = query_bookmark.execute()
    # if no bookmark is found but the index exists, the bookmark was somehow
    # lost or never written, so restart from the beginning
    if len(bookmarks) == 0:
        return self._get_oldest_event_timestamp()

    # change it to doc_id_suffix
    bookmark = datetime.datetime.strptime(bookmarks[0].date,
                                          self.doc_id_suffix)
    return bookmark

def get_bookmark(self):
    """Get last aggregation date."""
    if not Index(self.aggregation_alias, using=self.client).exists():
        if not Index(self.event_index, using=self.client).exists():
            return datetime.date.today()
        return self._get_oldest_event_timestamp()

    # retrieve the oldest bookmark
    query_bookmark = Search(
        using=self.client,
        index=self.aggregation_alias,
        doc_type=self.bookmark_doc_type
    )[0:1].sort(
        {'date': {'order': 'desc'}}
    )
    bookmarks = query_bookmark.execute()
    # if no bookmark is found but the index exists, the bookmark was somehow
    # lost or never written, so restart from the beginning
    if len(bookmarks) == 0:
        return self._get_oldest_event_timestamp()

    # change it to doc_id_suffix
    bookmark = datetime.datetime.strptime(bookmarks[0].date,
                                          self.doc_id_suffix)
    return bookmark
[ "Get", "last", "aggregation", "date", "." ]
inveniosoftware/invenio-stats
python
https://github.com/inveniosoftware/invenio-stats/blob/d877ae5462084abb4a28a20f1ebb3d636769c1bc/invenio_stats/aggregations.py#L149-L175
[ "def", "get_bookmark", "(", "self", ")", ":", "if", "not", "Index", "(", "self", ".", "aggregation_alias", ",", "using", "=", "self", ".", "client", ")", ".", "exists", "(", ")", ":", "if", "not", "Index", "(", "self", ".", "event_index", ",", "using", "=", "self", ".", "client", ")", ".", "exists", "(", ")", ":", "return", "datetime", ".", "date", ".", "today", "(", ")", "return", "self", ".", "_get_oldest_event_timestamp", "(", ")", "# retrieve the oldest bookmark", "query_bookmark", "=", "Search", "(", "using", "=", "self", ".", "client", ",", "index", "=", "self", ".", "aggregation_alias", ",", "doc_type", "=", "self", ".", "bookmark_doc_type", ")", "[", "0", ":", "1", "]", ".", "sort", "(", "{", "'date'", ":", "{", "'order'", ":", "'desc'", "}", "}", ")", "bookmarks", "=", "query_bookmark", ".", "execute", "(", ")", "# if no bookmark is found but the index exist, the bookmark was somehow", "# lost or never written, so restart from the beginning", "if", "len", "(", "bookmarks", ")", "==", "0", ":", "return", "self", ".", "_get_oldest_event_timestamp", "(", ")", "# change it to doc_id_suffix", "bookmark", "=", "datetime", ".", "datetime", ".", "strptime", "(", "bookmarks", "[", "0", "]", ".", "date", ",", "self", ".", "doc_id_suffix", ")", "return", "bookmark" ]
d877ae5462084abb4a28a20f1ebb3d636769c1bc
valid
StatAggregator.set_bookmark
Set bookmark for starting next aggregation.
invenio_stats/aggregations.py
def set_bookmark(self):
    """Set bookmark for starting next aggregation."""
    def _success_date():
        bookmark = {
            'date': self.new_bookmark or datetime.datetime.utcnow().
            strftime(self.doc_id_suffix)
        }

        yield dict(_index=self.last_index_written,
                   _type=self.bookmark_doc_type,
                   _source=bookmark)
    if self.last_index_written:
        bulk(self.client,
             _success_date(),
             stats_only=True)

def set_bookmark(self):
    """Set bookmark for starting next aggregation."""
    def _success_date():
        bookmark = {
            'date': self.new_bookmark or datetime.datetime.utcnow().
            strftime(self.doc_id_suffix)
        }

        yield dict(_index=self.last_index_written,
                   _type=self.bookmark_doc_type,
                   _source=bookmark)
    if self.last_index_written:
        bulk(self.client,
             _success_date(),
             stats_only=True)
[ "Set", "bookmark", "for", "starting", "next", "aggregation", "." ]
inveniosoftware/invenio-stats
python
https://github.com/inveniosoftware/invenio-stats/blob/d877ae5462084abb4a28a20f1ebb3d636769c1bc/invenio_stats/aggregations.py#L177-L191
[ "def", "set_bookmark", "(", "self", ")", ":", "def", "_success_date", "(", ")", ":", "bookmark", "=", "{", "'date'", ":", "self", ".", "new_bookmark", "or", "datetime", ".", "datetime", ".", "utcnow", "(", ")", ".", "strftime", "(", "self", ".", "doc_id_suffix", ")", "}", "yield", "dict", "(", "_index", "=", "self", ".", "last_index_written", ",", "_type", "=", "self", ".", "bookmark_doc_type", ",", "_source", "=", "bookmark", ")", "if", "self", ".", "last_index_written", ":", "bulk", "(", "self", ".", "client", ",", "_success_date", "(", ")", ",", "stats_only", "=", "True", ")" ]
d877ae5462084abb4a28a20f1ebb3d636769c1bc
valid
StatAggregator._format_range_dt
Format range filter datetime to the closest aggregation interval.
invenio_stats/aggregations.py
def _format_range_dt(self, d):
    """Format range filter datetime to the closest aggregation interval."""
    if not isinstance(d, six.string_types):
        d = d.isoformat()
    return '{0}||/{1}'.format(
        d, self.dt_rounding_map[self.aggregation_interval])

def _format_range_dt(self, d):
    """Format range filter datetime to the closest aggregation interval."""
    if not isinstance(d, six.string_types):
        d = d.isoformat()
    return '{0}||/{1}'.format(
        d, self.dt_rounding_map[self.aggregation_interval])
[ "Format", "range", "filter", "datetime", "to", "the", "closest", "aggregation", "interval", "." ]
inveniosoftware/invenio-stats
python
https://github.com/inveniosoftware/invenio-stats/blob/d877ae5462084abb4a28a20f1ebb3d636769c1bc/invenio_stats/aggregations.py#L193-L198
[ "def", "_format_range_dt", "(", "self", ",", "d", ")", ":", "if", "not", "isinstance", "(", "d", ",", "six", ".", "string_types", ")", ":", "d", "=", "d", ".", "isoformat", "(", ")", "return", "'{0}||/{1}'", ".", "format", "(", "d", ",", "self", ".", "dt_rounding_map", "[", "self", ".", "aggregation_interval", "]", ")" ]
d877ae5462084abb4a28a20f1ebb3d636769c1bc
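What the method produces is an Elasticsearch date-math string; here is a sketch with an assumed rounding map (the real dt_rounding_map lives on the aggregator and is not shown in this record):

import datetime

dt_rounding_map = {'hour': 'h', 'day': 'd', 'month': 'M'}  # assumed mapping
d = datetime.datetime(2018, 1, 2, 3, 4, 5)
print('{0}||/{1}'.format(d.isoformat(), dt_rounding_map['day']))
# 2018-01-02T03:04:05||/d  -> Elasticsearch rounds the bound to the day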
valid
StatAggregator.agg_iter
Aggregate and return dictionary to be indexed in ES.
invenio_stats/aggregations.py
def agg_iter(self, lower_limit=None, upper_limit=None):
    """Aggregate and return dictionary to be indexed in ES."""
    lower_limit = lower_limit or self.get_bookmark().isoformat()
    upper_limit = upper_limit or (
        datetime.datetime.utcnow().replace(microsecond=0).isoformat())
    aggregation_data = {}

    self.agg_query = Search(using=self.client,
                            index=self.event_index).\
        filter('range', timestamp={
            'gte': self._format_range_dt(lower_limit),
            'lte': self._format_range_dt(upper_limit)})

    # apply query modifiers
    for modifier in self.query_modifiers:
        self.agg_query = modifier(self.agg_query)

    hist = self.agg_query.aggs.bucket(
        'histogram',
        'date_histogram',
        field='timestamp',
        interval=self.aggregation_interval
    )
    terms = hist.bucket(
        'terms', 'terms', field=self.aggregation_field, size=0
    )
    top = terms.metric(
        'top_hit', 'top_hits', size=1, sort={'timestamp': 'desc'}
    )
    for dst, (metric, src, opts) in self.metric_aggregation_fields.items():
        terms.metric(dst, metric, field=src, **opts)

    results = self.agg_query.execute()
    index_name = None
    for interval in results.aggregations['histogram'].buckets:
        interval_date = datetime.datetime.strptime(
            interval['key_as_string'], '%Y-%m-%dT%H:%M:%S')
        for aggregation in interval['terms'].buckets:
            aggregation_data['timestamp'] = interval_date.isoformat()
            aggregation_data[self.aggregation_field] = aggregation['key']
            aggregation_data['count'] = aggregation['doc_count']

            if self.metric_aggregation_fields:
                for f in self.metric_aggregation_fields:
                    aggregation_data[f] = aggregation[f]['value']

            doc = aggregation.top_hit.hits.hits[0]['_source']
            for destination, source in self.copy_fields.items():
                if isinstance(source, six.string_types):
                    aggregation_data[destination] = doc[source]
                else:
                    aggregation_data[destination] = source(
                        doc,
                        aggregation_data
                    )

            index_name = 'stats-{0}-{1}'.\
                format(self.event,
                       interval_date.strftime(self.index_name_suffix))
            self.indices.add(index_name)
            yield dict(_id='{0}-{1}'.
                       format(aggregation['key'],
                              interval_date.strftime(self.doc_id_suffix)),
                       _index=index_name,
                       _type=self.aggregation_doc_type,
                       _source=aggregation_data)
    self.last_index_written = index_name

def agg_iter(self, lower_limit=None, upper_limit=None):
    """Aggregate and return dictionary to be indexed in ES."""
    lower_limit = lower_limit or self.get_bookmark().isoformat()
    upper_limit = upper_limit or (
        datetime.datetime.utcnow().replace(microsecond=0).isoformat())
    aggregation_data = {}

    self.agg_query = Search(using=self.client,
                            index=self.event_index).\
        filter('range', timestamp={
            'gte': self._format_range_dt(lower_limit),
            'lte': self._format_range_dt(upper_limit)})

    # apply query modifiers
    for modifier in self.query_modifiers:
        self.agg_query = modifier(self.agg_query)

    hist = self.agg_query.aggs.bucket(
        'histogram',
        'date_histogram',
        field='timestamp',
        interval=self.aggregation_interval
    )
    terms = hist.bucket(
        'terms', 'terms', field=self.aggregation_field, size=0
    )
    top = terms.metric(
        'top_hit', 'top_hits', size=1, sort={'timestamp': 'desc'}
    )
    for dst, (metric, src, opts) in self.metric_aggregation_fields.items():
        terms.metric(dst, metric, field=src, **opts)

    results = self.agg_query.execute()
    index_name = None
    for interval in results.aggregations['histogram'].buckets:
        interval_date = datetime.datetime.strptime(
            interval['key_as_string'], '%Y-%m-%dT%H:%M:%S')
        for aggregation in interval['terms'].buckets:
            aggregation_data['timestamp'] = interval_date.isoformat()
            aggregation_data[self.aggregation_field] = aggregation['key']
            aggregation_data['count'] = aggregation['doc_count']

            if self.metric_aggregation_fields:
                for f in self.metric_aggregation_fields:
                    aggregation_data[f] = aggregation[f]['value']

            doc = aggregation.top_hit.hits.hits[0]['_source']
            for destination, source in self.copy_fields.items():
                if isinstance(source, six.string_types):
                    aggregation_data[destination] = doc[source]
                else:
                    aggregation_data[destination] = source(
                        doc,
                        aggregation_data
                    )

            index_name = 'stats-{0}-{1}'.\
                format(self.event,
                       interval_date.strftime(self.index_name_suffix))
            self.indices.add(index_name)
            yield dict(_id='{0}-{1}'.
                       format(aggregation['key'],
                              interval_date.strftime(self.doc_id_suffix)),
                       _index=index_name,
                       _type=self.aggregation_doc_type,
                       _source=aggregation_data)
    self.last_index_written = index_name
[ "Aggregate", "and", "return", "dictionary", "to", "be", "indexed", "in", "ES", "." ]
inveniosoftware/invenio-stats
python
https://github.com/inveniosoftware/invenio-stats/blob/d877ae5462084abb4a28a20f1ebb3d636769c1bc/invenio_stats/aggregations.py#L200-L268
[ "def", "agg_iter", "(", "self", ",", "lower_limit", "=", "None", ",", "upper_limit", "=", "None", ")", ":", "lower_limit", "=", "lower_limit", "or", "self", ".", "get_bookmark", "(", ")", ".", "isoformat", "(", ")", "upper_limit", "=", "upper_limit", "or", "(", "datetime", ".", "datetime", ".", "utcnow", "(", ")", ".", "replace", "(", "microsecond", "=", "0", ")", ".", "isoformat", "(", ")", ")", "aggregation_data", "=", "{", "}", "self", ".", "agg_query", "=", "Search", "(", "using", "=", "self", ".", "client", ",", "index", "=", "self", ".", "event_index", ")", ".", "filter", "(", "'range'", ",", "timestamp", "=", "{", "'gte'", ":", "self", ".", "_format_range_dt", "(", "lower_limit", ")", ",", "'lte'", ":", "self", ".", "_format_range_dt", "(", "upper_limit", ")", "}", ")", "# apply query modifiers", "for", "modifier", "in", "self", ".", "query_modifiers", ":", "self", ".", "agg_query", "=", "modifier", "(", "self", ".", "agg_query", ")", "hist", "=", "self", ".", "agg_query", ".", "aggs", ".", "bucket", "(", "'histogram'", ",", "'date_histogram'", ",", "field", "=", "'timestamp'", ",", "interval", "=", "self", ".", "aggregation_interval", ")", "terms", "=", "hist", ".", "bucket", "(", "'terms'", ",", "'terms'", ",", "field", "=", "self", ".", "aggregation_field", ",", "size", "=", "0", ")", "top", "=", "terms", ".", "metric", "(", "'top_hit'", ",", "'top_hits'", ",", "size", "=", "1", ",", "sort", "=", "{", "'timestamp'", ":", "'desc'", "}", ")", "for", "dst", ",", "(", "metric", ",", "src", ",", "opts", ")", "in", "self", ".", "metric_aggregation_fields", ".", "items", "(", ")", ":", "terms", ".", "metric", "(", "dst", ",", "metric", ",", "field", "=", "src", ",", "*", "*", "opts", ")", "results", "=", "self", ".", "agg_query", ".", "execute", "(", ")", "index_name", "=", "None", "for", "interval", "in", "results", ".", "aggregations", "[", "'histogram'", "]", ".", "buckets", ":", "interval_date", "=", "datetime", ".", "datetime", ".", "strptime", "(", "interval", "[", "'key_as_string'", "]", ",", "'%Y-%m-%dT%H:%M:%S'", ")", "for", "aggregation", "in", "interval", "[", "'terms'", "]", ".", "buckets", ":", "aggregation_data", "[", "'timestamp'", "]", "=", "interval_date", ".", "isoformat", "(", ")", "aggregation_data", "[", "self", ".", "aggregation_field", "]", "=", "aggregation", "[", "'key'", "]", "aggregation_data", "[", "'count'", "]", "=", "aggregation", "[", "'doc_count'", "]", "if", "self", ".", "metric_aggregation_fields", ":", "for", "f", "in", "self", ".", "metric_aggregation_fields", ":", "aggregation_data", "[", "f", "]", "=", "aggregation", "[", "f", "]", "[", "'value'", "]", "doc", "=", "aggregation", ".", "top_hit", ".", "hits", ".", "hits", "[", "0", "]", "[", "'_source'", "]", "for", "destination", ",", "source", "in", "self", ".", "copy_fields", ".", "items", "(", ")", ":", "if", "isinstance", "(", "source", ",", "six", ".", "string_types", ")", ":", "aggregation_data", "[", "destination", "]", "=", "doc", "[", "source", "]", "else", ":", "aggregation_data", "[", "destination", "]", "=", "source", "(", "doc", ",", "aggregation_data", ")", "index_name", "=", "'stats-{0}-{1}'", ".", "format", "(", "self", ".", "event", ",", "interval_date", ".", "strftime", "(", "self", ".", "index_name_suffix", ")", ")", "self", ".", "indices", ".", "add", "(", "index_name", ")", "yield", "dict", "(", "_id", "=", "'{0}-{1}'", ".", "format", "(", "aggregation", "[", "'key'", "]", ",", "interval_date", ".", "strftime", "(", "self", ".", "doc_id_suffix", ")", ")", ",", "_index", "=", 
"index_name", ",", "_type", "=", "self", ".", "aggregation_doc_type", ",", "_source", "=", "aggregation_data", ")", "self", ".", "last_index_written", "=", "index_name" ]
d877ae5462084abb4a28a20f1ebb3d636769c1bc
valid
StatAggregator.run
Calculate statistics aggregations.
invenio_stats/aggregations.py
def run(self, start_date=None, end_date=None, update_bookmark=True):
    """Calculate statistics aggregations."""
    # If no events have been indexed there is nothing to aggregate
    if not Index(self.event_index, using=self.client).exists():
        return

    lower_limit = start_date or self.get_bookmark()
    # Stop here if no bookmark could be estimated.
    if lower_limit is None:
        return

    upper_limit = min(
        end_date or datetime.datetime.max,  # ignore if `None`
        datetime.datetime.utcnow().replace(microsecond=0),
        datetime.datetime.combine(
            lower_limit + datetime.timedelta(self.batch_size),
            datetime.datetime.min.time())
    )
    while upper_limit <= datetime.datetime.utcnow():
        self.indices = set()
        self.new_bookmark = upper_limit.strftime(self.doc_id_suffix)
        bulk(self.client,
             self.agg_iter(lower_limit, upper_limit),
             stats_only=True,
             chunk_size=50)
        # Flush all indices which have been modified
        current_search_client.indices.flush(
            index=','.join(self.indices),
            wait_if_ongoing=True
        )
        if update_bookmark:
            self.set_bookmark()
        self.indices = set()
        lower_limit = lower_limit + datetime.timedelta(self.batch_size)
        upper_limit = min(
            end_date or datetime.datetime.max,  # ignore if `None`
            datetime.datetime.utcnow().replace(microsecond=0),
            lower_limit + datetime.timedelta(self.batch_size)
        )
        if lower_limit > upper_limit:
            break

def run(self, start_date=None, end_date=None, update_bookmark=True):
    """Calculate statistics aggregations."""
    # If no events have been indexed there is nothing to aggregate
    if not Index(self.event_index, using=self.client).exists():
        return

    lower_limit = start_date or self.get_bookmark()
    # Stop here if no bookmark could be estimated.
    if lower_limit is None:
        return

    upper_limit = min(
        end_date or datetime.datetime.max,  # ignore if `None`
        datetime.datetime.utcnow().replace(microsecond=0),
        datetime.datetime.combine(
            lower_limit + datetime.timedelta(self.batch_size),
            datetime.datetime.min.time())
    )
    while upper_limit <= datetime.datetime.utcnow():
        self.indices = set()
        self.new_bookmark = upper_limit.strftime(self.doc_id_suffix)
        bulk(self.client,
             self.agg_iter(lower_limit, upper_limit),
             stats_only=True,
             chunk_size=50)
        # Flush all indices which have been modified
        current_search_client.indices.flush(
            index=','.join(self.indices),
            wait_if_ongoing=True
        )
        if update_bookmark:
            self.set_bookmark()
        self.indices = set()
        lower_limit = lower_limit + datetime.timedelta(self.batch_size)
        upper_limit = min(
            end_date or datetime.datetime.max,  # ignore if `None`
            datetime.datetime.utcnow().replace(microsecond=0),
            lower_limit + datetime.timedelta(self.batch_size)
        )
        if lower_limit > upper_limit:
            break
[ "Calculate", "statistics", "aggregations", "." ]
inveniosoftware/invenio-stats
python
https://github.com/inveniosoftware/invenio-stats/blob/d877ae5462084abb4a28a20f1ebb3d636769c1bc/invenio_stats/aggregations.py#L270-L308
[ "def", "run", "(", "self", ",", "start_date", "=", "None", ",", "end_date", "=", "None", ",", "update_bookmark", "=", "True", ")", ":", "# If no events have been indexed there is nothing to aggregate", "if", "not", "Index", "(", "self", ".", "event_index", ",", "using", "=", "self", ".", "client", ")", ".", "exists", "(", ")", ":", "return", "lower_limit", "=", "start_date", "or", "self", ".", "get_bookmark", "(", ")", "# Stop here if no bookmark could be estimated.", "if", "lower_limit", "is", "None", ":", "return", "upper_limit", "=", "min", "(", "end_date", "or", "datetime", ".", "datetime", ".", "max", ",", "# ignore if `None`", "datetime", ".", "datetime", ".", "utcnow", "(", ")", ".", "replace", "(", "microsecond", "=", "0", ")", ",", "datetime", ".", "datetime", ".", "combine", "(", "lower_limit", "+", "datetime", ".", "timedelta", "(", "self", ".", "batch_size", ")", ",", "datetime", ".", "datetime", ".", "min", ".", "time", "(", ")", ")", ")", "while", "upper_limit", "<=", "datetime", ".", "datetime", ".", "utcnow", "(", ")", ":", "self", ".", "indices", "=", "set", "(", ")", "self", ".", "new_bookmark", "=", "upper_limit", ".", "strftime", "(", "self", ".", "doc_id_suffix", ")", "bulk", "(", "self", ".", "client", ",", "self", ".", "agg_iter", "(", "lower_limit", ",", "upper_limit", ")", ",", "stats_only", "=", "True", ",", "chunk_size", "=", "50", ")", "# Flush all indices which have been modified", "current_search_client", ".", "indices", ".", "flush", "(", "index", "=", "','", ".", "join", "(", "self", ".", "indices", ")", ",", "wait_if_ongoing", "=", "True", ")", "if", "update_bookmark", ":", "self", ".", "set_bookmark", "(", ")", "self", ".", "indices", "=", "set", "(", ")", "lower_limit", "=", "lower_limit", "+", "datetime", ".", "timedelta", "(", "self", ".", "batch_size", ")", "upper_limit", "=", "min", "(", "end_date", "or", "datetime", ".", "datetime", ".", "max", ",", "# ignore if `None``", "datetime", ".", "datetime", ".", "utcnow", "(", ")", ".", "replace", "(", "microsecond", "=", "0", ")", ",", "lower_limit", "+", "datetime", ".", "timedelta", "(", "self", ".", "batch_size", ")", ")", "if", "lower_limit", ">", "upper_limit", ":", "break" ]
d877ae5462084abb4a28a20f1ebb3d636769c1bc
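The windowing arithmetic in isolation: the upper bound is clamped to 'now', to the optional end_date, and to one batch past the bookmark. Treating batch_size as a number of days is an assumption made for this sketch:

import datetime

batch_size = 7                      # assumed to be a number of days
lower = datetime.datetime(2018, 1, 1)
end_date = None
upper = min(
    end_date or datetime.datetime.max,
    datetime.datetime.utcnow().replace(microsecond=0),
    datetime.datetime.combine(lower + datetime.timedelta(batch_size),
                              datetime.datetime.min.time())
)
print(lower, '->', upper)           # a window of at most 7 days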
valid
StatAggregator.list_bookmarks
List the aggregation's bookmarks.
invenio_stats/aggregations.py
def list_bookmarks(self, start_date=None, end_date=None, limit=None):
    """List the aggregation's bookmarks."""
    query = Search(
        using=self.client,
        index=self.aggregation_alias,
        doc_type=self.bookmark_doc_type
    ).sort({'date': {'order': 'desc'}})

    range_args = {}
    if start_date:
        range_args['gte'] = self._format_range_dt(
            start_date.replace(microsecond=0))
    if end_date:
        range_args['lte'] = self._format_range_dt(
            end_date.replace(microsecond=0))
    if range_args:
        query = query.filter('range', date=range_args)

    return query[0:limit].execute() if limit else query.scan()

def list_bookmarks(self, start_date=None, end_date=None, limit=None):
    """List the aggregation's bookmarks."""
    query = Search(
        using=self.client,
        index=self.aggregation_alias,
        doc_type=self.bookmark_doc_type
    ).sort({'date': {'order': 'desc'}})

    range_args = {}
    if start_date:
        range_args['gte'] = self._format_range_dt(
            start_date.replace(microsecond=0))
    if end_date:
        range_args['lte'] = self._format_range_dt(
            end_date.replace(microsecond=0))
    if range_args:
        query = query.filter('range', date=range_args)

    return query[0:limit].execute() if limit else query.scan()
[ "List", "the", "aggregation", "s", "bookmarks", "." ]
inveniosoftware/invenio-stats
python
https://github.com/inveniosoftware/invenio-stats/blob/d877ae5462084abb4a28a20f1ebb3d636769c1bc/invenio_stats/aggregations.py#L310-L328
[ "def", "list_bookmarks", "(", "self", ",", "start_date", "=", "None", ",", "end_date", "=", "None", ",", "limit", "=", "None", ")", ":", "query", "=", "Search", "(", "using", "=", "self", ".", "client", ",", "index", "=", "self", ".", "aggregation_alias", ",", "doc_type", "=", "self", ".", "bookmark_doc_type", ")", ".", "sort", "(", "{", "'date'", ":", "{", "'order'", ":", "'desc'", "}", "}", ")", "range_args", "=", "{", "}", "if", "start_date", ":", "range_args", "[", "'gte'", "]", "=", "self", ".", "_format_range_dt", "(", "start_date", ".", "replace", "(", "microsecond", "=", "0", ")", ")", "if", "end_date", ":", "range_args", "[", "'lte'", "]", "=", "self", ".", "_format_range_dt", "(", "end_date", ".", "replace", "(", "microsecond", "=", "0", ")", ")", "if", "range_args", ":", "query", "=", "query", ".", "filter", "(", "'range'", ",", "date", "=", "range_args", ")", "return", "query", "[", "0", ":", "limit", "]", ".", "execute", "(", ")", "if", "limit", "else", "query", ".", "scan", "(", ")" ]
d877ae5462084abb4a28a20f1ebb3d636769c1bc
valid
StatAggregator.delete
Delete aggregation documents.
invenio_stats/aggregations.py
def delete(self, start_date=None, end_date=None):
    """Delete aggregation documents."""
    aggs_query = Search(
        using=self.client,
        index=self.aggregation_alias,
        doc_type=self.aggregation_doc_type
    ).extra(_source=False)

    range_args = {}
    if start_date:
        range_args['gte'] = self._format_range_dt(
            start_date.replace(microsecond=0))
    if end_date:
        range_args['lte'] = self._format_range_dt(
            end_date.replace(microsecond=0))
    if range_args:
        aggs_query = aggs_query.filter('range', timestamp=range_args)

    bookmarks_query = Search(
        using=self.client,
        index=self.aggregation_alias,
        doc_type=self.bookmark_doc_type
    ).sort({'date': {'order': 'desc'}})

    if range_args:
        bookmarks_query = bookmarks_query.filter('range', date=range_args)

    def _delete_actions():
        for query in (aggs_query, bookmarks_query):
            affected_indices = set()
            for doc in query.scan():
                affected_indices.add(doc.meta.index)
                yield dict(_index=doc.meta.index,
                           _op_type='delete',
                           _id=doc.meta.id,
                           _type=doc.meta.doc_type)
            current_search_client.indices.flush(
                index=','.join(affected_indices), wait_if_ongoing=True)
    bulk(self.client, _delete_actions(), refresh=True)

def delete(self, start_date=None, end_date=None):
    """Delete aggregation documents."""
    aggs_query = Search(
        using=self.client,
        index=self.aggregation_alias,
        doc_type=self.aggregation_doc_type
    ).extra(_source=False)

    range_args = {}
    if start_date:
        range_args['gte'] = self._format_range_dt(
            start_date.replace(microsecond=0))
    if end_date:
        range_args['lte'] = self._format_range_dt(
            end_date.replace(microsecond=0))
    if range_args:
        aggs_query = aggs_query.filter('range', timestamp=range_args)

    bookmarks_query = Search(
        using=self.client,
        index=self.aggregation_alias,
        doc_type=self.bookmark_doc_type
    ).sort({'date': {'order': 'desc'}})

    if range_args:
        bookmarks_query = bookmarks_query.filter('range', date=range_args)

    def _delete_actions():
        for query in (aggs_query, bookmarks_query):
            affected_indices = set()
            for doc in query.scan():
                affected_indices.add(doc.meta.index)
                yield dict(_index=doc.meta.index,
                           _op_type='delete',
                           _id=doc.meta.id,
                           _type=doc.meta.doc_type)
            current_search_client.indices.flush(
                index=','.join(affected_indices), wait_if_ongoing=True)
    bulk(self.client, _delete_actions(), refresh=True)
[ "Delete", "aggregation", "documents", "." ]
inveniosoftware/invenio-stats
python
https://github.com/inveniosoftware/invenio-stats/blob/d877ae5462084abb4a28a20f1ebb3d636769c1bc/invenio_stats/aggregations.py#L330-L368
[ "def", "delete", "(", "self", ",", "start_date", "=", "None", ",", "end_date", "=", "None", ")", ":", "aggs_query", "=", "Search", "(", "using", "=", "self", ".", "client", ",", "index", "=", "self", ".", "aggregation_alias", ",", "doc_type", "=", "self", ".", "aggregation_doc_type", ")", ".", "extra", "(", "_source", "=", "False", ")", "range_args", "=", "{", "}", "if", "start_date", ":", "range_args", "[", "'gte'", "]", "=", "self", ".", "_format_range_dt", "(", "start_date", ".", "replace", "(", "microsecond", "=", "0", ")", ")", "if", "end_date", ":", "range_args", "[", "'lte'", "]", "=", "self", ".", "_format_range_dt", "(", "end_date", ".", "replace", "(", "microsecond", "=", "0", ")", ")", "if", "range_args", ":", "aggs_query", "=", "aggs_query", ".", "filter", "(", "'range'", ",", "timestamp", "=", "range_args", ")", "bookmarks_query", "=", "Search", "(", "using", "=", "self", ".", "client", ",", "index", "=", "self", ".", "aggregation_alias", ",", "doc_type", "=", "self", ".", "bookmark_doc_type", ")", ".", "sort", "(", "{", "'date'", ":", "{", "'order'", ":", "'desc'", "}", "}", ")", "if", "range_args", ":", "bookmarks_query", "=", "bookmarks_query", ".", "filter", "(", "'range'", ",", "date", "=", "range_args", ")", "def", "_delete_actions", "(", ")", ":", "for", "query", "in", "(", "aggs_query", ",", "bookmarks_query", ")", ":", "affected_indices", "=", "set", "(", ")", "for", "doc", "in", "query", ".", "scan", "(", ")", ":", "affected_indices", ".", "add", "(", "doc", ".", "meta", ".", "index", ")", "yield", "dict", "(", "_index", "=", "doc", ".", "meta", ".", "index", ",", "_op_type", "=", "'delete'", ",", "_id", "=", "doc", ".", "meta", ".", "id", ",", "_type", "=", "doc", ".", "meta", ".", "doc_type", ")", "current_search_client", ".", "indices", ".", "flush", "(", "index", "=", "','", ".", "join", "(", "affected_indices", ")", ",", "wait_if_ongoing", "=", "True", ")", "bulk", "(", "self", ".", "client", ",", "_delete_actions", "(", ")", ",", "refresh", "=", "True", ")" ]
d877ae5462084abb4a28a20f1ebb3d636769c1bc
valid
ResolutionParser.parse
Extract the data resolution of a query in seconds
E.g. "group by time(99s)" => 99

:param group_by_stmt: A raw InfluxDB group by statement
protector/parser/subparsers/resolution.py
def parse(self, group_by_stmt):
    """
    Extract the data resolution of a query in seconds
    E.g. "group by time(99s)" => 99

    :param group_by_stmt: A raw InfluxDB group by statement
    """
    if not group_by_stmt:
        return Resolution.MAX_RESOLUTION

    m = self.GROUP_BY_TIME_PATTERN.match(group_by_stmt)
    if not m:
        return None

    value = int(m.group(1))
    unit = m.group(2)
    resolution = self.convert_to_seconds(value, unit)

    # We can't have a higher resolution than the max resolution
    return max(resolution, Resolution.MAX_RESOLUTION)

def parse(self, group_by_stmt):
    """
    Extract the data resolution of a query in seconds
    E.g. "group by time(99s)" => 99

    :param group_by_stmt: A raw InfluxDB group by statement
    """
    if not group_by_stmt:
        return Resolution.MAX_RESOLUTION

    m = self.GROUP_BY_TIME_PATTERN.match(group_by_stmt)
    if not m:
        return None

    value = int(m.group(1))
    unit = m.group(2)
    resolution = self.convert_to_seconds(value, unit)

    # We can't have a higher resolution than the max resolution
    return max(resolution, Resolution.MAX_RESOLUTION)
[ "Extract", "the", "data", "resolution", "of", "a", "query", "in", "seconds", "E", ".", "g", ".", "group", "by", "time", "(", "99s", ")", "=", ">", "99" ]
trivago/Protector
python
https://github.com/trivago/Protector/blob/7ebe7bde965e27737b961a0cb5740724d174fdc7/protector/parser/subparsers/resolution.py#L20-L39
[ "def", "parse", "(", "self", ",", "group_by_stmt", ")", ":", "if", "not", "group_by_stmt", ":", "return", "Resolution", ".", "MAX_RESOLUTION", "m", "=", "self", ".", "GROUP_BY_TIME_PATTERN", ".", "match", "(", "group_by_stmt", ")", "if", "not", "m", ":", "return", "None", "value", "=", "int", "(", "m", ".", "group", "(", "1", ")", ")", "unit", "=", "m", ".", "group", "(", "2", ")", "resolution", "=", "self", ".", "convert_to_seconds", "(", "value", ",", "unit", ")", "# We can't have a higher resolution than the max resolution", "return", "max", "(", "resolution", ",", "Resolution", ".", "MAX_RESOLUTION", ")" ]
7ebe7bde965e27737b961a0cb5740724d174fdc7
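The actual GROUP_BY_TIME_PATTERN is defined elsewhere in the parser and is not shown in this record, so here is an illustrative stand-in that extracts the value/unit pair the method converts to seconds:

import re

GROUP_BY_TIME_PATTERN = re.compile(r'.*time\((\d+)([smhdw])\)')  # stand-in pattern
m = GROUP_BY_TIME_PATTERN.match('group by time(99s)')
if m:
    value, unit = int(m.group(1)), m.group(2)
    print(value, unit)   # 99 s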
valid
Future.get
Return value on success, or raise exception on failure.
actors/future.py
def get(self, timeout=None):
    """
    Return value on success, or raise exception on failure.
    """
    result = None
    try:
        result = self._result.get(True, timeout=timeout)
    except Empty:
        raise Timeout()
    if isinstance(result, Failure):
        six.reraise(*result.exc_info)
    else:
        return result

def get(self, timeout=None):
    """
    Return value on success, or raise exception on failure.
    """
    result = None
    try:
        result = self._result.get(True, timeout=timeout)
    except Empty:
        raise Timeout()
    if isinstance(result, Failure):
        six.reraise(*result.exc_info)
    else:
        return result
[ "Return", "value", "on", "success", "or", "raise", "exception", "on", "failure", "." ]
tamland/python-actors
python
https://github.com/tamland/python-actors/blob/9f826ab2947c665d61363a6ebc401e9e42cc6238/actors/future.py#L35-L48
[ "def", "get", "(", "self", ",", "timeout", "=", "None", ")", ":", "result", "=", "None", "try", ":", "result", "=", "self", ".", "_result", ".", "get", "(", "True", ",", "timeout", "=", "timeout", ")", "except", "Empty", ":", "raise", "Timeout", "(", ")", "if", "isinstance", "(", "result", ",", "Failure", ")", ":", "six", ".", "reraise", "(", "*", "result", ".", "exc_info", ")", "else", ":", "return", "result" ]
9f826ab2947c665d61363a6ebc401e9e42cc6238
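The same blocking-get pattern with a plain queue, as a self-contained sketch; python-actors' own completion API is not shown in this record, so the producer side below is purely illustrative:

from queue import Queue, Empty

result_q = Queue(maxsize=1)
result_q.put(42)                    # the producer side would do this on success

try:
    value = result_q.get(True, timeout=1.0)
except Empty:
    raise TimeoutError('no result within the timeout')
print(value)    # 42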
valid
_events_process
Process stats events.
invenio_stats/cli.py
def _events_process(event_types=None, eager=False):
    """Process stats events."""
    event_types = event_types or list(current_stats.enabled_events)
    if eager:
        process_events.apply((event_types,), throw=True)
        click.secho('Events processed successfully.', fg='green')
    else:
        process_events.delay(event_types)
        click.secho('Events processing task sent...', fg='yellow')

def _events_process(event_types=None, eager=False):
    """Process stats events."""
    event_types = event_types or list(current_stats.enabled_events)
    if eager:
        process_events.apply((event_types,), throw=True)
        click.secho('Events processed successfully.', fg='green')
    else:
        process_events.delay(event_types)
        click.secho('Events processing task sent...', fg='yellow')
[ "Process", "stats", "events", "." ]
inveniosoftware/invenio-stats
python
https://github.com/inveniosoftware/invenio-stats/blob/d877ae5462084abb4a28a20f1ebb3d636769c1bc/invenio_stats/cli.py#L83-L91
[ "def", "_events_process", "(", "event_types", "=", "None", ",", "eager", "=", "False", ")", ":", "event_types", "=", "event_types", "or", "list", "(", "current_stats", ".", "enabled_events", ")", "if", "eager", ":", "process_events", ".", "apply", "(", "(", "event_types", ",", ")", ",", "throw", "=", "True", ")", "click", ".", "secho", "(", "'Events processed successfully.'", ",", "fg", "=", "'green'", ")", "else", ":", "process_events", ".", "delay", "(", "event_types", ")", "click", ".", "secho", "(", "'Events processing task sent...'", ",", "fg", "=", "'yellow'", ")" ]
d877ae5462084abb4a28a20f1ebb3d636769c1bc
valid
_aggregations_process
Process stats aggregations.
invenio_stats/cli.py
def _aggregations_process(aggregation_types=None,
                          start_date=None, end_date=None,
                          update_bookmark=False, eager=False):
    """Process stats aggregations."""
    aggregation_types = (aggregation_types or
                         list(current_stats.enabled_aggregations))
    if eager:
        aggregate_events.apply(
            (aggregation_types,),
            dict(start_date=start_date, end_date=end_date,
                 update_bookmark=update_bookmark),
            throw=True)
        click.secho('Aggregations processed successfully.', fg='green')
    else:
        aggregate_events.delay(
            aggregation_types, start_date=start_date, end_date=end_date)
        click.secho('Aggregations processing task sent...', fg='yellow')

def _aggregations_process(aggregation_types=None,
                          start_date=None, end_date=None,
                          update_bookmark=False, eager=False):
    """Process stats aggregations."""
    aggregation_types = (aggregation_types or
                         list(current_stats.enabled_aggregations))
    if eager:
        aggregate_events.apply(
            (aggregation_types,),
            dict(start_date=start_date, end_date=end_date,
                 update_bookmark=update_bookmark),
            throw=True)
        click.secho('Aggregations processed successfully.', fg='green')
    else:
        aggregate_events.delay(
            aggregation_types, start_date=start_date, end_date=end_date)
        click.secho('Aggregations processing task sent...', fg='yellow')
[ "Process", "stats", "aggregations", "." ]
inveniosoftware/invenio-stats
python
https://github.com/inveniosoftware/invenio-stats/blob/d877ae5462084abb4a28a20f1ebb3d636769c1bc/invenio_stats/cli.py#L106-L122
[ "def", "_aggregations_process", "(", "aggregation_types", "=", "None", ",", "start_date", "=", "None", ",", "end_date", "=", "None", ",", "update_bookmark", "=", "False", ",", "eager", "=", "False", ")", ":", "aggregation_types", "=", "(", "aggregation_types", "or", "list", "(", "current_stats", ".", "enabled_aggregations", ")", ")", "if", "eager", ":", "aggregate_events", ".", "apply", "(", "(", "aggregation_types", ",", ")", ",", "dict", "(", "start_date", "=", "start_date", ",", "end_date", "=", "end_date", ",", "update_bookmark", "=", "update_bookmark", ")", ",", "throw", "=", "True", ")", "click", ".", "secho", "(", "'Aggregations processed successfully.'", ",", "fg", "=", "'green'", ")", "else", ":", "aggregate_events", ".", "delay", "(", "aggregation_types", ",", "start_date", "=", "start_date", ",", "end_date", "=", "end_date", ")", "click", ".", "secho", "(", "'Aggregations processing task sent...'", ",", "fg", "=", "'yellow'", ")" ]
d877ae5462084abb4a28a20f1ebb3d636769c1bc
valid
_aggregations_delete
Delete computed aggregations.
invenio_stats/cli.py
def _aggregations_delete(aggregation_types=None,
                         start_date=None, end_date=None):
    """Delete computed aggregations."""
    aggregation_types = (aggregation_types or
                         list(current_stats.enabled_aggregations))
    for a in aggregation_types:
        aggr_cfg = current_stats.aggregations[a]
        aggregator = aggr_cfg.aggregator_class(
            name=aggr_cfg.name, **aggr_cfg.aggregator_config)
        aggregator.delete(start_date, end_date)

def _aggregations_delete(aggregation_types=None,
                         start_date=None, end_date=None):
    """Delete computed aggregations."""
    aggregation_types = (aggregation_types or
                         list(current_stats.enabled_aggregations))
    for a in aggregation_types:
        aggr_cfg = current_stats.aggregations[a]
        aggregator = aggr_cfg.aggregator_class(
            name=aggr_cfg.name, **aggr_cfg.aggregator_config)
        aggregator.delete(start_date, end_date)
[ "Delete", "computed", "aggregations", "." ]
inveniosoftware/invenio-stats
python
https://github.com/inveniosoftware/invenio-stats/blob/d877ae5462084abb4a28a20f1ebb3d636769c1bc/invenio_stats/cli.py#L132-L141
[ "def", "_aggregations_delete", "(", "aggregation_types", "=", "None", ",", "start_date", "=", "None", ",", "end_date", "=", "None", ")", ":", "aggregation_types", "=", "(", "aggregation_types", "or", "list", "(", "current_stats", ".", "enabled_aggregations", ")", ")", "for", "a", "in", "aggregation_types", ":", "aggr_cfg", "=", "current_stats", ".", "aggregations", "[", "a", "]", "aggregator", "=", "aggr_cfg", ".", "aggregator_class", "(", "name", "=", "aggr_cfg", ".", "name", ",", "*", "*", "aggr_cfg", ".", "aggregator_config", ")", "aggregator", ".", "delete", "(", "start_date", ",", "end_date", ")" ]
d877ae5462084abb4a28a20f1ebb3d636769c1bc
valid
_aggregations_list_bookmarks
List aggregation bookmarks.
invenio_stats/cli.py
def _aggregations_list_bookmarks(aggregation_types=None,
                                 start_date=None, end_date=None, limit=None):
    """List aggregation bookmarks."""
    aggregation_types = (aggregation_types or
                         list(current_stats.enabled_aggregations))
    for a in aggregation_types:
        aggr_cfg = current_stats.aggregations[a]
        aggregator = aggr_cfg.aggregator_class(
            name=aggr_cfg.name, **aggr_cfg.aggregator_config)
        bookmarks = aggregator.list_bookmarks(start_date, end_date, limit)
        click.echo('{}:'.format(a))
        for b in bookmarks:
            click.echo(' - {}'.format(b.date))

def _aggregations_list_bookmarks(aggregation_types=None,
                                 start_date=None, end_date=None, limit=None):
    """List aggregation bookmarks."""
    aggregation_types = (aggregation_types or
                         list(current_stats.enabled_aggregations))
    for a in aggregation_types:
        aggr_cfg = current_stats.aggregations[a]
        aggregator = aggr_cfg.aggregator_class(
            name=aggr_cfg.name, **aggr_cfg.aggregator_config)
        bookmarks = aggregator.list_bookmarks(start_date, end_date, limit)
        click.echo('{}:'.format(a))
        for b in bookmarks:
            click.echo(' - {}'.format(b.date))
[ "List", "aggregation", "bookmarks", "." ]
inveniosoftware/invenio-stats
python
https://github.com/inveniosoftware/invenio-stats/blob/d877ae5462084abb4a28a20f1ebb3d636769c1bc/invenio_stats/cli.py#L150-L162
[ "def", "_aggregations_list_bookmarks", "(", "aggregation_types", "=", "None", ",", "start_date", "=", "None", ",", "end_date", "=", "None", ",", "limit", "=", "None", ")", ":", "aggregation_types", "=", "(", "aggregation_types", "or", "list", "(", "current_stats", ".", "enabled_aggregations", ")", ")", "for", "a", "in", "aggregation_types", ":", "aggr_cfg", "=", "current_stats", ".", "aggregations", "[", "a", "]", "aggregator", "=", "aggr_cfg", ".", "aggregator_class", "(", "name", "=", "aggr_cfg", ".", "name", ",", "*", "*", "aggr_cfg", ".", "aggregator_config", ")", "bookmarks", "=", "aggregator", ".", "list_bookmarks", "(", "start_date", ",", "end_date", ",", "limit", ")", "click", ".", "echo", "(", "'{}:'", ".", "format", "(", "a", ")", ")", "for", "b", "in", "bookmarks", ":", "click", ".", "echo", "(", "' - {}'", ".", "format", "(", "b", ".", "date", ")", ")" ]
d877ae5462084abb4a28a20f1ebb3d636769c1bc
valid
_InvenioStatsState._events_config
Load events configuration.
invenio_stats/ext.py
def _events_config(self):
    """Load events configuration."""
    # import iter_entry_points here so that it can be mocked in tests
    result = {}
    for ep in iter_entry_points(
            group=self.entry_point_group_events):
        for cfg in ep.load()():
            if cfg['event_type'] not in self.enabled_events:
                continue
            elif cfg['event_type'] in result:
                raise DuplicateEventError(
                    'Duplicate event {0} in entry point '
                    '{1}'.format(cfg['event_type'], ep.name))
            # Update the default configuration with env/overlay config.
            cfg.update(
                self.enabled_events[cfg['event_type']] or {}
            )
            result[cfg['event_type']] = cfg
    return result

def _events_config(self):
    """Load events configuration."""
    # import iter_entry_points here so that it can be mocked in tests
    result = {}
    for ep in iter_entry_points(
            group=self.entry_point_group_events):
        for cfg in ep.load()():
            if cfg['event_type'] not in self.enabled_events:
                continue
            elif cfg['event_type'] in result:
                raise DuplicateEventError(
                    'Duplicate event {0} in entry point '
                    '{1}'.format(cfg['event_type'], ep.name))
            # Update the default configuration with env/overlay config.
            cfg.update(
                self.enabled_events[cfg['event_type']] or {}
            )
            result[cfg['event_type']] = cfg
    return result
[ "Load", "events", "configuration", "." ]
inveniosoftware/invenio-stats
python
https://github.com/inveniosoftware/invenio-stats/blob/d877ae5462084abb4a28a20f1ebb3d636769c1bc/invenio_stats/ext.py#L44-L62
[ "def", "_events_config", "(", "self", ")", ":", "# import iter_entry_points here so that it can be mocked in tests", "result", "=", "{", "}", "for", "ep", "in", "iter_entry_points", "(", "group", "=", "self", ".", "entry_point_group_events", ")", ":", "for", "cfg", "in", "ep", ".", "load", "(", ")", "(", ")", ":", "if", "cfg", "[", "'event_type'", "]", "not", "in", "self", ".", "enabled_events", ":", "continue", "elif", "cfg", "[", "'event_type'", "]", "in", "result", ":", "raise", "DuplicateEventError", "(", "'Duplicate event {0} in entry point '", "'{1}'", ".", "format", "(", "cfg", "[", "'event_type'", "]", ",", "ep", ".", "name", ")", ")", "# Update the default configuration with env/overlay config.", "cfg", ".", "update", "(", "self", ".", "enabled_events", "[", "cfg", "[", "'event_type'", "]", "]", "or", "{", "}", ")", "result", "[", "cfg", "[", "'event_type'", "]", "]", "=", "cfg", "return", "result" ]
d877ae5462084abb4a28a20f1ebb3d636769c1bc
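The enable/dedup/override flow in _events_config and its siblings can be exercised without entry points; below is a minimal sketch where plain lists stand in for entry-point loaders (the names declared and enabled are illustrative, not invenio-stats API):
declared = [{'event_type': 'file-download', 'templates': 'v1'},
            {'event_type': 'record-view', 'templates': 'v1'}]
enabled = {'file-download': {'templates': 'v2'}}  # overlay config; a None value would mean "no override"

result = {}
for cfg in declared:
    if cfg['event_type'] not in enabled:
        continue  # event not enabled: skip it entirely
    if cfg['event_type'] in result:
        raise ValueError('Duplicate event {}'.format(cfg['event_type']))
    cfg.update(enabled[cfg['event_type']] or {})  # overlay wins over the declared defaults
    result[cfg['event_type']] = cfg

assert list(result) == ['file-download']
assert result['file-download']['templates'] == 'v2'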
valid
_InvenioStatsState._aggregations_config
Load aggregation configurations.
invenio_stats/ext.py
def _aggregations_config(self): """Load aggregation configurations.""" result = {} for ep in iter_entry_points( group=self.entry_point_group_aggs): for cfg in ep.load()(): if cfg['aggregation_name'] not in self.enabled_aggregations: continue elif cfg['aggregation_name'] in result: raise DuplicateAggregationError( 'Duplicate aggregation {0} in entry point ' '{1}'.format(cfg['aggregation_name'], ep.name)) # Update the default configuration with env/overlay config. cfg.update( self.enabled_aggregations[cfg['aggregation_name']] or {} ) result[cfg['aggregation_name']] = cfg return result
def _aggregations_config(self): """Load aggregation configurations.""" result = {} for ep in iter_entry_points( group=self.entry_point_group_aggs): for cfg in ep.load()(): if cfg['aggregation_name'] not in self.enabled_aggregations: continue elif cfg['aggregation_name'] in result: raise DuplicateAggregationError( 'Duplicate aggregation {0} in entry point ' '{1}'.format(cfg['aggregation_name'], ep.name)) # Update the default configuration with env/overlay config. cfg.update( self.enabled_aggregations[cfg['aggregation_name']] or {} ) result[cfg['aggregation_name']] = cfg return result
[ "Load", "aggregation", "configurations", "." ]
inveniosoftware/invenio-stats
python
https://github.com/inveniosoftware/invenio-stats/blob/d877ae5462084abb4a28a20f1ebb3d636769c1bc/invenio_stats/ext.py#L93-L110
[ "def", "_aggregations_config", "(", "self", ")", ":", "result", "=", "{", "}", "for", "ep", "in", "iter_entry_points", "(", "group", "=", "self", ".", "entry_point_group_aggs", ")", ":", "for", "cfg", "in", "ep", ".", "load", "(", ")", "(", ")", ":", "if", "cfg", "[", "'aggregation_name'", "]", "not", "in", "self", ".", "enabled_aggregations", ":", "continue", "elif", "cfg", "[", "'aggregation_name'", "]", "in", "result", ":", "raise", "DuplicateAggregationError", "(", "'Duplicate aggregation {0} in entry point '", "'{1}'", ".", "format", "(", "cfg", "[", "'event_type'", "]", ",", "ep", ".", "name", ")", ")", "# Update the default configuration with env/overlay config.", "cfg", ".", "update", "(", "self", ".", "enabled_aggregations", "[", "cfg", "[", "'aggregation_name'", "]", "]", "or", "{", "}", ")", "result", "[", "cfg", "[", "'aggregation_name'", "]", "]", "=", "cfg", "return", "result" ]
d877ae5462084abb4a28a20f1ebb3d636769c1bc
valid
_InvenioStatsState._queries_config
Load queries configuration.
invenio_stats/ext.py
def _queries_config(self): """Load queries configuration.""" result = {} for ep in iter_entry_points(group=self.entry_point_group_queries): for cfg in ep.load()(): if cfg['query_name'] not in self.enabled_queries: continue elif cfg['query_name'] in result: raise DuplicateQueryError( 'Duplicate query {0} in entry point ' '{1}'.format(cfg['query_name'], ep.name)) # Update the default configuration with env/overlay config. cfg.update( self.enabled_queries[cfg['query_name']] or {} ) result[cfg['query_name']] = cfg return result
def _queries_config(self): """Load queries configuration.""" result = {} for ep in iter_entry_points(group=self.entry_point_group_queries): for cfg in ep.load()(): if cfg['query_name'] not in self.enabled_queries: continue elif cfg['query_name'] in result: raise DuplicateQueryError( 'Duplicate query {0} in entry point ' '{1}'.format(cfg['query_name'], ep.name)) # Update the default configuration with env/overlay config. cfg.update( self.enabled_queries[cfg['query_name']] or {} ) result[cfg['query_name']] = cfg return result
[ "Load", "queries", "configuration", "." ]
inveniosoftware/invenio-stats
python
https://github.com/inveniosoftware/invenio-stats/blob/d877ae5462084abb4a28a20f1ebb3d636769c1bc/invenio_stats/ext.py#L138-L154
[ "def", "_queries_config", "(", "self", ")", ":", "result", "=", "{", "}", "for", "ep", "in", "iter_entry_points", "(", "group", "=", "self", ".", "entry_point_group_queries", ")", ":", "for", "cfg", "in", "ep", ".", "load", "(", ")", "(", ")", ":", "if", "cfg", "[", "'query_name'", "]", "not", "in", "self", ".", "enabled_queries", ":", "continue", "elif", "cfg", "[", "'query_name'", "]", "in", "result", ":", "raise", "DuplicateQueryError", "(", "'Duplicate query {0} in entry point '", "'{1}'", ".", "format", "(", "cfg", "[", "'query'", "]", ",", "ep", ".", "name", ")", ")", "# Update the default configuration with env/overlay config.", "cfg", ".", "update", "(", "self", ".", "enabled_queries", "[", "cfg", "[", "'query_name'", "]", "]", "or", "{", "}", ")", "result", "[", "cfg", "[", "'query_name'", "]", "]", "=", "cfg", "return", "result" ]
d877ae5462084abb4a28a20f1ebb3d636769c1bc
valid
_InvenioStatsState.publish
Publish events.
invenio_stats/ext.py
def publish(self, event_type, events): """Publish events.""" assert event_type in self.events current_queues.queues['stats-{}'.format(event_type)].publish(events)
def publish(self, event_type, events): """Publish events.""" assert event_type in self.events current_queues.queues['stats-{}'.format(event_type)].publish(events)
[ "Publish", "events", "." ]
inveniosoftware/invenio-stats
python
https://github.com/inveniosoftware/invenio-stats/blob/d877ae5462084abb4a28a20f1ebb3d636769c1bc/invenio_stats/ext.py#L189-L192
[ "def", "publish", "(", "self", ",", "event_type", ",", "events", ")", ":", "assert", "event_type", "in", "self", ".", "events", "current_queues", ".", "queues", "[", "'stats-{}'", ".", "format", "(", "event_type", ")", "]", ".", "publish", "(", "events", ")" ]
d877ae5462084abb4a28a20f1ebb3d636769c1bc
valid
_InvenioStatsState.consume
Consume all pending events.
invenio_stats/ext.py
def consume(self, event_type, no_ack=True, payload=True): """Consume all pending events.""" assert event_type in self.events return current_queues.queues['stats-{}'.format(event_type)].consume( payload=payload)
def consume(self, event_type, no_ack=True, payload=True): """Consume all pending events.""" assert event_type in self.events return current_queues.queues['stats-{}'.format(event_type)].consume( payload=payload)
[ "Comsume", "all", "pending", "events", "." ]
inveniosoftware/invenio-stats
python
https://github.com/inveniosoftware/invenio-stats/blob/d877ae5462084abb4a28a20f1ebb3d636769c1bc/invenio_stats/ext.py#L194-L198
[ "def", "consume", "(", "self", ",", "event_type", ",", "no_ack", "=", "True", ",", "payload", "=", "True", ")", ":", "assert", "event_type", "in", "self", ".", "events", "return", "current_queues", ".", "queues", "[", "'stats-{}'", ".", "format", "(", "event_type", ")", "]", ".", "consume", "(", "payload", "=", "payload", ")" ]
d877ae5462084abb4a28a20f1ebb3d636769c1bc
valid
InvenioStats.init_app
Flask application initialization.
invenio_stats/ext.py
def init_app(self, app, entry_point_group_events='invenio_stats.events', entry_point_group_aggs='invenio_stats.aggregations', entry_point_group_queries='invenio_stats.queries'): """Flask application initialization.""" self.init_config(app) state = _InvenioStatsState( app, entry_point_group_events=entry_point_group_events, entry_point_group_aggs=entry_point_group_aggs, entry_point_group_queries=entry_point_group_queries ) self._state = app.extensions['invenio-stats'] = state if app.config['STATS_REGISTER_RECEIVERS']: signal_receivers = {key: value for key, value in app.config.get('STATS_EVENTS', {}).items() if 'signal' in value} register_receivers(app, signal_receivers) return state
def init_app(self, app, entry_point_group_events='invenio_stats.events', entry_point_group_aggs='invenio_stats.aggregations', entry_point_group_queries='invenio_stats.queries'): """Flask application initialization.""" self.init_config(app) state = _InvenioStatsState( app, entry_point_group_events=entry_point_group_events, entry_point_group_aggs=entry_point_group_aggs, entry_point_group_queries=entry_point_group_queries ) self._state = app.extensions['invenio-stats'] = state if app.config['STATS_REGISTER_RECEIVERS']: signal_receivers = {key: value for key, value in app.config.get('STATS_EVENTS', {}).items() if 'signal' in value} register_receivers(app, signal_receivers) return state
[ "Flask", "application", "initialization", "." ]
inveniosoftware/invenio-stats
python
https://github.com/inveniosoftware/invenio-stats/blob/d877ae5462084abb4a28a20f1ebb3d636769c1bc/invenio_stats/ext.py#L209-L230
[ "def", "init_app", "(", "self", ",", "app", ",", "entry_point_group_events", "=", "'invenio_stats.events'", ",", "entry_point_group_aggs", "=", "'invenio_stats.aggregations'", ",", "entry_point_group_queries", "=", "'invenio_stats.queries'", ")", ":", "self", ".", "init_config", "(", "app", ")", "state", "=", "_InvenioStatsState", "(", "app", ",", "entry_point_group_events", "=", "entry_point_group_events", ",", "entry_point_group_aggs", "=", "entry_point_group_aggs", ",", "entry_point_group_queries", "=", "entry_point_group_queries", ")", "self", ".", "_state", "=", "app", ".", "extensions", "[", "'invenio-stats'", "]", "=", "state", "if", "app", ".", "config", "[", "'STATS_REGISTER_RECEIVERS'", "]", ":", "signal_receivers", "=", "{", "key", ":", "value", "for", "key", ",", "value", "in", "app", ".", "config", ".", "get", "(", "'STATS_EVENTS'", ",", "{", "}", ")", ".", "items", "(", ")", "if", "'signal'", "in", "value", "}", "register_receivers", "(", "app", ",", "signal_receivers", ")", "return", "state" ]
d877ae5462084abb4a28a20f1ebb3d636769c1bc
valid
ActorRef.tell
Send a message to this actor. Asynchronous fire-and-forget. :param message: The message to send. :type message: Any :param sender: The sender of the message. If provided it will be made available to the receiving actor via the :attr:`Actor.sender` attribute. :type sender: :class:`Actor`
actors/ref.py
def tell(self, message, sender=no_sender): """ Send a message to this actor. Asynchronous fire-and-forget. :param message: The message to send. :type message: Any :param sender: The sender of the message. If provided it will be made available to the receiving actor via the :attr:`Actor.sender` attribute. :type sender: :class:`Actor` """ if sender is not no_sender and not isinstance(sender, ActorRef): raise ValueError("Sender must be actor reference") self._cell.send_message(message, sender)
def tell(self, message, sender=no_sender): """ Send a message to this actor. Asynchronous fire-and-forget. :param message: The message to send. :type message: Any :param sender: The sender of the message. If provided it will be made available to the receiving actor via the :attr:`Actor.sender` attribute. :type sender: :class:`Actor` """ if sender is not no_sender and not isinstance(sender, ActorRef): raise ValueError("Sender must be actor reference") self._cell.send_message(message, sender)
[ "Send", "a", "message", "to", "this", "actor", ".", "Asynchronous", "fire", "-", "and", "-", "forget", "." ]
tamland/python-actors
python
https://github.com/tamland/python-actors/blob/9f826ab2947c665d61363a6ebc401e9e42cc6238/actors/ref.py#L26-L39
[ "def", "tell", "(", "self", ",", "message", ",", "sender", "=", "no_sender", ")", ":", "if", "sender", "is", "not", "no_sender", "and", "not", "isinstance", "(", "sender", ",", "ActorRef", ")", ":", "raise", "ValueError", "(", "\"Sender must be actor reference\"", ")", "self", ".", "_cell", ".", "send_message", "(", "message", ",", "sender", ")" ]
9f826ab2947c665d61363a6ebc401e9e42cc6238
valid
get_anonymization_salt
Get the anonymization salt based on the event timestamp's day.
invenio_stats/utils.py
def get_anonymization_salt(ts): """Get the anonymization salt based on the event timestamp's day.""" salt_key = 'stats:salt:{}'.format(ts.date().isoformat()) salt = current_cache.get(salt_key) if not salt: salt_bytes = os.urandom(32) salt = b64encode(salt_bytes).decode('utf-8') current_cache.set(salt_key, salt, timeout=60 * 60 * 24) return salt
def get_anonymization_salt(ts): """Get the anonymization salt based on the event timestamp's day.""" salt_key = 'stats:salt:{}'.format(ts.date().isoformat()) salt = current_cache.get(salt_key) if not salt: salt_bytes = os.urandom(32) salt = b64encode(salt_bytes).decode('utf-8') current_cache.set(salt_key, salt, timeout=60 * 60 * 24) return salt
[ "Get", "the", "anonymization", "salt", "based", "on", "the", "event", "timestamp", "s", "day", "." ]
inveniosoftware/invenio-stats
python
https://github.com/inveniosoftware/invenio-stats/blob/d877ae5462084abb4a28a20f1ebb3d636769c1bc/invenio_stats/utils.py#L24-L32
[ "def", "get_anonymization_salt", "(", "ts", ")", ":", "salt_key", "=", "'stats:salt:{}'", ".", "format", "(", "ts", ".", "date", "(", ")", ".", "isoformat", "(", ")", ")", "salt", "=", "current_cache", ".", "get", "(", "salt_key", ")", "if", "not", "salt", ":", "salt_bytes", "=", "os", ".", "urandom", "(", "32", ")", "salt", "=", "b64encode", "(", "salt_bytes", ")", ".", "decode", "(", "'utf-8'", ")", "current_cache", ".", "set", "(", "salt_key", ",", "salt", ",", "timeout", "=", "60", "*", "60", "*", "24", ")", "return", "salt" ]
d877ae5462084abb4a28a20f1ebb3d636769c1bc
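The day-scoped salt logic is easy to try in isolation; here a plain dict stands in for current_cache and the 24-hour expiry is omitted (both are assumptions for the sketch):
import os
from base64 import b64encode
from datetime import datetime

_cache = {}  # stand-in for current_cache; the real entries expire after 24 hours

def get_salt(ts):
    key = 'stats:salt:{}'.format(ts.date().isoformat())
    salt = _cache.get(key)
    if not salt:
        salt = b64encode(os.urandom(32)).decode('utf-8')
        _cache[key] = salt
    return salt

# events within the same day share a salt; a new day yields a fresh one
assert get_salt(datetime(2019, 1, 1, 9)) == get_salt(datetime(2019, 1, 1, 17))
assert get_salt(datetime(2019, 1, 2, 9)) != get_salt(datetime(2019, 1, 1, 9))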
valid
get_geoip
Lookup country for IP address.
invenio_stats/utils.py
def get_geoip(ip): """Lookup country for IP address.""" reader = geolite2.reader() ip_data = reader.get(ip) or {} return ip_data.get('country', {}).get('iso_code')
def get_geoip(ip): """Lookup country for IP address.""" reader = geolite2.reader() ip_data = reader.get(ip) or {} return ip_data.get('country', {}).get('iso_code')
[ "Lookup", "country", "for", "IP", "address", "." ]
inveniosoftware/invenio-stats
python
https://github.com/inveniosoftware/invenio-stats/blob/d877ae5462084abb4a28a20f1ebb3d636769c1bc/invenio_stats/utils.py#L35-L39
[ "def", "get_geoip", "(", "ip", ")", ":", "reader", "=", "geolite2", ".", "reader", "(", ")", "ip_data", "=", "reader", ".", "get", "(", "ip", ")", "or", "{", "}", "return", "ip_data", ".", "get", "(", "'country'", ",", "{", "}", ")", ".", "get", "(", "'iso_code'", ")" ]
d877ae5462084abb4a28a20f1ebb3d636769c1bc
valid
get_user
User information. .. note:: **Privacy note** A user's IP address, user agent string, and user id (if logged in) are sent to a message queue, where they are stored for about 5 minutes. The information is used to: - Detect robot visits from the user agent string. - Generate an anonymized visitor id (using a random salt per day). - Detect the user's host country based on the IP address. The information is then discarded.
invenio_stats/utils.py
def get_user(): """User information. .. note:: **Privacy note** A user's IP address, user agent string, and user id (if logged in) are sent to a message queue, where they are stored for about 5 minutes. The information is used to: - Detect robot visits from the user agent string. - Generate an anonymized visitor id (using a random salt per day). - Detect the user's host country based on the IP address. The information is then discarded. """ return dict( ip_address=request.remote_addr, user_agent=request.user_agent.string, user_id=( current_user.get_id() if current_user.is_authenticated else None ), session_id=session.get('sid_s') )
def get_user(): """User information. .. note:: **Privacy note** A user's IP address, user agent string, and user id (if logged in) are sent to a message queue, where they are stored for about 5 minutes. The information is used to: - Detect robot visits from the user agent string. - Generate an anonymized visitor id (using a random salt per day). - Detect the user's host country based on the IP address. The information is then discarded. """ return dict( ip_address=request.remote_addr, user_agent=request.user_agent.string, user_id=( current_user.get_id() if current_user.is_authenticated else None ), session_id=session.get('sid_s') )
[ "User", "information", "." ]
inveniosoftware/invenio-stats
python
https://github.com/inveniosoftware/invenio-stats/blob/d877ae5462084abb4a28a20f1ebb3d636769c1bc/invenio_stats/utils.py#L42-L64
[ "def", "get_user", "(", ")", ":", "return", "dict", "(", "ip_address", "=", "request", ".", "remote_addr", ",", "user_agent", "=", "request", ".", "user_agent", ".", "string", ",", "user_id", "=", "(", "current_user", ".", "get_id", "(", ")", "if", "current_user", ".", "is_authenticated", "else", "None", ")", ",", "session_id", "=", "session", ".", "get", "(", "'sid_s'", ")", ")" ]
d877ae5462084abb4a28a20f1ebb3d636769c1bc
valid
default_permission_factory
Default permission factory. It enables by default the statistics if they don't have a dedicated permission factory.
invenio_stats/utils.py
def default_permission_factory(query_name, params): """Default permission factory. It enables by default the statistics if they don't have a dedicated permission factory. """ from invenio_stats import current_stats if current_stats.queries[query_name].permission_factory is None: return AllowAllPermission else: return current_stats.queries[query_name].permission_factory( query_name, params )
def default_permission_factory(query_name, params): """Default permission factory. It enables by default the statistics if they don't have a dedicated permission factory. """ from invenio_stats import current_stats if current_stats.queries[query_name].permission_factory is None: return AllowAllPermission else: return current_stats.queries[query_name].permission_factory( query_name, params )
[ "Default", "permission", "factory", "." ]
inveniosoftware/invenio-stats
python
https://github.com/inveniosoftware/invenio-stats/blob/d877ae5462084abb4a28a20f1ebb3d636769c1bc/invenio_stats/utils.py#L97-L109
[ "def", "default_permission_factory", "(", "query_name", ",", "params", ")", ":", "from", "invenio_stats", "import", "current_stats", "if", "current_stats", ".", "queries", "[", "query_name", "]", ".", "permission_factory", "is", "None", ":", "return", "AllowAllPermission", "else", ":", "return", "current_stats", ".", "queries", "[", "query_name", "]", ".", "permission_factory", "(", "query_name", ",", "params", ")" ]
d877ae5462084abb4a28a20f1ebb3d636769c1bc
valid
load_config
Load settings from default config and optionally overwrite with config file and commandline parameters (in that order).
protector/config/loader.py
def load_config(): """ Load settings from default config and optionally overwrite with config file and commandline parameters (in that order). """ # We start with the default config config = flatten(default_config.DEFAULT_CONFIG) # Read commandline arguments cli_config = flatten(parse_args()) if "configfile" in cli_config: logging.info("Reading config file {}".format(cli_config['configfile'])) configfile = parse_configfile(cli_config['configfile']) config = overwrite_config(config, configfile) # Parameters from commandline take precedence over all others config = overwrite_config(config, cli_config) # Set verbosity level if 'verbose' in config: if config['verbose'] == 1: logging.getLogger().setLevel(logging.INFO) elif config['verbose'] > 1: logging.getLogger().setLevel(logging.DEBUG) return ObjectView(config)
def load_config(): """ Load settings from default config and optionally overwrite with config file and commandline parameters (in that order). """ # We start with the default config config = flatten(default_config.DEFAULT_CONFIG) # Read commandline arguments cli_config = flatten(parse_args()) if "configfile" in cli_config: logging.info("Reading config file {}".format(cli_config['configfile'])) configfile = parse_configfile(cli_config['configfile']) config = overwrite_config(config, configfile) # Parameters from commandline take precedence over all others config = overwrite_config(config, cli_config) # Set verbosity level if 'verbose' in config: if config['verbose'] == 1: logging.getLogger().setLevel(logging.INFO) elif config['verbose'] > 1: logging.getLogger().setLevel(logging.DEBUG) return ObjectView(config)
[ "Load", "settings", "from", "default", "config", "and", "optionally", "overwrite", "with", "config", "file", "and", "commandline", "parameters", "(", "in", "that", "order", ")", "." ]
trivago/Protector
python
https://github.com/trivago/Protector/blob/7ebe7bde965e27737b961a0cb5740724d174fdc7/protector/config/loader.py#L12-L39
[ "def", "load_config", "(", ")", ":", "# We start with the default config", "config", "=", "flatten", "(", "default_config", ".", "DEFAULT_CONFIG", ")", "# Read commandline arguments", "cli_config", "=", "flatten", "(", "parse_args", "(", ")", ")", "if", "\"configfile\"", "in", "cli_config", ":", "logging", ".", "info", "(", "\"Reading config file {}\"", ".", "format", "(", "cli_config", "[", "'configfile'", "]", ")", ")", "configfile", "=", "parse_configfile", "(", "cli_config", "[", "'configfile'", "]", ")", "config", "=", "overwrite_config", "(", "config", ",", "configfile", ")", "# Parameters from commandline take precedence over all others", "config", "=", "overwrite_config", "(", "config", ",", "cli_config", ")", "# Set verbosity level", "if", "'verbose'", "in", "config", ":", "if", "config", "[", "'verbose'", "]", "==", "1", ":", "logging", ".", "getLogger", "(", ")", ".", "setLevel", "(", "logging", ".", "INFO", ")", "elif", "config", "[", "'verbose'", "]", ">", "1", ":", "logging", ".", "getLogger", "(", ")", ".", "setLevel", "(", "logging", ".", "DEBUG", ")", "return", "ObjectView", "(", "config", ")" ]
7ebe7bde965e27737b961a0cb5740724d174fdc7
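The precedence chain in load_config (defaults, then config file, then CLI) can be sketched with plain dicts, treating overwrite_config as a shallow dict merge (a simplifying assumption):
defaults = {'port': 8888, 'verbose': 0}
configfile = {'port': 9999}
cli = {'verbose': 2}

config = dict(defaults)
config.update(configfile)  # the config file overrides the defaults
config.update(cli)         # CLI parameters take precedence over everything
assert config == {'port': 9999, 'verbose': 2}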
valid
parse_configfile
Read settings from file :param configfile:
protector/config/loader.py
def parse_configfile(configfile): """ Read settings from file :param configfile: """ with open(configfile) as f: try: return yaml.safe_load(f) except Exception as e: logging.fatal("Could not load default config file: %s", e) exit(-1)
def parse_configfile(configfile): """ Read settings from file :param configfile: """ with open(configfile) as f: try: return yaml.safe_load(f) except Exception as e: logging.fatal("Could not load default config file: %s", e) exit(-1)
[ "Read", "settings", "from", "file", ":", "param", "configfile", ":" ]
trivago/Protector
python
https://github.com/trivago/Protector/blob/7ebe7bde965e27737b961a0cb5740724d174fdc7/protector/config/loader.py#L48-L58
[ "def", "parse_configfile", "(", "configfile", ")", ":", "with", "open", "(", "configfile", ")", "as", "f", ":", "try", ":", "return", "yaml", ".", "safe_load", "(", "f", ")", "except", "Exception", "as", "e", ":", "logging", ".", "fatal", "(", "\"Could not load default config file: %s\"", ",", "e", ")", "exit", "(", "-", "1", ")" ]
7ebe7bde965e27737b961a0cb5740724d174fdc7
valid
register_templates
Register elasticsearch templates for events.
invenio_stats/templates.py
def register_templates(): """Register elasticsearch templates for events.""" event_templates = [current_stats._events_config[e] ['templates'] for e in current_stats._events_config] aggregation_templates = [current_stats._aggregations_config[a] ['templates'] for a in current_stats._aggregations_config] return event_templates + aggregation_templates
def register_templates(): """Register elasticsearch templates for events.""" event_templates = [current_stats._events_config[e] ['templates'] for e in current_stats._events_config] aggregation_templates = [current_stats._aggregations_config[a] ['templates'] for a in current_stats._aggregations_config] return event_templates + aggregation_templates
[ "Register", "elasticsearch", "templates", "for", "events", "." ]
inveniosoftware/invenio-stats
python
https://github.com/inveniosoftware/invenio-stats/blob/d877ae5462084abb4a28a20f1ebb3d636769c1bc/invenio_stats/templates.py#L14-L24
[ "def", "register_templates", "(", ")", ":", "event_templates", "=", "[", "current_stats", ".", "_events_config", "[", "e", "]", "[", "'templates'", "]", "for", "e", "in", "current_stats", ".", "_events_config", "]", "aggregation_templates", "=", "[", "current_stats", ".", "_aggregations_config", "[", "a", "]", "[", "'templates'", "]", "for", "a", "in", "current_stats", ".", "_aggregations_config", "]", "return", "event_templates", "+", "aggregation_templates" ]
d877ae5462084abb4a28a20f1ebb3d636769c1bc
valid
RuleChecker.check
:param query:
protector/rules/short_series_name.py
def check(self, query): """ :param query: """ if query.get_type() in {Keyword.LIST, Keyword.DROP}: series = query.series_stmt else: series = query.from_stmt if len(series) >= self.min_series_name_length: return Ok(True) return Err("Series name too short. Please be more precise.")
def check(self, query): """ :param query: """ if query.get_type() in {Keyword.LIST, Keyword.DROP}: series = query.series_stmt else: series = query.from_stmt if len(series) >= self.min_series_name_length: return Ok(True) return Err("Series name too short. Please be more precise.")
[ ":", "param", "query", ":" ]
trivago/Protector
python
https://github.com/trivago/Protector/blob/7ebe7bde965e27737b961a0cb5740724d174fdc7/protector/rules/short_series_name.py#L21-L33
[ "def", "check", "(", "self", ",", "query", ")", ":", "if", "query", ".", "get_type", "(", ")", "in", "{", "Keyword", ".", "LIST", ",", "Keyword", ".", "DROP", "}", ":", "series", "=", "query", ".", "series_stmt", "else", ":", "series", "=", "query", ".", "from_stmt", "if", "len", "(", "series", ")", ">=", "self", ".", "min_series_name_length", ":", "return", "Ok", "(", "True", ")", "return", "Err", "(", "\"Series name too short. Please be more precise.\"", ")" ]
7ebe7bde965e27737b961a0cb5740724d174fdc7
valid
process_events
Index statistics events.
invenio_stats/tasks.py
def process_events(event_types): """Index statistics events.""" results = [] for e in event_types: processor = current_stats.events[e].processor_class( **current_stats.events[e].processor_config) results.append((e, processor.run())) return results
def process_events(event_types): """Index statistics events.""" results = [] for e in event_types: processor = current_stats.events[e].processor_class( **current_stats.events[e].processor_config) results.append((e, processor.run())) return results
[ "Index", "statistics", "events", "." ]
inveniosoftware/invenio-stats
python
https://github.com/inveniosoftware/invenio-stats/blob/d877ae5462084abb4a28a20f1ebb3d636769c1bc/invenio_stats/tasks.py#L20-L27
[ "def", "process_events", "(", "event_types", ")", ":", "results", "=", "[", "]", "for", "e", "in", "event_types", ":", "processor", "=", "current_stats", ".", "events", "[", "e", "]", ".", "processor_class", "(", "*", "*", "current_stats", ".", "events", "[", "e", "]", ".", "processor_config", ")", "results", ".", "append", "(", "(", "e", ",", "processor", ".", "run", "(", ")", ")", ")", "return", "results" ]
d877ae5462084abb4a28a20f1ebb3d636769c1bc
valid
aggregate_events
Aggregate indexed events.
invenio_stats/tasks.py
def aggregate_events(aggregations, start_date=None, end_date=None, update_bookmark=True): """Aggregate indexed events.""" start_date = dateutil_parse(start_date) if start_date else None end_date = dateutil_parse(end_date) if end_date else None results = [] for a in aggregations: aggr_cfg = current_stats.aggregations[a] aggregator = aggr_cfg.aggregator_class( name=aggr_cfg.name, **aggr_cfg.aggregator_config) results.append(aggregator.run(start_date, end_date, update_bookmark)) return results
def aggregate_events(aggregations, start_date=None, end_date=None, update_bookmark=True): """Aggregate indexed events.""" start_date = dateutil_parse(start_date) if start_date else None end_date = dateutil_parse(end_date) if end_date else None results = [] for a in aggregations: aggr_cfg = current_stats.aggregations[a] aggregator = aggr_cfg.aggregator_class( name=aggr_cfg.name, **aggr_cfg.aggregator_config) results.append(aggregator.run(start_date, end_date, update_bookmark)) return results
[ "Aggregate", "indexed", "events", "." ]
inveniosoftware/invenio-stats
python
https://github.com/inveniosoftware/invenio-stats/blob/d877ae5462084abb4a28a20f1ebb3d636769c1bc/invenio_stats/tasks.py#L31-L42
[ "def", "aggregate_events", "(", "aggregations", ",", "start_date", "=", "None", ",", "end_date", "=", "None", ",", "update_bookmark", "=", "True", ")", ":", "start_date", "=", "dateutil_parse", "(", "start_date", ")", "if", "start_date", "else", "None", "end_date", "=", "dateutil_parse", "(", "end_date", ")", "if", "end_date", "else", "None", "results", "=", "[", "]", "for", "a", "in", "aggregations", ":", "aggr_cfg", "=", "current_stats", ".", "aggregations", "[", "a", "]", "aggregator", "=", "aggr_cfg", ".", "aggregator_class", "(", "name", "=", "aggr_cfg", ".", "name", ",", "*", "*", "aggr_cfg", ".", "aggregator_config", ")", "results", ".", "append", "(", "aggregator", ".", "run", "(", "start_date", ",", "end_date", ",", "update_bookmark", ")", ")", "return", "results" ]
d877ae5462084abb4a28a20f1ebb3d636769c1bc
valid
ask
Send a message to `actor` and return a :class:`Future` holding a possible reply. To receive a result, the actor MUST send a reply to `sender`. :param actor: :type actor: :class:`ActorRef`. :param message: :type message: Any :return: A future holding the result.
actors/utils/ask.py
def ask(actor, message): """ Send a message to `actor` and return a :class:`Future` holding a possible reply. To receive a result, the actor MUST send a reply to `sender`. :param actor: :type actor: :class:`ActorRef`. :param message: :type message: Any :return: A future holding the result. """ sender = PromiseActorRef() actor.tell(message, sender) return sender.promise.future
def ask(actor, message): """ Send a message to `actor` and return a :class:`Future` holding a possible reply. To receive a result, the actor MUST send a reply to `sender`. :param actor: :type actor: :class:`ActorRef`. :param message: :type message: Any :return: A future holding the result. """ sender = PromiseActorRef() actor.tell(message, sender) return sender.promise.future
[ "Send", "a", "message", "to", "actor", "and", "return", "a", ":", "class", ":", "Future", "holding", "a", "possible", "reply", "." ]
tamland/python-actors
python
https://github.com/tamland/python-actors/blob/9f826ab2947c665d61363a6ebc401e9e42cc6238/actors/utils/ask.py#L30-L47
[ "def", "ask", "(", "actor", ",", "message", ")", ":", "sender", "=", "PromiseActorRef", "(", ")", "actor", ".", "tell", "(", "message", ",", "sender", ")", "return", "sender", ".", "promise", ".", "future" ]
9f826ab2947c665d61363a6ebc401e9e42cc6238
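The ask pattern above relies on the actor replying to a promise-backed sender. PromiseActorRef's internals are not shown in this record, so the sketch below only mirrors the shape with hypothetical stub classes:
class StubPromise(object):
    def __init__(self):
        self.value = None  # resolved reply, if any

    def complete(self, value):
        self.value = value

class StubSenderRef(object):
    """Hypothetical stand-in for PromiseActorRef."""
    def __init__(self):
        self.promise = StubPromise()

    def tell(self, message, sender=None):
        self.promise.complete(message)  # a reply to the sender resolves the promise

class EchoActor(object):
    def tell(self, message, sender=None):
        sender.tell(message.upper())  # the actor MUST reply for ask() to resolve

sender = StubSenderRef()
EchoActor().tell('ping', sender)
assert sender.promise.value == 'PING'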
valid
ProxyRequestHandler.get_queries
Get a list of all queries (q=... parameters) from a URL parameter string :param parameters: The url parameter list
protector/proxy/request_handler.py
def get_queries(parameters): """ Get a list of all queries (q=... parameters) from a URL parameter string :param parameters: The url parameter list """ parsed_params = urlparse.parse_qs(parameters) if 'q' not in parsed_params: return [] queries = parsed_params['q'] # Check if only one query string is given # in this case make it a list if not isinstance(queries, list): queries = [queries] return queries
def get_queries(parameters): """ Get a list of all queries (q=... parameters) from a URL parameter string :param parameters: The url parameter list """ parsed_params = urlparse.parse_qs(parameters) if 'q' not in parsed_params: return [] queries = parsed_params['q'] # Check if only one query string is given # in this case make it a list if not isinstance(queries, list): queries = [queries] return queries
[ "Get", "a", "list", "of", "all", "queries", "(", "q", "=", "...", "parameters", ")", "from", "an", "URL", "parameter", "string", ":", "param", "parameters", ":", "The", "url", "parameter", "list" ]
trivago/Protector
python
https://github.com/trivago/Protector/blob/7ebe7bde965e27737b961a0cb5740724d174fdc7/protector/proxy/request_handler.py#L119-L133
[ "def", "get_queries", "(", "parameters", ")", ":", "parsed_params", "=", "urlparse", ".", "parse_qs", "(", "parameters", ")", "if", "'q'", "not", "in", "parsed_params", ":", "return", "[", "]", "queries", "=", "parsed_params", "[", "'q'", "]", "# Check if only one query string is given", "# in this case make it a list", "if", "not", "isinstance", "(", "queries", ",", "list", ")", ":", "queries", "=", "[", "queries", "]", "return", "queries" ]
7ebe7bde965e27737b961a0cb5740724d174fdc7
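The q-parameter extraction can be tried standalone. The original targets Python 2's urlparse; this sketch uses Python 3's urllib.parse, where parse_qs always returns lists (which is why the isinstance check above is purely defensive):
from urllib.parse import parse_qs

params = 'db=metrics&q=select+*+from+cpu&q=list+series'
queries = parse_qs(params).get('q', [])
assert queries == ['select * from cpu', 'list series']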
valid
ProxyRequestHandler._handle_request
Run the actual request
protector/proxy/request_handler.py
def _handle_request(self, scheme, netloc, path, headers, body=None, method="GET"): """ Run the actual request """ backend_url = "{}://{}{}".format(scheme, netloc, path) try: response = self.http_request.request(backend_url, method=method, body=body, headers=dict(headers)) self._return_response(response) except Exception as e: body = "Invalid response from backend: '{}' Server might be busy".format(e.message) logging.debug(body) self.send_error(httplib.SERVICE_UNAVAILABLE, body)
def _handle_request(self, scheme, netloc, path, headers, body=None, method="GET"): """ Run the actual request """ backend_url = "{}://{}{}".format(scheme, netloc, path) try: response = self.http_request.request(backend_url, method=method, body=body, headers=dict(headers)) self._return_response(response) except Exception as e: body = "Invalid response from backend: '{}' Server might be busy".format(e.message) logging.debug(body) self.send_error(httplib.SERVICE_UNAVAILABLE, body)
[ "Run", "the", "actual", "request" ]
trivago/Protector
python
https://github.com/trivago/Protector/blob/7ebe7bde965e27737b961a0cb5740724d174fdc7/protector/proxy/request_handler.py#L162-L173
[ "def", "_handle_request", "(", "self", ",", "scheme", ",", "netloc", ",", "path", ",", "headers", ",", "body", "=", "None", ",", "method", "=", "\"GET\"", ")", ":", "backend_url", "=", "\"{}://{}{}\"", ".", "format", "(", "scheme", ",", "netloc", ",", "path", ")", "try", ":", "response", "=", "self", ".", "http_request", ".", "request", "(", "backend_url", ",", "method", "=", "method", ",", "body", "=", "body", ",", "headers", "=", "dict", "(", "headers", ")", ")", "self", ".", "_return_response", "(", "response", ")", "except", "Exception", "as", "e", ":", "body", "=", "\"Invalid response from backend: '{}' Server might be busy\"", ".", "format", "(", "e", ".", "message", ")", "logging", ".", "debug", "(", "body", ")", "self", ".", "send_error", "(", "httplib", ".", "SERVICE_UNAVAILABLE", ",", "body", ")" ]
7ebe7bde965e27737b961a0cb5740724d174fdc7
valid
ProxyRequestHandler.send_error
Send and log plain text error reply. :param code: :param message:
protector/proxy/request_handler.py
def send_error(self, code, message=None): """ Send and log plain text error reply. :param code: :param message: """ message = (message or '').strip() self.log_error("code %d, message %s", code, message) self.send_response(code) self.send_header("Content-Type", "text/plain") self.send_header('Connection', 'close') self.end_headers() if message: self.wfile.write(message)
def send_error(self, code, message=None): """ Send and log plain text error reply. :param code: :param message: """ message = (message or '').strip() self.log_error("code %d, message %s", code, message) self.send_response(code) self.send_header("Content-Type", "text/plain") self.send_header('Connection', 'close') self.end_headers() if message: self.wfile.write(message)
[ "Send", "and", "log", "plain", "text", "error", "reply", ".", ":", "param", "code", ":", ":", "param", "message", ":" ]
trivago/Protector
python
https://github.com/trivago/Protector/blob/7ebe7bde965e27737b961a0cb5740724d174fdc7/protector/proxy/request_handler.py#L185-L198
[ "def", "send_error", "(", "self", ",", "code", ",", "message", "=", "None", ")", ":", "message", "=", "message", ".", "strip", "(", ")", "self", ".", "log_error", "(", "\"code %d, message %s\"", ",", "code", ",", "message", ")", "self", ".", "send_response", "(", "code", ")", "self", ".", "send_header", "(", "\"Content-Type\"", ",", "\"text/plain\"", ")", "self", ".", "send_header", "(", "'Connection'", ",", "'close'", ")", "self", ".", "end_headers", "(", ")", "if", "message", ":", "self", ".", "wfile", ".", "write", "(", "message", ")" ]
7ebe7bde965e27737b961a0cb5740724d174fdc7
valid
ProxyRequestHandler._return_response
:type response: HTTPResponse
protector/proxy/request_handler.py
def _return_response(self, response): """ :type response: HTTPResponse """ self.filter_headers(response.msg) if "content-length" in response.msg: del response.msg["content-length"] self.send_response(response.status, response.reason) for header_key, header_value in response.msg.items(): self.send_header(header_key, header_value) body = response.read() self.send_header('Content-Length', str(len(body))) self.end_headers() self.wfile.write(body)
def _return_response(self, response): """ :type response: HTTPResponse """ self.filter_headers(response.msg) if "content-length" in response.msg: del response.msg["content-length"] self.send_response(response.status, response.reason) for header_key, header_value in response.msg.items(): self.send_header(header_key, header_value) body = response.read() self.send_header('Content-Length', str(len(body))) self.end_headers() self.wfile.write(body)
[ ":", "type", "result", ":", "HTTPResponse" ]
trivago/Protector
python
https://github.com/trivago/Protector/blob/7ebe7bde965e27737b961a0cb5740724d174fdc7/protector/proxy/request_handler.py#L200-L214
[ "def", "_return_response", "(", "self", ",", "response", ")", ":", "self", ".", "filter_headers", "(", "response", ".", "msg", ")", "if", "\"content-length\"", "in", "response", ".", "msg", ":", "del", "response", ".", "msg", "[", "\"content-length\"", "]", "self", ".", "send_response", "(", "response", ".", "status", ",", "response", ".", "reason", ")", "for", "header_key", ",", "header_value", "in", "response", ".", "msg", ".", "items", "(", ")", ":", "self", ".", "send_header", "(", "header_key", ",", "header_value", ")", "body", "=", "response", ".", "read", "(", ")", "self", ".", "send_header", "(", "'Content-Length'", ",", "str", "(", "len", "(", "body", ")", ")", ")", "self", ".", "end_headers", "(", ")", "self", ".", "wfile", ".", "write", "(", "body", ")" ]
7ebe7bde965e27737b961a0cb5740724d174fdc7
valid
anonymize_user
Preprocess an event by anonymizing user information. The anonymization is done by removing fields that can uniquely identify a user, such as the user's ID, session ID, IP address and User Agent, and hashing them to produce a ``visitor_id`` and ``unique_session_id``. To further secure the method, a randomly generated 32-byte salt is used, which expires after 24 hours and is discarded. The salt values are stored in Redis (or whichever backend Invenio-Cache uses). The ``unique_session_id`` is calculated in the same way as the ``visitor_id``, with the only difference that it also takes into account the hour of the event. All of these rules effectively mean that a user can have a unique ``visitor_id`` for each day and unique ``unique_session_id`` for each hour of a day. This session ID generation process was designed according to the `Project COUNTER Code of Practice <https://www.projectcounter.org/code-of- practice-sections/general-information/>`_. In addition to that the country of the user is extracted from the IP address as an ISO 3166-1 alpha-2 two-letter country code (e.g. "CH" for Switzerland).
invenio_stats/processors.py
def anonymize_user(doc): """Preprocess an event by anonymizing user information. The anonymization is done by removing fields that can uniquely identify a user, such as the user's ID, session ID, IP address and User Agent, and hashing them to produce a ``visitor_id`` and ``unique_session_id``. To further secure the method, a randomly generated 32-byte salt is used, which expires after 24 hours and is discarded. The salt values are stored in Redis (or whichever backend Invenio-Cache uses). The ``unique_session_id`` is calculated in the same way as the ``visitor_id``, with the only difference that it also takes into account the hour of the event. All of these rules effectively mean that a user can have a unique ``visitor_id`` for each day and unique ``unique_session_id`` for each hour of a day. This session ID generation process was designed according to the `Project COUNTER Code of Practice <https://www.projectcounter.org/code-of- practice-sections/general-information/>`_. In addition to that the country of the user is extracted from the IP address as an ISO 3166-1 alpha-2 two-letter country code (e.g. "CH" for Switzerland). """ ip = doc.pop('ip_address', None) if ip: doc.update({'country': get_geoip(ip)}) user_id = doc.pop('user_id', '') session_id = doc.pop('session_id', '') user_agent = doc.pop('user_agent', '') # A 'User Session' is defined as activity by a user in a period of # one hour. timeslice represents the hour of the day in which # the event has been generated and together with user info it determines # the 'User Session' timestamp = arrow.get(doc.get('timestamp')) timeslice = timestamp.strftime('%Y%m%d%H') salt = get_anonymization_salt(timestamp) visitor_id = hashlib.sha224(salt.encode('utf-8')) # TODO: include random salt here, that changes once a day. # m.update(random_salt) if user_id: visitor_id.update(user_id.encode('utf-8')) elif session_id: visitor_id.update(session_id.encode('utf-8')) elif ip and user_agent: vid = '{}|{}|{}'.format(ip, user_agent, timeslice) visitor_id.update(vid.encode('utf-8')) else: # TODO: add random data? pass unique_session_id = hashlib.sha224(salt.encode('utf-8')) if user_id: sid = '{}|{}'.format(user_id, timeslice) unique_session_id.update(sid.encode('utf-8')) elif session_id: sid = '{}|{}'.format(session_id, timeslice) unique_session_id.update(sid.encode('utf-8')) elif ip and user_agent: sid = '{}|{}|{}'.format(ip, user_agent, timeslice) unique_session_id.update(sid.encode('utf-8')) doc.update(dict( visitor_id=visitor_id.hexdigest(), unique_session_id=unique_session_id.hexdigest() )) return doc
def anonymize_user(doc): """Preprocess an event by anonymizing user information. The anonymization is done by removing fields that can uniquely identify a user, such as the user's ID, session ID, IP address and User Agent, and hashing them to produce a ``visitor_id`` and ``unique_session_id``. To further secure the method, a randomly generated 32-byte salt is used, which expires after 24 hours and is discarded. The salt values are stored in Redis (or whichever backend Invenio-Cache uses). The ``unique_session_id`` is calculated in the same way as the ``visitor_id``, with the only difference that it also takes into account the hour of the event. All of these rules effectively mean that a user can have a unique ``visitor_id`` for each day and unique ``unique_session_id`` for each hour of a day. This session ID generation process was designed according to the `Project COUNTER Code of Practice <https://www.projectcounter.org/code-of- practice-sections/general-information/>`_. In addition to that the country of the user is extracted from the IP address as an ISO 3166-1 alpha-2 two-letter country code (e.g. "CH" for Switzerland). """ ip = doc.pop('ip_address', None) if ip: doc.update({'country': get_geoip(ip)}) user_id = doc.pop('user_id', '') session_id = doc.pop('session_id', '') user_agent = doc.pop('user_agent', '') # A 'User Session' is defined as activity by a user in a period of # one hour. timeslice represents the hour of the day in which # the event has been generated and together with user info it determines # the 'User Session' timestamp = arrow.get(doc.get('timestamp')) timeslice = timestamp.strftime('%Y%m%d%H') salt = get_anonymization_salt(timestamp) visitor_id = hashlib.sha224(salt.encode('utf-8')) # TODO: include random salt here, that changes once a day. # m.update(random_salt) if user_id: visitor_id.update(user_id.encode('utf-8')) elif session_id: visitor_id.update(session_id.encode('utf-8')) elif ip and user_agent: vid = '{}|{}|{}'.format(ip, user_agent, timeslice) visitor_id.update(vid.encode('utf-8')) else: # TODO: add random data? pass unique_session_id = hashlib.sha224(salt.encode('utf-8')) if user_id: sid = '{}|{}'.format(user_id, timeslice) unique_session_id.update(sid.encode('utf-8')) elif session_id: sid = '{}|{}'.format(session_id, timeslice) unique_session_id.update(sid.encode('utf-8')) elif ip and user_agent: sid = '{}|{}|{}'.format(ip, user_agent, timeslice) unique_session_id.update(sid.encode('utf-8')) doc.update(dict( visitor_id=visitor_id.hexdigest(), unique_session_id=unique_session_id.hexdigest() )) return doc
[ "Preprocess", "an", "event", "by", "anonymizing", "user", "information", "." ]
inveniosoftware/invenio-stats
python
https://github.com/inveniosoftware/invenio-stats/blob/d877ae5462084abb4a28a20f1ebb3d636769c1bc/invenio_stats/processors.py#L27-L95
[ "def", "anonymize_user", "(", "doc", ")", ":", "ip", "=", "doc", ".", "pop", "(", "'ip_address'", ",", "None", ")", "if", "ip", ":", "doc", ".", "update", "(", "{", "'country'", ":", "get_geoip", "(", "ip", ")", "}", ")", "user_id", "=", "doc", ".", "pop", "(", "'user_id'", ",", "''", ")", "session_id", "=", "doc", ".", "pop", "(", "'session_id'", ",", "''", ")", "user_agent", "=", "doc", ".", "pop", "(", "'user_agent'", ",", "''", ")", "# A 'User Session' is defined as activity by a user in a period of", "# one hour. timeslice represents the hour of the day in which", "# the event has been generated and together with user info it determines", "# the 'User Session'", "timestamp", "=", "arrow", ".", "get", "(", "doc", ".", "get", "(", "'timestamp'", ")", ")", "timeslice", "=", "timestamp", ".", "strftime", "(", "'%Y%m%d%H'", ")", "salt", "=", "get_anonymization_salt", "(", "timestamp", ")", "visitor_id", "=", "hashlib", ".", "sha224", "(", "salt", ".", "encode", "(", "'utf-8'", ")", ")", "# TODO: include random salt here, that changes once a day.", "# m.update(random_salt)", "if", "user_id", ":", "visitor_id", ".", "update", "(", "user_id", ".", "encode", "(", "'utf-8'", ")", ")", "elif", "session_id", ":", "visitor_id", ".", "update", "(", "session_id", ".", "encode", "(", "'utf-8'", ")", ")", "elif", "ip", "and", "user_agent", ":", "vid", "=", "'{}|{}|{}'", ".", "format", "(", "ip", ",", "user_agent", ",", "timeslice", ")", "visitor_id", ".", "update", "(", "vid", ".", "encode", "(", "'utf-8'", ")", ")", "else", ":", "# TODO: add random data?", "pass", "unique_session_id", "=", "hashlib", ".", "sha224", "(", "salt", ".", "encode", "(", "'utf-8'", ")", ")", "if", "user_id", ":", "sid", "=", "'{}|{}'", ".", "format", "(", "user_id", ",", "timeslice", ")", "unique_session_id", ".", "update", "(", "sid", ".", "encode", "(", "'utf-8'", ")", ")", "elif", "session_id", ":", "sid", "=", "'{}|{}'", ".", "format", "(", "session_id", ",", "timeslice", ")", "unique_session_id", ".", "update", "(", "sid", ".", "encode", "(", "'utf-8'", ")", ")", "elif", "ip", "and", "user_agent", ":", "sid", "=", "'{}|{}|{}'", ".", "format", "(", "ip", ",", "user_agent", ",", "timeslice", ")", "unique_session_id", ".", "update", "(", "sid", ".", "encode", "(", "'utf-8'", ")", ")", "doc", ".", "update", "(", "dict", "(", "visitor_id", "=", "visitor_id", ".", "hexdigest", "(", ")", ",", "unique_session_id", "=", "unique_session_id", ".", "hexdigest", "(", ")", ")", ")", "return", "doc" ]
d877ae5462084abb4a28a20f1ebb3d636769c1bc
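The visitor/session hashing scheme can be reproduced in isolation; the salt and event values below are made up for illustration:
import hashlib

salt = 'c2FsdA=='          # would come from get_anonymization_salt(ts)
user_id = '42'
timeslice = '2019010109'   # %Y%m%d%H of the event timestamp

visitor_id = hashlib.sha224(salt.encode('utf-8'))
visitor_id.update(user_id.encode('utf-8'))

unique_session_id = hashlib.sha224(salt.encode('utf-8'))
unique_session_id.update('{}|{}'.format(user_id, timeslice).encode('utf-8'))

# same user and hour -> same session id; the next hour yields a new one
print(visitor_id.hexdigest(), unique_session_id.hexdigest())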
valid
hash_id
Generate event id, optimized for ES.
invenio_stats/processors.py
def hash_id(iso_timestamp, msg): """Generate event id, optimized for ES.""" return '{0}-{1}'.format(iso_timestamp, hashlib.sha1( msg.get('unique_id').encode('utf-8') + str(msg.get('visitor_id')). encode('utf-8')). hexdigest())
def hash_id(iso_timestamp, msg): """Generate event id, optimized for ES.""" return '{0}-{1}'.format(iso_timestamp, hashlib.sha1( msg.get('unique_id').encode('utf-8') + str(msg.get('visitor_id')). encode('utf-8')). hexdigest())
[ "Generate", "event", "id", "optimized", "for", "ES", "." ]
inveniosoftware/invenio-stats
python
https://github.com/inveniosoftware/invenio-stats/blob/d877ae5462084abb4a28a20f1ebb3d636769c1bc/invenio_stats/processors.py#L127-L134
[ "def", "hash_id", "(", "iso_timestamp", ",", "msg", ")", ":", "return", "'{0}-{1}'", ".", "format", "(", "iso_timestamp", ",", "hashlib", ".", "sha1", "(", "msg", ".", "get", "(", "'unique_id'", ")", ".", "encode", "(", "'utf-8'", ")", "+", "str", "(", "msg", ".", "get", "(", "'visitor_id'", ")", ")", ".", "encode", "(", "'utf-8'", ")", ")", ".", "hexdigest", "(", ")", ")" ]
d877ae5462084abb4a28a20f1ebb3d636769c1bc
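A quick standalone check of the resulting id format (sample values assumed):
import hashlib

msg = {'unique_id': 'ui_1', 'visitor_id': 'v_1'}
iso_timestamp = '2019-01-01T09:00:00'
event_id = '{0}-{1}'.format(iso_timestamp, hashlib.sha1(
    msg['unique_id'].encode('utf-8') +
    str(msg['visitor_id']).encode('utf-8')).hexdigest())
assert event_id.startswith('2019-01-01T09:00:00-')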
valid
EventsIndexer.actionsiter
Iterator.
invenio_stats/processors.py
def actionsiter(self): """Iterator.""" for msg in self.queue.consume(): try: for preproc in self.preprocessors: msg = preproc(msg) if msg is None: break if msg is None: continue suffix = arrow.get(msg.get('timestamp')).strftime(self.suffix) ts = parser.parse(msg.get('timestamp')) # Truncate timestamp to keep only seconds. This is to improve # elasticsearch performance. ts = ts.replace(microsecond=0) msg['timestamp'] = ts.isoformat() # apply timestamp windowing in order to group events too close # in time if self.double_click_window > 0: timestamp = mktime(utc.localize(ts).utctimetuple()) ts = ts.fromtimestamp( timestamp // self.double_click_window * self.double_click_window ) yield dict( _id=hash_id(ts.isoformat(), msg), _op_type='index', _index='{0}-{1}'.format(self.index, suffix), _type=self.doctype, _source=msg, ) except Exception: current_app.logger.exception(u'Error while processing event')
def actionsiter(self): """Iterator.""" for msg in self.queue.consume(): try: for preproc in self.preprocessors: msg = preproc(msg) if msg is None: break if msg is None: continue suffix = arrow.get(msg.get('timestamp')).strftime(self.suffix) ts = parser.parse(msg.get('timestamp')) # Truncate timestamp to keep only seconds. This is to improve # elasticsearch performance. ts = ts.replace(microsecond=0) msg['timestamp'] = ts.isoformat() # apply timestamp windowing in order to group events too close # in time if self.double_click_window > 0: timestamp = mktime(utc.localize(ts).utctimetuple()) ts = ts.fromtimestamp( timestamp // self.double_click_window * self.double_click_window ) yield dict( _id=hash_id(ts.isoformat(), msg), _op_type='index', _index='{0}-{1}'.format(self.index, suffix), _type=self.doctype, _source=msg, ) except Exception: current_app.logger.exception(u'Error while processing event')
[ "Iterator", "." ]
inveniosoftware/invenio-stats
python
https://github.com/inveniosoftware/invenio-stats/blob/d877ae5462084abb4a28a20f1ebb3d636769c1bc/invenio_stats/processors.py#L171-L203
[ "def", "actionsiter", "(", "self", ")", ":", "for", "msg", "in", "self", ".", "queue", ".", "consume", "(", ")", ":", "try", ":", "for", "preproc", "in", "self", ".", "preprocessors", ":", "msg", "=", "preproc", "(", "msg", ")", "if", "msg", "is", "None", ":", "break", "if", "msg", "is", "None", ":", "continue", "suffix", "=", "arrow", ".", "get", "(", "msg", ".", "get", "(", "'timestamp'", ")", ")", ".", "strftime", "(", "self", ".", "suffix", ")", "ts", "=", "parser", ".", "parse", "(", "msg", ".", "get", "(", "'timestamp'", ")", ")", "# Truncate timestamp to keep only seconds. This is to improve", "# elasticsearch performances.", "ts", "=", "ts", ".", "replace", "(", "microsecond", "=", "0", ")", "msg", "[", "'timestamp'", "]", "=", "ts", ".", "isoformat", "(", ")", "# apply timestamp windowing in order to group events too close", "# in time", "if", "self", ".", "double_click_window", ">", "0", ":", "timestamp", "=", "mktime", "(", "utc", ".", "localize", "(", "ts", ")", ".", "utctimetuple", "(", ")", ")", "ts", "=", "ts", ".", "fromtimestamp", "(", "timestamp", "//", "self", ".", "double_click_window", "*", "self", ".", "double_click_window", ")", "yield", "dict", "(", "_id", "=", "hash_id", "(", "ts", ".", "isoformat", "(", ")", ",", "msg", ")", ",", "_op_type", "=", "'index'", ",", "_index", "=", "'{0}-{1}'", ".", "format", "(", "self", ".", "index", ",", "suffix", ")", ",", "_type", "=", "self", ".", "doctype", ",", "_source", "=", "msg", ",", ")", "except", "Exception", ":", "current_app", ".", "logger", ".", "exception", "(", "u'Error while processing event'", ")" ]
d877ae5462084abb4a28a20f1ebb3d636769c1bc
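The double-click windowing in actionsiter amounts to flooring the epoch time to a multiple of the window; a standalone check with an assumed 30-second window:
import calendar
from datetime import datetime

window = 30  # seconds; plays the role of double_click_window

def floor_to_window(dt):
    epoch = calendar.timegm(dt.utctimetuple())
    return datetime.utcfromtimestamp(epoch // window * window)

# two clicks 20 seconds apart land in the same bucket, hence the same document id
assert floor_to_window(datetime(2019, 1, 1, 9, 0, 5)) == \
    floor_to_window(datetime(2019, 1, 1, 9, 0, 25)) == datetime(2019, 1, 1, 9, 0, 0)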
valid
EventsIndexer.run
Process events queue.
invenio_stats/processors.py
def run(self): """Process events queue.""" return elasticsearch.helpers.bulk( self.client, self.actionsiter(), stats_only=True, chunk_size=50 )
def run(self): """Process events queue.""" return elasticsearch.helpers.bulk( self.client, self.actionsiter(), stats_only=True, chunk_size=50 )
[ "Process", "events", "queue", "." ]
inveniosoftware/invenio-stats
python
https://github.com/inveniosoftware/invenio-stats/blob/d877ae5462084abb4a28a20f1ebb3d636769c1bc/invenio_stats/processors.py#L205-L212
[ "def", "run", "(", "self", ")", ":", "return", "elasticsearch", ".", "helpers", ".", "bulk", "(", "self", ".", "client", ",", "self", ".", "actionsiter", "(", ")", ",", "stats_only", "=", "True", ",", "chunk_size", "=", "50", ")" ]
d877ae5462084abb4a28a20f1ebb3d636769c1bc
valid
DatapointsParser.parse
num_datapoints = min(duration/resolution, limit) :param duration_seconds: Time duration (in seconds) for which datapoints should be returned :param resolution_seconds: Time interval (in seconds) between data points :param limit: Maximum number of datapoints to return
protector/parser/subparsers/datapoints.py
def parse(duration_seconds, resolution_seconds=Resolution.MAX_RESOLUTION, limit=None): """ num_datapoints = min(duration/resolution, limit) :param duration_seconds: Time duration (in seconds) for which datapoints should be returned :param resolution_seconds: Time interval (in seconds) between data points :param limit: Maximum number of datapoints to return """ if not duration_seconds or duration_seconds < 0: return 0 if not resolution_seconds or resolution_seconds <= 0: return None num_datapoints = duration_seconds / resolution_seconds if limit: num_datapoints = min(int(limit), num_datapoints) return int(math.ceil(num_datapoints))
def parse(duration_seconds, resolution_seconds=Resolution.MAX_RESOLUTION, limit=None): """ num_datapoints = min(duration/resolution, limit) :param duration_seconds: Time duration (in seconds) for which datapoints should be returned :param resolution_seconds: Time interval (in seconds) between data points :param limit: Maximum number of datapoints to return """ if not duration_seconds or duration_seconds < 0: return 0 if not resolution_seconds or resolution_seconds <= 0: return None num_datapoints = duration_seconds / resolution_seconds if limit: num_datapoints = min(int(limit), num_datapoints) return int(math.ceil(num_datapoints))
[ "num_datapoints", "=", "min", "(", "duration", "/", "resolution", "limit", ")" ]
trivago/Protector
python
https://github.com/trivago/Protector/blob/7ebe7bde965e27737b961a0cb5740724d174fdc7/protector/parser/subparsers/datapoints.py#L11-L31
[ "def", "parse", "(", "duration_seconds", ",", "resolution_seconds", "=", "Resolution", ".", "MAX_RESOLUTION", ",", "limit", "=", "None", ")", ":", "if", "not", "duration_seconds", "or", "duration_seconds", "<", "0", ":", "return", "0", "if", "not", "resolution_seconds", "or", "resolution_seconds", "<=", "0", ":", "return", "None", "num_datapoints", "=", "duration_seconds", "/", "resolution_seconds", "if", "limit", ":", "num_datapoints", "=", "min", "(", "int", "(", "limit", ")", ",", "num_datapoints", ")", "return", "int", "(", "math", ".", "ceil", "(", "num_datapoints", ")", ")" ]
7ebe7bde965e27737b961a0cb5740724d174fdc7
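The rule implemented above is num_datapoints = ceil(min(duration / resolution, limit)). A quick worked check in plain Python:

import math

def num_datapoints(duration_s, resolution_s, limit=None):
    n = duration_s / float(resolution_s)
    if limit:
        n = min(int(limit), n)
    return int(math.ceil(n))

assert num_datapoints(7200, 10) == 720             # 2h of data at 10s steps
assert num_datapoints(7200, 10, limit=500) == 500  # capped by LIMIT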
valid
DummyWriter.create_series
Write one data point for each series name to initialize the series :param num_series: Number of different series names to create :param batch_size: Number of series to create at the same time :return:
contrib/helpers/dummy_writer.py
def create_series(self, num_series, batch_size=5000): """ Write one data point for each series name to initialize the series :param num_series: Number of different series names to create :param batch_size: Number of series to create at the same time :return: """ datapoints = [] for _ in range(num_series): name = self.dummy_seriesname() datapoints.append(self.create_datapoint(name, ["value"], [[1]])) for data in tqdm(self.batch(datapoints, batch_size)): self.client.write_points(data)
def create_series(self, num_series, batch_size=5000): """ Write one data point for each series name to initialize the series :param num_series: Number of different series names to create :param batch_size: Number of series to create at the same time :return: """ datapoints = [] for _ in range(num_series): name = self.dummy_seriesname() datapoints.append(self.create_datapoint(name, ["value"], [[1]])) for data in tqdm(self.batch(datapoints, batch_size)): self.client.write_points(data)
[ "Write", "one", "data", "point", "for", "each", "series", "name", "to", "initialize", "the", "series", ":", "param", "num_series", ":", "Number", "of", "different", "series", "names", "to", "create", ":", "param", "batch_size", ":", "Number", "of", "series", "to", "create", "at", "the", "same", "time", ":", "return", ":" ]
trivago/Protector
python
https://github.com/trivago/Protector/blob/7ebe7bde965e27737b961a0cb5740724d174fdc7/contrib/helpers/dummy_writer.py#L36-L48
[ "def", "create_series", "(", "self", ",", "num_series", ",", "batch_size", "=", "5000", ")", ":", "datapoints", "=", "[", "]", "for", "_", "in", "range", "(", "num_series", ")", ":", "name", "=", "self", ".", "dummy_seriesname", "(", ")", "datapoints", ".", "append", "(", "self", ".", "create_datapoint", "(", "name", ",", "[", "\"value\"", "]", ",", "[", "[", "1", "]", "]", ")", ")", "for", "data", "in", "tqdm", "(", "self", ".", "batch", "(", "datapoints", ",", "batch_size", ")", ")", ":", "self", ".", "client", ".", "write_points", "(", "data", ")" ]
7ebe7bde965e27737b961a0cb5740724d174fdc7
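create_series leans on a self.batch() helper that this record does not show; a minimal stand-in that slices a list into fixed-size chunks might look like this (illustrative, not the project's implementation):

def batch(items, size):
    # Yield successive chunks of at most `size` items.
    for start in range(0, len(items), size):
        yield items[start:start + size]

assert list(batch([1, 2, 3, 4, 5], 2)) == [[1, 2], [3, 4], [5]]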
valid
DummyWriter.write_points
Create sample datapoints between two dates with the given resolution (in seconds) :param series_name: :param start_date: :param end_date: :param resolution: :param batch_size:
contrib/helpers/dummy_writer.py
def write_points(self, series_name, start_date, end_date, resolution=10, batch_size=5000): """ Create sample datapoints between two dates with the given resolution (in seconds) :param series_name: :param start_date: :param end_date: :param resolution: :param batch_size: """ start_ts = int(start_date.strftime("%s")) end_ts = int(end_date.strftime("%s")) range_seconds = end_ts - start_ts num_datapoints = range_seconds / resolution timestamps = [start_ts + i * resolution for i in range(num_datapoints)] columns = ["time", "value"] for batch in tqdm(self.batch(timestamps, batch_size)): points = [] for timestamp in batch: point = random.randint(1, 100) points.append([timestamp, point]) datapoint = self.create_datapoint(series_name, columns, points) self.client.write_points([datapoint])
def write_points(self, series_name, start_date, end_date, resolution=10, batch_size=5000): """ Create sample datapoints between two dates with the given resolution (in seconds) :param series_name: :param start_date: :param end_date: :param resolution: :param batch_size: """ start_ts = int(start_date.strftime("%s")) end_ts = int(end_date.strftime("%s")) range_seconds = end_ts - start_ts num_datapoints = range_seconds / resolution timestamps = [start_ts + i * resolution for i in range(num_datapoints)] columns = ["time", "value"] for batch in tqdm(self.batch(timestamps, batch_size)): points = [] for timestamp in batch: point = random.randint(1, 100) points.append([timestamp, point]) datapoint = self.create_datapoint(series_name, columns, points) self.client.write_points([datapoint])
[ "Create", "sample", "datapoints", "between", "two", "dates", "with", "the", "given", "resolution", "(", "in", "seconds", ")", ":", "param", "series_name", ":", ":", "param", "start_date", ":", ":", "param", "end_date", ":", ":", "param", "resolution", ":", ":", "param", "batch_size", ":" ]
trivago/Protector
python
https://github.com/trivago/Protector/blob/7ebe7bde965e27737b961a0cb5740724d174fdc7/contrib/helpers/dummy_writer.py#L56-L80
[ "def", "write_points", "(", "self", ",", "series_name", ",", "start_date", ",", "end_date", ",", "resolution", "=", "10", ",", "batch_size", "=", "5000", ")", ":", "start_ts", "=", "int", "(", "start_date", ".", "strftime", "(", "\"%s\"", ")", ")", "end_ts", "=", "int", "(", "end_date", ".", "strftime", "(", "\"%s\"", ")", ")", "range_seconds", "=", "end_ts", "-", "start_ts", "num_datapoints", "=", "range_seconds", "/", "resolution", "timestamps", "=", "[", "start_ts", "+", "i", "*", "resolution", "for", "i", "in", "range", "(", "num_datapoints", ")", "]", "columns", "=", "[", "\"time\"", ",", "\"value\"", "]", "for", "batch", "in", "tqdm", "(", "self", ".", "batch", "(", "timestamps", ",", "batch_size", ")", ")", ":", "points", "=", "[", "]", "for", "timestamp", "in", "batch", ":", "point", "=", "random", ".", "randint", "(", "1", ",", "100", ")", "points", ".", "append", "(", "[", "timestamp", ",", "point", "]", ")", "datapoint", "=", "self", ".", "create_datapoint", "(", "series_name", ",", "columns", ",", "points", ")", "self", ".", "client", ".", "write_points", "(", "[", "datapoint", "]", ")" ]
7ebe7bde965e27737b961a0cb5740724d174fdc7
valid
register_events
Register sample events.
invenio_stats/contrib/registrations.py
def register_events(): """Register sample events.""" return [ dict( event_type='file-download', templates='invenio_stats.contrib.file_download', processor_class=EventsIndexer, processor_config=dict( preprocessors=[ flag_robots, anonymize_user, build_file_unique_id ])), dict( event_type='record-view', templates='invenio_stats.contrib.record_view', processor_class=EventsIndexer, processor_config=dict( preprocessors=[ flag_robots, anonymize_user, build_record_unique_id ])) ]
def register_events(): """Register sample events.""" return [ dict( event_type='file-download', templates='invenio_stats.contrib.file_download', processor_class=EventsIndexer, processor_config=dict( preprocessors=[ flag_robots, anonymize_user, build_file_unique_id ])), dict( event_type='record-view', templates='invenio_stats.contrib.record_view', processor_class=EventsIndexer, processor_config=dict( preprocessors=[ flag_robots, anonymize_user, build_record_unique_id ])) ]
[ "Register", "sample", "events", "." ]
inveniosoftware/invenio-stats
python
https://github.com/inveniosoftware/invenio-stats/blob/d877ae5462084abb4a28a20f1ebb3d636769c1bc/invenio_stats/contrib/registrations.py#L19-L42
[ "def", "register_events", "(", ")", ":", "return", "[", "dict", "(", "event_type", "=", "'file-download'", ",", "templates", "=", "'invenio_stats.contrib.file_download'", ",", "processor_class", "=", "EventsIndexer", ",", "processor_config", "=", "dict", "(", "preprocessors", "=", "[", "flag_robots", ",", "anonymize_user", ",", "build_file_unique_id", "]", ")", ")", ",", "dict", "(", "event_type", "=", "'record-view'", ",", "templates", "=", "'invenio_stats.contrib.record_view'", ",", "processor_class", "=", "EventsIndexer", ",", "processor_config", "=", "dict", "(", "preprocessors", "=", "[", "flag_robots", ",", "anonymize_user", ",", "build_record_unique_id", "]", ")", ")", "]" ]
d877ae5462084abb4a28a20f1ebb3d636769c1bc
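register_events returns plain configuration dicts keyed by event_type, so a consumer can index them directly. For example (import path taken from this record):

from invenio_stats.contrib.registrations import register_events

events = {cfg['event_type']: cfg for cfg in register_events()}
assert set(events) == {'file-download', 'record-view'}
# flag_robots runs first in each preprocessor chain, so robot traffic can
# be dropped before the user is anonymized.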
valid
register_aggregations
Register sample aggregations.
invenio_stats/contrib/registrations.py
def register_aggregations(): """Register sample aggregations.""" return [dict( aggregation_name='file-download-agg', templates='invenio_stats.contrib.aggregations.aggr_file_download', aggregator_class=StatAggregator, aggregator_config=dict( client=current_search_client, event='file-download', aggregation_field='unique_id', aggregation_interval='day', copy_fields=dict( file_key='file_key', bucket_id='bucket_id', file_id='file_id', ), metric_aggregation_fields={ 'unique_count': ('cardinality', 'unique_session_id', {'precision_threshold': 1000}), 'volume': ('sum', 'size', {}), }, )), dict( aggregation_name='record-view-agg', templates='invenio_stats.contrib.aggregations.aggr_record_view', aggregator_class=StatAggregator, aggregator_config=dict( client=current_search_client, event='record-view', aggregation_field='unique_id', aggregation_interval='day', copy_fields=dict( record_id='record_id', pid_type='pid_type', pid_value='pid_value', ), metric_aggregation_fields={ 'unique_count': ('cardinality', 'unique_session_id', {'precision_threshold': 1000}), }, ))]
def register_aggregations(): """Register sample aggregations.""" return [dict( aggregation_name='file-download-agg', templates='invenio_stats.contrib.aggregations.aggr_file_download', aggregator_class=StatAggregator, aggregator_config=dict( client=current_search_client, event='file-download', aggregation_field='unique_id', aggregation_interval='day', copy_fields=dict( file_key='file_key', bucket_id='bucket_id', file_id='file_id', ), metric_aggregation_fields={ 'unique_count': ('cardinality', 'unique_session_id', {'precision_threshold': 1000}), 'volume': ('sum', 'size', {}), }, )), dict( aggregation_name='record-view-agg', templates='invenio_stats.contrib.aggregations.aggr_record_view', aggregator_class=StatAggregator, aggregator_config=dict( client=current_search_client, event='record-view', aggregation_field='unique_id', aggregation_interval='day', copy_fields=dict( record_id='record_id', pid_type='pid_type', pid_value='pid_value', ), metric_aggregation_fields={ 'unique_count': ('cardinality', 'unique_session_id', {'precision_threshold': 1000}), }, ))]
[ "Register", "sample", "aggregations", "." ]
inveniosoftware/invenio-stats
python
https://github.com/inveniosoftware/invenio-stats/blob/d877ae5462084abb4a28a20f1ebb3d636769c1bc/invenio_stats/contrib/registrations.py#L45-L84
[ "def", "register_aggregations", "(", ")", ":", "return", "[", "dict", "(", "aggregation_name", "=", "'file-download-agg'", ",", "templates", "=", "'invenio_stats.contrib.aggregations.aggr_file_download'", ",", "aggregator_class", "=", "StatAggregator", ",", "aggregator_config", "=", "dict", "(", "client", "=", "current_search_client", ",", "event", "=", "'file-download'", ",", "aggregation_field", "=", "'unique_id'", ",", "aggregation_interval", "=", "'day'", ",", "copy_fields", "=", "dict", "(", "file_key", "=", "'file_key'", ",", "bucket_id", "=", "'bucket_id'", ",", "file_id", "=", "'file_id'", ",", ")", ",", "metric_aggregation_fields", "=", "{", "'unique_count'", ":", "(", "'cardinality'", ",", "'unique_session_id'", ",", "{", "'precision_threshold'", ":", "1000", "}", ")", ",", "'volume'", ":", "(", "'sum'", ",", "'size'", ",", "{", "}", ")", ",", "}", ",", ")", ")", ",", "dict", "(", "aggregation_name", "=", "'record-view-agg'", ",", "templates", "=", "'invenio_stats.contrib.aggregations.aggr_record_view'", ",", "aggregator_class", "=", "StatAggregator", ",", "aggregator_config", "=", "dict", "(", "client", "=", "current_search_client", ",", "event", "=", "'record-view'", ",", "aggregation_field", "=", "'unique_id'", ",", "aggregation_interval", "=", "'day'", ",", "copy_fields", "=", "dict", "(", "record_id", "=", "'record_id'", ",", "pid_type", "=", "'pid_type'", ",", "pid_value", "=", "'pid_value'", ",", ")", ",", "metric_aggregation_fields", "=", "{", "'unique_count'", ":", "(", "'cardinality'", ",", "'unique_session_id'", ",", "{", "'precision_threshold'", ":", "1000", "}", ")", ",", "}", ",", ")", ")", "]" ]
d877ae5462084abb4a28a20f1ebb3d636769c1bc
valid
register_queries
Register queries.
invenio_stats/contrib/registrations.py
def register_queries(): """Register queries.""" return [ dict( query_name='bucket-file-download-histogram', query_class=ESDateHistogramQuery, query_config=dict( index='stats-file-download', doc_type='file-download-day-aggregation', copy_fields=dict( bucket_id='bucket_id', file_key='file_key', ), required_filters=dict( bucket_id='bucket_id', file_key='file_key', ) ) ), dict( query_name='bucket-file-download-total', query_class=ESTermsQuery, query_config=dict( index='stats-file-download', doc_type='file-download-day-aggregation', copy_fields=dict( # bucket_id='bucket_id', ), required_filters=dict( bucket_id='bucket_id', ), aggregated_fields=['file_key'] ) ), ]
def register_queries(): """Register queries.""" return [ dict( query_name='bucket-file-download-histogram', query_class=ESDateHistogramQuery, query_config=dict( index='stats-file-download', doc_type='file-download-day-aggregation', copy_fields=dict( bucket_id='bucket_id', file_key='file_key', ), required_filters=dict( bucket_id='bucket_id', file_key='file_key', ) ) ), dict( query_name='bucket-file-download-total', query_class=ESTermsQuery, query_config=dict( index='stats-file-download', doc_type='file-download-day-aggregation', copy_fields=dict( # bucket_id='bucket_id', ), required_filters=dict( bucket_id='bucket_id', ), aggregated_fields=['file_key'] ) ), ]
[ "Register", "queries", "." ]
inveniosoftware/invenio-stats
python
https://github.com/inveniosoftware/invenio-stats/blob/d877ae5462084abb4a28a20f1ebb3d636769c1bc/invenio_stats/contrib/registrations.py#L87-L121
[ "def", "register_queries", "(", ")", ":", "return", "[", "dict", "(", "query_name", "=", "'bucket-file-download-histogram'", ",", "query_class", "=", "ESDateHistogramQuery", ",", "query_config", "=", "dict", "(", "index", "=", "'stats-file-download'", ",", "doc_type", "=", "'file-download-day-aggregation'", ",", "copy_fields", "=", "dict", "(", "bucket_id", "=", "'bucket_id'", ",", "file_key", "=", "'file_key'", ",", ")", ",", "required_filters", "=", "dict", "(", "bucket_id", "=", "'bucket_id'", ",", "file_key", "=", "'file_key'", ",", ")", ")", ")", ",", "dict", "(", "query_name", "=", "'bucket-file-download-total'", ",", "query_class", "=", "ESTermsQuery", ",", "query_config", "=", "dict", "(", "index", "=", "'stats-file-download'", ",", "doc_type", "=", "'file-download-day-aggregation'", ",", "copy_fields", "=", "dict", "(", "# bucket_id='bucket_id',", ")", ",", "required_filters", "=", "dict", "(", "bucket_id", "=", "'bucket_id'", ",", ")", ",", "aggregated_fields", "=", "[", "'file_key'", "]", ")", ")", ",", "]" ]
d877ae5462084abb4a28a20f1ebb3d636769c1bc
valid
RuleChecker.check
:param query:
protector/rules/negative_groupby_statement.py
def check(self, query): """ :param query: """ if query.get_type() not in {Keyword.SELECT}: # Bailing out for non select queries return Ok(True) if query.get_resolution() > 0: return Ok(True) return Err("Group by statements need a positive time value (e.g. time(10s))")
def check(self, query): """ :param query: """ if query.get_type() not in {Keyword.SELECT}: # Bailing out for non select queries return Ok(True) if query.get_resolution() > 0: return Ok(True) return Err("Group by statements need a positive time value (e.g. time(10s))")
[ ":", "param", "query", ":" ]
trivago/Protector
python
https://github.com/trivago/Protector/blob/7ebe7bde965e27737b961a0cb5740724d174fdc7/protector/rules/negative_groupby_statement.py#L17-L28
[ "def", "check", "(", "self", ",", "query", ")", ":", "if", "query", ".", "get_type", "(", ")", "not", "in", "{", "Keyword", ".", "SELECT", "}", ":", "# Bailing out for non select queries", "return", "Ok", "(", "True", ")", "if", "query", ".", "get_resolution", "(", ")", ">", "0", ":", "return", "Ok", "(", "True", ")", "return", "Err", "(", "\"Group by statements need a positive time value (e.g. time(10s))\"", ")" ]
7ebe7bde965e27737b961a0cb5740724d174fdc7
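Like the other Protector rules, check() reports through Ok/Err result wrappers instead of raising. A self-contained sketch of that pattern with stand-in Ok/Err classes (the project's actual Result types may expose a richer API):

class Ok(object):
    def __init__(self, value):
        self.value = value

class Err(object):
    def __init__(self, message):
        self.message = message

def check_resolution(resolution):
    # Mirrors the rule above: GROUP BY needs a positive time value.
    if resolution > 0:
        return Ok(True)
    return Err("Group by statements need a positive time value (e.g. time(10s))")

outcome = check_resolution(0)
if isinstance(outcome, Err):
    print(outcome.message)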
valid
declare_queues
Index statistics events.
invenio_stats/queues.py
def declare_queues(): """Index statistics events.""" return [dict(name='stats-{0}'.format(event['event_type']), exchange=current_stats.exchange) for event in current_stats._events_config.values()]
def declare_queues(): """Index statistics events.""" return [dict(name='stats-{0}'.format(event['event_type']), exchange=current_stats.exchange) for event in current_stats._events_config.values()]
[ "Index", "statistics", "events", "." ]
inveniosoftware/invenio-stats
python
https://github.com/inveniosoftware/invenio-stats/blob/d877ae5462084abb4a28a20f1ebb3d636769c1bc/invenio_stats/queues.py#L14-L18
[ "def", "declare_queues", "(", ")", ":", "return", "[", "dict", "(", "name", "=", "'stats-{0}'", ".", "format", "(", "event", "[", "'event_type'", "]", ")", ",", "exchange", "=", "current_stats", ".", "exchange", ")", "for", "event", "in", "current_stats", ".", "_events_config", ".", "values", "(", ")", "]" ]
d877ae5462084abb4a28a20f1ebb3d636769c1bc
valid
QueryParser.parse
Parse a raw query string into fields :param raw_query_string: Raw InfluxDB query string
protector/parser/query_parser.py
def parse(self, raw_query_string): """ Parse a raw query string into fields :param raw_query_string: Raw InfluxDB query string """ self._reset() if not isinstance(raw_query_string, basestring): return None query_string = self._cleanup(raw_query_string) parts = self._split(query_string) parts = self._sanitize_keywords(parts) tokens = self._tokenize(parts) if tokens: # Run subparsers to analyze parts of the query self.parsed_resolution = self._parse_resolution(tokens) self.parsed_time = self._parse_time(tokens) self.parsed_time_overlap = self._parse_duration(self.parsed_time) self.parsed_datapoints = self._parse_datapoints( self.parsed_time_overlap.timespan_seconds(), self.parsed_resolution, self.parse_keyword(Keyword.LIMIT, tokens) ) return self.create_query_object(tokens)
def parse(self, raw_query_string): """ Parse a raw query string into fields :param raw_query_string: Raw InfluxDB query string """ self._reset() if not isinstance(raw_query_string, basestring): return None query_string = self._cleanup(raw_query_string) parts = self._split(query_string) parts = self._sanitize_keywords(parts) tokens = self._tokenize(parts) if tokens: # Run subparsers to analyze parts of the query self.parsed_resolution = self._parse_resolution(tokens) self.parsed_time = self._parse_time(tokens) self.parsed_time_overlap = self._parse_duration(self.parsed_time) self.parsed_datapoints = self._parse_datapoints( self.parsed_time_overlap.timespan_seconds(), self.parsed_resolution, self.parse_keyword(Keyword.LIMIT, tokens) ) return self.create_query_object(tokens)
[ "Parse", "a", "raw", "query", "string", "into", "fields", ":", "param", "raw_query_string", ":", "Raw", "InfluxDB", "query", "string" ]
trivago/Protector
python
https://github.com/trivago/Protector/blob/7ebe7bde965e27737b961a0cb5740724d174fdc7/protector/parser/query_parser.py#L37-L64
[ "def", "parse", "(", "self", ",", "raw_query_string", ")", ":", "self", ".", "_reset", "(", ")", "if", "not", "isinstance", "(", "raw_query_string", ",", "basestring", ")", ":", "return", "None", "query_string", "=", "self", ".", "_cleanup", "(", "raw_query_string", ")", "parts", "=", "self", ".", "_split", "(", "query_string", ")", "parts", "=", "self", ".", "_sanitize_keywords", "(", "parts", ")", "tokens", "=", "self", ".", "_tokenize", "(", "parts", ")", "if", "tokens", ":", "# Run subparsers to analyze parts of the query", "self", ".", "parsed_resolution", "=", "self", ".", "_parse_resolution", "(", "tokens", ")", "self", ".", "parsed_time", "=", "self", ".", "_parse_time", "(", "tokens", ")", "self", ".", "parsed_time_overlap", "=", "self", ".", "_parse_duration", "(", "self", ".", "parsed_time", ")", "self", ".", "parsed_datapoints", "=", "self", ".", "_parse_datapoints", "(", "self", ".", "parsed_time_overlap", ".", "timespan_seconds", "(", ")", ",", "self", ".", "parsed_resolution", ",", "self", ".", "parse_keyword", "(", "Keyword", ".", "LIMIT", ",", "tokens", ")", ")", "return", "self", ".", "create_query_object", "(", "tokens", ")" ]
7ebe7bde965e27737b961a0cb5740724d174fdc7
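End to end, parse() cleans the raw string, tokenizes it, and then runs the resolution/time/datapoint subparsers before building a query object. A hedged usage sketch (the constructor arguments, if any, are not shown in this record):

parser = QueryParser()  # assumes a no-argument constructor
query = parser.parse(
    "select value from cpu where time > now() - 2h group by time(10s)")
if query is not None:
    print(query.get_resolution())  # 10, from GROUP BY time(10s)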
valid
QueryParser.create_query_object
Analyze query tokens and create an InfluxDBStatement from them Return None on error :param tokens: A list of InfluxDB query tokens
protector/parser/query_parser.py
def create_query_object(self, tokens): """ Analyze query tokens and create an InfluxDBStatement from them Return None on error :param tokens: A list of InfluxDB query tokens """ try: query_type = tokens['type'] return getattr(self, 'create_%s_query' % query_type)(tokens) except (KeyError, TypeError): return self.invalid_query(tokens)
def create_query_object(self, tokens): """ Analyze query tokens and create an InfluxDBStatement from them Return None on error :param tokens: A list of InfluxDB query tokens """ try: query_type = tokens['type'] return getattr(self, 'create_%s_query' % query_type)(tokens) except (KeyError, TypeError): return self.invalid_query(tokens)
[ "Analyze", "query", "tokens", "and", "create", "an", "InfluxDBStatement", "from", "them", "Return", "None", "on", "error", ":", "param", "tokens", ":", "A", "list", "of", "InfluxDB", "query", "tokens" ]
trivago/Protector
python
https://github.com/trivago/Protector/blob/7ebe7bde965e27737b961a0cb5740724d174fdc7/protector/parser/query_parser.py#L127-L137
[ "def", "create_query_object", "(", "self", ",", "tokens", ")", ":", "try", ":", "query_type", "=", "tokens", "[", "'type'", "]", "return", "getattr", "(", "self", ",", "'create_%s_query'", "%", "query_type", ")", "(", "tokens", ")", "except", "(", "KeyError", ",", "TypeError", ")", ":", "return", "self", ".", "invalid_query", "(", "tokens", ")" ]
7ebe7bde965e27737b961a0cb5740724d174fdc7
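create_query_object resolves the handler by name with getattr('create_%s_query' % type), falling back to invalid_query on malformed tokens. The same dispatch pattern in standalone form:

class Dispatcher(object):
    def handle_select(self, tokens):
        return ('select', tokens)

    def handle_invalid(self, tokens):
        return ('invalid', tokens)

    def dispatch(self, tokens):
        try:
            # Look up the handler from the token type, e.g. handle_select.
            return getattr(self, 'handle_%s' % tokens['type'])(tokens)
        except (KeyError, TypeError, AttributeError):
            return self.handle_invalid(tokens)

assert Dispatcher().dispatch({'type': 'select'})[0] == 'select'
assert Dispatcher().dispatch({'type': 'drop'})[0] == 'invalid'

Note that the original catches only KeyError and TypeError, so a token type without a matching create_*_query method would surface as an AttributeError.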
valid
QueryParser.create_select_query
Parse tokens of select query :param tokens: A list of InfluxDB query tokens
protector/parser/query_parser.py
def create_select_query(self, tokens): """ Parse tokens of select query :param tokens: A list of InfluxDB query tokens """ if not tokens[Keyword.SELECT]: return None if not tokens[Keyword.FROM]: return None return SelectQuery( self.parse_keyword(Keyword.SELECT, tokens), self.parse_keyword(Keyword.FROM, tokens), where_stmt=self.parse_keyword(Keyword.WHERE, tokens), limit_stmt=self.parse_keyword(Keyword.LIMIT, tokens), group_by_stmt=self.parse_group(tokens), duration=self.parsed_time_overlap.timespan_seconds(), resolution=self.parsed_resolution, time_ranges=self.parsed_time, time_overlap=self.parsed_time_overlap, datapoints=self.parsed_datapoints )
def create_select_query(self, tokens): """ Parse tokens of select query :param tokens: A list of InfluxDB query tokens """ if not tokens[Keyword.SELECT]: return None if not tokens[Keyword.FROM]: return None return SelectQuery( self.parse_keyword(Keyword.SELECT, tokens), self.parse_keyword(Keyword.FROM, tokens), where_stmt=self.parse_keyword(Keyword.WHERE, tokens), limit_stmt=self.parse_keyword(Keyword.LIMIT, tokens), group_by_stmt=self.parse_group(tokens), duration=self.parsed_time_overlap.timespan_seconds(), resolution=self.parsed_resolution, time_ranges=self.parsed_time, time_overlap=self.parsed_time_overlap, datapoints=self.parsed_datapoints )
[ "Parse", "tokens", "of", "select", "query", ":", "param", "tokens", ":", "A", "list", "of", "InfluxDB", "query", "tokens" ]
trivago/Protector
python
https://github.com/trivago/Protector/blob/7ebe7bde965e27737b961a0cb5740724d174fdc7/protector/parser/query_parser.py#L139-L160
[ "def", "create_select_query", "(", "self", ",", "tokens", ")", ":", "if", "not", "tokens", "[", "Keyword", ".", "SELECT", "]", ":", "return", "None", "if", "not", "tokens", "[", "Keyword", ".", "FROM", "]", ":", "return", "None", "return", "SelectQuery", "(", "self", ".", "parse_keyword", "(", "Keyword", ".", "SELECT", ",", "tokens", ")", ",", "self", ".", "parse_keyword", "(", "Keyword", ".", "FROM", ",", "tokens", ")", ",", "where_stmt", "=", "self", ".", "parse_keyword", "(", "Keyword", ".", "WHERE", ",", "tokens", ")", ",", "limit_stmt", "=", "self", ".", "parse_keyword", "(", "Keyword", ".", "LIMIT", ",", "tokens", ")", ",", "group_by_stmt", "=", "self", ".", "parse_group", "(", "tokens", ")", ",", "duration", "=", "self", ".", "parsed_time_overlap", ".", "timespan_seconds", "(", ")", ",", "resolution", "=", "self", ".", "parsed_resolution", ",", "time_ranges", "=", "self", ".", "parsed_time", ",", "time_overlap", "=", "self", ".", "parsed_time_overlap", ",", "datapoints", "=", "self", ".", "parsed_datapoints", ")" ]
7ebe7bde965e27737b961a0cb5740724d174fdc7
valid
QueryParser.create_list_query
Parse tokens of list query :param tokens: A list of InfluxDB query tokens
protector/parser/query_parser.py
def create_list_query(self, tokens): """ Parse tokens of list query :param tokens: A list of InfluxDB query tokens """ if not tokens[Keyword.SERIES]: # A list series keyword is allowed # without a series name or regex tokens[Keyword.SERIES] = '' return ListQuery(self.parse_keyword(Keyword.SERIES, tokens))
def create_list_query(self, tokens): """ Parse tokens of list query :param tokens: A list of InfluxDB query tokens """ if not tokens[Keyword.SERIES]: # A list series keyword is allowed # without a series name or regex tokens[Keyword.SERIES] = '' return ListQuery(self.parse_keyword(Keyword.SERIES, tokens))
[ "Parse", "tokens", "of", "list", "query", ":", "param", "tokens", ":", "A", "list", "of", "InfluxDB", "query", "tokens" ]
trivago/Protector
python
https://github.com/trivago/Protector/blob/7ebe7bde965e27737b961a0cb5740724d174fdc7/protector/parser/query_parser.py#L162-L171
[ "def", "create_list_query", "(", "self", ",", "tokens", ")", ":", "if", "not", "tokens", "[", "Keyword", ".", "SERIES", "]", ":", "# A list series keyword is allowed", "# without a series name or regex", "tokens", "[", "Keyword", ".", "SERIES", "]", "=", "''", "return", "ListQuery", "(", "self", ".", "parse_keyword", "(", "Keyword", ".", "SERIES", ",", "tokens", ")", ")" ]
7ebe7bde965e27737b961a0cb5740724d174fdc7
valid
QueryParser.create_drop_query
Parse tokens of drop query :param tokens: A list of InfluxDB query tokens
protector/parser/query_parser.py
def create_drop_query(self, tokens): """ Parse tokens of drop query :param tokens: A list of InfluxDB query tokens """ if not tokens[Keyword.SERIES]: return None return DropQuery(self.parse_keyword(Keyword.SERIES, tokens))
def create_drop_query(self, tokens): """ Parse tokens of drop query :param tokens: A list of InfluxDB query tokens """ if not tokens[Keyword.SERIES]: return None return DropQuery(self.parse_keyword(Keyword.SERIES, tokens))
[ "Parse", "tokens", "of", "drop", "query", ":", "param", "tokens", ":", "A", "list", "of", "InfluxDB", "query", "tokens" ]
trivago/Protector
python
https://github.com/trivago/Protector/blob/7ebe7bde965e27737b961a0cb5740724d174fdc7/protector/parser/query_parser.py#L173-L180
[ "def", "create_drop_query", "(", "self", ",", "tokens", ")", ":", "if", "not", "tokens", "[", "Keyword", ".", "SERIES", "]", ":", "return", "None", "return", "DropQuery", "(", "self", ".", "parse_keyword", "(", "Keyword", ".", "SERIES", ",", "tokens", ")", ")" ]
7ebe7bde965e27737b961a0cb5740724d174fdc7
valid
QueryParser.create_delete_query
Parse tokens of delete query :param tokens: A list of InfluxDB query tokens
protector/parser/query_parser.py
def create_delete_query(self, tokens): """ Parse tokens of delete query :param tokens: A list of InfluxDB query tokens """ # From keyword is required if not tokens[Keyword.FROM]: return None where_stmt = self.parse_keyword(Keyword.WHERE, tokens) if where_stmt: if not where_stmt.startswith('time'): return None return DeleteQuery( self.parse_keyword(Keyword.FROM, tokens), self.parse_keyword(Keyword.WHERE, tokens) )
def create_delete_query(self, tokens): """ Parse tokens of delete query :param tokens: A list of InfluxDB query tokens """ # From keyword is required if not tokens[Keyword.FROM]: return None where_stmt = self.parse_keyword(Keyword.WHERE, tokens) if where_stmt: if not where_stmt.startswith('time'): return None return DeleteQuery( self.parse_keyword(Keyword.FROM, tokens), self.parse_keyword(Keyword.WHERE, tokens) )
[ "Parse", "tokens", "of", "delete", "query", ":", "param", "tokens", ":", "A", "list", "of", "InfluxDB", "query", "tokens" ]
trivago/Protector
python
https://github.com/trivago/Protector/blob/7ebe7bde965e27737b961a0cb5740724d174fdc7/protector/parser/query_parser.py#L182-L197
[ "def", "create_delete_query", "(", "self", ",", "tokens", ")", ":", "# From keyword is required", "if", "not", "tokens", "[", "Keyword", ".", "FROM", "]", ":", "return", "None", "where_stmt", "=", "self", ".", "parse_keyword", "(", "Keyword", ".", "WHERE", ",", "tokens", ")", "if", "where_stmt", ":", "if", "not", "where_stmt", ".", "startswith", "(", "'time'", ")", ":", "return", "None", "return", "DeleteQuery", "(", "self", ".", "parse_keyword", "(", "Keyword", ".", "FROM", ",", "tokens", ")", ",", "self", ".", "parse_keyword", "(", "Keyword", ".", "WHERE", ",", "tokens", ")", ")" ]
7ebe7bde965e27737b961a0cb5740724d174fdc7
valid
QueryParser._parse_time
Parse the date range for the query E.g. WHERE time > now() - 48h AND time < now() - 24h would result in DateRange(datetime_start, datetime_end) where datetime_start would be parsed from now() - 48h and datetime_end would be parsed from now() - 24h :param tokens: :return:
protector/parser/query_parser.py
def _parse_time(self, tokens): """ Parse the date range for the query E.g. WHERE time > now() - 48h AND time < now() - 24h would result in DateRange(datetime_start, datetime_end) where datetime_start would be parsed from now() - 48h and datetime_end would be parsed from now() - 24h :param tokens: :return: """ return self.time_parser.parse(self.parse_keyword(Keyword.WHERE, tokens))
def _parse_time(self, tokens): """ Parse the date range for the query E.g. WHERE time > now() - 48h AND time < now() - 24h would result in DateRange(datetime_start, datetime_end) where datetime_start would be parsed from now() - 48h and datetime_end would be parsed from now() - 24h :param tokens: :return: """ return self.time_parser.parse(self.parse_keyword(Keyword.WHERE, tokens))
[ "Parse", "the", "date", "range", "for", "the", "query" ]
trivago/Protector
python
https://github.com/trivago/Protector/blob/7ebe7bde965e27737b961a0cb5740724d174fdc7/protector/parser/query_parser.py#L212-L226
[ "def", "_parse_time", "(", "self", ",", "tokens", ")", ":", "return", "self", ".", "time_parser", ".", "parse", "(", "self", ".", "parse_keyword", "(", "Keyword", ".", "WHERE", ",", "tokens", ")", ")" ]
7ebe7bde965e27737b961a0cb5740724d174fdc7
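_parse_time hands the WHERE clause to a time parser that resolves relative expressions like now() - 48h into datetimes. A rough standalone sketch of that arithmetic (regex and supported units are illustrative and far narrower than the project's parser):

import re
from datetime import datetime, timedelta

UNITS = {'s': 'seconds', 'm': 'minutes', 'h': 'hours', 'd': 'days'}

def relative_time(expr, now=None):
    # Handles only the 'now() - <n><unit>' shape, e.g. 'now() - 48h'.
    now = now or datetime.utcnow()
    match = re.match(r'now\(\)\s*-\s*(\d+)([smhd])', expr)
    if not match:
        return now
    amount, unit = int(match.group(1)), match.group(2)
    return now - timedelta(**{UNITS[unit]: amount})

now = datetime.utcnow()
assert relative_time('now() - 48h', now) < relative_time('now() - 24h', now)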
valid
QueryParser._parse_resolution
Parse resolution from the GROUP BY statement. E.g. GROUP BY time(10s) would mean a 10 second resolution :param tokens: :return:
protector/parser/query_parser.py
def _parse_resolution(self, tokens): """ Parse resolution from the GROUP BY statement. E.g. GROUP BY time(10s) would mean a 10 second resolution :param tokens: :return: """ return self.resolution_parser.parse(self.parse_keyword(Keyword.GROUP_BY, tokens))
def _parse_resolution(self, tokens): """ Parse resolution from the GROUP BY statement. E.g. GROUP BY time(10s) would mean a 10 second resolution :param tokens: :return: """ return self.resolution_parser.parse(self.parse_keyword(Keyword.GROUP_BY, tokens))
[ "Parse", "resolution", "from", "the", "GROUP", "BY", "statement", ".", "E", ".", "g", ".", "GROUP", "BY", "time", "(", "10s", ")", "would", "mean", "a", "10", "second", "resolution", ":", "param", "tokens", ":", ":", "return", ":" ]
trivago/Protector
python
https://github.com/trivago/Protector/blob/7ebe7bde965e27737b961a0cb5740724d174fdc7/protector/parser/query_parser.py#L228-L235
[ "def", "_parse_resolution", "(", "self", ",", "tokens", ")", ":", "return", "self", ".", "resolution_parser", ".", "parse", "(", "self", ".", "parse_keyword", "(", "Keyword", ".", "GROUP_BY", ",", "tokens", ")", ")" ]
7ebe7bde965e27737b961a0cb5740724d174fdc7
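_parse_resolution extracts the bucket width in seconds from the GROUP BY clause. A standalone approximation that only understands the time(<n>s) form (the real resolution parser handles more units):

import re

def parse_resolution(group_by_clause):
    # Pull the seconds out of e.g. 'time(10s)'; None when absent.
    match = re.search(r'time\((\d+)s\)', group_by_clause or '')
    return int(match.group(1)) if match else None

assert parse_resolution('time(10s)') == 10
assert parse_resolution(None) is None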
valid
QueryParser._parse_datapoints
Parse the number of datapoints of a query. This can be calculated from the given duration and resolution of the query. E.g. if the query has a duration of 2*60*60 = 7200 seconds and a resolution of 10 seconds then the number of datapoints would be 7200/10 => 720 datapoints. :param parsed_duration: :param parsed_resolution: :param limit: :return:
protector/parser/query_parser.py
def _parse_datapoints(self, parsed_duration, parsed_resolution, limit): """ Parse the number of datapoints of a query. This can be calculated from the given duration and resolution of the query. E.g. if the query has a duration of 2*60*60 = 7200 seconds and a resolution of 10 seconds then the number of datapoints would be 7200/10 => 720 datapoints. :param parsed_duration: :param parsed_resolution: :param limit: :return: """ return self.datapoints_parser.parse(parsed_duration, parsed_resolution, limit)
def _parse_datapoints(self, parsed_duration, parsed_resolution, limit): """ Parse the number of datapoints of a query. This can be calculated from the given duration and resolution of the query. E.g. if the query has a duration of 2*60*60 = 7200 seconds and a resolution of 10 seconds then the number of datapoints would be 7200/10 => 720 datapoints. :param parsed_duration: :param parsed_resolution: :param limit: :return: """ return self.datapoints_parser.parse(parsed_duration, parsed_resolution, limit)
[ "Parse", "the", "number", "of", "datapoints", "of", "a", "query", ".", "This", "can", "be", "calculated", "from", "the", "given", "duration", "and", "resolution", "of", "the", "query", ".", "E", ".", "g", ".", "if", "the", "query", "has", "a", "duration", "of", "2", "*", "60", "*", "60", "=", "7200", "seconds", "and", "a", "resolution", "of", "10", "seconds", "then", "the", "number", "of", "datapoints", "would", "be", "7200", "/", "10", "=", ">", "720", "datapoints", "." ]
trivago/Protector
python
https://github.com/trivago/Protector/blob/7ebe7bde965e27737b961a0cb5740724d174fdc7/protector/parser/query_parser.py#L252-L264
[ "def", "_parse_datapoints", "(", "self", ",", "parsed_duration", ",", "parsed_resolution", ",", "limit", ")", ":", "return", "self", ".", "datapoints_parser", ".", "parse", "(", "parsed_duration", ",", "parsed_resolution", ",", "limit", ")" ]
7ebe7bde965e27737b961a0cb5740724d174fdc7
valid
RuleChecker.check
:param query:
protector/rules/query_old_data.py
def check(self, query): """ :param query: """ if query.get_type() not in {Keyword.SELECT}: # Only select queries need to be checked here # All others are not affected by this rule. Bailing out. return Ok(True) earliest_date = query.get_earliest_date() if earliest_date >= self.min_start_date: return Ok(True) if query.limit_stmt: return Ok(True) return Err(("Querying for data before {} is prohibited. " "Your beginning date is {}, which is before that.").format(self.min_start_date.strftime("%Y-%m-%d"), earliest_date))
def check(self, query): """ :param query: """ if query.get_type() not in {Keyword.SELECT}: # Only select queries need to be checked here # All others are not affected by this rule. Bailing out. return Ok(True) earliest_date = query.get_earliest_date() if earliest_date >= self.min_start_date: return Ok(True) if query.limit_stmt: return Ok(True) return Err(("Querying for data before {} is prohibited. " "Your beginning date is {}, which is before that.").format(self.min_start_date.strftime("%Y-%m-%d"), earliest_date))
[ ":", "param", "query", ":" ]
trivago/Protector
python
https://github.com/trivago/Protector/blob/7ebe7bde965e27737b961a0cb5740724d174fdc7/protector/rules/query_old_data.py#L22-L40
[ "def", "check", "(", "self", ",", "query", ")", ":", "if", "query", ".", "get_type", "(", ")", "not", "in", "{", "Keyword", ".", "SELECT", "}", ":", "# Only select queries need to be checked here", "# All others are not affected by this rule. Bailing out.", "return", "Ok", "(", "True", ")", "earliest_date", "=", "query", ".", "get_earliest_date", "(", ")", "if", "earliest_date", ">=", "self", ".", "min_start_date", ":", "return", "Ok", "(", "True", ")", "if", "query", ".", "limit_stmt", ":", "return", "Ok", "(", "True", ")", "return", "Err", "(", "(", "\"Querying for data before {} is prohibited. \"", "\"Your beginning date is {}, which is before that.\"", ")", ".", "format", "(", "self", ".", "min_start_date", ".", "strftime", "(", "\"%Y-%m-%d\"", ")", ",", "earliest_date", ")", ")" ]
7ebe7bde965e27737b961a0cb5740724d174fdc7
valid
ESQuery.extract_date
Extract date from string if necessary. :returns: the extracted date.
invenio_stats/queries.py
def extract_date(self, date): """Extract date from string if necessary. :returns: the extracted date. """ if isinstance(date, six.string_types): try: date = dateutil.parser.parse(date) except ValueError: raise ValueError( 'Invalid date format for statistic {}.'.format(self.query_name)) if not isinstance(date, datetime): raise TypeError( 'Invalid date type for statistic {}.'.format(self.query_name)) return date
def extract_date(self, date): """Extract date from string if necessary. :returns: the extracted date. """ if isinstance(date, six.string_types): try: date = dateutil.parser.parse(date) except ValueError: raise ValueError( 'Invalid date format for statistic {}.'.format(self.query_name)) if not isinstance(date, datetime): raise TypeError( 'Invalid date type for statistic {}.'.format(self.query_name)) return date
[ "Extract", "date", "from", "string", "if", "necessary", "." ]
inveniosoftware/invenio-stats
python
https://github.com/inveniosoftware/invenio-stats/blob/d877ae5462084abb4a28a20f1ebb3d636769c1bc/invenio_stats/queries.py#L38-L54
[ "def", "extract_date", "(", "self", ",", "date", ")", ":", "if", "isinstance", "(", "date", ",", "six", ".", "string_types", ")", ":", "try", ":", "date", "=", "dateutil", ".", "parser", ".", "parse", "(", "date", ")", "except", "ValueError", ":", "raise", "ValueError", "(", "'Invalid date format for statistic {}.'", ")", ".", "format", "(", "self", ".", "query_name", ")", "if", "not", "isinstance", "(", "date", ",", "datetime", ")", ":", "raise", "TypeError", "(", "'Invalid date type for statistic {}.'", ")", ".", "format", "(", "self", ".", "query_name", ")", "return", "date" ]
d877ae5462084abb4a28a20f1ebb3d636769c1bc
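With the raise corrected as above (the original called .format on the exception object itself, which would raise AttributeError instead), extract_date accepts either a datetime or any string dateutil can parse:

import dateutil.parser
from datetime import datetime

parsed = dateutil.parser.parse('2018-01-01T12:30:00')
assert isinstance(parsed, datetime)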
valid
ESDateHistogramQuery.validate_arguments
Validate query arguments.
invenio_stats/queries.py
def validate_arguments(self, interval, start_date, end_date, **kwargs): """Validate query arguments.""" if interval not in self.allowed_intervals: raise InvalidRequestInputError( 'Invalid aggregation time interval for statistic {}.'.format(self.query_name)) if set(kwargs) < set(self.required_filters): raise InvalidRequestInputError( 'Missing one of the required parameters {0} in ' 'query {1}'.format(set(self.required_filters.keys()), self.query_name) )
def validate_arguments(self, interval, start_date, end_date, **kwargs): """Validate query arguments.""" if interval not in self.allowed_intervals: raise InvalidRequestInputError( 'Invalid aggregation time interval for statistic {}.'.format(self.query_name)) if set(kwargs) < set(self.required_filters): raise InvalidRequestInputError( 'Missing one of the required parameters {0} in ' 'query {1}'.format(set(self.required_filters.keys()), self.query_name) )
[ "Validate", "query", "arguments", "." ]
inveniosoftware/invenio-stats
python
https://github.com/inveniosoftware/invenio-stats/blob/d877ae5462084abb4a28a20f1ebb3d636769c1bc/invenio_stats/queries.py#L97-L108
[ "def", "validate_arguments", "(", "self", ",", "interval", ",", "start_date", ",", "end_date", ",", "*", "*", "kwargs", ")", ":", "if", "interval", "not", "in", "self", ".", "allowed_intervals", ":", "raise", "InvalidRequestInputError", "(", "'Invalid aggregation time interval for statistic {}.'", ")", ".", "format", "(", "self", ".", "query_name", ")", "if", "set", "(", "kwargs", ")", "<", "set", "(", "self", ".", "required_filters", ")", ":", "raise", "InvalidRequestInputError", "(", "'Missing one of the required parameters {0} in '", "'query {1}'", ".", "format", "(", "set", "(", "self", ".", "required_filters", ".", "keys", "(", ")", ")", ",", "self", ".", "query_name", ")", ")" ]
d877ae5462084abb4a28a20f1ebb3d636769c1bc
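The set(kwargs) < set(self.required_filters) test is a strict-subset comparison: it is true exactly when at least one required filter is missing, while extra parameters pass through. For example:

required = {'bucket_id', 'file_key'}
assert {'bucket_id'} < required                        # file_key missing -> invalid
assert not ({'bucket_id', 'file_key'} < required)      # all present -> ok
assert not ({'bucket_id', 'file_key', 'q'} < required) # extras are fine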
valid
ESDateHistogramQuery.build_query
Build the elasticsearch query.
invenio_stats/queries.py
def build_query(self, interval, start_date, end_date, **kwargs): """Build the elasticsearch query.""" agg_query = Search(using=self.client, index=self.index, doc_type=self.doc_type)[0:0] if start_date is not None or end_date is not None: time_range = {} if start_date is not None: time_range['gte'] = start_date.isoformat() if end_date is not None: time_range['lte'] = end_date.isoformat() agg_query = agg_query.filter( 'range', **{self.time_field: time_range}) for modifier in self.query_modifiers: agg_query = modifier(agg_query, **kwargs) base_agg = agg_query.aggs.bucket( 'histogram', 'date_histogram', field=self.time_field, interval=interval ) for destination, (metric, field, opts) in self.metric_fields.items(): base_agg.metric(destination, metric, field=field, **opts) if self.copy_fields: base_agg.metric( 'top_hit', 'top_hits', size=1, sort={'timestamp': 'desc'} ) for query_param, filtered_field in self.required_filters.items(): if query_param in kwargs: agg_query = agg_query.filter( 'term', **{filtered_field: kwargs[query_param]} ) return agg_query
def build_query(self, interval, start_date, end_date, **kwargs): """Build the elasticsearch query.""" agg_query = Search(using=self.client, index=self.index, doc_type=self.doc_type)[0:0] if start_date is not None or end_date is not None: time_range = {} if start_date is not None: time_range['gte'] = start_date.isoformat() if end_date is not None: time_range['lte'] = end_date.isoformat() agg_query = agg_query.filter( 'range', **{self.time_field: time_range}) for modifier in self.query_modifiers: agg_query = modifier(agg_query, **kwargs) base_agg = agg_query.aggs.bucket( 'histogram', 'date_histogram', field=self.time_field, interval=interval ) for destination, (metric, field, opts) in self.metric_fields.items(): base_agg.metric(destination, metric, field=field, **opts) if self.copy_fields: base_agg.metric( 'top_hit', 'top_hits', size=1, sort={'timestamp': 'desc'} ) for query_param, filtered_field in self.required_filters.items(): if query_param in kwargs: agg_query = agg_query.filter( 'term', **{filtered_field: kwargs[query_param]} ) return agg_query
[ "Build", "the", "elasticsearch", "query", "." ]
inveniosoftware/invenio-stats
python
https://github.com/inveniosoftware/invenio-stats/blob/d877ae5462084abb4a28a20f1ebb3d636769c1bc/invenio_stats/queries.py#L110-L149
[ "def", "build_query", "(", "self", ",", "interval", ",", "start_date", ",", "end_date", ",", "*", "*", "kwargs", ")", ":", "agg_query", "=", "Search", "(", "using", "=", "self", ".", "client", ",", "index", "=", "self", ".", "index", ",", "doc_type", "=", "self", ".", "doc_type", ")", "[", "0", ":", "0", "]", "if", "start_date", "is", "not", "None", "or", "end_date", "is", "not", "None", ":", "time_range", "=", "{", "}", "if", "start_date", "is", "not", "None", ":", "time_range", "[", "'gte'", "]", "=", "start_date", ".", "isoformat", "(", ")", "if", "end_date", "is", "not", "None", ":", "time_range", "[", "'lte'", "]", "=", "end_date", ".", "isoformat", "(", ")", "agg_query", "=", "agg_query", ".", "filter", "(", "'range'", ",", "*", "*", "{", "self", ".", "time_field", ":", "time_range", "}", ")", "for", "modifier", "in", "self", ".", "query_modifiers", ":", "agg_query", "=", "modifier", "(", "agg_query", ",", "*", "*", "kwargs", ")", "base_agg", "=", "agg_query", ".", "aggs", ".", "bucket", "(", "'histogram'", ",", "'date_histogram'", ",", "field", "=", "self", ".", "time_field", ",", "interval", "=", "interval", ")", "for", "destination", ",", "(", "metric", ",", "field", ",", "opts", ")", "in", "self", ".", "metric_fields", ".", "items", "(", ")", ":", "base_agg", ".", "metric", "(", "destination", ",", "metric", ",", "field", "=", "field", ",", "*", "*", "opts", ")", "if", "self", ".", "copy_fields", ":", "base_agg", ".", "metric", "(", "'top_hit'", ",", "'top_hits'", ",", "size", "=", "1", ",", "sort", "=", "{", "'timestamp'", ":", "'desc'", "}", ")", "for", "query_param", ",", "filtered_field", "in", "self", ".", "required_filters", ".", "items", "(", ")", ":", "if", "query_param", "in", "kwargs", ":", "agg_query", "=", "agg_query", ".", "filter", "(", "'term'", ",", "*", "*", "{", "filtered_field", ":", "kwargs", "[", "query_param", "]", "}", ")", "return", "agg_query" ]
d877ae5462084abb4a28a20f1ebb3d636769c1bc
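The DSL chain above produces a size-0 search with a range filter, optional term filters, and a date_histogram carrying the metric and top_hits sub-aggregations. Roughly, the request body looks like this (abridged Python dict, assuming time_field='timestamp' and a bucket_id filter; values are illustrative):

body = {
    'size': 0,
    'query': {'bool': {'filter': [
        {'range': {'timestamp': {'gte': '2018-01-01T00:00:00',
                                 'lte': '2018-02-01T00:00:00'}}},
        {'term': {'bucket_id': 'some-bucket-id'}},
    ]}},
    'aggs': {'histogram': {
        'date_histogram': {'field': 'timestamp', 'interval': 'day'},
        'aggs': {'top_hit': {'top_hits': {'size': 1,
                                          'sort': {'timestamp': 'desc'}}}},
    }},
}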
valid
ESDateHistogramQuery.process_query_result
Build the result using the query result.
invenio_stats/queries.py
def process_query_result(self, query_result, interval, start_date, end_date): """Build the result using the query result.""" def build_buckets(agg): """Build recursively result buckets.""" bucket_result = dict( key=agg['key'], date=agg['key_as_string'], ) for metric in self.metric_fields: bucket_result[metric] = agg[metric]['value'] if self.copy_fields and agg['top_hit']['hits']['hits']: doc = agg['top_hit']['hits']['hits'][0]['_source'] for destination, source in self.copy_fields.items(): if isinstance(source, six.string_types): bucket_result[destination] = doc[source] else: bucket_result[destination] = source(bucket_result, doc) return bucket_result # Add copy_fields buckets = query_result['aggregations']['histogram']['buckets'] return dict( interval=interval, key_type='date', start_date=start_date.isoformat() if start_date else None, end_date=end_date.isoformat() if end_date else None, buckets=[build_buckets(b) for b in buckets] )
def process_query_result(self, query_result, interval, start_date, end_date): """Build the result using the query result.""" def build_buckets(agg): """Build recursively result buckets.""" bucket_result = dict( key=agg['key'], date=agg['key_as_string'], ) for metric in self.metric_fields: bucket_result[metric] = agg[metric]['value'] if self.copy_fields and agg['top_hit']['hits']['hits']: doc = agg['top_hit']['hits']['hits'][0]['_source'] for destination, source in self.copy_fields.items(): if isinstance(source, six.string_types): bucket_result[destination] = doc[source] else: bucket_result[destination] = source(bucket_result, doc) return bucket_result # Add copy_fields buckets = query_result['aggregations']['histogram']['buckets'] return dict( interval=interval, key_type='date', start_date=start_date.isoformat() if start_date else None, end_date=end_date.isoformat() if end_date else None, buckets=[build_buckets(b) for b in buckets] )
[ "Build", "the", "result", "using", "the", "query", "result", "." ]
inveniosoftware/invenio-stats
python
https://github.com/inveniosoftware/invenio-stats/blob/d877ae5462084abb4a28a20f1ebb3d636769c1bc/invenio_stats/queries.py#L151-L179
[ "def", "process_query_result", "(", "self", ",", "query_result", ",", "interval", ",", "start_date", ",", "end_date", ")", ":", "def", "build_buckets", "(", "agg", ")", ":", "\"\"\"Build recursively result buckets.\"\"\"", "bucket_result", "=", "dict", "(", "key", "=", "agg", "[", "'key'", "]", ",", "date", "=", "agg", "[", "'key_as_string'", "]", ",", ")", "for", "metric", "in", "self", ".", "metric_fields", ":", "bucket_result", "[", "metric", "]", "=", "agg", "[", "metric", "]", "[", "'value'", "]", "if", "self", ".", "copy_fields", "and", "agg", "[", "'top_hit'", "]", "[", "'hits'", "]", "[", "'hits'", "]", ":", "doc", "=", "agg", "[", "'top_hit'", "]", "[", "'hits'", "]", "[", "'hits'", "]", "[", "0", "]", "[", "'_source'", "]", "for", "destination", ",", "source", "in", "self", ".", "copy_fields", ".", "items", "(", ")", ":", "if", "isinstance", "(", "source", ",", "six", ".", "string_types", ")", ":", "bucket_result", "[", "destination", "]", "=", "doc", "[", "source", "]", "else", ":", "bucket_result", "[", "destination", "]", "=", "source", "(", "bucket_result", ",", "doc", ")", "return", "bucket_result", "# Add copy_fields", "buckets", "=", "query_result", "[", "'aggregations'", "]", "[", "'histogram'", "]", "[", "'buckets'", "]", "return", "dict", "(", "interval", "=", "interval", ",", "key_type", "=", "'date'", ",", "start_date", "=", "start_date", ".", "isoformat", "(", ")", "if", "start_date", "else", "None", ",", "end_date", "=", "end_date", ".", "isoformat", "(", ")", "if", "end_date", "else", "None", ",", "buckets", "=", "[", "build_buckets", "(", "b", ")", "for", "b", "in", "buckets", "]", ")" ]
d877ae5462084abb4a28a20f1ebb3d636769c1bc
valid
ESTermsQuery.validate_arguments
Validate query arguments.
invenio_stats/queries.py
def validate_arguments(self, start_date, end_date, **kwargs): """Validate query arguments.""" if set(kwargs) < set(self.required_filters): raise InvalidRequestInputError( 'Missing one of the required parameters {0} in ' 'query {1}'.format(set(self.required_filters.keys()), self.query_name) )
def validate_arguments(self, start_date, end_date, **kwargs): """Validate query arguments.""" if set(kwargs) < set(self.required_filters): raise InvalidRequestInputError( 'Missing one of the required parameters {0} in ' 'query {1}'.format(set(self.required_filters.keys()), self.query_name) )
[ "Validate", "query", "arguments", "." ]
inveniosoftware/invenio-stats
python
https://github.com/inveniosoftware/invenio-stats/blob/d877ae5462084abb4a28a20f1ebb3d636769c1bc/invenio_stats/queries.py#L225-L232
[ "def", "validate_arguments", "(", "self", ",", "start_date", ",", "end_date", ",", "*", "*", "kwargs", ")", ":", "if", "set", "(", "kwargs", ")", "<", "set", "(", "self", ".", "required_filters", ")", ":", "raise", "InvalidRequestInputError", "(", "'Missing one of the required parameters {0} in '", "'query {1}'", ".", "format", "(", "set", "(", "self", ".", "required_filters", ".", "keys", "(", ")", ")", ",", "self", ".", "query_name", ")", ")" ]
d877ae5462084abb4a28a20f1ebb3d636769c1bc
valid
ESTermsQuery.build_query
Build the elasticsearch query.
invenio_stats/queries.py
def build_query(self, start_date, end_date, **kwargs): """Build the elasticsearch query.""" agg_query = Search(using=self.client, index=self.index, doc_type=self.doc_type)[0:0] if start_date is not None or end_date is not None: time_range = {} if start_date is not None: time_range['gte'] = start_date.isoformat() if end_date is not None: time_range['lte'] = end_date.isoformat() agg_query = agg_query.filter( 'range', **{self.time_field: time_range}) for modifier in self.query_modifiers: agg_query = modifier(agg_query, **kwargs) base_agg = agg_query.aggs def _apply_metric_aggs(agg): for dst, (metric, field, opts) in self.metric_fields.items(): agg.metric(dst, metric, field=field, **opts) _apply_metric_aggs(base_agg) if self.aggregated_fields: cur_agg = base_agg for term in self.aggregated_fields: cur_agg = cur_agg.bucket(term, 'terms', field=term, size=0) _apply_metric_aggs(cur_agg) if self.copy_fields: base_agg.metric( 'top_hit', 'top_hits', size=1, sort={'timestamp': 'desc'} ) for query_param, filtered_field in self.required_filters.items(): if query_param in kwargs: agg_query = agg_query.filter( 'term', **{filtered_field: kwargs[query_param]} ) return agg_query
def build_query(self, start_date, end_date, **kwargs): """Build the elasticsearch query.""" agg_query = Search(using=self.client, index=self.index, doc_type=self.doc_type)[0:0] if start_date is not None or end_date is not None: time_range = {} if start_date is not None: time_range['gte'] = start_date.isoformat() if end_date is not None: time_range['lte'] = end_date.isoformat() agg_query = agg_query.filter( 'range', **{self.time_field: time_range}) for modifier in self.query_modifiers: agg_query = modifier(agg_query, **kwargs) base_agg = agg_query.aggs def _apply_metric_aggs(agg): for dst, (metric, field, opts) in self.metric_fields.items(): agg.metric(dst, metric, field=field, **opts) _apply_metric_aggs(base_agg) if self.aggregated_fields: cur_agg = base_agg for term in self.aggregated_fields: cur_agg = cur_agg.bucket(term, 'terms', field=term, size=0) _apply_metric_aggs(cur_agg) if self.copy_fields: base_agg.metric( 'top_hit', 'top_hits', size=1, sort={'timestamp': 'desc'} ) for query_param, filtered_field in self.required_filters.items(): if query_param in kwargs: agg_query = agg_query.filter( 'term', **{filtered_field: kwargs[query_param]} ) return agg_query
[ "Build", "the", "elasticsearch", "query", "." ]
inveniosoftware/invenio-stats
python
https://github.com/inveniosoftware/invenio-stats/blob/d877ae5462084abb4a28a20f1ebb3d636769c1bc/invenio_stats/queries.py#L234-L276
[ "def", "build_query", "(", "self", ",", "start_date", ",", "end_date", ",", "*", "*", "kwargs", ")", ":", "agg_query", "=", "Search", "(", "using", "=", "self", ".", "client", ",", "index", "=", "self", ".", "index", ",", "doc_type", "=", "self", ".", "doc_type", ")", "[", "0", ":", "0", "]", "if", "start_date", "is", "not", "None", "or", "end_date", "is", "not", "None", ":", "time_range", "=", "{", "}", "if", "start_date", "is", "not", "None", ":", "time_range", "[", "'gte'", "]", "=", "start_date", ".", "isoformat", "(", ")", "if", "end_date", "is", "not", "None", ":", "time_range", "[", "'lte'", "]", "=", "end_date", ".", "isoformat", "(", ")", "agg_query", "=", "agg_query", ".", "filter", "(", "'range'", ",", "*", "*", "{", "self", ".", "time_field", ":", "time_range", "}", ")", "for", "modifier", "in", "self", ".", "query_modifiers", ":", "agg_query", "=", "modifier", "(", "agg_query", ",", "*", "*", "kwargs", ")", "base_agg", "=", "agg_query", ".", "aggs", "def", "_apply_metric_aggs", "(", "agg", ")", ":", "for", "dst", ",", "(", "metric", ",", "field", ",", "opts", ")", "in", "self", ".", "metric_fields", ".", "items", "(", ")", ":", "agg", ".", "metric", "(", "dst", ",", "metric", ",", "field", "=", "field", ",", "*", "*", "opts", ")", "_apply_metric_aggs", "(", "base_agg", ")", "if", "self", ".", "aggregated_fields", ":", "cur_agg", "=", "base_agg", "for", "term", "in", "self", ".", "aggregated_fields", ":", "cur_agg", "=", "cur_agg", ".", "bucket", "(", "term", ",", "'terms'", ",", "field", "=", "term", ",", "size", "=", "0", ")", "_apply_metric_aggs", "(", "cur_agg", ")", "if", "self", ".", "copy_fields", ":", "base_agg", ".", "metric", "(", "'top_hit'", ",", "'top_hits'", ",", "size", "=", "1", ",", "sort", "=", "{", "'timestamp'", ":", "'desc'", "}", ")", "for", "query_param", ",", "filtered_field", "in", "self", ".", "required_filters", ".", "items", "(", ")", ":", "if", "query_param", "in", "kwargs", ":", "agg_query", "=", "agg_query", ".", "filter", "(", "'term'", ",", "*", "*", "{", "filtered_field", ":", "kwargs", "[", "query_param", "]", "}", ")", "return", "agg_query" ]
d877ae5462084abb4a28a20f1ebb3d636769c1bc
valid
ESTermsQuery.process_query_result
Build the result using the query result.
invenio_stats/queries.py
def process_query_result(self, query_result, start_date, end_date): """Build the result using the query result.""" def build_buckets(agg, fields, bucket_result): """Build recursively result buckets.""" # Add metric results for current bucket for metric in self.metric_fields: bucket_result[metric] = agg[metric]['value'] if fields: current_level = fields[0] bucket_result.update(dict( type='bucket', field=current_level, key_type='terms', buckets=[build_buckets(b, fields[1:], dict(key=b['key'])) for b in agg[current_level]['buckets']] )) return bucket_result # Add copy_fields aggs = query_result['aggregations'] result = dict( start_date=start_date.isoformat() if start_date else None, end_date=end_date.isoformat() if end_date else None, ) if self.copy_fields and aggs['top_hit']['hits']['hits']: doc = aggs['top_hit']['hits']['hits'][0]['_source'] for destination, source in self.copy_fields.items(): if isinstance(source, six.string_types): result[destination] = doc[source] else: result[destination] = source(result, doc) return build_buckets(aggs, self.aggregated_fields, result)
def process_query_result(self, query_result, start_date, end_date): """Build the result using the query result.""" def build_buckets(agg, fields, bucket_result): """Build recursively result buckets.""" # Add metric results for current bucket for metric in self.metric_fields: bucket_result[metric] = agg[metric]['value'] if fields: current_level = fields[0] bucket_result.update(dict( type='bucket', field=current_level, key_type='terms', buckets=[build_buckets(b, fields[1:], dict(key=b['key'])) for b in agg[current_level]['buckets']] )) return bucket_result # Add copy_fields aggs = query_result['aggregations'] result = dict( start_date=start_date.isoformat() if start_date else None, end_date=end_date.isoformat() if end_date else None, ) if self.copy_fields and aggs['top_hit']['hits']['hits']: doc = aggs['top_hit']['hits']['hits'][0]['_source'] for destination, source in self.copy_fields.items(): if isinstance(source, six.string_types): result[destination] = doc[source] else: result[destination] = source(result, doc) return build_buckets(aggs, self.aggregated_fields, result)
[ "Build", "the", "result", "using", "the", "query", "result", "." ]
inveniosoftware/invenio-stats
python
https://github.com/inveniosoftware/invenio-stats/blob/d877ae5462084abb4a28a20f1ebb3d636769c1bc/invenio_stats/queries.py#L278-L310
[ "def", "process_query_result", "(", "self", ",", "query_result", ",", "start_date", ",", "end_date", ")", ":", "def", "build_buckets", "(", "agg", ",", "fields", ",", "bucket_result", ")", ":", "\"\"\"Build recursively result buckets.\"\"\"", "# Add metric results for current bucket", "for", "metric", "in", "self", ".", "metric_fields", ":", "bucket_result", "[", "metric", "]", "=", "agg", "[", "metric", "]", "[", "'value'", "]", "if", "fields", ":", "current_level", "=", "fields", "[", "0", "]", "bucket_result", ".", "update", "(", "dict", "(", "type", "=", "'bucket'", ",", "field", "=", "current_level", ",", "key_type", "=", "'terms'", ",", "buckets", "=", "[", "build_buckets", "(", "b", ",", "fields", "[", "1", ":", "]", ",", "dict", "(", "key", "=", "b", "[", "'key'", "]", ")", ")", "for", "b", "in", "agg", "[", "current_level", "]", "[", "'buckets'", "]", "]", ")", ")", "return", "bucket_result", "# Add copy_fields", "aggs", "=", "query_result", "[", "'aggregations'", "]", "result", "=", "dict", "(", "start_date", "=", "start_date", ".", "isoformat", "(", ")", "if", "start_date", "else", "None", ",", "end_date", "=", "end_date", ".", "isoformat", "(", ")", "if", "end_date", "else", "None", ",", ")", "if", "self", ".", "copy_fields", "and", "aggs", "[", "'top_hit'", "]", "[", "'hits'", "]", "[", "'hits'", "]", ":", "doc", "=", "aggs", "[", "'top_hit'", "]", "[", "'hits'", "]", "[", "'hits'", "]", "[", "0", "]", "[", "'_source'", "]", "for", "destination", ",", "source", "in", "self", ".", "copy_fields", ".", "items", "(", ")", ":", "if", "isinstance", "(", "source", ",", "six", ".", "string_types", ")", ":", "result", "[", "destination", "]", "=", "doc", "[", "source", "]", "else", ":", "result", "[", "destination", "]", "=", "source", "(", "result", ",", "doc", ")", "return", "build_buckets", "(", "aggs", ",", "self", ".", "aggregated_fields", ",", "result", ")" ]
d877ae5462084abb4a28a20f1ebb3d636769c1bc
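The recursive helper in the process_query_result record above walks nested terms buckets and copies metric values at every level. Below is a self-contained sketch of that walk, run against a fabricated aggregation payload; the 'count' metric and 'country' bucket are invented for the demo.

# Pure-Python sketch of the recursive bucket walk; `sample` mimics an
# Elasticsearch terms-aggregation response with one metric per bucket.
def build_buckets(agg, fields, out, metrics=('count',)):
    for metric in metrics:
        out[metric] = agg[metric]['value']
    if fields:
        level = fields[0]
        out.update(type='bucket', field=level, key_type='terms',
                   buckets=[build_buckets(b, fields[1:],
                                          {'key': b['key']}, metrics)
                            for b in agg[level]['buckets']])
    return out

sample = {
    'count': {'value': 3},
    'country': {'buckets': [
        {'key': 'CH', 'count': {'value': 2}},
        {'key': 'DE', 'count': {'value': 1}},
    ]},
}
print(build_buckets(sample, ['country'], {}))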
valid
ESTermsQuery.run
Run the query.
invenio_stats/queries.py
def run(self, start_date=None, end_date=None, **kwargs): """Run the query.""" start_date = self.extract_date(start_date) if start_date else None end_date = self.extract_date(end_date) if end_date else None self.validate_arguments(start_date, end_date, **kwargs) agg_query = self.build_query(start_date, end_date, **kwargs) query_result = agg_query.execute().to_dict() res = self.process_query_result(query_result, start_date, end_date) return res
def run(self, start_date=None, end_date=None, **kwargs): """Run the query.""" start_date = self.extract_date(start_date) if start_date else None end_date = self.extract_date(end_date) if end_date else None self.validate_arguments(start_date, end_date, **kwargs) agg_query = self.build_query(start_date, end_date, **kwargs) query_result = agg_query.execute().to_dict() res = self.process_query_result(query_result, start_date, end_date) return res
[ "Run", "the", "query", "." ]
inveniosoftware/invenio-stats
python
https://github.com/inveniosoftware/invenio-stats/blob/d877ae5462084abb4a28a20f1ebb3d636769c1bc/invenio_stats/queries.py#L312-L321
[ "def", "run", "(", "self", ",", "start_date", "=", "None", ",", "end_date", "=", "None", ",", "*", "*", "kwargs", ")", ":", "start_date", "=", "self", ".", "extract_date", "(", "start_date", ")", "if", "start_date", "else", "None", "end_date", "=", "self", ".", "extract_date", "(", "end_date", ")", "if", "end_date", "else", "None", "self", ".", "validate_arguments", "(", "start_date", ",", "end_date", ",", "*", "*", "kwargs", ")", "agg_query", "=", "self", ".", "build_query", "(", "start_date", ",", "end_date", ",", "*", "*", "kwargs", ")", "query_result", "=", "agg_query", ".", "execute", "(", ")", ".", "to_dict", "(", ")", "res", "=", "self", ".", "process_query_result", "(", "query_result", ",", "start_date", ",", "end_date", ")", "return", "res" ]
d877ae5462084abb4a28a20f1ebb3d636769c1bc
valid
ThreadingHTTPServer.handle_error
Overwrite error handling to suppress socket/ssl related errors :param client_address: Address of client :param request: Request causing an error
protector/proxy/server.py
def handle_error(self, request, client_address): """ Overwrite error handling to suppress socket/ssl related errors :param client_address: Address of client :param request: Request causing an error """ cls, e = sys.exc_info()[:2] if cls is socket.error or cls is ssl.SSLError: pass else: return HTTPServer.handle_error(self, request, client_address)
def handle_error(self, request, client_address): """ Overwrite error handling to suppress socket/ssl related errors :param client_address: Address of client :param request: Request causing an error """ cls, e = sys.exc_info()[:2] if cls is socket.error or cls is ssl.SSLError: pass else: return HTTPServer.handle_error(self, request, client_address)
[ "Overwrite", "error", "handling", "to", "suppress", "socket", "/", "ssl", "related", "errors", ":", "param", "client_address", ":", "Address", "of", "client", ":", "param", "request", ":", "Request", "causing", "an", "error" ]
trivago/Protector
python
https://github.com/trivago/Protector/blob/7ebe7bde965e27737b961a0cb5740724d174fdc7/protector/proxy/server.py#L17-L27
[ "def", "handle_error", "(", "self", ",", "request", ",", "client_address", ")", ":", "cls", ",", "e", "=", "sys", ".", "exc_info", "(", ")", "[", ":", "2", "]", "if", "cls", "is", "socket", ".", "error", "or", "cls", "is", "ssl", ".", "SSLError", ":", "pass", "else", ":", "return", "HTTPServer", ".", "handle_error", "(", "self", ",", "request", ",", "client_address", ")" ]
7ebe7bde965e27737b961a0cb5740724d174fdc7
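The handle_error record above is written against Python 2's HTTPServer. Here is a minimal Python 3 sketch of the same idea, assuming the stdlib http.server; the port and handler are placeholders.

# Python 3 sketch of silencing client-side socket/TLS noise; socket.error
# is an alias of OSError here, matching what the original catches.
import socket
import ssl
import sys
from http.server import HTTPServer, BaseHTTPRequestHandler

class QuietHTTPServer(HTTPServer):
    """HTTPServer that stays silent on client socket/SSL errors."""
    def handle_error(self, request, client_address):
        exc = sys.exc_info()[1]  # handle_error runs inside an except block
        if isinstance(exc, (socket.error, ssl.SSLError)):
            return  # swallow client disconnects and TLS handshake noise
        super().handle_error(request, client_address)

# QuietHTTPServer(('', 8080), BaseHTTPRequestHandler).serve_forever()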
valid
file_download_event_builder
Build a file-download event.
invenio_stats/contrib/event_builders.py
def file_download_event_builder(event, sender_app, obj=None, **kwargs): """Build a file-download event.""" event.update(dict( # When: timestamp=datetime.datetime.utcnow().isoformat(), # What: bucket_id=str(obj.bucket_id), file_id=str(obj.file_id), file_key=obj.key, size=obj.file.size, referrer=request.referrer, # Who: **get_user() )) return event
def file_download_event_builder(event, sender_app, obj=None, **kwargs): """Build a file-download event.""" event.update(dict( # When: timestamp=datetime.datetime.utcnow().isoformat(), # What: bucket_id=str(obj.bucket_id), file_id=str(obj.file_id), file_key=obj.key, size=obj.file.size, referrer=request.referrer, # Who: **get_user() )) return event
[ "Build", "a", "file", "-", "download", "event", "." ]
inveniosoftware/invenio-stats
python
https://github.com/inveniosoftware/invenio-stats/blob/d877ae5462084abb4a28a20f1ebb3d636769c1bc/invenio_stats/contrib/event_builders.py#L20-L34
[ "def", "file_download_event_builder", "(", "event", ",", "sender_app", ",", "obj", "=", "None", ",", "*", "*", "kwargs", ")", ":", "event", ".", "update", "(", "dict", "(", "# When:", "timestamp", "=", "datetime", ".", "datetime", ".", "utcnow", "(", ")", ".", "isoformat", "(", ")", ",", "# What:", "bucket_id", "=", "str", "(", "obj", ".", "bucket_id", ")", ",", "file_id", "=", "str", "(", "obj", ".", "file_id", ")", ",", "file_key", "=", "obj", ".", "key", ",", "size", "=", "obj", ".", "file", ".", "size", ",", "referrer", "=", "request", ".", "referrer", ",", "# Who:", "*", "*", "get_user", "(", ")", ")", ")", "return", "event" ]
d877ae5462084abb4a28a20f1ebb3d636769c1bc
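Event builders in this file follow one convention: take a partially filled event dict, enrich it, and return it. The sketch below exercises a trimmed copy of the builder outside Flask; SimpleNamespace stands in for the invenio-files object, and request.referrer plus get_user() are deliberately omitted because they need an application context.

# Sketch of the builder convention; FakeObj fields (bucket_id, key,
# file.size) mirror the attributes the real builder reads.
import datetime
from types import SimpleNamespace

def file_download_event_builder(event, sender_app, obj=None, **kwargs):
    event.update(
        timestamp=datetime.datetime.utcnow().isoformat(),
        bucket_id=str(obj.bucket_id),
        file_key=obj.key,
        size=obj.file.size,
    )
    return event

obj = SimpleNamespace(bucket_id='b-1', key='report.pdf',
                      file=SimpleNamespace(size=1024))
print(file_download_event_builder({}, None, obj=obj))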
valid
record_view_event_builder
Build a record-view event.
invenio_stats/contrib/event_builders.py
def record_view_event_builder(event, sender_app, pid=None, record=None, **kwargs): """Build a record-view event.""" event.update(dict( # When: timestamp=datetime.datetime.utcnow().isoformat(), # What: record_id=str(record.id), pid_type=pid.pid_type, pid_value=str(pid.pid_value), referrer=request.referrer, # Who: **get_user() )) return event
def record_view_event_builder(event, sender_app, pid=None, record=None, **kwargs): """Build a record-view event.""" event.update(dict( # When: timestamp=datetime.datetime.utcnow().isoformat(), # What: record_id=str(record.id), pid_type=pid.pid_type, pid_value=str(pid.pid_value), referrer=request.referrer, # Who: **get_user() )) return event
[ "Build", "a", "record", "-", "view", "event", "." ]
inveniosoftware/invenio-stats
python
https://github.com/inveniosoftware/invenio-stats/blob/d877ae5462084abb4a28a20f1ebb3d636769c1bc/invenio_stats/contrib/event_builders.py#L49-L63
[ "def", "record_view_event_builder", "(", "event", ",", "sender_app", ",", "pid", "=", "None", ",", "record", "=", "None", ",", "*", "*", "kwargs", ")", ":", "event", ".", "update", "(", "dict", "(", "# When:", "timestamp", "=", "datetime", ".", "datetime", ".", "utcnow", "(", ")", ".", "isoformat", "(", ")", ",", "# What:", "record_id", "=", "str", "(", "record", ".", "id", ")", ",", "pid_type", "=", "pid", ".", "pid_type", ",", "pid_value", "=", "str", "(", "pid", ".", "pid_value", ")", ",", "referrer", "=", "request", ".", "referrer", ",", "# Who:", "*", "*", "get_user", "(", ")", ")", ")", "return", "event" ]
d877ae5462084abb4a28a20f1ebb3d636769c1bc
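Builders such as the two records above are meant to be chained, each one receiving the previous one's output. Whether invenio-stats applies them with exactly this reduce is an assumption, and the builder names here are hypothetical, but the event-in/event-out call convention matches.

# Hedged sketch of chaining event builders with functools.reduce.
from functools import reduce

def add_timestamp(event, app, **kwargs):
    event['timestamp'] = '2023-01-01T00:00:00'
    return event

def add_referrer(event, app, **kwargs):
    event['referrer'] = kwargs.get('referrer')
    return event

builders = [add_timestamp, add_referrer]
event = reduce(lambda ev, build: build(ev, None, referrer='https://x.test'),
               builders, {})
print(event)  # {'timestamp': ..., 'referrer': 'https://x.test'}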
valid
main
Setup consumer
protector/__main__.py
def main(): """ Setup consumer """ config = loader.load_config() if config.version: show_version() if config.show_rules: show_rules() if not config.configfile and not (hasattr(config, "status") or hasattr(config, "stop")): show_configfile_warning() # Check if we have permissions to open the log file. check_write_permissions(config.logfile) start_proxy(config)
def main(): """ Setup consumer """ config = loader.load_config() if config.version: show_version() if config.show_rules: show_rules() if not config.configfile and not (hasattr(config, "status") or hasattr(config, "stop")): show_configfile_warning() # Check if we have permissions to open the log file. check_write_permissions(config.logfile) start_proxy(config)
[ "Setup", "consumer" ]
trivago/Protector
python
https://github.com/trivago/Protector/blob/7ebe7bde965e27737b961a0cb5740724d174fdc7/protector/__main__.py#L16-L30
[ "def", "main", "(", ")", ":", "config", "=", "loader", ".", "load_config", "(", ")", "if", "config", ".", "version", ":", "show_version", "(", ")", "if", "config", ".", "show_rules", ":", "show_rules", "(", ")", "if", "not", "config", ".", "configfile", "and", "not", "(", "hasattr", "(", "config", ",", "\"status\"", ")", "or", "hasattr", "(", "config", ",", "\"stop\"", ")", ")", ":", "show_configfile_warning", "(", ")", "# Check if we have permissions to open the log file.", "check_write_permissions", "(", "config", ".", "logfile", ")", "start_proxy", "(", "config", ")" ]
7ebe7bde965e27737b961a0cb5740724d174fdc7
valid
check_write_permissions
Check if we can write to the given file Otherwise since we might detach the process to run in the background we might never find out that writing failed and get an ugly exit message on startup. For example: ERROR: Child exited immediately with non-zero exit code 127 So we catch this error upfront and print a nicer error message with a hint on how to fix it.
protector/__main__.py
def check_write_permissions(file): """ Check if we can write to the given file Otherwise since we might detach the process to run in the background we might never find out that writing failed and get an ugly exit message on startup. For example: ERROR: Child exited immediately with non-zero exit code 127 So we catch this error upfront and print a nicer error message with a hint on how to fix it. """ try: open(file, 'a') except IOError: print("Can't open file {}. " "Please grant write permissions or change the path in your config".format(file)) sys.exit(1)
def check_write_permissions(file): """ Check if we can write to the given file Otherwise since we might detach the process to run in the background we might never find out that writing failed and get an ugly exit message on startup. For example: ERROR: Child exited immediately with non-zero exit code 127 So we catch this error upfront and print a nicer error message with a hint on how to fix it. """ try: open(file, 'a') except IOError: print("Can't open file {}. " "Please grant write permissions or change the path in your config".format(file)) sys.exit(1)
[ "Check", "if", "we", "can", "write", "to", "the", "given", "file" ]
trivago/Protector
python
https://github.com/trivago/Protector/blob/7ebe7bde965e27737b961a0cb5740724d174fdc7/protector/__main__.py#L33-L50
[ "def", "check_write_permissions", "(", "file", ")", ":", "try", ":", "open", "(", "file", ",", "'a'", ")", "except", "IOError", ":", "print", "(", "\"Can't open file {}. \"", "\"Please grant write permissions or change the path in your config\"", ".", "format", "(", "file", ")", ")", "sys", ".", "exit", "(", "1", ")" ]
7ebe7bde965e27737b961a0cb5740724d174fdc7
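The check above fails fast so a detached daemon never dies silently on an unwritable log path. Below is a runnable demonstration of the same probe that returns a bool instead of calling sys.exit(1), so both outcomes can be printed.

# Demonstration of the open-for-append probe; paths are placeholders.
import os
import tempfile

def can_write(path):
    try:
        with open(path, 'a'):
            return True
    except IOError:  # alias of OSError on Python 3
        return False

writable = os.path.join(tempfile.gettempdir(), 'limf-demo.log')
print(can_write(writable))                     # True
print(can_write('/nonexistent-dir/limf.log'))  # False on most systems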
valid
show_rules
Show the list of available rules and quit :return:
protector/__main__.py
def show_rules(): """ Show the list of available rules and quit :return: """ from rules.loader import import_rules from rules.rule_list import all_rules rules = import_rules(all_rules) print("") for name, rule in rules.iteritems(): heading = "{} (`{}`)".format(rule.description(), name) print("#### {} ####".format(heading)) for line in rule.reason(): print(line) print("") sys.exit(0)
def show_rules(): """ Show the list of available rules and quit :return: """ from rules.loader import import_rules from rules.rule_list import all_rules rules = import_rules(all_rules) print("") for name, rule in rules.iteritems(): heading = "{} (`{}`)".format(rule.description(), name) print("#### {} ####".format(heading)) for line in rule.reason(): print(line) print("") sys.exit(0)
[ "Show", "the", "list", "of", "available", "rules", "and", "quit", ":", "return", ":" ]
trivago/Protector
python
https://github.com/trivago/Protector/blob/7ebe7bde965e27737b961a0cb5740724d174fdc7/protector/__main__.py#L53-L68
[ "def", "show_rules", "(", ")", ":", "from", "rules", ".", "loader", "import", "import_rules", "from", "rules", ".", "rule_list", "import", "all_rules", "rules", "=", "import_rules", "(", "all_rules", ")", "print", "(", "\"\"", ")", "for", "name", ",", "rule", "in", "rules", ".", "iteritems", "(", ")", ":", "heading", "=", "\"{} (`{}`)\"", ".", "format", "(", "rule", ".", "description", "(", ")", ",", "name", ")", "print", "(", "\"#### {} ####\"", ".", "format", "(", "heading", ")", ")", "for", "line", "in", "rule", ".", "reason", "(", ")", ":", "print", "(", "line", ")", "print", "(", "\"\"", ")", "sys", ".", "exit", "(", "0", ")" ]
7ebe7bde965e27737b961a0cb5740724d174fdc7
valid
start_proxy
Start the http proxy :param config: :return:
protector/__main__.py
def start_proxy(config): """ Start the http proxy :param config: :return: """ protector = Protector(config.rules, config.whitelist) protector_daemon = ProtectorDaemon(config=config, protector=protector) daemon = daemonocle.Daemon( pidfile=config.pidfile, detach=(not config.foreground), shutdown_callback=shutdown, worker=protector_daemon.run ) daemon.do_action(config.command)
def start_proxy(config): """ Start the http proxy :param config: :return: """ protector = Protector(config.rules, config.whitelist) protector_daemon = ProtectorDaemon(config=config, protector=protector) daemon = daemonocle.Daemon( pidfile=config.pidfile, detach=(not config.foreground), shutdown_callback=shutdown, worker=protector_daemon.run ) daemon.do_action(config.command)
[ "Start", "the", "http", "proxy", ":", "param", "config", ":", ":", "return", ":" ]
trivago/Protector
python
https://github.com/trivago/Protector/blob/7ebe7bde965e27737b961a0cb5740724d174fdc7/protector/__main__.py#L92-L107
[ "def", "start_proxy", "(", "config", ")", ":", "protector", "=", "Protector", "(", "config", ".", "rules", ",", "config", ".", "whitelist", ")", "protector_daemon", "=", "ProtectorDaemon", "(", "config", "=", "config", ",", "protector", "=", "protector", ")", "daemon", "=", "daemonocle", ".", "Daemon", "(", "pidfile", "=", "config", ".", "pidfile", ",", "detach", "=", "(", "not", "config", ".", "foreground", ")", ",", "shutdown_callback", "=", "shutdown", ",", "worker", "=", "protector_daemon", ".", "run", ")", "daemon", ".", "do_action", "(", "config", ".", "command", ")" ]
7ebe7bde965e27737b961a0cb5740724d174fdc7
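The start_proxy record above delegates process management to daemonocle. A minimal hedged sketch of that pattern follows (requires pip install daemonocle); the worker body and pidfile path are placeholders, not Protector's real ones.

# Sketch of the daemonocle start/stop/status pattern used above.
import sys
import time
import daemonocle

def worker():
    while True:
        time.sleep(10)  # real work would go here

daemon = daemonocle.Daemon(
    worker=worker,
    pidfile='/tmp/demo-daemon.pid',
    detach=True,
)

if __name__ == '__main__':
    # e.g. `python demo.py start` / `stop` / `status`
    daemon.do_action(sys.argv[1])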
valid
batches
From http://stackoverflow.com/a/8290508/270334 :param n: :param iterable:
contrib/helpers/benchmark.py
def batches(iterable, n=1): """ From http://stackoverflow.com/a/8290508/270334 :param n: :param iterable: """ l = len(iterable) for ndx in range(0, l, n): yield iterable[ndx:min(ndx + n, l)]
def batches(iterable, n=1): """ From http://stackoverflow.com/a/8290508/270334 :param n: :param iterable: """ l = len(iterable) for ndx in range(0, l, n): yield iterable[ndx:min(ndx + n, l)]
[ "From", "http", ":", "//", "stackoverflow", ".", "com", "/", "a", "/", "8290508", "/", "270334", ":", "param", "n", ":", ":", "param", "iterable", ":" ]
trivago/Protector
python
https://github.com/trivago/Protector/blob/7ebe7bde965e27737b961a0cb5740724d174fdc7/contrib/helpers/benchmark.py#L9-L17
[ "def", "batches", "(", "iterable", ",", "n", "=", "1", ")", ":", "l", "=", "len", "(", "iterable", ")", "for", "ndx", "in", "range", "(", "0", ",", "l", ",", "n", ")", ":", "yield", "iterable", "[", "ndx", ":", "min", "(", "ndx", "+", "n", ",", "l", ")", "]" ]
7ebe7bde965e27737b961a0cb5740724d174fdc7
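The batches record above slices a sequence into fixed-size chunks. One usage note: it relies on len() and slicing, so a generator would have to be materialized with list() first.

# Usage of the slicing-based batcher on a small list.
def batches(iterable, n=1):
    length = len(iterable)
    for ndx in range(0, length, n):
        yield iterable[ndx:min(ndx + n, length)]

print(list(batches(list(range(7)), n=3)))  # [[0, 1, 2], [3, 4, 5], [6]]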
valid
_is_root
Checks if the user is rooted.
jupyterpip/__init__.py
def _is_root(): """Checks if the user is rooted.""" import os import ctypes try: return os.geteuid() == 0 except AttributeError: return ctypes.windll.shell32.IsUserAnAdmin() != 0 return False
def _is_root(): """Checks if the user is rooted.""" import os import ctypes try: return os.geteuid() == 0 except AttributeError: return ctypes.windll.shell32.IsUserAnAdmin() != 0 return False
[ "Checks", "if", "the", "user", "is", "rooted", "." ]
jdfreder/jupyter-pip
python
https://github.com/jdfreder/jupyter-pip/blob/9f04c6096f1169b08aeaf6221616a5fb48111044/jupyterpip/__init__.py#L1-L9
[ "def", "_is_root", "(", ")", ":", "import", "os", "import", "ctypes", "try", ":", "return", "os", ".", "geteuid", "(", ")", "==", "0", "except", "AttributeError", ":", "return", "ctypes", ".", "windll", ".", "shell32", ".", "IsUserAnAdmin", "(", ")", "!=", "0", "return", "False" ]
9f04c6096f1169b08aeaf6221616a5fb48111044
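The _is_root record above probes os.geteuid on POSIX and falls back to the Windows shell API. Here is a hedged usage sketch that gates a --user install flag on the result; the package name is hypothetical.

# Cross-platform privilege check; ctypes.windll is only touched in the
# AttributeError branch, which runs only on Windows.
import ctypes
import os

def is_root():
    try:
        return os.geteuid() == 0  # POSIX
    except AttributeError:        # Windows: no geteuid
        return ctypes.windll.shell32.IsUserAnAdmin() != 0

flags = [] if is_root() else ['--user']
print('pip install', *flags, 'some-package')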