partition
stringclasses
3 values
func_name
stringlengths
1
134
docstring
stringlengths
1
46.9k
path
stringlengths
4
223
original_string
stringlengths
75
104k
code
stringlengths
75
104k
docstring_tokens
listlengths
1
1.97k
repo
stringlengths
7
55
language
stringclasses
1 value
url
stringlengths
87
315
code_tokens
listlengths
19
28.4k
sha
stringlengths
40
40
valid
Nemo.r_assets
Route for specific assets. :param filetype: Asset Type :param asset: Filename of an asset :return: Response
flask_nemo/__init__.py
def r_assets(self, filetype, asset):
    """ Route for specific assets.

    :param filetype: Asset Type
    :param asset: Filename of an asset
    :return: Response
    """
    # self.assets maps asset-type -> {filename: directory}; serve only
    # assets that are registered with a non-empty directory.
    directory = self.assets.get(filetype, {}).get(asset)
    if directory:
        return send_from_directory(
            directory=directory,
            filename=asset
        )
    abort(404)
[ "Route", "for", "specific", "assets", "." ]
Capitains/flask-capitains-nemo
python
https://github.com/Capitains/flask-capitains-nemo/blob/8d91f2c05b925a6c8ea8c997baf698c87257bc58/flask_nemo/__init__.py#L678-L690
[ "def", "r_assets", "(", "self", ",", "filetype", ",", "asset", ")", ":", "if", "filetype", "in", "self", ".", "assets", "and", "asset", "in", "self", ".", "assets", "[", "filetype", "]", "and", "self", ".", "assets", "[", "filetype", "]", "[", "asset", "]", ":", "return", "send_from_directory", "(", "directory", "=", "self", ".", "assets", "[", "filetype", "]", "[", "asset", "]", ",", "filename", "=", "asset", ")", "abort", "(", "404", ")" ]
8d91f2c05b925a6c8ea8c997baf698c87257bc58
valid
Nemo.register_assets
Merge and register assets, both as routes and dictionary :return: None
flask_nemo/__init__.py
def register_assets(self):
    """ Merge and register assets, both as routes and dictionary

    :return: None
    """
    # Secondary path registered to ensure assets compatibility
    secondary_rule = "{0}.secondary/<filetype>/<asset>".format(self.static_url_path)
    self.blueprint.add_url_rule(
        secondary_rule,
        view_func=self.r_assets,
        endpoint="secondary_assets",
        methods=["GET"]
    )
[ "Merge", "and", "register", "assets", "both", "as", "routes", "and", "dictionary" ]
Capitains/flask-capitains-nemo
python
https://github.com/Capitains/flask-capitains-nemo/blob/8d91f2c05b925a6c8ea8c997baf698c87257bc58/flask_nemo/__init__.py#L692-L703
[ "def", "register_assets", "(", "self", ")", ":", "self", ".", "blueprint", ".", "add_url_rule", "(", "# Register another path to ensure assets compatibility", "\"{0}.secondary/<filetype>/<asset>\"", ".", "format", "(", "self", ".", "static_url_path", ")", ",", "view_func", "=", "self", ".", "r_assets", ",", "endpoint", "=", "\"secondary_assets\"", ",", "methods", "=", "[", "\"GET\"", "]", ")" ]
8d91f2c05b925a6c8ea8c997baf698c87257bc58
valid
Nemo.create_blueprint
Create blueprint and register rules :return: Blueprint of the current nemo app :rtype: flask.Blueprint
flask_nemo/__init__.py
def create_blueprint(self):
    """ Create blueprint and register rules

    :return: Blueprint of the current nemo app
    :rtype: flask.Blueprint
    """
    # Plugins may alter routes/filters/assets, so they run first.
    self.register_plugins()

    self.blueprint = Blueprint(
        self.name,
        "nemo",
        url_prefix=self.prefix,
        template_folder=self.template_folder,
        static_folder=self.static_folder,
        static_url_path=self.static_url_path
    )

    # Plain routes collected from the instance and plugins.
    for url, name, methods, instance in self._urls:
        self.blueprint.add_url_rule(
            url,
            view_func=self.view_maker(name, instance),
            endpoint=_plugin_endpoint_rename(name, instance),
            methods=methods
        )

    # Semantic routes share the view but get a "_semantic" endpoint suffix
    # so both rules can coexist on the same blueprint.
    for url, name, methods, instance in self._semantic_url:
        self.blueprint.add_url_rule(
            url,
            view_func=self.view_maker(name, instance),
            endpoint=_plugin_endpoint_rename(name, instance)+"_semantic",
            methods=methods
        )

    self.register_assets()
    self.register_filters()

    # We extend the loading list by the instance value
    self.__templates_namespaces__.extend(self.__instance_templates__)
    # We generate a template loader; iteration is reversed, so entries
    # registered last are appended first to each namespace's loader list
    # (presumably so they take lookup precedence — TODO confirm).
    for namespace, directory in self.__templates_namespaces__[::-1]:
        if namespace not in self.__template_loader__:
            self.__template_loader__[namespace] = []
        self.__template_loader__[namespace].append(
            jinja2.FileSystemLoader(op.abspath(directory))
        )
    # Templates are addressed as "namespace::path" via the PrefixLoader.
    self.blueprint.jinja_loader = jinja2.PrefixLoader(
        {namespace: jinja2.ChoiceLoader(paths) for namespace, paths in self.__template_loader__.items()},
        "::"
    )

    # Wrap every registered cacheable function with the cache's memoizer.
    if self.cache is not None:
        for func, instance in self.cached:
            setattr(instance, func.__name__, self.cache.memoize()(func))
    return self.blueprint
[ "Create", "blueprint", "and", "register", "rules" ]
Capitains/flask-capitains-nemo
python
https://github.com/Capitains/flask-capitains-nemo/blob/8d91f2c05b925a6c8ea8c997baf698c87257bc58/flask_nemo/__init__.py#L705-L759
[ "def", "create_blueprint", "(", "self", ")", ":", "self", ".", "register_plugins", "(", ")", "self", ".", "blueprint", "=", "Blueprint", "(", "self", ".", "name", ",", "\"nemo\"", ",", "url_prefix", "=", "self", ".", "prefix", ",", "template_folder", "=", "self", ".", "template_folder", ",", "static_folder", "=", "self", ".", "static_folder", ",", "static_url_path", "=", "self", ".", "static_url_path", ")", "for", "url", ",", "name", ",", "methods", ",", "instance", "in", "self", ".", "_urls", ":", "self", ".", "blueprint", ".", "add_url_rule", "(", "url", ",", "view_func", "=", "self", ".", "view_maker", "(", "name", ",", "instance", ")", ",", "endpoint", "=", "_plugin_endpoint_rename", "(", "name", ",", "instance", ")", ",", "methods", "=", "methods", ")", "for", "url", ",", "name", ",", "methods", ",", "instance", "in", "self", ".", "_semantic_url", ":", "self", ".", "blueprint", ".", "add_url_rule", "(", "url", ",", "view_func", "=", "self", ".", "view_maker", "(", "name", ",", "instance", ")", ",", "endpoint", "=", "_plugin_endpoint_rename", "(", "name", ",", "instance", ")", "+", "\"_semantic\"", ",", "methods", "=", "methods", ")", "self", ".", "register_assets", "(", ")", "self", ".", "register_filters", "(", ")", "# We extend the loading list by the instance value", "self", ".", "__templates_namespaces__", ".", "extend", "(", "self", ".", "__instance_templates__", ")", "# We generate a template loader", "for", "namespace", ",", "directory", "in", "self", ".", "__templates_namespaces__", "[", ":", ":", "-", "1", "]", ":", "if", "namespace", "not", "in", "self", ".", "__template_loader__", ":", "self", ".", "__template_loader__", "[", "namespace", "]", "=", "[", "]", "self", ".", "__template_loader__", "[", "namespace", "]", ".", "append", "(", "jinja2", ".", "FileSystemLoader", "(", "op", ".", "abspath", "(", "directory", ")", ")", ")", "self", ".", "blueprint", ".", "jinja_loader", "=", "jinja2", ".", "PrefixLoader", "(", "{", "namespace", ":", "jinja2", 
".", "ChoiceLoader", "(", "paths", ")", "for", "namespace", ",", "paths", "in", "self", ".", "__template_loader__", ".", "items", "(", ")", "}", ",", "\"::\"", ")", "if", "self", ".", "cache", "is", "not", "None", ":", "for", "func", ",", "instance", "in", "self", ".", "cached", ":", "setattr", "(", "instance", ",", "func", ".", "__name__", ",", "self", ".", "cache", ".", "memoize", "(", ")", "(", "func", ")", ")", "return", "self", ".", "blueprint" ]
8d91f2c05b925a6c8ea8c997baf698c87257bc58
valid
Nemo.view_maker
Create a view :param name: Name of the route function to use for the view. :type name: str :return: Route function which makes use of Nemo context (such as menu informations) :rtype: function
flask_nemo/__init__.py
def view_maker(self, name, instance=None):
    """ Create a view

    :param name: Name of the route function to use for the view.
    :type name: str
    :return: Route function which makes use of Nemo context (such as menu informations)
    :rtype: function
    """
    if instance is None:
        instance = self
    # Does the wrapped route function accept a "lang" parameter ?
    has_lang = any(
        parameter.name == "lang"
        for parameter in inspect.signature(getattr(instance, name)).parameters.values()
    )

    def route(**kwargs):
        if has_lang and "lang" not in kwargs:
            kwargs["lang"] = self.get_locale()
        # "semantic" is a URL decoration only : never forwarded to the view
        kwargs.pop("semantic", None)
        return self.route(getattr(instance, name), **kwargs)
    return route
[ "Create", "a", "view" ]
Capitains/flask-capitains-nemo
python
https://github.com/Capitains/flask-capitains-nemo/blob/8d91f2c05b925a6c8ea8c997baf698c87257bc58/flask_nemo/__init__.py#L761-L782
[ "def", "view_maker", "(", "self", ",", "name", ",", "instance", "=", "None", ")", ":", "if", "instance", "is", "None", ":", "instance", "=", "self", "sig", "=", "\"lang\"", "in", "[", "parameter", ".", "name", "for", "parameter", "in", "inspect", ".", "signature", "(", "getattr", "(", "instance", ",", "name", ")", ")", ".", "parameters", ".", "values", "(", ")", "]", "def", "route", "(", "*", "*", "kwargs", ")", ":", "if", "sig", "and", "\"lang\"", "not", "in", "kwargs", ":", "kwargs", "[", "\"lang\"", "]", "=", "self", ".", "get_locale", "(", ")", "if", "\"semantic\"", "in", "kwargs", ":", "del", "kwargs", "[", "\"semantic\"", "]", "return", "self", ".", "route", "(", "getattr", "(", "instance", ",", "name", ")", ",", "*", "*", "kwargs", ")", "return", "route" ]
8d91f2c05b925a6c8ea8c997baf698c87257bc58
valid
Nemo.main_collections
Retrieve main parent collections of a repository :param lang: Language to retrieve information in :return: Sorted collections representations
flask_nemo/__init__.py
def main_collections(self, lang=None):
    """ Retrieve main parent collections of a repository

    :param lang: Language to retrieve information in
    :return: Sorted collections representations
    """
    representations = [
        {
            "id": member.id,
            "label": str(member.get_label(lang=lang)),
            "model": str(member.model),
            "type": str(member.type),
            "size": member.size
        }
        for member in self.resolver.getMetadata().members
    ]
    # Alphabetical ordering on the human-readable label
    representations.sort(key=itemgetter("label"))
    return representations
[ "Retrieve", "main", "parent", "collections", "of", "a", "repository" ]
Capitains/flask-capitains-nemo
python
https://github.com/Capitains/flask-capitains-nemo/blob/8d91f2c05b925a6c8ea8c997baf698c87257bc58/flask_nemo/__init__.py#L784-L799
[ "def", "main_collections", "(", "self", ",", "lang", "=", "None", ")", ":", "return", "sorted", "(", "[", "{", "\"id\"", ":", "member", ".", "id", ",", "\"label\"", ":", "str", "(", "member", ".", "get_label", "(", "lang", "=", "lang", ")", ")", ",", "\"model\"", ":", "str", "(", "member", ".", "model", ")", ",", "\"type\"", ":", "str", "(", "member", ".", "type", ")", ",", "\"size\"", ":", "member", ".", "size", "}", "for", "member", "in", "self", ".", "resolver", ".", "getMetadata", "(", ")", ".", "members", "]", ",", "key", "=", "itemgetter", "(", "\"label\"", ")", ")" ]
8d91f2c05b925a6c8ea8c997baf698c87257bc58
valid
Nemo.make_cache_keys
This function is built to provide cache keys for templates :param endpoint: Current endpoint :param kwargs: Keyword Arguments :return: tuple of i18n dependant cache key and i18n ignoring cache key :rtype: tuple(str)
flask_nemo/__init__.py
def make_cache_keys(self, endpoint, kwargs):
    """ This function is built to provide cache keys for templates

    :param endpoint: Current endpoint
    :param kwargs: Keyword Arguments
    :return: tuple of i18n dependant cache key and i18n ignoring cache key
    :rtype: tuple(str)
    """
    ordered = sorted(kwargs.keys())
    # Key built from every argument, language included
    i18n_cache_key = endpoint + "|" + "|".join(kwargs[key] for key in ordered)
    if "lang" not in ordered:
        # No language argument : both keys are identical
        return i18n_cache_key, i18n_cache_key
    # Language-agnostic variant drops the "lang" argument
    cache_key = endpoint + "|" + "|".join(kwargs[key] for key in ordered if key != "lang")
    return i18n_cache_key, cache_key
[ "This", "function", "is", "built", "to", "provide", "cache", "keys", "for", "templates" ]
Capitains/flask-capitains-nemo
python
https://github.com/Capitains/flask-capitains-nemo/blob/8d91f2c05b925a6c8ea8c997baf698c87257bc58/flask_nemo/__init__.py#L801-L815
[ "def", "make_cache_keys", "(", "self", ",", "endpoint", ",", "kwargs", ")", ":", "keys", "=", "sorted", "(", "kwargs", ".", "keys", "(", ")", ")", "i18n_cache_key", "=", "endpoint", "+", "\"|\"", "+", "\"|\"", ".", "join", "(", "[", "kwargs", "[", "k", "]", "for", "k", "in", "keys", "]", ")", "if", "\"lang\"", "in", "keys", ":", "cache_key", "=", "endpoint", "+", "\"|\"", "+", "\"|\"", ".", "join", "(", "[", "kwargs", "[", "k", "]", "for", "k", "in", "keys", "if", "k", "!=", "\"lang\"", "]", ")", "else", ":", "cache_key", "=", "i18n_cache_key", "return", "i18n_cache_key", ",", "cache_key" ]
8d91f2c05b925a6c8ea8c997baf698c87257bc58
valid
Nemo.render
Render a route template and adds information to this route. :param template: Template name. :type template: str :param kwargs: dictionary of named arguments used to be passed to the template :type kwargs: dict :return: Http Response with rendered template :rtype: flask.Response
flask_nemo/__init__.py
def render(self, template, **kwargs):
    """ Render a route template and adds information to this route.

    :param template: Template name.
    :type template: str
    :param kwargs: dictionary of named arguments used to be passed to the template
    :type kwargs: dict
    :return: Http Response with rendered template
    :rtype: flask.Response
    """
    # Removed a dead store: "cache_key" used to be pre-set from
    # kwargs["url"].values() but was unconditionally overwritten by
    # make_cache_keys() below before any read.
    kwargs["lang"] = self.get_locale()
    kwargs["assets"] = self.assets
    kwargs["main_collections"] = self.main_collections(kwargs["lang"])
    kwargs["cache_active"] = self.cache is not None
    kwargs["cache_time"] = 0
    # NOTE(review): make_cache_keys returns (i18n_dependant, i18n_agnostic);
    # the unpacking stores the i18n-dependant key under "cache_key" and the
    # agnostic one under "cache_key_i18n" — the names look swapped, but
    # templates may depend on this order; confirm before renaming.
    kwargs["cache_key"], kwargs["cache_key_i18n"] = self.make_cache_keys(request.endpoint, kwargs["url"])
    kwargs["template"] = template

    # Let each augmenting plugin extend or override the template context
    for plugin in self.__plugins_render_views__:
        kwargs.update(plugin.render(**kwargs))

    return render_template(kwargs["template"], **kwargs)
[ "Render", "a", "route", "template", "and", "adds", "information", "to", "this", "route", "." ]
Capitains/flask-capitains-nemo
python
https://github.com/Capitains/flask-capitains-nemo/blob/8d91f2c05b925a6c8ea8c997baf698c87257bc58/flask_nemo/__init__.py#L817-L840
[ "def", "render", "(", "self", ",", "template", ",", "*", "*", "kwargs", ")", ":", "kwargs", "[", "\"cache_key\"", "]", "=", "\"%s\"", "%", "kwargs", "[", "\"url\"", "]", ".", "values", "(", ")", "kwargs", "[", "\"lang\"", "]", "=", "self", ".", "get_locale", "(", ")", "kwargs", "[", "\"assets\"", "]", "=", "self", ".", "assets", "kwargs", "[", "\"main_collections\"", "]", "=", "self", ".", "main_collections", "(", "kwargs", "[", "\"lang\"", "]", ")", "kwargs", "[", "\"cache_active\"", "]", "=", "self", ".", "cache", "is", "not", "None", "kwargs", "[", "\"cache_time\"", "]", "=", "0", "kwargs", "[", "\"cache_key\"", "]", ",", "kwargs", "[", "\"cache_key_i18n\"", "]", "=", "self", ".", "make_cache_keys", "(", "request", ".", "endpoint", ",", "kwargs", "[", "\"url\"", "]", ")", "kwargs", "[", "\"template\"", "]", "=", "template", "for", "plugin", "in", "self", ".", "__plugins_render_views__", ":", "kwargs", ".", "update", "(", "plugin", ".", "render", "(", "*", "*", "kwargs", ")", ")", "return", "render_template", "(", "kwargs", "[", "\"template\"", "]", ",", "*", "*", "kwargs", ")" ]
8d91f2c05b925a6c8ea8c997baf698c87257bc58
valid
Nemo.route
Route helper : apply fn function but keep the calling object, *ie* kwargs, for other functions :param fn: Function to run the route with :type fn: function :param kwargs: Parsed url arguments :type kwargs: dict :return: HTTP Response with rendered template :rtype: flask.Response
flask_nemo/__init__.py
def route(self, fn, **kwargs):
    """ Route helper : apply fn function but keep the calling object, *ie* kwargs, for other functions

    :param fn: Function to run the route with
    :type fn: function
    :param kwargs: Parsed url arguments
    :type kwargs: dict
    :return: HTTP Response with rendered template
    :rtype: flask.Response
    """
    context = fn(**kwargs)
    if isinstance(context, dict):
        # Template context : keep the parsed url arguments around for render
        context["url"] = kwargs
        return self.render(**context)
    # If there is no templates, we assume that the response is finalized :
    return context
[ "Route", "helper", ":", "apply", "fn", "function", "but", "keep", "the", "calling", "object", "*", "ie", "*", "kwargs", "for", "other", "functions" ]
Capitains/flask-capitains-nemo
python
https://github.com/Capitains/flask-capitains-nemo/blob/8d91f2c05b925a6c8ea8c997baf698c87257bc58/flask_nemo/__init__.py#L842-L859
[ "def", "route", "(", "self", ",", "fn", ",", "*", "*", "kwargs", ")", ":", "new_kwargs", "=", "fn", "(", "*", "*", "kwargs", ")", "# If there is no templates, we assume that the response is finalized :", "if", "not", "isinstance", "(", "new_kwargs", ",", "dict", ")", ":", "return", "new_kwargs", "new_kwargs", "[", "\"url\"", "]", "=", "kwargs", "return", "self", ".", "render", "(", "*", "*", "new_kwargs", ")" ]
8d91f2c05b925a6c8ea8c997baf698c87257bc58
valid
Nemo.register
Register the app using Blueprint :return: Nemo blueprint :rtype: flask.Blueprint
flask_nemo/__init__.py
def register(self):
    """ Register the app using Blueprint

    :return: Nemo blueprint
    :rtype: flask.Blueprint
    """
    if self.app is None:
        return None
    if not self.blueprint:
        self.blueprint = self.create_blueprint()
    self.app.register_blueprint(self.blueprint)
    if self.cache is None:
        # We register a fake cache extension.
        setattr(self.app.jinja_env, "_fake_cache_extension", self)
        self.app.jinja_env.add_extension(FakeCacheExtension)
    return self.blueprint
[ "Register", "the", "app", "using", "Blueprint" ]
Capitains/flask-capitains-nemo
python
https://github.com/Capitains/flask-capitains-nemo/blob/8d91f2c05b925a6c8ea8c997baf698c87257bc58/flask_nemo/__init__.py#L861-L876
[ "def", "register", "(", "self", ")", ":", "if", "self", ".", "app", "is", "not", "None", ":", "if", "not", "self", ".", "blueprint", ":", "self", ".", "blueprint", "=", "self", ".", "create_blueprint", "(", ")", "self", ".", "app", ".", "register_blueprint", "(", "self", ".", "blueprint", ")", "if", "self", ".", "cache", "is", "None", ":", "# We register a fake cache extension.", "setattr", "(", "self", ".", "app", ".", "jinja_env", ",", "\"_fake_cache_extension\"", ",", "self", ")", "self", ".", "app", ".", "jinja_env", ".", "add_extension", "(", "FakeCacheExtension", ")", "return", "self", ".", "blueprint", "return", "None" ]
8d91f2c05b925a6c8ea8c997baf698c87257bc58
valid
Nemo.register_filters
Register filters for Jinja to use .. note:: Extends the dictionary filters of jinja_env using self._filters list
flask_nemo/__init__.py
def register_filters(self):
    """ Register filters for Jinja to use

    .. note::  Extends the dictionary filters of jinja_env using self._filters list
    """
    jinja_filters = self.app.jinja_env.filters
    for _filter, instance in self._filters:
        # Jinja name drops the "f_" prefix
        filter_name = _filter.replace("f_", "")
        if instance:
            # Plugin filter : strip the plugin-name suffix to find the method
            jinja_filters[filter_name] = getattr(instance, _filter.replace("_{}".format(instance.name), ""))
        else:
            # Core filter : taken straight from flask_nemo.filters
            jinja_filters[filter_name] = getattr(flask_nemo.filters, _filter)
[ "Register", "filters", "for", "Jinja", "to", "use" ]
Capitains/flask-capitains-nemo
python
https://github.com/Capitains/flask-capitains-nemo/blob/8d91f2c05b925a6c8ea8c997baf698c87257bc58/flask_nemo/__init__.py#L878-L891
[ "def", "register_filters", "(", "self", ")", ":", "for", "_filter", ",", "instance", "in", "self", ".", "_filters", ":", "if", "not", "instance", ":", "self", ".", "app", ".", "jinja_env", ".", "filters", "[", "_filter", ".", "replace", "(", "\"f_\"", ",", "\"\"", ")", "]", "=", "getattr", "(", "flask_nemo", ".", "filters", ",", "_filter", ")", "else", ":", "self", ".", "app", ".", "jinja_env", ".", "filters", "[", "_filter", ".", "replace", "(", "\"f_\"", ",", "\"\"", ")", "]", "=", "getattr", "(", "instance", ",", "_filter", ".", "replace", "(", "\"_{}\"", ".", "format", "(", "instance", ".", "name", ")", ",", "\"\"", ")", ")" ]
8d91f2c05b925a6c8ea8c997baf698c87257bc58
valid
Nemo.register_plugins
Register plugins in Nemo instance - Clear routes first if asked by one plugin - Clear assets if asked by one plugin and replace by the last plugin registered static_folder - Register each plugin - Append plugin routes to registered routes - Append plugin filters to registered filters - Append templates directory to given namespaces - Append assets (CSS, JS, statics) to given resources - Append render view (if exists) to Nemo.render stack
flask_nemo/__init__.py
def register_plugins(self):
    """ Register plugins in Nemo instance

    - Clear routes first if asked by one plugin
    - Clear assets if asked by one plugin and replace by the last plugin registered static_folder
    - Register each plugin
        - Append plugin routes to registered routes
        - Append plugin filters to registered filters
        - Append templates directory to given namespaces
        - Append assets (CSS, JS, statics) to given resources
        - Append render view (if exists) to Nemo.render stack
    """
    # A single plugin asking for clear_routes wipes everything registered
    # so far (routes and cached functions alike).
    if len([plugin for plugin in self.__plugins__.values() if plugin.clear_routes]) > 0:
        # Clear current routes
        self._urls = list()
        self.cached = list()

    clear_assets = [plugin for plugin in self.__plugins__.values() if plugin.clear_assets]
    if len(clear_assets) > 0 and not self.prevent_plugin_clearing_assets:
        # Clear current Assets : reset to the class-level defaults (shallow
        # copy — nested dicts are presumably shared with the class; verify).
        self.__assets__ = copy(type(self).ASSETS)
        static_path = [plugin.static_folder for plugin in clear_assets if plugin.static_folder]
        if len(static_path) > 0:
            # Last plugin declaring a static_folder wins
            self.static_folder = static_path[-1]

    for plugin in self.__plugins__.values():
        # Routes/filters carry a reference to their owning plugin
        self._urls.extend([(url, function, methods, plugin) for url, function, methods in plugin.routes])
        self._filters.extend([(filt, plugin) for filt in plugin.filters])
        self.__templates_namespaces__.extend(
            [(namespace, directory) for namespace, directory in plugin.templates.items()]
        )
        # Later plugins overwrite earlier assets registered under the same key
        for asset_type in self.__assets__:
            for key, value in plugin.assets[asset_type].items():
                self.__assets__[asset_type][key] = value
        if plugin.augment:
            # Plugin participates in the Nemo.render context stack
            self.__plugins_render_views__.append(plugin)

        if hasattr(plugin, "CACHED"):
            for func in plugin.CACHED:
                self.cached.append((getattr(plugin, func), plugin))
        # Give the plugin a back-reference to this Nemo instance
        plugin.register_nemo(self)
[ "Register", "plugins", "in", "Nemo", "instance" ]
Capitains/flask-capitains-nemo
python
https://github.com/Capitains/flask-capitains-nemo/blob/8d91f2c05b925a6c8ea8c997baf698c87257bc58/flask_nemo/__init__.py#L893-L931
[ "def", "register_plugins", "(", "self", ")", ":", "if", "len", "(", "[", "plugin", "for", "plugin", "in", "self", ".", "__plugins__", ".", "values", "(", ")", "if", "plugin", ".", "clear_routes", "]", ")", ">", "0", ":", "# Clear current routes", "self", ".", "_urls", "=", "list", "(", ")", "self", ".", "cached", "=", "list", "(", ")", "clear_assets", "=", "[", "plugin", "for", "plugin", "in", "self", ".", "__plugins__", ".", "values", "(", ")", "if", "plugin", ".", "clear_assets", "]", "if", "len", "(", "clear_assets", ")", ">", "0", "and", "not", "self", ".", "prevent_plugin_clearing_assets", ":", "# Clear current Assets", "self", ".", "__assets__", "=", "copy", "(", "type", "(", "self", ")", ".", "ASSETS", ")", "static_path", "=", "[", "plugin", ".", "static_folder", "for", "plugin", "in", "clear_assets", "if", "plugin", ".", "static_folder", "]", "if", "len", "(", "static_path", ")", ">", "0", ":", "self", ".", "static_folder", "=", "static_path", "[", "-", "1", "]", "for", "plugin", "in", "self", ".", "__plugins__", ".", "values", "(", ")", ":", "self", ".", "_urls", ".", "extend", "(", "[", "(", "url", ",", "function", ",", "methods", ",", "plugin", ")", "for", "url", ",", "function", ",", "methods", "in", "plugin", ".", "routes", "]", ")", "self", ".", "_filters", ".", "extend", "(", "[", "(", "filt", ",", "plugin", ")", "for", "filt", "in", "plugin", ".", "filters", "]", ")", "self", ".", "__templates_namespaces__", ".", "extend", "(", "[", "(", "namespace", ",", "directory", ")", "for", "namespace", ",", "directory", "in", "plugin", ".", "templates", ".", "items", "(", ")", "]", ")", "for", "asset_type", "in", "self", ".", "__assets__", ":", "for", "key", ",", "value", "in", "plugin", ".", "assets", "[", "asset_type", "]", ".", "items", "(", ")", ":", "self", ".", "__assets__", "[", "asset_type", "]", "[", "key", "]", "=", "value", "if", "plugin", ".", "augment", ":", "self", ".", "__plugins_render_views__", ".", "append", "(", "plugin", ")", "if", 
"hasattr", "(", "plugin", ",", "\"CACHED\"", ")", ":", "for", "func", "in", "plugin", ".", "CACHED", ":", "self", ".", "cached", ".", "append", "(", "(", "getattr", "(", "plugin", ",", "func", ")", ",", "plugin", ")", ")", "plugin", ".", "register_nemo", "(", "self", ")" ]
8d91f2c05b925a6c8ea8c997baf698c87257bc58
valid
Nemo.chunk
Handle a list of references depending on the text identifier using the chunker dictionary. :param text: Text object from which comes the references :type text: MyCapytains.resources.texts.api.Text :param reffs: List of references to transform :type reffs: References :return: Transformed list of references :rtype: [str]
flask_nemo/__init__.py
def chunk(self, text, reffs):
    """ Handle a list of references depending on the text identifier using the chunker dictionary.

    :param text: Text object from which comes the references
    :type text: MyCapytains.resources.texts.api.Text
    :param reffs: List of references to transform
    :type reffs: References
    :return: Transformed list of references
    :rtype: [str]
    """
    # Per-text chunker when one is registered, "default" otherwise
    chunker = self.chunker.get(str(text.id), self.chunker["default"])
    return chunker(text, reffs)
[ "Handle", "a", "list", "of", "references", "depending", "on", "the", "text", "identifier", "using", "the", "chunker", "dictionary", "." ]
Capitains/flask-capitains-nemo
python
https://github.com/Capitains/flask-capitains-nemo/blob/8d91f2c05b925a6c8ea8c997baf698c87257bc58/flask_nemo/__init__.py#L933-L945
[ "def", "chunk", "(", "self", ",", "text", ",", "reffs", ")", ":", "if", "str", "(", "text", ".", "id", ")", "in", "self", ".", "chunker", ":", "return", "self", ".", "chunker", "[", "str", "(", "text", ".", "id", ")", "]", "(", "text", ",", "reffs", ")", "return", "self", ".", "chunker", "[", "\"default\"", "]", "(", "text", ",", "reffs", ")" ]
8d91f2c05b925a6c8ea8c997baf698c87257bc58
valid
ChangeTracker.update
Returns the appropriate current value, based on the changes recorded by this ChangeTracker, the value stored by the server (`localValue`), and the value stored by the synchronizing client (`remoteValue`). If `remoteValue` conflicts with changes stored locally, then a `pysyncml.ConflictError` is raised. If a change needs to be applied because `remoteValue` has been updated, then the new value will be returned, and this ChangeTracker will be updated such that a call to :meth:`getChangeSpec` will incorporate the change. :param fieldname: The name of the fieldname being evaluated. :param localValue: The value of the field as stored by the server, usually the one that also stored the current change-spec. If `localValue` is ``None``, then it is assumed that the field was potentially added (this will first be verified against the stored change-spec). :param remoteValue: The new value being presented that may or may not be a source of conflict. If `remoteValue` is ``None``, then it is assumed that the field was potentially deleted (this will first be verified against the stored change-spec).
pysyncml/change/tracker.py
def update(self, fieldname, localValue, remoteValue): ''' Returns the appropriate current value, based on the changes recorded by this ChangeTracker, the value stored by the server (`localValue`), and the value stored by the synchronizing client (`remoteValue`). If `remoteValue` conflicts with changes stored locally, then a `pysyncml.ConflictError` is raised. If a change needs to be applied because `remoteValue` has been updated, then the new value will be returned, and this ChangeTracker will be updated such that a call to :meth:`getChangeSpec` will incorporate the change. :param fieldname: The name of the fieldname being evaluated. :param localValue: The value of the field as stored by the server, usually the one that also stored the current change-spec. If `localValue` is ``None``, then it is assumed that the field was potentially added (this will first be verified against the stored change-spec). :param remoteValue: The new value being presented that may or may not be a source of conflict. If `remoteValue` is ``None``, then it is assumed that the field was potentially deleted (this will first be verified against the stored change-spec). ''' if localValue == remoteValue: return localValue ct = constants.ITEM_DELETED if remoteValue is None else constants.ITEM_MODIFIED if localValue is None: ct = constants.ITEM_ADDED # todo: i should probably trap irep errors. for example, if this # cspec has a field "x" marked as deleted, then `localValue` # must be None... etc. # TODO: i think this kind of handling would break in ListChangeTracker!... changed = self.isChange(fieldname, ct, remoteValue) if changed is None: return localValue self.append(changed, ct, initialValue=localValue, isMd5=False) return remoteValue
def update(self, fieldname, localValue, remoteValue): ''' Returns the appropriate current value, based on the changes recorded by this ChangeTracker, the value stored by the server (`localValue`), and the value stored by the synchronizing client (`remoteValue`). If `remoteValue` conflicts with changes stored locally, then a `pysyncml.ConflictError` is raised. If a change needs to be applied because `remoteValue` has been updated, then the new value will be returned, and this ChangeTracker will be updated such that a call to :meth:`getChangeSpec` will incorporate the change. :param fieldname: The name of the fieldname being evaluated. :param localValue: The value of the field as stored by the server, usually the one that also stored the current change-spec. If `localValue` is ``None``, then it is assumed that the field was potentially added (this will first be verified against the stored change-spec). :param remoteValue: The new value being presented that may or may not be a source of conflict. If `remoteValue` is ``None``, then it is assumed that the field was potentially deleted (this will first be verified against the stored change-spec). ''' if localValue == remoteValue: return localValue ct = constants.ITEM_DELETED if remoteValue is None else constants.ITEM_MODIFIED if localValue is None: ct = constants.ITEM_ADDED # todo: i should probably trap irep errors. for example, if this # cspec has a field "x" marked as deleted, then `localValue` # must be None... etc. # TODO: i think this kind of handling would break in ListChangeTracker!... changed = self.isChange(fieldname, ct, remoteValue) if changed is None: return localValue self.append(changed, ct, initialValue=localValue, isMd5=False) return remoteValue
[ "Returns", "the", "appropriate", "current", "value", "based", "on", "the", "changes", "recorded", "by", "this", "ChangeTracker", "the", "value", "stored", "by", "the", "server", "(", "localValue", ")", "and", "the", "value", "stored", "by", "the", "synchronizing", "client", "(", "remoteValue", ")", ".", "If", "remoteValue", "conflicts", "with", "changes", "stored", "locally", "then", "a", "pysyncml", ".", "ConflictError", "is", "raised", "." ]
metagriffin/pysyncml
python
https://github.com/metagriffin/pysyncml/blob/a583fe0dbffa8b24e5a3e151524f84868b2382bb/pysyncml/change/tracker.py#L232-L280
[ "def", "update", "(", "self", ",", "fieldname", ",", "localValue", ",", "remoteValue", ")", ":", "if", "localValue", "==", "remoteValue", ":", "return", "localValue", "ct", "=", "constants", ".", "ITEM_DELETED", "if", "remoteValue", "is", "None", "else", "constants", ".", "ITEM_MODIFIED", "if", "localValue", "is", "None", ":", "ct", "=", "constants", ".", "ITEM_ADDED", "# todo: i should probably trap irep errors. for example, if this", "# cspec has a field \"x\" marked as deleted, then `localValue`", "# must be None... etc.", "# TODO: i think this kind of handling would break in ListChangeTracker!...", "changed", "=", "self", ".", "isChange", "(", "fieldname", ",", "ct", ",", "remoteValue", ")", "if", "changed", "is", "None", ":", "return", "localValue", "self", ".", "append", "(", "changed", ",", "ct", ",", "initialValue", "=", "localValue", ",", "isMd5", "=", "False", ")", "return", "remoteValue" ]
a583fe0dbffa8b24e5a3e151524f84868b2382bb
valid
AttributeChangeTracker.isChange
Implements as specified in :meth:`.ChangeTracker.isChange` where the `changeObject` is simply the fieldname that needs to be updated with the `newValue`. Currently, this is always equal to `fieldname`.
pysyncml/change/tracker.py
def isChange(self, fieldname, changeType, newValue=None, isMd5=False): ''' Implements as specified in :meth:`.ChangeTracker.isChange` where the `changeObject` is simply the fieldname that needs to be updated with the `newValue`. Currently, this is always equal to `fieldname`. ''' # todo: this seems inefficient... changes = self._collapseChanges(self.baseline, self.current) if fieldname not in changes: return fieldname cur = changes[fieldname] if changeType == constants.ITEM_DELETED: if cur.op == constants.ITEM_ADDED or cur.op == constants.ITEM_DELETED: # the field is deleted because it hasn't been added yet # (the check for cur.op == constants.ITEM_DELETED should # never be true, so just here for paranoia...) return None # we are requiring that the current/new values are different, # thus there is a collision between the added values raise ConflictError('conflicting deletion of field "%s"' % (fieldname,)) # the `newValue` is different than the current value (otherwise # this method should not have been called) -- either it was added # or modified. # if it appears to be "added", then it may be because it was # deleted in this tracker. # if it appears to be "modified", then it may be because it # was modified in this tracker. # in either case, check to see if it is equal to the initial # value, and if it was, then there was actually no change. if isMd5Equal(newValue, isMd5, cur.ival, cur.md5): # the new value is equal to the initial value, so this # field was not changed (but has local changes) return None # the new value is not equal to the initial value, which means # that they were both changed and/or added. raise ConflictError( 'conflicting addition or modification of field "%s"' % (fieldname,))
def isChange(self, fieldname, changeType, newValue=None, isMd5=False): ''' Implements as specified in :meth:`.ChangeTracker.isChange` where the `changeObject` is simply the fieldname that needs to be updated with the `newValue`. Currently, this is always equal to `fieldname`. ''' # todo: this seems inefficient... changes = self._collapseChanges(self.baseline, self.current) if fieldname not in changes: return fieldname cur = changes[fieldname] if changeType == constants.ITEM_DELETED: if cur.op == constants.ITEM_ADDED or cur.op == constants.ITEM_DELETED: # the field is deleted because it hasn't been added yet # (the check for cur.op == constants.ITEM_DELETED should # never be true, so just here for paranoia...) return None # we are requiring that the current/new values are different, # thus there is a collision between the added values raise ConflictError('conflicting deletion of field "%s"' % (fieldname,)) # the `newValue` is different than the current value (otherwise # this method should not have been called) -- either it was added # or modified. # if it appears to be "added", then it may be because it was # deleted in this tracker. # if it appears to be "modified", then it may be because it # was modified in this tracker. # in either case, check to see if it is equal to the initial # value, and if it was, then there was actually no change. if isMd5Equal(newValue, isMd5, cur.ival, cur.md5): # the new value is equal to the initial value, so this # field was not changed (but has local changes) return None # the new value is not equal to the initial value, which means # that they were both changed and/or added. raise ConflictError( 'conflicting addition or modification of field "%s"' % (fieldname,))
[ "Implements", "as", "specified", "in", ":", "meth", ":", ".", "ChangeTracker", ".", "isChange", "where", "the", "changeObject", "is", "simply", "the", "fieldname", "that", "needs", "to", "be", "updated", "with", "the", "newValue", ".", "Currently", "this", "is", "always", "equal", "to", "fieldname", "." ]
metagriffin/pysyncml
python
https://github.com/metagriffin/pysyncml/blob/a583fe0dbffa8b24e5a3e151524f84868b2382bb/pysyncml/change/tracker.py#L431-L475
[ "def", "isChange", "(", "self", ",", "fieldname", ",", "changeType", ",", "newValue", "=", "None", ",", "isMd5", "=", "False", ")", ":", "# todo: this seems inefficient...", "changes", "=", "self", ".", "_collapseChanges", "(", "self", ".", "baseline", ",", "self", ".", "current", ")", "if", "fieldname", "not", "in", "changes", ":", "return", "fieldname", "cur", "=", "changes", "[", "fieldname", "]", "if", "changeType", "==", "constants", ".", "ITEM_DELETED", ":", "if", "cur", ".", "op", "==", "constants", ".", "ITEM_ADDED", "or", "cur", ".", "op", "==", "constants", ".", "ITEM_DELETED", ":", "# the field is deleted because it hasn't been added yet", "# (the check for cur.op == constants.ITEM_DELETED should", "# never be true, so just here for paranoia...)", "return", "None", "# we are requiring that the current/new values are different,", "# thus there is a collision between the added values", "raise", "ConflictError", "(", "'conflicting deletion of field \"%s\"'", "%", "(", "fieldname", ",", ")", ")", "# the `newValue` is different than the current value (otherwise", "# this method should not have been called) -- either it was added", "# or modified.", "# if it appears to be \"added\", then it may be because it was", "# deleted in this tracker.", "# if it appears to be \"modified\", then it may be because it", "# was modified in this tracker.", "# in either case, check to see if it is equal to the initial", "# value, and if it was, then there was actually no change.", "if", "isMd5Equal", "(", "newValue", ",", "isMd5", ",", "cur", ".", "ival", ",", "cur", ".", "md5", ")", ":", "# the new value is equal to the initial value, so this", "# field was not changed (but has local changes)", "return", "None", "# the new value is not equal to the initial value, which means", "# that they were both changed and/or added.", "raise", "ConflictError", "(", "'conflicting addition or modification of field \"%s\"'", "%", "(", "fieldname", ",", ")", ")" ]
a583fe0dbffa8b24e5a3e151524f84868b2382bb
valid
ListChangeTracker.append
Adds a change spec to the current list of changes. The `listIndex` represents the line number (in multi-line mode) or word number (in single-line mode), and must be **INCLUSIVE** of both additions and deletions.
pysyncml/change/tracker.py
def append(self, listIndex, changeType, initialValue=None, isMd5=False): ''' Adds a change spec to the current list of changes. The `listIndex` represents the line number (in multi-line mode) or word number (in single-line mode), and must be **INCLUSIVE** of both additions and deletions. ''' if not isMd5 and initialValue is not None and len(initialValue) > 32: initialValue = hashlib.md5(initialValue).hexdigest() isMd5 = True cur = adict(index = int(listIndex), op = changeType, ival = initialValue, md5 = isMd5) for idx, val in enumerate(self.current): if val.index < cur.index: continue if val.index > cur.index: self.current.insert(idx, cur) break # todo: this should never happen... (there should not be a change # reported for the same line without a `pushChangeSpec()` between) # todo: perhaps attempt a merging?... raise InvalidChangeSpec('conflicting changes for index %d' % (cur.index,)) else: self.current.append(cur)
def append(self, listIndex, changeType, initialValue=None, isMd5=False): ''' Adds a change spec to the current list of changes. The `listIndex` represents the line number (in multi-line mode) or word number (in single-line mode), and must be **INCLUSIVE** of both additions and deletions. ''' if not isMd5 and initialValue is not None and len(initialValue) > 32: initialValue = hashlib.md5(initialValue).hexdigest() isMd5 = True cur = adict(index = int(listIndex), op = changeType, ival = initialValue, md5 = isMd5) for idx, val in enumerate(self.current): if val.index < cur.index: continue if val.index > cur.index: self.current.insert(idx, cur) break # todo: this should never happen... (there should not be a change # reported for the same line without a `pushChangeSpec()` between) # todo: perhaps attempt a merging?... raise InvalidChangeSpec('conflicting changes for index %d' % (cur.index,)) else: self.current.append(cur)
[ "Adds", "a", "change", "spec", "to", "the", "current", "list", "of", "changes", ".", "The", "listIndex", "represents", "the", "line", "number", "(", "in", "multi", "-", "line", "mode", ")", "or", "word", "number", "(", "in", "single", "-", "line", "mode", ")", "and", "must", "be", "**", "INCLUSIVE", "**", "of", "both", "additions", "and", "deletions", "." ]
metagriffin/pysyncml
python
https://github.com/metagriffin/pysyncml/blob/a583fe0dbffa8b24e5a3e151524f84868b2382bb/pysyncml/change/tracker.py#L570-L595
[ "def", "append", "(", "self", ",", "listIndex", ",", "changeType", ",", "initialValue", "=", "None", ",", "isMd5", "=", "False", ")", ":", "if", "not", "isMd5", "and", "initialValue", "is", "not", "None", "and", "len", "(", "initialValue", ")", ">", "32", ":", "initialValue", "=", "hashlib", ".", "md5", "(", "initialValue", ")", ".", "hexdigest", "(", ")", "isMd5", "=", "True", "cur", "=", "adict", "(", "index", "=", "int", "(", "listIndex", ")", ",", "op", "=", "changeType", ",", "ival", "=", "initialValue", ",", "md5", "=", "isMd5", ")", "for", "idx", ",", "val", "in", "enumerate", "(", "self", ".", "current", ")", ":", "if", "val", ".", "index", "<", "cur", ".", "index", ":", "continue", "if", "val", ".", "index", ">", "cur", ".", "index", ":", "self", ".", "current", ".", "insert", "(", "idx", ",", "cur", ")", "break", "# todo: this should never happen... (there should not be a change", "# reported for the same line without a `pushChangeSpec()` between)", "# todo: perhaps attempt a merging?...", "raise", "InvalidChangeSpec", "(", "'conflicting changes for index %d'", "%", "(", "cur", ".", "index", ",", ")", ")", "else", ":", "self", ".", "current", ".", "append", "(", "cur", ")" ]
a583fe0dbffa8b24e5a3e151524f84868b2382bb
valid
ListChangeTracker.isChange
Implements as specified in :meth:`.ChangeTracker.isChange` where the `changeObject` is a two-element tuple. The first element is the index at which the change should be applied, and the second element is an abstract token that should be passed back into this method at every iteration. IMPORTANT: unlike the AttributeChangeTracker, the ListChangeTracker's `isChange()` method is sensitive to order (which is why it uses the `changeObject` and `token` mechanisms. Therefore, it is important to call `isChange()` sequentially with all changes in the order that they occur in the change list.
pysyncml/change/tracker.py
def isChange(self, listIndex, changeType, newValue=None, isMd5=False, token=None): ''' Implements as specified in :meth:`.ChangeTracker.isChange` where the `changeObject` is a two-element tuple. The first element is the index at which the change should be applied, and the second element is an abstract token that should be passed back into this method at every iteration. IMPORTANT: unlike the AttributeChangeTracker, the ListChangeTracker's `isChange()` method is sensitive to order (which is why it uses the `changeObject` and `token` mechanisms. Therefore, it is important to call `isChange()` sequentially with all changes in the order that they occur in the change list. ''' # THE INDEX PASSED TO ListChangeTracker.isChange() DOES NOT INCLUDE: # - local deletions # - remote additions adjust = 0 # tracks local deletes token = token # tracks consecutive addition adjustments index = int(listIndex) ret = index # todo: this should reduce complexity later on, but something # went wrong... # if changeType != constants.ITEM_ADDED: # token = None # else: # if token is None or token[0] != index: # token = (ret, 0) # token = (ret, token[1] + 1) # todo: this seems inefficient... 
changes = self._collapseChanges(self.baseline, self.current) for cur in changes: if cur.index > index: if changeType != constants.ITEM_ADDED: return (ret, None) if token is None or token[0] != index - adjust: token = (ret, 0) token = (ret, token[1] + 1) return (ret, token) if cur.index != index: if cur.op == constants.ITEM_DELETED: index += 1 adjust += 1 continue if token is not None and token[0] == index - adjust: index += token[1] continue if changeType == constants.ITEM_DELETED: if cur.op == constants.ITEM_ADDED: # the field is deleted because it hasn't been added yet return (None, None) # we are requiring that the current/new values are different, # thus there is a collision between the added values raise ConflictError( 'conflicting deletion of list index %r' % (index,)) if changeType == constants.ITEM_ADDED: if token is None: token = (ret, 0) token = (ret, token[1] + 1) if cur.op == constants.ITEM_DELETED: if isMd5Equal(newValue, isMd5, cur.ival, cur.md5): return (None, token) # todo: this *could* be a del-mod *conflict*... but not # *NECESSARILY* so, since it could be a # del-adjacent-add, which is not a problem. in the # conflict case, the resolution will cause the # modified line to silently win. # TODO: perhaps i should err on the side of safety and # issue a ConflictError?... return (ret, token) if cur.op == constants.ITEM_DELETED: index += 1 adjust += 1 continue # changeType = mod, op = add/mod if cur.op == constants.ITEM_ADDED: # todo: i'm not sure if this case is even possible... raise ConflictError( 'conflicting addition of list index %r' % (index,)) # mod/mod - check initvalue if isMd5Equal(newValue, isMd5, cur.ival, cur.md5): # the new value is equal to the initial value, so this # line was not changed (but has local changes) return (None, None) # the new value is not equal to the initial value, which means # that they were both changed and/or added. 
raise ConflictError( 'conflicting modification of list index %r' % (index,)) if changeType != constants.ITEM_ADDED: return (ret, None) if token is None or token[0] != index - adjust: token = (ret, 0) token = (ret, token[1] + 1) return (ret, token)
def isChange(self, listIndex, changeType, newValue=None, isMd5=False, token=None): ''' Implements as specified in :meth:`.ChangeTracker.isChange` where the `changeObject` is a two-element tuple. The first element is the index at which the change should be applied, and the second element is an abstract token that should be passed back into this method at every iteration. IMPORTANT: unlike the AttributeChangeTracker, the ListChangeTracker's `isChange()` method is sensitive to order (which is why it uses the `changeObject` and `token` mechanisms. Therefore, it is important to call `isChange()` sequentially with all changes in the order that they occur in the change list. ''' # THE INDEX PASSED TO ListChangeTracker.isChange() DOES NOT INCLUDE: # - local deletions # - remote additions adjust = 0 # tracks local deletes token = token # tracks consecutive addition adjustments index = int(listIndex) ret = index # todo: this should reduce complexity later on, but something # went wrong... # if changeType != constants.ITEM_ADDED: # token = None # else: # if token is None or token[0] != index: # token = (ret, 0) # token = (ret, token[1] + 1) # todo: this seems inefficient... 
changes = self._collapseChanges(self.baseline, self.current) for cur in changes: if cur.index > index: if changeType != constants.ITEM_ADDED: return (ret, None) if token is None or token[0] != index - adjust: token = (ret, 0) token = (ret, token[1] + 1) return (ret, token) if cur.index != index: if cur.op == constants.ITEM_DELETED: index += 1 adjust += 1 continue if token is not None and token[0] == index - adjust: index += token[1] continue if changeType == constants.ITEM_DELETED: if cur.op == constants.ITEM_ADDED: # the field is deleted because it hasn't been added yet return (None, None) # we are requiring that the current/new values are different, # thus there is a collision between the added values raise ConflictError( 'conflicting deletion of list index %r' % (index,)) if changeType == constants.ITEM_ADDED: if token is None: token = (ret, 0) token = (ret, token[1] + 1) if cur.op == constants.ITEM_DELETED: if isMd5Equal(newValue, isMd5, cur.ival, cur.md5): return (None, token) # todo: this *could* be a del-mod *conflict*... but not # *NECESSARILY* so, since it could be a # del-adjacent-add, which is not a problem. in the # conflict case, the resolution will cause the # modified line to silently win. # TODO: perhaps i should err on the side of safety and # issue a ConflictError?... return (ret, token) if cur.op == constants.ITEM_DELETED: index += 1 adjust += 1 continue # changeType = mod, op = add/mod if cur.op == constants.ITEM_ADDED: # todo: i'm not sure if this case is even possible... raise ConflictError( 'conflicting addition of list index %r' % (index,)) # mod/mod - check initvalue if isMd5Equal(newValue, isMd5, cur.ival, cur.md5): # the new value is equal to the initial value, so this # line was not changed (but has local changes) return (None, None) # the new value is not equal to the initial value, which means # that they were both changed and/or added. 
raise ConflictError( 'conflicting modification of list index %r' % (index,)) if changeType != constants.ITEM_ADDED: return (ret, None) if token is None or token[0] != index - adjust: token = (ret, 0) token = (ret, token[1] + 1) return (ret, token)
[ "Implements", "as", "specified", "in", ":", "meth", ":", ".", "ChangeTracker", ".", "isChange", "where", "the", "changeObject", "is", "a", "two", "-", "element", "tuple", ".", "The", "first", "element", "is", "the", "index", "at", "which", "the", "change", "should", "be", "applied", "and", "the", "second", "element", "is", "an", "abstract", "token", "that", "should", "be", "passed", "back", "into", "this", "method", "at", "every", "iteration", "." ]
metagriffin/pysyncml
python
https://github.com/metagriffin/pysyncml/blob/a583fe0dbffa8b24e5a3e151524f84868b2382bb/pysyncml/change/tracker.py#L598-L707
[ "def", "isChange", "(", "self", ",", "listIndex", ",", "changeType", ",", "newValue", "=", "None", ",", "isMd5", "=", "False", ",", "token", "=", "None", ")", ":", "# THE INDEX PASSED TO ListChangeTracker.isChange() DOES NOT INCLUDE:", "# - local deletions", "# - remote additions", "adjust", "=", "0", "# tracks local deletes", "token", "=", "token", "# tracks consecutive addition adjustments", "index", "=", "int", "(", "listIndex", ")", "ret", "=", "index", "# todo: this should reduce complexity later on, but something", "# went wrong...", "# if changeType != constants.ITEM_ADDED:", "# token = None", "# else:", "# if token is None or token[0] != index:", "# token = (ret, 0)", "# token = (ret, token[1] + 1)", "# todo: this seems inefficient...", "changes", "=", "self", ".", "_collapseChanges", "(", "self", ".", "baseline", ",", "self", ".", "current", ")", "for", "cur", "in", "changes", ":", "if", "cur", ".", "index", ">", "index", ":", "if", "changeType", "!=", "constants", ".", "ITEM_ADDED", ":", "return", "(", "ret", ",", "None", ")", "if", "token", "is", "None", "or", "token", "[", "0", "]", "!=", "index", "-", "adjust", ":", "token", "=", "(", "ret", ",", "0", ")", "token", "=", "(", "ret", ",", "token", "[", "1", "]", "+", "1", ")", "return", "(", "ret", ",", "token", ")", "if", "cur", ".", "index", "!=", "index", ":", "if", "cur", ".", "op", "==", "constants", ".", "ITEM_DELETED", ":", "index", "+=", "1", "adjust", "+=", "1", "continue", "if", "token", "is", "not", "None", "and", "token", "[", "0", "]", "==", "index", "-", "adjust", ":", "index", "+=", "token", "[", "1", "]", "continue", "if", "changeType", "==", "constants", ".", "ITEM_DELETED", ":", "if", "cur", ".", "op", "==", "constants", ".", "ITEM_ADDED", ":", "# the field is deleted because it hasn't been added yet", "return", "(", "None", ",", "None", ")", "# we are requiring that the current/new values are different,", "# thus there is a collision between the added values", "raise", 
"ConflictError", "(", "'conflicting deletion of list index %r'", "%", "(", "index", ",", ")", ")", "if", "changeType", "==", "constants", ".", "ITEM_ADDED", ":", "if", "token", "is", "None", ":", "token", "=", "(", "ret", ",", "0", ")", "token", "=", "(", "ret", ",", "token", "[", "1", "]", "+", "1", ")", "if", "cur", ".", "op", "==", "constants", ".", "ITEM_DELETED", ":", "if", "isMd5Equal", "(", "newValue", ",", "isMd5", ",", "cur", ".", "ival", ",", "cur", ".", "md5", ")", ":", "return", "(", "None", ",", "token", ")", "# todo: this *could* be a del-mod *conflict*... but not", "# *NECESSARILY* so, since it could be a", "# del-adjacent-add, which is not a problem. in the", "# conflict case, the resolution will cause the", "# modified line to silently win.", "# TODO: perhaps i should err on the side of safety and", "# issue a ConflictError?...", "return", "(", "ret", ",", "token", ")", "if", "cur", ".", "op", "==", "constants", ".", "ITEM_DELETED", ":", "index", "+=", "1", "adjust", "+=", "1", "continue", "# changeType = mod, op = add/mod", "if", "cur", ".", "op", "==", "constants", ".", "ITEM_ADDED", ":", "# todo: i'm not sure if this case is even possible...", "raise", "ConflictError", "(", "'conflicting addition of list index %r'", "%", "(", "index", ",", ")", ")", "# mod/mod - check initvalue", "if", "isMd5Equal", "(", "newValue", ",", "isMd5", ",", "cur", ".", "ival", ",", "cur", ".", "md5", ")", ":", "# the new value is equal to the initial value, so this", "# line was not changed (but has local changes)", "return", "(", "None", ",", "None", ")", "# the new value is not equal to the initial value, which means", "# that they were both changed and/or added.", "raise", "ConflictError", "(", "'conflicting modification of list index %r'", "%", "(", "index", ",", ")", ")", "if", "changeType", "!=", "constants", ".", "ITEM_ADDED", ":", "return", "(", "ret", ",", "None", ")", "if", "token", "is", "None", "or", "token", "[", "0", "]", "!=", "index", "-", "adjust", 
":", "token", "=", "(", "ret", ",", "0", ")", "token", "=", "(", "ret", ",", "token", "[", "1", "]", "+", "1", ")", "return", "(", "ret", ",", "token", ")" ]
a583fe0dbffa8b24e5a3e151524f84868b2382bb
valid
add_tag
Obtains the data from the pipe and appends the given tag.
jackal/scripts/tags.py
def add_tag(): """ Obtains the data from the pipe and appends the given tag. """ if len(sys.argv) > 1: tag = sys.argv[1] doc_mapper = DocMapper() if doc_mapper.is_pipe: count = 0 for obj in doc_mapper.get_pipe(): obj.add_tag(tag) obj.update(tags=obj.tags) count += 1 print_success("Added tag '{}' to {} object(s)".format(tag, count)) else: print_error("Please use this script with pipes") else: print_error("Usage: jk-add-tag <tag>") sys.exit()
def add_tag(): """ Obtains the data from the pipe and appends the given tag. """ if len(sys.argv) > 1: tag = sys.argv[1] doc_mapper = DocMapper() if doc_mapper.is_pipe: count = 0 for obj in doc_mapper.get_pipe(): obj.add_tag(tag) obj.update(tags=obj.tags) count += 1 print_success("Added tag '{}' to {} object(s)".format(tag, count)) else: print_error("Please use this script with pipes") else: print_error("Usage: jk-add-tag <tag>") sys.exit()
[ "Obtains", "the", "data", "from", "the", "pipe", "and", "appends", "the", "given", "tag", "." ]
mwgielen/jackal
python
https://github.com/mwgielen/jackal/blob/7fe62732eb5194b7246215d5277fb37c398097bf/jackal/scripts/tags.py#L8-L26
[ "def", "add_tag", "(", ")", ":", "if", "len", "(", "sys", ".", "argv", ")", ">", "1", ":", "tag", "=", "sys", ".", "argv", "[", "1", "]", "doc_mapper", "=", "DocMapper", "(", ")", "if", "doc_mapper", ".", "is_pipe", ":", "count", "=", "0", "for", "obj", "in", "doc_mapper", ".", "get_pipe", "(", ")", ":", "obj", ".", "add_tag", "(", "tag", ")", "obj", ".", "update", "(", "tags", "=", "obj", ".", "tags", ")", "count", "+=", "1", "print_success", "(", "\"Added tag '{}' to {} object(s)\"", ".", "format", "(", "tag", ",", "count", ")", ")", "else", ":", "print_error", "(", "\"Please use this script with pipes\"", ")", "else", ":", "print_error", "(", "\"Usage: jk-add-tag <tag>\"", ")", "sys", ".", "exit", "(", ")" ]
7fe62732eb5194b7246215d5277fb37c398097bf
valid
manual_configure
Function to manually configure jackal.
jackal/config.py
def manual_configure(): """ Function to manually configure jackal. """ print("Manual configuring jackal") mapping = { '1': 'y', '0': 'n'} config = Config() # Host host = input_with_default("What is the Elasticsearch host?", config.get('jackal', 'host')) config.set('jackal', 'host', host) # SSL if input_with_default("Use SSL?", mapping[config.get('jackal', 'use_ssl')]) == 'y': config.set('jackal', 'use_ssl', '1') if input_with_default("Setup custom server cert?", 'y') == 'y': ca_certs = input_with_default("Server certificate location?", config.get('jackal', 'ca_certs')) config.set('jackal', 'ca_certs', ca_certs) else: config.set('jackal', 'ca_certs', '') else: config.set('jackal', 'use_ssl', '0') if input_with_default("Setup client certificates?", mapping[config.get('jackal', 'client_certs')]) == 'y': config.set('jackal', 'client_certs', '1') client_cert = input_with_default("Client cert location?", config.get('jackal', 'client_cert')) config.set('jackal', 'client_cert', client_cert) client_key = input_with_default("Client key location?", config.get('jackal', 'client_key')) config.set('jackal', 'client_key', client_key) else: config.set('jackal', 'client_certs', '0') # Index index = input_with_default("What index prefix should jackal use?", config.get('jackal', 'index')) config.set('jackal', 'index', index) initialize_indices = (input_with_default("Do you want to initialize the indices?", 'y').lower() == 'y') # Nmap nmap_dir = input_with_default("What directory do you want to place the nmap results in?", config.get('nmap', 'directory')) if not os.path.exists(nmap_dir): os.makedirs(nmap_dir) config.set('nmap', 'directory', nmap_dir) nmap_options = input_with_default("What nmap options do you want to set for 'custom' (for example '-p 22,445')?", config.get('nmap', 'options')) config.set('nmap', 'options', nmap_options) # Nessus configure_nessus = (input_with_default("Do you want to setup nessus?", 'n').lower() == 'y') if configure_nessus: nessus_host = 
input_with_default("What is the nessus host?", config.get('nessus', 'host')) nessus_template = input_with_default("What template should jackal use?", config.get('nessus', 'template_name')) nessus_access = input_with_default("What api access key should jackal use?", config.get('nessus', 'access_key')) nessus_secret = input_with_default("What api secret key should jackal use?", config.get('nessus', 'secret_key')) config.set('nessus', 'host', nessus_host) config.set('nessus', 'template_name', nessus_template) config.set('nessus', 'access_key', nessus_access) config.set('nessus', 'secret_key', nessus_secret) # Named pipes configure_pipes = (input_with_default("Do you want to setup named pipes?", 'n').lower() == 'y') if configure_pipes: directory = input_with_default("What directory do you want to place the named pipes in?", config.get('pipes', 'directory')) config.set('pipes', 'directory', directory) config_file = input_with_default("What is the name of the named pipe config?", config.get('pipes', 'config_file')) config.set('pipes', 'config_file', config_file) if not os.path.exists(directory): create = (input_with_default("Do you want to create the directory?", 'n').lower() == 'y') if create: os.makedirs(directory) if not os.path.exists(os.path.join(config.config_dir, config_file)): f = open(os.path.join(config.config_dir, config_file), 'a') f.close() config.write_config(initialize_indices)
def manual_configure(): """ Function to manually configure jackal. """ print("Manual configuring jackal") mapping = { '1': 'y', '0': 'n'} config = Config() # Host host = input_with_default("What is the Elasticsearch host?", config.get('jackal', 'host')) config.set('jackal', 'host', host) # SSL if input_with_default("Use SSL?", mapping[config.get('jackal', 'use_ssl')]) == 'y': config.set('jackal', 'use_ssl', '1') if input_with_default("Setup custom server cert?", 'y') == 'y': ca_certs = input_with_default("Server certificate location?", config.get('jackal', 'ca_certs')) config.set('jackal', 'ca_certs', ca_certs) else: config.set('jackal', 'ca_certs', '') else: config.set('jackal', 'use_ssl', '0') if input_with_default("Setup client certificates?", mapping[config.get('jackal', 'client_certs')]) == 'y': config.set('jackal', 'client_certs', '1') client_cert = input_with_default("Client cert location?", config.get('jackal', 'client_cert')) config.set('jackal', 'client_cert', client_cert) client_key = input_with_default("Client key location?", config.get('jackal', 'client_key')) config.set('jackal', 'client_key', client_key) else: config.set('jackal', 'client_certs', '0') # Index index = input_with_default("What index prefix should jackal use?", config.get('jackal', 'index')) config.set('jackal', 'index', index) initialize_indices = (input_with_default("Do you want to initialize the indices?", 'y').lower() == 'y') # Nmap nmap_dir = input_with_default("What directory do you want to place the nmap results in?", config.get('nmap', 'directory')) if not os.path.exists(nmap_dir): os.makedirs(nmap_dir) config.set('nmap', 'directory', nmap_dir) nmap_options = input_with_default("What nmap options do you want to set for 'custom' (for example '-p 22,445')?", config.get('nmap', 'options')) config.set('nmap', 'options', nmap_options) # Nessus configure_nessus = (input_with_default("Do you want to setup nessus?", 'n').lower() == 'y') if configure_nessus: nessus_host = 
input_with_default("What is the nessus host?", config.get('nessus', 'host')) nessus_template = input_with_default("What template should jackal use?", config.get('nessus', 'template_name')) nessus_access = input_with_default("What api access key should jackal use?", config.get('nessus', 'access_key')) nessus_secret = input_with_default("What api secret key should jackal use?", config.get('nessus', 'secret_key')) config.set('nessus', 'host', nessus_host) config.set('nessus', 'template_name', nessus_template) config.set('nessus', 'access_key', nessus_access) config.set('nessus', 'secret_key', nessus_secret) # Named pipes configure_pipes = (input_with_default("Do you want to setup named pipes?", 'n').lower() == 'y') if configure_pipes: directory = input_with_default("What directory do you want to place the named pipes in?", config.get('pipes', 'directory')) config.set('pipes', 'directory', directory) config_file = input_with_default("What is the name of the named pipe config?", config.get('pipes', 'config_file')) config.set('pipes', 'config_file', config_file) if not os.path.exists(directory): create = (input_with_default("Do you want to create the directory?", 'n').lower() == 'y') if create: os.makedirs(directory) if not os.path.exists(os.path.join(config.config_dir, config_file)): f = open(os.path.join(config.config_dir, config_file), 'a') f.close() config.write_config(initialize_indices)
[ "Function", "to", "manually", "configure", "jackal", "." ]
mwgielen/jackal
python
https://github.com/mwgielen/jackal/blob/7fe62732eb5194b7246215d5277fb37c398097bf/jackal/config.py#L24-L95
[ "def", "manual_configure", "(", ")", ":", "print", "(", "\"Manual configuring jackal\"", ")", "mapping", "=", "{", "'1'", ":", "'y'", ",", "'0'", ":", "'n'", "}", "config", "=", "Config", "(", ")", "# Host", "host", "=", "input_with_default", "(", "\"What is the Elasticsearch host?\"", ",", "config", ".", "get", "(", "'jackal'", ",", "'host'", ")", ")", "config", ".", "set", "(", "'jackal'", ",", "'host'", ",", "host", ")", "# SSL", "if", "input_with_default", "(", "\"Use SSL?\"", ",", "mapping", "[", "config", ".", "get", "(", "'jackal'", ",", "'use_ssl'", ")", "]", ")", "==", "'y'", ":", "config", ".", "set", "(", "'jackal'", ",", "'use_ssl'", ",", "'1'", ")", "if", "input_with_default", "(", "\"Setup custom server cert?\"", ",", "'y'", ")", "==", "'y'", ":", "ca_certs", "=", "input_with_default", "(", "\"Server certificate location?\"", ",", "config", ".", "get", "(", "'jackal'", ",", "'ca_certs'", ")", ")", "config", ".", "set", "(", "'jackal'", ",", "'ca_certs'", ",", "ca_certs", ")", "else", ":", "config", ".", "set", "(", "'jackal'", ",", "'ca_certs'", ",", "''", ")", "else", ":", "config", ".", "set", "(", "'jackal'", ",", "'use_ssl'", ",", "'0'", ")", "if", "input_with_default", "(", "\"Setup client certificates?\"", ",", "mapping", "[", "config", ".", "get", "(", "'jackal'", ",", "'client_certs'", ")", "]", ")", "==", "'y'", ":", "config", ".", "set", "(", "'jackal'", ",", "'client_certs'", ",", "'1'", ")", "client_cert", "=", "input_with_default", "(", "\"Client cert location?\"", ",", "config", ".", "get", "(", "'jackal'", ",", "'client_cert'", ")", ")", "config", ".", "set", "(", "'jackal'", ",", "'client_cert'", ",", "client_cert", ")", "client_key", "=", "input_with_default", "(", "\"Client key location?\"", ",", "config", ".", "get", "(", "'jackal'", ",", "'client_key'", ")", ")", "config", ".", "set", "(", "'jackal'", ",", "'client_key'", ",", "client_key", ")", "else", ":", "config", ".", "set", "(", "'jackal'", ",", "'client_certs'", ",", 
"'0'", ")", "# Index", "index", "=", "input_with_default", "(", "\"What index prefix should jackal use?\"", ",", "config", ".", "get", "(", "'jackal'", ",", "'index'", ")", ")", "config", ".", "set", "(", "'jackal'", ",", "'index'", ",", "index", ")", "initialize_indices", "=", "(", "input_with_default", "(", "\"Do you want to initialize the indices?\"", ",", "'y'", ")", ".", "lower", "(", ")", "==", "'y'", ")", "# Nmap", "nmap_dir", "=", "input_with_default", "(", "\"What directory do you want to place the nmap results in?\"", ",", "config", ".", "get", "(", "'nmap'", ",", "'directory'", ")", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "nmap_dir", ")", ":", "os", ".", "makedirs", "(", "nmap_dir", ")", "config", ".", "set", "(", "'nmap'", ",", "'directory'", ",", "nmap_dir", ")", "nmap_options", "=", "input_with_default", "(", "\"What nmap options do you want to set for 'custom' (for example '-p 22,445')?\"", ",", "config", ".", "get", "(", "'nmap'", ",", "'options'", ")", ")", "config", ".", "set", "(", "'nmap'", ",", "'options'", ",", "nmap_options", ")", "# Nessus", "configure_nessus", "=", "(", "input_with_default", "(", "\"Do you want to setup nessus?\"", ",", "'n'", ")", ".", "lower", "(", ")", "==", "'y'", ")", "if", "configure_nessus", ":", "nessus_host", "=", "input_with_default", "(", "\"What is the nessus host?\"", ",", "config", ".", "get", "(", "'nessus'", ",", "'host'", ")", ")", "nessus_template", "=", "input_with_default", "(", "\"What template should jackal use?\"", ",", "config", ".", "get", "(", "'nessus'", ",", "'template_name'", ")", ")", "nessus_access", "=", "input_with_default", "(", "\"What api access key should jackal use?\"", ",", "config", ".", "get", "(", "'nessus'", ",", "'access_key'", ")", ")", "nessus_secret", "=", "input_with_default", "(", "\"What api secret key should jackal use?\"", ",", "config", ".", "get", "(", "'nessus'", ",", "'secret_key'", ")", ")", "config", ".", "set", "(", "'nessus'", ",", "'host'", ",", 
"nessus_host", ")", "config", ".", "set", "(", "'nessus'", ",", "'template_name'", ",", "nessus_template", ")", "config", ".", "set", "(", "'nessus'", ",", "'access_key'", ",", "nessus_access", ")", "config", ".", "set", "(", "'nessus'", ",", "'secret_key'", ",", "nessus_secret", ")", "# Named pipes", "configure_pipes", "=", "(", "input_with_default", "(", "\"Do you want to setup named pipes?\"", ",", "'n'", ")", ".", "lower", "(", ")", "==", "'y'", ")", "if", "configure_pipes", ":", "directory", "=", "input_with_default", "(", "\"What directory do you want to place the named pipes in?\"", ",", "config", ".", "get", "(", "'pipes'", ",", "'directory'", ")", ")", "config", ".", "set", "(", "'pipes'", ",", "'directory'", ",", "directory", ")", "config_file", "=", "input_with_default", "(", "\"What is the name of the named pipe config?\"", ",", "config", ".", "get", "(", "'pipes'", ",", "'config_file'", ")", ")", "config", ".", "set", "(", "'pipes'", ",", "'config_file'", ",", "config_file", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "directory", ")", ":", "create", "=", "(", "input_with_default", "(", "\"Do you want to create the directory?\"", ",", "'n'", ")", ".", "lower", "(", ")", "==", "'y'", ")", "if", "create", ":", "os", ".", "makedirs", "(", "directory", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "os", ".", "path", ".", "join", "(", "config", ".", "config_dir", ",", "config_file", ")", ")", ":", "f", "=", "open", "(", "os", ".", "path", ".", "join", "(", "config", ".", "config_dir", ",", "config_file", ")", ",", "'a'", ")", "f", ".", "close", "(", ")", "config", ".", "write_config", "(", "initialize_indices", ")" ]
7fe62732eb5194b7246215d5277fb37c398097bf
valid
Config.set
Creates the section value if it does not exists and sets the value. Use write_config to actually set the value.
jackal/config.py
def set(self, section, key, value): """ Creates the section value if it does not exists and sets the value. Use write_config to actually set the value. """ if not section in self.config: self.config.add_section(section) self.config.set(section, key, value)
def set(self, section, key, value): """ Creates the section value if it does not exists and sets the value. Use write_config to actually set the value. """ if not section in self.config: self.config.add_section(section) self.config.set(section, key, value)
[ "Creates", "the", "section", "value", "if", "it", "does", "not", "exists", "and", "sets", "the", "value", ".", "Use", "write_config", "to", "actually", "set", "the", "value", "." ]
mwgielen/jackal
python
https://github.com/mwgielen/jackal/blob/7fe62732eb5194b7246215d5277fb37c398097bf/jackal/config.py#L191-L198
[ "def", "set", "(", "self", ",", "section", ",", "key", ",", "value", ")", ":", "if", "not", "section", "in", "self", ".", "config", ":", "self", ".", "config", ".", "add_section", "(", "section", ")", "self", ".", "config", ".", "set", "(", "section", ",", "key", ",", "value", ")" ]
7fe62732eb5194b7246215d5277fb37c398097bf
valid
Config.get
This function tries to retrieve the value from the configfile otherwise will return a default.
jackal/config.py
def get(self, section, key): """ This function tries to retrieve the value from the configfile otherwise will return a default. """ try: return self.config.get(section, key) except configparser.NoSectionError: pass except configparser.NoOptionError: pass return self.defaults[section][key]
def get(self, section, key): """ This function tries to retrieve the value from the configfile otherwise will return a default. """ try: return self.config.get(section, key) except configparser.NoSectionError: pass except configparser.NoOptionError: pass return self.defaults[section][key]
[ "This", "function", "tries", "to", "retrieve", "the", "value", "from", "the", "configfile", "otherwise", "will", "return", "a", "default", "." ]
mwgielen/jackal
python
https://github.com/mwgielen/jackal/blob/7fe62732eb5194b7246215d5277fb37c398097bf/jackal/config.py#L201-L212
[ "def", "get", "(", "self", ",", "section", ",", "key", ")", ":", "try", ":", "return", "self", ".", "config", ".", "get", "(", "section", ",", "key", ")", "except", "configparser", ".", "NoSectionError", ":", "pass", "except", "configparser", ".", "NoOptionError", ":", "pass", "return", "self", ".", "defaults", "[", "section", "]", "[", "key", "]" ]
7fe62732eb5194b7246215d5277fb37c398097bf
valid
Config.config_dir
Returns the configuration directory
jackal/config.py
def config_dir(self): """ Returns the configuration directory """ home = expanduser('~') config_dir = os.path.join(home, '.jackal') return config_dir
def config_dir(self): """ Returns the configuration directory """ home = expanduser('~') config_dir = os.path.join(home, '.jackal') return config_dir
[ "Returns", "the", "configuration", "directory" ]
mwgielen/jackal
python
https://github.com/mwgielen/jackal/blob/7fe62732eb5194b7246215d5277fb37c398097bf/jackal/config.py#L223-L229
[ "def", "config_dir", "(", "self", ")", ":", "home", "=", "expanduser", "(", "'~'", ")", "config_dir", "=", "os", ".", "path", ".", "join", "(", "home", ",", "'.jackal'", ")", "return", "config_dir" ]
7fe62732eb5194b7246215d5277fb37c398097bf
valid
Config.write_config
Write the current config to disk to store them.
jackal/config.py
def write_config(self, initialize_indices=False): """ Write the current config to disk to store them. """ if not os.path.exists(self.config_dir): os.mkdir(self.config_dir) with open(self.config_file, 'w') as configfile: self.config.write(configfile) if initialize_indices: index = self.get('jackal', 'index') from jackal import Host, Range, Service, User, Credential, Log from jackal.core import create_connection create_connection(self) Host.init(index="{}-hosts".format(index)) Range.init(index="{}-ranges".format(index)) Service.init(index="{}-services".format(index)) User.init(index="{}-users".format(index)) Credential.init(index="{}-creds".format(index)) Log.init(index="{}-log".format(index))
def write_config(self, initialize_indices=False): """ Write the current config to disk to store them. """ if not os.path.exists(self.config_dir): os.mkdir(self.config_dir) with open(self.config_file, 'w') as configfile: self.config.write(configfile) if initialize_indices: index = self.get('jackal', 'index') from jackal import Host, Range, Service, User, Credential, Log from jackal.core import create_connection create_connection(self) Host.init(index="{}-hosts".format(index)) Range.init(index="{}-ranges".format(index)) Service.init(index="{}-services".format(index)) User.init(index="{}-users".format(index)) Credential.init(index="{}-creds".format(index)) Log.init(index="{}-log".format(index))
[ "Write", "the", "current", "config", "to", "disk", "to", "store", "them", "." ]
mwgielen/jackal
python
https://github.com/mwgielen/jackal/blob/7fe62732eb5194b7246215d5277fb37c398097bf/jackal/config.py#L231-L251
[ "def", "write_config", "(", "self", ",", "initialize_indices", "=", "False", ")", ":", "if", "not", "os", ".", "path", ".", "exists", "(", "self", ".", "config_dir", ")", ":", "os", ".", "mkdir", "(", "self", ".", "config_dir", ")", "with", "open", "(", "self", ".", "config_file", ",", "'w'", ")", "as", "configfile", ":", "self", ".", "config", ".", "write", "(", "configfile", ")", "if", "initialize_indices", ":", "index", "=", "self", ".", "get", "(", "'jackal'", ",", "'index'", ")", "from", "jackal", "import", "Host", ",", "Range", ",", "Service", ",", "User", ",", "Credential", ",", "Log", "from", "jackal", ".", "core", "import", "create_connection", "create_connection", "(", "self", ")", "Host", ".", "init", "(", "index", "=", "\"{}-hosts\"", ".", "format", "(", "index", ")", ")", "Range", ".", "init", "(", "index", "=", "\"{}-ranges\"", ".", "format", "(", "index", ")", ")", "Service", ".", "init", "(", "index", "=", "\"{}-services\"", ".", "format", "(", "index", ")", ")", "User", ".", "init", "(", "index", "=", "\"{}-users\"", ".", "format", "(", "index", ")", ")", "Credential", ".", "init", "(", "index", "=", "\"{}-creds\"", ".", "format", "(", "index", ")", ")", "Log", ".", "init", "(", "index", "=", "\"{}-log\"", ".", "format", "(", "index", ")", ")" ]
7fe62732eb5194b7246215d5277fb37c398097bf
valid
ensure_remote_branch_is_tracked
Track the specified remote branch if it is not already tracked.
git_ready.py
def ensure_remote_branch_is_tracked(branch): """Track the specified remote branch if it is not already tracked.""" if branch == MASTER_BRANCH: # We don't need to explicitly track the master branch, so we're done. return # Ensure the specified branch is in the local branch list. output = subprocess.check_output(['git', 'branch', '--list']) for line in output.split('\n'): if line.strip() == branch: # We are already tracking the remote branch break else: # We are not tracking the remote branch, so track it. try: sys.stdout.write(subprocess.check_output( ['git', 'checkout', '--track', 'origin/%s' % branch])) except subprocess.CalledProcessError: # Bail gracefully. raise SystemExit(1)
def ensure_remote_branch_is_tracked(branch): """Track the specified remote branch if it is not already tracked.""" if branch == MASTER_BRANCH: # We don't need to explicitly track the master branch, so we're done. return # Ensure the specified branch is in the local branch list. output = subprocess.check_output(['git', 'branch', '--list']) for line in output.split('\n'): if line.strip() == branch: # We are already tracking the remote branch break else: # We are not tracking the remote branch, so track it. try: sys.stdout.write(subprocess.check_output( ['git', 'checkout', '--track', 'origin/%s' % branch])) except subprocess.CalledProcessError: # Bail gracefully. raise SystemExit(1)
[ "Track", "the", "specified", "remote", "branch", "if", "it", "is", "not", "already", "tracked", "." ]
dolph/git-ready
python
https://github.com/dolph/git-ready/blob/4e237efcc9bff2ac7807a74d28fa68cd0081207b/git_ready.py#L23-L42
[ "def", "ensure_remote_branch_is_tracked", "(", "branch", ")", ":", "if", "branch", "==", "MASTER_BRANCH", ":", "# We don't need to explicitly track the master branch, so we're done.", "return", "# Ensure the specified branch is in the local branch list.", "output", "=", "subprocess", ".", "check_output", "(", "[", "'git'", ",", "'branch'", ",", "'--list'", "]", ")", "for", "line", "in", "output", ".", "split", "(", "'\\n'", ")", ":", "if", "line", ".", "strip", "(", ")", "==", "branch", ":", "# We are already tracking the remote branch", "break", "else", ":", "# We are not tracking the remote branch, so track it.", "try", ":", "sys", ".", "stdout", ".", "write", "(", "subprocess", ".", "check_output", "(", "[", "'git'", ",", "'checkout'", ",", "'--track'", ",", "'origin/%s'", "%", "branch", "]", ")", ")", "except", "subprocess", ".", "CalledProcessError", ":", "# Bail gracefully.", "raise", "SystemExit", "(", "1", ")" ]
4e237efcc9bff2ac7807a74d28fa68cd0081207b
valid
main
Checkout, update and branch from the specified branch.
git_ready.py
def main(branch): """Checkout, update and branch from the specified branch.""" try: # Ensure that we're in a git repository. This command is silent unless # you're not actually in a git repository, in which case, you receive a # "Not a git repository" error message. output = subprocess.check_output(['git', 'rev-parse']).decode('utf-8') sys.stdout.write(output) except subprocess.CalledProcessError: # Bail if we're not in a git repository. return # This behavior ensures a better user experience for those that aren't # intimately familiar with git. ensure_remote_branch_is_tracked(branch) # Switch to the specified branch and update it. subprocess.check_call(['git', 'checkout', '--quiet', branch]) # Pulling is always safe here, because we never commit to this branch. subprocess.check_call(['git', 'pull', '--quiet']) # Checkout the top commit in the branch, effectively going "untracked." subprocess.check_call(['git', 'checkout', '--quiet', '%s~0' % branch]) # Clean up the repository of Python cruft. Because we've just switched # branches and compiled Python files should not be version controlled, # there are likely leftover compiled Python files sitting on disk which may # confuse some tools, such as sqlalchemy-migrate. subprocess.check_call(['find', '.', '-name', '"*.pyc"', '-delete']) # For the sake of user experience, give some familiar output. print('Your branch is up to date with branch \'origin/%s\'.' % branch)
def main(branch): """Checkout, update and branch from the specified branch.""" try: # Ensure that we're in a git repository. This command is silent unless # you're not actually in a git repository, in which case, you receive a # "Not a git repository" error message. output = subprocess.check_output(['git', 'rev-parse']).decode('utf-8') sys.stdout.write(output) except subprocess.CalledProcessError: # Bail if we're not in a git repository. return # This behavior ensures a better user experience for those that aren't # intimately familiar with git. ensure_remote_branch_is_tracked(branch) # Switch to the specified branch and update it. subprocess.check_call(['git', 'checkout', '--quiet', branch]) # Pulling is always safe here, because we never commit to this branch. subprocess.check_call(['git', 'pull', '--quiet']) # Checkout the top commit in the branch, effectively going "untracked." subprocess.check_call(['git', 'checkout', '--quiet', '%s~0' % branch]) # Clean up the repository of Python cruft. Because we've just switched # branches and compiled Python files should not be version controlled, # there are likely leftover compiled Python files sitting on disk which may # confuse some tools, such as sqlalchemy-migrate. subprocess.check_call(['find', '.', '-name', '"*.pyc"', '-delete']) # For the sake of user experience, give some familiar output. print('Your branch is up to date with branch \'origin/%s\'.' % branch)
[ "Checkout", "update", "and", "branch", "from", "the", "specified", "branch", "." ]
dolph/git-ready
python
https://github.com/dolph/git-ready/blob/4e237efcc9bff2ac7807a74d28fa68cd0081207b/git_ready.py#L45-L77
[ "def", "main", "(", "branch", ")", ":", "try", ":", "# Ensure that we're in a git repository. This command is silent unless", "# you're not actually in a git repository, in which case, you receive a", "# \"Not a git repository\" error message.", "output", "=", "subprocess", ".", "check_output", "(", "[", "'git'", ",", "'rev-parse'", "]", ")", ".", "decode", "(", "'utf-8'", ")", "sys", ".", "stdout", ".", "write", "(", "output", ")", "except", "subprocess", ".", "CalledProcessError", ":", "# Bail if we're not in a git repository.", "return", "# This behavior ensures a better user experience for those that aren't", "# intimately familiar with git.", "ensure_remote_branch_is_tracked", "(", "branch", ")", "# Switch to the specified branch and update it.", "subprocess", ".", "check_call", "(", "[", "'git'", ",", "'checkout'", ",", "'--quiet'", ",", "branch", "]", ")", "# Pulling is always safe here, because we never commit to this branch.", "subprocess", ".", "check_call", "(", "[", "'git'", ",", "'pull'", ",", "'--quiet'", "]", ")", "# Checkout the top commit in the branch, effectively going \"untracked.\"", "subprocess", ".", "check_call", "(", "[", "'git'", ",", "'checkout'", ",", "'--quiet'", ",", "'%s~0'", "%", "branch", "]", ")", "# Clean up the repository of Python cruft. Because we've just switched", "# branches and compiled Python files should not be version controlled,", "# there are likely leftover compiled Python files sitting on disk which may", "# confuse some tools, such as sqlalchemy-migrate.", "subprocess", ".", "check_call", "(", "[", "'find'", ",", "'.'", ",", "'-name'", ",", "'\"*.pyc\"'", ",", "'-delete'", "]", ")", "# For the sake of user experience, give some familiar output.", "print", "(", "'Your branch is up to date with branch \\'origin/%s\\'.'", "%", "branch", ")" ]
4e237efcc9bff2ac7807a74d28fa68cd0081207b
valid
get_interface_name
Returns the interface name of the first not link_local and not loopback interface.
jackal/scripts/relaying.py
def get_interface_name(): """ Returns the interface name of the first not link_local and not loopback interface. """ interface_name = '' interfaces = psutil.net_if_addrs() for name, details in interfaces.items(): for detail in details: if detail.family == socket.AF_INET: ip_address = ipaddress.ip_address(detail.address) if not (ip_address.is_link_local or ip_address.is_loopback): interface_name = name break return interface_name
def get_interface_name(): """ Returns the interface name of the first not link_local and not loopback interface. """ interface_name = '' interfaces = psutil.net_if_addrs() for name, details in interfaces.items(): for detail in details: if detail.family == socket.AF_INET: ip_address = ipaddress.ip_address(detail.address) if not (ip_address.is_link_local or ip_address.is_loopback): interface_name = name break return interface_name
[ "Returns", "the", "interface", "name", "of", "the", "first", "not", "link_local", "and", "not", "loopback", "interface", "." ]
mwgielen/jackal
python
https://github.com/mwgielen/jackal/blob/7fe62732eb5194b7246215d5277fb37c398097bf/jackal/scripts/relaying.py#L179-L192
[ "def", "get_interface_name", "(", ")", ":", "interface_name", "=", "''", "interfaces", "=", "psutil", ".", "net_if_addrs", "(", ")", "for", "name", ",", "details", "in", "interfaces", ".", "items", "(", ")", ":", "for", "detail", "in", "details", ":", "if", "detail", ".", "family", "==", "socket", ".", "AF_INET", ":", "ip_address", "=", "ipaddress", ".", "ip_address", "(", "detail", ".", "address", ")", "if", "not", "(", "ip_address", ".", "is_link_local", "or", "ip_address", ".", "is_loopback", ")", ":", "interface_name", "=", "name", "break", "return", "interface_name" ]
7fe62732eb5194b7246215d5277fb37c398097bf
valid
Spoofing.load_targets
load_targets will load the services with smb signing disabled and if ldap is enabled the services with the ldap port open.
jackal/scripts/relaying.py
def load_targets(self): """ load_targets will load the services with smb signing disabled and if ldap is enabled the services with the ldap port open. """ ldap_services = [] if self.ldap: ldap_services = self.search.get_services(ports=[389], up=True) self.ldap_strings = ["ldap://{}".format(service.address) for service in ldap_services] self.services = self.search.get_services(tags=['smb_signing_disabled']) self.ips = [str(service.address) for service in self.services]
def load_targets(self): """ load_targets will load the services with smb signing disabled and if ldap is enabled the services with the ldap port open. """ ldap_services = [] if self.ldap: ldap_services = self.search.get_services(ports=[389], up=True) self.ldap_strings = ["ldap://{}".format(service.address) for service in ldap_services] self.services = self.search.get_services(tags=['smb_signing_disabled']) self.ips = [str(service.address) for service in self.services]
[ "load_targets", "will", "load", "the", "services", "with", "smb", "signing", "disabled", "and", "if", "ldap", "is", "enabled", "the", "services", "with", "the", "ldap", "port", "open", "." ]
mwgielen/jackal
python
https://github.com/mwgielen/jackal/blob/7fe62732eb5194b7246215d5277fb37c398097bf/jackal/scripts/relaying.py#L48-L58
[ "def", "load_targets", "(", "self", ")", ":", "ldap_services", "=", "[", "]", "if", "self", ".", "ldap", ":", "ldap_services", "=", "self", ".", "search", ".", "get_services", "(", "ports", "=", "[", "389", "]", ",", "up", "=", "True", ")", "self", ".", "ldap_strings", "=", "[", "\"ldap://{}\"", ".", "format", "(", "service", ".", "address", ")", "for", "service", "in", "ldap_services", "]", "self", ".", "services", "=", "self", ".", "search", ".", "get_services", "(", "tags", "=", "[", "'smb_signing_disabled'", "]", ")", "self", ".", "ips", "=", "[", "str", "(", "service", ".", "address", ")", "for", "service", "in", "self", ".", "services", "]" ]
7fe62732eb5194b7246215d5277fb37c398097bf
valid
Spoofing.write_targets
write_targets will write the contents of ips and ldap_strings to the targets_file.
jackal/scripts/relaying.py
def write_targets(self): """ write_targets will write the contents of ips and ldap_strings to the targets_file. """ if len(self.ldap_strings) == 0 and len(self.ips) == 0: print_notification("No targets left") if self.auto_exit: if self.notifier: self.notifier.stop() self.terminate_processes() with open(self.targets_file, 'w') as f: f.write('\n'.join(self.ldap_strings + self.ips))
def write_targets(self): """ write_targets will write the contents of ips and ldap_strings to the targets_file. """ if len(self.ldap_strings) == 0 and len(self.ips) == 0: print_notification("No targets left") if self.auto_exit: if self.notifier: self.notifier.stop() self.terminate_processes() with open(self.targets_file, 'w') as f: f.write('\n'.join(self.ldap_strings + self.ips))
[ "write_targets", "will", "write", "the", "contents", "of", "ips", "and", "ldap_strings", "to", "the", "targets_file", "." ]
mwgielen/jackal
python
https://github.com/mwgielen/jackal/blob/7fe62732eb5194b7246215d5277fb37c398097bf/jackal/scripts/relaying.py#L61-L73
[ "def", "write_targets", "(", "self", ")", ":", "if", "len", "(", "self", ".", "ldap_strings", ")", "==", "0", "and", "len", "(", "self", ".", "ips", ")", "==", "0", ":", "print_notification", "(", "\"No targets left\"", ")", "if", "self", ".", "auto_exit", ":", "if", "self", ".", "notifier", ":", "self", ".", "notifier", ".", "stop", "(", ")", "self", ".", "terminate_processes", "(", ")", "with", "open", "(", "self", ".", "targets_file", ",", "'w'", ")", "as", "f", ":", "f", ".", "write", "(", "'\\n'", ".", "join", "(", "self", ".", "ldap_strings", "+", "self", ".", "ips", ")", ")" ]
7fe62732eb5194b7246215d5277fb37c398097bf
valid
Spoofing.start_processes
Starts the ntlmrelayx.py and responder processes. Assumes you have these programs in your path.
jackal/scripts/relaying.py
def start_processes(self): """ Starts the ntlmrelayx.py and responder processes. Assumes you have these programs in your path. """ self.relay = subprocess.Popen(['ntlmrelayx.py', '-6', '-tf', self.targets_file, '-w', '-l', self.directory, '-of', self.output_file], cwd=self.directory) self.responder = subprocess.Popen(['responder', '-I', self.interface_name])
def start_processes(self): """ Starts the ntlmrelayx.py and responder processes. Assumes you have these programs in your path. """ self.relay = subprocess.Popen(['ntlmrelayx.py', '-6', '-tf', self.targets_file, '-w', '-l', self.directory, '-of', self.output_file], cwd=self.directory) self.responder = subprocess.Popen(['responder', '-I', self.interface_name])
[ "Starts", "the", "ntlmrelayx", ".", "py", "and", "responder", "processes", ".", "Assumes", "you", "have", "these", "programs", "in", "your", "path", "." ]
mwgielen/jackal
python
https://github.com/mwgielen/jackal/blob/7fe62732eb5194b7246215d5277fb37c398097bf/jackal/scripts/relaying.py#L76-L82
[ "def", "start_processes", "(", "self", ")", ":", "self", ".", "relay", "=", "subprocess", ".", "Popen", "(", "[", "'ntlmrelayx.py'", ",", "'-6'", ",", "'-tf'", ",", "self", ".", "targets_file", ",", "'-w'", ",", "'-l'", ",", "self", ".", "directory", ",", "'-of'", ",", "self", ".", "output_file", "]", ",", "cwd", "=", "self", ".", "directory", ")", "self", ".", "responder", "=", "subprocess", ".", "Popen", "(", "[", "'responder'", ",", "'-I'", ",", "self", ".", "interface_name", "]", ")" ]
7fe62732eb5194b7246215d5277fb37c398097bf
valid
Spoofing.callback
Function that gets called on each event from pyinotify.
jackal/scripts/relaying.py
def callback(self, event): """ Function that gets called on each event from pyinotify. """ # IN_CLOSE_WRITE -> 0x00000008 if event.mask == 0x00000008: if event.name.endswith('.json'): print_success("Ldapdomaindump file found") if event.name in ['domain_groups.json', 'domain_users.json']: if event.name == 'domain_groups.json': self.domain_groups_file = event.pathname if event.name == 'domain_users.json': self.domain_users_file = event.pathname if self.domain_groups_file and self.domain_users_file: print_success("Importing users") subprocess.Popen(['jk-import-domaindump', self.domain_groups_file, self.domain_users_file]) elif event.name == 'domain_computers.json': print_success("Importing computers") subprocess.Popen(['jk-import-domaindump', event.pathname]) # Ldap has been dumped, so remove the ldap targets. self.ldap_strings = [] self.write_targets() if event.name.endswith('_samhashes.sam'): host = event.name.replace('_samhashes.sam', '') # TODO import file. print_success("Secretsdump file, host ip: {}".format(host)) subprocess.Popen(['jk-import-secretsdump', event.pathname]) # Remove this system from this ip list. self.ips.remove(host) self.write_targets()
def callback(self, event): """ Function that gets called on each event from pyinotify. """ # IN_CLOSE_WRITE -> 0x00000008 if event.mask == 0x00000008: if event.name.endswith('.json'): print_success("Ldapdomaindump file found") if event.name in ['domain_groups.json', 'domain_users.json']: if event.name == 'domain_groups.json': self.domain_groups_file = event.pathname if event.name == 'domain_users.json': self.domain_users_file = event.pathname if self.domain_groups_file and self.domain_users_file: print_success("Importing users") subprocess.Popen(['jk-import-domaindump', self.domain_groups_file, self.domain_users_file]) elif event.name == 'domain_computers.json': print_success("Importing computers") subprocess.Popen(['jk-import-domaindump', event.pathname]) # Ldap has been dumped, so remove the ldap targets. self.ldap_strings = [] self.write_targets() if event.name.endswith('_samhashes.sam'): host = event.name.replace('_samhashes.sam', '') # TODO import file. print_success("Secretsdump file, host ip: {}".format(host)) subprocess.Popen(['jk-import-secretsdump', event.pathname]) # Remove this system from this ip list. self.ips.remove(host) self.write_targets()
[ "Function", "that", "gets", "called", "on", "each", "event", "from", "pyinotify", "." ]
mwgielen/jackal
python
https://github.com/mwgielen/jackal/blob/7fe62732eb5194b7246215d5277fb37c398097bf/jackal/scripts/relaying.py#L85-L117
[ "def", "callback", "(", "self", ",", "event", ")", ":", "# IN_CLOSE_WRITE -> 0x00000008", "if", "event", ".", "mask", "==", "0x00000008", ":", "if", "event", ".", "name", ".", "endswith", "(", "'.json'", ")", ":", "print_success", "(", "\"Ldapdomaindump file found\"", ")", "if", "event", ".", "name", "in", "[", "'domain_groups.json'", ",", "'domain_users.json'", "]", ":", "if", "event", ".", "name", "==", "'domain_groups.json'", ":", "self", ".", "domain_groups_file", "=", "event", ".", "pathname", "if", "event", ".", "name", "==", "'domain_users.json'", ":", "self", ".", "domain_users_file", "=", "event", ".", "pathname", "if", "self", ".", "domain_groups_file", "and", "self", ".", "domain_users_file", ":", "print_success", "(", "\"Importing users\"", ")", "subprocess", ".", "Popen", "(", "[", "'jk-import-domaindump'", ",", "self", ".", "domain_groups_file", ",", "self", ".", "domain_users_file", "]", ")", "elif", "event", ".", "name", "==", "'domain_computers.json'", ":", "print_success", "(", "\"Importing computers\"", ")", "subprocess", ".", "Popen", "(", "[", "'jk-import-domaindump'", ",", "event", ".", "pathname", "]", ")", "# Ldap has been dumped, so remove the ldap targets.", "self", ".", "ldap_strings", "=", "[", "]", "self", ".", "write_targets", "(", ")", "if", "event", ".", "name", ".", "endswith", "(", "'_samhashes.sam'", ")", ":", "host", "=", "event", ".", "name", ".", "replace", "(", "'_samhashes.sam'", ",", "''", ")", "# TODO import file.", "print_success", "(", "\"Secretsdump file, host ip: {}\"", ".", "format", "(", "host", ")", ")", "subprocess", ".", "Popen", "(", "[", "'jk-import-secretsdump'", ",", "event", ".", "pathname", "]", ")", "# Remove this system from this ip list.", "self", ".", "ips", ".", "remove", "(", "host", ")", "self", ".", "write_targets", "(", ")" ]
7fe62732eb5194b7246215d5277fb37c398097bf
valid
Spoofing.watch
Watches directory for changes
jackal/scripts/relaying.py
def watch(self): """ Watches directory for changes """ wm = pyinotify.WatchManager() self.notifier = pyinotify.Notifier(wm, default_proc_fun=self.callback) wm.add_watch(self.directory, pyinotify.ALL_EVENTS) try: self.notifier.loop() except (KeyboardInterrupt, AttributeError): print_notification("Stopping") finally: self.notifier.stop() self.terminate_processes()
def watch(self): """ Watches directory for changes """ wm = pyinotify.WatchManager() self.notifier = pyinotify.Notifier(wm, default_proc_fun=self.callback) wm.add_watch(self.directory, pyinotify.ALL_EVENTS) try: self.notifier.loop() except (KeyboardInterrupt, AttributeError): print_notification("Stopping") finally: self.notifier.stop() self.terminate_processes()
[ "Watches", "directory", "for", "changes" ]
mwgielen/jackal
python
https://github.com/mwgielen/jackal/blob/7fe62732eb5194b7246215d5277fb37c398097bf/jackal/scripts/relaying.py#L120-L133
[ "def", "watch", "(", "self", ")", ":", "wm", "=", "pyinotify", ".", "WatchManager", "(", ")", "self", ".", "notifier", "=", "pyinotify", ".", "Notifier", "(", "wm", ",", "default_proc_fun", "=", "self", ".", "callback", ")", "wm", ".", "add_watch", "(", "self", ".", "directory", ",", "pyinotify", ".", "ALL_EVENTS", ")", "try", ":", "self", ".", "notifier", ".", "loop", "(", ")", "except", "(", "KeyboardInterrupt", ",", "AttributeError", ")", ":", "print_notification", "(", "\"Stopping\"", ")", "finally", ":", "self", ".", "notifier", ".", "stop", "(", ")", "self", ".", "terminate_processes", "(", ")" ]
7fe62732eb5194b7246215d5277fb37c398097bf
valid
Spoofing.terminate_processes
Terminate the processes.
jackal/scripts/relaying.py
def terminate_processes(self): """ Terminate the processes. """ if self.relay: self.relay.terminate() if self.responder: self.responder.terminate()
def terminate_processes(self): """ Terminate the processes. """ if self.relay: self.relay.terminate() if self.responder: self.responder.terminate()
[ "Terminate", "the", "processes", "." ]
mwgielen/jackal
python
https://github.com/mwgielen/jackal/blob/7fe62732eb5194b7246215d5277fb37c398097bf/jackal/scripts/relaying.py#L136-L143
[ "def", "terminate_processes", "(", "self", ")", ":", "if", "self", ".", "relay", ":", "self", ".", "relay", ".", "terminate", "(", ")", "if", "self", ".", "responder", ":", "self", ".", "responder", ".", "terminate", "(", ")" ]
7fe62732eb5194b7246215d5277fb37c398097bf
valid
Spoofing.wait
This function waits for the relay and responding processes to exit. Captures KeyboardInterrupt to shutdown these processes.
jackal/scripts/relaying.py
def wait(self): """ This function waits for the relay and responding processes to exit. Captures KeyboardInterrupt to shutdown these processes. """ try: self.relay.wait() self.responder.wait() except KeyboardInterrupt: print_notification("Stopping") finally: self.terminate_processes()
def wait(self): """ This function waits for the relay and responding processes to exit. Captures KeyboardInterrupt to shutdown these processes. """ try: self.relay.wait() self.responder.wait() except KeyboardInterrupt: print_notification("Stopping") finally: self.terminate_processes()
[ "This", "function", "waits", "for", "the", "relay", "and", "responding", "processes", "to", "exit", ".", "Captures", "KeyboardInterrupt", "to", "shutdown", "these", "processes", "." ]
mwgielen/jackal
python
https://github.com/mwgielen/jackal/blob/7fe62732eb5194b7246215d5277fb37c398097bf/jackal/scripts/relaying.py#L146-L157
[ "def", "wait", "(", "self", ")", ":", "try", ":", "self", ".", "relay", ".", "wait", "(", ")", "self", ".", "responder", ".", "wait", "(", ")", "except", "KeyboardInterrupt", ":", "print_notification", "(", "\"Stopping\"", ")", "finally", ":", "self", ".", "terminate_processes", "(", ")" ]
7fe62732eb5194b7246215d5277fb37c398097bf
valid
QueryPrototype.getAnnotations
Retrieve annotations from the query provider :param targets: The CTS URN(s) to query as the target of annotations :type targets: [MyCapytain.common.reference.URN], URN or None :param wildcard: Wildcard specifier for how to match the URN :type wildcard: str :param include: URI(s) of Annotation types to include in the results :type include: list(str) :param exclude: URI(s) of Annotation types to include in the results :type exclude: list(str) :param limit: The max number of results to return (Default is None for no limit) :type limit: int :param start: the starting record to return (Default is 1) :type start: int :param expand: Flag to state whether Annotations are expanded (Default is False) :type expand: bool :return: Tuple representing the query results. The first element The first element is the number of total Annotations found The second element is the list of Annotations :rtype: (int, list(Annotation) .. note:: Wildcard should be one of the following value - '.' to match exact, - '.%' to match exact plus lower in the hierarchy - '%.' to match exact + higher in the hierarchy - '-' to match in the range - '%.%' to match all
flask_nemo/query/proto.py
def getAnnotations(self, targets, wildcard=".", include=None, exclude=None, limit=None, start=1, expand=False, **kwargs): """ Retrieve annotations from the query provider :param targets: The CTS URN(s) to query as the target of annotations :type targets: [MyCapytain.common.reference.URN], URN or None :param wildcard: Wildcard specifier for how to match the URN :type wildcard: str :param include: URI(s) of Annotation types to include in the results :type include: list(str) :param exclude: URI(s) of Annotation types to include in the results :type exclude: list(str) :param limit: The max number of results to return (Default is None for no limit) :type limit: int :param start: the starting record to return (Default is 1) :type start: int :param expand: Flag to state whether Annotations are expanded (Default is False) :type expand: bool :return: Tuple representing the query results. The first element The first element is the number of total Annotations found The second element is the list of Annotations :rtype: (int, list(Annotation) .. note:: Wildcard should be one of the following value - '.' to match exact, - '.%' to match exact plus lower in the hierarchy - '%.' to match exact + higher in the hierarchy - '-' to match in the range - '%.%' to match all """ return 0, []
def getAnnotations(self, targets, wildcard=".", include=None, exclude=None, limit=None, start=1, expand=False, **kwargs): """ Retrieve annotations from the query provider :param targets: The CTS URN(s) to query as the target of annotations :type targets: [MyCapytain.common.reference.URN], URN or None :param wildcard: Wildcard specifier for how to match the URN :type wildcard: str :param include: URI(s) of Annotation types to include in the results :type include: list(str) :param exclude: URI(s) of Annotation types to include in the results :type exclude: list(str) :param limit: The max number of results to return (Default is None for no limit) :type limit: int :param start: the starting record to return (Default is 1) :type start: int :param expand: Flag to state whether Annotations are expanded (Default is False) :type expand: bool :return: Tuple representing the query results. The first element The first element is the number of total Annotations found The second element is the list of Annotations :rtype: (int, list(Annotation) .. note:: Wildcard should be one of the following value - '.' to match exact, - '.%' to match exact plus lower in the hierarchy - '%.' to match exact + higher in the hierarchy - '-' to match in the range - '%.%' to match all """ return 0, []
[ "Retrieve", "annotations", "from", "the", "query", "provider" ]
Capitains/flask-capitains-nemo
python
https://github.com/Capitains/flask-capitains-nemo/blob/8d91f2c05b925a6c8ea8c997baf698c87257bc58/flask_nemo/query/proto.py#L23-L58
[ "def", "getAnnotations", "(", "self", ",", "targets", ",", "wildcard", "=", "\".\"", ",", "include", "=", "None", ",", "exclude", "=", "None", ",", "limit", "=", "None", ",", "start", "=", "1", ",", "expand", "=", "False", ",", "*", "*", "kwargs", ")", ":", "return", "0", ",", "[", "]" ]
8d91f2c05b925a6c8ea8c997baf698c87257bc58
valid
Breadcrumb.render
Make breadcrumbs for a route :param kwargs: dictionary of named arguments used to construct the view :type kwargs: dict :return: List of dict items the view can use to construct the link. :rtype: {str: list({ "link": str, "title", str, "args", dict})}
flask_nemo/plugins/default.py
def render(self, **kwargs): """ Make breadcrumbs for a route :param kwargs: dictionary of named arguments used to construct the view :type kwargs: dict :return: List of dict items the view can use to construct the link. :rtype: {str: list({ "link": str, "title", str, "args", dict})} """ breadcrumbs = [] # this is the list of items we want to accumulate in the breadcrumb trail. # item[0] is the key into the kwargs["url"] object and item[1] is the name of the route # setting a route name to None means that it's needed to construct the route of the next item in the list # but shouldn't be included in the list itself (this is currently the case for work -- # at some point we probably should include work in the navigation) breadcrumbs = [] if "collections" in kwargs: breadcrumbs = [{ "title": "Text Collections", "link": ".r_collections", "args": {} }] if "parents" in kwargs["collections"]: breadcrumbs += [ { "title": parent["label"], "link": ".r_collection_semantic", "args": { "objectId": parent["id"], "semantic": f_slugify(parent["label"]), }, } for parent in kwargs["collections"]["parents"] ][::-1] if "current" in kwargs["collections"]: breadcrumbs.append({ "title": kwargs["collections"]["current"]["label"], "link": None, "args": {} }) # don't link the last item in the trail if len(breadcrumbs) > 0: breadcrumbs[-1]["link"] = None return {"breadcrumbs": breadcrumbs}
def render(self, **kwargs): """ Make breadcrumbs for a route :param kwargs: dictionary of named arguments used to construct the view :type kwargs: dict :return: List of dict items the view can use to construct the link. :rtype: {str: list({ "link": str, "title", str, "args", dict})} """ breadcrumbs = [] # this is the list of items we want to accumulate in the breadcrumb trail. # item[0] is the key into the kwargs["url"] object and item[1] is the name of the route # setting a route name to None means that it's needed to construct the route of the next item in the list # but shouldn't be included in the list itself (this is currently the case for work -- # at some point we probably should include work in the navigation) breadcrumbs = [] if "collections" in kwargs: breadcrumbs = [{ "title": "Text Collections", "link": ".r_collections", "args": {} }] if "parents" in kwargs["collections"]: breadcrumbs += [ { "title": parent["label"], "link": ".r_collection_semantic", "args": { "objectId": parent["id"], "semantic": f_slugify(parent["label"]), }, } for parent in kwargs["collections"]["parents"] ][::-1] if "current" in kwargs["collections"]: breadcrumbs.append({ "title": kwargs["collections"]["current"]["label"], "link": None, "args": {} }) # don't link the last item in the trail if len(breadcrumbs) > 0: breadcrumbs[-1]["link"] = None return {"breadcrumbs": breadcrumbs}
[ "Make", "breadcrumbs", "for", "a", "route" ]
Capitains/flask-capitains-nemo
python
https://github.com/Capitains/flask-capitains-nemo/blob/8d91f2c05b925a6c8ea8c997baf698c87257bc58/flask_nemo/plugins/default.py#L17-L63
[ "def", "render", "(", "self", ",", "*", "*", "kwargs", ")", ":", "breadcrumbs", "=", "[", "]", "# this is the list of items we want to accumulate in the breadcrumb trail.", "# item[0] is the key into the kwargs[\"url\"] object and item[1] is the name of the route", "# setting a route name to None means that it's needed to construct the route of the next item in the list", "# but shouldn't be included in the list itself (this is currently the case for work --", "# at some point we probably should include work in the navigation)", "breadcrumbs", "=", "[", "]", "if", "\"collections\"", "in", "kwargs", ":", "breadcrumbs", "=", "[", "{", "\"title\"", ":", "\"Text Collections\"", ",", "\"link\"", ":", "\".r_collections\"", ",", "\"args\"", ":", "{", "}", "}", "]", "if", "\"parents\"", "in", "kwargs", "[", "\"collections\"", "]", ":", "breadcrumbs", "+=", "[", "{", "\"title\"", ":", "parent", "[", "\"label\"", "]", ",", "\"link\"", ":", "\".r_collection_semantic\"", ",", "\"args\"", ":", "{", "\"objectId\"", ":", "parent", "[", "\"id\"", "]", ",", "\"semantic\"", ":", "f_slugify", "(", "parent", "[", "\"label\"", "]", ")", ",", "}", ",", "}", "for", "parent", "in", "kwargs", "[", "\"collections\"", "]", "[", "\"parents\"", "]", "]", "[", ":", ":", "-", "1", "]", "if", "\"current\"", "in", "kwargs", "[", "\"collections\"", "]", ":", "breadcrumbs", ".", "append", "(", "{", "\"title\"", ":", "kwargs", "[", "\"collections\"", "]", "[", "\"current\"", "]", "[", "\"label\"", "]", ",", "\"link\"", ":", "None", ",", "\"args\"", ":", "{", "}", "}", ")", "# don't link the last item in the trail", "if", "len", "(", "breadcrumbs", ")", ">", "0", ":", "breadcrumbs", "[", "-", "1", "]", "[", "\"link\"", "]", "=", "None", "return", "{", "\"breadcrumbs\"", ":", "breadcrumbs", "}" ]
8d91f2c05b925a6c8ea8c997baf698c87257bc58
valid
main
This function obtains hosts from core and starts a nessus scan on these hosts. The nessus tag is appended to the host tags.
jackal/scripts/nessus.py
def main(): """ This function obtains hosts from core and starts a nessus scan on these hosts. The nessus tag is appended to the host tags. """ config = Config() core = HostSearch() hosts = core.get_hosts(tags=['!nessus'], up=True) hosts = [host for host in hosts] host_ips = ",".join([str(host.address) for host in hosts]) url = config.get('nessus', 'host') access = config.get('nessus', 'access_key') secret = config.get('nessus', 'secret_key') template_name = config.get('nessus', 'template_name') nessus = Nessus(access, secret, url, template_name) scan_id = nessus.create_scan(host_ips) nessus.start_scan(scan_id) for host in hosts: host.add_tag('nessus') host.save() Logger().log("nessus", "Nessus scan started on {} hosts".format(len(hosts)), {'scanned_hosts': len(hosts)})
def main(): """ This function obtains hosts from core and starts a nessus scan on these hosts. The nessus tag is appended to the host tags. """ config = Config() core = HostSearch() hosts = core.get_hosts(tags=['!nessus'], up=True) hosts = [host for host in hosts] host_ips = ",".join([str(host.address) for host in hosts]) url = config.get('nessus', 'host') access = config.get('nessus', 'access_key') secret = config.get('nessus', 'secret_key') template_name = config.get('nessus', 'template_name') nessus = Nessus(access, secret, url, template_name) scan_id = nessus.create_scan(host_ips) nessus.start_scan(scan_id) for host in hosts: host.add_tag('nessus') host.save() Logger().log("nessus", "Nessus scan started on {} hosts".format(len(hosts)), {'scanned_hosts': len(hosts)})
[ "This", "function", "obtains", "hosts", "from", "core", "and", "starts", "a", "nessus", "scan", "on", "these", "hosts", ".", "The", "nessus", "tag", "is", "appended", "to", "the", "host", "tags", "." ]
mwgielen/jackal
python
https://github.com/mwgielen/jackal/blob/7fe62732eb5194b7246215d5277fb37c398097bf/jackal/scripts/nessus.py#L61-L86
[ "def", "main", "(", ")", ":", "config", "=", "Config", "(", ")", "core", "=", "HostSearch", "(", ")", "hosts", "=", "core", ".", "get_hosts", "(", "tags", "=", "[", "'!nessus'", "]", ",", "up", "=", "True", ")", "hosts", "=", "[", "host", "for", "host", "in", "hosts", "]", "host_ips", "=", "\",\"", ".", "join", "(", "[", "str", "(", "host", ".", "address", ")", "for", "host", "in", "hosts", "]", ")", "url", "=", "config", ".", "get", "(", "'nessus'", ",", "'host'", ")", "access", "=", "config", ".", "get", "(", "'nessus'", ",", "'access_key'", ")", "secret", "=", "config", ".", "get", "(", "'nessus'", ",", "'secret_key'", ")", "template_name", "=", "config", ".", "get", "(", "'nessus'", ",", "'template_name'", ")", "nessus", "=", "Nessus", "(", "access", ",", "secret", ",", "url", ",", "template_name", ")", "scan_id", "=", "nessus", ".", "create_scan", "(", "host_ips", ")", "nessus", ".", "start_scan", "(", "scan_id", ")", "for", "host", "in", "hosts", ":", "host", ".", "add_tag", "(", "'nessus'", ")", "host", ".", "save", "(", ")", "Logger", "(", ")", ".", "log", "(", "\"nessus\"", ",", "\"Nessus scan started on {} hosts\"", ".", "format", "(", "len", "(", "hosts", ")", ")", ",", "{", "'scanned_hosts'", ":", "len", "(", "hosts", ")", "}", ")" ]
7fe62732eb5194b7246215d5277fb37c398097bf
valid
Nessus.get_template_uuid
Retrieves the uuid of the given template name.
jackal/scripts/nessus.py
def get_template_uuid(self): """ Retrieves the uuid of the given template name. """ response = requests.get(self.url + 'editor/scan/templates', headers=self.headers, verify=False) templates = json.loads(response.text) for template in templates['templates']: if template['name'] == self.template_name: return template['uuid']
def get_template_uuid(self): """ Retrieves the uuid of the given template name. """ response = requests.get(self.url + 'editor/scan/templates', headers=self.headers, verify=False) templates = json.loads(response.text) for template in templates['templates']: if template['name'] == self.template_name: return template['uuid']
[ "Retrieves", "the", "uuid", "of", "the", "given", "template", "name", "." ]
mwgielen/jackal
python
https://github.com/mwgielen/jackal/blob/7fe62732eb5194b7246215d5277fb37c398097bf/jackal/scripts/nessus.py#L24-L32
[ "def", "get_template_uuid", "(", "self", ")", ":", "response", "=", "requests", ".", "get", "(", "self", ".", "url", "+", "'editor/scan/templates'", ",", "headers", "=", "self", ".", "headers", ",", "verify", "=", "False", ")", "templates", "=", "json", ".", "loads", "(", "response", ".", "text", ")", "for", "template", "in", "templates", "[", "'templates'", "]", ":", "if", "template", "[", "'name'", "]", "==", "self", ".", "template_name", ":", "return", "template", "[", "'uuid'", "]" ]
7fe62732eb5194b7246215d5277fb37c398097bf
valid
Nessus.create_scan
Creates a scan with the given host ips Returns the scan id of the created object.
jackal/scripts/nessus.py
def create_scan(self, host_ips): """ Creates a scan with the given host ips Returns the scan id of the created object. """ now = datetime.datetime.now() data = { "uuid": self.get_template_uuid(), "settings": { "name": "jackal-" + now.strftime("%Y-%m-%d %H:%M"), "text_targets": host_ips } } response = requests.post(self.url + 'scans', data=json.dumps(data), verify=False, headers=self.headers) if response: result = json.loads(response.text) return result['scan']['id']
def create_scan(self, host_ips): """ Creates a scan with the given host ips Returns the scan id of the created object. """ now = datetime.datetime.now() data = { "uuid": self.get_template_uuid(), "settings": { "name": "jackal-" + now.strftime("%Y-%m-%d %H:%M"), "text_targets": host_ips } } response = requests.post(self.url + 'scans', data=json.dumps(data), verify=False, headers=self.headers) if response: result = json.loads(response.text) return result['scan']['id']
[ "Creates", "a", "scan", "with", "the", "given", "host", "ips", "Returns", "the", "scan", "id", "of", "the", "created", "object", "." ]
mwgielen/jackal
python
https://github.com/mwgielen/jackal/blob/7fe62732eb5194b7246215d5277fb37c398097bf/jackal/scripts/nessus.py#L35-L51
[ "def", "create_scan", "(", "self", ",", "host_ips", ")", ":", "now", "=", "datetime", ".", "datetime", ".", "now", "(", ")", "data", "=", "{", "\"uuid\"", ":", "self", ".", "get_template_uuid", "(", ")", ",", "\"settings\"", ":", "{", "\"name\"", ":", "\"jackal-\"", "+", "now", ".", "strftime", "(", "\"%Y-%m-%d %H:%M\"", ")", ",", "\"text_targets\"", ":", "host_ips", "}", "}", "response", "=", "requests", ".", "post", "(", "self", ".", "url", "+", "'scans'", ",", "data", "=", "json", ".", "dumps", "(", "data", ")", ",", "verify", "=", "False", ",", "headers", "=", "self", ".", "headers", ")", "if", "response", ":", "result", "=", "json", ".", "loads", "(", "response", ".", "text", ")", "return", "result", "[", "'scan'", "]", "[", "'id'", "]" ]
7fe62732eb5194b7246215d5277fb37c398097bf
valid
Nessus.start_scan
Starts the scan identified by the scan_id.s
jackal/scripts/nessus.py
def start_scan(self, scan_id): """ Starts the scan identified by the scan_id.s """ requests.post(self.url + 'scans/{}/launch'.format(scan_id), verify=False, headers=self.headers)
def start_scan(self, scan_id): """ Starts the scan identified by the scan_id.s """ requests.post(self.url + 'scans/{}/launch'.format(scan_id), verify=False, headers=self.headers)
[ "Starts", "the", "scan", "identified", "by", "the", "scan_id", ".", "s" ]
mwgielen/jackal
python
https://github.com/mwgielen/jackal/blob/7fe62732eb5194b7246215d5277fb37c398097bf/jackal/scripts/nessus.py#L54-L58
[ "def", "start_scan", "(", "self", ",", "scan_id", ")", ":", "requests", ".", "post", "(", "self", ".", "url", "+", "'scans/{}/launch'", ".", "format", "(", "scan_id", ")", ",", "verify", "=", "False", ",", "headers", "=", "self", ".", "headers", ")" ]
7fe62732eb5194b7246215d5277fb37c398097bf
valid
stable
r""" rankings[(a, n)] = partner that a ranked n^th >>> from itertools import product >>> A = ['1','2','3','4','5','6'] >>> B = ['a','b','c','d','e','f'] >>> rank = dict() >>> rank['1'] = (1,4,2,6,5,3) >>> rank['2'] = (3,1,2,4,5,6) >>> rank['3'] = (1,2,4,3,5,6) >>> rank['4'] = (4,1,2,5,3,6) >>> rank['5'] = (1,2,3,6,4,5) >>> rank['6'] = (2,1,4,3,5,6) >>> rank['a'] = (1,2,3,4,5,6) >>> rank['b'] = (2,1,4,3,5,6) >>> rank['c'] = (5,1,6,3,2,4) >>> rank['d'] = (1,3,2,5,4,6) >>> rank['e'] = (4,1,3,6,2,5) >>> rank['f'] = (2,1,4,3,6,5) >>> Arankings = dict(((a, rank[a][b_]), B[b_]) for (a, b_) in product(A, range(0, 6))) >>> Brankings = dict(((b, rank[b][a_]), A[a_]) for (b, a_) in product(B, range(0, 6))) >>> rankings = Arankings >>> rankings.update(Brankings) >>> stable(rankings, A, B) [('1', 'a'), ('2', 'b'), ('3', 'd'), ('4', 'f'), ('5', 'c'), ('6', 'e')]
pysyncml/smp.py
def stable(rankings, A, B): r""" rankings[(a, n)] = partner that a ranked n^th >>> from itertools import product >>> A = ['1','2','3','4','5','6'] >>> B = ['a','b','c','d','e','f'] >>> rank = dict() >>> rank['1'] = (1,4,2,6,5,3) >>> rank['2'] = (3,1,2,4,5,6) >>> rank['3'] = (1,2,4,3,5,6) >>> rank['4'] = (4,1,2,5,3,6) >>> rank['5'] = (1,2,3,6,4,5) >>> rank['6'] = (2,1,4,3,5,6) >>> rank['a'] = (1,2,3,4,5,6) >>> rank['b'] = (2,1,4,3,5,6) >>> rank['c'] = (5,1,6,3,2,4) >>> rank['d'] = (1,3,2,5,4,6) >>> rank['e'] = (4,1,3,6,2,5) >>> rank['f'] = (2,1,4,3,6,5) >>> Arankings = dict(((a, rank[a][b_]), B[b_]) for (a, b_) in product(A, range(0, 6))) >>> Brankings = dict(((b, rank[b][a_]), A[a_]) for (b, a_) in product(B, range(0, 6))) >>> rankings = Arankings >>> rankings.update(Brankings) >>> stable(rankings, A, B) [('1', 'a'), ('2', 'b'), ('3', 'd'), ('4', 'f'), ('5', 'c'), ('6', 'e')] """ partners = dict((a, (rankings[(a, 1)], 1)) for a in A) is_stable = False # whether the current pairing (given by `partners`) is stable while is_stable == False: is_stable = True for b in B: is_paired = False # whether b has a pair which b ranks <= to n for n in range(1, len(B) + 1): a = rankings[(b, n)] a_partner, a_n = partners[a] if a_partner == b: if is_paired: is_stable = False partners[a] = (rankings[(a, a_n + 1)], a_n + 1) else: is_paired = True return sorted((a, b) for (a, (b, n)) in partners.items())
def stable(rankings, A, B): r""" rankings[(a, n)] = partner that a ranked n^th >>> from itertools import product >>> A = ['1','2','3','4','5','6'] >>> B = ['a','b','c','d','e','f'] >>> rank = dict() >>> rank['1'] = (1,4,2,6,5,3) >>> rank['2'] = (3,1,2,4,5,6) >>> rank['3'] = (1,2,4,3,5,6) >>> rank['4'] = (4,1,2,5,3,6) >>> rank['5'] = (1,2,3,6,4,5) >>> rank['6'] = (2,1,4,3,5,6) >>> rank['a'] = (1,2,3,4,5,6) >>> rank['b'] = (2,1,4,3,5,6) >>> rank['c'] = (5,1,6,3,2,4) >>> rank['d'] = (1,3,2,5,4,6) >>> rank['e'] = (4,1,3,6,2,5) >>> rank['f'] = (2,1,4,3,6,5) >>> Arankings = dict(((a, rank[a][b_]), B[b_]) for (a, b_) in product(A, range(0, 6))) >>> Brankings = dict(((b, rank[b][a_]), A[a_]) for (b, a_) in product(B, range(0, 6))) >>> rankings = Arankings >>> rankings.update(Brankings) >>> stable(rankings, A, B) [('1', 'a'), ('2', 'b'), ('3', 'd'), ('4', 'f'), ('5', 'c'), ('6', 'e')] """ partners = dict((a, (rankings[(a, 1)], 1)) for a in A) is_stable = False # whether the current pairing (given by `partners`) is stable while is_stable == False: is_stable = True for b in B: is_paired = False # whether b has a pair which b ranks <= to n for n in range(1, len(B) + 1): a = rankings[(b, n)] a_partner, a_n = partners[a] if a_partner == b: if is_paired: is_stable = False partners[a] = (rankings[(a, a_n + 1)], a_n + 1) else: is_paired = True return sorted((a, b) for (a, (b, n)) in partners.items())
[ "r", "rankings", "[", "(", "a", "n", ")", "]", "=", "partner", "that", "a", "ranked", "n^th" ]
metagriffin/pysyncml
python
https://github.com/metagriffin/pysyncml/blob/a583fe0dbffa8b24e5a3e151524f84868b2382bb/pysyncml/smp.py#L81-L124
[ "def", "stable", "(", "rankings", ",", "A", ",", "B", ")", ":", "partners", "=", "dict", "(", "(", "a", ",", "(", "rankings", "[", "(", "a", ",", "1", ")", "]", ",", "1", ")", ")", "for", "a", "in", "A", ")", "is_stable", "=", "False", "# whether the current pairing (given by `partners`) is stable", "while", "is_stable", "==", "False", ":", "is_stable", "=", "True", "for", "b", "in", "B", ":", "is_paired", "=", "False", "# whether b has a pair which b ranks <= to n", "for", "n", "in", "range", "(", "1", ",", "len", "(", "B", ")", "+", "1", ")", ":", "a", "=", "rankings", "[", "(", "b", ",", "n", ")", "]", "a_partner", ",", "a_n", "=", "partners", "[", "a", "]", "if", "a_partner", "==", "b", ":", "if", "is_paired", ":", "is_stable", "=", "False", "partners", "[", "a", "]", "=", "(", "rankings", "[", "(", "a", ",", "a_n", "+", "1", ")", "]", ",", "a_n", "+", "1", ")", "else", ":", "is_paired", "=", "True", "return", "sorted", "(", "(", "a", ",", "b", ")", "for", "(", "a", ",", "(", "b", ",", "n", ")", ")", "in", "partners", ".", "items", "(", ")", ")" ]
a583fe0dbffa8b24e5a3e151524f84868b2382bb
valid
cmpToDataStore_uri
Bases the comparison of the datastores on URI alone.
pysyncml/matcher.py
def cmpToDataStore_uri(base, ds1, ds2): '''Bases the comparison of the datastores on URI alone.''' ret = difflib.get_close_matches(base.uri, [ds1.uri, ds2.uri], 1, cutoff=0.5) if len(ret) <= 0: return 0 if ret[0] == ds1.uri: return -1 return 1
def cmpToDataStore_uri(base, ds1, ds2): '''Bases the comparison of the datastores on URI alone.''' ret = difflib.get_close_matches(base.uri, [ds1.uri, ds2.uri], 1, cutoff=0.5) if len(ret) <= 0: return 0 if ret[0] == ds1.uri: return -1 return 1
[ "Bases", "the", "comparison", "of", "the", "datastores", "on", "URI", "alone", "." ]
metagriffin/pysyncml
python
https://github.com/metagriffin/pysyncml/blob/a583fe0dbffa8b24e5a3e151524f84868b2382bb/pysyncml/matcher.py#L81-L88
[ "def", "cmpToDataStore_uri", "(", "base", ",", "ds1", ",", "ds2", ")", ":", "ret", "=", "difflib", ".", "get_close_matches", "(", "base", ".", "uri", ",", "[", "ds1", ".", "uri", ",", "ds2", ".", "uri", "]", ",", "1", ",", "cutoff", "=", "0.5", ")", "if", "len", "(", "ret", ")", "<=", "0", ":", "return", "0", "if", "ret", "[", "0", "]", "==", "ds1", ".", "uri", ":", "return", "-", "1", "return", "1" ]
a583fe0dbffa8b24e5a3e151524f84868b2382bb
valid
JackalDoc.add_tag
Adds a tag to the list of tags and makes sure the result list contains only unique results.
jackal/documents.py
def add_tag(self, tag): """ Adds a tag to the list of tags and makes sure the result list contains only unique results. """ self.tags = list(set(self.tags or []) | set([tag]))
def add_tag(self, tag): """ Adds a tag to the list of tags and makes sure the result list contains only unique results. """ self.tags = list(set(self.tags or []) | set([tag]))
[ "Adds", "a", "tag", "to", "the", "list", "of", "tags", "and", "makes", "sure", "the", "result", "list", "contains", "only", "unique", "results", "." ]
mwgielen/jackal
python
https://github.com/mwgielen/jackal/blob/7fe62732eb5194b7246215d5277fb37c398097bf/jackal/documents.py#L24-L28
[ "def", "add_tag", "(", "self", ",", "tag", ")", ":", "self", ".", "tags", "=", "list", "(", "set", "(", "self", ".", "tags", "or", "[", "]", ")", "|", "set", "(", "[", "tag", "]", ")", ")" ]
7fe62732eb5194b7246215d5277fb37c398097bf
valid
JackalDoc.remove_tag
Removes a tag from this object
jackal/documents.py
def remove_tag(self, tag): """ Removes a tag from this object """ self.tags = list(set(self.tags or []) - set([tag]))
def remove_tag(self, tag): """ Removes a tag from this object """ self.tags = list(set(self.tags or []) - set([tag]))
[ "Removes", "a", "tag", "from", "this", "object" ]
mwgielen/jackal
python
https://github.com/mwgielen/jackal/blob/7fe62732eb5194b7246215d5277fb37c398097bf/jackal/documents.py#L31-L35
[ "def", "remove_tag", "(", "self", ",", "tag", ")", ":", "self", ".", "tags", "=", "list", "(", "set", "(", "self", ".", "tags", "or", "[", "]", ")", "-", "set", "(", "[", "tag", "]", ")", ")" ]
7fe62732eb5194b7246215d5277fb37c398097bf
valid
JackalDoc.to_dict
Returns the result as a dictionary, provide the include_meta flag to als show information like index and doctype.
jackal/documents.py
def to_dict(self, include_meta=False): """ Returns the result as a dictionary, provide the include_meta flag to als show information like index and doctype. """ result = super(JackalDoc, self).to_dict(include_meta=include_meta) if include_meta: source = result.pop('_source') return {**result, **source} else: return result
def to_dict(self, include_meta=False): """ Returns the result as a dictionary, provide the include_meta flag to als show information like index and doctype. """ result = super(JackalDoc, self).to_dict(include_meta=include_meta) if include_meta: source = result.pop('_source') return {**result, **source} else: return result
[ "Returns", "the", "result", "as", "a", "dictionary", "provide", "the", "include_meta", "flag", "to", "als", "show", "information", "like", "index", "and", "doctype", "." ]
mwgielen/jackal
python
https://github.com/mwgielen/jackal/blob/7fe62732eb5194b7246215d5277fb37c398097bf/jackal/documents.py#L40-L49
[ "def", "to_dict", "(", "self", ",", "include_meta", "=", "False", ")", ":", "result", "=", "super", "(", "JackalDoc", ",", "self", ")", ".", "to_dict", "(", "include_meta", "=", "include_meta", ")", "if", "include_meta", ":", "source", "=", "result", ".", "pop", "(", "'_source'", ")", "return", "{", "*", "*", "result", ",", "*", "*", "source", "}", "else", ":", "return", "result" ]
7fe62732eb5194b7246215d5277fb37c398097bf
valid
AnnotationsApiPlugin.r_annotations
Route to retrieve annotations by target :param target_urn: The CTS URN for which to retrieve annotations :type target_urn: str :return: a JSON string containing count and list of resources :rtype: {str: Any}
flask_nemo/plugins/annotations_api.py
def r_annotations(self): """ Route to retrieve annotations by target :param target_urn: The CTS URN for which to retrieve annotations :type target_urn: str :return: a JSON string containing count and list of resources :rtype: {str: Any} """ target = request.args.get("target", None) wildcard = request.args.get("wildcard", ".", type=str) include = request.args.get("include") exclude = request.args.get("exclude") limit = request.args.get("limit", None, type=int) start = request.args.get("start", 1, type=int) expand = request.args.get("expand", False, type=bool) if target: try: urn = MyCapytain.common.reference.URN(target) except ValueError: return "invalid urn", 400 count, annotations = self.__queryinterface__.getAnnotations(urn, wildcard=wildcard, include=include, exclude=exclude, limit=limit, start=start, expand=expand) else: # Note that this implementation is not done for too much annotations # because we do not implement pagination here count, annotations = self.__queryinterface__.getAnnotations(None, limit=limit, start=start, expand=expand) mapped = [] response = { "@context": type(self).JSONLD_CONTEXT, "id": url_for(".r_annotations", start=start, limit=limit), "type": "AnnotationCollection", "startIndex": start, "items": [ ], "total": count } for a in annotations: mapped.append({ "id": url_for(".r_annotation", sha=a.sha), "body": url_for(".r_annotation_body", sha=a.sha), "type": "Annotation", "target": a.target.to_json(), "dc:type": a.type_uri, "owl:sameAs": [a.uri], "nemo:slug": a.slug }) response["items"] = mapped response = jsonify(response) return response
def r_annotations(self): """ Route to retrieve annotations by target :param target_urn: The CTS URN for which to retrieve annotations :type target_urn: str :return: a JSON string containing count and list of resources :rtype: {str: Any} """ target = request.args.get("target", None) wildcard = request.args.get("wildcard", ".", type=str) include = request.args.get("include") exclude = request.args.get("exclude") limit = request.args.get("limit", None, type=int) start = request.args.get("start", 1, type=int) expand = request.args.get("expand", False, type=bool) if target: try: urn = MyCapytain.common.reference.URN(target) except ValueError: return "invalid urn", 400 count, annotations = self.__queryinterface__.getAnnotations(urn, wildcard=wildcard, include=include, exclude=exclude, limit=limit, start=start, expand=expand) else: # Note that this implementation is not done for too much annotations # because we do not implement pagination here count, annotations = self.__queryinterface__.getAnnotations(None, limit=limit, start=start, expand=expand) mapped = [] response = { "@context": type(self).JSONLD_CONTEXT, "id": url_for(".r_annotations", start=start, limit=limit), "type": "AnnotationCollection", "startIndex": start, "items": [ ], "total": count } for a in annotations: mapped.append({ "id": url_for(".r_annotation", sha=a.sha), "body": url_for(".r_annotation_body", sha=a.sha), "type": "Annotation", "target": a.target.to_json(), "dc:type": a.type_uri, "owl:sameAs": [a.uri], "nemo:slug": a.slug }) response["items"] = mapped response = jsonify(response) return response
[ "Route", "to", "retrieve", "annotations", "by", "target" ]
Capitains/flask-capitains-nemo
python
https://github.com/Capitains/flask-capitains-nemo/blob/8d91f2c05b925a6c8ea8c997baf698c87257bc58/flask_nemo/plugins/annotations_api.py#L42-L95
[ "def", "r_annotations", "(", "self", ")", ":", "target", "=", "request", ".", "args", ".", "get", "(", "\"target\"", ",", "None", ")", "wildcard", "=", "request", ".", "args", ".", "get", "(", "\"wildcard\"", ",", "\".\"", ",", "type", "=", "str", ")", "include", "=", "request", ".", "args", ".", "get", "(", "\"include\"", ")", "exclude", "=", "request", ".", "args", ".", "get", "(", "\"exclude\"", ")", "limit", "=", "request", ".", "args", ".", "get", "(", "\"limit\"", ",", "None", ",", "type", "=", "int", ")", "start", "=", "request", ".", "args", ".", "get", "(", "\"start\"", ",", "1", ",", "type", "=", "int", ")", "expand", "=", "request", ".", "args", ".", "get", "(", "\"expand\"", ",", "False", ",", "type", "=", "bool", ")", "if", "target", ":", "try", ":", "urn", "=", "MyCapytain", ".", "common", ".", "reference", ".", "URN", "(", "target", ")", "except", "ValueError", ":", "return", "\"invalid urn\"", ",", "400", "count", ",", "annotations", "=", "self", ".", "__queryinterface__", ".", "getAnnotations", "(", "urn", ",", "wildcard", "=", "wildcard", ",", "include", "=", "include", ",", "exclude", "=", "exclude", ",", "limit", "=", "limit", ",", "start", "=", "start", ",", "expand", "=", "expand", ")", "else", ":", "# Note that this implementation is not done for too much annotations", "# because we do not implement pagination here", "count", ",", "annotations", "=", "self", ".", "__queryinterface__", ".", "getAnnotations", "(", "None", ",", "limit", "=", "limit", ",", "start", "=", "start", ",", "expand", "=", "expand", ")", "mapped", "=", "[", "]", "response", "=", "{", "\"@context\"", ":", "type", "(", "self", ")", ".", "JSONLD_CONTEXT", ",", "\"id\"", ":", "url_for", "(", "\".r_annotations\"", ",", "start", "=", "start", ",", "limit", "=", "limit", ")", ",", "\"type\"", ":", "\"AnnotationCollection\"", ",", "\"startIndex\"", ":", "start", ",", "\"items\"", ":", "[", "]", ",", "\"total\"", ":", "count", "}", "for", "a", "in", "annotations", ":", 
"mapped", ".", "append", "(", "{", "\"id\"", ":", "url_for", "(", "\".r_annotation\"", ",", "sha", "=", "a", ".", "sha", ")", ",", "\"body\"", ":", "url_for", "(", "\".r_annotation_body\"", ",", "sha", "=", "a", ".", "sha", ")", ",", "\"type\"", ":", "\"Annotation\"", ",", "\"target\"", ":", "a", ".", "target", ".", "to_json", "(", ")", ",", "\"dc:type\"", ":", "a", ".", "type_uri", ",", "\"owl:sameAs\"", ":", "[", "a", ".", "uri", "]", ",", "\"nemo:slug\"", ":", "a", ".", "slug", "}", ")", "response", "[", "\"items\"", "]", "=", "mapped", "response", "=", "jsonify", "(", "response", ")", "return", "response" ]
8d91f2c05b925a6c8ea8c997baf698c87257bc58
valid
AnnotationsApiPlugin.r_annotation
Route to retrieve contents of an annotation resource :param uri: The uri of the annotation resource :type uri: str :return: annotation contents :rtype: {str: Any}
flask_nemo/plugins/annotations_api.py
def r_annotation(self, sha): """ Route to retrieve contents of an annotation resource :param uri: The uri of the annotation resource :type uri: str :return: annotation contents :rtype: {str: Any} """ annotation = self.__queryinterface__.getResource(sha) if not annotation: return "invalid resource uri", 404 response = { "@context": type(self).JSONLD_CONTEXT, "id": url_for(".r_annotation", sha=annotation.sha), "body": url_for(".r_annotation_body", sha=annotation.sha), "type": "Annotation", "target": annotation.target.to_json(), "owl:sameAs": [annotation.uri], "dc:type": annotation.type_uri, "nemo:slug": annotation.slug } return jsonify(response)
def r_annotation(self, sha): """ Route to retrieve contents of an annotation resource :param uri: The uri of the annotation resource :type uri: str :return: annotation contents :rtype: {str: Any} """ annotation = self.__queryinterface__.getResource(sha) if not annotation: return "invalid resource uri", 404 response = { "@context": type(self).JSONLD_CONTEXT, "id": url_for(".r_annotation", sha=annotation.sha), "body": url_for(".r_annotation_body", sha=annotation.sha), "type": "Annotation", "target": annotation.target.to_json(), "owl:sameAs": [annotation.uri], "dc:type": annotation.type_uri, "nemo:slug": annotation.slug } return jsonify(response)
[ "Route", "to", "retrieve", "contents", "of", "an", "annotation", "resource" ]
Capitains/flask-capitains-nemo
python
https://github.com/Capitains/flask-capitains-nemo/blob/8d91f2c05b925a6c8ea8c997baf698c87257bc58/flask_nemo/plugins/annotations_api.py#L97-L118
[ "def", "r_annotation", "(", "self", ",", "sha", ")", ":", "annotation", "=", "self", ".", "__queryinterface__", ".", "getResource", "(", "sha", ")", "if", "not", "annotation", ":", "return", "\"invalid resource uri\"", ",", "404", "response", "=", "{", "\"@context\"", ":", "type", "(", "self", ")", ".", "JSONLD_CONTEXT", ",", "\"id\"", ":", "url_for", "(", "\".r_annotation\"", ",", "sha", "=", "annotation", ".", "sha", ")", ",", "\"body\"", ":", "url_for", "(", "\".r_annotation_body\"", ",", "sha", "=", "annotation", ".", "sha", ")", ",", "\"type\"", ":", "\"Annotation\"", ",", "\"target\"", ":", "annotation", ".", "target", ".", "to_json", "(", ")", ",", "\"owl:sameAs\"", ":", "[", "annotation", ".", "uri", "]", ",", "\"dc:type\"", ":", "annotation", ".", "type_uri", ",", "\"nemo:slug\"", ":", "annotation", ".", "slug", "}", "return", "jsonify", "(", "response", ")" ]
8d91f2c05b925a6c8ea8c997baf698c87257bc58
valid
AnnotationsApiPlugin.r_annotation_body
Route to retrieve contents of an annotation resource :param uri: The uri of the annotation resource :type uri: str :return: annotation contents :rtype: {str: Any}
flask_nemo/plugins/annotations_api.py
def r_annotation_body(self, sha): """ Route to retrieve contents of an annotation resource :param uri: The uri of the annotation resource :type uri: str :return: annotation contents :rtype: {str: Any} """ annotation = self.__queryinterface__.getResource(sha) if not annotation: return "invalid resource uri", 404 # TODO this should inspect the annotation content # set appropriate Content-Type headers # and return the actual content content = annotation.read() if isinstance(content, Response): return content headers = {"Content-Type": annotation.mimetype} return Response(content, headers=headers)
def r_annotation_body(self, sha): """ Route to retrieve contents of an annotation resource :param uri: The uri of the annotation resource :type uri: str :return: annotation contents :rtype: {str: Any} """ annotation = self.__queryinterface__.getResource(sha) if not annotation: return "invalid resource uri", 404 # TODO this should inspect the annotation content # set appropriate Content-Type headers # and return the actual content content = annotation.read() if isinstance(content, Response): return content headers = {"Content-Type": annotation.mimetype} return Response(content, headers=headers)
[ "Route", "to", "retrieve", "contents", "of", "an", "annotation", "resource" ]
Capitains/flask-capitains-nemo
python
https://github.com/Capitains/flask-capitains-nemo/blob/8d91f2c05b925a6c8ea8c997baf698c87257bc58/flask_nemo/plugins/annotations_api.py#L120-L138
[ "def", "r_annotation_body", "(", "self", ",", "sha", ")", ":", "annotation", "=", "self", ".", "__queryinterface__", ".", "getResource", "(", "sha", ")", "if", "not", "annotation", ":", "return", "\"invalid resource uri\"", ",", "404", "# TODO this should inspect the annotation content", "# set appropriate Content-Type headers", "# and return the actual content", "content", "=", "annotation", ".", "read", "(", ")", "if", "isinstance", "(", "content", ",", "Response", ")", ":", "return", "content", "headers", "=", "{", "\"Content-Type\"", ":", "annotation", ".", "mimetype", "}", "return", "Response", "(", "content", ",", "headers", "=", "headers", ")" ]
8d91f2c05b925a6c8ea8c997baf698c87257bc58
valid
describeStats
Renders an ASCII-table of the synchronization statistics `stats`, example output: .. code-block:: +----------------------------------------------------------------------------------+ | TITLE | +----------+------+-------------------------+--------------------------+-----------+ | | | Local | Remote | Conflicts | | Source | Mode | Add | Mod | Del | Err | Add | Mod | Del | Err | Col | Mrg | +----------+------+-------+-----+-----+-----+--------+-----+-----+-----+-----+-----+ | contacts | <= | - | - | - | - | 10,387 | - | - | - | - | - | | note | SS | 1,308 | - | 2 | - | - | - | - | - | - | - | +----------+------+-------+-----+-----+-----+--------+-----+-----+-----+-----+-----+ | 1,310 local changes and 10,387 remote changes. | +----------------------------------------------------------------------------------+ :Parameters: stats : dict The synchronization stats returned by a call to Adapter.sync(). stream : file-like-object An output file-like object that has at least a `write()` method, e.g. ``sys.stdout`` can be used. title : str, optional, default: null A title placed at the top of the table -- if omitted (the default), then no title is rendered. details : bool, optional, default: true If truthy, a per-datastore listing of changes will be displayed (as in the above example). totals : bool, optional, default: true If truthy, a summary of all changes will be displayed (as in the above example). gettext : callable, optional, @DEPRECATED(0.2.0), default: null A `gettext.gettext` compatible callable used for translating localized content (such as number formatting, etc.). NOTE: this parameter is deprecated, and will be replaced with a generalized i18n solution.
pysyncml/common.py
def describeStats(stats, stream, title=None, details=True, totals=True, gettext=None): ''' Renders an ASCII-table of the synchronization statistics `stats`, example output: .. code-block:: +----------------------------------------------------------------------------------+ | TITLE | +----------+------+-------------------------+--------------------------+-----------+ | | | Local | Remote | Conflicts | | Source | Mode | Add | Mod | Del | Err | Add | Mod | Del | Err | Col | Mrg | +----------+------+-------+-----+-----+-----+--------+-----+-----+-----+-----+-----+ | contacts | <= | - | - | - | - | 10,387 | - | - | - | - | - | | note | SS | 1,308 | - | 2 | - | - | - | - | - | - | - | +----------+------+-------+-----+-----+-----+--------+-----+-----+-----+-----+-----+ | 1,310 local changes and 10,387 remote changes. | +----------------------------------------------------------------------------------+ :Parameters: stats : dict The synchronization stats returned by a call to Adapter.sync(). stream : file-like-object An output file-like object that has at least a `write()` method, e.g. ``sys.stdout`` can be used. title : str, optional, default: null A title placed at the top of the table -- if omitted (the default), then no title is rendered. details : bool, optional, default: true If truthy, a per-datastore listing of changes will be displayed (as in the above example). totals : bool, optional, default: true If truthy, a summary of all changes will be displayed (as in the above example). gettext : callable, optional, @DEPRECATED(0.2.0), default: null A `gettext.gettext` compatible callable used for translating localized content (such as number formatting, etc.). NOTE: this parameter is deprecated, and will be replaced with a generalized i18n solution. ''' from . 
import state modeStringLut = dict(( (constants.SYNCTYPE_TWO_WAY, '<>'), (constants.SYNCTYPE_SLOW_SYNC, 'SS'), (constants.SYNCTYPE_ONE_WAY_FROM_CLIENT, '->'), (constants.SYNCTYPE_REFRESH_FROM_CLIENT, '=>'), (constants.SYNCTYPE_ONE_WAY_FROM_SERVER, '<-'), (constants.SYNCTYPE_REFRESH_FROM_SERVER, '<='), )) if gettext is not None: _ = gettext else: _ = lambda s: s # todo: this does not handle the case where the title is wider than the table. wSrc = len(_('Source')) wMode = len(_('Mode')) wCon = len(_('Conflicts')) wCol = len(_('Col')) wMrg = len(_('Mrg')) wHereAdd = wPeerAdd = len(_('Add')) wHereMod = wPeerMod = len(_('Mod')) wHereDel = wPeerDel = len(_('Del')) wHereErr = wPeerErr = len(_('Err')) totLoc = 0 totRem = 0 totErr = 0 totCol = 0 totMrg = 0 for key in stats.keys(): wSrc = max(wSrc, len(key)) wMode = max(wMode, len(modeStringLut.get(stats[key].mode))) wCol = max(wCol, len(num2str(stats[key].conflicts))) wMrg = max(wMrg, len(num2str(stats[key].merged))) wHereAdd = max(wHereAdd, len(num2str(stats[key].hereAdd))) wPeerAdd = max(wPeerAdd, len(num2str(stats[key].peerAdd))) wHereMod = max(wHereMod, len(num2str(stats[key].hereMod))) wPeerMod = max(wPeerMod, len(num2str(stats[key].peerMod))) wHereDel = max(wHereDel, len(num2str(stats[key].hereDel))) wPeerDel = max(wPeerDel, len(num2str(stats[key].peerDel))) wHereErr = max(wHereErr, len(num2str(stats[key].hereErr))) wPeerErr = max(wPeerErr, len(num2str(stats[key].peerErr))) totLoc += stats[key].hereAdd + stats[key].hereMod + stats[key].hereDel totRem += stats[key].peerAdd + stats[key].peerMod + stats[key].peerDel totErr += stats[key].hereErr + stats[key].peerErr totCol += stats[key].conflicts totMrg += stats[key].merged # TODO: i'm 100% sure there is a python library that can do this for me... 
if wCon > wCol + 3 + wMrg: diff = wCon - ( wCol + 3 + wMrg ) wCol += diff / 2 wMrg = wCon - 3 - wCol else: wCon = wCol + 3 + wMrg if details: tWid = ( wSrc + 3 + wMode + 3 + wHereAdd + wHereMod + wHereDel + wHereErr + 9 + 3 + wPeerAdd + wPeerMod + wPeerDel + wPeerErr + 9 + 3 + wCon ) else: if title is None: tWid = 0 else: tWid = len(title) if totals: # TODO: oh dear. from an i18n POV, this is *horrible*!... sumlist = [] for val, singular, plural in [ (totLoc, _('local change'), _('local changes')), (totRem, _('remote change'), _('remote changes')), (totErr, _('error'), _('errors')), ]: if val == 1: sumlist.append(num2str(val) + ' ' + singular) elif val > 1: sumlist.append(num2str(val) + ' ' + plural) if len(sumlist) <= 0: sumlist = _('No changes') elif len(sumlist) == 1: sumlist = sumlist[0] else: sumlist = ', '.join(sumlist[:-1]) + ' ' + _('and') + ' ' + sumlist[-1] if totMrg > 0 or totCol > 0: sumlist += ': ' if totMrg == 1: sumlist += num2str(totMrg) + ' ' + _('merge') elif totMrg > 1: sumlist += num2str(totMrg) + ' ' + _('merges') if totMrg > 0 and totCol > 0: sumlist += ' ' + _('and') + ' ' if totCol == 1: sumlist += num2str(totCol) + ' ' + _('conflict') elif totCol > 1: sumlist += num2str(totCol) + ' ' + _('conflicts') sumlist += '.' 
if len(sumlist) > tWid: wSrc += len(sumlist) - tWid tWid = len(sumlist) if title is not None: stream.write('+-' + '-' * tWid + '-+\n') stream.write('| {0: ^{w}}'.format(title, w=tWid)) stream.write(' |\n') hline = '+-' \ + '-' * wSrc \ + '-+-' \ + '-' * wMode \ + '-+-' \ + '-' * ( wHereAdd + wHereMod + wHereDel + wHereErr + 9 ) \ + '-+-' \ + '-' * ( wPeerAdd + wPeerMod + wPeerDel + wPeerErr + 9 ) \ + '-+-' \ + '-' * wCon \ + '-+\n' if details: stream.write(hline) stream.write('| ' + ' ' * wSrc) stream.write(' | ' + ' ' * wMode) stream.write(' | {0: ^{w}}'.format(_('Local'), w=( wHereAdd + wHereMod + wHereDel + wHereErr + 9 ))) stream.write(' | {0: ^{w}}'.format(_('Remote'), w=( wPeerAdd + wPeerMod + wPeerDel + wPeerErr + 9 ))) stream.write(' | {0: ^{w}}'.format(_('Conflicts'), w=wCon)) stream.write(' |\n') stream.write('| {0: >{w}}'.format(_('Source'), w=wSrc)) stream.write(' | {0: >{w}}'.format(_('Mode'), w=wMode)) stream.write(' | {0: ^{w}}'.format(_('Add'), w=wHereAdd)) stream.write(' | {0: ^{w}}'.format(_('Mod'), w=wHereMod)) stream.write(' | {0: ^{w}}'.format(_('Del'), w=wHereDel)) stream.write(' | {0: ^{w}}'.format(_('Err'), w=wHereErr)) stream.write(' | {0: ^{w}}'.format(_('Add'), w=wPeerAdd)) stream.write(' | {0: ^{w}}'.format(_('Mod'), w=wPeerMod)) stream.write(' | {0: ^{w}}'.format(_('Del'), w=wPeerDel)) stream.write(' | {0: ^{w}}'.format(_('Err'), w=wPeerErr)) stream.write(' | {0: ^{w}}'.format(_('Col'), w=wCol)) stream.write(' | {0: ^{w}}'.format(_('Mrg'), w=wMrg)) stream.write(' |\n') hsline = '+-' + '-' * wSrc \ + '-+-' + '-' * wMode \ + '-+-' + '-' * wHereAdd \ + '-+-' + '-' * wHereMod \ + '-+-' + '-' * wHereDel \ + '-+-' + '-' * wHereErr \ + '-+-' + '-' * wPeerAdd \ + '-+-' + '-' * wPeerMod \ + '-+-' + '-' * wPeerDel \ + '-+-' + '-' * wPeerErr \ + '-+-' + '-' * wCol \ + '-+-' + '-' * wMrg \ + '-+\n' stream.write(hsline) def numcol(val, wid): if val == 0: return ' | {0: ^{w}}'.format('-', w=wid) return ' | {0: >{w}}'.format(num2str(val), w=wid) for 
key in sorted(stats.keys(), key=lambda k: str(k).lower()): stream.write('| {0: >{w}}'.format(key, w=wSrc)) stream.write(' | {0: ^{w}}'.format(modeStringLut.get(stats[key].mode), w=wMode)) stream.write(numcol(stats[key].hereAdd, wHereAdd)) stream.write(numcol(stats[key].hereMod, wHereMod)) stream.write(numcol(stats[key].hereDel, wHereDel)) stream.write(numcol(stats[key].hereErr, wHereErr)) stream.write(numcol(stats[key].peerAdd, wPeerAdd)) stream.write(numcol(stats[key].peerMod, wPeerMod)) stream.write(numcol(stats[key].peerDel, wPeerDel)) stream.write(numcol(stats[key].peerErr, wPeerErr)) stream.write(numcol(stats[key].conflicts, wCol)) stream.write(numcol(stats[key].merged, wMrg)) stream.write(' |\n') stream.write(hsline) if totals: if title is None and not details: stream.write('+-' + '-' * tWid + '-+\n') stream.write('| {0: ^{w}}'.format(sumlist, w=tWid)) stream.write(' |\n') stream.write('+-' + '-' * tWid + '-+\n') return
def describeStats(stats, stream, title=None, details=True, totals=True, gettext=None): ''' Renders an ASCII-table of the synchronization statistics `stats`, example output: .. code-block:: +----------------------------------------------------------------------------------+ | TITLE | +----------+------+-------------------------+--------------------------+-----------+ | | | Local | Remote | Conflicts | | Source | Mode | Add | Mod | Del | Err | Add | Mod | Del | Err | Col | Mrg | +----------+------+-------+-----+-----+-----+--------+-----+-----+-----+-----+-----+ | contacts | <= | - | - | - | - | 10,387 | - | - | - | - | - | | note | SS | 1,308 | - | 2 | - | - | - | - | - | - | - | +----------+------+-------+-----+-----+-----+--------+-----+-----+-----+-----+-----+ | 1,310 local changes and 10,387 remote changes. | +----------------------------------------------------------------------------------+ :Parameters: stats : dict The synchronization stats returned by a call to Adapter.sync(). stream : file-like-object An output file-like object that has at least a `write()` method, e.g. ``sys.stdout`` can be used. title : str, optional, default: null A title placed at the top of the table -- if omitted (the default), then no title is rendered. details : bool, optional, default: true If truthy, a per-datastore listing of changes will be displayed (as in the above example). totals : bool, optional, default: true If truthy, a summary of all changes will be displayed (as in the above example). gettext : callable, optional, @DEPRECATED(0.2.0), default: null A `gettext.gettext` compatible callable used for translating localized content (such as number formatting, etc.). NOTE: this parameter is deprecated, and will be replaced with a generalized i18n solution. ''' from . 
import state modeStringLut = dict(( (constants.SYNCTYPE_TWO_WAY, '<>'), (constants.SYNCTYPE_SLOW_SYNC, 'SS'), (constants.SYNCTYPE_ONE_WAY_FROM_CLIENT, '->'), (constants.SYNCTYPE_REFRESH_FROM_CLIENT, '=>'), (constants.SYNCTYPE_ONE_WAY_FROM_SERVER, '<-'), (constants.SYNCTYPE_REFRESH_FROM_SERVER, '<='), )) if gettext is not None: _ = gettext else: _ = lambda s: s # todo: this does not handle the case where the title is wider than the table. wSrc = len(_('Source')) wMode = len(_('Mode')) wCon = len(_('Conflicts')) wCol = len(_('Col')) wMrg = len(_('Mrg')) wHereAdd = wPeerAdd = len(_('Add')) wHereMod = wPeerMod = len(_('Mod')) wHereDel = wPeerDel = len(_('Del')) wHereErr = wPeerErr = len(_('Err')) totLoc = 0 totRem = 0 totErr = 0 totCol = 0 totMrg = 0 for key in stats.keys(): wSrc = max(wSrc, len(key)) wMode = max(wMode, len(modeStringLut.get(stats[key].mode))) wCol = max(wCol, len(num2str(stats[key].conflicts))) wMrg = max(wMrg, len(num2str(stats[key].merged))) wHereAdd = max(wHereAdd, len(num2str(stats[key].hereAdd))) wPeerAdd = max(wPeerAdd, len(num2str(stats[key].peerAdd))) wHereMod = max(wHereMod, len(num2str(stats[key].hereMod))) wPeerMod = max(wPeerMod, len(num2str(stats[key].peerMod))) wHereDel = max(wHereDel, len(num2str(stats[key].hereDel))) wPeerDel = max(wPeerDel, len(num2str(stats[key].peerDel))) wHereErr = max(wHereErr, len(num2str(stats[key].hereErr))) wPeerErr = max(wPeerErr, len(num2str(stats[key].peerErr))) totLoc += stats[key].hereAdd + stats[key].hereMod + stats[key].hereDel totRem += stats[key].peerAdd + stats[key].peerMod + stats[key].peerDel totErr += stats[key].hereErr + stats[key].peerErr totCol += stats[key].conflicts totMrg += stats[key].merged # TODO: i'm 100% sure there is a python library that can do this for me... 
if wCon > wCol + 3 + wMrg: diff = wCon - ( wCol + 3 + wMrg ) wCol += diff / 2 wMrg = wCon - 3 - wCol else: wCon = wCol + 3 + wMrg if details: tWid = ( wSrc + 3 + wMode + 3 + wHereAdd + wHereMod + wHereDel + wHereErr + 9 + 3 + wPeerAdd + wPeerMod + wPeerDel + wPeerErr + 9 + 3 + wCon ) else: if title is None: tWid = 0 else: tWid = len(title) if totals: # TODO: oh dear. from an i18n POV, this is *horrible*!... sumlist = [] for val, singular, plural in [ (totLoc, _('local change'), _('local changes')), (totRem, _('remote change'), _('remote changes')), (totErr, _('error'), _('errors')), ]: if val == 1: sumlist.append(num2str(val) + ' ' + singular) elif val > 1: sumlist.append(num2str(val) + ' ' + plural) if len(sumlist) <= 0: sumlist = _('No changes') elif len(sumlist) == 1: sumlist = sumlist[0] else: sumlist = ', '.join(sumlist[:-1]) + ' ' + _('and') + ' ' + sumlist[-1] if totMrg > 0 or totCol > 0: sumlist += ': ' if totMrg == 1: sumlist += num2str(totMrg) + ' ' + _('merge') elif totMrg > 1: sumlist += num2str(totMrg) + ' ' + _('merges') if totMrg > 0 and totCol > 0: sumlist += ' ' + _('and') + ' ' if totCol == 1: sumlist += num2str(totCol) + ' ' + _('conflict') elif totCol > 1: sumlist += num2str(totCol) + ' ' + _('conflicts') sumlist += '.' 
if len(sumlist) > tWid: wSrc += len(sumlist) - tWid tWid = len(sumlist) if title is not None: stream.write('+-' + '-' * tWid + '-+\n') stream.write('| {0: ^{w}}'.format(title, w=tWid)) stream.write(' |\n') hline = '+-' \ + '-' * wSrc \ + '-+-' \ + '-' * wMode \ + '-+-' \ + '-' * ( wHereAdd + wHereMod + wHereDel + wHereErr + 9 ) \ + '-+-' \ + '-' * ( wPeerAdd + wPeerMod + wPeerDel + wPeerErr + 9 ) \ + '-+-' \ + '-' * wCon \ + '-+\n' if details: stream.write(hline) stream.write('| ' + ' ' * wSrc) stream.write(' | ' + ' ' * wMode) stream.write(' | {0: ^{w}}'.format(_('Local'), w=( wHereAdd + wHereMod + wHereDel + wHereErr + 9 ))) stream.write(' | {0: ^{w}}'.format(_('Remote'), w=( wPeerAdd + wPeerMod + wPeerDel + wPeerErr + 9 ))) stream.write(' | {0: ^{w}}'.format(_('Conflicts'), w=wCon)) stream.write(' |\n') stream.write('| {0: >{w}}'.format(_('Source'), w=wSrc)) stream.write(' | {0: >{w}}'.format(_('Mode'), w=wMode)) stream.write(' | {0: ^{w}}'.format(_('Add'), w=wHereAdd)) stream.write(' | {0: ^{w}}'.format(_('Mod'), w=wHereMod)) stream.write(' | {0: ^{w}}'.format(_('Del'), w=wHereDel)) stream.write(' | {0: ^{w}}'.format(_('Err'), w=wHereErr)) stream.write(' | {0: ^{w}}'.format(_('Add'), w=wPeerAdd)) stream.write(' | {0: ^{w}}'.format(_('Mod'), w=wPeerMod)) stream.write(' | {0: ^{w}}'.format(_('Del'), w=wPeerDel)) stream.write(' | {0: ^{w}}'.format(_('Err'), w=wPeerErr)) stream.write(' | {0: ^{w}}'.format(_('Col'), w=wCol)) stream.write(' | {0: ^{w}}'.format(_('Mrg'), w=wMrg)) stream.write(' |\n') hsline = '+-' + '-' * wSrc \ + '-+-' + '-' * wMode \ + '-+-' + '-' * wHereAdd \ + '-+-' + '-' * wHereMod \ + '-+-' + '-' * wHereDel \ + '-+-' + '-' * wHereErr \ + '-+-' + '-' * wPeerAdd \ + '-+-' + '-' * wPeerMod \ + '-+-' + '-' * wPeerDel \ + '-+-' + '-' * wPeerErr \ + '-+-' + '-' * wCol \ + '-+-' + '-' * wMrg \ + '-+\n' stream.write(hsline) def numcol(val, wid): if val == 0: return ' | {0: ^{w}}'.format('-', w=wid) return ' | {0: >{w}}'.format(num2str(val), w=wid) for 
key in sorted(stats.keys(), key=lambda k: str(k).lower()): stream.write('| {0: >{w}}'.format(key, w=wSrc)) stream.write(' | {0: ^{w}}'.format(modeStringLut.get(stats[key].mode), w=wMode)) stream.write(numcol(stats[key].hereAdd, wHereAdd)) stream.write(numcol(stats[key].hereMod, wHereMod)) stream.write(numcol(stats[key].hereDel, wHereDel)) stream.write(numcol(stats[key].hereErr, wHereErr)) stream.write(numcol(stats[key].peerAdd, wPeerAdd)) stream.write(numcol(stats[key].peerMod, wPeerMod)) stream.write(numcol(stats[key].peerDel, wPeerDel)) stream.write(numcol(stats[key].peerErr, wPeerErr)) stream.write(numcol(stats[key].conflicts, wCol)) stream.write(numcol(stats[key].merged, wMrg)) stream.write(' |\n') stream.write(hsline) if totals: if title is None and not details: stream.write('+-' + '-' * tWid + '-+\n') stream.write('| {0: ^{w}}'.format(sumlist, w=tWid)) stream.write(' |\n') stream.write('+-' + '-' * tWid + '-+\n') return
[ "Renders", "an", "ASCII", "-", "table", "of", "the", "synchronization", "statistics", "stats", "example", "output", ":" ]
metagriffin/pysyncml
python
https://github.com/metagriffin/pysyncml/blob/a583fe0dbffa8b24e5a3e151524f84868b2382bb/pysyncml/common.py#L211-L459
[ "def", "describeStats", "(", "stats", ",", "stream", ",", "title", "=", "None", ",", "details", "=", "True", ",", "totals", "=", "True", ",", "gettext", "=", "None", ")", ":", "from", ".", "import", "state", "modeStringLut", "=", "dict", "(", "(", "(", "constants", ".", "SYNCTYPE_TWO_WAY", ",", "'<>'", ")", ",", "(", "constants", ".", "SYNCTYPE_SLOW_SYNC", ",", "'SS'", ")", ",", "(", "constants", ".", "SYNCTYPE_ONE_WAY_FROM_CLIENT", ",", "'->'", ")", ",", "(", "constants", ".", "SYNCTYPE_REFRESH_FROM_CLIENT", ",", "'=>'", ")", ",", "(", "constants", ".", "SYNCTYPE_ONE_WAY_FROM_SERVER", ",", "'<-'", ")", ",", "(", "constants", ".", "SYNCTYPE_REFRESH_FROM_SERVER", ",", "'<='", ")", ",", ")", ")", "if", "gettext", "is", "not", "None", ":", "_", "=", "gettext", "else", ":", "_", "=", "lambda", "s", ":", "s", "# todo: this does not handle the case where the title is wider than the table.", "wSrc", "=", "len", "(", "_", "(", "'Source'", ")", ")", "wMode", "=", "len", "(", "_", "(", "'Mode'", ")", ")", "wCon", "=", "len", "(", "_", "(", "'Conflicts'", ")", ")", "wCol", "=", "len", "(", "_", "(", "'Col'", ")", ")", "wMrg", "=", "len", "(", "_", "(", "'Mrg'", ")", ")", "wHereAdd", "=", "wPeerAdd", "=", "len", "(", "_", "(", "'Add'", ")", ")", "wHereMod", "=", "wPeerMod", "=", "len", "(", "_", "(", "'Mod'", ")", ")", "wHereDel", "=", "wPeerDel", "=", "len", "(", "_", "(", "'Del'", ")", ")", "wHereErr", "=", "wPeerErr", "=", "len", "(", "_", "(", "'Err'", ")", ")", "totLoc", "=", "0", "totRem", "=", "0", "totErr", "=", "0", "totCol", "=", "0", "totMrg", "=", "0", "for", "key", "in", "stats", ".", "keys", "(", ")", ":", "wSrc", "=", "max", "(", "wSrc", ",", "len", "(", "key", ")", ")", "wMode", "=", "max", "(", "wMode", ",", "len", "(", "modeStringLut", ".", "get", "(", "stats", "[", "key", "]", ".", "mode", ")", ")", ")", "wCol", "=", "max", "(", "wCol", ",", "len", "(", "num2str", "(", "stats", "[", "key", "]", ".", "conflicts", ")", ")", ")", "wMrg", "=", "max", "(", 
"wMrg", ",", "len", "(", "num2str", "(", "stats", "[", "key", "]", ".", "merged", ")", ")", ")", "wHereAdd", "=", "max", "(", "wHereAdd", ",", "len", "(", "num2str", "(", "stats", "[", "key", "]", ".", "hereAdd", ")", ")", ")", "wPeerAdd", "=", "max", "(", "wPeerAdd", ",", "len", "(", "num2str", "(", "stats", "[", "key", "]", ".", "peerAdd", ")", ")", ")", "wHereMod", "=", "max", "(", "wHereMod", ",", "len", "(", "num2str", "(", "stats", "[", "key", "]", ".", "hereMod", ")", ")", ")", "wPeerMod", "=", "max", "(", "wPeerMod", ",", "len", "(", "num2str", "(", "stats", "[", "key", "]", ".", "peerMod", ")", ")", ")", "wHereDel", "=", "max", "(", "wHereDel", ",", "len", "(", "num2str", "(", "stats", "[", "key", "]", ".", "hereDel", ")", ")", ")", "wPeerDel", "=", "max", "(", "wPeerDel", ",", "len", "(", "num2str", "(", "stats", "[", "key", "]", ".", "peerDel", ")", ")", ")", "wHereErr", "=", "max", "(", "wHereErr", ",", "len", "(", "num2str", "(", "stats", "[", "key", "]", ".", "hereErr", ")", ")", ")", "wPeerErr", "=", "max", "(", "wPeerErr", ",", "len", "(", "num2str", "(", "stats", "[", "key", "]", ".", "peerErr", ")", ")", ")", "totLoc", "+=", "stats", "[", "key", "]", ".", "hereAdd", "+", "stats", "[", "key", "]", ".", "hereMod", "+", "stats", "[", "key", "]", ".", "hereDel", "totRem", "+=", "stats", "[", "key", "]", ".", "peerAdd", "+", "stats", "[", "key", "]", ".", "peerMod", "+", "stats", "[", "key", "]", ".", "peerDel", "totErr", "+=", "stats", "[", "key", "]", ".", "hereErr", "+", "stats", "[", "key", "]", ".", "peerErr", "totCol", "+=", "stats", "[", "key", "]", ".", "conflicts", "totMrg", "+=", "stats", "[", "key", "]", ".", "merged", "# TODO: i'm 100% sure there is a python library that can do this for me...", "if", "wCon", ">", "wCol", "+", "3", "+", "wMrg", ":", "diff", "=", "wCon", "-", "(", "wCol", "+", "3", "+", "wMrg", ")", "wCol", "+=", "diff", "/", "2", "wMrg", "=", "wCon", "-", "3", "-", "wCol", "else", ":", "wCon", "=", "wCol", "+", "3", "+", 
"wMrg", "if", "details", ":", "tWid", "=", "(", "wSrc", "+", "3", "+", "wMode", "+", "3", "+", "wHereAdd", "+", "wHereMod", "+", "wHereDel", "+", "wHereErr", "+", "9", "+", "3", "+", "wPeerAdd", "+", "wPeerMod", "+", "wPeerDel", "+", "wPeerErr", "+", "9", "+", "3", "+", "wCon", ")", "else", ":", "if", "title", "is", "None", ":", "tWid", "=", "0", "else", ":", "tWid", "=", "len", "(", "title", ")", "if", "totals", ":", "# TODO: oh dear. from an i18n POV, this is *horrible*!...", "sumlist", "=", "[", "]", "for", "val", ",", "singular", ",", "plural", "in", "[", "(", "totLoc", ",", "_", "(", "'local change'", ")", ",", "_", "(", "'local changes'", ")", ")", ",", "(", "totRem", ",", "_", "(", "'remote change'", ")", ",", "_", "(", "'remote changes'", ")", ")", ",", "(", "totErr", ",", "_", "(", "'error'", ")", ",", "_", "(", "'errors'", ")", ")", ",", "]", ":", "if", "val", "==", "1", ":", "sumlist", ".", "append", "(", "num2str", "(", "val", ")", "+", "' '", "+", "singular", ")", "elif", "val", ">", "1", ":", "sumlist", ".", "append", "(", "num2str", "(", "val", ")", "+", "' '", "+", "plural", ")", "if", "len", "(", "sumlist", ")", "<=", "0", ":", "sumlist", "=", "_", "(", "'No changes'", ")", "elif", "len", "(", "sumlist", ")", "==", "1", ":", "sumlist", "=", "sumlist", "[", "0", "]", "else", ":", "sumlist", "=", "', '", ".", "join", "(", "sumlist", "[", ":", "-", "1", "]", ")", "+", "' '", "+", "_", "(", "'and'", ")", "+", "' '", "+", "sumlist", "[", "-", "1", "]", "if", "totMrg", ">", "0", "or", "totCol", ">", "0", ":", "sumlist", "+=", "': '", "if", "totMrg", "==", "1", ":", "sumlist", "+=", "num2str", "(", "totMrg", ")", "+", "' '", "+", "_", "(", "'merge'", ")", "elif", "totMrg", ">", "1", ":", "sumlist", "+=", "num2str", "(", "totMrg", ")", "+", "' '", "+", "_", "(", "'merges'", ")", "if", "totMrg", ">", "0", "and", "totCol", ">", "0", ":", "sumlist", "+=", "' '", "+", "_", "(", "'and'", ")", "+", "' '", "if", "totCol", "==", "1", ":", "sumlist", "+=", 
"num2str", "(", "totCol", ")", "+", "' '", "+", "_", "(", "'conflict'", ")", "elif", "totCol", ">", "1", ":", "sumlist", "+=", "num2str", "(", "totCol", ")", "+", "' '", "+", "_", "(", "'conflicts'", ")", "sumlist", "+=", "'.'", "if", "len", "(", "sumlist", ")", ">", "tWid", ":", "wSrc", "+=", "len", "(", "sumlist", ")", "-", "tWid", "tWid", "=", "len", "(", "sumlist", ")", "if", "title", "is", "not", "None", ":", "stream", ".", "write", "(", "'+-'", "+", "'-'", "*", "tWid", "+", "'-+\\n'", ")", "stream", ".", "write", "(", "'| {0: ^{w}}'", ".", "format", "(", "title", ",", "w", "=", "tWid", ")", ")", "stream", ".", "write", "(", "' |\\n'", ")", "hline", "=", "'+-'", "+", "'-'", "*", "wSrc", "+", "'-+-'", "+", "'-'", "*", "wMode", "+", "'-+-'", "+", "'-'", "*", "(", "wHereAdd", "+", "wHereMod", "+", "wHereDel", "+", "wHereErr", "+", "9", ")", "+", "'-+-'", "+", "'-'", "*", "(", "wPeerAdd", "+", "wPeerMod", "+", "wPeerDel", "+", "wPeerErr", "+", "9", ")", "+", "'-+-'", "+", "'-'", "*", "wCon", "+", "'-+\\n'", "if", "details", ":", "stream", ".", "write", "(", "hline", ")", "stream", ".", "write", "(", "'| '", "+", "' '", "*", "wSrc", ")", "stream", ".", "write", "(", "' | '", "+", "' '", "*", "wMode", ")", "stream", ".", "write", "(", "' | {0: ^{w}}'", ".", "format", "(", "_", "(", "'Local'", ")", ",", "w", "=", "(", "wHereAdd", "+", "wHereMod", "+", "wHereDel", "+", "wHereErr", "+", "9", ")", ")", ")", "stream", ".", "write", "(", "' | {0: ^{w}}'", ".", "format", "(", "_", "(", "'Remote'", ")", ",", "w", "=", "(", "wPeerAdd", "+", "wPeerMod", "+", "wPeerDel", "+", "wPeerErr", "+", "9", ")", ")", ")", "stream", ".", "write", "(", "' | {0: ^{w}}'", ".", "format", "(", "_", "(", "'Conflicts'", ")", ",", "w", "=", "wCon", ")", ")", "stream", ".", "write", "(", "' |\\n'", ")", "stream", ".", "write", "(", "'| {0: >{w}}'", ".", "format", "(", "_", "(", "'Source'", ")", ",", "w", "=", "wSrc", ")", ")", "stream", ".", "write", "(", "' | {0: >{w}}'", ".", "format", "(", 
"_", "(", "'Mode'", ")", ",", "w", "=", "wMode", ")", ")", "stream", ".", "write", "(", "' | {0: ^{w}}'", ".", "format", "(", "_", "(", "'Add'", ")", ",", "w", "=", "wHereAdd", ")", ")", "stream", ".", "write", "(", "' | {0: ^{w}}'", ".", "format", "(", "_", "(", "'Mod'", ")", ",", "w", "=", "wHereMod", ")", ")", "stream", ".", "write", "(", "' | {0: ^{w}}'", ".", "format", "(", "_", "(", "'Del'", ")", ",", "w", "=", "wHereDel", ")", ")", "stream", ".", "write", "(", "' | {0: ^{w}}'", ".", "format", "(", "_", "(", "'Err'", ")", ",", "w", "=", "wHereErr", ")", ")", "stream", ".", "write", "(", "' | {0: ^{w}}'", ".", "format", "(", "_", "(", "'Add'", ")", ",", "w", "=", "wPeerAdd", ")", ")", "stream", ".", "write", "(", "' | {0: ^{w}}'", ".", "format", "(", "_", "(", "'Mod'", ")", ",", "w", "=", "wPeerMod", ")", ")", "stream", ".", "write", "(", "' | {0: ^{w}}'", ".", "format", "(", "_", "(", "'Del'", ")", ",", "w", "=", "wPeerDel", ")", ")", "stream", ".", "write", "(", "' | {0: ^{w}}'", ".", "format", "(", "_", "(", "'Err'", ")", ",", "w", "=", "wPeerErr", ")", ")", "stream", ".", "write", "(", "' | {0: ^{w}}'", ".", "format", "(", "_", "(", "'Col'", ")", ",", "w", "=", "wCol", ")", ")", "stream", ".", "write", "(", "' | {0: ^{w}}'", ".", "format", "(", "_", "(", "'Mrg'", ")", ",", "w", "=", "wMrg", ")", ")", "stream", ".", "write", "(", "' |\\n'", ")", "hsline", "=", "'+-'", "+", "'-'", "*", "wSrc", "+", "'-+-'", "+", "'-'", "*", "wMode", "+", "'-+-'", "+", "'-'", "*", "wHereAdd", "+", "'-+-'", "+", "'-'", "*", "wHereMod", "+", "'-+-'", "+", "'-'", "*", "wHereDel", "+", "'-+-'", "+", "'-'", "*", "wHereErr", "+", "'-+-'", "+", "'-'", "*", "wPeerAdd", "+", "'-+-'", "+", "'-'", "*", "wPeerMod", "+", "'-+-'", "+", "'-'", "*", "wPeerDel", "+", "'-+-'", "+", "'-'", "*", "wPeerErr", "+", "'-+-'", "+", "'-'", "*", "wCol", "+", "'-+-'", "+", "'-'", "*", "wMrg", "+", "'-+\\n'", "stream", ".", "write", "(", "hsline", ")", "def", "numcol", "(", "val", ",", "wid", ")", ":", 
"if", "val", "==", "0", ":", "return", "' | {0: ^{w}}'", ".", "format", "(", "'-'", ",", "w", "=", "wid", ")", "return", "' | {0: >{w}}'", ".", "format", "(", "num2str", "(", "val", ")", ",", "w", "=", "wid", ")", "for", "key", "in", "sorted", "(", "stats", ".", "keys", "(", ")", ",", "key", "=", "lambda", "k", ":", "str", "(", "k", ")", ".", "lower", "(", ")", ")", ":", "stream", ".", "write", "(", "'| {0: >{w}}'", ".", "format", "(", "key", ",", "w", "=", "wSrc", ")", ")", "stream", ".", "write", "(", "' | {0: ^{w}}'", ".", "format", "(", "modeStringLut", ".", "get", "(", "stats", "[", "key", "]", ".", "mode", ")", ",", "w", "=", "wMode", ")", ")", "stream", ".", "write", "(", "numcol", "(", "stats", "[", "key", "]", ".", "hereAdd", ",", "wHereAdd", ")", ")", "stream", ".", "write", "(", "numcol", "(", "stats", "[", "key", "]", ".", "hereMod", ",", "wHereMod", ")", ")", "stream", ".", "write", "(", "numcol", "(", "stats", "[", "key", "]", ".", "hereDel", ",", "wHereDel", ")", ")", "stream", ".", "write", "(", "numcol", "(", "stats", "[", "key", "]", ".", "hereErr", ",", "wHereErr", ")", ")", "stream", ".", "write", "(", "numcol", "(", "stats", "[", "key", "]", ".", "peerAdd", ",", "wPeerAdd", ")", ")", "stream", ".", "write", "(", "numcol", "(", "stats", "[", "key", "]", ".", "peerMod", ",", "wPeerMod", ")", ")", "stream", ".", "write", "(", "numcol", "(", "stats", "[", "key", "]", ".", "peerDel", ",", "wPeerDel", ")", ")", "stream", ".", "write", "(", "numcol", "(", "stats", "[", "key", "]", ".", "peerErr", ",", "wPeerErr", ")", ")", "stream", ".", "write", "(", "numcol", "(", "stats", "[", "key", "]", ".", "conflicts", ",", "wCol", ")", ")", "stream", ".", "write", "(", "numcol", "(", "stats", "[", "key", "]", ".", "merged", ",", "wMrg", ")", ")", "stream", ".", "write", "(", "' |\\n'", ")", "stream", ".", "write", "(", "hsline", ")", "if", "totals", ":", "if", "title", "is", "None", "and", "not", "details", ":", "stream", ".", "write", "(", "'+-'", "+", "'-'", 
"*", "tWid", "+", "'-+\\n'", ")", "stream", ".", "write", "(", "'| {0: ^{w}}'", ".", "format", "(", "sumlist", ",", "w", "=", "tWid", ")", ")", "stream", ".", "write", "(", "' |\\n'", ")", "stream", ".", "write", "(", "'+-'", "+", "'-'", "*", "tWid", "+", "'-+\\n'", ")", "return" ]
a583fe0dbffa8b24e5a3e151524f84868b2382bb
valid
Enum.lookup
Returns the label for a given Enum key
enum21/__init__.py
def lookup(cls, key, get=False): """Returns the label for a given Enum key""" if get: item = cls._item_dict.get(key) return item.name if item else key return cls._item_dict[key].name
def lookup(cls, key, get=False): """Returns the label for a given Enum key""" if get: item = cls._item_dict.get(key) return item.name if item else key return cls._item_dict[key].name
[ "Returns", "the", "label", "for", "a", "given", "Enum", "key" ]
bmorgan21/python-enum
python
https://github.com/bmorgan21/python-enum/blob/91a3a3cbaddce5db5fe3ac09dfd60b89cb8e22f4/enum21/__init__.py#L229-L234
[ "def", "lookup", "(", "cls", ",", "key", ",", "get", "=", "False", ")", ":", "if", "get", ":", "item", "=", "cls", ".", "_item_dict", ".", "get", "(", "key", ")", "return", "item", ".", "name", "if", "item", "else", "key", "return", "cls", ".", "_item_dict", "[", "key", "]", ".", "name" ]
91a3a3cbaddce5db5fe3ac09dfd60b89cb8e22f4
valid
Enum.verbose
Returns the verbose name for a given enum value
enum21/__init__.py
def verbose(cls, key=False, default=''): """Returns the verbose name for a given enum value""" if key is False: items = cls._item_dict.values() return [(x.key, x.value) for x in sorted(items, key=lambda x:x.sort or x.key)] item = cls._item_dict.get(key) return item.value if item else default
def verbose(cls, key=False, default=''): """Returns the verbose name for a given enum value""" if key is False: items = cls._item_dict.values() return [(x.key, x.value) for x in sorted(items, key=lambda x:x.sort or x.key)] item = cls._item_dict.get(key) return item.value if item else default
[ "Returns", "the", "verbose", "name", "for", "a", "given", "enum", "value" ]
bmorgan21/python-enum
python
https://github.com/bmorgan21/python-enum/blob/91a3a3cbaddce5db5fe3ac09dfd60b89cb8e22f4/enum21/__init__.py#L252-L259
[ "def", "verbose", "(", "cls", ",", "key", "=", "False", ",", "default", "=", "''", ")", ":", "if", "key", "is", "False", ":", "items", "=", "cls", ".", "_item_dict", ".", "values", "(", ")", "return", "[", "(", "x", ".", "key", ",", "x", ".", "value", ")", "for", "x", "in", "sorted", "(", "items", ",", "key", "=", "lambda", "x", ":", "x", ".", "sort", "or", "x", ".", "key", ")", "]", "item", "=", "cls", ".", "_item_dict", ".", "get", "(", "key", ")", "return", "item", ".", "value", "if", "item", "else", "default" ]
91a3a3cbaddce5db5fe3ac09dfd60b89cb8e22f4
valid
get_configured_dns
Returns the configured DNS servers with the use f nmcli.
jackal/scripts/dns_discover.py
def get_configured_dns(): """ Returns the configured DNS servers with the use f nmcli. """ ips = [] try: output = subprocess.check_output(['nmcli', 'device', 'show']) output = output.decode('utf-8') for line in output.split('\n'): if 'DNS' in line: pattern = r"\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}" for hit in re.findall(pattern, line): ips.append(hit) except FileNotFoundError: pass return ips
def get_configured_dns(): """ Returns the configured DNS servers with the use f nmcli. """ ips = [] try: output = subprocess.check_output(['nmcli', 'device', 'show']) output = output.decode('utf-8') for line in output.split('\n'): if 'DNS' in line: pattern = r"\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}" for hit in re.findall(pattern, line): ips.append(hit) except FileNotFoundError: pass return ips
[ "Returns", "the", "configured", "DNS", "servers", "with", "the", "use", "f", "nmcli", "." ]
mwgielen/jackal
python
https://github.com/mwgielen/jackal/blob/7fe62732eb5194b7246215d5277fb37c398097bf/jackal/scripts/dns_discover.py#L15-L31
[ "def", "get_configured_dns", "(", ")", ":", "ips", "=", "[", "]", "try", ":", "output", "=", "subprocess", ".", "check_output", "(", "[", "'nmcli'", ",", "'device'", ",", "'show'", "]", ")", "output", "=", "output", ".", "decode", "(", "'utf-8'", ")", "for", "line", "in", "output", ".", "split", "(", "'\\n'", ")", ":", "if", "'DNS'", "in", "line", ":", "pattern", "=", "r\"\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\"", "for", "hit", "in", "re", ".", "findall", "(", "pattern", ",", "line", ")", ":", "ips", ".", "append", "(", "hit", ")", "except", "FileNotFoundError", ":", "pass", "return", "ips" ]
7fe62732eb5194b7246215d5277fb37c398097bf
valid
get_resolv_dns
Returns the dns servers configured in /etc/resolv.conf
jackal/scripts/dns_discover.py
def get_resolv_dns(): """ Returns the dns servers configured in /etc/resolv.conf """ result = [] try: for line in open('/etc/resolv.conf', 'r'): if line.startswith('search'): result.append(line.strip().split(' ')[1]) except FileNotFoundError: pass return result
def get_resolv_dns(): """ Returns the dns servers configured in /etc/resolv.conf """ result = [] try: for line in open('/etc/resolv.conf', 'r'): if line.startswith('search'): result.append(line.strip().split(' ')[1]) except FileNotFoundError: pass return result
[ "Returns", "the", "dns", "servers", "configured", "in", "/", "etc", "/", "resolv", ".", "conf" ]
mwgielen/jackal
python
https://github.com/mwgielen/jackal/blob/7fe62732eb5194b7246215d5277fb37c398097bf/jackal/scripts/dns_discover.py#L34-L45
[ "def", "get_resolv_dns", "(", ")", ":", "result", "=", "[", "]", "try", ":", "for", "line", "in", "open", "(", "'/etc/resolv.conf'", ",", "'r'", ")", ":", "if", "line", ".", "startswith", "(", "'search'", ")", ":", "result", ".", "append", "(", "line", ".", "strip", "(", ")", ".", "split", "(", "' '", ")", "[", "1", "]", ")", "except", "FileNotFoundError", ":", "pass", "return", "result" ]
7fe62732eb5194b7246215d5277fb37c398097bf
valid
zone_transfer
Tries to perform a zone transfer.
jackal/scripts/dns_discover.py
def zone_transfer(address, dns_name): """ Tries to perform a zone transfer. """ ips = [] try: print_notification("Attempting dns zone transfer for {} on {}".format(dns_name, address)) z = dns.zone.from_xfr(dns.query.xfr(address, dns_name)) except dns.exception.FormError: print_notification("Zone transfer not allowed") return ips names = z.nodes.keys() print_success("Zone transfer successfull for {}, found {} entries".format(address, len(names))) for n in names: node = z[n] data = node.get_rdataset(dns.rdataclass.IN, dns.rdatatype.A) if data: # TODO add hostnames to entries. # hostname = n.to_text() for item in data.items: address = item.address ips.append(address) return ips
def zone_transfer(address, dns_name): """ Tries to perform a zone transfer. """ ips = [] try: print_notification("Attempting dns zone transfer for {} on {}".format(dns_name, address)) z = dns.zone.from_xfr(dns.query.xfr(address, dns_name)) except dns.exception.FormError: print_notification("Zone transfer not allowed") return ips names = z.nodes.keys() print_success("Zone transfer successfull for {}, found {} entries".format(address, len(names))) for n in names: node = z[n] data = node.get_rdataset(dns.rdataclass.IN, dns.rdatatype.A) if data: # TODO add hostnames to entries. # hostname = n.to_text() for item in data.items: address = item.address ips.append(address) return ips
[ "Tries", "to", "perform", "a", "zone", "transfer", "." ]
mwgielen/jackal
python
https://github.com/mwgielen/jackal/blob/7fe62732eb5194b7246215d5277fb37c398097bf/jackal/scripts/dns_discover.py#L48-L70
[ "def", "zone_transfer", "(", "address", ",", "dns_name", ")", ":", "ips", "=", "[", "]", "try", ":", "print_notification", "(", "\"Attempting dns zone transfer for {} on {}\"", ".", "format", "(", "dns_name", ",", "address", ")", ")", "z", "=", "dns", ".", "zone", ".", "from_xfr", "(", "dns", ".", "query", ".", "xfr", "(", "address", ",", "dns_name", ")", ")", "except", "dns", ".", "exception", ".", "FormError", ":", "print_notification", "(", "\"Zone transfer not allowed\"", ")", "return", "ips", "names", "=", "z", ".", "nodes", ".", "keys", "(", ")", "print_success", "(", "\"Zone transfer successfull for {}, found {} entries\"", ".", "format", "(", "address", ",", "len", "(", "names", ")", ")", ")", "for", "n", "in", "names", ":", "node", "=", "z", "[", "n", "]", "data", "=", "node", ".", "get_rdataset", "(", "dns", ".", "rdataclass", ".", "IN", ",", "dns", ".", "rdatatype", ".", "A", ")", "if", "data", ":", "# TODO add hostnames to entries.", "# hostname = n.to_text()", "for", "item", "in", "data", ".", "items", ":", "address", "=", "item", ".", "address", "ips", ".", "append", "(", "address", ")", "return", "ips" ]
7fe62732eb5194b7246215d5277fb37c398097bf
valid
resolve_domains
Resolves the list of domains and returns the ips.
jackal/scripts/dns_discover.py
def resolve_domains(domains, disable_zone=False): """ Resolves the list of domains and returns the ips. """ dnsresolver = dns.resolver.Resolver() ips = [] for domain in domains: print_notification("Resolving {}".format(domain)) try: result = dnsresolver.query(domain, 'A') for a in result.response.answer[0]: ips.append(str(a)) if not disable_zone: ips.extend(zone_transfer(str(a), domain)) except dns.resolver.NXDOMAIN as e: print_error(e) return ips
def resolve_domains(domains, disable_zone=False): """ Resolves the list of domains and returns the ips. """ dnsresolver = dns.resolver.Resolver() ips = [] for domain in domains: print_notification("Resolving {}".format(domain)) try: result = dnsresolver.query(domain, 'A') for a in result.response.answer[0]: ips.append(str(a)) if not disable_zone: ips.extend(zone_transfer(str(a), domain)) except dns.resolver.NXDOMAIN as e: print_error(e) return ips
[ "Resolves", "the", "list", "of", "domains", "and", "returns", "the", "ips", "." ]
mwgielen/jackal
python
https://github.com/mwgielen/jackal/blob/7fe62732eb5194b7246215d5277fb37c398097bf/jackal/scripts/dns_discover.py#L73-L91
[ "def", "resolve_domains", "(", "domains", ",", "disable_zone", "=", "False", ")", ":", "dnsresolver", "=", "dns", ".", "resolver", ".", "Resolver", "(", ")", "ips", "=", "[", "]", "for", "domain", "in", "domains", ":", "print_notification", "(", "\"Resolving {}\"", ".", "format", "(", "domain", ")", ")", "try", ":", "result", "=", "dnsresolver", ".", "query", "(", "domain", ",", "'A'", ")", "for", "a", "in", "result", ".", "response", ".", "answer", "[", "0", "]", ":", "ips", ".", "append", "(", "str", "(", "a", ")", ")", "if", "not", "disable_zone", ":", "ips", ".", "extend", "(", "zone_transfer", "(", "str", "(", "a", ")", ",", "domain", ")", ")", "except", "dns", ".", "resolver", ".", "NXDOMAIN", "as", "e", ":", "print_error", "(", "e", ")", "return", "ips" ]
7fe62732eb5194b7246215d5277fb37c398097bf
valid
parse_ips
Parses the list of ips, turns these into ranges based on the netmask given. Set include_public to True to include public IP adresses.
jackal/scripts/dns_discover.py
def parse_ips(ips, netmask, include_public): """ Parses the list of ips, turns these into ranges based on the netmask given. Set include_public to True to include public IP adresses. """ hs = HostSearch() rs = RangeSearch() ranges = [] ips = list(set(ips)) included_ips = [] print_success("Found {} ips".format(len(ips))) for ip in ips: ip_address = ipaddress.ip_address(ip) if include_public or ip_address.is_private: # To stop the screen filling with ranges. if len(ips) < 15: print_success("Found ip: {}".format(ip)) host = hs.id_to_object(ip) host.add_tag('dns_discover') host.save() r = str(ipaddress.IPv4Network("{}/{}".format(ip, netmask), strict=False)) ranges.append(r) included_ips.append(ip) else: print_notification("Excluding ip {}".format(ip)) ranges = list(set(ranges)) print_success("Found {} ranges".format(len(ranges))) for rng in ranges: # To stop the screen filling with ranges. if len(ranges) < 15: print_success("Found range: {}".format(rng)) r = rs.id_to_object(rng) r.add_tag('dns_discover') r.save() stats = {} stats['ips'] = included_ips stats['ranges'] = ranges return stats
def parse_ips(ips, netmask, include_public): """ Parses the list of ips, turns these into ranges based on the netmask given. Set include_public to True to include public IP adresses. """ hs = HostSearch() rs = RangeSearch() ranges = [] ips = list(set(ips)) included_ips = [] print_success("Found {} ips".format(len(ips))) for ip in ips: ip_address = ipaddress.ip_address(ip) if include_public or ip_address.is_private: # To stop the screen filling with ranges. if len(ips) < 15: print_success("Found ip: {}".format(ip)) host = hs.id_to_object(ip) host.add_tag('dns_discover') host.save() r = str(ipaddress.IPv4Network("{}/{}".format(ip, netmask), strict=False)) ranges.append(r) included_ips.append(ip) else: print_notification("Excluding ip {}".format(ip)) ranges = list(set(ranges)) print_success("Found {} ranges".format(len(ranges))) for rng in ranges: # To stop the screen filling with ranges. if len(ranges) < 15: print_success("Found range: {}".format(rng)) r = rs.id_to_object(rng) r.add_tag('dns_discover') r.save() stats = {} stats['ips'] = included_ips stats['ranges'] = ranges return stats
[ "Parses", "the", "list", "of", "ips", "turns", "these", "into", "ranges", "based", "on", "the", "netmask", "given", ".", "Set", "include_public", "to", "True", "to", "include", "public", "IP", "adresses", "." ]
mwgielen/jackal
python
https://github.com/mwgielen/jackal/blob/7fe62732eb5194b7246215d5277fb37c398097bf/jackal/scripts/dns_discover.py#L94-L133
[ "def", "parse_ips", "(", "ips", ",", "netmask", ",", "include_public", ")", ":", "hs", "=", "HostSearch", "(", ")", "rs", "=", "RangeSearch", "(", ")", "ranges", "=", "[", "]", "ips", "=", "list", "(", "set", "(", "ips", ")", ")", "included_ips", "=", "[", "]", "print_success", "(", "\"Found {} ips\"", ".", "format", "(", "len", "(", "ips", ")", ")", ")", "for", "ip", "in", "ips", ":", "ip_address", "=", "ipaddress", ".", "ip_address", "(", "ip", ")", "if", "include_public", "or", "ip_address", ".", "is_private", ":", "# To stop the screen filling with ranges.", "if", "len", "(", "ips", ")", "<", "15", ":", "print_success", "(", "\"Found ip: {}\"", ".", "format", "(", "ip", ")", ")", "host", "=", "hs", ".", "id_to_object", "(", "ip", ")", "host", ".", "add_tag", "(", "'dns_discover'", ")", "host", ".", "save", "(", ")", "r", "=", "str", "(", "ipaddress", ".", "IPv4Network", "(", "\"{}/{}\"", ".", "format", "(", "ip", ",", "netmask", ")", ",", "strict", "=", "False", ")", ")", "ranges", ".", "append", "(", "r", ")", "included_ips", ".", "append", "(", "ip", ")", "else", ":", "print_notification", "(", "\"Excluding ip {}\"", ".", "format", "(", "ip", ")", ")", "ranges", "=", "list", "(", "set", "(", "ranges", ")", ")", "print_success", "(", "\"Found {} ranges\"", ".", "format", "(", "len", "(", "ranges", ")", ")", ")", "for", "rng", "in", "ranges", ":", "# To stop the screen filling with ranges.", "if", "len", "(", "ranges", ")", "<", "15", ":", "print_success", "(", "\"Found range: {}\"", ".", "format", "(", "rng", ")", ")", "r", "=", "rs", ".", "id_to_object", "(", "rng", ")", "r", ".", "add_tag", "(", "'dns_discover'", ")", "r", ".", "save", "(", ")", "stats", "=", "{", "}", "stats", "[", "'ips'", "]", "=", "included_ips", "stats", "[", "'ranges'", "]", "=", "ranges", "return", "stats" ]
7fe62732eb5194b7246215d5277fb37c398097bf
valid
create_connection
Creates a connection based upon the given configuration object.
jackal/core.py
def create_connection(conf): """ Creates a connection based upon the given configuration object. """ host_config = {} host_config['hosts'] = [conf.get('jackal', 'host')] if int(conf.get('jackal', 'use_ssl')): host_config['use_ssl'] = True if conf.get('jackal', 'ca_certs'): host_config['ca_certs'] = conf.get('jackal', 'ca_certs') if int(conf.get('jackal', 'client_certs')): host_config['client_cert'] = conf.get('jackal', 'client_cert') host_config['client_key'] = conf.get('jackal', 'client_key') # Disable hostname checking for now. host_config['ssl_assert_hostname'] = False connections.create_connection(**host_config)
def create_connection(conf): """ Creates a connection based upon the given configuration object. """ host_config = {} host_config['hosts'] = [conf.get('jackal', 'host')] if int(conf.get('jackal', 'use_ssl')): host_config['use_ssl'] = True if conf.get('jackal', 'ca_certs'): host_config['ca_certs'] = conf.get('jackal', 'ca_certs') if int(conf.get('jackal', 'client_certs')): host_config['client_cert'] = conf.get('jackal', 'client_cert') host_config['client_key'] = conf.get('jackal', 'client_key') # Disable hostname checking for now. host_config['ssl_assert_hostname'] = False connections.create_connection(**host_config)
[ "Creates", "a", "connection", "based", "upon", "the", "given", "configuration", "object", "." ]
mwgielen/jackal
python
https://github.com/mwgielen/jackal/blob/7fe62732eb5194b7246215d5277fb37c398097bf/jackal/core.py#L21-L38
[ "def", "create_connection", "(", "conf", ")", ":", "host_config", "=", "{", "}", "host_config", "[", "'hosts'", "]", "=", "[", "conf", ".", "get", "(", "'jackal'", ",", "'host'", ")", "]", "if", "int", "(", "conf", ".", "get", "(", "'jackal'", ",", "'use_ssl'", ")", ")", ":", "host_config", "[", "'use_ssl'", "]", "=", "True", "if", "conf", ".", "get", "(", "'jackal'", ",", "'ca_certs'", ")", ":", "host_config", "[", "'ca_certs'", "]", "=", "conf", ".", "get", "(", "'jackal'", ",", "'ca_certs'", ")", "if", "int", "(", "conf", ".", "get", "(", "'jackal'", ",", "'client_certs'", ")", ")", ":", "host_config", "[", "'client_cert'", "]", "=", "conf", ".", "get", "(", "'jackal'", ",", "'client_cert'", ")", "host_config", "[", "'client_key'", "]", "=", "conf", ".", "get", "(", "'jackal'", ",", "'client_key'", ")", "# Disable hostname checking for now.", "host_config", "[", "'ssl_assert_hostname'", "]", "=", "False", "connections", ".", "create_connection", "(", "*", "*", "host_config", ")" ]
7fe62732eb5194b7246215d5277fb37c398097bf
valid
CoreSearch.search
Searches the elasticsearch instance to retrieve the requested documents.
jackal/core.py
def search(self, number=None, *args, **kwargs): """ Searches the elasticsearch instance to retrieve the requested documents. """ search = self.create_search(*args, **kwargs) try: if number: response = search[0:number] else: args, _ = self.core_parser.parse_known_args() if args.number: response = search[0:args.number] else: response = search.scan() return [hit for hit in response] except NotFoundError: print_error("The index was not found, have you initialized the index?") return [] except (ConnectionError, TransportError): print_error("Cannot connect to elasticsearch") return []
def search(self, number=None, *args, **kwargs): """ Searches the elasticsearch instance to retrieve the requested documents. """ search = self.create_search(*args, **kwargs) try: if number: response = search[0:number] else: args, _ = self.core_parser.parse_known_args() if args.number: response = search[0:args.number] else: response = search.scan() return [hit for hit in response] except NotFoundError: print_error("The index was not found, have you initialized the index?") return [] except (ConnectionError, TransportError): print_error("Cannot connect to elasticsearch") return []
[ "Searches", "the", "elasticsearch", "instance", "to", "retrieve", "the", "requested", "documents", "." ]
mwgielen/jackal
python
https://github.com/mwgielen/jackal/blob/7fe62732eb5194b7246215d5277fb37c398097bf/jackal/core.py#L55-L76
[ "def", "search", "(", "self", ",", "number", "=", "None", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "search", "=", "self", ".", "create_search", "(", "*", "args", ",", "*", "*", "kwargs", ")", "try", ":", "if", "number", ":", "response", "=", "search", "[", "0", ":", "number", "]", "else", ":", "args", ",", "_", "=", "self", ".", "core_parser", ".", "parse_known_args", "(", ")", "if", "args", ".", "number", ":", "response", "=", "search", "[", "0", ":", "args", ".", "number", "]", "else", ":", "response", "=", "search", ".", "scan", "(", ")", "return", "[", "hit", "for", "hit", "in", "response", "]", "except", "NotFoundError", ":", "print_error", "(", "\"The index was not found, have you initialized the index?\"", ")", "return", "[", "]", "except", "(", "ConnectionError", ",", "TransportError", ")", ":", "print_error", "(", "\"Cannot connect to elasticsearch\"", ")", "return", "[", "]" ]
7fe62732eb5194b7246215d5277fb37c398097bf
valid
CoreSearch.argument_search
Uses the command line arguments to fill the search function and call it.
jackal/core.py
def argument_search(self): """ Uses the command line arguments to fill the search function and call it. """ arguments, _ = self.argparser.parse_known_args() return self.search(**vars(arguments))
def argument_search(self): """ Uses the command line arguments to fill the search function and call it. """ arguments, _ = self.argparser.parse_known_args() return self.search(**vars(arguments))
[ "Uses", "the", "command", "line", "arguments", "to", "fill", "the", "search", "function", "and", "call", "it", "." ]
mwgielen/jackal
python
https://github.com/mwgielen/jackal/blob/7fe62732eb5194b7246215d5277fb37c398097bf/jackal/core.py#L79-L84
[ "def", "argument_search", "(", "self", ")", ":", "arguments", ",", "_", "=", "self", ".", "argparser", ".", "parse_known_args", "(", ")", "return", "self", ".", "search", "(", "*", "*", "vars", "(", "arguments", ")", ")" ]
7fe62732eb5194b7246215d5277fb37c398097bf
valid
CoreSearch.count
Returns the number of results after filtering with the given arguments.
jackal/core.py
def count(self, *args, **kwargs): """ Returns the number of results after filtering with the given arguments. """ search = self.create_search(*args, **kwargs) try: return search.count() except NotFoundError: print_error("The index was not found, have you initialized the index?") except (ConnectionError, TransportError): print_error("Cannot connect to elasticsearch")
def count(self, *args, **kwargs): """ Returns the number of results after filtering with the given arguments. """ search = self.create_search(*args, **kwargs) try: return search.count() except NotFoundError: print_error("The index was not found, have you initialized the index?") except (ConnectionError, TransportError): print_error("Cannot connect to elasticsearch")
[ "Returns", "the", "number", "of", "results", "after", "filtering", "with", "the", "given", "arguments", "." ]
mwgielen/jackal
python
https://github.com/mwgielen/jackal/blob/7fe62732eb5194b7246215d5277fb37c398097bf/jackal/core.py#L87-L97
[ "def", "count", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "search", "=", "self", ".", "create_search", "(", "*", "args", ",", "*", "*", "kwargs", ")", "try", ":", "return", "search", ".", "count", "(", ")", "except", "NotFoundError", ":", "print_error", "(", "\"The index was not found, have you initialized the index?\"", ")", "except", "(", "ConnectionError", ",", "TransportError", ")", ":", "print_error", "(", "\"Cannot connect to elasticsearch\"", ")" ]
7fe62732eb5194b7246215d5277fb37c398097bf
valid
CoreSearch.argument_count
Uses the command line arguments to fill the count function and call it.
jackal/core.py
def argument_count(self): """ Uses the command line arguments to fill the count function and call it. """ arguments, _ = self.argparser.parse_known_args() return self.count(**vars(arguments))
def argument_count(self): """ Uses the command line arguments to fill the count function and call it. """ arguments, _ = self.argparser.parse_known_args() return self.count(**vars(arguments))
[ "Uses", "the", "command", "line", "arguments", "to", "fill", "the", "count", "function", "and", "call", "it", "." ]
mwgielen/jackal
python
https://github.com/mwgielen/jackal/blob/7fe62732eb5194b7246215d5277fb37c398097bf/jackal/core.py#L100-L105
[ "def", "argument_count", "(", "self", ")", ":", "arguments", ",", "_", "=", "self", ".", "argparser", ".", "parse_known_args", "(", ")", "return", "self", ".", "count", "(", "*", "*", "vars", "(", "arguments", ")", ")" ]
7fe62732eb5194b7246215d5277fb37c398097bf
valid
CoreSearch.get_pipe
Returns a generator that maps the input of the pipe to an elasticsearch object. Will call id_to_object if it cannot serialize the data from json.
jackal/core.py
def get_pipe(self, object_type): """ Returns a generator that maps the input of the pipe to an elasticsearch object. Will call id_to_object if it cannot serialize the data from json. """ for line in sys.stdin: try: data = json.loads(line.strip()) obj = object_type(**data) yield obj except ValueError: yield self.id_to_object(line.strip())
def get_pipe(self, object_type): """ Returns a generator that maps the input of the pipe to an elasticsearch object. Will call id_to_object if it cannot serialize the data from json. """ for line in sys.stdin: try: data = json.loads(line.strip()) obj = object_type(**data) yield obj except ValueError: yield self.id_to_object(line.strip())
[ "Returns", "a", "generator", "that", "maps", "the", "input", "of", "the", "pipe", "to", "an", "elasticsearch", "object", ".", "Will", "call", "id_to_object", "if", "it", "cannot", "serialize", "the", "data", "from", "json", "." ]
mwgielen/jackal
python
https://github.com/mwgielen/jackal/blob/7fe62732eb5194b7246215d5277fb37c398097bf/jackal/core.py#L129-L140
[ "def", "get_pipe", "(", "self", ",", "object_type", ")", ":", "for", "line", "in", "sys", ".", "stdin", ":", "try", ":", "data", "=", "json", ".", "loads", "(", "line", ".", "strip", "(", ")", ")", "obj", "=", "object_type", "(", "*", "*", "data", ")", "yield", "obj", "except", "ValueError", ":", "yield", "self", ".", "id_to_object", "(", "line", ".", "strip", "(", ")", ")" ]
7fe62732eb5194b7246215d5277fb37c398097bf
valid
RangeSearch.id_to_object
Resolves an ip adres to a range object, creating it if it doesn't exists.
jackal/core.py
def id_to_object(self, line): """ Resolves an ip adres to a range object, creating it if it doesn't exists. """ result = Range.get(line, ignore=404) if not result: result = Range(range=line) result.save() return result
def id_to_object(self, line): """ Resolves an ip adres to a range object, creating it if it doesn't exists. """ result = Range.get(line, ignore=404) if not result: result = Range(range=line) result.save() return result
[ "Resolves", "an", "ip", "adres", "to", "a", "range", "object", "creating", "it", "if", "it", "doesn", "t", "exists", "." ]
mwgielen/jackal
python
https://github.com/mwgielen/jackal/blob/7fe62732eb5194b7246215d5277fb37c398097bf/jackal/core.py#L184-L192
[ "def", "id_to_object", "(", "self", ",", "line", ")", ":", "result", "=", "Range", ".", "get", "(", "line", ",", "ignore", "=", "404", ")", "if", "not", "result", ":", "result", "=", "Range", "(", "range", "=", "line", ")", "result", ".", "save", "(", ")", "return", "result" ]
7fe62732eb5194b7246215d5277fb37c398097bf
valid
RangeSearch.argparser
Argparser option with search functionality specific for ranges.
jackal/core.py
def argparser(self): """ Argparser option with search functionality specific for ranges. """ core_parser = self.core_parser core_parser.add_argument('-r', '--range', type=str, help="The range to search for use") return core_parser
def argparser(self): """ Argparser option with search functionality specific for ranges. """ core_parser = self.core_parser core_parser.add_argument('-r', '--range', type=str, help="The range to search for use") return core_parser
[ "Argparser", "option", "with", "search", "functionality", "specific", "for", "ranges", "." ]
mwgielen/jackal
python
https://github.com/mwgielen/jackal/blob/7fe62732eb5194b7246215d5277fb37c398097bf/jackal/core.py#L204-L210
[ "def", "argparser", "(", "self", ")", ":", "core_parser", "=", "self", ".", "core_parser", "core_parser", ".", "add_argument", "(", "'-r'", ",", "'--range'", ",", "type", "=", "str", ",", "help", "=", "\"The range to search for use\"", ")", "return", "core_parser" ]
7fe62732eb5194b7246215d5277fb37c398097bf
valid
ServiceSearch.object_to_id
Searches elasticsearch for objects with the same address, protocol, port and state.
jackal/core.py
def object_to_id(self, obj): """ Searches elasticsearch for objects with the same address, protocol, port and state. """ search = Service.search() search = search.filter("term", address=obj.address) search = search.filter("term", protocol=obj.protocol) search = search.filter("term", port=obj.port) search = search.filter("term", state=obj.state) if search.count(): result = search[0].execute()[0] return result.meta.id else: return None
def object_to_id(self, obj): """ Searches elasticsearch for objects with the same address, protocol, port and state. """ search = Service.search() search = search.filter("term", address=obj.address) search = search.filter("term", protocol=obj.protocol) search = search.filter("term", port=obj.port) search = search.filter("term", state=obj.state) if search.count(): result = search[0].execute()[0] return result.meta.id else: return None
[ "Searches", "elasticsearch", "for", "objects", "with", "the", "same", "address", "protocol", "port", "and", "state", "." ]
mwgielen/jackal
python
https://github.com/mwgielen/jackal/blob/7fe62732eb5194b7246215d5277fb37c398097bf/jackal/core.py#L334-L347
[ "def", "object_to_id", "(", "self", ",", "obj", ")", ":", "search", "=", "Service", ".", "search", "(", ")", "search", "=", "search", ".", "filter", "(", "\"term\"", ",", "address", "=", "obj", ".", "address", ")", "search", "=", "search", ".", "filter", "(", "\"term\"", ",", "protocol", "=", "obj", ".", "protocol", ")", "search", "=", "search", ".", "filter", "(", "\"term\"", ",", "port", "=", "obj", ".", "port", ")", "search", "=", "search", ".", "filter", "(", "\"term\"", ",", "state", "=", "obj", ".", "state", ")", "if", "search", ".", "count", "(", ")", ":", "result", "=", "search", "[", "0", "]", ".", "execute", "(", ")", "[", "0", "]", "return", "result", ".", "meta", ".", "id", "else", ":", "return", "None" ]
7fe62732eb5194b7246215d5277fb37c398097bf
valid
UserSearch.id_to_object
Resolves the given id to a user object, if it doesn't exists it will be created.
jackal/core.py
def id_to_object(self, line): """ Resolves the given id to a user object, if it doesn't exists it will be created. """ user = User.get(line, ignore=404) if not user: user = User(username=line) user.save() return user
def id_to_object(self, line): """ Resolves the given id to a user object, if it doesn't exists it will be created. """ user = User.get(line, ignore=404) if not user: user = User(username=line) user.save() return user
[ "Resolves", "the", "given", "id", "to", "a", "user", "object", "if", "it", "doesn", "t", "exists", "it", "will", "be", "created", "." ]
mwgielen/jackal
python
https://github.com/mwgielen/jackal/blob/7fe62732eb5194b7246215d5277fb37c398097bf/jackal/core.py#L393-L401
[ "def", "id_to_object", "(", "self", ",", "line", ")", ":", "user", "=", "User", ".", "get", "(", "line", ",", "ignore", "=", "404", ")", "if", "not", "user", ":", "user", "=", "User", "(", "username", "=", "line", ")", "user", ".", "save", "(", ")", "return", "user" ]
7fe62732eb5194b7246215d5277fb37c398097bf
valid
UserSearch.get_users
Retrieves the users from elastic.
jackal/core.py
def get_users(self, *args, **kwargs): """ Retrieves the users from elastic. """ arguments, _ = self.argparser.parse_known_args() if self.is_pipe and self.use_pipe: return self.get_pipe(self.object_type) elif arguments.tags or arguments.group or arguments.search or arguments.domain: return self.argument_search() else: return self.search(*args, **kwargs)
def get_users(self, *args, **kwargs): """ Retrieves the users from elastic. """ arguments, _ = self.argparser.parse_known_args() if self.is_pipe and self.use_pipe: return self.get_pipe(self.object_type) elif arguments.tags or arguments.group or arguments.search or arguments.domain: return self.argument_search() else: return self.search(*args, **kwargs)
[ "Retrieves", "the", "users", "from", "elastic", "." ]
mwgielen/jackal
python
https://github.com/mwgielen/jackal/blob/7fe62732eb5194b7246215d5277fb37c398097bf/jackal/core.py#L413-L423
[ "def", "get_users", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "arguments", ",", "_", "=", "self", ".", "argparser", ".", "parse_known_args", "(", ")", "if", "self", ".", "is_pipe", "and", "self", ".", "use_pipe", ":", "return", "self", ".", "get_pipe", "(", "self", ".", "object_type", ")", "elif", "arguments", ".", "tags", "or", "arguments", ".", "group", "or", "arguments", ".", "search", "or", "arguments", ".", "domain", ":", "return", "self", ".", "argument_search", "(", ")", "else", ":", "return", "self", ".", "search", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
7fe62732eb5194b7246215d5277fb37c398097bf
valid
UserSearch.get_domains
Retrieves the domains of the users from elastic.
jackal/core.py
def get_domains(self): """ Retrieves the domains of the users from elastic. """ search = User.search() search.aggs.bucket('domains', 'terms', field='domain', order={'_count': 'desc'}, size=100) response = search.execute() return [entry.key for entry in response.aggregations.domains.buckets]
def get_domains(self): """ Retrieves the domains of the users from elastic. """ search = User.search() search.aggs.bucket('domains', 'terms', field='domain', order={'_count': 'desc'}, size=100) response = search.execute() return [entry.key for entry in response.aggregations.domains.buckets]
[ "Retrieves", "the", "domains", "of", "the", "users", "from", "elastic", "." ]
mwgielen/jackal
python
https://github.com/mwgielen/jackal/blob/7fe62732eb5194b7246215d5277fb37c398097bf/jackal/core.py#L434-L441
[ "def", "get_domains", "(", "self", ")", ":", "search", "=", "User", ".", "search", "(", ")", "search", ".", "aggs", ".", "bucket", "(", "'domains'", ",", "'terms'", ",", "field", "=", "'domain'", ",", "order", "=", "{", "'_count'", ":", "'desc'", "}", ",", "size", "=", "100", ")", "response", "=", "search", ".", "execute", "(", ")", "return", "[", "entry", ".", "key", "for", "entry", "in", "response", ".", "aggregations", ".", "domains", ".", "buckets", "]" ]
7fe62732eb5194b7246215d5277fb37c398097bf
valid
CredentialSearch.find_object
Searches elasticsearch for objects with the same username, password, optional domain, host_ip and service_id.
jackal/core.py
def find_object(self, username, secret, domain=None, host_ip=None, service_id=None): """ Searches elasticsearch for objects with the same username, password, optional domain, host_ip and service_id. """ # Not sure yet if this is advisable... Older passwords can be overwritten... search = Credential.search() search = search.filter("term", username=username) search = search.filter("term", secret=secret) if domain: search = search.filter("term", domain=domain) else: search = search.exclude("exists", field="domain") if host_ip: search = search.filter("term", host_ip=host_ip) else: search = search.exclude("exists", field="host_ip") if service_id: search = search.filter("term", service_id=service_id) else: search = search.exclude("exists", field="service_id") if search.count(): result = search[0].execute()[0] return result else: return None
def find_object(self, username, secret, domain=None, host_ip=None, service_id=None): """ Searches elasticsearch for objects with the same username, password, optional domain, host_ip and service_id. """ # Not sure yet if this is advisable... Older passwords can be overwritten... search = Credential.search() search = search.filter("term", username=username) search = search.filter("term", secret=secret) if domain: search = search.filter("term", domain=domain) else: search = search.exclude("exists", field="domain") if host_ip: search = search.filter("term", host_ip=host_ip) else: search = search.exclude("exists", field="host_ip") if service_id: search = search.filter("term", service_id=service_id) else: search = search.exclude("exists", field="service_id") if search.count(): result = search[0].execute()[0] return result else: return None
[ "Searches", "elasticsearch", "for", "objects", "with", "the", "same", "username", "password", "optional", "domain", "host_ip", "and", "service_id", "." ]
mwgielen/jackal
python
https://github.com/mwgielen/jackal/blob/7fe62732eb5194b7246215d5277fb37c398097bf/jackal/core.py#L495-L519
[ "def", "find_object", "(", "self", ",", "username", ",", "secret", ",", "domain", "=", "None", ",", "host_ip", "=", "None", ",", "service_id", "=", "None", ")", ":", "# Not sure yet if this is advisable... Older passwords can be overwritten...", "search", "=", "Credential", ".", "search", "(", ")", "search", "=", "search", ".", "filter", "(", "\"term\"", ",", "username", "=", "username", ")", "search", "=", "search", ".", "filter", "(", "\"term\"", ",", "secret", "=", "secret", ")", "if", "domain", ":", "search", "=", "search", ".", "filter", "(", "\"term\"", ",", "domain", "=", "domain", ")", "else", ":", "search", "=", "search", ".", "exclude", "(", "\"exists\"", ",", "field", "=", "\"domain\"", ")", "if", "host_ip", ":", "search", "=", "search", ".", "filter", "(", "\"term\"", ",", "host_ip", "=", "host_ip", ")", "else", ":", "search", "=", "search", ".", "exclude", "(", "\"exists\"", ",", "field", "=", "\"host_ip\"", ")", "if", "service_id", ":", "search", "=", "search", ".", "filter", "(", "\"term\"", ",", "service_id", "=", "service_id", ")", "else", ":", "search", "=", "search", ".", "exclude", "(", "\"exists\"", ",", "field", "=", "\"service_id\"", ")", "if", "search", ".", "count", "(", ")", ":", "result", "=", "search", "[", "0", "]", ".", "execute", "(", ")", "[", "0", "]", "return", "result", "else", ":", "return", "None" ]
7fe62732eb5194b7246215d5277fb37c398097bf
valid
CredentialSearch.object_to_id
Searches elasticsearch for objects with the same username, password, optional domain, host_ip and service_id.
jackal/core.py
def object_to_id(self, obj): """ Searches elasticsearch for objects with the same username, password, optional domain, host_ip and service_id. """ # Not sure yet if this is advisable... Older passwords can be overwritten... search = Credential.search() search = search.filter("term", username=obj.username) search = search.filter("term", secret=obj.secret) if obj.domain: search = search.filter("term", domain=obj.domain) else: search = search.exclude("exists", field="domain") if obj.host_ip: search = search.filter("term", host_ip=obj.host_ip) else: search = search.exclude("exists", field="host_ip") if obj.service_id: search = search.filter("term", service_id=obj.service_id) else: search = search.exclude("exists", field="service_id") if search.count(): result = search[0].execute()[0] return result.meta.id else: return None
def object_to_id(self, obj): """ Searches elasticsearch for objects with the same username, password, optional domain, host_ip and service_id. """ # Not sure yet if this is advisable... Older passwords can be overwritten... search = Credential.search() search = search.filter("term", username=obj.username) search = search.filter("term", secret=obj.secret) if obj.domain: search = search.filter("term", domain=obj.domain) else: search = search.exclude("exists", field="domain") if obj.host_ip: search = search.filter("term", host_ip=obj.host_ip) else: search = search.exclude("exists", field="host_ip") if obj.service_id: search = search.filter("term", service_id=obj.service_id) else: search = search.exclude("exists", field="service_id") if search.count(): result = search[0].execute()[0] return result.meta.id else: return None
[ "Searches", "elasticsearch", "for", "objects", "with", "the", "same", "username", "password", "optional", "domain", "host_ip", "and", "service_id", "." ]
mwgielen/jackal
python
https://github.com/mwgielen/jackal/blob/7fe62732eb5194b7246215d5277fb37c398097bf/jackal/core.py#L522-L546
[ "def", "object_to_id", "(", "self", ",", "obj", ")", ":", "# Not sure yet if this is advisable... Older passwords can be overwritten...", "search", "=", "Credential", ".", "search", "(", ")", "search", "=", "search", ".", "filter", "(", "\"term\"", ",", "username", "=", "obj", ".", "username", ")", "search", "=", "search", ".", "filter", "(", "\"term\"", ",", "secret", "=", "obj", ".", "secret", ")", "if", "obj", ".", "domain", ":", "search", "=", "search", ".", "filter", "(", "\"term\"", ",", "domain", "=", "obj", ".", "domain", ")", "else", ":", "search", "=", "search", ".", "exclude", "(", "\"exists\"", ",", "field", "=", "\"domain\"", ")", "if", "obj", ".", "host_ip", ":", "search", "=", "search", ".", "filter", "(", "\"term\"", ",", "host_ip", "=", "obj", ".", "host_ip", ")", "else", ":", "search", "=", "search", ".", "exclude", "(", "\"exists\"", ",", "field", "=", "\"host_ip\"", ")", "if", "obj", ".", "service_id", ":", "search", "=", "search", ".", "filter", "(", "\"term\"", ",", "service_id", "=", "obj", ".", "service_id", ")", "else", ":", "search", "=", "search", ".", "exclude", "(", "\"exists\"", ",", "field", "=", "\"service_id\"", ")", "if", "search", ".", "count", "(", ")", ":", "result", "=", "search", "[", "0", "]", ".", "execute", "(", ")", "[", "0", "]", "return", "result", ".", "meta", ".", "id", "else", ":", "return", "None" ]
7fe62732eb5194b7246215d5277fb37c398097bf
valid
CredentialSearch.get_credentials
Retrieves the users from elastic.
jackal/core.py
def get_credentials(self, *args, **kwargs): """ Retrieves the users from elastic. """ arguments, _ = self.argparser.parse_known_args() if self.is_pipe and self.use_pipe: return self.get_pipe(self.object_type) elif arguments.tags or arguments.type or arguments.search or arguments.password or arguments.cracked or arguments.range or arguments.domain: return self.argument_search() else: return self.search(*args, **kwargs)
def get_credentials(self, *args, **kwargs): """ Retrieves the users from elastic. """ arguments, _ = self.argparser.parse_known_args() if self.is_pipe and self.use_pipe: return self.get_pipe(self.object_type) elif arguments.tags or arguments.type or arguments.search or arguments.password or arguments.cracked or arguments.range or arguments.domain: return self.argument_search() else: return self.search(*args, **kwargs)
[ "Retrieves", "the", "users", "from", "elastic", "." ]
mwgielen/jackal
python
https://github.com/mwgielen/jackal/blob/7fe62732eb5194b7246215d5277fb37c398097bf/jackal/core.py#L549-L559
[ "def", "get_credentials", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "arguments", ",", "_", "=", "self", ".", "argparser", ".", "parse_known_args", "(", ")", "if", "self", ".", "is_pipe", "and", "self", ".", "use_pipe", ":", "return", "self", ".", "get_pipe", "(", "self", ".", "object_type", ")", "elif", "arguments", ".", "tags", "or", "arguments", ".", "type", "or", "arguments", ".", "search", "or", "arguments", ".", "password", "or", "arguments", ".", "cracked", "or", "arguments", ".", "range", "or", "arguments", ".", "domain", ":", "return", "self", ".", "argument_search", "(", ")", "else", ":", "return", "self", ".", "search", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
7fe62732eb5194b7246215d5277fb37c398097bf
valid
DocMapper.get_pipe
Returns a list that maps the input of the pipe to an elasticsearch object. Will call id_to_object if it cannot serialize the data from json.
jackal/core.py
def get_pipe(self): """ Returns a list that maps the input of the pipe to an elasticsearch object. Will call id_to_object if it cannot serialize the data from json. """ lines = [] for line in sys.stdin: try: lines.append(self.line_to_object(line.strip())) except ValueError: pass except KeyError: pass return lines
def get_pipe(self): """ Returns a list that maps the input of the pipe to an elasticsearch object. Will call id_to_object if it cannot serialize the data from json. """ lines = [] for line in sys.stdin: try: lines.append(self.line_to_object(line.strip())) except ValueError: pass except KeyError: pass return lines
[ "Returns", "a", "list", "that", "maps", "the", "input", "of", "the", "pipe", "to", "an", "elasticsearch", "object", ".", "Will", "call", "id_to_object", "if", "it", "cannot", "serialize", "the", "data", "from", "json", "." ]
mwgielen/jackal
python
https://github.com/mwgielen/jackal/blob/7fe62732eb5194b7246215d5277fb37c398097bf/jackal/core.py#L610-L623
[ "def", "get_pipe", "(", "self", ")", ":", "lines", "=", "[", "]", "for", "line", "in", "sys", ".", "stdin", ":", "try", ":", "lines", ".", "append", "(", "self", ".", "line_to_object", "(", "line", ".", "strip", "(", ")", ")", ")", "except", "ValueError", ":", "pass", "except", "KeyError", ":", "pass", "return", "lines" ]
7fe62732eb5194b7246215d5277fb37c398097bf
valid
Protocol.commands2tree
Consumes state.Command commands and converts them to an ET protocol tree
pysyncml/protocol.py
def commands2tree(self, adapter, session, commands): '''Consumes state.Command commands and converts them to an ET protocol tree''' # todo: trap errors... hdrcmd = commands[0] commands = commands[1:] if hdrcmd.name != constants.CMD_SYNCHDR: raise common.InternalError('unexpected first command "%s" (expected "%s")' % (hdrcmd.name, constants.CMD_SYNCHDR)) if hdrcmd.version != constants.SYNCML_VERSION_1_2: raise common.FeatureNotSupported('unsupported SyncML version "%s"' % (hdrcmd.version,)) xsync = ET.Element(constants.NODE_SYNCML) xhdr = ET.SubElement(xsync, hdrcmd.name) if hdrcmd.version == constants.SYNCML_VERSION_1_2: ET.SubElement(xhdr, 'VerDTD').text = constants.SYNCML_DTD_VERSION_1_2 ET.SubElement(xhdr, 'VerProto').text = hdrcmd.version ET.SubElement(xhdr, 'SessionID').text = hdrcmd.sessionID ET.SubElement(xhdr, 'MsgID').text = hdrcmd.msgID xsrc = ET.SubElement(xhdr, 'Source') ET.SubElement(xsrc, 'LocURI').text = hdrcmd.source if hdrcmd.sourceName is not None: ET.SubElement(xsrc, 'LocName').text = hdrcmd.sourceName xtgt = ET.SubElement(xhdr, 'Target') ET.SubElement(xtgt, 'LocURI').text = hdrcmd.target if hdrcmd.targetName is not None: ET.SubElement(xtgt, 'LocName').text = hdrcmd.targetName if hdrcmd.respUri is not None: ET.SubElement(xhdr, 'RespURI').text = hdrcmd.respUri if hdrcmd.auth is not None and not session.authAccepted: if hdrcmd.auth != constants.NAMESPACE_AUTH_BASIC: raise NotImplementedError('auth method "%s"' % (common.auth2string(hdrcmd.auth),)) if hdrcmd.auth == constants.NAMESPACE_AUTH_BASIC: xcred = ET.SubElement(xhdr, 'Cred') xmeta = ET.SubElement(xcred, 'Meta') ET.SubElement(xmeta, 'Format', {'xmlns': constants.NAMESPACE_METINF}).text = 'b64' ET.SubElement(xmeta, 'Type', {'xmlns': constants.NAMESPACE_METINF}).text = hdrcmd.auth ET.SubElement(xcred, 'Data').text = base64.b64encode( '%s:%s' % (adapter.peer.username, adapter.peer.password)) if hdrcmd.maxMsgSize is not None or hdrcmd.maxObjSize is not None: xmeta = ET.SubElement(xhdr, 'Meta') if 
hdrcmd.maxMsgSize is not None: ET.SubElement(xmeta, 'MaxMsgSize', {'xmlns': constants.NAMESPACE_METINF}).text = hdrcmd.maxMsgSize if hdrcmd.maxObjSize is not None: ET.SubElement(xmeta, 'MaxObjSize', {'xmlns': constants.NAMESPACE_METINF}).text = hdrcmd.maxObjSize xbody = ET.SubElement(xsync, constants.NODE_SYNCBODY) for cmdidx, cmd in enumerate(commands): xcmd = ET.SubElement(xbody, cmd.name) if cmd.cmdID is not None: ET.SubElement(xcmd, 'CmdID').text = cmd.cmdID if cmd.name == constants.CMD_ALERT: ET.SubElement(xcmd, 'Data').text = str(cmd.data) xitem = ET.SubElement(xcmd, 'Item') ET.SubElement(ET.SubElement(xitem, 'Source'), 'LocURI').text = cmd.source ET.SubElement(ET.SubElement(xitem, 'Target'), 'LocURI').text = cmd.target if cmd.lastAnchor is not None \ or cmd.nextAnchor is not None \ or cmd.maxObjSize is not None: xmeta = ET.SubElement(xitem, 'Meta') xanch = ET.SubElement(xmeta, 'Anchor', {'xmlns': constants.NAMESPACE_METINF}) if cmd.lastAnchor is not None: ET.SubElement(xanch, 'Last').text = cmd.lastAnchor if cmd.nextAnchor is not None: ET.SubElement(xanch, 'Next').text = cmd.nextAnchor if cmd.maxObjSize is not None: ET.SubElement(xmeta, 'MaxObjSize', {'xmlns': constants.NAMESPACE_METINF}).text = cmd.maxObjSize continue if cmd.name == constants.CMD_STATUS: ET.SubElement(xcmd, 'MsgRef').text = cmd.msgRef ET.SubElement(xcmd, 'CmdRef').text = cmd.cmdRef ET.SubElement(xcmd, 'Cmd').text = cmd.statusOf if cmd.sourceRef is not None: ET.SubElement(xcmd, 'SourceRef').text = cmd.sourceRef if cmd.targetRef is not None: ET.SubElement(xcmd, 'TargetRef').text = cmd.targetRef ET.SubElement(xcmd, 'Data').text = cmd.statusCode if cmd.nextAnchor is not None or cmd.lastAnchor is not None: xdata = ET.SubElement(ET.SubElement(xcmd, 'Item'), 'Data') xanch = ET.SubElement(xdata, 'Anchor', {'xmlns': constants.NAMESPACE_METINF}) if cmd.lastAnchor is not None: ET.SubElement(xanch, 'Last').text = cmd.lastAnchor if cmd.nextAnchor is not None: ET.SubElement(xanch, 'Next').text = 
cmd.nextAnchor # NOTE: this is NOT standard SyncML... if cmd.errorCode is not None or cmd.errorMsg is not None: xerr = ET.SubElement(xcmd, 'Error') if cmd.errorCode is not None: ET.SubElement(xerr, 'Code').text = cmd.errorCode if cmd.errorMsg is not None: ET.SubElement(xerr, 'Message').text = cmd.errorMsg if cmd.errorTrace is not None: ET.SubElement(xerr, 'Trace').text = cmd.errorTrace continue if cmd.name in [constants.CMD_GET, constants.CMD_PUT]: ET.SubElement(ET.SubElement(xcmd, 'Meta'), 'Type', {'xmlns': constants.NAMESPACE_METINF}).text = cmd.type if cmd.source is not None or cmd.target is not None or cmd.data: xitem = ET.SubElement(xcmd, 'Item') if cmd.source is not None: xsrc = ET.SubElement(xitem, 'Source') ET.SubElement(xsrc, 'LocURI').text = cmd.source ET.SubElement(xsrc, 'LocName').text = cmd.source if cmd.target is not None: xtgt = ET.SubElement(xitem, 'Target') ET.SubElement(xtgt, 'LocURI').text = cmd.target ET.SubElement(xtgt, 'LocName').text = cmd.target if cmd.data is not None: if isinstance(cmd.data, basestring): ET.SubElement(xitem, 'Data').text = cmd.data else: ET.SubElement(xitem, 'Data').append(cmd.data) continue if cmd.name == constants.CMD_RESULTS: ET.SubElement(xcmd, 'MsgRef').text = cmd.msgRef ET.SubElement(xcmd, 'CmdRef').text = cmd.cmdRef ET.SubElement(ET.SubElement(xcmd, 'Meta'), 'Type', {'xmlns': constants.NAMESPACE_METINF}).text = cmd.type xitem = ET.SubElement(xcmd, 'Item') xsrc = ET.SubElement(xitem, 'Source') ET.SubElement(xsrc, 'LocURI').text = cmd.source ET.SubElement(xsrc, 'LocName').text = cmd.source if cmd.data is not None: if isinstance(cmd.data, basestring): ET.SubElement(xitem, 'Data').text = cmd.data else: ET.SubElement(xitem, 'Data').append(cmd.data) continue if cmd.name == constants.CMD_SYNC: ET.SubElement(ET.SubElement(xcmd, 'Source'), 'LocURI').text = cmd.source ET.SubElement(ET.SubElement(xcmd, 'Target'), 'LocURI').text = cmd.target if cmd.noc is not None: ET.SubElement(xcmd, 'NumberOfChanges').text = cmd.noc if 
cmd.data is not None: for scmd in cmd.data: xscmd = ET.SubElement(xcmd, scmd.name) if scmd.cmdID is not None: ET.SubElement(xscmd, 'CmdID').text = scmd.cmdID if scmd.type is not None or \ ( scmd.format is not None and scmd.format != constants.FORMAT_AUTO ): xsmeta = ET.SubElement(xscmd, 'Meta') # todo: implement auto encoding determination... # (the current implementation just lets XML encoding do it, # which is for most things good enough, but not so good # for sequences that need a large amount escaping such as # binary data...) if scmd.format is not None and scmd.format != constants.FORMAT_AUTO: ET.SubElement(xsmeta, 'Format', {'xmlns': constants.NAMESPACE_METINF}).text = scmd.format if scmd.type is not None: ET.SubElement(xsmeta, 'Type', {'xmlns': constants.NAMESPACE_METINF}).text = scmd.type xsitem = ET.SubElement(xscmd, 'Item') if scmd.source is not None: ET.SubElement(ET.SubElement(xsitem, 'Source'), 'LocURI').text = scmd.source if scmd.sourceParent is not None: ET.SubElement(ET.SubElement(xsitem, 'SourceParent'), 'LocURI').text = scmd.sourceParent if scmd.target is not None: ET.SubElement(ET.SubElement(xsitem, 'Target'), 'LocURI').text = scmd.target if scmd.targetParent is not None: ET.SubElement(ET.SubElement(xsitem, 'TargetParent'), 'LocURI').text = scmd.targetParent if scmd.data is not None: if isinstance(scmd.data, basestring): ET.SubElement(xsitem, 'Data').text = scmd.data else: ET.SubElement(xsitem, 'Data').append(scmd.data) continue if cmd.name == constants.CMD_MAP: ET.SubElement(ET.SubElement(xcmd, 'Source'), 'LocURI').text = cmd.source ET.SubElement(ET.SubElement(xcmd, 'Target'), 'LocURI').text = cmd.target if cmd.sourceItem is not None or cmd.targetItem is not None: xitem = ET.SubElement(xcmd, constants.CMD_MAPITEM) if cmd.sourceItem is not None: ET.SubElement(ET.SubElement(xitem, 'Source'), 'LocURI').text = cmd.sourceItem if cmd.targetItem is not None: ET.SubElement(ET.SubElement(xitem, 'Target'), 'LocURI').text = cmd.targetItem continue if 
cmd.name == constants.CMD_FINAL: if cmdidx + 1 < len(commands): raise common.InternalError('command "%s" not at tail end of commands' % (cmd.name,)) continue raise common.InternalError('unexpected command "%s"' % (cmd.name,)) return xsync
def commands2tree(self, adapter, session, commands): '''Consumes state.Command commands and converts them to an ET protocol tree''' # todo: trap errors... hdrcmd = commands[0] commands = commands[1:] if hdrcmd.name != constants.CMD_SYNCHDR: raise common.InternalError('unexpected first command "%s" (expected "%s")' % (hdrcmd.name, constants.CMD_SYNCHDR)) if hdrcmd.version != constants.SYNCML_VERSION_1_2: raise common.FeatureNotSupported('unsupported SyncML version "%s"' % (hdrcmd.version,)) xsync = ET.Element(constants.NODE_SYNCML) xhdr = ET.SubElement(xsync, hdrcmd.name) if hdrcmd.version == constants.SYNCML_VERSION_1_2: ET.SubElement(xhdr, 'VerDTD').text = constants.SYNCML_DTD_VERSION_1_2 ET.SubElement(xhdr, 'VerProto').text = hdrcmd.version ET.SubElement(xhdr, 'SessionID').text = hdrcmd.sessionID ET.SubElement(xhdr, 'MsgID').text = hdrcmd.msgID xsrc = ET.SubElement(xhdr, 'Source') ET.SubElement(xsrc, 'LocURI').text = hdrcmd.source if hdrcmd.sourceName is not None: ET.SubElement(xsrc, 'LocName').text = hdrcmd.sourceName xtgt = ET.SubElement(xhdr, 'Target') ET.SubElement(xtgt, 'LocURI').text = hdrcmd.target if hdrcmd.targetName is not None: ET.SubElement(xtgt, 'LocName').text = hdrcmd.targetName if hdrcmd.respUri is not None: ET.SubElement(xhdr, 'RespURI').text = hdrcmd.respUri if hdrcmd.auth is not None and not session.authAccepted: if hdrcmd.auth != constants.NAMESPACE_AUTH_BASIC: raise NotImplementedError('auth method "%s"' % (common.auth2string(hdrcmd.auth),)) if hdrcmd.auth == constants.NAMESPACE_AUTH_BASIC: xcred = ET.SubElement(xhdr, 'Cred') xmeta = ET.SubElement(xcred, 'Meta') ET.SubElement(xmeta, 'Format', {'xmlns': constants.NAMESPACE_METINF}).text = 'b64' ET.SubElement(xmeta, 'Type', {'xmlns': constants.NAMESPACE_METINF}).text = hdrcmd.auth ET.SubElement(xcred, 'Data').text = base64.b64encode( '%s:%s' % (adapter.peer.username, adapter.peer.password)) if hdrcmd.maxMsgSize is not None or hdrcmd.maxObjSize is not None: xmeta = ET.SubElement(xhdr, 'Meta') if 
hdrcmd.maxMsgSize is not None: ET.SubElement(xmeta, 'MaxMsgSize', {'xmlns': constants.NAMESPACE_METINF}).text = hdrcmd.maxMsgSize if hdrcmd.maxObjSize is not None: ET.SubElement(xmeta, 'MaxObjSize', {'xmlns': constants.NAMESPACE_METINF}).text = hdrcmd.maxObjSize xbody = ET.SubElement(xsync, constants.NODE_SYNCBODY) for cmdidx, cmd in enumerate(commands): xcmd = ET.SubElement(xbody, cmd.name) if cmd.cmdID is not None: ET.SubElement(xcmd, 'CmdID').text = cmd.cmdID if cmd.name == constants.CMD_ALERT: ET.SubElement(xcmd, 'Data').text = str(cmd.data) xitem = ET.SubElement(xcmd, 'Item') ET.SubElement(ET.SubElement(xitem, 'Source'), 'LocURI').text = cmd.source ET.SubElement(ET.SubElement(xitem, 'Target'), 'LocURI').text = cmd.target if cmd.lastAnchor is not None \ or cmd.nextAnchor is not None \ or cmd.maxObjSize is not None: xmeta = ET.SubElement(xitem, 'Meta') xanch = ET.SubElement(xmeta, 'Anchor', {'xmlns': constants.NAMESPACE_METINF}) if cmd.lastAnchor is not None: ET.SubElement(xanch, 'Last').text = cmd.lastAnchor if cmd.nextAnchor is not None: ET.SubElement(xanch, 'Next').text = cmd.nextAnchor if cmd.maxObjSize is not None: ET.SubElement(xmeta, 'MaxObjSize', {'xmlns': constants.NAMESPACE_METINF}).text = cmd.maxObjSize continue if cmd.name == constants.CMD_STATUS: ET.SubElement(xcmd, 'MsgRef').text = cmd.msgRef ET.SubElement(xcmd, 'CmdRef').text = cmd.cmdRef ET.SubElement(xcmd, 'Cmd').text = cmd.statusOf if cmd.sourceRef is not None: ET.SubElement(xcmd, 'SourceRef').text = cmd.sourceRef if cmd.targetRef is not None: ET.SubElement(xcmd, 'TargetRef').text = cmd.targetRef ET.SubElement(xcmd, 'Data').text = cmd.statusCode if cmd.nextAnchor is not None or cmd.lastAnchor is not None: xdata = ET.SubElement(ET.SubElement(xcmd, 'Item'), 'Data') xanch = ET.SubElement(xdata, 'Anchor', {'xmlns': constants.NAMESPACE_METINF}) if cmd.lastAnchor is not None: ET.SubElement(xanch, 'Last').text = cmd.lastAnchor if cmd.nextAnchor is not None: ET.SubElement(xanch, 'Next').text = 
cmd.nextAnchor # NOTE: this is NOT standard SyncML... if cmd.errorCode is not None or cmd.errorMsg is not None: xerr = ET.SubElement(xcmd, 'Error') if cmd.errorCode is not None: ET.SubElement(xerr, 'Code').text = cmd.errorCode if cmd.errorMsg is not None: ET.SubElement(xerr, 'Message').text = cmd.errorMsg if cmd.errorTrace is not None: ET.SubElement(xerr, 'Trace').text = cmd.errorTrace continue if cmd.name in [constants.CMD_GET, constants.CMD_PUT]: ET.SubElement(ET.SubElement(xcmd, 'Meta'), 'Type', {'xmlns': constants.NAMESPACE_METINF}).text = cmd.type if cmd.source is not None or cmd.target is not None or cmd.data: xitem = ET.SubElement(xcmd, 'Item') if cmd.source is not None: xsrc = ET.SubElement(xitem, 'Source') ET.SubElement(xsrc, 'LocURI').text = cmd.source ET.SubElement(xsrc, 'LocName').text = cmd.source if cmd.target is not None: xtgt = ET.SubElement(xitem, 'Target') ET.SubElement(xtgt, 'LocURI').text = cmd.target ET.SubElement(xtgt, 'LocName').text = cmd.target if cmd.data is not None: if isinstance(cmd.data, basestring): ET.SubElement(xitem, 'Data').text = cmd.data else: ET.SubElement(xitem, 'Data').append(cmd.data) continue if cmd.name == constants.CMD_RESULTS: ET.SubElement(xcmd, 'MsgRef').text = cmd.msgRef ET.SubElement(xcmd, 'CmdRef').text = cmd.cmdRef ET.SubElement(ET.SubElement(xcmd, 'Meta'), 'Type', {'xmlns': constants.NAMESPACE_METINF}).text = cmd.type xitem = ET.SubElement(xcmd, 'Item') xsrc = ET.SubElement(xitem, 'Source') ET.SubElement(xsrc, 'LocURI').text = cmd.source ET.SubElement(xsrc, 'LocName').text = cmd.source if cmd.data is not None: if isinstance(cmd.data, basestring): ET.SubElement(xitem, 'Data').text = cmd.data else: ET.SubElement(xitem, 'Data').append(cmd.data) continue if cmd.name == constants.CMD_SYNC: ET.SubElement(ET.SubElement(xcmd, 'Source'), 'LocURI').text = cmd.source ET.SubElement(ET.SubElement(xcmd, 'Target'), 'LocURI').text = cmd.target if cmd.noc is not None: ET.SubElement(xcmd, 'NumberOfChanges').text = cmd.noc if 
cmd.data is not None: for scmd in cmd.data: xscmd = ET.SubElement(xcmd, scmd.name) if scmd.cmdID is not None: ET.SubElement(xscmd, 'CmdID').text = scmd.cmdID if scmd.type is not None or \ ( scmd.format is not None and scmd.format != constants.FORMAT_AUTO ): xsmeta = ET.SubElement(xscmd, 'Meta') # todo: implement auto encoding determination... # (the current implementation just lets XML encoding do it, # which is for most things good enough, but not so good # for sequences that need a large amount escaping such as # binary data...) if scmd.format is not None and scmd.format != constants.FORMAT_AUTO: ET.SubElement(xsmeta, 'Format', {'xmlns': constants.NAMESPACE_METINF}).text = scmd.format if scmd.type is not None: ET.SubElement(xsmeta, 'Type', {'xmlns': constants.NAMESPACE_METINF}).text = scmd.type xsitem = ET.SubElement(xscmd, 'Item') if scmd.source is not None: ET.SubElement(ET.SubElement(xsitem, 'Source'), 'LocURI').text = scmd.source if scmd.sourceParent is not None: ET.SubElement(ET.SubElement(xsitem, 'SourceParent'), 'LocURI').text = scmd.sourceParent if scmd.target is not None: ET.SubElement(ET.SubElement(xsitem, 'Target'), 'LocURI').text = scmd.target if scmd.targetParent is not None: ET.SubElement(ET.SubElement(xsitem, 'TargetParent'), 'LocURI').text = scmd.targetParent if scmd.data is not None: if isinstance(scmd.data, basestring): ET.SubElement(xsitem, 'Data').text = scmd.data else: ET.SubElement(xsitem, 'Data').append(scmd.data) continue if cmd.name == constants.CMD_MAP: ET.SubElement(ET.SubElement(xcmd, 'Source'), 'LocURI').text = cmd.source ET.SubElement(ET.SubElement(xcmd, 'Target'), 'LocURI').text = cmd.target if cmd.sourceItem is not None or cmd.targetItem is not None: xitem = ET.SubElement(xcmd, constants.CMD_MAPITEM) if cmd.sourceItem is not None: ET.SubElement(ET.SubElement(xitem, 'Source'), 'LocURI').text = cmd.sourceItem if cmd.targetItem is not None: ET.SubElement(ET.SubElement(xitem, 'Target'), 'LocURI').text = cmd.targetItem continue if 
cmd.name == constants.CMD_FINAL: if cmdidx + 1 < len(commands): raise common.InternalError('command "%s" not at tail end of commands' % (cmd.name,)) continue raise common.InternalError('unexpected command "%s"' % (cmd.name,)) return xsync
[ "Consumes", "state", ".", "Command", "commands", "and", "converts", "them", "to", "an", "ET", "protocol", "tree" ]
metagriffin/pysyncml
python
https://github.com/metagriffin/pysyncml/blob/a583fe0dbffa8b24e5a3e151524f84868b2382bb/pysyncml/protocol.py#L184-L380
[ "def", "commands2tree", "(", "self", ",", "adapter", ",", "session", ",", "commands", ")", ":", "# todo: trap errors...", "hdrcmd", "=", "commands", "[", "0", "]", "commands", "=", "commands", "[", "1", ":", "]", "if", "hdrcmd", ".", "name", "!=", "constants", ".", "CMD_SYNCHDR", ":", "raise", "common", ".", "InternalError", "(", "'unexpected first command \"%s\" (expected \"%s\")'", "%", "(", "hdrcmd", ".", "name", ",", "constants", ".", "CMD_SYNCHDR", ")", ")", "if", "hdrcmd", ".", "version", "!=", "constants", ".", "SYNCML_VERSION_1_2", ":", "raise", "common", ".", "FeatureNotSupported", "(", "'unsupported SyncML version \"%s\"'", "%", "(", "hdrcmd", ".", "version", ",", ")", ")", "xsync", "=", "ET", ".", "Element", "(", "constants", ".", "NODE_SYNCML", ")", "xhdr", "=", "ET", ".", "SubElement", "(", "xsync", ",", "hdrcmd", ".", "name", ")", "if", "hdrcmd", ".", "version", "==", "constants", ".", "SYNCML_VERSION_1_2", ":", "ET", ".", "SubElement", "(", "xhdr", ",", "'VerDTD'", ")", ".", "text", "=", "constants", ".", "SYNCML_DTD_VERSION_1_2", "ET", ".", "SubElement", "(", "xhdr", ",", "'VerProto'", ")", ".", "text", "=", "hdrcmd", ".", "version", "ET", ".", "SubElement", "(", "xhdr", ",", "'SessionID'", ")", ".", "text", "=", "hdrcmd", ".", "sessionID", "ET", ".", "SubElement", "(", "xhdr", ",", "'MsgID'", ")", ".", "text", "=", "hdrcmd", ".", "msgID", "xsrc", "=", "ET", ".", "SubElement", "(", "xhdr", ",", "'Source'", ")", "ET", ".", "SubElement", "(", "xsrc", ",", "'LocURI'", ")", ".", "text", "=", "hdrcmd", ".", "source", "if", "hdrcmd", ".", "sourceName", "is", "not", "None", ":", "ET", ".", "SubElement", "(", "xsrc", ",", "'LocName'", ")", ".", "text", "=", "hdrcmd", ".", "sourceName", "xtgt", "=", "ET", ".", "SubElement", "(", "xhdr", ",", "'Target'", ")", "ET", ".", "SubElement", "(", "xtgt", ",", "'LocURI'", ")", ".", "text", "=", "hdrcmd", ".", "target", "if", "hdrcmd", ".", "targetName", "is", "not", "None", ":", "ET", ".", "SubElement", "(", "xtgt", 
",", "'LocName'", ")", ".", "text", "=", "hdrcmd", ".", "targetName", "if", "hdrcmd", ".", "respUri", "is", "not", "None", ":", "ET", ".", "SubElement", "(", "xhdr", ",", "'RespURI'", ")", ".", "text", "=", "hdrcmd", ".", "respUri", "if", "hdrcmd", ".", "auth", "is", "not", "None", "and", "not", "session", ".", "authAccepted", ":", "if", "hdrcmd", ".", "auth", "!=", "constants", ".", "NAMESPACE_AUTH_BASIC", ":", "raise", "NotImplementedError", "(", "'auth method \"%s\"'", "%", "(", "common", ".", "auth2string", "(", "hdrcmd", ".", "auth", ")", ",", ")", ")", "if", "hdrcmd", ".", "auth", "==", "constants", ".", "NAMESPACE_AUTH_BASIC", ":", "xcred", "=", "ET", ".", "SubElement", "(", "xhdr", ",", "'Cred'", ")", "xmeta", "=", "ET", ".", "SubElement", "(", "xcred", ",", "'Meta'", ")", "ET", ".", "SubElement", "(", "xmeta", ",", "'Format'", ",", "{", "'xmlns'", ":", "constants", ".", "NAMESPACE_METINF", "}", ")", ".", "text", "=", "'b64'", "ET", ".", "SubElement", "(", "xmeta", ",", "'Type'", ",", "{", "'xmlns'", ":", "constants", ".", "NAMESPACE_METINF", "}", ")", ".", "text", "=", "hdrcmd", ".", "auth", "ET", ".", "SubElement", "(", "xcred", ",", "'Data'", ")", ".", "text", "=", "base64", ".", "b64encode", "(", "'%s:%s'", "%", "(", "adapter", ".", "peer", ".", "username", ",", "adapter", ".", "peer", ".", "password", ")", ")", "if", "hdrcmd", ".", "maxMsgSize", "is", "not", "None", "or", "hdrcmd", ".", "maxObjSize", "is", "not", "None", ":", "xmeta", "=", "ET", ".", "SubElement", "(", "xhdr", ",", "'Meta'", ")", "if", "hdrcmd", ".", "maxMsgSize", "is", "not", "None", ":", "ET", ".", "SubElement", "(", "xmeta", ",", "'MaxMsgSize'", ",", "{", "'xmlns'", ":", "constants", ".", "NAMESPACE_METINF", "}", ")", ".", "text", "=", "hdrcmd", ".", "maxMsgSize", "if", "hdrcmd", ".", "maxObjSize", "is", "not", "None", ":", "ET", ".", "SubElement", "(", "xmeta", ",", "'MaxObjSize'", ",", "{", "'xmlns'", ":", "constants", ".", "NAMESPACE_METINF", "}", ")", ".", "text", "=", "hdrcmd", 
".", "maxObjSize", "xbody", "=", "ET", ".", "SubElement", "(", "xsync", ",", "constants", ".", "NODE_SYNCBODY", ")", "for", "cmdidx", ",", "cmd", "in", "enumerate", "(", "commands", ")", ":", "xcmd", "=", "ET", ".", "SubElement", "(", "xbody", ",", "cmd", ".", "name", ")", "if", "cmd", ".", "cmdID", "is", "not", "None", ":", "ET", ".", "SubElement", "(", "xcmd", ",", "'CmdID'", ")", ".", "text", "=", "cmd", ".", "cmdID", "if", "cmd", ".", "name", "==", "constants", ".", "CMD_ALERT", ":", "ET", ".", "SubElement", "(", "xcmd", ",", "'Data'", ")", ".", "text", "=", "str", "(", "cmd", ".", "data", ")", "xitem", "=", "ET", ".", "SubElement", "(", "xcmd", ",", "'Item'", ")", "ET", ".", "SubElement", "(", "ET", ".", "SubElement", "(", "xitem", ",", "'Source'", ")", ",", "'LocURI'", ")", ".", "text", "=", "cmd", ".", "source", "ET", ".", "SubElement", "(", "ET", ".", "SubElement", "(", "xitem", ",", "'Target'", ")", ",", "'LocURI'", ")", ".", "text", "=", "cmd", ".", "target", "if", "cmd", ".", "lastAnchor", "is", "not", "None", "or", "cmd", ".", "nextAnchor", "is", "not", "None", "or", "cmd", ".", "maxObjSize", "is", "not", "None", ":", "xmeta", "=", "ET", ".", "SubElement", "(", "xitem", ",", "'Meta'", ")", "xanch", "=", "ET", ".", "SubElement", "(", "xmeta", ",", "'Anchor'", ",", "{", "'xmlns'", ":", "constants", ".", "NAMESPACE_METINF", "}", ")", "if", "cmd", ".", "lastAnchor", "is", "not", "None", ":", "ET", ".", "SubElement", "(", "xanch", ",", "'Last'", ")", ".", "text", "=", "cmd", ".", "lastAnchor", "if", "cmd", ".", "nextAnchor", "is", "not", "None", ":", "ET", ".", "SubElement", "(", "xanch", ",", "'Next'", ")", ".", "text", "=", "cmd", ".", "nextAnchor", "if", "cmd", ".", "maxObjSize", "is", "not", "None", ":", "ET", ".", "SubElement", "(", "xmeta", ",", "'MaxObjSize'", ",", "{", "'xmlns'", ":", "constants", ".", "NAMESPACE_METINF", "}", ")", ".", "text", "=", "cmd", ".", "maxObjSize", "continue", "if", "cmd", ".", "name", "==", "constants", ".", "CMD_STATUS", 
":", "ET", ".", "SubElement", "(", "xcmd", ",", "'MsgRef'", ")", ".", "text", "=", "cmd", ".", "msgRef", "ET", ".", "SubElement", "(", "xcmd", ",", "'CmdRef'", ")", ".", "text", "=", "cmd", ".", "cmdRef", "ET", ".", "SubElement", "(", "xcmd", ",", "'Cmd'", ")", ".", "text", "=", "cmd", ".", "statusOf", "if", "cmd", ".", "sourceRef", "is", "not", "None", ":", "ET", ".", "SubElement", "(", "xcmd", ",", "'SourceRef'", ")", ".", "text", "=", "cmd", ".", "sourceRef", "if", "cmd", ".", "targetRef", "is", "not", "None", ":", "ET", ".", "SubElement", "(", "xcmd", ",", "'TargetRef'", ")", ".", "text", "=", "cmd", ".", "targetRef", "ET", ".", "SubElement", "(", "xcmd", ",", "'Data'", ")", ".", "text", "=", "cmd", ".", "statusCode", "if", "cmd", ".", "nextAnchor", "is", "not", "None", "or", "cmd", ".", "lastAnchor", "is", "not", "None", ":", "xdata", "=", "ET", ".", "SubElement", "(", "ET", ".", "SubElement", "(", "xcmd", ",", "'Item'", ")", ",", "'Data'", ")", "xanch", "=", "ET", ".", "SubElement", "(", "xdata", ",", "'Anchor'", ",", "{", "'xmlns'", ":", "constants", ".", "NAMESPACE_METINF", "}", ")", "if", "cmd", ".", "lastAnchor", "is", "not", "None", ":", "ET", ".", "SubElement", "(", "xanch", ",", "'Last'", ")", ".", "text", "=", "cmd", ".", "lastAnchor", "if", "cmd", ".", "nextAnchor", "is", "not", "None", ":", "ET", ".", "SubElement", "(", "xanch", ",", "'Next'", ")", ".", "text", "=", "cmd", ".", "nextAnchor", "# NOTE: this is NOT standard SyncML...", "if", "cmd", ".", "errorCode", "is", "not", "None", "or", "cmd", ".", "errorMsg", "is", "not", "None", ":", "xerr", "=", "ET", ".", "SubElement", "(", "xcmd", ",", "'Error'", ")", "if", "cmd", ".", "errorCode", "is", "not", "None", ":", "ET", ".", "SubElement", "(", "xerr", ",", "'Code'", ")", ".", "text", "=", "cmd", ".", "errorCode", "if", "cmd", ".", "errorMsg", "is", "not", "None", ":", "ET", ".", "SubElement", "(", "xerr", ",", "'Message'", ")", ".", "text", "=", "cmd", ".", "errorMsg", "if", "cmd", ".", 
"errorTrace", "is", "not", "None", ":", "ET", ".", "SubElement", "(", "xerr", ",", "'Trace'", ")", ".", "text", "=", "cmd", ".", "errorTrace", "continue", "if", "cmd", ".", "name", "in", "[", "constants", ".", "CMD_GET", ",", "constants", ".", "CMD_PUT", "]", ":", "ET", ".", "SubElement", "(", "ET", ".", "SubElement", "(", "xcmd", ",", "'Meta'", ")", ",", "'Type'", ",", "{", "'xmlns'", ":", "constants", ".", "NAMESPACE_METINF", "}", ")", ".", "text", "=", "cmd", ".", "type", "if", "cmd", ".", "source", "is", "not", "None", "or", "cmd", ".", "target", "is", "not", "None", "or", "cmd", ".", "data", ":", "xitem", "=", "ET", ".", "SubElement", "(", "xcmd", ",", "'Item'", ")", "if", "cmd", ".", "source", "is", "not", "None", ":", "xsrc", "=", "ET", ".", "SubElement", "(", "xitem", ",", "'Source'", ")", "ET", ".", "SubElement", "(", "xsrc", ",", "'LocURI'", ")", ".", "text", "=", "cmd", ".", "source", "ET", ".", "SubElement", "(", "xsrc", ",", "'LocName'", ")", ".", "text", "=", "cmd", ".", "source", "if", "cmd", ".", "target", "is", "not", "None", ":", "xtgt", "=", "ET", ".", "SubElement", "(", "xitem", ",", "'Target'", ")", "ET", ".", "SubElement", "(", "xtgt", ",", "'LocURI'", ")", ".", "text", "=", "cmd", ".", "target", "ET", ".", "SubElement", "(", "xtgt", ",", "'LocName'", ")", ".", "text", "=", "cmd", ".", "target", "if", "cmd", ".", "data", "is", "not", "None", ":", "if", "isinstance", "(", "cmd", ".", "data", ",", "basestring", ")", ":", "ET", ".", "SubElement", "(", "xitem", ",", "'Data'", ")", ".", "text", "=", "cmd", ".", "data", "else", ":", "ET", ".", "SubElement", "(", "xitem", ",", "'Data'", ")", ".", "append", "(", "cmd", ".", "data", ")", "continue", "if", "cmd", ".", "name", "==", "constants", ".", "CMD_RESULTS", ":", "ET", ".", "SubElement", "(", "xcmd", ",", "'MsgRef'", ")", ".", "text", "=", "cmd", ".", "msgRef", "ET", ".", "SubElement", "(", "xcmd", ",", "'CmdRef'", ")", ".", "text", "=", "cmd", ".", "cmdRef", "ET", ".", "SubElement", "(", "ET", 
".", "SubElement", "(", "xcmd", ",", "'Meta'", ")", ",", "'Type'", ",", "{", "'xmlns'", ":", "constants", ".", "NAMESPACE_METINF", "}", ")", ".", "text", "=", "cmd", ".", "type", "xitem", "=", "ET", ".", "SubElement", "(", "xcmd", ",", "'Item'", ")", "xsrc", "=", "ET", ".", "SubElement", "(", "xitem", ",", "'Source'", ")", "ET", ".", "SubElement", "(", "xsrc", ",", "'LocURI'", ")", ".", "text", "=", "cmd", ".", "source", "ET", ".", "SubElement", "(", "xsrc", ",", "'LocName'", ")", ".", "text", "=", "cmd", ".", "source", "if", "cmd", ".", "data", "is", "not", "None", ":", "if", "isinstance", "(", "cmd", ".", "data", ",", "basestring", ")", ":", "ET", ".", "SubElement", "(", "xitem", ",", "'Data'", ")", ".", "text", "=", "cmd", ".", "data", "else", ":", "ET", ".", "SubElement", "(", "xitem", ",", "'Data'", ")", ".", "append", "(", "cmd", ".", "data", ")", "continue", "if", "cmd", ".", "name", "==", "constants", ".", "CMD_SYNC", ":", "ET", ".", "SubElement", "(", "ET", ".", "SubElement", "(", "xcmd", ",", "'Source'", ")", ",", "'LocURI'", ")", ".", "text", "=", "cmd", ".", "source", "ET", ".", "SubElement", "(", "ET", ".", "SubElement", "(", "xcmd", ",", "'Target'", ")", ",", "'LocURI'", ")", ".", "text", "=", "cmd", ".", "target", "if", "cmd", ".", "noc", "is", "not", "None", ":", "ET", ".", "SubElement", "(", "xcmd", ",", "'NumberOfChanges'", ")", ".", "text", "=", "cmd", ".", "noc", "if", "cmd", ".", "data", "is", "not", "None", ":", "for", "scmd", "in", "cmd", ".", "data", ":", "xscmd", "=", "ET", ".", "SubElement", "(", "xcmd", ",", "scmd", ".", "name", ")", "if", "scmd", ".", "cmdID", "is", "not", "None", ":", "ET", ".", "SubElement", "(", "xscmd", ",", "'CmdID'", ")", ".", "text", "=", "scmd", ".", "cmdID", "if", "scmd", ".", "type", "is", "not", "None", "or", "(", "scmd", ".", "format", "is", "not", "None", "and", "scmd", ".", "format", "!=", "constants", ".", "FORMAT_AUTO", ")", ":", "xsmeta", "=", "ET", ".", "SubElement", "(", "xscmd", ",", "'Meta'", ")", "# 
todo: implement auto encoding determination...", "# (the current implementation just lets XML encoding do it,", "# which is for most things good enough, but not so good", "# for sequences that need a large amount escaping such as", "# binary data...)", "if", "scmd", ".", "format", "is", "not", "None", "and", "scmd", ".", "format", "!=", "constants", ".", "FORMAT_AUTO", ":", "ET", ".", "SubElement", "(", "xsmeta", ",", "'Format'", ",", "{", "'xmlns'", ":", "constants", ".", "NAMESPACE_METINF", "}", ")", ".", "text", "=", "scmd", ".", "format", "if", "scmd", ".", "type", "is", "not", "None", ":", "ET", ".", "SubElement", "(", "xsmeta", ",", "'Type'", ",", "{", "'xmlns'", ":", "constants", ".", "NAMESPACE_METINF", "}", ")", ".", "text", "=", "scmd", ".", "type", "xsitem", "=", "ET", ".", "SubElement", "(", "xscmd", ",", "'Item'", ")", "if", "scmd", ".", "source", "is", "not", "None", ":", "ET", ".", "SubElement", "(", "ET", ".", "SubElement", "(", "xsitem", ",", "'Source'", ")", ",", "'LocURI'", ")", ".", "text", "=", "scmd", ".", "source", "if", "scmd", ".", "sourceParent", "is", "not", "None", ":", "ET", ".", "SubElement", "(", "ET", ".", "SubElement", "(", "xsitem", ",", "'SourceParent'", ")", ",", "'LocURI'", ")", ".", "text", "=", "scmd", ".", "sourceParent", "if", "scmd", ".", "target", "is", "not", "None", ":", "ET", ".", "SubElement", "(", "ET", ".", "SubElement", "(", "xsitem", ",", "'Target'", ")", ",", "'LocURI'", ")", ".", "text", "=", "scmd", ".", "target", "if", "scmd", ".", "targetParent", "is", "not", "None", ":", "ET", ".", "SubElement", "(", "ET", ".", "SubElement", "(", "xsitem", ",", "'TargetParent'", ")", ",", "'LocURI'", ")", ".", "text", "=", "scmd", ".", "targetParent", "if", "scmd", ".", "data", "is", "not", "None", ":", "if", "isinstance", "(", "scmd", ".", "data", ",", "basestring", ")", ":", "ET", ".", "SubElement", "(", "xsitem", ",", "'Data'", ")", ".", "text", "=", "scmd", ".", "data", "else", ":", "ET", ".", "SubElement", "(", "xsitem", 
",", "'Data'", ")", ".", "append", "(", "scmd", ".", "data", ")", "continue", "if", "cmd", ".", "name", "==", "constants", ".", "CMD_MAP", ":", "ET", ".", "SubElement", "(", "ET", ".", "SubElement", "(", "xcmd", ",", "'Source'", ")", ",", "'LocURI'", ")", ".", "text", "=", "cmd", ".", "source", "ET", ".", "SubElement", "(", "ET", ".", "SubElement", "(", "xcmd", ",", "'Target'", ")", ",", "'LocURI'", ")", ".", "text", "=", "cmd", ".", "target", "if", "cmd", ".", "sourceItem", "is", "not", "None", "or", "cmd", ".", "targetItem", "is", "not", "None", ":", "xitem", "=", "ET", ".", "SubElement", "(", "xcmd", ",", "constants", ".", "CMD_MAPITEM", ")", "if", "cmd", ".", "sourceItem", "is", "not", "None", ":", "ET", ".", "SubElement", "(", "ET", ".", "SubElement", "(", "xitem", ",", "'Source'", ")", ",", "'LocURI'", ")", ".", "text", "=", "cmd", ".", "sourceItem", "if", "cmd", ".", "targetItem", "is", "not", "None", ":", "ET", ".", "SubElement", "(", "ET", ".", "SubElement", "(", "xitem", ",", "'Target'", ")", ",", "'LocURI'", ")", ".", "text", "=", "cmd", ".", "targetItem", "continue", "if", "cmd", ".", "name", "==", "constants", ".", "CMD_FINAL", ":", "if", "cmdidx", "+", "1", "<", "len", "(", "commands", ")", ":", "raise", "common", ".", "InternalError", "(", "'command \"%s\" not at tail end of commands'", "%", "(", "cmd", ".", "name", ",", ")", ")", "continue", "raise", "common", ".", "InternalError", "(", "'unexpected command \"%s\"'", "%", "(", "cmd", ".", "name", ",", ")", ")", "return", "xsync" ]
a583fe0dbffa8b24e5a3e151524f84868b2382bb
valid
Protocol.tree2commands
Consumes an ET protocol tree and converts it to state.Command commands
pysyncml/protocol.py
def tree2commands(self, adapter, session, lastcmds, xsync): '''Consumes an ET protocol tree and converts it to state.Command commands''' # do some preliminary sanity checks... # todo: do i really want to be using assert statements?... assert xsync.tag == constants.NODE_SYNCML assert len(xsync) == 2 assert xsync[0].tag == constants.CMD_SYNCHDR assert xsync[1].tag == constants.NODE_SYNCBODY version = xsync[0].findtext('VerProto') if version != constants.SYNCML_VERSION_1_2: raise common.FeatureNotSupported('unsupported SyncML version "%s" (expected "%s")' \ % (version, constants.SYNCML_VERSION_1_2)) verdtd = xsync[0].findtext('VerDTD') if verdtd != constants.SYNCML_DTD_VERSION_1_2: raise common.FeatureNotSupported('unsupported SyncML DTD version "%s" (expected "%s")' \ % (verdtd, constants.SYNCML_DTD_VERSION_1_2)) ret = self.initialize(adapter, session, xsync) hdrcmd = ret[0] if session.isServer: log.debug('received request SyncML message from "%s" (s%s.m%s)', hdrcmd.target, hdrcmd.sessionID, hdrcmd.msgID) else: log.debug('received response SyncML message from "%s" (s%s.m%s)', lastcmds[0].target, lastcmds[0].sessionID, lastcmds[0].msgID) try: return self._tree2commands(adapter, session, lastcmds, xsync, ret) except Exception, e: if not session.isServer: raise # TODO: make this configurable as to whether or not any error # is sent back to the peer as a SyncML "standardized" error # status... code = '%s.%s' % (e.__class__.__module__, e.__class__.__name__) msg = ''.join(traceback.format_exception_only(type(e), e)).strip() log.exception('failed while interpreting command tree: %s', msg) # TODO: for some reason, the active exception is not being logged... 
return [ hdrcmd, state.Command( name = constants.CMD_STATUS, cmdID = '1', msgRef = session.pendingMsgID, cmdRef = 0, sourceRef = xsync[0].findtext('Source/LocURI'), targetRef = xsync[0].findtext('Target/LocURI'), statusOf = constants.CMD_SYNCHDR, statusCode = constants.STATUS_COMMAND_FAILED, errorCode = code, errorMsg = msg, errorTrace = ''.join(traceback.format_exception(type(e), e, sys.exc_info()[2])), ), state.Command(name=constants.CMD_FINAL)]
def tree2commands(self, adapter, session, lastcmds, xsync): '''Consumes an ET protocol tree and converts it to state.Command commands''' # do some preliminary sanity checks... # todo: do i really want to be using assert statements?... assert xsync.tag == constants.NODE_SYNCML assert len(xsync) == 2 assert xsync[0].tag == constants.CMD_SYNCHDR assert xsync[1].tag == constants.NODE_SYNCBODY version = xsync[0].findtext('VerProto') if version != constants.SYNCML_VERSION_1_2: raise common.FeatureNotSupported('unsupported SyncML version "%s" (expected "%s")' \ % (version, constants.SYNCML_VERSION_1_2)) verdtd = xsync[0].findtext('VerDTD') if verdtd != constants.SYNCML_DTD_VERSION_1_2: raise common.FeatureNotSupported('unsupported SyncML DTD version "%s" (expected "%s")' \ % (verdtd, constants.SYNCML_DTD_VERSION_1_2)) ret = self.initialize(adapter, session, xsync) hdrcmd = ret[0] if session.isServer: log.debug('received request SyncML message from "%s" (s%s.m%s)', hdrcmd.target, hdrcmd.sessionID, hdrcmd.msgID) else: log.debug('received response SyncML message from "%s" (s%s.m%s)', lastcmds[0].target, lastcmds[0].sessionID, lastcmds[0].msgID) try: return self._tree2commands(adapter, session, lastcmds, xsync, ret) except Exception, e: if not session.isServer: raise # TODO: make this configurable as to whether or not any error # is sent back to the peer as a SyncML "standardized" error # status... code = '%s.%s' % (e.__class__.__module__, e.__class__.__name__) msg = ''.join(traceback.format_exception_only(type(e), e)).strip() log.exception('failed while interpreting command tree: %s', msg) # TODO: for some reason, the active exception is not being logged... 
return [ hdrcmd, state.Command( name = constants.CMD_STATUS, cmdID = '1', msgRef = session.pendingMsgID, cmdRef = 0, sourceRef = xsync[0].findtext('Source/LocURI'), targetRef = xsync[0].findtext('Target/LocURI'), statusOf = constants.CMD_SYNCHDR, statusCode = constants.STATUS_COMMAND_FAILED, errorCode = code, errorMsg = msg, errorTrace = ''.join(traceback.format_exception(type(e), e, sys.exc_info()[2])), ), state.Command(name=constants.CMD_FINAL)]
[ "Consumes", "an", "ET", "protocol", "tree", "and", "converts", "it", "to", "state", ".", "Command", "commands" ]
metagriffin/pysyncml
python
https://github.com/metagriffin/pysyncml/blob/a583fe0dbffa8b24e5a3e151524f84868b2382bb/pysyncml/protocol.py#L383-L440
[ "def", "tree2commands", "(", "self", ",", "adapter", ",", "session", ",", "lastcmds", ",", "xsync", ")", ":", "# do some preliminary sanity checks...", "# todo: do i really want to be using assert statements?...", "assert", "xsync", ".", "tag", "==", "constants", ".", "NODE_SYNCML", "assert", "len", "(", "xsync", ")", "==", "2", "assert", "xsync", "[", "0", "]", ".", "tag", "==", "constants", ".", "CMD_SYNCHDR", "assert", "xsync", "[", "1", "]", ".", "tag", "==", "constants", ".", "NODE_SYNCBODY", "version", "=", "xsync", "[", "0", "]", ".", "findtext", "(", "'VerProto'", ")", "if", "version", "!=", "constants", ".", "SYNCML_VERSION_1_2", ":", "raise", "common", ".", "FeatureNotSupported", "(", "'unsupported SyncML version \"%s\" (expected \"%s\")'", "%", "(", "version", ",", "constants", ".", "SYNCML_VERSION_1_2", ")", ")", "verdtd", "=", "xsync", "[", "0", "]", ".", "findtext", "(", "'VerDTD'", ")", "if", "verdtd", "!=", "constants", ".", "SYNCML_DTD_VERSION_1_2", ":", "raise", "common", ".", "FeatureNotSupported", "(", "'unsupported SyncML DTD version \"%s\" (expected \"%s\")'", "%", "(", "verdtd", ",", "constants", ".", "SYNCML_DTD_VERSION_1_2", ")", ")", "ret", "=", "self", ".", "initialize", "(", "adapter", ",", "session", ",", "xsync", ")", "hdrcmd", "=", "ret", "[", "0", "]", "if", "session", ".", "isServer", ":", "log", ".", "debug", "(", "'received request SyncML message from \"%s\" (s%s.m%s)'", ",", "hdrcmd", ".", "target", ",", "hdrcmd", ".", "sessionID", ",", "hdrcmd", ".", "msgID", ")", "else", ":", "log", ".", "debug", "(", "'received response SyncML message from \"%s\" (s%s.m%s)'", ",", "lastcmds", "[", "0", "]", ".", "target", ",", "lastcmds", "[", "0", "]", ".", "sessionID", ",", "lastcmds", "[", "0", "]", ".", "msgID", ")", "try", ":", "return", "self", ".", "_tree2commands", "(", "adapter", ",", "session", ",", "lastcmds", ",", "xsync", ",", "ret", ")", "except", "Exception", ",", "e", ":", "if", "not", "session", ".", "isServer", ":", 
"raise", "# TODO: make this configurable as to whether or not any error", "# is sent back to the peer as a SyncML \"standardized\" error", "# status...", "code", "=", "'%s.%s'", "%", "(", "e", ".", "__class__", ".", "__module__", ",", "e", ".", "__class__", ".", "__name__", ")", "msg", "=", "''", ".", "join", "(", "traceback", ".", "format_exception_only", "(", "type", "(", "e", ")", ",", "e", ")", ")", ".", "strip", "(", ")", "log", ".", "exception", "(", "'failed while interpreting command tree: %s'", ",", "msg", ")", "# TODO: for some reason, the active exception is not being logged...", "return", "[", "hdrcmd", ",", "state", ".", "Command", "(", "name", "=", "constants", ".", "CMD_STATUS", ",", "cmdID", "=", "'1'", ",", "msgRef", "=", "session", ".", "pendingMsgID", ",", "cmdRef", "=", "0", ",", "sourceRef", "=", "xsync", "[", "0", "]", ".", "findtext", "(", "'Source/LocURI'", ")", ",", "targetRef", "=", "xsync", "[", "0", "]", ".", "findtext", "(", "'Target/LocURI'", ")", ",", "statusOf", "=", "constants", ".", "CMD_SYNCHDR", ",", "statusCode", "=", "constants", ".", "STATUS_COMMAND_FAILED", ",", "errorCode", "=", "code", ",", "errorMsg", "=", "msg", ",", "errorTrace", "=", "''", ".", "join", "(", "traceback", ".", "format_exception", "(", "type", "(", "e", ")", ",", "e", ",", "sys", ".", "exc_info", "(", ")", "[", "2", "]", ")", ")", ",", ")", ",", "state", ".", "Command", "(", "name", "=", "constants", ".", "CMD_FINAL", ")", "]" ]
a583fe0dbffa8b24e5a3e151524f84868b2382bb
valid
Item.dumps
[OPTIONAL] Identical to :meth:`dump`, except the serialized form is returned as a string representation. As documented in :meth:`dump`, the return value can optionally be a three-element tuple of (contentType, version, data) if the provided content-type should be overridden or enhanced. The default implementation just wraps :meth:`dump`.
pysyncml/items/base.py
def dumps(self, contentType=None, version=None): ''' [OPTIONAL] Identical to :meth:`dump`, except the serialized form is returned as a string representation. As documented in :meth:`dump`, the return value can optionally be a three-element tuple of (contentType, version, data) if the provided content-type should be overridden or enhanced. The default implementation just wraps :meth:`dump`. ''' buf = six.StringIO() ret = self.dump(buf, contentType, version) if ret is None: return buf.getvalue() return (ret[0], ret[1], buf.getvalue())
def dumps(self, contentType=None, version=None): ''' [OPTIONAL] Identical to :meth:`dump`, except the serialized form is returned as a string representation. As documented in :meth:`dump`, the return value can optionally be a three-element tuple of (contentType, version, data) if the provided content-type should be overridden or enhanced. The default implementation just wraps :meth:`dump`. ''' buf = six.StringIO() ret = self.dump(buf, contentType, version) if ret is None: return buf.getvalue() return (ret[0], ret[1], buf.getvalue())
[ "[", "OPTIONAL", "]", "Identical", "to", ":", "meth", ":", "dump", "except", "the", "serialized", "form", "is", "returned", "as", "a", "string", "representation", ".", "As", "documented", "in", ":", "meth", ":", "dump", "the", "return", "value", "can", "optionally", "be", "a", "three", "-", "element", "tuple", "of", "(", "contentType", "version", "data", ")", "if", "the", "provided", "content", "-", "type", "should", "be", "overridden", "or", "enhanced", ".", "The", "default", "implementation", "just", "wraps", ":", "meth", ":", "dump", "." ]
metagriffin/pysyncml
python
https://github.com/metagriffin/pysyncml/blob/a583fe0dbffa8b24e5a3e151524f84868b2382bb/pysyncml/items/base.py#L70-L83
[ "def", "dumps", "(", "self", ",", "contentType", "=", "None", ",", "version", "=", "None", ")", ":", "buf", "=", "six", ".", "StringIO", "(", ")", "ret", "=", "self", ".", "dump", "(", "buf", ",", "contentType", ",", "version", ")", "if", "ret", "is", "None", ":", "return", "buf", ".", "getvalue", "(", ")", "return", "(", "ret", "[", "0", "]", ",", "ret", "[", "1", "]", ",", "buf", ".", "getvalue", "(", ")", ")" ]
a583fe0dbffa8b24e5a3e151524f84868b2382bb
valid
Item.loads
[OPTIONAL] Identical to :meth:`load`, except the serialized form is provided as a string representation in `data` instead of as a stream. The default implementation just wraps :meth:`load`.
pysyncml/items/base.py
def loads(cls, data, contentType=None, version=None): ''' [OPTIONAL] Identical to :meth:`load`, except the serialized form is provided as a string representation in `data` instead of as a stream. The default implementation just wraps :meth:`load`. ''' buf = six.StringIO(data) return cls.load(buf, contentType, version)
def loads(cls, data, contentType=None, version=None): ''' [OPTIONAL] Identical to :meth:`load`, except the serialized form is provided as a string representation in `data` instead of as a stream. The default implementation just wraps :meth:`load`. ''' buf = six.StringIO(data) return cls.load(buf, contentType, version)
[ "[", "OPTIONAL", "]", "Identical", "to", ":", "meth", ":", "load", "except", "the", "serialized", "form", "is", "provided", "as", "a", "string", "representation", "in", "data", "instead", "of", "as", "a", "stream", ".", "The", "default", "implementation", "just", "wraps", ":", "meth", ":", "load", "." ]
metagriffin/pysyncml
python
https://github.com/metagriffin/pysyncml/blob/a583fe0dbffa8b24e5a3e151524f84868b2382bb/pysyncml/items/base.py#L101-L108
[ "def", "loads", "(", "cls", ",", "data", ",", "contentType", "=", "None", ",", "version", "=", "None", ")", ":", "buf", "=", "six", ".", "StringIO", "(", "data", ")", "return", "cls", ".", "load", "(", "buf", ",", "contentType", ",", "version", ")" ]
a583fe0dbffa8b24e5a3e151524f84868b2382bb
valid
Agent.dumpsItem
[OPTIONAL] Identical to :meth:`dump`, except the serialized form is returned as a string representation. As documented in :meth:`dump`, the return value can optionally be a three-element tuple of (contentType, version, data) if the provided content-type should be overridden or enhanced. The default implementation just wraps :meth:`dump`.
pysyncml/agents/base.py
def dumpsItem(self, item, contentType=None, version=None): ''' [OPTIONAL] Identical to :meth:`dump`, except the serialized form is returned as a string representation. As documented in :meth:`dump`, the return value can optionally be a three-element tuple of (contentType, version, data) if the provided content-type should be overridden or enhanced. The default implementation just wraps :meth:`dump`. ''' buf = six.StringIO() ret = self.dumpItem(item, buf, contentType, version) if ret is None: return buf.getvalue() return (ret[0], ret[1], buf.getvalue())
def dumpsItem(self, item, contentType=None, version=None): ''' [OPTIONAL] Identical to :meth:`dump`, except the serialized form is returned as a string representation. As documented in :meth:`dump`, the return value can optionally be a three-element tuple of (contentType, version, data) if the provided content-type should be overridden or enhanced. The default implementation just wraps :meth:`dump`. ''' buf = six.StringIO() ret = self.dumpItem(item, buf, contentType, version) if ret is None: return buf.getvalue() return (ret[0], ret[1], buf.getvalue())
[ "[", "OPTIONAL", "]", "Identical", "to", ":", "meth", ":", "dump", "except", "the", "serialized", "form", "is", "returned", "as", "a", "string", "representation", ".", "As", "documented", "in", ":", "meth", ":", "dump", "the", "return", "value", "can", "optionally", "be", "a", "three", "-", "element", "tuple", "of", "(", "contentType", "version", "data", ")", "if", "the", "provided", "content", "-", "type", "should", "be", "overridden", "or", "enhanced", ".", "The", "default", "implementation", "just", "wraps", ":", "meth", ":", "dump", "." ]
metagriffin/pysyncml
python
https://github.com/metagriffin/pysyncml/blob/a583fe0dbffa8b24e5a3e151524f84868b2382bb/pysyncml/agents/base.py#L106-L119
[ "def", "dumpsItem", "(", "self", ",", "item", ",", "contentType", "=", "None", ",", "version", "=", "None", ")", ":", "buf", "=", "six", ".", "StringIO", "(", ")", "ret", "=", "self", ".", "dumpItem", "(", "item", ",", "buf", ",", "contentType", ",", "version", ")", "if", "ret", "is", "None", ":", "return", "buf", ".", "getvalue", "(", ")", "return", "(", "ret", "[", "0", "]", ",", "ret", "[", "1", "]", ",", "buf", ".", "getvalue", "(", ")", ")" ]
a583fe0dbffa8b24e5a3e151524f84868b2382bb
valid
Agent.loadsItem
[OPTIONAL] Identical to :meth:`loadItem`, except the serialized form is provided as a string representation in `data` instead of as a stream. The default implementation just wraps :meth:`loadItem`.
pysyncml/agents/base.py
def loadsItem(self, data, contentType=None, version=None): ''' [OPTIONAL] Identical to :meth:`loadItem`, except the serialized form is provided as a string representation in `data` instead of as a stream. The default implementation just wraps :meth:`loadItem`. ''' buf = six.StringIO(data) return self.loadItem(buf, contentType, version)
def loadsItem(self, data, contentType=None, version=None): ''' [OPTIONAL] Identical to :meth:`loadItem`, except the serialized form is provided as a string representation in `data` instead of as a stream. The default implementation just wraps :meth:`loadItem`. ''' buf = six.StringIO(data) return self.loadItem(buf, contentType, version)
[ "[", "OPTIONAL", "]", "Identical", "to", ":", "meth", ":", "loadItem", "except", "the", "serialized", "form", "is", "provided", "as", "a", "string", "representation", "in", "data", "instead", "of", "as", "a", "stream", ".", "The", "default", "implementation", "just", "wraps", ":", "meth", ":", "loadItem", "." ]
metagriffin/pysyncml
python
https://github.com/metagriffin/pysyncml/blob/a583fe0dbffa8b24e5a3e151524f84868b2382bb/pysyncml/agents/base.py#L135-L143
[ "def", "loadsItem", "(", "self", ",", "data", ",", "contentType", "=", "None", ",", "version", "=", "None", ")", ":", "buf", "=", "six", ".", "StringIO", "(", "data", ")", "return", "self", ".", "loadItem", "(", "buf", ",", "contentType", ",", "version", ")" ]
a583fe0dbffa8b24e5a3e151524f84868b2382bb
valid
Agent.matchItem
[OPTIONAL] Attempts to find the specified item and returns an item that describes the same object although it's specific properties may be different. For example, a contact whose name is an identical match, but whose telephone number has changed would return the matched item. ``None`` should be returned if no match is found, otherwise the item that `item` matched should be returned. This is used primarily when a slow-sync is invoked and objects that exist in both peers should not be replicated. Note that **NO** merging of the items' properties should be done; that will be initiated via a separate call to :meth:`mergeItems`. This method by default will iterate over all items (by calling :meth:`getAllItems`) and compare them using ``cmp()``. This means that if the items managed by this agent implement the ``__eq__`` or ``__cmp__`` methods, then matching items will be detected and returned. Otherwise, any items that exist in both peers will be duplicated on slow-sync. Sub-classes *should* implement a more efficient method of finding matching items. See :doc:`../merging` for details.
pysyncml/agents/base.py
def matchItem(self, item): ''' [OPTIONAL] Attempts to find the specified item and returns an item that describes the same object although it's specific properties may be different. For example, a contact whose name is an identical match, but whose telephone number has changed would return the matched item. ``None`` should be returned if no match is found, otherwise the item that `item` matched should be returned. This is used primarily when a slow-sync is invoked and objects that exist in both peers should not be replicated. Note that **NO** merging of the items' properties should be done; that will be initiated via a separate call to :meth:`mergeItems`. This method by default will iterate over all items (by calling :meth:`getAllItems`) and compare them using ``cmp()``. This means that if the items managed by this agent implement the ``__eq__`` or ``__cmp__`` methods, then matching items will be detected and returned. Otherwise, any items that exist in both peers will be duplicated on slow-sync. Sub-classes *should* implement a more efficient method of finding matching items. See :doc:`../merging` for details. ''' for match in self.getAllItems(): if cmp(match, item) == 0: return match return None
def matchItem(self, item): ''' [OPTIONAL] Attempts to find the specified item and returns an item that describes the same object although it's specific properties may be different. For example, a contact whose name is an identical match, but whose telephone number has changed would return the matched item. ``None`` should be returned if no match is found, otherwise the item that `item` matched should be returned. This is used primarily when a slow-sync is invoked and objects that exist in both peers should not be replicated. Note that **NO** merging of the items' properties should be done; that will be initiated via a separate call to :meth:`mergeItems`. This method by default will iterate over all items (by calling :meth:`getAllItems`) and compare them using ``cmp()``. This means that if the items managed by this agent implement the ``__eq__`` or ``__cmp__`` methods, then matching items will be detected and returned. Otherwise, any items that exist in both peers will be duplicated on slow-sync. Sub-classes *should* implement a more efficient method of finding matching items. See :doc:`../merging` for details. ''' for match in self.getAllItems(): if cmp(match, item) == 0: return match return None
[ "[", "OPTIONAL", "]", "Attempts", "to", "find", "the", "specified", "item", "and", "returns", "an", "item", "that", "describes", "the", "same", "object", "although", "it", "s", "specific", "properties", "may", "be", "different", ".", "For", "example", "a", "contact", "whose", "name", "is", "an", "identical", "match", "but", "whose", "telephone", "number", "has", "changed", "would", "return", "the", "matched", "item", ".", "None", "should", "be", "returned", "if", "no", "match", "is", "found", "otherwise", "the", "item", "that", "item", "matched", "should", "be", "returned", "." ]
metagriffin/pysyncml
python
https://github.com/metagriffin/pysyncml/blob/a583fe0dbffa8b24e5a3e151524f84868b2382bb/pysyncml/agents/base.py#L208-L239
[ "def", "matchItem", "(", "self", ",", "item", ")", ":", "for", "match", "in", "self", ".", "getAllItems", "(", ")", ":", "if", "cmp", "(", "match", ",", "item", ")", "==", "0", ":", "return", "match", "return", "None" ]
a583fe0dbffa8b24e5a3e151524f84868b2382bb
valid
initialize_indices
Initializes the indices
jackal/scripts/status.py
def initialize_indices(): """ Initializes the indices """ Host.init() Range.init() Service.init() User.init() Credential.init() Log.init()
def initialize_indices(): """ Initializes the indices """ Host.init() Range.init() Service.init() User.init() Credential.init() Log.init()
[ "Initializes", "the", "indices" ]
mwgielen/jackal
python
https://github.com/mwgielen/jackal/blob/7fe62732eb5194b7246215d5277fb37c398097bf/jackal/scripts/status.py#L34-L43
[ "def", "initialize_indices", "(", ")", ":", "Host", ".", "init", "(", ")", "Range", ".", "init", "(", ")", "Service", ".", "init", "(", ")", "User", ".", "init", "(", ")", "Credential", ".", "init", "(", ")", "Log", ".", "init", "(", ")" ]
7fe62732eb5194b7246215d5277fb37c398097bf
valid
ContentTypeInfoMixIn.toSyncML
Returns an ElementTree node representing this ContentTypeInfo. If `nodeName` is not None, then it will be used as the containing element node name (this is useful, for example, to differentiate between a standard content-type and a preferred content-type). If `uniqueVerCt` is True, then an array of elements will be returned instead of a single element with multiple VerCT elements (for content-types that support multiple versions).
pysyncml/ctype.py
def toSyncML(self, nodeName=None, uniqueVerCt=False): ''' Returns an ElementTree node representing this ContentTypeInfo. If `nodeName` is not None, then it will be used as the containing element node name (this is useful, for example, to differentiate between a standard content-type and a preferred content-type). If `uniqueVerCt` is True, then an array of elements will be returned instead of a single element with multiple VerCT elements (for content-types that support multiple versions). ''' if uniqueVerCt: ret = [] for v in self.versions: tmp = ET.Element(nodeName or 'ContentType') ET.SubElement(tmp, 'CTType').text = self.ctype ET.SubElement(tmp, 'VerCT').text = v ret.append(tmp) return ret ret = ET.Element(nodeName or 'ContentType') ET.SubElement(ret, 'CTType').text = self.ctype for v in self.versions: ET.SubElement(ret, 'VerCT').text = v return ret
def toSyncML(self, nodeName=None, uniqueVerCt=False): ''' Returns an ElementTree node representing this ContentTypeInfo. If `nodeName` is not None, then it will be used as the containing element node name (this is useful, for example, to differentiate between a standard content-type and a preferred content-type). If `uniqueVerCt` is True, then an array of elements will be returned instead of a single element with multiple VerCT elements (for content-types that support multiple versions). ''' if uniqueVerCt: ret = [] for v in self.versions: tmp = ET.Element(nodeName or 'ContentType') ET.SubElement(tmp, 'CTType').text = self.ctype ET.SubElement(tmp, 'VerCT').text = v ret.append(tmp) return ret ret = ET.Element(nodeName or 'ContentType') ET.SubElement(ret, 'CTType').text = self.ctype for v in self.versions: ET.SubElement(ret, 'VerCT').text = v return ret
[ "Returns", "an", "ElementTree", "node", "representing", "this", "ContentTypeInfo", ".", "If", "nodeName", "is", "not", "None", "then", "it", "will", "be", "used", "as", "the", "containing", "element", "node", "name", "(", "this", "is", "useful", "for", "example", "to", "differentiate", "between", "a", "standard", "content", "-", "type", "and", "a", "preferred", "content", "-", "type", ")", ".", "If", "uniqueVerCt", "is", "True", "then", "an", "array", "of", "elements", "will", "be", "returned", "instead", "of", "a", "single", "element", "with", "multiple", "VerCT", "elements", "(", "for", "content", "-", "types", "that", "support", "multiple", "versions", ")", "." ]
metagriffin/pysyncml
python
https://github.com/metagriffin/pysyncml/blob/a583fe0dbffa8b24e5a3e151524f84868b2382bb/pysyncml/ctype.py#L87-L109
[ "def", "toSyncML", "(", "self", ",", "nodeName", "=", "None", ",", "uniqueVerCt", "=", "False", ")", ":", "if", "uniqueVerCt", ":", "ret", "=", "[", "]", "for", "v", "in", "self", ".", "versions", ":", "tmp", "=", "ET", ".", "Element", "(", "nodeName", "or", "'ContentType'", ")", "ET", ".", "SubElement", "(", "tmp", ",", "'CTType'", ")", ".", "text", "=", "self", ".", "ctype", "ET", ".", "SubElement", "(", "tmp", ",", "'VerCT'", ")", ".", "text", "=", "v", "ret", ".", "append", "(", "tmp", ")", "return", "ret", "ret", "=", "ET", ".", "Element", "(", "nodeName", "or", "'ContentType'", ")", "ET", ".", "SubElement", "(", "ret", ",", "'CTType'", ")", ".", "text", "=", "self", ".", "ctype", "for", "v", "in", "self", ".", "versions", ":", "ET", ".", "SubElement", "(", "ret", ",", "'VerCT'", ")", ".", "text", "=", "v", "return", "ret" ]
a583fe0dbffa8b24e5a3e151524f84868b2382bb
valid
parse_single_computer
Parse the entry into a computer object.
jackal/scripts/domaindump.py
def parse_single_computer(entry): """ Parse the entry into a computer object. """ computer = Computer(dns_hostname=get_field(entry, 'dNSHostName'), description=get_field( entry, 'description'), os=get_field(entry, 'operatingSystem'), group_id=get_field(entry, 'primaryGroupID')) try: ip = str(ipaddress.ip_address(get_field(entry, 'IPv4'))) except ValueError: ip = '' if ip: computer.ip = ip elif computer.dns_hostname: computer.ip = resolve_ip(computer.dns_hostname) return computer
def parse_single_computer(entry): """ Parse the entry into a computer object. """ computer = Computer(dns_hostname=get_field(entry, 'dNSHostName'), description=get_field( entry, 'description'), os=get_field(entry, 'operatingSystem'), group_id=get_field(entry, 'primaryGroupID')) try: ip = str(ipaddress.ip_address(get_field(entry, 'IPv4'))) except ValueError: ip = '' if ip: computer.ip = ip elif computer.dns_hostname: computer.ip = resolve_ip(computer.dns_hostname) return computer
[ "Parse", "the", "entry", "into", "a", "computer", "object", "." ]
mwgielen/jackal
python
https://github.com/mwgielen/jackal/blob/7fe62732eb5194b7246215d5277fb37c398097bf/jackal/scripts/domaindump.py#L48-L63
[ "def", "parse_single_computer", "(", "entry", ")", ":", "computer", "=", "Computer", "(", "dns_hostname", "=", "get_field", "(", "entry", ",", "'dNSHostName'", ")", ",", "description", "=", "get_field", "(", "entry", ",", "'description'", ")", ",", "os", "=", "get_field", "(", "entry", ",", "'operatingSystem'", ")", ",", "group_id", "=", "get_field", "(", "entry", ",", "'primaryGroupID'", ")", ")", "try", ":", "ip", "=", "str", "(", "ipaddress", ".", "ip_address", "(", "get_field", "(", "entry", ",", "'IPv4'", ")", ")", ")", "except", "ValueError", ":", "ip", "=", "''", "if", "ip", ":", "computer", ".", "ip", "=", "ip", "elif", "computer", ".", "dns_hostname", ":", "computer", ".", "ip", "=", "resolve_ip", "(", "computer", ".", "dns_hostname", ")", "return", "computer" ]
7fe62732eb5194b7246215d5277fb37c398097bf
valid
parse_domain_computers
Parse the file and extract the computers, import the computers that resolve into jackal.
jackal/scripts/domaindump.py
def parse_domain_computers(filename): """ Parse the file and extract the computers, import the computers that resolve into jackal. """ with open(filename) as f: data = json.loads(f.read()) hs = HostSearch() count = 0 entry_count = 0 print_notification("Parsing {} entries".format(len(data))) for system in data: entry_count += 1 parsed = parse_single_computer(system) if parsed.ip: try: host = hs.id_to_object(parsed.ip) host.description.append(parsed.description) host.hostname.append(parsed.dns_hostname) if parsed.os: host.os = parsed.os host.domain_controller = parsed.dc host.add_tag('domaindump') host.save() count += 1 except ValueError: pass sys.stdout.write('\r') sys.stdout.write( "[{}/{}] {} resolved".format(entry_count, len(data), count)) sys.stdout.flush() sys.stdout.write('\r') return count
def parse_domain_computers(filename): """ Parse the file and extract the computers, import the computers that resolve into jackal. """ with open(filename) as f: data = json.loads(f.read()) hs = HostSearch() count = 0 entry_count = 0 print_notification("Parsing {} entries".format(len(data))) for system in data: entry_count += 1 parsed = parse_single_computer(system) if parsed.ip: try: host = hs.id_to_object(parsed.ip) host.description.append(parsed.description) host.hostname.append(parsed.dns_hostname) if parsed.os: host.os = parsed.os host.domain_controller = parsed.dc host.add_tag('domaindump') host.save() count += 1 except ValueError: pass sys.stdout.write('\r') sys.stdout.write( "[{}/{}] {} resolved".format(entry_count, len(data), count)) sys.stdout.flush() sys.stdout.write('\r') return count
[ "Parse", "the", "file", "and", "extract", "the", "computers", "import", "the", "computers", "that", "resolve", "into", "jackal", "." ]
mwgielen/jackal
python
https://github.com/mwgielen/jackal/blob/7fe62732eb5194b7246215d5277fb37c398097bf/jackal/scripts/domaindump.py#L66-L97
[ "def", "parse_domain_computers", "(", "filename", ")", ":", "with", "open", "(", "filename", ")", "as", "f", ":", "data", "=", "json", ".", "loads", "(", "f", ".", "read", "(", ")", ")", "hs", "=", "HostSearch", "(", ")", "count", "=", "0", "entry_count", "=", "0", "print_notification", "(", "\"Parsing {} entries\"", ".", "format", "(", "len", "(", "data", ")", ")", ")", "for", "system", "in", "data", ":", "entry_count", "+=", "1", "parsed", "=", "parse_single_computer", "(", "system", ")", "if", "parsed", ".", "ip", ":", "try", ":", "host", "=", "hs", ".", "id_to_object", "(", "parsed", ".", "ip", ")", "host", ".", "description", ".", "append", "(", "parsed", ".", "description", ")", "host", ".", "hostname", ".", "append", "(", "parsed", ".", "dns_hostname", ")", "if", "parsed", ".", "os", ":", "host", ".", "os", "=", "parsed", ".", "os", "host", ".", "domain_controller", "=", "parsed", ".", "dc", "host", ".", "add_tag", "(", "'domaindump'", ")", "host", ".", "save", "(", ")", "count", "+=", "1", "except", "ValueError", ":", "pass", "sys", ".", "stdout", ".", "write", "(", "'\\r'", ")", "sys", ".", "stdout", ".", "write", "(", "\"[{}/{}] {} resolved\"", ".", "format", "(", "entry_count", ",", "len", "(", "data", ")", ",", "count", ")", ")", "sys", ".", "stdout", ".", "flush", "(", ")", "sys", ".", "stdout", ".", "write", "(", "'\\r'", ")", "return", "count" ]
7fe62732eb5194b7246215d5277fb37c398097bf
valid
parse_user
Parses a single entry from the domaindump
jackal/scripts/domaindump.py
def parse_user(entry, domain_groups): """ Parses a single entry from the domaindump """ result = {} distinguished_name = get_field(entry, 'distinguishedName') result['domain'] = ".".join(distinguished_name.split(',DC=')[1:]) result['name'] = get_field(entry, 'name') result['username'] = get_field(entry, 'sAMAccountName') result['description'] = get_field(entry, 'description') result['sid'] = get_field(entry, 'objectSid').split('-')[-1] primary_group = get_field(entry, 'primaryGroupID') member_of = entry['attributes'].get('memberOf', []) groups = [] for member in member_of: for e in member.split(','): if e.startswith('CN='): groups.append(e[3:]) groups.append(domain_groups.get(primary_group, '')) result['groups'] = groups flags = [] try: uac = int(get_field(entry, 'userAccountControl')) for flag, value in uac_flags.items(): if uac & value: flags.append(flag) except ValueError: pass result['flags'] = flags return result
def parse_user(entry, domain_groups): """ Parses a single entry from the domaindump """ result = {} distinguished_name = get_field(entry, 'distinguishedName') result['domain'] = ".".join(distinguished_name.split(',DC=')[1:]) result['name'] = get_field(entry, 'name') result['username'] = get_field(entry, 'sAMAccountName') result['description'] = get_field(entry, 'description') result['sid'] = get_field(entry, 'objectSid').split('-')[-1] primary_group = get_field(entry, 'primaryGroupID') member_of = entry['attributes'].get('memberOf', []) groups = [] for member in member_of: for e in member.split(','): if e.startswith('CN='): groups.append(e[3:]) groups.append(domain_groups.get(primary_group, '')) result['groups'] = groups flags = [] try: uac = int(get_field(entry, 'userAccountControl')) for flag, value in uac_flags.items(): if uac & value: flags.append(flag) except ValueError: pass result['flags'] = flags return result
[ "Parses", "a", "single", "entry", "from", "the", "domaindump" ]
mwgielen/jackal
python
https://github.com/mwgielen/jackal/blob/7fe62732eb5194b7246215d5277fb37c398097bf/jackal/scripts/domaindump.py#L125-L157
[ "def", "parse_user", "(", "entry", ",", "domain_groups", ")", ":", "result", "=", "{", "}", "distinguished_name", "=", "get_field", "(", "entry", ",", "'distinguishedName'", ")", "result", "[", "'domain'", "]", "=", "\".\"", ".", "join", "(", "distinguished_name", ".", "split", "(", "',DC='", ")", "[", "1", ":", "]", ")", "result", "[", "'name'", "]", "=", "get_field", "(", "entry", ",", "'name'", ")", "result", "[", "'username'", "]", "=", "get_field", "(", "entry", ",", "'sAMAccountName'", ")", "result", "[", "'description'", "]", "=", "get_field", "(", "entry", ",", "'description'", ")", "result", "[", "'sid'", "]", "=", "get_field", "(", "entry", ",", "'objectSid'", ")", ".", "split", "(", "'-'", ")", "[", "-", "1", "]", "primary_group", "=", "get_field", "(", "entry", ",", "'primaryGroupID'", ")", "member_of", "=", "entry", "[", "'attributes'", "]", ".", "get", "(", "'memberOf'", ",", "[", "]", ")", "groups", "=", "[", "]", "for", "member", "in", "member_of", ":", "for", "e", "in", "member", ".", "split", "(", "','", ")", ":", "if", "e", ".", "startswith", "(", "'CN='", ")", ":", "groups", ".", "append", "(", "e", "[", "3", ":", "]", ")", "groups", ".", "append", "(", "domain_groups", ".", "get", "(", "primary_group", ",", "''", ")", ")", "result", "[", "'groups'", "]", "=", "groups", "flags", "=", "[", "]", "try", ":", "uac", "=", "int", "(", "get_field", "(", "entry", ",", "'userAccountControl'", ")", ")", "for", "flag", ",", "value", "in", "uac_flags", ".", "items", "(", ")", ":", "if", "uac", "&", "value", ":", "flags", ".", "append", "(", "flag", ")", "except", "ValueError", ":", "pass", "result", "[", "'flags'", "]", "=", "flags", "return", "result" ]
7fe62732eb5194b7246215d5277fb37c398097bf
valid
parse_domain_users
Parses the domain users and groups files.
jackal/scripts/domaindump.py
def parse_domain_users(domain_users_file, domain_groups_file): """ Parses the domain users and groups files. """ with open(domain_users_file) as f: users = json.loads(f.read()) domain_groups = {} if domain_groups_file: with open(domain_groups_file) as f: groups = json.loads(f.read()) for group in groups: sid = get_field(group, 'objectSid') domain_groups[int(sid.split('-')[-1])] = get_field(group, 'cn') user_search = UserSearch() count = 0 total = len(users) print_notification("Importing {} users".format(total)) for entry in users: result = parse_user(entry, domain_groups) user = user_search.id_to_object(result['username']) user.name = result['name'] user.domain.append(result['domain']) user.description = result['description'] user.groups.extend(result['groups']) user.flags.extend(result['flags']) user.sid = result['sid'] user.add_tag("domaindump") user.save() count += 1 sys.stdout.write('\r') sys.stdout.write("[{}/{}]".format(count, total)) sys.stdout.flush() sys.stdout.write('\r') return count
def parse_domain_users(domain_users_file, domain_groups_file): """ Parses the domain users and groups files. """ with open(domain_users_file) as f: users = json.loads(f.read()) domain_groups = {} if domain_groups_file: with open(domain_groups_file) as f: groups = json.loads(f.read()) for group in groups: sid = get_field(group, 'objectSid') domain_groups[int(sid.split('-')[-1])] = get_field(group, 'cn') user_search = UserSearch() count = 0 total = len(users) print_notification("Importing {} users".format(total)) for entry in users: result = parse_user(entry, domain_groups) user = user_search.id_to_object(result['username']) user.name = result['name'] user.domain.append(result['domain']) user.description = result['description'] user.groups.extend(result['groups']) user.flags.extend(result['flags']) user.sid = result['sid'] user.add_tag("domaindump") user.save() count += 1 sys.stdout.write('\r') sys.stdout.write("[{}/{}]".format(count, total)) sys.stdout.flush() sys.stdout.write('\r') return count
[ "Parses", "the", "domain", "users", "and", "groups", "files", "." ]
mwgielen/jackal
python
https://github.com/mwgielen/jackal/blob/7fe62732eb5194b7246215d5277fb37c398097bf/jackal/scripts/domaindump.py#L160-L195
[ "def", "parse_domain_users", "(", "domain_users_file", ",", "domain_groups_file", ")", ":", "with", "open", "(", "domain_users_file", ")", "as", "f", ":", "users", "=", "json", ".", "loads", "(", "f", ".", "read", "(", ")", ")", "domain_groups", "=", "{", "}", "if", "domain_groups_file", ":", "with", "open", "(", "domain_groups_file", ")", "as", "f", ":", "groups", "=", "json", ".", "loads", "(", "f", ".", "read", "(", ")", ")", "for", "group", "in", "groups", ":", "sid", "=", "get_field", "(", "group", ",", "'objectSid'", ")", "domain_groups", "[", "int", "(", "sid", ".", "split", "(", "'-'", ")", "[", "-", "1", "]", ")", "]", "=", "get_field", "(", "group", ",", "'cn'", ")", "user_search", "=", "UserSearch", "(", ")", "count", "=", "0", "total", "=", "len", "(", "users", ")", "print_notification", "(", "\"Importing {} users\"", ".", "format", "(", "total", ")", ")", "for", "entry", "in", "users", ":", "result", "=", "parse_user", "(", "entry", ",", "domain_groups", ")", "user", "=", "user_search", ".", "id_to_object", "(", "result", "[", "'username'", "]", ")", "user", ".", "name", "=", "result", "[", "'name'", "]", "user", ".", "domain", ".", "append", "(", "result", "[", "'domain'", "]", ")", "user", ".", "description", "=", "result", "[", "'description'", "]", "user", ".", "groups", ".", "extend", "(", "result", "[", "'groups'", "]", ")", "user", ".", "flags", ".", "extend", "(", "result", "[", "'flags'", "]", ")", "user", ".", "sid", "=", "result", "[", "'sid'", "]", "user", ".", "add_tag", "(", "\"domaindump\"", ")", "user", ".", "save", "(", ")", "count", "+=", "1", "sys", ".", "stdout", ".", "write", "(", "'\\r'", ")", "sys", ".", "stdout", ".", "write", "(", "\"[{}/{}]\"", ".", "format", "(", "count", ",", "total", ")", ")", "sys", ".", "stdout", ".", "flush", "(", ")", "sys", ".", "stdout", ".", "write", "(", "'\\r'", ")", "return", "count" ]
7fe62732eb5194b7246215d5277fb37c398097bf
valid
import_domaindump
Parses ldapdomaindump files and stores hosts and users in elasticsearch.
jackal/scripts/domaindump.py
def import_domaindump(): """ Parses ldapdomaindump files and stores hosts and users in elasticsearch. """ parser = argparse.ArgumentParser( description="Imports users, groups and computers result files from the ldapdomaindump tool, will resolve the names from domain_computers output for IPs") parser.add_argument("files", nargs='+', help="The domaindump files to import") arguments = parser.parse_args() domain_users_file = '' domain_groups_file = '' computer_count = 0 user_count = 0 stats = {} for filename in arguments.files: if filename.endswith('domain_computers.json'): print_notification('Parsing domain computers') computer_count = parse_domain_computers(filename) if computer_count: stats['hosts'] = computer_count print_success("{} hosts imported".format(computer_count)) elif filename.endswith('domain_users.json'): domain_users_file = filename elif filename.endswith('domain_groups.json'): domain_groups_file = filename if domain_users_file: print_notification("Parsing domain users") user_count = parse_domain_users(domain_users_file, domain_groups_file) if user_count: print_success("{} users imported".format(user_count)) stats['users'] = user_count Logger().log("import_domaindump", 'Imported domaindump, found {} user, {} systems'.format(user_count, computer_count), stats)
def import_domaindump(): """ Parses ldapdomaindump files and stores hosts and users in elasticsearch. """ parser = argparse.ArgumentParser( description="Imports users, groups and computers result files from the ldapdomaindump tool, will resolve the names from domain_computers output for IPs") parser.add_argument("files", nargs='+', help="The domaindump files to import") arguments = parser.parse_args() domain_users_file = '' domain_groups_file = '' computer_count = 0 user_count = 0 stats = {} for filename in arguments.files: if filename.endswith('domain_computers.json'): print_notification('Parsing domain computers') computer_count = parse_domain_computers(filename) if computer_count: stats['hosts'] = computer_count print_success("{} hosts imported".format(computer_count)) elif filename.endswith('domain_users.json'): domain_users_file = filename elif filename.endswith('domain_groups.json'): domain_groups_file = filename if domain_users_file: print_notification("Parsing domain users") user_count = parse_domain_users(domain_users_file, domain_groups_file) if user_count: print_success("{} users imported".format(user_count)) stats['users'] = user_count Logger().log("import_domaindump", 'Imported domaindump, found {} user, {} systems'.format(user_count, computer_count), stats)
[ "Parses", "ldapdomaindump", "files", "and", "stores", "hosts", "and", "users", "in", "elasticsearch", "." ]
mwgielen/jackal
python
https://github.com/mwgielen/jackal/blob/7fe62732eb5194b7246215d5277fb37c398097bf/jackal/scripts/domaindump.py#L198-L229
[ "def", "import_domaindump", "(", ")", ":", "parser", "=", "argparse", ".", "ArgumentParser", "(", "description", "=", "\"Imports users, groups and computers result files from the ldapdomaindump tool, will resolve the names from domain_computers output for IPs\"", ")", "parser", ".", "add_argument", "(", "\"files\"", ",", "nargs", "=", "'+'", ",", "help", "=", "\"The domaindump files to import\"", ")", "arguments", "=", "parser", ".", "parse_args", "(", ")", "domain_users_file", "=", "''", "domain_groups_file", "=", "''", "computer_count", "=", "0", "user_count", "=", "0", "stats", "=", "{", "}", "for", "filename", "in", "arguments", ".", "files", ":", "if", "filename", ".", "endswith", "(", "'domain_computers.json'", ")", ":", "print_notification", "(", "'Parsing domain computers'", ")", "computer_count", "=", "parse_domain_computers", "(", "filename", ")", "if", "computer_count", ":", "stats", "[", "'hosts'", "]", "=", "computer_count", "print_success", "(", "\"{} hosts imported\"", ".", "format", "(", "computer_count", ")", ")", "elif", "filename", ".", "endswith", "(", "'domain_users.json'", ")", ":", "domain_users_file", "=", "filename", "elif", "filename", ".", "endswith", "(", "'domain_groups.json'", ")", ":", "domain_groups_file", "=", "filename", "if", "domain_users_file", ":", "print_notification", "(", "\"Parsing domain users\"", ")", "user_count", "=", "parse_domain_users", "(", "domain_users_file", ",", "domain_groups_file", ")", "if", "user_count", ":", "print_success", "(", "\"{} users imported\"", ".", "format", "(", "user_count", ")", ")", "stats", "[", "'users'", "]", "=", "user_count", "Logger", "(", ")", ".", "log", "(", "\"import_domaindump\"", ",", "'Imported domaindump, found {} user, {} systems'", ".", "format", "(", "user_count", ",", "computer_count", ")", ",", "stats", ")" ]
7fe62732eb5194b7246215d5277fb37c398097bf
valid
autocomplete
Make an autocomplete API request This can be used to find cities and/or hurricanes by name :param string query: city :param string country: restrict search to a specific country. Must be a two letter country code :param boolean hurricanes: whether to search for hurricanes or not :param boolean cities: whether to search for cities or not :param integer timeout: timeout of the api request :returns: result of the autocomplete API request :rtype: dict
pywunderground/core.py
def autocomplete(query, country=None, hurricanes=False, cities=True, timeout=5): """Make an autocomplete API request This can be used to find cities and/or hurricanes by name :param string query: city :param string country: restrict search to a specific country. Must be a two letter country code :param boolean hurricanes: whether to search for hurricanes or not :param boolean cities: whether to search for cities or not :param integer timeout: timeout of the api request :returns: result of the autocomplete API request :rtype: dict """ data = {} data['query'] = quote(query) data['country'] = country or '' data['hurricanes'] = 1 if hurricanes else 0 data['cities'] = 1 if cities else 0 data['format'] = 'JSON' r = requests.get(AUTOCOMPLETE_URL.format(**data), timeout=timeout) results = json.loads(r.content)['RESULTS'] return results
def autocomplete(query, country=None, hurricanes=False, cities=True, timeout=5): """Make an autocomplete API request This can be used to find cities and/or hurricanes by name :param string query: city :param string country: restrict search to a specific country. Must be a two letter country code :param boolean hurricanes: whether to search for hurricanes or not :param boolean cities: whether to search for cities or not :param integer timeout: timeout of the api request :returns: result of the autocomplete API request :rtype: dict """ data = {} data['query'] = quote(query) data['country'] = country or '' data['hurricanes'] = 1 if hurricanes else 0 data['cities'] = 1 if cities else 0 data['format'] = 'JSON' r = requests.get(AUTOCOMPLETE_URL.format(**data), timeout=timeout) results = json.loads(r.content)['RESULTS'] return results
[ "Make", "an", "autocomplete", "API", "request" ]
Diaoul/pywunderground
python
https://github.com/Diaoul/pywunderground/blob/d0fcb7c573e1c8285f6fc3930c6bddab820a9de7/pywunderground/core.py#L33-L55
[ "def", "autocomplete", "(", "query", ",", "country", "=", "None", ",", "hurricanes", "=", "False", ",", "cities", "=", "True", ",", "timeout", "=", "5", ")", ":", "data", "=", "{", "}", "data", "[", "'query'", "]", "=", "quote", "(", "query", ")", "data", "[", "'country'", "]", "=", "country", "or", "''", "data", "[", "'hurricanes'", "]", "=", "1", "if", "hurricanes", "else", "0", "data", "[", "'cities'", "]", "=", "1", "if", "cities", "else", "0", "data", "[", "'format'", "]", "=", "'JSON'", "r", "=", "requests", ".", "get", "(", "AUTOCOMPLETE_URL", ".", "format", "(", "*", "*", "data", ")", ",", "timeout", "=", "timeout", ")", "results", "=", "json", ".", "loads", "(", "r", ".", "content", ")", "[", "'RESULTS'", "]", "return", "results" ]
d0fcb7c573e1c8285f6fc3930c6bddab820a9de7
valid
request
Make an API request :param string key: API key to use :param list features: features to request. It must be a subset of :data:`FEATURES` :param string query: query to send :param integer timeout: timeout of the request :returns: result of the API request :rtype: dict
pywunderground/core.py
def request(key, features, query, timeout=5): """Make an API request :param string key: API key to use :param list features: features to request. It must be a subset of :data:`FEATURES` :param string query: query to send :param integer timeout: timeout of the request :returns: result of the API request :rtype: dict """ data = {} data['key'] = key data['features'] = '/'.join([f for f in features if f in FEATURES]) data['query'] = quote(query) data['format'] = 'json' r = requests.get(API_URL.format(**data), timeout=timeout) results = json.loads(_unicode(r.content)) return results
def request(key, features, query, timeout=5): """Make an API request :param string key: API key to use :param list features: features to request. It must be a subset of :data:`FEATURES` :param string query: query to send :param integer timeout: timeout of the request :returns: result of the API request :rtype: dict """ data = {} data['key'] = key data['features'] = '/'.join([f for f in features if f in FEATURES]) data['query'] = quote(query) data['format'] = 'json' r = requests.get(API_URL.format(**data), timeout=timeout) results = json.loads(_unicode(r.content)) return results
[ "Make", "an", "API", "request" ]
Diaoul/pywunderground
python
https://github.com/Diaoul/pywunderground/blob/d0fcb7c573e1c8285f6fc3930c6bddab820a9de7/pywunderground/core.py#L58-L76
[ "def", "request", "(", "key", ",", "features", ",", "query", ",", "timeout", "=", "5", ")", ":", "data", "=", "{", "}", "data", "[", "'key'", "]", "=", "key", "data", "[", "'features'", "]", "=", "'/'", ".", "join", "(", "[", "f", "for", "f", "in", "features", "if", "f", "in", "FEATURES", "]", ")", "data", "[", "'query'", "]", "=", "quote", "(", "query", ")", "data", "[", "'format'", "]", "=", "'json'", "r", "=", "requests", ".", "get", "(", "API_URL", ".", "format", "(", "*", "*", "data", ")", ",", "timeout", "=", "timeout", ")", "results", "=", "json", ".", "loads", "(", "_unicode", "(", "r", ".", "content", ")", ")", "return", "results" ]
d0fcb7c573e1c8285f6fc3930c6bddab820a9de7
valid
_unicode
Try to convert a string to unicode using different encodings
pywunderground/core.py
def _unicode(string): """Try to convert a string to unicode using different encodings""" for encoding in ['utf-8', 'latin1']: try: result = unicode(string, encoding) return result except UnicodeDecodeError: pass result = unicode(string, 'utf-8', 'replace') return result
def _unicode(string): """Try to convert a string to unicode using different encodings""" for encoding in ['utf-8', 'latin1']: try: result = unicode(string, encoding) return result except UnicodeDecodeError: pass result = unicode(string, 'utf-8', 'replace') return result
[ "Try", "to", "convert", "a", "string", "to", "unicode", "using", "different", "encodings" ]
Diaoul/pywunderground
python
https://github.com/Diaoul/pywunderground/blob/d0fcb7c573e1c8285f6fc3930c6bddab820a9de7/pywunderground/core.py#L79-L88
[ "def", "_unicode", "(", "string", ")", ":", "for", "encoding", "in", "[", "'utf-8'", ",", "'latin1'", "]", ":", "try", ":", "result", "=", "unicode", "(", "string", ",", "encoding", ")", "return", "result", "except", "UnicodeDecodeError", ":", "pass", "result", "=", "unicode", "(", "string", ",", "'utf-8'", ",", "'replace'", ")", "return", "result" ]
d0fcb7c573e1c8285f6fc3930c6bddab820a9de7
valid
http_get_provider
Handle HTTP GET requests on an authentication endpoint. Authentication flow begins when ``params`` has a ``login`` key with a value of ``start``. For instance, ``/auth/twitter?login=start``. :param str provider: An provider to obtain a user ID from. :param str request_url: The authentication endpoint/callback. :param dict params: GET parameters from the query string. :param str token_secret: An app secret to encode/decode JSON web tokens. :param str token_cookie: The current JSON web token, if available. :return: A dict containing any of the following possible keys: ``status``: an HTTP status code the server should sent ``redirect``: where the client should be directed to continue the flow ``set_token_cookie``: contains a JSON web token and should be stored by the client and passed in the next call. ``provider_user_id``: the user ID from the login provider ``provider_user_name``: the user name from the login provider
socialauth/authentication.py
def http_get_provider(provider, request_url, params, token_secret, token_cookie = None): '''Handle HTTP GET requests on an authentication endpoint. Authentication flow begins when ``params`` has a ``login`` key with a value of ``start``. For instance, ``/auth/twitter?login=start``. :param str provider: An provider to obtain a user ID from. :param str request_url: The authentication endpoint/callback. :param dict params: GET parameters from the query string. :param str token_secret: An app secret to encode/decode JSON web tokens. :param str token_cookie: The current JSON web token, if available. :return: A dict containing any of the following possible keys: ``status``: an HTTP status code the server should sent ``redirect``: where the client should be directed to continue the flow ``set_token_cookie``: contains a JSON web token and should be stored by the client and passed in the next call. ``provider_user_id``: the user ID from the login provider ``provider_user_name``: the user name from the login provider ''' if not validate_provider(provider): raise InvalidUsage('Provider not supported') klass = getattr(socialauth.providers, provider.capitalize()) provider = klass(request_url, params, token_secret, token_cookie) if provider.status == 302: ret = dict(status = 302, redirect = provider.redirect) tc = getattr(provider, 'set_token_cookie', None) if tc is not None: ret['set_token_cookie'] = tc return ret if provider.status == 200 and provider.user_id is not None: ret = dict(status = 200, provider_user_id = provider.user_id) if provider.user_name is not None: ret['provider_user_name'] = provider.user_name return ret raise InvalidUsage('Invalid request')
def http_get_provider(provider, request_url, params, token_secret, token_cookie = None): '''Handle HTTP GET requests on an authentication endpoint. Authentication flow begins when ``params`` has a ``login`` key with a value of ``start``. For instance, ``/auth/twitter?login=start``. :param str provider: An provider to obtain a user ID from. :param str request_url: The authentication endpoint/callback. :param dict params: GET parameters from the query string. :param str token_secret: An app secret to encode/decode JSON web tokens. :param str token_cookie: The current JSON web token, if available. :return: A dict containing any of the following possible keys: ``status``: an HTTP status code the server should sent ``redirect``: where the client should be directed to continue the flow ``set_token_cookie``: contains a JSON web token and should be stored by the client and passed in the next call. ``provider_user_id``: the user ID from the login provider ``provider_user_name``: the user name from the login provider ''' if not validate_provider(provider): raise InvalidUsage('Provider not supported') klass = getattr(socialauth.providers, provider.capitalize()) provider = klass(request_url, params, token_secret, token_cookie) if provider.status == 302: ret = dict(status = 302, redirect = provider.redirect) tc = getattr(provider, 'set_token_cookie', None) if tc is not None: ret['set_token_cookie'] = tc return ret if provider.status == 200 and provider.user_id is not None: ret = dict(status = 200, provider_user_id = provider.user_id) if provider.user_name is not None: ret['provider_user_name'] = provider.user_name return ret raise InvalidUsage('Invalid request')
[ "Handle", "HTTP", "GET", "requests", "on", "an", "authentication", "endpoint", "." ]
emilyhorsman/socialauth
python
https://github.com/emilyhorsman/socialauth/blob/2246a5b2cbbea0936a9b76cc3a7f0a224434d9f6/socialauth/authentication.py#L14-L60
[ "def", "http_get_provider", "(", "provider", ",", "request_url", ",", "params", ",", "token_secret", ",", "token_cookie", "=", "None", ")", ":", "if", "not", "validate_provider", "(", "provider", ")", ":", "raise", "InvalidUsage", "(", "'Provider not supported'", ")", "klass", "=", "getattr", "(", "socialauth", ".", "providers", ",", "provider", ".", "capitalize", "(", ")", ")", "provider", "=", "klass", "(", "request_url", ",", "params", ",", "token_secret", ",", "token_cookie", ")", "if", "provider", ".", "status", "==", "302", ":", "ret", "=", "dict", "(", "status", "=", "302", ",", "redirect", "=", "provider", ".", "redirect", ")", "tc", "=", "getattr", "(", "provider", ",", "'set_token_cookie'", ",", "None", ")", "if", "tc", "is", "not", "None", ":", "ret", "[", "'set_token_cookie'", "]", "=", "tc", "return", "ret", "if", "provider", ".", "status", "==", "200", "and", "provider", ".", "user_id", "is", "not", "None", ":", "ret", "=", "dict", "(", "status", "=", "200", ",", "provider_user_id", "=", "provider", ".", "user_id", ")", "if", "provider", ".", "user_name", "is", "not", "None", ":", "ret", "[", "'provider_user_name'", "]", "=", "provider", ".", "user_name", "return", "ret", "raise", "InvalidUsage", "(", "'Invalid request'", ")" ]
2246a5b2cbbea0936a9b76cc3a7f0a224434d9f6
valid
Target.to_json
Method to call to get a serializable object for json.dump or jsonify based on the target :return: dict
flask_nemo/query/annotation.py
def to_json(self): """ Method to call to get a serializable object for json.dump or jsonify based on the target :return: dict """ if self.subreference is not None: return { "source": self.objectId, "selector": { "type": "FragmentSelector", "conformsTo": "http://ontology-dts.org/terms/subreference", "value": self.subreference } } else: return {"source": self.objectId}
def to_json(self): """ Method to call to get a serializable object for json.dump or jsonify based on the target :return: dict """ if self.subreference is not None: return { "source": self.objectId, "selector": { "type": "FragmentSelector", "conformsTo": "http://ontology-dts.org/terms/subreference", "value": self.subreference } } else: return {"source": self.objectId}
[ "Method", "to", "call", "to", "get", "a", "serializable", "object", "for", "json", ".", "dump", "or", "jsonify", "based", "on", "the", "target" ]
Capitains/flask-capitains-nemo
python
https://github.com/Capitains/flask-capitains-nemo/blob/8d91f2c05b925a6c8ea8c997baf698c87257bc58/flask_nemo/query/annotation.py#L39-L54
[ "def", "to_json", "(", "self", ")", ":", "if", "self", ".", "subreference", "is", "not", "None", ":", "return", "{", "\"source\"", ":", "self", ".", "objectId", ",", "\"selector\"", ":", "{", "\"type\"", ":", "\"FragmentSelector\"", ",", "\"conformsTo\"", ":", "\"http://ontology-dts.org/terms/subreference\"", ",", "\"value\"", ":", "self", ".", "subreference", "}", "}", "else", ":", "return", "{", "\"source\"", ":", "self", ".", "objectId", "}" ]
8d91f2c05b925a6c8ea8c997baf698c87257bc58
valid
AnnotationResource.read
Read the contents of the Annotation Resource :return: the contents of the resource :rtype: str or bytes or flask.response
flask_nemo/query/annotation.py
def read(self): """ Read the contents of the Annotation Resource :return: the contents of the resource :rtype: str or bytes or flask.response """ if not self.__content__: self.__retriever__ = self.__resolver__.resolve(self.uri) self.__content__, self.__mimetype__ = self.__retriever__.read(self.uri) return self.__content__
def read(self): """ Read the contents of the Annotation Resource :return: the contents of the resource :rtype: str or bytes or flask.response """ if not self.__content__: self.__retriever__ = self.__resolver__.resolve(self.uri) self.__content__, self.__mimetype__ = self.__retriever__.read(self.uri) return self.__content__
[ "Read", "the", "contents", "of", "the", "Annotation", "Resource" ]
Capitains/flask-capitains-nemo
python
https://github.com/Capitains/flask-capitains-nemo/blob/8d91f2c05b925a6c8ea8c997baf698c87257bc58/flask_nemo/query/annotation.py#L132-L141
[ "def", "read", "(", "self", ")", ":", "if", "not", "self", ".", "__content__", ":", "self", ".", "__retriever__", "=", "self", ".", "__resolver__", ".", "resolve", "(", "self", ".", "uri", ")", "self", ".", "__content__", ",", "self", ".", "__mimetype__", "=", "self", ".", "__retriever__", ".", "read", "(", "self", ".", "uri", ")", "return", "self", ".", "__content__" ]
8d91f2c05b925a6c8ea8c997baf698c87257bc58
valid
build_index_and_mapping
index all triples into indexes and return their mappings
kgekit/data.py
def build_index_and_mapping(triples): """index all triples into indexes and return their mappings""" ents = bidict() rels = bidict() ent_id = 0 rel_id = 0 collected = [] for t in triples: for e in (t.head, t.tail): if e not in ents: ents[e] = ent_id ent_id += 1 if t.relation not in rels: rels[t.relation] = rel_id rel_id += 1 collected.append(kgedata.TripleIndex(ents[t.head], rels[t.relation], ents[t.tail])) return collected, ents, rels
def build_index_and_mapping(triples): """index all triples into indexes and return their mappings""" ents = bidict() rels = bidict() ent_id = 0 rel_id = 0 collected = [] for t in triples: for e in (t.head, t.tail): if e not in ents: ents[e] = ent_id ent_id += 1 if t.relation not in rels: rels[t.relation] = rel_id rel_id += 1 collected.append(kgedata.TripleIndex(ents[t.head], rels[t.relation], ents[t.tail])) return collected, ents, rels
[ "index", "all", "triples", "into", "indexes", "and", "return", "their", "mappings" ]
fantasticfears/kgekit
python
https://github.com/fantasticfears/kgekit/blob/5e464e1fc3ae9c7e216f6dd94f879a967d065247/kgekit/data.py#L108-L126
[ "def", "build_index_and_mapping", "(", "triples", ")", ":", "ents", "=", "bidict", "(", ")", "rels", "=", "bidict", "(", ")", "ent_id", "=", "0", "rel_id", "=", "0", "collected", "=", "[", "]", "for", "t", "in", "triples", ":", "for", "e", "in", "(", "t", ".", "head", ",", "t", ".", "tail", ")", ":", "if", "e", "not", "in", "ents", ":", "ents", "[", "e", "]", "=", "ent_id", "ent_id", "+=", "1", "if", "t", ".", "relation", "not", "in", "rels", ":", "rels", "[", "t", ".", "relation", "]", "=", "rel_id", "rel_id", "+=", "1", "collected", ".", "append", "(", "kgedata", ".", "TripleIndex", "(", "ents", "[", "t", ".", "head", "]", ",", "rels", "[", "t", ".", "relation", "]", ",", "ents", "[", "t", ".", "tail", "]", ")", ")", "return", "collected", ",", "ents", ",", "rels" ]
5e464e1fc3ae9c7e216f6dd94f879a967d065247
valid
recover_triples_from_mapping
recover triples from mapping.
kgekit/data.py
def recover_triples_from_mapping(indexes, ents: bidict, rels: bidict): """recover triples from mapping.""" triples = [] for t in indexes: triples.append(kgedata.Triple(ents.inverse[t.head], rels.inverse[t.relation], ents.inverse[t.tail])) return triples
def recover_triples_from_mapping(indexes, ents: bidict, rels: bidict): """recover triples from mapping.""" triples = [] for t in indexes: triples.append(kgedata.Triple(ents.inverse[t.head], rels.inverse[t.relation], ents.inverse[t.tail])) return triples
[ "recover", "triples", "from", "mapping", "." ]
fantasticfears/kgekit
python
https://github.com/fantasticfears/kgekit/blob/5e464e1fc3ae9c7e216f6dd94f879a967d065247/kgekit/data.py#L129-L134
[ "def", "recover_triples_from_mapping", "(", "indexes", ",", "ents", ":", "bidict", ",", "rels", ":", "bidict", ")", ":", "triples", "=", "[", "]", "for", "t", "in", "indexes", ":", "triples", ".", "append", "(", "kgedata", ".", "Triple", "(", "ents", ".", "inverse", "[", "t", ".", "head", "]", ",", "rels", ".", "inverse", "[", "t", ".", "relation", "]", ",", "ents", ".", "inverse", "[", "t", ".", "tail", "]", ")", ")", "return", "triples" ]
5e464e1fc3ae9c7e216f6dd94f879a967d065247
valid
split_golden_set
Split the data into train/valid/test sets.
kgekit/data.py
def split_golden_set(triples, valid_ratio, test_ratio): """Split the data into train/valid/test sets.""" assert valid_ratio >= 0.0 assert test_ratio >= 0.0 num_valid = int(len(triples) * valid_ratio) num_test = int(len(triples) * test_ratio) valid_set = triples[:num_valid] test_set = triples[num_valid:num_valid+num_test] train_set = triples[num_valid+num_test:] assert len(valid_set) + len(test_set) + len(train_set) == len(triples) return train_set, valid_set, test_set
def split_golden_set(triples, valid_ratio, test_ratio): """Split the data into train/valid/test sets.""" assert valid_ratio >= 0.0 assert test_ratio >= 0.0 num_valid = int(len(triples) * valid_ratio) num_test = int(len(triples) * test_ratio) valid_set = triples[:num_valid] test_set = triples[num_valid:num_valid+num_test] train_set = triples[num_valid+num_test:] assert len(valid_set) + len(test_set) + len(train_set) == len(triples) return train_set, valid_set, test_set
[ "Split", "the", "data", "into", "train", "/", "valid", "/", "test", "sets", "." ]
fantasticfears/kgekit
python
https://github.com/fantasticfears/kgekit/blob/5e464e1fc3ae9c7e216f6dd94f879a967d065247/kgekit/data.py#L145-L156
[ "def", "split_golden_set", "(", "triples", ",", "valid_ratio", ",", "test_ratio", ")", ":", "assert", "valid_ratio", ">=", "0.0", "assert", "test_ratio", ">=", "0.0", "num_valid", "=", "int", "(", "len", "(", "triples", ")", "*", "valid_ratio", ")", "num_test", "=", "int", "(", "len", "(", "triples", ")", "*", "test_ratio", ")", "valid_set", "=", "triples", "[", ":", "num_valid", "]", "test_set", "=", "triples", "[", "num_valid", ":", "num_valid", "+", "num_test", "]", "train_set", "=", "triples", "[", "num_valid", "+", "num_test", ":", "]", "assert", "len", "(", "valid_set", ")", "+", "len", "(", "test_set", ")", "+", "len", "(", "train_set", ")", "==", "len", "(", "triples", ")", "return", "train_set", ",", "valid_set", ",", "test_set" ]
5e464e1fc3ae9c7e216f6dd94f879a967d065247
valid
_transform_triple_numpy
Transform triple index into a 1-D numpy array.
kgekit/data.py
def _transform_triple_numpy(x): """Transform triple index into a 1-D numpy array.""" return np.array([x.head, x.relation, x.tail], dtype=np.int64)
def _transform_triple_numpy(x): """Transform triple index into a 1-D numpy array.""" return np.array([x.head, x.relation, x.tail], dtype=np.int64)
[ "Transform", "triple", "index", "into", "a", "1", "-", "D", "numpy", "array", "." ]
fantasticfears/kgekit
python
https://github.com/fantasticfears/kgekit/blob/5e464e1fc3ae9c7e216f6dd94f879a967d065247/kgekit/data.py#L158-L160
[ "def", "_transform_triple_numpy", "(", "x", ")", ":", "return", "np", ".", "array", "(", "[", "x", ".", "head", ",", "x", ".", "relation", ",", "x", ".", "tail", "]", ",", "dtype", "=", "np", ".", "int64", ")" ]
5e464e1fc3ae9c7e216f6dd94f879a967d065247
valid
pack_triples_numpy
Packs a list of triple indexes into a 2D numpy array.
kgekit/data.py
def pack_triples_numpy(triples): """Packs a list of triple indexes into a 2D numpy array.""" if len(triples) == 0: return np.array([], dtype=np.int64) return np.stack(list(map(_transform_triple_numpy, triples)), axis=0)
def pack_triples_numpy(triples): """Packs a list of triple indexes into a 2D numpy array.""" if len(triples) == 0: return np.array([], dtype=np.int64) return np.stack(list(map(_transform_triple_numpy, triples)), axis=0)
[ "Packs", "a", "list", "of", "triple", "indexes", "into", "a", "2D", "numpy", "array", "." ]
fantasticfears/kgekit
python
https://github.com/fantasticfears/kgekit/blob/5e464e1fc3ae9c7e216f6dd94f879a967d065247/kgekit/data.py#L162-L166
[ "def", "pack_triples_numpy", "(", "triples", ")", ":", "if", "len", "(", "triples", ")", "==", "0", ":", "return", "np", ".", "array", "(", "[", "]", ",", "dtype", "=", "np", ".", "int64", ")", "return", "np", ".", "stack", "(", "list", "(", "map", "(", "_transform_triple_numpy", ",", "triples", ")", ")", ",", "axis", "=", "0", ")" ]
5e464e1fc3ae9c7e216f6dd94f879a967d065247
valid
remove_near_duplicate_relation
If entity pairs in a relation is as close as another relations, only keep one relation of such set.
kgekit/data.py
def remove_near_duplicate_relation(triples, threshold=0.97): """If entity pairs in a relation is as close as another relations, only keep one relation of such set.""" logging.debug("remove duplicate") _assert_threshold(threshold) duplicate_rel_counter = defaultdict(list) relations = set() for t in triples: duplicate_rel_counter[t.relation].append(f"{t.head} {t.tail}") relations.add(t.relation) relations = list(relations) num_triples = len(triples) removal_relation_set = set() for rel, values in duplicate_rel_counter.items(): duplicate_rel_counter[rel] = Superminhash(values) for i in relations: for j in relations: if i == j or i in removal_relation_set or j in removal_relation_set: continue close_relations = [i] if _set_close_to(duplicate_rel_counter[i], duplicate_rel_counter[j], threshold): close_relations.append(j) if len(close_relations) > 1: close_relations.pop(np.random.randint(len(close_relations))) removal_relation_set |= set(close_relations) logging.info("Removing {} relations: {}".format(len(removal_relation_set), str(removal_relation_set))) return list(filterfalse(lambda x: x.relation in removal_relation_set, triples))
def remove_near_duplicate_relation(triples, threshold=0.97): """If entity pairs in a relation is as close as another relations, only keep one relation of such set.""" logging.debug("remove duplicate") _assert_threshold(threshold) duplicate_rel_counter = defaultdict(list) relations = set() for t in triples: duplicate_rel_counter[t.relation].append(f"{t.head} {t.tail}") relations.add(t.relation) relations = list(relations) num_triples = len(triples) removal_relation_set = set() for rel, values in duplicate_rel_counter.items(): duplicate_rel_counter[rel] = Superminhash(values) for i in relations: for j in relations: if i == j or i in removal_relation_set or j in removal_relation_set: continue close_relations = [i] if _set_close_to(duplicate_rel_counter[i], duplicate_rel_counter[j], threshold): close_relations.append(j) if len(close_relations) > 1: close_relations.pop(np.random.randint(len(close_relations))) removal_relation_set |= set(close_relations) logging.info("Removing {} relations: {}".format(len(removal_relation_set), str(removal_relation_set))) return list(filterfalse(lambda x: x.relation in removal_relation_set, triples))
[ "If", "entity", "pairs", "in", "a", "relation", "is", "as", "close", "as", "another", "relations", "only", "keep", "one", "relation", "of", "such", "set", "." ]
fantasticfears/kgekit
python
https://github.com/fantasticfears/kgekit/blob/5e464e1fc3ae9c7e216f6dd94f879a967d065247/kgekit/data.py#L190-L219
[ "def", "remove_near_duplicate_relation", "(", "triples", ",", "threshold", "=", "0.97", ")", ":", "logging", ".", "debug", "(", "\"remove duplicate\"", ")", "_assert_threshold", "(", "threshold", ")", "duplicate_rel_counter", "=", "defaultdict", "(", "list", ")", "relations", "=", "set", "(", ")", "for", "t", "in", "triples", ":", "duplicate_rel_counter", "[", "t", ".", "relation", "]", ".", "append", "(", "f\"{t.head} {t.tail}\"", ")", "relations", ".", "add", "(", "t", ".", "relation", ")", "relations", "=", "list", "(", "relations", ")", "num_triples", "=", "len", "(", "triples", ")", "removal_relation_set", "=", "set", "(", ")", "for", "rel", ",", "values", "in", "duplicate_rel_counter", ".", "items", "(", ")", ":", "duplicate_rel_counter", "[", "rel", "]", "=", "Superminhash", "(", "values", ")", "for", "i", "in", "relations", ":", "for", "j", "in", "relations", ":", "if", "i", "==", "j", "or", "i", "in", "removal_relation_set", "or", "j", "in", "removal_relation_set", ":", "continue", "close_relations", "=", "[", "i", "]", "if", "_set_close_to", "(", "duplicate_rel_counter", "[", "i", "]", ",", "duplicate_rel_counter", "[", "j", "]", ",", "threshold", ")", ":", "close_relations", ".", "append", "(", "j", ")", "if", "len", "(", "close_relations", ")", ">", "1", ":", "close_relations", ".", "pop", "(", "np", ".", "random", ".", "randint", "(", "len", "(", "close_relations", ")", ")", ")", "removal_relation_set", "|=", "set", "(", "close_relations", ")", "logging", ".", "info", "(", "\"Removing {} relations: {}\"", ".", "format", "(", "len", "(", "removal_relation_set", ")", ",", "str", "(", "removal_relation_set", ")", ")", ")", "return", "list", "(", "filterfalse", "(", "lambda", "x", ":", "x", ".", "relation", "in", "removal_relation_set", ",", "triples", ")", ")" ]
5e464e1fc3ae9c7e216f6dd94f879a967d065247
valid
remove_direct_link_triples
Remove direct links in the training sets.
kgekit/data.py
def remove_direct_link_triples(train, valid, test): """Remove direct links in the training sets.""" pairs = set() merged = valid + test for t in merged: pairs.add((t.head, t.tail)) filtered = filterfalse(lambda t: (t.head, t.tail) in pairs or (t.tail, t.head) in pairs, train) return list(filtered)
def remove_direct_link_triples(train, valid, test): """Remove direct links in the training sets.""" pairs = set() merged = valid + test for t in merged: pairs.add((t.head, t.tail)) filtered = filterfalse(lambda t: (t.head, t.tail) in pairs or (t.tail, t.head) in pairs, train) return list(filtered)
[ "Remove", "direct", "links", "in", "the", "training", "sets", "." ]
fantasticfears/kgekit
python
https://github.com/fantasticfears/kgekit/blob/5e464e1fc3ae9c7e216f6dd94f879a967d065247/kgekit/data.py#L256-L264
[ "def", "remove_direct_link_triples", "(", "train", ",", "valid", ",", "test", ")", ":", "pairs", "=", "set", "(", ")", "merged", "=", "valid", "+", "test", "for", "t", "in", "merged", ":", "pairs", ".", "add", "(", "(", "t", ".", "head", ",", "t", ".", "tail", ")", ")", "filtered", "=", "filterfalse", "(", "lambda", "t", ":", "(", "t", ".", "head", ",", "t", ".", "tail", ")", "in", "pairs", "or", "(", "t", ".", "tail", ",", "t", ".", "head", ")", "in", "pairs", ",", "train", ")", "return", "list", "(", "filtered", ")" ]
5e464e1fc3ae9c7e216f6dd94f879a967d065247
valid
Indexer.shrink_indexes_in_place
Uses a union find to find segment.
kgekit/data.py
def shrink_indexes_in_place(self, triples): """Uses a union find to find segment.""" _ent_roots = self.UnionFind(self._ent_id) _rel_roots = self.UnionFind(self._rel_id) for t in triples: _ent_roots.add(t.head) _ent_roots.add(t.tail) _rel_roots.add(t.relation) for i, t in enumerate(triples): h = _ent_roots.find(t.head) r = _rel_roots.find(t.relation) t = _ent_roots.find(t.tail) triples[i] = kgedata.TripleIndex(h, r, t) ents = bidict() available_ent_idx = 0 for previous_idx, ent_exist in enumerate(_ent_roots.roots()): if not ent_exist: self._ents.inverse.pop(previous_idx) else: ents[self._ents.inverse[previous_idx]] = available_ent_idx available_ent_idx += 1 rels = bidict() available_rel_idx = 0 for previous_idx, rel_exist in enumerate(_rel_roots.roots()): if not rel_exist: self._rels.inverse.pop(previous_idx) else: rels[self._rels.inverse[previous_idx]] = available_rel_idx available_rel_idx += 1 self._ents = ents self._rels = rels self._ent_id = available_ent_idx self._rel_id = available_rel_idx
def shrink_indexes_in_place(self, triples): """Uses a union find to find segment.""" _ent_roots = self.UnionFind(self._ent_id) _rel_roots = self.UnionFind(self._rel_id) for t in triples: _ent_roots.add(t.head) _ent_roots.add(t.tail) _rel_roots.add(t.relation) for i, t in enumerate(triples): h = _ent_roots.find(t.head) r = _rel_roots.find(t.relation) t = _ent_roots.find(t.tail) triples[i] = kgedata.TripleIndex(h, r, t) ents = bidict() available_ent_idx = 0 for previous_idx, ent_exist in enumerate(_ent_roots.roots()): if not ent_exist: self._ents.inverse.pop(previous_idx) else: ents[self._ents.inverse[previous_idx]] = available_ent_idx available_ent_idx += 1 rels = bidict() available_rel_idx = 0 for previous_idx, rel_exist in enumerate(_rel_roots.roots()): if not rel_exist: self._rels.inverse.pop(previous_idx) else: rels[self._rels.inverse[previous_idx]] = available_rel_idx available_rel_idx += 1 self._ents = ents self._rels = rels self._ent_id = available_ent_idx self._rel_id = available_rel_idx
[ "Uses", "a", "union", "find", "to", "find", "segment", "." ]
fantasticfears/kgekit
python
https://github.com/fantasticfears/kgekit/blob/5e464e1fc3ae9c7e216f6dd94f879a967d065247/kgekit/data.py#L69-L105
[ "def", "shrink_indexes_in_place", "(", "self", ",", "triples", ")", ":", "_ent_roots", "=", "self", ".", "UnionFind", "(", "self", ".", "_ent_id", ")", "_rel_roots", "=", "self", ".", "UnionFind", "(", "self", ".", "_rel_id", ")", "for", "t", "in", "triples", ":", "_ent_roots", ".", "add", "(", "t", ".", "head", ")", "_ent_roots", ".", "add", "(", "t", ".", "tail", ")", "_rel_roots", ".", "add", "(", "t", ".", "relation", ")", "for", "i", ",", "t", "in", "enumerate", "(", "triples", ")", ":", "h", "=", "_ent_roots", ".", "find", "(", "t", ".", "head", ")", "r", "=", "_rel_roots", ".", "find", "(", "t", ".", "relation", ")", "t", "=", "_ent_roots", ".", "find", "(", "t", ".", "tail", ")", "triples", "[", "i", "]", "=", "kgedata", ".", "TripleIndex", "(", "h", ",", "r", ",", "t", ")", "ents", "=", "bidict", "(", ")", "available_ent_idx", "=", "0", "for", "previous_idx", ",", "ent_exist", "in", "enumerate", "(", "_ent_roots", ".", "roots", "(", ")", ")", ":", "if", "not", "ent_exist", ":", "self", ".", "_ents", ".", "inverse", ".", "pop", "(", "previous_idx", ")", "else", ":", "ents", "[", "self", ".", "_ents", ".", "inverse", "[", "previous_idx", "]", "]", "=", "available_ent_idx", "available_ent_idx", "+=", "1", "rels", "=", "bidict", "(", ")", "available_rel_idx", "=", "0", "for", "previous_idx", ",", "rel_exist", "in", "enumerate", "(", "_rel_roots", ".", "roots", "(", ")", ")", ":", "if", "not", "rel_exist", ":", "self", ".", "_rels", ".", "inverse", ".", "pop", "(", "previous_idx", ")", "else", ":", "rels", "[", "self", ".", "_rels", ".", "inverse", "[", "previous_idx", "]", "]", "=", "available_rel_idx", "available_rel_idx", "+=", "1", "self", ".", "_ents", "=", "ents", "self", ".", "_rels", "=", "rels", "self", ".", "_ent_id", "=", "available_ent_idx", "self", ".", "_rel_id", "=", "available_rel_idx" ]
5e464e1fc3ae9c7e216f6dd94f879a967d065247
valid
IndexBuilder.freeze
Create a usable data structure for serializing.
sphinxcontrib/lunrsearch/__init__.py
def freeze(self): """Create a usable data structure for serializing.""" data = super(IndexBuilder, self).freeze() try: # Sphinx >= 1.5 format # Due to changes from github.com/sphinx-doc/sphinx/pull/2454 base_file_names = data['docnames'] except KeyError: # Sphinx < 1.5 format base_file_names = data['filenames'] store = {} c = itertools.count() for prefix, items in iteritems(data['objects']): for name, (index, typeindex, _, shortanchor) in iteritems(items): objtype = data['objtypes'][typeindex] if objtype.startswith('cpp:'): split = name.rsplit('::', 1) if len(split) != 2: warnings.warn("What's up with %s?" % str((prefix, name, objtype))) continue prefix, name = split last_prefix = prefix.split('::')[-1] else: last_prefix = prefix.split('.')[-1] store[next(c)] = { 'filename': base_file_names[index], 'objtype': objtype, 'prefix': prefix, 'last_prefix': last_prefix, 'name': name, 'shortanchor': shortanchor, } data.update({'store': store}) return data
def freeze(self): """Create a usable data structure for serializing.""" data = super(IndexBuilder, self).freeze() try: # Sphinx >= 1.5 format # Due to changes from github.com/sphinx-doc/sphinx/pull/2454 base_file_names = data['docnames'] except KeyError: # Sphinx < 1.5 format base_file_names = data['filenames'] store = {} c = itertools.count() for prefix, items in iteritems(data['objects']): for name, (index, typeindex, _, shortanchor) in iteritems(items): objtype = data['objtypes'][typeindex] if objtype.startswith('cpp:'): split = name.rsplit('::', 1) if len(split) != 2: warnings.warn("What's up with %s?" % str((prefix, name, objtype))) continue prefix, name = split last_prefix = prefix.split('::')[-1] else: last_prefix = prefix.split('.')[-1] store[next(c)] = { 'filename': base_file_names[index], 'objtype': objtype, 'prefix': prefix, 'last_prefix': last_prefix, 'name': name, 'shortanchor': shortanchor, } data.update({'store': store}) return data
[ "Create", "a", "usable", "data", "structure", "for", "serializing", "." ]
rmcgibbo/sphinxcontrib-lunrsearch
python
https://github.com/rmcgibbo/sphinxcontrib-lunrsearch/blob/fd24e6ab524e0a24a805eb1f9c4613646cb03291/sphinxcontrib/lunrsearch/__init__.py#L14-L50
[ "def", "freeze", "(", "self", ")", ":", "data", "=", "super", "(", "IndexBuilder", ",", "self", ")", ".", "freeze", "(", ")", "try", ":", "# Sphinx >= 1.5 format", "# Due to changes from github.com/sphinx-doc/sphinx/pull/2454", "base_file_names", "=", "data", "[", "'docnames'", "]", "except", "KeyError", ":", "# Sphinx < 1.5 format", "base_file_names", "=", "data", "[", "'filenames'", "]", "store", "=", "{", "}", "c", "=", "itertools", ".", "count", "(", ")", "for", "prefix", ",", "items", "in", "iteritems", "(", "data", "[", "'objects'", "]", ")", ":", "for", "name", ",", "(", "index", ",", "typeindex", ",", "_", ",", "shortanchor", ")", "in", "iteritems", "(", "items", ")", ":", "objtype", "=", "data", "[", "'objtypes'", "]", "[", "typeindex", "]", "if", "objtype", ".", "startswith", "(", "'cpp:'", ")", ":", "split", "=", "name", ".", "rsplit", "(", "'::'", ",", "1", ")", "if", "len", "(", "split", ")", "!=", "2", ":", "warnings", ".", "warn", "(", "\"What's up with %s?\"", "%", "str", "(", "(", "prefix", ",", "name", ",", "objtype", ")", ")", ")", "continue", "prefix", ",", "name", "=", "split", "last_prefix", "=", "prefix", ".", "split", "(", "'::'", ")", "[", "-", "1", "]", "else", ":", "last_prefix", "=", "prefix", ".", "split", "(", "'.'", ")", "[", "-", "1", "]", "store", "[", "next", "(", "c", ")", "]", "=", "{", "'filename'", ":", "base_file_names", "[", "index", "]", ",", "'objtype'", ":", "objtype", ",", "'prefix'", ":", "prefix", ",", "'last_prefix'", ":", "last_prefix", ",", "'name'", ":", "name", ",", "'shortanchor'", ":", "shortanchor", ",", "}", "data", ".", "update", "(", "{", "'store'", ":", "store", "}", ")", "return", "data" ]
fd24e6ab524e0a24a805eb1f9c4613646cb03291
valid
log_entity_creation
Logs an entity creation
gadget/__init__.py
def log_entity_creation(entity, params=None): """Logs an entity creation """ p = {'entity': entity} if params: p['params'] = params _log(TYPE_CODES.CREATE, p)
def log_entity_creation(entity, params=None): """Logs an entity creation """ p = {'entity': entity} if params: p['params'] = params _log(TYPE_CODES.CREATE, p)
[ "Logs", "an", "entity", "creation" ]
getslash/gadget-python
python
https://github.com/getslash/gadget-python/blob/ff22506f41798c6e11a117b2c1a27f62d8b7b9ad/gadget/__init__.py#L44-L50
[ "def", "log_entity_creation", "(", "entity", ",", "params", "=", "None", ")", ":", "p", "=", "{", "'entity'", ":", "entity", "}", "if", "params", ":", "p", "[", "'params'", "]", "=", "params", "_log", "(", "TYPE_CODES", ".", "CREATE", ",", "p", ")" ]
ff22506f41798c6e11a117b2c1a27f62d8b7b9ad