field              type            values
partition          stringclasses   3 values
func_name          stringlengths   1 – 134
docstring          stringlengths   1 – 46.9k
path               stringlengths   4 – 223
original_string    stringlengths   75 – 104k
code               stringlengths   75 – 104k
docstring_tokens   listlengths     1 – 1.97k
repo               stringlengths   7 – 55
language           stringclasses   1 value
url                stringlengths   87 – 315
code_tokens        listlengths     19 – 28.4k
sha                stringlengths   40 – 40
valid
PipeFunction.reduce
Wrap a reduce function to a Pipe object. A reduce function is a function with at least two arguments and works like the built-in reduce function: it takes the accumulated result as its first argument and the next piece of data to process as its second. A keyword argument named 'init' is optional; if init is provided, it is used as the initial value of the accumulated result, otherwise the initial value is None. :param func: The reduce function to be wrapped. :type func: function object :param args: The default arguments to be used for the reduce function. :param kw: The default keyword arguments to be used for the reduce function. :returns: Pipe object
cmdlet/cmdlet.py
def reduce(func): """Wrap a reduce function to Pipe object. Reduce function is a function with at least two arguments. It works like built-in reduce function. It takes first argument for accumulated result, second argument for the new data to process. A keyword-based argument named 'init' is optional. If init is provided, it is used for the initial value of accumulated result. Or, the initial value is None. The first argument is the data to be converted. The return data from filter function should be a boolean value. If true, data can pass. Otherwise, data is omitted. :param func: The filter function to be wrapped. :type func: function object :param args: The default arguments to be used for filter function. :param kw: The default keyword arguments to be used for filter function. :returns: Pipe object """ def wrapper(prev, *argv, **kw): accum_value = None if 'init' not in kw else kw.pop('init') if prev is None: raise TypeError('A reducer must have input.') for i in prev: accum_value = func(accum_value, i, *argv, **kw) yield accum_value return Pipe(wrapper)
def reduce(func): """Wrap a reduce function to Pipe object. Reduce function is a function with at least two arguments. It works like built-in reduce function. It takes first argument for accumulated result, second argument for the new data to process. A keyword-based argument named 'init' is optional. If init is provided, it is used for the initial value of accumulated result. Or, the initial value is None. The first argument is the data to be converted. The return data from filter function should be a boolean value. If true, data can pass. Otherwise, data is omitted. :param func: The filter function to be wrapped. :type func: function object :param args: The default arguments to be used for filter function. :param kw: The default keyword arguments to be used for filter function. :returns: Pipe object """ def wrapper(prev, *argv, **kw): accum_value = None if 'init' not in kw else kw.pop('init') if prev is None: raise TypeError('A reducer must have input.') for i in prev: accum_value = func(accum_value, i, *argv, **kw) yield accum_value return Pipe(wrapper)
[ "Wrap", "a", "reduce", "function", "to", "Pipe", "object", ".", "Reduce", "function", "is", "a", "function", "with", "at", "least", "two", "arguments", ".", "It", "works", "like", "built", "-", "in", "reduce", "function", ".", "It", "takes", "first", "argument", "for", "accumulated", "result", "second", "argument", "for", "the", "new", "data", "to", "process", ".", "A", "keyword", "-", "based", "argument", "named", "init", "is", "optional", ".", "If", "init", "is", "provided", "it", "is", "used", "for", "the", "initial", "value", "of", "accumulated", "result", ".", "Or", "the", "initial", "value", "is", "None", "." ]
GaryLee/cmdlet
python
https://github.com/GaryLee/cmdlet/blob/5852a63fc2c7dd723a3d7abe18455f8dacb49433/cmdlet/cmdlet.py#L270-L295
[ "def", "reduce", "(", "func", ")", ":", "def", "wrapper", "(", "prev", ",", "*", "argv", ",", "*", "*", "kw", ")", ":", "accum_value", "=", "None", "if", "'init'", "not", "in", "kw", "else", "kw", ".", "pop", "(", "'init'", ")", "if", "prev", "is", "None", ":", "raise", "TypeError", "(", "'A reducer must have input.'", ")", "for", "i", "in", "prev", ":", "accum_value", "=", "func", "(", "accum_value", ",", "i", ",", "*", "argv", ",", "*", "*", "kw", ")", "yield", "accum_value", "return", "Pipe", "(", "wrapper", ")" ]
5852a63fc2c7dd723a3d7abe18455f8dacb49433
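To make the generator semantics of this row's reduce wrapper concrete, here is a minimal, self-contained sketch that reproduces the inner wrapper's logic outside the cmdlet package (func is passed explicitly rather than captured by the decorator; the function and variable names are illustrative). It yields every intermediate accumulated value, so its final yield matches functools.reduce with the same initializer.

```python
from functools import reduce as functools_reduce

def reduce_wrapper(func, prev, *argv, **kw):
    # Mirrors the wrapper in the row above: pop an optional 'init'
    # keyword, then yield the running accumulated value per item.
    accum_value = None if 'init' not in kw else kw.pop('init')
    if prev is None:
        raise TypeError('A reducer must have input.')
    for i in prev:
        accum_value = func(accum_value, i, *argv, **kw)
        yield accum_value

def add(acc, x):
    # Treat the default None accumulator as "start from the first item".
    return x if acc is None else acc + x

print(list(reduce_wrapper(add, [1, 2, 3, 4])))        # [1, 3, 6, 10]
print(list(reduce_wrapper(add, [1, 2, 3], init=10)))  # [11, 13, 16]
print(functools_reduce(add, [1, 2, 3], 10))           # 16
```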
valid
PipeFunction.stopper
Wrap a conditional function (stopper function) to a Pipe object. The wrapped function should return a boolean value. The cascading pipe will stop execution if the wrapped function returns True. A stopper is useful if you have an unlimited amount of input data. :param func: The conditional function to be wrapped. :type func: function object :param args: The default arguments to be used for the wrapped function. :param kw: The default keyword arguments to be used for the wrapped function. :returns: Pipe object
cmdlet/cmdlet.py
def stopper(func): """Wrap a conditoinal function(stopper function) to Pipe object. wrapped function should return boolean value. The cascading pipe will stop the execution if wrapped function return True. Stopper is useful if you have unlimited number of input data. :param func: The conditoinal function to be wrapped. :type func: function object :param args: The default arguments to be used for wrapped function. :param kw: The default keyword arguments to be used for wrapped function. :returns: Pipe object """ def wrapper(prev, *argv, **kw): if prev is None: raise TypeError('A stopper must have input.') for i in prev: if func(i, *argv, **kw): break yield i return Pipe(wrapper)
def stopper(func): """Wrap a conditoinal function(stopper function) to Pipe object. wrapped function should return boolean value. The cascading pipe will stop the execution if wrapped function return True. Stopper is useful if you have unlimited number of input data. :param func: The conditoinal function to be wrapped. :type func: function object :param args: The default arguments to be used for wrapped function. :param kw: The default keyword arguments to be used for wrapped function. :returns: Pipe object """ def wrapper(prev, *argv, **kw): if prev is None: raise TypeError('A stopper must have input.') for i in prev: if func(i, *argv, **kw): break yield i return Pipe(wrapper)
[ "Wrap", "a", "conditoinal", "function", "(", "stopper", "function", ")", "to", "Pipe", "object", "." ]
GaryLee/cmdlet
python
https://github.com/GaryLee/cmdlet/blob/5852a63fc2c7dd723a3d7abe18455f8dacb49433/cmdlet/cmdlet.py#L299-L320
[ "def", "stopper", "(", "func", ")", ":", "def", "wrapper", "(", "prev", ",", "*", "argv", ",", "*", "*", "kw", ")", ":", "if", "prev", "is", "None", ":", "raise", "TypeError", "(", "'A stopper must have input.'", ")", "for", "i", "in", "prev", ":", "if", "func", "(", "i", ",", "*", "argv", ",", "*", "*", "kw", ")", ":", "break", "yield", "i", "return", "Pipe", "(", "wrapper", ")" ]
5852a63fc2c7dd723a3d7abe18455f8dacb49433
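A minimal standalone sketch of the stopper wrapper's behaviour (again with func passed explicitly instead of captured by the decorator; names and sample data are illustrative). The effect is equivalent to itertools.takewhile with the predicate negated.

```python
from itertools import takewhile

def stopper_wrapper(func, prev):
    # Mirrors the wrapper in the row above: stop the whole pipe as soon
    # as the stopper predicate returns True for an item.
    if prev is None:
        raise TypeError('A stopper must have input.')
    for i in prev:
        if func(i):
            break
        yield i

data = [1, 2, 3, 99, 4, 5]
stop_at = lambda x: x > 10

print(list(stopper_wrapper(stop_at, data)))             # [1, 2, 3]
print(list(takewhile(lambda x: not stop_at(x), data)))  # [1, 2, 3]
```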
valid
_list_networks
Return a dictionary of network name to active status bools. Sample virsh net-list output:: Name State Autostart ----------------------------------------- default active yes juju-test inactive no foobar inactive no Parsing the above would return:: {"default": True, "juju-test": False, "foobar": False} See: http://goo.gl/kXwfC
revolver/tool/lxc.py
def _list_networks(): """Return a dictionary of network name to active status bools. Sample virsh net-list output:: Name State Autostart ----------------------------------------- default active yes juju-test inactive no foobar inactive no Parsing the above would return:: {"default": True, "juju-test": False, "foobar": False} See: http://goo.gl/kXwfC """ output = core.run("virsh net-list --all") networks = {} # Take the header off and normalize whitespace. net_lines = [n.strip() for n in output.splitlines()[2:]] for line in net_lines: if not line: continue name, state, auto = line.split() networks[name] = state == "active" return networks
def _list_networks(): """Return a dictionary of network name to active status bools. Sample virsh net-list output:: Name State Autostart ----------------------------------------- default active yes juju-test inactive no foobar inactive no Parsing the above would return:: {"default": True, "juju-test": False, "foobar": False} See: http://goo.gl/kXwfC """ output = core.run("virsh net-list --all") networks = {} # Take the header off and normalize whitespace. net_lines = [n.strip() for n in output.splitlines()[2:]] for line in net_lines: if not line: continue name, state, auto = line.split() networks[name] = state == "active" return networks
[ "Return", "a", "dictionary", "of", "network", "name", "to", "active", "status", "bools", "." ]
michaelcontento/revolver
python
https://github.com/michaelcontento/revolver/blob/bbae82df0804ff2708a82fd0016b776664ee2deb/revolver/tool/lxc.py#L66-L92
[ "def", "_list_networks", "(", ")", ":", "output", "=", "core", ".", "run", "(", "\"virsh net-list --all\"", ")", "networks", "=", "{", "}", "# Take the header off and normalize whitespace.", "net_lines", "=", "[", "n", ".", "strip", "(", ")", "for", "n", "in", "output", ".", "splitlines", "(", ")", "[", "2", ":", "]", "]", "for", "line", "in", "net_lines", ":", "if", "not", "line", ":", "continue", "name", ",", "state", ",", "auto", "=", "line", ".", "split", "(", ")", "networks", "[", "name", "]", "=", "state", "==", "\"active\"", "return", "networks" ]
bbae82df0804ff2708a82fd0016b776664ee2deb
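_list_networks depends on core.run from the revolver package; the sketch below inlines the sample virsh output given in the docstring so the parsing logic can be checked standalone, without virsh or the package.

```python
# Sample output taken from the docstring above; core.run("virsh net-list --all")
# is replaced with a literal string.
output = """\
 Name                 State      Autostart
-----------------------------------------
default              active     yes
juju-test            inactive   no
foobar               inactive   no
"""

networks = {}
# Take the header off and normalize whitespace, as in the row above.
net_lines = [n.strip() for n in output.splitlines()[2:]]
for line in net_lines:
    if not line:
        continue
    name, state, auto = line.split()
    networks[name] = state == "active"

print(networks)  # {'default': True, 'juju-test': False, 'foobar': False}
```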
valid
Captain.flush
flush the line to stdout
captain/client.py
def flush(self, line): """flush the line to stdout""" # TODO -- maybe use echo? sys.stdout.write(line) sys.stdout.flush()
def flush(self, line): """flush the line to stdout""" # TODO -- maybe use echo? sys.stdout.write(line) sys.stdout.flush()
[ "flush", "the", "line", "to", "stdout" ]
Jaymon/captain
python
https://github.com/Jaymon/captain/blob/4297f32961d423a10d0f053bc252e29fbe939a47/captain/client.py#L87-L91
[ "def", "flush", "(", "self", ",", "line", ")", ":", "# TODO -- maybe use echo?", "sys", ".", "stdout", ".", "write", "(", "line", ")", "sys", ".", "stdout", ".", "flush", "(", ")" ]
4297f32961d423a10d0f053bc252e29fbe939a47
valid
Captain.execute
runs the passed in arguments and returns an iterator on the output of running command
captain/client.py
def execute(self, arg_str='', **kwargs): """runs the passed in arguments and returns an iterator on the output of running command""" cmd = "{} {} {}".format(self.cmd_prefix, self.script, arg_str) expected_ret_code = kwargs.pop('code', 0) # any kwargs with all capital letters should be considered environment # variables environ = self.environ for k in list(kwargs.keys()): if k.isupper(): environ[k] = kwargs.pop(k) # we will allow overriding of these values kwargs.setdefault("stderr", subprocess.STDOUT) # we will not allow these to be overridden via kwargs kwargs["shell"] = True kwargs["stdout"] = subprocess.PIPE kwargs["cwd"] = self.cwd kwargs["env"] = environ process = None self.buf = deque(maxlen=self.bufsize) try: process = subprocess.Popen( cmd, **kwargs ) # another round of links # http://stackoverflow.com/a/17413045/5006 (what I used) # http://stackoverflow.com/questions/2715847/ for line in iter(process.stdout.readline, b""): line = line.decode(self.encoding) self.buf.append(line.rstrip()) yield line process.wait() if process.returncode != expected_ret_code: if process.returncode > 0: raise RuntimeError("{} returned {} with output: {}".format( cmd, process.returncode, self.output )) except subprocess.CalledProcessError as e: if e.returncode != expected_ret_code: raise RuntimeError("{} returned {} with output: {}".format( cmd, e.returncode, self.output )) finally: if process: process.stdout.close()
def execute(self, arg_str='', **kwargs): """runs the passed in arguments and returns an iterator on the output of running command""" cmd = "{} {} {}".format(self.cmd_prefix, self.script, arg_str) expected_ret_code = kwargs.pop('code', 0) # any kwargs with all capital letters should be considered environment # variables environ = self.environ for k in list(kwargs.keys()): if k.isupper(): environ[k] = kwargs.pop(k) # we will allow overriding of these values kwargs.setdefault("stderr", subprocess.STDOUT) # we will not allow these to be overridden via kwargs kwargs["shell"] = True kwargs["stdout"] = subprocess.PIPE kwargs["cwd"] = self.cwd kwargs["env"] = environ process = None self.buf = deque(maxlen=self.bufsize) try: process = subprocess.Popen( cmd, **kwargs ) # another round of links # http://stackoverflow.com/a/17413045/5006 (what I used) # http://stackoverflow.com/questions/2715847/ for line in iter(process.stdout.readline, b""): line = line.decode(self.encoding) self.buf.append(line.rstrip()) yield line process.wait() if process.returncode != expected_ret_code: if process.returncode > 0: raise RuntimeError("{} returned {} with output: {}".format( cmd, process.returncode, self.output )) except subprocess.CalledProcessError as e: if e.returncode != expected_ret_code: raise RuntimeError("{} returned {} with output: {}".format( cmd, e.returncode, self.output )) finally: if process: process.stdout.close()
[ "runs", "the", "passed", "in", "arguments", "and", "returns", "an", "iterator", "on", "the", "output", "of", "running", "command" ]
Jaymon/captain
python
https://github.com/Jaymon/captain/blob/4297f32961d423a10d0f053bc252e29fbe939a47/captain/client.py#L110-L168
[ "def", "execute", "(", "self", ",", "arg_str", "=", "''", ",", "*", "*", "kwargs", ")", ":", "cmd", "=", "\"{} {} {}\"", ".", "format", "(", "self", ".", "cmd_prefix", ",", "self", ".", "script", ",", "arg_str", ")", "expected_ret_code", "=", "kwargs", ".", "pop", "(", "'code'", ",", "0", ")", "# any kwargs with all capital letters should be considered environment", "# variables", "environ", "=", "self", ".", "environ", "for", "k", "in", "list", "(", "kwargs", ".", "keys", "(", ")", ")", ":", "if", "k", ".", "isupper", "(", ")", ":", "environ", "[", "k", "]", "=", "kwargs", ".", "pop", "(", "k", ")", "# we will allow overriding of these values", "kwargs", ".", "setdefault", "(", "\"stderr\"", ",", "subprocess", ".", "STDOUT", ")", "# we will not allow these to be overridden via kwargs", "kwargs", "[", "\"shell\"", "]", "=", "True", "kwargs", "[", "\"stdout\"", "]", "=", "subprocess", ".", "PIPE", "kwargs", "[", "\"cwd\"", "]", "=", "self", ".", "cwd", "kwargs", "[", "\"env\"", "]", "=", "environ", "process", "=", "None", "self", ".", "buf", "=", "deque", "(", "maxlen", "=", "self", ".", "bufsize", ")", "try", ":", "process", "=", "subprocess", ".", "Popen", "(", "cmd", ",", "*", "*", "kwargs", ")", "# another round of links", "# http://stackoverflow.com/a/17413045/5006 (what I used)", "# http://stackoverflow.com/questions/2715847/", "for", "line", "in", "iter", "(", "process", ".", "stdout", ".", "readline", ",", "b\"\"", ")", ":", "line", "=", "line", ".", "decode", "(", "self", ".", "encoding", ")", "self", ".", "buf", ".", "append", "(", "line", ".", "rstrip", "(", ")", ")", "yield", "line", "process", ".", "wait", "(", ")", "if", "process", ".", "returncode", "!=", "expected_ret_code", ":", "if", "process", ".", "returncode", ">", "0", ":", "raise", "RuntimeError", "(", "\"{} returned {} with output: {}\"", ".", "format", "(", "cmd", ",", "process", ".", "returncode", ",", "self", ".", "output", ")", ")", "except", "subprocess", ".", "CalledProcessError", "as", "e", ":", "if", "e", ".", "returncode", "!=", "expected_ret_code", ":", "raise", "RuntimeError", "(", "\"{} returned {} with output: {}\"", ".", "format", "(", "cmd", ",", "e", ".", "returncode", ",", "self", ".", "output", ")", ")", "finally", ":", "if", "process", ":", "process", ".", "stdout", ".", "close", "(", ")" ]
4297f32961d423a10d0f053bc252e29fbe939a47
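A trimmed-down, standalone sketch of the streaming pattern Captain.execute uses: Popen with a merged stdout/stderr pipe, iter(readline, b"") to yield lines as they appear, a bounded deque holding the most recent output, and a return-code check afterwards. The function name, bufsize default, and sample command are illustrative, not from the dataset row.

```python
import subprocess
from collections import deque

def stream_command(cmd, bufsize=100, encoding="utf-8"):
    """Yield a command's output line by line while keeping a bounded tail."""
    buf = deque(maxlen=bufsize)
    process = subprocess.Popen(
        cmd,
        shell=True,
        stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT,  # merge stderr into the stdout pipe
    )
    try:
        for line in iter(process.stdout.readline, b""):
            line = line.decode(encoding)
            buf.append(line.rstrip())
            yield line
        process.wait()
        if process.returncode != 0:
            raise RuntimeError("{} returned {} with output: {}".format(
                cmd, process.returncode, "\n".join(buf)))
    finally:
        process.stdout.close()

for line in stream_command("echo hello && echo world"):
    print(line, end="")
```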
valid
get_request_subfields
Build a basic 035 subfield with basic information from the OAI-PMH request. :param root: ElementTree root node :return: list of subfield tuples [(..),(..)]
harvestingkit/etree_utils.py
def get_request_subfields(root): """Build a basic 035 subfield with basic information from the OAI-PMH request. :param root: ElementTree root node :return: list of subfield tuples [(..),(..)] """ request = root.find('request') responsedate = root.find('responseDate') subs = [("9", request.text), ("h", responsedate.text), ("m", request.attrib["metadataPrefix"])] return subs
def get_request_subfields(root): """Build a basic 035 subfield with basic information from the OAI-PMH request. :param root: ElementTree root node :return: list of subfield tuples [(..),(..)] """ request = root.find('request') responsedate = root.find('responseDate') subs = [("9", request.text), ("h", responsedate.text), ("m", request.attrib["metadataPrefix"])] return subs
[ "Build", "a", "basic", "035", "subfield", "with", "basic", "information", "from", "the", "OAI", "-", "PMH", "request", "." ]
inspirehep/harvesting-kit
python
https://github.com/inspirehep/harvesting-kit/blob/33a7f8aa9dade1d863110c6d8b27dfd955cb471f/harvestingkit/etree_utils.py#L25-L38
[ "def", "get_request_subfields", "(", "root", ")", ":", "request", "=", "root", ".", "find", "(", "'request'", ")", "responsedate", "=", "root", ".", "find", "(", "'responseDate'", ")", "subs", "=", "[", "(", "\"9\"", ",", "request", ".", "text", ")", ",", "(", "\"h\"", ",", "responsedate", ".", "text", ")", ",", "(", "\"m\"", ",", "request", ".", "attrib", "[", "\"metadataPrefix\"", "]", ")", "]", "return", "subs" ]
33a7f8aa9dade1d863110c6d8b27dfd955cb471f
valid
strip_xml_namespace
Strip out namespace data from an ElementTree. This function is recursive and will traverse all subnodes to the root element @param root: the root element @return: the same root element, minus namespace
harvestingkit/etree_utils.py
def strip_xml_namespace(root): """Strip out namespace data from an ElementTree. This function is recursive and will traverse all subnodes to the root element @param root: the root element @return: the same root element, minus namespace """ try: root.tag = root.tag.split('}')[1] except IndexError: pass for element in root.getchildren(): strip_xml_namespace(element)
def strip_xml_namespace(root): """Strip out namespace data from an ElementTree. This function is recursive and will traverse all subnodes to the root element @param root: the root element @return: the same root element, minus namespace """ try: root.tag = root.tag.split('}')[1] except IndexError: pass for element in root.getchildren(): strip_xml_namespace(element)
[ "Strip", "out", "namespace", "data", "from", "an", "ElementTree", "." ]
inspirehep/harvesting-kit
python
https://github.com/inspirehep/harvesting-kit/blob/33a7f8aa9dade1d863110c6d8b27dfd955cb471f/harvestingkit/etree_utils.py#L41-L57
[ "def", "strip_xml_namespace", "(", "root", ")", ":", "try", ":", "root", ".", "tag", "=", "root", ".", "tag", ".", "split", "(", "'}'", ")", "[", "1", "]", "except", "IndexError", ":", "pass", "for", "element", "in", "root", ".", "getchildren", "(", ")", ":", "strip_xml_namespace", "(", "element", ")" ]
33a7f8aa9dade1d863110c6d8b27dfd955cb471f
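The row's code calls Element.getchildren(), which was deprecated and removed in Python 3.9. Below is a self-contained sketch of the same recursive namespace stripping that iterates over the element directly; the sample XML and namespace URL are made up for illustration.

```python
import xml.etree.ElementTree as ET

def strip_xml_namespace(root):
    """Recursively drop the '{namespace}' prefix from element tags.
    Same logic as the row above, but iterating over the element directly."""
    try:
        root.tag = root.tag.split('}')[1]
    except IndexError:
        pass
    for element in root:
        strip_xml_namespace(element)

xml = '<a xmlns="http://example.org/ns"><b><c/></b></a>'
root = ET.fromstring(xml)
print(root.tag)                        # {http://example.org/ns}a
strip_xml_namespace(root)
print([e.tag for e in root.iter()])    # ['a', 'b', 'c']
```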
valid
element_tree_collection_to_records
Take an ElementTree and converts the nodes into BibRecord records. This function is for a tree root of collection as such: <collection> <record> <!-- MARCXML --> </record> <record> ... </record> </collection>
harvestingkit/etree_utils.py
def element_tree_collection_to_records(tree): """Take an ElementTree and converts the nodes into BibRecord records. This function is for a tree root of collection as such: <collection> <record> <!-- MARCXML --> </record> <record> ... </record> </collection> """ from .bibrecord import create_record records = [] collection = tree.getroot() for record_element in collection.getchildren(): marcxml = ET.tostring(record_element, encoding="utf-8") record, status, errors = create_record(marcxml) if errors: print(str(status)) records.append(record) return records
def element_tree_collection_to_records(tree): """Take an ElementTree and converts the nodes into BibRecord records. This function is for a tree root of collection as such: <collection> <record> <!-- MARCXML --> </record> <record> ... </record> </collection> """ from .bibrecord import create_record records = [] collection = tree.getroot() for record_element in collection.getchildren(): marcxml = ET.tostring(record_element, encoding="utf-8") record, status, errors = create_record(marcxml) if errors: print(str(status)) records.append(record) return records
[ "Take", "an", "ElementTree", "and", "converts", "the", "nodes", "into", "BibRecord", "records", "." ]
inspirehep/harvesting-kit
python
https://github.com/inspirehep/harvesting-kit/blob/33a7f8aa9dade1d863110c6d8b27dfd955cb471f/harvestingkit/etree_utils.py#L60-L81
[ "def", "element_tree_collection_to_records", "(", "tree", ")", ":", "from", ".", "bibrecord", "import", "create_record", "records", "=", "[", "]", "collection", "=", "tree", ".", "getroot", "(", ")", "for", "record_element", "in", "collection", ".", "getchildren", "(", ")", ":", "marcxml", "=", "ET", ".", "tostring", "(", "record_element", ",", "encoding", "=", "\"utf-8\"", ")", "record", ",", "status", ",", "errors", "=", "create_record", "(", "marcxml", ")", "if", "errors", ":", "print", "(", "str", "(", "status", ")", ")", "records", ".", "append", "(", "record", ")", "return", "records" ]
33a7f8aa9dade1d863110c6d8b27dfd955cb471f
valid
element_tree_oai_records
Take an ElementTree and converts the nodes into BibRecord records. This expects a clean OAI response with the tree root as ListRecords or GetRecord and record as the subtag like so: <ListRecords|GetRecord> <record> <header> <!-- Record Information --> </header> <metadata> <record> <!-- MARCXML --> </record> </metadata> </record> <record> ... </record> </ListRecords|GetRecord> :param tree: ElementTree object corresponding to GetRecord node from OAI request :param header_subs: OAI header subfields, if any :yield: (record, is_deleted) A tuple, with first a BibRecord found and second a boolean value saying if this is a deleted record or not.
harvestingkit/etree_utils.py
def element_tree_oai_records(tree, header_subs=None): """Take an ElementTree and converts the nodes into BibRecord records. This expects a clean OAI response with the tree root as ListRecords or GetRecord and record as the subtag like so: <ListRecords|GetRecord> <record> <header> <!-- Record Information --> </header> <metadata> <record> <!-- MARCXML --> </record> </metadata> </record> <record> ... </record> </ListRecords|GetRecord> :param tree: ElementTree object corresponding to GetRecord node from OAI request :param header_subs: OAI header subfields, if any :yield: (record, is_deleted) A tuple, with first a BibRecord found and second a boolean value saying if this is a deleted record or not. """ from .bibrecord import record_add_field, create_record if not header_subs: header_subs = [] # Make it a tuple, this information should not be changed header_subs = tuple(header_subs) oai_records = tree.getroot() for record_element in oai_records.getchildren(): header = record_element.find('header') # Add to OAI subfield datestamp = header.find('datestamp') identifier = header.find('identifier') identifier = identifier.text # The record's subfield is based on header information subs = list(header_subs) subs.append(("a", identifier)) subs.append(("d", datestamp.text)) if "status" in header.attrib and header.attrib["status"] == "deleted": # Record was deleted - create delete record deleted_record = {} record_add_field(deleted_record, "037", subfields=subs) yield deleted_record, True else: marc_root = record_element.find('metadata').find('record') marcxml = ET.tostring(marc_root, encoding="utf-8") record, status, errors = create_record(marcxml) if status == 1: # Add OAI request information record_add_field(record, "035", subfields=subs) yield record, False
def element_tree_oai_records(tree, header_subs=None): """Take an ElementTree and converts the nodes into BibRecord records. This expects a clean OAI response with the tree root as ListRecords or GetRecord and record as the subtag like so: <ListRecords|GetRecord> <record> <header> <!-- Record Information --> </header> <metadata> <record> <!-- MARCXML --> </record> </metadata> </record> <record> ... </record> </ListRecords|GetRecord> :param tree: ElementTree object corresponding to GetRecord node from OAI request :param header_subs: OAI header subfields, if any :yield: (record, is_deleted) A tuple, with first a BibRecord found and second a boolean value saying if this is a deleted record or not. """ from .bibrecord import record_add_field, create_record if not header_subs: header_subs = [] # Make it a tuple, this information should not be changed header_subs = tuple(header_subs) oai_records = tree.getroot() for record_element in oai_records.getchildren(): header = record_element.find('header') # Add to OAI subfield datestamp = header.find('datestamp') identifier = header.find('identifier') identifier = identifier.text # The record's subfield is based on header information subs = list(header_subs) subs.append(("a", identifier)) subs.append(("d", datestamp.text)) if "status" in header.attrib and header.attrib["status"] == "deleted": # Record was deleted - create delete record deleted_record = {} record_add_field(deleted_record, "037", subfields=subs) yield deleted_record, True else: marc_root = record_element.find('metadata').find('record') marcxml = ET.tostring(marc_root, encoding="utf-8") record, status, errors = create_record(marcxml) if status == 1: # Add OAI request information record_add_field(record, "035", subfields=subs) yield record, False
[ "Take", "an", "ElementTree", "and", "converts", "the", "nodes", "into", "BibRecord", "records", "." ]
inspirehep/harvesting-kit
python
https://github.com/inspirehep/harvesting-kit/blob/33a7f8aa9dade1d863110c6d8b27dfd955cb471f/harvestingkit/etree_utils.py#L84-L143
[ "def", "element_tree_oai_records", "(", "tree", ",", "header_subs", "=", "None", ")", ":", "from", ".", "bibrecord", "import", "record_add_field", ",", "create_record", "if", "not", "header_subs", ":", "header_subs", "=", "[", "]", "# Make it a tuple, this information should not be changed", "header_subs", "=", "tuple", "(", "header_subs", ")", "oai_records", "=", "tree", ".", "getroot", "(", ")", "for", "record_element", "in", "oai_records", ".", "getchildren", "(", ")", ":", "header", "=", "record_element", ".", "find", "(", "'header'", ")", "# Add to OAI subfield", "datestamp", "=", "header", ".", "find", "(", "'datestamp'", ")", "identifier", "=", "header", ".", "find", "(", "'identifier'", ")", "identifier", "=", "identifier", ".", "text", "# The record's subfield is based on header information", "subs", "=", "list", "(", "header_subs", ")", "subs", ".", "append", "(", "(", "\"a\"", ",", "identifier", ")", ")", "subs", ".", "append", "(", "(", "\"d\"", ",", "datestamp", ".", "text", ")", ")", "if", "\"status\"", "in", "header", ".", "attrib", "and", "header", ".", "attrib", "[", "\"status\"", "]", "==", "\"deleted\"", ":", "# Record was deleted - create delete record", "deleted_record", "=", "{", "}", "record_add_field", "(", "deleted_record", ",", "\"037\"", ",", "subfields", "=", "subs", ")", "yield", "deleted_record", ",", "True", "else", ":", "marc_root", "=", "record_element", ".", "find", "(", "'metadata'", ")", ".", "find", "(", "'record'", ")", "marcxml", "=", "ET", ".", "tostring", "(", "marc_root", ",", "encoding", "=", "\"utf-8\"", ")", "record", ",", "status", ",", "errors", "=", "create_record", "(", "marcxml", ")", "if", "status", "==", "1", ":", "# Add OAI request information", "record_add_field", "(", "record", ",", "\"035\"", ",", "subfields", "=", "subs", ")", "yield", "record", ",", "False" ]
33a7f8aa9dade1d863110c6d8b27dfd955cb471f
valid
run
Start a server instance. This method blocks until the server terminates. :param app: WSGI application or target string supported by :func:`load_app`. (default: :func:`default_app`) :param server: Server adapter to use. See :data:`server_names` keys for valid names or pass a :class:`ServerAdapter` subclass. (default: `wsgiref`) :param host: Server address to bind to. Pass ``0.0.0.0`` to listens on all interfaces including the external one. (default: 127.0.0.1) :param port: Server port to bind to. Values below 1024 require root privileges. (default: 8080) :param reloader: Start auto-reloading server? (default: False) :param interval: Auto-reloader interval in seconds (default: 1) :param quiet: Suppress output to stdout and stderr? (default: False) :param options: Options passed to the server adapter.
pgs/bottle.py
def run(app=None, server='wsgiref', host='127.0.0.1', port=8080, interval=1, reloader=False, quiet=False, plugins=None, debug=None, **kargs): """ Start a server instance. This method blocks until the server terminates. :param app: WSGI application or target string supported by :func:`load_app`. (default: :func:`default_app`) :param server: Server adapter to use. See :data:`server_names` keys for valid names or pass a :class:`ServerAdapter` subclass. (default: `wsgiref`) :param host: Server address to bind to. Pass ``0.0.0.0`` to listens on all interfaces including the external one. (default: 127.0.0.1) :param port: Server port to bind to. Values below 1024 require root privileges. (default: 8080) :param reloader: Start auto-reloading server? (default: False) :param interval: Auto-reloader interval in seconds (default: 1) :param quiet: Suppress output to stdout and stderr? (default: False) :param options: Options passed to the server adapter. """ if NORUN: return if reloader and not os.environ.get('BOTTLE_CHILD'): import subprocess lockfile = None try: fd, lockfile = tempfile.mkstemp(prefix='bottle.', suffix='.lock') os.close(fd) # We only need this file to exist. We never write to it while os.path.exists(lockfile): args = [sys.executable] + sys.argv environ = os.environ.copy() environ['BOTTLE_CHILD'] = 'true' environ['BOTTLE_LOCKFILE'] = lockfile p = subprocess.Popen(args, env=environ) while p.poll() is None: # Busy wait... os.utime(lockfile, None) # I am alive! time.sleep(interval) if p.poll() != 3: if os.path.exists(lockfile): os.unlink(lockfile) sys.exit(p.poll()) except KeyboardInterrupt: pass finally: if os.path.exists(lockfile): os.unlink(lockfile) return try: if debug is not None: _debug(debug) app = app or default_app() if isinstance(app, basestring): app = load_app(app) if not callable(app): raise ValueError("Application is not callable: %r" % app) for plugin in plugins or []: if isinstance(plugin, basestring): plugin = load(plugin) app.install(plugin) if server in server_names: server = server_names.get(server) if isinstance(server, basestring): server = load(server) if isinstance(server, type): server = server(host=host, port=port, **kargs) if not isinstance(server, ServerAdapter): raise ValueError("Unknown or unsupported server: %r" % server) server.quiet = server.quiet or quiet if not server.quiet: _stderr("Bottle v%s server starting up (using %s)...\n" % (__version__, repr(server))) _stderr("Listening on http://%s:%d/\n" % (server.host, server.port)) _stderr("Hit Ctrl-C to quit.\n\n") if reloader: lockfile = os.environ.get('BOTTLE_LOCKFILE') bgcheck = FileCheckerThread(lockfile, interval) with bgcheck: server.run(app) if bgcheck.status == 'reload': sys.exit(3) else: server.run(app) except KeyboardInterrupt: pass except (SystemExit, MemoryError): raise except: if not reloader: raise if not getattr(server, 'quiet', quiet): print_exc() time.sleep(interval) sys.exit(3)
def run(app=None, server='wsgiref', host='127.0.0.1', port=8080, interval=1, reloader=False, quiet=False, plugins=None, debug=None, **kargs): """ Start a server instance. This method blocks until the server terminates. :param app: WSGI application or target string supported by :func:`load_app`. (default: :func:`default_app`) :param server: Server adapter to use. See :data:`server_names` keys for valid names or pass a :class:`ServerAdapter` subclass. (default: `wsgiref`) :param host: Server address to bind to. Pass ``0.0.0.0`` to listens on all interfaces including the external one. (default: 127.0.0.1) :param port: Server port to bind to. Values below 1024 require root privileges. (default: 8080) :param reloader: Start auto-reloading server? (default: False) :param interval: Auto-reloader interval in seconds (default: 1) :param quiet: Suppress output to stdout and stderr? (default: False) :param options: Options passed to the server adapter. """ if NORUN: return if reloader and not os.environ.get('BOTTLE_CHILD'): import subprocess lockfile = None try: fd, lockfile = tempfile.mkstemp(prefix='bottle.', suffix='.lock') os.close(fd) # We only need this file to exist. We never write to it while os.path.exists(lockfile): args = [sys.executable] + sys.argv environ = os.environ.copy() environ['BOTTLE_CHILD'] = 'true' environ['BOTTLE_LOCKFILE'] = lockfile p = subprocess.Popen(args, env=environ) while p.poll() is None: # Busy wait... os.utime(lockfile, None) # I am alive! time.sleep(interval) if p.poll() != 3: if os.path.exists(lockfile): os.unlink(lockfile) sys.exit(p.poll()) except KeyboardInterrupt: pass finally: if os.path.exists(lockfile): os.unlink(lockfile) return try: if debug is not None: _debug(debug) app = app or default_app() if isinstance(app, basestring): app = load_app(app) if not callable(app): raise ValueError("Application is not callable: %r" % app) for plugin in plugins or []: if isinstance(plugin, basestring): plugin = load(plugin) app.install(plugin) if server in server_names: server = server_names.get(server) if isinstance(server, basestring): server = load(server) if isinstance(server, type): server = server(host=host, port=port, **kargs) if not isinstance(server, ServerAdapter): raise ValueError("Unknown or unsupported server: %r" % server) server.quiet = server.quiet or quiet if not server.quiet: _stderr("Bottle v%s server starting up (using %s)...\n" % (__version__, repr(server))) _stderr("Listening on http://%s:%d/\n" % (server.host, server.port)) _stderr("Hit Ctrl-C to quit.\n\n") if reloader: lockfile = os.environ.get('BOTTLE_LOCKFILE') bgcheck = FileCheckerThread(lockfile, interval) with bgcheck: server.run(app) if bgcheck.status == 'reload': sys.exit(3) else: server.run(app) except KeyboardInterrupt: pass except (SystemExit, MemoryError): raise except: if not reloader: raise if not getattr(server, 'quiet', quiet): print_exc() time.sleep(interval) sys.exit(3)
[ "Start", "a", "server", "instance", ".", "This", "method", "blocks", "until", "the", "server", "terminates", "." ]
westurner/pgs
python
https://github.com/westurner/pgs/blob/1cc2bf2c41479d8d3ba50480f003183f1675e518/pgs/bottle.py#L3152-L3251
[ "def", "run", "(", "app", "=", "None", ",", "server", "=", "'wsgiref'", ",", "host", "=", "'127.0.0.1'", ",", "port", "=", "8080", ",", "interval", "=", "1", ",", "reloader", "=", "False", ",", "quiet", "=", "False", ",", "plugins", "=", "None", ",", "debug", "=", "None", ",", "*", "*", "kargs", ")", ":", "if", "NORUN", ":", "return", "if", "reloader", "and", "not", "os", ".", "environ", ".", "get", "(", "'BOTTLE_CHILD'", ")", ":", "import", "subprocess", "lockfile", "=", "None", "try", ":", "fd", ",", "lockfile", "=", "tempfile", ".", "mkstemp", "(", "prefix", "=", "'bottle.'", ",", "suffix", "=", "'.lock'", ")", "os", ".", "close", "(", "fd", ")", "# We only need this file to exist. We never write to it", "while", "os", ".", "path", ".", "exists", "(", "lockfile", ")", ":", "args", "=", "[", "sys", ".", "executable", "]", "+", "sys", ".", "argv", "environ", "=", "os", ".", "environ", ".", "copy", "(", ")", "environ", "[", "'BOTTLE_CHILD'", "]", "=", "'true'", "environ", "[", "'BOTTLE_LOCKFILE'", "]", "=", "lockfile", "p", "=", "subprocess", ".", "Popen", "(", "args", ",", "env", "=", "environ", ")", "while", "p", ".", "poll", "(", ")", "is", "None", ":", "# Busy wait...", "os", ".", "utime", "(", "lockfile", ",", "None", ")", "# I am alive!", "time", ".", "sleep", "(", "interval", ")", "if", "p", ".", "poll", "(", ")", "!=", "3", ":", "if", "os", ".", "path", ".", "exists", "(", "lockfile", ")", ":", "os", ".", "unlink", "(", "lockfile", ")", "sys", ".", "exit", "(", "p", ".", "poll", "(", ")", ")", "except", "KeyboardInterrupt", ":", "pass", "finally", ":", "if", "os", ".", "path", ".", "exists", "(", "lockfile", ")", ":", "os", ".", "unlink", "(", "lockfile", ")", "return", "try", ":", "if", "debug", "is", "not", "None", ":", "_debug", "(", "debug", ")", "app", "=", "app", "or", "default_app", "(", ")", "if", "isinstance", "(", "app", ",", "basestring", ")", ":", "app", "=", "load_app", "(", "app", ")", "if", "not", "callable", "(", "app", ")", ":", "raise", "ValueError", "(", "\"Application is not callable: %r\"", "%", "app", ")", "for", "plugin", "in", "plugins", "or", "[", "]", ":", "if", "isinstance", "(", "plugin", ",", "basestring", ")", ":", "plugin", "=", "load", "(", "plugin", ")", "app", ".", "install", "(", "plugin", ")", "if", "server", "in", "server_names", ":", "server", "=", "server_names", ".", "get", "(", "server", ")", "if", "isinstance", "(", "server", ",", "basestring", ")", ":", "server", "=", "load", "(", "server", ")", "if", "isinstance", "(", "server", ",", "type", ")", ":", "server", "=", "server", "(", "host", "=", "host", ",", "port", "=", "port", ",", "*", "*", "kargs", ")", "if", "not", "isinstance", "(", "server", ",", "ServerAdapter", ")", ":", "raise", "ValueError", "(", "\"Unknown or unsupported server: %r\"", "%", "server", ")", "server", ".", "quiet", "=", "server", ".", "quiet", "or", "quiet", "if", "not", "server", ".", "quiet", ":", "_stderr", "(", "\"Bottle v%s server starting up (using %s)...\\n\"", "%", "(", "__version__", ",", "repr", "(", "server", ")", ")", ")", "_stderr", "(", "\"Listening on http://%s:%d/\\n\"", "%", "(", "server", ".", "host", ",", "server", ".", "port", ")", ")", "_stderr", "(", "\"Hit Ctrl-C to quit.\\n\\n\"", ")", "if", "reloader", ":", "lockfile", "=", "os", ".", "environ", ".", "get", "(", "'BOTTLE_LOCKFILE'", ")", "bgcheck", "=", "FileCheckerThread", "(", "lockfile", ",", "interval", ")", "with", "bgcheck", ":", "server", ".", "run", "(", "app", ")", "if", "bgcheck", ".", "status", "==", "'reload'", ":", "sys", 
".", "exit", "(", "3", ")", "else", ":", "server", ".", "run", "(", "app", ")", "except", "KeyboardInterrupt", ":", "pass", "except", "(", "SystemExit", ",", "MemoryError", ")", ":", "raise", "except", ":", "if", "not", "reloader", ":", "raise", "if", "not", "getattr", "(", "server", ",", "'quiet'", ",", "quiet", ")", ":", "print_exc", "(", ")", "time", ".", "sleep", "(", "interval", ")", "sys", ".", "exit", "(", "3", ")" ]
1cc2bf2c41479d8d3ba50480f003183f1675e518
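For context, here is a minimal usage of run(), assuming the standard Bottle API (route, run) that the vendored pgs/bottle.py mirrors; the route path and message are placeholders.

```python
# pip install bottle (or import from the vendored pgs.bottle module)
from bottle import route, run

@route('/hello')
def hello():
    return "Hello World!"

if __name__ == '__main__':
    # Blocks until the server terminates. With reloader=True the parent
    # process spawns a child (BOTTLE_CHILD) and restarts it whenever the
    # child exits with status 3, as in the row's source.
    run(host='127.0.0.1', port=8080, reloader=False)
```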
valid
ConfigDict.load_dict
Load values from a dictionary structure. Nesting can be used to represent namespaces. >>> c = ConfigDict() >>> c.load_dict({'some': {'namespace': {'key': 'value'} } }) {'some.namespace.key': 'value'}
pgs/bottle.py
def load_dict(self, source, namespace=''): """ Load values from a dictionary structure. Nesting can be used to represent namespaces. >>> c = ConfigDict() >>> c.load_dict({'some': {'namespace': {'key': 'value'} } }) {'some.namespace.key': 'value'} """ for key, value in source.items(): if isinstance(key, str): nskey = (namespace + '.' + key).strip('.') if isinstance(value, dict): self.load_dict(value, namespace=nskey) else: self[nskey] = value else: raise TypeError('Key has type %r (not a string)' % type(key)) return self
def load_dict(self, source, namespace=''): """ Load values from a dictionary structure. Nesting can be used to represent namespaces. >>> c = ConfigDict() >>> c.load_dict({'some': {'namespace': {'key': 'value'} } }) {'some.namespace.key': 'value'} """ for key, value in source.items(): if isinstance(key, str): nskey = (namespace + '.' + key).strip('.') if isinstance(value, dict): self.load_dict(value, namespace=nskey) else: self[nskey] = value else: raise TypeError('Key has type %r (not a string)' % type(key)) return self
[ "Load", "values", "from", "a", "dictionary", "structure", ".", "Nesting", "can", "be", "used", "to", "represent", "namespaces", "." ]
westurner/pgs
python
https://github.com/westurner/pgs/blob/1cc2bf2c41479d8d3ba50480f003183f1675e518/pgs/bottle.py#L2170-L2187
[ "def", "load_dict", "(", "self", ",", "source", ",", "namespace", "=", "''", ")", ":", "for", "key", ",", "value", "in", "source", ".", "items", "(", ")", ":", "if", "isinstance", "(", "key", ",", "str", ")", ":", "nskey", "=", "(", "namespace", "+", "'.'", "+", "key", ")", ".", "strip", "(", "'.'", ")", "if", "isinstance", "(", "value", ",", "dict", ")", ":", "self", ".", "load_dict", "(", "value", ",", "namespace", "=", "nskey", ")", "else", ":", "self", "[", "nskey", "]", "=", "value", "else", ":", "raise", "TypeError", "(", "'Key has type %r (not a string)'", "%", "type", "(", "key", ")", ")", "return", "self" ]
1cc2bf2c41479d8d3ba50480f003183f1675e518
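load_dict stores the dotted keys on the ConfigDict itself; this standalone sketch collects them into a plain dict instead, reproducing the flattening shown in the docstring's doctest. The function name is illustrative.

```python
def flatten_namespaces(source, namespace=''):
    """Flatten nested dicts into dotted keys, as ConfigDict.load_dict does."""
    flat = {}
    for key, value in source.items():
        if not isinstance(key, str):
            raise TypeError('Key has type %r (not a string)' % type(key))
        nskey = (namespace + '.' + key).strip('.')
        if isinstance(value, dict):
            flat.update(flatten_namespaces(value, namespace=nskey))
        else:
            flat[nskey] = value
    return flat

print(flatten_namespaces({'some': {'namespace': {'key': 'value'}}}))
# {'some.namespace.key': 'value'}
```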
valid
json
The oembed endpoint, or the url to which requests for metadata are passed. Third parties will want to access this view with URLs for your site's content and be returned OEmbed metadata.
oembed/views.py
def json(request, *args, **kwargs): """ The oembed endpoint, or the url to which requests for metadata are passed. Third parties will want to access this view with URLs for your site's content and be returned OEmbed metadata. """ # coerce to dictionary params = dict(request.GET.items()) callback = params.pop('callback', None) url = params.pop('url', None) if not url: return HttpResponseBadRequest('Required parameter missing: URL') try: provider = oembed.site.provider_for_url(url) if not provider.provides: raise OEmbedMissingEndpoint() except OEmbedMissingEndpoint: raise Http404('No provider found for %s' % url) query = dict([(smart_str(k), smart_str(v)) for k, v in params.items() if v]) try: resource = oembed.site.embed(url, **query) except OEmbedException, e: raise Http404('Error embedding %s: %s' % (url, str(e))) response = HttpResponse(mimetype='application/json') json = resource.json if callback: response.write('%s(%s)' % (defaultfilters.force_escape(callback), json)) else: response.write(json) return response
def json(request, *args, **kwargs): """ The oembed endpoint, or the url to which requests for metadata are passed. Third parties will want to access this view with URLs for your site's content and be returned OEmbed metadata. """ # coerce to dictionary params = dict(request.GET.items()) callback = params.pop('callback', None) url = params.pop('url', None) if not url: return HttpResponseBadRequest('Required parameter missing: URL') try: provider = oembed.site.provider_for_url(url) if not provider.provides: raise OEmbedMissingEndpoint() except OEmbedMissingEndpoint: raise Http404('No provider found for %s' % url) query = dict([(smart_str(k), smart_str(v)) for k, v in params.items() if v]) try: resource = oembed.site.embed(url, **query) except OEmbedException, e: raise Http404('Error embedding %s: %s' % (url, str(e))) response = HttpResponse(mimetype='application/json') json = resource.json if callback: response.write('%s(%s)' % (defaultfilters.force_escape(callback), json)) else: response.write(json) return response
[ "The", "oembed", "endpoint", "or", "the", "url", "to", "which", "requests", "for", "metadata", "are", "passed", ".", "Third", "parties", "will", "want", "to", "access", "this", "view", "with", "URLs", "for", "your", "site", "s", "content", "and", "be", "returned", "OEmbed", "metadata", "." ]
worldcompany/djangoembed
python
https://github.com/worldcompany/djangoembed/blob/f3f2be283441d91d1f89db780444dc75f7b51902/oembed/views.py#L19-L56
[ "def", "json", "(", "request", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "# coerce to dictionary", "params", "=", "dict", "(", "request", ".", "GET", ".", "items", "(", ")", ")", "callback", "=", "params", ".", "pop", "(", "'callback'", ",", "None", ")", "url", "=", "params", ".", "pop", "(", "'url'", ",", "None", ")", "if", "not", "url", ":", "return", "HttpResponseBadRequest", "(", "'Required parameter missing: URL'", ")", "try", ":", "provider", "=", "oembed", ".", "site", ".", "provider_for_url", "(", "url", ")", "if", "not", "provider", ".", "provides", ":", "raise", "OEmbedMissingEndpoint", "(", ")", "except", "OEmbedMissingEndpoint", ":", "raise", "Http404", "(", "'No provider found for %s'", "%", "url", ")", "query", "=", "dict", "(", "[", "(", "smart_str", "(", "k", ")", ",", "smart_str", "(", "v", ")", ")", "for", "k", ",", "v", "in", "params", ".", "items", "(", ")", "if", "v", "]", ")", "try", ":", "resource", "=", "oembed", ".", "site", ".", "embed", "(", "url", ",", "*", "*", "query", ")", "except", "OEmbedException", ",", "e", ":", "raise", "Http404", "(", "'Error embedding %s: %s'", "%", "(", "url", ",", "str", "(", "e", ")", ")", ")", "response", "=", "HttpResponse", "(", "mimetype", "=", "'application/json'", ")", "json", "=", "resource", ".", "json", "if", "callback", ":", "response", ".", "write", "(", "'%s(%s)'", "%", "(", "defaultfilters", ".", "force_escape", "(", "callback", ")", ",", "json", ")", ")", "else", ":", "response", ".", "write", "(", "json", ")", "return", "response" ]
f3f2be283441d91d1f89db780444dc75f7b51902
valid
consume_json
Extract and return oembed content for given urls. Required GET params: urls - list of urls to consume Optional GET params: width - maxwidth attribute for oembed content height - maxheight attribute for oembed content template_dir - template_dir to use when rendering oembed Returns: list of dictionaries with oembed metadata and renderings, json encoded
oembed/views.py
def consume_json(request): """ Extract and return oembed content for given urls. Required GET params: urls - list of urls to consume Optional GET params: width - maxwidth attribute for oembed content height - maxheight attribute for oembed content template_dir - template_dir to use when rendering oembed Returns: list of dictionaries with oembed metadata and renderings, json encoded """ client = OEmbedConsumer() urls = request.GET.getlist('urls') width = request.GET.get('width') height = request.GET.get('height') template_dir = request.GET.get('template_dir') output = {} ctx = RequestContext(request) for url in urls: try: provider = oembed.site.provider_for_url(url) except OEmbedMissingEndpoint: oembeds = None rendered = None else: oembeds = url rendered = client.parse_text(url, width, height, context=ctx, template_dir=template_dir) output[url] = { 'oembeds': oembeds, 'rendered': rendered, } return HttpResponse(simplejson.dumps(output), mimetype='application/json')
def consume_json(request): """ Extract and return oembed content for given urls. Required GET params: urls - list of urls to consume Optional GET params: width - maxwidth attribute for oembed content height - maxheight attribute for oembed content template_dir - template_dir to use when rendering oembed Returns: list of dictionaries with oembed metadata and renderings, json encoded """ client = OEmbedConsumer() urls = request.GET.getlist('urls') width = request.GET.get('width') height = request.GET.get('height') template_dir = request.GET.get('template_dir') output = {} ctx = RequestContext(request) for url in urls: try: provider = oembed.site.provider_for_url(url) except OEmbedMissingEndpoint: oembeds = None rendered = None else: oembeds = url rendered = client.parse_text(url, width, height, context=ctx, template_dir=template_dir) output[url] = { 'oembeds': oembeds, 'rendered': rendered, } return HttpResponse(simplejson.dumps(output), mimetype='application/json')
[ "Extract", "and", "return", "oembed", "content", "for", "given", "urls", "." ]
worldcompany/djangoembed
python
https://github.com/worldcompany/djangoembed/blob/f3f2be283441d91d1f89db780444dc75f7b51902/oembed/views.py#L59-L99
[ "def", "consume_json", "(", "request", ")", ":", "client", "=", "OEmbedConsumer", "(", ")", "urls", "=", "request", ".", "GET", ".", "getlist", "(", "'urls'", ")", "width", "=", "request", ".", "GET", ".", "get", "(", "'width'", ")", "height", "=", "request", ".", "GET", ".", "get", "(", "'height'", ")", "template_dir", "=", "request", ".", "GET", ".", "get", "(", "'template_dir'", ")", "output", "=", "{", "}", "ctx", "=", "RequestContext", "(", "request", ")", "for", "url", "in", "urls", ":", "try", ":", "provider", "=", "oembed", ".", "site", ".", "provider_for_url", "(", "url", ")", "except", "OEmbedMissingEndpoint", ":", "oembeds", "=", "None", "rendered", "=", "None", "else", ":", "oembeds", "=", "url", "rendered", "=", "client", ".", "parse_text", "(", "url", ",", "width", ",", "height", ",", "context", "=", "ctx", ",", "template_dir", "=", "template_dir", ")", "output", "[", "url", "]", "=", "{", "'oembeds'", ":", "oembeds", ",", "'rendered'", ":", "rendered", ",", "}", "return", "HttpResponse", "(", "simplejson", ".", "dumps", "(", "output", ")", ",", "mimetype", "=", "'application/json'", ")" ]
f3f2be283441d91d1f89db780444dc75f7b51902
valid
oembed_schema
A site profile detailing valid endpoints for a given domain. Allows for better auto-discovery of embeddable content. OEmbed-able content lives at a URL that maps to a provider.
oembed/views.py
def oembed_schema(request): """ A site profile detailing valid endpoints for a given domain. Allows for better auto-discovery of embeddable content. OEmbed-able content lives at a URL that maps to a provider. """ current_domain = Site.objects.get_current().domain url_schemes = [] # a list of dictionaries for all the urls we can match endpoint = reverse('oembed_json') # the public endpoint for our oembeds providers = oembed.site.get_providers() for provider in providers: # first make sure this provider class is exposed at the public endpoint if not provider.provides: continue match = None if isinstance(provider, DjangoProvider): # django providers define their regex_list by using urlreversing url_pattern = resolver.reverse_dict.get(provider._meta.named_view) # this regex replacement is set to be non-greedy, which results # in things like /news/*/*/*/*/ -- this is more explicit if url_pattern: regex = re.sub(r'%\(.+?\)s', '*', url_pattern[0][0][0]) match = 'http://%s/%s' % (current_domain, regex) elif isinstance(provider, HTTPProvider): match = provider.url_scheme else: match = provider.regex if match: url_schemes.append({ 'type': provider.resource_type, 'matches': match, 'endpoint': endpoint }) url_schemes.sort(key=lambda item: item['matches']) response = HttpResponse(mimetype='application/json') response.write(simplejson.dumps(url_schemes)) return response
def oembed_schema(request): """ A site profile detailing valid endpoints for a given domain. Allows for better auto-discovery of embeddable content. OEmbed-able content lives at a URL that maps to a provider. """ current_domain = Site.objects.get_current().domain url_schemes = [] # a list of dictionaries for all the urls we can match endpoint = reverse('oembed_json') # the public endpoint for our oembeds providers = oembed.site.get_providers() for provider in providers: # first make sure this provider class is exposed at the public endpoint if not provider.provides: continue match = None if isinstance(provider, DjangoProvider): # django providers define their regex_list by using urlreversing url_pattern = resolver.reverse_dict.get(provider._meta.named_view) # this regex replacement is set to be non-greedy, which results # in things like /news/*/*/*/*/ -- this is more explicit if url_pattern: regex = re.sub(r'%\(.+?\)s', '*', url_pattern[0][0][0]) match = 'http://%s/%s' % (current_domain, regex) elif isinstance(provider, HTTPProvider): match = provider.url_scheme else: match = provider.regex if match: url_schemes.append({ 'type': provider.resource_type, 'matches': match, 'endpoint': endpoint }) url_schemes.sort(key=lambda item: item['matches']) response = HttpResponse(mimetype='application/json') response.write(simplejson.dumps(url_schemes)) return response
[ "A", "site", "profile", "detailing", "valid", "endpoints", "for", "a", "given", "domain", ".", "Allows", "for", "better", "auto", "-", "discovery", "of", "embeddable", "content", "." ]
worldcompany/djangoembed
python
https://github.com/worldcompany/djangoembed/blob/f3f2be283441d91d1f89db780444dc75f7b51902/oembed/views.py#L101-L144
[ "def", "oembed_schema", "(", "request", ")", ":", "current_domain", "=", "Site", ".", "objects", ".", "get_current", "(", ")", ".", "domain", "url_schemes", "=", "[", "]", "# a list of dictionaries for all the urls we can match", "endpoint", "=", "reverse", "(", "'oembed_json'", ")", "# the public endpoint for our oembeds", "providers", "=", "oembed", ".", "site", ".", "get_providers", "(", ")", "for", "provider", "in", "providers", ":", "# first make sure this provider class is exposed at the public endpoint", "if", "not", "provider", ".", "provides", ":", "continue", "match", "=", "None", "if", "isinstance", "(", "provider", ",", "DjangoProvider", ")", ":", "# django providers define their regex_list by using urlreversing", "url_pattern", "=", "resolver", ".", "reverse_dict", ".", "get", "(", "provider", ".", "_meta", ".", "named_view", ")", "# this regex replacement is set to be non-greedy, which results", "# in things like /news/*/*/*/*/ -- this is more explicit", "if", "url_pattern", ":", "regex", "=", "re", ".", "sub", "(", "r'%\\(.+?\\)s'", ",", "'*'", ",", "url_pattern", "[", "0", "]", "[", "0", "]", "[", "0", "]", ")", "match", "=", "'http://%s/%s'", "%", "(", "current_domain", ",", "regex", ")", "elif", "isinstance", "(", "provider", ",", "HTTPProvider", ")", ":", "match", "=", "provider", ".", "url_scheme", "else", ":", "match", "=", "provider", ".", "regex", "if", "match", ":", "url_schemes", ".", "append", "(", "{", "'type'", ":", "provider", ".", "resource_type", ",", "'matches'", ":", "match", ",", "'endpoint'", ":", "endpoint", "}", ")", "url_schemes", ".", "sort", "(", "key", "=", "lambda", "item", ":", "item", "[", "'matches'", "]", ")", "response", "=", "HttpResponse", "(", "mimetype", "=", "'application/json'", ")", "response", ".", "write", "(", "simplejson", ".", "dumps", "(", "url_schemes", ")", ")", "return", "response" ]
f3f2be283441d91d1f89db780444dc75f7b51902
valid
find_meta
Extract __*meta*__ from meta_file.
setup.py
def find_meta(*meta_file_parts, meta_key): """Extract __*meta*__ from meta_file.""" meta_file = read(*meta_file_parts) meta_match = re.search(r"^__{}__ = ['\"]([^'\"]*)['\"]".format(meta_key), meta_file, re.M) if meta_match: return meta_match.group(1) raise RuntimeError("Unable to find __{}__ string.".format(meta_key))
def find_meta(*meta_file_parts, meta_key): """Extract __*meta*__ from meta_file.""" meta_file = read(*meta_file_parts) meta_match = re.search(r"^__{}__ = ['\"]([^'\"]*)['\"]".format(meta_key), meta_file, re.M) if meta_match: return meta_match.group(1) raise RuntimeError("Unable to find __{}__ string.".format(meta_key))
[ "Extract", "__", "*", "meta", "*", "__", "from", "meta_file", "." ]
MinchinWeb/minchin.pelican.jinja_filters
python
https://github.com/MinchinWeb/minchin.pelican.jinja_filters/blob/94b8b1dd04be49950d660fe11d28f0df0fe49664/setup.py#L16-L23
[ "def", "find_meta", "(", "*", "meta_file_parts", ",", "meta_key", ")", ":", "meta_file", "=", "read", "(", "*", "meta_file_parts", ")", "meta_match", "=", "re", ".", "search", "(", "r\"^__{}__ = ['\\\"]([^'\\\"]*)['\\\"]\"", ".", "format", "(", "meta_key", ")", ",", "meta_file", ",", "re", ".", "M", ")", "if", "meta_match", ":", "return", "meta_match", ".", "group", "(", "1", ")", "raise", "RuntimeError", "(", "\"Unable to find __{}__ string.\"", ".", "format", "(", "meta_key", ")", ")" ]
94b8b1dd04be49950d660fe11d28f0df0fe49664
valid
main
scan path directory and any subdirectories for valid captain scripts
captain/__main__.py
def main(path): '''scan path directory and any subdirectories for valid captain scripts''' basepath = os.path.abspath(os.path.expanduser(str(path))) echo.h2("Available scripts in {}".format(basepath)) echo.br() for root_dir, dirs, files in os.walk(basepath, topdown=True): for f in fnmatch.filter(files, '*.py'): try: filepath = os.path.join(root_dir, f) # super edge case, this makes sure the python script won't start # an interactive console session which would cause the session # to start and not allow the for loop to complete with open(filepath, encoding="UTF-8") as fp: body = fp.read() is_console = "InteractiveConsole" in body is_console = is_console or "code" in body is_console = is_console and "interact(" in body if is_console: continue s = captain.Script(filepath) if s.can_run_from_cli(): rel_filepath = s.call_path(basepath) p = s.parser echo.h3(rel_filepath) desc = p.description if desc: echo.indent(desc, indent=(" " * 4)) subcommands = s.subcommands if subcommands: echo.br() echo.indent("Subcommands:", indent=(" " * 4)) for sc in subcommands.keys(): echo.indent(sc, indent=(" " * 6)) echo.br() except captain.ParseError: pass except Exception as e: #echo.exception(e) #echo.err("Failed to parse {} because {}", f, e.message) echo.err("Failed to parse {}", f) echo.verbose(e.message) echo.br()
def main(path): '''scan path directory and any subdirectories for valid captain scripts''' basepath = os.path.abspath(os.path.expanduser(str(path))) echo.h2("Available scripts in {}".format(basepath)) echo.br() for root_dir, dirs, files in os.walk(basepath, topdown=True): for f in fnmatch.filter(files, '*.py'): try: filepath = os.path.join(root_dir, f) # super edge case, this makes sure the python script won't start # an interactive console session which would cause the session # to start and not allow the for loop to complete with open(filepath, encoding="UTF-8") as fp: body = fp.read() is_console = "InteractiveConsole" in body is_console = is_console or "code" in body is_console = is_console and "interact(" in body if is_console: continue s = captain.Script(filepath) if s.can_run_from_cli(): rel_filepath = s.call_path(basepath) p = s.parser echo.h3(rel_filepath) desc = p.description if desc: echo.indent(desc, indent=(" " * 4)) subcommands = s.subcommands if subcommands: echo.br() echo.indent("Subcommands:", indent=(" " * 4)) for sc in subcommands.keys(): echo.indent(sc, indent=(" " * 6)) echo.br() except captain.ParseError: pass except Exception as e: #echo.exception(e) #echo.err("Failed to parse {} because {}", f, e.message) echo.err("Failed to parse {}", f) echo.verbose(e.message) echo.br()
[ "scan", "path", "directory", "and", "any", "subdirectories", "for", "valid", "captain", "scripts" ]
Jaymon/captain
python
https://github.com/Jaymon/captain/blob/4297f32961d423a10d0f053bc252e29fbe939a47/captain/__main__.py#L15-L65
[ "def", "main", "(", "path", ")", ":", "basepath", "=", "os", ".", "path", ".", "abspath", "(", "os", ".", "path", ".", "expanduser", "(", "str", "(", "path", ")", ")", ")", "echo", ".", "h2", "(", "\"Available scripts in {}\"", ".", "format", "(", "basepath", ")", ")", "echo", ".", "br", "(", ")", "for", "root_dir", ",", "dirs", ",", "files", "in", "os", ".", "walk", "(", "basepath", ",", "topdown", "=", "True", ")", ":", "for", "f", "in", "fnmatch", ".", "filter", "(", "files", ",", "'*.py'", ")", ":", "try", ":", "filepath", "=", "os", ".", "path", ".", "join", "(", "root_dir", ",", "f", ")", "# super edge case, this makes sure the python script won't start", "# an interactive console session which would cause the session", "# to start and not allow the for loop to complete", "with", "open", "(", "filepath", ",", "encoding", "=", "\"UTF-8\"", ")", "as", "fp", ":", "body", "=", "fp", ".", "read", "(", ")", "is_console", "=", "\"InteractiveConsole\"", "in", "body", "is_console", "=", "is_console", "or", "\"code\"", "in", "body", "is_console", "=", "is_console", "and", "\"interact(\"", "in", "body", "if", "is_console", ":", "continue", "s", "=", "captain", ".", "Script", "(", "filepath", ")", "if", "s", ".", "can_run_from_cli", "(", ")", ":", "rel_filepath", "=", "s", ".", "call_path", "(", "basepath", ")", "p", "=", "s", ".", "parser", "echo", ".", "h3", "(", "rel_filepath", ")", "desc", "=", "p", ".", "description", "if", "desc", ":", "echo", ".", "indent", "(", "desc", ",", "indent", "=", "(", "\" \"", "*", "4", ")", ")", "subcommands", "=", "s", ".", "subcommands", "if", "subcommands", ":", "echo", ".", "br", "(", ")", "echo", ".", "indent", "(", "\"Subcommands:\"", ",", "indent", "=", "(", "\" \"", "*", "4", ")", ")", "for", "sc", "in", "subcommands", ".", "keys", "(", ")", ":", "echo", ".", "indent", "(", "sc", ",", "indent", "=", "(", "\" \"", "*", "6", ")", ")", "echo", ".", "br", "(", ")", "except", "captain", ".", "ParseError", ":", "pass", "except", "Exception", "as", "e", ":", "#echo.exception(e)", "#echo.err(\"Failed to parse {} because {}\", f, e.message)", "echo", ".", "err", "(", "\"Failed to parse {}\"", ",", "f", ")", "echo", ".", "verbose", "(", "e", ".", "message", ")", "echo", ".", "br", "(", ")" ]
4297f32961d423a10d0f053bc252e29fbe939a47
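A stripped-down, stdlib-only sketch of the directory walk in the main record above: it keeps the *.py filter and the "looks like an interactive console" heuristic, and leaves out the captain.Script and echo calls whose APIs are not shown in this record.

import fnmatch
import os

def iter_candidate_scripts(path):
    """Yield *.py files under path, skipping files that appear to start an
    interactive console session (same heuristic as the record above)."""
    basepath = os.path.abspath(os.path.expanduser(str(path)))
    for root_dir, dirs, files in os.walk(basepath, topdown=True):
        for f in fnmatch.filter(files, '*.py'):
            filepath = os.path.join(root_dir, f)
            with open(filepath, encoding="UTF-8") as fp:
                body = fp.read()
            is_console = "InteractiveConsole" in body
            is_console = is_console or "code" in body
            is_console = is_console and "interact(" in body
            if is_console:
                continue
            yield filepath

# Example (the path is illustrative):
# for script in iter_candidate_scripts("~/scripts"):
#     print(script)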
valid
ZipTaxClient.get_rate
Finds sales tax for given info. Returns Decimal of the tax rate, e.g. 8.750.
pyziptax/ziptax.py
def get_rate(self, zipcode, city=None, state=None, multiple_rates=False): """ Finds sales tax for given info. Returns Decimal of the tax rate, e.g. 8.750. """ data = self.make_request_data(zipcode, city, state) r = requests.get(self.url, params=data) resp = r.json() return self.process_response(resp, multiple_rates)
def get_rate(self, zipcode, city=None, state=None, multiple_rates=False): """ Finds sales tax for given info. Returns Decimal of the tax rate, e.g. 8.750. """ data = self.make_request_data(zipcode, city, state) r = requests.get(self.url, params=data) resp = r.json() return self.process_response(resp, multiple_rates)
[ "Finds", "sales", "tax", "for", "given", "info", ".", "Returns", "Decimal", "of", "the", "tax", "rate", "e", ".", "g", ".", "8", ".", "750", "." ]
albertyw/pyziptax
python
https://github.com/albertyw/pyziptax/blob/c56dd440e4cadff7f2dd4b72e5dcced06a44969d/pyziptax/ziptax.py#L24-L34
[ "def", "get_rate", "(", "self", ",", "zipcode", ",", "city", "=", "None", ",", "state", "=", "None", ",", "multiple_rates", "=", "False", ")", ":", "data", "=", "self", ".", "make_request_data", "(", "zipcode", ",", "city", ",", "state", ")", "r", "=", "requests", ".", "get", "(", "self", ".", "url", ",", "params", "=", "data", ")", "resp", "=", "r", ".", "json", "(", ")", "return", "self", ".", "process_response", "(", "resp", ",", "multiple_rates", ")" ]
c56dd440e4cadff7f2dd4b72e5dcced06a44969d
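A hedged usage sketch for the get_rate record above; the ZipTaxClient constructor is not part of this record, so the api_key keyword and the import path are assumptions made for illustration only.

from pyziptax.ziptax import ZipTaxClient   # assumed import path (module pyziptax/ziptax.py)

client = ZipTaxClient(api_key='YOUR_ZIPTAX_KEY')   # placeholder key, assumed signature
rate = client.get_rate('90210', city='Beverly Hills', state='CA')
# rate is a Decimal such as Decimal('8.750'); passing multiple_rates=True
# returns a {city: rate} mapping instead when several jurisdictions match.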
valid
ZipTaxClient.make_request_data
Make the request params given location data
pyziptax/ziptax.py
def make_request_data(self, zipcode, city, state): """ Make the request params given location data """ data = {'key': self.api_key, 'postalcode': str(zipcode), 'city': city, 'state': state } data = ZipTaxClient._clean_request_data(data) return data
def make_request_data(self, zipcode, city, state): """ Make the request params given location data """ data = {'key': self.api_key, 'postalcode': str(zipcode), 'city': city, 'state': state } data = ZipTaxClient._clean_request_data(data) return data
[ "Make", "the", "request", "params", "given", "location", "data" ]
albertyw/pyziptax
python
https://github.com/albertyw/pyziptax/blob/c56dd440e4cadff7f2dd4b72e5dcced06a44969d/pyziptax/ziptax.py#L36-L44
[ "def", "make_request_data", "(", "self", ",", "zipcode", ",", "city", ",", "state", ")", ":", "data", "=", "{", "'key'", ":", "self", ".", "api_key", ",", "'postalcode'", ":", "str", "(", "zipcode", ")", ",", "'city'", ":", "city", ",", "'state'", ":", "state", "}", "data", "=", "ZipTaxClient", ".", "_clean_request_data", "(", "data", ")", "return", "data" ]
c56dd440e4cadff7f2dd4b72e5dcced06a44969d
valid
ZipTaxClient.process_response
Get the tax rate from the ZipTax response
pyziptax/ziptax.py
def process_response(self, resp, multiple_rates): """ Get the tax rate from the ZipTax response """ self._check_for_exceptions(resp, multiple_rates) rates = {} for result in resp['results']: rate = ZipTaxClient._cast_tax_rate(result['taxSales']) rates[result['geoCity']] = rate if not multiple_rates: return rates[list(rates.keys())[0]] return rates
def process_response(self, resp, multiple_rates): """ Get the tax rate from the ZipTax response """ self._check_for_exceptions(resp, multiple_rates) rates = {} for result in resp['results']: rate = ZipTaxClient._cast_tax_rate(result['taxSales']) rates[result['geoCity']] = rate if not multiple_rates: return rates[list(rates.keys())[0]] return rates
[ "Get", "the", "tax", "rate", "from", "the", "ZipTax", "response" ]
albertyw/pyziptax
python
https://github.com/albertyw/pyziptax/blob/c56dd440e4cadff7f2dd4b72e5dcced06a44969d/pyziptax/ziptax.py#L57-L67
[ "def", "process_response", "(", "self", ",", "resp", ",", "multiple_rates", ")", ":", "self", ".", "_check_for_exceptions", "(", "resp", ",", "multiple_rates", ")", "rates", "=", "{", "}", "for", "result", "in", "resp", "[", "'results'", "]", ":", "rate", "=", "ZipTaxClient", ".", "_cast_tax_rate", "(", "result", "[", "'taxSales'", "]", ")", "rates", "[", "result", "[", "'geoCity'", "]", "]", "=", "rate", "if", "not", "multiple_rates", ":", "return", "rates", "[", "list", "(", "rates", ".", "keys", "(", ")", ")", "[", "0", "]", "]", "return", "rates" ]
c56dd440e4cadff7f2dd4b72e5dcced06a44969d
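An offline walk-through of the rate extraction in process_response, using a canned response dict; the real conversion happens in _cast_tax_rate, which is not shown in this record, so Decimal(str(...)) stands in for it here.

from decimal import Decimal

# Canned response in the shape process_response expects (values are illustrative).
resp = {'rCode': 100,
        'results': [{'geoCity': 'CULVER CITY', 'taxSales': 0.095}]}

rates = {}
for result in resp['results']:
    # Placeholder for _cast_tax_rate, which is defined elsewhere in ziptax.py.
    rates[result['geoCity']] = Decimal(str(result['taxSales']))

# Single-rate case: return the only value, keyed by whatever city came back.
print(rates[list(rates.keys())[0]])   # -> Decimal('0.095') with this placeholder cast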
valid
ZipTaxClient._check_for_exceptions
Check if there are exceptions that should be raised
pyziptax/ziptax.py
def _check_for_exceptions(self, resp, multiple_rates): """ Check if there are exceptions that should be raised """ if resp['rCode'] != 100: raise exceptions.get_exception_for_code(resp['rCode'])(resp) results = resp['results'] if len(results) == 0: raise exceptions.ZipTaxNoResults('No results found') if len(results) > 1 and not multiple_rates: # It's fine if all the taxes are the same rates = [result['taxSales'] for result in results] if len(set(rates)) != 1: raise exceptions.ZipTaxMultipleResults('Multiple results found but requested only one')
def _check_for_exceptions(self, resp, multiple_rates): """ Check if there are exceptions that should be raised """ if resp['rCode'] != 100: raise exceptions.get_exception_for_code(resp['rCode'])(resp) results = resp['results'] if len(results) == 0: raise exceptions.ZipTaxNoResults('No results found') if len(results) > 1 and not multiple_rates: # It's fine if all the taxes are the same rates = [result['taxSales'] for result in results] if len(set(rates)) != 1: raise exceptions.ZipTaxMultipleResults('Multiple results found but requested only one')
[ "Check", "if", "there", "are", "exceptions", "that", "should", "be", "raised" ]
albertyw/pyziptax
python
https://github.com/albertyw/pyziptax/blob/c56dd440e4cadff7f2dd4b72e5dcced06a44969d/pyziptax/ziptax.py#L69-L81
[ "def", "_check_for_exceptions", "(", "self", ",", "resp", ",", "multiple_rates", ")", ":", "if", "resp", "[", "'rCode'", "]", "!=", "100", ":", "raise", "exceptions", ".", "get_exception_for_code", "(", "resp", "[", "'rCode'", "]", ")", "(", "resp", ")", "results", "=", "resp", "[", "'results'", "]", "if", "len", "(", "results", ")", "==", "0", ":", "raise", "exceptions", ".", "ZipTaxNoResults", "(", "'No results found'", ")", "if", "len", "(", "results", ")", ">", "1", "and", "not", "multiple_rates", ":", "# It's fine if all the taxes are the same", "rates", "=", "[", "result", "[", "'taxSales'", "]", "for", "result", "in", "results", "]", "if", "len", "(", "set", "(", "rates", ")", ")", "!=", "1", ":", "raise", "exceptions", ".", "ZipTaxMultipleResults", "(", "'Multiple results found but requested only one'", ")" ]
c56dd440e4cadff7f2dd4b72e5dcced06a44969d
valid
get_all_text
Recursively extract all text from node.
harvestingkit/minidom_utils.py
def get_all_text(node): """Recursively extract all text from node.""" if node.nodeType == node.TEXT_NODE: return node.data else: text_string = "" for child_node in node.childNodes: text_string += get_all_text(child_node) return text_string
def get_all_text(node): """Recursively extract all text from node.""" if node.nodeType == node.TEXT_NODE: return node.data else: text_string = "" for child_node in node.childNodes: text_string += get_all_text(child_node) return text_string
[ "Recursively", "extract", "all", "text", "from", "node", "." ]
inspirehep/harvesting-kit
python
https://github.com/inspirehep/harvesting-kit/blob/33a7f8aa9dade1d863110c6d8b27dfd955cb471f/harvestingkit/minidom_utils.py#L65-L73
[ "def", "get_all_text", "(", "node", ")", ":", "if", "node", ".", "nodeType", "==", "node", ".", "TEXT_NODE", ":", "return", "node", ".", "data", "else", ":", "text_string", "=", "\"\"", "for", "child_node", "in", "node", ".", "childNodes", ":", "text_string", "+=", "get_all_text", "(", "child_node", ")", "return", "text_string" ]
33a7f8aa9dade1d863110c6d8b27dfd955cb471f
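The get_all_text record is self-contained enough to run against xml.dom.minidom directly; a quick demonstration:

from xml.dom import minidom

def get_all_text(node):
    # Concatenate the text of every descendant text node, depth-first.
    if node.nodeType == node.TEXT_NODE:
        return node.data
    text_string = ""
    for child_node in node.childNodes:
        text_string += get_all_text(child_node)
    return text_string

doc = minidom.parseString("<p>Hello <b>minidom</b> world</p>")
print(get_all_text(doc.documentElement))   # -> "Hello minidom world"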
valid
ContrastOutConnector._extract_packages
Extract a package in a new temporary directory.
harvestingkit/contrast_out.py
def _extract_packages(self): """ Extract a package in a new temporary directory. """ self.path_unpacked = mkdtemp(prefix="scoap3_package_", dir=CFG_TMPSHAREDDIR) for path in self.retrieved_packages_unpacked: scoap3utils_extract_package(path, self.path_unpacked, self.logger) return self.path_unpacked
def _extract_packages(self): """ Extract a package in a new temporary directory. """ self.path_unpacked = mkdtemp(prefix="scoap3_package_", dir=CFG_TMPSHAREDDIR) for path in self.retrieved_packages_unpacked: scoap3utils_extract_package(path, self.path_unpacked, self.logger) return self.path_unpacked
[ "Extract", "a", "package", "in", "a", "new", "temporary", "directory", "." ]
inspirehep/harvesting-kit
python
https://github.com/inspirehep/harvesting-kit/blob/33a7f8aa9dade1d863110c6d8b27dfd955cb471f/harvestingkit/contrast_out.py#L217-L226
[ "def", "_extract_packages", "(", "self", ")", ":", "self", ".", "path_unpacked", "=", "mkdtemp", "(", "prefix", "=", "\"scoap3_package_\"", ",", "dir", "=", "CFG_TMPSHAREDDIR", ")", "for", "path", "in", "self", ".", "retrieved_packages_unpacked", ":", "scoap3utils_extract_package", "(", "path", ",", "self", ".", "path_unpacked", ",", "self", ".", "logger", ")", "return", "self", ".", "path_unpacked" ]
33a7f8aa9dade1d863110c6d8b27dfd955cb471f
valid
ProviderSite.register
Registers a provider with the site.
oembed/sites.py
def register(self, provider_class): """ Registers a provider with the site. """ if not issubclass(provider_class, BaseProvider): raise TypeError('%s is not a subclass of BaseProvider' % provider_class.__name__) if provider_class in self._registered_providers: raise AlreadyRegistered('%s is already registered' % provider_class.__name__) if issubclass(provider_class, DjangoProvider): # set up signal handler for cache invalidation signals.post_save.connect( self.invalidate_stored_oembeds, sender=provider_class._meta.model ) # don't build the regex yet - if not all urlconfs have been loaded # and processed at this point, the DjangoProvider instances will fail # when attempting to reverse urlpatterns that haven't been created. # Rather, the regex-list will be populated once, on-demand. self._registered_providers.append(provider_class) # flag for re-population self.invalidate_providers()
def register(self, provider_class): """ Registers a provider with the site. """ if not issubclass(provider_class, BaseProvider): raise TypeError('%s is not a subclass of BaseProvider' % provider_class.__name__) if provider_class in self._registered_providers: raise AlreadyRegistered('%s is already registered' % provider_class.__name__) if issubclass(provider_class, DjangoProvider): # set up signal handler for cache invalidation signals.post_save.connect( self.invalidate_stored_oembeds, sender=provider_class._meta.model ) # don't build the regex yet - if not all urlconfs have been loaded # and processed at this point, the DjangoProvider instances will fail # when attempting to reverse urlpatterns that haven't been created. # Rather, the regex-list will be populated once, on-demand. self._registered_providers.append(provider_class) # flag for re-population self.invalidate_providers()
[ "Registers", "a", "provider", "with", "the", "site", "." ]
worldcompany/djangoembed
python
https://github.com/worldcompany/djangoembed/blob/f3f2be283441d91d1f89db780444dc75f7b51902/oembed/sites.py#L29-L53
[ "def", "register", "(", "self", ",", "provider_class", ")", ":", "if", "not", "issubclass", "(", "provider_class", ",", "BaseProvider", ")", ":", "raise", "TypeError", "(", "'%s is not a subclass of BaseProvider'", "%", "provider_class", ".", "__name__", ")", "if", "provider_class", "in", "self", ".", "_registered_providers", ":", "raise", "AlreadyRegistered", "(", "'%s is already registered'", "%", "provider_class", ".", "__name__", ")", "if", "issubclass", "(", "provider_class", ",", "DjangoProvider", ")", ":", "# set up signal handler for cache invalidation", "signals", ".", "post_save", ".", "connect", "(", "self", ".", "invalidate_stored_oembeds", ",", "sender", "=", "provider_class", ".", "_meta", ".", "model", ")", "# don't build the regex yet - if not all urlconfs have been loaded", "# and processed at this point, the DjangoProvider instances will fail", "# when attempting to reverse urlpatterns that haven't been created.", "# Rather, the regex-list will be populated once, on-demand.", "self", ".", "_registered_providers", ".", "append", "(", "provider_class", ")", "# flag for re-population", "self", ".", "invalidate_providers", "(", ")" ]
f3f2be283441d91d1f89db780444dc75f7b51902
valid
ProviderSite.unregister
Unregisters a provider from the site.
oembed/sites.py
def unregister(self, provider_class): """ Unregisters a provider from the site. """ if not issubclass(provider_class, BaseProvider): raise TypeError('%s must be a subclass of BaseProvider' % provider_class.__name__) if provider_class not in self._registered_providers: raise NotRegistered('%s is not registered' % provider_class.__name__) self._registered_providers.remove(provider_class) # flag for repopulation self.invalidate_providers()
def unregister(self, provider_class): """ Unregisters a provider from the site. """ if not issubclass(provider_class, BaseProvider): raise TypeError('%s must be a subclass of BaseProvider' % provider_class.__name__) if provider_class not in self._registered_providers: raise NotRegistered('%s is not registered' % provider_class.__name__) self._registered_providers.remove(provider_class) # flag for repopulation self.invalidate_providers()
[ "Unregisters", "a", "provider", "from", "the", "site", "." ]
worldcompany/djangoembed
python
https://github.com/worldcompany/djangoembed/blob/f3f2be283441d91d1f89db780444dc75f7b51902/oembed/sites.py#L55-L68
[ "def", "unregister", "(", "self", ",", "provider_class", ")", ":", "if", "not", "issubclass", "(", "provider_class", ",", "BaseProvider", ")", ":", "raise", "TypeError", "(", "'%s must be a subclass of BaseProvider'", "%", "provider_class", ".", "__name__", ")", "if", "provider_class", "not", "in", "self", ".", "_registered_providers", ":", "raise", "NotRegistered", "(", "'%s is not registered'", "%", "provider_class", ".", "__name__", ")", "self", ".", "_registered_providers", ".", "remove", "(", "provider_class", ")", "# flag for repopulation", "self", ".", "invalidate_providers", "(", ")" ]
f3f2be283441d91d1f89db780444dc75f7b51902
valid
ProviderSite.populate
Populate the internal registry's dictionary with the regexes for each provider instance
oembed/sites.py
def populate(self): """ Populate the internal registry's dictionary with the regexes for each provider instance """ self._registry = {} for provider_class in self._registered_providers: instance = provider_class() self._registry[instance] = instance.regex for stored_provider in StoredProvider.objects.active(): self._registry[stored_provider] = stored_provider.regex self._populated = True
def populate(self): """ Populate the internal registry's dictionary with the regexes for each provider instance """ self._registry = {} for provider_class in self._registered_providers: instance = provider_class() self._registry[instance] = instance.regex for stored_provider in StoredProvider.objects.active(): self._registry[stored_provider] = stored_provider.regex self._populated = True
[ "Populate", "the", "internal", "registry", "s", "dictionary", "with", "the", "regexes", "for", "each", "provider", "instance" ]
worldcompany/djangoembed
python
https://github.com/worldcompany/djangoembed/blob/f3f2be283441d91d1f89db780444dc75f7b51902/oembed/sites.py#L70-L84
[ "def", "populate", "(", "self", ")", ":", "self", ".", "_registry", "=", "{", "}", "for", "provider_class", "in", "self", ".", "_registered_providers", ":", "instance", "=", "provider_class", "(", ")", "self", ".", "_registry", "[", "instance", "]", "=", "instance", ".", "regex", "for", "stored_provider", "in", "StoredProvider", ".", "objects", ".", "active", "(", ")", ":", "self", ".", "_registry", "[", "stored_provider", "]", "=", "stored_provider", ".", "regex", "self", ".", "_populated", "=", "True" ]
f3f2be283441d91d1f89db780444dc75f7b51902
valid
ProviderSite.provider_for_url
Find the right provider for a URL
oembed/sites.py
def provider_for_url(self, url): """ Find the right provider for a URL """ for provider, regex in self.get_registry().items(): if re.match(regex, url) is not None: return provider raise OEmbedMissingEndpoint('No endpoint matches URL: %s' % url)
def provider_for_url(self, url): """ Find the right provider for a URL """ for provider, regex in self.get_registry().items(): if re.match(regex, url) is not None: return provider raise OEmbedMissingEndpoint('No endpoint matches URL: %s' % url)
[ "Find", "the", "right", "provider", "for", "a", "URL" ]
worldcompany/djangoembed
python
https://github.com/worldcompany/djangoembed/blob/f3f2be283441d91d1f89db780444dc75f7b51902/oembed/sites.py#L106-L114
[ "def", "provider_for_url", "(", "self", ",", "url", ")", ":", "for", "provider", ",", "regex", "in", "self", ".", "get_registry", "(", ")", ".", "items", "(", ")", ":", "if", "re", ".", "match", "(", "regex", ",", "url", ")", "is", "not", "None", ":", "return", "provider", "raise", "OEmbedMissingEndpoint", "(", "'No endpoint matches URL: %s'", "%", "url", ")" ]
f3f2be283441d91d1f89db780444dc75f7b51902
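A framework-free sketch of the registry lookup in provider_for_url: a dict of name-to-regex stands in for the real mapping of provider instances to their regexes, and LookupError stands in for OEmbedMissingEndpoint.

import re

# Toy registry; the real one maps provider instances to the regexes they claim.
registry = {
    'photos': re.compile(r'http://example\.com/photos/\d+/$'),
    'videos': re.compile(r'http://example\.com/videos/\d+/$'),
}

def provider_for_url(url):
    for provider, regex in registry.items():
        if re.match(regex, url) is not None:
            return provider
    raise LookupError('No endpoint matches URL: %s' % url)

print(provider_for_url('http://example.com/photos/42/'))   # -> 'photos'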
valid
ProviderSite.invalidate_stored_oembeds
A hook for django-based oembed providers to delete any stored oembeds
oembed/sites.py
def invalidate_stored_oembeds(self, sender, instance, created, **kwargs): """ A hook for django-based oembed providers to delete any stored oembeds """ ctype = ContentType.objects.get_for_model(instance) StoredOEmbed.objects.filter( object_id=instance.pk, content_type=ctype).delete()
def invalidate_stored_oembeds(self, sender, instance, created, **kwargs): """ A hook for django-based oembed providers to delete any stored oembeds """ ctype = ContentType.objects.get_for_model(instance) StoredOEmbed.objects.filter( object_id=instance.pk, content_type=ctype).delete()
[ "A", "hook", "for", "django", "-", "based", "oembed", "providers", "to", "delete", "any", "stored", "oembeds" ]
worldcompany/djangoembed
python
https://github.com/worldcompany/djangoembed/blob/f3f2be283441d91d1f89db780444dc75f7b51902/oembed/sites.py#L116-L123
[ "def", "invalidate_stored_oembeds", "(", "self", ",", "sender", ",", "instance", ",", "created", ",", "*", "*", "kwargs", ")", ":", "ctype", "=", "ContentType", ".", "objects", ".", "get_for_model", "(", "instance", ")", "StoredOEmbed", ".", "objects", ".", "filter", "(", "object_id", "=", "instance", ".", "pk", ",", "content_type", "=", "ctype", ")", ".", "delete", "(", ")" ]
f3f2be283441d91d1f89db780444dc75f7b51902
valid
ProviderSite.embed
The heart of the matter
oembed/sites.py
def embed(self, url, **kwargs): """ The heart of the matter """ try: # first figure out the provider provider = self.provider_for_url(url) except OEmbedMissingEndpoint: raise else: try: # check the database for a cached response, because of certain # race conditions that exist with get_or_create(), do a filter # lookup and just grab the first item stored_match = StoredOEmbed.objects.filter( match=url, maxwidth=kwargs.get('maxwidth', None), maxheight=kwargs.get('maxheight', None), date_expires__gte=datetime.datetime.now())[0] return OEmbedResource.create_json(stored_match.response_json) except IndexError: # query the endpoint and cache response in db # prevent None from being passed in as a GET param params = dict([(k, v) for k, v in kwargs.items() if v]) # request an oembed resource for the url resource = provider.request_resource(url, **params) try: cache_age = int(resource.cache_age) if cache_age < MIN_OEMBED_TTL: cache_age = MIN_OEMBED_TTL except: cache_age = DEFAULT_OEMBED_TTL date_expires = datetime.datetime.now() + datetime.timedelta(seconds=cache_age) stored_oembed, created = StoredOEmbed.objects.get_or_create( match=url, maxwidth=kwargs.get('maxwidth', None), maxheight=kwargs.get('maxheight', None)) stored_oembed.response_json = resource.json stored_oembed.resource_type = resource.type stored_oembed.date_expires = date_expires if resource.content_object: stored_oembed.content_object = resource.content_object stored_oembed.save() return resource
def embed(self, url, **kwargs): """ The heart of the matter """ try: # first figure out the provider provider = self.provider_for_url(url) except OEmbedMissingEndpoint: raise else: try: # check the database for a cached response, because of certain # race conditions that exist with get_or_create(), do a filter # lookup and just grab the first item stored_match = StoredOEmbed.objects.filter( match=url, maxwidth=kwargs.get('maxwidth', None), maxheight=kwargs.get('maxheight', None), date_expires__gte=datetime.datetime.now())[0] return OEmbedResource.create_json(stored_match.response_json) except IndexError: # query the endpoint and cache response in db # prevent None from being passed in as a GET param params = dict([(k, v) for k, v in kwargs.items() if v]) # request an oembed resource for the url resource = provider.request_resource(url, **params) try: cache_age = int(resource.cache_age) if cache_age < MIN_OEMBED_TTL: cache_age = MIN_OEMBED_TTL except: cache_age = DEFAULT_OEMBED_TTL date_expires = datetime.datetime.now() + datetime.timedelta(seconds=cache_age) stored_oembed, created = StoredOEmbed.objects.get_or_create( match=url, maxwidth=kwargs.get('maxwidth', None), maxheight=kwargs.get('maxheight', None)) stored_oembed.response_json = resource.json stored_oembed.resource_type = resource.type stored_oembed.date_expires = date_expires if resource.content_object: stored_oembed.content_object = resource.content_object stored_oembed.save() return resource
[ "The", "heart", "of", "the", "matter" ]
worldcompany/djangoembed
python
https://github.com/worldcompany/djangoembed/blob/f3f2be283441d91d1f89db780444dc75f7b51902/oembed/sites.py#L125-L175
[ "def", "embed", "(", "self", ",", "url", ",", "*", "*", "kwargs", ")", ":", "try", ":", "# first figure out the provider", "provider", "=", "self", ".", "provider_for_url", "(", "url", ")", "except", "OEmbedMissingEndpoint", ":", "raise", "else", ":", "try", ":", "# check the database for a cached response, because of certain", "# race conditions that exist with get_or_create(), do a filter", "# lookup and just grab the first item", "stored_match", "=", "StoredOEmbed", ".", "objects", ".", "filter", "(", "match", "=", "url", ",", "maxwidth", "=", "kwargs", ".", "get", "(", "'maxwidth'", ",", "None", ")", ",", "maxheight", "=", "kwargs", ".", "get", "(", "'maxheight'", ",", "None", ")", ",", "date_expires__gte", "=", "datetime", ".", "datetime", ".", "now", "(", ")", ")", "[", "0", "]", "return", "OEmbedResource", ".", "create_json", "(", "stored_match", ".", "response_json", ")", "except", "IndexError", ":", "# query the endpoint and cache response in db", "# prevent None from being passed in as a GET param", "params", "=", "dict", "(", "[", "(", "k", ",", "v", ")", "for", "k", ",", "v", "in", "kwargs", ".", "items", "(", ")", "if", "v", "]", ")", "# request an oembed resource for the url", "resource", "=", "provider", ".", "request_resource", "(", "url", ",", "*", "*", "params", ")", "try", ":", "cache_age", "=", "int", "(", "resource", ".", "cache_age", ")", "if", "cache_age", "<", "MIN_OEMBED_TTL", ":", "cache_age", "=", "MIN_OEMBED_TTL", "except", ":", "cache_age", "=", "DEFAULT_OEMBED_TTL", "date_expires", "=", "datetime", ".", "datetime", ".", "now", "(", ")", "+", "datetime", ".", "timedelta", "(", "seconds", "=", "cache_age", ")", "stored_oembed", ",", "created", "=", "StoredOEmbed", ".", "objects", ".", "get_or_create", "(", "match", "=", "url", ",", "maxwidth", "=", "kwargs", ".", "get", "(", "'maxwidth'", ",", "None", ")", ",", "maxheight", "=", "kwargs", ".", "get", "(", "'maxheight'", ",", "None", ")", ")", "stored_oembed", ".", "response_json", "=", "resource", ".", "json", "stored_oembed", ".", "resource_type", "=", "resource", ".", "type", "stored_oembed", ".", "date_expires", "=", "date_expires", "if", "resource", ".", "content_object", ":", "stored_oembed", ".", "content_object", "=", "resource", ".", "content_object", "stored_oembed", ".", "save", "(", ")", "return", "resource" ]
f3f2be283441d91d1f89db780444dc75f7b51902
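The cache_age handling inside embed() is the one piece that is easy to show in isolation; here is a sketch with assumed TTL constants (the real MIN_OEMBED_TTL and DEFAULT_OEMBED_TTL come from djangoembed settings not included in this record).

import datetime

MIN_OEMBED_TTL = 3600        # assumed value for illustration
DEFAULT_OEMBED_TTL = 86400   # assumed value for illustration

def expiry_for(cache_age):
    """Clamp a provider-supplied cache_age the way embed() does before caching."""
    try:
        cache_age = int(cache_age)
        if cache_age < MIN_OEMBED_TTL:
            cache_age = MIN_OEMBED_TTL
    except (TypeError, ValueError):
        cache_age = DEFAULT_OEMBED_TTL
    return datetime.datetime.now() + datetime.timedelta(seconds=cache_age)

print(expiry_for('300'))   # below the minimum, clamped to MIN_OEMBED_TTL
print(expiry_for(None))    # unparseable, falls back to DEFAULT_OEMBED_TTL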
valid
ProviderSite.autodiscover
Load up StoredProviders from url if it is an oembed scheme
oembed/sites.py
def autodiscover(self, url): """ Load up StoredProviders from url if it is an oembed scheme """ headers, response = fetch_url(url) if headers['content-type'].split(';')[0] in ('application/json', 'text/javascript'): provider_data = json.loads(response) return self.store_providers(provider_data)
def autodiscover(self, url): """ Load up StoredProviders from url if it is an oembed scheme """ headers, response = fetch_url(url) if headers['content-type'].split(';')[0] in ('application/json', 'text/javascript'): provider_data = json.loads(response) return self.store_providers(provider_data)
[ "Load", "up", "StoredProviders", "from", "url", "if", "it", "is", "an", "oembed", "scheme" ]
worldcompany/djangoembed
python
https://github.com/worldcompany/djangoembed/blob/f3f2be283441d91d1f89db780444dc75f7b51902/oembed/sites.py#L177-L184
[ "def", "autodiscover", "(", "self", ",", "url", ")", ":", "headers", ",", "response", "=", "fetch_url", "(", "url", ")", "if", "headers", "[", "'content-type'", "]", ".", "split", "(", "';'", ")", "[", "0", "]", "in", "(", "'application/json'", ",", "'text/javascript'", ")", ":", "provider_data", "=", "json", ".", "loads", "(", "response", ")", "return", "self", ".", "store_providers", "(", "provider_data", ")" ]
f3f2be283441d91d1f89db780444dc75f7b51902
valid
ProviderSite.store_providers
Iterate over the returned json and try to sort out any new providers
oembed/sites.py
def store_providers(self, provider_data): """ Iterate over the returned json and try to sort out any new providers """ if not hasattr(provider_data, '__iter__'): raise OEmbedException('Autodiscovered response not iterable') provider_pks = [] for provider in provider_data: if 'endpoint' not in provider or \ 'matches' not in provider: continue resource_type = provider.get('type') if resource_type not in RESOURCE_TYPES: continue stored_provider, created = StoredProvider.objects.get_or_create( wildcard_regex=provider['matches'] ) if created: stored_provider.endpoint_url = relative_to_full( provider['endpoint'], provider['matches'] ) stored_provider.resource_type = resource_type stored_provider.save() provider_pks.append(stored_provider.pk) return StoredProvider.objects.filter(pk__in=provider_pks)
def store_providers(self, provider_data): """ Iterate over the returned json and try to sort out any new providers """ if not hasattr(provider_data, '__iter__'): raise OEmbedException('Autodiscovered response not iterable') provider_pks = [] for provider in provider_data: if 'endpoint' not in provider or \ 'matches' not in provider: continue resource_type = provider.get('type') if resource_type not in RESOURCE_TYPES: continue stored_provider, created = StoredProvider.objects.get_or_create( wildcard_regex=provider['matches'] ) if created: stored_provider.endpoint_url = relative_to_full( provider['endpoint'], provider['matches'] ) stored_provider.resource_type = resource_type stored_provider.save() provider_pks.append(stored_provider.pk) return StoredProvider.objects.filter(pk__in=provider_pks)
[ "Iterate", "over", "the", "returned", "json", "and", "try", "to", "sort", "out", "any", "new", "providers" ]
worldcompany/djangoembed
python
https://github.com/worldcompany/djangoembed/blob/f3f2be283441d91d1f89db780444dc75f7b51902/oembed/sites.py#L186-L218
[ "def", "store_providers", "(", "self", ",", "provider_data", ")", ":", "if", "not", "hasattr", "(", "provider_data", ",", "'__iter__'", ")", ":", "raise", "OEmbedException", "(", "'Autodiscovered response not iterable'", ")", "provider_pks", "=", "[", "]", "for", "provider", "in", "provider_data", ":", "if", "'endpoint'", "not", "in", "provider", "or", "'matches'", "not", "in", "provider", ":", "continue", "resource_type", "=", "provider", ".", "get", "(", "'type'", ")", "if", "resource_type", "not", "in", "RESOURCE_TYPES", ":", "continue", "stored_provider", ",", "created", "=", "StoredProvider", ".", "objects", ".", "get_or_create", "(", "wildcard_regex", "=", "provider", "[", "'matches'", "]", ")", "if", "created", ":", "stored_provider", ".", "endpoint_url", "=", "relative_to_full", "(", "provider", "[", "'endpoint'", "]", ",", "provider", "[", "'matches'", "]", ")", "stored_provider", ".", "resource_type", "=", "resource_type", "stored_provider", ".", "save", "(", ")", "provider_pks", ".", "append", "(", "stored_provider", ".", "pk", ")", "return", "StoredProvider", ".", "objects", ".", "filter", "(", "pk__in", "=", "provider_pks", ")" ]
f3f2be283441d91d1f89db780444dc75f7b51902
valid
HTTPProvider.request_resource
Request an OEmbedResource for a given url. Some valid keyword args: - format - maxwidth - maxheight
oembed/providers.py
def request_resource(self, url, **kwargs): """ Request an OEmbedResource for a given url. Some valid keyword args: - format - maxwidth - maxheight """ params = kwargs params['url'] = url params['format'] = 'json' if '?' in self.endpoint_url: url_with_qs = '%s&%s' % (self.endpoint_url.rstrip('&'), urlencode(params)) else: url_with_qs = "%s?%s" % (self.endpoint_url, urlencode(params)) headers, raw_response = self._fetch(url_with_qs) resource = self.convert_to_resource(headers, raw_response, params) return resource
def request_resource(self, url, **kwargs): """ Request an OEmbedResource for a given url. Some valid keyword args: - format - maxwidth - maxheight """ params = kwargs params['url'] = url params['format'] = 'json' if '?' in self.endpoint_url: url_with_qs = '%s&%s' % (self.endpoint_url.rstrip('&'), urlencode(params)) else: url_with_qs = "%s?%s" % (self.endpoint_url, urlencode(params)) headers, raw_response = self._fetch(url_with_qs) resource = self.convert_to_resource(headers, raw_response, params) return resource
[ "Request", "an", "OEmbedResource", "for", "a", "given", "url", ".", "Some", "valid", "keyword", "args", ":", "-", "format", "-", "maxwidth", "-", "maxheight" ]
worldcompany/djangoembed
python
https://github.com/worldcompany/djangoembed/blob/f3f2be283441d91d1f89db780444dc75f7b51902/oembed/providers.py#L114-L134
[ "def", "request_resource", "(", "self", ",", "url", ",", "*", "*", "kwargs", ")", ":", "params", "=", "kwargs", "params", "[", "'url'", "]", "=", "url", "params", "[", "'format'", "]", "=", "'json'", "if", "'?'", "in", "self", ".", "endpoint_url", ":", "url_with_qs", "=", "'%s&%s'", "%", "(", "self", ".", "endpoint_url", ".", "rstrip", "(", "'&'", ")", ",", "urlencode", "(", "params", ")", ")", "else", ":", "url_with_qs", "=", "\"%s?%s\"", "%", "(", "self", ".", "endpoint_url", ",", "urlencode", "(", "params", ")", ")", "headers", ",", "raw_response", "=", "self", ".", "_fetch", "(", "url_with_qs", ")", "resource", "=", "self", ".", "convert_to_resource", "(", "headers", ",", "raw_response", ",", "params", ")", "return", "resource" ]
f3f2be283441d91d1f89db780444dc75f7b51902
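The query-string assembly in request_resource can be exercised without an HTTP round trip; this sketch keeps the same "?" versus "&" joining rule (urlencode is imported from urllib.parse for Python 3, whereas the original module imports it at top level).

from urllib.parse import urlencode

def build_endpoint_url(endpoint_url, url, **kwargs):
    params = {k: v for k, v in kwargs.items() if v}
    params['url'] = url
    params['format'] = 'json'
    if '?' in endpoint_url:
        # Endpoint already carries a query string: append with '&'.
        return '%s&%s' % (endpoint_url.rstrip('&'), urlencode(params))
    return '%s?%s' % (endpoint_url, urlencode(params))

print(build_endpoint_url('http://www.flickr.com/services/oembed/',
                         'http://www.flickr.com/photos/someone/123/',
                         maxwidth=400))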
valid
DjangoProviderOptions._image_field
Try to automatically detect an image field
oembed/providers.py
def _image_field(self): """ Try to automatically detect an image field """ for field in self.model._meta.fields: if isinstance(field, ImageField): return field.name
def _image_field(self): """ Try to automatically detect an image field """ for field in self.model._meta.fields: if isinstance(field, ImageField): return field.name
[ "Try", "to", "automatically", "detect", "an", "image", "field" ]
worldcompany/djangoembed
python
https://github.com/worldcompany/djangoembed/blob/f3f2be283441d91d1f89db780444dc75f7b51902/oembed/providers.py#L179-L185
[ "def", "_image_field", "(", "self", ")", ":", "for", "field", "in", "self", ".", "model", ".", "_meta", ".", "fields", ":", "if", "isinstance", "(", "field", ",", "ImageField", ")", ":", "return", "field", ".", "name" ]
f3f2be283441d91d1f89db780444dc75f7b51902
valid
DjangoProviderOptions._date_field
Try to automatically detect an image field
oembed/providers.py
def _date_field(self): """ Try to automatically detect an image field """ for field in self.model._meta.fields: if isinstance(field, (DateTimeField, DateField)): return field.name
def _date_field(self): """ Try to automatically detect an image field """ for field in self.model._meta.fields: if isinstance(field, (DateTimeField, DateField)): return field.name
[ "Try", "to", "automatically", "detect", "an", "image", "field" ]
worldcompany/djangoembed
python
https://github.com/worldcompany/djangoembed/blob/f3f2be283441d91d1f89db780444dc75f7b51902/oembed/providers.py#L188-L194
[ "def", "_date_field", "(", "self", ")", ":", "for", "field", "in", "self", ".", "model", ".", "_meta", ".", "fields", ":", "if", "isinstance", "(", "field", ",", "(", "DateTimeField", ",", "DateField", ")", ")", ":", "return", "field", ".", "name" ]
f3f2be283441d91d1f89db780444dc75f7b51902
valid
DjangoProvider._build_regex
Performs a reverse lookup on a named view and generates a list of regexes that match that object. It generates regexes with the domain name included, using sites provided by the get_sites() method. >>> regex = provider.regex >>> regex.pattern 'http://(www2.kusports.com|www2.ljworld.com|www.lawrence.com)/photos/(?P<year>\\d{4})/(?P<month>\\w{3})/(?P<day>\\d{1,2})/(?P<object_id>\\d+)/$'
oembed/providers.py
def _build_regex(self): """ Performs a reverse lookup on a named view and generates a list of regexes that match that object. It generates regexes with the domain name included, using sites provided by the get_sites() method. >>> regex = provider.regex >>> regex.pattern 'http://(www2.kusports.com|www2.ljworld.com|www.lawrence.com)/photos/(?P<year>\\d{4})/(?P<month>\\w{3})/(?P<day>\\d{1,2})/(?P<object_id>\\d+)/$' """ # get the regexes from the urlconf url_patterns = resolver.reverse_dict.get(self._meta.named_view) try: regex = url_patterns[1] except TypeError: raise OEmbedException('Error looking up %s' % self._meta.named_view) # get a list of normalized domains cleaned_sites = self.get_cleaned_sites() site_regexes = [] for site in self.get_sites(): site_regexes.append(cleaned_sites[site.pk][0]) # join the sites together with the regex 'or' sites = '|'.join(site_regexes) # create URL-matching regexes for sites regex = re.compile('(%s)/%s' % (sites, regex)) return regex
def _build_regex(self): """ Performs a reverse lookup on a named view and generates a list of regexes that match that object. It generates regexes with the domain name included, using sites provided by the get_sites() method. >>> regex = provider.regex >>> regex.pattern 'http://(www2.kusports.com|www2.ljworld.com|www.lawrence.com)/photos/(?P<year>\\d{4})/(?P<month>\\w{3})/(?P<day>\\d{1,2})/(?P<object_id>\\d+)/$' """ # get the regexes from the urlconf url_patterns = resolver.reverse_dict.get(self._meta.named_view) try: regex = url_patterns[1] except TypeError: raise OEmbedException('Error looking up %s' % self._meta.named_view) # get a list of normalized domains cleaned_sites = self.get_cleaned_sites() site_regexes = [] for site in self.get_sites(): site_regexes.append(cleaned_sites[site.pk][0]) # join the sites together with the regex 'or' sites = '|'.join(site_regexes) # create URL-matching regexes for sites regex = re.compile('(%s)/%s' % (sites, regex)) return regex
[ "Performs", "a", "reverse", "lookup", "on", "a", "named", "view", "and", "generates", "a", "list", "of", "regexes", "that", "match", "that", "object", ".", "It", "generates", "regexes", "with", "the", "domain", "name", "included", "using", "sites", "provided", "by", "the", "get_sites", "()", "method", ".", ">>>", "regex", "=", "provider", ".", "regex", ">>>", "regex", ".", "pattern", "http", ":", "//", "(", "www2", ".", "kusports", ".", "com|www2", ".", "ljworld", ".", "com|www", ".", "lawrence", ".", "com", ")", "/", "photos", "/", "(", "?P<year", ">", "\\\\", "d", "{", "4", "}", ")", "/", "(", "?P<month", ">", "\\\\", "w", "{", "3", "}", ")", "/", "(", "?P<day", ">", "\\\\", "d", "{", "1", "2", "}", ")", "/", "(", "?P<object_id", ">", "\\\\", "d", "+", ")", "/", "$" ]
worldcompany/djangoembed
python
https://github.com/worldcompany/djangoembed/blob/f3f2be283441d91d1f89db780444dc75f7b51902/oembed/providers.py#L276-L309
[ "def", "_build_regex", "(", "self", ")", ":", "# get the regexes from the urlconf", "url_patterns", "=", "resolver", ".", "reverse_dict", ".", "get", "(", "self", ".", "_meta", ".", "named_view", ")", "try", ":", "regex", "=", "url_patterns", "[", "1", "]", "except", "TypeError", ":", "raise", "OEmbedException", "(", "'Error looking up %s'", "%", "self", ".", "_meta", ".", "named_view", ")", "# get a list of normalized domains", "cleaned_sites", "=", "self", ".", "get_cleaned_sites", "(", ")", "site_regexes", "=", "[", "]", "for", "site", "in", "self", ".", "get_sites", "(", ")", ":", "site_regexes", ".", "append", "(", "cleaned_sites", "[", "site", ".", "pk", "]", "[", "0", "]", ")", "# join the sites together with the regex 'or'", "sites", "=", "'|'", ".", "join", "(", "site_regexes", ")", "# create URL-matching regexes for sites", "regex", "=", "re", ".", "compile", "(", "'(%s)/%s'", "%", "(", "sites", ",", "regex", ")", ")", "return", "regex" ]
f3f2be283441d91d1f89db780444dc75f7b51902
valid
DjangoProvider.provider_from_url
Given a URL for any of our sites, try and match it to one, returning the domain & name of the match. If no match is found, return current. Returns a tuple of domain, site name -- used to determine 'provider'
oembed/providers.py
def provider_from_url(self, url): """ Given a URL for any of our sites, try and match it to one, returning the domain & name of the match. If no match is found, return current. Returns a tuple of domain, site name -- used to determine 'provider' """ domain = get_domain(url) site_tuples = self.get_cleaned_sites().values() for domain_re, name, normalized_domain in site_tuples: if re.match(domain_re, domain): return normalized_domain, name site = Site.objects.get_current() return site.domain, site.name
def provider_from_url(self, url): """ Given a URL for any of our sites, try and match it to one, returning the domain & name of the match. If no match is found, return current. Returns a tuple of domain, site name -- used to determine 'provider' """ domain = get_domain(url) site_tuples = self.get_cleaned_sites().values() for domain_re, name, normalized_domain in site_tuples: if re.match(domain_re, domain): return normalized_domain, name site = Site.objects.get_current() return site.domain, site.name
[ "Given", "a", "URL", "for", "any", "of", "our", "sites", "try", "and", "match", "it", "to", "one", "returning", "the", "domain", "&", "name", "of", "the", "match", ".", "If", "no", "match", "is", "found", "return", "current", ".", "Returns", "a", "tuple", "of", "domain", "site", "name", "--", "used", "to", "determine", "provider" ]
worldcompany/djangoembed
python
https://github.com/worldcompany/djangoembed/blob/f3f2be283441d91d1f89db780444dc75f7b51902/oembed/providers.py#L327-L340
[ "def", "provider_from_url", "(", "self", ",", "url", ")", ":", "domain", "=", "get_domain", "(", "url", ")", "site_tuples", "=", "self", ".", "get_cleaned_sites", "(", ")", ".", "values", "(", ")", "for", "domain_re", ",", "name", ",", "normalized_domain", "in", "site_tuples", ":", "if", "re", ".", "match", "(", "domain_re", ",", "domain", ")", ":", "return", "normalized_domain", ",", "name", "site", "=", "Site", ".", "objects", ".", "get_current", "(", ")", "return", "site", ".", "domain", ",", "site", ".", "name" ]
f3f2be283441d91d1f89db780444dc75f7b51902
valid
DjangoProvider.get_params
Extract the named parameters from a url regex. If the url regex does not contain named parameters, they will be keyed _0, _1, ... * Named parameters Regex: /photos/^(?P<year>\d{4})/(?P<month>\w{3})/(?P<day>\d{1,2})/(?P<object_id>\d+)/ URL: http://www2.ljworld.com/photos/2009/oct/11/12345/ Return Value: {u'day': '11', u'month': 'oct', u'object_id': '12345', u'year': '2009'} * Unnamed parameters Regex: /blah/([\w-]+)/(\d+)/ URL: http://www.example.com/blah/hello/123/ Return Value: {u'_0': 'hello', u'_1': '123'}
oembed/providers.py
def get_params(self, url): """ Extract the named parameters from a url regex. If the url regex does not contain named parameters, they will be keyed _0, _1, ... * Named parameters Regex: /photos/^(?P<year>\d{4})/(?P<month>\w{3})/(?P<day>\d{1,2})/(?P<object_id>\d+)/ URL: http://www2.ljworld.com/photos/2009/oct/11/12345/ Return Value: {u'day': '11', u'month': 'oct', u'object_id': '12345', u'year': '2009'} * Unnamed parameters Regex: /blah/([\w-]+)/(\d+)/ URL: http://www.example.com/blah/hello/123/ Return Value: {u'_0': 'hello', u'_1': '123'} """ match = re.match(self.regex, url) if match is not None: params = match.groupdict() if not params: params = {} for i, group in enumerate(match.groups()[1:]): params['_%s' % i] = group return params raise OEmbedException('No regex matched the url %s' % (url))
def get_params(self, url): """ Extract the named parameters from a url regex. If the url regex does not contain named parameters, they will be keyed _0, _1, ... * Named parameters Regex: /photos/^(?P<year>\d{4})/(?P<month>\w{3})/(?P<day>\d{1,2})/(?P<object_id>\d+)/ URL: http://www2.ljworld.com/photos/2009/oct/11/12345/ Return Value: {u'day': '11', u'month': 'oct', u'object_id': '12345', u'year': '2009'} * Unnamed parameters Regex: /blah/([\w-]+)/(\d+)/ URL: http://www.example.com/blah/hello/123/ Return Value: {u'_0': 'hello', u'_1': '123'} """ match = re.match(self.regex, url) if match is not None: params = match.groupdict() if not params: params = {} for i, group in enumerate(match.groups()[1:]): params['_%s' % i] = group return params raise OEmbedException('No regex matched the url %s' % (url))
[ "Extract", "the", "named", "parameters", "from", "a", "url", "regex", ".", "If", "the", "url", "regex", "does", "not", "contain", "named", "parameters", "they", "will", "be", "keyed", "_0", "_1", "...", "*", "Named", "parameters", "Regex", ":", "/", "photos", "/", "^", "(", "?P<year", ">", "\\", "d", "{", "4", "}", ")", "/", "(", "?P<month", ">", "\\", "w", "{", "3", "}", ")", "/", "(", "?P<day", ">", "\\", "d", "{", "1", "2", "}", ")", "/", "(", "?P<object_id", ">", "\\", "d", "+", ")", "/", "URL", ":", "http", ":", "//", "www2", ".", "ljworld", ".", "com", "/", "photos", "/", "2009", "/", "oct", "/", "11", "/", "12345", "/", "Return", "Value", ":", "{", "u", "day", ":", "11", "u", "month", ":", "oct", "u", "object_id", ":", "12345", "u", "year", ":", "2009", "}", "*", "Unnamed", "parameters", "Regex", ":", "/", "blah", "/", "(", "[", "\\", "w", "-", "]", "+", ")", "/", "(", "\\", "d", "+", ")", "/", "URL", ":", "http", ":", "//", "www", ".", "example", ".", "com", "/", "blah", "/", "hello", "/", "123", "/", "Return", "Value", ":", "{", "u", "_0", ":", "hello", "u", "_1", ":", "123", "}" ]
worldcompany/djangoembed
python
https://github.com/worldcompany/djangoembed/blob/f3f2be283441d91d1f89db780444dc75f7b51902/oembed/providers.py#L342-L376
[ "def", "get_params", "(", "self", ",", "url", ")", ":", "match", "=", "re", ".", "match", "(", "self", ".", "regex", ",", "url", ")", "if", "match", "is", "not", "None", ":", "params", "=", "match", ".", "groupdict", "(", ")", "if", "not", "params", ":", "params", "=", "{", "}", "for", "i", ",", "group", "in", "enumerate", "(", "match", ".", "groups", "(", ")", "[", "1", ":", "]", ")", ":", "params", "[", "'_%s'", "%", "i", "]", "=", "group", "return", "params", "raise", "OEmbedException", "(", "'No regex matched the url %s'", "%", "(", "url", ")", ")" ]
f3f2be283441d91d1f89db780444dc75f7b51902
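A standalone version of the parameter extraction in get_params; unlike the provider method, which skips its first capture group because that group matches the site domain, this sketch keys every positional group.

import re

def params_from_match(regex, url):
    match = re.match(regex, url)
    if match is None:
        raise ValueError('No regex matched the url %s' % url)
    params = match.groupdict()          # named groups, if the pattern has them
    if not params:
        params = {}
        for i, group in enumerate(match.groups()):
            params['_%s' % i] = group   # fall back to _0, _1, ... keys
    return params

named = r'http://www\.example\.com/photos/(?P<year>\d{4})/(?P<object_id>\d+)/'
unnamed = r'http://www\.example\.com/blah/([\w-]+)/(\d+)/'
print(params_from_match(named, 'http://www.example.com/photos/2009/12345/'))
print(params_from_match(unnamed, 'http://www.example.com/blah/hello/123/'))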
valid
DjangoProvider.get_object
Fields to match is a mapping of url params to fields, so for the photos example above, it would be: fields_to_match = { 'object_id': 'id' } This procedure parses out named params from a URL and then uses the fields_to_match dictionary to generate a query.
oembed/providers.py
def get_object(self, url): """ Fields to match is a mapping of url params to fields, so for the photos example above, it would be: fields_to_match = { 'object_id': 'id' } This procedure parses out named params from a URL and then uses the fields_to_match dictionary to generate a query. """ params = self.get_params(url) query = {} for key, value in self._meta.fields_to_match.iteritems(): try: query[value] = params[key] except KeyError: raise OEmbedException('%s was not found in the urlpattern parameters. Valid names are: %s' % (key, ', '.join(params.keys()))) try: obj = self.get_queryset().get(**query) except self._meta.model.DoesNotExist: raise OEmbedException('Requested object not found') return obj
def get_object(self, url): """ Fields to match is a mapping of url params to fields, so for the photos example above, it would be: fields_to_match = { 'object_id': 'id' } This procedure parses out named params from a URL and then uses the fields_to_match dictionary to generate a query. """ params = self.get_params(url) query = {} for key, value in self._meta.fields_to_match.iteritems(): try: query[value] = params[key] except KeyError: raise OEmbedException('%s was not found in the urlpattern parameters. Valid names are: %s' % (key, ', '.join(params.keys()))) try: obj = self.get_queryset().get(**query) except self._meta.model.DoesNotExist: raise OEmbedException('Requested object not found') return obj
[ "Fields", "to", "match", "is", "a", "mapping", "of", "url", "params", "to", "fields", "so", "for", "the", "photos", "example", "above", "it", "would", "be", ":", "fields_to_match", "=", "{", "object_id", ":", "id", "}", "This", "procedure", "parses", "out", "named", "params", "from", "a", "URL", "and", "then", "uses", "the", "fields_to_match", "dictionary", "to", "generate", "a", "query", "." ]
worldcompany/djangoembed
python
https://github.com/worldcompany/djangoembed/blob/f3f2be283441d91d1f89db780444dc75f7b51902/oembed/providers.py#L381-L404
[ "def", "get_object", "(", "self", ",", "url", ")", ":", "params", "=", "self", ".", "get_params", "(", "url", ")", "query", "=", "{", "}", "for", "key", ",", "value", "in", "self", ".", "_meta", ".", "fields_to_match", ".", "iteritems", "(", ")", ":", "try", ":", "query", "[", "value", "]", "=", "params", "[", "key", "]", "except", "KeyError", ":", "raise", "OEmbedException", "(", "'%s was not found in the urlpattern parameters. Valid names are: %s'", "%", "(", "key", ",", "', '", ".", "join", "(", "params", ".", "keys", "(", ")", ")", ")", ")", "try", ":", "obj", "=", "self", ".", "get_queryset", "(", ")", ".", "get", "(", "*", "*", "query", ")", "except", "self", ".", "_meta", ".", "model", ".", "DoesNotExist", ":", "raise", "OEmbedException", "(", "'Requested object not found'", ")", "return", "obj" ]
f3f2be283441d91d1f89db780444dc75f7b51902
valid
DjangoProvider.render_html
Generate the 'html' attribute of an oembed resource using a template. Sort of a corollary to the parser's render_oembed method. By default, the current mapping will be passed in as the context. OEmbed templates are stored in: oembed/provider/[app_label]_[model].html -- or -- oembed/provider/media_video.html
oembed/providers.py
def render_html(self, obj, context=None): """ Generate the 'html' attribute of an oembed resource using a template. Sort of a corollary to the parser's render_oembed method. By default, the current mapping will be passed in as the context. OEmbed templates are stored in: oembed/provider/[app_label]_[model].html -- or -- oembed/provider/media_video.html """ provided_context = context or Context() context = RequestContext(mock_request()) context.update(provided_context) context.push() context[self._meta.context_varname] = obj rendered = render_to_string(self._meta.template_name, context) context.pop() return rendered
def render_html(self, obj, context=None): """ Generate the 'html' attribute of an oembed resource using a template. Sort of a corollary to the parser's render_oembed method. By default, the current mapping will be passed in as the context. OEmbed templates are stored in: oembed/provider/[app_label]_[model].html -- or -- oembed/provider/media_video.html """ provided_context = context or Context() context = RequestContext(mock_request()) context.update(provided_context) context.push() context[self._meta.context_varname] = obj rendered = render_to_string(self._meta.template_name, context) context.pop() return rendered
[ "Generate", "the", "html", "attribute", "of", "an", "oembed", "resource", "using", "a", "template", ".", "Sort", "of", "a", "corollary", "to", "the", "parser", "s", "render_oembed", "method", ".", "By", "default", "the", "current", "mapping", "will", "be", "passed", "in", "as", "the", "context", ".", "OEmbed", "templates", "are", "stored", "in", ":", "oembed", "/", "provider", "/", "[", "app_label", "]", "_", "[", "model", "]", ".", "html", "--", "or", "--", "oembed", "/", "provider", "/", "media_video", ".", "html" ]
worldcompany/djangoembed
python
https://github.com/worldcompany/djangoembed/blob/f3f2be283441d91d1f89db780444dc75f7b51902/oembed/providers.py#L406-L428
[ "def", "render_html", "(", "self", ",", "obj", ",", "context", "=", "None", ")", ":", "provided_context", "=", "context", "or", "Context", "(", ")", "context", "=", "RequestContext", "(", "mock_request", "(", ")", ")", "context", ".", "update", "(", "provided_context", ")", "context", ".", "push", "(", ")", "context", "[", "self", ".", "_meta", ".", "context_varname", "]", "=", "obj", "rendered", "=", "render_to_string", "(", "self", ".", "_meta", ".", "template_name", ",", "context", ")", "context", ".", "pop", "(", ")", "return", "rendered" ]
f3f2be283441d91d1f89db780444dc75f7b51902
valid
DjangoProvider.map_attr
A kind of cheesy method that allows for callables or attributes to be used interchangably
oembed/providers.py
def map_attr(self, mapping, attr, obj): """ A kind of cheesy method that allows for callables or attributes to be used interchangably """ if attr not in mapping and hasattr(self, attr): if not callable(getattr(self, attr)): mapping[attr] = getattr(self, attr) else: mapping[attr] = getattr(self, attr)(obj)
def map_attr(self, mapping, attr, obj): """ A kind of cheesy method that allows for callables or attributes to be used interchangably """ if attr not in mapping and hasattr(self, attr): if not callable(getattr(self, attr)): mapping[attr] = getattr(self, attr) else: mapping[attr] = getattr(self, attr)(obj)
[ "A", "kind", "of", "cheesy", "method", "that", "allows", "for", "callables", "or", "attributes", "to", "be", "used", "interchangably" ]
worldcompany/djangoembed
python
https://github.com/worldcompany/djangoembed/blob/f3f2be283441d91d1f89db780444dc75f7b51902/oembed/providers.py#L430-L439
[ "def", "map_attr", "(", "self", ",", "mapping", ",", "attr", ",", "obj", ")", ":", "if", "attr", "not", "in", "mapping", "and", "hasattr", "(", "self", ",", "attr", ")", ":", "if", "not", "callable", "(", "getattr", "(", "self", ",", "attr", ")", ")", ":", "mapping", "[", "attr", "]", "=", "getattr", "(", "self", ",", "attr", ")", "else", ":", "mapping", "[", "attr", "]", "=", "getattr", "(", "self", ",", "attr", ")", "(", "obj", ")" ]
f3f2be283441d91d1f89db780444dc75f7b51902
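A small self-contained demonstration of the attribute-or-callable dispatch that map_attr performs, using toy classes in place of a real provider and model instance:

class ToyProvider(object):
    provider_name = 'example.com'          # plain attribute: copied as-is

    def title(self, obj):                  # callable: invoked with the object
        return obj.name.title()

    def map_attr(self, mapping, attr, obj):
        if attr not in mapping and hasattr(self, attr):
            if not callable(getattr(self, attr)):
                mapping[attr] = getattr(self, attr)
            else:
                mapping[attr] = getattr(self, attr)(obj)

class ToyPhoto(object):
    name = 'sunset over the lake'

mapping = {}
provider = ToyProvider()
provider.map_attr(mapping, 'provider_name', ToyPhoto())
provider.map_attr(mapping, 'title', ToyPhoto())
print(mapping)   # {'provider_name': 'example.com', 'title': 'Sunset Over The Lake'}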
valid
DjangoProvider.get_image
Return an ImageFileField instance
oembed/providers.py
def get_image(self, obj): """ Return an ImageFileField instance """ if self._meta.image_field: return getattr(obj, self._meta.image_field)
def get_image(self, obj): """ Return an ImageFileField instance """ if self._meta.image_field: return getattr(obj, self._meta.image_field)
[ "Return", "an", "ImageFileField", "instance" ]
worldcompany/djangoembed
python
https://github.com/worldcompany/djangoembed/blob/f3f2be283441d91d1f89db780444dc75f7b51902/oembed/providers.py#L441-L446
[ "def", "get_image", "(", "self", ",", "obj", ")", ":", "if", "self", ".", "_meta", ".", "image_field", ":", "return", "getattr", "(", "obj", ",", "self", ".", "_meta", ".", "image_field", ")" ]
f3f2be283441d91d1f89db780444dc75f7b51902
valid
DjangoProvider.resize
Resize an image to the 'best fit' width & height, maintaining the scale of the image, so a 500x500 image sized to 300x400 will actually be scaled to 300x300. Params: image: ImageFieldFile to be resized (i.e. model.image_field) new_width & new_height: desired maximums for resizing Returns: the url to the new image and the new width & height (http://path-to-new-image, 300, 300)
oembed/providers.py
def resize(self, image_field, new_width=None, new_height=None): """ Resize an image to the 'best fit' width & height, maintaining the scale of the image, so a 500x500 image sized to 300x400 will actually be scaled to 300x300. Params: image: ImageFieldFile to be resized (i.e. model.image_field) new_width & new_height: desired maximums for resizing Returns: the url to the new image and the new width & height (http://path-to-new-image, 300, 300) """ if isinstance(image_field, ImageFieldFile) and \ image_field.field.width_field and \ image_field.field.height_field: # use model fields current_width = getattr(image_field.instance, image_field.field.width_field) current_height = getattr(image_field.instance, image_field.field.height_field) else: # use PIL try: file_obj = storage.default_storage.open(image_field.name, 'rb') img_obj = Image.open(file_obj) current_width, current_height = img_obj.size except IOError: return (image_field.url, 0, 0) # determine if resizing needs to be done (will not scale up) if current_width < new_width: if not new_height or current_height < new_height: return (image_field.url, current_width, current_height) # calculate ratios new_width, new_height = scale(current_width, current_height, new_width, new_height) # use the image_processor defined in the settings, or PIL by default return self._meta.image_processor.resize(image_field, new_width, new_height)
def resize(self, image_field, new_width=None, new_height=None):
    """
    Resize an image to the 'best fit' width & height, maintaining
    the scale of the image, so a 500x500 image sized to 300x400
    will actually be scaled to 300x300.

    Params:
        image: ImageFieldFile to be resized (i.e. model.image_field)
        new_width & new_height: desired maximums for resizing

    Returns:
        the url to the new image and the new width & height
        (http://path-to-new-image, 300, 300)
    """
    if isinstance(image_field, ImageFieldFile) and \
       image_field.field.width_field and \
       image_field.field.height_field:
        # use model fields
        current_width = getattr(image_field.instance, image_field.field.width_field)
        current_height = getattr(image_field.instance, image_field.field.height_field)
    else:
        # use PIL
        try:
            file_obj = storage.default_storage.open(image_field.name, 'rb')
            img_obj = Image.open(file_obj)
            current_width, current_height = img_obj.size
        except IOError:
            return (image_field.url, 0, 0)

    # determine if resizing needs to be done (will not scale up)
    if current_width < new_width:
        if not new_height or current_height < new_height:
            return (image_field.url, current_width, current_height)

    # calculate ratios
    new_width, new_height = scale(current_width, current_height, new_width, new_height)

    # use the image_processor defined in the settings, or PIL by default
    return self._meta.image_processor.resize(image_field, new_width, new_height)
[ "Resize", "an", "image", "to", "the", "best", "fit", "width", "&", "height", "maintaining", "the", "scale", "of", "the", "image", "so", "a", "500x500", "image", "sized", "to", "300x400", "will", "actually", "be", "scaled", "to", "300x300", ".", "Params", ":", "image", ":", "ImageFieldFile", "to", "be", "resized", "(", "i", ".", "e", ".", "model", ".", "image_field", ")", "new_width", "&", "new_height", ":", "desired", "maximums", "for", "resizing", "Returns", ":", "the", "url", "to", "the", "new", "image", "and", "the", "new", "width", "&", "height", "(", "http", ":", "//", "path", "-", "to", "-", "new", "-", "image", "300", "300", ")" ]
worldcompany/djangoembed
python
https://github.com/worldcompany/djangoembed/blob/f3f2be283441d91d1f89db780444dc75f7b51902/oembed/providers.py#L448-L486
[ "def", "resize", "(", "self", ",", "image_field", ",", "new_width", "=", "None", ",", "new_height", "=", "None", ")", ":", "if", "isinstance", "(", "image_field", ",", "ImageFieldFile", ")", "and", "image_field", ".", "field", ".", "width_field", "and", "image_field", ".", "field", ".", "height_field", ":", "# use model fields", "current_width", "=", "getattr", "(", "image_field", ".", "instance", ",", "image_field", ".", "field", ".", "width_field", ")", "current_height", "=", "getattr", "(", "image_field", ".", "instance", ",", "image_field", ".", "field", ".", "height_field", ")", "else", ":", "# use PIL", "try", ":", "file_obj", "=", "storage", ".", "default_storage", ".", "open", "(", "image_field", ".", "name", ",", "'rb'", ")", "img_obj", "=", "Image", ".", "open", "(", "file_obj", ")", "current_width", ",", "current_height", "=", "img_obj", ".", "size", "except", "IOError", ":", "return", "(", "image_field", ".", "url", ",", "0", ",", "0", ")", "# determine if resizing needs to be done (will not scale up)", "if", "current_width", "<", "new_width", ":", "if", "not", "new_height", "or", "current_height", "<", "new_height", ":", "return", "(", "image_field", ".", "url", ",", "current_width", ",", "current_height", ")", "# calculate ratios", "new_width", ",", "new_height", "=", "scale", "(", "current_width", ",", "current_height", ",", "new_width", ",", "new_height", ")", "# use the image_processor defined in the settings, or PIL by default", "return", "self", ".", "_meta", ".", "image_processor", ".", "resize", "(", "image_field", ",", "new_width", ",", "new_height", ")" ]
f3f2be283441d91d1f89db780444dc75f7b51902
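The "best fit" behaviour described above can be illustrated with a standalone helper; the real method delegates this ratio math to oembed's scale() utility, so best_fit() here is a hypothetical approximation of that step, not the library implementation.

def best_fit(width, height, max_width=None, max_height=None):
    # shrink proportionally until both maximums are respected; never scale up
    ratio = 1.0
    if max_width:
        ratio = min(ratio, float(max_width) / width)
    if max_height:
        ratio = min(ratio, float(max_height) / height)
    return int(width * ratio), int(height * ratio)

# a 500x500 image constrained to 300x400 keeps its square aspect: 300x300
print(best_fit(500, 500, 300, 400))   # (300, 300)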
valid
DjangoProvider.map_to_dictionary
Build a dictionary of metadata for the requested object.
oembed/providers.py
def map_to_dictionary(self, url, obj, **kwargs):
    """
    Build a dictionary of metadata for the requested object.
    """
    maxwidth = kwargs.get('maxwidth', None)
    maxheight = kwargs.get('maxheight', None)

    provider_url, provider_name = self.provider_from_url(url)

    mapping = {
        'version': '1.0',
        'url': url,
        'provider_name': provider_name,
        'provider_url': provider_url,
        'type': self.resource_type
    }

    # a hook
    self.preprocess(obj, mapping, **kwargs)

    # resize image if we have a photo, otherwise use the given maximums
    if self.resource_type == 'photo' and self.get_image(obj):
        self.resize_photo(obj, mapping, maxwidth, maxheight)
    elif self.resource_type in ('video', 'rich', 'photo'):
        width, height = size_to_nearest(
            maxwidth,
            maxheight,
            self._meta.valid_sizes,
            self._meta.force_fit
        )
        mapping.update(width=width, height=height)

    # create a thumbnail
    if self.get_image(obj):
        self.thumbnail(obj, mapping)

    # map attributes to the mapping dictionary.  if the attribute is
    # a callable, it must have an argument signature of
    # (self, obj)
    for attr in ('title', 'author_name', 'author_url', 'html'):
        self.map_attr(mapping, attr, obj)

    # fix any urls
    if 'url' in mapping:
        mapping['url'] = relative_to_full(mapping['url'], url)

    if 'thumbnail_url' in mapping:
        mapping['thumbnail_url'] = relative_to_full(mapping['thumbnail_url'], url)

    if 'html' not in mapping and mapping['type'] in ('video', 'rich'):
        mapping['html'] = self.render_html(obj, context=Context(mapping))

    # a hook
    self.postprocess(obj, mapping, **kwargs)

    return mapping
[ "Build", "a", "dictionary", "of", "metadata", "for", "the", "requested", "object", "." ]
worldcompany/djangoembed
python
https://github.com/worldcompany/djangoembed/blob/f3f2be283441d91d1f89db780444dc75f7b51902/oembed/providers.py#L513-L568
[ "def", "map_to_dictionary", "(", "self", ",", "url", ",", "obj", ",", "*", "*", "kwargs", ")", ":", "maxwidth", "=", "kwargs", ".", "get", "(", "'maxwidth'", ",", "None", ")", "maxheight", "=", "kwargs", ".", "get", "(", "'maxheight'", ",", "None", ")", "provider_url", ",", "provider_name", "=", "self", ".", "provider_from_url", "(", "url", ")", "mapping", "=", "{", "'version'", ":", "'1.0'", ",", "'url'", ":", "url", ",", "'provider_name'", ":", "provider_name", ",", "'provider_url'", ":", "provider_url", ",", "'type'", ":", "self", ".", "resource_type", "}", "# a hook", "self", ".", "preprocess", "(", "obj", ",", "mapping", ",", "*", "*", "kwargs", ")", "# resize image if we have a photo, otherwise use the given maximums", "if", "self", ".", "resource_type", "==", "'photo'", "and", "self", ".", "get_image", "(", "obj", ")", ":", "self", ".", "resize_photo", "(", "obj", ",", "mapping", ",", "maxwidth", ",", "maxheight", ")", "elif", "self", ".", "resource_type", "in", "(", "'video'", ",", "'rich'", ",", "'photo'", ")", ":", "width", ",", "height", "=", "size_to_nearest", "(", "maxwidth", ",", "maxheight", ",", "self", ".", "_meta", ".", "valid_sizes", ",", "self", ".", "_meta", ".", "force_fit", ")", "mapping", ".", "update", "(", "width", "=", "width", ",", "height", "=", "height", ")", "# create a thumbnail", "if", "self", ".", "get_image", "(", "obj", ")", ":", "self", ".", "thumbnail", "(", "obj", ",", "mapping", ")", "# map attributes to the mapping dictionary. if the attribute is", "# a callable, it must have an argument signature of", "# (self, obj)", "for", "attr", "in", "(", "'title'", ",", "'author_name'", ",", "'author_url'", ",", "'html'", ")", ":", "self", ".", "map_attr", "(", "mapping", ",", "attr", ",", "obj", ")", "# fix any urls", "if", "'url'", "in", "mapping", ":", "mapping", "[", "'url'", "]", "=", "relative_to_full", "(", "mapping", "[", "'url'", "]", ",", "url", ")", "if", "'thumbnail_url'", "in", "mapping", ":", "mapping", "[", "'thumbnail_url'", "]", "=", "relative_to_full", "(", "mapping", "[", "'thumbnail_url'", "]", ",", "url", ")", "if", "'html'", "not", "in", "mapping", "and", "mapping", "[", "'type'", "]", "in", "(", "'video'", ",", "'rich'", ")", ":", "mapping", "[", "'html'", "]", "=", "self", ".", "render_html", "(", "obj", ",", "context", "=", "Context", "(", "mapping", ")", ")", "# a hook", "self", ".", "postprocess", "(", "obj", ",", "mapping", ",", "*", "*", "kwargs", ")", "return", "mapping" ]
f3f2be283441d91d1f89db780444dc75f7b51902
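For orientation, this is roughly the shape of the dictionary map_to_dictionary builds for a 'photo' resource, following the oEmbed response format; every value below is made up for illustration.

example_mapping = {
    'version': '1.0',
    'type': 'photo',
    'url': 'http://example.com/media/photo.jpg',
    'provider_name': 'Example Site',
    'provider_url': 'http://example.com/',
    'width': 640,
    'height': 480,
    'title': 'A sample photo',
    'author_name': 'Jane Doe',
    'thumbnail_url': 'http://example.com/media/photo_thumb.jpg',
}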
valid
DjangoProvider.request_resource
Request an OEmbedResource for a given url.

Some valid keyword args:
- format
- maxwidth
- maxheight
oembed/providers.py
def request_resource(self, url, **kwargs):
    """
    Request an OEmbedResource for a given url.

    Some valid keyword args:
    - format
    - maxwidth
    - maxheight
    """
    obj = self.get_object(url)

    mapping = self.map_to_dictionary(url, obj, **kwargs)

    resource = OEmbedResource.create(mapping)
    resource.content_object = obj

    return resource
[ "Request", "an", "OEmbedResource", "for", "a", "given", "url", ".", "Some", "valid", "keyword", "args", ":", "-", "format", "-", "maxwidth", "-", "maxheight" ]
worldcompany/djangoembed
python
https://github.com/worldcompany/djangoembed/blob/f3f2be283441d91d1f89db780444dc75f7b51902/oembed/providers.py#L570-L584
[ "def", "request_resource", "(", "self", ",", "url", ",", "*", "*", "kwargs", ")", ":", "obj", "=", "self", ".", "get_object", "(", "url", ")", "mapping", "=", "self", ".", "map_to_dictionary", "(", "url", ",", "obj", ",", "*", "*", "kwargs", ")", "resource", "=", "OEmbedResource", ".", "create", "(", "mapping", ")", "resource", ".", "content_object", "=", "obj", "return", "resource" ]
f3f2be283441d91d1f89db780444dc75f7b51902
valid
DjangoDateBasedProvider.get_object
Parses the date from a url and uses it in the query. For objects which are unique for date.
oembed/providers.py
def get_object(self, url, month_format='%b', day_format='%d'):
    """
    Parses the date from a url and uses it in the query.  For objects
    which are unique for date.
    """
    params = self.get_params(url)
    try:
        year = params[self._meta.year_part]
        month = params[self._meta.month_part]
        day = params[self._meta.day_part]
    except KeyError:
        try:
            # named lookups failed, so try to get the date using the first
            # three parameters
            year, month, day = params['_0'], params['_1'], params['_2']
        except KeyError:
            raise OEmbedException('Error extracting date from url parameters')

    try:
        tt = time.strptime('%s-%s-%s' % (year, month, day),
                           '%s-%s-%s' % ('%Y', month_format, day_format))
        date = datetime.date(*tt[:3])
    except ValueError:
        raise OEmbedException('Error parsing date from: %s' % url)

    # apply the date-specific lookups
    if isinstance(self._meta.model._meta.get_field(self._meta.date_field), DateTimeField):
        min_date = datetime.datetime.combine(date, datetime.time.min)
        max_date = datetime.datetime.combine(date, datetime.time.max)
        query = {'%s__range' % self._meta.date_field: (min_date, max_date)}
    else:
        query = {self._meta.date_field: date}

    # apply the regular search lookups
    for key, value in self._meta.fields_to_match.iteritems():
        try:
            query[value] = params[key]
        except KeyError:
            raise OEmbedException('%s was not found in the urlpattern parameters.  Valid names are: %s' % (key, ', '.join(params.keys())))

    try:
        obj = self.get_queryset().get(**query)
    except self._meta.model.DoesNotExist:
        raise OEmbedException('Requested object not found')

    return obj
[ "Parses", "the", "date", "from", "a", "url", "and", "uses", "it", "in", "the", "query", ".", "For", "objects", "which", "are", "unique", "for", "date", "." ]
worldcompany/djangoembed
python
https://github.com/worldcompany/djangoembed/blob/f3f2be283441d91d1f89db780444dc75f7b51902/oembed/providers.py#L595-L640
[ "def", "get_object", "(", "self", ",", "url", ",", "month_format", "=", "'%b'", ",", "day_format", "=", "'%d'", ")", ":", "params", "=", "self", ".", "get_params", "(", "url", ")", "try", ":", "year", "=", "params", "[", "self", ".", "_meta", ".", "year_part", "]", "month", "=", "params", "[", "self", ".", "_meta", ".", "month_part", "]", "day", "=", "params", "[", "self", ".", "_meta", ".", "day_part", "]", "except", "KeyError", ":", "try", ":", "# named lookups failed, so try to get the date using the first", "# three parameters", "year", ",", "month", ",", "day", "=", "params", "[", "'_0'", "]", ",", "params", "[", "'_1'", "]", ",", "params", "[", "'_2'", "]", "except", "KeyError", ":", "raise", "OEmbedException", "(", "'Error extracting date from url parameters'", ")", "try", ":", "tt", "=", "time", ".", "strptime", "(", "'%s-%s-%s'", "%", "(", "year", ",", "month", ",", "day", ")", ",", "'%s-%s-%s'", "%", "(", "'%Y'", ",", "month_format", ",", "day_format", ")", ")", "date", "=", "datetime", ".", "date", "(", "*", "tt", "[", ":", "3", "]", ")", "except", "ValueError", ":", "raise", "OEmbedException", "(", "'Error parsing date from: %s'", "%", "url", ")", "# apply the date-specific lookups", "if", "isinstance", "(", "self", ".", "_meta", ".", "model", ".", "_meta", ".", "get_field", "(", "self", ".", "_meta", ".", "date_field", ")", ",", "DateTimeField", ")", ":", "min_date", "=", "datetime", ".", "datetime", ".", "combine", "(", "date", ",", "datetime", ".", "time", ".", "min", ")", "max_date", "=", "datetime", ".", "datetime", ".", "combine", "(", "date", ",", "datetime", ".", "time", ".", "max", ")", "query", "=", "{", "'%s__range'", "%", "self", ".", "_meta", ".", "date_field", ":", "(", "min_date", ",", "max_date", ")", "}", "else", ":", "query", "=", "{", "self", ".", "_meta", ".", "date_field", ":", "date", "}", "# apply the regular search lookups", "for", "key", ",", "value", "in", "self", ".", "_meta", ".", "fields_to_match", ".", "iteritems", "(", ")", ":", "try", ":", "query", "[", "value", "]", "=", "params", "[", "key", "]", "except", "KeyError", ":", "raise", "OEmbedException", "(", "'%s was not found in the urlpattern parameters. Valid names are: %s'", "%", "(", "key", ",", "', '", ".", "join", "(", "params", ".", "keys", "(", ")", ")", ")", ")", "try", ":", "obj", "=", "self", ".", "get_queryset", "(", ")", ".", "get", "(", "*", "*", "query", ")", "except", "self", ".", "_meta", ".", "model", ".", "DoesNotExist", ":", "raise", "OEmbedException", "(", "'Requested object not found'", ")", "return", "obj" ]
f3f2be283441d91d1f89db780444dc75f7b51902
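The date-parsing step in get_object can be reproduced in isolation; the parameter values below are hypothetical, and '%b'/'%d' are the method's default month and day formats.

import datetime
import time

year, month, day = '2010', 'Mar', '05'
month_format, day_format = '%b', '%d'

# the year format is fixed to '%Y'; month/day formats come from the urlconf
tt = time.strptime('%s-%s-%s' % (year, month, day),
                   '%s-%s-%s' % ('%Y', month_format, day_format))
date = datetime.date(*tt[:3])
print(date)   # 2010-03-05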
valid
Inspire2CDS.get_record
Override the base.
harvestingkit/inspire_cds_package/from_inspire.py
def get_record(self):
    """Override the base."""
    self.recid = self.get_recid()
    self.remove_controlfields()
    self.update_system_numbers()
    self.add_systemnumber("Inspire", recid=self.recid)
    self.add_control_number("003", "SzGeCERN")
    self.update_collections()
    self.update_languages()
    self.update_reportnumbers()
    self.update_authors()
    self.update_journals()
    self.update_subject_categories("INSPIRE", "SzGeCERN", "categories_cds")
    self.update_pagenumber()
    self.update_notes()
    self.update_experiments()
    self.update_isbn()
    self.update_dois()
    self.update_links_and_ffts()
    self.update_date()
    self.update_date_year()
    self.update_hidden_notes()
    self.update_oai_info()
    self.update_cnum()
    self.update_conference_info()

    self.fields_list = [
        "909", "541", "961",
        "970", "690", "695",
        "981",
    ]
    self.strip_fields()

    if "ANNOUNCEMENT" in self.collections:
        self.update_conference_111()
        self.update_conference_links()
        record_add_field(self.record, "690", ind1="C", subfields=[("a", "CONFERENCE")])

    if "THESIS" in self.collections:
        self.update_thesis_information()
        self.update_thesis_supervisors()

    if "PROCEEDINGS" in self.collections:
        # Special proceeding syntax
        self.update_title_to_proceeding()
        self.update_author_to_proceeding()
        record_add_field(self.record, "690", ind1="C", subfields=[("a", "CONFERENCE")])

    # 690 tags
    if self.tag_as_cern:
        record_add_field(self.record, "690", ind1="C", subfields=[("a", "CERN")])

    return self.record
[ "Override", "the", "base", "." ]
inspirehep/harvesting-kit
python
https://github.com/inspirehep/harvesting-kit/blob/33a7f8aa9dade1d863110c6d8b27dfd955cb471f/harvestingkit/inspire_cds_package/from_inspire.py#L178-L230
[ "def", "get_record", "(", "self", ")", ":", "self", ".", "recid", "=", "self", ".", "get_recid", "(", ")", "self", ".", "remove_controlfields", "(", ")", "self", ".", "update_system_numbers", "(", ")", "self", ".", "add_systemnumber", "(", "\"Inspire\"", ",", "recid", "=", "self", ".", "recid", ")", "self", ".", "add_control_number", "(", "\"003\"", ",", "\"SzGeCERN\"", ")", "self", ".", "update_collections", "(", ")", "self", ".", "update_languages", "(", ")", "self", ".", "update_reportnumbers", "(", ")", "self", ".", "update_authors", "(", ")", "self", ".", "update_journals", "(", ")", "self", ".", "update_subject_categories", "(", "\"INSPIRE\"", ",", "\"SzGeCERN\"", ",", "\"categories_cds\"", ")", "self", ".", "update_pagenumber", "(", ")", "self", ".", "update_notes", "(", ")", "self", ".", "update_experiments", "(", ")", "self", ".", "update_isbn", "(", ")", "self", ".", "update_dois", "(", ")", "self", ".", "update_links_and_ffts", "(", ")", "self", ".", "update_date", "(", ")", "self", ".", "update_date_year", "(", ")", "self", ".", "update_hidden_notes", "(", ")", "self", ".", "update_oai_info", "(", ")", "self", ".", "update_cnum", "(", ")", "self", ".", "update_conference_info", "(", ")", "self", ".", "fields_list", "=", "[", "\"909\"", ",", "\"541\"", ",", "\"961\"", ",", "\"970\"", ",", "\"690\"", ",", "\"695\"", ",", "\"981\"", ",", "]", "self", ".", "strip_fields", "(", ")", "if", "\"ANNOUNCEMENT\"", "in", "self", ".", "collections", ":", "self", ".", "update_conference_111", "(", ")", "self", ".", "update_conference_links", "(", ")", "record_add_field", "(", "self", ".", "record", ",", "\"690\"", ",", "ind1", "=", "\"C\"", ",", "subfields", "=", "[", "(", "\"a\"", ",", "\"CONFERENCE\"", ")", "]", ")", "if", "\"THESIS\"", "in", "self", ".", "collections", ":", "self", ".", "update_thesis_information", "(", ")", "self", ".", "update_thesis_supervisors", "(", ")", "if", "\"PROCEEDINGS\"", "in", "self", ".", "collections", ":", "# Special proceeding syntax", "self", ".", "update_title_to_proceeding", "(", ")", "self", ".", "update_author_to_proceeding", "(", ")", "record_add_field", "(", "self", ".", "record", ",", "\"690\"", ",", "ind1", "=", "\"C\"", ",", "subfields", "=", "[", "(", "\"a\"", ",", "\"CONFERENCE\"", ")", "]", ")", "# 690 tags", "if", "self", ".", "tag_as_cern", ":", "record_add_field", "(", "self", ".", "record", ",", "\"690\"", ",", "ind1", "=", "\"C\"", ",", "subfields", "=", "[", "(", "\"a\"", ",", "\"CERN\"", ")", "]", ")", "return", "self", ".", "record" ]
33a7f8aa9dade1d863110c6d8b27dfd955cb471f
valid
Inspire2CDS.update_oai_info
Add the 909 OAI info to 035.
harvestingkit/inspire_cds_package/from_inspire.py
def update_oai_info(self):
    """Add the 909 OAI info to 035."""
    for field in record_get_field_instances(self.record, '909', ind1="C", ind2="O"):
        new_subs = []
        for tag, value in field[0]:
            if tag == "o":
                new_subs.append(("a", value))
            else:
                new_subs.append((tag, value))
            if value in ["CERN", "CDS", "ForCDS"]:
                self.tag_as_cern = True
        record_add_field(self.record, '024', ind1="8", subfields=new_subs)
    record_delete_fields(self.record, '909')
[ "Add", "the", "909", "OAI", "info", "to", "035", "." ]
inspirehep/harvesting-kit
python
https://github.com/inspirehep/harvesting-kit/blob/33a7f8aa9dade1d863110c6d8b27dfd955cb471f/harvestingkit/inspire_cds_package/from_inspire.py#L232-L244
[ "def", "update_oai_info", "(", "self", ")", ":", "for", "field", "in", "record_get_field_instances", "(", "self", ".", "record", ",", "'909'", ",", "ind1", "=", "\"C\"", ",", "ind2", "=", "\"O\"", ")", ":", "new_subs", "=", "[", "]", "for", "tag", ",", "value", "in", "field", "[", "0", "]", ":", "if", "tag", "==", "\"o\"", ":", "new_subs", ".", "append", "(", "(", "\"a\"", ",", "value", ")", ")", "else", ":", "new_subs", ".", "append", "(", "(", "tag", ",", "value", ")", ")", "if", "value", "in", "[", "\"CERN\"", ",", "\"CDS\"", ",", "\"ForCDS\"", "]", ":", "self", ".", "tag_as_cern", "=", "True", "record_add_field", "(", "self", ".", "record", ",", "'024'", ",", "ind1", "=", "\"8\"", ",", "subfields", "=", "new_subs", ")", "record_delete_fields", "(", "self", ".", "record", ",", "'909'", ")" ]
33a7f8aa9dade1d863110c6d8b27dfd955cb471f
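A plain-data sketch of what update_oai_info does to one 909 C O field: the $o subfield is renamed to $a, the whole field is re-added as 024 8_, and a CERN/CDS/ForCDS value sets the CERN flag. The bibrecord field structure is simplified here to a list of (code, value) tuples with a made-up OAI identifier.

field_909_subfields = [('o', 'oai:inspirehep.net:123456'), ('p', 'ForCDS')]

new_subs = []
tag_as_cern = False
for code, value in field_909_subfields:
    new_subs.append(('a', value) if code == 'o' else (code, value))
    if value in ('CERN', 'CDS', 'ForCDS'):
        tag_as_cern = True

print(new_subs)      # [('a', 'oai:inspirehep.net:123456'), ('p', 'ForCDS')]
print(tag_as_cern)   # True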
valid
Inspire2CDS.update_cnum
Check if we shall add cnum in 035.
harvestingkit/inspire_cds_package/from_inspire.py
def update_cnum(self):
    """Check if we shall add cnum in 035."""
    if "ConferencePaper" not in self.collections:
        cnums = record_get_field_values(self.record, '773', code="w")
        for cnum in cnums:
            cnum_subs = [
                ("9", "INSPIRE-CNUM"),
                ("a", cnum)
            ]
            record_add_field(self.record, "035", subfields=cnum_subs)
[ "Check", "if", "we", "shall", "add", "cnum", "in", "035", "." ]
inspirehep/harvesting-kit
python
https://github.com/inspirehep/harvesting-kit/blob/33a7f8aa9dade1d863110c6d8b27dfd955cb471f/harvestingkit/inspire_cds_package/from_inspire.py#L246-L255
[ "def", "update_cnum", "(", "self", ")", ":", "if", "\"ConferencePaper\"", "not", "in", "self", ".", "collections", ":", "cnums", "=", "record_get_field_values", "(", "self", ".", "record", ",", "'773'", ",", "code", "=", "\"w\"", ")", "for", "cnum", "in", "cnums", ":", "cnum_subs", "=", "[", "(", "\"9\"", ",", "\"INSPIRE-CNUM\"", ")", ",", "(", "\"a\"", ",", "cnum", ")", "]", "record_add_field", "(", "self", ".", "record", ",", "\"035\"", ",", "subfields", "=", "cnum_subs", ")" ]
33a7f8aa9dade1d863110c6d8b27dfd955cb471f
valid
Inspire2CDS.update_hidden_notes
Remove hidden notes and tag a CERN if detected.
harvestingkit/inspire_cds_package/from_inspire.py
def update_hidden_notes(self):
    """Remove hidden notes and tag a CERN if detected."""
    if not self.tag_as_cern:
        notes = record_get_field_instances(self.record, tag="595")
        for field in notes:
            for dummy, value in field[0]:
                if value == "CDS":
                    self.tag_as_cern = True
    record_delete_fields(self.record, tag="595")
[ "Remove", "hidden", "notes", "and", "tag", "a", "CERN", "if", "detected", "." ]
inspirehep/harvesting-kit
python
https://github.com/inspirehep/harvesting-kit/blob/33a7f8aa9dade1d863110c6d8b27dfd955cb471f/harvestingkit/inspire_cds_package/from_inspire.py#L257-L266
[ "def", "update_hidden_notes", "(", "self", ")", ":", "if", "not", "self", ".", "tag_as_cern", ":", "notes", "=", "record_get_field_instances", "(", "self", ".", "record", ",", "tag", "=", "\"595\"", ")", "for", "field", "in", "notes", ":", "for", "dummy", ",", "value", "in", "field", "[", "0", "]", ":", "if", "value", "==", "\"CDS\"", ":", "self", ".", "tag_as_cern", "=", "True", "record_delete_fields", "(", "self", ".", "record", ",", "tag", "=", "\"595\"", ")" ]
33a7f8aa9dade1d863110c6d8b27dfd955cb471f
valid
Inspire2CDS.update_system_numbers
035 Externals.
harvestingkit/inspire_cds_package/from_inspire.py
def update_system_numbers(self):
    """035 Externals."""
    scn_035_fields = record_get_field_instances(self.record, '035')
    new_fields = []
    for field in scn_035_fields:
        subs = field_get_subfields(field)
        if '9' in subs:
            if subs['9'][0].lower() == "cds" and subs.get('a'):
                self.add_control_number("001", subs.get('a')[0])
            if subs['9'][0].lower() in ["inspire", "spirestex", "inspiretex", "desy", "cds"]:
                continue
        new_fields.append(field_get_subfield_instances(field))
    record_delete_fields(self.record, tag="035")
    for field in new_fields:
        record_add_field(self.record, tag="035", subfields=field)
[ "035", "Externals", "." ]
inspirehep/harvesting-kit
python
https://github.com/inspirehep/harvesting-kit/blob/33a7f8aa9dade1d863110c6d8b27dfd955cb471f/harvestingkit/inspire_cds_package/from_inspire.py#L268-L282
[ "def", "update_system_numbers", "(", "self", ")", ":", "scn_035_fields", "=", "record_get_field_instances", "(", "self", ".", "record", ",", "'035'", ")", "new_fields", "=", "[", "]", "for", "field", "in", "scn_035_fields", ":", "subs", "=", "field_get_subfields", "(", "field", ")", "if", "'9'", "in", "subs", ":", "if", "subs", "[", "'9'", "]", "[", "0", "]", ".", "lower", "(", ")", "==", "\"cds\"", "and", "subs", ".", "get", "(", "'a'", ")", ":", "self", ".", "add_control_number", "(", "\"001\"", ",", "subs", ".", "get", "(", "'a'", ")", "[", "0", "]", ")", "if", "subs", "[", "'9'", "]", "[", "0", "]", ".", "lower", "(", ")", "in", "[", "\"inspire\"", ",", "\"spirestex\"", ",", "\"inspiretex\"", ",", "\"desy\"", ",", "\"cds\"", "]", ":", "continue", "new_fields", ".", "append", "(", "field_get_subfield_instances", "(", "field", ")", ")", "record_delete_fields", "(", "self", ".", "record", ",", "tag", "=", "\"035\"", ")", "for", "field", "in", "new_fields", ":", "record_add_field", "(", "self", ".", "record", ",", "tag", "=", "\"035\"", ",", "subfields", "=", "field", ")" ]
33a7f8aa9dade1d863110c6d8b27dfd955cb471f
valid
Inspire2CDS.update_collections
Try to determine which collections this record should belong to.
harvestingkit/inspire_cds_package/from_inspire.py
def update_collections(self):
    """Try to determine which collections this record should belong to."""
    for value in record_get_field_values(self.record, '980', code='a'):
        if 'NOTE' in value.upper():
            self.collections.add('NOTE')

        if 'THESIS' in value.upper():
            self.collections.add('THESIS')

        if 'PUBLISHED' in value.upper():
            self.collections.add('ARTICLE')

        if 'CONFERENCES' in value.upper():
            self.collections.add('ANNOUNCEMENT')

        if 'PROCEEDINGS' in value.upper():
            self.collections.add('PROCEEDINGS')
        elif 'CONFERENCEPAPER' in value.upper() and \
                "ConferencePaper" not in self.collections:
            self.collections.add('ConferencePaper')

            if self.is_published() and "ARTICLE" not in self.collections:
                self.collections.add('ARTICLE')
            else:
                self.collections.add('PREPRINT')

        if "HIDDEN" in value.upper():
            self.hidden = True

    # Clear out any existing ones.
    record_delete_fields(self.record, "980")

    if not self.collections:
        self.collections.add('PREPRINT')

    for collection in self.collections:
        record_add_field(self.record, tag='980', subfields=[('a', collection)])
        if collection in self.collection_base:
            subs = [('a', self.collection_base[collection])]
            record_add_field(self.record, tag='960', subfields=subs)
[ "Try", "to", "determine", "which", "collections", "this", "record", "should", "belong", "to", "." ]
inspirehep/harvesting-kit
python
https://github.com/inspirehep/harvesting-kit/blob/33a7f8aa9dade1d863110c6d8b27dfd955cb471f/harvestingkit/inspire_cds_package/from_inspire.py#L284-L325
[ "def", "update_collections", "(", "self", ")", ":", "for", "value", "in", "record_get_field_values", "(", "self", ".", "record", ",", "'980'", ",", "code", "=", "'a'", ")", ":", "if", "'NOTE'", "in", "value", ".", "upper", "(", ")", ":", "self", ".", "collections", ".", "add", "(", "'NOTE'", ")", "if", "'THESIS'", "in", "value", ".", "upper", "(", ")", ":", "self", ".", "collections", ".", "add", "(", "'THESIS'", ")", "if", "'PUBLISHED'", "in", "value", ".", "upper", "(", ")", ":", "self", ".", "collections", ".", "add", "(", "'ARTICLE'", ")", "if", "'CONFERENCES'", "in", "value", ".", "upper", "(", ")", ":", "self", ".", "collections", ".", "add", "(", "'ANNOUNCEMENT'", ")", "if", "'PROCEEDINGS'", "in", "value", ".", "upper", "(", ")", ":", "self", ".", "collections", ".", "add", "(", "'PROCEEDINGS'", ")", "elif", "'CONFERENCEPAPER'", "in", "value", ".", "upper", "(", ")", "and", "\"ConferencePaper\"", "not", "in", "self", ".", "collections", ":", "self", ".", "collections", ".", "add", "(", "'ConferencePaper'", ")", "if", "self", ".", "is_published", "(", ")", "and", "\"ARTICLE\"", "not", "in", "self", ".", "collections", ":", "self", ".", "collections", ".", "add", "(", "'ARTICLE'", ")", "else", ":", "self", ".", "collections", ".", "add", "(", "'PREPRINT'", ")", "if", "\"HIDDEN\"", "in", "value", ".", "upper", "(", ")", ":", "self", ".", "hidden", "=", "True", "# Clear out any existing ones.", "record_delete_fields", "(", "self", ".", "record", ",", "\"980\"", ")", "if", "not", "self", ".", "collections", ":", "self", ".", "collections", ".", "add", "(", "'PREPRINT'", ")", "for", "collection", "in", "self", ".", "collections", ":", "record_add_field", "(", "self", ".", "record", ",", "tag", "=", "'980'", ",", "subfields", "=", "[", "(", "'a'", ",", "collection", ")", "]", ")", "if", "collection", "in", "self", ".", "collection_base", ":", "subs", "=", "[", "(", "'a'", ",", "self", ".", "collection_base", "[", "collection", "]", ")", "]", "record_add_field", "(", "self", ".", "record", ",", "tag", "=", "'960'", ",", "subfields", "=", "subs", ")" ]
33a7f8aa9dade1d863110c6d8b27dfd955cb471f
valid
Inspire2CDS.update_notes
Remove INSPIRE specific notes.
harvestingkit/inspire_cds_package/from_inspire.py
def update_notes(self):
    """Remove INSPIRE specific notes."""
    fields = record_get_field_instances(self.record, '500')
    for field in fields:
        subs = field_get_subfields(field)
        for sub in subs.get('a', []):
            sub = sub.strip()  # remove any spaces before/after
            if sub.startswith("*") and sub.endswith("*"):
                record_delete_field(self.record, tag="500",
                                    field_position_global=field[4])
[ "Remove", "INSPIRE", "specific", "notes", "." ]
inspirehep/harvesting-kit
python
https://github.com/inspirehep/harvesting-kit/blob/33a7f8aa9dade1d863110c6d8b27dfd955cb471f/harvestingkit/inspire_cds_package/from_inspire.py#L327-L336
[ "def", "update_notes", "(", "self", ")", ":", "fields", "=", "record_get_field_instances", "(", "self", ".", "record", ",", "'500'", ")", "for", "field", "in", "fields", ":", "subs", "=", "field_get_subfields", "(", "field", ")", "for", "sub", "in", "subs", ".", "get", "(", "'a'", ",", "[", "]", ")", ":", "sub", "=", "sub", ".", "strip", "(", ")", "# remove any spaces before/after", "if", "sub", ".", "startswith", "(", "\"*\"", ")", "and", "sub", ".", "endswith", "(", "\"*\"", ")", ":", "record_delete_field", "(", "self", ".", "record", ",", "tag", "=", "\"500\"", ",", "field_position_global", "=", "field", "[", "4", "]", ")" ]
33a7f8aa9dade1d863110c6d8b27dfd955cb471f
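The INSPIRE-only notes that update_notes removes are 500 $a values wrapped in asterisks; a minimal check on a made-up value shows the test it applies.

sub = ' *Temporary entry* '
sub = sub.strip()  # remove any spaces before/after
print(sub.startswith('*') and sub.endswith('*'))   # True, so the field would be deleted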
valid
Inspire2CDS.update_title_to_proceeding
Move title info from 245 to 111 proceeding style.
harvestingkit/inspire_cds_package/from_inspire.py
def update_title_to_proceeding(self):
    """Move title info from 245 to 111 proceeding style."""
    titles = record_get_field_instances(self.record, tag="245")
    for title in titles:
        subs = field_get_subfields(title)
        new_subs = []
        if "a" in subs:
            new_subs.append(("a", subs['a'][0]))
        if "b" in subs:
            new_subs.append(("c", subs['b'][0]))
        record_add_field(self.record, tag="111", subfields=new_subs)
    record_delete_fields(self.record, tag="245")
    record_delete_fields(self.record, tag="246")
[ "Move", "title", "info", "from", "245", "to", "111", "proceeding", "style", "." ]
inspirehep/harvesting-kit
python
https://github.com/inspirehep/harvesting-kit/blob/33a7f8aa9dade1d863110c6d8b27dfd955cb471f/harvestingkit/inspire_cds_package/from_inspire.py#L338-L353
[ "def", "update_title_to_proceeding", "(", "self", ")", ":", "titles", "=", "record_get_field_instances", "(", "self", ".", "record", ",", "tag", "=", "\"245\"", ")", "for", "title", "in", "titles", ":", "subs", "=", "field_get_subfields", "(", "title", ")", "new_subs", "=", "[", "]", "if", "\"a\"", "in", "subs", ":", "new_subs", ".", "append", "(", "(", "\"a\"", ",", "subs", "[", "'a'", "]", "[", "0", "]", ")", ")", "if", "\"b\"", "in", "subs", ":", "new_subs", ".", "append", "(", "(", "\"c\"", ",", "subs", "[", "'b'", "]", "[", "0", "]", ")", ")", "record_add_field", "(", "self", ".", "record", ",", "tag", "=", "\"111\"", ",", "subfields", "=", "new_subs", ")", "record_delete_fields", "(", "self", ".", "record", ",", "tag", "=", "\"245\"", ")", "record_delete_fields", "(", "self", ".", "record", ",", "tag", "=", "\"246\"", ")" ]
33a7f8aa9dade1d863110c6d8b27dfd955cb471f
valid
Inspire2CDS.update_experiments
Experiment mapping.
harvestingkit/inspire_cds_package/from_inspire.py
def update_experiments(self):
    """Experiment mapping."""
    # 693 Remove if 'not applicable'
    for field in record_get_field_instances(self.record, '693'):
        subs = field_get_subfields(field)
        acc_experiment = subs.get("e", [])
        if not acc_experiment:
            acc_experiment = subs.get("a", [])
            if not acc_experiment:
                continue
        experiment = acc_experiment[-1]

        # Handle special case of leading experiments numbers NA-050 -> NA 50
        e_suffix = ""
        if "-NA-" in experiment or \
           "-RD-" in experiment or \
           "-WA-" in experiment:
            splitted_experiment = experiment.split("-")
            e_suffix = "-".join(splitted_experiment[2:])
            if e_suffix.startswith("0"):
                e_suffix = e_suffix[1:]
            experiment = "-".join(splitted_experiment[:2])  # only CERN-NA

        translated_experiment = self.get_config_item(experiment, "experiments")
        if not translated_experiment:
            continue
        new_subs = []
        if "---" in translated_experiment:
            experiment_a, experiment_e = translated_experiment.split("---")
            new_subs.append(("a", experiment_a.replace("-", " ")))
        else:
            experiment_e = translated_experiment
        new_subs.append(("e", experiment_e.replace("-", " ") + e_suffix))
        record_delete_field(self.record, tag="693",
                            field_position_global=field[4])
        record_add_field(self.record, "693", subfields=new_subs)
[ "Experiment", "mapping", "." ]
inspirehep/harvesting-kit
python
https://github.com/inspirehep/harvesting-kit/blob/33a7f8aa9dade1d863110c6d8b27dfd955cb471f/harvestingkit/inspire_cds_package/from_inspire.py#L372-L408
[ "def", "update_experiments", "(", "self", ")", ":", "# 693 Remove if 'not applicable'", "for", "field", "in", "record_get_field_instances", "(", "self", ".", "record", ",", "'693'", ")", ":", "subs", "=", "field_get_subfields", "(", "field", ")", "acc_experiment", "=", "subs", ".", "get", "(", "\"e\"", ",", "[", "]", ")", "if", "not", "acc_experiment", ":", "acc_experiment", "=", "subs", ".", "get", "(", "\"a\"", ",", "[", "]", ")", "if", "not", "acc_experiment", ":", "continue", "experiment", "=", "acc_experiment", "[", "-", "1", "]", "# Handle special case of leading experiments numbers NA-050 -> NA 50", "e_suffix", "=", "\"\"", "if", "\"-NA-\"", "in", "experiment", "or", "\"-RD-\"", "in", "experiment", "or", "\"-WA-\"", "in", "experiment", ":", "splitted_experiment", "=", "experiment", ".", "split", "(", "\"-\"", ")", "e_suffix", "=", "\"-\"", ".", "join", "(", "splitted_experiment", "[", "2", ":", "]", ")", "if", "e_suffix", ".", "startswith", "(", "\"0\"", ")", ":", "e_suffix", "=", "e_suffix", "[", "1", ":", "]", "experiment", "=", "\"-\"", ".", "join", "(", "splitted_experiment", "[", ":", "2", "]", ")", "# only CERN-NA", "translated_experiment", "=", "self", ".", "get_config_item", "(", "experiment", ",", "\"experiments\"", ")", "if", "not", "translated_experiment", ":", "continue", "new_subs", "=", "[", "]", "if", "\"---\"", "in", "translated_experiment", ":", "experiment_a", ",", "experiment_e", "=", "translated_experiment", ".", "split", "(", "\"---\"", ")", "new_subs", ".", "append", "(", "(", "\"a\"", ",", "experiment_a", ".", "replace", "(", "\"-\"", ",", "\" \"", ")", ")", ")", "else", ":", "experiment_e", "=", "translated_experiment", "new_subs", ".", "append", "(", "(", "\"e\"", ",", "experiment_e", ".", "replace", "(", "\"-\"", ",", "\" \"", ")", "+", "e_suffix", ")", ")", "record_delete_field", "(", "self", ".", "record", ",", "tag", "=", "\"693\"", ",", "field_position_global", "=", "field", "[", "4", "]", ")", "record_add_field", "(", "self", ".", "record", ",", "\"693\"", ",", "subfields", "=", "new_subs", ")" ]
33a7f8aa9dade1d863110c6d8b27dfd955cb471f
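The special-case handling of experiment codes such as CERN-NA-050 can be shown on a plain string; the configuration lookup that follows in the real method is omitted from this sketch.

experiment = 'CERN-NA-050'
e_suffix = ''
if '-NA-' in experiment or '-RD-' in experiment or '-WA-' in experiment:
    parts = experiment.split('-')
    e_suffix = '-'.join(parts[2:])     # '050'
    if e_suffix.startswith('0'):
        e_suffix = e_suffix[1:]        # '50'
    experiment = '-'.join(parts[:2])   # only 'CERN-NA' is looked up in the mapping

print('%s %s' % (experiment, e_suffix))   # CERN-NA 50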
valid
Inspire2CDS.update_reportnumbers
Update reportnumbers.
harvestingkit/inspire_cds_package/from_inspire.py
def update_reportnumbers(self):
    """Update reportnumbers."""
    report_037_fields = record_get_field_instances(self.record, '037')
    for field in report_037_fields:
        subs = field_get_subfields(field)
        for val in subs.get("a", []):
            if "arXiv" not in val:
                record_delete_field(self.record, tag="037",
                                    field_position_global=field[4])
                new_subs = [(code, val[0]) for code, val in subs.items()]
                record_add_field(self.record, "088", subfields=new_subs)
                break
[ "Update", "reportnumbers", "." ]
inspirehep/harvesting-kit
python
https://github.com/inspirehep/harvesting-kit/blob/33a7f8aa9dade1d863110c6d8b27dfd955cb471f/harvestingkit/inspire_cds_package/from_inspire.py#L410-L422
[ "def", "update_reportnumbers", "(", "self", ")", ":", "report_037_fields", "=", "record_get_field_instances", "(", "self", ".", "record", ",", "'037'", ")", "for", "field", "in", "report_037_fields", ":", "subs", "=", "field_get_subfields", "(", "field", ")", "for", "val", "in", "subs", ".", "get", "(", "\"a\"", ",", "[", "]", ")", ":", "if", "\"arXiv\"", "not", "in", "val", ":", "record_delete_field", "(", "self", ".", "record", ",", "tag", "=", "\"037\"", ",", "field_position_global", "=", "field", "[", "4", "]", ")", "new_subs", "=", "[", "(", "code", ",", "val", "[", "0", "]", ")", "for", "code", ",", "val", "in", "subs", ".", "items", "(", ")", "]", "record_add_field", "(", "self", ".", "record", ",", "\"088\"", ",", "subfields", "=", "new_subs", ")", "break" ]
33a7f8aa9dade1d863110c6d8b27dfd955cb471f
valid
Inspire2CDS.update_authors
100 & 700 punctuate author names.
harvestingkit/inspire_cds_package/from_inspire.py
def update_authors(self):
    """100 & 700 punctuate author names."""
    author_names = record_get_field_instances(self.record, '100')
    author_names.extend(record_get_field_instances(self.record, '700'))
    for field in author_names:
        subs = field_get_subfields(field)
        for idx, (key, value) in enumerate(field[0]):
            if key == 'a':
                field[0][idx] = ('a', value.replace(".", " ").strip())
            elif key == 'v':
                del field[0][idx]
        if subs.get("u", None) == "CERN":
            self.tag_as_cern = True
[ "100", "&", "700", "punctuate", "author", "names", "." ]
inspirehep/harvesting-kit
python
https://github.com/inspirehep/harvesting-kit/blob/33a7f8aa9dade1d863110c6d8b27dfd955cb471f/harvestingkit/inspire_cds_package/from_inspire.py#L424-L436
[ "def", "update_authors", "(", "self", ")", ":", "author_names", "=", "record_get_field_instances", "(", "self", ".", "record", ",", "'100'", ")", "author_names", ".", "extend", "(", "record_get_field_instances", "(", "self", ".", "record", ",", "'700'", ")", ")", "for", "field", "in", "author_names", ":", "subs", "=", "field_get_subfields", "(", "field", ")", "for", "idx", ",", "(", "key", ",", "value", ")", "in", "enumerate", "(", "field", "[", "0", "]", ")", ":", "if", "key", "==", "'a'", ":", "field", "[", "0", "]", "[", "idx", "]", "=", "(", "'a'", ",", "value", ".", "replace", "(", "\".\"", ",", "\" \"", ")", ".", "strip", "(", ")", ")", "elif", "key", "==", "'v'", ":", "del", "field", "[", "0", "]", "[", "idx", "]", "if", "subs", ".", "get", "(", "\"u\"", ",", "None", ")", "==", "\"CERN\"", ":", "self", ".", "tag_as_cern", "=", "True" ]
33a7f8aa9dade1d863110c6d8b27dfd955cb471f
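What the author-name normalisation in update_authors does to a single field, shown on a made-up list of (code, value) subfield tuples: dots in the name become spaces and the result is stripped.

subfields = [('a', 'Ellis, J.R.'), ('u', 'CERN')]
for idx, (key, value) in enumerate(subfields):
    if key == 'a':
        subfields[idx] = ('a', value.replace(".", " ").strip())
print(subfields)   # [('a', 'Ellis, J R'), ('u', 'CERN')]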
valid
Inspire2CDS.update_isbn
Remove dashes from ISBN.
harvestingkit/inspire_cds_package/from_inspire.py
def update_isbn(self):
    """Remove dashes from ISBN."""
    isbns = record_get_field_instances(self.record, '020')
    for field in isbns:
        for idx, (key, value) in enumerate(field[0]):
            if key == 'a':
                field[0][idx] = ('a', value.replace("-", "").strip())
[ "Remove", "dashes", "from", "ISBN", "." ]
inspirehep/harvesting-kit
python
https://github.com/inspirehep/harvesting-kit/blob/33a7f8aa9dade1d863110c6d8b27dfd955cb471f/harvestingkit/inspire_cds_package/from_inspire.py#L438-L444
[ "def", "update_isbn", "(", "self", ")", ":", "isbns", "=", "record_get_field_instances", "(", "self", ".", "record", ",", "'020'", ")", "for", "field", "in", "isbns", ":", "for", "idx", ",", "(", "key", ",", "value", ")", "in", "enumerate", "(", "field", "[", "0", "]", ")", ":", "if", "key", "==", "'a'", ":", "field", "[", "0", "]", "[", "idx", "]", "=", "(", "'a'", ",", "value", ".", "replace", "(", "\"-\"", ",", "\"\"", ")", ".", "strip", "(", ")", ")" ]
33a7f8aa9dade1d863110c6d8b27dfd955cb471f
valid
Inspire2CDS.update_dois
Remove duplicate BibMatch DOIs.
harvestingkit/inspire_cds_package/from_inspire.py
def update_dois(self):
    """Remove duplicate BibMatch DOIs."""
    dois = record_get_field_instances(self.record, '024', ind1="7")
    all_dois = {}
    for field in dois:
        subs = field_get_subfield_instances(field)
        subs_dict = dict(subs)
        if subs_dict.get('a'):
            if subs_dict['a'] in all_dois:
                record_delete_field(self.record, tag='024', ind1='7',
                                    field_position_global=field[4])
                continue
            all_dois[subs_dict['a']] = field
[ "Remove", "duplicate", "BibMatch", "DOIs", "." ]
inspirehep/harvesting-kit
python
https://github.com/inspirehep/harvesting-kit/blob/33a7f8aa9dade1d863110c6d8b27dfd955cb471f/harvestingkit/inspire_cds_package/from_inspire.py#L446-L457
[ "def", "update_dois", "(", "self", ")", ":", "dois", "=", "record_get_field_instances", "(", "self", ".", "record", ",", "'024'", ",", "ind1", "=", "\"7\"", ")", "all_dois", "=", "{", "}", "for", "field", "in", "dois", ":", "subs", "=", "field_get_subfield_instances", "(", "field", ")", "subs_dict", "=", "dict", "(", "subs", ")", "if", "subs_dict", ".", "get", "(", "'a'", ")", ":", "if", "subs_dict", "[", "'a'", "]", "in", "all_dois", ":", "record_delete_field", "(", "self", ".", "record", ",", "tag", "=", "'024'", ",", "ind1", "=", "'7'", ",", "field_position_global", "=", "field", "[", "4", "]", ")", "continue", "all_dois", "[", "subs_dict", "[", "'a'", "]", "]", "=", "field" ]
33a7f8aa9dade1d863110c6d8b27dfd955cb471f
valid
Inspire2CDS.update_journals
773 journal translations.
harvestingkit/inspire_cds_package/from_inspire.py
def update_journals(self):
    """773 journal translations."""
    for field in record_get_field_instances(self.record, '773'):
        subs = field_get_subfield_instances(field)
        new_subs = []
        volume_letter = ""
        journal_name = ""
        for idx, (key, value) in enumerate(subs):
            if key == 'p':
                journal_name = self.get_config_item(value,
                                                    "journals",
                                                    allow_substring=False)
                # Make sure journal names have the form (dot)(space) (I know it's horrible)
                journal_name = journal_name.replace('. ', '.').replace('.', '. ').replace('. ,', '.,').strip()
            elif key == 'v':
                volume_letter = value
            else:
                new_subs.append((key, value))

        if not journal_name == "PoS":
            # Special handling of journal name and volumes, except PoS
            letter = return_letters_from_string(volume_letter)
            if letter:
                journal_name = "{0} {1}".format(journal_name, letter)
                volume_letter = volume_letter.strip(letter)

        if journal_name:
            new_subs.append(("p", journal_name))
        if volume_letter:
            new_subs.append(("v", volume_letter))
        record_delete_field(self.record, tag="773",
                            field_position_global=field[4])
        record_add_field(self.record, "773", subfields=new_subs)
[ "773", "journal", "translations", "." ]
inspirehep/harvesting-kit
python
https://github.com/inspirehep/harvesting-kit/blob/33a7f8aa9dade1d863110c6d8b27dfd955cb471f/harvestingkit/inspire_cds_package/from_inspire.py#L459-L489
[ "def", "update_journals", "(", "self", ")", ":", "for", "field", "in", "record_get_field_instances", "(", "self", ".", "record", ",", "'773'", ")", ":", "subs", "=", "field_get_subfield_instances", "(", "field", ")", "new_subs", "=", "[", "]", "volume_letter", "=", "\"\"", "journal_name", "=", "\"\"", "for", "idx", ",", "(", "key", ",", "value", ")", "in", "enumerate", "(", "subs", ")", ":", "if", "key", "==", "'p'", ":", "journal_name", "=", "self", ".", "get_config_item", "(", "value", ",", "\"journals\"", ",", "allow_substring", "=", "False", ")", "# Make sure journal names have the form (dot)(space) (I know it's horrible)", "journal_name", "=", "journal_name", ".", "replace", "(", "'. '", ",", "'.'", ")", ".", "replace", "(", "'.'", ",", "'. '", ")", ".", "replace", "(", "'. ,'", ",", "'.,'", ")", ".", "strip", "(", ")", "elif", "key", "==", "'v'", ":", "volume_letter", "=", "value", "else", ":", "new_subs", ".", "append", "(", "(", "key", ",", "value", ")", ")", "if", "not", "journal_name", "==", "\"PoS\"", ":", "# Special handling of journal name and volumes, except PoS", "letter", "=", "return_letters_from_string", "(", "volume_letter", ")", "if", "letter", ":", "journal_name", "=", "\"{0} {1}\"", ".", "format", "(", "journal_name", ",", "letter", ")", "volume_letter", "=", "volume_letter", ".", "strip", "(", "letter", ")", "if", "journal_name", ":", "new_subs", ".", "append", "(", "(", "\"p\"", ",", "journal_name", ")", ")", "if", "volume_letter", ":", "new_subs", ".", "append", "(", "(", "\"v\"", ",", "volume_letter", ")", ")", "record_delete_field", "(", "self", ".", "record", ",", "tag", "=", "\"773\"", ",", "field_position_global", "=", "field", "[", "4", "]", ")", "record_add_field", "(", "self", ".", "record", ",", "\"773\"", ",", "subfields", "=", "new_subs", ")" ]
33a7f8aa9dade1d863110c6d8b27dfd955cb471f
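Two of the transformations in update_journals, sketched with made-up values: the dot/space normalisation of a journal name (which in the real method comes from the journal mapping, not a literal) and moving a leading volume letter from $v into the name. return_letters_from_string below is a simplified stand-in for the harvestingkit helper of the same name.

import string

def return_letters_from_string(text):
    # simplified stand-in: keep only the ASCII letters
    return ''.join(ch for ch in text if ch in string.ascii_letters)

journal_name = 'Phys.Rev.'
journal_name = journal_name.replace('. ', '.').replace('.', '. ').replace('. ,', '.,').strip()

volume = 'D85'
letter = return_letters_from_string(volume)
if letter:
    journal_name = '{0} {1}'.format(journal_name, letter)
    volume = volume.strip(letter)

print('%s %s' % (journal_name, volume))   # Phys. Rev. D 85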
valid
Inspire2CDS.update_thesis_supervisors
700 -> 701 Thesis supervisors.
harvestingkit/inspire_cds_package/from_inspire.py
def update_thesis_supervisors(self): """700 -> 701 Thesis supervisors.""" for field in record_get_field_instances(self.record, '701'): subs = list(field[0]) subs.append(("e", "dir.")) record_add_field(self.record, '700', subfields=subs) record_delete_fields(self.record, '701')
[ "700", "-", ">", "701", "Thesis", "supervisors", "." ]
inspirehep/harvesting-kit
python
https://github.com/inspirehep/harvesting-kit/blob/33a7f8aa9dade1d863110c6d8b27dfd955cb471f/harvestingkit/inspire_cds_package/from_inspire.py#L491-L497
[ "def", "update_thesis_supervisors", "(", "self", ")", ":", "for", "field", "in", "record_get_field_instances", "(", "self", ".", "record", ",", "'701'", ")", ":", "subs", "=", "list", "(", "field", "[", "0", "]", ")", "subs", ".", "append", "(", "(", "\"e\"", ",", "\"dir.\"", ")", ")", "record_add_field", "(", "self", ".", "record", ",", "'700'", ",", "subfields", "=", "subs", ")", "record_delete_fields", "(", "self", ".", "record", ",", "'701'", ")" ]
33a7f8aa9dade1d863110c6d8b27dfd955cb471f
valid
Inspire2CDS.update_thesis_information
501 degree info - move subfields.
harvestingkit/inspire_cds_package/from_inspire.py
def update_thesis_information(self): """501 degree info - move subfields.""" fields_501 = record_get_field_instances(self.record, '502') for field in fields_501: new_subs = [] for key, value in field[0]: if key == 'b': new_subs.append(('a', value)) elif key == 'c': new_subs.append(('b', value)) elif key == 'd': new_subs.append(('c', value)) else: new_subs.append((key, value)) record_delete_field(self.record, tag="502", field_position_global=field[4]) record_add_field(self.record, "502", subfields=new_subs)
def update_thesis_information(self): """501 degree info - move subfields.""" fields_501 = record_get_field_instances(self.record, '502') for field in fields_501: new_subs = [] for key, value in field[0]: if key == 'b': new_subs.append(('a', value)) elif key == 'c': new_subs.append(('b', value)) elif key == 'd': new_subs.append(('c', value)) else: new_subs.append((key, value)) record_delete_field(self.record, tag="502", field_position_global=field[4]) record_add_field(self.record, "502", subfields=new_subs)
[ "501", "degree", "info", "-", "move", "subfields", "." ]
inspirehep/harvesting-kit
python
https://github.com/inspirehep/harvesting-kit/blob/33a7f8aa9dade1d863110c6d8b27dfd955cb471f/harvestingkit/inspire_cds_package/from_inspire.py#L499-L515
[ "def", "update_thesis_information", "(", "self", ")", ":", "fields_501", "=", "record_get_field_instances", "(", "self", ".", "record", ",", "'502'", ")", "for", "field", "in", "fields_501", ":", "new_subs", "=", "[", "]", "for", "key", ",", "value", "in", "field", "[", "0", "]", ":", "if", "key", "==", "'b'", ":", "new_subs", ".", "append", "(", "(", "'a'", ",", "value", ")", ")", "elif", "key", "==", "'c'", ":", "new_subs", ".", "append", "(", "(", "'b'", ",", "value", ")", ")", "elif", "key", "==", "'d'", ":", "new_subs", ".", "append", "(", "(", "'c'", ",", "value", ")", ")", "else", ":", "new_subs", ".", "append", "(", "(", "key", ",", "value", ")", ")", "record_delete_field", "(", "self", ".", "record", ",", "tag", "=", "\"502\"", ",", "field_position_global", "=", "field", "[", "4", "]", ")", "record_add_field", "(", "self", ".", "record", ",", "\"502\"", ",", "subfields", "=", "new_subs", ")" ]
33a7f8aa9dade1d863110c6d8b27dfd955cb471f
valid
Inspire2CDS.update_pagenumber
300 page number.
harvestingkit/inspire_cds_package/from_inspire.py
def update_pagenumber(self): """300 page number.""" pages = record_get_field_instances(self.record, '300') for field in pages: for idx, (key, value) in enumerate(field[0]): if key == 'a': field[0][idx] = ('a', "{0} p".format(value))
def update_pagenumber(self): """300 page number.""" pages = record_get_field_instances(self.record, '300') for field in pages: for idx, (key, value) in enumerate(field[0]): if key == 'a': field[0][idx] = ('a', "{0} p".format(value))
[ "300", "page", "number", "." ]
inspirehep/harvesting-kit
python
https://github.com/inspirehep/harvesting-kit/blob/33a7f8aa9dade1d863110c6d8b27dfd955cb471f/harvestingkit/inspire_cds_package/from_inspire.py#L517-L523
[ "def", "update_pagenumber", "(", "self", ")", ":", "pages", "=", "record_get_field_instances", "(", "self", ".", "record", ",", "'300'", ")", "for", "field", "in", "pages", ":", "for", "idx", ",", "(", "key", ",", "value", ")", "in", "enumerate", "(", "field", "[", "0", "]", ")", ":", "if", "key", "==", "'a'", ":", "field", "[", "0", "]", "[", "idx", "]", "=", "(", "'a'", ",", "\"{0} p\"", ".", "format", "(", "value", ")", ")" ]
33a7f8aa9dade1d863110c6d8b27dfd955cb471f
valid
Inspire2CDS.update_date
269 Date normalization.
harvestingkit/inspire_cds_package/from_inspire.py
def update_date(self): """269 Date normalization.""" dates_269 = record_get_field_instances(self.record, '269') for idx, field in enumerate(dates_269): new_subs = [] old_subs = field[0] for code, value in old_subs: if code == "c": new_subs.append(( "c", convert_date_from_iso_to_human(value) )) else: new_subs.append((code, value)) dates_269[idx] = field_swap_subfields(field, new_subs)
def update_date(self): """269 Date normalization.""" dates_269 = record_get_field_instances(self.record, '269') for idx, field in enumerate(dates_269): new_subs = [] old_subs = field[0] for code, value in old_subs: if code == "c": new_subs.append(( "c", convert_date_from_iso_to_human(value) )) else: new_subs.append((code, value)) dates_269[idx] = field_swap_subfields(field, new_subs)
[ "269", "Date", "normalization", "." ]
inspirehep/harvesting-kit
python
https://github.com/inspirehep/harvesting-kit/blob/33a7f8aa9dade1d863110c6d8b27dfd955cb471f/harvestingkit/inspire_cds_package/from_inspire.py#L525-L539
[ "def", "update_date", "(", "self", ")", ":", "dates_269", "=", "record_get_field_instances", "(", "self", ".", "record", ",", "'269'", ")", "for", "idx", ",", "field", "in", "enumerate", "(", "dates_269", ")", ":", "new_subs", "=", "[", "]", "old_subs", "=", "field", "[", "0", "]", "for", "code", ",", "value", "in", "old_subs", ":", "if", "code", "==", "\"c\"", ":", "new_subs", ".", "append", "(", "(", "\"c\"", ",", "convert_date_from_iso_to_human", "(", "value", ")", ")", ")", "else", ":", "new_subs", ".", "append", "(", "(", "code", ",", "value", ")", ")", "dates_269", "[", "idx", "]", "=", "field_swap_subfields", "(", "field", ",", "new_subs", ")" ]
33a7f8aa9dade1d863110c6d8b27dfd955cb471f
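The only non-MARC step in update_date is convert_date_from_iso_to_human, which is imported from elsewhere in harvestingkit and whose exact output format is not shown in this record. A minimal stand-in, assuming the target is a plain 'DD Mon YYYY' rendering and that malformed or partial values should pass through unchanged:

```python
from datetime import datetime

def iso_to_human(value):
    """Illustrative replacement for convert_date_from_iso_to_human:
    '2014-03-07' -> '07 Mar 2014'; anything unparseable is returned as-is."""
    try:
        return datetime.strptime(value, "%Y-%m-%d").strftime("%d %b %Y")
    except ValueError:
        return value

print(iso_to_human("2014-03-07"))  # 07 Mar 2014
print(iso_to_human("2014"))        # 2014 (left untouched)
```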
valid
Inspire2CDS.update_date_year
260 Date normalization.
harvestingkit/inspire_cds_package/from_inspire.py
def update_date_year(self): """260 Date normalization.""" dates = record_get_field_instances(self.record, '260') for field in dates: for idx, (key, value) in enumerate(field[0]): if key == 'c': field[0][idx] = ('c', value[:4]) elif key == 't': del field[0][idx] if not dates: published_years = record_get_field_values(self.record, "773", code="y") if published_years: record_add_field( self.record, "260", subfields=[("c", published_years[0][:4])]) else: other_years = record_get_field_values(self.record, "269", code="c") if other_years: record_add_field( self.record, "260", subfields=[("c", other_years[0][:4])])
def update_date_year(self): """260 Date normalization.""" dates = record_get_field_instances(self.record, '260') for field in dates: for idx, (key, value) in enumerate(field[0]): if key == 'c': field[0][idx] = ('c', value[:4]) elif key == 't': del field[0][idx] if not dates: published_years = record_get_field_values(self.record, "773", code="y") if published_years: record_add_field( self.record, "260", subfields=[("c", published_years[0][:4])]) else: other_years = record_get_field_values(self.record, "269", code="c") if other_years: record_add_field( self.record, "260", subfields=[("c", other_years[0][:4])])
[ "260", "Date", "normalization", "." ]
inspirehep/harvesting-kit
python
https://github.com/inspirehep/harvesting-kit/blob/33a7f8aa9dade1d863110c6d8b27dfd955cb471f/harvestingkit/inspire_cds_package/from_inspire.py#L541-L559
[ "def", "update_date_year", "(", "self", ")", ":", "dates", "=", "record_get_field_instances", "(", "self", ".", "record", ",", "'260'", ")", "for", "field", "in", "dates", ":", "for", "idx", ",", "(", "key", ",", "value", ")", "in", "enumerate", "(", "field", "[", "0", "]", ")", ":", "if", "key", "==", "'c'", ":", "field", "[", "0", "]", "[", "idx", "]", "=", "(", "'c'", ",", "value", "[", ":", "4", "]", ")", "elif", "key", "==", "'t'", ":", "del", "field", "[", "0", "]", "[", "idx", "]", "if", "not", "dates", ":", "published_years", "=", "record_get_field_values", "(", "self", ".", "record", ",", "\"773\"", ",", "code", "=", "\"y\"", ")", "if", "published_years", ":", "record_add_field", "(", "self", ".", "record", ",", "\"260\"", ",", "subfields", "=", "[", "(", "\"c\"", ",", "published_years", "[", "0", "]", "[", ":", "4", "]", ")", "]", ")", "else", ":", "other_years", "=", "record_get_field_values", "(", "self", ".", "record", ",", "\"269\"", ",", "code", "=", "\"c\"", ")", "if", "other_years", ":", "record_add_field", "(", "self", ".", "record", ",", "\"260\"", ",", "subfields", "=", "[", "(", "\"c\"", ",", "other_years", "[", "0", "]", "[", ":", "4", "]", ")", "]", ")" ]
33a7f8aa9dade1d863110c6d8b27dfd955cb471f
valid
Inspire2CDS.is_published
Check fields 980 and 773 to see if the record has already been published. :return: True if published, else False

harvestingkit/inspire_cds_package/from_inspire.py
def is_published(self): """Check fields 980 and 773 to see if the record has already been published. :return: True if published, else False """ field773 = record_get_field_instances(self.record, '773') for f773 in field773: if 'c' in field_get_subfields(f773): return True return False
def is_published(self): """Check fields 980 and 773 to see if the record has already been published. :return: True if published, else False """ field773 = record_get_field_instances(self.record, '773') for f773 in field773: if 'c' in field_get_subfields(f773): return True return False
[ "Check", "fields", "980", "and", "773", "to", "see", "if", "the", "record", "has", "already", "been", "published", "." ]
inspirehep/harvesting-kit
python
https://github.com/inspirehep/harvesting-kit/blob/33a7f8aa9dade1d863110c6d8b27dfd955cb471f/harvestingkit/inspire_cds_package/from_inspire.py#L561-L570
[ "def", "is_published", "(", "self", ")", ":", "field773", "=", "record_get_field_instances", "(", "self", ".", "record", ",", "'773'", ")", "for", "f773", "in", "field773", ":", "if", "'c'", "in", "field_get_subfields", "(", "f773", ")", ":", "return", "True", "return", "False" ]
33a7f8aa9dade1d863110c6d8b27dfd955cb471f
valid
Inspire2CDS.update_links_and_ffts
FFT (856) Dealing with files.
harvestingkit/inspire_cds_package/from_inspire.py
def update_links_and_ffts(self): """FFT (856) Dealing with files.""" for field in record_get_field_instances(self.record, tag='856', ind1='4'): subs = field_get_subfields(field) newsubs = [] url = subs.get("u", []) if not url: record_delete_field(self.record, '856', ind1='4', field_position_global=field[4]) continue url = url[0] if "inspirehep.net/record" in url and url.endswith("pdf"): # We have an FFT from INSPIRE newsubs.append(('a', url)) description = subs.get("y", []) if description: newsubs.append(('d', description[0])) if newsubs: record_add_field(self.record, 'FFT', subfields=newsubs) record_delete_field(self.record, '856', ind1='4', field_position_global=field[4]) else: # Remove $w for idx, (key, value) in enumerate(field[0]): if key == 'w': del field[0][idx]
def update_links_and_ffts(self): """FFT (856) Dealing with files.""" for field in record_get_field_instances(self.record, tag='856', ind1='4'): subs = field_get_subfields(field) newsubs = [] url = subs.get("u", []) if not url: record_delete_field(self.record, '856', ind1='4', field_position_global=field[4]) continue url = url[0] if "inspirehep.net/record" in url and url.endswith("pdf"): # We have an FFT from INSPIRE newsubs.append(('a', url)) description = subs.get("y", []) if description: newsubs.append(('d', description[0])) if newsubs: record_add_field(self.record, 'FFT', subfields=newsubs) record_delete_field(self.record, '856', ind1='4', field_position_global=field[4]) else: # Remove $w for idx, (key, value) in enumerate(field[0]): if key == 'w': del field[0][idx]
[ "FFT", "(", "856", ")", "Dealing", "with", "files", "." ]
inspirehep/harvesting-kit
python
https://github.com/inspirehep/harvesting-kit/blob/33a7f8aa9dade1d863110c6d8b27dfd955cb471f/harvestingkit/inspire_cds_package/from_inspire.py#L572-L600
[ "def", "update_links_and_ffts", "(", "self", ")", ":", "for", "field", "in", "record_get_field_instances", "(", "self", ".", "record", ",", "tag", "=", "'856'", ",", "ind1", "=", "'4'", ")", ":", "subs", "=", "field_get_subfields", "(", "field", ")", "newsubs", "=", "[", "]", "url", "=", "subs", ".", "get", "(", "\"u\"", ",", "[", "]", ")", "if", "not", "url", ":", "record_delete_field", "(", "self", ".", "record", ",", "'856'", ",", "ind1", "=", "'4'", ",", "field_position_global", "=", "field", "[", "4", "]", ")", "continue", "url", "=", "url", "[", "0", "]", "if", "\"inspirehep.net/record\"", "in", "url", "and", "url", ".", "endswith", "(", "\"pdf\"", ")", ":", "# We have an FFT from INSPIRE", "newsubs", ".", "append", "(", "(", "'a'", ",", "url", ")", ")", "description", "=", "subs", ".", "get", "(", "\"y\"", ",", "[", "]", ")", "if", "description", ":", "newsubs", ".", "append", "(", "(", "'d'", ",", "description", "[", "0", "]", ")", ")", "if", "newsubs", ":", "record_add_field", "(", "self", ".", "record", ",", "'FFT'", ",", "subfields", "=", "newsubs", ")", "record_delete_field", "(", "self", ".", "record", ",", "'856'", ",", "ind1", "=", "'4'", ",", "field_position_global", "=", "field", "[", "4", "]", ")", "else", ":", "# Remove $w", "for", "idx", ",", "(", "key", ",", "value", ")", "in", "enumerate", "(", "field", "[", "0", "]", ")", ":", "if", "key", "==", "'w'", ":", "del", "field", "[", "0", "]", "[", "idx", "]" ]
33a7f8aa9dade1d863110c6d8b27dfd955cb471f
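The decision that update_links_and_ffts makes per 856 field (a direct INSPIRE PDF link becomes an FFT attachment, anything else stays a plain URL) can be isolated from the record-manipulation calls. A small sketch of just that classification, with illustrative names:

```python
def classify_link(url, description=None):
    """Return the tag and subfields the link would be stored under:
    direct INSPIRE PDFs become FFT entries, everything else stays an 856."""
    if "inspirehep.net/record" in url and url.endswith("pdf"):
        subfields = [('a', url)]
        if description:
            subfields.append(('d', description))
        return ('FFT', subfields)
    subfields = [('u', url)]
    if description:
        subfields.append(('y', description))
    return ('856', subfields)

print(classify_link("http://inspirehep.net/record/1234/files/paper.pdf", "Fulltext"))
# ('FFT', [('a', 'http://inspirehep.net/record/1234/files/paper.pdf'), ('d', 'Fulltext')])
print(classify_link("http://example.org/abstract"))
# ('856', [('u', 'http://example.org/abstract')])
```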
valid
Inspire2CDS.update_languages
041 Language.
harvestingkit/inspire_cds_package/from_inspire.py
def update_languages(self): """041 Language.""" language_fields = record_get_field_instances(self.record, '041') language = "eng" record_delete_fields(self.record, "041") for field in language_fields: subs = field_get_subfields(field) if 'a' in subs: language = self.get_config_item(subs['a'][0], "languages") break new_subs = [('a', language)] record_add_field(self.record, "041", subfields=new_subs)
def update_languages(self): """041 Language.""" language_fields = record_get_field_instances(self.record, '041') language = "eng" record_delete_fields(self.record, "041") for field in language_fields: subs = field_get_subfields(field) if 'a' in subs: language = self.get_config_item(subs['a'][0], "languages") break new_subs = [('a', language)] record_add_field(self.record, "041", subfields=new_subs)
[ "041", "Language", "." ]
inspirehep/harvesting-kit
python
https://github.com/inspirehep/harvesting-kit/blob/33a7f8aa9dade1d863110c6d8b27dfd955cb471f/harvestingkit/inspire_cds_package/from_inspire.py#L602-L613
[ "def", "update_languages", "(", "self", ")", ":", "language_fields", "=", "record_get_field_instances", "(", "self", ".", "record", ",", "'041'", ")", "language", "=", "\"eng\"", "record_delete_fields", "(", "self", ".", "record", ",", "\"041\"", ")", "for", "field", "in", "language_fields", ":", "subs", "=", "field_get_subfields", "(", "field", ")", "if", "'a'", "in", "subs", ":", "language", "=", "self", ".", "get_config_item", "(", "subs", "[", "'a'", "]", "[", "0", "]", ",", "\"languages\"", ")", "break", "new_subs", "=", "[", "(", "'a'", ",", "language", ")", "]", "record_add_field", "(", "self", ".", "record", ",", "\"041\"", ",", "subfields", "=", "new_subs", ")" ]
33a7f8aa9dade1d863110c6d8b27dfd955cb471f
valid
pathjoin
Arguments: args (list): *args list of paths if len(args) == 1, args[0] is not a string, and args[0] is iterable, set args to args[0]. Basically:: joined_path = u'/'.join( [args[0].rstrip('/')] + [a.strip('/') for a in args[1:-1]] + [args[-1].lstrip('/')])
pgs/app.py
def pathjoin(*args, **kwargs): """ Arguments: args (list): *args list of paths if len(args) == 1, args[0] is not a string, and args[0] is iterable, set args to args[0]. Basically:: joined_path = u'/'.join( [args[0].rstrip('/')] + [a.strip('/') for a in args[1:-1]] + [args[-1].lstrip('/')]) """ log.debug('pathjoin: %r' % list(args)) def _pathjoin(*args, **kwargs): len_ = len(args) - 1 if len_ < 0: raise Exception('no args specified') elif len_ == 0: if not isinstance(args, basestring): if hasattr(args, '__iter__'): _args = args _args args = args[0] for i, arg in enumerate(args): if not i: yield arg.rstrip('/') elif i == len_: yield arg.lstrip('/') else: yield arg.strip('/') joined_path = u'/'.join(_pathjoin(*args)) return sanitize_path(joined_path)
def pathjoin(*args, **kwargs): """ Arguments: args (list): *args list of paths if len(args) == 1, args[0] is not a string, and args[0] is iterable, set args to args[0]. Basically:: joined_path = u'/'.join( [args[0].rstrip('/')] + [a.strip('/') for a in args[1:-1]] + [args[-1].lstrip('/')]) """ log.debug('pathjoin: %r' % list(args)) def _pathjoin(*args, **kwargs): len_ = len(args) - 1 if len_ < 0: raise Exception('no args specified') elif len_ == 0: if not isinstance(args, basestring): if hasattr(args, '__iter__'): _args = args _args args = args[0] for i, arg in enumerate(args): if not i: yield arg.rstrip('/') elif i == len_: yield arg.lstrip('/') else: yield arg.strip('/') joined_path = u'/'.join(_pathjoin(*args)) return sanitize_path(joined_path)
[ "Arguments", ":", "args", "(", "list", ")", ":", "*", "args", "list", "of", "paths", "if", "len", "(", "args", ")", "==", "1", "args", "[", "0", "]", "is", "not", "a", "string", "and", "args", "[", "0", "]", "is", "iterable", "set", "args", "to", "args", "[", "0", "]", "." ]
westurner/pgs
python
https://github.com/westurner/pgs/blob/1cc2bf2c41479d8d3ba50480f003183f1675e518/pgs/app.py#L60-L94
[ "def", "pathjoin", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "log", ".", "debug", "(", "'pathjoin: %r'", "%", "list", "(", "args", ")", ")", "def", "_pathjoin", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "len_", "=", "len", "(", "args", ")", "-", "1", "if", "len_", "<", "0", ":", "raise", "Exception", "(", "'no args specified'", ")", "elif", "len_", "==", "0", ":", "if", "not", "isinstance", "(", "args", ",", "basestring", ")", ":", "if", "hasattr", "(", "args", ",", "'__iter__'", ")", ":", "_args", "=", "args", "_args", "args", "=", "args", "[", "0", "]", "for", "i", ",", "arg", "in", "enumerate", "(", "args", ")", ":", "if", "not", "i", ":", "yield", "arg", ".", "rstrip", "(", "'/'", ")", "elif", "i", "==", "len_", ":", "yield", "arg", ".", "lstrip", "(", "'/'", ")", "else", ":", "yield", "arg", ".", "strip", "(", "'/'", ")", "joined_path", "=", "u'/'", ".", "join", "(", "_pathjoin", "(", "*", "args", ")", ")", "return", "sanitize_path", "(", "joined_path", ")" ]
1cc2bf2c41479d8d3ba50480f003183f1675e518
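The pathjoin docstring above states the intended join rule more directly than the recorded body does (the body contains a stray `_args` expression statement and relies on the Python 2 `basestring` name). A short restatement of the documented rule for Python 3, leaving out the logging and the `sanitize_path` post-processing:

```python
def simple_pathjoin(*args):
    """Join path segments per the rule in the docstring above: keep the first
    segment's leading '/', the last segment's trailing '/', strip the rest."""
    if len(args) == 1 and not isinstance(args[0], str) and hasattr(args[0], '__iter__'):
        args = tuple(args[0])          # a single iterable argument is unpacked
    if len(args) == 1:
        return args[0]
    return u'/'.join(
        [args[0].rstrip('/')] +
        [a.strip('/') for a in args[1:-1]] +
        [args[-1].lstrip('/')])

print(simple_pathjoin('/srv/www/', '/static/', 'css/site.css'))
# /srv/www/static/css/site.css
print(simple_pathjoin(['/srv/www/', 'index.html']))
# /srv/www/index.html
```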
valid
generate_dirlist_html
Generate directory listing HTML Arguments: FS (FS): filesystem object to read files from filepath (str): path to generate directory listings for Keyword Arguments: list_dir (callable: list[str]): list file names in a directory isdir (callable: bool): os.path.isdir Yields: str: lines of an HTML table
pgs/app.py
def generate_dirlist_html(FS, filepath): """ Generate directory listing HTML Arguments: FS (FS): filesystem object to read files from filepath (str): path to generate directory listings for Keyword Arguments: list_dir (callable: list[str]): list file names in a directory isdir (callable: bool): os.path.isdir Yields: str: lines of an HTML table """ yield '<table class="dirlist">' if filepath == '/': filepath = '' for name in FS.listdir(filepath): full_path = pathjoin(filepath, name) if FS.isdir(full_path): full_path = full_path + '/' yield u'<tr><td><a href="{0}">{0}</a></td></tr>'.format( cgi.escape(full_path)) # TODO XXX yield '</table>'
def generate_dirlist_html(FS, filepath): """ Generate directory listing HTML Arguments: FS (FS): filesystem object to read files from filepath (str): path to generate directory listings for Keyword Arguments: list_dir (callable: list[str]): list file names in a directory isdir (callable: bool): os.path.isdir Yields: str: lines of an HTML table """ yield '<table class="dirlist">' if filepath == '/': filepath = '' for name in FS.listdir(filepath): full_path = pathjoin(filepath, name) if FS.isdir(full_path): full_path = full_path + '/' yield u'<tr><td><a href="{0}">{0}</a></td></tr>'.format( cgi.escape(full_path)) # TODO XXX yield '</table>'
[ "Generate", "directory", "listing", "HTML" ]
westurner/pgs
python
https://github.com/westurner/pgs/blob/1cc2bf2c41479d8d3ba50480f003183f1675e518/pgs/app.py#L386-L410
[ "def", "generate_dirlist_html", "(", "FS", ",", "filepath", ")", ":", "yield", "'<table class=\"dirlist\">'", "if", "filepath", "==", "'/'", ":", "filepath", "=", "''", "for", "name", "in", "FS", ".", "listdir", "(", "filepath", ")", ":", "full_path", "=", "pathjoin", "(", "filepath", ",", "name", ")", "if", "FS", ".", "isdir", "(", "full_path", ")", ":", "full_path", "=", "full_path", "+", "'/'", "yield", "u'<tr><td><a href=\"{0}\">{0}</a></td></tr>'", ".", "format", "(", "cgi", ".", "escape", "(", "full_path", ")", ")", "# TODO XXX", "yield", "'</table>'" ]
1cc2bf2c41479d8d3ba50480f003183f1675e518
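generate_dirlist_html above writes the listing against a pluggable FS object and uses cgi.escape, which no longer exists as of Python 3.8. The same idea against the local filesystem, with html.escape, purely as an illustration (the FS abstraction and the pathjoin call are dropped):

```python
import html
import os

def dirlist_rows(root, web_prefix='/'):
    """Yield the same one-column HTML table as above, one row per entry,
    with a trailing '/' appended to directories."""
    yield '<table class="dirlist">'
    for name in sorted(os.listdir(root)):
        href = web_prefix.rstrip('/') + '/' + name
        if os.path.isdir(os.path.join(root, name)):
            href += '/'
        yield u'<tr><td><a href="{0}">{0}</a></td></tr>'.format(html.escape(href))
    yield '</table>'

print('\n'.join(dirlist_rows('.')))
```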
valid
git_static_file
This method is derived from bottle.static_file: Open [a file] and return :exc:`HTTPResponse` with status code 200, 305, 403 or 404. The ``Content-Type``, ``Content-Encoding``, ``Content-Length`` and ``Last-Modified`` headers are set if possible. Special support for ``If-Modified-Since`` [...]. :param filename: Name or path of the file to send. :param mimetype: Defines the content-type header (default: guess from file extension) :param download: If True, ask the browser to open a `Save as...` dialog instead of opening the file with the associated program. You can specify a custom filename as a string. If not specified, the original filename is used (default: False). :param charset: The charset to use for files with a ``text/*`` mime-type. (default: UTF-8)
pgs/app.py
def git_static_file(filename, mimetype='auto', download=False, charset='UTF-8'): """ This method is derived from bottle.static_file: Open [a file] and return :exc:`HTTPResponse` with status code 200, 305, 403 or 404. The ``Content-Type``, ``Content-Encoding``, ``Content-Length`` and ``Last-Modified`` headers are set if possible. Special support for ``If-Modified-Since`` [...]. :param filename: Name or path of the file to send. :param mimetype: Defines the content-type header (default: guess from file extension) :param download: If True, ask the browser to open a `Save as...` dialog instead of opening the file with the associated program. You can specify a custom filename as a string. If not specified, the original filename is used (default: False). :param charset: The charset to use for files with a ``text/*`` mime-type. (default: UTF-8) """ # root = os.path.abspath(root) + os.sep # filename = os.path.abspath(pathjoin(root, filename.strip('/\\'))) filename = filename.strip('/\\') headers = dict() FS = request.app.config['pgs.FS'] # if not filename.startswith(root): # return HTTPError(403, "Access denied.") if not FS.exists(filename): return HTTPError(404, "Not found.") # if not os.access(filename, os.R_OK): # return HTTPError(403, "You do not have permission to access this file.") if mimetype == 'auto': if download and download is not True: mimetype, encoding = mimetypes.guess_type(download) else: mimetype, encoding = mimetypes.guess_type(filename) if encoding: headers['Content-Encoding'] = encoding if mimetype: if mimetype[:5] == 'text/' and charset and 'charset' not in mimetype: mimetype += '; charset=%s' % charset headers['Content-Type'] = mimetype if download: download = os.path.basename(filename if download else download) headers['Content-Disposition'] = 'attachment; filename="%s"' % download # stats = os.stat(filename) info = FS.getinfo(filename) headers['Content-Length'] = clen = info['size'] lm = time.strftime("%a, %d %b %Y %H:%M:%S GMT", time.gmtime(info['modified_time'])) headers['Last-Modified'] = lm ims = request.environ.get('HTTP_IF_MODIFIED_SINCE') if ims: ims = parse_date(ims.split(";")[0].strip()) mtime = info['modified_time'] if mtime and ims is not None and ims >= int(mtime): headers['Date'] = time.strftime("%a, %d %b %Y %H:%M:%S GMT", time.gmtime()) return HTTPResponse(status=304, **headers) body = '' if request.method == 'HEAD' else FS.get_fileobj(filename) clen # headers["Accept-Ranges"] = "bytes" # ranges = request.environ.get('HTTP_RANGE') # if 'HTTP_RANGE' in request.environ: # ranges = list(parse_range_header(request.environ['HTTP_RANGE'], clen)) # if not ranges: # return HTTPError(416, "Requested Range Not Satisfiable") # offset, end = ranges[0] # headers["Content-Range"] = "bytes %d-%d/%d" % (offset, end - 1, clen) # headers["Content-Length"] = str(end - offset) # if body: body = _file_iter_range(body, offset, end - offset) # return HTTPResponse(body, status=206, **headers) return HTTPResponse(body, **headers)
def git_static_file(filename, mimetype='auto', download=False, charset='UTF-8'): """ This method is derived from bottle.static_file: Open [a file] and return :exc:`HTTPResponse` with status code 200, 305, 403 or 404. The ``Content-Type``, ``Content-Encoding``, ``Content-Length`` and ``Last-Modified`` headers are set if possible. Special support for ``If-Modified-Since`` [...]. :param filename: Name or path of the file to send. :param mimetype: Defines the content-type header (default: guess from file extension) :param download: If True, ask the browser to open a `Save as...` dialog instead of opening the file with the associated program. You can specify a custom filename as a string. If not specified, the original filename is used (default: False). :param charset: The charset to use for files with a ``text/*`` mime-type. (default: UTF-8) """ # root = os.path.abspath(root) + os.sep # filename = os.path.abspath(pathjoin(root, filename.strip('/\\'))) filename = filename.strip('/\\') headers = dict() FS = request.app.config['pgs.FS'] # if not filename.startswith(root): # return HTTPError(403, "Access denied.") if not FS.exists(filename): return HTTPError(404, "Not found.") # if not os.access(filename, os.R_OK): # return HTTPError(403, "You do not have permission to access this file.") if mimetype == 'auto': if download and download is not True: mimetype, encoding = mimetypes.guess_type(download) else: mimetype, encoding = mimetypes.guess_type(filename) if encoding: headers['Content-Encoding'] = encoding if mimetype: if mimetype[:5] == 'text/' and charset and 'charset' not in mimetype: mimetype += '; charset=%s' % charset headers['Content-Type'] = mimetype if download: download = os.path.basename(filename if download else download) headers['Content-Disposition'] = 'attachment; filename="%s"' % download # stats = os.stat(filename) info = FS.getinfo(filename) headers['Content-Length'] = clen = info['size'] lm = time.strftime("%a, %d %b %Y %H:%M:%S GMT", time.gmtime(info['modified_time'])) headers['Last-Modified'] = lm ims = request.environ.get('HTTP_IF_MODIFIED_SINCE') if ims: ims = parse_date(ims.split(";")[0].strip()) mtime = info['modified_time'] if mtime and ims is not None and ims >= int(mtime): headers['Date'] = time.strftime("%a, %d %b %Y %H:%M:%S GMT", time.gmtime()) return HTTPResponse(status=304, **headers) body = '' if request.method == 'HEAD' else FS.get_fileobj(filename) clen # headers["Accept-Ranges"] = "bytes" # ranges = request.environ.get('HTTP_RANGE') # if 'HTTP_RANGE' in request.environ: # ranges = list(parse_range_header(request.environ['HTTP_RANGE'], clen)) # if not ranges: # return HTTPError(416, "Requested Range Not Satisfiable") # offset, end = ranges[0] # headers["Content-Range"] = "bytes %d-%d/%d" % (offset, end - 1, clen) # headers["Content-Length"] = str(end - offset) # if body: body = _file_iter_range(body, offset, end - offset) # return HTTPResponse(body, status=206, **headers) return HTTPResponse(body, **headers)
[ "This", "method", "is", "derived", "from", "bottle", ".", "static_file", ":" ]
westurner/pgs
python
https://github.com/westurner/pgs/blob/1cc2bf2c41479d8d3ba50480f003183f1675e518/pgs/app.py#L460-L542
[ "def", "git_static_file", "(", "filename", ",", "mimetype", "=", "'auto'", ",", "download", "=", "False", ",", "charset", "=", "'UTF-8'", ")", ":", "# root = os.path.abspath(root) + os.sep", "# filename = os.path.abspath(pathjoin(root, filename.strip('/\\\\')))", "filename", "=", "filename", ".", "strip", "(", "'/\\\\'", ")", "headers", "=", "dict", "(", ")", "FS", "=", "request", ".", "app", ".", "config", "[", "'pgs.FS'", "]", "# if not filename.startswith(root):", "# return HTTPError(403, \"Access denied.\")", "if", "not", "FS", ".", "exists", "(", "filename", ")", ":", "return", "HTTPError", "(", "404", ",", "\"Not found.\"", ")", "# if not os.access(filename, os.R_OK):", "# return HTTPError(403, \"You do not have permission to access this file.\")", "if", "mimetype", "==", "'auto'", ":", "if", "download", "and", "download", "is", "not", "True", ":", "mimetype", ",", "encoding", "=", "mimetypes", ".", "guess_type", "(", "download", ")", "else", ":", "mimetype", ",", "encoding", "=", "mimetypes", ".", "guess_type", "(", "filename", ")", "if", "encoding", ":", "headers", "[", "'Content-Encoding'", "]", "=", "encoding", "if", "mimetype", ":", "if", "mimetype", "[", ":", "5", "]", "==", "'text/'", "and", "charset", "and", "'charset'", "not", "in", "mimetype", ":", "mimetype", "+=", "'; charset=%s'", "%", "charset", "headers", "[", "'Content-Type'", "]", "=", "mimetype", "if", "download", ":", "download", "=", "os", ".", "path", ".", "basename", "(", "filename", "if", "download", "else", "download", ")", "headers", "[", "'Content-Disposition'", "]", "=", "'attachment; filename=\"%s\"'", "%", "download", "# stats = os.stat(filename)", "info", "=", "FS", ".", "getinfo", "(", "filename", ")", "headers", "[", "'Content-Length'", "]", "=", "clen", "=", "info", "[", "'size'", "]", "lm", "=", "time", ".", "strftime", "(", "\"%a, %d %b %Y %H:%M:%S GMT\"", ",", "time", ".", "gmtime", "(", "info", "[", "'modified_time'", "]", ")", ")", "headers", "[", "'Last-Modified'", "]", "=", "lm", "ims", "=", "request", ".", "environ", ".", "get", "(", "'HTTP_IF_MODIFIED_SINCE'", ")", "if", "ims", ":", "ims", "=", "parse_date", "(", "ims", ".", "split", "(", "\";\"", ")", "[", "0", "]", ".", "strip", "(", ")", ")", "mtime", "=", "info", "[", "'modified_time'", "]", "if", "mtime", "and", "ims", "is", "not", "None", "and", "ims", ">=", "int", "(", "mtime", ")", ":", "headers", "[", "'Date'", "]", "=", "time", ".", "strftime", "(", "\"%a, %d %b %Y %H:%M:%S GMT\"", ",", "time", ".", "gmtime", "(", ")", ")", "return", "HTTPResponse", "(", "status", "=", "304", ",", "*", "*", "headers", ")", "body", "=", "''", "if", "request", ".", "method", "==", "'HEAD'", "else", "FS", ".", "get_fileobj", "(", "filename", ")", "clen", "# headers[\"Accept-Ranges\"] = \"bytes\"", "# ranges = request.environ.get('HTTP_RANGE')", "# if 'HTTP_RANGE' in request.environ:", "# ranges = list(parse_range_header(request.environ['HTTP_RANGE'], clen))", "# if not ranges:", "# return HTTPError(416, \"Requested Range Not Satisfiable\")", "# offset, end = ranges[0]", "# headers[\"Content-Range\"] = \"bytes %d-%d/%d\" % (offset, end - 1, clen)", "# headers[\"Content-Length\"] = str(end - offset)", "# if body: body = _file_iter_range(body, offset, end - offset)", "# return HTTPResponse(body, status=206, **headers)", "return", "HTTPResponse", "(", "body", ",", "*", "*", "headers", ")" ]
1cc2bf2c41479d8d3ba50480f003183f1675e518
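The caching behaviour in git_static_file (emit Last-Modified, and answer 304 with no body when the client's If-Modified-Since is at least as new as the file) is independent of bottle and of the FS object; note also that the recorded body contains a bare `clen` expression statement left over next to the commented-out range handling. A standalone sketch of the 304 decision using only the standard library, with email.utils standing in for bottle's parse_date:

```python
import time
from email.utils import formatdate, mktime_tz, parsedate_tz

def not_modified(if_modified_since, mtime):
    """True when a 304 response is appropriate: the client's cached copy
    (If-Modified-Since) is no older than the resource's modification time."""
    if not if_modified_since:
        return False
    parsed = parsedate_tz(if_modified_since.split(";")[0].strip())
    if parsed is None:
        return False
    return mktime_tz(parsed) >= int(mtime)

mtime = time.time() - 3600                      # file last changed an hour ago
cached = formatdate(time.time(), usegmt=True)   # client revalidates a fresh copy
print(not_modified(cached, mtime))              # True  -> send 304, no body
print(not_modified(None, mtime))                # False -> send the file
```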
valid
check_pkgs_integrity
Checks if files are not being uploaded to server. @timeout - time after which the script will register an error.
harvestingkit/scoap3utils.py
def check_pkgs_integrity(filelist, logger, ftp_connector, timeout=120, sleep_time=10): """ Checks if files are not being uploaded to server. @timeout - time after which the script will register an error. """ ref_1 = [] ref_2 = [] i = 1 print >> sys.stdout, "\nChecking packages integrity." for filename in filelist: # ref_1.append(self.get_remote_file_size(filename)) get_remote_file_size(ftp_connector, filename, ref_1) print >> sys.stdout, "\nGoing to sleep for %i sec." % (sleep_time,) time.sleep(sleep_time) while sleep_time*i < timeout: for filename in filelist: # ref_2.append(self.get_remote_file_size(filename)) get_remote_file_size(ftp_connector, filename, ref_2) if ref_1 == ref_2: print >> sys.stdout, "\nIntegrity OK:)" logger.info("Packages integrity OK.") break else: print >> sys.stdout, "\nWaiting %d time for itegrity..." % (i,) logger.info("\nWaiting %d time for itegrity..." % (i,)) i += 1 ref_1, ref_2 = ref_2, [] time.sleep(sleep_time) else: not_finished_files = [] for count, val1 in enumerate(ref_1): if val1 != ref_2[count]: not_finished_files.append(filelist[count]) print >> sys.stdout, "\nOMG, OMG something wrong with integrity." logger.error("Integrity check faild for files %s" % (not_finished_files,))
def check_pkgs_integrity(filelist, logger, ftp_connector, timeout=120, sleep_time=10): """ Checks if files are not being uploaded to server. @timeout - time after which the script will register an error. """ ref_1 = [] ref_2 = [] i = 1 print >> sys.stdout, "\nChecking packages integrity." for filename in filelist: # ref_1.append(self.get_remote_file_size(filename)) get_remote_file_size(ftp_connector, filename, ref_1) print >> sys.stdout, "\nGoing to sleep for %i sec." % (sleep_time,) time.sleep(sleep_time) while sleep_time*i < timeout: for filename in filelist: # ref_2.append(self.get_remote_file_size(filename)) get_remote_file_size(ftp_connector, filename, ref_2) if ref_1 == ref_2: print >> sys.stdout, "\nIntegrity OK:)" logger.info("Packages integrity OK.") break else: print >> sys.stdout, "\nWaiting %d time for itegrity..." % (i,) logger.info("\nWaiting %d time for itegrity..." % (i,)) i += 1 ref_1, ref_2 = ref_2, [] time.sleep(sleep_time) else: not_finished_files = [] for count, val1 in enumerate(ref_1): if val1 != ref_2[count]: not_finished_files.append(filelist[count]) print >> sys.stdout, "\nOMG, OMG something wrong with integrity." logger.error("Integrity check faild for files %s" % (not_finished_files,))
[ "Checks", "if", "files", "are", "not", "being", "uploaded", "to", "server", "." ]
inspirehep/harvesting-kit
python
https://github.com/inspirehep/harvesting-kit/blob/33a7f8aa9dade1d863110c6d8b27dfd955cb471f/harvestingkit/scoap3utils.py#L120-L158
[ "def", "check_pkgs_integrity", "(", "filelist", ",", "logger", ",", "ftp_connector", ",", "timeout", "=", "120", ",", "sleep_time", "=", "10", ")", ":", "ref_1", "=", "[", "]", "ref_2", "=", "[", "]", "i", "=", "1", "print", ">>", "sys", ".", "stdout", ",", "\"\\nChecking packages integrity.\"", "for", "filename", "in", "filelist", ":", "# ref_1.append(self.get_remote_file_size(filename))", "get_remote_file_size", "(", "ftp_connector", ",", "filename", ",", "ref_1", ")", "print", ">>", "sys", ".", "stdout", ",", "\"\\nGoing to sleep for %i sec.\"", "%", "(", "sleep_time", ",", ")", "time", ".", "sleep", "(", "sleep_time", ")", "while", "sleep_time", "*", "i", "<", "timeout", ":", "for", "filename", "in", "filelist", ":", "# ref_2.append(self.get_remote_file_size(filename))", "get_remote_file_size", "(", "ftp_connector", ",", "filename", ",", "ref_2", ")", "if", "ref_1", "==", "ref_2", ":", "print", ">>", "sys", ".", "stdout", ",", "\"\\nIntegrity OK:)\"", "logger", ".", "info", "(", "\"Packages integrity OK.\"", ")", "break", "else", ":", "print", ">>", "sys", ".", "stdout", ",", "\"\\nWaiting %d time for itegrity...\"", "%", "(", "i", ",", ")", "logger", ".", "info", "(", "\"\\nWaiting %d time for itegrity...\"", "%", "(", "i", ",", ")", ")", "i", "+=", "1", "ref_1", ",", "ref_2", "=", "ref_2", ",", "[", "]", "time", ".", "sleep", "(", "sleep_time", ")", "else", ":", "not_finished_files", "=", "[", "]", "for", "count", ",", "val1", "in", "enumerate", "(", "ref_1", ")", ":", "if", "val1", "!=", "ref_2", "[", "count", "]", ":", "not_finished_files", ".", "append", "(", "filelist", "[", "count", "]", ")", "print", ">>", "sys", ".", "stdout", ",", "\"\\nOMG, OMG something wrong with integrity.\"", "logger", ".", "error", "(", "\"Integrity check faild for files %s\"", "%", "(", "not_finished_files", ",", ")", ")" ]
33a7f8aa9dade1d863110c6d8b27dfd955cb471f
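check_pkgs_integrity is essentially "poll remote file sizes until two consecutive readings agree or a timeout elapses". A generic version of that loop, decoupled from the FTP connector and the Python 2 print syntax; the probe callable and its readings below are made up for the demonstration:

```python
import time

def wait_until_stable(probe_sizes, filenames, timeout=120, sleep_time=10):
    """Return the list of files whose size was still changing when the
    timeout expired; an empty list means every upload looked finished."""
    previous = probe_sizes(filenames)
    waited = sleep_time
    time.sleep(sleep_time)
    while waited < timeout:
        current = probe_sizes(filenames)
        if current == previous:
            return []                              # sizes stable twice in a row
        previous = current
        waited += sleep_time
        time.sleep(sleep_time)
    current = probe_sizes(filenames)
    return [name for name, before, after in zip(filenames, previous, current)
            if before != after]

# Fake probe: b.zip is stable immediately, a.zip stops growing after one poll.
readings = iter([[10, 10], [20, 10], [20, 10]])
print(wait_until_stable(lambda names: next(readings), ['a.zip', 'b.zip'],
                        timeout=5, sleep_time=1))   # []
```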
valid
normalize_query
Example: >>> normalize_query(' some random words "with quotes " and spaces') ['some', 'random', 'words', 'with quotes', 'and', 'spaces']
model_search/lib.py
def normalize_query(query_string, terms=TERMS, norm_space=NORM_SPACE): """ Example: >>> normalize_query(' some random words "with quotes " and spaces') ['some', 'random', 'words', 'with quotes', 'and', 'spaces'] """ return [ norm_space(' ', (t[0] or t[1]).strip()) for t in terms(query_string)]
def normalize_query(query_string, terms=TERMS, norm_space=NORM_SPACE): """ Example: >>> normalize_query(' some random words "with quotes " and spaces') ['some', 'random', 'words', 'with quotes', 'and', 'spaces'] """ return [ norm_space(' ', (t[0] or t[1]).strip()) for t in terms(query_string)]
[ "Example", ":", ">>>", "normalize_query", "(", "some", "random", "words", "with", "quotes", "and", "spaces", ")", "[", "some", "random", "words", "with", "quotes", "and", "spaces", "]" ]
pmaigutyak/mp-search
python
https://github.com/pmaigutyak/mp-search/blob/48d82a335667517f28893a2828101a5bd0b1c64b/model_search/lib.py#L19-L26
[ "def", "normalize_query", "(", "query_string", ",", "terms", "=", "TERMS", ",", "norm_space", "=", "NORM_SPACE", ")", ":", "return", "[", "norm_space", "(", "' '", ",", "(", "t", "[", "0", "]", "or", "t", "[", "1", "]", ")", ".", "strip", "(", ")", ")", "for", "t", "in", "terms", "(", "query_string", ")", "]" ]
48d82a335667517f28893a2828101a5bd0b1c64b
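normalize_query's behaviour hinges on the TERMS and NORM_SPACE defaults, which this record does not show. The sketch below uses plausible definitions (a regex capturing either a quoted phrase or a bare word, and a whitespace-collapsing re.sub); the real constants in model_search.lib may differ, so treat these as assumptions that merely reproduce the doctest above:

```python
import re

# Assumed definitions -- group 1 is a quoted phrase, group 2 a bare word.
TERMS = re.compile(r'"([^"]+)"|(\S+)').findall
NORM_SPACE = re.compile(r'\s{2,}').sub

def normalize_query(query_string, terms=TERMS, norm_space=NORM_SPACE):
    return [norm_space(' ', (t[0] or t[1]).strip()) for t in terms(query_string)]

print(normalize_query(' some random words "with quotes " and spaces'))
# ['some', 'random', 'words', 'with quotes', 'and', 'spaces']
```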
valid
collapse_initials
Removes the space between initials. eg T. A. --> T.A.
harvestingkit/scripts/fix_marc_record.py
def collapse_initials(name): """ Removes the space between initials. eg T. A. --> T.A.""" if len(name.split()) > 1: name = re.sub(r'([A-Z]\.) +(?=[A-Z]\.)', r'\1', name) return name
def collapse_initials(name): """ Removes the space between initials. eg T. A. --> T.A.""" if len(name.split()) > 1: name = re.sub(r'([A-Z]\.) +(?=[A-Z]\.)', r'\1', name) return name
[ "Removes", "the", "space", "between", "initials", ".", "eg", "T", ".", "A", ".", "--", ">", "T", ".", "A", "." ]
inspirehep/harvesting-kit
python
https://github.com/inspirehep/harvesting-kit/blob/33a7f8aa9dade1d863110c6d8b27dfd955cb471f/harvestingkit/scripts/fix_marc_record.py#L48-L53
[ "def", "collapse_initials", "(", "name", ")", ":", "if", "len", "(", "name", ".", "split", "(", ")", ")", ">", "1", ":", "name", "=", "re", ".", "sub", "(", "r'([A-Z]\\.) +(?=[A-Z]\\.)'", ",", "r'\\1'", ",", "name", ")", "return", "name" ]
33a7f8aa9dade1d863110c6d8b27dfd955cb471f
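The regex in collapse_initials uses a lookahead, `(?=[A-Z]\.)`, instead of consuming the second initial, which is what lets a whole run of initials collapse in a single re.sub pass. A couple of calls make that visible (the function is reproduced from the record above):

```python
import re

def collapse_initials(name):
    """ Removes the space between initials. eg T. A. --> T.A."""
    if len(name.split()) > 1:
        name = re.sub(r'([A-Z]\.) +(?=[A-Z]\.)', r'\1', name)
    return name

print(collapse_initials("T. A. Smith"))       # T.A. Smith
print(collapse_initials("J. R. R. Tolkien"))  # J.R.R. Tolkien
```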
valid
fix_name_capitalization
Converts capital letters to lower keeps first letter capital.
harvestingkit/scripts/fix_marc_record.py
def fix_name_capitalization(lastname, givennames): """ Converts capital letters to lower keeps first letter capital. """ lastnames = lastname.split() if len(lastnames) == 1: if '-' in lastname: names = lastname.split('-') names = map(lambda a: a[0] + a[1:].lower(), names) lastname = '-'.join(names) else: lastname = lastname[0] + lastname[1:].lower() else: names = [] for name in lastnames: if re.search(r'[A-Z]\.', name): names.append(name) else: names.append(name[0] + name[1:].lower()) lastname = ' '.join(names) lastname = collapse_initials(lastname) names = [] for name in givennames: if re.search(r'[A-Z]\.', name): names.append(name) else: names.append(name[0] + name[1:].lower()) givennames = ' '.join(names) return lastname, givennames
def fix_name_capitalization(lastname, givennames): """ Converts capital letters to lower keeps first letter capital. """ lastnames = lastname.split() if len(lastnames) == 1: if '-' in lastname: names = lastname.split('-') names = map(lambda a: a[0] + a[1:].lower(), names) lastname = '-'.join(names) else: lastname = lastname[0] + lastname[1:].lower() else: names = [] for name in lastnames: if re.search(r'[A-Z]\.', name): names.append(name) else: names.append(name[0] + name[1:].lower()) lastname = ' '.join(names) lastname = collapse_initials(lastname) names = [] for name in givennames: if re.search(r'[A-Z]\.', name): names.append(name) else: names.append(name[0] + name[1:].lower()) givennames = ' '.join(names) return lastname, givennames
[ "Converts", "capital", "letters", "to", "lower", "keeps", "first", "letter", "capital", "." ]
inspirehep/harvesting-kit
python
https://github.com/inspirehep/harvesting-kit/blob/33a7f8aa9dade1d863110c6d8b27dfd955cb471f/harvestingkit/scripts/fix_marc_record.py#L56-L82
[ "def", "fix_name_capitalization", "(", "lastname", ",", "givennames", ")", ":", "lastnames", "=", "lastname", ".", "split", "(", ")", "if", "len", "(", "lastnames", ")", "==", "1", ":", "if", "'-'", "in", "lastname", ":", "names", "=", "lastname", ".", "split", "(", "'-'", ")", "names", "=", "map", "(", "lambda", "a", ":", "a", "[", "0", "]", "+", "a", "[", "1", ":", "]", ".", "lower", "(", ")", ",", "names", ")", "lastname", "=", "'-'", ".", "join", "(", "names", ")", "else", ":", "lastname", "=", "lastname", "[", "0", "]", "+", "lastname", "[", "1", ":", "]", ".", "lower", "(", ")", "else", ":", "names", "=", "[", "]", "for", "name", "in", "lastnames", ":", "if", "re", ".", "search", "(", "r'[A-Z]\\.'", ",", "name", ")", ":", "names", ".", "append", "(", "name", ")", "else", ":", "names", ".", "append", "(", "name", "[", "0", "]", "+", "name", "[", "1", ":", "]", ".", "lower", "(", ")", ")", "lastname", "=", "' '", ".", "join", "(", "names", ")", "lastname", "=", "collapse_initials", "(", "lastname", ")", "names", "=", "[", "]", "for", "name", "in", "givennames", ":", "if", "re", ".", "search", "(", "r'[A-Z]\\.'", ",", "name", ")", ":", "names", ".", "append", "(", "name", ")", "else", ":", "names", ".", "append", "(", "name", "[", "0", "]", "+", "name", "[", "1", ":", "]", ".", "lower", "(", ")", ")", "givennames", "=", "' '", ".", "join", "(", "names", ")", "return", "lastname", ",", "givennames" ]
33a7f8aa9dade1d863110c6d8b27dfd955cb471f
valid
OEmbedConsumer.extract_oembeds
Scans a block of text and extracts oembed data on any urls, returning it in a list of dictionaries
oembed/consumer.py
def extract_oembeds(self, text, maxwidth=None, maxheight=None, resource_type=None): """ Scans a block of text and extracts oembed data on any urls, returning it in a list of dictionaries """ parser = text_parser() urls = parser.extract_urls(text) return self.handle_extracted_urls(urls, maxwidth, maxheight, resource_type)
def extract_oembeds(self, text, maxwidth=None, maxheight=None, resource_type=None): """ Scans a block of text and extracts oembed data on any urls, returning it in a list of dictionaries """ parser = text_parser() urls = parser.extract_urls(text) return self.handle_extracted_urls(urls, maxwidth, maxheight, resource_type)
[ "Scans", "a", "block", "of", "text", "and", "extracts", "oembed", "data", "on", "any", "urls", "returning", "it", "in", "a", "list", "of", "dictionaries" ]
worldcompany/djangoembed
python
https://github.com/worldcompany/djangoembed/blob/f3f2be283441d91d1f89db780444dc75f7b51902/oembed/consumer.py#L30-L37
[ "def", "extract_oembeds", "(", "self", ",", "text", ",", "maxwidth", "=", "None", ",", "maxheight", "=", "None", ",", "resource_type", "=", "None", ")", ":", "parser", "=", "text_parser", "(", ")", "urls", "=", "parser", ".", "extract_urls", "(", "text", ")", "return", "self", ".", "handle_extracted_urls", "(", "urls", ",", "maxwidth", ",", "maxheight", ",", "resource_type", ")" ]
f3f2be283441d91d1f89db780444dc75f7b51902
valid
OEmbedConsumer.strip
Try to maintain parity with what is extracted by extract since strip will most likely be used in conjunction with extract
oembed/consumer.py
def strip(self, text, *args, **kwargs): """ Try to maintain parity with what is extracted by extract since strip will most likely be used in conjunction with extract """ if OEMBED_DEFAULT_PARSE_HTML: extracted = self.extract_oembeds_html(text, *args, **kwargs) else: extracted = self.extract_oembeds(text, *args, **kwargs) matches = [r['original_url'] for r in extracted] match_handler = lambda m: m.group() not in matches and m.group() or '' return re.sub(URL_RE, match_handler, text)
def strip(self, text, *args, **kwargs): """ Try to maintain parity with what is extracted by extract since strip will most likely be used in conjunction with extract """ if OEMBED_DEFAULT_PARSE_HTML: extracted = self.extract_oembeds_html(text, *args, **kwargs) else: extracted = self.extract_oembeds(text, *args, **kwargs) matches = [r['original_url'] for r in extracted] match_handler = lambda m: m.group() not in matches and m.group() or '' return re.sub(URL_RE, match_handler, text)
[ "Try", "to", "maintain", "parity", "with", "what", "is", "extracted", "by", "extract", "since", "strip", "will", "most", "likely", "be", "used", "in", "conjunction", "with", "extract" ]
worldcompany/djangoembed
python
https://github.com/worldcompany/djangoembed/blob/f3f2be283441d91d1f89db780444dc75f7b51902/oembed/consumer.py#L60-L73
[ "def", "strip", "(", "self", ",", "text", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "OEMBED_DEFAULT_PARSE_HTML", ":", "extracted", "=", "self", ".", "extract_oembeds_html", "(", "text", ",", "*", "args", ",", "*", "*", "kwargs", ")", "else", ":", "extracted", "=", "self", ".", "extract_oembeds", "(", "text", ",", "*", "args", ",", "*", "*", "kwargs", ")", "matches", "=", "[", "r", "[", "'original_url'", "]", "for", "r", "in", "extracted", "]", "match_handler", "=", "lambda", "m", ":", "m", ".", "group", "(", ")", "not", "in", "matches", "and", "m", ".", "group", "(", ")", "or", "''", "return", "re", ".", "sub", "(", "URL_RE", ",", "match_handler", ",", "text", ")" ]
f3f2be283441d91d1f89db780444dc75f7b51902
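OEmbedConsumer.strip relies on re.sub with a callable replacement: every URL matched by URL_RE is dropped unless it is one of the already-extracted matches. The same pattern in isolation, with a simplistic URL_RE standing in for the module's real constant:

```python
import re

URL_RE = re.compile(r'https?://\S+')   # simplified stand-in for the real pattern

def strip_urls(text, keep=()):
    """Remove every matched URL except those listed in `keep`, mirroring the
    'keep only the already-embedded matches' logic used above."""
    return URL_RE.sub(lambda m: m.group() if m.group() in keep else '', text)

text = "docs: http://example.org/docs and demo: http://example.org/demo"
print(strip_urls(text, keep=("http://example.org/docs",)))
# docs: http://example.org/docs and demo:  (the second URL is stripped)
```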
valid
autodiscover
Automatically build the provider index.
oembed/__init__.py
def autodiscover(): """ Automatically build the provider index. """ import imp from django.conf import settings for app in settings.INSTALLED_APPS: try: app_path = __import__(app, {}, {}, [app.split('.')[-1]]).__path__ except AttributeError: continue try: imp.find_module('oembed_providers', app_path) except ImportError: continue __import__("%s.oembed_providers" % app)
def autodiscover(): """ Automatically build the provider index. """ import imp from django.conf import settings for app in settings.INSTALLED_APPS: try: app_path = __import__(app, {}, {}, [app.split('.')[-1]]).__path__ except AttributeError: continue try: imp.find_module('oembed_providers', app_path) except ImportError: continue __import__("%s.oembed_providers" % app)
[ "Automatically", "build", "the", "provider", "index", "." ]
worldcompany/djangoembed
python
https://github.com/worldcompany/djangoembed/blob/f3f2be283441d91d1f89db780444dc75f7b51902/oembed/__init__.py#L12-L30
[ "def", "autodiscover", "(", ")", ":", "import", "imp", "from", "django", ".", "conf", "import", "settings", "for", "app", "in", "settings", ".", "INSTALLED_APPS", ":", "try", ":", "app_path", "=", "__import__", "(", "app", ",", "{", "}", ",", "{", "}", ",", "[", "app", ".", "split", "(", "'.'", ")", "[", "-", "1", "]", "]", ")", ".", "__path__", "except", "AttributeError", ":", "continue", "try", ":", "imp", ".", "find_module", "(", "'oembed_providers'", ",", "app_path", ")", "except", "ImportError", ":", "continue", "__import__", "(", "\"%s.oembed_providers\"", "%", "app", ")" ]
f3f2be283441d91d1f89db780444dc75f7b51902
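The autodiscover pattern above (probe each installed app for an oembed_providers submodule and import it when present) is written against the long-deprecated imp module. An importlib-based equivalent, sketched under the assumption that a plain "does the submodule exist" check is sufficient; the app names in the commented call are hypothetical:

```python
import importlib
import importlib.util

def autodiscover_modules(app_labels, submodule="oembed_providers"):
    """Import '<app>.<submodule>' for every app that both exists and ships
    such a submodule, skipping the rest silently."""
    for app in app_labels:
        if importlib.util.find_spec(app) is None:
            continue                      # the app itself is not importable
        full_name = "{0}.{1}".format(app, submodule)
        if importlib.util.find_spec(full_name) is not None:
            importlib.import_module(full_name)

# autodiscover_modules(["myproject.videos", "myproject.photos"])
```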
valid
select
pass in a list of options, prompt the user to select one, and return the selected option or None
pyselect.py
def select(options=None): """ pass in a list of options, prompt the user to select one, and return the selected option or None """ if not options: return None width = len(str(len(options))) for x,option in enumerate(options): sys.stdout.write('{:{width}}) {}\n'.format(x+1,option, width=width)) sys.stdout.write('{:>{width}} '.format('#?', width=width+1)) sys.stdout.flush() if sys.stdin.isatty(): # regular prompt try: response = raw_input().strip() except (EOFError, KeyboardInterrupt): # handle ctrl-d, ctrl-c response = '' else: # try connecting to current tty, when using pipes sys.stdin = open("/dev/tty") try: response = '' while True: response += sys.stdin.read(1) if response.endswith('\n'): break except (EOFError, KeyboardInterrupt): sys.stdout.flush() pass try: response = int(response) - 1 except ValueError: return None if response < 0 or response >= len(options): return None return options[response]
def select(options=None): """ pass in a list of options, prompt the user to select one, and return the selected option or None """ if not options: return None width = len(str(len(options))) for x,option in enumerate(options): sys.stdout.write('{:{width}}) {}\n'.format(x+1,option, width=width)) sys.stdout.write('{:>{width}} '.format('#?', width=width+1)) sys.stdout.flush() if sys.stdin.isatty(): # regular prompt try: response = raw_input().strip() except (EOFError, KeyboardInterrupt): # handle ctrl-d, ctrl-c response = '' else: # try connecting to current tty, when using pipes sys.stdin = open("/dev/tty") try: response = '' while True: response += sys.stdin.read(1) if response.endswith('\n'): break except (EOFError, KeyboardInterrupt): sys.stdout.flush() pass try: response = int(response) - 1 except ValueError: return None if response < 0 or response >= len(options): return None return options[response]
[ "pass", "in", "a", "list", "of", "options", "promt", "the", "user", "to", "select", "one", "and", "return", "the", "selected", "option", "or", "None" ]
askedrelic/pyselect
python
https://github.com/askedrelic/pyselect/blob/2f68e3e87e3c44e9d96e1506ba98f9c3a30ded2c/pyselect.py#L11-L46
[ "def", "select", "(", "options", "=", "None", ")", ":", "if", "not", "options", ":", "return", "None", "width", "=", "len", "(", "str", "(", "len", "(", "options", ")", ")", ")", "for", "x", ",", "option", "in", "enumerate", "(", "options", ")", ":", "sys", ".", "stdout", ".", "write", "(", "'{:{width}}) {}\\n'", ".", "format", "(", "x", "+", "1", ",", "option", ",", "width", "=", "width", ")", ")", "sys", ".", "stdout", ".", "write", "(", "'{:>{width}} '", ".", "format", "(", "'#?'", ",", "width", "=", "width", "+", "1", ")", ")", "sys", ".", "stdout", ".", "flush", "(", ")", "if", "sys", ".", "stdin", ".", "isatty", "(", ")", ":", "# regular prompt", "try", ":", "response", "=", "raw_input", "(", ")", ".", "strip", "(", ")", "except", "(", "EOFError", ",", "KeyboardInterrupt", ")", ":", "# handle ctrl-d, ctrl-c", "response", "=", "''", "else", ":", "# try connecting to current tty, when using pipes", "sys", ".", "stdin", "=", "open", "(", "\"/dev/tty\"", ")", "try", ":", "response", "=", "''", "while", "True", ":", "response", "+=", "sys", ".", "stdin", ".", "read", "(", "1", ")", "if", "response", ".", "endswith", "(", "'\\n'", ")", ":", "break", "except", "(", "EOFError", ",", "KeyboardInterrupt", ")", ":", "sys", ".", "stdout", ".", "flush", "(", ")", "pass", "try", ":", "response", "=", "int", "(", "response", ")", "-", "1", "except", "ValueError", ":", "return", "None", "if", "response", "<", "0", "or", "response", ">=", "len", "(", "options", ")", ":", "return", "None", "return", "options", "[", "response", "]" ]
2f68e3e87e3c44e9d96e1506ba98f9c3a30ded2c
valid
main
Transforms the argparse arguments from Namespace to dict and then to Bunch Therefore it is not necessary to access the arguments using the dict syntax The settings can be called like regular vars on the settings object
harvestingkit/harvestingkit_cli.py
def main(): argparser = ArgumentParser() subparsers = argparser.add_subparsers(dest='selected_subparser') all_parser = subparsers.add_parser('all') elsevier_parser = subparsers.add_parser('elsevier') oxford_parser = subparsers.add_parser('oxford') springer_parser = subparsers.add_parser('springer') all_parser.add_argument('--update-credentials', action='store_true') elsevier_parser.add_argument('--run-locally', action='store_true') elsevier_parser.add_argument('--package-name') elsevier_parser.add_argument('--path') elsevier_parser.add_argument('--CONSYN', action='store_true') elsevier_parser.add_argument('--update-credentials', action='store_true') elsevier_parser.add_argument('--extract-nations', action='store_true') oxford_parser.add_argument('--dont-empty-ftp', action='store_true') oxford_parser.add_argument('--package-name') oxford_parser.add_argument('--path') oxford_parser.add_argument('--update-credentials', action='store_true') oxford_parser.add_argument('--extract-nations', action='store_true') springer_parser.add_argument('--package-name') springer_parser.add_argument('--path') springer_parser.add_argument('--update-credentials', action='store_true') springer_parser.add_argument('--extract-nations', action='store_true') ''' Transforms the argparse arguments from Namespace to dict and then to Bunch Therefore it is not necessary to access the arguments using the dict syntax The settings can be called like regular vars on the settings object ''' settings = Bunch(vars(argparser.parse_args())) call_package(settings)
def main(): argparser = ArgumentParser() subparsers = argparser.add_subparsers(dest='selected_subparser') all_parser = subparsers.add_parser('all') elsevier_parser = subparsers.add_parser('elsevier') oxford_parser = subparsers.add_parser('oxford') springer_parser = subparsers.add_parser('springer') all_parser.add_argument('--update-credentials', action='store_true') elsevier_parser.add_argument('--run-locally', action='store_true') elsevier_parser.add_argument('--package-name') elsevier_parser.add_argument('--path') elsevier_parser.add_argument('--CONSYN', action='store_true') elsevier_parser.add_argument('--update-credentials', action='store_true') elsevier_parser.add_argument('--extract-nations', action='store_true') oxford_parser.add_argument('--dont-empty-ftp', action='store_true') oxford_parser.add_argument('--package-name') oxford_parser.add_argument('--path') oxford_parser.add_argument('--update-credentials', action='store_true') oxford_parser.add_argument('--extract-nations', action='store_true') springer_parser.add_argument('--package-name') springer_parser.add_argument('--path') springer_parser.add_argument('--update-credentials', action='store_true') springer_parser.add_argument('--extract-nations', action='store_true') ''' Transforms the argparse arguments from Namespace to dict and then to Bunch Therefore it is not necessary to access the arguments using the dict syntax The settings can be called like regular vars on the settings object ''' settings = Bunch(vars(argparser.parse_args())) call_package(settings)
[ "Transforms", "the", "argparse", "arguments", "from", "Namespace", "to", "dict", "and", "then", "to", "Bunch", "Therefore", "it", "is", "not", "necessary", "to", "access", "the", "arguments", "using", "the", "dict", "syntax", "The", "settings", "can", "be", "called", "like", "regular", "vars", "on", "the", "settings", "object" ]
inspirehep/harvesting-kit
python
https://github.com/inspirehep/harvesting-kit/blob/33a7f8aa9dade1d863110c6d8b27dfd955cb471f/harvestingkit/harvestingkit_cli.py#L119-L157
[ "def", "main", "(", ")", ":", "argparser", "=", "ArgumentParser", "(", ")", "subparsers", "=", "argparser", ".", "add_subparsers", "(", "dest", "=", "'selected_subparser'", ")", "all_parser", "=", "subparsers", ".", "add_parser", "(", "'all'", ")", "elsevier_parser", "=", "subparsers", ".", "add_parser", "(", "'elsevier'", ")", "oxford_parser", "=", "subparsers", ".", "add_parser", "(", "'oxford'", ")", "springer_parser", "=", "subparsers", ".", "add_parser", "(", "'springer'", ")", "all_parser", ".", "add_argument", "(", "'--update-credentials'", ",", "action", "=", "'store_true'", ")", "elsevier_parser", ".", "add_argument", "(", "'--run-locally'", ",", "action", "=", "'store_true'", ")", "elsevier_parser", ".", "add_argument", "(", "'--package-name'", ")", "elsevier_parser", ".", "add_argument", "(", "'--path'", ")", "elsevier_parser", ".", "add_argument", "(", "'--CONSYN'", ",", "action", "=", "'store_true'", ")", "elsevier_parser", ".", "add_argument", "(", "'--update-credentials'", ",", "action", "=", "'store_true'", ")", "elsevier_parser", ".", "add_argument", "(", "'--extract-nations'", ",", "action", "=", "'store_true'", ")", "oxford_parser", ".", "add_argument", "(", "'--dont-empty-ftp'", ",", "action", "=", "'store_true'", ")", "oxford_parser", ".", "add_argument", "(", "'--package-name'", ")", "oxford_parser", ".", "add_argument", "(", "'--path'", ")", "oxford_parser", ".", "add_argument", "(", "'--update-credentials'", ",", "action", "=", "'store_true'", ")", "oxford_parser", ".", "add_argument", "(", "'--extract-nations'", ",", "action", "=", "'store_true'", ")", "springer_parser", ".", "add_argument", "(", "'--package-name'", ")", "springer_parser", ".", "add_argument", "(", "'--path'", ")", "springer_parser", ".", "add_argument", "(", "'--update-credentials'", ",", "action", "=", "'store_true'", ")", "springer_parser", ".", "add_argument", "(", "'--extract-nations'", ",", "action", "=", "'store_true'", ")", "settings", "=", "Bunch", "(", "vars", "(", "argparser", ".", "parse_args", "(", ")", ")", ")", "call_package", "(", "settings", ")" ]
33a7f8aa9dade1d863110c6d8b27dfd955cb471f
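Illustrative aside (not part of the harvested record): the comment inside main() describes turning the argparse Namespace into a dict via vars() and then into a Bunch so the settings can be read as attributes. The SimpleBunch class below is a hypothetical stand-in for harvestingkit's Bunch, used only to make the pattern runnable on its own.

from argparse import ArgumentParser

class SimpleBunch(dict):
    # Hypothetical stand-in: a dict whose keys are also readable as attributes.
    def __getattr__(self, name):
        try:
            return self[name]
        except KeyError:
            raise AttributeError(name)

parser = ArgumentParser()
parser.add_argument('--path')
parser.add_argument('--package-name')
settings = SimpleBunch(vars(parser.parse_args(['--path', '/tmp/pkg'])))
print(settings.path)          # '/tmp/pkg' -- attribute access instead of settings['path']
print(settings.package_name)  # None -- argparse turns '--package-name' into 'package_name'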
valid
PosPackage.get_record
Reads a DOM XML element in OAI-DC format and returns the bibrecord object
harvestingkit/pos_package.py
def get_record(self, record): """ Reads a dom xml element in oaidc format and returns the bibrecord object """ self.document = record rec = create_record() language = self._get_language() if language and language != 'en': record_add_field(rec, '041', subfields=[('a', language)]) publisher = self._get_publisher() date = self._get_date() if publisher and date: record_add_field(rec, '260', subfields=[('b', publisher), ('c', date)]) elif publisher: record_add_field(rec, '260', subfields=[('b', publisher)]) elif date: record_add_field(rec, '260', subfields=[('c', date)]) title = self._get_title() if title: record_add_field(rec, '245', subfields=[('a', title)]) record_copyright = self._get_copyright() if record_copyright: record_add_field(rec, '540', subfields=[('a', record_copyright)]) subject = self._get_subject() if subject: record_add_field(rec, '650', ind1='1', ind2='7', subfields=[('a', subject), ('2', 'PoS')]) authors = self._get_authors() first_author = True for author in authors: subfields = [('a', author[0])] for affiliation in author[1]: subfields.append(('v', affiliation)) if first_author: record_add_field(rec, '100', subfields=subfields) first_author = False else: record_add_field(rec, '700', subfields=subfields) identifier = self.get_identifier() conference = identifier.split(':')[2] conference = conference.split('/')[0] contribution = identifier.split(':')[2] contribution = contribution.split('/')[1] record_add_field(rec, '773', subfields=[('p', 'PoS'), ('v', conference.replace(' ', '')), ('c', contribution), ('y', date[:4])]) record_add_field(rec, '980', subfields=[('a', 'ConferencePaper')]) record_add_field(rec, '980', subfields=[('a', 'HEP')]) return rec
def get_record(self, record): """ Reads a dom xml element in oaidc format and returns the bibrecord object """ self.document = record rec = create_record() language = self._get_language() if language and language != 'en': record_add_field(rec, '041', subfields=[('a', language)]) publisher = self._get_publisher() date = self._get_date() if publisher and date: record_add_field(rec, '260', subfields=[('b', publisher), ('c', date)]) elif publisher: record_add_field(rec, '260', subfields=[('b', publisher)]) elif date: record_add_field(rec, '260', subfields=[('c', date)]) title = self._get_title() if title: record_add_field(rec, '245', subfields=[('a', title)]) record_copyright = self._get_copyright() if record_copyright: record_add_field(rec, '540', subfields=[('a', record_copyright)]) subject = self._get_subject() if subject: record_add_field(rec, '650', ind1='1', ind2='7', subfields=[('a', subject), ('2', 'PoS')]) authors = self._get_authors() first_author = True for author in authors: subfields = [('a', author[0])] for affiliation in author[1]: subfields.append(('v', affiliation)) if first_author: record_add_field(rec, '100', subfields=subfields) first_author = False else: record_add_field(rec, '700', subfields=subfields) identifier = self.get_identifier() conference = identifier.split(':')[2] conference = conference.split('/')[0] contribution = identifier.split(':')[2] contribution = contribution.split('/')[1] record_add_field(rec, '773', subfields=[('p', 'PoS'), ('v', conference.replace(' ', '')), ('c', contribution), ('y', date[:4])]) record_add_field(rec, '980', subfields=[('a', 'ConferencePaper')]) record_add_field(rec, '980', subfields=[('a', 'HEP')]) return rec
[ "Reads", "a", "dom", "xml", "element", "in", "oaidc", "format", "and", "returns", "the", "bibrecord", "object" ]
inspirehep/harvesting-kit
python
https://github.com/inspirehep/harvesting-kit/blob/33a7f8aa9dade1d863110c6d8b27dfd955cb471f/harvestingkit/pos_package.py#L117-L166
[ "def", "get_record", "(", "self", ",", "record", ")", ":", "self", ".", "document", "=", "record", "rec", "=", "create_record", "(", ")", "language", "=", "self", ".", "_get_language", "(", ")", "if", "language", "and", "language", "!=", "'en'", ":", "record_add_field", "(", "rec", ",", "'041'", ",", "subfields", "=", "[", "(", "'a'", ",", "language", ")", "]", ")", "publisher", "=", "self", ".", "_get_publisher", "(", ")", "date", "=", "self", ".", "_get_date", "(", ")", "if", "publisher", "and", "date", ":", "record_add_field", "(", "rec", ",", "'260'", ",", "subfields", "=", "[", "(", "'b'", ",", "publisher", ")", ",", "(", "'c'", ",", "date", ")", "]", ")", "elif", "publisher", ":", "record_add_field", "(", "rec", ",", "'260'", ",", "subfields", "=", "[", "(", "'b'", ",", "publisher", ")", "]", ")", "elif", "date", ":", "record_add_field", "(", "rec", ",", "'260'", ",", "subfields", "=", "[", "(", "'c'", ",", "date", ")", "]", ")", "title", "=", "self", ".", "_get_title", "(", ")", "if", "title", ":", "record_add_field", "(", "rec", ",", "'245'", ",", "subfields", "=", "[", "(", "'a'", ",", "title", ")", "]", ")", "record_copyright", "=", "self", ".", "_get_copyright", "(", ")", "if", "record_copyright", ":", "record_add_field", "(", "rec", ",", "'540'", ",", "subfields", "=", "[", "(", "'a'", ",", "record_copyright", ")", "]", ")", "subject", "=", "self", ".", "_get_subject", "(", ")", "if", "subject", ":", "record_add_field", "(", "rec", ",", "'650'", ",", "ind1", "=", "'1'", ",", "ind2", "=", "'7'", ",", "subfields", "=", "[", "(", "'a'", ",", "subject", ")", ",", "(", "'2'", ",", "'PoS'", ")", "]", ")", "authors", "=", "self", ".", "_get_authors", "(", ")", "first_author", "=", "True", "for", "author", "in", "authors", ":", "subfields", "=", "[", "(", "'a'", ",", "author", "[", "0", "]", ")", "]", "for", "affiliation", "in", "author", "[", "1", "]", ":", "subfields", ".", "append", "(", "(", "'v'", ",", "affiliation", ")", ")", "if", "first_author", ":", "record_add_field", "(", "rec", ",", "'100'", ",", "subfields", "=", "subfields", ")", "first_author", "=", "False", "else", ":", "record_add_field", "(", "rec", ",", "'700'", ",", "subfields", "=", "subfields", ")", "identifier", "=", "self", ".", "get_identifier", "(", ")", "conference", "=", "identifier", ".", "split", "(", "':'", ")", "[", "2", "]", "conference", "=", "conference", ".", "split", "(", "'/'", ")", "[", "0", "]", "contribution", "=", "identifier", ".", "split", "(", "':'", ")", "[", "2", "]", "contribution", "=", "contribution", ".", "split", "(", "'/'", ")", "[", "1", "]", "record_add_field", "(", "rec", ",", "'773'", ",", "subfields", "=", "[", "(", "'p'", ",", "'PoS'", ")", ",", "(", "'v'", ",", "conference", ".", "replace", "(", "' '", ",", "''", ")", ")", ",", "(", "'c'", ",", "contribution", ")", ",", "(", "'y'", ",", "date", "[", ":", "4", "]", ")", "]", ")", "record_add_field", "(", "rec", ",", "'980'", ",", "subfields", "=", "[", "(", "'a'", ",", "'ConferencePaper'", ")", "]", ")", "record_add_field", "(", "rec", ",", "'980'", ",", "subfields", "=", "[", "(", "'a'", ",", "'HEP'", ")", "]", ")", "return", "rec" ]
33a7f8aa9dade1d863110c6d8b27dfd955cb471f
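Illustrative aside (not part of the harvested record): the identifier handling in get_record takes the third colon-separated piece and splits it on '/' to obtain the conference and the contribution. The identifier string below is made up purely to show that carving step; real PoS identifiers may differ.

identifier = "oai:pos.sissa.it:SampleConf 2014/042"   # hypothetical example value
conference, contribution = identifier.split(':')[2].split('/')
print(conference)                    # 'SampleConf 2014'
print(conference.replace(' ', ''))   # 'SampleConf2014' -- spaces removed, as in the 773 'v' subfield
print(contribution)                  # '042'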
valid
progress
display a progress bar that can update in place example -- total_length = 1000 with echo.progress(total_length) as p: for x in range(total_length): # do something crazy p.update(x) length -- int -- the total size of what you will be updating progress on
captain/echo.py
def progress(length, **kwargs): """display a progress that can update in place example -- total_length = 1000 with echo.progress(total_length) as p: for x in range(total_length): # do something crazy p.update(x) length -- int -- the total size of what you will be updating progress on """ quiet = False progress_class = kwargs.pop("progress_class", Progress) kwargs["write_method"] = istdout.info kwargs["width"] = kwargs.get("width", globals()["WIDTH"]) kwargs["length"] = length pbar = progress_class(**kwargs) pbar.update(0) yield pbar pbar.update(length) br()
def progress(length, **kwargs): """display a progress that can update in place example -- total_length = 1000 with echo.progress(total_length) as p: for x in range(total_length): # do something crazy p.update(x) length -- int -- the total size of what you will be updating progress on """ quiet = False progress_class = kwargs.pop("progress_class", Progress) kwargs["write_method"] = istdout.info kwargs["width"] = kwargs.get("width", globals()["WIDTH"]) kwargs["length"] = length pbar = progress_class(**kwargs) pbar.update(0) yield pbar pbar.update(length) br()
[ "display", "a", "progress", "that", "can", "update", "in", "place" ]
Jaymon/captain
python
https://github.com/Jaymon/captain/blob/4297f32961d423a10d0f053bc252e29fbe939a47/captain/echo.py#L101-L122
[ "def", "progress", "(", "length", ",", "*", "*", "kwargs", ")", ":", "quiet", "=", "False", "progress_class", "=", "kwargs", ".", "pop", "(", "\"progress_class\"", ",", "Progress", ")", "kwargs", "[", "\"write_method\"", "]", "=", "istdout", ".", "info", "kwargs", "[", "\"width\"", "]", "=", "kwargs", ".", "get", "(", "\"width\"", ",", "globals", "(", ")", "[", "\"WIDTH\"", "]", ")", "kwargs", "[", "\"length\"", "]", "=", "length", "pbar", "=", "progress_class", "(", "*", "*", "kwargs", ")", "pbar", ".", "update", "(", "0", ")", "yield", "pbar", "pbar", ".", "update", "(", "length", ")", "br", "(", ")" ]
4297f32961d423a10d0f053bc252e29fbe939a47
valid
increment
Similar to enumerate but will set format_msg.format(n) into the prefix on each iteration :Example: for v in increment(["foo", "bar"]): echo.out(v) # 1. foo\n2. bar :param itr: iterator, any iterator you want to set a numeric prefix on on every iteration :param n: integer, the starting integer for the numeric prefix :param format_msg: string, this will basically do: format_msg.format(n) so there should only be one set of curly brackets :returns: yield generator
captain/echo.py
def increment(itr, n=1, format_msg="{}. "): """Similar to enumerate but will set format_msg.format(n) into the prefix on each iteration :Example: for v in increment(["foo", "bar"]): echo.out(v) # 1. foo\n2. bar :param itr: iterator, any iterator you want to set a numeric prefix on on every iteration :param n: integer, the starting integer for the numeric prefix :param format_msg: string, this will basically do: format_msg.format(n) so there should only be one set of curly brackets :returns: yield generator """ for i, v in enumerate(itr, n): with prefix(format_msg, i): yield v
def increment(itr, n=1, format_msg="{}. "): """Similar to enumerate but will set format_msg.format(n) into the prefix on each iteration :Example: for v in increment(["foo", "bar"]): echo.out(v) # 1. foo\n2. bar :param itr: iterator, any iterator you want to set a numeric prefix on on every iteration :param n: integer, the starting integer for the numeric prefix :param format_msg: string, this will basically do: format_msg.format(n) so there should only be one set of curly brackets :returns: yield generator """ for i, v in enumerate(itr, n): with prefix(format_msg, i): yield v
[ "Similar", "to", "enumerate", "but", "will", "set", "format_msg", ".", "format", "(", "n", ")", "into", "the", "prefix", "on", "each", "iteration" ]
Jaymon/captain
python
https://github.com/Jaymon/captain/blob/4297f32961d423a10d0f053bc252e29fbe939a47/captain/echo.py#L141-L158
[ "def", "increment", "(", "itr", ",", "n", "=", "1", ",", "format_msg", "=", "\"{}. \"", ")", ":", "for", "i", ",", "v", "in", "enumerate", "(", "itr", ",", "n", ")", ":", "with", "prefix", "(", "format_msg", ",", "i", ")", ":", "yield", "v" ]
4297f32961d423a10d0f053bc252e29fbe939a47
valid
err
print format_msg to stderr
captain/echo.py
def err(format_msg, *args, **kwargs): '''print format_msg to stderr''' exc_info = kwargs.pop("exc_info", False) stderr.warning(str(format_msg).format(*args, **kwargs), exc_info=exc_info)
def err(format_msg, *args, **kwargs): '''print format_msg to stderr''' exc_info = kwargs.pop("exc_info", False) stderr.warning(str(format_msg).format(*args, **kwargs), exc_info=exc_info)
[ "print", "format_msg", "to", "stderr" ]
Jaymon/captain
python
https://github.com/Jaymon/captain/blob/4297f32961d423a10d0f053bc252e29fbe939a47/captain/echo.py#L177-L180
[ "def", "err", "(", "format_msg", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "exc_info", "=", "kwargs", ".", "pop", "(", "\"exc_info\"", ",", "False", ")", "stderr", ".", "warning", "(", "str", "(", "format_msg", ")", ".", "format", "(", "*", "args", ",", "*", "*", "kwargs", ")", ",", "exc_info", "=", "exc_info", ")" ]
4297f32961d423a10d0f053bc252e29fbe939a47
valid
out
print format_msg to stdout, taking into account --quiet setting
captain/echo.py
def out(format_msg="", *args, **kwargs): '''print format_msg to stdout, taking into account --quiet setting''' logmethod = kwargs.get("logmethod", stdout.info) if format_msg != "": if Prefix.has(): if isinstance(format_msg, basestring): format_msg = Prefix.get() + format_msg else: format_msg = Prefix.get() + str(format_msg) if isinstance(format_msg, basestring): if args or kwargs: s = format_msg.format(*args, **kwargs) else: s = format_msg logmethod(s) # width = globals()["width"] # s = textwrap.fill(s, width=width) # stdout.info(s) else: logmethod(str(format_msg)) else: logmethod("")
def out(format_msg="", *args, **kwargs): '''print format_msg to stdout, taking into account --quiet setting''' logmethod = kwargs.get("logmethod", stdout.info) if format_msg != "": if Prefix.has(): if isinstance(format_msg, basestring): format_msg = Prefix.get() + format_msg else: format_msg = Prefix.get() + str(format_msg) if isinstance(format_msg, basestring): if args or kwargs: s = format_msg.format(*args, **kwargs) else: s = format_msg logmethod(s) # width = globals()["width"] # s = textwrap.fill(s, width=width) # stdout.info(s) else: logmethod(str(format_msg)) else: logmethod("")
[ "print", "format_msg", "to", "stdout", "taking", "into", "account", "--", "quiet", "setting" ]
Jaymon/captain
python
https://github.com/Jaymon/captain/blob/4297f32961d423a10d0f053bc252e29fbe939a47/captain/echo.py#L197-L222
[ "def", "out", "(", "format_msg", "=", "\"\"", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "logmethod", "=", "kwargs", ".", "get", "(", "\"logmethod\"", ",", "stdout", ".", "info", ")", "if", "format_msg", "!=", "\"\"", ":", "if", "Prefix", ".", "has", "(", ")", ":", "if", "isinstance", "(", "format_msg", ",", "basestring", ")", ":", "format_msg", "=", "Prefix", ".", "get", "(", ")", "+", "format_msg", "else", ":", "format_msg", "=", "Prefix", ".", "get", "(", ")", "+", "str", "(", "format_msg", ")", "if", "isinstance", "(", "format_msg", ",", "basestring", ")", ":", "if", "args", "or", "kwargs", ":", "s", "=", "format_msg", ".", "format", "(", "*", "args", ",", "*", "*", "kwargs", ")", "else", ":", "s", "=", "format_msg", "logmethod", "(", "s", ")", "# width = globals()[\"width\"]", "# s = textwrap.fill(s, width=width)", "# stdout.info(s)", "else", ":", "logmethod", "(", "str", "(", "format_msg", ")", ")", "else", ":", "logmethod", "(", "\"\"", ")" ]
4297f32961d423a10d0f053bc252e29fbe939a47
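Illustrative aside (not part of the harvested record): out() only calls str.format when positional or keyword arguments were actually passed, so a message that happens to contain braces is printed verbatim instead of raising. A minimal standalone reduction of that guard:

def format_like_out(format_msg, *args, **kwargs):
    # Same guard as echo.out: format only when arguments were supplied.
    if args or kwargs:
        return format_msg.format(*args, **kwargs)
    return format_msg

print(format_like_out("processed {} of {} rows", 3, 10))   # processed 3 of 10 rows
print(format_like_out('raw payload: {"a": 1}'))            # braces pass through untouched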
valid
verbose
print format_msg to stdout, taking into account --verbose flag
captain/echo.py
def verbose(format_msg="", *args, **kwargs): '''print format_msg to stdout, taking into account --verbose flag''' kwargs["logmethod"] = stdout.debug out(format_msg, *args, **kwargs)
def verbose(format_msg="", *args, **kwargs): '''print format_msg to stdout, taking into account --verbose flag''' kwargs["logmethod"] = stdout.debug out(format_msg, *args, **kwargs)
[ "print", "format_msg", "to", "stdout", "taking", "into", "account", "--", "verbose", "flag" ]
Jaymon/captain
python
https://github.com/Jaymon/captain/blob/4297f32961d423a10d0f053bc252e29fbe939a47/captain/echo.py#L225-L228
[ "def", "verbose", "(", "format_msg", "=", "\"\"", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "kwargs", "[", "\"logmethod\"", "]", "=", "stdout", ".", "debug", "out", "(", "format_msg", ",", "*", "args", ",", "*", "*", "kwargs", ")" ]
4297f32961d423a10d0f053bc252e29fbe939a47
valid
banner
prints a banner sep -- string -- the character that will be on the line on the top and bottom and before any of the lines, defaults to * width -- integer -- the line width, defaults to 80
captain/echo.py
def banner(*lines, **kwargs): """prints a banner sep -- string -- the character that will be on the line on the top and bottom and before any of the lines, defaults to * count -- integer -- the line width, defaults to 80 """ sep = kwargs.get("sep", "*") count = kwargs.get("width", globals()["WIDTH"]) out(sep * count) if lines: out(sep) for line in lines: out("{} {}".format(sep, line)) out(sep) out(sep * count)
def banner(*lines, **kwargs): """prints a banner sep -- string -- the character that will be on the line on the top and bottom and before any of the lines, defaults to * count -- integer -- the line width, defaults to 80 """ sep = kwargs.get("sep", "*") count = kwargs.get("width", globals()["WIDTH"]) out(sep * count) if lines: out(sep) for line in lines: out("{} {}".format(sep, line)) out(sep) out(sep * count)
[ "prints", "a", "banner" ]
Jaymon/captain
python
https://github.com/Jaymon/captain/blob/4297f32961d423a10d0f053bc252e29fbe939a47/captain/echo.py#L321-L339
[ "def", "banner", "(", "*", "lines", ",", "*", "*", "kwargs", ")", ":", "sep", "=", "kwargs", ".", "get", "(", "\"sep\"", ",", "\"*\"", ")", "count", "=", "kwargs", ".", "get", "(", "\"width\"", ",", "globals", "(", ")", "[", "\"WIDTH\"", "]", ")", "out", "(", "sep", "*", "count", ")", "if", "lines", ":", "out", "(", "sep", ")", "for", "line", "in", "lines", ":", "out", "(", "\"{} {}\"", ".", "format", "(", "sep", ",", "line", ")", ")", "out", "(", "sep", ")", "out", "(", "sep", "*", "count", ")" ]
4297f32961d423a10d0f053bc252e29fbe939a47
valid
table
format columned data so we can easily print it out on a console, this just takes columns of data and it will format it into properly aligned columns, it's not fancy, but it works for most type of strings that I need it for, like server name lists. other formatting options: http://stackoverflow.com/a/8234511/5006 other packages that probably do this way better: https://stackoverflow.com/a/26937531/5006 :Example: >>> echo.table([(1, 2), (3, 4), (5, 6), (7, 8), (9, 0)]) 1 2 3 4 5 6 7 8 9 0 >>> echo.table([1, 3, 5, 7, 9], [2, 4, 6, 8, 0]) 1 2 3 4 5 6 7 8 9 0 :param *columns: can either be a list of rows or multiple lists representing each column in the table :param **kwargs: dict prefix -- string -- what you want before each row (eg, a tab) buf_count -- integer -- how many spaces between longest col value and its neighbor headers -- list -- the headers you want, must match column count widths -- list -- the widths of each column you want to use, this doesn't have to match column count, so you can do something like [0, 5] to set the width of the second column width -- int -- similar to widths except it will set this value for all columns
captain/echo.py
def table(*columns, **kwargs): """ format columned data so we can easily print it out on a console, this just takes columns of data and it will format it into properly aligned columns, it's not fancy, but it works for most type of strings that I need it for, like server name lists. other formatting options: http://stackoverflow.com/a/8234511/5006 other packages that probably do this way better: https://stackoverflow.com/a/26937531/5006 :Example: >>> echo.table([(1, 2), (3, 4), (5, 6), (7, 8), (9, 0)]) 1 2 3 4 5 6 7 8 9 0 >>> echo.table([1, 3, 5, 7, 9], [2, 4, 6, 8, 0]) 1 2 3 4 5 6 7 8 9 0 :param *columns: can either be a list of rows or multiple lists representing each column in the table :param **kwargs: dict prefix -- string -- what you want before each row (eg, a tab) buf_count -- integer -- how many spaces between longest col value and its neighbor headers -- list -- the headers you want, must match column count widths -- list -- the widths of each column you want to use, this doesn't have to match column count, so you can do something like [0, 5] to set the width of the second column width -- int -- similar to widths except it will set this value for all columns """ ret = [] prefix = kwargs.get('prefix', '') buf_count = kwargs.get('buf_count', 2) if len(columns) == 1: columns = list(columns[0]) else: # without the list the zip iterator gets spent, I'm sure I can make this # better columns = list(zip(*columns)) headers = kwargs.get("headers", []) if headers: columns.insert(0, headers) # we have to go through all the rows and calculate the length of each # column of each row widths = kwargs.get("widths", []) row_counts = Counter() for i in range(len(widths)): row_counts[i] = int(widths[i]) width = int(kwargs.get("width", 0)) for row in columns: for i, c in enumerate(row): if isinstance(c, basestring): cl = len(c) else: cl = len(str(c)) if cl > row_counts[i]: row_counts[i] = cl width = int(kwargs.get("width", 0)) if width: for i in row_counts: if row_counts[i] < width: row_counts[i] = width # actually go through and format each row def colstr(c): if isinstance(c, basestring): return c return str(c) def rowstr(row, prefix, row_counts): row_format = prefix cols = list(map(colstr, row)) for i in range(len(row_counts)): c = cols[i] # build the format string for each row, we use the row_counts found # above to decide how much padding each column should get # https://stackoverflow.com/a/9536084/5006 if re.match(r"^\d+(?:\.\d+)?$", c): if i == 0: row_format += "{:>" + str(row_counts[i]) + "}" else: row_format += "{:>" + str(row_counts[i] + buf_count) + "}" else: row_format += "{:<" + str(row_counts[i] + buf_count) + "}" return row_format.format(*cols) for row in columns: ret.append(rowstr(row, prefix, row_counts)) out(os.linesep.join(ret))
def table(*columns, **kwargs): """ format columned data so we can easily print it out on a console, this just takes columns of data and it will format it into properly aligned columns, it's not fancy, but it works for most type of strings that I need it for, like server name lists. other formatting options: http://stackoverflow.com/a/8234511/5006 other packages that probably do this way better: https://stackoverflow.com/a/26937531/5006 :Example: >>> echo.table([(1, 2), (3, 4), (5, 6), (7, 8), (9, 0)]) 1 2 3 4 5 6 7 8 9 0 >>> echo.table([1, 3, 5, 7, 9], [2, 4, 6, 8, 0]) 1 2 3 4 5 6 7 8 9 0 :param *columns: can either be a list of rows or multiple lists representing each column in the table :param **kwargs: dict prefix -- string -- what you want before each row (eg, a tab) buf_count -- integer -- how many spaces between longest col value and its neighbor headers -- list -- the headers you want, must match column count widths -- list -- the widths of each column you want to use, this doesn't have to match column count, so you can do something like [0, 5] to set the width of the second column width -- int -- similar to widths except it will set this value for all columns """ ret = [] prefix = kwargs.get('prefix', '') buf_count = kwargs.get('buf_count', 2) if len(columns) == 1: columns = list(columns[0]) else: # without the list the zip iterator gets spent, I'm sure I can make this # better columns = list(zip(*columns)) headers = kwargs.get("headers", []) if headers: columns.insert(0, headers) # we have to go through all the rows and calculate the length of each # column of each row widths = kwargs.get("widths", []) row_counts = Counter() for i in range(len(widths)): row_counts[i] = int(widths[i]) width = int(kwargs.get("width", 0)) for row in columns: for i, c in enumerate(row): if isinstance(c, basestring): cl = len(c) else: cl = len(str(c)) if cl > row_counts[i]: row_counts[i] = cl width = int(kwargs.get("width", 0)) if width: for i in row_counts: if row_counts[i] < width: row_counts[i] = width # actually go through and format each row def colstr(c): if isinstance(c, basestring): return c return str(c) def rowstr(row, prefix, row_counts): row_format = prefix cols = list(map(colstr, row)) for i in range(len(row_counts)): c = cols[i] # build the format string for each row, we use the row_counts found # above to decide how much padding each column should get # https://stackoverflow.com/a/9536084/5006 if re.match(r"^\d+(?:\.\d+)?$", c): if i == 0: row_format += "{:>" + str(row_counts[i]) + "}" else: row_format += "{:>" + str(row_counts[i] + buf_count) + "}" else: row_format += "{:<" + str(row_counts[i] + buf_count) + "}" return row_format.format(*cols) for row in columns: ret.append(rowstr(row, prefix, row_counts)) out(os.linesep.join(ret))
[ "format", "columned", "data", "so", "we", "can", "easily", "print", "it", "out", "on", "a", "console", "this", "just", "takes", "columns", "of", "data", "and", "it", "will", "format", "it", "into", "properly", "aligned", "columns", "it", "s", "not", "fancy", "but", "it", "works", "for", "most", "type", "of", "strings", "that", "I", "need", "it", "for", "like", "server", "name", "lists", "." ]
Jaymon/captain
python
https://github.com/Jaymon/captain/blob/4297f32961d423a10d0f053bc252e29fbe939a47/captain/echo.py#L353-L454
[ "def", "table", "(", "*", "columns", ",", "*", "*", "kwargs", ")", ":", "ret", "=", "[", "]", "prefix", "=", "kwargs", ".", "get", "(", "'prefix'", ",", "''", ")", "buf_count", "=", "kwargs", ".", "get", "(", "'buf_count'", ",", "2", ")", "if", "len", "(", "columns", ")", "==", "1", ":", "columns", "=", "list", "(", "columns", "[", "0", "]", ")", "else", ":", "# without the list the zip iterator gets spent, I'm sure I can make this", "# better", "columns", "=", "list", "(", "zip", "(", "*", "columns", ")", ")", "headers", "=", "kwargs", ".", "get", "(", "\"headers\"", ",", "[", "]", ")", "if", "headers", ":", "columns", ".", "insert", "(", "0", ",", "headers", ")", "# we have to go through all the rows and calculate the length of each", "# column of each row", "widths", "=", "kwargs", ".", "get", "(", "\"widths\"", ",", "[", "]", ")", "row_counts", "=", "Counter", "(", ")", "for", "i", "in", "range", "(", "len", "(", "widths", ")", ")", ":", "row_counts", "[", "i", "]", "=", "int", "(", "widths", "[", "i", "]", ")", "width", "=", "int", "(", "kwargs", ".", "get", "(", "\"width\"", ",", "0", ")", ")", "for", "row", "in", "columns", ":", "for", "i", ",", "c", "in", "enumerate", "(", "row", ")", ":", "if", "isinstance", "(", "c", ",", "basestring", ")", ":", "cl", "=", "len", "(", "c", ")", "else", ":", "cl", "=", "len", "(", "str", "(", "c", ")", ")", "if", "cl", ">", "row_counts", "[", "i", "]", ":", "row_counts", "[", "i", "]", "=", "cl", "width", "=", "int", "(", "kwargs", ".", "get", "(", "\"width\"", ",", "0", ")", ")", "if", "width", ":", "for", "i", "in", "row_counts", ":", "if", "row_counts", "[", "i", "]", "<", "width", ":", "row_counts", "[", "i", "]", "=", "width", "# actually go through and format each row", "def", "colstr", "(", "c", ")", ":", "if", "isinstance", "(", "c", ",", "basestring", ")", ":", "return", "c", "return", "str", "(", "c", ")", "def", "rowstr", "(", "row", ",", "prefix", ",", "row_counts", ")", ":", "row_format", "=", "prefix", "cols", "=", "list", "(", "map", "(", "colstr", ",", "row", ")", ")", "for", "i", "in", "range", "(", "len", "(", "row_counts", ")", ")", ":", "c", "=", "cols", "[", "i", "]", "# build the format string for each row, we use the row_counts found", "# above to decide how much padding each column should get", "# https://stackoverflow.com/a/9536084/5006", "if", "re", ".", "match", "(", "r\"^\\d+(?:\\.\\d+)?$\"", ",", "c", ")", ":", "if", "i", "==", "0", ":", "row_format", "+=", "\"{:>\"", "+", "str", "(", "row_counts", "[", "i", "]", ")", "+", "\"}\"", "else", ":", "row_format", "+=", "\"{:>\"", "+", "str", "(", "row_counts", "[", "i", "]", "+", "buf_count", ")", "+", "\"}\"", "else", ":", "row_format", "+=", "\"{:<\"", "+", "str", "(", "row_counts", "[", "i", "]", "+", "buf_count", ")", "+", "\"}\"", "return", "row_format", ".", "format", "(", "*", "cols", ")", "for", "row", "in", "columns", ":", "ret", ".", "append", "(", "rowstr", "(", "row", ",", "prefix", ",", "row_counts", ")", ")", "out", "(", "os", ".", "linesep", ".", "join", "(", "ret", ")", ")" ]
4297f32961d423a10d0f053bc252e29fbe939a47
valid
prompt
echo a prompt to the user and wait for an answer question -- string -- the prompt for the user choices -- list -- if given, only exit when the answer matches one of the choices return -- string -- the answer that was given by the user
captain/echo.py
def prompt(question, choices=None): """echo a prompt to the user and wait for an answer question -- string -- the prompt for the user choices -- list -- if given, only exit when prompt matches one of the choices return -- string -- the answer that was given by the user """ if not re.match("\s$", question): question = "{}: ".format(question) while True: if sys.version_info[0] > 2: answer = input(question) else: answer = raw_input(question) if not choices or answer in choices: break return answer
def prompt(question, choices=None): """echo a prompt to the user and wait for an answer question -- string -- the prompt for the user choices -- list -- if given, only exit when prompt matches one of the choices return -- string -- the answer that was given by the user """ if not re.match("\s$", question): question = "{}: ".format(question) while True: if sys.version_info[0] > 2: answer = input(question) else: answer = raw_input(question) if not choices or answer in choices: break return answer
[ "echo", "a", "prompt", "to", "the", "user", "and", "wait", "for", "an", "answer" ]
Jaymon/captain
python
https://github.com/Jaymon/captain/blob/4297f32961d423a10d0f053bc252e29fbe939a47/captain/echo.py#L458-L479
[ "def", "prompt", "(", "question", ",", "choices", "=", "None", ")", ":", "if", "not", "re", ".", "match", "(", "\"\\s$\"", ",", "question", ")", ":", "question", "=", "\"{}: \"", ".", "format", "(", "question", ")", "while", "True", ":", "if", "sys", ".", "version_info", "[", "0", "]", ">", "2", ":", "answer", "=", "input", "(", "question", ")", "else", ":", "answer", "=", "raw_input", "(", "question", ")", "if", "not", "choices", "or", "answer", "in", "choices", ":", "break", "return", "answer" ]
4297f32961d423a10d0f053bc252e29fbe939a47
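Illustrative aside (not part of the harvested record): prompt() keeps asking until the typed answer is one of the allowed choices, or accepts anything when choices is None. The helper below is a hypothetical non-interactive variant that reads from a list instead of stdin, kept only to show the accept/retry rule without blocking on input().

def prompt_from(answers, choices=None):
    # Hypothetical stand-in for echo.prompt: same loop, scripted answers.
    answers = iter(answers)
    while True:
        answer = next(answers)
        if not choices or answer in choices:
            return answer

print(prompt_from(["maybe", "later", "y"], choices=["y", "n"]))   # 'y' -- first two rejected
print(prompt_from(["anything"]))                                  # 'anything' -- no choices given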
valid
SpringerCrawler.get_records
Returns the records listed in the webpage given as a parameter, as an XML string. @param url: the URL of the Journal, Book, Protocol or Reference work
harvestingkit/springer_crawler.py
def get_records(self, url): """ Returns the records listed in the webpage given as parameter as a xml String. @param url: the url of the Journal, Book, Protocol or Reference work """ page = urllib2.urlopen(url) pages = [BeautifulSoup(page)] #content spread over several pages? numpag = pages[0].body.findAll('span', attrs={'class': 'number-of-pages'}) if len(numpag) > 0: if re.search('^\d+$', numpag[0].string): for i in range(int(numpag[0].string)-1): page = urllib2.urlopen('%s/page/%i' % (url, i+2)) pages.append(BeautifulSoup(page)) else: print("number of pages %s not an integer" % (numpag[0].string)) impl = getDOMImplementation() doc = impl.createDocument(None, "collection", None) links = [] for page in pages: links += page.body.findAll('p', attrs={'class': 'title'}) links += page.body.findAll('h3', attrs={'class': 'title'}) for link in links: record = self._get_record(link) doc.firstChild.appendChild(record) return doc.toprettyxml()
def get_records(self, url): """ Returns the records listed in the webpage given as parameter as a xml String. @param url: the url of the Journal, Book, Protocol or Reference work """ page = urllib2.urlopen(url) pages = [BeautifulSoup(page)] #content spread over several pages? numpag = pages[0].body.findAll('span', attrs={'class': 'number-of-pages'}) if len(numpag) > 0: if re.search('^\d+$', numpag[0].string): for i in range(int(numpag[0].string)-1): page = urllib2.urlopen('%s/page/%i' % (url, i+2)) pages.append(BeautifulSoup(page)) else: print("number of pages %s not an integer" % (numpag[0].string)) impl = getDOMImplementation() doc = impl.createDocument(None, "collection", None) links = [] for page in pages: links += page.body.findAll('p', attrs={'class': 'title'}) links += page.body.findAll('h3', attrs={'class': 'title'}) for link in links: record = self._get_record(link) doc.firstChild.appendChild(record) return doc.toprettyxml()
[ "Returns", "the", "records", "listed", "in", "the", "webpage", "given", "as", "parameter", "as", "a", "xml", "String", "." ]
inspirehep/harvesting-kit
python
https://github.com/inspirehep/harvesting-kit/blob/33a7f8aa9dade1d863110c6d8b27dfd955cb471f/harvestingkit/springer_crawler.py#L55-L82
[ "def", "get_records", "(", "self", ",", "url", ")", ":", "page", "=", "urllib2", ".", "urlopen", "(", "url", ")", "pages", "=", "[", "BeautifulSoup", "(", "page", ")", "]", "#content spread over several pages?", "numpag", "=", "pages", "[", "0", "]", ".", "body", ".", "findAll", "(", "'span'", ",", "attrs", "=", "{", "'class'", ":", "'number-of-pages'", "}", ")", "if", "len", "(", "numpag", ")", ">", "0", ":", "if", "re", ".", "search", "(", "'^\\d+$'", ",", "numpag", "[", "0", "]", ".", "string", ")", ":", "for", "i", "in", "range", "(", "int", "(", "numpag", "[", "0", "]", ".", "string", ")", "-", "1", ")", ":", "page", "=", "urllib2", ".", "urlopen", "(", "'%s/page/%i'", "%", "(", "url", ",", "i", "+", "2", ")", ")", "pages", ".", "append", "(", "BeautifulSoup", "(", "page", ")", ")", "else", ":", "print", "(", "\"number of pages %s not an integer\"", "%", "(", "numpag", "[", "0", "]", ".", "string", ")", ")", "impl", "=", "getDOMImplementation", "(", ")", "doc", "=", "impl", ".", "createDocument", "(", "None", ",", "\"collection\"", ",", "None", ")", "links", "=", "[", "]", "for", "page", "in", "pages", ":", "links", "+=", "page", ".", "body", ".", "findAll", "(", "'p'", ",", "attrs", "=", "{", "'class'", ":", "'title'", "}", ")", "links", "+=", "page", ".", "body", ".", "findAll", "(", "'h3'", ",", "attrs", "=", "{", "'class'", ":", "'title'", "}", ")", "for", "link", "in", "links", ":", "record", "=", "self", ".", "_get_record", "(", "link", ")", "doc", ".", "firstChild", ".", "appendChild", "(", "record", ")", "return", "doc", ".", "toprettyxml", "(", ")" ]
33a7f8aa9dade1d863110c6d8b27dfd955cb471f
valid
inject_quiet
see --quiet flag help for what this does
captain/logging.py
def inject_quiet(levels): """see --quiet flag help for what this does""" loggers = list(Logger.manager.loggerDict.items()) loggers.append(("root", getLogger())) level_filter = LevelFilter(levels) for logger_name, logger in loggers: for handler in getattr(logger, "handlers", []): handler.addFilter(level_filter)
def inject_quiet(levels): """see --quiet flag help for what this does""" loggers = list(Logger.manager.loggerDict.items()) loggers.append(("root", getLogger())) level_filter = LevelFilter(levels) for logger_name, logger in loggers: for handler in getattr(logger, "handlers", []): handler.addFilter(level_filter)
[ "see", "--", "quiet", "flag", "help", "for", "what", "this", "does" ]
Jaymon/captain
python
https://github.com/Jaymon/captain/blob/4297f32961d423a10d0f053bc252e29fbe939a47/captain/logging.py#L55-L63
[ "def", "inject_quiet", "(", "levels", ")", ":", "loggers", "=", "list", "(", "Logger", ".", "manager", ".", "loggerDict", ".", "items", "(", ")", ")", "loggers", ".", "append", "(", "(", "\"root\"", ",", "getLogger", "(", ")", ")", ")", "level_filter", "=", "LevelFilter", "(", "levels", ")", "for", "logger_name", ",", "logger", "in", "loggers", ":", "for", "handler", "in", "getattr", "(", "logger", ",", "\"handlers\"", ",", "[", "]", ")", ":", "handler", ".", "addFilter", "(", "level_filter", ")" ]
4297f32961d423a10d0f053bc252e29fbe939a47
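Illustrative aside (not part of the harvested record): inject_quiet walks every known logger and attaches a filter to each of its handlers. The DropLevels filter below is a hypothetical sketch built on the standard logging module; it is not captain's LevelFilter and may not match its exact semantics, but it shows what adding a filter to a handler accomplishes.

import logging

class DropLevels(logging.Filter):
    # Hypothetical filter: records at any of the given levels are silenced.
    def __init__(self, levels):
        super(DropLevels, self).__init__()
        self.levels = set(levels)

    def filter(self, record):
        return record.levelno not in self.levels

handler = logging.StreamHandler()
handler.addFilter(DropLevels([logging.DEBUG, logging.INFO]))
logger = logging.getLogger("demo")
logger.addHandler(handler)
logger.setLevel(logging.DEBUG)
logger.info("hidden by the filter")
logger.warning("still shown")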
valid
OxfordPackage.connect
Logs into the specified ftp server and returns connector.
harvestingkit/oup_package.py
def connect(self): """Logs into the specified ftp server and returns connector.""" for tried_connection_count in range(CFG_FTP_CONNECTION_ATTEMPTS): try: self.ftp = FtpHandler(self.config.OXFORD.URL, self.config.OXFORD.LOGIN, self.config.OXFORD.PASSWORD) self.logger.debug(("Successful connection to the " "Oxford University Press server")) return except socket_timeout_exception as err: self.logger.error(('Failed to connect %d of %d times. ' 'Will sleep for %d seconds and try again.') % (tried_connection_count+1, CFG_FTP_CONNECTION_ATTEMPTS, CFG_FTP_TIMEOUT_SLEEP_DURATION)) time.sleep(CFG_FTP_TIMEOUT_SLEEP_DURATION) except Exception as err: self.logger.error(('Failed to connect to the Oxford ' 'University Press server. %s') % (err,)) break raise LoginException(err)
def connect(self): """Logs into the specified ftp server and returns connector.""" for tried_connection_count in range(CFG_FTP_CONNECTION_ATTEMPTS): try: self.ftp = FtpHandler(self.config.OXFORD.URL, self.config.OXFORD.LOGIN, self.config.OXFORD.PASSWORD) self.logger.debug(("Successful connection to the " "Oxford University Press server")) return except socket_timeout_exception as err: self.logger.error(('Failed to connect %d of %d times. ' 'Will sleep for %d seconds and try again.') % (tried_connection_count+1, CFG_FTP_CONNECTION_ATTEMPTS, CFG_FTP_TIMEOUT_SLEEP_DURATION)) time.sleep(CFG_FTP_TIMEOUT_SLEEP_DURATION) except Exception as err: self.logger.error(('Failed to connect to the Oxford ' 'University Press server. %s') % (err,)) break raise LoginException(err)
[ "Logs", "into", "the", "specified", "ftp", "server", "and", "returns", "connector", "." ]
inspirehep/harvesting-kit
python
https://github.com/inspirehep/harvesting-kit/blob/33a7f8aa9dade1d863110c6d8b27dfd955cb471f/harvestingkit/oup_package.py#L79-L101
[ "def", "connect", "(", "self", ")", ":", "for", "tried_connection_count", "in", "range", "(", "CFG_FTP_CONNECTION_ATTEMPTS", ")", ":", "try", ":", "self", ".", "ftp", "=", "FtpHandler", "(", "self", ".", "config", ".", "OXFORD", ".", "URL", ",", "self", ".", "config", ".", "OXFORD", ".", "LOGIN", ",", "self", ".", "config", ".", "OXFORD", ".", "PASSWORD", ")", "self", ".", "logger", ".", "debug", "(", "(", "\"Successful connection to the \"", "\"Oxford University Press server\"", ")", ")", "return", "except", "socket_timeout_exception", "as", "err", ":", "self", ".", "logger", ".", "error", "(", "(", "'Failed to connect %d of %d times. '", "'Will sleep for %d seconds and try again.'", ")", "%", "(", "tried_connection_count", "+", "1", ",", "CFG_FTP_CONNECTION_ATTEMPTS", ",", "CFG_FTP_TIMEOUT_SLEEP_DURATION", ")", ")", "time", ".", "sleep", "(", "CFG_FTP_TIMEOUT_SLEEP_DURATION", ")", "except", "Exception", "as", "err", ":", "self", ".", "logger", ".", "error", "(", "(", "'Failed to connect to the Oxford '", "'University Press server. %s'", ")", "%", "(", "err", ",", ")", ")", "break", "raise", "LoginException", "(", "err", ")" ]
33a7f8aa9dade1d863110c6d8b27dfd955cb471f
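Illustrative aside (not part of the harvested record): connect() retries the FTP login a fixed number of times, sleeping between socket timeouts, and only raises LoginException after the loop is exhausted. The sketch below is a hypothetical, generic version of that shape; it is written for Python 3, where the name bound by "except ... as err" is cleared when the handler exits, so the last error is kept in a separate variable before being re-raised.

import time

def connect_with_retries(connect_once, attempts=3, pause=1):
    # Hypothetical retry wrapper: connect_once is any callable that either
    # returns a connection or raises OSError (socket.timeout is an OSError).
    last_error = None
    for attempt in range(attempts):
        try:
            return connect_once()
        except OSError as err:
            last_error = err
            time.sleep(pause)
    raise RuntimeError("giving up after %d attempts" % attempts) from last_error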
valid
OxfordPackage._extract_packages
Extract a package in a new directory.
harvestingkit/oup_package.py
def _extract_packages(self): """ Extract a package in a new directory. """ if not hasattr(self, "retrieved_packages_unpacked"): self.retrieved_packages_unpacked = [self.package_name] for path in self.retrieved_packages_unpacked: package_name = basename(path) self.path_unpacked = join(CFG_UNPACKED_FILES, package_name.split('.')[0]) self.logger.debug("Extracting package: %s" % (path.split("/")[-1],)) try: if "_archival_pdf" in self.path_unpacked: self.path_unpacked = (self.path_unpacked .rstrip("_archival_pdf")) ZipFile(path).extractall(join(self.path_unpacked, "archival_pdfs")) else: ZipFile(path).extractall(self.path_unpacked) #TarFile.open(path).extractall(self.path_unpacked) except Exception: register_exception(alert_admin=True, prefix="OUP error extracting package.") self.logger.error("Error extraction package file: %s" % (path,)) if hasattr(self, "path_unpacked"): return self.path_unpacked
def _extract_packages(self): """ Extract a package in a new directory. """ if not hasattr(self, "retrieved_packages_unpacked"): self.retrieved_packages_unpacked = [self.package_name] for path in self.retrieved_packages_unpacked: package_name = basename(path) self.path_unpacked = join(CFG_UNPACKED_FILES, package_name.split('.')[0]) self.logger.debug("Extracting package: %s" % (path.split("/")[-1],)) try: if "_archival_pdf" in self.path_unpacked: self.path_unpacked = (self.path_unpacked .rstrip("_archival_pdf")) ZipFile(path).extractall(join(self.path_unpacked, "archival_pdfs")) else: ZipFile(path).extractall(self.path_unpacked) #TarFile.open(path).extractall(self.path_unpacked) except Exception: register_exception(alert_admin=True, prefix="OUP error extracting package.") self.logger.error("Error extraction package file: %s" % (path,)) if hasattr(self, "path_unpacked"): return self.path_unpacked
[ "Extract", "a", "package", "in", "a", "new", "directory", "." ]
inspirehep/harvesting-kit
python
https://github.com/inspirehep/harvesting-kit/blob/33a7f8aa9dade1d863110c6d8b27dfd955cb471f/harvestingkit/oup_package.py#L197-L225
[ "def", "_extract_packages", "(", "self", ")", ":", "if", "not", "hasattr", "(", "self", ",", "\"retrieved_packages_unpacked\"", ")", ":", "self", ".", "retrieved_packages_unpacked", "=", "[", "self", ".", "package_name", "]", "for", "path", "in", "self", ".", "retrieved_packages_unpacked", ":", "package_name", "=", "basename", "(", "path", ")", "self", ".", "path_unpacked", "=", "join", "(", "CFG_UNPACKED_FILES", ",", "package_name", ".", "split", "(", "'.'", ")", "[", "0", "]", ")", "self", ".", "logger", ".", "debug", "(", "\"Extracting package: %s\"", "%", "(", "path", ".", "split", "(", "\"/\"", ")", "[", "-", "1", "]", ",", ")", ")", "try", ":", "if", "\"_archival_pdf\"", "in", "self", ".", "path_unpacked", ":", "self", ".", "path_unpacked", "=", "(", "self", ".", "path_unpacked", ".", "rstrip", "(", "\"_archival_pdf\"", ")", ")", "ZipFile", "(", "path", ")", ".", "extractall", "(", "join", "(", "self", ".", "path_unpacked", ",", "\"archival_pdfs\"", ")", ")", "else", ":", "ZipFile", "(", "path", ")", ".", "extractall", "(", "self", ".", "path_unpacked", ")", "#TarFile.open(path).extractall(self.path_unpacked)", "except", "Exception", ":", "register_exception", "(", "alert_admin", "=", "True", ",", "prefix", "=", "\"OUP error extracting package.\"", ")", "self", ".", "logger", ".", "error", "(", "\"Error extraction package file: %s\"", "%", "(", "path", ",", ")", ")", "if", "hasattr", "(", "self", ",", "\"path_unpacked\"", ")", ":", "return", "self", ".", "path_unpacked" ]
33a7f8aa9dade1d863110c6d8b27dfd955cb471f
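Illustrative aside (not part of the harvested record): the rstrip("_archival_pdf") call above strips a set of characters, not a literal suffix, so it can also eat trailing letters of the package name itself. The snippet below only demonstrates that str.rstrip behaviour; the suffix-aware alternative shown is a generic idiom, not a claim about how harvestingkit should behave.

name = "data_archival_pdf"
print(name.rstrip("_archival_pdf"))    # 'dat' -- every trailing char in the set is removed
suffix = "_archival_pdf"
print(name[:-len(suffix)] if name.endswith(suffix) else name)   # 'data' -- only the suffix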
valid
OxfordPackage._crawl_oxford_and_find_main_xml
A package contains several subdirectories, one corresponding to each article. An article is identified by the existence of a main.pdf and a main.xml in a given directory.
harvestingkit/oup_package.py
def _crawl_oxford_and_find_main_xml(self): """ A package contains several subdirectory corresponding to each article. An article is actually identified by the existence of a main.pdf and a main.xml in a given directory. """ self.found_articles = [] def visit(arg, dirname, names): files = [filename for filename in names if ".xml" in filename] if files: try: for f in files: self.found_articles.append(join(dirname, f)) except Exception as err: register_exception() print("ERROR: can't normalize %s: %s" % (dirname, err), file=sys.stderr) if hasattr(self, 'path_unpacked'): walk(self.path_unpacked, visit, None) elif self.path: walk(self.path, visit, None) else: self.logger.info("Nothing to do.")
def _crawl_oxford_and_find_main_xml(self): """ A package contains several subdirectory corresponding to each article. An article is actually identified by the existence of a main.pdf and a main.xml in a given directory. """ self.found_articles = [] def visit(arg, dirname, names): files = [filename for filename in names if ".xml" in filename] if files: try: for f in files: self.found_articles.append(join(dirname, f)) except Exception as err: register_exception() print("ERROR: can't normalize %s: %s" % (dirname, err), file=sys.stderr) if hasattr(self, 'path_unpacked'): walk(self.path_unpacked, visit, None) elif self.path: walk(self.path, visit, None) else: self.logger.info("Nothing to do.")
[ "A", "package", "contains", "several", "subdirectory", "corresponding", "to", "each", "article", ".", "An", "article", "is", "actually", "identified", "by", "the", "existence", "of", "a", "main", ".", "pdf", "and", "a", "main", ".", "xml", "in", "a", "given", "directory", "." ]
inspirehep/harvesting-kit
python
https://github.com/inspirehep/harvesting-kit/blob/33a7f8aa9dade1d863110c6d8b27dfd955cb471f/harvestingkit/oup_package.py#L227-L252
[ "def", "_crawl_oxford_and_find_main_xml", "(", "self", ")", ":", "self", ".", "found_articles", "=", "[", "]", "def", "visit", "(", "arg", ",", "dirname", ",", "names", ")", ":", "files", "=", "[", "filename", "for", "filename", "in", "names", "if", "\".xml\"", "in", "filename", "]", "if", "files", ":", "try", ":", "for", "f", "in", "files", ":", "self", ".", "found_articles", ".", "append", "(", "join", "(", "dirname", ",", "f", ")", ")", "except", "Exception", "as", "err", ":", "register_exception", "(", ")", "print", "(", "\"ERROR: can't normalize %s: %s\"", "%", "(", "dirname", ",", "err", ")", ",", "file", "=", "sys", ".", "stderr", ")", "if", "hasattr", "(", "self", ",", "'path_unpacked'", ")", ":", "walk", "(", "self", ".", "path_unpacked", ",", "visit", ",", "None", ")", "elif", "self", ".", "path", ":", "walk", "(", "self", ".", "path", ",", "visit", ",", "None", ")", "else", ":", "self", ".", "logger", ".", "info", "(", "\"Nothing to do.\"", ")" ]
33a7f8aa9dade1d863110c6d8b27dfd955cb471f
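Illustrative aside (not part of the harvested record): the walk(path, visit, None) call above matches the Python 2 os.path.walk visitor API. A rough Python 3 equivalent of the same file collection, using os.walk and a hypothetical directory path:

import os
from os.path import join

found_articles = []
for dirname, _dirs, names in os.walk("/tmp/unpacked_package"):   # hypothetical path
    found_articles.extend(join(dirname, f) for f in names if ".xml" in f)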
valid
NuHeatThermostat.get_data
Fetch/refresh the current instance's data from the NuHeat API
nuheat/thermostat.py
def get_data(self): """ Fetch/refresh the current instance's data from the NuHeat API """ params = { "serialnumber": self.serial_number } data = self._session.request(config.THERMOSTAT_URL, params=params) self._data = data self.heating = data.get("Heating") self.online = data.get("Online") self.room = data.get("Room") self.serial_number = data.get("SerialNumber") self.temperature = data.get("Temperature") self.min_temperature = data.get("MinTemp") self.max_temperature = data.get("MaxTemp") self.target_temperature = data.get("SetPointTemp") self._schedule_mode = data.get("ScheduleMode")
def get_data(self): """ Fetch/refresh the current instance's data from the NuHeat API """ params = { "serialnumber": self.serial_number } data = self._session.request(config.THERMOSTAT_URL, params=params) self._data = data self.heating = data.get("Heating") self.online = data.get("Online") self.room = data.get("Room") self.serial_number = data.get("SerialNumber") self.temperature = data.get("Temperature") self.min_temperature = data.get("MinTemp") self.max_temperature = data.get("MaxTemp") self.target_temperature = data.get("SetPointTemp") self._schedule_mode = data.get("ScheduleMode")
[ "Fetch", "/", "refresh", "the", "current", "instance", "s", "data", "from", "the", "NuHeat", "API" ]
broox/python-nuheat
python
https://github.com/broox/python-nuheat/blob/3a18852dc9465c34cb96eb3a0c84f1a6caa70707/nuheat/thermostat.py#L133-L152
[ "def", "get_data", "(", "self", ")", ":", "params", "=", "{", "\"serialnumber\"", ":", "self", ".", "serial_number", "}", "data", "=", "self", ".", "_session", ".", "request", "(", "config", ".", "THERMOSTAT_URL", ",", "params", "=", "params", ")", "self", ".", "_data", "=", "data", "self", ".", "heating", "=", "data", ".", "get", "(", "\"Heating\"", ")", "self", ".", "online", "=", "data", ".", "get", "(", "\"Online\"", ")", "self", ".", "room", "=", "data", ".", "get", "(", "\"Room\"", ")", "self", ".", "serial_number", "=", "data", ".", "get", "(", "\"SerialNumber\"", ")", "self", ".", "temperature", "=", "data", ".", "get", "(", "\"Temperature\"", ")", "self", ".", "min_temperature", "=", "data", ".", "get", "(", "\"MinTemp\"", ")", "self", ".", "max_temperature", "=", "data", ".", "get", "(", "\"MaxTemp\"", ")", "self", ".", "target_temperature", "=", "data", ".", "get", "(", "\"SetPointTemp\"", ")", "self", ".", "_schedule_mode", "=", "data", ".", "get", "(", "\"ScheduleMode\"", ")" ]
3a18852dc9465c34cb96eb3a0c84f1a6caa70707
valid
NuHeatThermostat.schedule_mode
Set the thermostat mode :param mode: The desired mode integer value. Auto = 1 Temporary hold = 2 Permanent hold = 3
nuheat/thermostat.py
def schedule_mode(self, mode): """ Set the thermostat mode :param mode: The desired mode integer value. Auto = 1 Temporary hold = 2 Permanent hold = 3 """ modes = [config.SCHEDULE_RUN, config.SCHEDULE_TEMPORARY_HOLD, config.SCHEDULE_HOLD] if mode not in modes: raise Exception("Invalid mode. Please use one of: {}".format(modes)) self.set_data({"ScheduleMode": mode})
def schedule_mode(self, mode): """ Set the thermostat mode :param mode: The desired mode integer value. Auto = 1 Temporary hold = 2 Permanent hold = 3 """ modes = [config.SCHEDULE_RUN, config.SCHEDULE_TEMPORARY_HOLD, config.SCHEDULE_HOLD] if mode not in modes: raise Exception("Invalid mode. Please use one of: {}".format(modes)) self.set_data({"ScheduleMode": mode})
[ "Set", "the", "thermostat", "mode" ]
broox/python-nuheat
python
https://github.com/broox/python-nuheat/blob/3a18852dc9465c34cb96eb3a0c84f1a6caa70707/nuheat/thermostat.py#L162-L175
[ "def", "schedule_mode", "(", "self", ",", "mode", ")", ":", "modes", "=", "[", "config", ".", "SCHEDULE_RUN", ",", "config", ".", "SCHEDULE_TEMPORARY_HOLD", ",", "config", ".", "SCHEDULE_HOLD", "]", "if", "mode", "not", "in", "modes", ":", "raise", "Exception", "(", "\"Invalid mode. Please use one of: {}\"", ".", "format", "(", "modes", ")", ")", "self", ".", "set_data", "(", "{", "\"ScheduleMode\"", ":", "mode", "}", ")" ]
3a18852dc9465c34cb96eb3a0c84f1a6caa70707
valid
NuHeatThermostat.set_target_fahrenheit
Set the target temperature to the desired fahrenheit, with more granular control of the hold mode :param fahrenheit: The desired temperature in F :param mode: The desired mode to operate in
nuheat/thermostat.py
def set_target_fahrenheit(self, fahrenheit, mode=config.SCHEDULE_HOLD): """ Set the target temperature to the desired fahrenheit, with more granular control of the hold mode :param fahrenheit: The desired temperature in F :param mode: The desired mode to operate in """ temperature = fahrenheit_to_nuheat(fahrenheit) self.set_target_temperature(temperature, mode)
def set_target_fahrenheit(self, fahrenheit, mode=config.SCHEDULE_HOLD): """ Set the target temperature to the desired fahrenheit, with more granular control of the hold mode :param fahrenheit: The desired temperature in F :param mode: The desired mode to operate in """ temperature = fahrenheit_to_nuheat(fahrenheit) self.set_target_temperature(temperature, mode)
[ "Set", "the", "target", "temperature", "to", "the", "desired", "fahrenheit", "with", "more", "granular", "control", "of", "the", "hold", "mode" ]
broox/python-nuheat
python
https://github.com/broox/python-nuheat/blob/3a18852dc9465c34cb96eb3a0c84f1a6caa70707/nuheat/thermostat.py#L183-L192
[ "def", "set_target_fahrenheit", "(", "self", ",", "fahrenheit", ",", "mode", "=", "config", ".", "SCHEDULE_HOLD", ")", ":", "temperature", "=", "fahrenheit_to_nuheat", "(", "fahrenheit", ")", "self", ".", "set_target_temperature", "(", "temperature", ",", "mode", ")" ]
3a18852dc9465c34cb96eb3a0c84f1a6caa70707
valid
NuHeatThermostat.set_target_celsius
Set the target temperature to the desired celsius, with more granular control of the hold mode :param celsius: The desired temperature in C :param mode: The desired mode to operate in
nuheat/thermostat.py
def set_target_celsius(self, celsius, mode=config.SCHEDULE_HOLD): """ Set the target temperature to the desired celsius, with more granular control of the hold mode :param celsius: The desired temperature in C :param mode: The desired mode to operate in """ temperature = celsius_to_nuheat(celsius) self.set_target_temperature(temperature, mode)
def set_target_celsius(self, celsius, mode=config.SCHEDULE_HOLD): """ Set the target temperature to the desired celsius, with more granular control of the hold mode :param celsius: The desired temperature in C :param mode: The desired mode to operate in """ temperature = celsius_to_nuheat(celsius) self.set_target_temperature(temperature, mode)
[ "Set", "the", "target", "temperature", "to", "the", "desired", "celsius", "with", "more", "granular", "control", "of", "the", "hold", "mode" ]
broox/python-nuheat
python
https://github.com/broox/python-nuheat/blob/3a18852dc9465c34cb96eb3a0c84f1a6caa70707/nuheat/thermostat.py#L194-L203
[ "def", "set_target_celsius", "(", "self", ",", "celsius", ",", "mode", "=", "config", ".", "SCHEDULE_HOLD", ")", ":", "temperature", "=", "celsius_to_nuheat", "(", "celsius", ")", "self", ".", "set_target_temperature", "(", "temperature", ",", "mode", ")" ]
3a18852dc9465c34cb96eb3a0c84f1a6caa70707
valid
NuHeatThermostat.set_target_temperature
Updates the target temperature on the NuHeat API :param temperature: The desired temperature in NuHeat format :param mode: The hold mode to apply; with a temporary hold, the schedule will resume at the next programmed event
nuheat/thermostat.py
def set_target_temperature(self, temperature, mode=config.SCHEDULE_HOLD): """ Updates the target temperature on the NuHeat API :param temperature: The desired temperature in NuHeat format :param permanent: Permanently hold the temperature. If set to False, the schedule will resume at the next programmed event """ if temperature < self.min_temperature: temperature = self.min_temperature if temperature > self.max_temperature: temperature = self.max_temperature modes = [config.SCHEDULE_TEMPORARY_HOLD, config.SCHEDULE_HOLD] if mode not in modes: raise Exception("Invalid mode. Please use one of: {}".format(modes)) self.set_data({ "SetPointTemp": temperature, "ScheduleMode": mode })
def set_target_temperature(self, temperature, mode=config.SCHEDULE_HOLD): """ Updates the target temperature on the NuHeat API :param temperature: The desired temperature in NuHeat format :param permanent: Permanently hold the temperature. If set to False, the schedule will resume at the next programmed event """ if temperature < self.min_temperature: temperature = self.min_temperature if temperature > self.max_temperature: temperature = self.max_temperature modes = [config.SCHEDULE_TEMPORARY_HOLD, config.SCHEDULE_HOLD] if mode not in modes: raise Exception("Invalid mode. Please use one of: {}".format(modes)) self.set_data({ "SetPointTemp": temperature, "ScheduleMode": mode })
[ "Updates", "the", "target", "temperature", "on", "the", "NuHeat", "API" ]
broox/python-nuheat
python
https://github.com/broox/python-nuheat/blob/3a18852dc9465c34cb96eb3a0c84f1a6caa70707/nuheat/thermostat.py#L205-L226
[ "def", "set_target_temperature", "(", "self", ",", "temperature", ",", "mode", "=", "config", ".", "SCHEDULE_HOLD", ")", ":", "if", "temperature", "<", "self", ".", "min_temperature", ":", "temperature", "=", "self", ".", "min_temperature", "if", "temperature", ">", "self", ".", "max_temperature", ":", "temperature", "=", "self", ".", "max_temperature", "modes", "=", "[", "config", ".", "SCHEDULE_TEMPORARY_HOLD", ",", "config", ".", "SCHEDULE_HOLD", "]", "if", "mode", "not", "in", "modes", ":", "raise", "Exception", "(", "\"Invalid mode. Please use one of: {}\"", ".", "format", "(", "modes", ")", ")", "self", ".", "set_data", "(", "{", "\"SetPointTemp\"", ":", "temperature", ",", "\"ScheduleMode\"", ":", "mode", "}", ")" ]
3a18852dc9465c34cb96eb3a0c84f1a6caa70707
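Illustrative aside (not part of the harvested record): the two if-statements in set_target_temperature clamp the requested value into the thermostat's allowed range. The one-line equivalent below is just the generic clamp idiom:

def clamp(value, lo, hi):
    # Equivalent effect to the min/max temperature checks above.
    return max(lo, min(value, hi))

print(clamp(9000, 500, 7000))   # 7000 -- too-high requests fall back to the maximum
print(clamp(100, 500, 7000))    # 500  -- too-low requests fall back to the minimum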
valid
NuHeatThermostat.set_data
Update (patch) the current instance's data on the NuHeat API
nuheat/thermostat.py
def set_data(self, post_data): """ Update (patch) the current instance's data on the NuHeat API """ params = { "serialnumber": self.serial_number } self._session.request(config.THERMOSTAT_URL, method="POST", data=post_data, params=params)
def set_data(self, post_data): """ Update (patch) the current instance's data on the NuHeat API """ params = { "serialnumber": self.serial_number } self._session.request(config.THERMOSTAT_URL, method="POST", data=post_data, params=params)
[ "Update", "(", "patch", ")", "the", "current", "instance", "s", "data", "on", "the", "NuHeat", "API" ]
broox/python-nuheat
python
https://github.com/broox/python-nuheat/blob/3a18852dc9465c34cb96eb3a0c84f1a6caa70707/nuheat/thermostat.py#L228-L235
[ "def", "set_data", "(", "self", ",", "post_data", ")", ":", "params", "=", "{", "\"serialnumber\"", ":", "self", ".", "serial_number", "}", "self", ".", "_session", ".", "request", "(", "config", ".", "THERMOSTAT_URL", ",", "method", "=", "\"POST\"", ",", "data", "=", "post_data", ",", "params", "=", "params", ")" ]
3a18852dc9465c34cb96eb3a0c84f1a6caa70707
valid
load_config
This function returns a Bunch object from the stated config file. !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! NOTE: The values are not evaluated by default. !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! filename: The desired config file to read. The config file must be written in a syntax readable to the ConfigParser module -> INI syntax [sectionA] optionA1 = ... optionA2 = ... section_option_dict: A dictionary that contains keys, which are associated to the sections in the config file, and values, which are a list of the desired options. If empty, everything will be loaded. If the lists are empty, everything from the sections will be loaded. Example: dict = {'sectionA': ['optionA1', 'optionA2', ...], 'sectionB': ['optionB1', 'optionB2', ...]} config = get_config('config.cfg', dict) config.sectionA.optionA1 Other: Bunch can be found in configparser.py
harvestingkit/configparser.py
def load_config(filename=None, section_option_dict={}): """ This function returns a Bunch object from the stated config file. !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! NOTE: The values are not evaluated by default. !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! filename: The desired config file to read. The config file must be written in a syntax readable to the ConfigParser module -> INI syntax [sectionA] optionA1 = ... optionA2 = ... section_option_dict: A dictionary that contains keys, which are associated to the sections in the config file, and values, which are a list of the desired options. If empty, everything will be loaded. If the lists are empty, everything from the sections will be loaded. Example: dict = {'sectionA': ['optionA1', 'optionA2', ...], 'sectionB': ['optionB1', 'optionB2', ...]} config = load_config('config.cfg', dict) config.sectionA.optionA1 Other: Bunch can be found in configparser.py """ config = ConfigParser() config.read(filename) working_dict = _prepare_working_dict(config, section_option_dict) tmp_dict = {} for section, options in working_dict.iteritems(): tmp_dict[section] = {} for option in options: tmp_dict[section][option] = config.get(section, option) return Bunch(tmp_dict)
def load_config(filename=None, section_option_dict={}): """ This function returns a Bunch object from the stated config file. !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! NOTE: The values are not evaluated by default. !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! filename: The desired config file to read. The config file must be written in a syntax readable to the ConfigParser module -> INI syntax [sectionA] optionA1 = ... optionA2 = ... section_option_dict: A dictionary that contains keys, which are associated to the sections in the config file, and values, which are a list of the desired options. If empty, everything will be loaded. If the lists are empty, everything from the sections will be loaded. Example: dict = {'sectionA': ['optionA1', 'optionA2', ...], 'sectionB': ['optionB1', 'optionB2', ...]} config = load_config('config.cfg', dict) config.sectionA.optionA1 Other: Bunch can be found in configparser.py """ config = ConfigParser() config.read(filename) working_dict = _prepare_working_dict(config, section_option_dict) tmp_dict = {} for section, options in working_dict.iteritems(): tmp_dict[section] = {} for option in options: tmp_dict[section][option] = config.get(section, option) return Bunch(tmp_dict)
[ "This", "function", "returns", "a", "Bunch", "object", "from", "the", "stated", "config", "file", "." ]
inspirehep/harvesting-kit
python
https://github.com/inspirehep/harvesting-kit/blob/33a7f8aa9dade1d863110c6d8b27dfd955cb471f/harvestingkit/configparser.py#L35-L83
[ "def", "load_config", "(", "filename", "=", "None", ",", "section_option_dict", "=", "{", "}", ")", ":", "config", "=", "ConfigParser", "(", ")", "config", ".", "read", "(", "filename", ")", "working_dict", "=", "_prepare_working_dict", "(", "config", ",", "section_option_dict", ")", "tmp_dict", "=", "{", "}", "for", "section", ",", "options", "in", "working_dict", ".", "iteritems", "(", ")", ":", "tmp_dict", "[", "section", "]", "=", "{", "}", "for", "option", "in", "options", ":", "tmp_dict", "[", "section", "]", "[", "option", "]", "=", "config", ".", "get", "(", "section", ",", "option", ")", "return", "Bunch", "(", "tmp_dict", ")" ]
33a7f8aa9dade1d863110c6d8b27dfd955cb471f
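A small sketch of how load_config might be driven, assuming harvestingkit is importable from the path shown in the entry and that a config.cfg with the sections named below exists; both the filename and the section/option names are illustrative.

from harvestingkit.configparser import load_config

# Load only optionA1 from sectionA, and everything from sectionB
# (an empty list means "all options of this section").
wanted = {'sectionA': ['optionA1'], 'sectionB': []}
settings = load_config('config.cfg', wanted)

# Values come back as plain, unevaluated strings, per the docstring's NOTE.
print(settings.sectionA.optionA1)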
valid
NuHeat.authenticate
Authenticate against the NuHeat API
nuheat/api.py
def authenticate(self): """ Authenticate against the NuHeat API """ if self._session_id: _LOGGER.debug("Using existing NuHeat session") return _LOGGER.debug("Creating NuHeat session") post_data = { "Email": self.username, "Password": self.password, "application": "0" } data = self.request(config.AUTH_URL, method="POST", data=post_data) session_id = data.get("SessionId") if not session_id: raise Exception("Authentication error") self._session_id = session_id
def authenticate(self): """ Authenticate against the NuHeat API """ if self._session_id: _LOGGER.debug("Using existing NuHeat session") return _LOGGER.debug("Creating NuHeat session") post_data = { "Email": self.username, "Password": self.password, "application": "0" } data = self.request(config.AUTH_URL, method="POST", data=post_data) session_id = data.get("SessionId") if not session_id: raise Exception("Authentication error") self._session_id = session_id
[ "Authenticate", "against", "the", "NuHeat", "API" ]
broox/python-nuheat
python
https://github.com/broox/python-nuheat/blob/3a18852dc9465c34cb96eb3a0c84f1a6caa70707/nuheat/api.py#L27-L46
[ "def", "authenticate", "(", "self", ")", ":", "if", "self", ".", "_session_id", ":", "_LOGGER", ".", "debug", "(", "\"Using existing NuHeat session\"", ")", "return", "_LOGGER", ".", "debug", "(", "\"Creating NuHeat session\"", ")", "post_data", "=", "{", "\"Email\"", ":", "self", ".", "username", ",", "\"Password\"", ":", "self", ".", "password", ",", "\"application\"", ":", "\"0\"", "}", "data", "=", "self", ".", "request", "(", "config", ".", "AUTH_URL", ",", "method", "=", "\"POST\"", ",", "data", "=", "post_data", ")", "session_id", "=", "data", ".", "get", "(", "\"SessionId\"", ")", "if", "not", "session_id", ":", "raise", "Exception", "(", "\"Authentication error\"", ")", "self", ".", "_session_id", "=", "session_id" ]
3a18852dc9465c34cb96eb3a0c84f1a6caa70707
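Finally, a hedged sketch of the login flow for the authenticate entry; the credentials are placeholders. Because authenticate() returns early when a SessionId is already cached on the client, repeated calls are safe.

from nuheat import NuHeat

api = NuHeat("user@example.com", "password")  # hypothetical credentials
api.authenticate()  # POSTs to config.AUTH_URL and stores the SessionId
api.authenticate()  # no-op: the cached session is reused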