column             type            lengths / classes
partition          stringclasses   3 values
func_name          stringlengths   1–134
docstring          stringlengths   1–46.9k
path               stringlengths   4–223
original_string    stringlengths   75–104k
code               stringlengths   75–104k
docstring_tokens   listlengths     1–1.97k
repo               stringlengths   7–55
language           stringclasses   1 value
url                stringlengths   87–315
code_tokens        listlengths     19–28.4k
sha                stringlengths   40–40
valid
MARCXMLQuery._parse_persons
Parse persons from given datafield. Args: datafield (str): code of datafield ("010", "730", etc..) subfield (char): code of subfield ("a", "z", "4", etc..) role (list of str): set to ["any"] for any role, ["aut"] for authors, etc.. For details see http://www.loc.gov/marc/relators/relaterm.html Main records for persons are: "100", "600" and "700", subrecords "c". Returns: list: Person objects.
src/marcxml_parser/query.py
def _parse_persons(self, datafield, subfield, roles=["aut"]):
    """
    Parse persons from given datafield.

    Args:
        datafield (str): code of datafield ("010", "730", etc..)
        subfield (char): code of subfield ("a", "z", "4", etc..)
        role (list of str): set to ["any"] for any role, ["aut"] for
             authors, etc.. For details see
             http://www.loc.gov/marc/relators/relaterm.html

    Main records for persons are: "100", "600" and "700", subrecords "c".

    Returns:
        list: Person objects.
    """
    # parse authors
    parsed_persons = []
    raw_persons = self.get_subfields(datafield, subfield)
    for person in raw_persons:
        # check if person have at least one of the roles specified in
        # 'roles' parameter of function
        other_subfields = person.other_subfields
        if "4" in other_subfields and roles != ["any"]:
            person_roles = other_subfields["4"]  # list of role parameters
            relevant = any(map(lambda role: role in roles, person_roles))

            # skip non-relevant persons
            if not relevant:
                continue

        # result of .strip() is string, so ind1/2 in MARCSubrecord are lost
        ind1 = person.i1
        ind2 = person.i2
        person = person.strip()

        name = ""
        second_name = ""
        surname = ""
        title = ""

        # here it gets nasty - there is lot of options in ind1/ind2
        # parameters
        if ind1 == "1" and ind2 == " ":
            if "," in person:
                surname, name = person.split(",", 1)
            elif " " in person:
                surname, name = person.split(" ", 1)
            else:
                surname = person

            if "c" in other_subfields:
                title = ",".join(other_subfields["c"])
        elif ind1 == "0" and ind2 == " ":
            name = person.strip()

            if "b" in other_subfields:
                second_name = ",".join(other_subfields["b"])

            if "c" in other_subfields:
                surname = ",".join(other_subfields["c"])
        elif ind1 == "1" and ind2 == "0" or ind1 == "0" and ind2 == "0":
            name = person.strip()

            if "c" in other_subfields:
                title = ",".join(other_subfields["c"])

        parsed_persons.append(
            Person(
                name.strip(),
                second_name.strip(),
                surname.strip(),
                title.strip()
            )
        )

    return parsed_persons
[ "Parse", "persons", "from", "given", "datafield", "." ]
edeposit/marcxml_parser
python
https://github.com/edeposit/marcxml_parser/blob/6d1c77c61fc2827b71f1b3d5aa3332d7f5807820/src/marcxml_parser/query.py#L95-L171
[ "def", "_parse_persons", "(", "self", ",", "datafield", ",", "subfield", ",", "roles", "=", "[", "\"aut\"", "]", ")", ":", "# parse authors", "parsed_persons", "=", "[", "]", "raw_persons", "=", "self", ".", "get_subfields", "(", "datafield", ",", "subfield", ")", "for", "person", "in", "raw_persons", ":", "# check if person have at least one of the roles specified in", "# 'roles' parameter of function", "other_subfields", "=", "person", ".", "other_subfields", "if", "\"4\"", "in", "other_subfields", "and", "roles", "!=", "[", "\"any\"", "]", ":", "person_roles", "=", "other_subfields", "[", "\"4\"", "]", "# list of role parameters", "relevant", "=", "any", "(", "map", "(", "lambda", "role", ":", "role", "in", "roles", ",", "person_roles", ")", ")", "# skip non-relevant persons", "if", "not", "relevant", ":", "continue", "# result of .strip() is string, so ind1/2 in MARCSubrecord are lost", "ind1", "=", "person", ".", "i1", "ind2", "=", "person", ".", "i2", "person", "=", "person", ".", "strip", "(", ")", "name", "=", "\"\"", "second_name", "=", "\"\"", "surname", "=", "\"\"", "title", "=", "\"\"", "# here it gets nasty - there is lot of options in ind1/ind2", "# parameters", "if", "ind1", "==", "\"1\"", "and", "ind2", "==", "\" \"", ":", "if", "\",\"", "in", "person", ":", "surname", ",", "name", "=", "person", ".", "split", "(", "\",\"", ",", "1", ")", "elif", "\" \"", "in", "person", ":", "surname", ",", "name", "=", "person", ".", "split", "(", "\" \"", ",", "1", ")", "else", ":", "surname", "=", "person", "if", "\"c\"", "in", "other_subfields", ":", "title", "=", "\",\"", ".", "join", "(", "other_subfields", "[", "\"c\"", "]", ")", "elif", "ind1", "==", "\"0\"", "and", "ind2", "==", "\" \"", ":", "name", "=", "person", ".", "strip", "(", ")", "if", "\"b\"", "in", "other_subfields", ":", "second_name", "=", "\",\"", ".", "join", "(", "other_subfields", "[", "\"b\"", "]", ")", "if", "\"c\"", "in", "other_subfields", ":", "surname", "=", "\",\"", ".", "join", "(", "other_subfields", "[", "\"c\"", "]", ")", "elif", "ind1", "==", "\"1\"", "and", "ind2", "==", "\"0\"", "or", "ind1", "==", "\"0\"", "and", "ind2", "==", "\"0\"", ":", "name", "=", "person", ".", "strip", "(", ")", "if", "\"c\"", "in", "other_subfields", ":", "title", "=", "\",\"", ".", "join", "(", "other_subfields", "[", "\"c\"", "]", ")", "parsed_persons", ".", "append", "(", "Person", "(", "name", ".", "strip", "(", ")", ",", "second_name", ".", "strip", "(", ")", ",", "surname", ".", "strip", "(", ")", ",", "title", ".", "strip", "(", ")", ")", ")", "return", "parsed_persons" ]
6d1c77c61fc2827b71f1b3d5aa3332d7f5807820
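The ind1/ind2 branching in the record above is easiest to see on a concrete value. The snippet below is an illustrative aside, not part of the dataset record; the sample string "Novak, Jan" is hypothetical and only the `ind1 == "1", ind2 == " "` branch from the code above is reproduced.

```python
# Hedged sketch of the surname/name split used above for ind1 == "1", ind2 == " ".
person = "Novak, Jan"          # hypothetical subfield value

if "," in person:
    surname, name = person.split(",", 1)   # -> ("Novak", " Jan")
elif " " in person:
    surname, name = person.split(" ", 1)
else:
    surname, name = person, ""

print(surname.strip(), name.strip())       # Novak Jan
```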
valid
MARCXMLQuery.get_subname
Args: undefined (optional): Argument, which will be returned if the `subname` record is not found. Returns: str: Subname of the book or `undefined` if `subname` is not \ found.
src/marcxml_parser/query.py
def get_subname(self, undefined=""):
    """
    Args:
        undefined (optional): Argument, which will be returned if the
                  `subname` record is not found.

    Returns:
        str: Subname of the book or `undefined` if `subname` is not \
             found.
    """
    return _undefined_pattern(
        "".join(self.get_subfields("245", "b")),
        lambda x: x.strip() == "",
        undefined
    )
[ "Args", ":", "undefined", "(", "optional", ")", ":", "Argument", "which", "will", "be", "returned", "if", "the", "subname", "record", "is", "not", "found", "." ]
edeposit/marcxml_parser
python
https://github.com/edeposit/marcxml_parser/blob/6d1c77c61fc2827b71f1b3d5aa3332d7f5807820/src/marcxml_parser/query.py#L185-L199
[ "def", "get_subname", "(", "self", ",", "undefined", "=", "\"\"", ")", ":", "return", "_undefined_pattern", "(", "\"\"", ".", "join", "(", "self", ".", "get_subfields", "(", "\"245\"", ",", "\"b\"", ")", ")", ",", "lambda", "x", ":", "x", ".", "strip", "(", ")", "==", "\"\"", ",", "undefined", ")" ]
6d1c77c61fc2827b71f1b3d5aa3332d7f5807820
valid
MARCXMLQuery.get_price
Args: undefined (optional): Argument, which will be returned if the `price` record is not found. Returns: str: Price of the book (with currency) or `undefined` if `price` \ is not found.
src/marcxml_parser/query.py
def get_price(self, undefined=""):
    """
    Args:
        undefined (optional): Argument, which will be returned if the
                  `price` record is not found.

    Returns:
        str: Price of the book (with currency) or `undefined` if `price` \
             is not found.
    """
    return _undefined_pattern(
        "".join(self.get_subfields("020", "c")),
        lambda x: x.strip() == "",
        undefined
    )
[ "Args", ":", "undefined", "(", "optional", ")", ":", "Argument", "which", "will", "be", "returned", "if", "the", "price", "record", "is", "not", "found", "." ]
edeposit/marcxml_parser
python
https://github.com/edeposit/marcxml_parser/blob/6d1c77c61fc2827b71f1b3d5aa3332d7f5807820/src/marcxml_parser/query.py#L202-L216
[ "def", "get_price", "(", "self", ",", "undefined", "=", "\"\"", ")", ":", "return", "_undefined_pattern", "(", "\"\"", ".", "join", "(", "self", ".", "get_subfields", "(", "\"020\"", ",", "\"c\"", ")", ")", ",", "lambda", "x", ":", "x", ".", "strip", "(", ")", "==", "\"\"", ",", "undefined", ")" ]
6d1c77c61fc2827b71f1b3d5aa3332d7f5807820
valid
MARCXMLQuery.get_part
Args: undefined (optional): Argument, which will be returned if the `part` record is not found. Returns: str: Which part of the book series is this record or `undefined` \ if `part` is not found.
src/marcxml_parser/query.py
def get_part(self, undefined=""):
    """
    Args:
        undefined (optional): Argument, which will be returned if the
                  `part` record is not found.

    Returns:
        str: Which part of the book series is this record or `undefined` \
             if `part` is not found.
    """
    return _undefined_pattern(
        "".join(self.get_subfields("245", "p")),
        lambda x: x.strip() == "",
        undefined
    )
[ "Args", ":", "undefined", "(", "optional", ")", ":", "Argument", "which", "will", "be", "returned", "if", "the", "part", "record", "is", "not", "found", "." ]
edeposit/marcxml_parser
python
https://github.com/edeposit/marcxml_parser/blob/6d1c77c61fc2827b71f1b3d5aa3332d7f5807820/src/marcxml_parser/query.py#L219-L233
[ "def", "get_part", "(", "self", ",", "undefined", "=", "\"\"", ")", ":", "return", "_undefined_pattern", "(", "\"\"", ".", "join", "(", "self", ".", "get_subfields", "(", "\"245\"", ",", "\"p\"", ")", ")", ",", "lambda", "x", ":", "x", ".", "strip", "(", ")", "==", "\"\"", ",", "undefined", ")" ]
6d1c77c61fc2827b71f1b3d5aa3332d7f5807820
valid
MARCXMLQuery.get_part_name
Args: undefined (optional): Argument, which will be returned if the `part_name` record is not found. Returns: str: Name of the part of the series. or `undefined` if `part_name`\ is not found.
src/marcxml_parser/query.py
def get_part_name(self, undefined=""):
    """
    Args:
        undefined (optional): Argument, which will be returned if the
                  `part_name` record is not found.

    Returns:
        str: Name of the part of the series. or `undefined` if `part_name`\
             is not found.
    """
    return _undefined_pattern(
        "".join(self.get_subfields("245", "n")),
        lambda x: x.strip() == "",
        undefined
    )
[ "Args", ":", "undefined", "(", "optional", ")", ":", "Argument", "which", "will", "be", "returned", "if", "the", "part_name", "record", "is", "not", "found", "." ]
edeposit/marcxml_parser
python
https://github.com/edeposit/marcxml_parser/blob/6d1c77c61fc2827b71f1b3d5aa3332d7f5807820/src/marcxml_parser/query.py#L236-L250
[ "def", "get_part_name", "(", "self", ",", "undefined", "=", "\"\"", ")", ":", "return", "_undefined_pattern", "(", "\"\"", ".", "join", "(", "self", ".", "get_subfields", "(", "\"245\"", ",", "\"n\"", ")", ")", ",", "lambda", "x", ":", "x", ".", "strip", "(", ")", "==", "\"\"", ",", "undefined", ")" ]
6d1c77c61fc2827b71f1b3d5aa3332d7f5807820
valid
MARCXMLQuery.get_publisher
Args: undefined (optional): Argument, which will be returned if the `publisher` record is not found. Returns: str: Name of the publisher ("``Grada``" for example) or \ `undefined` if `publisher` is not found.
src/marcxml_parser/query.py
def get_publisher(self, undefined=""):
    """
    Args:
        undefined (optional): Argument, which will be returned if the
                  `publisher` record is not found.

    Returns:
        str: Name of the publisher ("``Grada``" for example) or \
             `undefined` if `publisher` is not found.
    """
    publishers = set([
        remove_hairs_fn(publisher)
        for publisher in self["260b "] + self["264b"]
    ])

    return _undefined_pattern(
        ", ".join(publishers),
        lambda x: x.strip() == "",
        undefined
    )
[ "Args", ":", "undefined", "(", "optional", ")", ":", "Argument", "which", "will", "be", "returned", "if", "the", "publisher", "record", "is", "not", "found", "." ]
edeposit/marcxml_parser
python
https://github.com/edeposit/marcxml_parser/blob/6d1c77c61fc2827b71f1b3d5aa3332d7f5807820/src/marcxml_parser/query.py#L253-L272
[ "def", "get_publisher", "(", "self", ",", "undefined", "=", "\"\"", ")", ":", "publishers", "=", "set", "(", "[", "remove_hairs_fn", "(", "publisher", ")", "for", "publisher", "in", "self", "[", "\"260b \"", "]", "+", "self", "[", "\"264b\"", "]", "]", ")", "return", "_undefined_pattern", "(", "\", \"", ".", "join", "(", "publishers", ")", ",", "lambda", "x", ":", "x", ".", "strip", "(", ")", "==", "\"\"", ",", "undefined", ")" ]
6d1c77c61fc2827b71f1b3d5aa3332d7f5807820
valid
MARCXMLQuery.get_pub_date
Args: undefined (optional): Argument, which will be returned if the `pub_date` record is not found. Returns: str: Date of publication (month and year usually) or `undefined` \ if `pub_date` is not found.
src/marcxml_parser/query.py
def get_pub_date(self, undefined=""):
    """
    Args:
        undefined (optional): Argument, which will be returned if the
                  `pub_date` record is not found.

    Returns:
        str: Date of publication (month and year usually) or `undefined` \
             if `pub_date` is not found.
    """
    dates = self["260c "] + self["264c"]

    def clean_date(date):
        """
        Clean the `date` strings from special characters, but leave
        sequences of numbers followed by -.

        So:
            [2015]- -> 2015
            2015- -> 2015-
        """
        out = ""
        was_digit = False
        for c in date:
            if c.isdigit() or (c == "-" and was_digit) or c == " ":
                out += c
            was_digit = c.isdigit()

        return out

    # clean all the date strings
    dates = set([
        clean_date(date)
        for date in self["260c "] + self["264c"]
    ])

    return _undefined_pattern(
        ", ".join(dates),
        lambda x: x.strip() == "",
        undefined
    )
[ "Args", ":", "undefined", "(", "optional", ")", ":", "Argument", "which", "will", "be", "returned", "if", "the", "pub_date", "record", "is", "not", "found", "." ]
edeposit/marcxml_parser
python
https://github.com/edeposit/marcxml_parser/blob/6d1c77c61fc2827b71f1b3d5aa3332d7f5807820/src/marcxml_parser/query.py#L274-L315
[ "def", "get_pub_date", "(", "self", ",", "undefined", "=", "\"\"", ")", ":", "dates", "=", "self", "[", "\"260c \"", "]", "+", "self", "[", "\"264c\"", "]", "def", "clean_date", "(", "date", ")", ":", "\"\"\"\n Clean the `date` strings from special characters, but leave\n sequences of numbers followed by -.\n\n So:\n [2015]- -> 2015\n 2015- -> 2015-\n \"\"\"", "out", "=", "\"\"", "was_digit", "=", "False", "for", "c", "in", "date", ":", "if", "c", ".", "isdigit", "(", ")", "or", "(", "c", "==", "\"-\"", "and", "was_digit", ")", "or", "c", "==", "\" \"", ":", "out", "+=", "c", "was_digit", "=", "c", ".", "isdigit", "(", ")", "return", "out", "# clean all the date strings", "dates", "=", "set", "(", "[", "clean_date", "(", "date", ")", "for", "date", "in", "self", "[", "\"260c \"", "]", "+", "self", "[", "\"264c\"", "]", "]", ")", "return", "_undefined_pattern", "(", "\", \"", ".", "join", "(", "dates", ")", ",", "lambda", "x", ":", "x", ".", "strip", "(", ")", "==", "\"\"", ",", "undefined", ")" ]
6d1c77c61fc2827b71f1b3d5aa3332d7f5807820
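The inner `clean_date` helper in the record above keeps digits and spaces, and keeps a dash only when it directly follows a digit, which is exactly what its two docstring examples show. Below is a standalone restatement of that helper (copied from the record, outside its method context) with the two documented cases checked:

```python
def clean_date(date):
    # Keep digits and spaces; keep "-" only when the previous character was a digit.
    out = ""
    was_digit = False
    for c in date:
        if c.isdigit() or (c == "-" and was_digit) or c == " ":
            out += c
        was_digit = c.isdigit()
    return out

assert clean_date("[2015]-") == "2015"
assert clean_date("2015-") == "2015-"
```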
valid
MARCXMLQuery.get_pub_order
Args: undefined (optional): Argument, which will be returned if the `pub_order` record is not found. Returns: str: Information about order in which was the book published or \ `undefined` if `pub_order` is not found.
src/marcxml_parser/query.py
def get_pub_order(self, undefined=""):
    """
    Args:
        undefined (optional): Argument, which will be returned if the
                  `pub_order` record is not found.

    Returns:
        str: Information about order in which was the book published or \
             `undefined` if `pub_order` is not found.
    """
    return _undefined_pattern(
        "".join(self.get_subfields("901", "f")),
        lambda x: x.strip() == "",
        undefined
    )
[ "Args", ":", "undefined", "(", "optional", ")", ":", "Argument", "which", "will", "be", "returned", "if", "the", "pub_order", "record", "is", "not", "found", "." ]
edeposit/marcxml_parser
python
https://github.com/edeposit/marcxml_parser/blob/6d1c77c61fc2827b71f1b3d5aa3332d7f5807820/src/marcxml_parser/query.py#L318-L332
[ "def", "get_pub_order", "(", "self", ",", "undefined", "=", "\"\"", ")", ":", "return", "_undefined_pattern", "(", "\"\"", ".", "join", "(", "self", ".", "get_subfields", "(", "\"901\"", ",", "\"f\"", ")", ")", ",", "lambda", "x", ":", "x", ".", "strip", "(", ")", "==", "\"\"", ",", "undefined", ")" ]
6d1c77c61fc2827b71f1b3d5aa3332d7f5807820
valid
MARCXMLQuery.get_pub_place
Args: undefined (optional): Argument, which will be returned if the `pub_place` record is not found. Returns: str: Name of city/country where the book was published or \ `undefined` if `pub_place` is not found.
src/marcxml_parser/query.py
def get_pub_place(self, undefined=""):
    """
    Args:
        undefined (optional): Argument, which will be returned if the
                  `pub_place` record is not found.

    Returns:
        str: Name of city/country where the book was published or \
             `undefined` if `pub_place` is not found.
    """
    places = set([
        remove_hairs_fn(place)
        for place in self["260a "] + self["264a"]
    ])

    return _undefined_pattern(
        ", ".join(places),
        lambda x: x.strip() == "",
        undefined
    )
[ "Args", ":", "undefined", "(", "optional", ")", ":", "Argument", "which", "will", "be", "returned", "if", "the", "pub_place", "record", "is", "not", "found", "." ]
edeposit/marcxml_parser
python
https://github.com/edeposit/marcxml_parser/blob/6d1c77c61fc2827b71f1b3d5aa3332d7f5807820/src/marcxml_parser/query.py#L335-L354
[ "def", "get_pub_place", "(", "self", ",", "undefined", "=", "\"\"", ")", ":", "places", "=", "set", "(", "[", "remove_hairs_fn", "(", "place", ")", "for", "place", "in", "self", "[", "\"260a \"", "]", "+", "self", "[", "\"264a\"", "]", "]", ")", "return", "_undefined_pattern", "(", "\", \"", ".", "join", "(", "places", ")", ",", "lambda", "x", ":", "x", ".", "strip", "(", ")", "==", "\"\"", ",", "undefined", ")" ]
6d1c77c61fc2827b71f1b3d5aa3332d7f5807820
valid
MARCXMLQuery.get_format
Args: undefined (optional): Argument, which will be returned if the `format` record is not found. Returns: str: Dimensions of the book ('``23 cm``' for example) or `undefined` if `format` is not found.
src/marcxml_parser/query.py
def get_format(self, undefined=""):
    """
    Args:
        undefined (optional): Argument, which will be returned if the
                  `format` record is not found.

    Returns:
        str: Dimensions of the book ('``23 cm``' for example) or
             `undefined` if `format` is not found.
    """
    return _undefined_pattern(
        "".join(self.get_subfields("300", "c")),
        lambda x: x.strip() == "",
        undefined
    )
[ "Args", ":", "undefined", "(", "optional", ")", ":", "Argument", "which", "will", "be", "returned", "if", "the", "format", "record", "is", "not", "found", "." ]
edeposit/marcxml_parser
python
https://github.com/edeposit/marcxml_parser/blob/6d1c77c61fc2827b71f1b3d5aa3332d7f5807820/src/marcxml_parser/query.py#L357-L371
[ "def", "get_format", "(", "self", ",", "undefined", "=", "\"\"", ")", ":", "return", "_undefined_pattern", "(", "\"\"", ".", "join", "(", "self", ".", "get_subfields", "(", "\"300\"", ",", "\"c\"", ")", ")", ",", "lambda", "x", ":", "x", ".", "strip", "(", ")", "==", "\"\"", ",", "undefined", ")" ]
6d1c77c61fc2827b71f1b3d5aa3332d7f5807820
valid
MARCXMLQuery.get_authors
Returns: list: Authors represented as :class:`.Person` objects.
src/marcxml_parser/query.py
def get_authors(self):
    """
    Returns:
        list: Authors represented as :class:`.Person` objects.
    """
    authors = self._parse_persons("100", "a")
    authors += self._parse_persons("600", "a")
    authors += self._parse_persons("700", "a")
    authors += self._parse_persons("800", "a")

    return authors
[ "Returns", ":", "list", ":", "Authors", "represented", "as", ":", "class", ":", ".", "Person", "objects", "." ]
edeposit/marcxml_parser
python
https://github.com/edeposit/marcxml_parser/blob/6d1c77c61fc2827b71f1b3d5aa3332d7f5807820/src/marcxml_parser/query.py#L373-L383
[ "def", "get_authors", "(", "self", ")", ":", "authors", "=", "self", ".", "_parse_persons", "(", "\"100\"", ",", "\"a\"", ")", "authors", "+=", "self", ".", "_parse_persons", "(", "\"600\"", ",", "\"a\"", ")", "authors", "+=", "self", ".", "_parse_persons", "(", "\"700\"", ",", "\"a\"", ")", "authors", "+=", "self", ".", "_parse_persons", "(", "\"800\"", ",", "\"a\"", ")", "return", "authors" ]
6d1c77c61fc2827b71f1b3d5aa3332d7f5807820
valid
MARCXMLQuery.get_corporations
Args: roles (list, optional): Specify which types of corporations you need. Set to ``["any"]`` for any role, ``["dst"]`` for distributors, etc.. Note: See http://www.loc.gov/marc/relators/relaterm.html for details. Returns: list: :class:`.Corporation` objects specified by roles parameter.
src/marcxml_parser/query.py
def get_corporations(self, roles=["dst"]):
    """
    Args:
        roles (list, optional): Specify which types of corporations you
              need. Set to ``["any"]`` for any role, ``["dst"]`` for
              distributors, etc..

    Note:
        See http://www.loc.gov/marc/relators/relaterm.html for details.

    Returns:
        list: :class:`.Corporation` objects specified by roles parameter.
    """
    corporations = self._parse_corporations("110", "a", roles)
    corporations += self._parse_corporations("610", "a", roles)
    corporations += self._parse_corporations("710", "a", roles)
    corporations += self._parse_corporations("810", "a", roles)

    return corporations
[ "Args", ":", "roles", "(", "list", "optional", ")", ":", "Specify", "which", "types", "of", "corporations", "you", "need", ".", "Set", "to", "[", "any", "]", "for", "any", "role", "[", "dst", "]", "for", "distributors", "etc", ".." ]
edeposit/marcxml_parser
python
https://github.com/edeposit/marcxml_parser/blob/6d1c77c61fc2827b71f1b3d5aa3332d7f5807820/src/marcxml_parser/query.py#L385-L403
[ "def", "get_corporations", "(", "self", ",", "roles", "=", "[", "\"dst\"", "]", ")", ":", "corporations", "=", "self", ".", "_parse_corporations", "(", "\"110\"", ",", "\"a\"", ",", "roles", ")", "corporations", "+=", "self", ".", "_parse_corporations", "(", "\"610\"", ",", "\"a\"", ",", "roles", ")", "corporations", "+=", "self", ".", "_parse_corporations", "(", "\"710\"", ",", "\"a\"", ",", "roles", ")", "corporations", "+=", "self", ".", "_parse_corporations", "(", "\"810\"", ",", "\"a\"", ",", "roles", ")", "return", "corporations" ]
6d1c77c61fc2827b71f1b3d5aa3332d7f5807820
valid
MARCXMLQuery.get_ISBNs
Get list of VALID ISBN. Returns: list: List with *valid* ISBN strings.
src/marcxml_parser/query.py
def get_ISBNs(self):
    """
    Get list of VALID ISBN.

    Returns:
        list: List with *valid* ISBN strings.
    """
    invalid_isbns = set(self.get_invalid_ISBNs())

    valid_isbns = [
        self._clean_isbn(isbn)
        for isbn in self["020a"]
        if self._clean_isbn(isbn) not in invalid_isbns
    ]

    if valid_isbns:
        return valid_isbns

    # this is used sometimes in czech national library
    return [
        self._clean_isbn(isbn)
        for isbn in self["901i"]
    ]
[ "Get", "list", "of", "VALID", "ISBN", "." ]
edeposit/marcxml_parser
python
https://github.com/edeposit/marcxml_parser/blob/6d1c77c61fc2827b71f1b3d5aa3332d7f5807820/src/marcxml_parser/query.py#L430-L452
[ "def", "get_ISBNs", "(", "self", ")", ":", "invalid_isbns", "=", "set", "(", "self", ".", "get_invalid_ISBNs", "(", ")", ")", "valid_isbns", "=", "[", "self", ".", "_clean_isbn", "(", "isbn", ")", "for", "isbn", "in", "self", "[", "\"020a\"", "]", "if", "self", ".", "_clean_isbn", "(", "isbn", ")", "not", "in", "invalid_isbns", "]", "if", "valid_isbns", ":", "return", "valid_isbns", "# this is used sometimes in czech national library", "return", "[", "self", ".", "_clean_isbn", "(", "isbn", ")", "for", "isbn", "in", "self", "[", "\"901i\"", "]", "]" ]
6d1c77c61fc2827b71f1b3d5aa3332d7f5807820
valid
MARCXMLQuery.get_ISSNs
Get list of VALID ISSNs (``022a``). Returns: list: List with *valid* ISSN strings.
src/marcxml_parser/query.py
def get_ISSNs(self):
    """
    Get list of VALID ISSNs (``022a``).

    Returns:
        list: List with *valid* ISSN strings.
    """
    invalid_issns = set(self.get_invalid_ISSNs())

    return [
        self._clean_isbn(issn)
        for issn in self["022a"]
        if self._clean_isbn(issn) not in invalid_issns
    ]
[ "Get", "list", "of", "VALID", "ISSNs", "(", "022a", ")", "." ]
edeposit/marcxml_parser
python
https://github.com/edeposit/marcxml_parser/blob/6d1c77c61fc2827b71f1b3d5aa3332d7f5807820/src/marcxml_parser/query.py#L466-L479
[ "def", "get_ISSNs", "(", "self", ")", ":", "invalid_issns", "=", "set", "(", "self", ".", "get_invalid_ISSNs", "(", ")", ")", "return", "[", "self", ".", "_clean_isbn", "(", "issn", ")", "for", "issn", "in", "self", "[", "\"022a\"", "]", "if", "self", ".", "_clean_isbn", "(", "issn", ")", "not", "in", "invalid_issns", "]" ]
6d1c77c61fc2827b71f1b3d5aa3332d7f5807820
valid
MARCXMLQuery._filter_binding
Filter binding from ISBN record. In MARC XML / OAI, the binding information is stored in same subrecord as ISBN. Example: ``<subfield code="a">80-251-0225-4 (brož.) :</subfield>`` -> ``brož.``.
src/marcxml_parser/query.py
def _filter_binding(self, binding):
    """
    Filter binding from ISBN record. In MARC XML / OAI, the binding
    information is stored in same subrecord as ISBN.

    Example:
        ``<subfield code="a">80-251-0225-4 (brož.) :</subfield>`` ->
        ``brož.``.
    """
    binding = binding.strip().split(" ", 1)[-1]  # isolate bind. from ISBN
    binding = remove_hairs_fn(binding)  # remove special chars from binding

    return binding.split(":")[-1].strip()
[ "Filter", "binding", "from", "ISBN", "record", ".", "In", "MARC", "XML", "/", "OAI", "the", "binding", "information", "is", "stored", "in", "same", "subrecord", "as", "ISBN", "." ]
edeposit/marcxml_parser
python
https://github.com/edeposit/marcxml_parser/blob/6d1c77c61fc2827b71f1b3d5aa3332d7f5807820/src/marcxml_parser/query.py#L493-L505
[ "def", "_filter_binding", "(", "self", ",", "binding", ")", ":", "binding", "=", "binding", ".", "strip", "(", ")", ".", "split", "(", "\" \"", ",", "1", ")", "[", "-", "1", "]", "# isolate bind. from ISBN", "binding", "=", "remove_hairs_fn", "(", "binding", ")", "# remove special chars from binding", "return", "binding", ".", "split", "(", "\":\"", ")", "[", "-", "1", "]", ".", "strip", "(", ")" ]
6d1c77c61fc2827b71f1b3d5aa3332d7f5807820
valid
MARCXMLQuery.get_urls
Content of field ``856u42``. Typically URL pointing to producers homepage. Returns: list: List of URLs defined by producer.
src/marcxml_parser/query.py
def get_urls(self):
    """
    Content of field ``856u42``. Typically URL pointing to producers
    homepage.

    Returns:
        list: List of URLs defined by producer.
    """
    urls = self.get_subfields("856", "u", i1="4", i2="2")

    return map(lambda x: x.replace("&amp;", "&"), urls)
[ "Content", "of", "field", "856u42", ".", "Typically", "URL", "pointing", "to", "producers", "homepage", "." ]
edeposit/marcxml_parser
python
https://github.com/edeposit/marcxml_parser/blob/6d1c77c61fc2827b71f1b3d5aa3332d7f5807820/src/marcxml_parser/query.py#L527-L537
[ "def", "get_urls", "(", "self", ")", ":", "urls", "=", "self", ".", "get_subfields", "(", "\"856\"", ",", "\"u\"", ",", "i1", "=", "\"4\"", ",", "i2", "=", "\"2\"", ")", "return", "map", "(", "lambda", "x", ":", "x", ".", "replace", "(", "\"&amp;\"", ",", "\"&\"", ")", ",", "urls", ")" ]
6d1c77c61fc2827b71f1b3d5aa3332d7f5807820
valid
MARCXMLQuery.get_internal_urls
URL's, which may point to edeposit, aleph, kramerius and so on. Fields ``856u40``, ``998a`` and ``URLu``. Returns: list: List of internal URLs.
src/marcxml_parser/query.py
def get_internal_urls(self):
    """
    URL's, which may point to edeposit, aleph, kramerius and so on.

    Fields ``856u40``, ``998a`` and ``URLu``.

    Returns:
        list: List of internal URLs.
    """
    internal_urls = self.get_subfields("856", "u", i1="4", i2="0")
    internal_urls.extend(self.get_subfields("998", "a"))
    internal_urls.extend(self.get_subfields("URL", "u"))

    return map(lambda x: x.replace("&amp;", "&"), internal_urls)
[ "URL", "s", "which", "may", "point", "to", "edeposit", "aleph", "kramerius", "and", "so", "on", "." ]
edeposit/marcxml_parser
python
https://github.com/edeposit/marcxml_parser/blob/6d1c77c61fc2827b71f1b3d5aa3332d7f5807820/src/marcxml_parser/query.py#L539-L552
[ "def", "get_internal_urls", "(", "self", ")", ":", "internal_urls", "=", "self", ".", "get_subfields", "(", "\"856\"", ",", "\"u\"", ",", "i1", "=", "\"4\"", ",", "i2", "=", "\"0\"", ")", "internal_urls", ".", "extend", "(", "self", ".", "get_subfields", "(", "\"998\"", ",", "\"a\"", ")", ")", "internal_urls", ".", "extend", "(", "self", ".", "get_subfields", "(", "\"URL\"", ",", "\"u\"", ")", ")", "return", "map", "(", "lambda", "x", ":", "x", ".", "replace", "(", "\"&amp;\"", ",", "\"&\"", ")", ",", "internal_urls", ")" ]
6d1c77c61fc2827b71f1b3d5aa3332d7f5807820
valid
MARCXMLQuery.get_pub_type
Returns: PublicationType: :class:`.PublicationType` enum **value**.
src/marcxml_parser/query.py
def get_pub_type(self):
    """
    Returns:
        PublicationType: :class:`.PublicationType` enum **value**.
    """
    INFO_CHAR_INDEX = 6
    SECOND_INFO_CHAR_I = 18

    if not len(self.leader) >= INFO_CHAR_INDEX + 1:
        return PublicationType.monographic

    if self.controlfields.get("FMT") == "SE":
        return PublicationType.continuing

    info_char = self.leader[INFO_CHAR_INDEX]
    multipart_n = self.get_subfields("245", "n", exception=False)
    multipart_p = self.get_subfields("245", "p", exception=False)

    if info_char in "acd":
        return PublicationType.monographic
    elif info_char in "bis":
        return PublicationType.continuing
    elif info_char == "m" and (multipart_n or multipart_p):
        return PublicationType.multipart_monograph
    elif info_char == "m" and len(self.leader) >= SECOND_INFO_CHAR_I + 1:
        if self.leader[SECOND_INFO_CHAR_I] == "a":
            return PublicationType.multipart_monograph
        elif self.leader[SECOND_INFO_CHAR_I] == " ":
            return PublicationType.single_unit

    return PublicationType.monographic
[ "Returns", ":", "PublicationType", ":", ":", "class", ":", ".", "PublicationType", "enum", "**", "value", "**", "." ]
edeposit/marcxml_parser
python
https://github.com/edeposit/marcxml_parser/blob/6d1c77c61fc2827b71f1b3d5aa3332d7f5807820/src/marcxml_parser/query.py#L554-L584
[ "def", "get_pub_type", "(", "self", ")", ":", "INFO_CHAR_INDEX", "=", "6", "SECOND_INFO_CHAR_I", "=", "18", "if", "not", "len", "(", "self", ".", "leader", ")", ">=", "INFO_CHAR_INDEX", "+", "1", ":", "return", "PublicationType", ".", "monographic", "if", "self", ".", "controlfields", ".", "get", "(", "\"FMT\"", ")", "==", "\"SE\"", ":", "return", "PublicationType", ".", "continuing", "info_char", "=", "self", ".", "leader", "[", "INFO_CHAR_INDEX", "]", "multipart_n", "=", "self", ".", "get_subfields", "(", "\"245\"", ",", "\"n\"", ",", "exception", "=", "False", ")", "multipart_p", "=", "self", ".", "get_subfields", "(", "\"245\"", ",", "\"p\"", ",", "exception", "=", "False", ")", "if", "info_char", "in", "\"acd\"", ":", "return", "PublicationType", ".", "monographic", "elif", "info_char", "in", "\"bis\"", ":", "return", "PublicationType", ".", "continuing", "elif", "info_char", "==", "\"m\"", "and", "(", "multipart_n", "or", "multipart_p", ")", ":", "return", "PublicationType", ".", "multipart_monograph", "elif", "info_char", "==", "\"m\"", "and", "len", "(", "self", ".", "leader", ")", ">=", "SECOND_INFO_CHAR_I", "+", "1", ":", "if", "self", ".", "leader", "[", "SECOND_INFO_CHAR_I", "]", "==", "\"a\"", ":", "return", "PublicationType", ".", "multipart_monograph", "elif", "self", ".", "leader", "[", "SECOND_INFO_CHAR_I", "]", "==", "\" \"", ":", "return", "PublicationType", ".", "single_unit", "return", "PublicationType", ".", "monographic" ]
6d1c77c61fc2827b71f1b3d5aa3332d7f5807820
valid
MARCXMLQuery.get
Standard dict-like .get() method. Args: item (str): See :meth:`.__getitem__` for details. alt (default None): Alternative value, if item is not found. Returns: obj: `item` or `alt`, if item is not found.
src/marcxml_parser/query.py
def get(self, item, alt=None):
    """
    Standard dict-like .get() method.

    Args:
        item (str): See :meth:`.__getitem__` for details.
        alt (default None): Alternative value, if item is not found.

    Returns:
        obj: `item` or `alt`, if item is not found.
    """
    try:
        val = self[item]
    except ValueError:
        return alt

    return val if val is not None else alt
[ "Standard", "dict", "-", "like", ".", "get", "()", "method", "." ]
edeposit/marcxml_parser
python
https://github.com/edeposit/marcxml_parser/blob/6d1c77c61fc2827b71f1b3d5aa3332d7f5807820/src/marcxml_parser/query.py#L668-L684
[ "def", "get", "(", "self", ",", "item", ",", "alt", "=", "None", ")", ":", "try", ":", "val", "=", "self", "[", "item", "]", "except", "ValueError", ":", "return", "alt", "return", "val", "if", "val", "is", "not", "None", "else", "alt" ]
6d1c77c61fc2827b71f1b3d5aa3332d7f5807820
valid
pid
r'''Create a callable that implements a PID controller. A PID controller returns a control signal :math:`u(t)` given a history of error measurements :math:`e(0) \dots e(t)`, using proportional (P), integral (I), and derivative (D) terms, according to: .. math:: u(t) = kp * e(t) + ki * \int_{s=0}^t e(s) ds + kd * \frac{de(s)}{ds}(t) The proportional term is just the current error, the integral term is the sum of all error measurements, and the derivative term is the instantaneous derivative of the error measurement. Parameters ---------- kp : float The weight associated with the proportional term of the PID controller. ki : float The weight associated with the integral term of the PID controller. kd : float The weight associated with the derivative term of the PID controller. smooth : float in [0, 1] Derivative values will be smoothed with this exponential average. A value of 1 never incorporates new derivative information, a value of 0.5 uses the mean of the historic and new information, and a value of 0 discards historic information (i.e., the derivative in this case will be unsmoothed). The default is 0.1. Returns ------- controller : callable (float, float) -> float Returns a function that accepts an error measurement and a delta-time value since the previous measurement, and returns a control signal.
pagoda/skeleton.py
def pid(kp=0., ki=0., kd=0., smooth=0.1):
    r'''Create a callable that implements a PID controller.

    A PID controller returns a control signal :math:`u(t)` given a history of
    error measurements :math:`e(0) \dots e(t)`, using proportional (P),
    integral (I), and derivative (D) terms, according to:

    .. math::

       u(t) = kp * e(t) + ki * \int_{s=0}^t e(s) ds + kd * \frac{de(s)}{ds}(t)

    The proportional term is just the current error, the integral term is the
    sum of all error measurements, and the derivative term is the
    instantaneous derivative of the error measurement.

    Parameters
    ----------
    kp : float
        The weight associated with the proportional term of the PID
        controller.
    ki : float
        The weight associated with the integral term of the PID controller.
    kd : float
        The weight associated with the derivative term of the PID controller.
    smooth : float in [0, 1]
        Derivative values will be smoothed with this exponential average. A
        value of 1 never incorporates new derivative information, a value of
        0.5 uses the mean of the historic and new information, and a value of
        0 discards historic information (i.e., the derivative in this case
        will be unsmoothed). The default is 0.1.

    Returns
    -------
    controller : callable (float, float) -> float
        Returns a function that accepts an error measurement and a delta-time
        value since the previous measurement, and returns a control signal.
    '''
    state = dict(p=0, i=0, d=0)

    def control(error, dt=1):
        state['d'] = smooth * state['d'] + (1 - smooth) * (error - state['p']) / dt
        state['i'] += error * dt
        state['p'] = error
        return kp * state['p'] + ki * state['i'] + kd * state['d']

    return control
[ "r", "Create", "a", "callable", "that", "implements", "a", "PID", "controller", "." ]
EmbodiedCognition/pagoda
python
https://github.com/EmbodiedCognition/pagoda/blob/8892f847026d98aba8646ecbc4589397e6dec7bd/pagoda/skeleton.py#L11-L55
[ "def", "pid", "(", "kp", "=", "0.", ",", "ki", "=", "0.", ",", "kd", "=", "0.", ",", "smooth", "=", "0.1", ")", ":", "state", "=", "dict", "(", "p", "=", "0", ",", "i", "=", "0", ",", "d", "=", "0", ")", "def", "control", "(", "error", ",", "dt", "=", "1", ")", ":", "state", "[", "'d'", "]", "=", "smooth", "*", "state", "[", "'d'", "]", "+", "(", "1", "-", "smooth", ")", "*", "(", "error", "-", "state", "[", "'p'", "]", ")", "/", "dt", "state", "[", "'i'", "]", "+=", "error", "*", "dt", "state", "[", "'p'", "]", "=", "error", "return", "kp", "*", "state", "[", "'p'", "]", "+", "ki", "*", "state", "[", "'i'", "]", "+", "kd", "*", "state", "[", "'d'", "]", "return", "control" ]
8892f847026d98aba8646ecbc4589397e6dec7bd
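A short usage sketch of the `pid()` factory shown in the record above; the gain values and time step here are arbitrary, illustrative choices, and the snippet assumes the `pid` definition above is in scope.

```python
controller = pid(kp=0.5, ki=0.1, kd=0.05, smooth=0.1)

u1 = controller(1.0, dt=0.01)   # each call updates the internal P/I/D state
u2 = controller(0.8, dt=0.01)   # the derivative term is smoothed across calls
```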
valid
as_flat_array
Given a sequence of sequences, return a flat numpy array. Parameters ---------- iterables : sequence of sequence of number A sequence of tuples or lists containing numbers. Typically these come from something that represents each joint in a skeleton, like angle. Returns ------- ndarray : An array of flattened data from each of the source iterables.
pagoda/skeleton.py
def as_flat_array(iterables):
    '''Given a sequence of sequences, return a flat numpy array.

    Parameters
    ----------
    iterables : sequence of sequence of number
        A sequence of tuples or lists containing numbers. Typically these
        come from something that represents each joint in a skeleton, like
        angle.

    Returns
    -------
    ndarray :
        An array of flattened data from each of the source iterables.
    '''
    arr = []
    for x in iterables:
        arr.extend(x)
    return np.array(arr)
[ "Given", "a", "sequence", "of", "sequences", "return", "a", "flat", "numpy", "array", "." ]
EmbodiedCognition/pagoda
python
https://github.com/EmbodiedCognition/pagoda/blob/8892f847026d98aba8646ecbc4589397e6dec7bd/pagoda/skeleton.py#L58-L75
[ "def", "as_flat_array", "(", "iterables", ")", ":", "arr", "=", "[", "]", "for", "x", "in", "iterables", ":", "arr", ".", "extend", "(", "x", ")", "return", "np", ".", "array", "(", "arr", ")" ]
8892f847026d98aba8646ecbc4589397e6dec7bd
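A minimal usage sketch of `as_flat_array`, assuming the definition from the record above is in scope and numpy is imported as `np`; the per-joint tuples are hypothetical values.

```python
import numpy as np

angles = [(0.0, 0.1), (0.2,), (0.3, 0.4, 0.5)]   # hypothetical per-joint angle tuples
flat = as_flat_array(angles)                      # -> array([0. , 0.1, 0.2, 0.3, 0.4, 0.5])
```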
valid
Skeleton.load
Load a skeleton definition from a file. Parameters ---------- source : str or file A filename or file-like object that contains text information describing a skeleton. See :class:`pagoda.parser.Parser` for more information about the format of the text file.
pagoda/skeleton.py
def load(self, source, **kwargs):
    '''Load a skeleton definition from a file.

    Parameters
    ----------
    source : str or file
        A filename or file-like object that contains text information
        describing a skeleton. See :class:`pagoda.parser.Parser` for more
        information about the format of the text file.
    '''
    if hasattr(source, 'endswith') and source.lower().endswith('.asf'):
        self.load_asf(source, **kwargs)
    else:
        self.load_skel(source, **kwargs)
[ "Load", "a", "skeleton", "definition", "from", "a", "file", "." ]
EmbodiedCognition/pagoda
python
https://github.com/EmbodiedCognition/pagoda/blob/8892f847026d98aba8646ecbc4589397e6dec7bd/pagoda/skeleton.py#L111-L124
[ "def", "load", "(", "self", ",", "source", ",", "*", "*", "kwargs", ")", ":", "if", "hasattr", "(", "source", ",", "'endswith'", ")", "and", "source", ".", "lower", "(", ")", ".", "endswith", "(", "'.asf'", ")", ":", "self", ".", "load_asf", "(", "source", ",", "*", "*", "kwargs", ")", "else", ":", "self", ".", "load_skel", "(", "source", ",", "*", "*", "kwargs", ")" ]
8892f847026d98aba8646ecbc4589397e6dec7bd
valid
Skeleton.load_skel
Load a skeleton definition from a text file. Parameters ---------- source : str or file A filename or file-like object that contains text information describing a skeleton. See :class:`pagoda.parser.BodyParser` for more information about the format of the text file.
pagoda/skeleton.py
def load_skel(self, source, **kwargs):
    '''Load a skeleton definition from a text file.

    Parameters
    ----------
    source : str or file
        A filename or file-like object that contains text information
        describing a skeleton. See :class:`pagoda.parser.BodyParser` for
        more information about the format of the text file.
    '''
    logging.info('%s: parsing skeleton configuration', source)
    if hasattr(source, 'read'):
        p = parser.parse(source, self.world, self.jointgroup, **kwargs)
    else:
        with open(source) as handle:
            p = parser.parse(handle, self.world, self.jointgroup, **kwargs)
    self.bodies = p.bodies
    self.joints = p.joints
    self.set_pid_params(kp=0.999 / self.world.dt)
[ "Load", "a", "skeleton", "definition", "from", "a", "text", "file", "." ]
EmbodiedCognition/pagoda
python
https://github.com/EmbodiedCognition/pagoda/blob/8892f847026d98aba8646ecbc4589397e6dec7bd/pagoda/skeleton.py#L126-L144
[ "def", "load_skel", "(", "self", ",", "source", ",", "*", "*", "kwargs", ")", ":", "logging", ".", "info", "(", "'%s: parsing skeleton configuration'", ",", "source", ")", "if", "hasattr", "(", "source", ",", "'read'", ")", ":", "p", "=", "parser", ".", "parse", "(", "source", ",", "self", ".", "world", ",", "self", ".", "jointgroup", ",", "*", "*", "kwargs", ")", "else", ":", "with", "open", "(", "source", ")", "as", "handle", ":", "p", "=", "parser", ".", "parse", "(", "handle", ",", "self", ".", "world", ",", "self", ".", "jointgroup", ",", "*", "*", "kwargs", ")", "self", ".", "bodies", "=", "p", ".", "bodies", "self", ".", "joints", "=", "p", ".", "joints", "self", ".", "set_pid_params", "(", "kp", "=", "0.999", "/", "self", ".", "world", ".", "dt", ")" ]
8892f847026d98aba8646ecbc4589397e6dec7bd
valid
Skeleton.load_asf
Load a skeleton definition from an ASF text file. Parameters ---------- source : str or file A filename or file-like object that contains text information describing a skeleton, in ASF format.
pagoda/skeleton.py
def load_asf(self, source, **kwargs):
    '''Load a skeleton definition from an ASF text file.

    Parameters
    ----------
    source : str or file
        A filename or file-like object that contains text information
        describing a skeleton, in ASF format.
    '''
    if hasattr(source, 'read'):
        p = parser.parse_asf(source, self.world, self.jointgroup, **kwargs)
    else:
        with open(source) as handle:
            p = parser.parse_asf(handle, self.world, self.jointgroup, **kwargs)
    self.bodies = p.bodies
    self.joints = p.joints
    self.set_pid_params(kp=0.999 / self.world.dt)
[ "Load", "a", "skeleton", "definition", "from", "an", "ASF", "text", "file", "." ]
EmbodiedCognition/pagoda
python
https://github.com/EmbodiedCognition/pagoda/blob/8892f847026d98aba8646ecbc4589397e6dec7bd/pagoda/skeleton.py#L146-L162
[ "def", "load_asf", "(", "self", ",", "source", ",", "*", "*", "kwargs", ")", ":", "if", "hasattr", "(", "source", ",", "'read'", ")", ":", "p", "=", "parser", ".", "parse_asf", "(", "source", ",", "self", ".", "world", ",", "self", ".", "jointgroup", ",", "*", "*", "kwargs", ")", "else", ":", "with", "open", "(", "source", ")", "as", "handle", ":", "p", "=", "parser", ".", "parse_asf", "(", "handle", ",", "self", ".", "world", ",", "self", ".", "jointgroup", ",", "*", "*", "kwargs", ")", "self", ".", "bodies", "=", "p", ".", "bodies", "self", ".", "joints", "=", "p", ".", "joints", "self", ".", "set_pid_params", "(", "kp", "=", "0.999", "/", "self", ".", "world", ".", "dt", ")" ]
8892f847026d98aba8646ecbc4589397e6dec7bd
valid
Skeleton.set_pid_params
Set PID parameters for all joints in the skeleton. Parameters for this method are passed directly to the `pid` constructor.
pagoda/skeleton.py
def set_pid_params(self, *args, **kwargs):
    '''Set PID parameters for all joints in the skeleton.

    Parameters for this method are passed directly to the `pid` constructor.
    '''
    for joint in self.joints:
        joint.target_angles = [None] * joint.ADOF
        joint.controllers = [pid(*args, **kwargs) for i in range(joint.ADOF)]
[ "Set", "PID", "parameters", "for", "all", "joints", "in", "the", "skeleton", "." ]
EmbodiedCognition/pagoda
python
https://github.com/EmbodiedCognition/pagoda/blob/8892f847026d98aba8646ecbc4589397e6dec7bd/pagoda/skeleton.py#L164-L171
[ "def", "set_pid_params", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "for", "joint", "in", "self", ".", "joints", ":", "joint", ".", "target_angles", "=", "[", "None", "]", "*", "joint", ".", "ADOF", "joint", ".", "controllers", "=", "[", "pid", "(", "*", "args", ",", "*", "*", "kwargs", ")", "for", "i", "in", "range", "(", "joint", ".", "ADOF", ")", "]" ]
8892f847026d98aba8646ecbc4589397e6dec7bd
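A sketch of re-tuning the controllers after loading; only the `kp` keyword is grounded in this section (load_asf above passes kp=0.999 / world.dt), so any other keyword accepted by the `pid` constructor would be an assumption:

# Re-create one PID controller per angular DOF with a softer proportional gain.
skel.set_pid_params(kp=0.9 / world.dt)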
valid
Skeleton.joint_torques
Get a list of all current joint torques in the skeleton.
pagoda/skeleton.py
def joint_torques(self): '''Get a list of all current joint torques in the skeleton.''' return as_flat_array(getattr(j, 'amotor', j).feedback[-1][:j.ADOF] for j in self.joints)
def joint_torques(self): '''Get a list of all current joint torques in the skeleton.''' return as_flat_array(getattr(j, 'amotor', j).feedback[-1][:j.ADOF] for j in self.joints)
[ "Get", "a", "list", "of", "all", "current", "joint", "torques", "in", "the", "skeleton", "." ]
EmbodiedCognition/pagoda
python
https://github.com/EmbodiedCognition/pagoda/blob/8892f847026d98aba8646ecbc4589397e6dec7bd/pagoda/skeleton.py#L198-L201
[ "def", "joint_torques", "(", "self", ")", ":", "return", "as_flat_array", "(", "getattr", "(", "j", ",", "'amotor'", ",", "j", ")", ".", "feedback", "[", "-", "1", "]", "[", ":", "j", ".", "ADOF", "]", "for", "j", "in", "self", ".", "joints", ")" ]
8892f847026d98aba8646ecbc4589397e6dec7bd
valid
Skeleton.indices_for_joint
Get a list of the indices for a specific joint. Parameters ---------- name : str The name of the joint to look up. Returns ------- list of int : A list of the index values for quantities related to the named joint. Often useful for getting, say, the angles for a specific joint in the skeleton.
pagoda/skeleton.py
def indices_for_joint(self, name): '''Get a list of the indices for a specific joint. Parameters ---------- name : str The name of the joint to look up. Returns ------- list of int : A list of the index values for quantities related to the named joint. Often useful for getting, say, the angles for a specific joint in the skeleton. ''' j = 0 for joint in self.joints: if joint.name == name: return list(range(j, j + joint.ADOF)) j += joint.ADOF return []
def indices_for_joint(self, name): '''Get a list of the indices for a specific joint. Parameters ---------- name : str The name of the joint to look up. Returns ------- list of int : A list of the index values for quantities related to the named joint. Often useful for getting, say, the angles for a specific joint in the skeleton. ''' j = 0 for joint in self.joints: if joint.name == name: return list(range(j, j + joint.ADOF)) j += joint.ADOF return []
[ "Get", "a", "list", "of", "the", "indices", "for", "a", "specific", "joint", "." ]
EmbodiedCognition/pagoda
python
https://github.com/EmbodiedCognition/pagoda/blob/8892f847026d98aba8646ecbc4589397e6dec7bd/pagoda/skeleton.py#L241-L261
[ "def", "indices_for_joint", "(", "self", ",", "name", ")", ":", "j", "=", "0", "for", "joint", "in", "self", ".", "joints", ":", "if", "joint", ".", "name", "==", "name", ":", "return", "list", "(", "range", "(", "j", ",", "j", "+", "joint", ".", "ADOF", ")", ")", "j", "+=", "joint", ".", "ADOF", "return", "[", "]" ]
8892f847026d98aba8646ecbc4589397e6dec7bd
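A sketch of how the returned indices are typically used to slice a flat, per-DOF array; the joint name 'knee.r' and the `all_angles` array are hypothetical:

idx = skel.indices_for_joint('knee.r')
if idx:
    knee_angles = [all_angles[i] for i in idx]  # one entry per angular DOF of that joint
else:
    print('no joint named knee.r')              # the method returns [] for unknown names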
valid
Skeleton.indices_for_body
Get a list of the indices for a specific body. Parameters ---------- name : str The name of the body to look up. step : int, optional The number of numbers for each body. Defaults to 3, should be set to 4 for body rotation (since quaternions have 4 values). Returns ------- list of int : A list of the index values for quantities related to the named body.
pagoda/skeleton.py
def indices_for_body(self, name, step=3): '''Get a list of the indices for a specific body. Parameters ---------- name : str The name of the body to look up. step : int, optional The number of numbers for each body. Defaults to 3, should be set to 4 for body rotation (since quaternions have 4 values). Returns ------- list of int : A list of the index values for quantities related to the named body. ''' for j, body in enumerate(self.bodies): if body.name == name: return list(range(j * step, (j + 1) * step)) return []
def indices_for_body(self, name, step=3): '''Get a list of the indices for a specific body. Parameters ---------- name : str The name of the body to look up. step : int, optional The number of numbers for each body. Defaults to 3, should be set to 4 for body rotation (since quaternions have 4 values). Returns ------- list of int : A list of the index values for quantities related to the named body. ''' for j, body in enumerate(self.bodies): if body.name == name: return list(range(j * step, (j + 1) * step)) return []
[ "Get", "a", "list", "of", "the", "indices", "for", "a", "specific", "body", "." ]
EmbodiedCognition/pagoda
python
https://github.com/EmbodiedCognition/pagoda/blob/8892f847026d98aba8646ecbc4589397e6dec7bd/pagoda/skeleton.py#L263-L282
[ "def", "indices_for_body", "(", "self", ",", "name", ",", "step", "=", "3", ")", ":", "for", "j", ",", "body", "in", "enumerate", "(", "self", ".", "bodies", ")", ":", "if", "body", ".", "name", "==", "name", ":", "return", "list", "(", "range", "(", "j", "*", "step", ",", "(", "j", "+", "1", ")", "*", "step", ")", ")", "return", "[", "]" ]
8892f847026d98aba8646ecbc4589397e6dec7bd
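A sketch of the `step` parameter: per the docstring, the default of 3 indexes 3-vector quantities while 4 indexes quaternion data; the body name 'pelvis' is hypothetical:

pos_idx = skel.indices_for_body('pelvis')           # indices into a flat array of 3-vectors
quat_idx = skel.indices_for_body('pelvis', step=4)  # indices into a flat array of quaternions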
valid
Skeleton.joint_distances
Get the current joint separations for the skeleton. Returns ------- distances : list of float A list expressing the distance between the two joint anchor points, for each joint in the skeleton. These quantities describe how "exploded" the bodies in the skeleton are; a value of 0 indicates that the constraints are perfectly satisfied for that joint.
pagoda/skeleton.py
def joint_distances(self): '''Get the current joint separations for the skeleton. Returns ------- distances : list of float A list expressing the distance between the two joint anchor points, for each joint in the skeleton. These quantities describe how "exploded" the bodies in the skeleton are; a value of 0 indicates that the constraints are perfectly satisfied for that joint. ''' return [((np.array(j.anchor) - j.anchor2) ** 2).sum() for j in self.joints]
def joint_distances(self): '''Get the current joint separations for the skeleton. Returns ------- distances : list of float A list expressing the distance between the two joint anchor points, for each joint in the skeleton. These quantities describe how "exploded" the bodies in the skeleton are; a value of 0 indicates that the constraints are perfectly satisfied for that joint. ''' return [((np.array(j.anchor) - j.anchor2) ** 2).sum() for j in self.joints]
[ "Get", "the", "current", "joint", "separations", "for", "the", "skeleton", "." ]
EmbodiedCognition/pagoda
python
https://github.com/EmbodiedCognition/pagoda/blob/8892f847026d98aba8646ecbc4589397e6dec7bd/pagoda/skeleton.py#L284-L295
[ "def", "joint_distances", "(", "self", ")", ":", "return", "[", "(", "(", "np", ".", "array", "(", "j", ".", "anchor", ")", "-", "j", ".", "anchor2", ")", "**", "2", ")", ".", "sum", "(", ")", "for", "j", "in", "self", ".", "joints", "]" ]
8892f847026d98aba8646ecbc4589397e6dec7bd
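A sketch of checking how "exploded" the skeleton is; note that the expression ((anchor - anchor2) ** 2).sum() in the record yields squared separations, so the tolerance below (an arbitrary example value) is also squared:

worst = max(skel.joint_distances())
if worst > 1e-4:
    print('joint constraints violated: max squared separation %.2e' % worst)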
valid
Skeleton.enable_motors
Enable the joint motors in this skeleton. This method sets the maximum force that can be applied by each joint to attain the desired target velocities. It also enables torque feedback for all joint motors. Parameters ---------- max_force : float The maximum force that each joint is allowed to apply to attain its target velocity.
pagoda/skeleton.py
def enable_motors(self, max_force): '''Enable the joint motors in this skeleton. This method sets the maximum force that can be applied by each joint to attain the desired target velocities. It also enables torque feedback for all joint motors. Parameters ---------- max_force : float The maximum force that each joint is allowed to apply to attain its target velocity. ''' for joint in self.joints: amotor = getattr(joint, 'amotor', joint) amotor.max_forces = max_force if max_force > 0: amotor.enable_feedback() else: amotor.disable_feedback()
def enable_motors(self, max_force): '''Enable the joint motors in this skeleton. This method sets the maximum force that can be applied by each joint to attain the desired target velocities. It also enables torque feedback for all joint motors. Parameters ---------- max_force : float The maximum force that each joint is allowed to apply to attain its target velocity. ''' for joint in self.joints: amotor = getattr(joint, 'amotor', joint) amotor.max_forces = max_force if max_force > 0: amotor.enable_feedback() else: amotor.disable_feedback()
[ "Enable", "the", "joint", "motors", "in", "this", "skeleton", "." ]
EmbodiedCognition/pagoda
python
https://github.com/EmbodiedCognition/pagoda/blob/8892f847026d98aba8646ecbc4589397e6dec7bd/pagoda/skeleton.py#L319-L338
[ "def", "enable_motors", "(", "self", ",", "max_force", ")", ":", "for", "joint", "in", "self", ".", "joints", ":", "amotor", "=", "getattr", "(", "joint", ",", "'amotor'", ",", "joint", ")", "amotor", ".", "max_forces", "=", "max_force", "if", "max_force", ">", "0", ":", "amotor", ".", "enable_feedback", "(", ")", "else", ":", "amotor", ".", "disable_feedback", "(", ")" ]
8892f847026d98aba8646ecbc4589397e6dec7bd
valid
Skeleton.set_target_angles
Move each joint toward a target angle. This method uses a PID controller to set a target angular velocity for each degree of freedom in the skeleton, based on the difference between the current and the target angle for the respective DOF. PID parameters are by default set to achieve a tiny bit less than complete convergence in one time step, using only the P term (i.e., the P coefficient is set to 1 - \delta, while I and D coefficients are set to 0). PID parameters can be updated by calling the `set_pid_params` method. Parameters ---------- angles : list of float A list of the target angles for every joint in the skeleton.
pagoda/skeleton.py
def set_target_angles(self, angles): '''Move each joint toward a target angle. This method uses a PID controller to set a target angular velocity for each degree of freedom in the skeleton, based on the difference between the current and the target angle for the respective DOF. PID parameters are by default set to achieve a tiny bit less than complete convergence in one time step, using only the P term (i.e., the P coefficient is set to 1 - \delta, while I and D coefficients are set to 0). PID parameters can be updated by calling the `set_pid_params` method. Parameters ---------- angles : list of float A list of the target angles for every joint in the skeleton. ''' j = 0 for joint in self.joints: velocities = [ ctrl(tgt - cur, self.world.dt) for cur, tgt, ctrl in zip(joint.angles, angles[j:j+joint.ADOF], joint.controllers)] joint.velocities = velocities j += joint.ADOF
def set_target_angles(self, angles): '''Move each joint toward a target angle. This method uses a PID controller to set a target angular velocity for each degree of freedom in the skeleton, based on the difference between the current and the target angle for the respective DOF. PID parameters are by default set to achieve a tiny bit less than complete convergence in one time step, using only the P term (i.e., the P coefficient is set to 1 - \delta, while I and D coefficients are set to 0). PID parameters can be updated by calling the `set_pid_params` method. Parameters ---------- angles : list of float A list of the target angles for every joint in the skeleton. ''' j = 0 for joint in self.joints: velocities = [ ctrl(tgt - cur, self.world.dt) for cur, tgt, ctrl in zip(joint.angles, angles[j:j+joint.ADOF], joint.controllers)] joint.velocities = velocities j += joint.ADOF
[ "Move", "each", "joint", "toward", "a", "target", "angle", "." ]
EmbodiedCognition/pagoda
python
https://github.com/EmbodiedCognition/pagoda/blob/8892f847026d98aba8646ecbc4589397e6dec7bd/pagoda/skeleton.py#L348-L372
[ "def", "set_target_angles", "(", "self", ",", "angles", ")", ":", "j", "=", "0", "for", "joint", "in", "self", ".", "joints", ":", "velocities", "=", "[", "ctrl", "(", "tgt", "-", "cur", ",", "self", ".", "world", ".", "dt", ")", "for", "cur", ",", "tgt", ",", "ctrl", "in", "zip", "(", "joint", ".", "angles", ",", "angles", "[", "j", ":", "j", "+", "joint", ".", "ADOF", "]", ",", "joint", ".", "controllers", ")", "]", "joint", ".", "velocities", "=", "velocities", "j", "+=", "joint", ".", "ADOF" ]
8892f847026d98aba8646ecbc4589397e6dec7bd
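A sketch of a per-frame control loop built from the records in this section; `target_angles` is a hypothetical flat list with one value per angular DOF, ordered like skel.joints:

skel.enable_motors(max_force=100)      # see the enable_motors record above; also enables torque feedback
skel.set_target_angles(target_angles)  # PID controllers turn angle errors into motor velocities
# ... advance the physics world by one time step here ...
print(skel.joint_torques())            # torques applied by the motors during the last step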
valid
Skeleton.add_torques
Add torques for each degree of freedom in the skeleton. Parameters ---------- torques : list of float A list of the torques to add to each degree of freedom in the skeleton.
pagoda/skeleton.py
def add_torques(self, torques): '''Add torques for each degree of freedom in the skeleton. Parameters ---------- torques : list of float A list of the torques to add to each degree of freedom in the skeleton. ''' j = 0 for joint in self.joints: joint.add_torques( list(torques[j:j+joint.ADOF]) + [0] * (3 - joint.ADOF)) j += joint.ADOF
def add_torques(self, torques): '''Add torques for each degree of freedom in the skeleton. Parameters ---------- torques : list of float A list of the torques to add to each degree of freedom in the skeleton. ''' j = 0 for joint in self.joints: joint.add_torques( list(torques[j:j+joint.ADOF]) + [0] * (3 - joint.ADOF)) j += joint.ADOF
[ "Add", "torques", "for", "each", "degree", "of", "freedom", "in", "the", "skeleton", "." ]
EmbodiedCognition/pagoda
python
https://github.com/EmbodiedCognition/pagoda/blob/8892f847026d98aba8646ecbc4589397e6dec7bd/pagoda/skeleton.py#L374-L387
[ "def", "add_torques", "(", "self", ",", "torques", ")", ":", "j", "=", "0", "for", "joint", "in", "self", ".", "joints", ":", "joint", ".", "add_torques", "(", "list", "(", "torques", "[", "j", ":", "j", "+", "joint", ".", "ADOF", "]", ")", "+", "[", "0", "]", "*", "(", "3", "-", "joint", ".", "ADOF", ")", ")", "j", "+=", "joint", ".", "ADOF" ]
8892f847026d98aba8646ecbc4589397e6dec7bd
valid
Markers.labels
Return the names of our marker labels in canonical order.
pagoda/cooper.py
def labels(self): '''Return the names of our marker labels in canonical order.''' return sorted(self.channels, key=lambda c: self.channels[c])
def labels(self): '''Return the names of our marker labels in canonical order.''' return sorted(self.channels, key=lambda c: self.channels[c])
[ "Return", "the", "names", "of", "our", "marker", "labels", "in", "canonical", "order", "." ]
EmbodiedCognition/pagoda
python
https://github.com/EmbodiedCognition/pagoda/blob/8892f847026d98aba8646ecbc4589397e6dec7bd/pagoda/cooper.py#L63-L65
[ "def", "labels", "(", "self", ")", ":", "return", "sorted", "(", "self", ".", "channels", ",", "key", "=", "lambda", "c", ":", "self", ".", "channels", "[", "c", "]", ")" ]
8892f847026d98aba8646ecbc4589397e6dec7bd
valid
Markers.load_csv
Load marker data from a CSV file. The file will be imported using Pandas, which must be installed to use this method. (``pip install pandas``) The first line of the CSV file will be used for header information. The "time" column will be used as the index for the data frame. There must be columns named 'markerAB-foo-x','markerAB-foo-y','markerAB-foo-z', and 'markerAB-foo-c' for marker 'foo' to be included in the model. Parameters ---------- filename : str Name of the CSV file to load.
pagoda/cooper.py
def load_csv(self, filename, start_frame=10, max_frames=int(1e300)): '''Load marker data from a CSV file. The file will be imported using Pandas, which must be installed to use this method. (``pip install pandas``) The first line of the CSV file will be used for header information. The "time" column will be used as the index for the data frame. There must be columns named 'markerAB-foo-x','markerAB-foo-y','markerAB-foo-z', and 'markerAB-foo-c' for marker 'foo' to be included in the model. Parameters ---------- filename : str Name of the CSV file to load. ''' import pandas as pd compression = None if filename.endswith('.gz'): compression = 'gzip' df = pd.read_csv(filename, compression=compression).set_index('time').fillna(-1) # make sure the data frame's time index matches our world. assert self.world.dt == pd.Series(df.index).diff().mean() markers = [] for c in df.columns: m = re.match(r'^marker\d\d-(.*)-c$', c) if m: markers.append(m.group(1)) self.channels = self._map_labels_to_channels(markers) cols = [c for c in df.columns if re.match(r'^marker\d\d-.*-[xyzc]$', c)] self.data = df[cols].values.reshape((len(df), len(markers), 4))[start_frame:] self.data[:, :, [1, 2]] = self.data[:, :, [2, 1]] logging.info('%s: loaded marker data %s', filename, self.data.shape) self.process_data() self.create_bodies()
def load_csv(self, filename, start_frame=10, max_frames=int(1e300)): '''Load marker data from a CSV file. The file will be imported using Pandas, which must be installed to use this method. (``pip install pandas``) The first line of the CSV file will be used for header information. The "time" column will be used as the index for the data frame. There must be columns named 'markerAB-foo-x','markerAB-foo-y','markerAB-foo-z', and 'markerAB-foo-c' for marker 'foo' to be included in the model. Parameters ---------- filename : str Name of the CSV file to load. ''' import pandas as pd compression = None if filename.endswith('.gz'): compression = 'gzip' df = pd.read_csv(filename, compression=compression).set_index('time').fillna(-1) # make sure the data frame's time index matches our world. assert self.world.dt == pd.Series(df.index).diff().mean() markers = [] for c in df.columns: m = re.match(r'^marker\d\d-(.*)-c$', c) if m: markers.append(m.group(1)) self.channels = self._map_labels_to_channels(markers) cols = [c for c in df.columns if re.match(r'^marker\d\d-.*-[xyzc]$', c)] self.data = df[cols].values.reshape((len(df), len(markers), 4))[start_frame:] self.data[:, :, [1, 2]] = self.data[:, :, [2, 1]] logging.info('%s: loaded marker data %s', filename, self.data.shape) self.process_data() self.create_bodies()
[ "Load", "marker", "data", "from", "a", "CSV", "file", "." ]
EmbodiedCognition/pagoda
python
https://github.com/EmbodiedCognition/pagoda/blob/8892f847026d98aba8646ecbc4589397e6dec7bd/pagoda/cooper.py#L80-L119
[ "def", "load_csv", "(", "self", ",", "filename", ",", "start_frame", "=", "10", ",", "max_frames", "=", "int", "(", "1e300", ")", ")", ":", "import", "pandas", "as", "pd", "compression", "=", "None", "if", "filename", ".", "endswith", "(", "'.gz'", ")", ":", "compression", "=", "'gzip'", "df", "=", "pd", ".", "read_csv", "(", "filename", ",", "compression", "=", "compression", ")", ".", "set_index", "(", "'time'", ")", ".", "fillna", "(", "-", "1", ")", "# make sure the data frame's time index matches our world.", "assert", "self", ".", "world", ".", "dt", "==", "pd", ".", "Series", "(", "df", ".", "index", ")", ".", "diff", "(", ")", ".", "mean", "(", ")", "markers", "=", "[", "]", "for", "c", "in", "df", ".", "columns", ":", "m", "=", "re", ".", "match", "(", "r'^marker\\d\\d-(.*)-c$'", ",", "c", ")", "if", "m", ":", "markers", ".", "append", "(", "m", ".", "group", "(", "1", ")", ")", "self", ".", "channels", "=", "self", ".", "_map_labels_to_channels", "(", "markers", ")", "cols", "=", "[", "c", "for", "c", "in", "df", ".", "columns", "if", "re", ".", "match", "(", "r'^marker\\d\\d-.*-[xyzc]$'", ",", "c", ")", "]", "self", ".", "data", "=", "df", "[", "cols", "]", ".", "values", ".", "reshape", "(", "(", "len", "(", "df", ")", ",", "len", "(", "markers", ")", ",", "4", ")", ")", "[", "start_frame", ":", "]", "self", ".", "data", "[", ":", ",", ":", ",", "[", "1", ",", "2", "]", "]", "=", "self", ".", "data", "[", ":", ",", ":", ",", "[", "2", ",", "1", "]", "]", "logging", ".", "info", "(", "'%s: loaded marker data %s'", ",", "filename", ",", "self", ".", "data", ".", "shape", ")", "self", ".", "process_data", "(", ")", "self", ".", "create_bodies", "(", ")" ]
8892f847026d98aba8646ecbc4589397e6dec7bd
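A sketch of the CSV layout the regular expressions in load_csv expect -- a 'time' index column plus marker\d\d-<label>-x/-y/-z/-c columns -- followed by a hypothetical call; note that load_csv asserts the file's mean sample interval equals world.dt:

from pagoda.cooper import Markers

# time,marker00-foo-x,marker00-foo-y,marker00-foo-z,marker00-foo-c
# 0.00,0.10,1.20,0.30,1
# 0.01,0.10,1.21,0.30,1
markers = Markers(world)            # Markers(world) mirrors the call in World.load_markers below
markers.load_csv('trial01.csv.gz')  # hypothetical file; a .gz suffix triggers gzip decompression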
valid
Markers.load_c3d
Load marker data from a C3D file. The file will be imported using the c3d module, which must be installed to use this method. (``pip install c3d``) Parameters ---------- filename : str Name of the C3D file to load. start_frame : int, optional Discard the first N frames. Defaults to 0. max_frames : int, optional Maximum number of frames to load. Defaults to loading all frames.
pagoda/cooper.py
def load_c3d(self, filename, start_frame=0, max_frames=int(1e300)): '''Load marker data from a C3D file. The file will be imported using the c3d module, which must be installed to use this method. (``pip install c3d``) Parameters ---------- filename : str Name of the C3D file to load. start_frame : int, optional Discard the first N frames. Defaults to 0. max_frames : int, optional Maximum number of frames to load. Defaults to loading all frames. ''' import c3d with open(filename, 'rb') as handle: reader = c3d.Reader(handle) logging.info('world frame rate %s, marker frame rate %s', 1 / self.world.dt, reader.point_rate) # set up a map from marker label to index in the data stream. self.channels = self._map_labels_to_channels([ s.strip() for s in reader.point_labels]) # read the actual c3d data into a numpy array. data = [] for i, (_, frame, _) in enumerate(reader.read_frames()): if i >= start_frame: data.append(frame[:, [0, 1, 2, 4]]) if len(data) > max_frames: break self.data = np.array(data) # scale the data to meters -- mm is a very common C3D unit. if reader.get('POINT:UNITS').string_value.strip().lower() == 'mm': logging.info('scaling point data from mm to m') self.data[:, :, :3] /= 1000. logging.info('%s: loaded marker data %s', filename, self.data.shape) self.process_data() self.create_bodies()
def load_c3d(self, filename, start_frame=0, max_frames=int(1e300)): '''Load marker data from a C3D file. The file will be imported using the c3d module, which must be installed to use this method. (``pip install c3d``) Parameters ---------- filename : str Name of the C3D file to load. start_frame : int, optional Discard the first N frames. Defaults to 0. max_frames : int, optional Maximum number of frames to load. Defaults to loading all frames. ''' import c3d with open(filename, 'rb') as handle: reader = c3d.Reader(handle) logging.info('world frame rate %s, marker frame rate %s', 1 / self.world.dt, reader.point_rate) # set up a map from marker label to index in the data stream. self.channels = self._map_labels_to_channels([ s.strip() for s in reader.point_labels]) # read the actual c3d data into a numpy array. data = [] for i, (_, frame, _) in enumerate(reader.read_frames()): if i >= start_frame: data.append(frame[:, [0, 1, 2, 4]]) if len(data) > max_frames: break self.data = np.array(data) # scale the data to meters -- mm is a very common C3D unit. if reader.get('POINT:UNITS').string_value.strip().lower() == 'mm': logging.info('scaling point data from mm to m') self.data[:, :, :3] /= 1000. logging.info('%s: loaded marker data %s', filename, self.data.shape) self.process_data() self.create_bodies()
[ "Load", "marker", "data", "from", "a", "C3D", "file", "." ]
EmbodiedCognition/pagoda
python
https://github.com/EmbodiedCognition/pagoda/blob/8892f847026d98aba8646ecbc4589397e6dec7bd/pagoda/cooper.py#L121-L164
[ "def", "load_c3d", "(", "self", ",", "filename", ",", "start_frame", "=", "0", ",", "max_frames", "=", "int", "(", "1e300", ")", ")", ":", "import", "c3d", "with", "open", "(", "filename", ",", "'rb'", ")", "as", "handle", ":", "reader", "=", "c3d", ".", "Reader", "(", "handle", ")", "logging", ".", "info", "(", "'world frame rate %s, marker frame rate %s'", ",", "1", "/", "self", ".", "world", ".", "dt", ",", "reader", ".", "point_rate", ")", "# set up a map from marker label to index in the data stream.", "self", ".", "channels", "=", "self", ".", "_map_labels_to_channels", "(", "[", "s", ".", "strip", "(", ")", "for", "s", "in", "reader", ".", "point_labels", "]", ")", "# read the actual c3d data into a numpy array.", "data", "=", "[", "]", "for", "i", ",", "(", "_", ",", "frame", ",", "_", ")", "in", "enumerate", "(", "reader", ".", "read_frames", "(", ")", ")", ":", "if", "i", ">=", "start_frame", ":", "data", ".", "append", "(", "frame", "[", ":", ",", "[", "0", ",", "1", ",", "2", ",", "4", "]", "]", ")", "if", "len", "(", "data", ")", ">", "max_frames", ":", "break", "self", ".", "data", "=", "np", ".", "array", "(", "data", ")", "# scale the data to meters -- mm is a very common C3D unit.", "if", "reader", ".", "get", "(", "'POINT:UNITS'", ")", ".", "string_value", ".", "strip", "(", ")", ".", "lower", "(", ")", "==", "'mm'", ":", "logging", ".", "info", "(", "'scaling point data from mm to m'", ")", "self", ".", "data", "[", ":", ",", ":", ",", ":", "3", "]", "/=", "1000.", "logging", ".", "info", "(", "'%s: loaded marker data %s'", ",", "filename", ",", "self", ".", "data", ".", "shape", ")", "self", ".", "process_data", "(", ")", "self", ".", "create_bodies", "(", ")" ]
8892f847026d98aba8646ecbc4589397e6dec7bd
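A sketch of loading a hypothetical C3D capture; the c3d package must be installed, as the docstring notes:

from pagoda.cooper import Markers

markers = Markers(world)
markers.load_c3d('trial01.c3d', start_frame=30, max_frames=5000)
print(markers.data.shape)  # (frames, num markers, 4); channel 3 is treated as visibility by process_data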
valid
Markers.process_data
Process data to produce velocity and dropout information.
pagoda/cooper.py
def process_data(self): '''Process data to produce velocity and dropout information.''' self.visibility = self.data[:, :, 3] self.positions = self.data[:, :, :3] self.velocities = np.zeros_like(self.positions) + 1000 for frame_no in range(1, len(self.data) - 1): prev = self.data[frame_no - 1] next = self.data[frame_no + 1] for c in range(self.num_markers): if -1 < prev[c, 3] < 100 and -1 < next[c, 3] < 100: self.velocities[frame_no, c] = ( next[c, :3] - prev[c, :3]) / (2 * self.world.dt) self.cfms = np.zeros_like(self.visibility) + self.DEFAULT_CFM
def process_data(self): '''Process data to produce velocity and dropout information.''' self.visibility = self.data[:, :, 3] self.positions = self.data[:, :, :3] self.velocities = np.zeros_like(self.positions) + 1000 for frame_no in range(1, len(self.data) - 1): prev = self.data[frame_no - 1] next = self.data[frame_no + 1] for c in range(self.num_markers): if -1 < prev[c, 3] < 100 and -1 < next[c, 3] < 100: self.velocities[frame_no, c] = ( next[c, :3] - prev[c, :3]) / (2 * self.world.dt) self.cfms = np.zeros_like(self.visibility) + self.DEFAULT_CFM
[ "Process", "data", "to", "produce", "velocity", "and", "dropout", "information", "." ]
EmbodiedCognition/pagoda
python
https://github.com/EmbodiedCognition/pagoda/blob/8892f847026d98aba8646ecbc4589397e6dec7bd/pagoda/cooper.py#L166-L178
[ "def", "process_data", "(", "self", ")", ":", "self", ".", "visibility", "=", "self", ".", "data", "[", ":", ",", ":", ",", "3", "]", "self", ".", "positions", "=", "self", ".", "data", "[", ":", ",", ":", ",", ":", "3", "]", "self", ".", "velocities", "=", "np", ".", "zeros_like", "(", "self", ".", "positions", ")", "+", "1000", "for", "frame_no", "in", "range", "(", "1", ",", "len", "(", "self", ".", "data", ")", "-", "1", ")", ":", "prev", "=", "self", ".", "data", "[", "frame_no", "-", "1", "]", "next", "=", "self", ".", "data", "[", "frame_no", "+", "1", "]", "for", "c", "in", "range", "(", "self", ".", "num_markers", ")", ":", "if", "-", "1", "<", "prev", "[", "c", ",", "3", "]", "<", "100", "and", "-", "1", "<", "next", "[", "c", ",", "3", "]", "<", "100", ":", "self", ".", "velocities", "[", "frame_no", ",", "c", "]", "=", "(", "next", "[", "c", ",", ":", "3", "]", "-", "prev", "[", "c", ",", ":", "3", "]", ")", "/", "(", "2", "*", "self", ".", "world", ".", "dt", ")", "self", ".", "cfms", "=", "np", ".", "zeros_like", "(", "self", ".", "visibility", ")", "+", "self", ".", "DEFAULT_CFM" ]
8892f847026d98aba8646ecbc4589397e6dec7bd
valid
Markers.create_bodies
Create physics bodies corresponding to each marker in our data.
pagoda/cooper.py
def create_bodies(self): '''Create physics bodies corresponding to each marker in our data.''' self.bodies = {} for label in self.channels: body = self.world.create_body( 'sphere', name='marker:{}'.format(label), radius=0.02) body.is_kinematic = True body.color = 0.9, 0.1, 0.1, 0.5 self.bodies[label] = body
def create_bodies(self): '''Create physics bodies corresponding to each marker in our data.''' self.bodies = {} for label in self.channels: body = self.world.create_body( 'sphere', name='marker:{}'.format(label), radius=0.02) body.is_kinematic = True body.color = 0.9, 0.1, 0.1, 0.5 self.bodies[label] = body
[ "Create", "physics", "bodies", "corresponding", "to", "each", "marker", "in", "our", "data", "." ]
EmbodiedCognition/pagoda
python
https://github.com/EmbodiedCognition/pagoda/blob/8892f847026d98aba8646ecbc4589397e6dec7bd/pagoda/cooper.py#L180-L188
[ "def", "create_bodies", "(", "self", ")", ":", "self", ".", "bodies", "=", "{", "}", "for", "label", "in", "self", ".", "channels", ":", "body", "=", "self", ".", "world", ".", "create_body", "(", "'sphere'", ",", "name", "=", "'marker:{}'", ".", "format", "(", "label", ")", ",", "radius", "=", "0.02", ")", "body", ".", "is_kinematic", "=", "True", "body", ".", "color", "=", "0.9", ",", "0.1", ",", "0.1", ",", "0.5", "self", ".", "bodies", "[", "label", "]", "=", "body" ]
8892f847026d98aba8646ecbc4589397e6dec7bd
valid
Markers.load_attachments
Load attachment configuration from the given text source. The attachment configuration file has a simple format. After discarding Unix-style comments (any part of a line that starts with the pound (#) character), each line in the file is then expected to have the following format:: marker-name body-name X Y Z The marker name must correspond to an existing "channel" in our marker data. The body name must correspond to a rigid body in the skeleton. The X, Y, and Z coordinates specify the body-relative offsets where the marker should be attached: 0 corresponds to the center of the body along the given axis, while -1 and 1 correspond to the minimal (maximal, respectively) extent of the body's bounding box along the corresponding dimension. Parameters ---------- source : str or file-like A filename or file-like object that we can use to obtain text configuration that describes how markers are attached to skeleton bodies. skeleton : :class:`pagoda.skeleton.Skeleton` The skeleton to attach our marker data to.
pagoda/cooper.py
def load_attachments(self, source, skeleton): '''Load attachment configuration from the given text source. The attachment configuration file has a simple format. After discarding Unix-style comments (any part of a line that starts with the pound (#) character), each line in the file is then expected to have the following format:: marker-name body-name X Y Z The marker name must correspond to an existing "channel" in our marker data. The body name must correspond to a rigid body in the skeleton. The X, Y, and Z coordinates specify the body-relative offsets where the marker should be attached: 0 corresponds to the center of the body along the given axis, while -1 and 1 correspond to the minimal (maximal, respectively) extent of the body's bounding box along the corresponding dimension. Parameters ---------- source : str or file-like A filename or file-like object that we can use to obtain text configuration that describes how markers are attached to skeleton bodies. skeleton : :class:`pagoda.skeleton.Skeleton` The skeleton to attach our marker data to. ''' self.targets = {} self.offsets = {} filename = source if isinstance(source, str): source = open(source) else: filename = '(file-{})'.format(id(source)) for i, line in enumerate(source): tokens = line.split('#')[0].strip().split() if not tokens: continue label = tokens.pop(0) if label not in self.channels: logging.info('%s:%d: unknown marker %s', filename, i, label) continue if not tokens: continue name = tokens.pop(0) bodies = [b for b in skeleton.bodies if b.name == name] if len(bodies) != 1: logging.info('%s:%d: %d skeleton bodies match %s', filename, i, len(bodies), name) continue b = self.targets[label] = bodies[0] o = self.offsets[label] = \ np.array(list(map(float, tokens))) * b.dimensions / 2 logging.info('%s <--> %s, offset %s', label, b.name, o)
def load_attachments(self, source, skeleton): '''Load attachment configuration from the given text source. The attachment configuration file has a simple format. After discarding Unix-style comments (any part of a line that starts with the pound (#) character), each line in the file is then expected to have the following format:: marker-name body-name X Y Z The marker name must correspond to an existing "channel" in our marker data. The body name must correspond to a rigid body in the skeleton. The X, Y, and Z coordinates specify the body-relative offsets where the marker should be attached: 0 corresponds to the center of the body along the given axis, while -1 and 1 correspond to the minimal (maximal, respectively) extent of the body's bounding box along the corresponding dimension. Parameters ---------- source : str or file-like A filename or file-like object that we can use to obtain text configuration that describes how markers are attached to skeleton bodies. skeleton : :class:`pagoda.skeleton.Skeleton` The skeleton to attach our marker data to. ''' self.targets = {} self.offsets = {} filename = source if isinstance(source, str): source = open(source) else: filename = '(file-{})'.format(id(source)) for i, line in enumerate(source): tokens = line.split('#')[0].strip().split() if not tokens: continue label = tokens.pop(0) if label not in self.channels: logging.info('%s:%d: unknown marker %s', filename, i, label) continue if not tokens: continue name = tokens.pop(0) bodies = [b for b in skeleton.bodies if b.name == name] if len(bodies) != 1: logging.info('%s:%d: %d skeleton bodies match %s', filename, i, len(bodies), name) continue b = self.targets[label] = bodies[0] o = self.offsets[label] = \ np.array(list(map(float, tokens))) * b.dimensions / 2 logging.info('%s <--> %s, offset %s', label, b.name, o)
[ "Load", "attachment", "configuration", "from", "the", "given", "text", "source", "." ]
EmbodiedCognition/pagoda
python
https://github.com/EmbodiedCognition/pagoda/blob/8892f847026d98aba8646ecbc4589397e6dec7bd/pagoda/cooper.py#L190-L246
[ "def", "load_attachments", "(", "self", ",", "source", ",", "skeleton", ")", ":", "self", ".", "targets", "=", "{", "}", "self", ".", "offsets", "=", "{", "}", "filename", "=", "source", "if", "isinstance", "(", "source", ",", "str", ")", ":", "source", "=", "open", "(", "source", ")", "else", ":", "filename", "=", "'(file-{})'", ".", "format", "(", "id", "(", "source", ")", ")", "for", "i", ",", "line", "in", "enumerate", "(", "source", ")", ":", "tokens", "=", "line", ".", "split", "(", "'#'", ")", "[", "0", "]", ".", "strip", "(", ")", ".", "split", "(", ")", "if", "not", "tokens", ":", "continue", "label", "=", "tokens", ".", "pop", "(", "0", ")", "if", "label", "not", "in", "self", ".", "channels", ":", "logging", ".", "info", "(", "'%s:%d: unknown marker %s'", ",", "filename", ",", "i", ",", "label", ")", "continue", "if", "not", "tokens", ":", "continue", "name", "=", "tokens", ".", "pop", "(", "0", ")", "bodies", "=", "[", "b", "for", "b", "in", "skeleton", ".", "bodies", "if", "b", ".", "name", "==", "name", "]", "if", "len", "(", "bodies", ")", "!=", "1", ":", "logging", ".", "info", "(", "'%s:%d: %d skeleton bodies match %s'", ",", "filename", ",", "i", ",", "len", "(", "bodies", ")", ",", "name", ")", "continue", "b", "=", "self", ".", "targets", "[", "label", "]", "=", "bodies", "[", "0", "]", "o", "=", "self", ".", "offsets", "[", "label", "]", "=", "np", ".", "array", "(", "list", "(", "map", "(", "float", ",", "tokens", ")", ")", ")", "*", "b", ".", "dimensions", "/", "2", "logging", ".", "info", "(", "'%s <--> %s, offset %s'", ",", "label", ",", "b", ".", "name", ",", "o", ")" ]
8892f847026d98aba8646ecbc4589397e6dec7bd
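A sketch of the attachment file format described in the docstring; the marker and body names are hypothetical, and the marker data and skeleton are assumed to be loaded already:

# marker-name  body-name  X     Y     Z      (offsets in [-1, 1] of the body half-extents)
# r-knee       shin.r     0.0   0.5   1.0
# r-heel       foot.r     0.0  -1.0   0.0
markers.load_attachments('attachments.txt', skel)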
valid
Markers.attach
Attach marker bodies to the corresponding skeleton bodies. Attachments are only made for markers that are not in a dropout state in the given frame. Parameters ---------- frame_no : int The frame of data we will use for attaching marker bodies.
pagoda/cooper.py
def attach(self, frame_no): '''Attach marker bodies to the corresponding skeleton bodies. Attachments are only made for markers that are not in a dropout state in the given frame. Parameters ---------- frame_no : int The frame of data we will use for attaching marker bodies. ''' assert not self.joints for label, j in self.channels.items(): target = self.targets.get(label) if target is None: continue if self.visibility[frame_no, j] < 0: continue if np.linalg.norm(self.velocities[frame_no, j]) > 10: continue joint = ode.BallJoint(self.world.ode_world, self.jointgroup) joint.attach(self.bodies[label].ode_body, target.ode_body) joint.setAnchor1Rel([0, 0, 0]) joint.setAnchor2Rel(self.offsets[label]) joint.setParam(ode.ParamCFM, self.cfms[frame_no, j]) joint.setParam(ode.ParamERP, self.erp) joint.name = label self.joints[label] = joint self._frame_no = frame_no
def attach(self, frame_no): '''Attach marker bodies to the corresponding skeleton bodies. Attachments are only made for markers that are not in a dropout state in the given frame. Parameters ---------- frame_no : int The frame of data we will use for attaching marker bodies. ''' assert not self.joints for label, j in self.channels.items(): target = self.targets.get(label) if target is None: continue if self.visibility[frame_no, j] < 0: continue if np.linalg.norm(self.velocities[frame_no, j]) > 10: continue joint = ode.BallJoint(self.world.ode_world, self.jointgroup) joint.attach(self.bodies[label].ode_body, target.ode_body) joint.setAnchor1Rel([0, 0, 0]) joint.setAnchor2Rel(self.offsets[label]) joint.setParam(ode.ParamCFM, self.cfms[frame_no, j]) joint.setParam(ode.ParamERP, self.erp) joint.name = label self.joints[label] = joint self._frame_no = frame_no
[ "Attach", "marker", "bodies", "to", "the", "corresponding", "skeleton", "bodies", "." ]
EmbodiedCognition/pagoda
python
https://github.com/EmbodiedCognition/pagoda/blob/8892f847026d98aba8646ecbc4589397e6dec7bd/pagoda/cooper.py#L253-L281
[ "def", "attach", "(", "self", ",", "frame_no", ")", ":", "assert", "not", "self", ".", "joints", "for", "label", ",", "j", "in", "self", ".", "channels", ".", "items", "(", ")", ":", "target", "=", "self", ".", "targets", ".", "get", "(", "label", ")", "if", "target", "is", "None", ":", "continue", "if", "self", ".", "visibility", "[", "frame_no", ",", "j", "]", "<", "0", ":", "continue", "if", "np", ".", "linalg", ".", "norm", "(", "self", ".", "velocities", "[", "frame_no", ",", "j", "]", ")", ">", "10", ":", "continue", "joint", "=", "ode", ".", "BallJoint", "(", "self", ".", "world", ".", "ode_world", ",", "self", ".", "jointgroup", ")", "joint", ".", "attach", "(", "self", ".", "bodies", "[", "label", "]", ".", "ode_body", ",", "target", ".", "ode_body", ")", "joint", ".", "setAnchor1Rel", "(", "[", "0", ",", "0", ",", "0", "]", ")", "joint", ".", "setAnchor2Rel", "(", "self", ".", "offsets", "[", "label", "]", ")", "joint", ".", "setParam", "(", "ode", ".", "ParamCFM", ",", "self", ".", "cfms", "[", "frame_no", ",", "j", "]", ")", "joint", ".", "setParam", "(", "ode", ".", "ParamERP", ",", "self", ".", "erp", ")", "joint", ".", "name", "=", "label", "self", ".", "joints", "[", "label", "]", "=", "joint", "self", ".", "_frame_no", "=", "frame_no" ]
8892f847026d98aba8646ecbc4589397e6dec7bd
valid
Markers.reposition
Reposition markers to a specific frame of data. Parameters ---------- frame_no : int The frame of data where we should reposition marker bodies. Markers will be positioned in the appropriate places in world coordinates. In addition, linear velocities of the markers will be set according to the data as long as there are no dropouts in neighboring frames.
pagoda/cooper.py
def reposition(self, frame_no): '''Reposition markers to a specific frame of data. Parameters ---------- frame_no : int The frame of data where we should reposition marker bodies. Markers will be positioned in the appropriate places in world coordinates. In addition, linear velocities of the markers will be set according to the data as long as there are no dropouts in neighboring frames. ''' for label, j in self.channels.items(): body = self.bodies[label] body.position = self.positions[frame_no, j] body.linear_velocity = self.velocities[frame_no, j]
def reposition(self, frame_no): '''Reposition markers to a specific frame of data. Parameters ---------- frame_no : int The frame of data where we should reposition marker bodies. Markers will be positioned in the appropriate places in world coordinates. In addition, linear velocities of the markers will be set according to the data as long as there are no dropouts in neighboring frames. ''' for label, j in self.channels.items(): body = self.bodies[label] body.position = self.positions[frame_no, j] body.linear_velocity = self.velocities[frame_no, j]
[ "Reposition", "markers", "to", "a", "specific", "frame", "of", "data", "." ]
EmbodiedCognition/pagoda
python
https://github.com/EmbodiedCognition/pagoda/blob/8892f847026d98aba8646ecbc4589397e6dec7bd/pagoda/cooper.py#L283-L297
[ "def", "reposition", "(", "self", ",", "frame_no", ")", ":", "for", "label", ",", "j", "in", "self", ".", "channels", ".", "items", "(", ")", ":", "body", "=", "self", ".", "bodies", "[", "label", "]", "body", ".", "position", "=", "self", ".", "positions", "[", "frame_no", ",", "j", "]", "body", ".", "linear_velocity", "=", "self", ".", "velocities", "[", "frame_no", ",", "j", "]" ]
8892f847026d98aba8646ecbc4589397e6dec7bd
valid
Markers.distances
Get a list of the distances between markers and their attachments. Returns ------- distances : ndarray of shape (num-markers, 3) Array of distances for each marker joint in our attachment setup. If a marker does not currently have an associated joint (e.g. because it is not currently visible) this will contain NaN for that row.
pagoda/cooper.py
def distances(self): '''Get a list of the distances between markers and their attachments. Returns ------- distances : ndarray of shape (num-markers, 3) Array of distances for each marker joint in our attachment setup. If a marker does not currently have an associated joint (e.g. because it is not currently visible) this will contain NaN for that row. ''' distances = [] for label in self.labels: joint = self.joints.get(label) distances.append([np.nan, np.nan, np.nan] if joint is None else np.array(joint.getAnchor()) - joint.getAnchor2()) return np.array(distances)
def distances(self): '''Get a list of the distances between markers and their attachments. Returns ------- distances : ndarray of shape (num-markers, 3) Array of distances for each marker joint in our attachment setup. If a marker does not currently have an associated joint (e.g. because it is not currently visible) this will contain NaN for that row. ''' distances = [] for label in self.labels: joint = self.joints.get(label) distances.append([np.nan, np.nan, np.nan] if joint is None else np.array(joint.getAnchor()) - joint.getAnchor2()) return np.array(distances)
[ "Get", "a", "list", "of", "the", "distances", "between", "markers", "and", "their", "attachments", "." ]
EmbodiedCognition/pagoda
python
https://github.com/EmbodiedCognition/pagoda/blob/8892f847026d98aba8646ecbc4589397e6dec7bd/pagoda/cooper.py#L299-L314
[ "def", "distances", "(", "self", ")", ":", "distances", "=", "[", "]", "for", "label", "in", "self", ".", "labels", ":", "joint", "=", "self", ".", "joints", ".", "get", "(", "label", ")", "distances", ".", "append", "(", "[", "np", ".", "nan", ",", "np", ".", "nan", ",", "np", ".", "nan", "]", "if", "joint", "is", "None", "else", "np", ".", "array", "(", "joint", ".", "getAnchor", "(", ")", ")", "-", "joint", ".", "getAnchor2", "(", ")", ")", "return", "np", ".", "array", "(", "distances", ")" ]
8892f847026d98aba8646ecbc4589397e6dec7bd
valid
Markers.forces
Return an array of the forces exerted by marker springs. Notes ----- The forces exerted by the marker springs can be approximated by:: F = kp * dx where ``dx`` is the current array of marker distances. An even more accurate value is computed by approximating the velocity of the spring displacement:: F = kp * dx + kd * (dx - dx_tm1) / dt where ``dx_tm1`` is an array of distances from the previous time step. Parameters ---------- dx_tm1 : ndarray An array of distances from markers to their attachment targets, measured at the previous time step. Returns ------- F : ndarray An array of forces that the markers are exerting on the skeleton.
pagoda/cooper.py
def forces(self, dx_tm1=None): '''Return an array of the forces exerted by marker springs. Notes ----- The forces exerted by the marker springs can be approximated by:: F = kp * dx where ``dx`` is the current array of marker distances. An even more accurate value is computed by approximating the velocity of the spring displacement:: F = kp * dx + kd * (dx - dx_tm1) / dt where ``dx_tm1`` is an array of distances from the previous time step. Parameters ---------- dx_tm1 : ndarray An array of distances from markers to their attachment targets, measured at the previous time step. Returns ------- F : ndarray An array of forces that the markers are exerting on the skeleton. ''' cfm = self.cfms[self._frame_no][:, None] kp = self.erp / (cfm * self.world.dt) kd = (1 - self.erp) / cfm dx = self.distances() F = kp * dx if dx_tm1 is not None: bad = np.isnan(dx) | np.isnan(dx_tm1) F[~bad] += (kd * (dx - dx_tm1) / self.world.dt)[~bad] return F
def forces(self, dx_tm1=None): '''Return an array of the forces exerted by marker springs. Notes ----- The forces exerted by the marker springs can be approximated by:: F = kp * dx where ``dx`` is the current array of marker distances. An even more accurate value is computed by approximating the velocity of the spring displacement:: F = kp * dx + kd * (dx - dx_tm1) / dt where ``dx_tm1`` is an array of distances from the previous time step. Parameters ---------- dx_tm1 : ndarray An array of distances from markers to their attachment targets, measured at the previous time step. Returns ------- F : ndarray An array of forces that the markers are exerting on the skeleton. ''' cfm = self.cfms[self._frame_no][:, None] kp = self.erp / (cfm * self.world.dt) kd = (1 - self.erp) / cfm dx = self.distances() F = kp * dx if dx_tm1 is not None: bad = np.isnan(dx) | np.isnan(dx_tm1) F[~bad] += (kd * (dx - dx_tm1) / self.world.dt)[~bad] return F
[ "Return", "an", "array", "of", "the", "forces", "exerted", "by", "marker", "springs", "." ]
EmbodiedCognition/pagoda
python
https://github.com/EmbodiedCognition/pagoda/blob/8892f847026d98aba8646ecbc4589397e6dec7bd/pagoda/cooper.py#L316-L353
[ "def", "forces", "(", "self", ",", "dx_tm1", "=", "None", ")", ":", "cfm", "=", "self", ".", "cfms", "[", "self", ".", "_frame_no", "]", "[", ":", ",", "None", "]", "kp", "=", "self", ".", "erp", "/", "(", "cfm", "*", "self", ".", "world", ".", "dt", ")", "kd", "=", "(", "1", "-", "self", ".", "erp", ")", "/", "cfm", "dx", "=", "self", ".", "distances", "(", ")", "F", "=", "kp", "*", "dx", "if", "dx_tm1", "is", "not", "None", ":", "bad", "=", "np", ".", "isnan", "(", "dx", ")", "|", "np", ".", "isnan", "(", "dx_tm1", ")", "F", "[", "~", "bad", "]", "+=", "(", "kd", "*", "(", "dx", "-", "dx_tm1", ")", "/", "self", ".", "world", ".", "dt", ")", "[", "~", "bad", "]", "return", "F" ]
8892f847026d98aba8646ecbc4589397e6dec7bd
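A sketch of the two-frame force estimate described in the Notes section, assuming markers.attach(frame_no) has already created the attachment joints:

dx_prev = markers.distances()
# ... advance the physics world by one time step here ...
F = markers.forces(dx_tm1=dx_prev)  # rows stay NaN for markers without an active attachment joint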
valid
World.load_skeleton
Create and configure a skeleton in our model. Parameters ---------- filename : str The name of a file containing skeleton configuration data. pid_params : dict, optional If given, use this dictionary to set the PID controller parameters on each joint in the skeleton. See :func:`pagoda.skeleton.pid` for more information.
pagoda/cooper.py
def load_skeleton(self, filename, pid_params=None): '''Create and configure a skeleton in our model. Parameters ---------- filename : str The name of a file containing skeleton configuration data. pid_params : dict, optional If given, use this dictionary to set the PID controller parameters on each joint in the skeleton. See :func:`pagoda.skeleton.pid` for more information. ''' self.skeleton = skeleton.Skeleton(self) self.skeleton.load(filename, color=(0.3, 0.5, 0.9, 0.8)) if pid_params: self.skeleton.set_pid_params(**pid_params) self.skeleton.erp = 0.1 self.skeleton.cfm = 0
def load_skeleton(self, filename, pid_params=None): '''Create and configure a skeleton in our model. Parameters ---------- filename : str The name of a file containing skeleton configuration data. pid_params : dict, optional If given, use this dictionary to set the PID controller parameters on each joint in the skeleton. See :func:`pagoda.skeleton.pid` for more information. ''' self.skeleton = skeleton.Skeleton(self) self.skeleton.load(filename, color=(0.3, 0.5, 0.9, 0.8)) if pid_params: self.skeleton.set_pid_params(**pid_params) self.skeleton.erp = 0.1 self.skeleton.cfm = 0
[ "Create", "and", "configure", "a", "skeleton", "in", "our", "model", "." ]
EmbodiedCognition/pagoda
python
https://github.com/EmbodiedCognition/pagoda/blob/8892f847026d98aba8646ecbc4589397e6dec7bd/pagoda/cooper.py#L411-L428
[ "def", "load_skeleton", "(", "self", ",", "filename", ",", "pid_params", "=", "None", ")", ":", "self", ".", "skeleton", "=", "skeleton", ".", "Skeleton", "(", "self", ")", "self", ".", "skeleton", ".", "load", "(", "filename", ",", "color", "=", "(", "0.3", ",", "0.5", ",", "0.9", ",", "0.8", ")", ")", "if", "pid_params", ":", "self", ".", "skeleton", ".", "set_pid_params", "(", "*", "*", "pid_params", ")", "self", ".", "skeleton", ".", "erp", "=", "0.1", "self", ".", "skeleton", ".", "cfm", "=", "0" ]
8892f847026d98aba8646ecbc4589397e6dec7bd
valid
World.load_markers
Load marker data and attachment preferences into the model. Parameters ---------- filename : str The name of a file containing marker data. This currently needs to be either a .C3D or a .CSV file. CSV files must adhere to a fairly strict column naming convention; see :func:`Markers.load_csv` for more information. attachments : str The name of a text file specifying how markers are attached to skeleton bodies. max_frames : number, optional Only read in this many frames of marker data. By default, the entire data file is read into memory. Returns ------- markers : :class:`Markers` Returns a markers object containing loaded marker data as well as skeleton attachment configuration.
pagoda/cooper.py
def load_markers(self, filename, attachments, max_frames=1e100): '''Load marker data and attachment preferences into the model. Parameters ---------- filename : str The name of a file containing marker data. This currently needs to be either a .C3D or a .CSV file. CSV files must adhere to a fairly strict column naming convention; see :func:`Markers.load_csv` for more information. attachments : str The name of a text file specifying how markers are attached to skeleton bodies. max_frames : number, optional Only read in this many frames of marker data. By default, the entire data file is read into memory. Returns ------- markers : :class:`Markers` Returns a markers object containing loaded marker data as well as skeleton attachment configuration. ''' self.markers = Markers(self) fn = filename.lower() if fn.endswith('.c3d'): self.markers.load_c3d(filename, max_frames=max_frames) elif fn.endswith('.csv') or fn.endswith('.csv.gz'): self.markers.load_csv(filename, max_frames=max_frames) else: logging.fatal('%s: not sure how to load markers!', filename) self.markers.load_attachments(attachments, self.skeleton)
def load_markers(self, filename, attachments, max_frames=1e100): '''Load marker data and attachment preferences into the model. Parameters ---------- filename : str The name of a file containing marker data. This currently needs to be either a .C3D or a .CSV file. CSV files must adhere to a fairly strict column naming convention; see :func:`Markers.load_csv` for more information. attachments : str The name of a text file specifying how markers are attached to skeleton bodies. max_frames : number, optional Only read in this many frames of marker data. By default, the entire data file is read into memory. Returns ------- markers : :class:`Markers` Returns a markers object containing loaded marker data as well as skeleton attachment configuration. ''' self.markers = Markers(self) fn = filename.lower() if fn.endswith('.c3d'): self.markers.load_c3d(filename, max_frames=max_frames) elif fn.endswith('.csv') or fn.endswith('.csv.gz'): self.markers.load_csv(filename, max_frames=max_frames) else: logging.fatal('%s: not sure how to load markers!', filename) self.markers.load_attachments(attachments, self.skeleton)
[ "Load", "marker", "data", "and", "attachment", "preferences", "into", "the", "model", "." ]
EmbodiedCognition/pagoda
python
https://github.com/EmbodiedCognition/pagoda/blob/8892f847026d98aba8646ecbc4589397e6dec7bd/pagoda/cooper.py#L430-L461
[ "def", "load_markers", "(", "self", ",", "filename", ",", "attachments", ",", "max_frames", "=", "1e100", ")", ":", "self", ".", "markers", "=", "Markers", "(", "self", ")", "fn", "=", "filename", ".", "lower", "(", ")", "if", "fn", ".", "endswith", "(", "'.c3d'", ")", ":", "self", ".", "markers", ".", "load_c3d", "(", "filename", ",", "max_frames", "=", "max_frames", ")", "elif", "fn", ".", "endswith", "(", "'.csv'", ")", "or", "fn", ".", "endswith", "(", "'.csv.gz'", ")", ":", "self", ".", "markers", ".", "load_csv", "(", "filename", ",", "max_frames", "=", "max_frames", ")", "else", ":", "logging", ".", "fatal", "(", "'%s: not sure how to load markers!'", ",", "filename", ")", "self", ".", "markers", ".", "load_attachments", "(", "attachments", ",", "self", ".", "skeleton", ")" ]
8892f847026d98aba8646ecbc4589397e6dec7bd
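A sketch tying the cooper pieces together; the file names are hypothetical and `world` is assumed to be an already-created pagoda.cooper.World, since its constructor arguments are not shown in these records:

world.load_skeleton('skeleton.txt', pid_params=dict(kp=0.6 / world.dt))
world.load_markers('trial01.c3d', 'attachments.txt', max_frames=2000)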
valid
World.step
Advance the physics world by one step. Typically this is called as part of a :class:`pagoda.viewer.Viewer`, but it can also be called manually (or some other stepping mechanism entirely can be used).
pagoda/cooper.py
def step(self, substeps=2): '''Advance the physics world by one step. Typically this is called as part of a :class:`pagoda.viewer.Viewer`, but it can also be called manually (or some other stepping mechanism entirely can be used). ''' # by default we step by following our loaded marker data. self.frame_no += 1 try: next(self.follower) except (AttributeError, StopIteration) as err: self.reset()
def step(self, substeps=2): '''Advance the physics world by one step. Typically this is called as part of a :class:`pagoda.viewer.Viewer`, but it can also be called manually (or some other stepping mechanism entirely can be used). ''' # by default we step by following our loaded marker data. self.frame_no += 1 try: next(self.follower) except (AttributeError, StopIteration) as err: self.reset()
[ "Advance", "the", "physics", "world", "by", "one", "step", "." ]
EmbodiedCognition/pagoda
python
https://github.com/EmbodiedCognition/pagoda/blob/8892f847026d98aba8646ecbc4589397e6dec7bd/pagoda/cooper.py#L463-L475
[ "def", "step", "(", "self", ",", "substeps", "=", "2", ")", ":", "# by default we step by following our loaded marker data.", "self", ".", "frame_no", "+=", "1", "try", ":", "next", "(", "self", ".", "follower", ")", "except", "(", "AttributeError", ",", "StopIteration", ")", "as", "err", ":", "self", ".", "reset", "(", ")" ]
8892f847026d98aba8646ecbc4589397e6dec7bd
valid
World.settle_to_markers
Settle the skeleton to our marker data at a specific frame. Parameters ---------- frame_no : int, optional Settle the skeleton to marker data at this frame. Defaults to 0. max_distance : float, optional The settling process will stop when the mean marker distance falls below this threshold. Defaults to 0.05m (5cm). Setting this too small prevents the settling process from converging (it will run for all max_iters iterations), and setting it too large prevents the skeleton from settling to a stable state near the markers. max_iters : int, optional Attempt to settle markers for at most this many iterations. Defaults to 300. states : list of body states, optional If given, set the bodies in our skeleton to these kinematic states before starting the settling process.
pagoda/cooper.py
def settle_to_markers(self, frame_no=0, max_distance=0.05, max_iters=300, states=None): '''Settle the skeleton to our marker data at a specific frame. Parameters ---------- frame_no : int, optional Settle the skeleton to marker data at this frame. Defaults to 0. max_distance : float, optional The settling process will stop when the mean marker distance falls below this threshold. Defaults to 0.05m (5cm). Setting this too small prevents the settling process from converging (it will run for all max_iters iterations), and setting it too large prevents the skeleton from settling to a stable state near the markers. max_iters : int, optional Attempt to settle markers for at most this many iterations. Defaults to 300. states : list of body states, optional If given, set the bodies in our skeleton to these kinematic states before starting the settling process. ''' if states is not None: self.skeleton.set_body_states(states) dist = None for _ in range(max_iters): for _ in self._step_to_marker_frame(frame_no): pass dist = np.nanmean(abs(self.markers.distances())) logging.info('settling to frame %d: marker distance %.3f', frame_no, dist) if dist < max_distance: return self.skeleton.get_body_states() for b in self.skeleton.bodies: b.linear_velocity = 0, 0, 0 b.angular_velocity = 0, 0, 0 return states
def settle_to_markers(self, frame_no=0, max_distance=0.05, max_iters=300, states=None): '''Settle the skeleton to our marker data at a specific frame. Parameters ---------- frame_no : int, optional Settle the skeleton to marker data at this frame. Defaults to 0. max_distance : float, optional The settling process will stop when the mean marker distance falls below this threshold. Defaults to 0.05m (5cm). Setting this too small prevents the settling process from converging (it will run for all max_iters iterations), and setting it too large prevents the skeleton from settling to a stable state near the markers. max_iters : int, optional Attempt to settle markers for at most this many iterations. Defaults to 300. states : list of body states, optional If given, set the bodies in our skeleton to these kinematic states before starting the settling process. ''' if states is not None: self.skeleton.set_body_states(states) dist = None for _ in range(max_iters): for _ in self._step_to_marker_frame(frame_no): pass dist = np.nanmean(abs(self.markers.distances())) logging.info('settling to frame %d: marker distance %.3f', frame_no, dist) if dist < max_distance: return self.skeleton.get_body_states() for b in self.skeleton.bodies: b.linear_velocity = 0, 0, 0 b.angular_velocity = 0, 0, 0 return states
[ "Settle", "the", "skeleton", "to", "our", "marker", "data", "at", "a", "specific", "frame", "." ]
EmbodiedCognition/pagoda
python
https://github.com/EmbodiedCognition/pagoda/blob/8892f847026d98aba8646ecbc4589397e6dec7bd/pagoda/cooper.py#L487-L521
[ "def", "settle_to_markers", "(", "self", ",", "frame_no", "=", "0", ",", "max_distance", "=", "0.05", ",", "max_iters", "=", "300", ",", "states", "=", "None", ")", ":", "if", "states", "is", "not", "None", ":", "self", ".", "skeleton", ".", "set_body_states", "(", "states", ")", "dist", "=", "None", "for", "_", "in", "range", "(", "max_iters", ")", ":", "for", "_", "in", "self", ".", "_step_to_marker_frame", "(", "frame_no", ")", ":", "pass", "dist", "=", "np", ".", "nanmean", "(", "abs", "(", "self", ".", "markers", ".", "distances", "(", ")", ")", ")", "logging", ".", "info", "(", "'settling to frame %d: marker distance %.3f'", ",", "frame_no", ",", "dist", ")", "if", "dist", "<", "max_distance", ":", "return", "self", ".", "skeleton", ".", "get_body_states", "(", ")", "for", "b", "in", "self", ".", "skeleton", ".", "bodies", ":", "b", ".", "linear_velocity", "=", "0", ",", "0", ",", "0", "b", ".", "angular_velocity", "=", "0", ",", "0", ",", "0", "return", "states" ]
8892f847026d98aba8646ecbc4589397e6dec7bd
valid
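A hedged sketch of using settle_to_markers above to obtain a stable starting pose and cache it for later runs; `world` is assumed to be a configured pagoda.cooper.World, and the pickle path is illustrative only:

import pickle

def settle_and_cache(world, path='settled_states.pkl'):
    # Settle onto frame 0 using the signature's actual defaults.
    states = world.settle_to_markers(frame_no=0, max_distance=0.05, max_iters=300)
    # Body states are assumed to be plain tuples of floats and therefore picklable.
    with open(path, 'wb') as handle:
        pickle.dump(states, handle)
    return states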
World.follow_markers
Iterate over a set of marker data, dragging its skeleton along. Parameters ---------- start : int, optional Start following marker data after this frame. Defaults to 0. end : int, optional Stop following marker data after this frame. Defaults to the end of the marker data. states : list of body states, optional If given, set the states of the skeleton bodies to these values before starting to follow the marker data.
pagoda/cooper.py
def follow_markers(self, start=0, end=1e100, states=None): '''Iterate over a set of marker data, dragging its skeleton along. Parameters ---------- start : int, optional Start following marker data after this frame. Defaults to 0. end : int, optional Stop following marker data after this frame. Defaults to the end of the marker data. states : list of body states, optional If given, set the states of the skeleton bodies to these values before starting to follow the marker data. ''' if states is not None: self.skeleton.set_body_states(states) for frame_no, frame in enumerate(self.markers): if frame_no < start: continue if frame_no >= end: break for states in self._step_to_marker_frame(frame_no): yield states
def follow_markers(self, start=0, end=1e100, states=None): '''Iterate over a set of marker data, dragging its skeleton along. Parameters ---------- start : int, optional Start following marker data after this frame. Defaults to 0. end : int, optional Stop following marker data after this frame. Defaults to the end of the marker data. states : list of body states, optional If given, set the states of the skeleton bodies to these values before starting to follow the marker data. ''' if states is not None: self.skeleton.set_body_states(states) for frame_no, frame in enumerate(self.markers): if frame_no < start: continue if frame_no >= end: break for states in self._step_to_marker_frame(frame_no): yield states
[ "Iterate", "over", "a", "set", "of", "marker", "data", "dragging", "its", "skeleton", "along", "." ]
EmbodiedCognition/pagoda
python
https://github.com/EmbodiedCognition/pagoda/blob/8892f847026d98aba8646ecbc4589397e6dec7bd/pagoda/cooper.py#L523-L545
[ "def", "follow_markers", "(", "self", ",", "start", "=", "0", ",", "end", "=", "1e100", ",", "states", "=", "None", ")", ":", "if", "states", "is", "not", "None", ":", "self", ".", "skeleton", ".", "set_body_states", "(", "states", ")", "for", "frame_no", ",", "frame", "in", "enumerate", "(", "self", ".", "markers", ")", ":", "if", "frame_no", "<", "start", ":", "continue", "if", "frame_no", ">=", "end", ":", "break", "for", "states", "in", "self", ".", "_step_to_marker_frame", "(", "frame_no", ")", ":", "yield", "states" ]
8892f847026d98aba8646ecbc4589397e6dec7bd
valid
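A short sketch of consuming the follow_markers generator above and keeping the per-frame body states; `world` is assumed to be a configured pagoda.cooper.World with marker data attached:

def record_states(world, start=0, end=1000, states=None):
    history = []
    # follow_markers yields one list of body states per frame of marker data.
    for frame_states in world.follow_markers(start=start, end=end, states=states):
        history.append(frame_states)
    return history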
World._step_to_marker_frame
Update the simulator to a specific frame of marker data. This method returns a generator of body states for the skeleton! This generator must be exhausted (e.g., by consuming this call in a for loop) for the simulator to work properly. This process involves the following steps: - Move the markers to their new location: - Detach from the skeleton - Update marker locations - Reattach to the skeleton - Detect ODE collisions - Yield the states of the bodies in the skeleton - Advance the ODE world one step Parameters ---------- frame_no : int Step to this frame of marker data. dt : float, optional Step with this time duration. Defaults to ``self.dt``. Returns ------- states : sequence of state tuples A generator of a sequence of one body state for the skeleton. This generator must be exhausted for the simulation to work properly.
pagoda/cooper.py
def _step_to_marker_frame(self, frame_no, dt=None): '''Update the simulator to a specific frame of marker data. This method returns a generator of body states for the skeleton! This generator must be exhausted (e.g., by consuming this call in a for loop) for the simulator to work properly. This process involves the following steps: - Move the markers to their new location: - Detach from the skeleton - Update marker locations - Reattach to the skeleton - Detect ODE collisions - Yield the states of the bodies in the skeleton - Advance the ODE world one step Parameters ---------- frame_no : int Step to this frame of marker data. dt : float, optional Step with this time duration. Defaults to ``self.dt``. Returns ------- states : sequence of state tuples A generator of a sequence of one body state for the skeleton. This generator must be exhausted for the simulation to work properly. ''' # update the positions and velocities of the markers. self.markers.detach() self.markers.reposition(frame_no) self.markers.attach(frame_no) # detect collisions. self.ode_space.collide(None, self.on_collision) # record the state of each skeleton body. states = self.skeleton.get_body_states() self.skeleton.set_body_states(states) # yield the current simulation state to our caller. yield states # update the ode world. self.ode_world.step(dt or self.dt) # clear out contact joints to prepare for the next frame. self.ode_contactgroup.empty()
def _step_to_marker_frame(self, frame_no, dt=None): '''Update the simulator to a specific frame of marker data. This method returns a generator of body states for the skeleton! This generator must be exhausted (e.g., by consuming this call in a for loop) for the simulator to work properly. This process involves the following steps: - Move the markers to their new location: - Detach from the skeleton - Update marker locations - Reattach to the skeleton - Detect ODE collisions - Yield the states of the bodies in the skeleton - Advance the ODE world one step Parameters ---------- frame_no : int Step to this frame of marker data. dt : float, optional Step with this time duration. Defaults to ``self.dt``. Returns ------- states : sequence of state tuples A generator of a sequence of one body state for the skeleton. This generator must be exhausted for the simulation to work properly. ''' # update the positions and velocities of the markers. self.markers.detach() self.markers.reposition(frame_no) self.markers.attach(frame_no) # detect collisions. self.ode_space.collide(None, self.on_collision) # record the state of each skeleton body. states = self.skeleton.get_body_states() self.skeleton.set_body_states(states) # yield the current simulation state to our caller. yield states # update the ode world. self.ode_world.step(dt or self.dt) # clear out contact joints to prepare for the next frame. self.ode_contactgroup.empty()
[ "Update", "the", "simulator", "to", "a", "specific", "frame", "of", "marker", "data", "." ]
EmbodiedCognition/pagoda
python
https://github.com/EmbodiedCognition/pagoda/blob/8892f847026d98aba8646ecbc4589397e6dec7bd/pagoda/cooper.py#L547-L596
[ "def", "_step_to_marker_frame", "(", "self", ",", "frame_no", ",", "dt", "=", "None", ")", ":", "# update the positions and velocities of the markers.", "self", ".", "markers", ".", "detach", "(", ")", "self", ".", "markers", ".", "reposition", "(", "frame_no", ")", "self", ".", "markers", ".", "attach", "(", "frame_no", ")", "# detect collisions.", "self", ".", "ode_space", ".", "collide", "(", "None", ",", "self", ".", "on_collision", ")", "# record the state of each skeleton body.", "states", "=", "self", ".", "skeleton", ".", "get_body_states", "(", ")", "self", ".", "skeleton", ".", "set_body_states", "(", "states", ")", "# yield the current simulation state to our caller.", "yield", "states", "# update the ode world.", "self", ".", "ode_world", ".", "step", "(", "dt", "or", "self", ".", "dt", ")", "# clear out contact joints to prepare for the next frame.", "self", ".", "ode_contactgroup", ".", "empty", "(", ")" ]
8892f847026d98aba8646ecbc4589397e6dec7bd
valid
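The docstring above stresses that the generator must be exhausted; a minimal sketch of that pattern follows (the method is private, so calling it directly is shown only for illustration):

def step_frame(world, frame_no):
    last_states = None
    # The ODE step happens after the single yield, so the loop must run to
    # completion for the simulation to actually advance.
    for last_states in world._step_to_marker_frame(frame_no):
        pass
    return last_states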
World.inverse_kinematics
Follow a set of marker data, yielding kinematic joint angles. Parameters ---------- start : int, optional Start following marker data after this frame. Defaults to 0. end : int, optional Stop following marker data after this frame. Defaults to the end of the marker data. states : list of body states, optional If given, set the states of the skeleton bodies to these values before starting to follow the marker data. max_force : float, optional Allow each degree of freedom in the skeleton to exert at most this force when attempting to maintain its equilibrium position. This defaults to 20N. Set this value higher to simulate a stiff skeleton while following marker data. Returns ------- angles : sequence of angle frames Returns a generator of joint angle data for the skeleton. One set of joint angles will be generated for each frame of marker data between `start` and `end`.
pagoda/cooper.py
def inverse_kinematics(self, start=0, end=1e100, states=None, max_force=20): '''Follow a set of marker data, yielding kinematic joint angles. Parameters ---------- start : int, optional Start following marker data after this frame. Defaults to 0. end : int, optional Stop following marker data after this frame. Defaults to the end of the marker data. states : list of body states, optional If given, set the states of the skeleton bodies to these values before starting to follow the marker data. max_force : float, optional Allow each degree of freedom in the skeleton to exert at most this force when attempting to maintain its equilibrium position. This defaults to 20N. Set this value higher to simulate a stiff skeleton while following marker data. Returns ------- angles : sequence of angle frames Returns a generator of joint angle data for the skeleton. One set of joint angles will be generated for each frame of marker data between `start` and `end`. ''' zeros = None if max_force > 0: self.skeleton.enable_motors(max_force) zeros = np.zeros(self.skeleton.num_dofs) for _ in self.follow_markers(start, end, states): if zeros is not None: self.skeleton.set_target_angles(zeros) yield self.skeleton.joint_angles
def inverse_kinematics(self, start=0, end=1e100, states=None, max_force=20): '''Follow a set of marker data, yielding kinematic joint angles. Parameters ---------- start : int, optional Start following marker data after this frame. Defaults to 0. end : int, optional Stop following marker data after this frame. Defaults to the end of the marker data. states : list of body states, optional If given, set the states of the skeleton bodies to these values before starting to follow the marker data. max_force : float, optional Allow each degree of freedom in the skeleton to exert at most this force when attempting to maintain its equilibrium position. This defaults to 20N. Set this value higher to simulate a stiff skeleton while following marker data. Returns ------- angles : sequence of angle frames Returns a generator of joint angle data for the skeleton. One set of joint angles will be generated for each frame of marker data between `start` and `end`. ''' zeros = None if max_force > 0: self.skeleton.enable_motors(max_force) zeros = np.zeros(self.skeleton.num_dofs) for _ in self.follow_markers(start, end, states): if zeros is not None: self.skeleton.set_target_angles(zeros) yield self.skeleton.joint_angles
[ "Follow", "a", "set", "of", "marker", "data", "yielding", "kinematic", "joint", "angles", "." ]
EmbodiedCognition/pagoda
python
https://github.com/EmbodiedCognition/pagoda/blob/8892f847026d98aba8646ecbc4589397e6dec7bd/pagoda/cooper.py#L598-L631
[ "def", "inverse_kinematics", "(", "self", ",", "start", "=", "0", ",", "end", "=", "1e100", ",", "states", "=", "None", ",", "max_force", "=", "20", ")", ":", "zeros", "=", "None", "if", "max_force", ">", "0", ":", "self", ".", "skeleton", ".", "enable_motors", "(", "max_force", ")", "zeros", "=", "np", ".", "zeros", "(", "self", ".", "skeleton", ".", "num_dofs", ")", "for", "_", "in", "self", ".", "follow_markers", "(", "start", ",", "end", ",", "states", ")", ":", "if", "zeros", "is", "not", "None", ":", "self", ".", "skeleton", ".", "set_target_angles", "(", "zeros", ")", "yield", "self", ".", "skeleton", ".", "joint_angles" ]
8892f847026d98aba8646ecbc4589397e6dec7bd
valid
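A sketch of stacking the joint angles yielded by inverse_kinematics above into a single array; `world` is assumed to be a configured pagoda.cooper.World, and numpy is used only for stacking:

import numpy as np

def compute_angles(world, start=0, end=1000, max_force=20):
    frames = [np.asarray(angles) for angles in
              world.inverse_kinematics(start=start, end=end, max_force=max_force)]
    if not frames:
        return np.empty((0, world.skeleton.num_dofs))
    return np.vstack(frames)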
World.inverse_dynamics
Follow a set of angle data, yielding dynamic joint torques. Parameters ---------- angles : ndarray (num-frames x num-dofs) Follow angle data provided by this array of angle values. start : int, optional Start following angle data after this frame. Defaults to the start of the angle data. end : int, optional Stop following angle data after this frame. Defaults to the end of the angle data. states : list of body states, optional If given, set the states of the skeleton bodies to these values before starting to follow the marker data. max_force : float, optional Allow each degree of freedom in the skeleton to exert at most this force when attempting to follow the given joint angles. Defaults to 100N. Setting this value to be large results in more accurate following but can cause oscillations in the PID controllers, resulting in noisy torques. Returns ------- torques : sequence of torque frames Returns a generator of joint torque data for the skeleton. One set of joint torques will be generated for each frame of angle data between `start` and `end`.
pagoda/cooper.py
def inverse_dynamics(self, angles, start=0, end=1e100, states=None, max_force=100): '''Follow a set of angle data, yielding dynamic joint torques. Parameters ---------- angles : ndarray (num-frames x num-dofs) Follow angle data provided by this array of angle values. start : int, optional Start following angle data after this frame. Defaults to the start of the angle data. end : int, optional Stop following angle data after this frame. Defaults to the end of the angle data. states : list of body states, optional If given, set the states of the skeleton bodies to these values before starting to follow the marker data. max_force : float, optional Allow each degree of freedom in the skeleton to exert at most this force when attempting to follow the given joint angles. Defaults to 100N. Setting this value to be large results in more accurate following but can cause oscillations in the PID controllers, resulting in noisy torques. Returns ------- torques : sequence of torque frames Returns a generator of joint torque data for the skeleton. One set of joint torques will be generated for each frame of angle data between `start` and `end`. ''' if states is not None: self.skeleton.set_body_states(states) for frame_no, frame in enumerate(angles): if frame_no < start: continue if frame_no >= end: break self.ode_space.collide(None, self.on_collision) states = self.skeleton.get_body_states() self.skeleton.set_body_states(states) # joseph's stability fix: step to compute torques, then reset the # skeleton to the start of the step, and then step using computed # torques. thus any numerical errors between the body states after # stepping using angle constraints will be removed, because we # will be stepping the model using the computed torques. self.skeleton.enable_motors(max_force) self.skeleton.set_target_angles(angles[frame_no]) self.ode_world.step(self.dt) torques = self.skeleton.joint_torques self.skeleton.disable_motors() self.skeleton.set_body_states(states) self.skeleton.add_torques(torques) yield torques self.ode_world.step(self.dt) self.ode_contactgroup.empty()
def inverse_dynamics(self, angles, start=0, end=1e100, states=None, max_force=100): '''Follow a set of angle data, yielding dynamic joint torques. Parameters ---------- angles : ndarray (num-frames x num-dofs) Follow angle data provided by this array of angle values. start : int, optional Start following angle data after this frame. Defaults to the start of the angle data. end : int, optional Stop following angle data after this frame. Defaults to the end of the angle data. states : list of body states, optional If given, set the states of the skeleton bodies to these values before starting to follow the marker data. max_force : float, optional Allow each degree of freedom in the skeleton to exert at most this force when attempting to follow the given joint angles. Defaults to 100N. Setting this value to be large results in more accurate following but can cause oscillations in the PID controllers, resulting in noisy torques. Returns ------- torques : sequence of torque frames Returns a generator of joint torque data for the skeleton. One set of joint torques will be generated for each frame of angle data between `start` and `end`. ''' if states is not None: self.skeleton.set_body_states(states) for frame_no, frame in enumerate(angles): if frame_no < start: continue if frame_no >= end: break self.ode_space.collide(None, self.on_collision) states = self.skeleton.get_body_states() self.skeleton.set_body_states(states) # joseph's stability fix: step to compute torques, then reset the # skeleton to the start of the step, and then step using computed # torques. thus any numerical errors between the body states after # stepping using angle constraints will be removed, because we # will be stepping the model using the computed torques. self.skeleton.enable_motors(max_force) self.skeleton.set_target_angles(angles[frame_no]) self.ode_world.step(self.dt) torques = self.skeleton.joint_torques self.skeleton.disable_motors() self.skeleton.set_body_states(states) self.skeleton.add_torques(torques) yield torques self.ode_world.step(self.dt) self.ode_contactgroup.empty()
[ "Follow", "a", "set", "of", "angle", "data", "yielding", "dynamic", "joint", "torques", "." ]
EmbodiedCognition/pagoda
python
https://github.com/EmbodiedCognition/pagoda/blob/8892f847026d98aba8646ecbc4589397e6dec7bd/pagoda/cooper.py#L633-L694
[ "def", "inverse_dynamics", "(", "self", ",", "angles", ",", "start", "=", "0", ",", "end", "=", "1e100", ",", "states", "=", "None", ",", "max_force", "=", "100", ")", ":", "if", "states", "is", "not", "None", ":", "self", ".", "skeleton", ".", "set_body_states", "(", "states", ")", "for", "frame_no", ",", "frame", "in", "enumerate", "(", "angles", ")", ":", "if", "frame_no", "<", "start", ":", "continue", "if", "frame_no", ">=", "end", ":", "break", "self", ".", "ode_space", ".", "collide", "(", "None", ",", "self", ".", "on_collision", ")", "states", "=", "self", ".", "skeleton", ".", "get_body_states", "(", ")", "self", ".", "skeleton", ".", "set_body_states", "(", "states", ")", "# joseph's stability fix: step to compute torques, then reset the", "# skeleton to the start of the step, and then step using computed", "# torques. thus any numerical errors between the body states after", "# stepping using angle constraints will be removed, because we", "# will be stepping the model using the computed torques.", "self", ".", "skeleton", ".", "enable_motors", "(", "max_force", ")", "self", ".", "skeleton", ".", "set_target_angles", "(", "angles", "[", "frame_no", "]", ")", "self", ".", "ode_world", ".", "step", "(", "self", ".", "dt", ")", "torques", "=", "self", ".", "skeleton", ".", "joint_torques", "self", ".", "skeleton", ".", "disable_motors", "(", ")", "self", ".", "skeleton", ".", "set_body_states", "(", "states", ")", "self", ".", "skeleton", ".", "add_torques", "(", "torques", ")", "yield", "torques", "self", ".", "ode_world", ".", "step", "(", "self", ".", "dt", ")", "self", ".", "ode_contactgroup", ".", "empty", "(", ")" ]
8892f847026d98aba8646ecbc4589397e6dec7bd
valid
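A hedged sketch that chains settling, inverse kinematics and inverse dynamics in the order the docstrings describe; all pagoda objects are assumed to be configured elsewhere, and the frame range is illustrative:

import numpy as np

def angles_then_torques(world, start=0, end=1000):
    # Settle to the first frame so both passes start from the same pose.
    states = world.settle_to_markers(frame_no=start)
    angles = np.vstack([np.asarray(a) for a in
                        world.inverse_kinematics(start=start, end=end, states=states)])
    torques = np.vstack([np.asarray(t) for t in
                         world.inverse_dynamics(angles, states=states, max_force=100)])
    return angles, torques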
World.forward_dynamics
Move the body according to a set of torque data.
pagoda/cooper.py
def forward_dynamics(self, torques, start=0, end=1e100, states=None): '''Move the body according to a set of torque data.''' if states is not None: self.skeleton.set_body_states(states) for frame_no, torque in enumerate(torques): if frame_no < start: continue if frame_no >= end: break self.ode_space.collide(None, self.on_collision) self.skeleton.add_torques(torque) self.ode_world.step(self.dt) yield self.ode_contactgroup.empty()
def forward_dynamics(self, torques, start=0, end=1e100, states=None): '''Move the body according to a set of torque data.''' if states is not None: self.skeleton.set_body_states(states) for frame_no, torque in enumerate(torques): if frame_no < start: continue if frame_no >= end: break self.ode_space.collide(None, self.on_collision) self.skeleton.add_torques(torque) self.ode_world.step(self.dt) yield self.ode_contactgroup.empty()
[ "Move", "the", "body", "according", "to", "a", "set", "of", "torque", "data", "." ]
EmbodiedCognition/pagoda
python
https://github.com/EmbodiedCognition/pagoda/blob/8892f847026d98aba8646ecbc4589397e6dec7bd/pagoda/cooper.py#L696-L709
[ "def", "forward_dynamics", "(", "self", ",", "torques", ",", "start", "=", "0", ",", "states", "=", "None", ")", ":", "if", "states", "is", "not", "None", ":", "self", ".", "skeleton", ".", "set_body_states", "(", "states", ")", "for", "frame_no", ",", "torque", "in", "enumerate", "(", "torques", ")", ":", "if", "frame_no", "<", "start", ":", "continue", "if", "frame_no", ">=", "end", ":", "break", "self", ".", "ode_space", ".", "collide", "(", "None", ",", "self", ".", "on_collision", ")", "self", ".", "skeleton", ".", "add_torques", "(", "torque", ")", "self", ".", "ode_world", ".", "step", "(", "self", ".", "dt", ")", "yield", "self", ".", "ode_contactgroup", ".", "empty", "(", ")" ]
8892f847026d98aba8646ecbc4589397e6dec7bd
valid
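A small sketch of replaying previously computed torques through forward_dynamics above; `torques` is any iterable of per-frame torque vectors (for example the output of inverse_dynamics):

def replay(world, torques, states=None):
    frames = 0
    # forward_dynamics yields once per simulated frame; the yielded values are
    # not used here, only the side effect of stepping the world.
    for _ in world.forward_dynamics(torques, states=states):
        frames += 1
    return frames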
resorted
Sort values, but put numbers after alphabetically sorted words. This function is here to make outputs diff-compatible with Aleph. Example:: >>> sorted(["b", "1", "a"]) ['1', 'a', 'b'] >>> resorted(["b", "1", "a"]) ['a', 'b', '1'] Args: values (iterable): any iterable object/list/tuple/whatever. Returns: list of sorted values, but with numbers after words
src/marcxml_parser/tools/resorted.py
def resorted(values): """ Sort values, but put numbers after alphabetically sorted words. This function is here to make outputs diff-compatible with Aleph. Example:: >>> sorted(["b", "1", "a"]) ['1', 'a', 'b'] >>> resorted(["b", "1", "a"]) ['a', 'b', '1'] Args: values (iterable): any iterable object/list/tuple/whatever. Returns: list of sorted values, but with numbers after words """ if not values: return values values = sorted(values) # look for first word first_word = next( (cnt for cnt, val in enumerate(values) if val and not val[0].isdigit()), None ) # if not found, just return the values if first_word is None: return values words = values[first_word:] numbers = values[:first_word] return words + numbers
def resorted(values): """ Sort values, but put numbers after alphabetically sorted words. This function is here to make outputs diff-compatible with Aleph. Example:: >>> sorted(["b", "1", "a"]) ['1', 'a', 'b'] >>> resorted(["b", "1", "a"]) ['a', 'b', '1'] Args: values (iterable): any iterable object/list/tuple/whatever. Returns: list of sorted values, but with numbers after words """ if not values: return values values = sorted(values) # look for first word first_word = next( (cnt for cnt, val in enumerate(values) if val and not val[0].isdigit()), None ) # if not found, just return the values if first_word is None: return values words = values[first_word:] numbers = values[:first_word] return words + numbers
[ "Sort", "values", "but", "put", "numbers", "after", "alphabetically", "sorted", "words", "." ]
edeposit/marcxml_parser
python
https://github.com/edeposit/marcxml_parser/blob/6d1c77c61fc2827b71f1b3d5aa3332d7f5807820/src/marcxml_parser/tools/resorted.py#L10-L47
[ "def", "resorted", "(", "values", ")", ":", "if", "not", "values", ":", "return", "values", "values", "=", "sorted", "(", "values", ")", "# look for first word", "first_word", "=", "next", "(", "(", "cnt", "for", "cnt", ",", "val", "in", "enumerate", "(", "values", ")", "if", "val", "and", "not", "val", "[", "0", "]", ".", "isdigit", "(", ")", ")", ",", "None", ")", "# if not found, just return the values", "if", "first_word", "is", "None", ":", "return", "values", "words", "=", "values", "[", "first_word", ":", "]", "numbers", "=", "values", "[", ":", "first_word", "]", "return", "words", "+", "numbers" ]
6d1c77c61fc2827b71f1b3d5aa3332d7f5807820
valid
Viewer.render
Draw all bodies in the world.
pagoda/viewer.py
def render(self, dt): '''Draw all bodies in the world.''' for frame in self._frozen: for body in frame: self.draw_body(body) for body in self.world.bodies: self.draw_body(body) if hasattr(self.world, 'markers'): # draw line between anchor1 and anchor2 for marker joints. window.glColor4f(0.9, 0.1, 0.1, 0.9) window.glLineWidth(3) for j in self.world.markers.joints.values(): window.glBegin(window.GL_LINES) window.glVertex3f(*j.getAnchor()) window.glVertex3f(*j.getAnchor2()) window.glEnd()
def render(self, dt): '''Draw all bodies in the world.''' for frame in self._frozen: for body in frame: self.draw_body(body) for body in self.world.bodies: self.draw_body(body) if hasattr(self.world, 'markers'): # draw line between anchor1 and anchor2 for marker joints. window.glColor4f(0.9, 0.1, 0.1, 0.9) window.glLineWidth(3) for j in self.world.markers.joints.values(): window.glBegin(window.GL_LINES) window.glVertex3f(*j.getAnchor()) window.glVertex3f(*j.getAnchor2()) window.glEnd()
[ "Draw", "all", "bodies", "in", "the", "world", "." ]
EmbodiedCognition/pagoda
python
https://github.com/EmbodiedCognition/pagoda/blob/8892f847026d98aba8646ecbc4589397e6dec7bd/pagoda/viewer.py#L94-L110
[ "def", "render", "(", "self", ",", "dt", ")", ":", "for", "frame", "in", "self", ".", "_frozen", ":", "for", "body", "in", "frame", ":", "self", ".", "draw_body", "(", "body", ")", "for", "body", "in", "self", ".", "world", ".", "bodies", ":", "self", ".", "draw_body", "(", "body", ")", "if", "hasattr", "(", "self", ".", "world", ",", "'markers'", ")", ":", "# draw line between anchor1 and anchor2 for marker joints.", "window", ".", "glColor4f", "(", "0.9", ",", "0.1", ",", "0.1", ",", "0.9", ")", "window", ".", "glLineWidth", "(", "3", ")", "for", "j", "in", "self", ".", "world", ".", "markers", ".", "joints", ".", "values", "(", ")", ":", "window", ".", "glBegin", "(", "window", ".", "GL_LINES", ")", "window", ".", "glVertex3f", "(", "*", "j", ".", "getAnchor", "(", ")", ")", "window", ".", "glVertex3f", "(", "*", "j", ".", "getAnchor2", "(", ")", ")", "window", ".", "glEnd", "(", ")" ]
8892f847026d98aba8646ecbc4589397e6dec7bd
valid
Room.get_stream
Get room stream to listen for messages. Kwargs: error_callback (func): Callback to call when an error occurs (parameters: exception) live (bool): If True, issue a live stream, otherwise an offline stream Returns: :class:`Stream`. Stream
pyfire/room.py
def get_stream(self, error_callback=None, live=True): """ Get room stream to listen for messages. Kwargs: error_callback (func): Callback to call when an error occurred (parameters: exception) live (bool): If True, issue a live stream, otherwise an offline stream Returns: :class:`Stream`. Stream """ self.join() return Stream(self, error_callback=error_callback, live=live)
def get_stream(self, error_callback=None, live=True): """ Get room stream to listen for messages. Kwargs: error_callback (func): Callback to call when an error occurred (parameters: exception) live (bool): If True, issue a live stream, otherwise an offline stream Returns: :class:`Stream`. Stream """ self.join() return Stream(self, error_callback=error_callback, live=live)
[ "Get", "room", "stream", "to", "listen", "for", "messages", "." ]
mariano/pyfire
python
https://github.com/mariano/pyfire/blob/42e3490c138abc8e10f2e9f8f8f3b40240a80412/pyfire/room.py#L30-L41
[ "def", "get_stream", "(", "self", ",", "error_callback", "=", "None", ",", "live", "=", "True", ")", ":", "self", ".", "join", "(", ")", "return", "Stream", "(", "self", ",", "error_callback", "=", "error_callback", ",", "live", "=", "live", ")" ]
42e3490c138abc8e10f2e9f8f8f3b40240a80412
valid
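A hedged sketch of opening a live stream for a room; `room` is assumed to be an already-obtained pyfire.room.Room, and what to do with the returned Stream afterwards is outside this record, so only get_stream itself is exercised:

def open_live_stream(room):
    def on_error(exception):
        # Called by the stream machinery when something goes wrong.
        print('stream error in room %s: %s' % (room.id, exception))

    # get_stream() joins the room first, then builds the Stream object.
    return room.get_stream(error_callback=on_error, live=True)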
Room.get_users
Get list of users in the room. Kwargs: sort (bool): If True, sort users by name Returns: array. List of users
pyfire/room.py
def get_users(self, sort=True): """ Get list of users in the room. Kwargs: sort (bool): If True, sort rooms by name Returns: array. List of users """ self._load() if sort: self.users.sort(key=operator.itemgetter("name")) return self.users
def get_users(self, sort=True): """ Get list of users in the room. Kwargs: sort (bool): If True, sort rooms by name Returns: array. List of users """ self._load() if sort: self.users.sort(key=operator.itemgetter("name")) return self.users
[ "Get", "list", "of", "users", "in", "the", "room", "." ]
mariano/pyfire
python
https://github.com/mariano/pyfire/blob/42e3490c138abc8e10f2e9f8f8f3b40240a80412/pyfire/room.py#L51-L63
[ "def", "get_users", "(", "self", ",", "sort", "=", "True", ")", ":", "self", ".", "_load", "(", ")", "if", "sort", ":", "self", ".", "users", ".", "sort", "(", "key", "=", "operator", ".", "itemgetter", "(", "\"name\"", ")", ")", "return", "self", ".", "users" ]
42e3490c138abc8e10f2e9f8f8f3b40240a80412
valid
Room.recent
Recent messages. Kwargs: message_id (int): If specified, return messages since the specified message ID limit (int): If specified, limit the number of messages Returns: array. Messages
pyfire/room.py
def recent(self, message_id=None, limit=None): """ Recent messages. Kwargs: message_id (int): If specified, return messages since the specified message ID limit (int): If specified, limit the number of messages Returns: array. Messages """ parameters = {} if message_id: parameters["since_message_id"] = message_id if limit: parameters["limit"] = limit messages = self._connection.get("room/%s/recent" % self.id, key="messages", parameters=parameters) if messages: messages = [Message(self._campfire, message) for message in messages] return messages
def recent(self, message_id=None, limit=None): """ Recent messages. Kwargs: message_id (int): If specified, return messages since the specified message ID limit (int): If specified, limit the number of messages Returns: array. Messages """ parameters = {} if message_id: parameters["since_message_id"] = message_id if limit: parameters["limit"] = limit messages = self._connection.get("room/%s/recent" % self.id, key="messages", parameters=parameters) if messages: messages = [Message(self._campfire, message) for message in messages] return messages
[ "Recent", "messages", "." ]
mariano/pyfire
python
https://github.com/mariano/pyfire/blob/42e3490c138abc8e10f2e9f8f8f3b40240a80412/pyfire/room.py#L89-L107
[ "def", "recent", "(", "self", ",", "message_id", "=", "None", ",", "limit", "=", "None", ")", ":", "parameters", "=", "{", "}", "if", "message_id", ":", "parameters", "[", "\"since_message_id\"", "]", "=", "message_id", "if", "limit", ":", "parameters", "[", "\"limit\"", "]", "=", "limit", "messages", "=", "self", ".", "_connection", ".", "get", "(", "\"room/%s/recent\"", "%", "self", ".", "id", ",", "key", "=", "\"messages\"", ",", "parameters", "=", "parameters", ")", "if", "messages", ":", "messages", "=", "[", "Message", "(", "self", ".", "_campfire", ",", "message", ")", "for", "message", "in", "messages", "]", "return", "messages" ]
42e3490c138abc8e10f2e9f8f8f3b40240a80412
valid
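A sketch of polling a room for messages newer than the last one seen; it assumes Message objects expose an `id` attribute, which is not shown in this record:

def poll_new_messages(room, last_id=None, limit=50):
    messages = room.recent(message_id=last_id, limit=limit) or []
    newest_id = messages[-1].id if messages else last_id  # `id` attribute is assumed
    return messages, newest_id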
Room.set_name
Set the room name. Args: name (str): Name Returns: bool. Success
pyfire/room.py
def set_name(self, name): """ Set the room name. Args: name (str): Name Returns: bool. Success """ if not self._campfire.get_user().admin: return False result = self._connection.put("room/%s" % self.id, {"room": {"name": name}}) if result["success"]: self._load() return result["success"]
def set_name(self, name): """ Set the room name. Args: name (str): Name Returns: bool. Success """ if not self._campfire.get_user().admin: return False result = self._connection.put("room/%s" % self.id, {"room": {"name": name}}) if result["success"]: self._load() return result["success"]
[ "Set", "the", "room", "name", "." ]
mariano/pyfire
python
https://github.com/mariano/pyfire/blob/42e3490c138abc8e10f2e9f8f8f3b40240a80412/pyfire/room.py#L109-L124
[ "def", "set_name", "(", "self", ",", "name", ")", ":", "if", "not", "self", ".", "_campfire", ".", "get_user", "(", ")", ".", "admin", ":", "return", "False", "result", "=", "self", ".", "_connection", ".", "put", "(", "\"room/%s\"", "%", "self", ".", "id", ",", "{", "\"room\"", ":", "{", "\"name\"", ":", "name", "}", "}", ")", "if", "result", "[", "\"success\"", "]", ":", "self", ".", "_load", "(", ")", "return", "result", "[", "\"success\"", "]" ]
42e3490c138abc8e10f2e9f8f8f3b40240a80412
valid
Room.set_topic
Set the room topic. Args: topic (str): Topic Returns: bool. Success
pyfire/room.py
def set_topic(self, topic): """ Set the room topic. Args: topic (str): Topic Returns: bool. Success """ if not topic: topic = '' result = self._connection.put("room/%s" % self.id, {"room": {"topic": topic}}) if result["success"]: self._load() return result["success"]
def set_topic(self, topic): """ Set the room topic. Args: topic (str): Topic Returns: bool. Success """ if not topic: topic = '' result = self._connection.put("room/%s" % self.id, {"room": {"topic": topic}}) if result["success"]: self._load() return result["success"]
[ "Set", "the", "room", "topic", "." ]
mariano/pyfire
python
https://github.com/mariano/pyfire/blob/42e3490c138abc8e10f2e9f8f8f3b40240a80412/pyfire/room.py#L126-L141
[ "def", "set_topic", "(", "self", ",", "topic", ")", ":", "if", "not", "topic", ":", "topic", "=", "''", "result", "=", "self", ".", "_connection", ".", "put", "(", "\"room/%s\"", "%", "self", ".", "id", ",", "{", "\"room\"", ":", "{", "\"topic\"", ":", "topic", "}", "}", ")", "if", "result", "[", "\"success\"", "]", ":", "self", ".", "_load", "(", ")", "return", "result", "[", "\"success\"", "]" ]
42e3490c138abc8e10f2e9f8f8f3b40240a80412
valid
Room.speak
Post a message. Args: message (:class:`Message` or string): Message Returns: :class:`Message`. The posted message on success, False otherwise
pyfire/room.py
def speak(self, message): """ Post a message. Args: message (:class:`Message` or string): Message Returns: bool. Success """ campfire = self.get_campfire() if not isinstance(message, Message): message = Message(campfire, message) result = self._connection.post( "room/%s/speak" % self.id, {"message": message.get_data()}, parse_data=True, key="message" ) if result["success"]: return Message(campfire, result["data"]) return result["success"]
def speak(self, message): """ Post a message. Args: message (:class:`Message` or string): Message Returns: bool. Success """ campfire = self.get_campfire() if not isinstance(message, Message): message = Message(campfire, message) result = self._connection.post( "room/%s/speak" % self.id, {"message": message.get_data()}, parse_data=True, key="message" ) if result["success"]: return Message(campfire, result["data"]) return result["success"]
[ "Post", "a", "message", "." ]
mariano/pyfire
python
https://github.com/mariano/pyfire/blob/42e3490c138abc8e10f2e9f8f8f3b40240a80412/pyfire/room.py#L143-L165
[ "def", "speak", "(", "self", ",", "message", ")", ":", "campfire", "=", "self", ".", "get_campfire", "(", ")", "if", "not", "isinstance", "(", "message", ",", "Message", ")", ":", "message", "=", "Message", "(", "campfire", ",", "message", ")", "result", "=", "self", ".", "_connection", ".", "post", "(", "\"room/%s/speak\"", "%", "self", ".", "id", ",", "{", "\"message\"", ":", "message", ".", "get_data", "(", ")", "}", ",", "parse_data", "=", "True", ",", "key", "=", "\"message\"", ")", "if", "result", "[", "\"success\"", "]", ":", "return", "Message", "(", "campfire", ",", "result", "[", "\"data\"", "]", ")", "return", "result", "[", "\"success\"", "]" ]
42e3490c138abc8e10f2e9f8f8f3b40240a80412
valid
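A short sketch around speak above: on success it returns a Message object, otherwise a falsy value, so a truthiness check covers both outcomes; `room` is assumed to be a joined pyfire.room.Room:

def say(room, text):
    posted = room.speak(text)
    if not posted:
        raise RuntimeError('could not post message to room %s' % room.id)
    return posted  # the Message instance built from the API response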
Room.transcript
Transcript of messages. Kwargs: for_date (date): If specified, get the transcript for this specific date Returns: array. Messages
pyfire/room.py
def transcript(self, for_date=None): """ Recent messages. Kwargs: for_date (date): If specified, get the transcript for this specific date Returns: array. Messages """ url = "room/%s/transcript" % self.id if for_date: url = "%s/%d/%d/%d" % (url, for_date.year, for_date.month, for_date.day) messages = self._connection.get(url, key="messages") if messages: messages = [Message(self._campfire, message) for message in messages] return messages
def transcript(self, for_date=None): """ Recent messages. Kwargs: for_date (date): If specified, get the transcript for this specific date Returns: array. Messages """ url = "room/%s/transcript" % self.id if for_date: url = "%s/%d/%d/%d" % (url, for_date.year, for_date.month, for_date.day) messages = self._connection.get(url, key="messages") if messages: messages = [Message(self._campfire, message) for message in messages] return messages
[ "Recent", "messages", "." ]
mariano/pyfire
python
https://github.com/mariano/pyfire/blob/42e3490c138abc8e10f2e9f8f8f3b40240a80412/pyfire/room.py#L167-L182
[ "def", "transcript", "(", "self", ",", "for_date", "=", "None", ")", ":", "url", "=", "\"room/%s/transcript\"", "%", "self", ".", "id", "if", "for_date", ":", "url", "=", "\"%s/%d/%d/%d\"", "%", "(", "url", ",", "for_date", ".", "year", ",", "for_date", ".", "month", ",", "for_date", ".", "day", ")", "messages", "=", "self", ".", "_connection", ".", "get", "(", "url", ",", "key", "=", "\"messages\"", ")", "if", "messages", ":", "messages", "=", "[", "Message", "(", "self", ".", "_campfire", ",", "message", ")", "for", "message", "in", "messages", "]", "return", "messages" ]
42e3490c138abc8e10f2e9f8f8f3b40240a80412
valid
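A sketch of fetching yesterday's transcript; only standard-library date arithmetic is added around the method shown above:

import datetime

def yesterdays_transcript(room):
    yesterday = datetime.date.today() - datetime.timedelta(days=1)
    # transcript() may return a falsy value when there are no messages.
    return room.transcript(for_date=yesterday) or []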
Room.upload
Create a new thread to upload a file (the thread should then be started with start() to perform the upload). Args: path (str): Path to file Kwargs: progress_callback (func): Callback to call as the file is uploaded (parameters: current, total) finished_callback (func): Callback to call when the upload is finished error_callback (func): Callback to call when an error occurs (parameters: exception) Returns: :class:`Upload`. Upload thread
pyfire/room.py
def upload(self, path, progress_callback=None, finished_callback=None, error_callback=None): """ Create a new thread to upload a file (thread should be then started with start() to perform upload.) Args: path (str): Path to file Kwargs: progress_callback (func): Callback to call as file is uploaded (parameters: current, total) finished_callback (func): Callback to call when upload is finished error_callback (func): Callback to call when an error occurred (parameters: exception) Returns: :class:`Upload`. Upload thread """ return Upload( self, {"upload": path}, progress_callback = progress_callback, finished_callback = finished_callback, error_callback = error_callback )
def upload(self, path, progress_callback=None, finished_callback=None, error_callback=None): """ Create a new thread to upload a file (thread should be then started with start() to perform upload.) Args: path (str): Path to file Kwargs: progress_callback (func): Callback to call as file is uploaded (parameters: current, total) finished_callback (func): Callback to call when upload is finished error_callback (func): Callback to call when an error occurred (parameters: exception) Returns: :class:`Upload`. Upload thread """ return Upload( self, {"upload": path}, progress_callback = progress_callback, finished_callback = finished_callback, error_callback = error_callback )
[ "Create", "a", "new", "thread", "to", "upload", "a", "file", "(", "thread", "should", "be", "then", "started", "with", "start", "()", "to", "perform", "upload", ".", ")" ]
mariano/pyfire
python
https://github.com/mariano/pyfire/blob/42e3490c138abc8e10f2e9f8f8f3b40240a80412/pyfire/room.py#L192-L213
[ "def", "upload", "(", "self", ",", "path", ",", "progress_callback", "=", "None", ",", "finished_callback", "=", "None", ",", "error_callback", "=", "None", ")", ":", "return", "Upload", "(", "self", ",", "{", "\"upload\"", ":", "path", "}", ",", "progress_callback", "=", "progress_callback", ",", "finished_callback", "=", "finished_callback", ",", "error_callback", "=", "error_callback", ")" ]
42e3490c138abc8e10f2e9f8f8f3b40240a80412
valid
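A sketch of starting an upload; per the docstring, upload() only builds the thread and start() must be called explicitly. The callback parameters follow the docstring above; anything beyond that (such as joining the thread) is left out because it is not shown in this record:

def start_upload(room, path):
    def on_progress(current, total):
        print('uploaded %d of %d bytes' % (current, total))

    def on_error(exception):
        print('upload failed: %s' % exception)

    thread = room.upload(path,
                         progress_callback=on_progress,
                         error_callback=on_error)
    thread.start()  # the Upload thread does nothing until started
    return thread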
get_new_call
Build a call to use the new ``get_config`` function from args passed to ``Config.__init__``.
config_resolver/core.py
def get_new_call(group_name, app_name, search_path, filename, require_load, version, secure): # type: (str, str, Optional[str], str, bool, Optional[str], bool) -> str ''' Build a call to use the new ``get_config`` function from args passed to ``Config.__init__``. ''' new_call_kwargs = { 'group_name': group_name, 'filename': filename } # type: Dict[str, Any] new_call_lookup_options = {} # type: Dict[str, Any] new_call_lookup_options['secure'] = secure if search_path: new_call_lookup_options['search_path'] = search_path if require_load: new_call_lookup_options['require_load'] = require_load if version: new_call_lookup_options['version'] = version if new_call_lookup_options: new_call_kwargs['lookup_options'] = new_call_lookup_options output = build_call_str('get_config', (app_name,), new_call_kwargs) return output
def get_new_call(group_name, app_name, search_path, filename, require_load, version, secure): # type: (str, str, Optional[str], str, bool, Optional[str], bool) -> str ''' Build a call to use the new ``get_config`` function from args passed to ``Config.__init__``. ''' new_call_kwargs = { 'group_name': group_name, 'filename': filename } # type: Dict[str, Any] new_call_lookup_options = {} # type: Dict[str, Any] new_call_lookup_options['secure'] = secure if search_path: new_call_lookup_options['search_path'] = search_path if require_load: new_call_lookup_options['require_load'] = require_load if version: new_call_lookup_options['version'] = version if new_call_lookup_options: new_call_kwargs['lookup_options'] = new_call_lookup_options output = build_call_str('get_config', (app_name,), new_call_kwargs) return output
[ "Build", "a", "call", "to", "use", "the", "new", "get_config", "function", "from", "args", "passed", "to", "Config", ".", "__init__", "." ]
exhuma/config_resolver
python
https://github.com/exhuma/config_resolver/blob/2614ae3d7a49e437954254846b2963ad249b418c/config_resolver/core.py#L63-L86
[ "def", "get_new_call", "(", "group_name", ",", "app_name", ",", "search_path", ",", "filename", ",", "require_load", ",", "version", ",", "secure", ")", ":", "# type: (str, str, Optional[str], str, bool, Optional[str], bool) -> str", "new_call_kwargs", "=", "{", "'group_name'", ":", "group_name", ",", "'filename'", ":", "filename", "}", "# type: Dict[str, Any]", "new_call_lookup_options", "=", "{", "}", "# type: Dict[str, Any]", "new_call_lookup_options", "[", "'secure'", "]", "=", "secure", "if", "search_path", ":", "new_call_lookup_options", "[", "'search_path'", "]", "=", "search_path", "if", "require_load", ":", "new_call_lookup_options", "[", "'require_load'", "]", "=", "require_load", "if", "version", ":", "new_call_lookup_options", "[", "'version'", "]", "=", "version", "if", "new_call_lookup_options", ":", "new_call_kwargs", "[", "'lookup_options'", "]", "=", "new_call_lookup_options", "output", "=", "build_call_str", "(", "'get_config'", ",", "(", "app_name", ",", ")", ",", "new_call_kwargs", ")", "return", "output" ]
2614ae3d7a49e437954254846b2963ad249b418c
valid
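A sketch of calling the migration helper directly; the group/app names and paths below are illustrative only, and the exact ordering of keyword arguments in the output depends on build_call_str:

from config_resolver.core import get_new_call

hint = get_new_call(
    group_name='acmecorp',            # illustrative values
    app_name='bird_feeder',
    search_path='/etc/acme:/opt/acme',
    filename='app.ini',
    require_load=True,
    version='2.1',
    secure=False,
)
print(hint)  # e.g. "get_config('bird_feeder', group_name='acmecorp', ...)"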
build_call_str
Build a callable Python string for a function call. The output will be combined similar to this template:: <prefix>(<args>, <kwargs>) Example:: >>> build_call_str('foo', (1, 2), {'a': '10'}) "foo(1, 2, a='10')"
config_resolver/core.py
def build_call_str(prefix, args, kwargs): # type: (str, Any, Any) -> str ''' Build a callable Python string for a function call. The output will be combined similar to this template:: <prefix>(<args>, <kwargs>) Example:: >>> build_call_str('foo', (1, 2), {'a': '10'}) "foo(1, 2, a='10')" ''' kwargs_str = ', '.join(['%s=%r' % (key, value) for key, value in kwargs.items()]) args_str = ', '.join([repr(arg) for arg in args]) output = [prefix, '('] if args: output.append(args_str) if args and kwargs: output.append(', ') if kwargs: output.append(kwargs_str) output.append(')') return ''.join(output)
def build_call_str(prefix, args, kwargs): # type: (str, Any, Any) -> str ''' Build a callable Python string for a function call. The output will be combined similar to this template:: <prefix>(<args>, <kwargs>) Example:: >>> build_call_str('foo', (1, 2), {'a': '10'}) "foo(1, 2, a='10')" ''' kwargs_str = ', '.join(['%s=%r' % (key, value) for key, value in kwargs.items()]) args_str = ', '.join([repr(arg) for arg in args]) output = [prefix, '('] if args: output.append(args_str) if args and kwargs: output.append(', ') if kwargs: output.append(kwargs_str) output.append(')') return ''.join(output)
[ "Build", "a", "callable", "Python", "string", "for", "a", "function", "call", ".", "The", "output", "will", "be", "combined", "similar", "to", "this", "template", "::" ]
exhuma/config_resolver
python
https://github.com/exhuma/config_resolver/blob/2614ae3d7a49e437954254846b2963ad249b418c/config_resolver/core.py#L89-L113
[ "def", "build_call_str", "(", "prefix", ",", "args", ",", "kwargs", ")", ":", "# type: (str, Any, Any) -> str", "kwargs_str", "=", "', '", ".", "join", "(", "[", "'%s=%r'", "%", "(", "key", ",", "value", ")", "for", "key", ",", "value", "in", "kwargs", ".", "items", "(", ")", "]", ")", "args_str", "=", "', '", ".", "join", "(", "[", "repr", "(", "arg", ")", "for", "arg", "in", "args", "]", ")", "output", "=", "[", "prefix", ",", "'('", "]", "if", "args", ":", "output", ".", "append", "(", "args_str", ")", "if", "args", "and", "kwargs", ":", "output", ".", "append", "(", "', '", ")", "if", "kwargs", ":", "output", ".", "append", "(", "kwargs_str", ")", "output", ".", "append", "(", "')'", ")", "return", "''", ".", "join", "(", "output", ")" ]
2614ae3d7a49e437954254846b2963ad249b418c
valid
Config.get_xdg_dirs
Returns a list of paths specified by the XDG_CONFIG_DIRS environment variable or the appropriate default. The list is sorted by precedence, with the most important item coming *last* (required by the existing config_resolver logic).
config_resolver/core.py
def get_xdg_dirs(self): # type: () -> List[str] """ Returns a list of paths specified by the XDG_CONFIG_DIRS environment variable or the appropriate default. The list is sorted by precedence, with the most important item coming *last* (required by the existing config_resolver logic). """ config_dirs = getenv('XDG_CONFIG_DIRS', '') if config_dirs: self._log.debug('XDG_CONFIG_DIRS is set to %r', config_dirs) output = [] for path in reversed(config_dirs.split(':')): output.append(join(path, self.group_name, self.app_name)) return output return ['/etc/xdg/%s/%s' % (self.group_name, self.app_name)]
def get_xdg_dirs(self): # type: () -> List[str] """ Returns a list of paths specified by the XDG_CONFIG_DIRS environment variable or the appropriate default. The list is sorted by precedence, with the most important item coming *last* (required by the existing config_resolver logic). """ config_dirs = getenv('XDG_CONFIG_DIRS', '') if config_dirs: self._log.debug('XDG_CONFIG_DIRS is set to %r', config_dirs) output = [] for path in reversed(config_dirs.split(':')): output.append(join(path, self.group_name, self.app_name)) return output return ['/etc/xdg/%s/%s' % (self.group_name, self.app_name)]
[ "Returns", "a", "list", "of", "paths", "specified", "by", "the", "XDG_CONFIG_DIRS", "environment", "variable", "or", "the", "appropriate", "default", "." ]
exhuma/config_resolver
python
https://github.com/exhuma/config_resolver/blob/2614ae3d7a49e437954254846b2963ad249b418c/config_resolver/core.py#L204-L220
[ "def", "get_xdg_dirs", "(", "self", ")", ":", "# type: () -> List[str]", "config_dirs", "=", "getenv", "(", "'XDG_CONFIG_DIRS'", ",", "''", ")", "if", "config_dirs", ":", "self", ".", "_log", ".", "debug", "(", "'XDG_CONFIG_DIRS is set to %r'", ",", "config_dirs", ")", "output", "=", "[", "]", "for", "path", "in", "reversed", "(", "config_dirs", ".", "split", "(", "':'", ")", ")", ":", "output", ".", "append", "(", "join", "(", "path", ",", "self", ".", "group_name", ",", "self", ".", "app_name", ")", ")", "return", "output", "return", "[", "'/etc/xdg/%s/%s'", "%", "(", "self", ".", "group_name", ",", "self", ".", "app_name", ")", "]" ]
2614ae3d7a49e437954254846b2963ad249b418c
valid
Config.get_xdg_home
Returns the value specified in the XDG_CONFIG_HOME environment variable or the appropriate default.
config_resolver/core.py
def get_xdg_home(self): # type: () -> str """ Returns the value specified in the XDG_CONFIG_HOME environment variable or the appropriate default. """ config_home = getenv('XDG_CONFIG_HOME', '') if config_home: self._log.debug('XDG_CONFIG_HOME is set to %r', config_home) return expanduser(join(config_home, self.group_name, self.app_name)) return expanduser('~/.config/%s/%s' % (self.group_name, self.app_name))
def get_xdg_home(self): # type: () -> str """ Returns the value specified in the XDG_CONFIG_HOME environment variable or the appropriate default. """ config_home = getenv('XDG_CONFIG_HOME', '') if config_home: self._log.debug('XDG_CONFIG_HOME is set to %r', config_home) return expanduser(join(config_home, self.group_name, self.app_name)) return expanduser('~/.config/%s/%s' % (self.group_name, self.app_name))
[ "Returns", "the", "value", "specified", "in", "the", "XDG_CONFIG_HOME", "environment", "variable", "or", "the", "appropriate", "default", "." ]
exhuma/config_resolver
python
https://github.com/exhuma/config_resolver/blob/2614ae3d7a49e437954254846b2963ad249b418c/config_resolver/core.py#L222-L232
[ "def", "get_xdg_home", "(", "self", ")", ":", "# type: () -> str", "config_home", "=", "getenv", "(", "'XDG_CONFIG_HOME'", ",", "''", ")", "if", "config_home", ":", "self", ".", "_log", ".", "debug", "(", "'XDG_CONFIG_HOME is set to %r'", ",", "config_home", ")", "return", "expanduser", "(", "join", "(", "config_home", ",", "self", ".", "group_name", ",", "self", ".", "app_name", ")", ")", "return", "expanduser", "(", "'~/.config/%s/%s'", "%", "(", "self", ".", "group_name", ",", "self", ".", "app_name", ")", ")" ]
2614ae3d7a49e437954254846b2963ad249b418c
valid
Config._effective_filename
Returns the filename which is effectively used by the application. If overridden by an environment variable, it will return that filename.
config_resolver/core.py
def _effective_filename(self): # type: () -> str """ Returns the filename which is effectively used by the application. If overridden by an environment variable, it will return that filename. """ # same logic for the configuration filename. First, check if we were # initialized with a filename... config_filename = '' if self.filename: config_filename = self.filename # ... next, take the value from the environment env_filename = getenv(self.env_filename_name) if env_filename: self._log.info('Configuration filename was overridden with %r ' 'by the environment variable %s.', env_filename, self.env_filename_name) config_filename = env_filename return config_filename
def _effective_filename(self): # type: () -> str """ Returns the filename which is effectively used by the application. If overridden by an environment variable, it will return that filename. """ # same logic for the configuration filename. First, check if we were # initialized with a filename... config_filename = '' if self.filename: config_filename = self.filename # ... next, take the value from the environment env_filename = getenv(self.env_filename_name) if env_filename: self._log.info('Configuration filename was overridden with %r ' 'by the environment variable %s.', env_filename, self.env_filename_name) config_filename = env_filename return config_filename
[ "Returns", "the", "filename", "which", "is", "effectively", "used", "by", "the", "application", ".", "If", "overridden", "by", "an", "environment", "variable", "it", "will", "return", "that", "filename", "." ]
exhuma/config_resolver
python
https://github.com/exhuma/config_resolver/blob/2614ae3d7a49e437954254846b2963ad249b418c/config_resolver/core.py#L234-L255
[ "def", "_effective_filename", "(", "self", ")", ":", "# type: () -> str", "# same logic for the configuration filename. First, check if we were", "# initialized with a filename...", "config_filename", "=", "''", "if", "self", ".", "filename", ":", "config_filename", "=", "self", ".", "filename", "# ... next, take the value from the environment", "env_filename", "=", "getenv", "(", "self", ".", "env_filename_name", ")", "if", "env_filename", ":", "self", ".", "_log", ".", "info", "(", "'Configuration filename was overridden with %r '", "'by the environment variable %s.'", ",", "env_filename", ",", "self", ".", "env_filename_name", ")", "config_filename", "=", "env_filename", "return", "config_filename" ]
2614ae3d7a49e437954254846b2963ad249b418c
valid
Config._effective_path
Returns a list of paths to search for config files in reverse order of precedence. In other words: the last path element will override the settings from the first one.
config_resolver/core.py
def _effective_path(self): # type: () -> List[str] """ Returns a list of paths to search for config files in reverse order of precedence. In other words: the last path element will override the settings from the first one. """ # default search path path = (['/etc/%s/%s' % (self.group_name, self.app_name)] + self.get_xdg_dirs() + [expanduser('~/.%s/%s' % (self.group_name, self.app_name)), self.get_xdg_home(), join(getcwd(), '.{}'.format(self.group_name), self.app_name)]) # If a path was passed directly to this instance, override the path. if self.search_path: path = self.search_path.split(pathsep) # Next, consider the environment variables... env_path = getenv(self.env_path_name) if env_path and env_path.startswith('+'): # If prefixed with a '+', append the path elements additional_paths = env_path[1:].split(pathsep) self._log.info('Search path extended with %r by the environment ' 'variable %s.', additional_paths, self.env_path_name) path.extend(additional_paths) elif env_path: # Otherwise, override again. This takes absolute precedence. self._log.info("Configuration search path was overridden with " "%r by the environment variable %r.", env_path, self.env_path_name) path = env_path.split(pathsep) return path
def _effective_path(self): # type: () -> List[str] """ Returns a list of paths to search for config files in reverse order of precedence. In other words: the last path element will override the settings from the first one. """ # default search path path = (['/etc/%s/%s' % (self.group_name, self.app_name)] + self.get_xdg_dirs() + [expanduser('~/.%s/%s' % (self.group_name, self.app_name)), self.get_xdg_home(), join(getcwd(), '.{}'.format(self.group_name), self.app_name)]) # If a path was passed directly to this instance, override the path. if self.search_path: path = self.search_path.split(pathsep) # Next, consider the environment variables... env_path = getenv(self.env_path_name) if env_path and env_path.startswith('+'): # If prefixed with a '+', append the path elements additional_paths = env_path[1:].split(pathsep) self._log.info('Search path extended with %r by the environment ' 'variable %s.', additional_paths, self.env_path_name) path.extend(additional_paths) elif env_path: # Otherwise, override again. This takes absolute precedence. self._log.info("Configuration search path was overridden with " "%r by the environment variable %r.", env_path, self.env_path_name) path = env_path.split(pathsep) return path
[ "Returns", "a", "list", "of", "paths", "to", "search", "for", "config", "files", "in", "reverse", "order", "of", "precedence", ".", "In", "other", "words", ":", "the", "last", "path", "element", "will", "override", "the", "settings", "from", "the", "first", "one", "." ]
exhuma/config_resolver
python
https://github.com/exhuma/config_resolver/blob/2614ae3d7a49e437954254846b2963ad249b418c/config_resolver/core.py#L257-L294
[ "def", "_effective_path", "(", "self", ")", ":", "# type: () -> List[str]", "# default search path", "path", "=", "(", "[", "'/etc/%s/%s'", "%", "(", "self", ".", "group_name", ",", "self", ".", "app_name", ")", "]", "+", "self", ".", "get_xdg_dirs", "(", ")", "+", "[", "expanduser", "(", "'~/.%s/%s'", "%", "(", "self", ".", "group_name", ",", "self", ".", "app_name", ")", ")", ",", "self", ".", "get_xdg_home", "(", ")", ",", "join", "(", "getcwd", "(", ")", ",", "'.{}'", ".", "format", "(", "self", ".", "group_name", ")", ",", "self", ".", "app_name", ")", "]", ")", "# If a path was passed directly to this instance, override the path.", "if", "self", ".", "search_path", ":", "path", "=", "self", ".", "search_path", ".", "split", "(", "pathsep", ")", "# Next, consider the environment variables...", "env_path", "=", "getenv", "(", "self", ".", "env_path_name", ")", "if", "env_path", "and", "env_path", ".", "startswith", "(", "'+'", ")", ":", "# If prefixed with a '+', append the path elements", "additional_paths", "=", "env_path", "[", "1", ":", "]", ".", "split", "(", "pathsep", ")", "self", ".", "_log", ".", "info", "(", "'Search path extended with %r by the environment '", "'variable %s.'", ",", "additional_paths", ",", "self", ".", "env_path_name", ")", "path", ".", "extend", "(", "additional_paths", ")", "elif", "env_path", ":", "# Otherwise, override again. This takes absolute precedence.", "self", ".", "_log", ".", "info", "(", "\"Configuration search path was overridden with \"", "\"%r by the environment variable %r.\"", ",", "env_path", ",", "self", ".", "env_path_name", ")", "path", "=", "env_path", ".", "split", "(", "pathsep", ")", "return", "path" ]
2614ae3d7a49e437954254846b2963ad249b418c
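A minimal standalone sketch of the precedence rules documented above, not the library's own API: the group/app names and the EXAMPLE_PATH variable are placeholders. Defaults run from /etc to the current directory (lowest to highest precedence), an explicit search_path replaces them, and the environment variable either extends the list (leading '+') or replaces it outright.

import os
from os import getenv, getcwd, pathsep
from os.path import expanduser, join

def effective_path_sketch(group_name, app_name, search_path=None,
                          env_path_name='EXAMPLE_PATH'):
    # Default order, lowest to highest precedence.
    path = ['/etc/%s/%s' % (group_name, app_name),
            expanduser('~/.%s/%s' % (group_name, app_name)),
            join(getcwd(), '.%s' % group_name, app_name)]
    if search_path:
        # An explicit search path replaces the defaults entirely.
        path = search_path.split(pathsep)
    env_path = getenv(env_path_name)
    if env_path and env_path.startswith('+'):
        # A leading '+' appends, so these entries win over the defaults.
        path.extend(env_path[1:].split(pathsep))
    elif env_path:
        # A plain value takes absolute precedence.
        path = env_path.split(pathsep)
    return path

os.environ['EXAMPLE_PATH'] = '+/opt/acme/myapp'
print(effective_path_sketch('acme', 'myapp'))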
valid
Config.check_file
Check if ``filename`` can be read. Will return boolean which is True if the file can be read, False otherwise.
config_resolver/core.py
def check_file(self, filename): # type: (str) -> bool """ Check if ``filename`` can be read. Will return boolean which is True if the file can be read, False otherwise. """ if not exists(filename): return False # Check if the file is version-compatible with this instance. new_config = ConfigResolverBase() new_config.read(filename) if self.version and not new_config.has_option('meta', 'version'): # self.version is set, so we MUST have a version in the file! raise NoVersionError( "The config option 'meta.version' is missing in {}. The " "application expects version {}!".format(filename, self.version)) elif not self.version and new_config.has_option('meta', 'version'): # Automatically "lock-in" a version number if one is found. # This prevents loading a chain of config files with incompatible # version numbers! self.version = StrictVersion(new_config.get('meta', 'version')) self._log.info('%r contains a version number, but the config ' 'instance was not created with a version ' 'restriction. Will set version number to "%s" to ' 'prevent accidents!', filename, self.version) elif self.version: # This instance expected a certain version. We need to check the # version in the file and compare. file_version = new_config.get('meta', 'version') major, minor, _ = StrictVersion(file_version).version expected_major, expected_minor, _ = self.version.version if expected_major != major: self._log.error( 'Invalid major version number in %r. Expected %r, got %r!', abspath(filename), str(self.version), file_version) return False if expected_minor != minor: self._log.warning( 'Mismatching minor version number in %r. ' 'Expected %r, got %r!', abspath(filename), str(self.version), file_version) return True return True
def check_file(self, filename): # type: (str) -> bool """ Check if ``filename`` can be read. Will return boolean which is True if the file can be read, False otherwise. """ if not exists(filename): return False # Check if the file is version-compatible with this instance. new_config = ConfigResolverBase() new_config.read(filename) if self.version and not new_config.has_option('meta', 'version'): # self.version is set, so we MUST have a version in the file! raise NoVersionError( "The config option 'meta.version' is missing in {}. The " "application expects version {}!".format(filename, self.version)) elif not self.version and new_config.has_option('meta', 'version'): # Automatically "lock-in" a version number if one is found. # This prevents loading a chain of config files with incompatible # version numbers! self.version = StrictVersion(new_config.get('meta', 'version')) self._log.info('%r contains a version number, but the config ' 'instance was not created with a version ' 'restriction. Will set version number to "%s" to ' 'prevent accidents!', filename, self.version) elif self.version: # This instance expected a certain version. We need to check the # version in the file and compare. file_version = new_config.get('meta', 'version') major, minor, _ = StrictVersion(file_version).version expected_major, expected_minor, _ = self.version.version if expected_major != major: self._log.error( 'Invalid major version number in %r. Expected %r, got %r!', abspath(filename), str(self.version), file_version) return False if expected_minor != minor: self._log.warning( 'Mismatching minor version number in %r. ' 'Expected %r, got %r!', abspath(filename), str(self.version), file_version) return True return True
[ "Check", "if", "filename", "can", "be", "read", ".", "Will", "return", "boolean", "which", "is", "True", "if", "the", "file", "can", "be", "read", "False", "otherwise", "." ]
exhuma/config_resolver
python
https://github.com/exhuma/config_resolver/blob/2614ae3d7a49e437954254846b2963ad249b418c/config_resolver/core.py#L296-L346
[ "def", "check_file", "(", "self", ",", "filename", ")", ":", "# type: (str) -> bool", "if", "not", "exists", "(", "filename", ")", ":", "return", "False", "# Check if the file is version-compatible with this instance.", "new_config", "=", "ConfigResolverBase", "(", ")", "new_config", ".", "read", "(", "filename", ")", "if", "self", ".", "version", "and", "not", "new_config", ".", "has_option", "(", "'meta'", ",", "'version'", ")", ":", "# self.version is set, so we MUST have a version in the file!", "raise", "NoVersionError", "(", "\"The config option 'meta.version' is missing in {}. The \"", "\"application expects version {}!\"", ".", "format", "(", "filename", ",", "self", ".", "version", ")", ")", "elif", "not", "self", ".", "version", "and", "new_config", ".", "has_option", "(", "'meta'", ",", "'version'", ")", ":", "# Automatically \"lock-in\" a version number if one is found.", "# This prevents loading a chain of config files with incompatible", "# version numbers!", "self", ".", "version", "=", "StrictVersion", "(", "new_config", ".", "get", "(", "'meta'", ",", "'version'", ")", ")", "self", ".", "_log", ".", "info", "(", "'%r contains a version number, but the config '", "'instance was not created with a version '", "'restriction. Will set version number to \"%s\" to '", "'prevent accidents!'", ",", "filename", ",", "self", ".", "version", ")", "elif", "self", ".", "version", ":", "# This instance expected a certain version. We need to check the", "# version in the file and compare.", "file_version", "=", "new_config", ".", "get", "(", "'meta'", ",", "'version'", ")", "major", ",", "minor", ",", "_", "=", "StrictVersion", "(", "file_version", ")", ".", "version", "expected_major", ",", "expected_minor", ",", "_", "=", "self", ".", "version", ".", "version", "if", "expected_major", "!=", "major", ":", "self", ".", "_log", ".", "error", "(", "'Invalid major version number in %r. Expected %r, got %r!'", ",", "abspath", "(", "filename", ")", ",", "str", "(", "self", ".", "version", ")", ",", "file_version", ")", "return", "False", "if", "expected_minor", "!=", "minor", ":", "self", ".", "_log", ".", "warning", "(", "'Mismatching minor version number in %r. '", "'Expected %r, got %r!'", ",", "abspath", "(", "filename", ")", ",", "str", "(", "self", ".", "version", ")", ",", "file_version", ")", "return", "True", "return", "True" ]
2614ae3d7a49e437954254846b2963ad249b418c
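The version gate above hinges on a [meta] section in the loaded file. A small sketch of that comparison, assuming a file declaring version 2.1 against an instance expecting 2.3 (distutils.version is what the code above uses, even though it is deprecated on recent Pythons):

from distutils.version import StrictVersion

# A compatible config file carries its version in a [meta] section, e.g.:
#   [meta]
#   version = 2.1
file_version = StrictVersion('2.1')
expected = StrictVersion('2.3')

major, minor, _ = file_version.version
expected_major, expected_minor, _ = expected.version

print(expected_major == major)  # True  -> the file is accepted
print(expected_minor == minor)  # False -> only a warning is logged, loading continues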
valid
Config.get
Overrides :py:meth:`configparser.ConfigParser.get`. In addition to ``section`` and ``option``, this call takes an optional ``default`` value. This behaviour works in *addition* to the :py:class:`configparser.ConfigParser` default mechanism. Note that a default value from ``ConfigParser`` takes precedence. The reason this additional functionality is added, is because the defaults of :py:class:`configparser.ConfigParser` are not dependent on sections. If you specify a default for the option ``test``, then this value will be returned for both ``section1.test`` and for ``section2.test``. Using the default on the ``get`` call gives you more fine-grained control over this. Also note, that if a default value was used, it will be logged with level ``logging.DEBUG``. :param section: The config file section. :param option: The option name. :param kwargs: These keyword args are passed through to :py:meth:`configparser.ConfigParser.get`.
config_resolver/core.py
def get(self, section, option, **kwargs): # type: ignore # type: (str, str, Any) -> Any """ Overrides :py:meth:`configparser.ConfigParser.get`. In addition to ``section`` and ``option``, this call takes an optional ``default`` value. This behaviour works in *addition* to the :py:class:`configparser.ConfigParser` default mechanism. Note that a default value from ``ConfigParser`` takes precedence. The reason this additional functionality is added, is because the defaults of :py:class:`configparser.ConfigParser` are not dependent on sections. If you specify a default for the option ``test``, then this value will be returned for both ``section1.test`` and for ``section2.test``. Using the default on the ``get`` call gives you more fine-grained control over this. Also note, that if a default value was used, it will be logged with level ``logging.DEBUG``. :param section: The config file section. :param option: The option name. :param kwargs: These keyword args are passed through to :py:meth:`configparser.ConfigParser.get`. """ if "default" in kwargs: default = kwargs.pop("default") new_kwargs = {'fallback': default} new_kwargs.update(kwargs) new_call = build_call_str('.get', (section, option), new_kwargs) warn('Using the "default" argument to Config.get() will no ' 'longer work in config_resolver 5.0! Version 5 will return ' 'standard Python ConfigParser instances which use "fallback" ' 'instead of "default". Replace your code with "%s"' % new_call, DeprecationWarning, stacklevel=2) have_default = True else: have_default = False try: value = super(Config, self).get(section, option, **kwargs) return value except (NoSectionError, NoOptionError) as exc: if have_default: self._log.debug("%s: Returning default value %r", exc, default) return default else: raise
def get(self, section, option, **kwargs): # type: ignore # type: (str, str, Any) -> Any """ Overrides :py:meth:`configparser.ConfigParser.get`. In addition to ``section`` and ``option``, this call takes an optional ``default`` value. This behaviour works in *addition* to the :py:class:`configparser.ConfigParser` default mechanism. Note that a default value from ``ConfigParser`` takes precedence. The reason this additional functionality is added, is because the defaults of :py:class:`configparser.ConfigParser` are not dependent on sections. If you specify a default for the option ``test``, then this value will be returned for both ``section1.test`` and for ``section2.test``. Using the default on the ``get`` call gives you more fine-grained control over this. Also note, that if a default value was used, it will be logged with level ``logging.DEBUG``. :param section: The config file section. :param option: The option name. :param kwargs: These keyword args are passed through to :py:meth:`configparser.ConfigParser.get`. """ if "default" in kwargs: default = kwargs.pop("default") new_kwargs = {'fallback': default} new_kwargs.update(kwargs) new_call = build_call_str('.get', (section, option), new_kwargs) warn('Using the "default" argument to Config.get() will no ' 'longer work in config_resolver 5.0! Version 5 will return ' 'standard Python ConfigParser instances which use "fallback" ' 'instead of "default". Replace your code with "%s"' % new_call, DeprecationWarning, stacklevel=2) have_default = True else: have_default = False try: value = super(Config, self).get(section, option, **kwargs) return value except (NoSectionError, NoOptionError) as exc: if have_default: self._log.debug("%s: Returning default value %r", exc, default) return default else: raise
[ "Overrides", ":", "py", ":", "meth", ":", "configparser", ".", "ConfigParser", ".", "get", "." ]
exhuma/config_resolver
python
https://github.com/exhuma/config_resolver/blob/2614ae3d7a49e437954254846b2963ad249b418c/config_resolver/core.py#L348-L396
[ "def", "get", "(", "self", ",", "section", ",", "option", ",", "*", "*", "kwargs", ")", ":", "# type: ignore", "# type: (str, str, Any) -> Any", "if", "\"default\"", "in", "kwargs", ":", "default", "=", "kwargs", ".", "pop", "(", "\"default\"", ")", "new_kwargs", "=", "{", "'fallback'", ":", "default", "}", "new_kwargs", ".", "update", "(", "kwargs", ")", "new_call", "=", "build_call_str", "(", "'.get'", ",", "(", "section", ",", "option", ")", ",", "new_kwargs", ")", "warn", "(", "'Using the \"default\" argument to Config.get() will no '", "'longer work in config_resolver 5.0! Version 5 will return '", "'standard Python ConfigParser instances which use \"fallback\" '", "'instead of \"default\". Replace your code with \"%s\"'", "%", "new_call", ",", "DeprecationWarning", ",", "stacklevel", "=", "2", ")", "have_default", "=", "True", "else", ":", "have_default", "=", "False", "try", ":", "value", "=", "super", "(", "Config", ",", "self", ")", ".", "get", "(", "section", ",", "option", ",", "*", "*", "kwargs", ")", "return", "value", "except", "(", "NoSectionError", ",", "NoOptionError", ")", "as", "exc", ":", "if", "have_default", ":", "self", ".", "_log", ".", "debug", "(", "\"%s: Returning default value %r\"", ",", "exc", ",", "default", ")", "return", "default", "else", ":", "raise" ]
2614ae3d7a49e437954254846b2963ad249b418c
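A usage sketch of the per-call default described above (assumes config_resolver 4.x is importable as shown; 'acme'/'myapp' and the [database] section are placeholders). Unlike a ConfigParser-level default, this value applies only to the single section/option being read; the deprecation warning steers callers toward the stock fallback keyword instead.

from config_resolver import Config  # assumed import path for the 4.x API

config = Config('acme', 'myapp')
# Returned only when [database] port is missing from every loaded file.
port = config.get('database', 'port', default='5432')
# The ConfigParser-native spelling recommended by the warning above:
port = config.get('database', 'port', fallback='5432')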
valid
Config.load
Searches for an appropriate config file. If found, loads the file into the current instance. This method can also be used to reload a configuration. Note that you may want to set ``reload`` to ``True`` to clear the configuration before loading in that case. Without doing that, values will remain available even if they have been removed from the config files. :param reload: if set to ``True``, the existing values are cleared before reloading. :param require_load: If set to ``True`` this will raise a :py:exc:`IOError` if no config file has been found to load.
config_resolver/core.py
def load(self, reload=False, require_load=False): # type: (bool, bool) -> None """ Searches for an appropriate config file. If found, loads the file into the current instance. This method can also be used to reload a configuration. Note that you may want to set ``reload`` to ``True`` to clear the configuration before loading in that case. Without doing that, values will remain available even if they have been removed from the config files. :param reload: if set to ``True``, the existing values are cleared before reloading. :param require_load: If set to ``True`` this will raise a :py:exc:`IOError` if no config file has been found to load. """ if reload: # pragma: no cover self.config = None # only load the config if necessary (or explicitly requested) if self.config: # pragma: no cover self._log.debug('Returning cached config instance. Use ' '``reload=True`` to avoid caching!') return path = self._effective_path() config_filename = self._effective_filename() # Next, use the resolved path to find the filenames. Keep track of # which files we loaded in order to inform the user. self._active_path = [join(_, config_filename) for _ in path] for dirname in path: conf_name = join(dirname, config_filename) readable = self.check_file(conf_name) if readable: action = 'Updating' if self._loaded_files else 'Loading initial' self._log.info('%s config from %s', action, conf_name) self.read(conf_name) if conf_name == expanduser("~/.%s/%s/%s" % ( self.group_name, self.app_name, self.filename)): self._log.warning( "DEPRECATION WARNING: The file " "'%s/.%s/%s/app.ini' was loaded. The XDG " "Basedir standard requires this file to be in " "'%s/.config/%s/%s/app.ini'! This location " "will no longer be parsed in a future version of " "config_resolver! You can already (and should) move " "the file!", expanduser("~"), self.group_name, self.app_name, expanduser("~"), self.group_name, self.app_name) self._loaded_files.append(conf_name) if not self._loaded_files and not require_load: self._log.warning( "No config file named %s found! Search path was %r", config_filename, path) elif not self._loaded_files and require_load: raise IOError("No config file named %s found! Search path " "was %r" % (config_filename, path))
def load(self, reload=False, require_load=False): # type: (bool, bool) -> None """ Searches for an appropriate config file. If found, loads the file into the current instance. This method can also be used to reload a configuration. Note that you may want to set ``reload`` to ``True`` to clear the configuration before loading in that case. Without doing that, values will remain available even if they have been removed from the config files. :param reload: if set to ``True``, the existing values are cleared before reloading. :param require_load: If set to ``True`` this will raise a :py:exc:`IOError` if no config file has been found to load. """ if reload: # pragma: no cover self.config = None # only load the config if necessary (or explicitly requested) if self.config: # pragma: no cover self._log.debug('Returning cached config instance. Use ' '``reload=True`` to avoid caching!') return path = self._effective_path() config_filename = self._effective_filename() # Next, use the resolved path to find the filenames. Keep track of # which files we loaded in order to inform the user. self._active_path = [join(_, config_filename) for _ in path] for dirname in path: conf_name = join(dirname, config_filename) readable = self.check_file(conf_name) if readable: action = 'Updating' if self._loaded_files else 'Loading initial' self._log.info('%s config from %s', action, conf_name) self.read(conf_name) if conf_name == expanduser("~/.%s/%s/%s" % ( self.group_name, self.app_name, self.filename)): self._log.warning( "DEPRECATION WARNING: The file " "'%s/.%s/%s/app.ini' was loaded. The XDG " "Basedir standard requires this file to be in " "'%s/.config/%s/%s/app.ini'! This location " "will no longer be parsed in a future version of " "config_resolver! You can already (and should) move " "the file!", expanduser("~"), self.group_name, self.app_name, expanduser("~"), self.group_name, self.app_name) self._loaded_files.append(conf_name) if not self._loaded_files and not require_load: self._log.warning( "No config file named %s found! Search path was %r", config_filename, path) elif not self._loaded_files and require_load: raise IOError("No config file named %s found! Search path " "was %r" % (config_filename, path))
[ "Searches", "for", "an", "appropriate", "config", "file", ".", "If", "found", "loads", "the", "file", "into", "the", "current", "instance", ".", "This", "method", "can", "also", "be", "used", "to", "reload", "a", "configuration", ".", "Note", "that", "you", "may", "want", "to", "set", "reload", "to", "True", "to", "clear", "the", "configuration", "before", "loading", "in", "that", "case", ".", "Without", "doing", "that", "values", "will", "remain", "available", "even", "if", "they", "have", "been", "removed", "from", "the", "config", "files", "." ]
exhuma/config_resolver
python
https://github.com/exhuma/config_resolver/blob/2614ae3d7a49e437954254846b2963ad249b418c/config_resolver/core.py#L398-L458
[ "def", "load", "(", "self", ",", "reload", "=", "False", ",", "require_load", "=", "False", ")", ":", "# type: (bool, bool) -> None", "if", "reload", ":", "# pragma: no cover", "self", ".", "config", "=", "None", "# only load the config if necessary (or explicitly requested)", "if", "self", ".", "config", ":", "# pragma: no cover", "self", ".", "_log", ".", "debug", "(", "'Returning cached config instance. Use '", "'``reload=True`` to avoid caching!'", ")", "return", "path", "=", "self", ".", "_effective_path", "(", ")", "config_filename", "=", "self", ".", "_effective_filename", "(", ")", "# Next, use the resolved path to find the filenames. Keep track of", "# which files we loaded in order to inform the user.", "self", ".", "_active_path", "=", "[", "join", "(", "_", ",", "config_filename", ")", "for", "_", "in", "path", "]", "for", "dirname", "in", "path", ":", "conf_name", "=", "join", "(", "dirname", ",", "config_filename", ")", "readable", "=", "self", ".", "check_file", "(", "conf_name", ")", "if", "readable", ":", "action", "=", "'Updating'", "if", "self", ".", "_loaded_files", "else", "'Loading initial'", "self", ".", "_log", ".", "info", "(", "'%s config from %s'", ",", "action", ",", "conf_name", ")", "self", ".", "read", "(", "conf_name", ")", "if", "conf_name", "==", "expanduser", "(", "\"~/.%s/%s/%s\"", "%", "(", "self", ".", "group_name", ",", "self", ".", "app_name", ",", "self", ".", "filename", ")", ")", ":", "self", ".", "_log", ".", "warning", "(", "\"DEPRECATION WARNING: The file \"", "\"'%s/.%s/%s/app.ini' was loaded. The XDG \"", "\"Basedir standard requires this file to be in \"", "\"'%s/.config/%s/%s/app.ini'! This location \"", "\"will no longer be parsed in a future version of \"", "\"config_resolver! You can already (and should) move \"", "\"the file!\"", ",", "expanduser", "(", "\"~\"", ")", ",", "self", ".", "group_name", ",", "self", ".", "app_name", ",", "expanduser", "(", "\"~\"", ")", ",", "self", ".", "group_name", ",", "self", ".", "app_name", ")", "self", ".", "_loaded_files", ".", "append", "(", "conf_name", ")", "if", "not", "self", ".", "_loaded_files", "and", "not", "require_load", ":", "self", ".", "_log", ".", "warning", "(", "\"No config file named %s found! Search path was %r\"", ",", "config_filename", ",", "path", ")", "elif", "not", "self", ".", "_loaded_files", "and", "require_load", ":", "raise", "IOError", "(", "\"No config file named %s found! Search path \"", "\"was %r\"", "%", "(", "config_filename", ",", "path", ")", ")" ]
2614ae3d7a49e437954254846b2963ad249b418c
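A usage sketch of the reload semantics (same placeholder group/app names as above): clearing before reloading drops values whose files have disappeared, and require_load turns the silent warning into an IOError.

from config_resolver import Config  # assumed import path for the 4.x API

config = Config('acme', 'myapp')
config.load(reload=True, require_load=True)  # raises IOError if no file is found
# _loaded_files is the internal list populated by load() above; it shows which
# files contributed, in the order they were read.
print(config._loaded_files)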
valid
SecuredConfig.check_file
Overrides :py:meth:`.Config.check_file`
config_resolver/core.py
def check_file(self, filename): # type: (str) -> bool """ Overrides :py:meth:`.Config.check_file` """ can_read = super(SecuredConfig, self).check_file(filename) if not can_read: return False mode = get_stat(filename).st_mode if (mode & stat.S_IRGRP) or (mode & stat.S_IROTH): msg = "File %r is not secure enough. Change it's mode to 600" self._log.warning(msg, filename) return False return True
def check_file(self, filename): # type: (str) -> bool """ Overrides :py:meth:`.Config.check_file` """ can_read = super(SecuredConfig, self).check_file(filename) if not can_read: return False mode = get_stat(filename).st_mode if (mode & stat.S_IRGRP) or (mode & stat.S_IROTH): msg = "File %r is not secure enough. Change it's mode to 600" self._log.warning(msg, filename) return False return True
[ "Overrides", ":", "py", ":", "meth", ":", ".", "Config", ".", "check_file" ]
exhuma/config_resolver
python
https://github.com/exhuma/config_resolver/blob/2614ae3d7a49e437954254846b2963ad249b418c/config_resolver/core.py#L467-L481
[ "def", "check_file", "(", "self", ",", "filename", ")", ":", "# type: (str) -> bool", "can_read", "=", "super", "(", "SecuredConfig", ",", "self", ")", ".", "check_file", "(", "filename", ")", "if", "not", "can_read", ":", "return", "False", "mode", "=", "get_stat", "(", "filename", ")", ".", "st_mode", "if", "(", "mode", "&", "stat", ".", "S_IRGRP", ")", "or", "(", "mode", "&", "stat", ".", "S_IROTH", ")", ":", "msg", "=", "\"File %r is not secure enough. Change it's mode to 600\"", "self", ".", "_log", ".", "warning", "(", "msg", ",", "filename", ")", "return", "False", "return", "True" ]
2614ae3d7a49e437954254846b2963ad249b418c
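A minimal standalone sketch of the permission test used above: any group- or world-readable file is rejected, so a mode of 600 (owner read/write only) is what passes.

import os
import stat

def is_secure(filename):
    mode = os.stat(filename).st_mode
    # Reject files readable by group or others.
    return not (mode & stat.S_IRGRP or mode & stat.S_IROTH)

# Example (hypothetical file):
#   os.chmod('app.ini', 0o600); is_secure('app.ini')  -> True
#   os.chmod('app.ini', 0o644); is_secure('app.ini')  -> False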
valid
WSGIServer.setup_environ
https://www.python.org/dev/peps/pep-0333/#environ-variables
bustard/wsgi_server.py
def setup_environ(self): """https://www.python.org/dev/peps/pep-0333/#environ-variables""" # Set up base environment env = self.base_environ = {} env['SERVER_NAME'] = self.server_name env['GATEWAY_INTERFACE'] = 'CGI/1.1' env['SERVER_PORT'] = str(self.server_port) env['REMOTE_HOST'] = '' env['CONTENT_LENGTH'] = '' env['SCRIPT_NAME'] = ''
def setup_environ(self): """https://www.python.org/dev/peps/pep-0333/#environ-variables""" # Set up base environment env = self.base_environ = {} env['SERVER_NAME'] = self.server_name env['GATEWAY_INTERFACE'] = 'CGI/1.1' env['SERVER_PORT'] = str(self.server_port) env['REMOTE_HOST'] = '' env['CONTENT_LENGTH'] = '' env['SCRIPT_NAME'] = ''
[ "https", ":", "//", "www", ".", "python", ".", "org", "/", "dev", "/", "peps", "/", "pep", "-", "0333", "/", "#environ", "-", "variables" ]
mozillazg/bustard
python
https://github.com/mozillazg/bustard/blob/bd7b47f3ba5440cf6ea026c8b633060fedeb80b7/bustard/wsgi_server.py#L50-L59
[ "def", "setup_environ", "(", "self", ")", ":", "# Set up base environment", "env", "=", "self", ".", "base_environ", "=", "{", "}", "env", "[", "'SERVER_NAME'", "]", "=", "self", ".", "server_name", "env", "[", "'GATEWAY_INTERFACE'", "]", "=", "'CGI/1.1'", "env", "[", "'SERVER_PORT'", "]", "=", "str", "(", "self", ".", "server_port", ")", "env", "[", "'REMOTE_HOST'", "]", "=", "''", "env", "[", "'CONTENT_LENGTH'", "]", "=", "''", "env", "[", "'SCRIPT_NAME'", "]", "=", "''" ]
bd7b47f3ba5440cf6ea026c8b633060fedeb80b7
valid
WSGIServer.get_environ
https://www.python.org/dev/peps/pep-0333/#environ-variables
bustard/wsgi_server.py
def get_environ(self): """https://www.python.org/dev/peps/pep-0333/#environ-variables""" env = self.base_environ.copy() env['REQUEST_METHOD'] = self.request_method if '?' in self.path: path, query = self.path.split('?', 1) else: path, query = self.path, '' env['PATH_INFO'] = urllib.parse.unquote(path) env['QUERY_STRING'] = query env['CONTENT_TYPE'] = self.headers.get('Content-Type', '') env['CONTENT_LENGTH'] = self.headers.get('Content-Length', '0') env['SERVER_PROTOCOL'] = self.request_version env['REMOTE_ADDR'] = self.client_address[0] env['REMOTE_PORT'] = self.client_address[1] env['wsgi.version'] = (1, 0) env['wsgi.url_scheme'] = 'http' env['wsgi.input'] = io.BytesIO(self.raw_request) env['wsgi.errors'] = sys.stderr env['wsgi.multithread'] = False env['wsgi.multiprocess'] = True env['wsgi.run_once'] = False for k, v in self.headers.items(): k = k.replace('-', '_').upper() if k in env: continue env['HTTP_' + k] = v return env
def get_environ(self): """https://www.python.org/dev/peps/pep-0333/#environ-variables""" env = self.base_environ.copy() env['REQUEST_METHOD'] = self.request_method if '?' in self.path: path, query = self.path.split('?', 1) else: path, query = self.path, '' env['PATH_INFO'] = urllib.parse.unquote(path) env['QUERY_STRING'] = query env['CONTENT_TYPE'] = self.headers.get('Content-Type', '') env['CONTENT_LENGTH'] = self.headers.get('Content-Length', '0') env['SERVER_PROTOCOL'] = self.request_version env['REMOTE_ADDR'] = self.client_address[0] env['REMOTE_PORT'] = self.client_address[1] env['wsgi.version'] = (1, 0) env['wsgi.url_scheme'] = 'http' env['wsgi.input'] = io.BytesIO(self.raw_request) env['wsgi.errors'] = sys.stderr env['wsgi.multithread'] = False env['wsgi.multiprocess'] = True env['wsgi.run_once'] = False for k, v in self.headers.items(): k = k.replace('-', '_').upper() if k in env: continue env['HTTP_' + k] = v return env
[ "https", ":", "//", "www", ".", "python", ".", "org", "/", "dev", "/", "peps", "/", "pep", "-", "0333", "/", "#environ", "-", "variables" ]
mozillazg/bustard
python
https://github.com/mozillazg/bustard/blob/bd7b47f3ba5440cf6ea026c8b633060fedeb80b7/bustard/wsgi_server.py#L112-L144
[ "def", "get_environ", "(", "self", ")", ":", "env", "=", "self", ".", "base_environ", ".", "copy", "(", ")", "env", "[", "'REQUEST_METHOD'", "]", "=", "self", ".", "request_method", "if", "'?'", "in", "self", ".", "path", ":", "path", ",", "query", "=", "self", ".", "path", ".", "split", "(", "'?'", ",", "1", ")", "else", ":", "path", ",", "query", "=", "self", ".", "path", ",", "''", "env", "[", "'PATH_INFO'", "]", "=", "urllib", ".", "parse", ".", "unquote", "(", "path", ")", "env", "[", "'QUERY_STRING'", "]", "=", "query", "env", "[", "'CONTENT_TYPE'", "]", "=", "self", ".", "headers", ".", "get", "(", "'Content-Type'", ",", "''", ")", "env", "[", "'CONTENT_LENGTH'", "]", "=", "self", ".", "headers", ".", "get", "(", "'Content-Length'", ",", "'0'", ")", "env", "[", "'SERVER_PROTOCOL'", "]", "=", "self", ".", "request_version", "env", "[", "'REMOTE_ADDR'", "]", "=", "self", ".", "client_address", "[", "0", "]", "env", "[", "'REMOTE_PORT'", "]", "=", "self", ".", "client_address", "[", "1", "]", "env", "[", "'wsgi.version'", "]", "=", "(", "1", ",", "0", ")", "env", "[", "'wsgi.url_scheme'", "]", "=", "'http'", "env", "[", "'wsgi.input'", "]", "=", "io", ".", "BytesIO", "(", "self", ".", "raw_request", ")", "env", "[", "'wsgi.errors'", "]", "=", "sys", ".", "stderr", "env", "[", "'wsgi.multithread'", "]", "=", "False", "env", "[", "'wsgi.multiprocess'", "]", "=", "True", "env", "[", "'wsgi.run_once'", "]", "=", "False", "for", "k", ",", "v", "in", "self", ".", "headers", ".", "items", "(", ")", ":", "k", "=", "k", ".", "replace", "(", "'-'", ",", "'_'", ")", ".", "upper", "(", ")", "if", "k", "in", "env", ":", "continue", "env", "[", "'HTTP_'", "+", "k", "]", "=", "v", "return", "env" ]
bd7b47f3ba5440cf6ea026c8b633060fedeb80b7
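The two methods above assemble the PEP 3333 environ handed to the application. A sketch of how a WSGI callable consumes those keys (note that REMOTE_PORT is assigned an integer by get_environ, whereas CGI-style variables are conventionally strings):

def app(environ, start_response):
    # Echo the request line using the keys populated in get_environ().
    body = '%s %s?%s' % (environ['REQUEST_METHOD'],
                         environ['PATH_INFO'],
                         environ['QUERY_STRING'])
    start_response('200 OK', [('Content-Type', 'text/plain')])
    return [body.encode('utf-8')]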
valid
StylesResource.get
Get styles.
invenio_csl_rest/views.py
def get(self, q=None, page=None): """Get styles.""" # Check cache to exit early if needed etag = generate_etag(current_ext.content_version.encode('utf8')) self.check_etag(etag, weak=True) # Build response res = jsonify(current_ext.styles) res.set_etag(etag) return res
def get(self, q=None, page=None): """Get styles.""" # Check cache to exit early if needed etag = generate_etag(current_ext.content_version.encode('utf8')) self.check_etag(etag, weak=True) # Build response res = jsonify(current_ext.styles) res.set_etag(etag) return res
[ "Get", "styles", "." ]
inveniosoftware/invenio-csl-rest
python
https://github.com/inveniosoftware/invenio-csl-rest/blob/a474a5b4caa9e6ae841a007fa52b30ad7e957560/invenio_csl_rest/views.py#L56-L66
[ "def", "get", "(", "self", ",", "q", "=", "None", ",", "page", "=", "None", ")", ":", "# Check cache to exit early if needed", "etag", "=", "generate_etag", "(", "current_ext", ".", "content_version", ".", "encode", "(", "'utf8'", ")", ")", "self", ".", "check_etag", "(", "etag", ",", "weak", "=", "True", ")", "# Build response", "res", "=", "jsonify", "(", "current_ext", ".", "styles", ")", "res", ".", "set_etag", "(", "etag", ")", "return", "res" ]
a474a5b4caa9e6ae841a007fa52b30ad7e957560
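A hypothetical client-side view of the ETag handling above (the base URL is a placeholder and requests is just one possible HTTP client): resending the tag via If-None-Match lets check_etag() short-circuit with 304 Not Modified while the content version is unchanged.

import requests  # assumed to be available; any HTTP client works

base = 'https://example.org/api/csl/styles'  # placeholder endpoint
first = requests.get(base)
etag = first.headers.get('ETag')
second = requests.get(base, headers={'If-None-Match': etag})
print(second.status_code)  # 304 while the styles content version is unchanged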
valid
Connection.create_from_settings
Create a connection with given settings. Args: settings (dict): A dictionary of settings Returns: :class:`Connection`. The connection
pyfire/connection.py
def create_from_settings(settings): """ Create a connection with given settings. Args: settings (dict): A dictionary of settings Returns: :class:`Connection`. The connection """ return Connection( settings["url"], settings["base_url"], settings["user"], settings["password"], authorizations = settings["authorizations"], debug = settings["debug"] )
def create_from_settings(settings): """ Create a connection with given settings. Args: settings (dict): A dictionary of settings Returns: :class:`Connection`. The connection """ return Connection( settings["url"], settings["base_url"], settings["user"], settings["password"], authorizations = settings["authorizations"], debug = settings["debug"] )
[ "Create", "a", "connection", "with", "given", "settings", "." ]
mariano/pyfire
python
https://github.com/mariano/pyfire/blob/42e3490c138abc8e10f2e9f8f8f3b40240a80412/pyfire/connection.py#L84-L100
[ "def", "create_from_settings", "(", "settings", ")", ":", "return", "Connection", "(", "settings", "[", "\"url\"", "]", ",", "settings", "[", "\"base_url\"", "]", ",", "settings", "[", "\"user\"", "]", ",", "settings", "[", "\"password\"", "]", ",", "authorizations", "=", "settings", "[", "\"authorizations\"", "]", ",", "debug", "=", "settings", "[", "\"debug\"", "]", ")" ]
42e3490c138abc8e10f2e9f8f8f3b40240a80412
valid
Connection.delete
Issue a DELETE request. Kwargs: url (str): Destination URL post_data (dict): Dictionary of parameter and values parse_data (bool): If true, parse response data key (string): If parse_data==True, look for this key when parsing data parameters (dict): Additional GET parameters to append to the URL Returns: dict. Response (a dict with keys: success, data, info, body) Raises: AuthenticationError, ConnectionError, urllib2.HTTPError, ValueError, Exception
pyfire/connection.py
def delete(self, url=None, post_data={}, parse_data=False, key=None, parameters=None): """ Issue a DELETE request. Kwargs: url (str): Destination URL post_data (dict): Dictionary of parameter and values parse_data (bool): If true, parse response data key (string): If parse_data==True, look for this key when parsing data parameters (dict): Additional GET parameters to append to the URL Returns: dict. Response (a dict with keys: success, data, info, body) Raises: AuthenticationError, ConnectionError, urllib2.HTTPError, ValueError, Exception """ return self._fetch("DELETE", url, post_data=post_data, parse_data=parse_data, key=key, parameters=parameters, full_return=True)
def delete(self, url=None, post_data={}, parse_data=False, key=None, parameters=None): """ Issue a DELETE request. Kwargs: url (str): Destination URL post_data (dict): Dictionary of parameter and values parse_data (bool): If true, parse response data key (string): If parse_data==True, look for this key when parsing data parameters (dict): Additional GET parameters to append to the URL Returns: dict. Response (a dict with keys: success, data, info, body) Raises: AuthenticationError, ConnectionError, urllib2.HTTPError, ValueError, Exception """ return self._fetch("DELETE", url, post_data=post_data, parse_data=parse_data, key=key, parameters=parameters, full_return=True)
[ "Issue", "a", "DELETE", "request", "." ]
mariano/pyfire
python
https://github.com/mariano/pyfire/blob/42e3490c138abc8e10f2e9f8f8f3b40240a80412/pyfire/connection.py#L138-L154
[ "def", "delete", "(", "self", ",", "url", "=", "None", ",", "post_data", "=", "{", "}", ",", "parse_data", "=", "False", ",", "key", "=", "None", ",", "parameters", "=", "None", ")", ":", "return", "self", ".", "_fetch", "(", "\"DELETE\"", ",", "url", ",", "post_data", "=", "post_data", ",", "parse_data", "=", "parse_data", ",", "key", "=", "key", ",", "parameters", "=", "parameters", ",", "full_return", "=", "True", ")" ]
42e3490c138abc8e10f2e9f8f8f3b40240a80412
valid
Connection.post
Issue a POST request. Kwargs: url (str): Destination URL post_data (dict): Dictionary of parameter and values parse_data (bool): If true, parse response data key (string): If parse_data==True, look for this key when parsing data parameters (dict): Additional GET parameters to append to the URL listener (func): callback called when uploading a file Returns: dict. Response (a dict with keys: success, data, info, body) Raises: AuthenticationError, ConnectionError, urllib2.HTTPError, ValueError, Exception
pyfire/connection.py
def post(self, url=None, post_data={}, parse_data=False, key=None, parameters=None, listener=None): """ Issue a POST request. Kwargs: url (str): Destination URL post_data (dict): Dictionary of parameter and values parse_data (bool): If true, parse response data key (string): If parse_data==True, look for this key when parsing data parameters (dict): Additional GET parameters to append to the URL listener (func): callback called when uploading a file Returns: dict. Response (a dict with keys: success, data, info, body) Raises: AuthenticationError, ConnectionError, urllib2.HTTPError, ValueError, Exception """ return self._fetch("POST", url, post_data=post_data, parse_data=parse_data, key=key, parameters=parameters, listener=listener, full_return=True)
def post(self, url=None, post_data={}, parse_data=False, key=None, parameters=None, listener=None): """ Issue a POST request. Kwargs: url (str): Destination URL post_data (dict): Dictionary of parameter and values parse_data (bool): If true, parse response data key (string): If parse_data==True, look for this key when parsing data parameters (dict): Additional GET parameters to append to the URL listener (func): callback called when uploading a file Returns: dict. Response (a dict with keys: success, data, info, body) Raises: AuthenticationError, ConnectionError, urllib2.HTTPError, ValueError, Exception """ return self._fetch("POST", url, post_data=post_data, parse_data=parse_data, key=key, parameters=parameters, listener=listener, full_return=True)
[ "Issue", "a", "POST", "request", "." ]
mariano/pyfire
python
https://github.com/mariano/pyfire/blob/42e3490c138abc8e10f2e9f8f8f3b40240a80412/pyfire/connection.py#L174-L191
[ "def", "post", "(", "self", ",", "url", "=", "None", ",", "post_data", "=", "{", "}", ",", "parse_data", "=", "False", ",", "key", "=", "None", ",", "parameters", "=", "None", ",", "listener", "=", "None", ")", ":", "return", "self", ".", "_fetch", "(", "\"POST\"", ",", "url", ",", "post_data", "=", "post_data", ",", "parse_data", "=", "parse_data", ",", "key", "=", "key", ",", "parameters", "=", "parameters", ",", "listener", "=", "listener", ",", "full_return", "=", "True", ")" ]
42e3490c138abc8e10f2e9f8f8f3b40240a80412
valid
Connection.get
Issue a GET request. Kwargs: url (str): Destination URL parse_data (bool): If true, parse response data key (string): If parse_data==True, look for this key when parsing data parameters (dict): Additional GET parameters to append to the URL Returns: dict. Response (a dict with keys: success, data, info, body) Raises: AuthenticationError, ConnectionError, urllib2.HTTPError, ValueError, Exception
pyfire/connection.py
def get(self, url=None, parse_data=True, key=None, parameters=None): """ Issue a GET request. Kwargs: url (str): Destination URL parse_data (bool): If true, parse response data key (string): If parse_data==True, look for this key when parsing data parameters (dict): Additional GET parameters to append to the URL Returns: dict. Response (a dict with keys: success, data, info, body) Raises: AuthenticationError, ConnectionError, urllib2.HTTPError, ValueError, Exception """ return self._fetch("GET", url, post_data=None, parse_data=parse_data, key=key, parameters=parameters)
def get(self, url=None, parse_data=True, key=None, parameters=None): """ Issue a GET request. Kwargs: url (str): Destination URL parse_data (bool): If true, parse response data key (string): If parse_data==True, look for this key when parsing data parameters (dict): Additional GET parameters to append to the URL Returns: dict. Response (a dict with keys: success, data, info, body) Raises: AuthenticationError, ConnectionError, urllib2.HTTPError, ValueError, Exception """ return self._fetch("GET", url, post_data=None, parse_data=parse_data, key=key, parameters=parameters)
[ "Issue", "a", "GET", "request", "." ]
mariano/pyfire
python
https://github.com/mariano/pyfire/blob/42e3490c138abc8e10f2e9f8f8f3b40240a80412/pyfire/connection.py#L193-L208
[ "def", "get", "(", "self", ",", "url", "=", "None", ",", "parse_data", "=", "True", ",", "key", "=", "None", ",", "parameters", "=", "None", ")", ":", "return", "self", ".", "_fetch", "(", "\"GET\"", ",", "url", ",", "post_data", "=", "None", ",", "parse_data", "=", "parse_data", ",", "key", "=", "key", ",", "parameters", "=", "parameters", ")" ]
42e3490c138abc8e10f2e9f8f8f3b40240a80412
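A usage sketch tying the three thin wrappers above to the factory shown earlier (the base URL, token, and room endpoints are placeholders modelled on the Campfire-style API; any extra settings keys the class may expect are not shown):

from pyfire.connection import Connection  # assumed import path

connection = Connection.create_from_settings({
    'url': None,
    'base_url': 'https://example.campfirenow.com',
    'user': 'api-token',
    'password': 'X',
    'authorizations': {},
    'debug': False,
})
rooms = connection.get('rooms')  # parse_data=True, key falls back to 'rooms'
connection.post('room/12345/speak', post_data={'message': {'body': 'hello'}})
connection.delete('room/12345/lock')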
valid
Connection.get_headers
Get headers. Returns: tuple: Headers
pyfire/connection.py
def get_headers(self): """ Get headers. Returns: tuple: Headers """ headers = { "User-Agent": "kFlame 1.0" } password_url = self._get_password_url() if password_url and password_url in self._settings["authorizations"]: headers["Authorization"] = self._settings["authorizations"][password_url] return headers
def get_headers(self): """ Get headers. Returns: tuple: Headers """ headers = { "User-Agent": "kFlame 1.0" } password_url = self._get_password_url() if password_url and password_url in self._settings["authorizations"]: headers["Authorization"] = self._settings["authorizations"][password_url] return headers
[ "Get", "headers", "." ]
mariano/pyfire
python
https://github.com/mariano/pyfire/blob/42e3490c138abc8e10f2e9f8f8f3b40240a80412/pyfire/connection.py#L210-L224
[ "def", "get_headers", "(", "self", ")", ":", "headers", "=", "{", "\"User-Agent\"", ":", "\"kFlame 1.0\"", "}", "password_url", "=", "self", ".", "_get_password_url", "(", ")", "if", "password_url", "and", "password_url", "in", "self", ".", "_settings", "[", "\"authorizations\"", "]", ":", "headers", "[", "\"Authorization\"", "]", "=", "self", ".", "_settings", "[", "\"authorizations\"", "]", "[", "password_url", "]", "return", "headers" ]
42e3490c138abc8e10f2e9f8f8f3b40240a80412
valid
Connection._get_password_url
Get URL used for authentication Returns: string: URL
pyfire/connection.py
def _get_password_url(self): """ Get URL used for authentication Returns: string: URL """ password_url = None if self._settings["user"] or self._settings["authorization"]: if self._settings["url"]: password_url = self._settings["url"] elif self._settings["base_url"]: password_url = self._settings["base_url"] return password_url
def _get_password_url(self): """ Get URL used for authentication Returns: string: URL """ password_url = None if self._settings["user"] or self._settings["authorization"]: if self._settings["url"]: password_url = self._settings["url"] elif self._settings["base_url"]: password_url = self._settings["base_url"] return password_url
[ "Get", "URL", "used", "for", "authentication" ]
mariano/pyfire
python
https://github.com/mariano/pyfire/blob/42e3490c138abc8e10f2e9f8f8f3b40240a80412/pyfire/connection.py#L226-L238
[ "def", "_get_password_url", "(", "self", ")", ":", "password_url", "=", "None", "if", "self", ".", "_settings", "[", "\"user\"", "]", "or", "self", ".", "_settings", "[", "\"authorization\"", "]", ":", "if", "self", ".", "_settings", "[", "\"url\"", "]", ":", "password_url", "=", "self", ".", "_settings", "[", "\"url\"", "]", "elif", "self", ".", "_settings", "[", "\"base_url\"", "]", ":", "password_url", "=", "self", ".", "_settings", "[", "\"base_url\"", "]", "return", "password_url" ]
42e3490c138abc8e10f2e9f8f8f3b40240a80412
valid
Connection.parse
Parses a response. Args: text (str): Text to parse Kwargs: key (str): Key to look for, if any Returns: Parsed value Raises: ValueError
pyfire/connection.py
def parse(self, text, key=None): """ Parses a response. Args: text (str): Text to parse Kwargs: key (str): Key to look for, if any Returns: Parsed value Raises: ValueError """ try: data = json.loads(text) except ValueError as e: raise ValueError("%s: Value: [%s]" % (e, text)) if data and key: if key not in data: raise ValueError("Invalid response (key %s not found): %s" % (key, data)) data = data[key] return data
def parse(self, text, key=None): """ Parses a response. Args: text (str): Text to parse Kwargs: key (str): Key to look for, if any Returns: Parsed value Raises: ValueError """ try: data = json.loads(text) except ValueError as e: raise ValueError("%s: Value: [%s]" % (e, text)) if data and key: if key not in data: raise ValueError("Invalid response (key %s not found): %s" % (key, data)) data = data[key] return data
[ "Parses", "a", "response", "." ]
mariano/pyfire
python
https://github.com/mariano/pyfire/blob/42e3490c138abc8e10f2e9f8f8f3b40240a80412/pyfire/connection.py#L240-L264
[ "def", "parse", "(", "self", ",", "text", ",", "key", "=", "None", ")", ":", "try", ":", "data", "=", "json", ".", "loads", "(", "text", ")", "except", "ValueError", "as", "e", ":", "raise", "ValueError", "(", "\"%s: Value: [%s]\"", "%", "(", "e", ",", "text", ")", ")", "if", "data", "and", "key", ":", "if", "key", "not", "in", "data", ":", "raise", "ValueError", "(", "\"Invalid response (key %s not found): %s\"", "%", "(", "key", ",", "data", ")", ")", "data", "=", "data", "[", "key", "]", "return", "data" ]
42e3490c138abc8e10f2e9f8f8f3b40240a80412
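A standalone restatement of the parse() contract above, runnable on its own: decode JSON, optionally drill into one top-level key, and turn anything unexpected into a ValueError.

import json

def parse(text, key=None):
    try:
        data = json.loads(text)
    except ValueError as e:
        raise ValueError('%s: Value: [%s]' % (e, text))
    if data and key:
        if key not in data:
            raise ValueError('Invalid response (key %s not found): %s' % (key, data))
        data = data[key]
    return data

print(parse('{"room": {"id": 1, "name": "Lobby"}}', key='room'))
# -> {'id': 1, 'name': 'Lobby'}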
valid
Connection.build_twisted_request
Build a request for twisted Args: method (str): Request method (GET/POST/PUT/DELETE/etc.) If not specified, it will be POST if post_data is not None url (str): Destination URL (full, or relative) Kwargs: extra_headers (dict): Headers (override default connection headers, if any) body_producer (:class:`twisted.web.iweb.IBodyProducer`): Object producing request body full_url (bool): If False, URL is relative Returns: tuple. Tuple with two elements: reactor, and request
pyfire/connection.py
def build_twisted_request(self, method, url, extra_headers={}, body_producer=None, full_url=False): """ Build a request for twisted Args: method (str): Request method (GET/POST/PUT/DELETE/etc.) If not specified, it will be POST if post_data is not None url (str): Destination URL (full, or relative) Kwargs: extra_headers (dict): Headers (override default connection headers, if any) body_producer (:class:`twisted.web.iweb.IBodyProducer`): Object producing request body full_url (bool): If False, URL is relative Returns: tuple. Tuple with two elements: reactor, and request """ uri = url if full_url else self._url(url) raw_headers = self.get_headers() if extra_headers: raw_headers.update(extra_headers) headers = http_headers.Headers() for header in raw_headers: headers.addRawHeader(header, raw_headers[header]) agent = client.Agent(reactor) request = agent.request(method, uri, headers, body_producer) return (reactor, request)
def build_twisted_request(self, method, url, extra_headers={}, body_producer=None, full_url=False): """ Build a request for twisted Args: method (str): Request method (GET/POST/PUT/DELETE/etc.) If not specified, it will be POST if post_data is not None url (str): Destination URL (full, or relative) Kwargs: extra_headers (dict): Headers (override default connection headers, if any) body_producer (:class:`twisted.web.iweb.IBodyProducer`): Object producing request body full_url (bool): If False, URL is relative Returns: tuple. Tuple with two elements: reactor, and request """ uri = url if full_url else self._url(url) raw_headers = self.get_headers() if extra_headers: raw_headers.update(extra_headers) headers = http_headers.Headers() for header in raw_headers: headers.addRawHeader(header, raw_headers[header]) agent = client.Agent(reactor) request = agent.request(method, uri, headers, body_producer) return (reactor, request)
[ "Build", "a", "request", "for", "twisted" ]
mariano/pyfire
python
https://github.com/mariano/pyfire/blob/42e3490c138abc8e10f2e9f8f8f3b40240a80412/pyfire/connection.py#L274-L302
[ "def", "build_twisted_request", "(", "self", ",", "method", ",", "url", ",", "extra_headers", "=", "{", "}", ",", "body_producer", "=", "None", ",", "full_url", "=", "False", ")", ":", "uri", "=", "url", "if", "full_url", "else", "self", ".", "_url", "(", "url", ")", "raw_headers", "=", "self", ".", "get_headers", "(", ")", "if", "extra_headers", ":", "raw_headers", ".", "update", "(", "extra_headers", ")", "headers", "=", "http_headers", ".", "Headers", "(", ")", "for", "header", "in", "raw_headers", ":", "headers", ".", "addRawHeader", "(", "header", ",", "raw_headers", "[", "header", "]", ")", "agent", "=", "client", ".", "Agent", "(", "reactor", ")", "request", "=", "agent", ".", "request", "(", "method", ",", "uri", ",", "headers", ",", "body_producer", ")", "return", "(", "reactor", ",", "request", ")" ]
42e3490c138abc8e10f2e9f8f8f3b40240a80412
valid
Connection._fetch
Issue a request. Args: method (str): Request method (GET/POST/PUT/DELETE/etc.) If not specified, it will be POST if post_data is not None Kwargs: url (str): Destination URL post_data (str): A string of what to POST parse_data (bool): If true, parse response data key (string): If parse_data==True, look for this key when parsing data parameters (dict): Additional GET parameters to append to the URL listener (func): callback called when uploading a file full_return (bool): If set to True, get a full response (with success, data, info, body) Returns: dict. Response. If full_return==True, a dict with keys: success, data, info, body, otherwise the parsed data Raises: AuthenticationError, ConnectionError, urllib2.HTTPError, ValueError
pyfire/connection.py
def _fetch(self, method, url=None, post_data=None, parse_data=True, key=None, parameters=None, listener=None, full_return=False): """ Issue a request. Args: method (str): Request method (GET/POST/PUT/DELETE/etc.) If not specified, it will be POST if post_data is not None Kwargs: url (str): Destination URL post_data (str): A string of what to POST parse_data (bool): If true, parse response data key (string): If parse_data==True, look for this key when parsing data parameters (dict): Additional GET parameters to append to the URL listener (func): callback called when uploading a file full_return (bool): If set to True, get a full response (with success, data, info, body) Returns: dict. Response. If full_return==True, a dict with keys: success, data, info, body, otherwise the parsed data Raises: AuthenticationError, ConnectionError, urllib2.HTTPError, ValueError """ headers = self.get_headers() headers["Content-Type"] = "application/json" handlers = [] debuglevel = int(self._settings["debug"]) handlers.append(urllib2.HTTPHandler(debuglevel=debuglevel)) if hasattr(httplib, "HTTPS"): handlers.append(urllib2.HTTPSHandler(debuglevel=debuglevel)) handlers.append(urllib2.HTTPCookieProcessor(cookielib.CookieJar())) password_url = self._get_password_url() if password_url and "Authorization" not in headers: pwd_manager = urllib2.HTTPPasswordMgrWithDefaultRealm() pwd_manager.add_password(None, password_url, self._settings["user"], self._settings["password"]) handlers.append(HTTPBasicAuthHandler(pwd_manager)) opener = urllib2.build_opener(*handlers) if post_data is not None: post_data = json.dumps(post_data) uri = self._url(url, parameters) request = RESTRequest(uri, method=method, headers=headers) if post_data is not None: request.add_data(post_data) response = None try: response = opener.open(request) body = response.read() if password_url and password_url not in self._settings["authorizations"] and request.has_header("Authorization"): self._settings["authorizations"][password_url] = request.get_header("Authorization") except urllib2.HTTPError as e: if e.code == 401: raise AuthenticationError("Access denied while trying to access %s" % uri) elif e.code == 404: raise ConnectionError("URL not found: %s" % uri) else: raise except urllib2.URLError as e: raise ConnectionError("Error while fetching from %s: %s" % (uri, e)) finally: if response: response.close() opener.close() data = None if parse_data: if not key: key = string.split(url, "/")[0] data = self.parse(body, key) if full_return: info = response.info() if response else None status = int(string.split(info["status"])[0]) if (info and "status" in info) else None return { "success": (status >= 200 and status < 300), "data": data, "info": info, "body": body } return data
def _fetch(self, method, url=None, post_data=None, parse_data=True, key=None, parameters=None, listener=None, full_return=False): """ Issue a request. Args: method (str): Request method (GET/POST/PUT/DELETE/etc.) If not specified, it will be POST if post_data is not None Kwargs: url (str): Destination URL post_data (str): A string of what to POST parse_data (bool): If true, parse response data key (string): If parse_data==True, look for this key when parsing data parameters (dict): Additional GET parameters to append to the URL listener (func): callback called when uploading a file full_return (bool): If set to True, get a full response (with success, data, info, body) Returns: dict. Response. If full_return==True, a dict with keys: success, data, info, body, otherwise the parsed data Raises: AuthenticationError, ConnectionError, urllib2.HTTPError, ValueError """ headers = self.get_headers() headers["Content-Type"] = "application/json" handlers = [] debuglevel = int(self._settings["debug"]) handlers.append(urllib2.HTTPHandler(debuglevel=debuglevel)) if hasattr(httplib, "HTTPS"): handlers.append(urllib2.HTTPSHandler(debuglevel=debuglevel)) handlers.append(urllib2.HTTPCookieProcessor(cookielib.CookieJar())) password_url = self._get_password_url() if password_url and "Authorization" not in headers: pwd_manager = urllib2.HTTPPasswordMgrWithDefaultRealm() pwd_manager.add_password(None, password_url, self._settings["user"], self._settings["password"]) handlers.append(HTTPBasicAuthHandler(pwd_manager)) opener = urllib2.build_opener(*handlers) if post_data is not None: post_data = json.dumps(post_data) uri = self._url(url, parameters) request = RESTRequest(uri, method=method, headers=headers) if post_data is not None: request.add_data(post_data) response = None try: response = opener.open(request) body = response.read() if password_url and password_url not in self._settings["authorizations"] and request.has_header("Authorization"): self._settings["authorizations"][password_url] = request.get_header("Authorization") except urllib2.HTTPError as e: if e.code == 401: raise AuthenticationError("Access denied while trying to access %s" % uri) elif e.code == 404: raise ConnectionError("URL not found: %s" % uri) else: raise except urllib2.URLError as e: raise ConnectionError("Error while fetching from %s: %s" % (uri, e)) finally: if response: response.close() opener.close() data = None if parse_data: if not key: key = string.split(url, "/")[0] data = self.parse(body, key) if full_return: info = response.info() if response else None status = int(string.split(info["status"])[0]) if (info and "status" in info) else None return { "success": (status >= 200 and status < 300), "data": data, "info": info, "body": body } return data
[ "Issue", "a", "request", "." ]
mariano/pyfire
python
https://github.com/mariano/pyfire/blob/42e3490c138abc8e10f2e9f8f8f3b40240a80412/pyfire/connection.py#L304-L394
[ "def", "_fetch", "(", "self", ",", "method", ",", "url", "=", "None", ",", "post_data", "=", "None", ",", "parse_data", "=", "True", ",", "key", "=", "None", ",", "parameters", "=", "None", ",", "listener", "=", "None", ",", "full_return", "=", "False", ")", ":", "headers", "=", "self", ".", "get_headers", "(", ")", "headers", "[", "\"Content-Type\"", "]", "=", "\"application/json\"", "handlers", "=", "[", "]", "debuglevel", "=", "int", "(", "self", ".", "_settings", "[", "\"debug\"", "]", ")", "handlers", ".", "append", "(", "urllib2", ".", "HTTPHandler", "(", "debuglevel", "=", "debuglevel", ")", ")", "if", "hasattr", "(", "httplib", ",", "\"HTTPS\"", ")", ":", "handlers", ".", "append", "(", "urllib2", ".", "HTTPSHandler", "(", "debuglevel", "=", "debuglevel", ")", ")", "handlers", ".", "append", "(", "urllib2", ".", "HTTPCookieProcessor", "(", "cookielib", ".", "CookieJar", "(", ")", ")", ")", "password_url", "=", "self", ".", "_get_password_url", "(", ")", "if", "password_url", "and", "\"Authorization\"", "not", "in", "headers", ":", "pwd_manager", "=", "urllib2", ".", "HTTPPasswordMgrWithDefaultRealm", "(", ")", "pwd_manager", ".", "add_password", "(", "None", ",", "password_url", ",", "self", ".", "_settings", "[", "\"user\"", "]", ",", "self", ".", "_settings", "[", "\"password\"", "]", ")", "handlers", ".", "append", "(", "HTTPBasicAuthHandler", "(", "pwd_manager", ")", ")", "opener", "=", "urllib2", ".", "build_opener", "(", "*", "handlers", ")", "if", "post_data", "is", "not", "None", ":", "post_data", "=", "json", ".", "dumps", "(", "post_data", ")", "uri", "=", "self", ".", "_url", "(", "url", ",", "parameters", ")", "request", "=", "RESTRequest", "(", "uri", ",", "method", "=", "method", ",", "headers", "=", "headers", ")", "if", "post_data", "is", "not", "None", ":", "request", ".", "add_data", "(", "post_data", ")", "response", "=", "None", "try", ":", "response", "=", "opener", ".", "open", "(", "request", ")", "body", "=", "response", ".", "read", "(", ")", "if", "password_url", "and", "password_url", "not", "in", "self", ".", "_settings", "[", "\"authorizations\"", "]", "and", "request", ".", "has_header", "(", "\"Authorization\"", ")", ":", "self", ".", "_settings", "[", "\"authorizations\"", "]", "[", "password_url", "]", "=", "request", ".", "get_header", "(", "\"Authorization\"", ")", "except", "urllib2", ".", "HTTPError", "as", "e", ":", "if", "e", ".", "code", "==", "401", ":", "raise", "AuthenticationError", "(", "\"Access denied while trying to access %s\"", "%", "uri", ")", "elif", "e", ".", "code", "==", "404", ":", "raise", "ConnectionError", "(", "\"URL not found: %s\"", "%", "uri", ")", "else", ":", "raise", "except", "urllib2", ".", "URLError", "as", "e", ":", "raise", "ConnectionError", "(", "\"Error while fetching from %s: %s\"", "%", "(", "uri", ",", "e", ")", ")", "finally", ":", "if", "response", ":", "response", ".", "close", "(", ")", "opener", ".", "close", "(", ")", "data", "=", "None", "if", "parse_data", ":", "if", "not", "key", ":", "key", "=", "string", ".", "split", "(", "url", ",", "\"/\"", ")", "[", "0", "]", "data", "=", "self", ".", "parse", "(", "body", ",", "key", ")", "if", "full_return", ":", "info", "=", "response", ".", "info", "(", ")", "if", "response", "else", "None", "status", "=", "int", "(", "string", ".", "split", "(", "info", "[", "\"status\"", "]", ")", "[", "0", "]", ")", "if", "(", "info", "and", "\"status\"", "in", "info", ")", "else", "None", "return", "{", "\"success\"", ":", "(", "status", ">=", "200", "and", "status", "<", "300", ")", ",", "\"data\"", ":", "data", ",", "\"info\"", ":", "info", ",", "\"body\"", ":", "body", "}", "return", "data" ]
42e3490c138abc8e10f2e9f8f8f3b40240a80412
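The record above shows Connection._fetch assembling urllib2 handlers (debug, HTTPS, cookies, basic auth), issuing the request, and optionally returning a full result dict. A minimal usage sketch, assuming `connection` is an already-configured pyfire Connection (its constructor is not part of this record):

# Hedged sketch only: `connection` is an assumed, already-configured pyfire Connection.
result = connection._fetch("GET", "rooms", full_return=True)
if result["success"]:            # True when the HTTP status code is 2xx
    rooms = result["data"]       # parsed from the JSON body under the "rooms" key
else:
    print("request failed:", result["info"], result["body"])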
valid
Connection._url
Build destination URL. Kwargs: url (str): Destination URL parameters (dict): Additional GET parameters to append to the URL Returns: str. URL
pyfire/connection.py
def _url(self, url=None, parameters=None): """ Build destination URL. Kwargs: url (str): Destination URL parameters (dict): Additional GET parameters to append to the URL Returns: str. URL """ uri = url or self._settings["url"] if url and self._settings["base_url"]: uri = "%s/%s" % (self._settings["base_url"], url) uri += ".json" if parameters: uri += "?%s" % urllib.urlencode(parameters) return uri
def _url(self, url=None, parameters=None): """ Build destination URL. Kwargs: url (str): Destination URL parameters (dict): Additional GET parameters to append to the URL Returns: str. URL """ uri = url or self._settings["url"] if url and self._settings["base_url"]: uri = "%s/%s" % (self._settings["base_url"], url) uri += ".json" if parameters: uri += "?%s" % urllib.urlencode(parameters) return uri
[ "Build", "destination", "URL", "." ]
mariano/pyfire
python
https://github.com/mariano/pyfire/blob/42e3490c138abc8e10f2e9f8f8f3b40240a80412/pyfire/connection.py#L396-L413
[ "def", "_url", "(", "self", ",", "url", "=", "None", ",", "parameters", "=", "None", ")", ":", "uri", "=", "url", "or", "self", ".", "_settings", "[", "\"url\"", "]", "if", "url", "and", "self", ".", "_settings", "[", "\"base_url\"", "]", ":", "uri", "=", "\"%s/%s\"", "%", "(", "self", ".", "_settings", "[", "\"base_url\"", "]", ",", "url", ")", "uri", "+=", "\".json\"", "if", "parameters", ":", "uri", "+=", "\"?%s\"", "%", "urllib", ".", "urlencode", "(", "parameters", ")", "return", "uri" ]
42e3490c138abc8e10f2e9f8f8f3b40240a80412
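To make the URL-building rules above concrete, here is a standalone sketch (not pyfire itself); the settings values are invented for illustration:

try:                                   # the original code targets Python 2's urllib
    from urllib import urlencode
except ImportError:                    # fallback so the sketch also runs on Python 3
    from urllib.parse import urlencode

# Invented example settings; pyfire fills these from the account configuration.
settings = {"url": "https://example.campfirenow.com",
            "base_url": "https://example.campfirenow.com"}

def build_url(url=None, parameters=None):
    uri = url or settings["url"]
    if url and settings["base_url"]:
        uri = "%s/%s" % (settings["base_url"], url)
    uri += ".json"                     # Campfire endpoints are addressed as JSON
    if parameters:
        uri += "?%s" % urlencode(parameters)
    return uri

print(build_url("rooms"))              # https://example.campfirenow.com/rooms.json
print(build_url("rooms", {"page": 2})) # ...rooms.json?page=2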
valid
Message.is_text
Tells if this message is a text message. Returns: bool. Success
pyfire/message.py
def is_text(self): """ Tells if this message is a text message. Returns: bool. Success """ return self.type in [ self._TYPE_PASTE, self._TYPE_TEXT, self._TYPE_TWEET ]
def is_text(self): """ Tells if this message is a text message. Returns: bool. Success """ return self.type in [ self._TYPE_PASTE, self._TYPE_TEXT, self._TYPE_TWEET ]
[ "Tells", "if", "this", "message", "is", "a", "text", "message", "." ]
mariano/pyfire
python
https://github.com/mariano/pyfire/blob/42e3490c138abc8e10f2e9f8f8f3b40240a80412/pyfire/message.py#L121-L131
[ "def", "is_text", "(", "self", ")", ":", "return", "self", ".", "type", "in", "[", "self", ".", "_TYPE_PASTE", ",", "self", ".", "_TYPE_TEXT", ",", "self", ".", "_TYPE_TWEET", "]" ]
42e3490c138abc8e10f2e9f8f8f3b40240a80412
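A usage sketch for is_text(); `message` is assumed to be a pyfire Message obtained from a stream observer or a search result, and the `body` attribute is an assumption not shown in this record:

# Hedged sketch: `message` and its `body` attribute are assumptions.
if message.is_text():                  # covers paste, plain text and tweet messages
    print(message.body)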
valid
Campfire.get_rooms
Get rooms list. Kwargs: sort (bool): If True, sort rooms by name Returns: array. List of rooms (each room is a dict)
pyfire/campfire.py
def get_rooms(self, sort=True): """ Get rooms list. Kwargs: sort (bool): If True, sort rooms by name Returns: array. List of rooms (each room is a dict) """ rooms = self._connection.get("rooms") if sort: rooms.sort(key=operator.itemgetter("name")) return rooms
def get_rooms(self, sort=True): """ Get rooms list. Kwargs: sort (bool): If True, sort rooms by name Returns: array. List of rooms (each room is a dict) """ rooms = self._connection.get("rooms") if sort: rooms.sort(key=operator.itemgetter("name")) return rooms
[ "Get", "rooms", "list", "." ]
mariano/pyfire
python
https://github.com/mariano/pyfire/blob/42e3490c138abc8e10f2e9f8f8f3b40240a80412/pyfire/campfire.py#L76-L88
[ "def", "get_rooms", "(", "self", ",", "sort", "=", "True", ")", ":", "rooms", "=", "self", ".", "_connection", ".", "get", "(", "\"rooms\"", ")", "if", "sort", ":", "rooms", ".", "sort", "(", "key", "=", "operator", ".", "itemgetter", "(", "\"name\"", ")", ")", "return", "rooms" ]
42e3490c138abc8e10f2e9f8f8f3b40240a80412
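A usage sketch; how the Campfire instance is constructed is not shown in these records, so `campfire` is assumed to exist already:

# Hedged sketch: `campfire` is an assumed, already-authenticated Campfire instance.
for room in campfire.get_rooms(sort=True):   # rooms come back as plain dicts
    print(room["id"], room["name"])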
valid
Campfire.get_room_by_name
Get a room by name. Returns: :class:`Room`. Room Raises: RoomNotFoundException
pyfire/campfire.py
def get_room_by_name(self, name): """ Get a room by name. Returns: :class:`Room`. Room Raises: RoomNotFoundException """ rooms = self.get_rooms() for room in rooms or []: if room["name"] == name: return self.get_room(room["id"]) raise RoomNotFoundException("Room %s not found" % name)
def get_room_by_name(self, name): """ Get a room by name. Returns: :class:`Room`. Room Raises: RoomNotFoundException """ rooms = self.get_rooms() for room in rooms or []: if room["name"] == name: return self.get_room(room["id"]) raise RoomNotFoundException("Room %s not found" % name)
[ "Get", "a", "room", "by", "name", "." ]
mariano/pyfire
python
https://github.com/mariano/pyfire/blob/42e3490c138abc8e10f2e9f8f8f3b40240a80412/pyfire/campfire.py#L90-L103
[ "def", "get_room_by_name", "(", "self", ",", "name", ")", ":", "rooms", "=", "self", ".", "get_rooms", "(", ")", "for", "room", "in", "rooms", "or", "[", "]", ":", "if", "room", "[", "\"name\"", "]", "==", "name", ":", "return", "self", ".", "get_room", "(", "room", "[", "\"id\"", "]", ")", "raise", "RoomNotFoundException", "(", "\"Room %s not found\"", "%", "name", ")" ]
42e3490c138abc8e10f2e9f8f8f3b40240a80412
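A usage sketch showing the documented failure mode; the import path for RoomNotFoundException is an assumption based on this record's module, and `campfire` is assumed as before:

from pyfire.campfire import RoomNotFoundException   # assumed import path

try:
    room = campfire.get_room_by_name("Ops")         # room name invented for illustration
except RoomNotFoundException as error:
    print(error)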
valid
Campfire.get_room
Get room. Returns: :class:`Room`. Room
pyfire/campfire.py
def get_room(self, id): """ Get room. Returns: :class:`Room`. Room """ if id not in self._rooms: self._rooms[id] = Room(self, id) return self._rooms[id]
def get_room(self, id): """ Get room. Returns: :class:`Room`. Room """ if id not in self._rooms: self._rooms[id] = Room(self, id) return self._rooms[id]
[ "Get", "room", "." ]
mariano/pyfire
python
https://github.com/mariano/pyfire/blob/42e3490c138abc8e10f2e9f8f8f3b40240a80412/pyfire/campfire.py#L105-L113
[ "def", "get_room", "(", "self", ",", "id", ")", ":", "if", "id", "not", "in", "self", ".", "_rooms", ":", "self", ".", "_rooms", "[", "id", "]", "=", "Room", "(", "self", ",", "id", ")", "return", "self", ".", "_rooms", "[", "id", "]" ]
42e3490c138abc8e10f2e9f8f8f3b40240a80412
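Because get_room() caches Room objects in self._rooms, repeated lookups with the same id return the same instance, as this hedged sketch illustrates (the id is invented, `campfire` assumed as before):

room_a = campfire.get_room(12345)   # id invented for illustration
room_b = campfire.get_room(12345)
assert room_a is room_b             # second call is served from the cache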
valid
Campfire.get_user
Get user. Returns: :class:`User`. User
pyfire/campfire.py
def get_user(self, id = None): """ Get user. Returns: :class:`User`. User """ if not id: id = self._user.id if id not in self._users: self._users[id] = self._user if id == self._user.id else User(self, id) return self._users[id]
def get_user(self, id = None): """ Get user. Returns: :class:`User`. User """ if not id: id = self._user.id if id not in self._users: self._users[id] = self._user if id == self._user.id else User(self, id) return self._users[id]
[ "Get", "user", "." ]
mariano/pyfire
python
https://github.com/mariano/pyfire/blob/42e3490c138abc8e10f2e9f8f8f3b40240a80412/pyfire/campfire.py#L115-L127
[ "def", "get_user", "(", "self", ",", "id", "=", "None", ")", ":", "if", "not", "id", ":", "id", "=", "self", ".", "_user", ".", "id", "if", "id", "not", "in", "self", ".", "_users", ":", "self", ".", "_users", "[", "id", "]", "=", "self", ".", "_user", "if", "id", "==", "self", ".", "_user", ".", "id", "else", "User", "(", "self", ",", "id", ")", "return", "self", ".", "_users", "[", "id", "]" ]
42e3490c138abc8e10f2e9f8f8f3b40240a80412
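A usage sketch for get_user(); with no id it falls back to the authenticated user and caches the result (the explicit id below is invented, `campfire` assumed as before):

me = campfire.get_user()            # defaults to self._user.id
other = campfire.get_user(67890)    # any other id creates and caches a User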
valid
Campfire.search
Search transcripts. Args: terms (str): Terms for search Returns: array. Messages
pyfire/campfire.py
def search(self, terms): """ Search transcripts. Args: terms (str): Terms for search Returns: array. Messages """ messages = self._connection.get("search/%s" % urllib.quote_plus(terms), key="messages") if messages: messages = [Message(self, message) for message in messages] return messages
def search(self, terms): """ Search transcripts. Args: terms (str): Terms for search Returns: array. Messages """ messages = self._connection.get("search/%s" % urllib.quote_plus(terms), key="messages") if messages: messages = [Message(self, message) for message in messages] return messages
[ "Search", "transcripts", "." ]
mariano/pyfire
python
https://github.com/mariano/pyfire/blob/42e3490c138abc8e10f2e9f8f8f3b40240a80412/pyfire/campfire.py#L129-L141
[ "def", "search", "(", "self", ",", "terms", ")", ":", "messages", "=", "self", ".", "_connection", ".", "get", "(", "\"search/%s\"", "%", "urllib", ".", "quote_plus", "(", "terms", ")", ",", "key", "=", "\"messages\"", ")", "if", "messages", ":", "messages", "=", "[", "Message", "(", "self", ",", "message", ")", "for", "message", "in", "messages", "]", "return", "messages" ]
42e3490c138abc8e10f2e9f8f8f3b40240a80412
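A usage sketch for search(); the terms are URL-quoted by the method itself, `campfire` is assumed as before, and the `body` attribute on Message is an assumption:

for message in campfire.search("deploy finished") or []:   # search terms invented
    if message.is_text():
        print(message.body)          # body attribute is an assumption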
valid
cookie_dump
:rtype: ``Cookie.SimpleCookie``
bustard/http.py
def cookie_dump(key, value='', max_age=None, expires=None, path='/', domain=None, secure=False, httponly=False): """ :rtype: ``Cookie.SimpleCookie`` """ cookie = SimpleCookie() cookie[key] = value for attr in ('max_age', 'expires', 'path', 'domain', 'secure', 'httponly'): attr_key = attr.replace('_', '-') attr_value = locals()[attr] if attr_value: cookie[key][attr_key] = attr_value return cookie
def cookie_dump(key, value='', max_age=None, expires=None, path='/', domain=None, secure=False, httponly=False): """ :rtype: ``Cookie.SimpleCookie`` """ cookie = SimpleCookie() cookie[key] = value for attr in ('max_age', 'expires', 'path', 'domain', 'secure', 'httponly'): attr_key = attr.replace('_', '-') attr_value = locals()[attr] if attr_value: cookie[key][attr_key] = attr_value return cookie
[ ":", "rtype", ":", "Cookie", ".", "SimpleCookie" ]
mozillazg/bustard
python
https://github.com/mozillazg/bustard/blob/bd7b47f3ba5440cf6ea026c8b633060fedeb80b7/bustard/http.py#L291-L304
[ "def", "cookie_dump", "(", "key", ",", "value", "=", "''", ",", "max_age", "=", "None", ",", "expires", "=", "None", ",", "path", "=", "'/'", ",", "domain", "=", "None", ",", "secure", "=", "False", ",", "httponly", "=", "False", ")", ":", "cookie", "=", "SimpleCookie", "(", ")", "cookie", "[", "key", "]", "=", "value", "for", "attr", "in", "(", "'max_age'", ",", "'expires'", ",", "'path'", ",", "'domain'", ",", "'secure'", ",", "'httponly'", ")", ":", "attr_key", "=", "attr", ".", "replace", "(", "'_'", ",", "'-'", ")", "attr_value", "=", "locals", "(", ")", "[", "attr", "]", "if", "attr_value", ":", "cookie", "[", "key", "]", "[", "attr_key", "]", "=", "attr_value", "return", "cookie" ]
bd7b47f3ba5440cf6ea026c8b633060fedeb80b7
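A usage sketch for cookie_dump(); the import path follows this record's module, and the rendered header is approximate:

from bustard.http import cookie_dump

cookie = cookie_dump("session_id", "abc123", max_age=3600, path="/", httponly=True)
print(cookie.output(header=""))
# roughly: session_id=abc123; HttpOnly; Max-Age=3600; Path=/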
valid
response_status_string
e.g. ``200 OK``
bustard/http.py
def response_status_string(code): """e.g. ``200 OK`` """ mean = HTTP_STATUS_CODES.get(code, 'unknown').upper() return '{code} {mean}'.format(code=code, mean=mean)
def response_status_string(code): """e.g. ``200 OK`` """ mean = HTTP_STATUS_CODES.get(code, 'unknown').upper() return '{code} {mean}'.format(code=code, mean=mean)
[ "e", ".", "g", ".", "200", "OK" ]
mozillazg/bustard
python
https://github.com/mozillazg/bustard/blob/bd7b47f3ba5440cf6ea026c8b633060fedeb80b7/bustard/http.py#L307-L310
[ "def", "response_status_string", "(", "code", ")", ":", "mean", "=", "HTTP_STATUS_CODES", ".", "get", "(", "code", ",", "'unknown'", ")", ".", "upper", "(", ")", "return", "'{code} {mean}'", ".", "format", "(", "code", "=", "code", ",", "mean", "=", "mean", ")" ]
bd7b47f3ba5440cf6ea026c8b633060fedeb80b7
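A usage sketch for response_status_string(); HTTP_STATUS_CODES is a mapping defined elsewhere in bustard/http.py, and unmapped codes fall back to "unknown":

from bustard.http import response_status_string

print(response_status_string(200))   # 200 OK
print(response_status_string(404))   # 404 NOT FOUND
print(response_status_string(999))   # 999 UNKNOWN  (unmapped code falls back)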
valid
Request.cookies
Request cookies :rtype: dict
bustard/http.py
def cookies(self): """Request cookies :rtype: dict """ http_cookie = self.environ.get('HTTP_COOKIE', '') _cookies = { k: v.value for (k, v) in SimpleCookie(http_cookie).items() } return _cookies
def cookies(self): """Request cookies :rtype: dict """ http_cookie = self.environ.get('HTTP_COOKIE', '') _cookies = { k: v.value for (k, v) in SimpleCookie(http_cookie).items() } return _cookies
[ "Request", "cookies" ]
mozillazg/bustard
python
https://github.com/mozillazg/bustard/blob/bd7b47f3ba5440cf6ea026c8b633060fedeb80b7/bustard/http.py#L90-L100
[ "def", "cookies", "(", "self", ")", ":", "http_cookie", "=", "self", ".", "environ", ".", "get", "(", "'HTTP_COOKIE'", ",", "''", ")", "_cookies", "=", "{", "k", ":", "v", ".", "value", "for", "(", "k", ",", "v", ")", "in", "SimpleCookie", "(", "http_cookie", ")", ".", "items", "(", ")", "}", "return", "_cookies" ]
bd7b47f3ba5440cf6ea026c8b633060fedeb80b7
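A standalone sketch of the same parsing step (not bustard itself): HTTP_COOKIE is read from the WSGI environ and flattened into a plain dict:

from http.cookies import SimpleCookie

environ = {"HTTP_COOKIE": "session_id=abc123; theme=dark"}   # sample WSGI environ
cookies = {k: v.value
           for k, v in SimpleCookie(environ.get("HTTP_COOKIE", "")).items()}
print(cookies)   # {'session_id': 'abc123', 'theme': 'dark'}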
valid
Stream.attach
Attach an observer. Args: observer (func): A function to be called when new messages arrive Returns: :class:`Stream`. Current instance to allow chaining
pyfire/stream.py
def attach(self, observer): """ Attach an observer. Args: observer (func): A function to be called when new messages arrive Returns: :class:`Stream`. Current instance to allow chaining """ if not observer in self._observers: self._observers.append(observer) return self
def attach(self, observer): """ Attach an observer. Args: observer (func): A function to be called when new messages arrive Returns: :class:`Stream`. Current instance to allow chaining """ if not observer in self._observers: self._observers.append(observer) return self
[ "Attach", "an", "observer", "." ]
mariano/pyfire
python
https://github.com/mariano/pyfire/blob/42e3490c138abc8e10f2e9f8f8f3b40240a80412/pyfire/stream.py#L52-L63
[ "def", "attach", "(", "self", ",", "observer", ")", ":", "if", "not", "observer", "in", "self", ".", "_observers", ":", "self", ".", "_observers", ".", "append", "(", "observer", ")", "return", "self" ]
42e3490c138abc8e10f2e9f8f8f3b40240a80412
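A usage sketch for attach(); how a Stream is obtained from a Room is not shown in these records, so `stream` is assumed to exist, and the chained start() call relies on attach() returning self as documented:

def on_message(message):
    if message.is_text():
        print(message.body)          # body attribute is an assumption

stream.attach(on_message).start()    # `stream` assumed; attach() returns self, so calls chain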
valid
Stream.incoming
Called when incoming messages arrive. Args: messages (tuple): Messages (each message is a dict)
pyfire/stream.py
def incoming(self, messages): """ Called when incoming messages arrive. Args: messages (tuple): Messages (each message is a dict) """ if self._observers: campfire = self._room.get_campfire() for message in messages: for observer in self._observers: observer(Message(campfire, message))
def incoming(self, messages): """ Called when incoming messages arrive. Args: messages (tuple): Messages (each message is a dict) """ if self._observers: campfire = self._room.get_campfire() for message in messages: for observer in self._observers: observer(Message(campfire, message))
[ "Called", "when", "incoming", "messages", "arrive", "." ]
mariano/pyfire
python
https://github.com/mariano/pyfire/blob/42e3490c138abc8e10f2e9f8f8f3b40240a80412/pyfire/stream.py#L80-L90
[ "def", "incoming", "(", "self", ",", "messages", ")", ":", "if", "self", ".", "_observers", ":", "campfire", "=", "self", ".", "_room", ".", "get_campfire", "(", ")", "for", "message", "in", "messages", ":", "for", "observer", "in", "self", ".", "_observers", ":", "observer", "(", "Message", "(", "campfire", ",", "message", ")", ")" ]
42e3490c138abc8e10f2e9f8f8f3b40240a80412
valid
Stream.run
Called by the thread, it runs the process. NEVER call this method directly. Instead call start() to start the thread. To stop, call stop(), and then join()
pyfire/stream.py
def run(self): """ Called by the thread, it runs the process. NEVER call this method directly. Instead call start() to start the thread. To stop, call stop(), and then join() """ if self._live: self._use_process = True self._abort = False campfire = self._room.get_campfire() if self._live: process = LiveStreamProcess(campfire.get_connection().get_settings(), self._room.id) else: process = StreamProcess(campfire.get_connection().get_settings(), self._room.id, pause=self._pause) if not self._use_process: process.set_callback(self.incoming) if self._use_process: queue = Queue() process.set_queue(queue) process.start() if not process.is_alive(): return self._streaming = True while not self._abort: if self._use_process: if not process.is_alive(): self._abort = True break try: incoming = queue.get_nowait() if isinstance(incoming, list): self.incoming(incoming) elif isinstance(incoming, Exception): self._abort = True if self._error_callback: self._error_callback(incoming, self._room) except Empty: time.sleep(self._pause) pass else: process.fetch() time.sleep(self._pause) self._streaming = False if self._use_process and self._abort and not process.is_alive() and self._error_callback: self._error_callback(Exception("Streaming process was killed"), self._room) if self._use_process: queue.close() if process.is_alive(): process.stop() process.terminate() process.join()
def run(self): """ Called by the thread, it runs the process. NEVER call this method directly. Instead call start() to start the thread. To stop, call stop(), and then join() """ if self._live: self._use_process = True self._abort = False campfire = self._room.get_campfire() if self._live: process = LiveStreamProcess(campfire.get_connection().get_settings(), self._room.id) else: process = StreamProcess(campfire.get_connection().get_settings(), self._room.id, pause=self._pause) if not self._use_process: process.set_callback(self.incoming) if self._use_process: queue = Queue() process.set_queue(queue) process.start() if not process.is_alive(): return self._streaming = True while not self._abort: if self._use_process: if not process.is_alive(): self._abort = True break try: incoming = queue.get_nowait() if isinstance(incoming, list): self.incoming(incoming) elif isinstance(incoming, Exception): self._abort = True if self._error_callback: self._error_callback(incoming, self._room) except Empty: time.sleep(self._pause) pass else: process.fetch() time.sleep(self._pause) self._streaming = False if self._use_process and self._abort and not process.is_alive() and self._error_callback: self._error_callback(Exception("Streaming process was killed"), self._room) if self._use_process: queue.close() if process.is_alive(): process.stop() process.terminate() process.join()
[ "Called", "by", "the", "thread", "it", "runs", "the", "process", "." ]
mariano/pyfire
python
https://github.com/mariano/pyfire/blob/42e3490c138abc8e10f2e9f8f8f3b40240a80412/pyfire/stream.py#L111-L173
[ "def", "run", "(", "self", ")", ":", "if", "self", ".", "_live", ":", "self", ".", "_use_process", "=", "True", "self", ".", "_abort", "=", "False", "campfire", "=", "self", ".", "_room", ".", "get_campfire", "(", ")", "if", "self", ".", "_live", ":", "process", "=", "LiveStreamProcess", "(", "campfire", ".", "get_connection", "(", ")", ".", "get_settings", "(", ")", ",", "self", ".", "_room", ".", "id", ")", "else", ":", "process", "=", "StreamProcess", "(", "campfire", ".", "get_connection", "(", ")", ".", "get_settings", "(", ")", ",", "self", ".", "_room", ".", "id", ",", "pause", "=", "self", ".", "_pause", ")", "if", "not", "self", ".", "_use_process", ":", "process", ".", "set_callback", "(", "self", ".", "incoming", ")", "if", "self", ".", "_use_process", ":", "queue", "=", "Queue", "(", ")", "process", ".", "set_queue", "(", "queue", ")", "process", ".", "start", "(", ")", "if", "not", "process", ".", "is_alive", "(", ")", ":", "return", "self", ".", "_streaming", "=", "True", "while", "not", "self", ".", "_abort", ":", "if", "self", ".", "_use_process", ":", "if", "not", "process", ".", "is_alive", "(", ")", ":", "self", ".", "_abort", "=", "True", "break", "try", ":", "incoming", "=", "queue", ".", "get_nowait", "(", ")", "if", "isinstance", "(", "incoming", ",", "list", ")", ":", "self", ".", "incoming", "(", "incoming", ")", "elif", "isinstance", "(", "incoming", ",", "Exception", ")", ":", "self", ".", "_abort", "=", "True", "if", "self", ".", "_error_callback", ":", "self", ".", "_error_callback", "(", "incoming", ",", "self", ".", "_room", ")", "except", "Empty", ":", "time", ".", "sleep", "(", "self", ".", "_pause", ")", "pass", "else", ":", "process", ".", "fetch", "(", ")", "time", ".", "sleep", "(", "self", ".", "_pause", ")", "self", ".", "_streaming", "=", "False", "if", "self", ".", "_use_process", "and", "self", ".", "_abort", "and", "not", "process", ".", "is_alive", "(", ")", "and", "self", ".", "_error_callback", ":", "self", ".", "_error_callback", "(", "Exception", "(", "\"Streaming process was killed\"", ")", ",", "self", ".", "_room", ")", "if", "self", ".", "_use_process", ":", "queue", ".", "close", "(", ")", "if", "process", ".", "is_alive", "(", ")", ":", "process", ".", "stop", "(", ")", "process", ".", "terminate", "(", ")", "process", ".", "join", "(", ")" ]
42e3490c138abc8e10f2e9f8f8f3b40240a80412
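Following the docstring above, run() is never called directly; a hedged lifecycle sketch using the documented start()/stop()/join() sequence (`stream` and `on_message` as in the earlier sketch):

import time

stream.attach(on_message)   # `stream` assumed, observer from the earlier sketch
stream.start()              # spawns the thread, which executes run()
time.sleep(30)              # let messages flow for a while
stream.stop()               # asks run() to leave its polling loop
stream.join()               # wait for the thread to finish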
valid
StreamProcess.run
Called by the process, it runs it. NEVER call this method directly. Instead call start() to start the separate process. If you don't want to use a second process, then call fetch() directly on this instance. To stop, call terminate()
pyfire/stream.py
def run(self): """ Called by the process, it runs it. NEVER call this method directly. Instead call start() to start the separate process. If you don't want to use a second process, then call fetch() directly on this instance. To stop, call terminate() """ if not self._queue: raise Exception("No queue available to send messages") while True: self.fetch() time.sleep(self._pause)
def run(self): """ Called by the process, it runs it. NEVER call this method directly. Instead call start() to start the separate process. If you don't want to use a second process, then call fetch() directly on this instance. To stop, call terminate() """ if not self._queue: raise Exception("No queue available to send messages") while True: self.fetch() time.sleep(self._pause)
[ "Called", "by", "the", "process", "it", "runs", "it", "." ]
mariano/pyfire
python
https://github.com/mariano/pyfire/blob/42e3490c138abc8e10f2e9f8f8f3b40240a80412/pyfire/stream.py#L220-L233
[ "def", "run", "(", "self", ")", ":", "if", "not", "self", ".", "_queue", ":", "raise", "Exception", "(", "\"No queue available to send messages\"", ")", "while", "True", ":", "self", ".", "fetch", "(", ")", "time", ".", "sleep", "(", "self", ".", "_pause", ")" ]
42e3490c138abc8e10f2e9f8f8f3b40240a80412
valid
StreamProcess.fetch
Fetch new messages.
pyfire/stream.py
def fetch(self): """ Fetch new messages. """ try: if not self._last_message_id: messages = self._connection.get("room/%s/recent" % self._room_id, key="messages", parameters={ "limit": 1 }) self._last_message_id = messages[-1]["id"] messages = self._connection.get("room/%s/recent" % self._room_id, key="messages", parameters={ "since_message_id": self._last_message_id }) except: messages = [] if messages: self._last_message_id = messages[-1]["id"] self.received(messages)
def fetch(self): """ Fetch new messages. """ try: if not self._last_message_id: messages = self._connection.get("room/%s/recent" % self._room_id, key="messages", parameters={ "limit": 1 }) self._last_message_id = messages[-1]["id"] messages = self._connection.get("room/%s/recent" % self._room_id, key="messages", parameters={ "since_message_id": self._last_message_id }) except: messages = [] if messages: self._last_message_id = messages[-1]["id"] self.received(messages)
[ "Fetch", "new", "messages", "." ]
mariano/pyfire
python
https://github.com/mariano/pyfire/blob/42e3490c138abc8e10f2e9f8f8f3b40240a80412/pyfire/stream.py#L235-L253
[ "def", "fetch", "(", "self", ")", ":", "try", ":", "if", "not", "self", ".", "_last_message_id", ":", "messages", "=", "self", ".", "_connection", ".", "get", "(", "\"room/%s/recent\"", "%", "self", ".", "_room_id", ",", "key", "=", "\"messages\"", ",", "parameters", "=", "{", "\"limit\"", ":", "1", "}", ")", "self", ".", "_last_message_id", "=", "messages", "[", "-", "1", "]", "[", "\"id\"", "]", "messages", "=", "self", ".", "_connection", ".", "get", "(", "\"room/%s/recent\"", "%", "self", ".", "_room_id", ",", "key", "=", "\"messages\"", ",", "parameters", "=", "{", "\"since_message_id\"", ":", "self", ".", "_last_message_id", "}", ")", "except", ":", "messages", "=", "[", "]", "if", "messages", ":", "self", ".", "_last_message_id", "=", "messages", "[", "-", "1", "]", "[", "\"id\"", "]", "self", ".", "received", "(", "messages", ")" ]
42e3490c138abc8e10f2e9f8f8f3b40240a80412
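The fetch() above seeds last_message_id from the newest recent message and then polls with since_message_id; a standalone sketch of that pattern (not pyfire's actual class):

def poll(get_recent, last_id=None):
    """Standalone sketch; get_recent(params) returns a list of message dicts, newest last."""
    if last_id is None:
        seed = get_recent({"limit": 1})          # learn the current newest id
        last_id = seed[-1]["id"] if seed else None
    messages = get_recent({"since_message_id": last_id}) if last_id else []
    if messages:
        last_id = messages[-1]["id"]             # advance the cursor
    return messages, last_id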
valid
StreamProcess.received
Called when new messages arrive. Args: messages (tuple): Messages
pyfire/stream.py
def received(self, messages): """ Called when new messages arrive. Args: messages (tuple): Messages """ if messages: if self._queue: self._queue.put_nowait(messages) if self._callback: self._callback(messages)
def received(self, messages): """ Called when new messages arrive. Args: messages (tuple): Messages """ if messages: if self._queue: self._queue.put_nowait(messages) if self._callback: self._callback(messages)
[ "Called", "when", "new", "messages", "arrive", "." ]
mariano/pyfire
python
https://github.com/mariano/pyfire/blob/42e3490c138abc8e10f2e9f8f8f3b40240a80412/pyfire/stream.py#L258-L269
[ "def", "received", "(", "self", ",", "messages", ")", ":", "if", "messages", ":", "if", "self", ".", "_queue", ":", "self", ".", "_queue", ".", "put_nowait", "(", "messages", ")", "if", "self", ".", "_callback", ":", "self", ".", "_callback", "(", "messages", ")" ]
42e3490c138abc8e10f2e9f8f8f3b40240a80412
valid
LiveStreamProcess.run
Called by the process, it runs it. NEVER call this method directly. Instead call start() to start the separate process. If you don't want to use a second process, then call fetch() directly on this instance. To stop, call terminate()
pyfire/stream.py
def run(self): """ Called by the process, it runs it. NEVER call this method directly. Instead call start() to start the separate process. If you don't want to use a second process, then call fetch() directly on this instance. To stop, call terminate() """ if not self._queue: raise Exception("No queue available to send messages") factory = LiveStreamFactory(self) self._reactor.connectSSL("streaming.campfirenow.com", 443, factory, ssl.ClientContextFactory()) self._reactor.run()
def run(self): """ Called by the process, it runs it. NEVER call this method directly. Instead call start() to start the separate process. If you don't want to use a second process, then call fetch() directly on this instance. To stop, call terminate() """ if not self._queue: raise Exception("No queue available to send messages") factory = LiveStreamFactory(self) self._reactor.connectSSL("streaming.campfirenow.com", 443, factory, ssl.ClientContextFactory()) self._reactor.run()
[ "Called", "by", "the", "process", "it", "runs", "it", "." ]
mariano/pyfire
python
https://github.com/mariano/pyfire/blob/42e3490c138abc8e10f2e9f8f8f3b40240a80412/pyfire/stream.py#L301-L314
[ "def", "run", "(", "self", ")", ":", "if", "not", "self", ".", "_queue", ":", "raise", "Exception", "(", "\"No queue available to send messages\"", ")", "factory", "=", "LiveStreamFactory", "(", "self", ")", "self", ".", "_reactor", ".", "connectSSL", "(", "\"streaming.campfirenow.com\"", ",", "443", ",", "factory", ",", "ssl", ".", "ClientContextFactory", "(", ")", ")", "self", ".", "_reactor", ".", "run", "(", ")" ]
42e3490c138abc8e10f2e9f8f8f3b40240a80412