Dataset columns (name, type, observed range):

    partition          stringclasses: 3 values
    func_name          stringlengths: 1 to 134
    docstring          stringlengths: 1 to 46.9k
    path               stringlengths: 4 to 223
    original_string    stringlengths: 75 to 104k
    code               stringlengths: 75 to 104k
    docstring_tokens   listlengths: 1 to 1.97k
    repo               stringlengths: 7 to 55
    language           stringclasses: 1 value
    url                stringlengths: 87 to 315
    code_tokens        listlengths: 19 to 28.4k
    sha                stringlengths: 40 to 40

The records below list these fields in the same column order, one record per block.
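As a rough orientation, a single record can be pictured as a plain Python dict; the sketch below uses abridged values from the first record that follows, purely for illustration.

```python
# Illustrative shape of one record; values are abridged from the first row below.
record = {
    "partition": "valid",                        # dataset split (one of 3 values)
    "func_name": "Jwt.encode",                   # qualified function name
    "docstring": "Create a token based on ...",  # natural-language description
    "path": "simplejwt/jwt.py",                  # file path inside the repo
    "original_string": "def encode(self) -> str: ...",  # full source incl. docstring
    "code": "def encode(self) -> str: ...",      # same source, stored again as `code`
    "docstring_tokens": ["Create", "a", "token", "..."],
    "repo": "jmwri/simplejwt",                   # GitHub repository slug
    "language": "python",                        # single-valued column
    "url": "https://github.com/jmwri/simplejwt/blob/0828eaace0846918d2d202f5a60167a003e88b71/simplejwt/jwt.py#L301-L311",
    "code_tokens": ["def", "encode", "(", "self", ")", "..."],
    "sha": "0828eaace0846918d2d202f5a60167a003e88b71",  # commit the URL points at
}
```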
valid
Jwt.encode
Create a token based on the data held in the class. :return: A new token :rtype: str
simplejwt/jwt.py
def encode(self) -> str: """ Create a token based on the data held in the class. :return: A new token :rtype: str """ payload = {} payload.update(self.registered_claims) payload.update(self.payload) return encode(self.secret, payload, self.alg, self.header)
def encode(self) -> str: """ Create a token based on the data held in the class. :return: A new token :rtype: str """ payload = {} payload.update(self.registered_claims) payload.update(self.payload) return encode(self.secret, payload, self.alg, self.header)
[ "Create", "a", "token", "based", "on", "the", "data", "held", "in", "the", "class", "." ]
jmwri/simplejwt
python
https://github.com/jmwri/simplejwt/blob/0828eaace0846918d2d202f5a60167a003e88b71/simplejwt/jwt.py#L301-L311
[ "def", "encode", "(", "self", ")", "->", "str", ":", "payload", "=", "{", "}", "payload", ".", "update", "(", "self", ".", "registered_claims", ")", "payload", ".", "update", "(", "self", ".", "payload", ")", "return", "encode", "(", "self", ".", "secret", ",", "payload", ",", "self", ".", "alg", ",", "self", ".", "header", ")" ]
0828eaace0846918d2d202f5a60167a003e88b71
valid
Jwt.decode
Decodes the given token into an instance of `Jwt`. :param secret: The secret used to decode the token. Must match the secret used when creating the token. :type secret: Union[str, bytes] :param token: The token to decode. :type token: Union[str, bytes] :param alg: The algorithm used to decode the token. Must match the algorithm used when creating the token. :type alg: str :return: The decoded token. :rtype: `Jwt`
simplejwt/jwt.py
def decode(secret: Union[str, bytes], token: Union[str, bytes], alg: str = default_alg) -> 'Jwt': """ Decodes the given token into an instance of `Jwt`. :param secret: The secret used to decode the token. Must match the secret used when creating the token. :type secret: Union[str, bytes] :param token: The token to decode. :type token: Union[str, bytes] :param alg: The algorithm used to decode the token. Must match the algorithm used when creating the token. :type alg: str :return: The decoded token. :rtype: `Jwt` """ header, payload = decode(secret, token, alg) return Jwt(secret, payload, alg, header)
def decode(secret: Union[str, bytes], token: Union[str, bytes], alg: str = default_alg) -> 'Jwt': """ Decodes the given token into an instance of `Jwt`. :param secret: The secret used to decode the token. Must match the secret used when creating the token. :type secret: Union[str, bytes] :param token: The token to decode. :type token: Union[str, bytes] :param alg: The algorithm used to decode the token. Must match the algorithm used when creating the token. :type alg: str :return: The decoded token. :rtype: `Jwt` """ header, payload = decode(secret, token, alg) return Jwt(secret, payload, alg, header)
[ "Decodes", "the", "given", "token", "into", "an", "instance", "of", "Jwt", "." ]
jmwri/simplejwt
python
https://github.com/jmwri/simplejwt/blob/0828eaace0846918d2d202f5a60167a003e88b71/simplejwt/jwt.py#L314-L331
[ "def", "decode", "(", "secret", ":", "Union", "[", "str", ",", "bytes", "]", ",", "token", ":", "Union", "[", "str", ",", "bytes", "]", ",", "alg", ":", "str", "=", "default_alg", ")", "->", "'Jwt'", ":", "header", ",", "payload", "=", "decode", "(", "secret", ",", "token", ",", "alg", ")", "return", "Jwt", "(", "secret", ",", "payload", ",", "alg", ",", "header", ")" ]
0828eaace0846918d2d202f5a60167a003e88b71
valid
Jwt.compare
Compare against another `Jwt`. :param jwt: The token to compare against. :type jwt: Jwt :param compare_dates: Should the comparision take dates into account? :type compare_dates: bool :return: Are the two Jwt's the same? :rtype: bool
simplejwt/jwt.py
def compare(self, jwt: 'Jwt', compare_dates: bool = False) -> bool: """ Compare against another `Jwt`. :param jwt: The token to compare against. :type jwt: Jwt :param compare_dates: Should the comparision take dates into account? :type compare_dates: bool :return: Are the two Jwt's the same? :rtype: bool """ if self.secret != jwt.secret: return False if self.payload != jwt.payload: return False if self.alg != jwt.alg: return False if self.header != jwt.header: return False expected_claims = self.registered_claims actual_claims = jwt.registered_claims if not compare_dates: strip = ['exp', 'nbf', 'iat'] expected_claims = {k: {v if k not in strip else None} for k, v in expected_claims.items()} actual_claims = {k: {v if k not in strip else None} for k, v in actual_claims.items()} if expected_claims != actual_claims: return False return True
def compare(self, jwt: 'Jwt', compare_dates: bool = False) -> bool: """ Compare against another `Jwt`. :param jwt: The token to compare against. :type jwt: Jwt :param compare_dates: Should the comparision take dates into account? :type compare_dates: bool :return: Are the two Jwt's the same? :rtype: bool """ if self.secret != jwt.secret: return False if self.payload != jwt.payload: return False if self.alg != jwt.alg: return False if self.header != jwt.header: return False expected_claims = self.registered_claims actual_claims = jwt.registered_claims if not compare_dates: strip = ['exp', 'nbf', 'iat'] expected_claims = {k: {v if k not in strip else None} for k, v in expected_claims.items()} actual_claims = {k: {v if k not in strip else None} for k, v in actual_claims.items()} if expected_claims != actual_claims: return False return True
[ "Compare", "against", "another", "Jwt", "." ]
jmwri/simplejwt
python
https://github.com/jmwri/simplejwt/blob/0828eaace0846918d2d202f5a60167a003e88b71/simplejwt/jwt.py#L333-L362
[ "def", "compare", "(", "self", ",", "jwt", ":", "'Jwt'", ",", "compare_dates", ":", "bool", "=", "False", ")", "->", "bool", ":", "if", "self", ".", "secret", "!=", "jwt", ".", "secret", ":", "return", "False", "if", "self", ".", "payload", "!=", "jwt", ".", "payload", ":", "return", "False", "if", "self", ".", "alg", "!=", "jwt", ".", "alg", ":", "return", "False", "if", "self", ".", "header", "!=", "jwt", ".", "header", ":", "return", "False", "expected_claims", "=", "self", ".", "registered_claims", "actual_claims", "=", "jwt", ".", "registered_claims", "if", "not", "compare_dates", ":", "strip", "=", "[", "'exp'", ",", "'nbf'", ",", "'iat'", "]", "expected_claims", "=", "{", "k", ":", "{", "v", "if", "k", "not", "in", "strip", "else", "None", "}", "for", "k", ",", "v", "in", "expected_claims", ".", "items", "(", ")", "}", "actual_claims", "=", "{", "k", ":", "{", "v", "if", "k", "not", "in", "strip", "else", "None", "}", "for", "k", ",", "v", "in", "actual_claims", ".", "items", "(", ")", "}", "if", "expected_claims", "!=", "actual_claims", ":", "return", "False", "return", "True" ]
0828eaace0846918d2d202f5a60167a003e88b71
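The three simplejwt records above (Jwt.encode, Jwt.decode, Jwt.compare) describe a small token API. A hedged usage sketch follows: the import path comes from the path field, the constructor signature (secret, payload, alg, header) is only inferred from Jwt.decode, and the "HS256" algorithm name and the empty header dict are assumptions rather than confirmed defaults.

```python
# Sketch only: constructor signature inferred from Jwt.decode above;
# "HS256" and the empty header are assumptions, not confirmed defaults.
from simplejwt.jwt import Jwt

secret = "my-secret"
issued = Jwt(secret, {"user_id": 42}, "HS256", {})
token = issued.encode()                       # registered claims + payload -> JWT string

decoded = Jwt.decode(secret, token, "HS256")  # same secret and alg required
print(decoded.payload)                        # contains "user_id"

# compare() checks secret, payload, alg and header; registered date claims
# ("exp", "nbf", "iat") are ignored unless compare_dates=True.
again = Jwt.decode(secret, token, "HS256")
assert decoded.compare(again)
```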
valid
UploadView.get
Download a file.
xmpp_http_upload/views.py
def get(self, request, hash, filename): """Download a file.""" if _ws_download is True: return HttpResponseForbidden() upload = Upload.objects.uploaded().get(hash=hash, name=filename) return FileResponse(upload.file, content_type=upload.type)
def get(self, request, hash, filename): """Download a file.""" if _ws_download is True: return HttpResponseForbidden() upload = Upload.objects.uploaded().get(hash=hash, name=filename) return FileResponse(upload.file, content_type=upload.type)
[ "Download", "a", "file", "." ]
mathiasertl/django-xmpp-http-upload
python
https://github.com/mathiasertl/django-xmpp-http-upload/blob/819cb8794647c4609bb4cb7855e2ad4bd51b9ea1/xmpp_http_upload/views.py#L175-L181
[ "def", "get", "(", "self", ",", "request", ",", "hash", ",", "filename", ")", ":", "if", "_ws_download", "is", "True", ":", "return", "HttpResponseForbidden", "(", ")", "upload", "=", "Upload", ".", "objects", ".", "uploaded", "(", ")", ".", "get", "(", "hash", "=", "hash", ",", "name", "=", "filename", ")", "return", "FileResponse", "(", "upload", ".", "file", ",", "content_type", "=", "upload", ".", "type", ")" ]
819cb8794647c4609bb4cb7855e2ad4bd51b9ea1
valid
b64_encode
:param data: Data the encode. :type data: bytes :return: Base 64 encoded data with padding removed. :rtype: bytes
simplejwt/util.py
def b64_encode(data: bytes) -> bytes: """ :param data: Data the encode. :type data: bytes :return: Base 64 encoded data with padding removed. :rtype: bytes """ encoded = urlsafe_b64encode(data) return encoded.replace(b'=', b'')
def b64_encode(data: bytes) -> bytes: """ :param data: Data the encode. :type data: bytes :return: Base 64 encoded data with padding removed. :rtype: bytes """ encoded = urlsafe_b64encode(data) return encoded.replace(b'=', b'')
[ ":", "param", "data", ":", "Data", "the", "encode", ".", ":", "type", "data", ":", "bytes", ":", "return", ":", "Base", "64", "encoded", "data", "with", "padding", "removed", ".", ":", "rtype", ":", "bytes" ]
jmwri/simplejwt
python
https://github.com/jmwri/simplejwt/blob/0828eaace0846918d2d202f5a60167a003e88b71/simplejwt/util.py#L5-L13
[ "def", "b64_encode", "(", "data", ":", "bytes", ")", "->", "bytes", ":", "encoded", "=", "urlsafe_b64encode", "(", "data", ")", "return", "encoded", ".", "replace", "(", "b'='", ",", "b''", ")" ]
0828eaace0846918d2d202f5a60167a003e88b71
valid
b64_decode
:param data: Base 64 encoded data to decode. :type data: bytes :return: Base 64 decoded data. :rtype: bytes
simplejwt/util.py
def b64_decode(data: bytes) -> bytes: """ :param data: Base 64 encoded data to decode. :type data: bytes :return: Base 64 decoded data. :rtype: bytes """ missing_padding = len(data) % 4 if missing_padding != 0: data += b'=' * (4 - missing_padding) return urlsafe_b64decode(data)
def b64_decode(data: bytes) -> bytes: """ :param data: Base 64 encoded data to decode. :type data: bytes :return: Base 64 decoded data. :rtype: bytes """ missing_padding = len(data) % 4 if missing_padding != 0: data += b'=' * (4 - missing_padding) return urlsafe_b64decode(data)
[ ":", "param", "data", ":", "Base", "64", "encoded", "data", "to", "decode", ".", ":", "type", "data", ":", "bytes", ":", "return", ":", "Base", "64", "decoded", "data", ".", ":", "rtype", ":", "bytes" ]
jmwri/simplejwt
python
https://github.com/jmwri/simplejwt/blob/0828eaace0846918d2d202f5a60167a003e88b71/simplejwt/util.py#L16-L26
[ "def", "b64_decode", "(", "data", ":", "bytes", ")", "->", "bytes", ":", "missing_padding", "=", "len", "(", "data", ")", "%", "4", "if", "missing_padding", "!=", "0", ":", "data", "+=", "b'='", "*", "(", "4", "-", "missing_padding", ")", "return", "urlsafe_b64decode", "(", "data", ")" ]
0828eaace0846918d2d202f5a60167a003e88b71
valid
to_bytes
:param data: Data to convert to bytes. :type data: Union[str, bytes] :return: `data` encoded to UTF8. :rtype: bytes
simplejwt/util.py
def to_bytes(data: Union[str, bytes]) -> bytes: """ :param data: Data to convert to bytes. :type data: Union[str, bytes] :return: `data` encoded to UTF8. :rtype: bytes """ if isinstance(data, bytes): return data return data.encode('utf-8')
def to_bytes(data: Union[str, bytes]) -> bytes: """ :param data: Data to convert to bytes. :type data: Union[str, bytes] :return: `data` encoded to UTF8. :rtype: bytes """ if isinstance(data, bytes): return data return data.encode('utf-8')
[ ":", "param", "data", ":", "Data", "to", "convert", "to", "bytes", ".", ":", "type", "data", ":", "Union", "[", "str", "bytes", "]", ":", "return", ":", "data", "encoded", "to", "UTF8", ".", ":", "rtype", ":", "bytes" ]
jmwri/simplejwt
python
https://github.com/jmwri/simplejwt/blob/0828eaace0846918d2d202f5a60167a003e88b71/simplejwt/util.py#L29-L38
[ "def", "to_bytes", "(", "data", ":", "Union", "[", "str", ",", "bytes", "]", ")", "->", "bytes", ":", "if", "isinstance", "(", "data", ",", "bytes", ")", ":", "return", "data", "return", "data", ".", "encode", "(", "'utf-8'", ")" ]
0828eaace0846918d2d202f5a60167a003e88b71
valid
from_bytes
:param data: A UTF8 byte string. :type data: Union[str, bytes] :return: `data` decoded from UTF8. :rtype: str
simplejwt/util.py
def from_bytes(data: Union[str, bytes]) -> str: """ :param data: A UTF8 byte string. :type data: Union[str, bytes] :return: `data` decoded from UTF8. :rtype: str """ if isinstance(data, str): return data return str(data, 'utf-8')
def from_bytes(data: Union[str, bytes]) -> str: """ :param data: A UTF8 byte string. :type data: Union[str, bytes] :return: `data` decoded from UTF8. :rtype: str """ if isinstance(data, str): return data return str(data, 'utf-8')
[ ":", "param", "data", ":", "A", "UTF8", "byte", "string", ".", ":", "type", "data", ":", "Union", "[", "str", "bytes", "]", ":", "return", ":", "data", "decoded", "from", "UTF8", ".", ":", "rtype", ":", "str" ]
jmwri/simplejwt
python
https://github.com/jmwri/simplejwt/blob/0828eaace0846918d2d202f5a60167a003e88b71/simplejwt/util.py#L41-L50
[ "def", "from_bytes", "(", "data", ":", "Union", "[", "str", ",", "bytes", "]", ")", "->", "str", ":", "if", "isinstance", "(", "data", ",", "str", ")", ":", "return", "data", "return", "str", "(", "data", ",", "'utf-8'", ")" ]
0828eaace0846918d2d202f5a60167a003e88b71
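The four simplejwt/util.py records above compose into a natural round trip. A minimal sketch, assuming the functions are importable from simplejwt.util as the path field suggests:

```python
# Sketch: import path taken from the `path` field above; behaviour follows
# directly from the function bodies shown.
from simplejwt.util import b64_encode, b64_decode, to_bytes, from_bytes

raw = to_bytes("hello world")        # str -> UTF-8 bytes (bytes pass through)
enc = b64_encode(raw)                # urlsafe base64 with '=' padding stripped
print(enc)                           # b'aGVsbG8gd29ybGQ'

dec = b64_decode(enc)                # missing padding is re-added before decoding
assert from_bytes(dec) == "hello world"
```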
valid
camelize_classname
Produce a 'camelized' class name, e.g.
pfamserver/models.py
def camelize_classname(base, tablename, table): "Produce a 'camelized' class name, e.g. " "'words_and_underscores' -> 'WordsAndUnderscores'" return str(tablename[0].upper() + re.sub(r'_([a-z])', lambda m: m.group(1).upper(), tablename[1:]))
def camelize_classname(base, tablename, table): "Produce a 'camelized' class name, e.g. " "'words_and_underscores' -> 'WordsAndUnderscores'" return str(tablename[0].upper() + re.sub(r'_([a-z])', lambda m: m.group(1).upper(), tablename[1:]))
[ "Produce", "a", "camelized", "class", "name", "e", ".", "g", "." ]
ecolell/pfamserver
python
https://github.com/ecolell/pfamserver/blob/048d321831864ba4ef0f33f020c0239131589ece/pfamserver/models.py#L30-L35
[ "def", "camelize_classname", "(", "base", ",", "tablename", ",", "table", ")", ":", "\"'words_and_underscores' -> 'WordsAndUnderscores'\"", "return", "str", "(", "tablename", "[", "0", "]", ".", "upper", "(", ")", "+", "re", ".", "sub", "(", "r'_([a-z])'", ",", "lambda", "m", ":", "m", ".", "group", "(", "1", ")", ".", "upper", "(", ")", ",", "tablename", "[", "1", ":", "]", ")", ")" ]
048d321831864ba4ef0f33f020c0239131589ece
valid
pluralize_collection
Produce an 'uncamelized', 'pluralized' class name, e.g.
pfamserver/models.py
def pluralize_collection(base, local_cls, referred_cls, constraint): "Produce an 'uncamelized', 'pluralized' class name, e.g. " "'SomeTerm' -> 'some_terms'" referred_name = referred_cls.__name__ uncamelized = re.sub(r'[A-Z]', lambda m: "_%s" % m.group(0).lower(), referred_name)[1:] pluralized = _pluralizer.plural(uncamelized) return pluralized
def pluralize_collection(base, local_cls, referred_cls, constraint): "Produce an 'uncamelized', 'pluralized' class name, e.g. " "'SomeTerm' -> 'some_terms'" referred_name = referred_cls.__name__ uncamelized = re.sub(r'[A-Z]', lambda m: "_%s" % m.group(0).lower(), referred_name)[1:] pluralized = _pluralizer.plural(uncamelized) return pluralized
[ "Produce", "an", "uncamelized", "pluralized", "class", "name", "e", ".", "g", "." ]
ecolell/pfamserver
python
https://github.com/ecolell/pfamserver/blob/048d321831864ba4ef0f33f020c0239131589ece/pfamserver/models.py#L38-L46
[ "def", "pluralize_collection", "(", "base", ",", "local_cls", ",", "referred_cls", ",", "constraint", ")", ":", "\"'SomeTerm' -> 'some_terms'\"", "referred_name", "=", "referred_cls", ".", "__name__", "uncamelized", "=", "re", ".", "sub", "(", "r'[A-Z]'", ",", "lambda", "m", ":", "\"_%s\"", "%", "m", ".", "group", "(", "0", ")", ".", "lower", "(", ")", ",", "referred_name", ")", "[", "1", ":", "]", "pluralized", "=", "_pluralizer", ".", "plural", "(", "uncamelized", ")", "return", "pluralized" ]
048d321831864ba4ef0f33f020c0239131589ece
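The two records above are naming hooks (their signatures suggest SQLAlchemy automap callbacks). The core string transformations are plain regex work and can be checked in isolation; _pluralizer is an external inflect-style engine that is not shown, so the pluralization step is only approximated here.

```python
import re

def camelize(tablename):
    # 'words_and_underscores' -> 'WordsAndUnderscores' (same regex as above)
    return tablename[0].upper() + re.sub(
        r'_([a-z])', lambda m: m.group(1).upper(), tablename[1:])

def uncamelize(name):
    # 'SomeTerm' -> 'some_term'; the real code then pluralizes via _pluralizer
    return re.sub(r'[A-Z]', lambda m: "_%s" % m.group(0).lower(), name)[1:]

print(camelize("words_and_underscores"))   # WordsAndUnderscores
print(uncamelize("SomeTerm") + "s")        # some_terms (naive plural for demo)
```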
valid
is_compressed_json_file
Test a file is a valid json file. - *.json: uncompressed, utf-8 encode json file - *.js: uncompressed, utf-8 encode json file - *.gz: compressed, utf-8 encode json file
superjson/_superjson.py
def is_compressed_json_file(abspath): """Test a file is a valid json file. - *.json: uncompressed, utf-8 encode json file - *.js: uncompressed, utf-8 encode json file - *.gz: compressed, utf-8 encode json file """ abspath = abspath.lower() fname, ext = os.path.splitext(abspath) if ext in [".json", ".js"]: is_compressed = False elif ext == ".gz": is_compressed = True else: raise ValueError( "'%s' is not a valid json file. " "extension has to be '.json' or '.js' for uncompressed, '.gz' " "for compressed." % abspath) return is_compressed
def is_compressed_json_file(abspath): """Test a file is a valid json file. - *.json: uncompressed, utf-8 encode json file - *.js: uncompressed, utf-8 encode json file - *.gz: compressed, utf-8 encode json file """ abspath = abspath.lower() fname, ext = os.path.splitext(abspath) if ext in [".json", ".js"]: is_compressed = False elif ext == ".gz": is_compressed = True else: raise ValueError( "'%s' is not a valid json file. " "extension has to be '.json' or '.js' for uncompressed, '.gz' " "for compressed." % abspath) return is_compressed
[ "Test", "a", "file", "is", "a", "valid", "json", "file", "." ]
MacHu-GWU/superjson-project
python
https://github.com/MacHu-GWU/superjson-project/blob/782ca4b2edbd4b4018b8cedee42eeae7c921b917/superjson/_superjson.py#L131-L149
[ "def", "is_compressed_json_file", "(", "abspath", ")", ":", "abspath", "=", "abspath", ".", "lower", "(", ")", "fname", ",", "ext", "=", "os", ".", "path", ".", "splitext", "(", "abspath", ")", "if", "ext", "in", "[", "\".json\"", ",", "\".js\"", "]", ":", "is_compressed", "=", "False", "elif", "ext", "==", "\".gz\"", ":", "is_compressed", "=", "True", "else", ":", "raise", "ValueError", "(", "\"'%s' is not a valid json file. \"", "\"extension has to be '.json' or '.js' for uncompressed, '.gz' \"", "\"for compressed.\"", "%", "abspath", ")", "return", "is_compressed" ]
782ca4b2edbd4b4018b8cedee42eeae7c921b917
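A quick look at how the extension check above behaves; the import path is taken from the record's path field and is otherwise an assumption.

```python
# Sketch: import path assumed from the `path` field of the row above.
from superjson._superjson import is_compressed_json_file

print(is_compressed_json_file("/tmp/data.json"))  # False (uncompressed)
print(is_compressed_json_file("/tmp/data.JS"))    # False (path is lower-cased first)
print(is_compressed_json_file("/tmp/data.gz"))    # True  (gzip-compressed)

try:
    is_compressed_json_file("/tmp/data.txt")
except ValueError as e:
    print(e)   # "... extension has to be '.json' or '.js' ... '.gz' ..."
```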
valid
SupportBuiltInDataType.dump_set
``set`` dumper.
superjson/_superjson.py
def dump_set(self, obj, class_name=set_class_name): """ ``set`` dumper. """ return {"$" + class_name: [self._json_convert(item) for item in obj]}
def dump_set(self, obj, class_name=set_class_name): """ ``set`` dumper. """ return {"$" + class_name: [self._json_convert(item) for item in obj]}
[ "set", "dumper", "." ]
MacHu-GWU/superjson-project
python
https://github.com/MacHu-GWU/superjson-project/blob/782ca4b2edbd4b4018b8cedee42eeae7c921b917/superjson/_superjson.py#L452-L456
[ "def", "dump_set", "(", "self", ",", "obj", ",", "class_name", "=", "set_class_name", ")", ":", "return", "{", "\"$\"", "+", "class_name", ":", "[", "self", ".", "_json_convert", "(", "item", ")", "for", "item", "in", "obj", "]", "}" ]
782ca4b2edbd4b4018b8cedee42eeae7c921b917
valid
SupportBuiltInDataType.dump_deque
``collections.deque`` dumper.
superjson/_superjson.py
def dump_deque(self, obj, class_name="collections.deque"): """ ``collections.deque`` dumper. """ return {"$" + class_name: [self._json_convert(item) for item in obj]}
def dump_deque(self, obj, class_name="collections.deque"): """ ``collections.deque`` dumper. """ return {"$" + class_name: [self._json_convert(item) for item in obj]}
[ "collections", ".", "deque", "dumper", "." ]
MacHu-GWU/superjson-project
python
https://github.com/MacHu-GWU/superjson-project/blob/782ca4b2edbd4b4018b8cedee42eeae7c921b917/superjson/_superjson.py#L464-L468
[ "def", "dump_deque", "(", "self", ",", "obj", ",", "class_name", "=", "\"collections.deque\"", ")", ":", "return", "{", "\"$\"", "+", "class_name", ":", "[", "self", ".", "_json_convert", "(", "item", ")", "for", "item", "in", "obj", "]", "}" ]
782ca4b2edbd4b4018b8cedee42eeae7c921b917
valid
SupportBuiltInDataType.dump_OrderedDict
``collections.OrderedDict`` dumper.
superjson/_superjson.py
def dump_OrderedDict(self, obj, class_name="collections.OrderedDict"): """ ``collections.OrderedDict`` dumper. """ return { "$" + class_name: [ (key, self._json_convert(value)) for key, value in iteritems(obj) ] }
def dump_OrderedDict(self, obj, class_name="collections.OrderedDict"): """ ``collections.OrderedDict`` dumper. """ return { "$" + class_name: [ (key, self._json_convert(value)) for key, value in iteritems(obj) ] }
[ "collections", ".", "OrderedDict", "dumper", "." ]
MacHu-GWU/superjson-project
python
https://github.com/MacHu-GWU/superjson-project/blob/782ca4b2edbd4b4018b8cedee42eeae7c921b917/superjson/_superjson.py#L476-L484
[ "def", "dump_OrderedDict", "(", "self", ",", "obj", ",", "class_name", "=", "\"collections.OrderedDict\"", ")", ":", "return", "{", "\"$\"", "+", "class_name", ":", "[", "(", "key", ",", "self", ".", "_json_convert", "(", "value", ")", ")", "for", "key", ",", "value", "in", "iteritems", "(", "obj", ")", "]", "}" ]
782ca4b2edbd4b4018b8cedee42eeae7c921b917
valid
SupportNumpyArray.dump_nparray
``numpy.ndarray`` dumper.
superjson/_superjson.py
def dump_nparray(self, obj, class_name=numpy_ndarray_class_name): """ ``numpy.ndarray`` dumper. """ return {"$" + class_name: self._json_convert(obj.tolist())}
def dump_nparray(self, obj, class_name=numpy_ndarray_class_name): """ ``numpy.ndarray`` dumper. """ return {"$" + class_name: self._json_convert(obj.tolist())}
[ "numpy", ".", "ndarray", "dumper", "." ]
MacHu-GWU/superjson-project
python
https://github.com/MacHu-GWU/superjson-project/blob/782ca4b2edbd4b4018b8cedee42eeae7c921b917/superjson/_superjson.py#L497-L501
[ "def", "dump_nparray", "(", "self", ",", "obj", ",", "class_name", "=", "numpy_ndarray_class_name", ")", ":", "return", "{", "\"$\"", "+", "class_name", ":", "self", ".", "_json_convert", "(", "obj", ".", "tolist", "(", ")", ")", "}" ]
782ca4b2edbd4b4018b8cedee42eeae7c921b917
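The four dumper records above share one pattern: wrap the JSON-converted value in a single-key dict whose key is "$" plus a class name, so the matching loader can restore the original type. A self-contained sketch of that shape for the two dumpers whose class names appear literally above (set_class_name and numpy_ndarray_class_name are constants not shown here, so those dumpers are omitted):

```python
import collections

def dump_deque(obj, class_name="collections.deque"):
    # mirrors the dumper above, with the identity in place of _json_convert
    return {"$" + class_name: [item for item in obj]}

def dump_OrderedDict(obj, class_name="collections.OrderedDict"):
    return {"$" + class_name: [(k, v) for k, v in obj.items()]}

print(dump_deque(collections.deque([1, 2, 3])))
# {'$collections.deque': [1, 2, 3]}
print(dump_OrderedDict(collections.OrderedDict(a=1, b=2)))
# {'$collections.OrderedDict': [('a', 1), ('b', 2)]}
```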
valid
_invalidates_cache
Decorator for rruleset methods which may invalidate the cached length.
superjson/pkg/dateutil/rrule.py
def _invalidates_cache(f): """ Decorator for rruleset methods which may invalidate the cached length. """ def inner_func(self, *args, **kwargs): rv = f(self, *args, **kwargs) self._invalidate_cache() return rv return inner_func
def _invalidates_cache(f): """ Decorator for rruleset methods which may invalidate the cached length. """ def inner_func(self, *args, **kwargs): rv = f(self, *args, **kwargs) self._invalidate_cache() return rv return inner_func
[ "Decorator", "for", "rruleset", "methods", "which", "may", "invalidate", "the", "cached", "length", "." ]
MacHu-GWU/superjson-project
python
https://github.com/MacHu-GWU/superjson-project/blob/782ca4b2edbd4b4018b8cedee42eeae7c921b917/superjson/pkg/dateutil/rrule.py#L82-L93
[ "def", "_invalidates_cache", "(", "f", ")", ":", "def", "inner_func", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "rv", "=", "f", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", "self", ".", "_invalidate_cache", "(", ")", "return", "rv", "return", "inner_func" ]
782ca4b2edbd4b4018b8cedee42eeae7c921b917
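The decorator above is a generic "run the method, then invalidate the cache" wrapper. A self-contained toy illustration of the pattern (the real rruleset internals are not shown here):

```python
def _invalidates_cache(f):
    # identical in spirit to the decorator above
    def inner_func(self, *args, **kwargs):
        rv = f(self, *args, **kwargs)
        self._invalidate_cache()
        return rv
    return inner_func

class ToyRuleSet:
    def __init__(self):
        self._len = None            # stands in for the cached length

    def _invalidate_cache(self):
        self._len = None

    @_invalidates_cache
    def rrule(self, rule):
        print("added", rule)        # any mutation drops the cached length

ToyRuleSet().rrule("FREQ=DAILY")    # prints, then the cache is invalidated
```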
valid
rrulebase.before
Returns the last recurrence before the given datetime instance. The inc keyword defines what happens if dt is an occurrence. With inc=True, if dt itself is an occurrence, it will be returned.
superjson/pkg/dateutil/rrule.py
def before(self, dt, inc=False): """ Returns the last recurrence before the given datetime instance. The inc keyword defines what happens if dt is an occurrence. With inc=True, if dt itself is an occurrence, it will be returned. """ if self._cache_complete: gen = self._cache else: gen = self last = None if inc: for i in gen: if i > dt: break last = i else: for i in gen: if i >= dt: break last = i return last
def before(self, dt, inc=False): """ Returns the last recurrence before the given datetime instance. The inc keyword defines what happens if dt is an occurrence. With inc=True, if dt itself is an occurrence, it will be returned. """ if self._cache_complete: gen = self._cache else: gen = self last = None if inc: for i in gen: if i > dt: break last = i else: for i in gen: if i >= dt: break last = i return last
[ "Returns", "the", "last", "recurrence", "before", "the", "given", "datetime", "instance", ".", "The", "inc", "keyword", "defines", "what", "happens", "if", "dt", "is", "an", "occurrence", ".", "With", "inc", "=", "True", "if", "dt", "itself", "is", "an", "occurrence", "it", "will", "be", "returned", "." ]
MacHu-GWU/superjson-project
python
https://github.com/MacHu-GWU/superjson-project/blob/782ca4b2edbd4b4018b8cedee42eeae7c921b917/superjson/pkg/dateutil/rrule.py#L193-L212
[ "def", "before", "(", "self", ",", "dt", ",", "inc", "=", "False", ")", ":", "if", "self", ".", "_cache_complete", ":", "gen", "=", "self", ".", "_cache", "else", ":", "gen", "=", "self", "last", "=", "None", "if", "inc", ":", "for", "i", "in", "gen", ":", "if", "i", ">", "dt", ":", "break", "last", "=", "i", "else", ":", "for", "i", "in", "gen", ":", "if", "i", ">=", "dt", ":", "break", "last", "=", "i", "return", "last" ]
782ca4b2edbd4b4018b8cedee42eeae7c921b917
valid
rrulebase.after
Returns the first recurrence after the given datetime instance. The inc keyword defines what happens if dt is an occurrence. With inc=True, if dt itself is an occurrence, it will be returned.
superjson/pkg/dateutil/rrule.py
def after(self, dt, inc=False): """ Returns the first recurrence after the given datetime instance. The inc keyword defines what happens if dt is an occurrence. With inc=True, if dt itself is an occurrence, it will be returned. """ if self._cache_complete: gen = self._cache else: gen = self if inc: for i in gen: if i >= dt: return i else: for i in gen: if i > dt: return i return None
def after(self, dt, inc=False): """ Returns the first recurrence after the given datetime instance. The inc keyword defines what happens if dt is an occurrence. With inc=True, if dt itself is an occurrence, it will be returned. """ if self._cache_complete: gen = self._cache else: gen = self if inc: for i in gen: if i >= dt: return i else: for i in gen: if i > dt: return i return None
[ "Returns", "the", "first", "recurrence", "after", "the", "given", "datetime", "instance", ".", "The", "inc", "keyword", "defines", "what", "happens", "if", "dt", "is", "an", "occurrence", ".", "With", "inc", "=", "True", "if", "dt", "itself", "is", "an", "occurrence", "it", "will", "be", "returned", "." ]
MacHu-GWU/superjson-project
python
https://github.com/MacHu-GWU/superjson-project/blob/782ca4b2edbd4b4018b8cedee42eeae7c921b917/superjson/pkg/dateutil/rrule.py#L214-L230
[ "def", "after", "(", "self", ",", "dt", ",", "inc", "=", "False", ")", ":", "if", "self", ".", "_cache_complete", ":", "gen", "=", "self", ".", "_cache", "else", ":", "gen", "=", "self", "if", "inc", ":", "for", "i", "in", "gen", ":", "if", "i", ">=", "dt", ":", "return", "i", "else", ":", "for", "i", "in", "gen", ":", "if", "i", ">", "dt", ":", "return", "i", "return", "None" ]
782ca4b2edbd4b4018b8cedee42eeae7c921b917
valid
rrulebase.xafter
Generator which yields up to `count` recurrences after the given datetime instance, equivalent to `after`. :param dt: The datetime at which to start generating recurrences. :param count: The maximum number of recurrences to generate. If `None` (default), dates are generated until the recurrence rule is exhausted. :param inc: If `dt` is an instance of the rule and `inc` is `True`, it is included in the output. :yields: Yields a sequence of `datetime` objects.
superjson/pkg/dateutil/rrule.py
def xafter(self, dt, count=None, inc=False): """ Generator which yields up to `count` recurrences after the given datetime instance, equivalent to `after`. :param dt: The datetime at which to start generating recurrences. :param count: The maximum number of recurrences to generate. If `None` (default), dates are generated until the recurrence rule is exhausted. :param inc: If `dt` is an instance of the rule and `inc` is `True`, it is included in the output. :yields: Yields a sequence of `datetime` objects. """ if self._cache_complete: gen = self._cache else: gen = self # Select the comparison function if inc: def comp(dc, dtc): return dc >= dtc else: def comp(dc, dtc): return dc > dtc # Generate dates n = 0 for d in gen: if comp(d, dt): if count is not None: n += 1 if n > count: break yield d
def xafter(self, dt, count=None, inc=False): """ Generator which yields up to `count` recurrences after the given datetime instance, equivalent to `after`. :param dt: The datetime at which to start generating recurrences. :param count: The maximum number of recurrences to generate. If `None` (default), dates are generated until the recurrence rule is exhausted. :param inc: If `dt` is an instance of the rule and `inc` is `True`, it is included in the output. :yields: Yields a sequence of `datetime` objects. """ if self._cache_complete: gen = self._cache else: gen = self # Select the comparison function if inc: def comp(dc, dtc): return dc >= dtc else: def comp(dc, dtc): return dc > dtc # Generate dates n = 0 for d in gen: if comp(d, dt): if count is not None: n += 1 if n > count: break yield d
[ "Generator", "which", "yields", "up", "to", "count", "recurrences", "after", "the", "given", "datetime", "instance", "equivalent", "to", "after", "." ]
MacHu-GWU/superjson-project
python
https://github.com/MacHu-GWU/superjson-project/blob/782ca4b2edbd4b4018b8cedee42eeae7c921b917/superjson/pkg/dateutil/rrule.py#L232-L271
[ "def", "xafter", "(", "self", ",", "dt", ",", "count", "=", "None", ",", "inc", "=", "False", ")", ":", "if", "self", ".", "_cache_complete", ":", "gen", "=", "self", ".", "_cache", "else", ":", "gen", "=", "self", "# Select the comparison function", "if", "inc", ":", "def", "comp", "(", "dc", ",", "dtc", ")", ":", "return", "dc", ">=", "dtc", "else", ":", "def", "comp", "(", "dc", ",", "dtc", ")", ":", "return", "dc", ">", "dtc", "# Generate dates", "n", "=", "0", "for", "d", "in", "gen", ":", "if", "comp", "(", "d", ",", "dt", ")", ":", "if", "count", "is", "not", "None", ":", "n", "+=", "1", "if", "n", ">", "count", ":", "break", "yield", "d" ]
782ca4b2edbd4b4018b8cedee42eeae7c921b917
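The before, after and xafter records above come from dateutil's rrulebase, vendored under superjson.pkg.dateutil. A usage sketch against the upstream python-dateutil package, which exposes the same methods:

```python
# Sketch: uses upstream `python-dateutil`; the vendored copy in
# superjson.pkg.dateutil provides the same before/after/xafter methods.
from datetime import datetime
from dateutil.rrule import rrule, DAILY

rule = rrule(DAILY, dtstart=datetime(2024, 1, 1), count=10)
probe = datetime(2024, 1, 5)

print(rule.before(probe))                 # 2024-01-04 (last occurrence before probe)
print(rule.after(probe))                  # 2024-01-06 (first occurrence after probe)
print(rule.before(probe, inc=True))       # 2024-01-05 (probe itself is an occurrence)
print(list(rule.xafter(probe, count=3)))  # next three occurrences after probe
```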
valid
rrule.replace
Return new rrule with same attributes except for those attributes given new values by whichever keyword arguments are specified.
superjson/pkg/dateutil/rrule.py
def replace(self, **kwargs): """Return new rrule with same attributes except for those attributes given new values by whichever keyword arguments are specified.""" new_kwargs = {"interval": self._interval, "count": self._count, "dtstart": self._dtstart, "freq": self._freq, "until": self._until, "wkst": self._wkst, "cache": False if self._cache is None else True} new_kwargs.update(self._original_rule) new_kwargs.update(kwargs) return rrule(**new_kwargs)
def replace(self, **kwargs): """Return new rrule with same attributes except for those attributes given new values by whichever keyword arguments are specified.""" new_kwargs = {"interval": self._interval, "count": self._count, "dtstart": self._dtstart, "freq": self._freq, "until": self._until, "wkst": self._wkst, "cache": False if self._cache is None else True} new_kwargs.update(self._original_rule) new_kwargs.update(kwargs) return rrule(**new_kwargs)
[ "Return", "new", "rrule", "with", "same", "attributes", "except", "for", "those", "attributes", "given", "new", "values", "by", "whichever", "keyword", "arguments", "are", "specified", "." ]
MacHu-GWU/superjson-project
python
https://github.com/MacHu-GWU/superjson-project/blob/782ca4b2edbd4b4018b8cedee42eeae7c921b917/superjson/pkg/dateutil/rrule.py#L742-L754
[ "def", "replace", "(", "self", ",", "*", "*", "kwargs", ")", ":", "new_kwargs", "=", "{", "\"interval\"", ":", "self", ".", "_interval", ",", "\"count\"", ":", "self", ".", "_count", ",", "\"dtstart\"", ":", "self", ".", "_dtstart", ",", "\"freq\"", ":", "self", ".", "_freq", ",", "\"until\"", ":", "self", ".", "_until", ",", "\"wkst\"", ":", "self", ".", "_wkst", ",", "\"cache\"", ":", "False", "if", "self", ".", "_cache", "is", "None", "else", "True", "}", "new_kwargs", ".", "update", "(", "self", ".", "_original_rule", ")", "new_kwargs", ".", "update", "(", "kwargs", ")", "return", "rrule", "(", "*", "*", "new_kwargs", ")" ]
782ca4b2edbd4b4018b8cedee42eeae7c921b917
valid
rrule.__construct_byset
If a `BYXXX` sequence is passed to the constructor at the same level as `FREQ` (e.g. `FREQ=HOURLY,BYHOUR={2,4,7},INTERVAL=3`), there are some specifications which cannot be reached given some starting conditions. This occurs whenever the interval is not coprime with the base of a given unit and the difference between the starting position and the ending position is not coprime with the greatest common denominator between the interval and the base. For example, with a FREQ of hourly starting at 17:00 and an interval of 4, the only valid values for BYHOUR would be {21, 1, 5, 9, 13, 17}, because 4 and 24 are not coprime. :param start: Specifies the starting position. :param byxxx: An iterable containing the list of allowed values. :param base: The largest allowable value for the specified frequency (e.g. 24 hours, 60 minutes). This does not preserve the type of the iterable, returning a set, since the values should be unique and the order is irrelevant, this will speed up later lookups. In the event of an empty set, raises a :exception:`ValueError`, as this results in an empty rrule.
superjson/pkg/dateutil/rrule.py
def __construct_byset(self, start, byxxx, base): """ If a `BYXXX` sequence is passed to the constructor at the same level as `FREQ` (e.g. `FREQ=HOURLY,BYHOUR={2,4,7},INTERVAL=3`), there are some specifications which cannot be reached given some starting conditions. This occurs whenever the interval is not coprime with the base of a given unit and the difference between the starting position and the ending position is not coprime with the greatest common denominator between the interval and the base. For example, with a FREQ of hourly starting at 17:00 and an interval of 4, the only valid values for BYHOUR would be {21, 1, 5, 9, 13, 17}, because 4 and 24 are not coprime. :param start: Specifies the starting position. :param byxxx: An iterable containing the list of allowed values. :param base: The largest allowable value for the specified frequency (e.g. 24 hours, 60 minutes). This does not preserve the type of the iterable, returning a set, since the values should be unique and the order is irrelevant, this will speed up later lookups. In the event of an empty set, raises a :exception:`ValueError`, as this results in an empty rrule. """ cset = set() # Support a single byxxx value. if isinstance(byxxx, integer_types): byxxx = (byxxx, ) for num in byxxx: i_gcd = gcd(self._interval, base) # Use divmod rather than % because we need to wrap negative nums. if i_gcd == 1 or divmod(num - start, i_gcd)[1] == 0: cset.add(num) if len(cset) == 0: raise ValueError("Invalid rrule byxxx generates an empty set.") return cset
def __construct_byset(self, start, byxxx, base): """ If a `BYXXX` sequence is passed to the constructor at the same level as `FREQ` (e.g. `FREQ=HOURLY,BYHOUR={2,4,7},INTERVAL=3`), there are some specifications which cannot be reached given some starting conditions. This occurs whenever the interval is not coprime with the base of a given unit and the difference between the starting position and the ending position is not coprime with the greatest common denominator between the interval and the base. For example, with a FREQ of hourly starting at 17:00 and an interval of 4, the only valid values for BYHOUR would be {21, 1, 5, 9, 13, 17}, because 4 and 24 are not coprime. :param start: Specifies the starting position. :param byxxx: An iterable containing the list of allowed values. :param base: The largest allowable value for the specified frequency (e.g. 24 hours, 60 minutes). This does not preserve the type of the iterable, returning a set, since the values should be unique and the order is irrelevant, this will speed up later lookups. In the event of an empty set, raises a :exception:`ValueError`, as this results in an empty rrule. """ cset = set() # Support a single byxxx value. if isinstance(byxxx, integer_types): byxxx = (byxxx, ) for num in byxxx: i_gcd = gcd(self._interval, base) # Use divmod rather than % because we need to wrap negative nums. if i_gcd == 1 or divmod(num - start, i_gcd)[1] == 0: cset.add(num) if len(cset) == 0: raise ValueError("Invalid rrule byxxx generates an empty set.") return cset
[ "If", "a", "BYXXX", "sequence", "is", "passed", "to", "the", "constructor", "at", "the", "same", "level", "as", "FREQ", "(", "e", ".", "g", ".", "FREQ", "=", "HOURLY", "BYHOUR", "=", "{", "2", "4", "7", "}", "INTERVAL", "=", "3", ")", "there", "are", "some", "specifications", "which", "cannot", "be", "reached", "given", "some", "starting", "conditions", "." ]
MacHu-GWU/superjson-project
python
https://github.com/MacHu-GWU/superjson-project/blob/782ca4b2edbd4b4018b8cedee42eeae7c921b917/superjson/pkg/dateutil/rrule.py#L1013-L1058
[ "def", "__construct_byset", "(", "self", ",", "start", ",", "byxxx", ",", "base", ")", ":", "cset", "=", "set", "(", ")", "# Support a single byxxx value.", "if", "isinstance", "(", "byxxx", ",", "integer_types", ")", ":", "byxxx", "=", "(", "byxxx", ",", ")", "for", "num", "in", "byxxx", ":", "i_gcd", "=", "gcd", "(", "self", ".", "_interval", ",", "base", ")", "# Use divmod rather than % because we need to wrap negative nums.", "if", "i_gcd", "==", "1", "or", "divmod", "(", "num", "-", "start", ",", "i_gcd", ")", "[", "1", "]", "==", "0", ":", "cset", ".", "add", "(", "num", ")", "if", "len", "(", "cset", ")", "==", "0", ":", "raise", "ValueError", "(", "\"Invalid rrule byxxx generates an empty set.\"", ")", "return", "cset" ]
782ca4b2edbd4b4018b8cedee42eeae7c921b917
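The docstring above includes a concrete case: an hourly rule starting at 17:00 with an interval of 4 can only ever reach the hours {21, 1, 5, 9, 13, 17}. The filter reduces to modular arithmetic and can be reproduced directly:

```python
from math import gcd

start, interval, base = 17, 4, 24          # dtstart hour, INTERVAL, hours per day
i_gcd = gcd(interval, base)                # 4 and 24 are not coprime -> gcd is 4

# Same condition as the loop above: keep num when (num - start) % gcd == 0.
reachable = {num for num in range(base)
             if i_gcd == 1 or (num - start) % i_gcd == 0}
print(sorted(reachable))                   # [1, 5, 9, 13, 17, 21]
```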
valid
rrule.__mod_distance
Calculates the next value in a sequence where the `FREQ` parameter is specified along with a `BYXXX` parameter at the same "level" (e.g. `HOURLY` specified with `BYHOUR`). :param value: The old value of the component. :param byxxx: The `BYXXX` set, which should have been generated by `rrule._construct_byset`, or something else which checks that a valid rule is present. :param base: The largest allowable value for the specified frequency (e.g. 24 hours, 60 minutes). If a valid value is not found after `base` iterations (the maximum number before the sequence would start to repeat), this raises a :exception:`ValueError`, as no valid values were found. This returns a tuple of `divmod(n*interval, base)`, where `n` is the smallest number of `interval` repetitions until the next specified value in `byxxx` is found.
superjson/pkg/dateutil/rrule.py
def __mod_distance(self, value, byxxx, base): """ Calculates the next value in a sequence where the `FREQ` parameter is specified along with a `BYXXX` parameter at the same "level" (e.g. `HOURLY` specified with `BYHOUR`). :param value: The old value of the component. :param byxxx: The `BYXXX` set, which should have been generated by `rrule._construct_byset`, or something else which checks that a valid rule is present. :param base: The largest allowable value for the specified frequency (e.g. 24 hours, 60 minutes). If a valid value is not found after `base` iterations (the maximum number before the sequence would start to repeat), this raises a :exception:`ValueError`, as no valid values were found. This returns a tuple of `divmod(n*interval, base)`, where `n` is the smallest number of `interval` repetitions until the next specified value in `byxxx` is found. """ accumulator = 0 for ii in range(1, base + 1): # Using divmod() over % to account for negative intervals div, value = divmod(value + self._interval, base) accumulator += div if value in byxxx: return (accumulator, value)
def __mod_distance(self, value, byxxx, base): """ Calculates the next value in a sequence where the `FREQ` parameter is specified along with a `BYXXX` parameter at the same "level" (e.g. `HOURLY` specified with `BYHOUR`). :param value: The old value of the component. :param byxxx: The `BYXXX` set, which should have been generated by `rrule._construct_byset`, or something else which checks that a valid rule is present. :param base: The largest allowable value for the specified frequency (e.g. 24 hours, 60 minutes). If a valid value is not found after `base` iterations (the maximum number before the sequence would start to repeat), this raises a :exception:`ValueError`, as no valid values were found. This returns a tuple of `divmod(n*interval, base)`, where `n` is the smallest number of `interval` repetitions until the next specified value in `byxxx` is found. """ accumulator = 0 for ii in range(1, base + 1): # Using divmod() over % to account for negative intervals div, value = divmod(value + self._interval, base) accumulator += div if value in byxxx: return (accumulator, value)
[ "Calculates", "the", "next", "value", "in", "a", "sequence", "where", "the", "FREQ", "parameter", "is", "specified", "along", "with", "a", "BYXXX", "parameter", "at", "the", "same", "level", "(", "e", ".", "g", ".", "HOURLY", "specified", "with", "BYHOUR", ")", "." ]
MacHu-GWU/superjson-project
python
https://github.com/MacHu-GWU/superjson-project/blob/782ca4b2edbd4b4018b8cedee42eeae7c921b917/superjson/pkg/dateutil/rrule.py#L1060-L1090
[ "def", "__mod_distance", "(", "self", ",", "value", ",", "byxxx", ",", "base", ")", ":", "accumulator", "=", "0", "for", "ii", "in", "range", "(", "1", ",", "base", "+", "1", ")", ":", "# Using divmod() over % to account for negative intervals", "div", ",", "value", "=", "divmod", "(", "value", "+", "self", ".", "_interval", ",", "base", ")", "accumulator", "+=", "div", "if", "value", "in", "byxxx", ":", "return", "(", "accumulator", ",", "value", ")" ]
782ca4b2edbd4b4018b8cedee42eeae7c921b917
valid
_rrulestr._handle_BYWEEKDAY
Two ways to specify this: +1MO or MO(+1)
superjson/pkg/dateutil/rrule.py
def _handle_BYWEEKDAY(self, rrkwargs, name, value, **kwargs): """ Two ways to specify this: +1MO or MO(+1) """ l = [] for wday in value.split(','): if '(' in wday: # If it's of the form TH(+1), etc. splt = wday.split('(') w = splt[0] n = int(splt[1][:-1]) elif len(wday): # If it's of the form +1MO for i in range(len(wday)): if wday[i] not in '+-0123456789': break n = wday[:i] or None w = wday[i:] if n: n = int(n) else: raise ValueError("Invalid (empty) BYDAY specification.") l.append(weekdays[self._weekday_map[w]](n)) rrkwargs["byweekday"] = l
def _handle_BYWEEKDAY(self, rrkwargs, name, value, **kwargs): """ Two ways to specify this: +1MO or MO(+1) """ l = [] for wday in value.split(','): if '(' in wday: # If it's of the form TH(+1), etc. splt = wday.split('(') w = splt[0] n = int(splt[1][:-1]) elif len(wday): # If it's of the form +1MO for i in range(len(wday)): if wday[i] not in '+-0123456789': break n = wday[:i] or None w = wday[i:] if n: n = int(n) else: raise ValueError("Invalid (empty) BYDAY specification.") l.append(weekdays[self._weekday_map[w]](n)) rrkwargs["byweekday"] = l
[ "Two", "ways", "to", "specify", "this", ":", "+", "1MO", "or", "MO", "(", "+", "1", ")" ]
MacHu-GWU/superjson-project
python
https://github.com/MacHu-GWU/superjson-project/blob/782ca4b2edbd4b4018b8cedee42eeae7c921b917/superjson/pkg/dateutil/rrule.py#L1445-L1469
[ "def", "_handle_BYWEEKDAY", "(", "self", ",", "rrkwargs", ",", "name", ",", "value", ",", "*", "*", "kwargs", ")", ":", "l", "=", "[", "]", "for", "wday", "in", "value", ".", "split", "(", "','", ")", ":", "if", "'('", "in", "wday", ":", "# If it's of the form TH(+1), etc.", "splt", "=", "wday", ".", "split", "(", "'('", ")", "w", "=", "splt", "[", "0", "]", "n", "=", "int", "(", "splt", "[", "1", "]", "[", ":", "-", "1", "]", ")", "elif", "len", "(", "wday", ")", ":", "# If it's of the form +1MO", "for", "i", "in", "range", "(", "len", "(", "wday", ")", ")", ":", "if", "wday", "[", "i", "]", "not", "in", "'+-0123456789'", ":", "break", "n", "=", "wday", "[", ":", "i", "]", "or", "None", "w", "=", "wday", "[", "i", ":", "]", "if", "n", ":", "n", "=", "int", "(", "n", ")", "else", ":", "raise", "ValueError", "(", "\"Invalid (empty) BYDAY specification.\"", ")", "l", ".", "append", "(", "weekdays", "[", "self", ".", "_weekday_map", "[", "w", "]", "]", "(", "n", ")", ")", "rrkwargs", "[", "\"byweekday\"", "]", "=", "l" ]
782ca4b2edbd4b4018b8cedee42eeae7c921b917
valid
get_data_for_root
This is the only API function of the projectfile module. It parses the Projectfiles from the given path and assembles the flattened command data structure. Returned data: { 'min-version': (1, 0, 0), 'description': 'Optional main description.', 'commands': { 'command_1': { 'description': 'Optional command level description for command_1.', 'script': [ 'flattened', 'out command', 'list for', 'command_1', ... ] } ... } } Raises: ProjectfileError with descriptive error message in the format of: { 'path': 'Optional path for the corresponding Projectfile.', 'line': 'Optional line number for the error in the Projectfile.', 'error': 'Mandatory descriptive error message.' } :param project_root: :return: {dict} parsed and flattened commands with descriptions
projects/projectfile/__init__.py
def get_data_for_root(project_root): """This is the only API function of the projectfile module. It parses the Projectfiles from the given path and assembles the flattened command data structure. Returned data: { 'min-version': (1, 0, 0), 'description': 'Optional main description.', 'commands': { 'command_1': { 'description': 'Optional command level description for command_1.', 'script': [ 'flattened', 'out command', 'list for', 'command_1', ... ] } ... } } Raises: ProjectfileError with descriptive error message in the format of: { 'path': 'Optional path for the corresponding Projectfile.', 'line': 'Optional line number for the error in the Projectfile.', 'error': 'Mandatory descriptive error message.' } :param project_root: :return: {dict} parsed and flattened commands with descriptions """ raw_nodes = file_handler.get_node_list(project_root) command_tree = command_processor.generate_command_tree(raw_nodes) command_processor.flatten_commands(command_tree) command_processor.process_variables(command_tree) return command_tree
def get_data_for_root(project_root): """This is the only API function of the projectfile module. It parses the Projectfiles from the given path and assembles the flattened command data structure. Returned data: { 'min-version': (1, 0, 0), 'description': 'Optional main description.', 'commands': { 'command_1': { 'description': 'Optional command level description for command_1.', 'script': [ 'flattened', 'out command', 'list for', 'command_1', ... ] } ... } } Raises: ProjectfileError with descriptive error message in the format of: { 'path': 'Optional path for the corresponding Projectfile.', 'line': 'Optional line number for the error in the Projectfile.', 'error': 'Mandatory descriptive error message.' } :param project_root: :return: {dict} parsed and flattened commands with descriptions """ raw_nodes = file_handler.get_node_list(project_root) command_tree = command_processor.generate_command_tree(raw_nodes) command_processor.flatten_commands(command_tree) command_processor.process_variables(command_tree) return command_tree
[ "This", "is", "the", "only", "API", "function", "of", "the", "projectfile", "module", ".", "It", "parses", "the", "Projectfiles", "from", "the", "given", "path", "and", "assembles", "the", "flattened", "command", "data", "structure", ".", "Returned", "data", ":", "{", "min", "-", "version", ":", "(", "1", "0", "0", ")", "description", ":", "Optional", "main", "description", ".", "commands", ":", "{", "command_1", ":", "{", "description", ":", "Optional", "command", "level", "description", "for", "command_1", ".", "script", ":", "[", "flattened", "out", "command", "list", "for", "command_1", "...", "]", "}", "...", "}", "}", "Raises", ":", "ProjectfileError", "with", "descriptive", "error", "message", "in", "the", "format", "of", ":", "{", "path", ":", "Optional", "path", "for", "the", "corresponding", "Projectfile", ".", "line", ":", "Optional", "line", "number", "for", "the", "error", "in", "the", "Projectfile", ".", "error", ":", "Mandatory", "descriptive", "error", "message", ".", "}", ":", "param", "project_root", ":", ":", "return", ":", "{", "dict", "}", "parsed", "and", "flattened", "commands", "with", "descriptions" ]
tiborsimon/projects
python
https://github.com/tiborsimon/projects/blob/44d1caf2bab001a2b0bf33c40d7669ae1206f534/projects/projectfile/__init__.py#L49-L87
[ "def", "get_data_for_root", "(", "project_root", ")", ":", "raw_nodes", "=", "file_handler", ".", "get_node_list", "(", "project_root", ")", "command_tree", "=", "command_processor", ".", "generate_command_tree", "(", "raw_nodes", ")", "command_processor", ".", "flatten_commands", "(", "command_tree", ")", "command_processor", ".", "process_variables", "(", "command_tree", ")", "return", "command_tree" ]
44d1caf2bab001a2b0bf33c40d7669ae1206f534
valid
run_get_percentage
Calculate what percentage a given number is of another, e.g. 50 is 50% of 100.
commands.py
def run_get_percentage(): """ Calculate what percentage a given number is of another, e.g. 50 is 50% of 100. """ description = run_get_percentage.__doc__ parser = argparse.ArgumentParser( prog='get_percentage', description=description, epilog="Example use: get_percentage 25 100", ) parser.add_argument( 'a', help='Integer or floating point number that is a percent of another number' ) parser.add_argument( 'b', help='Integer or floating point number of which the first number is a percent', ) args = parser.parse_args() print(sm.get_percentage(float(args.a), float(args.b)))
def run_get_percentage(): """ Calculate what percentage a given number is of another, e.g. 50 is 50% of 100. """ description = run_get_percentage.__doc__ parser = argparse.ArgumentParser( prog='get_percentage', description=description, epilog="Example use: get_percentage 25 100", ) parser.add_argument( 'a', help='Integer or floating point number that is a percent of another number' ) parser.add_argument( 'b', help='Integer or floating point number of which the first number is a percent', ) args = parser.parse_args() print(sm.get_percentage(float(args.a), float(args.b)))
[ "Calculate", "what", "percentage", "a", "given", "number", "is", "of", "another", "e", ".", "g", ".", "50", "is", "50%", "of", "100", "." ]
bbusenius/Diablo-Python
python
https://github.com/bbusenius/Diablo-Python/blob/646ac5a6f1c79cf9b928a4e2a7979988698b6c82/commands.py#L25-L44
[ "def", "run_get_percentage", "(", ")", ":", "description", "=", "run_get_percentage", ".", "__doc__", "parser", "=", "argparse", ".", "ArgumentParser", "(", "prog", "=", "'get_percentage'", ",", "description", "=", "description", ",", "epilog", "=", "\"Example use: get_percentage 25 100\"", ",", ")", "parser", ".", "add_argument", "(", "'a'", ",", "help", "=", "'Integer or floating point number that is a percent of another number'", ")", "parser", ".", "add_argument", "(", "'b'", ",", "help", "=", "'Integer or floating point number of which the first number is a percent'", ",", ")", "args", "=", "parser", ".", "parse_args", "(", ")", "print", "(", "sm", ".", "get_percentage", "(", "float", "(", "args", ".", "a", ")", ",", "float", "(", "args", ".", "b", ")", ")", ")" ]
646ac5a6f1c79cf9b928a4e2a7979988698b6c82
valid
run_excel_to_html
Run the excel_to_html function from the command-line. Args: -p path to file -s name of the sheet to convert -css classes to apply -m attempt to combine merged cells -c caption for accessibility -su summary for accessibility -d details for accessibility Example use: excel_to_html -p myfile.xlsx -s SheetName -css diablo-python -m true
commands.py
def run_excel_to_html(): """ Run the excel_to_html function from the command-line. Args: -p path to file -s name of the sheet to convert -css classes to apply -m attempt to combine merged cells -c caption for accessibility -su summary for accessibility -d details for accessibility Example use: excel_to_html -p myfile.xlsx -s SheetName -css diablo-python -m true """ # Capture commandline arguments. prog='' argument must # match the command name in setup.py entry_points parser = argparse.ArgumentParser(prog='excel_to_html') parser.add_argument('-p', nargs='?', help='Path to an excel file for conversion.') parser.add_argument( '-s', nargs='?', help='The name of a sheet in our excel file. Defaults to "Sheet1".', ) parser.add_argument( '-css', nargs='?', help='Space separated css classes to append to the table.' ) parser.add_argument( '-m', action='store_true', help='Merge, attempt to combine merged cells.' ) parser.add_argument( '-c', nargs='?', help='Caption for creating an accessible table.' ) parser.add_argument( '-d', nargs='?', help='Two strings separated by a | character. The first string \ is for the html "summary" attribute and the second string is for the html "details" attribute. \ both values must be provided and nothing more.', ) parser.add_argument( '-r', action='store_true', help='Row headers. Does the table have row headers?' ) args = parser.parse_args() inputs = { 'p': args.p, 's': args.s, 'css': args.css, 'm': args.m, 'c': args.c, 'd': args.d, 'r': args.r, } p = inputs['p'] s = inputs['s'] if inputs['s'] else 'Sheet1' css = inputs['css'] if inputs['css'] else '' m = inputs['m'] if inputs['m'] else False c = inputs['c'] if inputs['c'] else '' d = inputs['d'].split('|') if inputs['d'] else [] r = inputs['r'] if inputs['r'] else False html = fp.excel_to_html( p, sheetname=s, css_classes=css, caption=c, details=d, row_headers=r, merge=m ) print(html)
def run_excel_to_html(): """ Run the excel_to_html function from the command-line. Args: -p path to file -s name of the sheet to convert -css classes to apply -m attempt to combine merged cells -c caption for accessibility -su summary for accessibility -d details for accessibility Example use: excel_to_html -p myfile.xlsx -s SheetName -css diablo-python -m true """ # Capture commandline arguments. prog='' argument must # match the command name in setup.py entry_points parser = argparse.ArgumentParser(prog='excel_to_html') parser.add_argument('-p', nargs='?', help='Path to an excel file for conversion.') parser.add_argument( '-s', nargs='?', help='The name of a sheet in our excel file. Defaults to "Sheet1".', ) parser.add_argument( '-css', nargs='?', help='Space separated css classes to append to the table.' ) parser.add_argument( '-m', action='store_true', help='Merge, attempt to combine merged cells.' ) parser.add_argument( '-c', nargs='?', help='Caption for creating an accessible table.' ) parser.add_argument( '-d', nargs='?', help='Two strings separated by a | character. The first string \ is for the html "summary" attribute and the second string is for the html "details" attribute. \ both values must be provided and nothing more.', ) parser.add_argument( '-r', action='store_true', help='Row headers. Does the table have row headers?' ) args = parser.parse_args() inputs = { 'p': args.p, 's': args.s, 'css': args.css, 'm': args.m, 'c': args.c, 'd': args.d, 'r': args.r, } p = inputs['p'] s = inputs['s'] if inputs['s'] else 'Sheet1' css = inputs['css'] if inputs['css'] else '' m = inputs['m'] if inputs['m'] else False c = inputs['c'] if inputs['c'] else '' d = inputs['d'].split('|') if inputs['d'] else [] r = inputs['r'] if inputs['r'] else False html = fp.excel_to_html( p, sheetname=s, css_classes=css, caption=c, details=d, row_headers=r, merge=m ) print(html)
[ "Run", "the", "excel_to_html", "function", "from", "the", "command", "-", "line", "." ]
bbusenius/Diablo-Python
python
https://github.com/bbusenius/Diablo-Python/blob/646ac5a6f1c79cf9b928a4e2a7979988698b6c82/commands.py#L47-L117
[ "def", "run_excel_to_html", "(", ")", ":", "# Capture commandline arguments. prog='' argument must", "# match the command name in setup.py entry_points", "parser", "=", "argparse", ".", "ArgumentParser", "(", "prog", "=", "'excel_to_html'", ")", "parser", ".", "add_argument", "(", "'-p'", ",", "nargs", "=", "'?'", ",", "help", "=", "'Path to an excel file for conversion.'", ")", "parser", ".", "add_argument", "(", "'-s'", ",", "nargs", "=", "'?'", ",", "help", "=", "'The name of a sheet in our excel file. Defaults to \"Sheet1\".'", ",", ")", "parser", ".", "add_argument", "(", "'-css'", ",", "nargs", "=", "'?'", ",", "help", "=", "'Space separated css classes to append to the table.'", ")", "parser", ".", "add_argument", "(", "'-m'", ",", "action", "=", "'store_true'", ",", "help", "=", "'Merge, attempt to combine merged cells.'", ")", "parser", ".", "add_argument", "(", "'-c'", ",", "nargs", "=", "'?'", ",", "help", "=", "'Caption for creating an accessible table.'", ")", "parser", ".", "add_argument", "(", "'-d'", ",", "nargs", "=", "'?'", ",", "help", "=", "'Two strings separated by a | character. The first string \\\n is for the html \"summary\" attribute and the second string is for the html \"details\" attribute. \\\n both values must be provided and nothing more.'", ",", ")", "parser", ".", "add_argument", "(", "'-r'", ",", "action", "=", "'store_true'", ",", "help", "=", "'Row headers. Does the table have row headers?'", ")", "args", "=", "parser", ".", "parse_args", "(", ")", "inputs", "=", "{", "'p'", ":", "args", ".", "p", ",", "'s'", ":", "args", ".", "s", ",", "'css'", ":", "args", ".", "css", ",", "'m'", ":", "args", ".", "m", ",", "'c'", ":", "args", ".", "c", ",", "'d'", ":", "args", ".", "d", ",", "'r'", ":", "args", ".", "r", ",", "}", "p", "=", "inputs", "[", "'p'", "]", "s", "=", "inputs", "[", "'s'", "]", "if", "inputs", "[", "'s'", "]", "else", "'Sheet1'", "css", "=", "inputs", "[", "'css'", "]", "if", "inputs", "[", "'css'", "]", "else", "''", "m", "=", "inputs", "[", "'m'", "]", "if", "inputs", "[", "'m'", "]", "else", "False", "c", "=", "inputs", "[", "'c'", "]", "if", "inputs", "[", "'c'", "]", "else", "''", "d", "=", "inputs", "[", "'d'", "]", ".", "split", "(", "'|'", ")", "if", "inputs", "[", "'d'", "]", "else", "[", "]", "r", "=", "inputs", "[", "'r'", "]", "if", "inputs", "[", "'r'", "]", "else", "False", "html", "=", "fp", ".", "excel_to_html", "(", "p", ",", "sheetname", "=", "s", ",", "css_classes", "=", "css", ",", "caption", "=", "c", ",", "details", "=", "d", ",", "row_headers", "=", "r", ",", "merge", "=", "m", ")", "print", "(", "html", ")" ]
646ac5a6f1c79cf9b928a4e2a7979988698b6c82
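A minimal sketch of calling the converter directly instead of through the CLI wrapper above. The keyword names mirror the fp.excel_to_html call in the record; the file_parsing import path, file name, and argument values are assumptions for illustration only.

import file_parsing as fp   # assumed; the record does not show how `fp` is imported

html = fp.excel_to_html(
    'report.xlsx',                                # what -p would supply
    sheetname='Sheet1',                           # the -s default
    css_classes='diablo-python',                  # -css
    caption='Quarterly totals',                   # -c
    details=['Summary text', 'Details text'],     # the -d value after splitting on '|'
    row_headers=False,                            # -r
    merge=False,                                  # -m
)
print(html)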
valid
ConvertPHP.get_built_in
Gets the return string for a language that's supported by python. Used in cases when python provides support for the conversion. Args: language: string, the language to return for. level: integer, the indentation level. data: python data structure being converted (list of tuples) Returns: None, updates self.data_structure
convert_php/convert_php.py
def get_built_in(self, language, level, data): """ Gets the return string for a language that's supported by python. Used in cases when python provides support for the conversion. Args: language: string the langage to return for. level: integer, the indentation level. data: python data structure being converted (list of tuples) Returns: None, updates self.data_structure """ # Language is python pp = pprint.PrettyPrinter(indent=level) lookup = {'python' : pp.pformat(data), 'json' : str(json.dumps(data, sort_keys=True, indent=level, separators=(',', ': ')))} self.data_structure = lookup[language]
def get_built_in(self, language, level, data): """ Gets the return string for a language that's supported by python. Used in cases when python provides support for the conversion. Args: language: string the langage to return for. level: integer, the indentation level. data: python data structure being converted (list of tuples) Returns: None, updates self.data_structure """ # Language is python pp = pprint.PrettyPrinter(indent=level) lookup = {'python' : pp.pformat(data), 'json' : str(json.dumps(data, sort_keys=True, indent=level, separators=(',', ': ')))} self.data_structure = lookup[language]
[ "Gets", "the", "return", "string", "for", "a", "language", "that", "s", "supported", "by", "python", ".", "Used", "in", "cases", "when", "python", "provides", "support", "for", "the", "conversion", ".", "Args", ":", "language", ":", "string", "the", "langage", "to", "return", "for", "." ]
bbusenius/Diablo-Python
python
https://github.com/bbusenius/Diablo-Python/blob/646ac5a6f1c79cf9b928a4e2a7979988698b6c82/convert_php/convert_php.py#L73-L94
[ "def", "get_built_in", "(", "self", ",", "language", ",", "level", ",", "data", ")", ":", "# Language is python", "pp", "=", "pprint", ".", "PrettyPrinter", "(", "indent", "=", "level", ")", "lookup", "=", "{", "'python'", ":", "pp", ".", "pformat", "(", "data", ")", ",", "'json'", ":", "str", "(", "json", ".", "dumps", "(", "data", ",", "sort_keys", "=", "True", ",", "indent", "=", "level", ",", "separators", "=", "(", "','", ",", "': '", ")", ")", ")", "}", "self", ".", "data_structure", "=", "lookup", "[", "language", "]" ]
646ac5a6f1c79cf9b928a4e2a7979988698b6c82
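A standalone sketch of the two built-in formatters the method above relies on, so the effect of the indentation level is easy to check outside the class; the sample data is made up.

import json
import pprint

data = [('name', 'diablo'), ('tags', [('0', 'php'), ('1', 'python')])]   # list-of-tuples, like phpserialize output
level = 3

pp = pprint.PrettyPrinter(indent=level)
print(pp.pformat(data))                                                        # the 'python' branch of the lookup
print(json.dumps(data, sort_keys=True, indent=level, separators=(',', ': ')))  # the 'json' branch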
valid
ConvertPHP.get_inner_template
Gets the requested template for the given language. Args: language: string, the language of the template to look for. template_type: string, 'iterable' or 'singular'. An iterable template is needed when the value is an iterable and needs more unpacking, e.g. list, tuple. A singular template is needed when unpacking is complete and the value is singular, e.g. string, int, float. indentation: int, the indentation level. key: multiple types, the array key. val: multiple types, the array values Returns: string, template formatting for arrays by language.
convert_php/convert_php.py
def get_inner_template(self, language, template_type, indentation, key, val): """ Gets the requested template for the given language. Args: language: string, the language of the template to look for. template_type: string, 'iterable' or 'singular'. An iterable template is needed when the value is an iterable and needs more unpacking, e.g. list, tuple. A singular template is needed when unpacking is complete and the value is singular, e.g. string, int, float. indentation: int, the indentation level. key: multiple types, the array key. val: multiple types, the array values Returns: string, template formatting for arrays by language. """ #Language specific inner templates inner_templates = {'php' : { 'iterable' : '%s%s => array \n%s( \n%s%s),\n' % (indentation, key, indentation, val, indentation), 'singular' : '%s%s => %s, \n' % (indentation, key, val) }, 'javascript' : { 'iterable' : '%s%s : {\n%s\n%s},\n' % (indentation, key, val, indentation), 'singular' : '%s%s: %s,\n' % (indentation, key, val)}, 'ocaml' : { 'iterable' : '%s[| (%s, (\n%s\n%s))|] ;;\n' % (indentation, key, val, indentation), 'singular' : '%s(%s, %s);\n' % (indentation, key, val)}} return inner_templates[language][template_type]
def get_inner_template(self, language, template_type, indentation, key, val): """ Gets the requested template for the given language. Args: language: string, the language of the template to look for. template_type: string, 'iterable' or 'singular'. An iterable template is needed when the value is an iterable and needs more unpacking, e.g. list, tuple. A singular template is needed when unpacking is complete and the value is singular, e.g. string, int, float. indentation: int, the indentation level. key: multiple types, the array key. val: multiple types, the array values Returns: string, template formatting for arrays by language. """ #Language specific inner templates inner_templates = {'php' : { 'iterable' : '%s%s => array \n%s( \n%s%s),\n' % (indentation, key, indentation, val, indentation), 'singular' : '%s%s => %s, \n' % (indentation, key, val) }, 'javascript' : { 'iterable' : '%s%s : {\n%s\n%s},\n' % (indentation, key, val, indentation), 'singular' : '%s%s: %s,\n' % (indentation, key, val)}, 'ocaml' : { 'iterable' : '%s[| (%s, (\n%s\n%s))|] ;;\n' % (indentation, key, val, indentation), 'singular' : '%s(%s, %s);\n' % (indentation, key, val)}} return inner_templates[language][template_type]
[ "Gets", "the", "requested", "template", "for", "the", "given", "language", "." ]
bbusenius/Diablo-Python
python
https://github.com/bbusenius/Diablo-Python/blob/646ac5a6f1c79cf9b928a4e2a7979988698b6c82/convert_php/convert_php.py#L96-L129
[ "def", "get_inner_template", "(", "self", ",", "language", ",", "template_type", ",", "indentation", ",", "key", ",", "val", ")", ":", "#Language specific inner templates", "inner_templates", "=", "{", "'php'", ":", "{", "'iterable'", ":", "'%s%s => array \\n%s( \\n%s%s),\\n'", "%", "(", "indentation", ",", "key", ",", "indentation", ",", "val", ",", "indentation", ")", ",", "'singular'", ":", "'%s%s => %s, \\n'", "%", "(", "indentation", ",", "key", ",", "val", ")", "}", ",", "'javascript'", ":", "{", "'iterable'", ":", "'%s%s : {\\n%s\\n%s},\\n'", "%", "(", "indentation", ",", "key", ",", "val", ",", "indentation", ")", ",", "'singular'", ":", "'%s%s: %s,\\n'", "%", "(", "indentation", ",", "key", ",", "val", ")", "}", ",", "'ocaml'", ":", "{", "'iterable'", ":", "'%s[| (%s, (\\n%s\\n%s))|] ;;\\n'", "%", "(", "indentation", ",", "key", ",", "val", ",", "indentation", ")", ",", "'singular'", ":", "'%s(%s, %s);\\n'", "%", "(", "indentation", ",", "key", ",", "val", ")", "}", "}", "return", "inner_templates", "[", "language", "]", "[", "template_type", "]" ]
646ac5a6f1c79cf9b928a4e2a7979988698b6c82
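The same string templates, sketched outside the class to show what one PHP 'singular' entry and one 'iterable' wrapper look like; the keys, values, and indentation width are made up.

indentation = ' ' * 3
key, val = "'title'", "'hello'"
print('%s%s => %s, \n' % (indentation, key, val))                # the php 'singular' template

inner = '%s%s => %s, \n' % (' ' * 6, "'nested'", '1')
print('%s%s => array \n%s( \n%s%s),\n' % (indentation, "'items'", indentation, inner, indentation))  # the php 'iterable' template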
valid
ConvertPHP.translate_array
Unserializes a serialized php array and prints it to the console as a data structure in the specified language. Used to translate or convert a php array into a data structure in another language. Currently supports PHP, Python, Javascript, and JSON. Args: string: a string of serialized php language: a string representing the desired output format for the array. level: integer, indentation level in spaces. Defaults to 3. retdata: boolean, the method will return the string in addition to printing it if set to True. Defaults to False. Returns: None but prints a string to the console if retdata is False, otherwise returns a string.
convert_php/convert_php.py
def translate_array(self, string, language, level=3, retdata=False): """Unserializes a serialized php array and prints it to the console as a data structure in the specified language. Used to translate or convert a php array into a data structure in another language. Currently supports, PHP, Python, Javascript, and JSON. Args: string: a string of serialized php language: a string representing the desired output format for the array. level: integer, indentation level in spaces. Defaults to 3. retdata: boolean, the method will return the string in addition to printing it if set to True. Defaults to false. Returns: None but prints a string to the console if retdata is False, otherwise returns a string. """ language = language.lower() assert self.is_built_in(language) or language in self.outer_templates, \ "Sorry, " + language + " is not a supported language." # Serialized data converted to a python data structure (list of tuples) data = phpserialize.loads(bytes(string, 'utf-8'), array_hook=list, decode_strings=True) # If language conversion is supported by python avoid recursion entirely # and use a built in library if self.is_built_in(language): self.get_built_in(language, level, data) print(self) return self.data_structure if retdata else None # The language is not supported. Use recursion to build a data structure. def loop_print(iterable, level=3): """ Loops over a python representation of a php array (list of tuples) and constructs a representation in another language. Translates a php array into another structure. Args: iterable: list or tuple to unpack. level: integer, number of spaces to use for indentation """ retval = '' indentation = ' ' * level # Base case - variable is not an iterable if not self.is_iterable(iterable) or isinstance(iterable, str): non_iterable = str(iterable) return str(non_iterable) # Recursive case for item in iterable: # If item is a tuple it should be a key, value pair if isinstance(item, tuple) and len(item) == 2: # Get the key value pair key = item[0] val = loop_print(item[1], level=level+3) # Translate special values val = self.translate_val(language, val) if language in self.lang_specific_values \ and val in self.lang_specific_values[language] else val # Convert keys to their properly formatted strings # Integers are not quoted as array keys key = str(key) if isinstance(key, int) else '\'' + str(key) + '\'' # The first item is a key and the second item is an iterable, boolean needs_unpacking = hasattr(item[0],'__iter__') == False \ and hasattr(item[1],'__iter__') == True # The second item is an iterable if needs_unpacking: retval += self.get_inner_template(language, 'iterable', indentation, key, val) # The second item is not an iterable else: # Convert values to their properly formatted strings # Integers and booleans are not quoted as array values val = str(val) if val.isdigit() or val in self.lang_specific_values[language].values() else '\'' + str(val) + '\'' retval += self.get_inner_template(language, 'singular', indentation, key, val) return retval # Execute the recursive call in language specific wrapper template self.data_structure = self.outer_templates[language] % (loop_print(data)) print(self) return self.data_structure if retdata else None
def translate_array(self, string, language, level=3, retdata=False): """Unserializes a serialized php array and prints it to the console as a data structure in the specified language. Used to translate or convert a php array into a data structure in another language. Currently supports, PHP, Python, Javascript, and JSON. Args: string: a string of serialized php language: a string representing the desired output format for the array. level: integer, indentation level in spaces. Defaults to 3. retdata: boolean, the method will return the string in addition to printing it if set to True. Defaults to false. Returns: None but prints a string to the console if retdata is False, otherwise returns a string. """ language = language.lower() assert self.is_built_in(language) or language in self.outer_templates, \ "Sorry, " + language + " is not a supported language." # Serialized data converted to a python data structure (list of tuples) data = phpserialize.loads(bytes(string, 'utf-8'), array_hook=list, decode_strings=True) # If language conversion is supported by python avoid recursion entirely # and use a built in library if self.is_built_in(language): self.get_built_in(language, level, data) print(self) return self.data_structure if retdata else None # The language is not supported. Use recursion to build a data structure. def loop_print(iterable, level=3): """ Loops over a python representation of a php array (list of tuples) and constructs a representation in another language. Translates a php array into another structure. Args: iterable: list or tuple to unpack. level: integer, number of spaces to use for indentation """ retval = '' indentation = ' ' * level # Base case - variable is not an iterable if not self.is_iterable(iterable) or isinstance(iterable, str): non_iterable = str(iterable) return str(non_iterable) # Recursive case for item in iterable: # If item is a tuple it should be a key, value pair if isinstance(item, tuple) and len(item) == 2: # Get the key value pair key = item[0] val = loop_print(item[1], level=level+3) # Translate special values val = self.translate_val(language, val) if language in self.lang_specific_values \ and val in self.lang_specific_values[language] else val # Convert keys to their properly formatted strings # Integers are not quoted as array keys key = str(key) if isinstance(key, int) else '\'' + str(key) + '\'' # The first item is a key and the second item is an iterable, boolean needs_unpacking = hasattr(item[0],'__iter__') == False \ and hasattr(item[1],'__iter__') == True # The second item is an iterable if needs_unpacking: retval += self.get_inner_template(language, 'iterable', indentation, key, val) # The second item is not an iterable else: # Convert values to their properly formatted strings # Integers and booleans are not quoted as array values val = str(val) if val.isdigit() or val in self.lang_specific_values[language].values() else '\'' + str(val) + '\'' retval += self.get_inner_template(language, 'singular', indentation, key, val) return retval # Execute the recursive call in language specific wrapper template self.data_structure = self.outer_templates[language] % (loop_print(data)) print(self) return self.data_structure if retdata else None
[ "Unserializes", "a", "serialized", "php", "array", "and", "prints", "it", "to", "the", "console", "as", "a", "data", "structure", "in", "the", "specified", "language", ".", "Used", "to", "translate", "or", "convert", "a", "php", "array", "into", "a", "data", "structure", "in", "another", "language", ".", "Currently", "supports", "PHP", "Python", "Javascript", "and", "JSON", "." ]
bbusenius/Diablo-Python
python
https://github.com/bbusenius/Diablo-Python/blob/646ac5a6f1c79cf9b928a4e2a7979988698b6c82/convert_php/convert_php.py#L166-L260
[ "def", "translate_array", "(", "self", ",", "string", ",", "language", ",", "level", "=", "3", ",", "retdata", "=", "False", ")", ":", "language", "=", "language", ".", "lower", "(", ")", "assert", "self", ".", "is_built_in", "(", "language", ")", "or", "language", "in", "self", ".", "outer_templates", ",", "\"Sorry, \"", "+", "language", "+", "\" is not a supported language.\"", "# Serialized data converted to a python data structure (list of tuples)", "data", "=", "phpserialize", ".", "loads", "(", "bytes", "(", "string", ",", "'utf-8'", ")", ",", "array_hook", "=", "list", ",", "decode_strings", "=", "True", ")", "# If language conversion is supported by python avoid recursion entirely", "# and use a built in library", "if", "self", ".", "is_built_in", "(", "language", ")", ":", "self", ".", "get_built_in", "(", "language", ",", "level", ",", "data", ")", "print", "(", "self", ")", "return", "self", ".", "data_structure", "if", "retdata", "else", "None", "# The language is not supported. Use recursion to build a data structure.", "def", "loop_print", "(", "iterable", ",", "level", "=", "3", ")", ":", "\"\"\"\n Loops over a python representation of a php array \n (list of tuples) and constructs a representation in another language.\n Translates a php array into another structure.\n\n Args:\n iterable: list or tuple to unpack.\n\n level: integer, number of spaces to use for indentation\n \"\"\"", "retval", "=", "''", "indentation", "=", "' '", "*", "level", "# Base case - variable is not an iterable", "if", "not", "self", ".", "is_iterable", "(", "iterable", ")", "or", "isinstance", "(", "iterable", ",", "str", ")", ":", "non_iterable", "=", "str", "(", "iterable", ")", "return", "str", "(", "non_iterable", ")", "# Recursive case", "for", "item", "in", "iterable", ":", "# If item is a tuple it should be a key, value pair", "if", "isinstance", "(", "item", ",", "tuple", ")", "and", "len", "(", "item", ")", "==", "2", ":", "# Get the key value pair", "key", "=", "item", "[", "0", "]", "val", "=", "loop_print", "(", "item", "[", "1", "]", ",", "level", "=", "level", "+", "3", ")", "# Translate special values", "val", "=", "self", ".", "translate_val", "(", "language", ",", "val", ")", "if", "language", "in", "self", ".", "lang_specific_values", "and", "val", "in", "self", ".", "lang_specific_values", "[", "language", "]", "else", "val", "# Convert keys to their properly formatted strings", "# Integers are not quoted as array keys", "key", "=", "str", "(", "key", ")", "if", "isinstance", "(", "key", ",", "int", ")", "else", "'\\''", "+", "str", "(", "key", ")", "+", "'\\''", "# The first item is a key and the second item is an iterable, boolean", "needs_unpacking", "=", "hasattr", "(", "item", "[", "0", "]", ",", "'__iter__'", ")", "==", "False", "and", "hasattr", "(", "item", "[", "1", "]", ",", "'__iter__'", ")", "==", "True", "# The second item is an iterable", "if", "needs_unpacking", ":", "retval", "+=", "self", ".", "get_inner_template", "(", "language", ",", "'iterable'", ",", "indentation", ",", "key", ",", "val", ")", "# The second item is not an iterable", "else", ":", "# Convert values to their properly formatted strings", "# Integers and booleans are not quoted as array values", "val", "=", "str", "(", "val", ")", "if", "val", ".", "isdigit", "(", ")", "or", "val", "in", "self", ".", "lang_specific_values", "[", "language", "]", ".", "values", "(", ")", "else", "'\\''", "+", "str", "(", "val", ")", "+", "'\\''", "retval", "+=", "self", ".", "get_inner_template", "(", "language", 
",", "'singular'", ",", "indentation", ",", "key", ",", "val", ")", "return", "retval", "# Execute the recursive call in language specific wrapper template", "self", ".", "data_structure", "=", "self", ".", "outer_templates", "[", "language", "]", "%", "(", "loop_print", "(", "data", ")", ")", "print", "(", "self", ")", "return", "self", ".", "data_structure", "if", "retdata", "else", "None" ]
646ac5a6f1c79cf9b928a4e2a7979988698b6c82
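A usage sketch for the method above, assuming the phpserialize dependency is installed, that the class is importable from the path shown in the record, and that the class's language tables cover the 'php' target; the serialized string is hand-written.

from convert_php.convert_php import ConvertPHP   # import path assumed from convert_php/convert_php.py

converter = ConvertPHP()
serialized = 'a:1:{s:3:"key";s:5:"value";}'      # PHP's array('key' => 'value'), serialized
converter.translate_array(serialized, 'python')                       # built-in pprint branch, prints the result
as_php = converter.translate_array(serialized, 'php', retdata=True)   # recursive template branch, also returns the string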
valid
PCA.pc
e.g. 1000 x 2 U[:, :npc] * d[:npc], to plot etc.
pca_so.py
def pc( self ): """ e.g. 1000 x 2 U[:, :npc] * d[:npc], to plot etc. """ n = self.npc return self.U[:, :n] * self.d[:n]
def pc( self ): """ e.g. 1000 x 2 U[:, :npc] * d[:npc], to plot etc. """ n = self.npc return self.U[:, :n] * self.d[:n]
[ "e", ".", "g", ".", "1000", "x", "2", "U", "[", ":", ":", "npc", "]", "*", "d", "[", ":", "npc", "]", "to", "plot", "etc", "." ]
hobson/pug
python
https://github.com/hobson/pug/blob/f183e2b29e0b3efa425a9b75cfe001b28a279acc/pca_so.py#L81-L84
[ "def", "pc", "(", "self", ")", ":", "n", "=", "self", ".", "npc", "return", "self", ".", "U", "[", ":", ",", ":", "n", "]", "*", "self", ".", "d", "[", ":", "n", "]" ]
f183e2b29e0b3efa425a9b75cfe001b28a279acc
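Because the record only shows the property body, here is a standalone numpy sketch of the same projection, scaling the first npc left singular vectors by their singular values; the matrix and npc are made up.

import numpy as np

A = np.random.rand(1000, 4)
U, d, Vt = np.linalg.svd(A - A.mean(axis=0), full_matrices=False)   # centered SVD, as a PCA typically uses
npc = 2
pcs = U[:, :npc] * d[:npc]                                          # same expression as PCA.pc: 1000 x 2 scores to plot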
valid
get
The only API function for the config module. :return: {dict} the loaded, validated configuration.
projects/config.py
def get(): """ Only API function for the config module. :return: {dict} loaded validated configuration. """ config = {} try: config = _load_config() except IOError: try: _create_default_config() config = _load_config() except IOError as e: raise ConfigError(_FILE_CREATION_ERROR.format(e.args[0])) except SyntaxError as e: raise ConfigError(_JSON_SYNTAX_ERROR.format(e.args[0])) except Exception: raise ConfigError(_JSON_SYNTAX_ERROR.format('Yaml syntax error..')) try: _validate(config) except KeyError as e: raise ConfigError(_MANDATORY_KEY_ERROR.format(e.args[0])) except SyntaxError as e: raise ConfigError(_INVALID_KEY_ERROR.format(e.args[0])) except ValueError as e: raise ConfigError(_INVALID_VALUE_ERROR.format(e.args[0])) config['projects-path'] = os.path.expanduser(config['projects-path']) _complete_config(config) return config
def get(): """ Only API function for the config module. :return: {dict} loaded validated configuration. """ config = {} try: config = _load_config() except IOError: try: _create_default_config() config = _load_config() except IOError as e: raise ConfigError(_FILE_CREATION_ERROR.format(e.args[0])) except SyntaxError as e: raise ConfigError(_JSON_SYNTAX_ERROR.format(e.args[0])) except Exception: raise ConfigError(_JSON_SYNTAX_ERROR.format('Yaml syntax error..')) try: _validate(config) except KeyError as e: raise ConfigError(_MANDATORY_KEY_ERROR.format(e.args[0])) except SyntaxError as e: raise ConfigError(_INVALID_KEY_ERROR.format(e.args[0])) except ValueError as e: raise ConfigError(_INVALID_VALUE_ERROR.format(e.args[0])) config['projects-path'] = os.path.expanduser(config['projects-path']) _complete_config(config) return config
[ "Only", "API", "function", "for", "the", "config", "module", "." ]
tiborsimon/projects
python
https://github.com/tiborsimon/projects/blob/44d1caf2bab001a2b0bf33c40d7669ae1206f534/projects/config.py#L41-L71
[ "def", "get", "(", ")", ":", "config", "=", "{", "}", "try", ":", "config", "=", "_load_config", "(", ")", "except", "IOError", ":", "try", ":", "_create_default_config", "(", ")", "config", "=", "_load_config", "(", ")", "except", "IOError", "as", "e", ":", "raise", "ConfigError", "(", "_FILE_CREATION_ERROR", ".", "format", "(", "e", ".", "args", "[", "0", "]", ")", ")", "except", "SyntaxError", "as", "e", ":", "raise", "ConfigError", "(", "_JSON_SYNTAX_ERROR", ".", "format", "(", "e", ".", "args", "[", "0", "]", ")", ")", "except", "Exception", ":", "raise", "ConfigError", "(", "_JSON_SYNTAX_ERROR", ".", "format", "(", "'Yaml syntax error..'", ")", ")", "try", ":", "_validate", "(", "config", ")", "except", "KeyError", "as", "e", ":", "raise", "ConfigError", "(", "_MANDATORY_KEY_ERROR", ".", "format", "(", "e", ".", "args", "[", "0", "]", ")", ")", "except", "SyntaxError", "as", "e", ":", "raise", "ConfigError", "(", "_INVALID_KEY_ERROR", ".", "format", "(", "e", ".", "args", "[", "0", "]", ")", ")", "except", "ValueError", "as", "e", ":", "raise", "ConfigError", "(", "_INVALID_VALUE_ERROR", ".", "format", "(", "e", ".", "args", "[", "0", "]", ")", ")", "config", "[", "'projects-path'", "]", "=", "os", ".", "path", ".", "expanduser", "(", "config", "[", "'projects-path'", "]", ")", "_complete_config", "(", "config", ")", "return", "config" ]
44d1caf2bab001a2b0bf33c40d7669ae1206f534
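A usage sketch, assuming the module is importable as projects.config (per the path field) and that ConfigError is defined in the same module, as the code above implies.

from projects import config   # import path assumed from projects/config.py

try:
    cfg = config.get()
    print(cfg['projects-path'])          # already expanded via os.path.expanduser by get()
except config.ConfigError as exc:
    print('configuration problem:', exc)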
valid
_validate
Config validation Raises: KeyError on missing mandatory key SyntaxError on invalid key ValueError on invalid value for key :param config: {dict} config to validate :return: None
projects/config.py
def _validate(config): """ Config validation Raises: KeyError on missing mandatory key SyntaxError on invalid key ValueError on invalid value for key :param config: {dict} config to validate :return: None """ for mandatory_key in _mandatory_keys: if mandatory_key not in config: raise KeyError(mandatory_key) for key in config.keys(): if key not in _mandatory_keys and key not in _optional_keys: raise SyntaxError(key) if not isinstance(config[key], _default_config[key].__class__): raise ValueError(key)
def _validate(config): """ Config validation Raises: KeyError on missing mandatory key SyntaxError on invalid key ValueError on invalid value for key :param config: {dict} config to validate :return: None """ for mandatory_key in _mandatory_keys: if mandatory_key not in config: raise KeyError(mandatory_key) for key in config.keys(): if key not in _mandatory_keys and key not in _optional_keys: raise SyntaxError(key) if not isinstance(config[key], _default_config[key].__class__): raise ValueError(key)
[ "Config", "validation", "Raises", ":", "KeyError", "on", "missing", "mandatory", "key", "SyntaxError", "on", "invalid", "key", "ValueError", "on", "invalid", "value", "for", "key", ":", "param", "config", ":", "{", "dict", "}", "config", "to", "validate", ":", "return", ":", "None" ]
tiborsimon/projects
python
https://github.com/tiborsimon/projects/blob/44d1caf2bab001a2b0bf33c40d7669ae1206f534/projects/config.py#L117-L133
[ "def", "_validate", "(", "config", ")", ":", "for", "mandatory_key", "in", "_mandatory_keys", ":", "if", "mandatory_key", "not", "in", "config", ":", "raise", "KeyError", "(", "mandatory_key", ")", "for", "key", "in", "config", ".", "keys", "(", ")", ":", "if", "key", "not", "in", "_mandatory_keys", "and", "key", "not", "in", "_optional_keys", ":", "raise", "SyntaxError", "(", "key", ")", "if", "not", "isinstance", "(", "config", "[", "key", "]", ",", "_default_config", "[", "key", "]", ".", "__class__", ")", ":", "raise", "ValueError", "(", "key", ")" ]
44d1caf2bab001a2b0bf33c40d7669ae1206f534
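Illustrative configs for the three failure modes above; the key names stand in for the module's private _mandatory_keys/_optional_keys/_default_config, so treat them as assumptions rather than the real schema.

ok      = {'projects-path': '~/projects'}        # every mandatory key present, values of the right type
missing = {}                                     # would raise KeyError('projects-path')
unknown = {'projects-path': '~/p', 'oops': 1}    # would raise SyntaxError('oops')
badtype = {'projects-path': 42}                  # would raise ValueError('projects-path')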
valid
_create_default_config
Writes the full default configuration to the appropriate place. Raises: IOError - on unsuccessful file write :return: None
projects/config.py
def _create_default_config(): """ Writes the full default configuration to the appropriate place. Raises: IOError - on unsuccesful file write :return: None """ config_path = _get_config_path() with open(config_path, 'w+') as f: yaml.dump(_default_config, f, default_flow_style=False)
def _create_default_config(): """ Writes the full default configuration to the appropriate place. Raises: IOError - on unsuccesful file write :return: None """ config_path = _get_config_path() with open(config_path, 'w+') as f: yaml.dump(_default_config, f, default_flow_style=False)
[ "Writes", "the", "full", "default", "configuration", "to", "the", "appropriate", "place", ".", "Raises", ":", "IOError", "-", "on", "unsuccesful", "file", "write", ":", "return", ":", "None" ]
tiborsimon/projects
python
https://github.com/tiborsimon/projects/blob/44d1caf2bab001a2b0bf33c40d7669ae1206f534/projects/config.py#L136-L144
[ "def", "_create_default_config", "(", ")", ":", "config_path", "=", "_get_config_path", "(", ")", "with", "open", "(", "config_path", ",", "'w+'", ")", "as", "f", ":", "yaml", ".", "dump", "(", "_default_config", ",", "f", ",", "default_flow_style", "=", "False", ")" ]
44d1caf2bab001a2b0bf33c40d7669ae1206f534
valid
reusable
Create a reusable class from a generator function Parameters ---------- func: GeneratorCallable[T_yield, T_send, T_return] the function to wrap Note ---- * the callable must have an inspectable signature * If bound to a class, the new reusable generator is callable as a method. To opt out of this, add a :func:`staticmethod` decorator above this decorator.
gentools/core.py
def reusable(func): """Create a reusable class from a generator function Parameters ---------- func: GeneratorCallable[T_yield, T_send, T_return] the function to wrap Note ---- * the callable must have an inspectable signature * If bound to a class, the new reusable generator is callable as a method. To opt out of this, add a :func:`staticmethod` decorator above this decorator. """ sig = signature(func) origin = func while hasattr(origin, '__wrapped__'): origin = origin.__wrapped__ return type( origin.__name__, (ReusableGenerator, ), dict([ ('__doc__', origin.__doc__), ('__module__', origin.__module__), ('__signature__', sig), ('__wrapped__', staticmethod(func)), ] + [ (name, property(compose(itemgetter(name), attrgetter('_bound_args.arguments')))) for name in sig.parameters ] + ([ ('__qualname__', origin.__qualname__), ] if sys.version_info > (3, ) else [])))
def reusable(func): """Create a reusable class from a generator function Parameters ---------- func: GeneratorCallable[T_yield, T_send, T_return] the function to wrap Note ---- * the callable must have an inspectable signature * If bound to a class, the new reusable generator is callable as a method. To opt out of this, add a :func:`staticmethod` decorator above this decorator. """ sig = signature(func) origin = func while hasattr(origin, '__wrapped__'): origin = origin.__wrapped__ return type( origin.__name__, (ReusableGenerator, ), dict([ ('__doc__', origin.__doc__), ('__module__', origin.__module__), ('__signature__', sig), ('__wrapped__', staticmethod(func)), ] + [ (name, property(compose(itemgetter(name), attrgetter('_bound_args.arguments')))) for name in sig.parameters ] + ([ ('__qualname__', origin.__qualname__), ] if sys.version_info > (3, ) else [])))
[ "Create", "a", "reusable", "class", "from", "a", "generator", "function" ]
ariebovenberg/gentools
python
https://github.com/ariebovenberg/gentools/blob/4a1f9f928c7f8b4752b69168858e83b4b23d6bcb/gentools/core.py#L152-L186
[ "def", "reusable", "(", "func", ")", ":", "sig", "=", "signature", "(", "func", ")", "origin", "=", "func", "while", "hasattr", "(", "origin", ",", "'__wrapped__'", ")", ":", "origin", "=", "origin", ".", "__wrapped__", "return", "type", "(", "origin", ".", "__name__", ",", "(", "ReusableGenerator", ",", ")", ",", "dict", "(", "[", "(", "'__doc__'", ",", "origin", ".", "__doc__", ")", ",", "(", "'__module__'", ",", "origin", ".", "__module__", ")", ",", "(", "'__signature__'", ",", "sig", ")", ",", "(", "'__wrapped__'", ",", "staticmethod", "(", "func", ")", ")", ",", "]", "+", "[", "(", "name", ",", "property", "(", "compose", "(", "itemgetter", "(", "name", ")", ",", "attrgetter", "(", "'_bound_args.arguments'", ")", ")", ")", ")", "for", "name", "in", "sig", ".", "parameters", "]", "+", "(", "[", "(", "'__qualname__'", ",", "origin", ".", "__qualname__", ")", ",", "]", "if", "sys", ".", "version_info", ">", "(", "3", ",", ")", "else", "[", "]", ")", ")", ")" ]
4a1f9f928c7f8b4752b69168858e83b4b23d6bcb
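A usage sketch, assuming gentools exports the decorator at package level and that a ReusableGenerator instance re-invokes the wrapped function each time it is iterated.

from gentools import reusable   # import path assumed

@reusable
def countdown(n):
    while n:
        yield n
        n -= 1

gen = countdown(3)
print(list(gen))   # [3, 2, 1]
print(list(gen))   # [3, 2, 1] again: the instance can be re-iterated
print(gen.n)       # 3, exposed via the property() entries built from sig.parameters above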
valid
sendreturn
Send an item into a generator expecting a final return value Parameters ---------- gen: ~typing.Generator[T_yield, T_send, T_return] the generator to send the value to value: T_send the value to send Raises ------ RuntimeError if the generator did not return as expected Returns ------- T_return the generator's return value
gentools/core.py
def sendreturn(gen, value): """Send an item into a generator expecting a final return value Parameters ---------- gen: ~typing.Generator[T_yield, T_send, T_return] the generator to send the value to value: T_send the value to send Raises ------ RuntimeError if the generator did not return as expected Returns ------- T_return the generator's return value """ try: gen.send(value) except StopIteration as e: return stopiter_value(e) else: raise RuntimeError('generator did not return as expected')
def sendreturn(gen, value): """Send an item into a generator expecting a final return value Parameters ---------- gen: ~typing.Generator[T_yield, T_send, T_return] the generator to send the value to value: T_send the value to send Raises ------ RuntimeError if the generator did not return as expected Returns ------- T_return the generator's return value """ try: gen.send(value) except StopIteration as e: return stopiter_value(e) else: raise RuntimeError('generator did not return as expected')
[ "Send", "an", "item", "into", "a", "generator", "expecting", "a", "final", "return", "value" ]
ariebovenberg/gentools
python
https://github.com/ariebovenberg/gentools/blob/4a1f9f928c7f8b4752b69168858e83b4b23d6bcb/gentools/core.py#L297-L322
[ "def", "sendreturn", "(", "gen", ",", "value", ")", ":", "try", ":", "gen", ".", "send", "(", "value", ")", "except", "StopIteration", "as", "e", ":", "return", "stopiter_value", "(", "e", ")", "else", ":", "raise", "RuntimeError", "(", "'generator did not return as expected'", ")" ]
4a1f9f928c7f8b4752b69168858e83b4b23d6bcb
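A usage sketch; only the package-level import path is an assumption, the behaviour follows directly from the code above (send the value, catch StopIteration, return its payload).

from gentools import sendreturn   # import path assumed

def ask():
    answer = yield 'question'
    return answer

g = ask()
next(g)                     # advance to the first yield
print(sendreturn(g, 42))    # 42: the generator returned, so we get its return value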
valid
imap_send
Apply a function to all ``send`` values of a generator Parameters ---------- func: ~typing.Callable[[T_send], T_mapped] the function to apply gen: Generable[T_yield, T_mapped, T_return] the generator iterable. Returns ------- ~typing.Generator[T_yield, T_send, T_return] the mapped generator
gentools/core.py
def imap_send(func, gen): """Apply a function to all ``send`` values of a generator Parameters ---------- func: ~typing.Callable[[T_send], T_mapped] the function to apply gen: Generable[T_yield, T_mapped, T_return] the generator iterable. Returns ------- ~typing.Generator[T_yield, T_send, T_return] the mapped generator """ gen = iter(gen) assert _is_just_started(gen) yielder = yield_from(gen) for item in yielder: with yielder: yielder.send(func((yield item))) return_(yielder.result)
def imap_send(func, gen): """Apply a function to all ``send`` values of a generator Parameters ---------- func: ~typing.Callable[[T_send], T_mapped] the function to apply gen: Generable[T_yield, T_mapped, T_return] the generator iterable. Returns ------- ~typing.Generator[T_yield, T_send, T_return] the mapped generator """ gen = iter(gen) assert _is_just_started(gen) yielder = yield_from(gen) for item in yielder: with yielder: yielder.send(func((yield item))) return_(yielder.result)
[ "Apply", "a", "function", "to", "all", "send", "values", "of", "a", "generator" ]
ariebovenberg/gentools
python
https://github.com/ariebovenberg/gentools/blob/4a1f9f928c7f8b4752b69168858e83b4b23d6bcb/gentools/core.py#L351-L372
[ "def", "imap_send", "(", "func", ",", "gen", ")", ":", "gen", "=", "iter", "(", "gen", ")", "assert", "_is_just_started", "(", "gen", ")", "yielder", "=", "yield_from", "(", "gen", ")", "for", "item", "in", "yielder", ":", "with", "yielder", ":", "yielder", ".", "send", "(", "func", "(", "(", "yield", "item", ")", ")", ")", "return_", "(", "yielder", ".", "result", ")" ]
4a1f9f928c7f8b4752b69168858e83b4b23d6bcb
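A usage sketch showing that the mapping is applied to what the caller sends in before the inner generator sees it; the import paths are assumptions.

from gentools import imap_send, sendreturn   # import paths assumed

def echo():
    received = yield 'ready'
    return received

mapped = imap_send(str.upper, echo())
print(next(mapped))                 # 'ready': yielded items pass through unchanged
print(sendreturn(mapped, 'hi'))     # 'HI': the sent value was mapped before reaching echo()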
valid
irelay
Create a new generator by relaying yield/send interactions through another generator Parameters ---------- gen: Generable[T_yield, T_send, T_return] the original generator thru: ~typing.Callable[[T_yield], ~typing.Generator] the generator callable through which each interaction is relayed Returns ------- ~typing.Generator the relayed generator
gentools/core.py
def irelay(gen, thru): """Create a new generator by relaying yield/send interactions through another generator Parameters ---------- gen: Generable[T_yield, T_send, T_return] the original generator thru: ~typing.Callable[[T_yield], ~typing.Generator] the generator callable through which each interaction is relayed Returns ------- ~typing.Generator the relayed generator """ gen = iter(gen) assert _is_just_started(gen) yielder = yield_from(gen) for item in yielder: with yielder: subgen = thru(item) subyielder = yield_from(subgen) for subitem in subyielder: with subyielder: subyielder.send((yield subitem)) yielder.send(subyielder.result) return_(yielder.result)
def irelay(gen, thru): """Create a new generator by relaying yield/send interactions through another generator Parameters ---------- gen: Generable[T_yield, T_send, T_return] the original generator thru: ~typing.Callable[[T_yield], ~typing.Generator] the generator callable through which each interaction is relayed Returns ------- ~typing.Generator the relayed generator """ gen = iter(gen) assert _is_just_started(gen) yielder = yield_from(gen) for item in yielder: with yielder: subgen = thru(item) subyielder = yield_from(subgen) for subitem in subyielder: with subyielder: subyielder.send((yield subitem)) yielder.send(subyielder.result) return_(yielder.result)
[ "Create", "a", "new", "generator", "by", "relaying", "yield", "/", "send", "interactions", "through", "another", "generator" ]
ariebovenberg/gentools
python
https://github.com/ariebovenberg/gentools/blob/4a1f9f928c7f8b4752b69168858e83b4b23d6bcb/gentools/core.py#L400-L431
[ "def", "irelay", "(", "gen", ",", "thru", ")", ":", "gen", "=", "iter", "(", "gen", ")", "assert", "_is_just_started", "(", "gen", ")", "yielder", "=", "yield_from", "(", "gen", ")", "for", "item", "in", "yielder", ":", "with", "yielder", ":", "subgen", "=", "thru", "(", "item", ")", "subyielder", "=", "yield_from", "(", "subgen", ")", "for", "subitem", "in", "subyielder", ":", "with", "subyielder", ":", "subyielder", ".", "send", "(", "(", "yield", "subitem", ")", ")", "yielder", ".", "send", "(", "subyielder", ".", "result", ")", "return_", "(", "yielder", ".", "result", ")" ]
4a1f9f928c7f8b4752b69168858e83b4b23d6bcb
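A usage sketch: each item yielded by the base generator is routed through a fresh thru generator, and thru's return value is what the base generator receives back; the import paths are assumptions.

from gentools import irelay, sendreturn   # import paths assumed

def base():
    reply = yield 'item'
    return reply

def thru(item):
    response = yield item.upper()   # transform the outgoing item
    return response                 # hand the caller's reply back unchanged

relayed = irelay(base(), thru)
print(next(relayed))                # 'ITEM'
print(sendreturn(relayed, 'ok'))    # 'ok'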
valid
_data_integrity_check
Checks if all command dependencies refer to an existing command. If not, a ProjectfileError will be raised with the problematic dependency and its command. :param data: parsed raw data set. :return: None
projects/projectfile/parser/__init__.py
def _data_integrity_check(data): """Checks if all command dependencies refers to and existing command. If not, a ProjectfileError will be raised with the problematic dependency and it's command. :param data: parsed raw data set. :return: None """ deps = [] for command in data['commands']: if 'dependencies' in data['commands'][command]: for d in data['commands'][command]['dependencies']: deps.append({ 'd': d, 'c': command }) for d in deps: if d['d'] not in data['commands']: raise error.ProjectfileError({ 'error': error.PROJECTFILE_INVALID_DEPENDENCY.format(d['d'], d['c']) })
def _data_integrity_check(data): """Checks if all command dependencies refers to and existing command. If not, a ProjectfileError will be raised with the problematic dependency and it's command. :param data: parsed raw data set. :return: None """ deps = [] for command in data['commands']: if 'dependencies' in data['commands'][command]: for d in data['commands'][command]['dependencies']: deps.append({ 'd': d, 'c': command }) for d in deps: if d['d'] not in data['commands']: raise error.ProjectfileError({ 'error': error.PROJECTFILE_INVALID_DEPENDENCY.format(d['d'], d['c']) })
[ "Checks", "if", "all", "command", "dependencies", "refers", "to", "and", "existing", "command", ".", "If", "not", "a", "ProjectfileError", "will", "be", "raised", "with", "the", "problematic", "dependency", "and", "it", "s", "command", ".", ":", "param", "data", ":", "parsed", "raw", "data", "set", ".", ":", "return", ":", "None" ]
tiborsimon/projects
python
https://github.com/tiborsimon/projects/blob/44d1caf2bab001a2b0bf33c40d7669ae1206f534/projects/projectfile/parser/__init__.py#L43-L62
[ "def", "_data_integrity_check", "(", "data", ")", ":", "deps", "=", "[", "]", "for", "command", "in", "data", "[", "'commands'", "]", ":", "if", "'dependencies'", "in", "data", "[", "'commands'", "]", "[", "command", "]", ":", "for", "d", "in", "data", "[", "'commands'", "]", "[", "command", "]", "[", "'dependencies'", "]", ":", "deps", ".", "append", "(", "{", "'d'", ":", "d", ",", "'c'", ":", "command", "}", ")", "for", "d", "in", "deps", ":", "if", "d", "[", "'d'", "]", "not", "in", "data", "[", "'commands'", "]", ":", "raise", "error", ".", "ProjectfileError", "(", "{", "'error'", ":", "error", ".", "PROJECTFILE_INVALID_DEPENDENCY", ".", "format", "(", "d", "[", "'d'", "]", ",", "d", "[", "'c'", "]", ")", "}", ")" ]
44d1caf2bab001a2b0bf33c40d7669ae1206f534
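Illustrative input shapes for the check above (the function is private to the parser, so this only sketches the data rather than importing it); the command names are made up.

ok  = {'commands': {'build': {'dependencies': ['clean']}, 'clean': {}}}   # passes: 'clean' exists
bad = {'commands': {'build': {'dependencies': ['missing']}}}              # raises error.ProjectfileError for 'missing'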
valid
_link_rels
Populate any database related fields (ForeignKeyField, OneToOneField) that have `_get`ters to populate them with
pug/decorators.py
def _link_rels(obj, fields=None, save=False, overwrite=False): """Populate any database related fields (ForeignKeyField, OneToOneField) that have `_get`ters to populate them with""" if not fields: meta = obj._meta fields = [f.name for f in meta.fields if hasattr(f, 'do_related_class') and not f.primary_key and hasattr(meta, '_get_' + f.name) and hasattr(meta, '_' + f.name)] for field in fields: # skip fields if they contain non-null data and `overwrite` option wasn't set if not overwrite and not isinstance(getattr(obj, field, None), NoneType): # print 'skipping %s which already has a value of %s' % (field, getattr(obj, field, None)) continue if hasattr(obj, field): setattr(obj, field, getattr(obj, '_' + field, None)) if save: obj.save() return obj
def _link_rels(obj, fields=None, save=False, overwrite=False): """Populate any database related fields (ForeignKeyField, OneToOneField) that have `_get`ters to populate them with""" if not fields: meta = obj._meta fields = [f.name for f in meta.fields if hasattr(f, 'do_related_class') and not f.primary_key and hasattr(meta, '_get_' + f.name) and hasattr(meta, '_' + f.name)] for field in fields: # skip fields if they contain non-null data and `overwrite` option wasn't set if not overwrite and not isinstance(getattr(obj, field, None), NoneType): # print 'skipping %s which already has a value of %s' % (field, getattr(obj, field, None)) continue if hasattr(obj, field): setattr(obj, field, getattr(obj, '_' + field, None)) if save: obj.save() return obj
[ "Populate", "any", "database", "related", "fields", "(", "ForeignKeyField", "OneToOneField", ")", "that", "have", "_get", "ters", "to", "populate", "them", "with" ]
hobson/pug
python
https://github.com/hobson/pug/blob/f183e2b29e0b3efa425a9b75cfe001b28a279acc/pug/decorators.py#L238-L252
[ "def", "_link_rels", "(", "obj", ",", "fields", "=", "None", ",", "save", "=", "False", ",", "overwrite", "=", "False", ")", ":", "if", "not", "fields", ":", "meta", "=", "obj", ".", "_meta", "fields", "=", "[", "f", ".", "name", "for", "f", "in", "meta", ".", "fields", "if", "hasattr", "(", "f", ",", "'do_related_class'", ")", "and", "not", "f", ".", "primary_key", "and", "hasattr", "(", "meta", ",", "'_get_'", "+", "f", ".", "name", ")", "and", "hasattr", "(", "meta", ",", "'_'", "+", "f", ".", "name", ")", "]", "for", "field", "in", "fields", ":", "# skip fields if they contain non-null data and `overwrite` option wasn't set", "if", "not", "overwrite", "and", "not", "isinstance", "(", "getattr", "(", "obj", ",", "field", ",", "None", ")", ",", "NoneType", ")", ":", "# print 'skipping %s which already has a value of %s' % (field, getattr(obj, field, None))", "continue", "if", "hasattr", "(", "obj", ",", "field", ")", ":", "setattr", "(", "obj", ",", "field", ",", "getattr", "(", "obj", ",", "'_'", "+", "field", ",", "None", ")", ")", "if", "save", ":", "obj", ".", "save", "(", ")", "return", "obj" ]
f183e2b29e0b3efa425a9b75cfe001b28a279acc
valid
_update
Update/populate any database fields that have `_get`ters to populate them with, regardless of whether they are data fields or related fields
pug/decorators.py
def _update(obj, fields=None, save=False, overwrite=False): """Update/populate any database fields that have `_get`ters to populate them with, regardless of whether they are data fields or related fields""" if not fields: meta = obj._meta fields = [f.name for f in meta.fields if not f.primary_key and hasattr(meta, '_get_' + f.name) and hasattr(meta, '_' + f.name)] # print fields fields_updated = [] for field in fields: # skip fields if they contain non-null data and `overwrite` option wasn't set if not overwrite and not getattr(obj, field, None) == None: # print 'skipping %s which already has a value of %s' % (field, getattr(obj, field, None)) continue # print field if hasattr(obj, field): # print field, getattr(obj, '_' + field, None) setattr(obj, field, getattr(obj, '_' + field, None)) if getattr(obj, field, None) != None: fields_updated += [field] if save: obj.save() return fields_updated
def _update(obj, fields=None, save=False, overwrite=False): """Update/populate any database fields that have `_get`ters to populate them with, regardless of whether they are data fields or related fields""" if not fields: meta = obj._meta fields = [f.name for f in meta.fields if not f.primary_key and hasattr(meta, '_get_' + f.name) and hasattr(meta, '_' + f.name)] # print fields fields_updated = [] for field in fields: # skip fields if they contain non-null data and `overwrite` option wasn't set if not overwrite and not getattr(obj, field, None) == None: # print 'skipping %s which already has a value of %s' % (field, getattr(obj, field, None)) continue # print field if hasattr(obj, field): # print field, getattr(obj, '_' + field, None) setattr(obj, field, getattr(obj, '_' + field, None)) if getattr(obj, field, None) != None: fields_updated += [field] if save: obj.save() return fields_updated
[ "Update", "/", "populate", "any", "database", "fields", "that", "have", "_get", "ters", "to", "populate", "them", "with", "regardless", "of", "whether", "they", "are", "data", "fields", "or", "related", "fields" ]
hobson/pug
python
https://github.com/hobson/pug/blob/f183e2b29e0b3efa425a9b75cfe001b28a279acc/pug/decorators.py#L270-L290
[ "def", "_update", "(", "obj", ",", "fields", "=", "None", ",", "save", "=", "False", ",", "overwrite", "=", "False", ")", ":", "if", "not", "fields", ":", "meta", "=", "obj", ".", "_meta", "fields", "=", "[", "f", ".", "name", "for", "f", "in", "meta", ".", "fields", "if", "not", "f", ".", "primary_key", "and", "hasattr", "(", "meta", ",", "'_get_'", "+", "f", ".", "name", ")", "and", "hasattr", "(", "meta", ",", "'_'", "+", "f", ".", "name", ")", "]", "# print fields", "fields_updated", "=", "[", "]", "for", "field", "in", "fields", ":", "# skip fields if they contain non-null data and `overwrite` option wasn't set", "if", "not", "overwrite", "and", "not", "getattr", "(", "obj", ",", "field", ",", "None", ")", "==", "None", ":", "# print 'skipping %s which already has a value of %s' % (field, getattr(obj, field, None))", "continue", "# print field", "if", "hasattr", "(", "obj", ",", "field", ")", ":", "# print field, getattr(obj, '_' + field, None)", "setattr", "(", "obj", ",", "field", ",", "getattr", "(", "obj", ",", "'_'", "+", "field", ",", "None", ")", ")", "if", "getattr", "(", "obj", ",", "field", ",", "None", ")", "!=", "None", ":", "fields_updated", "+=", "[", "field", "]", "if", "save", ":", "obj", ".", "save", "(", ")", "return", "fields_updated" ]
f183e2b29e0b3efa425a9b75cfe001b28a279acc
valid
bug_info
Prints the traceback and invokes the ipython debugger on any exception. Only invokes ipdb if you are outside an ipython or python interactive session. So scripts must be called from an OS shell in order for exceptions to ipy-shell-out. Dependencies: Needs `pip install ipdb` Arguments: exc_type (type): The exception type/class (e.g. RuntimeError) exc_value (Exception): The exception instance (e.g. the error message passed to the Exception constructor) exc_trace (Traceback): The traceback instance References: http://stackoverflow.com/a/242531/623735 Example Usage: $ python -c 'from pug import debug;x=[];x[0]' Traceback (most recent call last): File "<string>", line 1, in <module> IndexError: list index out of range > <string>(1)<module>() ipdb> x [] ipdb> locals() {'__builtins__': <module '__builtin__' (built-in)>, '__package__': None, 'x': [], 'debug': <module 'pug.debug' from 'pug/debug.py'>, '__name__': '__main__', '__doc__': None} ipdb>
pug/debug.py
def bug_info(exc_type, exc_value, exc_trace): """Prints the traceback and invokes the ipython debugger on any exception Only invokes ipydb if you are outside ipython or python interactive session. So scripts must be called from OS shell in order for exceptions to ipy-shell-out. Dependencies: Needs `pip install ipdb` Arguments: exc_type (type): The exception type/class (e.g. RuntimeError) exc_value (Exception): The exception instance (e.g. the error message passed to the Exception constructor) exc_trace (Traceback): The traceback instance References: http://stackoverflow.com/a/242531/623735 Example Usage: $ python -c 'from pug import debug;x=[];x[0]' Traceback (most recent call last): File "<string>", line 1, in <module> IndexError: list index out of range > <string>(1)<module>() ipdb> x [] ipdb> locals() {'__builtins__': <module '__builtin__' (built-in)>, '__package__': None, 'x': [], 'debug': <module 'pug.debug' from 'pug/debug.py'>, '__name__': '__main__', '__doc__': None} ipdb> """ if hasattr(sys, 'ps1') or not sys.stderr.isatty(): # We are in interactive mode or don't have a tty-like device, so we call the default hook sys.__excepthook__(exc_type, exc_value, exc_trace) else: # Need to import non-built-ins here, so if dependencies haven't been installed, both tracebacks will print # (e.g. the ImportError and the Exception that got you here) import ipdb # We are NOT in interactive mode, print the exception traceback.print_exception(exc_type, exc_value, exc_trace) print # Start the debugger in post-mortem mode. ipdb.post_mortem(exc_trace)
def bug_info(exc_type, exc_value, exc_trace): """Prints the traceback and invokes the ipython debugger on any exception Only invokes ipydb if you are outside ipython or python interactive session. So scripts must be called from OS shell in order for exceptions to ipy-shell-out. Dependencies: Needs `pip install ipdb` Arguments: exc_type (type): The exception type/class (e.g. RuntimeError) exc_value (Exception): The exception instance (e.g. the error message passed to the Exception constructor) exc_trace (Traceback): The traceback instance References: http://stackoverflow.com/a/242531/623735 Example Usage: $ python -c 'from pug import debug;x=[];x[0]' Traceback (most recent call last): File "<string>", line 1, in <module> IndexError: list index out of range > <string>(1)<module>() ipdb> x [] ipdb> locals() {'__builtins__': <module '__builtin__' (built-in)>, '__package__': None, 'x': [], 'debug': <module 'pug.debug' from 'pug/debug.py'>, '__name__': '__main__', '__doc__': None} ipdb> """ if hasattr(sys, 'ps1') or not sys.stderr.isatty(): # We are in interactive mode or don't have a tty-like device, so we call the default hook sys.__excepthook__(exc_type, exc_value, exc_trace) else: # Need to import non-built-ins here, so if dependencies haven't been installed, both tracebacks will print # (e.g. the ImportError and the Exception that got you here) import ipdb # We are NOT in interactive mode, print the exception traceback.print_exception(exc_type, exc_value, exc_trace) print # Start the debugger in post-mortem mode. ipdb.post_mortem(exc_trace)
[ "Prints", "the", "traceback", "and", "invokes", "the", "ipython", "debugger", "on", "any", "exception" ]
hobson/pug
python
https://github.com/hobson/pug/blob/f183e2b29e0b3efa425a9b75cfe001b28a279acc/pug/debug.py#L24-L66
[ "def", "bug_info", "(", "exc_type", ",", "exc_value", ",", "exc_trace", ")", ":", "if", "hasattr", "(", "sys", ",", "'ps1'", ")", "or", "not", "sys", ".", "stderr", ".", "isatty", "(", ")", ":", "# We are in interactive mode or don't have a tty-like device, so we call the default hook", "sys", ".", "__excepthook__", "(", "exc_type", ",", "exc_value", ",", "exc_trace", ")", "else", ":", "# Need to import non-built-ins here, so if dependencies haven't been installed, both tracebacks will print", "# (e.g. the ImportError and the Exception that got you here)", "import", "ipdb", "# We are NOT in interactive mode, print the exception", "traceback", ".", "print_exception", "(", "exc_type", ",", "exc_value", ",", "exc_trace", ")", "print", "# Start the debugger in post-mortem mode.", "ipdb", ".", "post_mortem", "(", "exc_trace", ")" ]
f183e2b29e0b3efa425a9b75cfe001b28a279acc
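A sketch of wiring the hook up by hand; the record's own example suggests that importing pug.debug may already install it, so this is only needed if you want to set it yourself, and the import path is taken from the path field.

import sys
from pug.debug import bug_info   # import path assumed from pug/debug.py

sys.excepthook = bug_info        # uncaught exceptions now print a traceback and drop into ipdb (real terminal only)
raise RuntimeError('boom')       # demo: triggers the post-mortem debugger when run as a script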
valid
copy_web_file_to_local
Copies a file from its location on the web to a designated place on the local machine. Args: file_path: Complete url of the file to copy, string (e.g. http://fool.com/input.css). target_path: Path and name of file on the local machine, string. (e.g. /directory/output.css) Returns: None.
file_parsing/file_parsing.py
def copy_web_file_to_local(file_path, target_path): """Copies a file from its location on the web to a designated place on the local machine. Args: file_path: Complete url of the file to copy, string (e.g. http://fool.com/input.css). target_path: Path and name of file on the local machine, string. (e.g. /directory/output.css) Returns: None. """ response = urllib.request.urlopen(file_path) f = open(target_path, 'w') f.write(response.read()) f.close()
def copy_web_file_to_local(file_path, target_path): """Copies a file from its location on the web to a designated place on the local machine. Args: file_path: Complete url of the file to copy, string (e.g. http://fool.com/input.css). target_path: Path and name of file on the local machine, string. (e.g. /directory/output.css) Returns: None. """ response = urllib.request.urlopen(file_path) f = open(target_path, 'w') f.write(response.read()) f.close()
[ "Copies", "a", "file", "from", "its", "location", "on", "the", "web", "to", "a", "designated", "place", "on", "the", "local", "machine", "." ]
bbusenius/Diablo-Python
python
https://github.com/bbusenius/Diablo-Python/blob/646ac5a6f1c79cf9b928a4e2a7979988698b6c82/file_parsing/file_parsing.py#L39-L55
[ "def", "copy_web_file_to_local", "(", "file_path", ",", "target_path", ")", ":", "response", "=", "urllib", ".", "request", ".", "urlopen", "(", "file_path", ")", "f", "=", "open", "(", "target_path", ",", "'w'", ")", "f", ".", "write", "(", "response", ".", "read", "(", ")", ")", "f", ".", "close", "(", ")" ]
646ac5a6f1c79cf9b928a4e2a7979988698b6c82
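Under Python 3, response.read() returns bytes, so writing it to a file opened in text mode ('w') raises a TypeError; a binary-mode sketch of the same idea, with a made-up function name so it is not confused with the record's version.

import urllib.request

def copy_web_file_to_local_binary(file_path, target_path):
    # same behaviour as above, but reads and writes bytes explicitly
    response = urllib.request.urlopen(file_path)
    with open(target_path, 'wb') as f:
        f.write(response.read())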
valid
get_line_count
Counts the number of lines in a file. Args: fname: string, name of the file. Returns: integer, the number of lines in the file.
file_parsing/file_parsing.py
def get_line_count(fname): """Counts the number of lines in a file. Args: fname: string, name of the file. Returns: integer, the number of lines in the file. """ i = 0 with open(fname) as f: for i, l in enumerate(f): pass return i + 1
def get_line_count(fname): """Counts the number of lines in a file. Args: fname: string, name of the file. Returns: integer, the number of lines in the file. """ i = 0 with open(fname) as f: for i, l in enumerate(f): pass return i + 1
[ "Counts", "the", "number", "of", "lines", "in", "a", "file", "." ]
bbusenius/Diablo-Python
python
https://github.com/bbusenius/Diablo-Python/blob/646ac5a6f1c79cf9b928a4e2a7979988698b6c82/file_parsing/file_parsing.py#L57-L71
[ "def", "get_line_count", "(", "fname", ")", ":", "i", "=", "0", "with", "open", "(", "fname", ")", "as", "f", ":", "for", "i", ",", "l", "in", "enumerate", "(", "f", ")", ":", "pass", "return", "i", "+", "1" ]
646ac5a6f1c79cf9b928a4e2a7979988698b6c82
valid
indent_css
Indents css that has not been indented and saves it to a new file. A new file is created if the output destination does not already exist. Args: f: string, path to file. output: string, path/name of the output file (e.g. /directory/output.css). Returns: None.
file_parsing/file_parsing.py
def indent_css(f, output): """Indentes css that has not been indented and saves it to a new file. A new file is created if the output destination does not already exist. Args: f: string, path to file. output: string, path/name of the output file (e.g. /directory/output.css). print type(response.read()) Returns: None. """ line_count = get_line_count(f) f = open(f, 'r+') output = open(output, 'r+') for line in range(line_count): string = f.readline().rstrip() if len(string) > 0: if string[-1] == ";": output.write(" " + string + "\n") else: output.write(string + "\n") output.close() f.close()
def indent_css(f, output): """Indentes css that has not been indented and saves it to a new file. A new file is created if the output destination does not already exist. Args: f: string, path to file. output: string, path/name of the output file (e.g. /directory/output.css). print type(response.read()) Returns: None. """ line_count = get_line_count(f) f = open(f, 'r+') output = open(output, 'r+') for line in range(line_count): string = f.readline().rstrip() if len(string) > 0: if string[-1] == ";": output.write(" " + string + "\n") else: output.write(string + "\n") output.close() f.close()
[ "Indentes", "css", "that", "has", "not", "been", "indented", "and", "saves", "it", "to", "a", "new", "file", ".", "A", "new", "file", "is", "created", "if", "the", "output", "destination", "does", "not", "already", "exist", "." ]
bbusenius/Diablo-Python
python
https://github.com/bbusenius/Diablo-Python/blob/646ac5a6f1c79cf9b928a4e2a7979988698b6c82/file_parsing/file_parsing.py#L73-L97
[ "def", "indent_css", "(", "f", ",", "output", ")", ":", "line_count", "=", "get_line_count", "(", "f", ")", "f", "=", "open", "(", "f", ",", "'r+'", ")", "output", "=", "open", "(", "output", ",", "'r+'", ")", "for", "line", "in", "range", "(", "line_count", ")", ":", "string", "=", "f", ".", "readline", "(", ")", ".", "rstrip", "(", ")", "if", "len", "(", "string", ")", ">", "0", ":", "if", "string", "[", "-", "1", "]", "==", "\";\"", ":", "output", ".", "write", "(", "\" \"", "+", "string", "+", "\"\\n\"", ")", "else", ":", "output", ".", "write", "(", "string", "+", "\"\\n\"", ")", "output", ".", "close", "(", ")", "f", ".", "close", "(", ")" ]
646ac5a6f1c79cf9b928a4e2a7979988698b6c82
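The docstring above promises that a new output file is created when missing, but both files are opened with 'r+', which fails if the output does not exist; a sketch of the same loop with 'w' for the output (the function name here is made up).

def indent_css_to_new_file(src, dest):
    with open(src) as infile, open(dest, 'w') as outfile:
        for line in infile:
            line = line.rstrip()
            if line:
                outfile.write(('    ' + line if line.endswith(';') else line) + '\n')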
valid
add_newlines
Adds line breaks after every occurrence of a given character in a file. Args: f: string, path to input file. output: string, path to output file. char: string, the character after which a line break is added. Returns: None.
file_parsing/file_parsing.py
def add_newlines(f, output, char): """Adds line breaks after every occurrence of a given character in a file. Args: f: string, path to input file. output: string, path to output file. char: string, the character after which to insert a line break. Returns: None. """ line_count = get_line_count(f) f = open(f, 'r+') output = open(output, 'r+') for line in range(line_count): string = f.readline() string = re.sub(char, char + '\n', string) output.write(string) output.close() f.close()
def add_newlines(f, output, char): """Adds line breaks after every occurrence of a given character in a file. Args: f: string, path to input file. output: string, path to output file. char: string, the character after which to insert a line break. Returns: None. """ line_count = get_line_count(f) f = open(f, 'r+') output = open(output, 'r+') for line in range(line_count): string = f.readline() string = re.sub(char, char + '\n', string) output.write(string) output.close() f.close()
[ "Adds", "line", "breaks", "after", "every", "occurance", "of", "a", "given", "character", "in", "a", "file", "." ]
bbusenius/Diablo-Python
python
https://github.com/bbusenius/Diablo-Python/blob/646ac5a6f1c79cf9b928a4e2a7979988698b6c82/file_parsing/file_parsing.py#L99-L116
[ "def", "add_newlines", "(", "f", ",", "output", ",", "char", ")", ":", "line_count", "=", "get_line_count", "(", "f", ")", "f", "=", "open", "(", "f", ",", "'r+'", ")", "output", "=", "open", "(", "output", ",", "'r+'", ")", "for", "line", "in", "range", "(", "line_count", ")", ":", "string", "=", "f", ".", "readline", "(", ")", "string", "=", "re", ".", "sub", "(", "char", ",", "char", "+", "'\\n'", ",", "string", ")", "output", ".", "write", "(", "string", ")" ]
646ac5a6f1c79cf9b928a4e2a7979988698b6c82
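A usage sketch for add_newlines under the same assumptions about file names and the import path. Since char is passed straight to re.sub, a regex-safe character such as '}' is used here.

from file_parsing.file_parsing import add_newlines

with open('one_line.css', 'w') as fh:
    fh.write('p {color: red;} a {color: blue;}\n')
open('split.css', 'w').close()  # the output is opened in 'r+' mode, so it must exist

add_newlines('one_line.css', 'split.css', '}')  # a line break now follows every '}'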
valid
add_whitespace_before
Adds a space before a character if there isn't one already. Args: char: string, character that needs a space before it. input_file: string, path to file to parse. output_file: string, path to destination file. Returns: None.
file_parsing/file_parsing.py
def add_whitespace_before(char, input_file, output_file): """Adds a space before a character if there isn't one already. Args: char: string, character that needs a space before it. input_file: string, path to file to parse. output_file: string, path to destination file. Returns: None. """ line_count = get_line_count(input_file) input_file = open(input_file, 'r') output_file = open(output_file, 'r+') for line in range(line_count): string = input_file.readline() # If there's not already a space before the character, add one if re.search(r'[a-zA-Z0-9]' + char, string) != None: string = re.sub(char, ' ' + char, string) output_file.write(string) output_file.close() input_file.close()
def add_whitespace_before(char, input_file, output_file): """Adds a space before a character if there isn't one already. Args: char: string, character that needs a space before it. input_file: string, path to file to parse. output_file: string, path to destination file. Returns: None. """ line_count = get_line_count(input_file) input_file = open(input_file, 'r') output_file = open(output_file, 'r+') for line in range(line_count): string = input_file.readline() # If there's not already a space before the character, add one if re.search(r'[a-zA-Z0-9]' + char, string) != None: string = re.sub(char, ' ' + char, string) output_file.write(string) output_file.close() input_file.close()
[ "Adds", "a", "space", "before", "a", "character", "if", "there", "s", "isn", "t", "one", "already", ".", "Args", ":", "char", ":", "string", "character", "that", "needs", "a", "space", "before", "it", "." ]
bbusenius/Diablo-Python
python
https://github.com/bbusenius/Diablo-Python/blob/646ac5a6f1c79cf9b928a4e2a7979988698b6c82/file_parsing/file_parsing.py#L118-L140
[ "def", "add_whitespace_before", "(", "char", ",", "input_file", ",", "output_file", ")", ":", "line_count", "=", "get_line_count", "(", "input_file", ")", "input_file", "=", "open", "(", "input_file", ",", "'r'", ")", "output_file", "=", "open", "(", "output_file", ",", "'r+'", ")", "for", "line", "in", "range", "(", "line_count", ")", ":", "string", "=", "input_file", ".", "readline", "(", ")", "# If there's not already a space before the character, add one", "if", "re", ".", "search", "(", "r'[a-zA-Z0-9]'", "+", "char", ",", "string", ")", "!=", "None", ":", "string", "=", "re", ".", "sub", "(", "char", ",", "' '", "+", "char", ",", "string", ")", "output_file", ".", "write", "(", "string", ")", "input_file", ".", "close", "(", ")" ]
646ac5a6f1c79cf9b928a4e2a7979988698b6c82
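A usage sketch for add_whitespace_before; the paths and import are illustrative assumptions.

from file_parsing.file_parsing import add_whitespace_before

with open('crowded.css', 'w') as fh:
    fh.write('p{color: red;}\n')
open('spaced.css', 'w').close()  # destination must exist ('r+' mode)

add_whitespace_before('{', 'crowded.css', 'spaced.css')  # writes 'p {color: red;}'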
valid
reformat_css
Reformats poorly written css. This function does not validate or fix errors in the code. It only gives code the proper indentation. Args: input_file: string, path to the input file. output_file: string, path to where the reformatted css should be saved. If the target file doesn't exist, a new file is created. Returns: None.
file_parsing/file_parsing.py
def reformat_css(input_file, output_file): """Reformats poorly written css. This function does not validate or fix errors in the code. It only gives code the proper indentation. Args: input_file: string, path to the input file. output_file: string, path to where the reformatted css should be saved. If the target file doesn't exist, a new file is created. Returns: None. """ # Number of lines in the file. line_count = get_line_count(input_file) # Open source and target files. f = open(input_file, 'r+') output = open(output_file, 'w') # Loop over every line in the file. for line in range(line_count): # Eliminate whitespace at the beginning and end of lines. string = f.readline().strip() # New lines after { string = re.sub('\{', '{\n', string) # New lines after ; string = re.sub('; ', ';', string) string = re.sub(';', ';\n', string) # Eliminate whitespace before comments string = re.sub('} /*', '}/*', string) # New lines after } string = re.sub('\}', '}\n', string) # New lines at the end of comments string = re.sub('\*/', '*/\n', string) # Write to the output file. output.write(string) # Close the files. output.close() f.close() # Indent the css. indent_css(output_file, output_file) # Make sure there's a space before every { add_whitespace_before("{", output_file, output_file)
def reformat_css(input_file, output_file): """Reformats poorly written css. This function does not validate or fix errors in the code. It only gives code the proper indentation. Args: input_file: string, path to the input file. output_file: string, path to where the reformatted css should be saved. If the target file doesn't exist, a new file is created. Returns: None. """ # Number of lines in the file. line_count = get_line_count(input_file) # Open source and target files. f = open(input_file, 'r+') output = open(output_file, 'w') # Loop over every line in the file. for line in range(line_count): # Eliminate whitespace at the beginning and end of lines. string = f.readline().strip() # New lines after { string = re.sub('\{', '{\n', string) # New lines after ; string = re.sub('; ', ';', string) string = re.sub(';', ';\n', string) # Eliminate whitespace before comments string = re.sub('} /*', '}/*', string) # New lines after } string = re.sub('\}', '}\n', string) # New lines at the end of comments string = re.sub('\*/', '*/\n', string) # Write to the output file. output.write(string) # Close the files. output.close() f.close() # Indent the css. indent_css(output_file, output_file) # Make sure there's a space before every { add_whitespace_before("{", output_file, output_file)
[ "Reformats", "poorly", "written", "css", ".", "This", "function", "does", "not", "validate", "or", "fix", "errors", "in", "the", "code", ".", "It", "only", "gives", "code", "the", "proper", "indentation", "." ]
bbusenius/Diablo-Python
python
https://github.com/bbusenius/Diablo-Python/blob/646ac5a6f1c79cf9b928a4e2a7979988698b6c82/file_parsing/file_parsing.py#L142-L188
[ "def", "reformat_css", "(", "input_file", ",", "output_file", ")", ":", "# Number of lines in the file.", "line_count", "=", "get_line_count", "(", "input_file", ")", "# Open source and target files.", "f", "=", "open", "(", "input_file", ",", "'r+'", ")", "output", "=", "open", "(", "output_file", ",", "'w'", ")", "# Loop over every line in the file.", "for", "line", "in", "range", "(", "line_count", ")", ":", "# Eliminate whitespace at the beginning and end of lines.", "string", "=", "f", ".", "readline", "(", ")", ".", "strip", "(", ")", "# New lines after { ", "string", "=", "re", ".", "sub", "(", "'\\{'", ",", "'{\\n'", ",", "string", ")", "# New lines after ; ", "string", "=", "re", ".", "sub", "(", "'; '", ",", "';'", ",", "string", ")", "string", "=", "re", ".", "sub", "(", "';'", ",", "';\\n'", ",", "string", ")", "# Eliminate whitespace before comments", "string", "=", "re", ".", "sub", "(", "'} /*'", ",", "'}/*'", ",", "string", ")", "# New lines after } ", "string", "=", "re", ".", "sub", "(", "'\\}'", ",", "'}\\n'", ",", "string", ")", "# New lines at the end of comments", "string", "=", "re", ".", "sub", "(", "'\\*/'", ",", "'*/\\n'", ",", "string", ")", "# Write to the output file.", "output", ".", "write", "(", "string", ")", "# Close the files.", "output", ".", "close", "(", ")", "f", ".", "close", "(", ")", "# Indent the css.", "indent_css", "(", "output_file", ",", "output_file", ")", "# Make sure there's a space before every {", "add_whitespace_before", "(", "\"{\"", ",", "output_file", ",", "output_file", ")" ]
646ac5a6f1c79cf9b928a4e2a7979988698b6c82
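A usage sketch for reformat_css, which chains the helpers above; the file names and import path are assumptions. The output file does not need to exist because it is first opened in 'w' mode.

from file_parsing.file_parsing import reformat_css

with open('ugly.css', 'w') as fh:
    fh.write('p{color:red;font-size:12px;} a{color:blue;}\n')

reformat_css('ugly.css', 'pretty.css')
with open('pretty.css') as fh:
    print(fh.read())  # one declaration per line, indented, with a space before each '{'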
valid
is_int
Checks if a string is an integer. If the string value is an integer return True, otherwise return False. Args: string: a string to test. Returns: boolean
file_parsing/file_parsing.py
def is_int(string): """ Checks if a string is an integer. If the string value is an integer return True, otherwise return False. Args: string: a string to test. Returns: boolean """ try: a = float(string) b = int(a) except ValueError: return False else: return a == b
def is_int(string): """ Checks if a string is an integer. If the string value is an integer return True, otherwise return False. Args: string: a string to test. Returns: boolean """ try: a = float(string) b = int(a) except ValueError: return False else: return a == b
[ "Checks", "if", "a", "string", "is", "an", "integer", ".", "If", "the", "string", "value", "is", "an", "integer", "return", "True", "otherwise", "return", "False", ".", "Args", ":", "string", ":", "a", "string", "to", "test", "." ]
bbusenius/Diablo-Python
python
https://github.com/bbusenius/Diablo-Python/blob/646ac5a6f1c79cf9b928a4e2a7979988698b6c82/file_parsing/file_parsing.py#L249-L266
[ "def", "is_int", "(", "string", ")", ":", "try", ":", "a", "=", "float", "(", "string", ")", "b", "=", "int", "(", "a", ")", "except", "ValueError", ":", "return", "False", "else", ":", "return", "a", "==", "b" ]
646ac5a6f1c79cf9b928a4e2a7979988698b6c82
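A quick sketch of is_int's behaviour (import path assumed from the file path above); note that whole-number float strings also pass.

from file_parsing.file_parsing import is_int

print(is_int('12'))    # True
print(is_int('12.0'))  # True  (a whole-number float string still counts)
print(is_int('12.5'))  # False
print(is_int('abc'))   # False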
valid
total_hours
Totals the hours for a given project. Takes a list of input files for which to total the hours. Each input file represents a project. There are only multiple files for the same project when the duration was more than a year. A typical entry in an input file might look like this: 8/24/14 9:30-12:00 wrote foobar code for x, wrote a unit test for foobar code, tested. 2.5 hours Args: input_files: a list of files to parse. Returns: float: the total number of hours spent on the project.
file_parsing/file_parsing.py
def total_hours(input_files): """ Totals the hours for a given projct. Takes a list of input files for which to total the hours. Each input file represents a project. There are only multiple files for the same project when the duration was more than a year. A typical entry in an input file might look like this: 8/24/14 9:30-12:00 wrote foobar code for x, wrote a unit test for foobar code, tested. 2.5 hours Args: input_files: a list of files to parse. Returns: float: the total number of hours spent on the project. """ hours = 0 # Look for singular and plural forms of the word # and allow typos. allow = set(['hours', 'hour', 'huors', 'huor']) for input_file in input_files: doc = open(input_file, 'r') for line in doc: line = line.rstrip() data = line.split(' ') if (len(data) == 2) and (is_numeric(data[0])) and (data[1].lower() in allow): hours += float(data[0]) doc.close() return hours
def total_hours(input_files): """ Totals the hours for a given projct. Takes a list of input files for which to total the hours. Each input file represents a project. There are only multiple files for the same project when the duration was more than a year. A typical entry in an input file might look like this: 8/24/14 9:30-12:00 wrote foobar code for x, wrote a unit test for foobar code, tested. 2.5 hours Args: input_files: a list of files to parse. Returns: float: the total number of hours spent on the project. """ hours = 0 # Look for singular and plural forms of the word # and allow typos. allow = set(['hours', 'hour', 'huors', 'huor']) for input_file in input_files: doc = open(input_file, 'r') for line in doc: line = line.rstrip() data = line.split(' ') if (len(data) == 2) and (is_numeric(data[0])) and (data[1].lower() in allow): hours += float(data[0]) doc.close() return hours
[ "Totals", "the", "hours", "for", "a", "given", "projct", ".", "Takes", "a", "list", "of", "input", "files", "for", "which", "to", "total", "the", "hours", ".", "Each", "input", "file", "represents", "a", "project", ".", "There", "are", "only", "multiple", "files", "for", "the", "same", "project", "when", "the", "duration", "was", "more", "than", "a", "year", ".", "A", "typical", "entry", "in", "an", "input", "file", "might", "look", "like", "this", ":" ]
bbusenius/Diablo-Python
python
https://github.com/bbusenius/Diablo-Python/blob/646ac5a6f1c79cf9b928a4e2a7979988698b6c82/file_parsing/file_parsing.py#L269-L299
[ "def", "total_hours", "(", "input_files", ")", ":", "hours", "=", "0", "# Look for singular and plural forms of the word", "# and allow typos.", "allow", "=", "set", "(", "[", "'hours'", ",", "'hour'", ",", "'huors'", ",", "'huor'", "]", ")", "for", "input_file", "in", "input_files", ":", "doc", "=", "open", "(", "input_file", ",", "'r'", ")", "for", "line", "in", "doc", ":", "line", "=", "line", ".", "rstrip", "(", ")", "data", "=", "line", ".", "split", "(", "' '", ")", "if", "(", "len", "(", "data", ")", "==", "2", ")", "and", "(", "is_numeric", "(", "data", "[", "0", "]", ")", ")", "and", "(", "data", "[", "1", "]", ".", "lower", "(", ")", "in", "allow", ")", ":", "hours", "+=", "float", "(", "data", "[", "0", "]", ")", "doc", ".", "close", "(", ")", "return", "hours" ]
646ac5a6f1c79cf9b928a4e2a7979988698b6c82
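A usage sketch for total_hours; the log file name and import path are assumptions, and the numeric check relies on is_numeric from the same module.

from file_parsing.file_parsing import total_hours

# A tiny time log in the format the docstring describes.
with open('project_log.txt', 'w') as fh:
    fh.write('8/24/14 9:30-12:00 wrote foobar code, tested.\n2.5 hours\n1 hour\n')

print(total_hours(['project_log.txt']))  # 3.5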
valid
clean_strings
Take a list of strings and clear whitespace on each one. If a value in the list is not a string pass it through untouched. Args: iterable: mixed list Returns: mixed list
file_parsing/file_parsing.py
def clean_strings(iterable): """ Take a list of strings and clear whitespace on each one. If a value in the list is not a string pass it through untouched. Args: iterable: mixed list Returns: mixed list """ retval = [] for val in iterable: try: retval.append(val.strip()) except(AttributeError): retval.append(val) return retval
def clean_strings(iterable): """ Take a list of strings and clear whitespace on each one. If a value in the list is not a string pass it through untouched. Args: iterable: mixed list Returns: mixed list """ retval = [] for val in iterable: try: retval.append(val.strip()) except(AttributeError): retval.append(val) return retval
[ "Take", "a", "list", "of", "strings", "and", "clear", "whitespace", "on", "each", "one", ".", "If", "a", "value", "in", "the", "list", "is", "not", "a", "string", "pass", "it", "through", "untouched", "." ]
bbusenius/Diablo-Python
python
https://github.com/bbusenius/Diablo-Python/blob/646ac5a6f1c79cf9b928a4e2a7979988698b6c82/file_parsing/file_parsing.py#L302-L320
[ "def", "clean_strings", "(", "iterable", ")", ":", "retval", "=", "[", "]", "for", "val", "in", "iterable", ":", "try", ":", "retval", ".", "append", "(", "val", ".", "strip", "(", ")", ")", "except", "(", "AttributeError", ")", ":", "retval", ".", "append", "(", "val", ")", "return", "retval" ]
646ac5a6f1c79cf9b928a4e2a7979988698b6c82
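A quick sketch of clean_strings (import path assumed); non-string items are passed through untouched.

from file_parsing.file_parsing import clean_strings

print(clean_strings(['  padded  ', 'clean', 42, None]))
# ['padded', 'clean', 42, None]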
valid
excel_to_html
Convert an excel spreadsheet to an html table. This function supports the conversion of merged cells. It can be used in code or run from the command-line. If passed the correct arguments it can generate fully accessible html. Args: path: string, path to the spreadsheet. sheetname: string, name of the sheet to convert. css_classes: string, space separated classnames to append to the table. caption: string, a short heading-like description of the table. details: list of strings, where the first item in the list is a string for the html summary element and the second item is a string for the details element. The summary should be very short, e.g. "Help", whereas the details element should be a long description regarding the purpose or how to navigate the table. row_headers: boolean, defaults to False. Does the table have row headers? If set to True, the first element in each row will be a <th scope="row"> element instead of a <td> element. merge: boolean, whether or not to combine cells that were merged in the spreadsheet. Returns: string, html table
file_parsing/file_parsing.py
def excel_to_html(path, sheetname='Sheet1', css_classes='', \ caption='', details=[], row_headers=False, merge=False): """ Convert an excel spreadsheet to an html table. This function supports the conversion of merged cells. It can be used in code or run from the command-line. If passed the correct arguments it can generate fully accessible html. Args: path: string, path to the spreadsheet. sheetname: string, name of the sheet to convert. css_classes: string, space separated classnames to append to the table. caption: string, a short heading-like description of the table. details: list of strings, where the first item in the list is a string for the html summary element and the second item is a string for the details element. The summary should be very short, e.g. "Help", where as the details element should be a long description regarding the purpose or how to navigate the table. row_headers: boolean, defaults to False. Does the table have row headers? If set to True, the first element in each row will be a <th scope="row"> element instead of a <td> element. merge: boolean, whether or not to combine cells that were merged in the spreadsheet. Returns: string, html table """ def get_data_on_merged_cells(): """ Build a datastructure with data on merged cells. """ # Use this to build support for merged columns and rows???? merged_cells = xls.book.sheet_by_name(sheetname).merged_cells ds = {} for crange in merged_cells: rlo, rhi, clo, chi = crange for rowx in range(rlo, rhi): for colx in range(clo, chi): # Cell (rlo, clo) (the top left one) will carry the data and # formatting info. The remainder will be recorded as blank cells, # but a renderer will apply the formatting info for the top left # cell (e.g. border, pattern) to all cells in the range. #print(str(rlo) + ' ' + str(clo)) #print(str(rowx) + ' ' + str(colx)) parent_cell = (rlo,clo) child_cell = (rowx,colx) if not parent_cell in ds: # Return data structure is a dictionary with numeric tuples # as keys. Each tuple holds the x, y coordinates of the cell. # The dictionary holds two values: # 1. A list with two numbers which represent the x/y count # starting at 1 for the current cell. # 2. A set describing which direction the cells are merged. ds[parent_cell] = [[1,1], set([])] else: if parent_cell != child_cell and child_cell[0] == parent_cell[0]: ds[parent_cell][0][0] += 1 ds[parent_cell][1].add('right') elif parent_cell != child_cell and child_cell[0] > parent_cell[0]: if child_cell[1] == parent_cell[1]: ds[parent_cell][0][1] += 1 ds[parent_cell][1].add('down') else: raise RuntimeError('Something went wrong') return ds def mark_cells_going_right(cell, curr_cell, merged_cells): """ Add a "colspan" attribute and mark empty table columns for deletion if they are part of a merged cell going right. Args: cell: BeautifulSoup element tag object representation of the current cell. curr_cell: tuple, numeric representation of the current cell. merged_cells: dictionary of of data about merged cells. """ #if curr_cell in merged_cells and merged_cells[curr_cell][1] == set(['right']): try: xcount = merged_cells[curr_cell][0][0] if xcount > 1: # No colspans on 1 cell['colspan'] = xcount col_count = xcount - 1 while col_count > 0: cell = cell.find_next_sibling() cell['class'] = 'delete' col_count -= 1 except: pass def mark_cells_going_down(cell, curr_cell, merged_cells): """ Add a "rowspan" attribute and mark empty table columns for deletion if they are part of a merged cell going down. 
Args: cell: BeautifulSoup element tag object representation of the current cell. curr_cell: tuple, numeric representation of the current cell. merged_cells: dictionary of of data about merged cells. """ if curr_cell in merged_cells and merged_cells[curr_cell][1] == set(['down']): ycount = merged_cells[curr_cell][0][1] cell['rowspan'] = ycount row_count = ycount for child_row in cell.parent.find_next_siblings(limit=row_count - 1): i = 0 for child in child_row.find_all('td'): if i == curr_cell[1]: child['class'] = 'delete' i += 1 def mark_cells_going_down_and_right(cell, curr_cell, merged_cells): """ Add "rowspan" and "colspan" attributes and mark empty columns for deletion if they are part of a merged cell going down and to the right diagonally. Args: cell: BeautifulSoup element tag object representation of the current cell. curr_cell: tuple, numeric representation of the current cell. merged_cells: dictionary of of data about merged cells. """ if curr_cell in merged_cells and \ ('down' in merged_cells[curr_cell][1] and \ 'right' in merged_cells[curr_cell][1]): xcount = merged_cells[curr_cell][0][0] ycount = merged_cells[curr_cell][0][1] row_count = ycount col_count = xcount mark_cells_going_right(cell, curr_cell, merged_cells) flag = False for child_row in [cell.parent] + cell.parent.find_all_next('tr', limit=row_count - 1): i = 0 for child in child_row.find_all('td'): if i == curr_cell[1]: mark_cells_going_right(child, curr_cell, merged_cells) if not flag: child['colspan'] = col_count child['rowspan'] = row_count flag = True else: child['class'] = 'delete' i += 1 def is_empty_th(string): """ Detects if a table cell is left empty (is a merged cell). Args: string: string """ if string[:8] == 'Unnamed:': data = string.split(' ') if is_numeric(data[1]): return True return False def mark_header_cells(html): """ Mark header cells for deletion if they need to be merged. Also, add colspan and scope attributes. Args: html: string """ th = html.find_all('th') for header in th: txt = header.string if not is_empty_th(txt): header['scope'] = 'col' count = 1 for sibling in header.find_next_siblings(): if is_empty_th(sibling.string): count += 1 sibling['class'] = 'delete' else: break if count > 1: header['colspan'] = count header['scope'] = 'colgroup' def create_caption(html, caption): """ Create a caption element for an accessible table and append it to the right part of the tree. Args: html: string caption: string """ ctag = html.new_tag('caption') ctag.insert(0, caption) html.table.insert(0, ctag) def create_summary_and_details(html, details): """ Create a summary and details element for an accessible table and insert it into the right part of the tree. Args: html: string details: string """ if len(details) != 2: msg = 'The "details" argument should be a list with two items. ' \ + 'The first item should be a string for the html summary ' \ + 'and the second should be a long description for the details ' \ + 'element. Both of those must be included and nothing else.' raise RuntimeError(msg) summary = details[0] details = details[1] if not caption: create_caption(html, caption) dtag = html.new_tag('details') stag = html.new_tag('summary') ptag = html.new_tag('p') stag.insert(0, summary) ptag.insert(0, details) dtag.insert(0, stag) dtag.append(ptag) html.table.caption.insert(1, dtag) def format_properly(html): """ Fix bad formatting from beautifulsoup. Args: html: string of html representing a table. 
""" return html.replace('\n ', '').replace('\n </td>', \ '</td>').replace('\n </th>', '</th>').replace('\n </summary>', \ '</summary>').replace('\n </p>', '</p>') def add_row_headers(html): """ Convert <td>s to <th>s if row_headers is set to True. Args: html: string, table. """ for row in html.tbody.find_all('tr'): spans_rows = 'rowspan' in row.td.attrs spans_columns = 'colspan' in row.td.attrs new_tag = html.new_tag('th') new_tag['scope'] = 'row' new_tag.string = row.td.string if spans_rows: new_tag['rowspan'] = row.td.attrs['rowspan'] new_tag['scope'] = 'rowgroup' if spans_columns: new_tag['colspan'] = row.td.attrs['colspan'] row.td.replace_with(new_tag) def beautify(html): """ Beautify the html from pandas. Args: html: table markup from pandas. """ table = html.find('table') first_tr = table.find('tr') del table['border'] del first_tr['style'] return format_properly(html.prettify(formatter='minimal')) def parse_html(html, caption, details): """ Use BeautifulSoup to correct the html for merged columns and rows. What could possibly go wrong? Args: html: string caption: string details: list of strings lenght of two Returns: string, modified html """ new_html = BeautifulSoup(html, 'html.parser') if merge: row_num = 1 # e.g. {(4, 3): [1, 'right'], (2, 1): [1, 'down']} merged_cells = get_data_on_merged_cells() rows = new_html.find('table').find('tbody').find_all('tr') for row in rows: cell_num = 0 # Why are we off by 1? Maybe because we set index to False in to_html? cells = row.find_all('td') for cell in cells: #cell['class'] = str(row_num) + ' ' + str(cell_num) # DEBUG curr_cell = (row_num, cell_num) # Mark merged cells for deletion mark_cells_going_right(cell, curr_cell, merged_cells) mark_cells_going_down(cell, curr_cell, merged_cells) mark_cells_going_down_and_right(cell, curr_cell, merged_cells) cell_num += 1 row_num += 1 # Mark header cells for deletion mark_header_cells(new_html) # Delete all the renegade cells at once destroy = new_html.find_all(attrs={'class' : 'delete' }) for item in destroy: item.extract() # Convert <td>s to <th>s if needed. if row_headers: add_row_headers(new_html) # Add caption if applicable if caption: create_caption(new_html, caption) # Add summary and details if possible if details: create_summary_and_details(new_html, details) return beautify(new_html) # Set options for pandas and load the excel file pd.options.display.max_colwidth = -1 xls = pd.ExcelFile(path) # Parse the sheet you're interested in, results in a Dataframe df = xls.parse(sheetname) # Convert the dataframe to html panda_html = df.to_html(classes=css_classes, index=False, na_rep='') # Parse the panda html to merge cells and beautify the markup return parse_html(panda_html, caption, details)
def excel_to_html(path, sheetname='Sheet1', css_classes='', \ caption='', details=[], row_headers=False, merge=False): """ Convert an excel spreadsheet to an html table. This function supports the conversion of merged cells. It can be used in code or run from the command-line. If passed the correct arguments it can generate fully accessible html. Args: path: string, path to the spreadsheet. sheetname: string, name of the sheet to convert. css_classes: string, space separated classnames to append to the table. caption: string, a short heading-like description of the table. details: list of strings, where the first item in the list is a string for the html summary element and the second item is a string for the details element. The summary should be very short, e.g. "Help", where as the details element should be a long description regarding the purpose or how to navigate the table. row_headers: boolean, defaults to False. Does the table have row headers? If set to True, the first element in each row will be a <th scope="row"> element instead of a <td> element. merge: boolean, whether or not to combine cells that were merged in the spreadsheet. Returns: string, html table """ def get_data_on_merged_cells(): """ Build a datastructure with data on merged cells. """ # Use this to build support for merged columns and rows???? merged_cells = xls.book.sheet_by_name(sheetname).merged_cells ds = {} for crange in merged_cells: rlo, rhi, clo, chi = crange for rowx in range(rlo, rhi): for colx in range(clo, chi): # Cell (rlo, clo) (the top left one) will carry the data and # formatting info. The remainder will be recorded as blank cells, # but a renderer will apply the formatting info for the top left # cell (e.g. border, pattern) to all cells in the range. #print(str(rlo) + ' ' + str(clo)) #print(str(rowx) + ' ' + str(colx)) parent_cell = (rlo,clo) child_cell = (rowx,colx) if not parent_cell in ds: # Return data structure is a dictionary with numeric tuples # as keys. Each tuple holds the x, y coordinates of the cell. # The dictionary holds two values: # 1. A list with two numbers which represent the x/y count # starting at 1 for the current cell. # 2. A set describing which direction the cells are merged. ds[parent_cell] = [[1,1], set([])] else: if parent_cell != child_cell and child_cell[0] == parent_cell[0]: ds[parent_cell][0][0] += 1 ds[parent_cell][1].add('right') elif parent_cell != child_cell and child_cell[0] > parent_cell[0]: if child_cell[1] == parent_cell[1]: ds[parent_cell][0][1] += 1 ds[parent_cell][1].add('down') else: raise RuntimeError('Something went wrong') return ds def mark_cells_going_right(cell, curr_cell, merged_cells): """ Add a "colspan" attribute and mark empty table columns for deletion if they are part of a merged cell going right. Args: cell: BeautifulSoup element tag object representation of the current cell. curr_cell: tuple, numeric representation of the current cell. merged_cells: dictionary of of data about merged cells. """ #if curr_cell in merged_cells and merged_cells[curr_cell][1] == set(['right']): try: xcount = merged_cells[curr_cell][0][0] if xcount > 1: # No colspans on 1 cell['colspan'] = xcount col_count = xcount - 1 while col_count > 0: cell = cell.find_next_sibling() cell['class'] = 'delete' col_count -= 1 except: pass def mark_cells_going_down(cell, curr_cell, merged_cells): """ Add a "rowspan" attribute and mark empty table columns for deletion if they are part of a merged cell going down. 
Args: cell: BeautifulSoup element tag object representation of the current cell. curr_cell: tuple, numeric representation of the current cell. merged_cells: dictionary of of data about merged cells. """ if curr_cell in merged_cells and merged_cells[curr_cell][1] == set(['down']): ycount = merged_cells[curr_cell][0][1] cell['rowspan'] = ycount row_count = ycount for child_row in cell.parent.find_next_siblings(limit=row_count - 1): i = 0 for child in child_row.find_all('td'): if i == curr_cell[1]: child['class'] = 'delete' i += 1 def mark_cells_going_down_and_right(cell, curr_cell, merged_cells): """ Add "rowspan" and "colspan" attributes and mark empty columns for deletion if they are part of a merged cell going down and to the right diagonally. Args: cell: BeautifulSoup element tag object representation of the current cell. curr_cell: tuple, numeric representation of the current cell. merged_cells: dictionary of of data about merged cells. """ if curr_cell in merged_cells and \ ('down' in merged_cells[curr_cell][1] and \ 'right' in merged_cells[curr_cell][1]): xcount = merged_cells[curr_cell][0][0] ycount = merged_cells[curr_cell][0][1] row_count = ycount col_count = xcount mark_cells_going_right(cell, curr_cell, merged_cells) flag = False for child_row in [cell.parent] + cell.parent.find_all_next('tr', limit=row_count - 1): i = 0 for child in child_row.find_all('td'): if i == curr_cell[1]: mark_cells_going_right(child, curr_cell, merged_cells) if not flag: child['colspan'] = col_count child['rowspan'] = row_count flag = True else: child['class'] = 'delete' i += 1 def is_empty_th(string): """ Detects if a table cell is left empty (is a merged cell). Args: string: string """ if string[:8] == 'Unnamed:': data = string.split(' ') if is_numeric(data[1]): return True return False def mark_header_cells(html): """ Mark header cells for deletion if they need to be merged. Also, add colspan and scope attributes. Args: html: string """ th = html.find_all('th') for header in th: txt = header.string if not is_empty_th(txt): header['scope'] = 'col' count = 1 for sibling in header.find_next_siblings(): if is_empty_th(sibling.string): count += 1 sibling['class'] = 'delete' else: break if count > 1: header['colspan'] = count header['scope'] = 'colgroup' def create_caption(html, caption): """ Create a caption element for an accessible table and append it to the right part of the tree. Args: html: string caption: string """ ctag = html.new_tag('caption') ctag.insert(0, caption) html.table.insert(0, ctag) def create_summary_and_details(html, details): """ Create a summary and details element for an accessible table and insert it into the right part of the tree. Args: html: string details: string """ if len(details) != 2: msg = 'The "details" argument should be a list with two items. ' \ + 'The first item should be a string for the html summary ' \ + 'and the second should be a long description for the details ' \ + 'element. Both of those must be included and nothing else.' raise RuntimeError(msg) summary = details[0] details = details[1] if not caption: create_caption(html, caption) dtag = html.new_tag('details') stag = html.new_tag('summary') ptag = html.new_tag('p') stag.insert(0, summary) ptag.insert(0, details) dtag.insert(0, stag) dtag.append(ptag) html.table.caption.insert(1, dtag) def format_properly(html): """ Fix bad formatting from beautifulsoup. Args: html: string of html representing a table. 
""" return html.replace('\n ', '').replace('\n </td>', \ '</td>').replace('\n </th>', '</th>').replace('\n </summary>', \ '</summary>').replace('\n </p>', '</p>') def add_row_headers(html): """ Convert <td>s to <th>s if row_headers is set to True. Args: html: string, table. """ for row in html.tbody.find_all('tr'): spans_rows = 'rowspan' in row.td.attrs spans_columns = 'colspan' in row.td.attrs new_tag = html.new_tag('th') new_tag['scope'] = 'row' new_tag.string = row.td.string if spans_rows: new_tag['rowspan'] = row.td.attrs['rowspan'] new_tag['scope'] = 'rowgroup' if spans_columns: new_tag['colspan'] = row.td.attrs['colspan'] row.td.replace_with(new_tag) def beautify(html): """ Beautify the html from pandas. Args: html: table markup from pandas. """ table = html.find('table') first_tr = table.find('tr') del table['border'] del first_tr['style'] return format_properly(html.prettify(formatter='minimal')) def parse_html(html, caption, details): """ Use BeautifulSoup to correct the html for merged columns and rows. What could possibly go wrong? Args: html: string caption: string details: list of strings lenght of two Returns: string, modified html """ new_html = BeautifulSoup(html, 'html.parser') if merge: row_num = 1 # e.g. {(4, 3): [1, 'right'], (2, 1): [1, 'down']} merged_cells = get_data_on_merged_cells() rows = new_html.find('table').find('tbody').find_all('tr') for row in rows: cell_num = 0 # Why are we off by 1? Maybe because we set index to False in to_html? cells = row.find_all('td') for cell in cells: #cell['class'] = str(row_num) + ' ' + str(cell_num) # DEBUG curr_cell = (row_num, cell_num) # Mark merged cells for deletion mark_cells_going_right(cell, curr_cell, merged_cells) mark_cells_going_down(cell, curr_cell, merged_cells) mark_cells_going_down_and_right(cell, curr_cell, merged_cells) cell_num += 1 row_num += 1 # Mark header cells for deletion mark_header_cells(new_html) # Delete all the renegade cells at once destroy = new_html.find_all(attrs={'class' : 'delete' }) for item in destroy: item.extract() # Convert <td>s to <th>s if needed. if row_headers: add_row_headers(new_html) # Add caption if applicable if caption: create_caption(new_html, caption) # Add summary and details if possible if details: create_summary_and_details(new_html, details) return beautify(new_html) # Set options for pandas and load the excel file pd.options.display.max_colwidth = -1 xls = pd.ExcelFile(path) # Parse the sheet you're interested in, results in a Dataframe df = xls.parse(sheetname) # Convert the dataframe to html panda_html = df.to_html(classes=css_classes, index=False, na_rep='') # Parse the panda html to merge cells and beautify the markup return parse_html(panda_html, caption, details)
[ "Convert", "an", "excel", "spreadsheet", "to", "an", "html", "table", ".", "This", "function", "supports", "the", "conversion", "of", "merged", "cells", ".", "It", "can", "be", "used", "in", "code", "or", "run", "from", "the", "command", "-", "line", ".", "If", "passed", "the", "correct", "arguments", "it", "can", "generate", "fully", "accessible", "html", "." ]
bbusenius/Diablo-Python
python
https://github.com/bbusenius/Diablo-Python/blob/646ac5a6f1c79cf9b928a4e2a7979988698b6c82/file_parsing/file_parsing.py#L340-L732
[ "def", "excel_to_html", "(", "path", ",", "sheetname", "=", "'Sheet1'", ",", "css_classes", "=", "''", ",", "caption", "=", "''", ",", "details", "=", "[", "]", ",", "row_headers", "=", "False", ",", "merge", "=", "False", ")", ":", "def", "get_data_on_merged_cells", "(", ")", ":", "\"\"\"\n Build a datastructure with data \n on merged cells.\n \"\"\"", "# Use this to build support for merged columns and rows???? ", "merged_cells", "=", "xls", ".", "book", ".", "sheet_by_name", "(", "sheetname", ")", ".", "merged_cells", "ds", "=", "{", "}", "for", "crange", "in", "merged_cells", ":", "rlo", ",", "rhi", ",", "clo", ",", "chi", "=", "crange", "for", "rowx", "in", "range", "(", "rlo", ",", "rhi", ")", ":", "for", "colx", "in", "range", "(", "clo", ",", "chi", ")", ":", "# Cell (rlo, clo) (the top left one) will carry the data and ", "# formatting info. The remainder will be recorded as blank cells, ", "# but a renderer will apply the formatting info for the top left ", "# cell (e.g. border, pattern) to all cells in the range.", "#print(str(rlo) + ' ' + str(clo))", "#print(str(rowx) + ' ' + str(colx))", "parent_cell", "=", "(", "rlo", ",", "clo", ")", "child_cell", "=", "(", "rowx", ",", "colx", ")", "if", "not", "parent_cell", "in", "ds", ":", "# Return data structure is a dictionary with numeric tuples ", "# as keys. Each tuple holds the x, y coordinates of the cell.", "# The dictionary holds two values:", "# 1. A list with two numbers which represent the x/y count ", "# starting at 1 for the current cell.", "# 2. A set describing which direction the cells are merged.", "ds", "[", "parent_cell", "]", "=", "[", "[", "1", ",", "1", "]", ",", "set", "(", "[", "]", ")", "]", "else", ":", "if", "parent_cell", "!=", "child_cell", "and", "child_cell", "[", "0", "]", "==", "parent_cell", "[", "0", "]", ":", "ds", "[", "parent_cell", "]", "[", "0", "]", "[", "0", "]", "+=", "1", "ds", "[", "parent_cell", "]", "[", "1", "]", ".", "add", "(", "'right'", ")", "elif", "parent_cell", "!=", "child_cell", "and", "child_cell", "[", "0", "]", ">", "parent_cell", "[", "0", "]", ":", "if", "child_cell", "[", "1", "]", "==", "parent_cell", "[", "1", "]", ":", "ds", "[", "parent_cell", "]", "[", "0", "]", "[", "1", "]", "+=", "1", "ds", "[", "parent_cell", "]", "[", "1", "]", ".", "add", "(", "'down'", ")", "else", ":", "raise", "RuntimeError", "(", "'Something went wrong'", ")", "return", "ds", "def", "mark_cells_going_right", "(", "cell", ",", "curr_cell", ",", "merged_cells", ")", ":", "\"\"\"\n Add a \"colspan\" attribute and mark empty table \n columns for deletion if they are part of a \n merged cell going right.\n\n Args:\n cell: BeautifulSoup element tag object \n representation of the current cell.\n\n curr_cell: tuple, numeric representation \n of the current cell.\n\n merged_cells: dictionary of of data about \n merged cells.\n \"\"\"", "#if curr_cell in merged_cells and merged_cells[curr_cell][1] == set(['right']):", "try", ":", "xcount", "=", "merged_cells", "[", "curr_cell", "]", "[", "0", "]", "[", "0", "]", "if", "xcount", ">", "1", ":", "# No colspans on 1", "cell", "[", "'colspan'", "]", "=", "xcount", "col_count", "=", "xcount", "-", "1", "while", "col_count", ">", "0", ":", "cell", "=", "cell", ".", "find_next_sibling", "(", ")", "cell", "[", "'class'", "]", "=", "'delete'", "col_count", "-=", "1", "except", ":", "pass", "def", "mark_cells_going_down", "(", "cell", ",", "curr_cell", ",", "merged_cells", ")", ":", "\"\"\"\n Add a \"rowspan\" attribute and mark empty table \n columns for 
deletion if they are part of a \n merged cell going down.\n\n Args:\n cell: BeautifulSoup element tag object \n representation of the current cell.\n\n curr_cell: tuple, numeric representation \n of the current cell.\n\n merged_cells: dictionary of of data about \n merged cells.\n \"\"\"", "if", "curr_cell", "in", "merged_cells", "and", "merged_cells", "[", "curr_cell", "]", "[", "1", "]", "==", "set", "(", "[", "'down'", "]", ")", ":", "ycount", "=", "merged_cells", "[", "curr_cell", "]", "[", "0", "]", "[", "1", "]", "cell", "[", "'rowspan'", "]", "=", "ycount", "row_count", "=", "ycount", "for", "child_row", "in", "cell", ".", "parent", ".", "find_next_siblings", "(", "limit", "=", "row_count", "-", "1", ")", ":", "i", "=", "0", "for", "child", "in", "child_row", ".", "find_all", "(", "'td'", ")", ":", "if", "i", "==", "curr_cell", "[", "1", "]", ":", "child", "[", "'class'", "]", "=", "'delete'", "i", "+=", "1", "def", "mark_cells_going_down_and_right", "(", "cell", ",", "curr_cell", ",", "merged_cells", ")", ":", "\"\"\"\n Add \"rowspan\" and \"colspan\" attributes and mark \n empty columns for deletion if they are part of a \n merged cell going down and to the right diagonally.\n\n Args:\n cell: BeautifulSoup element tag object \n representation of the current cell.\n\n curr_cell: tuple, numeric representation \n of the current cell.\n\n merged_cells: dictionary of of data about \n merged cells.\n \"\"\"", "if", "curr_cell", "in", "merged_cells", "and", "(", "'down'", "in", "merged_cells", "[", "curr_cell", "]", "[", "1", "]", "and", "'right'", "in", "merged_cells", "[", "curr_cell", "]", "[", "1", "]", ")", ":", "xcount", "=", "merged_cells", "[", "curr_cell", "]", "[", "0", "]", "[", "0", "]", "ycount", "=", "merged_cells", "[", "curr_cell", "]", "[", "0", "]", "[", "1", "]", "row_count", "=", "ycount", "col_count", "=", "xcount", "mark_cells_going_right", "(", "cell", ",", "curr_cell", ",", "merged_cells", ")", "flag", "=", "False", "for", "child_row", "in", "[", "cell", ".", "parent", "]", "+", "cell", ".", "parent", ".", "find_all_next", "(", "'tr'", ",", "limit", "=", "row_count", "-", "1", ")", ":", "i", "=", "0", "for", "child", "in", "child_row", ".", "find_all", "(", "'td'", ")", ":", "if", "i", "==", "curr_cell", "[", "1", "]", ":", "mark_cells_going_right", "(", "child", ",", "curr_cell", ",", "merged_cells", ")", "if", "not", "flag", ":", "child", "[", "'colspan'", "]", "=", "col_count", "child", "[", "'rowspan'", "]", "=", "row_count", "flag", "=", "True", "else", ":", "child", "[", "'class'", "]", "=", "'delete'", "i", "+=", "1", "def", "is_empty_th", "(", "string", ")", ":", "\"\"\"\n Detects if a table cell is left\n empty (is a merged cell).\n\n Args:\n string: string\n \"\"\"", "if", "string", "[", ":", "8", "]", "==", "'Unnamed:'", ":", "data", "=", "string", ".", "split", "(", "' '", ")", "if", "is_numeric", "(", "data", "[", "1", "]", ")", ":", "return", "True", "return", "False", "def", "mark_header_cells", "(", "html", ")", ":", "\"\"\"\n Mark header cells for deletion if they \n need to be merged. 
Also, add colspan\n and scope attributes.\n\n Args: \n html: string\n \"\"\"", "th", "=", "html", ".", "find_all", "(", "'th'", ")", "for", "header", "in", "th", ":", "txt", "=", "header", ".", "string", "if", "not", "is_empty_th", "(", "txt", ")", ":", "header", "[", "'scope'", "]", "=", "'col'", "count", "=", "1", "for", "sibling", "in", "header", ".", "find_next_siblings", "(", ")", ":", "if", "is_empty_th", "(", "sibling", ".", "string", ")", ":", "count", "+=", "1", "sibling", "[", "'class'", "]", "=", "'delete'", "else", ":", "break", "if", "count", ">", "1", ":", "header", "[", "'colspan'", "]", "=", "count", "header", "[", "'scope'", "]", "=", "'colgroup'", "def", "create_caption", "(", "html", ",", "caption", ")", ":", "\"\"\"\n Create a caption element for an \n accessible table and append it\n to the right part of the tree.\n \n Args:\n html: string\n\n caption: string\n \"\"\"", "ctag", "=", "html", ".", "new_tag", "(", "'caption'", ")", "ctag", ".", "insert", "(", "0", ",", "caption", ")", "html", ".", "table", ".", "insert", "(", "0", ",", "ctag", ")", "def", "create_summary_and_details", "(", "html", ",", "details", ")", ":", "\"\"\"\n Create a summary and details element\n for an accessible table and insert \n it into the right part of the tree.\n\n Args:\n html: string\n\n details: string\n \"\"\"", "if", "len", "(", "details", ")", "!=", "2", ":", "msg", "=", "'The \"details\" argument should be a list with two items. '", "+", "'The first item should be a string for the html summary '", "+", "'and the second should be a long description for the details '", "+", "'element. Both of those must be included and nothing else.'", "raise", "RuntimeError", "(", "msg", ")", "summary", "=", "details", "[", "0", "]", "details", "=", "details", "[", "1", "]", "if", "not", "caption", ":", "create_caption", "(", "html", ",", "caption", ")", "dtag", "=", "html", ".", "new_tag", "(", "'details'", ")", "stag", "=", "html", ".", "new_tag", "(", "'summary'", ")", "ptag", "=", "html", ".", "new_tag", "(", "'p'", ")", "stag", ".", "insert", "(", "0", ",", "summary", ")", "ptag", ".", "insert", "(", "0", ",", "details", ")", "dtag", ".", "insert", "(", "0", ",", "stag", ")", "dtag", ".", "append", "(", "ptag", ")", "html", ".", "table", ".", "caption", ".", "insert", "(", "1", ",", "dtag", ")", "def", "format_properly", "(", "html", ")", ":", "\"\"\"\n Fix bad formatting from beautifulsoup.\n\n Args:\n html: string of html representing \n a table.\n \"\"\"", "return", "html", ".", "replace", "(", "'\\n '", ",", "''", ")", ".", "replace", "(", "'\\n </td>'", ",", "'</td>'", ")", ".", "replace", "(", "'\\n </th>'", ",", "'</th>'", ")", ".", "replace", "(", "'\\n </summary>'", ",", "'</summary>'", ")", ".", "replace", "(", "'\\n </p>'", ",", "'</p>'", ")", "def", "add_row_headers", "(", "html", ")", ":", "\"\"\"\n Convert <td>s to <th>s if row_headers\n is set to True.\n\n Args:\n html: string, table.\n \"\"\"", "for", "row", "in", "html", ".", "tbody", ".", "find_all", "(", "'tr'", ")", ":", "spans_rows", "=", "'rowspan'", "in", "row", ".", "td", ".", "attrs", "spans_columns", "=", "'colspan'", "in", "row", ".", "td", ".", "attrs", "new_tag", "=", "html", ".", "new_tag", "(", "'th'", ")", "new_tag", "[", "'scope'", "]", "=", "'row'", "new_tag", ".", "string", "=", "row", ".", "td", ".", "string", "if", "spans_rows", ":", "new_tag", "[", "'rowspan'", "]", "=", "row", ".", "td", ".", "attrs", "[", "'rowspan'", "]", "new_tag", "[", "'scope'", "]", "=", "'rowgroup'", "if", "spans_columns", ":", 
"new_tag", "[", "'colspan'", "]", "=", "row", ".", "td", ".", "attrs", "[", "'colspan'", "]", "row", ".", "td", ".", "replace_with", "(", "new_tag", ")", "def", "beautify", "(", "html", ")", ":", "\"\"\"\n Beautify the html from pandas.\n\n Args:\n html: table markup from pandas.\n \"\"\"", "table", "=", "html", ".", "find", "(", "'table'", ")", "first_tr", "=", "table", ".", "find", "(", "'tr'", ")", "del", "table", "[", "'border'", "]", "del", "first_tr", "[", "'style'", "]", "return", "format_properly", "(", "html", ".", "prettify", "(", "formatter", "=", "'minimal'", ")", ")", "def", "parse_html", "(", "html", ",", "caption", ",", "details", ")", ":", "\"\"\"\n Use BeautifulSoup to correct the \n html for merged columns and rows.\n What could possibly go wrong?\n\n Args:\n html: string\n\n caption: string\n\n details: list of strings lenght of two\n\n Returns:\n string, modified html\n \"\"\"", "new_html", "=", "BeautifulSoup", "(", "html", ",", "'html.parser'", ")", "if", "merge", ":", "row_num", "=", "1", "# e.g. {(4, 3): [1, 'right'], (2, 1): [1, 'down']}", "merged_cells", "=", "get_data_on_merged_cells", "(", ")", "rows", "=", "new_html", ".", "find", "(", "'table'", ")", ".", "find", "(", "'tbody'", ")", ".", "find_all", "(", "'tr'", ")", "for", "row", "in", "rows", ":", "cell_num", "=", "0", "# Why are we off by 1? Maybe because we set index to False in to_html?", "cells", "=", "row", ".", "find_all", "(", "'td'", ")", "for", "cell", "in", "cells", ":", "#cell['class'] = str(row_num) + ' ' + str(cell_num) # DEBUG", "curr_cell", "=", "(", "row_num", ",", "cell_num", ")", "# Mark merged cells for deletion", "mark_cells_going_right", "(", "cell", ",", "curr_cell", ",", "merged_cells", ")", "mark_cells_going_down", "(", "cell", ",", "curr_cell", ",", "merged_cells", ")", "mark_cells_going_down_and_right", "(", "cell", ",", "curr_cell", ",", "merged_cells", ")", "cell_num", "+=", "1", "row_num", "+=", "1", "# Mark header cells for deletion", "mark_header_cells", "(", "new_html", ")", "# Delete all the renegade cells at once", "destroy", "=", "new_html", ".", "find_all", "(", "attrs", "=", "{", "'class'", ":", "'delete'", "}", ")", "for", "item", "in", "destroy", ":", "item", ".", "extract", "(", ")", "# Convert <td>s to <th>s if needed.", "if", "row_headers", ":", "add_row_headers", "(", "new_html", ")", "# Add caption if applicable", "if", "caption", ":", "create_caption", "(", "new_html", ",", "caption", ")", "# Add summary and details if possible", "if", "details", ":", "create_summary_and_details", "(", "new_html", ",", "details", ")", "return", "beautify", "(", "new_html", ")", "# Set options for pandas and load the excel file", "pd", ".", "options", ".", "display", ".", "max_colwidth", "=", "-", "1", "xls", "=", "pd", ".", "ExcelFile", "(", "path", ")", "# Parse the sheet you're interested in, results in a Dataframe", "df", "=", "xls", ".", "parse", "(", "sheetname", ")", "# Convert the dataframe to html", "panda_html", "=", "df", ".", "to_html", "(", "classes", "=", "css_classes", ",", "index", "=", "False", ",", "na_rep", "=", "''", ")", "# Parse the panda html to merge cells and beautify the markup ", "return", "parse_html", "(", "panda_html", ",", "caption", ",", "details", ")" ]
646ac5a6f1c79cf9b928a4e2a7979988698b6c82
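A hedged usage sketch for excel_to_html. The spreadsheet name and every argument value are hypothetical, and the merged-cell path uses the xlrd-backed xls.book interface, so an .xls file under an xlrd-capable pandas install is assumed.

from file_parsing.file_parsing import excel_to_html

html = excel_to_html('report.xls', sheetname='Sheet1',
                     css_classes='data-table', caption='Quarterly totals',
                     details=['Help', 'Rows are regions; columns are quarters.'],
                     row_headers=True, merge=True)
print(html)  # an accessible <table> with caption, summary/details, scope attributes and merged cells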
valid
future_value
Calculates the future value of money invested at an annual interest rate, x times per year, for a given number of years. Args: present_value: int or float, the current value of the money (principal). annual_rate: float (0 to 1, e.g., .5 = 50%), the interest rate paid out. periods_per_year: int, the number of times money is invested per year. years: int, the number of years invested. Returns: Float, the future value of the money invested with compound interest.
simple_math/simple_math.py
def future_value(present_value, annual_rate, periods_per_year, years): """ Calculates the future value of money invested at an anual interest rate, x times per year, for a given number of years. Args: present_value: int or float, the current value of the money (principal). annual_rate: float 0 to 1 e.g., .5 = 50%), the interest rate paid out. periods_per_year: int, the number of times money is invested per year. years: int, the number of years invested. Returns: Float, the future value of the money invested with compound interest. """ # The nominal interest rate per period (rate) is how much interest you earn during a # particular length of time, before accounting for compounding. This is typically # expressed as a percentage. rate_per_period = annual_rate / float(periods_per_year) # How many periods in the future the calculation is for. periods = periods_per_year * years return present_value * (1 + rate_per_period) ** periods
def future_value(present_value, annual_rate, periods_per_year, years): """ Calculates the future value of money invested at an anual interest rate, x times per year, for a given number of years. Args: present_value: int or float, the current value of the money (principal). annual_rate: float 0 to 1 e.g., .5 = 50%), the interest rate paid out. periods_per_year: int, the number of times money is invested per year. years: int, the number of years invested. Returns: Float, the future value of the money invested with compound interest. """ # The nominal interest rate per period (rate) is how much interest you earn during a # particular length of time, before accounting for compounding. This is typically # expressed as a percentage. rate_per_period = annual_rate / float(periods_per_year) # How many periods in the future the calculation is for. periods = periods_per_year * years return present_value * (1 + rate_per_period) ** periods
[ "Calculates", "the", "future", "value", "of", "money", "invested", "at", "an", "anual", "interest", "rate", "x", "times", "per", "year", "for", "a", "given", "number", "of", "years", "." ]
bbusenius/Diablo-Python
python
https://github.com/bbusenius/Diablo-Python/blob/646ac5a6f1c79cf9b928a4e2a7979988698b6c82/simple_math/simple_math.py#L146-L172
[ "def", "future_value", "(", "present_value", ",", "annual_rate", ",", "periods_per_year", ",", "years", ")", ":", "# The nominal interest rate per period (rate) is how much interest you earn during a", "# particular length of time, before accounting for compounding. This is typically", "# expressed as a percentage.", "rate_per_period", "=", "annual_rate", "/", "float", "(", "periods_per_year", ")", "# How many periods in the future the calculation is for.", "periods", "=", "periods_per_year", "*", "years", "return", "present_value", "*", "(", "1", "+", "rate_per_period", ")", "**", "periods" ]
646ac5a6f1c79cf9b928a4e2a7979988698b6c82
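A worked example for future_value: $1,000 at 5% annual interest, compounded monthly for 10 years (the import path is assumed from the file path above).

from simple_math.simple_math import future_value

# 1000 * (1 + 0.05 / 12) ** (12 * 10)
print(future_value(1000, 0.05, 12, 10))  # ~1647.01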
valid
triangle_area
Uses Heron's formula to find the area of a triangle based on the coordinates of three points. Args: point1: list or tuple, the x y coordinate of point one. point2: list or tuple, the x y coordinate of point two. point3: list or tuple, the x y coordinate of point three. Returns: The area of a triangle as a floating point number. Requires: The math module, point_distance().
simple_math/simple_math.py
def triangle_area(point1, point2, point3): """ Uses Heron's formula to find the area of a triangle based on the coordinates of three points. Args: point1: list or tuple, the x y coordinate of point one. point2: list or tuple, the x y coordinate of point two. point3: list or tuple, the x y coordinate of point three. Returns: The area of a triangle as a floating point number. Requires: The math module, point_distance(). """ """Lengths of the three sides of the triangle""" a = point_distance(point1, point2) b = point_distance(point1, point3) c = point_distance(point2, point3) """Where s is the semiperimeter""" s = (a + b + c) / 2.0 """Return the area of the triangle (using Heron's formula)""" return math.sqrt(s * (s - a) * (s - b) * (s - c))
def triangle_area(point1, point2, point3): """ Uses Heron's formula to find the area of a triangle based on the coordinates of three points. Args: point1: list or tuple, the x y coordinate of point one. point2: list or tuple, the x y coordinate of point two. point3: list or tuple, the x y coordinate of point three. Returns: The area of a triangle as a floating point number. Requires: The math module, point_distance(). """ """Lengths of the three sides of the triangle""" a = point_distance(point1, point2) b = point_distance(point1, point3) c = point_distance(point2, point3) """Where s is the semiperimeter""" s = (a + b + c) / 2.0 """Return the area of the triangle (using Heron's formula)""" return math.sqrt(s * (s - a) * (s - b) * (s - c))
[ "Uses", "Heron", "s", "formula", "to", "find", "the", "area", "of", "a", "triangle", "based", "on", "the", "coordinates", "of", "three", "points", "." ]
bbusenius/Diablo-Python
python
https://github.com/bbusenius/Diablo-Python/blob/646ac5a6f1c79cf9b928a4e2a7979988698b6c82/simple_math/simple_math.py#L190-L218
[ "def", "triangle_area", "(", "point1", ",", "point2", ",", "point3", ")", ":", "\"\"\"Lengths of the three sides of the triangle\"\"\"", "a", "=", "point_distance", "(", "point1", ",", "point2", ")", "b", "=", "point_distance", "(", "point1", ",", "point3", ")", "c", "=", "point_distance", "(", "point2", ",", "point3", ")", "\"\"\"Where s is the semiperimeter\"\"\"", "s", "=", "(", "a", "+", "b", "+", "c", ")", "/", "2.0", "\"\"\"Return the area of the triangle (using Heron's formula)\"\"\"", "return", "math", ".", "sqrt", "(", "s", "*", "(", "s", "-", "a", ")", "*", "(", "s", "-", "b", ")", "*", "(", "s", "-", "c", ")", ")" ]
646ac5a6f1c79cf9b928a4e2a7979988698b6c82
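A worked example for triangle_area using a 3-4-5 right triangle; the import path is assumed, and the helper point_distance comes from the same module.

from simple_math.simple_math import triangle_area

# Sides 3, 4 and 5, so s = 6 and the area is sqrt(6 * 3 * 2 * 1) = 6.
print(triangle_area((0, 0), (3, 0), (0, 4)))  # 6.0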
valid
regular_polygon_area
Calculates the area of a regular polygon (with sides of equal length). Args: number_of_sides: Integer, the number of sides of the polygon length_of_sides: Integer or floating point number, the length of the sides Returns: The area of a regular polygon as an integer or floating point number Requires: The math module
simple_math/simple_math.py
def regular_polygon_area(number_of_sides, length_of_sides): """ Calculates the area of a regular polygon (with sides of equal length). Args: number_of_sides: Integer, the number of sides of the polygon length_of_sides: Integer or floating point number, the length of the sides Returns: The area of a regular polygon as an integer or floating point number Requires: The math module """ return (0.25 * number_of_sides * length_of_sides ** 2) / math.tan( math.pi / number_of_sides )
def regular_polygon_area(number_of_sides, length_of_sides): """ Calculates the area of a regular polygon (with sides of equal length). Args: number_of_sides: Integer, the number of sides of the polygon length_of_sides: Integer or floating point number, the length of the sides Returns: The area of a regular polygon as an integer or floating point number Requires: The math module """ return (0.25 * number_of_sides * length_of_sides ** 2) / math.tan( math.pi / number_of_sides )
[ "Calculates", "the", "area", "of", "a", "regular", "polygon", "(", "with", "sides", "of", "equal", "length", ")", "." ]
bbusenius/Diablo-Python
python
https://github.com/bbusenius/Diablo-Python/blob/646ac5a6f1c79cf9b928a4e2a7979988698b6c82/simple_math/simple_math.py#L241-L258
[ "def", "regular_polygon_area", "(", "number_of_sides", ",", "length_of_sides", ")", ":", "return", "(", "0.25", "*", "number_of_sides", "*", "length_of_sides", "**", "2", ")", "/", "math", ".", "tan", "(", "math", ".", "pi", "/", "number_of_sides", ")" ]
646ac5a6f1c79cf9b928a4e2a7979988698b6c82
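A hedged sanity check for regular_polygon_area, assuming the function above is in scope with math imported; a square is the easiest case because tan(pi/4) is exactly 1.

# Square with side 2: (0.25 * 4 * 2**2) / tan(pi/4) -> 4.0 (up to float rounding)
print(regular_polygon_area(4, 2))
# Regular hexagon with side 1: about 2.598, i.e. 3 * sqrt(3) / 2
print(regular_polygon_area(6, 1))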
valid
median
Calculates the median of a list of integers or floating point numbers. Args: data: A list of integers or floating point numbers Returns: Sorts the list numerically and returns the middle number if the list has an odd number of items. If the list contains an even number of items the mean of the two middle numbers is returned.
simple_math/simple_math.py
def median(data): """ Calculates the median of a list of integers or floating point numbers. Args: data: A list of integers or floating point numbers Returns: Sorts the list numerically and returns the middle number if the list has an odd number of items. If the list contains an even number of items the mean of the two middle numbers is returned. """ ordered = sorted(data) length = len(ordered) if length % 2 == 0: return ( ordered[math.floor(length / 2) - 1] + ordered[math.floor(length / 2)] ) / 2.0 elif length % 2 != 0: return ordered[math.floor(length / 2)]
def median(data): """ Calculates the median of a list of integers or floating point numbers. Args: data: A list of integers or floating point numbers Returns: Sorts the list numerically and returns the middle number if the list has an odd number of items. If the list contains an even number of items the mean of the two middle numbers is returned. """ ordered = sorted(data) length = len(ordered) if length % 2 == 0: return ( ordered[math.floor(length / 2) - 1] + ordered[math.floor(length / 2)] ) / 2.0 elif length % 2 != 0: return ordered[math.floor(length / 2)]
[ "Calculates", "the", "median", "of", "a", "list", "of", "integers", "or", "floating", "point", "numbers", "." ]
bbusenius/Diablo-Python
python
https://github.com/bbusenius/Diablo-Python/blob/646ac5a6f1c79cf9b928a4e2a7979988698b6c82/simple_math/simple_math.py#L261-L281
[ "def", "median", "(", "data", ")", ":", "ordered", "=", "sorted", "(", "data", ")", "length", "=", "len", "(", "ordered", ")", "if", "length", "%", "2", "==", "0", ":", "return", "(", "ordered", "[", "math", ".", "floor", "(", "length", "/", "2", ")", "-", "1", "]", "+", "ordered", "[", "math", ".", "floor", "(", "length", "/", "2", ")", "]", ")", "/", "2.0", "elif", "length", "%", "2", "!=", "0", ":", "return", "ordered", "[", "math", ".", "floor", "(", "length", "/", "2", ")", "]" ]
646ac5a6f1c79cf9b928a4e2a7979988698b6c82
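Two illustrative calls for the median record above (assuming the function is in scope; it relies on math.floor, so the math import used elsewhere in the module is required).

print(median([5, 1, 3]))     # odd length: middle of sorted [1, 3, 5] -> 3
print(median([4, 1, 3, 2]))  # even length: mean of 2 and 3 -> 2.5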
valid
average
Calculates the average or mean of a list of numbers Args: numbers: a list of integers or floating point numbers. numtype: string, 'decimal' or 'float'; the type of number to return. Returns: The average (mean) of the numbers as a floating point number or a Decimal object. Requires: The math module
simple_math/simple_math.py
def average(numbers, numtype='float'):
    """
    Calculates the average or mean of a list of numbers

    Args:
        numbers: a list of integers or floating point numbers.

        numtype: string, 'decimal' or 'float'; the type of number to return.

    Returns:
        The average (mean) of the numbers as a floating point number
        or a Decimal object.

    Requires:
        The math module
    """
    if numtype == 'decimal':
        return Decimal(sum(numbers)) / len(numbers)
    else:
        return float(sum(numbers)) / len(numbers)
def average(numbers, numtype='float'):
    """
    Calculates the average or mean of a list of numbers

    Args:
        numbers: a list of integers or floating point numbers.

        numtype: string, 'decimal' or 'float'; the type of number to return.

    Returns:
        The average (mean) of the numbers as a floating point number
        or a Decimal object.

    Requires:
        The math module
    """
    if numtype == 'decimal':
        return Decimal(sum(numbers)) / len(numbers)
    else:
        return float(sum(numbers)) / len(numbers)
[ "Calculates", "the", "average", "or", "mean", "of", "a", "list", "of", "numbers" ]
bbusenius/Diablo-Python
python
https://github.com/bbusenius/Diablo-Python/blob/646ac5a6f1c79cf9b928a4e2a7979988698b6c82/simple_math/simple_math.py#L284-L303
[ "def", "average", "(", "numbers", ",", "numtype", "=", "'float'", ")", ":", "if", "type", "==", "'decimal'", ":", "return", "Decimal", "(", "sum", "(", "numbers", ")", ")", "/", "len", "(", "numbers", ")", "else", ":", "return", "float", "(", "sum", "(", "numbers", ")", ")", "/", "len", "(", "numbers", ")" ]
646ac5a6f1c79cf9b928a4e2a7979988698b6c82
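A short usage sketch for average, assuming the function above with the numtype comparison corrected (the original tested the built-in type, which made the decimal branch unreachable) and `from decimal import Decimal` available in its module; the numtype flag only changes the return type.

print(average([1, 2, 3, 4]))                     # 2.5 as a float
print(average([1, 2, 3, 4], numtype='decimal'))  # Decimal('2.5')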
valid
variance
Calculates the population or sample variance of a list of numbers. A large number means the results are all over the place, while a small number means the results are comparatively close to the average. Args: numbers: a list of integers or floating point numbers to compare. type: string, 'population' or 'sample', the kind of variance to be computed. Returns: The computed population or sample variance. Defaults to population variance. Requires: The math module, average()
simple_math/simple_math.py
def variance(numbers, type='population'): """ Calculates the population or sample variance of a list of numbers. A large number means the results are all over the place, while a small number means the results are comparatively close to the average. Args: numbers: a list of integers or floating point numbers to compare. type: string, 'population' or 'sample', the kind of variance to be computed. Returns: The computed population or sample variance. Defaults to population variance. Requires: The math module, average() """ mean = average(numbers) variance = 0 for number in numbers: variance += (mean - number) ** 2 if type == 'population': return variance / len(numbers) else: return variance / (len(numbers) - 1)
def variance(numbers, type='population'): """ Calculates the population or sample variance of a list of numbers. A large number means the results are all over the place, while a small number means the results are comparatively close to the average. Args: numbers: a list of integers or floating point numbers to compare. type: string, 'population' or 'sample', the kind of variance to be computed. Returns: The computed population or sample variance. Defaults to population variance. Requires: The math module, average() """ mean = average(numbers) variance = 0 for number in numbers: variance += (mean - number) ** 2 if type == 'population': return variance / len(numbers) else: return variance / (len(numbers) - 1)
[ "Calculates", "the", "population", "or", "sample", "variance", "of", "a", "list", "of", "numbers", ".", "A", "large", "number", "means", "the", "results", "are", "all", "over", "the", "place", "while", "a", "small", "number", "means", "the", "results", "are", "comparatively", "close", "to", "the", "average", "." ]
bbusenius/Diablo-Python
python
https://github.com/bbusenius/Diablo-Python/blob/646ac5a6f1c79cf9b928a4e2a7979988698b6c82/simple_math/simple_math.py#L306-L332
[ "def", "variance", "(", "numbers", ",", "type", "=", "'population'", ")", ":", "mean", "=", "average", "(", "numbers", ")", "variance", "=", "0", "for", "number", "in", "numbers", ":", "variance", "+=", "(", "mean", "-", "number", ")", "**", "2", "if", "type", "==", "'population'", ":", "return", "variance", "/", "len", "(", "numbers", ")", "else", ":", "return", "variance", "/", "(", "len", "(", "numbers", ")", "-", "1", ")" ]
646ac5a6f1c79cf9b928a4e2a7979988698b6c82
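A worked example for variance (function above assumed in scope; it calls the average() helper from the same module): for [2, 4, 4, 4, 5, 5, 7, 9] the mean is 5 and the squared deviations sum to 32.

data = [2, 4, 4, 4, 5, 5, 7, 9]
print(variance(data))                 # population variance: 32 / 8 = 4.0
print(variance(data, type='sample'))  # sample variance: 32 / 7, about 4.571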
valid
get_percentage
Finds the percentage of one number over another. Args: a: The number that is a percent, int or float. b: The base number that a is a percent of, int or float. i: Optional boolean integer. True if the user wants the result returned as a whole number. Assumes False. r: Optional boolean round. True if the user wants the result rounded. Rounds to the second decimal point on floating point numbers. Assumes False. Returns: The argument a as a percentage of b. Throws a warning if integer is set to True and round is set to False.
simple_math/simple_math.py
def get_percentage(a, b, i=False, r=False): """ Finds the percentage of one number over another. Args: a: The number that is a percent, int or float. b: The base number that a is a percent of, int or float. i: Optional boolean integer. True if the user wants the result returned as a whole number. Assumes False. r: Optional boolean round. True if the user wants the result rounded. Rounds to the second decimal point on floating point numbers. Assumes False. Returns: The argument a as a percentage of b. Throws a warning if integer is set to True and round is set to False. """ # Round to the second decimal if i is False and r is True: percentage = round(100.0 * (float(a) / b), 2) # Round to the nearest whole number elif (i is True and r is True) or (i is True and r is False): percentage = int(round(100 * (float(a) / b))) # A rounded number and an integer were requested if r is False: warnings.warn( "If integer is set to True and Round is set to False, you will still get a rounded number if you pass floating point numbers as arguments." ) # A precise unrounded decimal else: percentage = 100.0 * (float(a) / b) return percentage
def get_percentage(a, b, i=False, r=False): """ Finds the percentage of one number over another. Args: a: The number that is a percent, int or float. b: The base number that a is a percent of, int or float. i: Optional boolean integer. True if the user wants the result returned as a whole number. Assumes False. r: Optional boolean round. True if the user wants the result rounded. Rounds to the second decimal point on floating point numbers. Assumes False. Returns: The argument a as a percentage of b. Throws a warning if integer is set to True and round is set to False. """ # Round to the second decimal if i is False and r is True: percentage = round(100.0 * (float(a) / b), 2) # Round to the nearest whole number elif (i is True and r is True) or (i is True and r is False): percentage = int(round(100 * (float(a) / b))) # A rounded number and an integer were requested if r is False: warnings.warn( "If integer is set to True and Round is set to False, you will still get a rounded number if you pass floating point numbers as arguments." ) # A precise unrounded decimal else: percentage = 100.0 * (float(a) / b) return percentage
[ "Finds", "the", "percentage", "of", "one", "number", "over", "another", "." ]
bbusenius/Diablo-Python
python
https://github.com/bbusenius/Diablo-Python/blob/646ac5a6f1c79cf9b928a4e2a7979988698b6c82/simple_math/simple_math.py#L348-L385
[ "def", "get_percentage", "(", "a", ",", "b", ",", "i", "=", "False", ",", "r", "=", "False", ")", ":", "# Round to the second decimal", "if", "i", "is", "False", "and", "r", "is", "True", ":", "percentage", "=", "round", "(", "100.0", "*", "(", "float", "(", "a", ")", "/", "b", ")", ",", "2", ")", "# Round to the nearest whole number", "elif", "(", "i", "is", "True", "and", "r", "is", "True", ")", "or", "(", "i", "is", "True", "and", "r", "is", "False", ")", ":", "percentage", "=", "int", "(", "round", "(", "100", "*", "(", "float", "(", "a", ")", "/", "b", ")", ")", ")", "# A rounded number and an integer were requested", "if", "r", "is", "False", ":", "warnings", ".", "warn", "(", "\"If integer is set to True and Round is set to False, you will still get a rounded number if you pass floating point numbers as arguments.\"", ")", "# A precise unrounded decimal", "else", ":", "percentage", "=", "100.0", "*", "(", "float", "(", "a", ")", "/", "b", ")", "return", "percentage" ]
646ac5a6f1c79cf9b928a4e2a7979988698b6c82
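Hedged examples for get_percentage (assumed in scope, with the warnings module imported where it lives), showing how the i and r flags interact:

print(get_percentage(1, 3))          # 33.333... unrounded float
print(get_percentage(1, 3, r=True))  # 33.33, rounded to two decimals
print(get_percentage(1, 3, i=True))  # 33 as an int, and emits the documented warning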
valid
take_home_pay
Calculate net take-home pay including employer retirement savings match using the formula laid out by Mr. Money Mustache: http://www.mrmoneymustache.com/2015/01/26/calculating-net-worth/ Args: gross_pay: float or int, gross monthly pay. employer_match: float or int, the 401(k) match from your employer. taxes_and_fees: list, taxes and fees that are deducted from your paycheck. numtype: string, 'decimal' or 'float'; the type of number to return. Returns: your monthly take-home pay.
simple_math/simple_math.py
def take_home_pay(gross_pay, employer_match, taxes_and_fees, numtype='float'): """ Calculate net take-home pay including employer retirement savings match using the formula laid out by Mr. Money Mustache: http://www.mrmoneymustache.com/2015/01/26/calculating-net-worth/ Args: gross_pay: float or int, gross monthly pay. employer_match: float or int, the 401(k) match from your employer. taxes_and_fees: list, taxes and fees that are deducted from your paycheck. numtype: string, 'decimal' or 'float'; the type of number to return. Returns: your monthly take-home pay. """ if numtype == 'decimal': return (Decimal(gross_pay) + Decimal(employer_match)) - Decimal( sum(taxes_and_fees) ) else: return (float(gross_pay) + float(employer_match)) - sum(taxes_and_fees)
def take_home_pay(gross_pay, employer_match, taxes_and_fees, numtype='float'): """ Calculate net take-home pay including employer retirement savings match using the formula laid out by Mr. Money Mustache: http://www.mrmoneymustache.com/2015/01/26/calculating-net-worth/ Args: gross_pay: float or int, gross monthly pay. employer_match: float or int, the 401(k) match from your employer. taxes_and_fees: list, taxes and fees that are deducted from your paycheck. numtype: string, 'decimal' or 'float'; the type of number to return. Returns: your monthly take-home pay. """ if numtype == 'decimal': return (Decimal(gross_pay) + Decimal(employer_match)) - Decimal( sum(taxes_and_fees) ) else: return (float(gross_pay) + float(employer_match)) - sum(taxes_and_fees)
[ "Calculate", "net", "take", "-", "home", "pay", "including", "employer", "retirement", "savings", "match", "using", "the", "formula", "laid", "out", "by", "Mr", ".", "Money", "Mustache", ":", "http", ":", "//", "www", ".", "mrmoneymustache", ".", "com", "/", "2015", "/", "01", "/", "26", "/", "calculating", "-", "net", "-", "worth", "/" ]
bbusenius/Diablo-Python
python
https://github.com/bbusenius/Diablo-Python/blob/646ac5a6f1c79cf9b928a4e2a7979988698b6c82/simple_math/simple_math.py#L432-L455
[ "def", "take_home_pay", "(", "gross_pay", ",", "employer_match", ",", "taxes_and_fees", ",", "numtype", "=", "'float'", ")", ":", "if", "numtype", "==", "'decimal'", ":", "return", "(", "Decimal", "(", "gross_pay", ")", "+", "Decimal", "(", "employer_match", ")", ")", "-", "Decimal", "(", "sum", "(", "taxes_and_fees", ")", ")", "else", ":", "return", "(", "float", "(", "gross_pay", ")", "+", "float", "(", "employer_match", ")", ")", "-", "sum", "(", "taxes_and_fees", ")" ]
646ac5a6f1c79cf9b928a4e2a7979988698b6c82
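An illustrative call for take_home_pay (assumed in scope); the figures are made up.

# 5000 gross + 250 employer match - (800 + 150 + 60) in taxes and fees
print(take_home_pay(5000, 250, [800, 150, 60]))  # 4240.0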
valid
savings_rate
Calculate the monthly savings rate using the formula laid out by Mr. Money Mustache: http://www.mrmoneymustache.com/2015/01/26/calculating-net-worth/ Args: take_home_pay: float or int, monthly take-home pay spending: float or int, monthly spending numtype: string, 'decimal' or 'float'; the type of number to return. Returns: your monthly savings rate expressed as a percentage.
simple_math/simple_math.py
def savings_rate(take_home_pay, spending, numtype='float'):
    """
    Calculate the monthly savings rate using the formula laid out by
    Mr. Money Mustache:
    http://www.mrmoneymustache.com/2015/01/26/calculating-net-worth/

    Args:
        take_home_pay: float or int, monthly take-home pay

        spending: float or int, monthly spending

        numtype: string, 'decimal' or 'float'; the type of number to return.

    Returns:
        your monthly savings rate expressed as a percentage.
    """
    if numtype == 'decimal':
        try:
            return (
                (Decimal(take_home_pay) - Decimal(spending))
                / (Decimal(take_home_pay))
            ) * Decimal(100.0)
        # Leave InvalidOperation for backwards compatibility
        except (InvalidOperation, DivisionByZero):
            return Decimal(0.0)
    else:
        try:
            return (
                (float(take_home_pay) - float(spending)) / (float(take_home_pay))
            ) * 100.0
        except (ZeroDivisionError):
            return 0.0
def savings_rate(take_home_pay, spending, numtype='float'):
    """
    Calculate the monthly savings rate using the formula laid out by
    Mr. Money Mustache:
    http://www.mrmoneymustache.com/2015/01/26/calculating-net-worth/

    Args:
        take_home_pay: float or int, monthly take-home pay

        spending: float or int, monthly spending

        numtype: string, 'decimal' or 'float'; the type of number to return.

    Returns:
        your monthly savings rate expressed as a percentage.
    """
    if numtype == 'decimal':
        try:
            return (
                (Decimal(take_home_pay) - Decimal(spending))
                / (Decimal(take_home_pay))
            ) * Decimal(100.0)
        # Leave InvalidOperation for backwards compatibility
        except (InvalidOperation, DivisionByZero):
            return Decimal(0.0)
    else:
        try:
            return (
                (float(take_home_pay) - float(spending)) / (float(take_home_pay))
            ) * 100.0
        except (ZeroDivisionError):
            return 0.0
[ "Calculate", "net", "take", "-", "home", "pay", "including", "employer", "retirement", "savings", "match", "using", "the", "formula", "laid", "out", "by", "Mr", ".", "Money", "Mustache", ":", "http", ":", "//", "www", ".", "mrmoneymustache", ".", "com", "/", "2015", "/", "01", "/", "26", "/", "calculating", "-", "net", "-", "worth", "/" ]
bbusenius/Diablo-Python
python
https://github.com/bbusenius/Diablo-Python/blob/646ac5a6f1c79cf9b928a4e2a7979988698b6c82/simple_math/simple_math.py#L458-L489
[ "def", "savings_rate", "(", "take_home_pay", ",", "spending", ",", "numtype", "=", "'float'", ")", ":", "if", "numtype", "==", "'decimal'", ":", "try", ":", "return", "(", "(", "Decimal", "(", "take_home_pay", ")", "-", "Decimal", "(", "spending", ")", ")", "/", "(", "Decimal", "(", "take_home_pay", ")", ")", ")", "*", "Decimal", "(", "100.0", ")", "# Leave InvalidOperation for backwards compatibility", "except", "(", "InvalidOperation", ",", "DivisionByZero", ")", ":", "return", "Decimal", "(", "0.0", ")", "else", ":", "try", ":", "return", "(", "(", "float", "(", "take_home_pay", ")", "-", "float", "(", "spending", ")", ")", "/", "(", "float", "(", "take_home_pay", ")", ")", ")", "*", "100.0", "except", "(", "ZeroDivisionError", ")", ":", "return", "0.0" ]
646ac5a6f1c79cf9b928a4e2a7979988698b6c82
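Illustrative calls for savings_rate (assumed in scope), continuing the made-up figures above; the except clauses turn a zero take-home pay into a 0.0 rate instead of raising.

print(savings_rate(4240.0, 3000.0))  # (4240 - 3000) / 4240 * 100, about 29.25
print(savings_rate(0, 3000.0))       # 0.0, the ZeroDivisionError is swallowed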
valid
get_variable
Read __version__ or other properties from a python file without importing it from gist.github.com/technonik/406623 but with added keyword kwarg
pug/setup_util.py
def get_variable(relpath, keyword='__version__'):
    """Read __version__ or other properties from a python file without importing it
    from gist.github.com/technonik/406623 but with added keyword kwarg
    """
    for line in open(os.path.join(os.path.dirname(__file__), relpath), encoding='cp437'):
        if keyword in line:
            if '"' in line:
                return line.split('"')[1]
            elif "'" in line:
                return line.split("'")[1]
def get_variable(relpath, keyword='__version__'):
    """Read __version__ or other properties from a python file without importing it
    from gist.github.com/technonik/406623 but with added keyword kwarg
    """
    for line in open(os.path.join(os.path.dirname(__file__), relpath), encoding='cp437'):
        if keyword in line:
            if '"' in line:
                return line.split('"')[1]
            elif "'" in line:
                return line.split("'")[1]
[ "Read", "__version__", "or", "other", "properties", "from", "a", "python", "file", "without", "importing", "it", "from", "gist", ".", "github", ".", "com", "/", "technonik", "/", "406623", "but", "with", "added", "keyward", "kwarg" ]
hobson/pug
python
https://github.com/hobson/pug/blob/f183e2b29e0b3efa425a9b75cfe001b28a279acc/pug/setup_util.py#L16-L25
[ "def", "get_variable", "(", "relpath", ",", "keyword", "=", "'__version__'", ")", ":", "for", "line", "in", "open", "(", "os", ".", "path", ".", "join", "(", "os", ".", "path", ".", "dirname", "(", "__file__", ")", ",", "relpath", ")", ",", "encoding", "=", "'cp437'", ")", ":", "if", "keyword", "in", "line", ":", "if", "'\"'", "in", "line", ":", "return", "line", ".", "split", "(", "'\"'", ")", "[", "1", "]", "elif", "\"'\"", "in", "line", ":", "return", "line", ".", "split", "(", "\"'\"", ")", "[", "1", "]" ]
f183e2b29e0b3efa425a9b75cfe001b28a279acc
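A hedged sketch for get_variable: because it joins relpath onto its own module directory via __file__, the easiest way to exercise it standalone is from a script (so __file__ is defined) with an absolute path, which os.path.join passes through unchanged. The file name and contents below are illustrative.

import os

with open('demo_version.py', 'w') as fh:
    fh.write("__version__ = '1.2.3'\n")
    fh.write('__author__ = "Jane Doe"\n')

path = os.path.abspath('demo_version.py')
print(get_variable(path))                        # 1.2.3
print(get_variable(path, keyword='__author__'))  # Jane Doe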
valid
DateTimeUtils.datetime_str_to_timestamp
'2018-01-01 00:00:00' (str) --> 1514736000 :param str datetime_str: datetime string :return: unix timestamp (int) or None :rtype: int or None
eggit/egg_time.py
def datetime_str_to_timestamp(datetime_str): ''' '2018-01-01 00:00:00' (str) --> 1514736000 :param str datetime_str: datetime string :return: unix timestamp (int) or None :rtype: int or None ''' try: dtf = DTFormat() struct_time = time.strptime(datetime_str, dtf.datetime_format) return time.mktime(struct_time) except: return None
def datetime_str_to_timestamp(datetime_str): ''' '2018-01-01 00:00:00' (str) --> 1514736000 :param str datetime_str: datetime string :return: unix timestamp (int) or None :rtype: int or None ''' try: dtf = DTFormat() struct_time = time.strptime(datetime_str, dtf.datetime_format) return time.mktime(struct_time) except: return None
[ "2018", "-", "01", "-", "01", "00", ":", "00", ":", "00", "(", "str", ")", "--", ">", "1514736000" ]
MyJoiT/eggit
python
https://github.com/MyJoiT/eggit/blob/1e20910264ee2fd72c6783f0817572e16ea87bd0/eggit/egg_time.py#L28-L42
[ "def", "datetime_str_to_timestamp", "(", "datetime_str", ")", ":", "try", ":", "dtf", "=", "DTFormat", "(", ")", "struct_time", "=", "time", ".", "strptime", "(", "datetime_str", ",", "dtf", ".", "datetime_format", ")", "return", "time", ".", "mktime", "(", "struct_time", ")", "except", ":", "return", "None" ]
1e20910264ee2fd72c6783f0817572e16ea87bd0
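A hedged example for the record above; it assumes DTFormat().datetime_format is the usual '%Y-%m-%d %H:%M:%S' pattern implied by the docstring. Note that time.mktime interprets the string in local time and returns a float (1514736000.0 only on a UTC+8 machine), and the bare except turns any parse failure into None.

print(DateTimeUtils.datetime_str_to_timestamp('2018-01-01 00:00:00'))  # e.g. 1514736000.0 on UTC+8
print(DateTimeUtils.datetime_str_to_timestamp('not a date'))           # None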
valid
DateTimeUtils.get_datetime_string
Get datetime string from datetime object :param datetime datetime_obj: datetime object :return: datetime string :rtype: str
eggit/egg_time.py
def get_datetime_string(datetime_obj): ''' Get datetime string from datetime object :param datetime datetime_obj: datetime object :return: datetime string :rtype: str ''' if isinstance(datetime_obj, datetime): dft = DTFormat() return datetime_obj.strftime(dft.datetime_format) return None
def get_datetime_string(datetime_obj): ''' Get datetime string from datetime object :param datetime datetime_obj: datetime object :return: datetime string :rtype: str ''' if isinstance(datetime_obj, datetime): dft = DTFormat() return datetime_obj.strftime(dft.datetime_format) return None
[ "Get", "datetime", "string", "from", "datetime", "object" ]
MyJoiT/eggit
python
https://github.com/MyJoiT/eggit/blob/1e20910264ee2fd72c6783f0817572e16ea87bd0/eggit/egg_time.py#L64-L77
[ "def", "get_datetime_string", "(", "datetime_obj", ")", ":", "if", "isinstance", "(", "datetime_obj", ",", "datetime", ")", ":", "dft", "=", "DTFormat", "(", ")", "return", "datetime_obj", ".", "strftime", "(", "dft", ".", "datetime_format", ")", "return", "None" ]
1e20910264ee2fd72c6783f0817572e16ea87bd0
valid
DateTimeUtils.timestamp_to_datetime
1514736000 --> datetime object :param int timestamp: unix timestamp (int) :return: datetime object or None :rtype: datetime or None
eggit/egg_time.py
def timestamp_to_datetime(timestamp): ''' 1514736000 --> datetime object :param int timestamp: unix timestamp (int) :return: datetime object or None :rtype: datetime or None ''' if isinstance(timestamp, (int, float, str)): try: timestamp = float(timestamp) if timestamp.is_integer(): timestamp = int(timestamp) except: return None temp = str(timestamp).split('.')[0] if len(temp) == 13: timestamp = timestamp / 1000.0 if len(temp) < 10: return None else: return None return datetime.fromtimestamp(timestamp)
def timestamp_to_datetime(timestamp): ''' 1514736000 --> datetime object :param int timestamp: unix timestamp (int) :return: datetime object or None :rtype: datetime or None ''' if isinstance(timestamp, (int, float, str)): try: timestamp = float(timestamp) if timestamp.is_integer(): timestamp = int(timestamp) except: return None temp = str(timestamp).split('.')[0] if len(temp) == 13: timestamp = timestamp / 1000.0 if len(temp) < 10: return None else: return None return datetime.fromtimestamp(timestamp)
[ "1514736000", "--", ">", "datetime", "object" ]
MyJoiT/eggit
python
https://github.com/MyJoiT/eggit/blob/1e20910264ee2fd72c6783f0817572e16ea87bd0/eggit/egg_time.py#L92-L119
[ "def", "timestamp_to_datetime", "(", "timestamp", ")", ":", "if", "isinstance", "(", "timestamp", ",", "(", "int", ",", "float", ",", "str", ")", ")", ":", "try", ":", "timestamp", "=", "float", "(", "timestamp", ")", "if", "timestamp", ".", "is_integer", "(", ")", ":", "timestamp", "=", "int", "(", "timestamp", ")", "except", ":", "return", "None", "temp", "=", "str", "(", "timestamp", ")", ".", "split", "(", "'.'", ")", "[", "0", "]", "if", "len", "(", "temp", ")", "==", "13", ":", "timestamp", "=", "timestamp", "/", "1000.0", "if", "len", "(", "temp", ")", "<", "10", ":", "return", "None", "else", ":", "return", "None", "return", "datetime", ".", "fromtimestamp", "(", "timestamp", ")" ]
1e20910264ee2fd72c6783f0817572e16ea87bd0
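Illustrative calls for timestamp_to_datetime (Python 3, class-level calls as with the other DateTimeUtils helpers): 13-digit inputs are treated as milliseconds, and anything unparseable or too short comes back as None.

print(DateTimeUtils.timestamp_to_datetime(1514736000))       # local datetime for that instant
print(DateTimeUtils.timestamp_to_datetime('1514736000000'))  # same instant, millisecond string input
print(DateTimeUtils.timestamp_to_datetime('oops'))           # None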
valid
attr
attr pipe can extract attribute value of object. :param prev: The previous iterator of pipe. :type prev: Pipe :param attr_name: The name of attribute :type attr_name: str :returns: generator
cmdlet/cmds.py
def attr(prev, attr_name): """attr pipe can extract attribute value of object. :param prev: The previous iterator of pipe. :type prev: Pipe :param attr_name: The name of attribute :type attr_name: str :returns: generator """ for obj in prev: if hasattr(obj, attr_name): yield getattr(obj, attr_name)
def attr(prev, attr_name): """attr pipe can extract attribute value of object. :param prev: The previous iterator of pipe. :type prev: Pipe :param attr_name: The name of attribute :type attr_name: str :returns: generator """ for obj in prev: if hasattr(obj, attr_name): yield getattr(obj, attr_name)
[ "attr", "pipe", "can", "extract", "attribute", "value", "of", "object", "." ]
GaryLee/cmdlet
python
https://github.com/GaryLee/cmdlet/blob/5852a63fc2c7dd723a3d7abe18455f8dacb49433/cmdlet/cmds.py#L93-L104
[ "def", "attr", "(", "prev", ",", "attr_name", ")", ":", "for", "obj", "in", "prev", ":", "if", "hasattr", "(", "obj", ",", "attr_name", ")", ":", "yield", "getattr", "(", "obj", ",", "attr_name", ")" ]
5852a63fc2c7dd723a3d7abe18455f8dacb49433
valid
attrs
attrs pipe can extract attribute values of object. If attr_names is a list and its item is not a valid attribute of prev's object. It will be excluded from yielded dict. :param prev: The previous iterator of pipe. :type prev: Pipe :param attr_names: The list of attribute names :type attr_names: str of list :returns: generator
cmdlet/cmds.py
def attrs(prev, attr_names): """attrs pipe can extract attribute values of object. If attr_names is a list and its item is not a valid attribute of prev's object. It will be excluded from yielded dict. :param prev: The previous iterator of pipe. :type prev: Pipe :param attr_names: The list of attribute names :type attr_names: str of list :returns: generator """ for obj in prev: attr_values = [] for name in attr_names: if hasattr(obj, name): attr_values.append(getattr(obj, name)) yield attr_values
def attrs(prev, attr_names): """attrs pipe can extract attribute values of object. If attr_names is a list and its item is not a valid attribute of prev's object. It will be excluded from yielded dict. :param prev: The previous iterator of pipe. :type prev: Pipe :param attr_names: The list of attribute names :type attr_names: str of list :returns: generator """ for obj in prev: attr_values = [] for name in attr_names: if hasattr(obj, name): attr_values.append(getattr(obj, name)) yield attr_values
[ "attrs", "pipe", "can", "extract", "attribute", "values", "of", "object", "." ]
GaryLee/cmdlet
python
https://github.com/GaryLee/cmdlet/blob/5852a63fc2c7dd723a3d7abe18455f8dacb49433/cmdlet/cmds.py#L108-L125
[ "def", "attrs", "(", "prev", ",", "attr_names", ")", ":", "for", "obj", "in", "prev", ":", "attr_values", "=", "[", "]", "for", "name", "in", "attr_names", ":", "if", "hasattr", "(", "obj", ",", "name", ")", ":", "attr_values", ".", "append", "(", "getattr", "(", "obj", ",", "name", ")", ")", "yield", "attr_values" ]
5852a63fc2c7dd723a3d7abe18455f8dacb49433
valid
attrdict
attrdict pipe can extract attribute values of object into a dict. The argument attr_names can be a list or a dict. If attr_names is a list and its item is not a valid attribute of prev's object. It will be excluded from yielded dict. If attr_names is dict and the key doesn't exist in prev's object. the value of corresponding attr_names key will be copy to yielded dict. :param prev: The previous iterator of pipe. :type prev: Pipe :param attr_names: The list or dict of attribute names :type attr_names: str of list or dict :returns: generator
cmdlet/cmds.py
def attrdict(prev, attr_names): """attrdict pipe can extract attribute values of object into a dict. The argument attr_names can be a list or a dict. If attr_names is a list and its item is not a valid attribute of prev's object. It will be excluded from yielded dict. If attr_names is dict and the key doesn't exist in prev's object. the value of corresponding attr_names key will be copy to yielded dict. :param prev: The previous iterator of pipe. :type prev: Pipe :param attr_names: The list or dict of attribute names :type attr_names: str of list or dict :returns: generator """ if isinstance(attr_names, dict): for obj in prev: attr_values = dict() for name in attr_names.keys(): if hasattr(obj, name): attr_values[name] = getattr(obj, name) else: attr_values[name] = attr_names[name] yield attr_values else: for obj in prev: attr_values = dict() for name in attr_names: if hasattr(obj, name): attr_values[name] = getattr(obj, name) yield attr_values
def attrdict(prev, attr_names): """attrdict pipe can extract attribute values of object into a dict. The argument attr_names can be a list or a dict. If attr_names is a list and its item is not a valid attribute of prev's object. It will be excluded from yielded dict. If attr_names is dict and the key doesn't exist in prev's object. the value of corresponding attr_names key will be copy to yielded dict. :param prev: The previous iterator of pipe. :type prev: Pipe :param attr_names: The list or dict of attribute names :type attr_names: str of list or dict :returns: generator """ if isinstance(attr_names, dict): for obj in prev: attr_values = dict() for name in attr_names.keys(): if hasattr(obj, name): attr_values[name] = getattr(obj, name) else: attr_values[name] = attr_names[name] yield attr_values else: for obj in prev: attr_values = dict() for name in attr_names: if hasattr(obj, name): attr_values[name] = getattr(obj, name) yield attr_values
[ "attrdict", "pipe", "can", "extract", "attribute", "values", "of", "object", "into", "a", "dict", "." ]
GaryLee/cmdlet
python
https://github.com/GaryLee/cmdlet/blob/5852a63fc2c7dd723a3d7abe18455f8dacb49433/cmdlet/cmds.py#L129-L161
[ "def", "attrdict", "(", "prev", ",", "attr_names", ")", ":", "if", "isinstance", "(", "attr_names", ",", "dict", ")", ":", "for", "obj", "in", "prev", ":", "attr_values", "=", "dict", "(", ")", "for", "name", "in", "attr_names", ".", "keys", "(", ")", ":", "if", "hasattr", "(", "obj", ",", "name", ")", ":", "attr_values", "[", "name", "]", "=", "getattr", "(", "obj", ",", "name", ")", "else", ":", "attr_values", "[", "name", "]", "=", "attr_names", "[", "name", "]", "yield", "attr_values", "else", ":", "for", "obj", "in", "prev", ":", "attr_values", "=", "dict", "(", ")", "for", "name", "in", "attr_names", ":", "if", "hasattr", "(", "obj", ",", "name", ")", ":", "attr_values", "[", "name", "]", "=", "getattr", "(", "obj", ",", "name", ")", "yield", "attr_values" ]
5852a63fc2c7dd723a3d7abe18455f8dacb49433
valid
flatten
flatten pipe extracts nested item from previous pipe. :param prev: The previous iterator of pipe. :type prev: Pipe :param depth: The deepest nested level to be extracted. 0 means no extraction. :type depth: integer :returns: generator
cmdlet/cmds.py
def flatten(prev, depth=sys.maxsize): """flatten pipe extracts nested item from previous pipe. :param prev: The previous iterator of pipe. :type prev: Pipe :param depth: The deepest nested level to be extracted. 0 means no extraction. :type depth: integer :returns: generator """ def inner_flatten(iterable, curr_level, max_levels): for i in iterable: if hasattr(i, '__iter__') and curr_level < max_levels: for j in inner_flatten(i, curr_level + 1, max_levels): yield j else: yield i for d in prev: if hasattr(d, '__iter__') and depth > 0: for inner_d in inner_flatten(d, 1, depth): yield inner_d else: yield d
def flatten(prev, depth=sys.maxsize): """flatten pipe extracts nested item from previous pipe. :param prev: The previous iterator of pipe. :type prev: Pipe :param depth: The deepest nested level to be extracted. 0 means no extraction. :type depth: integer :returns: generator """ def inner_flatten(iterable, curr_level, max_levels): for i in iterable: if hasattr(i, '__iter__') and curr_level < max_levels: for j in inner_flatten(i, curr_level + 1, max_levels): yield j else: yield i for d in prev: if hasattr(d, '__iter__') and depth > 0: for inner_d in inner_flatten(d, 1, depth): yield inner_d else: yield d
[ "flatten", "pipe", "extracts", "nested", "item", "from", "previous", "pipe", "." ]
GaryLee/cmdlet
python
https://github.com/GaryLee/cmdlet/blob/5852a63fc2c7dd723a3d7abe18455f8dacb49433/cmdlet/cmds.py#L165-L187
[ "def", "flatten", "(", "prev", ",", "depth", "=", "sys", ".", "maxsize", ")", ":", "def", "inner_flatten", "(", "iterable", ",", "curr_level", ",", "max_levels", ")", ":", "for", "i", "in", "iterable", ":", "if", "hasattr", "(", "i", ",", "'__iter__'", ")", "and", "curr_level", "<", "max_levels", ":", "for", "j", "in", "inner_flatten", "(", "i", ",", "curr_level", "+", "1", ",", "max_levels", ")", ":", "yield", "j", "else", ":", "yield", "i", "for", "d", "in", "prev", ":", "if", "hasattr", "(", "d", ",", "'__iter__'", ")", "and", "depth", ">", "0", ":", "for", "inner_d", "in", "inner_flatten", "(", "d", ",", "1", ",", "depth", ")", ":", "yield", "inner_d", "else", ":", "yield", "d" ]
5852a63fc2c7dd723a3d7abe18455f8dacb49433
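In cmdlet these generators are normally wrapped into Pipe objects and chained with |, so the sketch below deliberately calls the body shown in the record as a plain generator, only to illustrate the depth logic; it is not the library's public calling convention.

nested = [1, [2, [3, 4]], 5]
print(list(flatten(iter([nested]))))           # fully flattened: [1, 2, 3, 4, 5]
print(list(flatten(iter([nested]), depth=1)))  # only the outer list expands: [1, [2, [3, 4]], 5]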
valid
values
values pipe extract value from previous pipe. If the previous pipe sends a dictionary to the values pipe, keys should contain the keys of the dictionary which you want to get. If the previous pipe sends a list or tuple, keys should contain the indexes of the items which you want to get. :param prev: The previous iterator of pipe. :type prev: Pipe :returns: generator
cmdlet/cmds.py
def values(prev, *keys, **kw):
    """values pipe extract value from previous pipe.

    If the previous pipe sends a dictionary to the values pipe, keys should
    contain the keys of the dictionary which you want to get. If the previous
    pipe sends a list or tuple, keys should contain the indexes of the items
    which you want to get.

    :param prev: The previous iterator of pipe.
    :type prev: Pipe
    :returns: generator
    """
    d = next(prev)
    if isinstance(d, dict):
        yield [d[k] for k in keys if k in d]
        for d in prev:
            yield [d[k] for k in keys if k in d]
    else:
        yield [d[i] for i in keys if 0 <= i < len(d)]
        for d in prev:
            yield [d[i] for i in keys if 0 <= i < len(d)]
def values(prev, *keys, **kw):
    """values pipe extract value from previous pipe.

    If the previous pipe sends a dictionary to the values pipe, keys should
    contain the keys of the dictionary which you want to get. If the previous
    pipe sends a list or tuple, keys should contain the indexes of the items
    which you want to get.

    :param prev: The previous iterator of pipe.
    :type prev: Pipe
    :returns: generator
    """
    d = next(prev)
    if isinstance(d, dict):
        yield [d[k] for k in keys if k in d]
        for d in prev:
            yield [d[k] for k in keys if k in d]
    else:
        yield [d[i] for i in keys if 0 <= i < len(d)]
        for d in prev:
            yield [d[i] for i in keys if 0 <= i < len(d)]
[ "values", "pipe", "extract", "value", "from", "previous", "pipe", "." ]
GaryLee/cmdlet
python
https://github.com/GaryLee/cmdlet/blob/5852a63fc2c7dd723a3d7abe18455f8dacb49433/cmdlet/cmds.py#L191-L210
[ "def", "values", "(", "prev", ",", "*", "keys", ",", "*", "*", "kw", ")", ":", "d", "=", "next", "(", "prev", ")", "if", "isinstance", "(", "d", ",", "dict", ")", ":", "yield", "[", "d", "[", "k", "]", "for", "k", "in", "keys", "if", "k", "in", "d", "]", "for", "d", "in", "prev", ":", "yield", "[", "d", "[", "k", "]", "for", "k", "in", "keys", "if", "k", "in", "d", "]", "else", ":", "yield", "[", "d", "[", "i", "]", "for", "i", "in", "keys", "if", "0", "<=", "i", "<", "len", "(", "d", ")", "]", "for", "d", "in", "prev", ":", "yield", "[", "d", "[", "i", "]", "for", "i", "in", "keys", "if", "0", "<=", "i", "<", "len", "(", "d", ")", "]" ]
5852a63fc2c7dd723a3d7abe18455f8dacb49433
valid
pack
pack pipe takes n elements from previous generator and yield one list to next. :param prev: The previous iterator of pipe. :type prev: Pipe :param rest: Set True to allow to output the rest part of last elements. :type prev: boolean :param padding: Specify the padding element for the rest part of last elements. :type prev: boolean :returns: generator :Example: >>> result([1,2,3,4,5,6,7] | pack(3)) [[1, 2, 3], [4, 5, 6]] >>> result([1,2,3,4,5,6,7] | pack(3, rest=True)) [[1, 2, 3], [4, 5, 6], [7,]] >>> result([1,2,3,4,5,6,7] | pack(3, padding=None)) [[1, 2, 3], [4, 5, 6], [7, None, None]]
cmdlet/cmds.py
def pack(prev, n, rest=False, **kw): """pack pipe takes n elements from previous generator and yield one list to next. :param prev: The previous iterator of pipe. :type prev: Pipe :param rest: Set True to allow to output the rest part of last elements. :type prev: boolean :param padding: Specify the padding element for the rest part of last elements. :type prev: boolean :returns: generator :Example: >>> result([1,2,3,4,5,6,7] | pack(3)) [[1, 2, 3], [4, 5, 6]] >>> result([1,2,3,4,5,6,7] | pack(3, rest=True)) [[1, 2, 3], [4, 5, 6], [7,]] >>> result([1,2,3,4,5,6,7] | pack(3, padding=None)) [[1, 2, 3], [4, 5, 6], [7, None, None]] """ if 'padding' in kw: use_padding = True padding = kw['padding'] else: use_padding = False padding = None items = [] for i, data in enumerate(prev, 1): items.append(data) if (i % n) == 0: yield items items = [] if len(items) != 0 and rest: if use_padding: items.extend([padding, ] * (n - (i % n))) yield items
def pack(prev, n, rest=False, **kw): """pack pipe takes n elements from previous generator and yield one list to next. :param prev: The previous iterator of pipe. :type prev: Pipe :param rest: Set True to allow to output the rest part of last elements. :type prev: boolean :param padding: Specify the padding element for the rest part of last elements. :type prev: boolean :returns: generator :Example: >>> result([1,2,3,4,5,6,7] | pack(3)) [[1, 2, 3], [4, 5, 6]] >>> result([1,2,3,4,5,6,7] | pack(3, rest=True)) [[1, 2, 3], [4, 5, 6], [7,]] >>> result([1,2,3,4,5,6,7] | pack(3, padding=None)) [[1, 2, 3], [4, 5, 6], [7, None, None]] """ if 'padding' in kw: use_padding = True padding = kw['padding'] else: use_padding = False padding = None items = [] for i, data in enumerate(prev, 1): items.append(data) if (i % n) == 0: yield items items = [] if len(items) != 0 and rest: if use_padding: items.extend([padding, ] * (n - (i % n))) yield items
[ "pack", "pipe", "takes", "n", "elements", "from", "previous", "generator", "and", "yield", "one", "list", "to", "next", "." ]
GaryLee/cmdlet
python
https://github.com/GaryLee/cmdlet/blob/5852a63fc2c7dd723a3d7abe18455f8dacb49433/cmdlet/cmds.py#L248-L287
[ "def", "pack", "(", "prev", ",", "n", ",", "rest", "=", "False", ",", "*", "*", "kw", ")", ":", "if", "'padding'", "in", "kw", ":", "use_padding", "=", "True", "padding", "=", "kw", "[", "'padding'", "]", "else", ":", "use_padding", "=", "False", "padding", "=", "None", "items", "=", "[", "]", "for", "i", ",", "data", "in", "enumerate", "(", "prev", ",", "1", ")", ":", "items", ".", "append", "(", "data", ")", "if", "(", "i", "%", "n", ")", "==", "0", ":", "yield", "items", "items", "=", "[", "]", "if", "len", "(", "items", ")", "!=", "0", "and", "rest", ":", "if", "use_padding", ":", "items", ".", "extend", "(", "[", "padding", ",", "]", "*", "(", "n", "-", "(", "i", "%", "n", ")", ")", ")", "yield", "items" ]
5852a63fc2c7dd723a3d7abe18455f8dacb49433
valid
grep
The pipe greps the data passed from previous generator according to given regular expression. :param prev: The previous iterator of pipe. :type prev: Pipe :param pattern: The pattern which used to filter out data. :type pattern: str|unicode|re pattern object :param inv: If true, invert the match condition. :type inv: boolean :param kw: :type kw: dict :returns: generator
cmdlet/cmds.py
def grep(prev, pattern, *args, **kw): """The pipe greps the data passed from previous generator according to given regular expression. :param prev: The previous iterator of pipe. :type prev: Pipe :param pattern: The pattern which used to filter out data. :type pattern: str|unicode|re pattern object :param inv: If true, invert the match condition. :type inv: boolean :param kw: :type kw: dict :returns: generator """ inv = False if 'inv' not in kw else kw.pop('inv') pattern_obj = re.compile(pattern, *args, **kw) for data in prev: if bool(inv) ^ bool(pattern_obj.match(data)): yield data
def grep(prev, pattern, *args, **kw): """The pipe greps the data passed from previous generator according to given regular expression. :param prev: The previous iterator of pipe. :type prev: Pipe :param pattern: The pattern which used to filter out data. :type pattern: str|unicode|re pattern object :param inv: If true, invert the match condition. :type inv: boolean :param kw: :type kw: dict :returns: generator """ inv = False if 'inv' not in kw else kw.pop('inv') pattern_obj = re.compile(pattern, *args, **kw) for data in prev: if bool(inv) ^ bool(pattern_obj.match(data)): yield data
[ "The", "pipe", "greps", "the", "data", "passed", "from", "previous", "generator", "according", "to", "given", "regular", "expression", "." ]
GaryLee/cmdlet
python
https://github.com/GaryLee/cmdlet/blob/5852a63fc2c7dd723a3d7abe18455f8dacb49433/cmdlet/cmds.py#L307-L326
[ "def", "grep", "(", "prev", ",", "pattern", ",", "*", "args", ",", "*", "*", "kw", ")", ":", "inv", "=", "False", "if", "'inv'", "not", "in", "kw", "else", "kw", ".", "pop", "(", "'inv'", ")", "pattern_obj", "=", "re", ".", "compile", "(", "pattern", ",", "*", "args", ",", "*", "*", "kw", ")", "for", "data", "in", "prev", ":", "if", "bool", "(", "inv", ")", "^", "bool", "(", "pattern_obj", ".", "match", "(", "data", ")", ")", ":", "yield", "data" ]
5852a63fc2c7dd723a3d7abe18455f8dacb49433
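As with the other cmdlet pipes, grep is normally used through the | operator; the direct generator calls below are only a hedged illustration of the inv flag. Note that re.match anchors at the start of each string.

lines = ['apple pie', 'banana', 'apricot']
print(list(grep(iter(lines), r'ap')))            # ['apple pie', 'apricot']
print(list(grep(iter(lines), r'ap', inv=True)))  # ['banana']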
valid
match
The pipe greps the data passed from previous generator according to given regular expression. The data passed to next pipe is MatchObject , dict or tuple which determined by 'to' in keyword argument. By default, match pipe yields MatchObject. Use 'to' in keyword argument to change the type of match result. If 'to' is dict, yield MatchObject.groupdict(). If 'to' is tuple, yield MatchObject.groups(). If 'to' is list, yield list(MatchObject.groups()). :param prev: The previous iterator of pipe. :type prev: Pipe :param pattern: The pattern which used to filter data. :type pattern: str|unicode :param to: What data type the result should be stored. dict|tuple|list :type to: type :returns: generator
cmdlet/cmds.py
def match(prev, pattern, *args, **kw): """The pipe greps the data passed from previous generator according to given regular expression. The data passed to next pipe is MatchObject , dict or tuple which determined by 'to' in keyword argument. By default, match pipe yields MatchObject. Use 'to' in keyword argument to change the type of match result. If 'to' is dict, yield MatchObject.groupdict(). If 'to' is tuple, yield MatchObject.groups(). If 'to' is list, yield list(MatchObject.groups()). :param prev: The previous iterator of pipe. :type prev: Pipe :param pattern: The pattern which used to filter data. :type pattern: str|unicode :param to: What data type the result should be stored. dict|tuple|list :type to: type :returns: generator """ to = 'to' in kw and kw.pop('to') pattern_obj = re.compile(pattern, *args, **kw) if to is dict: for data in prev: match = pattern_obj.match(data) if match is not None: yield match.groupdict() elif to is tuple: for data in prev: match = pattern_obj.match(data) if match is not None: yield match.groups() elif to is list: for data in prev: match = pattern_obj.match(data) if match is not None: yield list(match.groups()) else: for data in prev: match = pattern_obj.match(data) if match is not None: yield match
def match(prev, pattern, *args, **kw): """The pipe greps the data passed from previous generator according to given regular expression. The data passed to next pipe is MatchObject , dict or tuple which determined by 'to' in keyword argument. By default, match pipe yields MatchObject. Use 'to' in keyword argument to change the type of match result. If 'to' is dict, yield MatchObject.groupdict(). If 'to' is tuple, yield MatchObject.groups(). If 'to' is list, yield list(MatchObject.groups()). :param prev: The previous iterator of pipe. :type prev: Pipe :param pattern: The pattern which used to filter data. :type pattern: str|unicode :param to: What data type the result should be stored. dict|tuple|list :type to: type :returns: generator """ to = 'to' in kw and kw.pop('to') pattern_obj = re.compile(pattern, *args, **kw) if to is dict: for data in prev: match = pattern_obj.match(data) if match is not None: yield match.groupdict() elif to is tuple: for data in prev: match = pattern_obj.match(data) if match is not None: yield match.groups() elif to is list: for data in prev: match = pattern_obj.match(data) if match is not None: yield list(match.groups()) else: for data in prev: match = pattern_obj.match(data) if match is not None: yield match
[ "The", "pipe", "greps", "the", "data", "passed", "from", "previous", "generator", "according", "to", "given", "regular", "expression", ".", "The", "data", "passed", "to", "next", "pipe", "is", "MatchObject", "dict", "or", "tuple", "which", "determined", "by", "to", "in", "keyword", "argument", "." ]
GaryLee/cmdlet
python
https://github.com/GaryLee/cmdlet/blob/5852a63fc2c7dd723a3d7abe18455f8dacb49433/cmdlet/cmds.py#L329-L371
[ "def", "match", "(", "prev", ",", "pattern", ",", "*", "args", ",", "*", "*", "kw", ")", ":", "to", "=", "'to'", "in", "kw", "and", "kw", ".", "pop", "(", "'to'", ")", "pattern_obj", "=", "re", ".", "compile", "(", "pattern", ",", "*", "args", ",", "*", "*", "kw", ")", "if", "to", "is", "dict", ":", "for", "data", "in", "prev", ":", "match", "=", "pattern_obj", ".", "match", "(", "data", ")", "if", "match", "is", "not", "None", ":", "yield", "match", ".", "groupdict", "(", ")", "elif", "to", "is", "tuple", ":", "for", "data", "in", "prev", ":", "match", "=", "pattern_obj", ".", "match", "(", "data", ")", "if", "match", "is", "not", "None", ":", "yield", "match", ".", "groups", "(", ")", "elif", "to", "is", "list", ":", "for", "data", "in", "prev", ":", "match", "=", "pattern_obj", ".", "match", "(", "data", ")", "if", "match", "is", "not", "None", ":", "yield", "list", "(", "match", ".", "groups", "(", ")", ")", "else", ":", "for", "data", "in", "prev", ":", "match", "=", "pattern_obj", ".", "match", "(", "data", ")", "if", "match", "is", "not", "None", ":", "yield", "match" ]
5852a63fc2c7dd723a3d7abe18455f8dacb49433
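A hedged illustration of the to keyword, again calling the generator body directly; the named groups and log lines are assumptions of the example, not of the library. Lines that do not match are silently dropped.

log = ['user=alice id=3', 'malformed line', 'user=bob id=7']
pattern = r'user=(?P<name>\w+) id=(?P<id>\d+)'
print(list(match(iter(log), pattern, to=dict)))   # [{'name': 'alice', 'id': '3'}, {'name': 'bob', 'id': '7'}]
print(list(match(iter(log), pattern, to=tuple)))  # [('alice', '3'), ('bob', '7')]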
valid
resplit
The resplit pipe split previous pipe input by regular expression. Use 'maxsplit' keyword argument to limit the number of split. :param prev: The previous iterator of pipe. :type prev: Pipe :param pattern: The pattern which used to split string. :type pattern: str|unicode
cmdlet/cmds.py
def resplit(prev, pattern, *args, **kw): """The resplit pipe split previous pipe input by regular expression. Use 'maxsplit' keyword argument to limit the number of split. :param prev: The previous iterator of pipe. :type prev: Pipe :param pattern: The pattern which used to split string. :type pattern: str|unicode """ maxsplit = 0 if 'maxsplit' not in kw else kw.pop('maxsplit') pattern_obj = re.compile(pattern, *args, **kw) for s in prev: yield pattern_obj.split(s, maxsplit=maxsplit)
def resplit(prev, pattern, *args, **kw): """The resplit pipe split previous pipe input by regular expression. Use 'maxsplit' keyword argument to limit the number of split. :param prev: The previous iterator of pipe. :type prev: Pipe :param pattern: The pattern which used to split string. :type pattern: str|unicode """ maxsplit = 0 if 'maxsplit' not in kw else kw.pop('maxsplit') pattern_obj = re.compile(pattern, *args, **kw) for s in prev: yield pattern_obj.split(s, maxsplit=maxsplit)
[ "The", "resplit", "pipe", "split", "previous", "pipe", "input", "by", "regular", "expression", "." ]
GaryLee/cmdlet
python
https://github.com/GaryLee/cmdlet/blob/5852a63fc2c7dd723a3d7abe18455f8dacb49433/cmdlet/cmds.py#L375-L388
[ "def", "resplit", "(", "prev", ",", "pattern", ",", "*", "args", ",", "*", "*", "kw", ")", ":", "maxsplit", "=", "0", "if", "'maxsplit'", "not", "in", "kw", "else", "kw", ".", "pop", "(", "'maxsplit'", ")", "pattern_obj", "=", "re", ".", "compile", "(", "pattern", ",", "*", "args", ",", "*", "*", "kw", ")", "for", "s", "in", "prev", ":", "yield", "pattern_obj", ".", "split", "(", "s", ",", "maxsplit", "=", "maxsplit", ")" ]
5852a63fc2c7dd723a3d7abe18455f8dacb49433
valid
sub
sub pipe is a wrapper of re.sub method. :param prev: The previous iterator of pipe. :type prev: Pipe :param pattern: The pattern string. :type pattern: str|unicode :param repl: Check repl argument in re.sub method. :type repl: str|unicode|callable
cmdlet/cmds.py
def sub(prev, pattern, repl, *args, **kw): """sub pipe is a wrapper of re.sub method. :param prev: The previous iterator of pipe. :type prev: Pipe :param pattern: The pattern string. :type pattern: str|unicode :param repl: Check repl argument in re.sub method. :type repl: str|unicode|callable """ count = 0 if 'count' not in kw else kw.pop('count') pattern_obj = re.compile(pattern, *args, **kw) for s in prev: yield pattern_obj.sub(repl, s, count=count)
def sub(prev, pattern, repl, *args, **kw): """sub pipe is a wrapper of re.sub method. :param prev: The previous iterator of pipe. :type prev: Pipe :param pattern: The pattern string. :type pattern: str|unicode :param repl: Check repl argument in re.sub method. :type repl: str|unicode|callable """ count = 0 if 'count' not in kw else kw.pop('count') pattern_obj = re.compile(pattern, *args, **kw) for s in prev: yield pattern_obj.sub(repl, s, count=count)
[ "sub", "pipe", "is", "a", "wrapper", "of", "re", ".", "sub", "method", "." ]
GaryLee/cmdlet
python
https://github.com/GaryLee/cmdlet/blob/5852a63fc2c7dd723a3d7abe18455f8dacb49433/cmdlet/cmds.py#L392-L405
[ "def", "sub", "(", "prev", ",", "pattern", ",", "repl", ",", "*", "args", ",", "*", "*", "kw", ")", ":", "count", "=", "0", "if", "'count'", "not", "in", "kw", "else", "kw", ".", "pop", "(", "'count'", ")", "pattern_obj", "=", "re", ".", "compile", "(", "pattern", ",", "*", "args", ",", "*", "*", "kw", ")", "for", "s", "in", "prev", ":", "yield", "pattern_obj", ".", "sub", "(", "repl", ",", "s", ",", "count", "=", "count", ")" ]
5852a63fc2c7dd723a3d7abe18455f8dacb49433
valid
wildcard
wildcard pipe greps data passed from previous generator according to given regular expression. :param prev: The previous iterator of pipe. :type prev: Pipe :param pattern: The wildcard string which used to filter data. :type pattern: str|unicode|re pattern object :param inv: If true, invert the match condition. :type inv: boolean :returns: generator
cmdlet/cmds.py
def wildcard(prev, pattern, *args, **kw): """wildcard pipe greps data passed from previous generator according to given regular expression. :param prev: The previous iterator of pipe. :type prev: Pipe :param pattern: The wildcard string which used to filter data. :type pattern: str|unicode|re pattern object :param inv: If true, invert the match condition. :type inv: boolean :returns: generator """ import fnmatch inv = 'inv' in kw and kw.pop('inv') pattern_obj = re.compile(fnmatch.translate(pattern), *args, **kw) if not inv: for data in prev: if pattern_obj.match(data): yield data else: for data in prev: if not pattern_obj.match(data): yield data
def wildcard(prev, pattern, *args, **kw): """wildcard pipe greps data passed from previous generator according to given regular expression. :param prev: The previous iterator of pipe. :type prev: Pipe :param pattern: The wildcard string which used to filter data. :type pattern: str|unicode|re pattern object :param inv: If true, invert the match condition. :type inv: boolean :returns: generator """ import fnmatch inv = 'inv' in kw and kw.pop('inv') pattern_obj = re.compile(fnmatch.translate(pattern), *args, **kw) if not inv: for data in prev: if pattern_obj.match(data): yield data else: for data in prev: if not pattern_obj.match(data): yield data
[ "wildcard", "pipe", "greps", "data", "passed", "from", "previous", "generator", "according", "to", "given", "regular", "expression", "." ]
GaryLee/cmdlet
python
https://github.com/GaryLee/cmdlet/blob/5852a63fc2c7dd723a3d7abe18455f8dacb49433/cmdlet/cmds.py#L426-L450
[ "def", "wildcard", "(", "prev", ",", "pattern", ",", "*", "args", ",", "*", "*", "kw", ")", ":", "import", "fnmatch", "inv", "=", "'inv'", "in", "kw", "and", "kw", ".", "pop", "(", "'inv'", ")", "pattern_obj", "=", "re", ".", "compile", "(", "fnmatch", ".", "translate", "(", "pattern", ")", ",", "*", "args", ",", "*", "*", "kw", ")", "if", "not", "inv", ":", "for", "data", "in", "prev", ":", "if", "pattern_obj", ".", "match", "(", "data", ")", ":", "yield", "data", "else", ":", "for", "data", "in", "prev", ":", "if", "not", "pattern_obj", ".", "match", "(", "data", ")", ":", "yield", "data" ]
5852a63fc2c7dd723a3d7abe18455f8dacb49433
valid
stdout
This pipe read data from previous iterator and write it to stdout. :param prev: The previous iterator of pipe. :type prev: Pipe :param endl: The end-of-line symbol for each output. :type endl: str :param thru: If true, data will passed to next generator. If false, data will be dropped. :type thru: bool :returns: generator
cmdlet/cmds.py
def stdout(prev, endl='\n', thru=False): """This pipe read data from previous iterator and write it to stdout. :param prev: The previous iterator of pipe. :type prev: Pipe :param endl: The end-of-line symbol for each output. :type endl: str :param thru: If true, data will passed to next generator. If false, data will be dropped. :type thru: bool :returns: generator """ for i in prev: sys.stdout.write(str(i) + endl) if thru: yield i
def stdout(prev, endl='\n', thru=False): """This pipe read data from previous iterator and write it to stdout. :param prev: The previous iterator of pipe. :type prev: Pipe :param endl: The end-of-line symbol for each output. :type endl: str :param thru: If true, data will passed to next generator. If false, data will be dropped. :type thru: bool :returns: generator """ for i in prev: sys.stdout.write(str(i) + endl) if thru: yield i
[ "This", "pipe", "read", "data", "from", "previous", "iterator", "and", "write", "it", "to", "stdout", "." ]
GaryLee/cmdlet
python
https://github.com/GaryLee/cmdlet/blob/5852a63fc2c7dd723a3d7abe18455f8dacb49433/cmdlet/cmds.py#L454-L469
[ "def", "stdout", "(", "prev", ",", "endl", "=", "'\\n'", ",", "thru", "=", "False", ")", ":", "for", "i", "in", "prev", ":", "sys", ".", "stdout", ".", "write", "(", "str", "(", "i", ")", "+", "endl", ")", "if", "thru", ":", "yield", "i" ]
5852a63fc2c7dd723a3d7abe18455f8dacb49433
valid
readline
This pipe get filenames or file object from previous pipe and read the content of file. Then, send the content of file line by line to next pipe. The start and end parameters are used to limit the range of reading from file. :param prev: The previous iterator of pipe. :type prev: Pipe :param filename: The files to be read. If None, use previous pipe input as filenames. :type filename: None|str|unicode|list|tuple :param mode: The mode to open file. default is 'r' :type mode: str :param trim: The function to trim the line before send to next pipe. :type trim: function object. :param start: if star is specified, only line number larger or equal to start will be sent. :type start: integer :param end: The last line number to read. :type end: integer :returns: generator
cmdlet/cmds.py
def readline(prev, filename=None, mode='r', trim=str.rstrip, start=1, end=sys.maxsize): """This pipe get filenames or file object from previous pipe and read the content of file. Then, send the content of file line by line to next pipe. The start and end parameters are used to limit the range of reading from file. :param prev: The previous iterator of pipe. :type prev: Pipe :param filename: The files to be read. If None, use previous pipe input as filenames. :type filename: None|str|unicode|list|tuple :param mode: The mode to open file. default is 'r' :type mode: str :param trim: The function to trim the line before send to next pipe. :type trim: function object. :param start: if star is specified, only line number larger or equal to start will be sent. :type start: integer :param end: The last line number to read. :type end: integer :returns: generator """ if prev is None: if filename is None: raise Exception('No input available for readline.') elif is_str_type(filename): file_list = [filename, ] else: file_list = filename else: file_list = prev for fn in file_list: if isinstance(fn, file_type): fd = fn else: fd = open(fn, mode) try: if start <= 1 and end == sys.maxsize: for line in fd: yield trim(line) else: for line_no, line in enumerate(fd, 1): if line_no < start: continue yield trim(line) if line_no >= end: break finally: if fd != fn: fd.close()
def readline(prev, filename=None, mode='r', trim=str.rstrip, start=1, end=sys.maxsize): """This pipe get filenames or file object from previous pipe and read the content of file. Then, send the content of file line by line to next pipe. The start and end parameters are used to limit the range of reading from file. :param prev: The previous iterator of pipe. :type prev: Pipe :param filename: The files to be read. If None, use previous pipe input as filenames. :type filename: None|str|unicode|list|tuple :param mode: The mode to open file. default is 'r' :type mode: str :param trim: The function to trim the line before send to next pipe. :type trim: function object. :param start: if star is specified, only line number larger or equal to start will be sent. :type start: integer :param end: The last line number to read. :type end: integer :returns: generator """ if prev is None: if filename is None: raise Exception('No input available for readline.') elif is_str_type(filename): file_list = [filename, ] else: file_list = filename else: file_list = prev for fn in file_list: if isinstance(fn, file_type): fd = fn else: fd = open(fn, mode) try: if start <= 1 and end == sys.maxsize: for line in fd: yield trim(line) else: for line_no, line in enumerate(fd, 1): if line_no < start: continue yield trim(line) if line_no >= end: break finally: if fd != fn: fd.close()
[ "This", "pipe", "get", "filenames", "or", "file", "object", "from", "previous", "pipe", "and", "read", "the", "content", "of", "file", ".", "Then", "send", "the", "content", "of", "file", "line", "by", "line", "to", "next", "pipe", "." ]
GaryLee/cmdlet
python
https://github.com/GaryLee/cmdlet/blob/5852a63fc2c7dd723a3d7abe18455f8dacb49433/cmdlet/cmds.py#L490-L539
[ "def", "readline", "(", "prev", ",", "filename", "=", "None", ",", "mode", "=", "'r'", ",", "trim", "=", "str", ".", "rstrip", ",", "start", "=", "1", ",", "end", "=", "sys", ".", "maxsize", ")", ":", "if", "prev", "is", "None", ":", "if", "filename", "is", "None", ":", "raise", "Exception", "(", "'No input available for readline.'", ")", "elif", "is_str_type", "(", "filename", ")", ":", "file_list", "=", "[", "filename", ",", "]", "else", ":", "file_list", "=", "filename", "else", ":", "file_list", "=", "prev", "for", "fn", "in", "file_list", ":", "if", "isinstance", "(", "fn", ",", "file_type", ")", ":", "fd", "=", "fn", "else", ":", "fd", "=", "open", "(", "fn", ",", "mode", ")", "try", ":", "if", "start", "<=", "1", "and", "end", "==", "sys", ".", "maxsize", ":", "for", "line", "in", "fd", ":", "yield", "trim", "(", "line", ")", "else", ":", "for", "line_no", ",", "line", "in", "enumerate", "(", "fd", ",", "1", ")", ":", "if", "line_no", "<", "start", ":", "continue", "yield", "trim", "(", "line", ")", "if", "line_no", ">=", "end", ":", "break", "finally", ":", "if", "fd", "!=", "fn", ":", "fd", ".", "close", "(", ")" ]
5852a63fc2c7dd723a3d7abe18455f8dacb49433
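A usage sketch for readline as the head of a pipe: read the first ten lines of a file. readline and result are assumed to be importable from cmdlet.cmds, and 'notes.txt' is a hypothetical file name used only for illustration.

from cmdlet.cmds import result, readline  # assumed import path

# With no upstream pipe, the filename argument is required; start and end
# bound the line numbers inclusively, and each line is rstrip()-ed by default.
first_ten = result(readline('notes.txt', start=1, end=10))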
valid
fileobj
This pipe reads/writes data from/to the file object specified by file_handle. :param prev: The previous iterator of pipe. :type prev: Pipe :param file_handle: The file object to read or write. :type file_handle: file object :param endl: The end-of-line symbol for each output. :type endl: str :param thru: If true, data will be passed to the next generator. If false, data will be dropped. :type thru: bool :returns: generator
cmdlet/cmds.py
def fileobj(prev, file_handle, endl='', thru=False): """This pipe read/write data from/to file object which specified by file_handle. :param prev: The previous iterator of pipe. :type prev: Pipe :param file_handle: The file object to read or write. :type file_handle: file object :param endl: The end-of-line symbol for each output. :type endl: str :param thru: If true, data will passed to next generator. If false, data will be dropped. :type thru: bool :returns: generator """ if prev is not None: for i in prev: file_handle.write(str(i)+endl) if thru: yield i else: for data in file_handle: yield data
def fileobj(prev, file_handle, endl='', thru=False): """This pipe read/write data from/to file object which specified by file_handle. :param prev: The previous iterator of pipe. :type prev: Pipe :param file_handle: The file object to read or write. :type file_handle: file object :param endl: The end-of-line symbol for each output. :type endl: str :param thru: If true, data will passed to next generator. If false, data will be dropped. :type thru: bool :returns: generator """ if prev is not None: for i in prev: file_handle.write(str(i)+endl) if thru: yield i else: for data in file_handle: yield data
[ "This", "pipe", "read", "/", "write", "data", "from", "/", "to", "file", "object", "which", "specified", "by", "file_handle", "." ]
GaryLee/cmdlet
python
https://github.com/GaryLee/cmdlet/blob/5852a63fc2c7dd723a3d7abe18455f8dacb49433/cmdlet/cmds.py#L542-L564
[ "def", "fileobj", "(", "prev", ",", "file_handle", ",", "endl", "=", "''", ",", "thru", "=", "False", ")", ":", "if", "prev", "is", "not", "None", ":", "for", "i", "in", "prev", ":", "file_handle", ".", "write", "(", "str", "(", "i", ")", "+", "endl", ")", "if", "thru", ":", "yield", "i", "else", ":", "for", "data", "in", "file_handle", ":", "yield", "data" ]
5852a63fc2c7dd723a3d7abe18455f8dacb49433
valid
sh
The sh pipe executes the shell command specified by args. If a previous pipe exists, it reads data from it and writes it to the stdin of the shell process. The stdout of the shell process will be passed to the next pipe object line by line. An optional keyword argument 'trim' can pass a function into the sh pipe; it is used to trim the output from the shell process. The default trim function is str.rstrip, so any trailing whitespace of each shell process output line will be removed. For example: py_files = result(sh('ls') | strip | wildcard('*.py')) :param prev: The previous iterator of pipe. :type prev: Pipe :param args: The command line arguments. It will be joined by space character. :type args: list of string. :param kw: arguments for subprocess.Popen. :type kw: dictionary of options. :returns: generator
cmdlet/cmds.py
def sh(prev, *args, **kw): """sh pipe execute shell command specified by args. If previous pipe exists, read data from it and write it to stdin of shell process. The stdout of shell process will be passed to next pipe object line by line. A optional keyword argument 'trim' can pass a function into sh pipe. It is used to trim the output from shell process. The default trim function is str.rstrip. Therefore, any space characters in tail of shell process output line will be removed. For example: py_files = result(sh('ls') | strip | wildcard('*.py')) :param prev: The previous iterator of pipe. :type prev: Pipe :param args: The command line arguments. It will be joined by space character. :type args: list of string. :param kw: arguments for subprocess.Popen. :type kw: dictionary of options. :returns: generator """ endl = '\n' if 'endl' not in kw else kw.pop('endl') trim = None if 'trim' not in kw else kw.pop('trim') if trim is None: trim = bytes.rstrip if is_py3 else str.rstrip cmdline = ' '.join(args) if not cmdline: if prev is not None: for i in prev: yield i else: while True: yield None process = subprocess.Popen(cmdline, shell=True, stdin=subprocess.PIPE, stdout=subprocess.PIPE, **kw) if prev is not None: stdin_buffer = StringIO() for i in prev: stdin_buffer.write(i) if endl: stdin_buffer.write(endl) if is_py3: process.stdin.write(stdin_buffer.getvalue().encode('utf-8')) else: process.stdin.write(stdin_buffer.getvalue()) process.stdin.flush() process.stdin.close() stdin_buffer.close() for line in process.stdout: yield trim(line) process.wait()
def sh(prev, *args, **kw): """sh pipe execute shell command specified by args. If previous pipe exists, read data from it and write it to stdin of shell process. The stdout of shell process will be passed to next pipe object line by line. A optional keyword argument 'trim' can pass a function into sh pipe. It is used to trim the output from shell process. The default trim function is str.rstrip. Therefore, any space characters in tail of shell process output line will be removed. For example: py_files = result(sh('ls') | strip | wildcard('*.py')) :param prev: The previous iterator of pipe. :type prev: Pipe :param args: The command line arguments. It will be joined by space character. :type args: list of string. :param kw: arguments for subprocess.Popen. :type kw: dictionary of options. :returns: generator """ endl = '\n' if 'endl' not in kw else kw.pop('endl') trim = None if 'trim' not in kw else kw.pop('trim') if trim is None: trim = bytes.rstrip if is_py3 else str.rstrip cmdline = ' '.join(args) if not cmdline: if prev is not None: for i in prev: yield i else: while True: yield None process = subprocess.Popen(cmdline, shell=True, stdin=subprocess.PIPE, stdout=subprocess.PIPE, **kw) if prev is not None: stdin_buffer = StringIO() for i in prev: stdin_buffer.write(i) if endl: stdin_buffer.write(endl) if is_py3: process.stdin.write(stdin_buffer.getvalue().encode('utf-8')) else: process.stdin.write(stdin_buffer.getvalue()) process.stdin.flush() process.stdin.close() stdin_buffer.close() for line in process.stdout: yield trim(line) process.wait()
[ "sh", "pipe", "execute", "shell", "command", "specified", "by", "args", ".", "If", "previous", "pipe", "exists", "read", "data", "from", "it", "and", "write", "it", "to", "stdin", "of", "shell", "process", ".", "The", "stdout", "of", "shell", "process", "will", "be", "passed", "to", "next", "pipe", "object", "line", "by", "line", "." ]
GaryLee/cmdlet
python
https://github.com/GaryLee/cmdlet/blob/5852a63fc2c7dd723a3d7abe18455f8dacb49433/cmdlet/cmds.py#L567-L623
[ "def", "sh", "(", "prev", ",", "*", "args", ",", "*", "*", "kw", ")", ":", "endl", "=", "'\\n'", "if", "'endl'", "not", "in", "kw", "else", "kw", ".", "pop", "(", "'endl'", ")", "trim", "=", "None", "if", "'trim'", "not", "in", "kw", "else", "kw", ".", "pop", "(", "'trim'", ")", "if", "trim", "is", "None", ":", "trim", "=", "bytes", ".", "rstrip", "if", "is_py3", "else", "str", ".", "rstrip", "cmdline", "=", "' '", ".", "join", "(", "args", ")", "if", "not", "cmdline", ":", "if", "prev", "is", "not", "None", ":", "for", "i", "in", "prev", ":", "yield", "i", "else", ":", "while", "True", ":", "yield", "None", "process", "=", "subprocess", ".", "Popen", "(", "cmdline", ",", "shell", "=", "True", ",", "stdin", "=", "subprocess", ".", "PIPE", ",", "stdout", "=", "subprocess", ".", "PIPE", ",", "*", "*", "kw", ")", "if", "prev", "is", "not", "None", ":", "stdin_buffer", "=", "StringIO", "(", ")", "for", "i", "in", "prev", ":", "stdin_buffer", ".", "write", "(", "i", ")", "if", "endl", ":", "stdin_buffer", ".", "write", "(", "endl", ")", "if", "is_py3", ":", "process", ".", "stdin", ".", "write", "(", "stdin_buffer", ".", "getvalue", "(", ")", ".", "encode", "(", "'utf-8'", ")", ")", "else", ":", "process", ".", "stdin", ".", "write", "(", "stdin_buffer", ".", "getvalue", "(", ")", ")", "process", ".", "stdin", ".", "flush", "(", ")", "process", ".", "stdin", ".", "close", "(", ")", "stdin_buffer", ".", "close", "(", ")", "for", "line", "in", "process", ".", "stdout", ":", "yield", "trim", "(", "line", ")", "process", ".", "wait", "(", ")" ]
5852a63fc2c7dd723a3d7abe18455f8dacb49433
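A sketch of feeding data into a shell command's stdin through the sh pipe. seq, sh, to_str and result are assumed importable from cmdlet.cmds; 'grep hello' is only an illustrative command.

from cmdlet.cmds import result, seq, sh, to_str  # assumed import path

# Each upstream item is written to grep's stdin (with '\n' appended), and
# grep's stdout comes back line by line; to_str decodes the bytes on Python 3.
matches = result(seq(['hello world', 'goodbye']) | sh('grep hello') | to_str)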
valid
walk
This pipe wraps os.walk and yields the path of each file under inital_path, one by one. :param prev: The previous iterator of pipe. :type prev: Pipe :param inital_path: The root directory to start walking from. :type inital_path: str :param args: Extra positional arguments (currently unused). :type args: list of string. :param kw: Extra keyword arguments (currently unused). :type kw: dictionary of options. :returns: generator
cmdlet/cmds.py
def walk(prev, inital_path, *args, **kw): """This pipe wrap os.walk and yield absolute path one by one. :param prev: The previous iterator of pipe. :type prev: Pipe :param args: The end-of-line symbol for each output. :type args: list of string. :param kw: The end-of-line symbol for each output. :type kw: dictionary of options. Add 'endl' in kw to specify end-of-line symbol. :returns: generator """ for dir_path, dir_names, filenames in os.walk(inital_path): for filename in filenames: yield os.path.join(dir_path, filename)
def walk(prev, inital_path, *args, **kw): """This pipe wrap os.walk and yield absolute path one by one. :param prev: The previous iterator of pipe. :type prev: Pipe :param args: The end-of-line symbol for each output. :type args: list of string. :param kw: The end-of-line symbol for each output. :type kw: dictionary of options. Add 'endl' in kw to specify end-of-line symbol. :returns: generator """ for dir_path, dir_names, filenames in os.walk(inital_path): for filename in filenames: yield os.path.join(dir_path, filename)
[ "This", "pipe", "wrap", "os", ".", "walk", "and", "yield", "absolute", "path", "one", "by", "one", "." ]
GaryLee/cmdlet
python
https://github.com/GaryLee/cmdlet/blob/5852a63fc2c7dd723a3d7abe18455f8dacb49433/cmdlet/cmds.py#L626-L639
[ "def", "walk", "(", "prev", ",", "inital_path", ",", "*", "args", ",", "*", "*", "kw", ")", ":", "for", "dir_path", ",", "dir_names", ",", "filenames", "in", "os", ".", "walk", "(", "inital_path", ")", ":", "for", "filename", "in", "filenames", ":", "yield", "os", ".", "path", ".", "join", "(", "dir_path", ",", "filename", ")" ]
5852a63fc2c7dd723a3d7abe18455f8dacb49433
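A usage sketch combining walk with wildcard to find Python files under a directory tree. walk, wildcard and result are assumed importable from cmdlet.cmds.

from cmdlet.cmds import result, walk, wildcard  # assumed import path

# walk('.') yields the path of every file under the current directory;
# wildcard then keeps only the paths matching '*.py'.
py_files = result(walk('.') | wildcard('*.py'))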
valid
join
alias of str.join
cmdlet/cmds.py
def join(prev, sep, *args, **kw): '''alias of str.join''' yield sep.join(prev, *args, **kw)
def join(prev, sep, *args, **kw): '''alias of str.join''' yield sep.join(prev, *args, **kw)
[ "alias", "of", "str", ".", "join" ]
GaryLee/cmdlet
python
https://github.com/GaryLee/cmdlet/blob/5852a63fc2c7dd723a3d7abe18455f8dacb49433/cmdlet/cmds.py#L686-L688
[ "def", "join", "(", "prev", ",", "sep", ",", "*", "args", ",", "*", "*", "kw", ")", ":", "yield", "sep", ".", "join", "(", "prev", ",", "*", "args", ",", "*", "*", "kw", ")" ]
5852a63fc2c7dd723a3d7abe18455f8dacb49433
valid
substitute
alias of string.Template.substitute
cmdlet/cmds.py
def substitute(prev, *args, **kw): '''alias of string.Template.substitute''' template_obj = string.Template(*args, **kw) for data in prev: yield template_obj.substitute(data)
def substitute(prev, *args, **kw): '''alias of string.Template.substitute''' template_obj = string.Template(*args, **kw) for data in prev: yield template_obj.substitute(data)
[ "alias", "of", "string", ".", "Template", ".", "substitute" ]
GaryLee/cmdlet
python
https://github.com/GaryLee/cmdlet/blob/5852a63fc2c7dd723a3d7abe18455f8dacb49433/cmdlet/cmds.py#L691-L695
[ "def", "substitute", "(", "prev", ",", "*", "args", ",", "*", "*", "kw", ")", ":", "template_obj", "=", "string", ".", "Template", "(", "*", "args", ",", "*", "*", "kw", ")", "for", "data", "in", "prev", ":", "yield", "template_obj", ".", "substitute", "(", "data", ")" ]
5852a63fc2c7dd723a3d7abe18455f8dacb49433
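A sketch of the substitute pipe: each upstream item is used as the mapping for string.Template.substitute. seq, substitute and result are assumed importable from cmdlet.cmds.

from cmdlet.cmds import result, seq, substitute  # assumed import path

# Each dict from the sequence fills the $name placeholder of the template.
greetings = result(seq([{'name': 'Alice'}, {'name': 'Bob'}]) | substitute('Hello, $name!'))
# greetings -> ['Hello, Alice!', 'Hello, Bob!']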
valid
safe_substitute
alias of string.Template.safe_substitute
cmdlet/cmds.py
def safe_substitute(prev, *args, **kw): '''alias of string.Template.safe_substitute''' template_obj = string.Template(*args, **kw) for data in prev: yield template_obj.safe_substitute(data)
def safe_substitute(prev, *args, **kw): '''alias of string.Template.safe_substitute''' template_obj = string.Template(*args, **kw) for data in prev: yield template_obj.safe_substitute(data)
[ "alias", "of", "string", ".", "Template", ".", "safe_substitute" ]
GaryLee/cmdlet
python
https://github.com/GaryLee/cmdlet/blob/5852a63fc2c7dd723a3d7abe18455f8dacb49433/cmdlet/cmds.py#L698-L702
[ "def", "safe_substitute", "(", "prev", ",", "*", "args", ",", "*", "*", "kw", ")", ":", "template_obj", "=", "string", ".", "Template", "(", "*", "args", ",", "*", "*", "kw", ")", "for", "data", "in", "prev", ":", "yield", "template_obj", ".", "safe_substitute", "(", "data", ")" ]
5852a63fc2c7dd723a3d7abe18455f8dacb49433
valid
to_str
Convert data from the previous pipe between str and bytes using the specified encoding: str items are encoded to bytes if an encoding is given, and bytes items are decoded to str (defaulting to sys.stdout.encoding or 'utf-8').
cmdlet/cmds.py
def to_str(prev, encoding=None): """Convert data from previous pipe with specified encoding.""" first = next(prev) if isinstance(first, str): if encoding is None: yield first for s in prev: yield s else: yield first.encode(encoding) for s in prev: yield s.encode(encoding) else: if encoding is None: encoding = sys.stdout.encoding or 'utf-8' yield first.decode(encoding) for s in prev: yield s.decode(encoding)
def to_str(prev, encoding=None): """Convert data from previous pipe with specified encoding.""" first = next(prev) if isinstance(first, str): if encoding is None: yield first for s in prev: yield s else: yield first.encode(encoding) for s in prev: yield s.encode(encoding) else: if encoding is None: encoding = sys.stdout.encoding or 'utf-8' yield first.decode(encoding) for s in prev: yield s.decode(encoding)
[ "Convert", "data", "from", "previous", "pipe", "with", "specified", "encoding", "." ]
GaryLee/cmdlet
python
https://github.com/GaryLee/cmdlet/blob/5852a63fc2c7dd723a3d7abe18455f8dacb49433/cmdlet/cmds.py#L705-L722
[ "def", "to_str", "(", "prev", ",", "encoding", "=", "None", ")", ":", "first", "=", "next", "(", "prev", ")", "if", "isinstance", "(", "first", ",", "str", ")", ":", "if", "encoding", "is", "None", ":", "yield", "first", "for", "s", "in", "prev", ":", "yield", "s", "else", ":", "yield", "first", ".", "encode", "(", "encoding", ")", "for", "s", "in", "prev", ":", "yield", "s", ".", "encode", "(", "encoding", ")", "else", ":", "if", "encoding", "is", "None", ":", "encoding", "=", "sys", ".", "stdout", ".", "encoding", "or", "'utf-8'", "yield", "first", ".", "decode", "(", "encoding", ")", "for", "s", "in", "prev", ":", "yield", "s", ".", "decode", "(", "encoding", ")" ]
5852a63fc2c7dd723a3d7abe18455f8dacb49433
valid
register_default_types
Register all default type-to-pipe converters.
cmdlet/cmds.py
def register_default_types(): """Regiser all default type-to-pipe convertors.""" register_type(type, pipe.map) register_type(types.FunctionType, pipe.map) register_type(types.MethodType, pipe.map) register_type(tuple, seq) register_type(list, seq) register_type(types.GeneratorType, seq) register_type(string_type, sh) register_type(unicode_type, sh) register_type(file_type, fileobj) if is_py3: register_type(range, seq) register_type(map, seq)
def register_default_types(): """Regiser all default type-to-pipe convertors.""" register_type(type, pipe.map) register_type(types.FunctionType, pipe.map) register_type(types.MethodType, pipe.map) register_type(tuple, seq) register_type(list, seq) register_type(types.GeneratorType, seq) register_type(string_type, sh) register_type(unicode_type, sh) register_type(file_type, fileobj) if is_py3: register_type(range, seq) register_type(map, seq)
[ "Regiser", "all", "default", "type", "-", "to", "-", "pipe", "convertors", "." ]
GaryLee/cmdlet
python
https://github.com/GaryLee/cmdlet/blob/5852a63fc2c7dd723a3d7abe18455f8dacb49433/cmdlet/cmds.py#L724-L738
[ "def", "register_default_types", "(", ")", ":", "register_type", "(", "type", ",", "pipe", ".", "map", ")", "register_type", "(", "types", ".", "FunctionType", ",", "pipe", ".", "map", ")", "register_type", "(", "types", ".", "MethodType", ",", "pipe", ".", "map", ")", "register_type", "(", "tuple", ",", "seq", ")", "register_type", "(", "list", ",", "seq", ")", "register_type", "(", "types", ".", "GeneratorType", ",", "seq", ")", "register_type", "(", "string_type", ",", "sh", ")", "register_type", "(", "unicode_type", ",", "sh", ")", "register_type", "(", "file_type", ",", "fileobj", ")", "if", "is_py3", ":", "register_type", "(", "range", ",", "seq", ")", "register_type", "(", "map", ",", "seq", ")" ]
5852a63fc2c7dd723a3d7abe18455f8dacb49433
valid
Paginator.get_dict
Convert Paginator instance to dict :return: Paging data :rtype: dict
eggit/paginator.py
def get_dict(self): ''' Convert Paginator instance to dict :return: Paging data :rtype: dict ''' return dict( current_page=self.current_page, total_page_count=self.total_page_count, items=self.items, total_item_count=self.total_item_count, page_size=self.page_size )
def get_dict(self): ''' Convert Paginator instance to dict :return: Paging data :rtype: dict ''' return dict( current_page=self.current_page, total_page_count=self.total_page_count, items=self.items, total_item_count=self.total_item_count, page_size=self.page_size )
[ "Convert", "Paginator", "instance", "to", "dict" ]
MyJoiT/eggit
python
https://github.com/MyJoiT/eggit/blob/1e20910264ee2fd72c6783f0817572e16ea87bd0/eggit/paginator.py#L21-L35
[ "def", "get_dict", "(", "self", ")", ":", "return", "dict", "(", "current_page", "=", "self", ".", "current_page", ",", "total_page_count", "=", "self", ".", "total_page_count", ",", "items", "=", "self", ".", "items", ",", "total_item_count", "=", "self", ".", "total_item_count", ",", "page_size", "=", "self", ".", "page_size", ")" ]
1e20910264ee2fd72c6783f0817572e16ea87bd0
valid
write_log
This function logs a line of data to both a 'log' file and a 'latest' file. The 'latest' file is optional, and is requested via the boolean parameter 'require_latest'. So the 2 log directories and filenames are: a. (REQUIRED): log_directory + log_filename b. (OPTIONAL): latest_directory + latest_filename. The 'latest' directory and filename are provided so as to have a consistent file of the latest events; this is usually the latest day of events. The way this function works with the 'latest' file is as follows: a. It checks for the existence of log_directory + log_filename. b. If (a) doesn't exist, then any 'latest' file is removed and a new one created. c. If (a) already exists, logs are written to any existing 'latest' file; if one doesn't exist, it will be created. For both the 'log' and 'latest' files, a header line will be written if a new file is created. Please note that a header must start with the '#' symbol, so the Ardexa agent can interpret that line as a header and will not send it to the cloud
ardexaplugin.py
def write_log(log_directory, log_filename, header, logline, debug, require_latest, latest_directory, latest_filename): """This function logs a line of data to both a 'log' file, and a 'latest' file The 'latest' file is optional, and is sent to this function as a boolean value via the variable 'require_latest'. So the 2 log directories and filenames are: a. (REQUIRED): log_directory + log_filename b. (OPTIONAL): latest_directory + latest_filename The 'latest' directory and filename is provided so as to have a consistent file of the latest events This is usually the latest day of events. The way this function works with the 'latest' log_dir is as follows: a. It checks for the existance of log_directory + log_filename b. If (a) doesn't exist, then any 'latest' file is removed and a new one created c. If (a) already exists, logs are written to any existing 'latest' file If one doesn't exist, it will be created For both the 'log' and 'latest' files, a header line will be written if a new file is created Please note that a header must start with the '#' symbol, so the Ardexa agent can interpret this line as a header , and will not send it to the cloud """ create_new_file = False # Make sure the logging directory exists. The following will create all the necessary subdirs, # if the subdirs exist in part or in full if not os.path.exists(log_directory): os.makedirs(log_directory) full_path_log = os.path.join(log_directory, log_filename) if debug > 1: print("Full path of log directory: ", full_path_log) # If the file doesn't exist, annotate that a new 'latest' file is to be created # and that a header is to be created if not os.path.isfile(full_path_log): if debug > 1: print("Log file doesn't exist: ", full_path_log) create_new_file = True # Repeat for the 'latest', if it doesn't exist if require_latest: if not os.path.exists(latest_directory): os.makedirs(latest_directory) full_path_latest = os.path.join(latest_directory, latest_filename) if debug > 1: print("Full path of latest directory: ", full_path_latest) # If the 'create_new_file' tag is set AND the file exists, then remove it if create_new_file and os.path.isfile(full_path_latest): # then remove the file os.remove(full_path_latest) # Now create both (or open both) and write to them if debug > 1: print("##########################################") print("Writing the line to", full_path_latest) print(logline) print("##########################################") # Write the logline to the log file output_file = open(full_path_log, "a") if create_new_file: output_file.write(header) output_file.write(logline) output_file.close() # And write it to the 'latest' if required if require_latest: write_latest = open(full_path_latest, "a") if create_new_file: write_latest.write(header) write_latest.write(logline) write_latest.close()
def write_log(log_directory, log_filename, header, logline, debug, require_latest, latest_directory, latest_filename): """This function logs a line of data to both a 'log' file, and a 'latest' file The 'latest' file is optional, and is sent to this function as a boolean value via the variable 'require_latest'. So the 2 log directories and filenames are: a. (REQUIRED): log_directory + log_filename b. (OPTIONAL): latest_directory + latest_filename The 'latest' directory and filename is provided so as to have a consistent file of the latest events This is usually the latest day of events. The way this function works with the 'latest' log_dir is as follows: a. It checks for the existance of log_directory + log_filename b. If (a) doesn't exist, then any 'latest' file is removed and a new one created c. If (a) already exists, logs are written to any existing 'latest' file If one doesn't exist, it will be created For both the 'log' and 'latest' files, a header line will be written if a new file is created Please note that a header must start with the '#' symbol, so the Ardexa agent can interpret this line as a header , and will not send it to the cloud """ create_new_file = False # Make sure the logging directory exists. The following will create all the necessary subdirs, # if the subdirs exist in part or in full if not os.path.exists(log_directory): os.makedirs(log_directory) full_path_log = os.path.join(log_directory, log_filename) if debug > 1: print("Full path of log directory: ", full_path_log) # If the file doesn't exist, annotate that a new 'latest' file is to be created # and that a header is to be created if not os.path.isfile(full_path_log): if debug > 1: print("Log file doesn't exist: ", full_path_log) create_new_file = True # Repeat for the 'latest', if it doesn't exist if require_latest: if not os.path.exists(latest_directory): os.makedirs(latest_directory) full_path_latest = os.path.join(latest_directory, latest_filename) if debug > 1: print("Full path of latest directory: ", full_path_latest) # If the 'create_new_file' tag is set AND the file exists, then remove it if create_new_file and os.path.isfile(full_path_latest): # then remove the file os.remove(full_path_latest) # Now create both (or open both) and write to them if debug > 1: print("##########################################") print("Writing the line to", full_path_latest) print(logline) print("##########################################") # Write the logline to the log file output_file = open(full_path_log, "a") if create_new_file: output_file.write(header) output_file.write(logline) output_file.close() # And write it to the 'latest' if required if require_latest: write_latest = open(full_path_latest, "a") if create_new_file: write_latest.write(header) write_latest.write(logline) write_latest.close()
[ "This", "function", "logs", "a", "line", "of", "data", "to", "both", "a", "log", "file", "and", "a", "latest", "file", "The", "latest", "file", "is", "optional", "and", "is", "sent", "to", "this", "function", "as", "a", "boolean", "value", "via", "the", "variable", "require_latest", ".", "So", "the", "2", "log", "directories", "and", "filenames", "are", ":", "a", ".", "(", "REQUIRED", ")", ":", "log_directory", "+", "log_filename", "b", ".", "(", "OPTIONAL", ")", ":", "latest_directory", "+", "latest_filename" ]
ardexa/ardexaplugin
python
https://github.com/ardexa/ardexaplugin/blob/5068532f601ae3042bd87af1063057e8f274f670/ardexaplugin.py#L15-L85
[ "def", "write_log", "(", "log_directory", ",", "log_filename", ",", "header", ",", "logline", ",", "debug", ",", "require_latest", ",", "latest_directory", ",", "latest_filename", ")", ":", "create_new_file", "=", "False", "# Make sure the logging directory exists. The following will create all the necessary subdirs,", "# if the subdirs exist in part or in full", "if", "not", "os", ".", "path", ".", "exists", "(", "log_directory", ")", ":", "os", ".", "makedirs", "(", "log_directory", ")", "full_path_log", "=", "os", ".", "path", ".", "join", "(", "log_directory", ",", "log_filename", ")", "if", "debug", ">", "1", ":", "print", "(", "\"Full path of log directory: \"", ",", "full_path_log", ")", "# If the file doesn't exist, annotate that a new 'latest' file is to be created", "# and that a header is to be created", "if", "not", "os", ".", "path", ".", "isfile", "(", "full_path_log", ")", ":", "if", "debug", ">", "1", ":", "print", "(", "\"Log file doesn't exist: \"", ",", "full_path_log", ")", "create_new_file", "=", "True", "# Repeat for the 'latest', if it doesn't exist", "if", "require_latest", ":", "if", "not", "os", ".", "path", ".", "exists", "(", "latest_directory", ")", ":", "os", ".", "makedirs", "(", "latest_directory", ")", "full_path_latest", "=", "os", ".", "path", ".", "join", "(", "latest_directory", ",", "latest_filename", ")", "if", "debug", ">", "1", ":", "print", "(", "\"Full path of latest directory: \"", ",", "full_path_latest", ")", "# If the 'create_new_file' tag is set AND the file exists, then remove it", "if", "create_new_file", "and", "os", ".", "path", ".", "isfile", "(", "full_path_latest", ")", ":", "# then remove the file", "os", ".", "remove", "(", "full_path_latest", ")", "# Now create both (or open both) and write to them", "if", "debug", ">", "1", ":", "print", "(", "\"##########################################\"", ")", "print", "(", "\"Writing the line to\"", ",", "full_path_latest", ")", "print", "(", "logline", ")", "print", "(", "\"##########################################\"", ")", "# Write the logline to the log file", "output_file", "=", "open", "(", "full_path_log", ",", "\"a\"", ")", "if", "create_new_file", ":", "output_file", ".", "write", "(", "header", ")", "output_file", ".", "write", "(", "logline", ")", "output_file", ".", "close", "(", ")", "# And write it to the 'latest' if required", "if", "require_latest", ":", "write_latest", "=", "open", "(", "full_path_latest", ",", "\"a\"", ")", "if", "create_new_file", ":", "write_latest", ".", "write", "(", "header", ")", "write_latest", ".", "write", "(", "logline", ")", "write_latest", ".", "close", "(", ")" ]
5068532f601ae3042bd87af1063057e8f274f670
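A hedged example of calling write_log. The module name ardexaplugin and every path, filename and CSV field below are hypothetical, chosen only to show the argument order; note that the header starts with '#'.

import ardexaplugin  # assuming the module is importable under this name

HEADER = "# Datetime, Power(W), Energy(Wh)\n"
LOGLINE = "2023-01-01T00:05:00Z, 1500, 125\n"

# Append to the dated log file and also maintain a consistent 'latest' copy.
ardexaplugin.write_log("/opt/ardexa/logs/inverter", "20230101.csv",
                       HEADER, LOGLINE, 0, True,
                       "/opt/ardexa/latest/inverter", "latest.csv")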
valid
check_pidfile
Check that a process is not running more than once, using PIDFILE
ardexaplugin.py
def check_pidfile(pidfile, debug): """Check that a process is not running more than once, using PIDFILE""" # Check PID exists and see if the PID is running if os.path.isfile(pidfile): pidfile_handle = open(pidfile, 'r') # try and read the PID file. If no luck, remove it try: pid = int(pidfile_handle.read()) pidfile_handle.close() if check_pid(pid, debug): return True except: pass # PID is not active, remove the PID file os.unlink(pidfile) # Create a PID file, to ensure this is script is only run once (at a time) pid = str(os.getpid()) open(pidfile, 'w').write(pid) return False
def check_pidfile(pidfile, debug): """Check that a process is not running more than once, using PIDFILE""" # Check PID exists and see if the PID is running if os.path.isfile(pidfile): pidfile_handle = open(pidfile, 'r') # try and read the PID file. If no luck, remove it try: pid = int(pidfile_handle.read()) pidfile_handle.close() if check_pid(pid, debug): return True except: pass # PID is not active, remove the PID file os.unlink(pidfile) # Create a PID file, to ensure this is script is only run once (at a time) pid = str(os.getpid()) open(pidfile, 'w').write(pid) return False
[ "Check", "that", "a", "process", "is", "not", "running", "more", "than", "once", "using", "PIDFILE" ]
ardexa/ardexaplugin
python
https://github.com/ardexa/ardexaplugin/blob/5068532f601ae3042bd87af1063057e8f274f670/ardexaplugin.py#L88-L108
[ "def", "check_pidfile", "(", "pidfile", ",", "debug", ")", ":", "# Check PID exists and see if the PID is running", "if", "os", ".", "path", ".", "isfile", "(", "pidfile", ")", ":", "pidfile_handle", "=", "open", "(", "pidfile", ",", "'r'", ")", "# try and read the PID file. If no luck, remove it", "try", ":", "pid", "=", "int", "(", "pidfile_handle", ".", "read", "(", ")", ")", "pidfile_handle", ".", "close", "(", ")", "if", "check_pid", "(", "pid", ",", "debug", ")", ":", "return", "True", "except", ":", "pass", "# PID is not active, remove the PID file", "os", ".", "unlink", "(", "pidfile", ")", "# Create a PID file, to ensure this is script is only run once (at a time)", "pid", "=", "str", "(", "os", ".", "getpid", "(", ")", ")", "open", "(", "pidfile", ",", "'w'", ")", ".", "write", "(", "pid", ")", "return", "False" ]
5068532f601ae3042bd87af1063057e8f274f670
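A sketch of the usual single-instance guard at the top of a plugin script using check_pidfile; the module name and PID file path are assumptions.

import os
import sys
import ardexaplugin  # assuming this module name

PIDFILE = "/tmp/example-plugin.pid"  # hypothetical path

if ardexaplugin.check_pidfile(PIDFILE, 0):
    print("This script is already running")
    sys.exit(1)

# ... do the actual work here ...

os.unlink(PIDFILE)  # remove the PID file that check_pidfile wrote for us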
valid
check_pid
This function will check whether a PID is currently running
ardexaplugin.py
def check_pid(pid, debug): """This function will check whether a PID is currently running""" try: # A Kill of 0 is to check if the PID is active. It won't kill the process os.kill(pid, 0) if debug > 1: print("Script has a PIDFILE where the process is still running") return True except OSError: if debug > 1: print("Script does not appear to be running") return False
def check_pid(pid, debug): """This function will check whether a PID is currently running""" try: # A Kill of 0 is to check if the PID is active. It won't kill the process os.kill(pid, 0) if debug > 1: print("Script has a PIDFILE where the process is still running") return True except OSError: if debug > 1: print("Script does not appear to be running") return False
[ "This", "function", "will", "check", "whether", "a", "PID", "is", "currently", "running" ]
ardexa/ardexaplugin
python
https://github.com/ardexa/ardexaplugin/blob/5068532f601ae3042bd87af1063057e8f274f670/ardexaplugin.py#L111-L122
[ "def", "check_pid", "(", "pid", ",", "debug", ")", ":", "try", ":", "# A Kill of 0 is to check if the PID is active. It won't kill the process", "os", ".", "kill", "(", "pid", ",", "0", ")", "if", "debug", ">", "1", ":", "print", "(", "\"Script has a PIDFILE where the process is still running\"", ")", "return", "True", "except", "OSError", ":", "if", "debug", ">", "1", ":", "print", "(", "\"Script does not appear to be running\"", ")", "return", "False" ]
5068532f601ae3042bd87af1063057e8f274f670
valid
convert_words_to_uint
Convert two 16-bit words (a high word and a low word) to an unsigned integer
ardexaplugin.py
def convert_words_to_uint(high_word, low_word): """Convert two words to a floating point""" try: low_num = int(low_word) # low_word might arrive as a signed number. Convert to unsigned if low_num < 0: low_num = abs(low_num) + 2**15 number = (int(high_word) << 16) | low_num return number, True except: return 0, False
def convert_words_to_uint(high_word, low_word): """Convert two words to a floating point""" try: low_num = int(low_word) # low_word might arrive as a signed number. Convert to unsigned if low_num < 0: low_num = abs(low_num) + 2**15 number = (int(high_word) << 16) | low_num return number, True except: return 0, False
[ "Convert", "two", "words", "to", "a", "floating", "point" ]
ardexa/ardexaplugin
python
https://github.com/ardexa/ardexaplugin/blob/5068532f601ae3042bd87af1063057e8f274f670/ardexaplugin.py#L153-L163
[ "def", "convert_words_to_uint", "(", "high_word", ",", "low_word", ")", ":", "try", ":", "low_num", "=", "int", "(", "low_word", ")", "# low_word might arrive as a signed number. Convert to unsigned", "if", "low_num", "<", "0", ":", "low_num", "=", "abs", "(", "low_num", ")", "+", "2", "**", "15", "number", "=", "(", "int", "(", "high_word", ")", "<<", "16", ")", "|", "low_num", "return", "number", ",", "True", "except", ":", "return", "0", ",", "False" ]
5068532f601ae3042bd87af1063057e8f274f670
valid
convert_words_to_float
Convert two words to a floating point
ardexaplugin.py
def convert_words_to_float(high_word, low_word): """Convert two words to a floating point""" number, retval = convert_words_to_uint(high_word, low_word) if not retval: return 0.0, False try: packed_float = struct.pack('>l', number) return struct.unpack('>f', packed_float)[0], True except: return 0.0, False
def convert_words_to_float(high_word, low_word): """Convert two words to a floating point""" number, retval = convert_words_to_uint(high_word, low_word) if not retval: return 0.0, False try: packed_float = struct.pack('>l', number) return struct.unpack('>f', packed_float)[0], True except: return 0.0, False
[ "Convert", "two", "words", "to", "a", "floating", "point" ]
ardexa/ardexaplugin
python
https://github.com/ardexa/ardexaplugin/blob/5068532f601ae3042bd87af1063057e8f274f670/ardexaplugin.py#L166-L176
[ "def", "convert_words_to_float", "(", "high_word", ",", "low_word", ")", ":", "number", ",", "retval", "=", "convert_words_to_uint", "(", "high_word", ",", "low_word", ")", "if", "not", "retval", ":", "return", "0.0", ",", "False", "try", ":", "packed_float", "=", "struct", ".", "pack", "(", "'>l'", ",", "number", ")", "return", "struct", ".", "unpack", "(", "'>f'", ",", "packed_float", ")", "[", "0", "]", ",", "True", "except", ":", "return", "0.0", ",", "False" ]
5068532f601ae3042bd87af1063057e8f274f670
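A worked example of the two word-combining helpers, typical for Modbus-style register pairs. The values are chosen by hand: 0x0001/0x86A0 packs to the unsigned integer 100000, and 0x3F80/0x0000 is the big-endian IEEE-754 encoding of 1.0. The module name is assumed.

import ardexaplugin  # assuming this module name

# (0x0001 << 16) | 0x86A0 == 0x186A0 == 100000
value, ok = ardexaplugin.convert_words_to_uint(0x0001, 0x86A0)   # -> (100000, True)

# 0x3F800000 reinterpreted as a big-endian 32-bit float is 1.0
value, ok = ardexaplugin.convert_words_to_float(0x3F80, 0x0000)  # -> (1.0, True)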
valid
disown
This function will disown the current process by moving it out of the ardexa.service cgroup, so the Ardexa service can be restarted without affecting this process
ardexaplugin.py
def disown(debug): """This function will disown, so the Ardexa service can be restarted""" # Get the current PID pid = os.getpid() cgroup_file = "/proc/" + str(pid) + "/cgroup" try: infile = open(cgroup_file, "r") except IOError: print("Could not open cgroup file: ", cgroup_file) return False # Read each line for line in infile: # Check if the line contains "ardexa.service" if line.find("ardexa.service") == -1: continue # if the lines contains "name=", replace it with nothing line = line.replace("name=", "") # Split the line by commas items_list = line.split(':') accounts = items_list[1] dir_str = accounts + "/ardexa.disown" # If accounts is empty, continue if not accounts: continue # Create the dir and all subdirs full_dir = "/sys/fs/cgroup/" + dir_str if not os.path.exists(full_dir): os.makedirs(full_dir) if debug >= 1: print("Making directory: ", full_dir) else: if debug >= 1: print("Directory already exists: ", full_dir) # Add the PID to the file full_path = full_dir + "/cgroup.procs" prog_list = ["echo", str(pid), ">", full_path] run_program(prog_list, debug, True) # If this item contains a comma, then separate it, and reverse # some OSes will need cpuacct,cpu reversed to actually work if accounts.find(",") != -1: acct_list = accounts.split(',') accounts = acct_list[1] + "," + acct_list[0] dir_str = accounts + "/ardexa.disown" # Create the dir and all subdirs. But it may not work. So use a TRY full_dir = "/sys/fs/cgroup/" + dir_str try: if not os.path.exists(full_dir): os.makedirs(full_dir) except: continue # Add the PID to the file full_path = full_dir + "/cgroup.procs" prog_list = ["echo", str(pid), ">", full_path] run_program(prog_list, debug, True) infile.close() # For debug purposes only if debug >= 1: prog_list = ["cat", cgroup_file] run_program(prog_list, debug, False) # If there are any "ardexa.service" in the proc file. If so, exit with error prog_list = ["grep", "-q", "ardexa.service", cgroup_file] if run_program(prog_list, debug, False): # There are entries still left in the file return False return True
def disown(debug): """This function will disown, so the Ardexa service can be restarted""" # Get the current PID pid = os.getpid() cgroup_file = "/proc/" + str(pid) + "/cgroup" try: infile = open(cgroup_file, "r") except IOError: print("Could not open cgroup file: ", cgroup_file) return False # Read each line for line in infile: # Check if the line contains "ardexa.service" if line.find("ardexa.service") == -1: continue # if the lines contains "name=", replace it with nothing line = line.replace("name=", "") # Split the line by commas items_list = line.split(':') accounts = items_list[1] dir_str = accounts + "/ardexa.disown" # If accounts is empty, continue if not accounts: continue # Create the dir and all subdirs full_dir = "/sys/fs/cgroup/" + dir_str if not os.path.exists(full_dir): os.makedirs(full_dir) if debug >= 1: print("Making directory: ", full_dir) else: if debug >= 1: print("Directory already exists: ", full_dir) # Add the PID to the file full_path = full_dir + "/cgroup.procs" prog_list = ["echo", str(pid), ">", full_path] run_program(prog_list, debug, True) # If this item contains a comma, then separate it, and reverse # some OSes will need cpuacct,cpu reversed to actually work if accounts.find(",") != -1: acct_list = accounts.split(',') accounts = acct_list[1] + "," + acct_list[0] dir_str = accounts + "/ardexa.disown" # Create the dir and all subdirs. But it may not work. So use a TRY full_dir = "/sys/fs/cgroup/" + dir_str try: if not os.path.exists(full_dir): os.makedirs(full_dir) except: continue # Add the PID to the file full_path = full_dir + "/cgroup.procs" prog_list = ["echo", str(pid), ">", full_path] run_program(prog_list, debug, True) infile.close() # For debug purposes only if debug >= 1: prog_list = ["cat", cgroup_file] run_program(prog_list, debug, False) # If there are any "ardexa.service" in the proc file. If so, exit with error prog_list = ["grep", "-q", "ardexa.service", cgroup_file] if run_program(prog_list, debug, False): # There are entries still left in the file return False return True
[ "This", "function", "will", "disown", "so", "the", "Ardexa", "service", "can", "be", "restarted" ]
ardexa/ardexaplugin
python
https://github.com/ardexa/ardexaplugin/blob/5068532f601ae3042bd87af1063057e8f274f670/ardexaplugin.py#L179-L253
[ "def", "disown", "(", "debug", ")", ":", "# Get the current PID", "pid", "=", "os", ".", "getpid", "(", ")", "cgroup_file", "=", "\"/proc/\"", "+", "str", "(", "pid", ")", "+", "\"/cgroup\"", "try", ":", "infile", "=", "open", "(", "cgroup_file", ",", "\"r\"", ")", "except", "IOError", ":", "print", "(", "\"Could not open cgroup file: \"", ",", "cgroup_file", ")", "return", "False", "# Read each line", "for", "line", "in", "infile", ":", "# Check if the line contains \"ardexa.service\"", "if", "line", ".", "find", "(", "\"ardexa.service\"", ")", "==", "-", "1", ":", "continue", "# if the lines contains \"name=\", replace it with nothing", "line", "=", "line", ".", "replace", "(", "\"name=\"", ",", "\"\"", ")", "# Split the line by commas", "items_list", "=", "line", ".", "split", "(", "':'", ")", "accounts", "=", "items_list", "[", "1", "]", "dir_str", "=", "accounts", "+", "\"/ardexa.disown\"", "# If accounts is empty, continue", "if", "not", "accounts", ":", "continue", "# Create the dir and all subdirs", "full_dir", "=", "\"/sys/fs/cgroup/\"", "+", "dir_str", "if", "not", "os", ".", "path", ".", "exists", "(", "full_dir", ")", ":", "os", ".", "makedirs", "(", "full_dir", ")", "if", "debug", ">=", "1", ":", "print", "(", "\"Making directory: \"", ",", "full_dir", ")", "else", ":", "if", "debug", ">=", "1", ":", "print", "(", "\"Directory already exists: \"", ",", "full_dir", ")", "# Add the PID to the file", "full_path", "=", "full_dir", "+", "\"/cgroup.procs\"", "prog_list", "=", "[", "\"echo\"", ",", "str", "(", "pid", ")", ",", "\">\"", ",", "full_path", "]", "run_program", "(", "prog_list", ",", "debug", ",", "True", ")", "# If this item contains a comma, then separate it, and reverse", "# some OSes will need cpuacct,cpu reversed to actually work", "if", "accounts", ".", "find", "(", "\",\"", ")", "!=", "-", "1", ":", "acct_list", "=", "accounts", ".", "split", "(", "','", ")", "accounts", "=", "acct_list", "[", "1", "]", "+", "\",\"", "+", "acct_list", "[", "0", "]", "dir_str", "=", "accounts", "+", "\"/ardexa.disown\"", "# Create the dir and all subdirs. But it may not work. So use a TRY", "full_dir", "=", "\"/sys/fs/cgroup/\"", "+", "dir_str", "try", ":", "if", "not", "os", ".", "path", ".", "exists", "(", "full_dir", ")", ":", "os", ".", "makedirs", "(", "full_dir", ")", "except", ":", "continue", "# Add the PID to the file", "full_path", "=", "full_dir", "+", "\"/cgroup.procs\"", "prog_list", "=", "[", "\"echo\"", ",", "str", "(", "pid", ")", ",", "\">\"", ",", "full_path", "]", "run_program", "(", "prog_list", ",", "debug", ",", "True", ")", "infile", ".", "close", "(", ")", "# For debug purposes only", "if", "debug", ">=", "1", ":", "prog_list", "=", "[", "\"cat\"", ",", "cgroup_file", "]", "run_program", "(", "prog_list", ",", "debug", ",", "False", ")", "# If there are any \"ardexa.service\" in the proc file. If so, exit with error", "prog_list", "=", "[", "\"grep\"", ",", "\"-q\"", ",", "\"ardexa.service\"", ",", "cgroup_file", "]", "if", "run_program", "(", "prog_list", ",", "debug", ",", "False", ")", ":", "# There are entries still left in the file", "return", "False", "return", "True" ]
5068532f601ae3042bd87af1063057e8f274f670
valid
run_program
Run a program and check the program return code. Note that some commands don't work well with Popen, so if this function is specifically called with 'shell=True', it will run via the old 'os.system', in which case there is no program output
ardexaplugin.py
def run_program(prog_list, debug, shell): """Run a program and check program return code Note that some commands don't work well with Popen. So if this function is specifically called with 'shell=True', then it will run the old 'os.system'. In which case, there is no program output """ try: if not shell: process = Popen(prog_list, stdout=PIPE, stderr=PIPE) stdout, stderr = process.communicate() retcode = process.returncode if debug >= 1: print("Program : ", " ".join(prog_list)) print("Return Code: ", retcode) print("Stdout: ", stdout) print("Stderr: ", stderr) return bool(retcode) else: command = " ".join(prog_list) os.system(command) return True except: return False
def run_program(prog_list, debug, shell): """Run a program and check program return code Note that some commands don't work well with Popen. So if this function is specifically called with 'shell=True', then it will run the old 'os.system'. In which case, there is no program output """ try: if not shell: process = Popen(prog_list, stdout=PIPE, stderr=PIPE) stdout, stderr = process.communicate() retcode = process.returncode if debug >= 1: print("Program : ", " ".join(prog_list)) print("Return Code: ", retcode) print("Stdout: ", stdout) print("Stderr: ", stderr) return bool(retcode) else: command = " ".join(prog_list) os.system(command) return True except: return False
[ "Run", "a", "program", "and", "check", "program", "return", "code", "Note", "that", "some", "commands", "don", "t", "work", "well", "with", "Popen", ".", "So", "if", "this", "function", "is", "specifically", "called", "with", "shell", "=", "True", "then", "it", "will", "run", "the", "old", "os", ".", "system", ".", "In", "which", "case", "there", "is", "no", "program", "output" ]
ardexa/ardexaplugin
python
https://github.com/ardexa/ardexaplugin/blob/5068532f601ae3042bd87af1063057e8f274f670/ardexaplugin.py#L256-L277
[ "def", "run_program", "(", "prog_list", ",", "debug", ",", "shell", ")", ":", "try", ":", "if", "not", "shell", ":", "process", "=", "Popen", "(", "prog_list", ",", "stdout", "=", "PIPE", ",", "stderr", "=", "PIPE", ")", "stdout", ",", "stderr", "=", "process", ".", "communicate", "(", ")", "retcode", "=", "process", ".", "returncode", "if", "debug", ">=", "1", ":", "print", "(", "\"Program : \"", ",", "\" \"", ".", "join", "(", "prog_list", ")", ")", "print", "(", "\"Return Code: \"", ",", "retcode", ")", "print", "(", "\"Stdout: \"", ",", "stdout", ")", "print", "(", "\"Stderr: \"", ",", "stderr", ")", "return", "bool", "(", "retcode", ")", "else", ":", "command", "=", "\" \"", ".", "join", "(", "prog_list", ")", "os", ".", "system", "(", "command", ")", "return", "True", "except", ":", "return", "False" ]
5068532f601ae3042bd87af1063057e8f274f670
valid
parse_address_list
Yield each integer from a complex range string like "1-9,12,15-20,23" >>> list(parse_address_list('1-9,12,15-20,23')) [1, 2, 3, 4, 5, 6, 7, 8, 9, 12, 15, 16, 17, 18, 19, 20, 23] >>> list(parse_address_list('1-9,12,15-20,2-3-4')) Traceback (most recent call last): ... ValueError: format error in 2-3-4
ardexaplugin.py
def parse_address_list(addrs): """Yield each integer from a complex range string like "1-9,12,15-20,23" >>> list(parse_address_list('1-9,12,15-20,23')) [1, 2, 3, 4, 5, 6, 7, 8, 9, 12, 15, 16, 17, 18, 19, 20, 23] >>> list(parse_address_list('1-9,12,15-20,2-3-4')) Traceback (most recent call last): ... ValueError: format error in 2-3-4 """ for addr in addrs.split(','): elem = addr.split('-') if len(elem) == 1: # a number yield int(elem[0]) elif len(elem) == 2: # a range inclusive start, end = list(map(int, elem)) for i in range(start, end+1): yield i else: # more than one hyphen raise ValueError('format error in %s' % addr)
def parse_address_list(addrs): """Yield each integer from a complex range string like "1-9,12,15-20,23" >>> list(parse_address_list('1-9,12,15-20,23')) [1, 2, 3, 4, 5, 6, 7, 8, 9, 12, 15, 16, 17, 18, 19, 20, 23] >>> list(parse_address_list('1-9,12,15-20,2-3-4')) Traceback (most recent call last): ... ValueError: format error in 2-3-4 """ for addr in addrs.split(','): elem = addr.split('-') if len(elem) == 1: # a number yield int(elem[0]) elif len(elem) == 2: # a range inclusive start, end = list(map(int, elem)) for i in range(start, end+1): yield i else: # more than one hyphen raise ValueError('format error in %s' % addr)
[ "Yield", "each", "integer", "from", "a", "complex", "range", "string", "like", "1", "-", "9", "12", "15", "-", "20", "23" ]
ardexa/ardexaplugin
python
https://github.com/ardexa/ardexaplugin/blob/5068532f601ae3042bd87af1063057e8f274f670/ardexaplugin.py#L280-L300
[ "def", "parse_address_list", "(", "addrs", ")", ":", "for", "addr", "in", "addrs", ".", "split", "(", "','", ")", ":", "elem", "=", "addr", ".", "split", "(", "'-'", ")", "if", "len", "(", "elem", ")", "==", "1", ":", "# a number", "yield", "int", "(", "elem", "[", "0", "]", ")", "elif", "len", "(", "elem", ")", "==", "2", ":", "# a range inclusive", "start", ",", "end", "=", "list", "(", "map", "(", "int", ",", "elem", ")", ")", "for", "i", "in", "range", "(", "start", ",", "end", "+", "1", ")", ":", "yield", "i", "else", ":", "# more than one hyphen", "raise", "ValueError", "(", "'format error in %s'", "%", "addr", ")" ]
5068532f601ae3042bd87af1063057e8f274f670
valid
_encode_ids
URL-encode resource ids and join them with ';'
octopie/api.py
def _encode_ids(*args): """ Do url-encode resource ids """ ids = [] for v in args: if isinstance(v, basestring): qv = v.encode('utf-8') if isinstance(v, unicode) else v ids.append(urllib.quote(qv)) else: qv = str(v) ids.append(urllib.quote(qv)) return ';'.join(ids)
def _encode_ids(*args): """ Do url-encode resource ids """ ids = [] for v in args: if isinstance(v, basestring): qv = v.encode('utf-8') if isinstance(v, unicode) else v ids.append(urllib.quote(qv)) else: qv = str(v) ids.append(urllib.quote(qv)) return ';'.join(ids)
[ "Do", "url", "-", "encode", "resource", "ids" ]
stevenc81/octopie
python
https://github.com/stevenc81/octopie/blob/4e06fd8600c8cf4337ee21cc50e748bbf760a0ba/octopie/api.py#L51-L65
[ "def", "_encode_ids", "(", "*", "args", ")", ":", "ids", "=", "[", "]", "for", "v", "in", "args", ":", "if", "isinstance", "(", "v", ",", "basestring", ")", ":", "qv", "=", "v", ".", "encode", "(", "'utf-8'", ")", "if", "isinstance", "(", "v", ",", "unicode", ")", "else", "v", "ids", ".", "append", "(", "urllib", ".", "quote", "(", "qv", ")", ")", "else", ":", "qv", "=", "str", "(", "v", ")", "ids", ".", "append", "(", "urllib", ".", "quote", "(", "qv", ")", ")", "return", "';'", ".", "join", "(", "ids", ")" ]
4e06fd8600c8cf4337ee21cc50e748bbf760a0ba
valid
random_string
Generate a random string of the given length. Example: >>> from eggit.egg_string import random_string >>> random_string(8) 'q4f2eaT4' >>>
eggit/egg_string.py
def random_string(length): ''' Generate random string with parameter length. Example: >>> from eggit.egg_string import random_string >>> random_string(8) 'q4f2eaT4' >>> ''' str_list = [random.choice(string.digits + string.ascii_letters) for i in range(length)] return ''.join(str_list)
def random_string(length): ''' Generate random string with parameter length. Example: >>> from eggit.egg_string import random_string >>> random_string(8) 'q4f2eaT4' >>> ''' str_list = [random.choice(string.digits + string.ascii_letters) for i in range(length)] return ''.join(str_list)
[ "Generate", "random", "string", "with", "parameter", "length", ".", "Example", ":" ]
MyJoiT/eggit
python
https://github.com/MyJoiT/eggit/blob/1e20910264ee2fd72c6783f0817572e16ea87bd0/eggit/egg_string.py#L5-L18
[ "def", "random_string", "(", "length", ")", ":", "str_list", "=", "[", "random", ".", "choice", "(", "string", ".", "digits", "+", "string", ".", "ascii_letters", ")", "for", "i", "in", "range", "(", "length", ")", "]", "return", "''", ".", "join", "(", "str_list", ")" ]
1e20910264ee2fd72c6783f0817572e16ea87bd0
valid
get_item_creator
Get the item creator according to the registered item type. :param item_type: The type of item to be checked. :type item_type: types.TypeType. :returns: Creator function, or None if the type is not found.
cmdlet/cmdlet.py
def get_item_creator(item_type): """Get item creator according registered item type. :param item_type: The type of item to be checed. :type item_type: types.TypeType. :returns: Creator function. None if type not found. """ if item_type not in Pipe.pipe_item_types: for registered_type in Pipe.pipe_item_types: if issubclass(item_type, registered_type): return Pipe.pipe_item_types[registered_type] return None else: return Pipe.pipe_item_types[item_type]
def get_item_creator(item_type): """Get item creator according registered item type. :param item_type: The type of item to be checed. :type item_type: types.TypeType. :returns: Creator function. None if type not found. """ if item_type not in Pipe.pipe_item_types: for registered_type in Pipe.pipe_item_types: if issubclass(item_type, registered_type): return Pipe.pipe_item_types[registered_type] return None else: return Pipe.pipe_item_types[item_type]
[ "Get", "item", "creator", "according", "registered", "item", "type", "." ]
GaryLee/cmdlet
python
https://github.com/GaryLee/cmdlet/blob/5852a63fc2c7dd723a3d7abe18455f8dacb49433/cmdlet/cmdlet.py#L199-L212
[ "def", "get_item_creator", "(", "item_type", ")", ":", "if", "item_type", "not", "in", "Pipe", ".", "pipe_item_types", ":", "for", "registered_type", "in", "Pipe", ".", "pipe_item_types", ":", "if", "issubclass", "(", "item_type", ",", "registered_type", ")", ":", "return", "Pipe", ".", "pipe_item_types", "[", "registered_type", "]", "return", "None", "else", ":", "return", "Pipe", ".", "pipe_item_types", "[", "item_type", "]" ]
5852a63fc2c7dd723a3d7abe18455f8dacb49433
valid
Pipe.clone
Self-cloning. All its next Pipe objects are cloned too. :returns: cloned object
cmdlet/cmdlet.py
def clone(self): """Self-cloning. All its next Pipe objects are cloned too. :returns: cloned object """ new_object = copy.copy(self) if new_object.next: new_object.next = new_object.next.clone() return new_object
def clone(self): """Self-cloning. All its next Pipe objects are cloned too. :returns: cloned object """ new_object = copy.copy(self) if new_object.next: new_object.next = new_object.next.clone() return new_object
[ "Self", "-", "cloning", ".", "All", "its", "next", "Pipe", "objects", "are", "cloned", "too", "." ]
GaryLee/cmdlet
python
https://github.com/GaryLee/cmdlet/blob/5852a63fc2c7dd723a3d7abe18455f8dacb49433/cmdlet/cmdlet.py#L105-L113
[ "def", "clone", "(", "self", ")", ":", "new_object", "=", "copy", ".", "copy", "(", "self", ")", "if", "new_object", ".", "next", ":", "new_object", ".", "next", "=", "new_object", ".", "next", ".", "clone", "(", ")", "return", "new_object" ]
5852a63fc2c7dd723a3d7abe18455f8dacb49433
valid
Pipe.append
Append next object to pipe tail. :param next: The Pipe object to be appended to tail. :type next: Pipe object.
cmdlet/cmdlet.py
def append(self, next): """Append next object to pipe tail. :param next: The Pipe object to be appended to tail. :type next: Pipe object. """ next.chained = True if self.next: self.next.append(next) else: self.next = next
def append(self, next): """Append next object to pipe tail. :param next: The Pipe object to be appended to tail. :type next: Pipe object. """ next.chained = True if self.next: self.next.append(next) else: self.next = next
[ "Append", "next", "object", "to", "pipe", "tail", "." ]
GaryLee/cmdlet
python
https://github.com/GaryLee/cmdlet/blob/5852a63fc2c7dd723a3d7abe18455f8dacb49433/cmdlet/cmdlet.py#L115-L125
[ "def", "append", "(", "self", ",", "next", ")", ":", "next", ".", "chained", "=", "True", "if", "self", ".", "next", ":", "self", ".", "next", ".", "append", "(", "next", ")", "else", ":", "self", ".", "next", "=", "next" ]
5852a63fc2c7dd723a3d7abe18455f8dacb49433
valid
Pipe.iter
Return a generator as the iterator object. :param prev: Previous Pipe object which is used for data input. :returns: A generator for iteration.
cmdlet/cmdlet.py
def iter(self, prev=None): """Return an generator as iterator object. :param prev: Previous Pipe object which used for data input. :returns: A generator for iteration. """ if self.next: generator = self.next.iter(self.func(prev, *self.args, **self.kw)) else: generator = self.func(prev, *self.args, **self.kw) return generator
def iter(self, prev=None): """Return an generator as iterator object. :param prev: Previous Pipe object which used for data input. :returns: A generator for iteration. """ if self.next: generator = self.next.iter(self.func(prev, *self.args, **self.kw)) else: generator = self.func(prev, *self.args, **self.kw) return generator
[ "Return", "an", "generator", "as", "iterator", "object", "." ]
GaryLee/cmdlet
python
https://github.com/GaryLee/cmdlet/blob/5852a63fc2c7dd723a3d7abe18455f8dacb49433/cmdlet/cmdlet.py#L134-L145
[ "def", "iter", "(", "self", ",", "prev", "=", "None", ")", ":", "if", "self", ".", "next", ":", "generator", "=", "self", ".", "next", ".", "iter", "(", "self", ".", "func", "(", "prev", ",", "*", "self", ".", "args", ",", "*", "*", "self", ".", "kw", ")", ")", "else", ":", "generator", "=", "self", ".", "func", "(", "prev", ",", "*", "self", ".", "args", ",", "*", "*", "self", ".", "kw", ")", "return", "generator" ]
5852a63fc2c7dd723a3d7abe18455f8dacb49433
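To make the cascading in Pipe.iter concrete, here is a standalone sketch that deliberately does not use the cmdlet API: each stage is a generator function taking the previous stage's generator as 'prev', which is exactly the shape Pipe.iter builds when it calls self.func(prev, *args, **kw) stage by stage.

# Plain-generator sketch of the cascade built by Pipe.iter.
def source(prev):
    for i in range(5):
        yield i

def double(prev):
    for item in prev:
        yield item * 2

def only_multiples_of_four(prev):
    for item in prev:
        if item % 4 == 0:
            yield item

chain = only_multiples_of_four(double(source(None)))
print(list(chain))  # [0, 4, 8]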