Column             Type            Lengths / values
partition          stringclasses   3 values
func_name          stringlengths   1 to 134
docstring          stringlengths   1 to 46.9k
path               stringlengths   4 to 223
original_string    stringlengths   75 to 104k
code               stringlengths   75 to 104k
docstring_tokens   listlengths     1 to 1.97k
repo               stringlengths   7 to 55
language           stringclasses   1 value
url                stringlengths   87 to 315
code_tokens        listlengths     19 to 28.4k
sha                stringlengths   40 to 40
test
load_config_from_cli
Loads config, checking CLI arguments for a config file
goodconf/contrib/django.py
def load_config_from_cli(config: GoodConf, argv: List[str]) -> List[str]:
    """Loads config, checking CLI arguments for a config file"""
    # Monkey patch Django's command parser
    from django.core.management.base import BaseCommand
    original_parser = BaseCommand.create_parser

    def patched_parser(self, prog_name, subcommand):
        parser = original_parser(self, prog_name, subcommand)
        argparser_add_argument(parser, config)
        return parser

    BaseCommand.create_parser = patched_parser
    try:
        parser = argparse.ArgumentParser(add_help=False)
        argparser_add_argument(parser, config)
        config_arg, default_args = parser.parse_known_args(argv)
        config.load(config_arg.config)
        yield default_args
    finally:
        # Put that create_parser back where it came from or so help me!
        BaseCommand.create_parser = original_parser
[ "Loads", "config", "checking", "CLI", "arguments", "for", "a", "config", "file" ]
lincolnloop/goodconf
python
https://github.com/lincolnloop/goodconf/blob/19515da5783f86b9516dbf81531107c2d9eae567/goodconf/contrib/django.py#L10-L33
[ "def", "load_config_from_cli", "(", "config", ":", "GoodConf", ",", "argv", ":", "List", "[", "str", "]", ")", "->", "List", "[", "str", "]", ":", "# Monkey patch Django's command parser", "from", "django", ".", "core", ".", "management", ".", "base", "import", "BaseCommand", "original_parser", "=", "BaseCommand", ".", "create_parser", "def", "patched_parser", "(", "self", ",", "prog_name", ",", "subcommand", ")", ":", "parser", "=", "original_parser", "(", "self", ",", "prog_name", ",", "subcommand", ")", "argparser_add_argument", "(", "parser", ",", "config", ")", "return", "parser", "BaseCommand", ".", "create_parser", "=", "patched_parser", "try", ":", "parser", "=", "argparse", ".", "ArgumentParser", "(", "add_help", "=", "False", ")", "argparser_add_argument", "(", "parser", ",", "config", ")", "config_arg", ",", "default_args", "=", "parser", ".", "parse_known_args", "(", "argv", ")", "config", ".", "load", "(", "config_arg", ".", "config", ")", "yield", "default_args", "finally", ":", "# Put that create_parser back where it came from or so help me!", "BaseCommand", ".", "create_parser", "=", "original_parser" ]
19515da5783f86b9516dbf81531107c2d9eae567
test
execute_from_command_line_with_config
Load's config then runs Django's execute_from_command_line
goodconf/contrib/django.py
def execute_from_command_line_with_config(config: GoodConf, argv: List[str]):
    """Load's config then runs Django's execute_from_command_line"""
    with load_config_from_cli(config, argv) as args:
        from django.core.management import execute_from_command_line
        execute_from_command_line(args)
[ "Load", "s", "config", "then", "runs", "Django", "s", "execute_from_command_line" ]
lincolnloop/goodconf
python
https://github.com/lincolnloop/goodconf/blob/19515da5783f86b9516dbf81531107c2d9eae567/goodconf/contrib/django.py#L36-L40
[ "def", "execute_from_command_line_with_config", "(", "config", ":", "GoodConf", ",", "argv", ":", "List", "[", "str", "]", ")", ":", "with", "load_config_from_cli", "(", "config", ",", "argv", ")", "as", "args", ":", "from", "django", ".", "core", ".", "management", "import", "execute_from_command_line", "execute_from_command_line", "(", "args", ")" ]
19515da5783f86b9516dbf81531107c2d9eae567
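For context, a minimal sketch of how the two helpers above are typically wired into a Django project's manage.py; the myproject.conf module and its config object are hypothetical stand-ins for a GoodConf instance defined elsewhere, and only the final call comes from the records.

# manage.py -- hypothetical wiring
import sys

from goodconf.contrib.django import execute_from_command_line_with_config
from myproject.conf import config  # assumed module exposing a GoodConf instance

if __name__ == '__main__':
    # Parses -C/--config out of argv, loads that file (or falls back to the
    # env var / default files), then hands the remaining args to Django.
    execute_from_command_line_with_config(config, sys.argv)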
test
argparser_add_argument
Adds argument for config to existing argparser
goodconf/contrib/argparse.py
def argparser_add_argument(parser: argparse.ArgumentParser, config: GoodConf):
    """Adds argument for config to existing argparser"""
    help = "Config file."
    if config.file_env_var:
        help += (" Can also be configured via the "
                 "environment variable: {}".format(config.file_env_var))
    if config.default_files:
        help += (" Defaults to the first file that exists from "
                 "[{}].".format(', '.join(config.default_files)))
    parser.add_argument('-C', '--config', metavar='FILE', help=help)
[ "Adds", "argument", "for", "config", "to", "existing", "argparser" ]
lincolnloop/goodconf
python
https://github.com/lincolnloop/goodconf/blob/19515da5783f86b9516dbf81531107c2d9eae567/goodconf/contrib/argparse.py#L5-L14
[ "def", "argparser_add_argument", "(", "parser", ":", "argparse", ".", "ArgumentParser", ",", "config", ":", "GoodConf", ")", ":", "help", "=", "\"Config file.\"", "if", "config", ".", "file_env_var", ":", "help", "+=", "(", "\" Can also be configured via the \"", "\"environment variable: {}\"", ".", "format", "(", "config", ".", "file_env_var", ")", ")", "if", "config", ".", "default_files", ":", "help", "+=", "(", "\" Defaults to the first file that exists from \"", "\"[{}].\"", ".", "format", "(", "', '", ".", "join", "(", "config", ".", "default_files", ")", ")", ")", "parser", ".", "add_argument", "(", "'-C'", ",", "'--config'", ",", "metavar", "=", "'FILE'", ",", "help", "=", "help", ")" ]
19515da5783f86b9516dbf81531107c2d9eae567
test
_load_config
Given a file path, parse it based on its extension (YAML or JSON) and return the values as a Python dictionary. JSON is the default if an extension can't be determined.
goodconf/__init__.py
def _load_config(path: str) -> dict:
    """
    Given a file path, parse it based on its extension (YAML or JSON)
    and return the values as a Python dictionary. JSON is the default
    if an extension can't be determined.
    """
    __, ext = os.path.splitext(path)
    if ext in ['.yaml', '.yml']:
        import ruamel.yaml
        loader = ruamel.yaml.safe_load
    else:
        loader = json.load
    with open(path) as f:
        config = loader(f)
    return config
[ "Given", "a", "file", "path", "parse", "it", "based", "on", "its", "extension", "(", "YAML", "or", "JSON", ")", "and", "return", "the", "values", "as", "a", "Python", "dictionary", ".", "JSON", "is", "the", "default", "if", "an", "extension", "can", "t", "be", "determined", "." ]
lincolnloop/goodconf
python
https://github.com/lincolnloop/goodconf/blob/19515da5783f86b9516dbf81531107c2d9eae567/goodconf/__init__.py#L18-L32
[ "def", "_load_config", "(", "path", ":", "str", ")", "->", "dict", ":", "__", ",", "ext", "=", "os", ".", "path", ".", "splitext", "(", "path", ")", "if", "ext", "in", "[", "'.yaml'", ",", "'.yml'", "]", ":", "import", "ruamel", ".", "yaml", "loader", "=", "ruamel", ".", "yaml", ".", "safe_load", "else", ":", "loader", "=", "json", ".", "load", "with", "open", "(", "path", ")", "as", "f", ":", "config", "=", "loader", "(", "f", ")", "return", "config" ]
19515da5783f86b9516dbf81531107c2d9eae567
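A quick way to see the suffix dispatch above in action; a hedged sketch, since _load_config is a private helper and the temporary file is only illustrative.

import json
import os
import tempfile

from goodconf import _load_config  # private helper from the record above

# A .json (or unknown) suffix routes through json.load; a .yaml/.yml suffix
# would route through ruamel.yaml.safe_load instead.
with tempfile.NamedTemporaryFile('w', suffix='.json', delete=False) as f:
    json.dump({'DEBUG': True, 'PORT': 8000}, f)
print(_load_config(f.name))  # {'DEBUG': True, 'PORT': 8000}
os.unlink(f.name)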
test
GoodConf.load
Find config file and set values
goodconf/__init__.py
def load(self, filename: str = None):
    """Find config file and set values"""
    if filename:
        self.config_file = _find_file(filename)
    else:
        if self.file_env_var and self.file_env_var in os.environ:
            self.config_file = _find_file(os.environ[self.file_env_var])
        if not self.config_file:
            for filename in self.default_files:
                self.config_file = _find_file(filename, require=False)
                if self.config_file:
                    break
    if self.config_file:
        config = _load_config(self.config_file)
        log.info("Loading config from %s", self.config_file)
    else:
        config = {}
        log.info("No config file specified. "
                 "Loading with environment variables.")
    self.set_values(config)
[ "Find", "config", "file", "and", "set", "values" ]
lincolnloop/goodconf
python
https://github.com/lincolnloop/goodconf/blob/19515da5783f86b9516dbf81531107c2d9eae567/goodconf/__init__.py#L66-L85
[ "def", "load", "(", "self", ",", "filename", ":", "str", "=", "None", ")", ":", "if", "filename", ":", "self", ".", "config_file", "=", "_find_file", "(", "filename", ")", "else", ":", "if", "self", ".", "file_env_var", "and", "self", ".", "file_env_var", "in", "os", ".", "environ", ":", "self", ".", "config_file", "=", "_find_file", "(", "os", ".", "environ", "[", "self", ".", "file_env_var", "]", ")", "if", "not", "self", ".", "config_file", ":", "for", "filename", "in", "self", ".", "default_files", ":", "self", ".", "config_file", "=", "_find_file", "(", "filename", ",", "require", "=", "False", ")", "if", "self", ".", "config_file", ":", "break", "if", "self", ".", "config_file", ":", "config", "=", "_load_config", "(", "self", ".", "config_file", ")", "log", ".", "info", "(", "\"Loading config from %s\"", ",", "self", ".", "config_file", ")", "else", ":", "config", "=", "{", "}", "log", ".", "info", "(", "\"No config file specified. \"", "\"Loading with environment variables.\"", ")", "self", ".", "set_values", "(", "config", ")" ]
19515da5783f86b9516dbf81531107c2d9eae567
test
GoodConf.generate_yaml
Dumps initial config in YAML
goodconf/__init__.py
def generate_yaml(cls, **override):
    """
    Dumps initial config in YAML
    """
    import ruamel.yaml
    yaml = ruamel.yaml.YAML()
    yaml_str = StringIO()
    yaml.dump(cls.get_initial(**override), stream=yaml_str)
    yaml_str.seek(0)
    dict_from_yaml = yaml.load(yaml_str)
    if cls.__doc__:
        dict_from_yaml.yaml_set_start_comment(
            '\n' + cls.__doc__ + '\n\n')
    for k in dict_from_yaml.keys():
        if cls._values[k].help:
            dict_from_yaml.yaml_set_comment_before_after_key(
                k, before='\n' + cls._values[k].help)
    yaml_str = StringIO()
    yaml.dump(dict_from_yaml, yaml_str)
    yaml_str.seek(0)
    return yaml_str.read()
[ "Dumps", "initial", "config", "in", "YAML" ]
lincolnloop/goodconf
python
https://github.com/lincolnloop/goodconf/blob/19515da5783f86b9516dbf81531107c2d9eae567/goodconf/__init__.py#L99-L119
[ "def", "generate_yaml", "(", "cls", ",", "*", "*", "override", ")", ":", "import", "ruamel", ".", "yaml", "yaml", "=", "ruamel", ".", "yaml", ".", "YAML", "(", ")", "yaml_str", "=", "StringIO", "(", ")", "yaml", ".", "dump", "(", "cls", ".", "get_initial", "(", "*", "*", "override", ")", ",", "stream", "=", "yaml_str", ")", "yaml_str", ".", "seek", "(", "0", ")", "dict_from_yaml", "=", "yaml", ".", "load", "(", "yaml_str", ")", "if", "cls", ".", "__doc__", ":", "dict_from_yaml", ".", "yaml_set_start_comment", "(", "'\\n'", "+", "cls", ".", "__doc__", "+", "'\\n\\n'", ")", "for", "k", "in", "dict_from_yaml", ".", "keys", "(", ")", ":", "if", "cls", ".", "_values", "[", "k", "]", ".", "help", ":", "dict_from_yaml", ".", "yaml_set_comment_before_after_key", "(", "k", ",", "before", "=", "'\\n'", "+", "cls", ".", "_values", "[", "k", "]", ".", "help", ")", "yaml_str", "=", "StringIO", "(", ")", "yaml", ".", "dump", "(", "dict_from_yaml", ",", "yaml_str", ")", "yaml_str", ".", "seek", "(", "0", ")", "return", "yaml_str", ".", "read", "(", ")" ]
19515da5783f86b9516dbf81531107c2d9eae567
test
GoodConf.generate_markdown
Documents values in markdown
goodconf/__init__.py
def generate_markdown(cls):
    """
    Documents values in markdown
    """
    lines = []
    if cls.__doc__:
        lines.extend(['# {}'.format(cls.__doc__), ''])
    for k, v in cls._values.items():
        lines.append('* **{}** '.format(k))
        if v.required:
            lines[-1] = lines[-1] + '_REQUIRED_ '
        if v.help:
            lines.append(' {} '.format(v.help))
        lines.append(' type: `{}` '.format(v.cast_as.__name__))
        if v.default is not None:
            lines.append(' default: `{}` '.format(v.default))
    return '\n'.join(lines)
[ "Documents", "values", "in", "markdown" ]
lincolnloop/goodconf
python
https://github.com/lincolnloop/goodconf/blob/19515da5783f86b9516dbf81531107c2d9eae567/goodconf/__init__.py#L129-L145
[ "def", "generate_markdown", "(", "cls", ")", ":", "lines", "=", "[", "]", "if", "cls", ".", "__doc__", ":", "lines", ".", "extend", "(", "[", "'# {}'", ".", "format", "(", "cls", ".", "__doc__", ")", ",", "''", "]", ")", "for", "k", ",", "v", "in", "cls", ".", "_values", ".", "items", "(", ")", ":", "lines", ".", "append", "(", "'* **{}** '", ".", "format", "(", "k", ")", ")", "if", "v", ".", "required", ":", "lines", "[", "-", "1", "]", "=", "lines", "[", "-", "1", "]", "+", "'_REQUIRED_ '", "if", "v", ".", "help", ":", "lines", ".", "append", "(", "' {} '", ".", "format", "(", "v", ".", "help", ")", ")", "lines", ".", "append", "(", "' type: `{}` '", ".", "format", "(", "v", ".", "cast_as", ".", "__name__", ")", ")", "if", "v", ".", "default", "is", "not", "None", ":", "lines", ".", "append", "(", "' default: `{}` '", ".", "format", "(", "v", ".", "default", ")", ")", "return", "'\\n'", ".", "join", "(", "lines", ")" ]
19515da5783f86b9516dbf81531107c2d9eae567
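A sketch of how the two generators above are typically used to emit a config template; the AppConfig class, its Value fields, and the Value import location are assumptions about this goodconf version's declaration style, not taken from the records.

from goodconf import GoodConf, Value  # import location of Value is an assumption

class AppConfig(GoodConf):
    "Configuration for My App"
    DEBUG = Value(default=False, help="Toggle debugging.")
    DATABASE_URL = Value(default='postgres://localhost:5432/mydb')

# generate_yaml() emits a commented YAML template, generate_markdown() a
# bulleted listing; both pick up the class docstring and each Value's help text.
print(AppConfig.generate_yaml())
print(AppConfig.generate_markdown())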
test
Value.cast
converts string to type requested by `cast_as`
goodconf/values.py
def cast(self, val: str):
    """converts string to type requested by `cast_as`"""
    try:
        return getattr(self, 'cast_as_{}'.format(
            self.cast_as.__name__.lower()))(val)
    except AttributeError:
        return self.cast_as(val)
[ "converts", "string", "to", "type", "requested", "by", "cast_as" ]
lincolnloop/goodconf
python
https://github.com/lincolnloop/goodconf/blob/19515da5783f86b9516dbf81531107c2d9eae567/goodconf/values.py#L96-L102
[ "def", "cast", "(", "self", ",", "val", ":", "str", ")", ":", "try", ":", "return", "getattr", "(", "self", ",", "'cast_as_{}'", ".", "format", "(", "self", ".", "cast_as", ".", "__name__", ".", "lower", "(", ")", ")", ")", "(", "val", ")", "except", "AttributeError", ":", "return", "self", ".", "cast_as", "(", "val", ")" ]
19515da5783f86b9516dbf81531107c2d9eae567
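The getattr lookup above is easy to miss on one line; here is a standalone sketch of the same dispatch pattern, independent of goodconf's actual Value class.

class Caster:
    def __init__(self, cast_as):
        self.cast_as = cast_as

    def cast(self, val: str):
        # Prefer a specialised cast_as_<typename> method when one exists,
        # otherwise fall back to calling the type directly.
        try:
            return getattr(self, 'cast_as_{}'.format(
                self.cast_as.__name__.lower()))(val)
        except AttributeError:
            return self.cast_as(val)

    def cast_as_bool(self, val: str) -> bool:
        return val.lower() in ('true', 'yes', '1')

print(Caster(int).cast('42'))    # 42, via the int() fallback
print(Caster(bool).cast('yes'))  # True, via the specialised cast_as_bool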
test
list_dates_between
Returns all dates from first to last included.
currency_converter/currency_converter.py
def list_dates_between(first_date, last_date):
    """Returns all dates from first to last included."""
    return [first_date + timedelta(days=n)
            for n in range(1 + (last_date - first_date).days)]
[ "Returns", "all", "dates", "from", "first", "to", "last", "included", "." ]
alexprengere/currencyconverter
python
https://github.com/alexprengere/currencyconverter/blob/e3cb0d693819c0c824214225b23a47e9380f71df/currency_converter/currency_converter.py#L60-L63
[ "def", "list_dates_between", "(", "first_date", ",", "last_date", ")", ":", "return", "[", "first_date", "+", "timedelta", "(", "days", "=", "n", ")", "for", "n", "in", "range", "(", "1", "+", "(", "last_date", "-", "first_date", ")", ".", "days", ")", "]" ]
e3cb0d693819c0c824214225b23a47e9380f71df
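The same expression evaluated on a concrete pair of dates, to make the inclusive bounds explicit (the dates are arbitrary).

from datetime import date, timedelta

first, last = date(2014, 3, 28), date(2014, 3, 30)
# 1 + (last - first).days == 3, so both endpoints are included.
print([first + timedelta(days=n) for n in range(1 + (last - first).days)])
# [datetime.date(2014, 3, 28), datetime.date(2014, 3, 29), datetime.date(2014, 3, 30)]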
test
parse_date
Fast %Y-%m-%d parsing.
currency_converter/currency_converter.py
def parse_date(s):
    """Fast %Y-%m-%d parsing."""
    try:
        return datetime.date(int(s[:4]), int(s[5:7]), int(s[8:10]))
    except ValueError:  # other accepted format used in one-day data set
        return datetime.datetime.strptime(s, '%d %B %Y').date()
[ "Fast", "%Y", "-", "%m", "-", "%d", "parsing", "." ]
alexprengere/currencyconverter
python
https://github.com/alexprengere/currencyconverter/blob/e3cb0d693819c0c824214225b23a47e9380f71df/currency_converter/currency_converter.py#L67-L72
[ "def", "parse_date", "(", "s", ")", ":", "try", ":", "return", "datetime", ".", "date", "(", "int", "(", "s", "[", ":", "4", "]", ")", ",", "int", "(", "s", "[", "5", ":", "7", "]", ")", ",", "int", "(", "s", "[", "8", ":", "10", "]", ")", ")", "except", "ValueError", ":", "# other accepted format used in one-day data set", "return", "datetime", ".", "datetime", ".", "strptime", "(", "s", ",", "'%d %B %Y'", ")", ".", "date", "(", ")" ]
e3cb0d693819c0c824214225b23a47e9380f71df
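To make the two branches concrete, a self-contained copy of the parser run on both accepted formats; the sample strings are only illustrative.

import datetime

def parse_date(s):
    """Fast %Y-%m-%d parsing, with a strptime fallback (copy of the record above)."""
    try:
        return datetime.date(int(s[:4]), int(s[5:7]), int(s[8:10]))
    except ValueError:  # other accepted format used in one-day data set
        return datetime.datetime.strptime(s, '%d %B %Y').date()

print(parse_date('2014-03-28'))     # fast slicing path
print(parse_date('28 March 2014'))  # strptime fallback (English month names assumed)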
test
CurrencyConverter.load_file
To be subclassed if alternate methods of loading data.
currency_converter/currency_converter.py
def load_file(self, currency_file):
    """To be subclassed if alternate methods of loading data.
    """
    if currency_file.startswith(('http://', 'https://')):
        content = urlopen(currency_file).read()
    else:
        with open(currency_file, 'rb') as f:
            content = f.read()

    if currency_file.endswith('.zip'):
        self.load_lines(get_lines_from_zip(content))
    else:
        self.load_lines(content.decode('utf-8').splitlines())
[ "To", "be", "subclassed", "if", "alternate", "methods", "of", "loading", "data", "." ]
alexprengere/currencyconverter
python
https://github.com/alexprengere/currencyconverter/blob/e3cb0d693819c0c824214225b23a47e9380f71df/currency_converter/currency_converter.py#L147-L159
[ "def", "load_file", "(", "self", ",", "currency_file", ")", ":", "if", "currency_file", ".", "startswith", "(", "(", "'http://'", ",", "'https://'", ")", ")", ":", "content", "=", "urlopen", "(", "currency_file", ")", ".", "read", "(", ")", "else", ":", "with", "open", "(", "currency_file", ",", "'rb'", ")", "as", "f", ":", "content", "=", "f", ".", "read", "(", ")", "if", "currency_file", ".", "endswith", "(", "'.zip'", ")", ":", "self", ".", "load_lines", "(", "get_lines_from_zip", "(", "content", ")", ")", "else", ":", "self", ".", "load_lines", "(", "content", ".", "decode", "(", "'utf-8'", ")", ".", "splitlines", "(", ")", ")" ]
e3cb0d693819c0c824214225b23a47e9380f71df
test
CurrencyConverter._set_missing_to_none
Fill missing rates of a currency with the closest available ones.
currency_converter/currency_converter.py
def _set_missing_to_none(self, currency):
    """Fill missing rates of a currency with the closest available ones."""
    rates = self._rates[currency]
    first_date, last_date = self.bounds[currency]

    for date in list_dates_between(first_date, last_date):
        if date not in rates:
            rates[date] = None

    if self.verbose:
        missing = len([r for r in itervalues(rates) if r is None])
        if missing:
            print('{0}: {1} missing rates from {2} to {3} ({4} days)'.format(
                currency, missing, first_date, last_date,
                1 + (last_date - first_date).days))
[ "Fill", "missing", "rates", "of", "a", "currency", "with", "the", "closest", "available", "ones", "." ]
alexprengere/currencyconverter
python
https://github.com/alexprengere/currencyconverter/blob/e3cb0d693819c0c824214225b23a47e9380f71df/currency_converter/currency_converter.py#L192-L206
[ "def", "_set_missing_to_none", "(", "self", ",", "currency", ")", ":", "rates", "=", "self", ".", "_rates", "[", "currency", "]", "first_date", ",", "last_date", "=", "self", ".", "bounds", "[", "currency", "]", "for", "date", "in", "list_dates_between", "(", "first_date", ",", "last_date", ")", ":", "if", "date", "not", "in", "rates", ":", "rates", "[", "date", "]", "=", "None", "if", "self", ".", "verbose", ":", "missing", "=", "len", "(", "[", "r", "for", "r", "in", "itervalues", "(", "rates", ")", "if", "r", "is", "None", "]", ")", "if", "missing", ":", "print", "(", "'{0}: {1} missing rates from {2} to {3} ({4} days)'", ".", "format", "(", "currency", ",", "missing", ",", "first_date", ",", "last_date", ",", "1", "+", "(", "last_date", "-", "first_date", ")", ".", "days", ")", ")" ]
e3cb0d693819c0c824214225b23a47e9380f71df
test
CurrencyConverter._compute_missing_rates
Fill missing rates of a currency.

This is done by linear interpolation of the two closest available rates.

:param str currency: The currency to fill missing rates for.
currency_converter/currency_converter.py
def _compute_missing_rates(self, currency):
    """Fill missing rates of a currency.

    This is done by linear interpolation of the two closest available rates.

    :param str currency: The currency to fill missing rates for.
    """
    rates = self._rates[currency]

    # tmp will store the closest rates forward and backward
    tmp = defaultdict(lambda: [None, None])

    for date in sorted(rates):
        rate = rates[date]
        if rate is not None:
            closest_rate = rate
            dist = 0
        else:
            dist += 1
            tmp[date][0] = closest_rate, dist

    for date in sorted(rates, reverse=True):
        rate = rates[date]
        if rate is not None:
            closest_rate = rate
            dist = 0
        else:
            dist += 1
            tmp[date][1] = closest_rate, dist

    for date in sorted(tmp):
        (r0, d0), (r1, d1) = tmp[date]
        rates[date] = (r0 * d1 + r1 * d0) / (d0 + d1)
        if self.verbose:
            print(('{0}: filling {1} missing rate using {2} ({3}d old) and '
                   '{4} ({5}d later)').format(currency, date, r0, d0, r1, d1))
[ "Fill", "missing", "rates", "of", "a", "currency", "." ]
alexprengere/currencyconverter
python
https://github.com/alexprengere/currencyconverter/blob/e3cb0d693819c0c824214225b23a47e9380f71df/currency_converter/currency_converter.py#L208-L243
[ "def", "_compute_missing_rates", "(", "self", ",", "currency", ")", ":", "rates", "=", "self", ".", "_rates", "[", "currency", "]", "# tmp will store the closest rates forward and backward", "tmp", "=", "defaultdict", "(", "lambda", ":", "[", "None", ",", "None", "]", ")", "for", "date", "in", "sorted", "(", "rates", ")", ":", "rate", "=", "rates", "[", "date", "]", "if", "rate", "is", "not", "None", ":", "closest_rate", "=", "rate", "dist", "=", "0", "else", ":", "dist", "+=", "1", "tmp", "[", "date", "]", "[", "0", "]", "=", "closest_rate", ",", "dist", "for", "date", "in", "sorted", "(", "rates", ",", "reverse", "=", "True", ")", ":", "rate", "=", "rates", "[", "date", "]", "if", "rate", "is", "not", "None", ":", "closest_rate", "=", "rate", "dist", "=", "0", "else", ":", "dist", "+=", "1", "tmp", "[", "date", "]", "[", "1", "]", "=", "closest_rate", ",", "dist", "for", "date", "in", "sorted", "(", "tmp", ")", ":", "(", "r0", ",", "d0", ")", ",", "(", "r1", ",", "d1", ")", "=", "tmp", "[", "date", "]", "rates", "[", "date", "]", "=", "(", "r0", "*", "d1", "+", "r1", "*", "d0", ")", "/", "(", "d0", "+", "d1", ")", "if", "self", ".", "verbose", ":", "print", "(", "(", "'{0}: filling {1} missing rate using {2} ({3}d old) and '", "'{4} ({5}d later)'", ")", ".", "format", "(", "currency", ",", "date", ",", "r0", ",", "d0", ",", "r1", ",", "d1", ")", ")" ]
e3cb0d693819c0c824214225b23a47e9380f71df
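A tiny numeric check of the weighting used in the last loop, with made-up rates: the previous known rate r0 lies d0 days before the gap and the next known rate r1 lies d1 days after it.

r0, d0 = 1.30, 2  # last known rate, two days back
r1, d1 = 1.36, 1  # next known rate, one day ahead
# The distance-weighted average equals the straight line between the two points.
print((r0 * d1 + r1 * d0) / (d0 + d1))  # 1.34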
test
CurrencyConverter._get_rate
Get a rate for a given currency and date.

:type date: datetime.date

>>> from datetime import date
>>> c = CurrencyConverter()
>>> c._get_rate('USD', date=date(2014, 3, 28))
1.375...
>>> c._get_rate('BGN', date=date(2010, 11, 21))
Traceback (most recent call last):
RateNotFoundError: BGN has no rate for 2010-11-21
currency_converter/currency_converter.py
def _get_rate(self, currency, date):
    """Get a rate for a given currency and date.

    :type date: datetime.date

    >>> from datetime import date
    >>> c = CurrencyConverter()
    >>> c._get_rate('USD', date=date(2014, 3, 28))
    1.375...
    >>> c._get_rate('BGN', date=date(2010, 11, 21))
    Traceback (most recent call last):
    RateNotFoundError: BGN has no rate for 2010-11-21
    """
    if currency == self.ref_currency:
        return 1.0

    if date not in self._rates[currency]:
        first_date, last_date = self.bounds[currency]

        if not self.fallback_on_wrong_date:
            raise RateNotFoundError('{0} not in {1} bounds {2}/{3}'.format(
                date, currency, first_date, last_date))

        if date < first_date:
            fallback_date = first_date
        elif date > last_date:
            fallback_date = last_date
        else:
            raise AssertionError('Should never happen, bug in the code!')

        if self.verbose:
            print(r'/!\ {0} not in {1} bounds {2}/{3}, falling back to {4}'.format(
                date, currency, first_date, last_date, fallback_date))

        date = fallback_date

    rate = self._rates[currency][date]
    if rate is None:
        raise RateNotFoundError('{0} has no rate for {1}'.format(currency, date))
    return rate
[ "Get", "a", "rate", "for", "a", "given", "currency", "and", "date", "." ]
alexprengere/currencyconverter
python
https://github.com/alexprengere/currencyconverter/blob/e3cb0d693819c0c824214225b23a47e9380f71df/currency_converter/currency_converter.py#L245-L284
[ "def", "_get_rate", "(", "self", ",", "currency", ",", "date", ")", ":", "if", "currency", "==", "self", ".", "ref_currency", ":", "return", "1.0", "if", "date", "not", "in", "self", ".", "_rates", "[", "currency", "]", ":", "first_date", ",", "last_date", "=", "self", ".", "bounds", "[", "currency", "]", "if", "not", "self", ".", "fallback_on_wrong_date", ":", "raise", "RateNotFoundError", "(", "'{0} not in {1} bounds {2}/{3}'", ".", "format", "(", "date", ",", "currency", ",", "first_date", ",", "last_date", ")", ")", "if", "date", "<", "first_date", ":", "fallback_date", "=", "first_date", "elif", "date", ">", "last_date", ":", "fallback_date", "=", "last_date", "else", ":", "raise", "AssertionError", "(", "'Should never happen, bug in the code!'", ")", "if", "self", ".", "verbose", ":", "print", "(", "r'/!\\ {0} not in {1} bounds {2}/{3}, falling back to {4}'", ".", "format", "(", "date", ",", "currency", ",", "first_date", ",", "last_date", ",", "fallback_date", ")", ")", "date", "=", "fallback_date", "rate", "=", "self", ".", "_rates", "[", "currency", "]", "[", "date", "]", "if", "rate", "is", "None", ":", "raise", "RateNotFoundError", "(", "'{0} has no rate for {1}'", ".", "format", "(", "currency", ",", "date", ")", ")", "return", "rate" ]
e3cb0d693819c0c824214225b23a47e9380f71df
test
CurrencyConverter.convert
Convert amount from a currency to another one.

:param float amount: The amount of `currency` to convert.
:param str currency: The currency to convert from.
:param str new_currency: The currency to convert to.
:param datetime.date date: Use the conversion rate of this date. If this
    is not given, the most recent rate is used.
:return: The value of `amount` in `new_currency`.
:rtype: float

>>> from datetime import date
>>> c = CurrencyConverter()
>>> c.convert(100, 'EUR', 'USD', date=date(2014, 3, 28))
137.5...
>>> c.convert(100, 'USD', date=date(2014, 3, 28))
72.67...
>>> c.convert(100, 'BGN', date=date(2010, 11, 21))
Traceback (most recent call last):
RateNotFoundError: BGN has no rate for 2010-11-21
currency_converter/currency_converter.py
def convert(self, amount, currency, new_currency='EUR', date=None):
    """Convert amount from a currency to another one.

    :param float amount: The amount of `currency` to convert.
    :param str currency: The currency to convert from.
    :param str new_currency: The currency to convert to.
    :param datetime.date date: Use the conversion rate of this date. If this
        is not given, the most recent rate is used.
    :return: The value of `amount` in `new_currency`.
    :rtype: float

    >>> from datetime import date
    >>> c = CurrencyConverter()
    >>> c.convert(100, 'EUR', 'USD', date=date(2014, 3, 28))
    137.5...
    >>> c.convert(100, 'USD', date=date(2014, 3, 28))
    72.67...
    >>> c.convert(100, 'BGN', date=date(2010, 11, 21))
    Traceback (most recent call last):
    RateNotFoundError: BGN has no rate for 2010-11-21
    """
    for c in currency, new_currency:
        if c not in self.currencies:
            raise ValueError('{0} is not a supported currency'.format(c))

    if date is None:
        date = self.bounds[currency].last_date
    else:
        try:
            date = date.date()  # fallback if input was a datetime object
        except AttributeError:
            pass

    r0 = self._get_rate(currency, date)
    r1 = self._get_rate(new_currency, date)

    return float(amount) / r0 * r1
[ "Convert", "amount", "from", "a", "currency", "to", "another", "one", "." ]
alexprengere/currencyconverter
python
https://github.com/alexprengere/currencyconverter/blob/e3cb0d693819c0c824214225b23a47e9380f71df/currency_converter/currency_converter.py#L286-L323
[ "def", "convert", "(", "self", ",", "amount", ",", "currency", ",", "new_currency", "=", "'EUR'", ",", "date", "=", "None", ")", ":", "for", "c", "in", "currency", ",", "new_currency", ":", "if", "c", "not", "in", "self", ".", "currencies", ":", "raise", "ValueError", "(", "'{0} is not a supported currency'", ".", "format", "(", "c", ")", ")", "if", "date", "is", "None", ":", "date", "=", "self", ".", "bounds", "[", "currency", "]", ".", "last_date", "else", ":", "try", ":", "date", "=", "date", ".", "date", "(", ")", "# fallback if input was a datetime object", "except", "AttributeError", ":", "pass", "r0", "=", "self", ".", "_get_rate", "(", "currency", ",", "date", ")", "r1", "=", "self", ".", "_get_rate", "(", "new_currency", ",", "date", ")", "return", "float", "(", "amount", ")", "/", "r0", "*", "r1" ]
e3cb0d693819c0c824214225b23a47e9380f71df
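The return expression is easier to read with concrete numbers: rates are quoted per unit of the reference currency (EUR), so the amount is first brought back to EUR and then taken out to the target. The USD rate matches the doctest above; the GBP rate is made up.

amount = 100.0
r0 = 1.375  # USD per EUR on the chosen date (from the doctest)
r1 = 0.83   # GBP per EUR on the same date (illustrative)
in_eur = amount / r0   # 100 USD -> ~72.73 EUR
print(in_eur * r1)     # -> ~60.36 GBP, i.e. float(amount) / r0 * r1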
test
grouper
Group iterable by n elements.

>>> for t in grouper('abcdefg', 3, fillvalue='x'):
...     print(''.join(t))
abc
def
gxx
currency_converter/__main__.py
def grouper(iterable, n, fillvalue=None):
    """Group iterable by n elements.

    >>> for t in grouper('abcdefg', 3, fillvalue='x'):
    ...     print(''.join(t))
    abc
    def
    gxx
    """
    return list(zip_longest(*[iter(iterable)] * n, fillvalue=fillvalue))
[ "Group", "iterable", "by", "n", "elements", "." ]
alexprengere/currencyconverter
python
https://github.com/alexprengere/currencyconverter/blob/e3cb0d693819c0c824214225b23a47e9380f71df/currency_converter/__main__.py#L16-L25
[ "def", "grouper", "(", "iterable", ",", "n", ",", "fillvalue", "=", "None", ")", ":", "return", "list", "(", "zip_longest", "(", "*", "[", "iter", "(", "iterable", ")", "]", "*", "n", ",", "fillvalue", "=", "fillvalue", ")", ")" ]
e3cb0d693819c0c824214225b23a47e9380f71df
test
animate
Animate given frame for set number of iterations.

Parameters
----------
frames : list
    Frames for animating
interval : float
    Interval between two frames
name : str
    Name of animation
iterations : int, optional
    Number of loops for animations
examples/examples.py
def animate(frames, interval, name, iterations=2):
    """Animate given frame for set number of iterations.

    Parameters
    ----------
    frames : list
        Frames for animating
    interval : float
        Interval between two frames
    name : str
        Name of animation
    iterations : int, optional
        Number of loops for animations
    """
    for i in range(iterations):
        for frame in frames:
            frame = get_coded_text(frame)
            output = "\r{0} {1}".format(frame, name)
            sys.stdout.write(output)
            sys.stdout.write(CLEAR_LINE)
            sys.stdout.flush()
            time.sleep(0.001 * interval)
[ "Animate", "given", "frame", "for", "set", "number", "of", "iterations", "." ]
manrajgrover/py-spinners
python
https://github.com/manrajgrover/py-spinners/blob/2400b5f355049a691202671cb2ccf2b269eef4a3/examples/examples.py#L75-L96
[ "def", "animate", "(", "frames", ",", "interval", ",", "name", ",", "iterations", "=", "2", ")", ":", "for", "i", "in", "range", "(", "iterations", ")", ":", "for", "frame", "in", "frames", ":", "frame", "=", "get_coded_text", "(", "frame", ")", "output", "=", "\"\\r{0} {1}\"", ".", "format", "(", "frame", ",", "name", ")", "sys", ".", "stdout", ".", "write", "(", "output", ")", "sys", ".", "stdout", ".", "write", "(", "CLEAR_LINE", ")", "sys", ".", "stdout", ".", "flush", "(", ")", "time", ".", "sleep", "(", "0.001", "*", "interval", ")" ]
2400b5f355049a691202671cb2ccf2b269eef4a3
test
DimacsCnf.tostring
Convert Cnf object ot Dimacs cnf string

cnf: Cnf object

In the converted Cnf there will be only numbers for variable names.
The conversion guarantees that the variables will be numbered
alphabetically.
satispy/io/dimacs_cnf.py
def tostring(self, cnf):
    """Convert Cnf object ot Dimacs cnf string

    cnf: Cnf object

    In the converted Cnf there will be only numbers for variable names.
    The conversion guarantees that the variables will be numbered
    alphabetically.
    """
    self.varname_dict = {}
    self.varobj_dict = {}

    varis = set()
    for d in cnf.dis:
        for v in d:
            varis.add(v.name)

    ret = "p cnf %d %d" % (len(varis), len(cnf.dis))

    varis = dict(list(zip(
        sorted(list(varis)),
        list(map(str, list(range(1, len(varis) + 1)))))))

    for v in varis:
        vo = Variable(v)
        self.varname_dict[vo] = varis[v]
        self.varobj_dict[varis[v]] = vo

    for d in cnf.dis:
        ret += "\n"
        vnamelist = []
        for v in d:
            vnamelist.append(("-" if v.inverted else "") + varis[v.name])
        ret += " ".join(vnamelist) + " 0"

    return ret
[ "Convert", "Cnf", "object", "ot", "Dimacs", "cnf", "string", "cnf", ":", "Cnf", "object", "In", "the", "converted", "Cnf", "there", "will", "be", "only", "numbers", "for", "variable", "names", ".", "The", "conversion", "guarantees", "that", "the", "variables", "will", "be", "numbered", "alphabetically", "." ]
netom/satispy
python
https://github.com/netom/satispy/blob/0201a7bffd9070441b9e82187348d61c53922b6b/satispy/io/dimacs_cnf.py#L18-L51
[ "def", "tostring", "(", "self", ",", "cnf", ")", ":", "self", ".", "varname_dict", "=", "{", "}", "self", ".", "varobj_dict", "=", "{", "}", "varis", "=", "set", "(", ")", "for", "d", "in", "cnf", ".", "dis", ":", "for", "v", "in", "d", ":", "varis", ".", "add", "(", "v", ".", "name", ")", "ret", "=", "\"p cnf %d %d\"", "%", "(", "len", "(", "varis", ")", ",", "len", "(", "cnf", ".", "dis", ")", ")", "varis", "=", "dict", "(", "list", "(", "zip", "(", "sorted", "(", "list", "(", "varis", ")", ")", ",", "list", "(", "map", "(", "str", ",", "list", "(", "range", "(", "1", ",", "len", "(", "varis", ")", "+", "1", ")", ")", ")", ")", ")", ")", ")", "for", "v", "in", "varis", ":", "vo", "=", "Variable", "(", "v", ")", "self", ".", "varname_dict", "[", "vo", "]", "=", "varis", "[", "v", "]", "self", ".", "varobj_dict", "[", "varis", "[", "v", "]", "]", "=", "vo", "for", "d", "in", "cnf", ".", "dis", ":", "ret", "+=", "\"\\n\"", "vnamelist", "=", "[", "]", "for", "v", "in", "d", ":", "vnamelist", ".", "append", "(", "(", "\"-\"", "if", "v", ".", "inverted", "else", "\"\"", ")", "+", "varis", "[", "v", ".", "name", "]", ")", "ret", "+=", "\" \"", ".", "join", "(", "vnamelist", ")", "+", "\" 0\"", "return", "ret" ]
0201a7bffd9070441b9e82187348d61c53922b6b
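A sketch of the expected output shape for a two-variable formula; the import paths and operator syntax are assumptions about this satispy version, and the clause order in the result may differ because Cnf.dis is a set.

from satispy import Variable
from satispy.io import DimacsCnf  # import path assumed

a, b = Variable('a'), Variable('b')
exp = (a | -b) & (-a | b)
print(DimacsCnf().tostring(exp))
# Expected shape (variables renumbered alphabetically, a -> 1, b -> 2):
# p cnf 2 2
# 1 -2 0
# -1 2 0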
test
reduceCnf
I just found a remarkably large bug in my SAT solver and found an
interesting solution.

Remove all b | -b
(-b | b) & (b | -a) & (-b | a) & (a | -a)
becomes
(b | -a) & (-b | a)

Remove all (-e) & (-e)
(-e | a) & (-e | a) & (-e | a) & (-e | a)
becomes
(-e | a)

(-b | b | c) becomes nothing, not (c)
satispy/cnf.py
def reduceCnf(cnf):
    """
    I just found a remarkably large bug in my SAT solver and found an
    interesting solution.

    Remove all b | -b
    (-b | b) & (b | -a) & (-b | a) & (a | -a)
    becomes
    (b | -a) & (-b | a)

    Remove all (-e) & (-e)
    (-e | a) & (-e | a) & (-e | a) & (-e | a)
    becomes
    (-e | a)

    (-b | b | c) becomes nothing, not (c)
    """
    output = Cnf()
    for x in cnf.dis:
        dont_add = False
        for y in x:
            for z in x:
                if z == -y:
                    dont_add = True
                    break
            if dont_add:
                break
        if dont_add:
            continue
        # TODO: Is this necessary anymore? Probably not. Do statistical analysis.
        if x not in output.dis:
            output.dis |= frozenset([x])
    return output
[ "I", "just", "found", "a", "remarkably", "large", "bug", "in", "my", "SAT", "solver", "and", "found", "an", "interesting", "solution", ".", "Remove", "all", "b", "|", "-", "b", "(", "-", "b", "|", "b", ")", "&", "(", "b", "|", "-", "a", ")", "&", "(", "-", "b", "|", "a", ")", "&", "(", "a", "|", "-", "a", ")", "becomes", "(", "b", "|", "-", "a", ")", "&", "(", "-", "b", "|", "a", ")" ]
netom/satispy
python
https://github.com/netom/satispy/blob/0201a7bffd9070441b9e82187348d61c53922b6b/satispy/cnf.py#L128-L156
[ "def", "reduceCnf", "(", "cnf", ")", ":", "output", "=", "Cnf", "(", ")", "for", "x", "in", "cnf", ".", "dis", ":", "dont_add", "=", "False", "for", "y", "in", "x", ":", "for", "z", "in", "x", ":", "if", "z", "==", "-", "y", ":", "dont_add", "=", "True", "break", "if", "dont_add", ":", "break", "if", "dont_add", ":", "continue", "# TODO: Is this necessary anymore? Probably not. Do statistical analysis.", "if", "x", "not", "in", "output", ".", "dis", ":", "output", ".", "dis", "|=", "frozenset", "(", "[", "x", "]", ")", "return", "output" ]
0201a7bffd9070441b9e82187348d61c53922b6b
test
Ephemeris.load
[DEPRECATED] Load the polynomial series for `name` and return it.
jplephem/ephem.py
def load(self, name):
    """[DEPRECATED] Load the polynomial series for `name` and return it."""
    s = self.sets.get(name)
    if s is None:
        self.sets[name] = s = np.load(self.path('jpl-%s.npy' % name))
    return s
[ "[", "DEPRECATED", "]", "Load", "the", "polynomial", "series", "for", "name", "and", "return", "it", "." ]
brandon-rhodes/python-jplephem
python
https://github.com/brandon-rhodes/python-jplephem/blob/48c99ce40c627e24c95479d8845e312ea168f567/jplephem/ephem.py#L41-L46
[ "def", "load", "(", "self", ",", "name", ")", ":", "s", "=", "self", ".", "sets", ".", "get", "(", "name", ")", "if", "s", "is", "None", ":", "self", ".", "sets", "[", "name", "]", "=", "s", "=", "np", ".", "load", "(", "self", ".", "path", "(", "'jpl-%s.npy'", "%", "name", ")", ")", "return", "s" ]
48c99ce40c627e24c95479d8845e312ea168f567
test
Ephemeris.position
[DEPRECATED] Compute the position of `name` at time ``tdb [+ tdb2]``.

The position is returned as a NumPy array ``[x y z]``.

The barycentric dynamical time `tdb` argument should be a float. If there
are many dates you want computed, then make `tdb` an array, which is more
efficient than calling this method multiple times; the return value will
be a two-dimensional array giving a row of values for each coordinate.

For extra precision, the time can be split into two floats; a popular
choice is to use `tdb` for the integer or half-integer date, and `tdb2`
to hold the remaining fraction.

Consult the `names` attribute of this ephemeris for the values of `name`
it supports, such as ``'mars'`` or ``'earthmoon'``.
jplephem/ephem.py
def position(self, name, tdb, tdb2=0.0):
    """[DEPRECATED] Compute the position of `name` at time ``tdb [+ tdb2]``.

    The position is returned as a NumPy array ``[x y z]``.

    The barycentric dynamical time `tdb` argument should be a float. If
    there are many dates you want computed, then make `tdb` an array,
    which is more efficient than calling this method multiple times; the
    return value will be a two-dimensional array giving a row of values
    for each coordinate.

    For extra precision, the time can be split into two floats; a popular
    choice is to use `tdb` for the integer or half-integer date, and
    `tdb2` to hold the remaining fraction.

    Consult the `names` attribute of this ephemeris for the values of
    `name` it supports, such as ``'mars'`` or ``'earthmoon'``.
    """
    bundle = self.compute_bundle(name, tdb, tdb2)
    return self.position_from_bundle(bundle)
[ "[", "DEPRECATED", "]", "Compute", "the", "position", "of", "name", "at", "time", "tdb", "[", "+", "tdb2", "]", "." ]
brandon-rhodes/python-jplephem
python
https://github.com/brandon-rhodes/python-jplephem/blob/48c99ce40c627e24c95479d8845e312ea168f567/jplephem/ephem.py#L48-L68
[ "def", "position", "(", "self", ",", "name", ",", "tdb", ",", "tdb2", "=", "0.0", ")", ":", "bundle", "=", "self", ".", "compute_bundle", "(", "name", ",", "tdb", ",", "tdb2", ")", "return", "self", ".", "position_from_bundle", "(", "bundle", ")" ]
48c99ce40c627e24c95479d8845e312ea168f567
test
Ephemeris.position_and_velocity
[DEPRECATED] Compute the position and velocity of `name` at ``tdb [+ tdb2]``.

The position and velocity are returned in a 2-tuple::

    ([x y z], [xdot ydot zdot])

The barycentric dynamical time `tdb` argument should be a float. If there
are many dates you want computed, then make `tdb` an array, which is more
efficient than calling this method multiple times; the return values will
be two-dimensional arrays giving a row of values for each coordinate.

For extra precision, the time can be split into two floats; a popular
choice is to use `tdb` for the integer or half-integer date, and `tdb2`
to hold the remaining fraction.

Consult the `names` attribute of this ephemeris for the values of `name`
it supports, such as ``'mars'`` or ``'earthmoon'``.
jplephem/ephem.py
def position_and_velocity(self, name, tdb, tdb2=0.0):
    """[DEPRECATED] Compute the position and velocity of `name` at ``tdb [+ tdb2]``.

    The position and velocity are returned in a 2-tuple::

        ([x y z], [xdot ydot zdot])

    The barycentric dynamical time `tdb` argument should be a float. If
    there are many dates you want computed, then make `tdb` an array,
    which is more efficient than calling this method multiple times; the
    return values will be two-dimensional arrays giving a row of values
    for each coordinate.

    For extra precision, the time can be split into two floats; a popular
    choice is to use `tdb` for the integer or half-integer date, and
    `tdb2` to hold the remaining fraction.

    Consult the `names` attribute of this ephemeris for the values of
    `name` it supports, such as ``'mars'`` or ``'earthmoon'``.
    """
    bundle = self.compute_bundle(name, tdb, tdb2)
    position = self.position_from_bundle(bundle)
    velocity = self.velocity_from_bundle(bundle)
    return position, velocity
[ "[", "DEPRECATED", "]", "Compute", "the", "position", "and", "velocity", "of", "name", "at", "tdb", "[", "+", "tdb2", "]", "." ]
brandon-rhodes/python-jplephem
python
https://github.com/brandon-rhodes/python-jplephem/blob/48c99ce40c627e24c95479d8845e312ea168f567/jplephem/ephem.py#L70-L94
[ "def", "position_and_velocity", "(", "self", ",", "name", ",", "tdb", ",", "tdb2", "=", "0.0", ")", ":", "bundle", "=", "self", ".", "compute_bundle", "(", "name", ",", "tdb", ",", "tdb2", ")", "position", "=", "self", ".", "position_from_bundle", "(", "bundle", ")", "velocity", "=", "self", ".", "velocity_from_bundle", "(", "bundle", ")", "return", "position", ",", "velocity" ]
48c99ce40c627e24c95479d8845e312ea168f567
test
Ephemeris.compute
[DEPRECATED] Legacy routine that concatenates position and velocity vectors.

This routine is deprecated. Use the methods `position()` and
`position_and_velocity()` instead. This method follows the same calling
convention, but incurs extra copy operations in order to return a single
NumPy array::

    [x y z xdot ydot zdot]
jplephem/ephem.py
def compute(self, name, tdb):
    """[DEPRECATED] Legacy routine that concatenates position and velocity vectors.

    This routine is deprecated. Use the methods `position()` and
    `position_and_velocity()` instead. This method follows the same
    calling convention, but incurs extra copy operations in order to
    return a single NumPy array::

        [x y z xdot ydot zdot]

    """
    bundle = self.compute_bundle(name, tdb, 0.0)
    position = self.position_from_bundle(bundle)
    velocity = self.velocity_from_bundle(bundle)
    return np.concatenate((position, velocity))
[ "[", "DEPRECATED", "]", "Legacy", "routine", "that", "concatenates", "position", "and", "velocity", "vectors", "." ]
brandon-rhodes/python-jplephem
python
https://github.com/brandon-rhodes/python-jplephem/blob/48c99ce40c627e24c95479d8845e312ea168f567/jplephem/ephem.py#L96-L110
[ "def", "compute", "(", "self", ",", "name", ",", "tdb", ")", ":", "bundle", "=", "self", ".", "compute_bundle", "(", "name", ",", "tdb", ",", "0.0", ")", "position", "=", "self", ".", "position_from_bundle", "(", "bundle", ")", "velocity", "=", "self", ".", "velocity_from_bundle", "(", "bundle", ")", "return", "np", ".", "concatenate", "(", "(", "position", ",", "velocity", ")", ")" ]
48c99ce40c627e24c95479d8845e312ea168f567
test
Ephemeris.compute_bundle
[DEPRECATED] Return a tuple of coefficients and parameters for `tdb`.

The return value is a tuple that bundles together the coefficients and
other Chebyshev intermediate values that are needed for the computation
of either the position or velocity. The bundle can then be passed to
either `position_from_bundle()` or `velocity_from_bundle()` to finish
the computation. See the package-level documentation for details; most
users will simply call `position()` or `position_and_velocity()` instead.

The barycentric dynamical time `tdb` argument should be a float. If there
are many dates you want computed, then make `tdb` an array, which is more
efficient than calling this method multiple times; the return values will
be arrays providing a value for each time in `tdb`.

For extra precision, the time can be split into two floats; a popular
choice is to use `tdb` for the integer or half-integer date, and `tdb2`
to hold the remaining fraction.

Consult the `names` attribute of this ephemeris for the values of `name`
it supports, such as ``'mars'`` or ``'earthmoon'``.
jplephem/ephem.py
def compute_bundle(self, name, tdb, tdb2=0.0):
    """[DEPRECATED] Return a tuple of coefficients and parameters for `tdb`.

    The return value is a tuple that bundles together the coefficients
    and other Chebyshev intermediate values that are needed for the
    computation of either the position or velocity. The bundle can then
    be passed to either `position_from_bundle()` or
    `velocity_from_bundle()` to finish the computation. See the
    package-level documentation for details; most users will simply call
    `position()` or `position_and_velocity()` instead.

    The barycentric dynamical time `tdb` argument should be a float. If
    there are many dates you want computed, then make `tdb` an array,
    which is more efficient than calling this method multiple times; the
    return values will be arrays providing a value for each time in
    `tdb`.

    For extra precision, the time can be split into two floats; a popular
    choice is to use `tdb` for the integer or half-integer date, and
    `tdb2` to hold the remaining fraction.

    Consult the `names` attribute of this ephemeris for the values of
    `name` it supports, such as ``'mars'`` or ``'earthmoon'``.
    """
    input_was_scalar = getattr(tdb, 'shape', ()) == ()
    if input_was_scalar:
        tdb = np.array((tdb,))
        # no need to deal with tdb2; numpy broadcast will add fine below.

    coefficient_sets = self.load(name)
    number_of_sets, axis_count, coefficient_count = coefficient_sets.shape

    jalpha, jomega = self.jalpha, self.jomega
    days_per_set = (jomega - jalpha) / number_of_sets

    # to keep precision, first subtract, then add
    index, offset = divmod((tdb - jalpha) + tdb2, days_per_set)
    index = index.astype(int)

    if (index < 0).any() or (number_of_sets < index).any():
        raise DateError('ephemeris %s only covers dates %.1f through %.1f'
                        % (self.name, jalpha, jomega))

    omegas = (index == number_of_sets)
    index[omegas] -= 1
    offset[omegas] += days_per_set

    coefficients = np.rollaxis(coefficient_sets[index], 1)

    # Chebyshev recurrence:
    T = np.empty((coefficient_count, len(index)))
    T[0] = 1.0
    T[1] = t1 = 2.0 * offset / days_per_set - 1.0
    twot1 = t1 + t1
    for i in range(2, coefficient_count):
        T[i] = twot1 * T[i-1] - T[i-2]

    bundle = coefficients, days_per_set, T, twot1
    return bundle
def compute_bundle(self, name, tdb, tdb2=0.0): """[DEPRECATED] Return a tuple of coefficients and parameters for `tdb`. The return value is a tuple that bundles together the coefficients and other Chebyshev intermediate values that are needed for the computation of either the position or velocity. The bundle can then be passed to either `position_from_bundle()` or `velocity_from_bundle()` to finish the computation. See the package-level documentation for details; most users will simply call `position()` or `position_and_velocity()` instead. The barycentric dynamical time `tdb` argument should be a float. If there are many dates you want computed, then make `tdb` an array, which is more efficient than calling this method multiple times; the return values will be arrays providing a value for each time in `tdb`. For extra precision, the time can be split into two floats; a popular choice is to use `tdb` for the integer or half-integer date, and `tdb2` to hold the remaining fraction. Consult the `names` attribute of this ephemeris for the values of `name` it supports, such as ``'mars'`` or ``'earthmoon'``. """ input_was_scalar = getattr(tdb, 'shape', ()) == () if input_was_scalar: tdb = np.array((tdb,)) # no need to deal with tdb2; numpy broadcast will add fine below. coefficient_sets = self.load(name) number_of_sets, axis_count, coefficient_count = coefficient_sets.shape jalpha, jomega = self.jalpha, self.jomega days_per_set = (jomega - jalpha) / number_of_sets # to keep precision, first subtract, then add index, offset = divmod((tdb - jalpha) + tdb2, days_per_set) index = index.astype(int) if (index < 0).any() or (number_of_sets < index).any(): raise DateError('ephemeris %s only covers dates %.1f through %.1f' % (self.name, jalpha, jomega)) omegas = (index == number_of_sets) index[omegas] -= 1 offset[omegas] += days_per_set coefficients = np.rollaxis(coefficient_sets[index], 1) # Chebyshev recurrence: T = np.empty((coefficient_count, len(index))) T[0] = 1.0 T[1] = t1 = 2.0 * offset / days_per_set - 1.0 twot1 = t1 + t1 for i in range(2, coefficient_count): T[i] = twot1 * T[i-1] - T[i-2] bundle = coefficients, days_per_set, T, twot1 return bundle
[ "[", "DEPRECATED", "]", "Return", "a", "tuple", "of", "coefficients", "and", "parameters", "for", "tdb", "." ]
brandon-rhodes/python-jplephem
python
https://github.com/brandon-rhodes/python-jplephem/blob/48c99ce40c627e24c95479d8845e312ea168f567/jplephem/ephem.py#L112-L171
[ "def", "compute_bundle", "(", "self", ",", "name", ",", "tdb", ",", "tdb2", "=", "0.0", ")", ":", "input_was_scalar", "=", "getattr", "(", "tdb", ",", "'shape'", ",", "(", ")", ")", "==", "(", ")", "if", "input_was_scalar", ":", "tdb", "=", "np", ".", "array", "(", "(", "tdb", ",", ")", ")", "# no need to deal with tdb2; numpy broadcast will add fine below.", "coefficient_sets", "=", "self", ".", "load", "(", "name", ")", "number_of_sets", ",", "axis_count", ",", "coefficient_count", "=", "coefficient_sets", ".", "shape", "jalpha", ",", "jomega", "=", "self", ".", "jalpha", ",", "self", ".", "jomega", "days_per_set", "=", "(", "jomega", "-", "jalpha", ")", "/", "number_of_sets", "# to keep precision, first subtract, then add", "index", ",", "offset", "=", "divmod", "(", "(", "tdb", "-", "jalpha", ")", "+", "tdb2", ",", "days_per_set", ")", "index", "=", "index", ".", "astype", "(", "int", ")", "if", "(", "index", "<", "0", ")", ".", "any", "(", ")", "or", "(", "number_of_sets", "<", "index", ")", ".", "any", "(", ")", ":", "raise", "DateError", "(", "'ephemeris %s only covers dates %.1f through %.1f'", "%", "(", "self", ".", "name", ",", "jalpha", ",", "jomega", ")", ")", "omegas", "=", "(", "index", "==", "number_of_sets", ")", "index", "[", "omegas", "]", "-=", "1", "offset", "[", "omegas", "]", "+=", "days_per_set", "coefficients", "=", "np", ".", "rollaxis", "(", "coefficient_sets", "[", "index", "]", ",", "1", ")", "# Chebyshev recurrence:", "T", "=", "np", ".", "empty", "(", "(", "coefficient_count", ",", "len", "(", "index", ")", ")", ")", "T", "[", "0", "]", "=", "1.0", "T", "[", "1", "]", "=", "t1", "=", "2.0", "*", "offset", "/", "days_per_set", "-", "1.0", "twot1", "=", "t1", "+", "t1", "for", "i", "in", "range", "(", "2", ",", "coefficient_count", ")", ":", "T", "[", "i", "]", "=", "twot1", "*", "T", "[", "i", "-", "1", "]", "-", "T", "[", "i", "-", "2", "]", "bundle", "=", "coefficients", ",", "days_per_set", ",", "T", ",", "twot1", "return", "bundle" ]
48c99ce40c627e24c95479d8845e312ea168f567
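A minimal sketch of the deprecated bundle workflow the docstring above describes. It assumes the legacy `Ephemeris` constructor accepts an installed ephemeris module such as the `de421` package; both that package and the sample date are assumptions, not part of this record:

```python
# Sketch only: de421 and the sample Julian date are assumptions.
import de421
from jplephem.ephem import Ephemeris

eph = Ephemeris(de421)
bundle = eph.compute_bundle('mars', 2457061.5)  # coefficients + Chebyshev terms
position = eph.position_from_bundle(bundle)     # shape (3, n_dates)
velocity = eph.velocity_from_bundle(bundle)     # same shape as position
```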
test
Ephemeris.position_from_bundle
[DEPRECATED] Return position, given the `coefficient_bundle()` return value.
jplephem/ephem.py
def position_from_bundle(self, bundle): """[DEPRECATED] Return position, given the `coefficient_bundle()` return value.""" coefficients, days_per_set, T, twot1 = bundle return (T.T * coefficients).sum(axis=2)
def position_from_bundle(self, bundle): """[DEPRECATED] Return position, given the `coefficient_bundle()` return value.""" coefficients, days_per_set, T, twot1 = bundle return (T.T * coefficients).sum(axis=2)
[ "[", "DEPRECATED", "]", "Return", "position", "given", "the", "coefficient_bundle", "()", "return", "value", "." ]
brandon-rhodes/python-jplephem
python
https://github.com/brandon-rhodes/python-jplephem/blob/48c99ce40c627e24c95479d8845e312ea168f567/jplephem/ephem.py#L173-L177
[ "def", "position_from_bundle", "(", "self", ",", "bundle", ")", ":", "coefficients", ",", "days_per_set", ",", "T", ",", "twot1", "=", "bundle", "return", "(", "T", ".", "T", "*", "coefficients", ")", ".", "sum", "(", "axis", "=", "2", ")" ]
48c99ce40c627e24c95479d8845e312ea168f567
test
Ephemeris.velocity_from_bundle
[DEPRECATED] Return velocity, given the `coefficient_bundle()` return value.
jplephem/ephem.py
def velocity_from_bundle(self, bundle): """[DEPRECATED] Return velocity, given the `coefficient_bundle()` return value.""" coefficients, days_per_set, T, twot1 = bundle coefficient_count = coefficients.shape[2] # Chebyshev derivative: dT = np.empty_like(T) dT[0] = 0.0 dT[1] = 1.0 dT[2] = twot1 + twot1 for i in range(3, coefficient_count): dT[i] = twot1 * dT[i-1] - dT[i-2] + T[i-1] + T[i-1] dT *= 2.0 dT /= days_per_set return (dT.T * coefficients).sum(axis=2)
def velocity_from_bundle(self, bundle): """[DEPRECATED] Return velocity, given the `coefficient_bundle()` return value.""" coefficients, days_per_set, T, twot1 = bundle coefficient_count = coefficients.shape[2] # Chebyshev derivative: dT = np.empty_like(T) dT[0] = 0.0 dT[1] = 1.0 dT[2] = twot1 + twot1 for i in range(3, coefficient_count): dT[i] = twot1 * dT[i-1] - dT[i-2] + T[i-1] + T[i-1] dT *= 2.0 dT /= days_per_set return (dT.T * coefficients).sum(axis=2)
[ "[", "DEPRECATED", "]", "Return", "velocity", "given", "the", "coefficient_bundle", "()", "return", "value", "." ]
brandon-rhodes/python-jplephem
python
https://github.com/brandon-rhodes/python-jplephem/blob/48c99ce40c627e24c95479d8845e312ea168f567/jplephem/ephem.py#L179-L196
[ "def", "velocity_from_bundle", "(", "self", ",", "bundle", ")", ":", "coefficients", ",", "days_per_set", ",", "T", ",", "twot1", "=", "bundle", "coefficient_count", "=", "coefficients", ".", "shape", "[", "2", "]", "# Chebyshev derivative:", "dT", "=", "np", ".", "empty_like", "(", "T", ")", "dT", "[", "0", "]", "=", "0.0", "dT", "[", "1", "]", "=", "1.0", "dT", "[", "2", "]", "=", "twot1", "+", "twot1", "for", "i", "in", "range", "(", "3", ",", "coefficient_count", ")", ":", "dT", "[", "i", "]", "=", "twot1", "*", "dT", "[", "i", "-", "1", "]", "-", "dT", "[", "i", "-", "2", "]", "+", "T", "[", "i", "-", "1", "]", "+", "T", "[", "i", "-", "1", "]", "dT", "*=", "2.0", "dT", "/=", "days_per_set", "return", "(", "dT", ".", "T", "*", "coefficients", ")", ".", "sum", "(", "axis", "=", "2", ")" ]
48c99ce40c627e24c95479d8845e312ea168f567
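Both bundles above rely on the standard Chebyshev recurrence T_i(t) = 2t·T_{i-1}(t) − T_{i-2}(t). A small self-contained check of that recurrence against NumPy's own Chebyshev evaluator, purely illustrative and not taken from the repository:

```python
import numpy as np

t = 0.37                                   # scaled time in [-1, 1], like t1 above
coeffs = np.array([1.0, -0.5, 0.25, 0.125])

# Recurrence exactly as used in compute_bundle.
T = np.empty(len(coeffs))
T[0] = 1.0
T[1] = t
for i in range(2, len(coeffs)):
    T[i] = 2.0 * t * T[i - 1] - T[i - 2]

assert np.isclose((T * coeffs).sum(), np.polynomial.chebyshev.chebval(t, coeffs))
```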
test
DAF.read_record
Return record `n` as 1,024 bytes; records are indexed from 1.
jplephem/daf.py
def read_record(self, n): """Return record `n` as 1,024 bytes; records are indexed from 1.""" self.file.seek(n * K - K) return self.file.read(K)
def read_record(self, n): """Return record `n` as 1,024 bytes; records are indexed from 1.""" self.file.seek(n * K - K) return self.file.read(K)
[ "Return", "record", "n", "as", "1", "024", "bytes", ";", "records", "are", "indexed", "from", "1", "." ]
brandon-rhodes/python-jplephem
python
https://github.com/brandon-rhodes/python-jplephem/blob/48c99ce40c627e24c95479d8845e312ea168f567/jplephem/daf.py#L74-L77
[ "def", "read_record", "(", "self", ",", "n", ")", ":", "self", ".", "file", ".", "seek", "(", "n", "*", "K", "-", "K", ")", "return", "self", ".", "file", ".", "read", "(", "K", ")" ]
48c99ce40c627e24c95479d8845e312ea168f567
test
DAF.write_record
Write `data` to file record `n`; records are indexed from 1.
jplephem/daf.py
def write_record(self, n, data): """Write `data` to file record `n`; records are indexed from 1.""" self.file.seek(n * K - K) return self.file.write(data)
def write_record(self, n, data): """Write `data` to file record `n`; records are indexed from 1.""" self.file.seek(n * K - K) return self.file.write(data)
[ "Write", "data", "to", "file", "record", "n", ";", "records", "are", "indexed", "from", "1", "." ]
brandon-rhodes/python-jplephem
python
https://github.com/brandon-rhodes/python-jplephem/blob/48c99ce40c627e24c95479d8845e312ea168f567/jplephem/daf.py#L79-L82
[ "def", "write_record", "(", "self", ",", "n", ",", "data", ")", ":", "self", ".", "file", ".", "seek", "(", "n", "*", "K", "-", "K", ")", "return", "self", ".", "file", ".", "write", "(", "data", ")" ]
48c99ce40c627e24c95479d8845e312ea168f567
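Records are fixed 1,024-byte blocks, so the seek position used by both methods is simply (n − 1) · 1024. A tiny illustration of that arithmetic with hypothetical record numbers:

```python
K = 1024  # bytes per DAF record, matching the module-level constant

def record_offset(n):
    """Byte offset of record `n`, with records indexed from 1."""
    return n * K - K  # equivalently (n - 1) * K

assert record_offset(1) == 0
assert record_offset(3) == 2048
```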
test
DAF.map_words
Return a memory-map of the elements `start` through `end`. The memory map will offer the 8-byte double-precision floats ("elements") in the file from index `start` through to the index `end`, inclusive, both counting the first float as element 1. Memory maps must begin on a page boundary, so `skip` returns the number of extra bytes at the beginning of the return value.
jplephem/daf.py
def map_words(self, start, end): """Return a memory-map of the elements `start` through `end`. The memory map will offer the 8-byte double-precision floats ("elements") in the file from index `start` through to the index `end`, inclusive, both counting the first float as element 1. Memory maps must begin on a page boundary, so `skip` returns the number of extra bytes at the beginning of the return value. """ i, j = 8 * start - 8, 8 * end try: fileno = self.file.fileno() except (AttributeError, io.UnsupportedOperation): fileno = None if fileno is None: skip = 0 self.file.seek(i) m = self.file.read(j - i) else: skip = i % mmap.ALLOCATIONGRANULARITY r = mmap.ACCESS_READ m = mmap.mmap(fileno, length=j-i+skip, access=r, offset=i-skip) if sys.version_info > (3,): m = memoryview(m) # so further slicing can return views return m, skip
def map_words(self, start, end): """Return a memory-map of the elements `start` through `end`. The memory map will offer the 8-byte double-precision floats ("elements") in the file from index `start` through to the index `end`, inclusive, both counting the first float as element 1. Memory maps must begin on a page boundary, so `skip` returns the number of extra bytes at the beginning of the return value. """ i, j = 8 * start - 8, 8 * end try: fileno = self.file.fileno() except (AttributeError, io.UnsupportedOperation): fileno = None if fileno is None: skip = 0 self.file.seek(i) m = self.file.read(j - i) else: skip = i % mmap.ALLOCATIONGRANULARITY r = mmap.ACCESS_READ m = mmap.mmap(fileno, length=j-i+skip, access=r, offset=i-skip) if sys.version_info > (3,): m = memoryview(m) # so further slicing can return views return m, skip
[ "Return", "a", "memory", "-", "map", "of", "the", "elements", "start", "through", "end", "." ]
brandon-rhodes/python-jplephem
python
https://github.com/brandon-rhodes/python-jplephem/blob/48c99ce40c627e24c95479d8845e312ea168f567/jplephem/daf.py#L92-L117
[ "def", "map_words", "(", "self", ",", "start", ",", "end", ")", ":", "i", ",", "j", "=", "8", "*", "start", "-", "8", ",", "8", "*", "end", "try", ":", "fileno", "=", "self", ".", "file", ".", "fileno", "(", ")", "except", "(", "AttributeError", ",", "io", ".", "UnsupportedOperation", ")", ":", "fileno", "=", "None", "if", "fileno", "is", "None", ":", "skip", "=", "0", "self", ".", "file", ".", "seek", "(", "i", ")", "m", "=", "self", ".", "file", ".", "read", "(", "j", "-", "i", ")", "else", ":", "skip", "=", "i", "%", "mmap", ".", "ALLOCATIONGRANULARITY", "r", "=", "mmap", ".", "ACCESS_READ", "m", "=", "mmap", ".", "mmap", "(", "fileno", ",", "length", "=", "j", "-", "i", "+", "skip", ",", "access", "=", "r", ",", "offset", "=", "i", "-", "skip", ")", "if", "sys", ".", "version_info", ">", "(", "3", ",", ")", ":", "m", "=", "memoryview", "(", "m", ")", "# so further slicing can return views", "return", "m", ",", "skip" ]
48c99ce40c627e24c95479d8845e312ea168f567
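A hedged sketch of how the (buffer, skip) pair returned by map_words would typically be consumed. It assumes `daf` is an already-open DAF instance from this module and that element indices 10 through 20 are wanted; neither assumption appears in the record itself:

```python
import numpy as np

# Hypothetical usage: daf is an open jplephem.daf.DAF instance.
start, end = 10, 20
buf, skip = daf.map_words(start, end)

# `skip` extra bytes pad the front so the mmap starts on a page boundary;
# slice past them before reinterpreting the bytes as 8-byte floats.
count = end - start + 1
floats = np.frombuffer(buf[skip:skip + 8 * count], dtype=daf.endian + 'd')
```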
test
DAF.comments
Return the text inside the comment area of the file.
jplephem/daf.py
def comments(self): """Return the text inside the comment area of the file.""" record_numbers = range(2, self.fward) if not record_numbers: return '' data = b''.join(self.read_record(n)[0:1000] for n in record_numbers) try: return data[:data.find(b'\4')].decode('ascii').replace('\0', '\n') except IndexError: raise ValueError('DAF file comment area is missing its EOT byte') except UnicodeDecodeError: raise ValueError('DAF file comment area is not ASCII text')
def comments(self): """Return the text inside the comment area of the file.""" record_numbers = range(2, self.fward) if not record_numbers: return '' data = b''.join(self.read_record(n)[0:1000] for n in record_numbers) try: return data[:data.find(b'\4')].decode('ascii').replace('\0', '\n') except IndexError: raise ValueError('DAF file comment area is missing its EOT byte') except UnicodeDecodeError: raise ValueError('DAF file comment area is not ASCII text')
[ "Return", "the", "text", "inside", "the", "comment", "area", "of", "the", "file", "." ]
brandon-rhodes/python-jplephem
python
https://github.com/brandon-rhodes/python-jplephem/blob/48c99ce40c627e24c95479d8845e312ea168f567/jplephem/daf.py#L119-L130
[ "def", "comments", "(", "self", ")", ":", "record_numbers", "=", "range", "(", "2", ",", "self", ".", "fward", ")", "if", "not", "record_numbers", ":", "return", "''", "data", "=", "b''", ".", "join", "(", "self", ".", "read_record", "(", "n", ")", "[", "0", ":", "1000", "]", "for", "n", "in", "record_numbers", ")", "try", ":", "return", "data", "[", ":", "data", ".", "find", "(", "b'\\4'", ")", "]", ".", "decode", "(", "'ascii'", ")", ".", "replace", "(", "'\\0'", ",", "'\\n'", ")", "except", "IndexError", ":", "raise", "ValueError", "(", "'DAF file comment area is missing its EOT byte'", ")", "except", "UnicodeDecodeError", ":", "raise", "ValueError", "(", "'DAF file comment area is not ASCII text'", ")" ]
48c99ce40c627e24c95479d8845e312ea168f567
test
DAF.read_array
Return floats from `start` to `end` inclusive, indexed from 1. The entire range of floats is immediately read into memory from the file, making this efficient for small sequences of floats whose values are all needed immediately.
jplephem/daf.py
def read_array(self, start, end): """Return floats from `start` to `end` inclusive, indexed from 1. The entire range of floats is immediately read into memory from the file, making this efficient for small sequences of floats whose values are all needed immediately. """ f = self.file f.seek(8 * (start - 1)) length = 1 + end - start data = f.read(8 * length) return ndarray(length, self.endian + 'd', data)
def read_array(self, start, end): """Return floats from `start` to `end` inclusive, indexed from 1. The entire range of floats is immediately read into memory from the file, making this efficient for small sequences of floats whose values are all needed immediately. """ f = self.file f.seek(8 * (start - 1)) length = 1 + end - start data = f.read(8 * length) return ndarray(length, self.endian + 'd', data)
[ "Return", "floats", "from", "start", "to", "end", "inclusive", "indexed", "from", "1", "." ]
brandon-rhodes/python-jplephem
python
https://github.com/brandon-rhodes/python-jplephem/blob/48c99ce40c627e24c95479d8845e312ea168f567/jplephem/daf.py#L132-L144
[ "def", "read_array", "(", "self", ",", "start", ",", "end", ")", ":", "f", "=", "self", ".", "file", "f", ".", "seek", "(", "8", "*", "(", "start", "-", "1", ")", ")", "length", "=", "1", "+", "end", "-", "start", "data", "=", "f", ".", "read", "(", "8", "*", "length", ")", "return", "ndarray", "(", "length", ",", "self", ".", "endian", "+", "'d'", ",", "data", ")" ]
48c99ce40c627e24c95479d8845e312ea168f567
test
DAF.map_array
Return floats from `start` to `end` inclusive, indexed from 1. Instead of pausing to load all of the floats into RAM, this routine creates a memory map which will load data from the file only as it is accessed, and then will let it expire back out to disk later. This is very efficient for large data sets to which you need random access.
jplephem/daf.py
def map_array(self, start, end): """Return floats from `start` to `end` inclusive, indexed from 1. Instead of pausing to load all of the floats into RAM, this routine creates a memory map which will load data from the file only as it is accessed, and then will let it expire back out to disk later. This is very efficient for large data sets to which you need random access. """ if self._array is None: self._map, skip = self.map_words(1, self.free - 1) assert skip == 0 self._array = ndarray(self.free - 1, self.endian + 'd', self._map) return self._array[start - 1 : end]
def map_array(self, start, end): """Return floats from `start` to `end` inclusive, indexed from 1. Instead of pausing to load all of the floats into RAM, this routine creates a memory map which will load data from the file only as it is accessed, and then will let it expire back out to disk later. This is very efficient for large data sets to which you need random access. """ if self._array is None: self._map, skip = self.map_words(1, self.free - 1) assert skip == 0 self._array = ndarray(self.free - 1, self.endian + 'd', self._map) return self._array[start - 1 : end]
[ "Return", "floats", "from", "start", "to", "end", "inclusive", "indexed", "from", "1", "." ]
brandon-rhodes/python-jplephem
python
https://github.com/brandon-rhodes/python-jplephem/blob/48c99ce40c627e24c95479d8845e312ea168f567/jplephem/daf.py#L146-L160
[ "def", "map_array", "(", "self", ",", "start", ",", "end", ")", ":", "if", "self", ".", "_array", "is", "None", ":", "self", ".", "_map", ",", "skip", "=", "self", ".", "map_words", "(", "1", ",", "self", ".", "free", "-", "1", ")", "assert", "skip", "==", "0", "self", ".", "_array", "=", "ndarray", "(", "self", ".", "free", "-", "1", ",", "self", ".", "endian", "+", "'d'", ",", "self", ".", "_map", ")", "return", "self", ".", "_array", "[", "start", "-", "1", ":", "end", "]" ]
48c99ce40c627e24c95479d8845e312ea168f567
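The two accessors above trade eagerness for laziness. A hedged sketch of when each would be chosen, again with `daf` assumed to be an already-open DAF instance and the word ranges purely hypothetical:

```python
# Small directory-style block: read it eagerly into RAM.
header_floats = daf.read_array(897, 900)       # 4-element ndarray

# Bulk coefficient data: memory-map it and let the OS page it in on demand.
coefficients = daf.map_array(1, daf.free - 1)  # lazy view over the data area
chunk = coefficients[100:200]                  # only these pages are actually read
```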
test
DAF.summary_records
Yield (record_number, n_summaries, record_data) for each record. Readers will only use the second two values in each tuple. Writers can update the record using the `record_number`.
jplephem/daf.py
def summary_records(self): """Yield (record_number, n_summaries, record_data) for each record. Readers will only use the second two values in each tuple. Writers can update the record using the `record_number`. """ record_number = self.fward unpack = self.summary_control_struct.unpack while record_number: data = self.read_record(record_number) next_number, previous_number, n_summaries = unpack(data[:24]) yield record_number, n_summaries, data record_number = int(next_number)
def summary_records(self): """Yield (record_number, n_summaries, record_data) for each record. Readers will only use the second two values in each tuple. Writers can update the record using the `record_number`. """ record_number = self.fward unpack = self.summary_control_struct.unpack while record_number: data = self.read_record(record_number) next_number, previous_number, n_summaries = unpack(data[:24]) yield record_number, n_summaries, data record_number = int(next_number)
[ "Yield", "(", "record_number", "n_summaries", "record_data", ")", "for", "each", "record", "." ]
brandon-rhodes/python-jplephem
python
https://github.com/brandon-rhodes/python-jplephem/blob/48c99ce40c627e24c95479d8845e312ea168f567/jplephem/daf.py#L162-L175
[ "def", "summary_records", "(", "self", ")", ":", "record_number", "=", "self", ".", "fward", "unpack", "=", "self", ".", "summary_control_struct", ".", "unpack", "while", "record_number", ":", "data", "=", "self", ".", "read_record", "(", "record_number", ")", "next_number", ",", "previous_number", ",", "n_summaries", "=", "unpack", "(", "data", "[", ":", "24", "]", ")", "yield", "record_number", ",", "n_summaries", ",", "data", "record_number", "=", "int", "(", "next_number", ")" ]
48c99ce40c627e24c95479d8845e312ea168f567
test
DAF.summaries
Yield (name, (value, value, ...)) for each summary in the file.
jplephem/daf.py
def summaries(self): """Yield (name, (value, value, ...)) for each summary in the file.""" length = self.summary_length step = self.summary_step for record_number, n_summaries, summary_data in self.summary_records(): name_data = self.read_record(record_number + 1) for i in range(0, int(n_summaries) * step, step): j = self.summary_control_struct.size + i name = name_data[i:i+step].strip() data = summary_data[j:j+length] values = self.summary_struct.unpack(data) yield name, values
def summaries(self): """Yield (name, (value, value, ...)) for each summary in the file.""" length = self.summary_length step = self.summary_step for record_number, n_summaries, summary_data in self.summary_records(): name_data = self.read_record(record_number + 1) for i in range(0, int(n_summaries) * step, step): j = self.summary_control_struct.size + i name = name_data[i:i+step].strip() data = summary_data[j:j+length] values = self.summary_struct.unpack(data) yield name, values
[ "Yield", "(", "name", "(", "value", "value", "...", "))", "for", "each", "summary", "in", "the", "file", "." ]
brandon-rhodes/python-jplephem
python
https://github.com/brandon-rhodes/python-jplephem/blob/48c99ce40c627e24c95479d8845e312ea168f567/jplephem/daf.py#L177-L188
[ "def", "summaries", "(", "self", ")", ":", "length", "=", "self", ".", "summary_length", "step", "=", "self", ".", "summary_step", "for", "record_number", ",", "n_summaries", ",", "summary_data", "in", "self", ".", "summary_records", "(", ")", ":", "name_data", "=", "self", ".", "read_record", "(", "record_number", "+", "1", ")", "for", "i", "in", "range", "(", "0", ",", "int", "(", "n_summaries", ")", "*", "step", ",", "step", ")", ":", "j", "=", "self", ".", "summary_control_struct", ".", "size", "+", "i", "name", "=", "name_data", "[", "i", ":", "i", "+", "step", "]", ".", "strip", "(", ")", "data", "=", "summary_data", "[", "j", ":", "j", "+", "length", "]", "values", "=", "self", ".", "summary_struct", ".", "unpack", "(", "data", ")", "yield", "name", ",", "values" ]
48c99ce40c627e24c95479d8845e312ea168f567
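A short sketch of walking the summaries of an open DAF file. The `daf` instance and the file behind it are assumptions, and the meaning of the unpacked `values` tuple depends on the file's ND/NI layout:

```python
# Hypothetical: daf is an open jplephem.daf.DAF instance.
for name, values in daf.summaries():
    # `name` is the raw summary name (bytes); `values` holds the ND doubles
    # followed by the NI integers declared in the file record.
    print(name.decode('latin-1').strip(), values)
```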
test
DAF.add_array
Add a new array to the DAF file. The summary will be initialized with the `name` and `values`, and will have its start word and end word fields set to point to where the `array` of floats has been appended to the file.
jplephem/daf.py
def add_array(self, name, values, array): """Add a new array to the DAF file. The summary will be initialized with the `name` and `values`, and will have its start word and end word fields set to point to where the `array` of floats has been appended to the file. """ f = self.file scs = self.summary_control_struct record_number = self.bward data = bytearray(self.read_record(record_number)) next_record, previous_record, n_summaries = scs.unpack(data[:24]) if n_summaries < self.summaries_per_record: summary_record = record_number name_record = summary_record + 1 data[:24] = scs.pack(next_record, previous_record, n_summaries + 1) self.write_record(summary_record, data) else: summary_record = ((self.free - 1) * 8 + 1023) // 1024 + 1 name_record = summary_record + 1 free_record = summary_record + 2 n_summaries = 0 data[:24] = scs.pack(summary_record, previous_record, n_summaries) self.write_record(record_number, data) summaries = scs.pack(0, record_number, 1).ljust(1024, b'\0') names = b'\0' * 1024 self.write_record(summary_record, summaries) self.write_record(name_record, names) self.bward = summary_record self.free = (free_record - 1) * 1024 // 8 + 1 start_word = self.free f.seek((start_word - 1) * 8) array = numpy_array(array) # TODO: force correct endian f.write(array.view()) end_word = f.tell() // 8 self.free = end_word + 1 self.write_file_record() values = values[:self.nd + self.ni - 2] + (start_word, end_word) base = 1024 * (summary_record - 1) offset = int(n_summaries) * self.summary_step f.seek(base + scs.size + offset) f.write(self.summary_struct.pack(*values)) f.seek(base + 1024 + offset) f.write(name[:self.summary_length].ljust(self.summary_step, b' '))
def add_array(self, name, values, array): """Add a new array to the DAF file. The summary will be initialized with the `name` and `values`, and will have its start word and end word fields set to point to where the `array` of floats has been appended to the file. """ f = self.file scs = self.summary_control_struct record_number = self.bward data = bytearray(self.read_record(record_number)) next_record, previous_record, n_summaries = scs.unpack(data[:24]) if n_summaries < self.summaries_per_record: summary_record = record_number name_record = summary_record + 1 data[:24] = scs.pack(next_record, previous_record, n_summaries + 1) self.write_record(summary_record, data) else: summary_record = ((self.free - 1) * 8 + 1023) // 1024 + 1 name_record = summary_record + 1 free_record = summary_record + 2 n_summaries = 0 data[:24] = scs.pack(summary_record, previous_record, n_summaries) self.write_record(record_number, data) summaries = scs.pack(0, record_number, 1).ljust(1024, b'\0') names = b'\0' * 1024 self.write_record(summary_record, summaries) self.write_record(name_record, names) self.bward = summary_record self.free = (free_record - 1) * 1024 // 8 + 1 start_word = self.free f.seek((start_word - 1) * 8) array = numpy_array(array) # TODO: force correct endian f.write(array.view()) end_word = f.tell() // 8 self.free = end_word + 1 self.write_file_record() values = values[:self.nd + self.ni - 2] + (start_word, end_word) base = 1024 * (summary_record - 1) offset = int(n_summaries) * self.summary_step f.seek(base + scs.size + offset) f.write(self.summary_struct.pack(*values)) f.seek(base + 1024 + offset) f.write(name[:self.summary_length].ljust(self.summary_step, b' '))
[ "Add", "a", "new", "array", "to", "the", "DAF", "file", "." ]
brandon-rhodes/python-jplephem
python
https://github.com/brandon-rhodes/python-jplephem/blob/48c99ce40c627e24c95479d8845e312ea168f567/jplephem/daf.py#L202-L255
[ "def", "add_array", "(", "self", ",", "name", ",", "values", ",", "array", ")", ":", "f", "=", "self", ".", "file", "scs", "=", "self", ".", "summary_control_struct", "record_number", "=", "self", ".", "bward", "data", "=", "bytearray", "(", "self", ".", "read_record", "(", "record_number", ")", ")", "next_record", ",", "previous_record", ",", "n_summaries", "=", "scs", ".", "unpack", "(", "data", "[", ":", "24", "]", ")", "if", "n_summaries", "<", "self", ".", "summaries_per_record", ":", "summary_record", "=", "record_number", "name_record", "=", "summary_record", "+", "1", "data", "[", ":", "24", "]", "=", "scs", ".", "pack", "(", "next_record", ",", "previous_record", ",", "n_summaries", "+", "1", ")", "self", ".", "write_record", "(", "summary_record", ",", "data", ")", "else", ":", "summary_record", "=", "(", "(", "self", ".", "free", "-", "1", ")", "*", "8", "+", "1023", ")", "//", "1024", "+", "1", "name_record", "=", "summary_record", "+", "1", "free_record", "=", "summary_record", "+", "2", "n_summaries", "=", "0", "data", "[", ":", "24", "]", "=", "scs", ".", "pack", "(", "summary_record", ",", "previous_record", ",", "n_summaries", ")", "self", ".", "write_record", "(", "record_number", ",", "data", ")", "summaries", "=", "scs", ".", "pack", "(", "0", ",", "record_number", ",", "1", ")", ".", "ljust", "(", "1024", ",", "b'\\0'", ")", "names", "=", "b'\\0'", "*", "1024", "self", ".", "write_record", "(", "summary_record", ",", "summaries", ")", "self", ".", "write_record", "(", "name_record", ",", "names", ")", "self", ".", "bward", "=", "summary_record", "self", ".", "free", "=", "(", "free_record", "-", "1", ")", "*", "1024", "//", "8", "+", "1", "start_word", "=", "self", ".", "free", "f", ".", "seek", "(", "(", "start_word", "-", "1", ")", "*", "8", ")", "array", "=", "numpy_array", "(", "array", ")", "# TODO: force correct endian", "f", ".", "write", "(", "array", ".", "view", "(", ")", ")", "end_word", "=", "f", ".", "tell", "(", ")", "//", "8", "self", ".", "free", "=", "end_word", "+", "1", "self", ".", "write_file_record", "(", ")", "values", "=", "values", "[", ":", "self", ".", "nd", "+", "self", ".", "ni", "-", "2", "]", "+", "(", "start_word", ",", "end_word", ")", "base", "=", "1024", "*", "(", "summary_record", "-", "1", ")", "offset", "=", "int", "(", "n_summaries", ")", "*", "self", ".", "summary_step", "f", ".", "seek", "(", "base", "+", "scs", ".", "size", "+", "offset", ")", "f", ".", "write", "(", "self", ".", "summary_struct", ".", "pack", "(", "*", "values", ")", ")", "f", ".", "seek", "(", "base", "+", "1024", "+", "offset", ")", "f", ".", "write", "(", "name", "[", ":", "self", ".", "summary_length", "]", ".", "ljust", "(", "self", ".", "summary_step", ",", "b' '", ")", ")" ]
48c99ce40c627e24c95479d8845e312ea168f567
test
SPK.close
Close this SPK file.
jplephem/spk.py
def close(self): """Close this SPK file.""" self.daf.file.close() for segment in self.segments: if hasattr(segment, '_data'): del segment._data self.daf._array = None self.daf._map = None
def close(self): """Close this SPK file.""" self.daf.file.close() for segment in self.segments: if hasattr(segment, '_data'): del segment._data self.daf._array = None self.daf._map = None
[ "Close", "this", "SPK", "file", "." ]
brandon-rhodes/python-jplephem
python
https://github.com/brandon-rhodes/python-jplephem/blob/48c99ce40c627e24c95479d8845e312ea168f567/jplephem/spk.py#L46-L53
[ "def", "close", "(", "self", ")", ":", "self", ".", "daf", ".", "file", ".", "close", "(", ")", "for", "segment", "in", "self", ".", "segments", ":", "if", "hasattr", "(", "segment", ",", "'_data'", ")", ":", "del", "segment", ".", "_data", "self", ".", "daf", ".", "_array", "=", "None", "self", ".", "daf", ".", "_map", "=", "None" ]
48c99ce40c627e24c95479d8845e312ea168f567
test
Segment.describe
Return a textual description of the segment.
jplephem/spk.py
def describe(self, verbose=True): """Return a textual description of the segment.""" center = titlecase(target_names.get(self.center, 'Unknown center')) target = titlecase(target_names.get(self.target, 'Unknown target')) text = ('{0.start_jd:.2f}..{0.end_jd:.2f} {1} ({0.center})' ' -> {2} ({0.target})'.format(self, center, target)) if verbose: text += ('\n frame={0.frame} data_type={0.data_type} source={1}' .format(self, self.source.decode('ascii'))) return text
def describe(self, verbose=True): """Return a textual description of the segment.""" center = titlecase(target_names.get(self.center, 'Unknown center')) target = titlecase(target_names.get(self.target, 'Unknown target')) text = ('{0.start_jd:.2f}..{0.end_jd:.2f} {1} ({0.center})' ' -> {2} ({0.target})'.format(self, center, target)) if verbose: text += ('\n frame={0.frame} data_type={0.data_type} source={1}' .format(self, self.source.decode('ascii'))) return text
[ "Return", "a", "textual", "description", "of", "the", "segment", "." ]
brandon-rhodes/python-jplephem
python
https://github.com/brandon-rhodes/python-jplephem/blob/48c99ce40c627e24c95479d8845e312ea168f567/jplephem/spk.py#L109-L118
[ "def", "describe", "(", "self", ",", "verbose", "=", "True", ")", ":", "center", "=", "titlecase", "(", "target_names", ".", "get", "(", "self", ".", "center", ",", "'Unknown center'", ")", ")", "target", "=", "titlecase", "(", "target_names", ".", "get", "(", "self", ".", "target", ",", "'Unknown target'", ")", ")", "text", "=", "(", "'{0.start_jd:.2f}..{0.end_jd:.2f} {1} ({0.center})'", "' -> {2} ({0.target})'", ".", "format", "(", "self", ",", "center", ",", "target", ")", ")", "if", "verbose", ":", "text", "+=", "(", "'\\n frame={0.frame} data_type={0.data_type} source={1}'", ".", "format", "(", "self", ",", "self", ".", "source", ".", "decode", "(", "'ascii'", ")", ")", ")", "return", "text" ]
48c99ce40c627e24c95479d8845e312ea168f567
test
Segment.compute
Compute the component values for the time `tdb` plus `tdb2`.
jplephem/spk.py
def compute(self, tdb, tdb2=0.0): """Compute the component values for the time `tdb` plus `tdb2`.""" for position in self.generate(tdb, tdb2): return position
def compute(self, tdb, tdb2=0.0): """Compute the component values for the time `tdb` plus `tdb2`.""" for position in self.generate(tdb, tdb2): return position
[ "Compute", "the", "component", "values", "for", "the", "time", "tdb", "plus", "tdb2", "." ]
brandon-rhodes/python-jplephem
python
https://github.com/brandon-rhodes/python-jplephem/blob/48c99ce40c627e24c95479d8845e312ea168f567/jplephem/spk.py#L120-L123
[ "def", "compute", "(", "self", ",", "tdb", ",", "tdb2", "=", "0.0", ")", ":", "for", "position", "in", "self", ".", "generate", "(", "tdb", ",", "tdb2", ")", ":", "return", "position" ]
48c99ce40c627e24c95479d8845e312ea168f567
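A usage sketch tying Segment.compute together with the SPK container shown above. The `SPK.open` constructor and the local `de421.bsp` kernel path are assumptions that do not appear in these records:

```python
from jplephem.spk import SPK

# Hypothetical kernel path; any SPK file with supported segments would do.
kernel = SPK.open('de421.bsp')
segment = kernel.segments[0]
print(segment.describe())

position = segment.compute(2457061.5)            # components at one TDB date
position_hp = segment.compute(2457061.5, 0.125)  # extra-precision split time
kernel.close()
```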
test
BinaryPCK.close
Close this file.
jplephem/binary_pck.py
def close(self): """Close this file.""" self.daf.file.close() for segment in self.segments: if hasattr(segment, '_data'): del segment._data
def close(self): """Close this file.""" self.daf.file.close() for segment in self.segments: if hasattr(segment, '_data'): del segment._data
[ "Close", "this", "file", "." ]
brandon-rhodes/python-jplephem
python
https://github.com/brandon-rhodes/python-jplephem/blob/48c99ce40c627e24c95479d8845e312ea168f567/jplephem/binary_pck.py#L42-L47
[ "def", "close", "(", "self", ")", ":", "self", ".", "daf", ".", "file", ".", "close", "(", ")", "for", "segment", "in", "self", ".", "segments", ":", "if", "hasattr", "(", "segment", ",", "'_data'", ")", ":", "del", "segment", ".", "_data" ]
48c99ce40c627e24c95479d8845e312ea168f567
test
Segment.describe
Return a textual description of the segment.
jplephem/binary_pck.py
def describe(self, verbose=True): """Return a textual description of the segment.""" body = titlecase(target_names.get(self.body, 'Unknown body')) text = ('{0.start_jd:.2f}..{0.end_jd:.2f} frame={0.frame}' ' {1} ({0.body})'.format(self, body)) if verbose: text += ('\n data_type={0.data_type} source={1}' .format(self, self.source.decode('ascii'))) return text
def describe(self, verbose=True): """Return a textual description of the segment.""" body = titlecase(target_names.get(self.body, 'Unknown body')) text = ('{0.start_jd:.2f}..{0.end_jd:.2f} frame={0.frame}' ' {1} ({0.body})'.format(self, body)) if verbose: text += ('\n data_type={0.data_type} source={1}' .format(self, self.source.decode('ascii'))) return text
[ "Return", "a", "textual", "description", "of", "the", "segment", "." ]
brandon-rhodes/python-jplephem
python
https://github.com/brandon-rhodes/python-jplephem/blob/48c99ce40c627e24c95479d8845e312ea168f567/jplephem/binary_pck.py#L92-L100
[ "def", "describe", "(", "self", ",", "verbose", "=", "True", ")", ":", "body", "=", "titlecase", "(", "target_names", ".", "get", "(", "self", ".", "body", ",", "'Unknown body'", ")", ")", "text", "=", "(", "'{0.start_jd:.2f}..{0.end_jd:.2f} frame={0.frame}'", "' {1} ({0.body})'", ".", "format", "(", "self", ",", "body", ")", ")", "if", "verbose", ":", "text", "+=", "(", "'\\n data_type={0.data_type} source={1}'", ".", "format", "(", "self", ",", "self", ".", "source", ".", "decode", "(", "'ascii'", ")", ")", ")", "return", "text" ]
48c99ce40c627e24c95479d8845e312ea168f567
test
Segment._load
Map the coefficients into memory using a NumPy array.
jplephem/binary_pck.py
def _load(self): """Map the coefficients into memory using a NumPy array. """ if self.data_type == 2: component_count = 3 else: raise ValueError('only binary PCK data type 2 is supported') init, intlen, rsize, n = self.daf.read_array(self.end_i - 3, self.end_i) initial_epoch = jd(init) interval_length = intlen / S_PER_DAY coefficient_count = int(rsize - 2) // component_count coefficients = self.daf.map_array(self.start_i, self.end_i - 4) coefficients.shape = (int(n), int(rsize)) coefficients = coefficients[:,2:] # ignore MID and RADIUS elements coefficients.shape = (int(n), component_count, coefficient_count) coefficients = rollaxis(coefficients, 1) return initial_epoch, interval_length, coefficients
def _load(self): """Map the coefficients into memory using a NumPy array. """ if self.data_type == 2: component_count = 3 else: raise ValueError('only binary PCK data type 2 is supported') init, intlen, rsize, n = self.daf.read_array(self.end_i - 3, self.end_i) initial_epoch = jd(init) interval_length = intlen / S_PER_DAY coefficient_count = int(rsize - 2) // component_count coefficients = self.daf.map_array(self.start_i, self.end_i - 4) coefficients.shape = (int(n), int(rsize)) coefficients = coefficients[:,2:] # ignore MID and RADIUS elements coefficients.shape = (int(n), component_count, coefficient_count) coefficients = rollaxis(coefficients, 1) return initial_epoch, interval_length, coefficients
[ "Map", "the", "coefficients", "into", "memory", "using", "a", "NumPy", "array", "." ]
brandon-rhodes/python-jplephem
python
https://github.com/brandon-rhodes/python-jplephem/blob/48c99ce40c627e24c95479d8845e312ea168f567/jplephem/binary_pck.py#L102-L121
[ "def", "_load", "(", "self", ")", ":", "if", "self", ".", "data_type", "==", "2", ":", "component_count", "=", "3", "else", ":", "raise", "ValueError", "(", "'only binary PCK data type 2 is supported'", ")", "init", ",", "intlen", ",", "rsize", ",", "n", "=", "self", ".", "daf", ".", "read_array", "(", "self", ".", "end_i", "-", "3", ",", "self", ".", "end_i", ")", "initial_epoch", "=", "jd", "(", "init", ")", "interval_length", "=", "intlen", "/", "S_PER_DAY", "coefficient_count", "=", "int", "(", "rsize", "-", "2", ")", "//", "component_count", "coefficients", "=", "self", ".", "daf", ".", "map_array", "(", "self", ".", "start_i", ",", "self", ".", "end_i", "-", "4", ")", "coefficients", ".", "shape", "=", "(", "int", "(", "n", ")", ",", "int", "(", "rsize", ")", ")", "coefficients", "=", "coefficients", "[", ":", ",", "2", ":", "]", "# ignore MID and RADIUS elements", "coefficients", ".", "shape", "=", "(", "int", "(", "n", ")", ",", "component_count", ",", "coefficient_count", ")", "coefficients", "=", "rollaxis", "(", "coefficients", ",", "1", ")", "return", "initial_epoch", ",", "interval_length", ",", "coefficients" ]
48c99ce40c627e24c95479d8845e312ea168f567
test
Segment.compute
Generate angles and derivatives for time `tdb` plus `tdb2`. If ``derivative`` is true, return a tuple containing both the angle and its derivative; otherwise simply return the angles.
jplephem/binary_pck.py
def compute(self, tdb, tdb2, derivative=True): """Generate angles and derivatives for time `tdb` plus `tdb2`. If ``derivative`` is true, return a tuple containing both the angle and its derivative; otherwise simply return the angles. """ scalar = not getattr(tdb, 'shape', 0) and not getattr(tdb2, 'shape', 0) if scalar: tdb = array((tdb,)) data = self._data if data is None: self._data = data = self._load() initial_epoch, interval_length, coefficients = data component_count, n, coefficient_count = coefficients.shape # Subtracting tdb before adding tdb2 affords greater precision. index, offset = divmod((tdb - initial_epoch) + tdb2, interval_length) index = index.astype(int) if (index < 0).any() or (index > n).any(): final_epoch = initial_epoch + interval_length * n raise ValueError('segment only covers dates %.1f through %.1f' % (initial_epoch, final_epoch)) omegas = (index == n) index[omegas] -= 1 offset[omegas] += interval_length coefficients = coefficients[:,index] # Chebyshev polynomial. T = empty((coefficient_count, len(index))) T[0] = 1.0 T[1] = t1 = 2.0 * offset / interval_length - 1.0 twot1 = t1 + t1 for i in range(2, coefficient_count): T[i] = twot1 * T[i-1] - T[i-2] components = (T.T * coefficients).sum(axis=2) if scalar: components = components[:,0] if not derivative: return components # Chebyshev differentiation. dT = empty_like(T) dT[0] = 0.0 dT[1] = 1.0 if coefficient_count > 2: dT[2] = twot1 + twot1 for i in range(3, coefficient_count): dT[i] = twot1 * dT[i-1] - dT[i-2] + T[i-1] + T[i-1] dT *= 2.0 dT /= interval_length rates = (dT.T * coefficients).sum(axis=2) if scalar: rates = rates[:,0] return components, rates
def compute(self, tdb, tdb2, derivative=True): """Generate angles and derivatives for time `tdb` plus `tdb2`. If ``derivative`` is true, return a tuple containing both the angle and its derivative; otherwise simply return the angles. """ scalar = not getattr(tdb, 'shape', 0) and not getattr(tdb2, 'shape', 0) if scalar: tdb = array((tdb,)) data = self._data if data is None: self._data = data = self._load() initial_epoch, interval_length, coefficients = data component_count, n, coefficient_count = coefficients.shape # Subtracting tdb before adding tdb2 affords greater precision. index, offset = divmod((tdb - initial_epoch) + tdb2, interval_length) index = index.astype(int) if (index < 0).any() or (index > n).any(): final_epoch = initial_epoch + interval_length * n raise ValueError('segment only covers dates %.1f through %.1f' % (initial_epoch, final_epoch)) omegas = (index == n) index[omegas] -= 1 offset[omegas] += interval_length coefficients = coefficients[:,index] # Chebyshev polynomial. T = empty((coefficient_count, len(index))) T[0] = 1.0 T[1] = t1 = 2.0 * offset / interval_length - 1.0 twot1 = t1 + t1 for i in range(2, coefficient_count): T[i] = twot1 * T[i-1] - T[i-2] components = (T.T * coefficients).sum(axis=2) if scalar: components = components[:,0] if not derivative: return components # Chebyshev differentiation. dT = empty_like(T) dT[0] = 0.0 dT[1] = 1.0 if coefficient_count > 2: dT[2] = twot1 + twot1 for i in range(3, coefficient_count): dT[i] = twot1 * dT[i-1] - dT[i-2] + T[i-1] + T[i-1] dT *= 2.0 dT /= interval_length rates = (dT.T * coefficients).sum(axis=2) if scalar: rates = rates[:,0] return components, rates
[ "Generate", "angles", "and", "derivatives", "for", "time", "tdb", "plus", "tdb2", "." ]
brandon-rhodes/python-jplephem
python
https://github.com/brandon-rhodes/python-jplephem/blob/48c99ce40c627e24c95479d8845e312ea168f567/jplephem/binary_pck.py#L123-L188
[ "def", "compute", "(", "self", ",", "tdb", ",", "tdb2", ",", "derivative", "=", "True", ")", ":", "scalar", "=", "not", "getattr", "(", "tdb", ",", "'shape'", ",", "0", ")", "and", "not", "getattr", "(", "tdb2", ",", "'shape'", ",", "0", ")", "if", "scalar", ":", "tdb", "=", "array", "(", "(", "tdb", ",", ")", ")", "data", "=", "self", ".", "_data", "if", "data", "is", "None", ":", "self", ".", "_data", "=", "data", "=", "self", ".", "_load", "(", ")", "initial_epoch", ",", "interval_length", ",", "coefficients", "=", "data", "component_count", ",", "n", ",", "coefficient_count", "=", "coefficients", ".", "shape", "# Subtracting tdb before adding tdb2 affords greater precision.", "index", ",", "offset", "=", "divmod", "(", "(", "tdb", "-", "initial_epoch", ")", "+", "tdb2", ",", "interval_length", ")", "index", "=", "index", ".", "astype", "(", "int", ")", "if", "(", "index", "<", "0", ")", ".", "any", "(", ")", "or", "(", "index", ">", "n", ")", ".", "any", "(", ")", ":", "final_epoch", "=", "initial_epoch", "+", "interval_length", "*", "n", "raise", "ValueError", "(", "'segment only covers dates %.1f through %.1f'", "%", "(", "initial_epoch", ",", "final_epoch", ")", ")", "omegas", "=", "(", "index", "==", "n", ")", "index", "[", "omegas", "]", "-=", "1", "offset", "[", "omegas", "]", "+=", "interval_length", "coefficients", "=", "coefficients", "[", ":", ",", "index", "]", "# Chebyshev polynomial.", "T", "=", "empty", "(", "(", "coefficient_count", ",", "len", "(", "index", ")", ")", ")", "T", "[", "0", "]", "=", "1.0", "T", "[", "1", "]", "=", "t1", "=", "2.0", "*", "offset", "/", "interval_length", "-", "1.0", "twot1", "=", "t1", "+", "t1", "for", "i", "in", "range", "(", "2", ",", "coefficient_count", ")", ":", "T", "[", "i", "]", "=", "twot1", "*", "T", "[", "i", "-", "1", "]", "-", "T", "[", "i", "-", "2", "]", "components", "=", "(", "T", ".", "T", "*", "coefficients", ")", ".", "sum", "(", "axis", "=", "2", ")", "if", "scalar", ":", "components", "=", "components", "[", ":", ",", "0", "]", "if", "not", "derivative", ":", "return", "components", "# Chebyshev differentiation.", "dT", "=", "empty_like", "(", "T", ")", "dT", "[", "0", "]", "=", "0.0", "dT", "[", "1", "]", "=", "1.0", "if", "coefficient_count", ">", "2", ":", "dT", "[", "2", "]", "=", "twot1", "+", "twot1", "for", "i", "in", "range", "(", "3", ",", "coefficient_count", ")", ":", "dT", "[", "i", "]", "=", "twot1", "*", "dT", "[", "i", "-", "1", "]", "-", "dT", "[", "i", "-", "2", "]", "+", "T", "[", "i", "-", "1", "]", "+", "T", "[", "i", "-", "1", "]", "dT", "*=", "2.0", "dT", "/=", "interval_length", "rates", "=", "(", "dT", ".", "T", "*", "coefficients", ")", ".", "sum", "(", "axis", "=", "2", ")", "if", "scalar", ":", "rates", "=", "rates", "[", ":", ",", "0", "]", "return", "components", ",", "rates" ]
48c99ce40c627e24c95479d8845e312ea168f567
test
notify
Show system notification with duration t (ms)
MusicBoxApi/utils.py
def notify(msg, msg_type=0, t=None): "Show system notification with duration t (ms)" if platform.system() == 'Darwin': command = notify_command_osx(msg, msg_type, t) else: command = notify_command_linux(msg, t) os.system(command.encode('utf-8'))
def notify(msg, msg_type=0, t=None): "Show system notification with duration t (ms)" if platform.system() == 'Darwin': command = notify_command_osx(msg, msg_type, t) else: command = notify_command_linux(msg, t) os.system(command.encode('utf-8'))
[ "Show", "system", "notification", "with", "duration", "t", "(", "ms", ")" ]
wzpan/MusicBoxApi
python
https://github.com/wzpan/MusicBoxApi/blob/d539d4b06c59bdf79b8d44756c325e39fde81f13/MusicBoxApi/utils.py#L38-L44
[ "def", "notify", "(", "msg", ",", "msg_type", "=", "0", ",", "t", "=", "None", ")", ":", "if", "platform", ".", "system", "(", ")", "==", "'Darwin'", ":", "command", "=", "notify_command_osx", "(", "msg", ",", "msg_type", ",", "t", ")", "else", ":", "command", "=", "notify_command_linux", "(", "msg", ",", "t", ")", "os", ".", "system", "(", "command", ".", "encode", "(", "'utf-8'", ")", ")" ]
d539d4b06c59bdf79b8d44756c325e39fde81f13
test
geturls_new_api
Fetch the URLs of multiple songs in one batch
MusicBoxApi/api.py
def geturls_new_api(song_ids): """ 批量获取音乐的地址 """ br_to_quality = {128000: 'MD 128k', 320000: 'HD 320k'} alters = NetEase().songs_detail_new_api(song_ids) urls = [alter['url'] for alter in alters] return urls
def geturls_new_api(song_ids): """ 批量获取音乐的地址 """ br_to_quality = {128000: 'MD 128k', 320000: 'HD 320k'} alters = NetEase().songs_detail_new_api(song_ids) urls = [alter['url'] for alter in alters] return urls
[ "批量获取音乐的地址" ]
wzpan/MusicBoxApi
python
https://github.com/wzpan/MusicBoxApi/blob/d539d4b06c59bdf79b8d44756c325e39fde81f13/MusicBoxApi/api.py#L180-L185
[ "def", "geturls_new_api", "(", "song_ids", ")", ":", "br_to_quality", "=", "{", "128000", ":", "'MD 128k'", ",", "320000", ":", "'HD 320k'", "}", "alters", "=", "NetEase", "(", ")", ".", "songs_detail_new_api", "(", "song_ids", ")", "urls", "=", "[", "alter", "[", "'url'", "]", "for", "alter", "in", "alters", "]", "return", "urls" ]
d539d4b06c59bdf79b8d44756c325e39fde81f13
test
LoggingVisitor.visit_Call
Visit a function call. We expect every logging statement and string format to be a function call.
logging_format/visitor.py
def visit_Call(self, node): """ Visit a function call. We expect every logging statement and string format to be a function call. """ # CASE 1: We're in a logging statement if self.within_logging_statement(): if self.within_logging_argument() and self.is_format_call(node): self.violations.append((node, STRING_FORMAT_VIOLATION)) super(LoggingVisitor, self).generic_visit(node) return logging_level = self.detect_logging_level(node) if logging_level and self.current_logging_level is None: self.current_logging_level = logging_level # CASE 2: We're in some other statement if logging_level is None: super(LoggingVisitor, self).generic_visit(node) return # CASE 3: We're entering a new logging statement self.current_logging_call = node if logging_level == "warn": self.violations.append((node, WARN_VIOLATION)) self.check_exc_info(node) for index, child in enumerate(iter_child_nodes(node)): if index == 1: self.current_logging_argument = child if index >= 1: self.check_exception_arg(child) if index > 1 and isinstance(child, keyword) and child.arg == "extra": self.current_extra_keyword = child super(LoggingVisitor, self).visit(child) self.current_logging_argument = None self.current_extra_keyword = None self.current_logging_call = None self.current_logging_level = None
def visit_Call(self, node): """ Visit a function call. We expect every logging statement and string format to be a function call. """ # CASE 1: We're in a logging statement if self.within_logging_statement(): if self.within_logging_argument() and self.is_format_call(node): self.violations.append((node, STRING_FORMAT_VIOLATION)) super(LoggingVisitor, self).generic_visit(node) return logging_level = self.detect_logging_level(node) if logging_level and self.current_logging_level is None: self.current_logging_level = logging_level # CASE 2: We're in some other statement if logging_level is None: super(LoggingVisitor, self).generic_visit(node) return # CASE 3: We're entering a new logging statement self.current_logging_call = node if logging_level == "warn": self.violations.append((node, WARN_VIOLATION)) self.check_exc_info(node) for index, child in enumerate(iter_child_nodes(node)): if index == 1: self.current_logging_argument = child if index >= 1: self.check_exception_arg(child) if index > 1 and isinstance(child, keyword) and child.arg == "extra": self.current_extra_keyword = child super(LoggingVisitor, self).visit(child) self.current_logging_argument = None self.current_extra_keyword = None self.current_logging_call = None self.current_logging_level = None
[ "Visit", "a", "function", "call", "." ]
globality-corp/flake8-logging-format
python
https://github.com/globality-corp/flake8-logging-format/blob/3c6ce53d0ff1ec369799cff0ed6d048343252e40/logging_format/visitor.py#L65-L111
[ "def", "visit_Call", "(", "self", ",", "node", ")", ":", "# CASE 1: We're in a logging statement", "if", "self", ".", "within_logging_statement", "(", ")", ":", "if", "self", ".", "within_logging_argument", "(", ")", "and", "self", ".", "is_format_call", "(", "node", ")", ":", "self", ".", "violations", ".", "append", "(", "(", "node", ",", "STRING_FORMAT_VIOLATION", ")", ")", "super", "(", "LoggingVisitor", ",", "self", ")", ".", "generic_visit", "(", "node", ")", "return", "logging_level", "=", "self", ".", "detect_logging_level", "(", "node", ")", "if", "logging_level", "and", "self", ".", "current_logging_level", "is", "None", ":", "self", ".", "current_logging_level", "=", "logging_level", "# CASE 2: We're in some other statement", "if", "logging_level", "is", "None", ":", "super", "(", "LoggingVisitor", ",", "self", ")", ".", "generic_visit", "(", "node", ")", "return", "# CASE 3: We're entering a new logging statement", "self", ".", "current_logging_call", "=", "node", "if", "logging_level", "==", "\"warn\"", ":", "self", ".", "violations", ".", "append", "(", "(", "node", ",", "WARN_VIOLATION", ")", ")", "self", ".", "check_exc_info", "(", "node", ")", "for", "index", ",", "child", "in", "enumerate", "(", "iter_child_nodes", "(", "node", ")", ")", ":", "if", "index", "==", "1", ":", "self", ".", "current_logging_argument", "=", "child", "if", "index", ">=", "1", ":", "self", ".", "check_exception_arg", "(", "child", ")", "if", "index", ">", "1", "and", "isinstance", "(", "child", ",", "keyword", ")", "and", "child", ".", "arg", "==", "\"extra\"", ":", "self", ".", "current_extra_keyword", "=", "child", "super", "(", "LoggingVisitor", ",", "self", ")", ".", "visit", "(", "child", ")", "self", ".", "current_logging_argument", "=", "None", "self", ".", "current_extra_keyword", "=", "None", "self", ".", "current_logging_call", "=", "None", "self", ".", "current_logging_level", "=", "None" ]
3c6ce53d0ff1ec369799cff0ed6d048343252e40
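For orientation, the kinds of call the visitor above flags versus accepts. These snippets are illustrative inputs to the checker, not code from the plugin itself:

```python
import logging

logger = logging.getLogger(__name__)
name = "world"

logger.info("hello {}".format(name))   # flagged: str.format inside a logging call
logger.warn("hello %s", name)          # flagged: .warn() instead of .warning()

logger.info("hello %s", name)          # accepted: deferred %-style interpolation
```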
test
LoggingVisitor.visit_BinOp
Process binary operations while processing the first logging argument.
logging_format/visitor.py
def visit_BinOp(self, node): """ Process binary operations while processing the first logging argument. """ if self.within_logging_statement() and self.within_logging_argument(): # handle percent format if isinstance(node.op, Mod): self.violations.append((node, PERCENT_FORMAT_VIOLATION)) # handle string concat if isinstance(node.op, Add): self.violations.append((node, STRING_CONCAT_VIOLATION)) super(LoggingVisitor, self).generic_visit(node)
def visit_BinOp(self, node): """ Process binary operations while processing the first logging argument. """ if self.within_logging_statement() and self.within_logging_argument(): # handle percent format if isinstance(node.op, Mod): self.violations.append((node, PERCENT_FORMAT_VIOLATION)) # handle string concat if isinstance(node.op, Add): self.violations.append((node, STRING_CONCAT_VIOLATION)) super(LoggingVisitor, self).generic_visit(node)
[ "Process", "binary", "operations", "while", "processing", "the", "first", "logging", "argument", "." ]
globality-corp/flake8-logging-format
python
https://github.com/globality-corp/flake8-logging-format/blob/3c6ce53d0ff1ec369799cff0ed6d048343252e40/logging_format/visitor.py#L113-L125
[ "def", "visit_BinOp", "(", "self", ",", "node", ")", ":", "if", "self", ".", "within_logging_statement", "(", ")", "and", "self", ".", "within_logging_argument", "(", ")", ":", "# handle percent format", "if", "isinstance", "(", "node", ".", "op", ",", "Mod", ")", ":", "self", ".", "violations", ".", "append", "(", "(", "node", ",", "PERCENT_FORMAT_VIOLATION", ")", ")", "# handle string concat", "if", "isinstance", "(", "node", ".", "op", ",", "Add", ")", ":", "self", ".", "violations", ".", "append", "(", "(", "node", ",", "STRING_CONCAT_VIOLATION", ")", ")", "super", "(", "LoggingVisitor", ",", "self", ")", ".", "generic_visit", "(", "node", ")" ]
3c6ce53d0ff1ec369799cff0ed6d048343252e40
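Corresponding illustrative inputs for the binary-operator checks above, again sample code rather than part of the plugin:

```python
import logging

logger = logging.getLogger(__name__)
user = "alice"

logger.info("user %s logged in" % user)     # flagged: percent formatting
logger.info("user " + user + " logged in")  # flagged: string concatenation
logger.info("user %s logged in", user)      # accepted
```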
test
LoggingVisitor.visit_Dict
Process dict arguments.
logging_format/visitor.py
def visit_Dict(self, node): """ Process dict arguments. """ if self.should_check_whitelist(node): for key in node.keys: if key.s in self.whitelist or key.s.startswith("debug_"): continue self.violations.append((self.current_logging_call, WHITELIST_VIOLATION.format(key.s))) if self.should_check_extra_exception(node): for value in node.values: self.check_exception_arg(value) super(LoggingVisitor, self).generic_visit(node)
def visit_Dict(self, node): """ Process dict arguments. """ if self.should_check_whitelist(node): for key in node.keys: if key.s in self.whitelist or key.s.startswith("debug_"): continue self.violations.append((self.current_logging_call, WHITELIST_VIOLATION.format(key.s))) if self.should_check_extra_exception(node): for value in node.values: self.check_exception_arg(value) super(LoggingVisitor, self).generic_visit(node)
[ "Process", "dict", "arguments", "." ]
globality-corp/flake8-logging-format
python
https://github.com/globality-corp/flake8-logging-format/blob/3c6ce53d0ff1ec369799cff0ed6d048343252e40/logging_format/visitor.py#L127-L142
[ "def", "visit_Dict", "(", "self", ",", "node", ")", ":", "if", "self", ".", "should_check_whitelist", "(", "node", ")", ":", "for", "key", "in", "node", ".", "keys", ":", "if", "key", ".", "s", "in", "self", ".", "whitelist", "or", "key", ".", "s", ".", "startswith", "(", "\"debug_\"", ")", ":", "continue", "self", ".", "violations", ".", "append", "(", "(", "self", ".", "current_logging_call", ",", "WHITELIST_VIOLATION", ".", "format", "(", "key", ".", "s", ")", ")", ")", "if", "self", ".", "should_check_extra_exception", "(", "node", ")", ":", "for", "value", "in", "node", ".", "values", ":", "self", ".", "check_exception_arg", "(", "value", ")", "super", "(", "LoggingVisitor", ",", "self", ")", ".", "generic_visit", "(", "node", ")" ]
3c6ce53d0ff1ec369799cff0ed6d048343252e40
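An illustrative `extra` dict of the sort visit_Dict inspects. The whitelist contents themselves are configuration and are assumed here:

```python
import logging

logger = logging.getLogger(__name__)

# Assuming "user_id" is in the configured whitelist and "request_id" is not:
logger.info("login", extra={"user_id": 42})              # accepted
logger.info("login", extra={"request_id": "abc"})        # flagged: not whitelisted
logger.info("login", extra={"debug_payload": {"k": 1}})  # accepted: debug_ prefix
```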
test
LoggingVisitor.visit_JoinedStr
Process f-string arguments.
logging_format/visitor.py
def visit_JoinedStr(self, node): """ Process f-string arguments. """ if version_info >= (3, 6): if self.within_logging_statement(): if any(isinstance(i, FormattedValue) for i in node.values): if self.within_logging_argument(): self.violations.append((node, FSTRING_VIOLATION)) super(LoggingVisitor, self).generic_visit(node)
def visit_JoinedStr(self, node): """ Process f-string arguments. """ if version_info >= (3, 6): if self.within_logging_statement(): if any(isinstance(i, FormattedValue) for i in node.values): if self.within_logging_argument(): self.violations.append((node, FSTRING_VIOLATION)) super(LoggingVisitor, self).generic_visit(node)
[ "Process", "f", "-", "string", "arguments", "." ]
globality-corp/flake8-logging-format
python
https://github.com/globality-corp/flake8-logging-format/blob/3c6ce53d0ff1ec369799cff0ed6d048343252e40/logging_format/visitor.py#L144-L154
[ "def", "visit_JoinedStr", "(", "self", ",", "node", ")", ":", "if", "version_info", ">=", "(", "3", ",", "6", ")", ":", "if", "self", ".", "within_logging_statement", "(", ")", ":", "if", "any", "(", "isinstance", "(", "i", ",", "FormattedValue", ")", "for", "i", "in", "node", ".", "values", ")", ":", "if", "self", ".", "within_logging_argument", "(", ")", ":", "self", ".", "violations", ".", "append", "(", "(", "node", ",", "FSTRING_VIOLATION", ")", ")", "super", "(", "LoggingVisitor", ",", "self", ")", ".", "generic_visit", "(", "node", ")" ]
3c6ce53d0ff1ec369799cff0ed6d048343252e40
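A minimal sketch of the f-string case (Python 3.6+; logger and variable invented):

import logging

logger = logging.getLogger(__name__)
user = "gary"

# Flagged: the f-string is evaluated eagerly before logging (FSTRING_VIOLATION).
logger.info(f"user {user} logged in")

# Preferred: pass the value as a lazy logging argument instead.
logger.info("user %s logged in", user)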
test
LoggingVisitor.visit_keyword
Process keyword arguments.
logging_format/visitor.py
def visit_keyword(self, node): """ Process keyword arguments. """ if self.should_check_whitelist(node): if node.arg not in self.whitelist and not node.arg.startswith("debug_"): self.violations.append((self.current_logging_call, WHITELIST_VIOLATION.format(node.arg))) if self.should_check_extra_exception(node): self.check_exception_arg(node.value) super(LoggingVisitor, self).generic_visit(node)
def visit_keyword(self, node): """ Process keyword arguments. """ if self.should_check_whitelist(node): if node.arg not in self.whitelist and not node.arg.startswith("debug_"): self.violations.append((self.current_logging_call, WHITELIST_VIOLATION.format(node.arg))) if self.should_check_extra_exception(node): self.check_exception_arg(node.value) super(LoggingVisitor, self).generic_visit(node)
[ "Process", "keyword", "arguments", "." ]
globality-corp/flake8-logging-format
python
https://github.com/globality-corp/flake8-logging-format/blob/3c6ce53d0ff1ec369799cff0ed6d048343252e40/logging_format/visitor.py#L156-L168
[ "def", "visit_keyword", "(", "self", ",", "node", ")", ":", "if", "self", ".", "should_check_whitelist", "(", "node", ")", ":", "if", "node", ".", "arg", "not", "in", "self", ".", "whitelist", "and", "not", "node", ".", "arg", ".", "startswith", "(", "\"debug_\"", ")", ":", "self", ".", "violations", ".", "append", "(", "(", "self", ".", "current_logging_call", ",", "WHITELIST_VIOLATION", ".", "format", "(", "node", ".", "arg", ")", ")", ")", "if", "self", ".", "should_check_extra_exception", "(", "node", ")", ":", "self", ".", "check_exception_arg", "(", "node", ".", "value", ")", "super", "(", "LoggingVisitor", ",", "self", ")", ".", "generic_visit", "(", "node", ")" ]
3c6ce53d0ff1ec369799cff0ed6d048343252e40
test
LoggingVisitor.visit_ExceptHandler
Process except blocks.
logging_format/visitor.py
def visit_ExceptHandler(self, node): """ Process except blocks. """ name = self.get_except_handler_name(node) if not name: super(LoggingVisitor, self).generic_visit(node) return self.current_except_names.append(name) super(LoggingVisitor, self).generic_visit(node) self.current_except_names.pop()
def visit_ExceptHandler(self, node): """ Process except blocks. """ name = self.get_except_handler_name(node) if not name: super(LoggingVisitor, self).generic_visit(node) return self.current_except_names.append(name) super(LoggingVisitor, self).generic_visit(node) self.current_except_names.pop()
[ "Process", "except", "blocks", "." ]
globality-corp/flake8-logging-format
python
https://github.com/globality-corp/flake8-logging-format/blob/3c6ce53d0ff1ec369799cff0ed6d048343252e40/logging_format/visitor.py#L170-L182
[ "def", "visit_ExceptHandler", "(", "self", ",", "node", ")", ":", "name", "=", "self", ".", "get_except_handler_name", "(", "node", ")", "if", "not", "name", ":", "super", "(", "LoggingVisitor", ",", "self", ")", ".", "generic_visit", "(", "node", ")", "return", "self", ".", "current_except_names", ".", "append", "(", "name", ")", "super", "(", "LoggingVisitor", ",", "self", ")", ".", "generic_visit", "(", "node", ")", "self", ".", "current_except_names", ".", "pop", "(", ")" ]
3c6ce53d0ff1ec369799cff0ed6d048343252e40
test
LoggingVisitor.detect_logging_level
Heuristic to decide whether an AST Call is a logging call.
logging_format/visitor.py
def detect_logging_level(self, node): """ Heuristic to decide whether an AST Call is a logging call. """ try: if self.get_id_attr(node.func.value) == "warnings": return None # NB: We could also look at the argument signature or the target attribute if node.func.attr in LOGGING_LEVELS: return node.func.attr except AttributeError: pass return None
def detect_logging_level(self, node): """ Heuristic to decide whether an AST Call is a logging call. """ try: if self.get_id_attr(node.func.value) == "warnings": return None # NB: We could also look at the argument signature or the target attribute if node.func.attr in LOGGING_LEVELS: return node.func.attr except AttributeError: pass return None
[ "Heuristic", "to", "decide", "whether", "an", "AST", "Call", "is", "a", "logging", "call", "." ]
globality-corp/flake8-logging-format
python
https://github.com/globality-corp/flake8-logging-format/blob/3c6ce53d0ff1ec369799cff0ed6d048343252e40/logging_format/visitor.py#L184-L197
[ "def", "detect_logging_level", "(", "self", ",", "node", ")", ":", "try", ":", "if", "self", ".", "get_id_attr", "(", "node", ".", "func", ".", "value", ")", "==", "\"warnings\"", ":", "return", "None", "# NB: We could also look at the argument signature or the target attribute", "if", "node", ".", "func", ".", "attr", "in", "LOGGING_LEVELS", ":", "return", "node", ".", "func", ".", "attr", "except", "AttributeError", ":", "pass", "return", "None" ]
3c6ce53d0ff1ec369799cff0ed6d048343252e40
test
LoggingVisitor.get_except_handler_name
Helper to get the exception name from an ExceptHandler node in both py2 and py3.
logging_format/visitor.py
def get_except_handler_name(self, node): """ Helper to get the exception name from an ExceptHandler node in both py2 and py3. """ name = node.name if not name: return None if version_info < (3,): return name.id return name
def get_except_handler_name(self, node): """ Helper to get the exception name from an ExceptHandler node in both py2 and py3. """ name = node.name if not name: return None if version_info < (3,): return name.id return name
[ "Helper", "to", "get", "the", "exception", "name", "from", "an", "ExceptHandler", "node", "in", "both", "py2", "and", "py3", "." ]
globality-corp/flake8-logging-format
python
https://github.com/globality-corp/flake8-logging-format/blob/3c6ce53d0ff1ec369799cff0ed6d048343252e40/logging_format/visitor.py#L228-L239
[ "def", "get_except_handler_name", "(", "self", ",", "node", ")", ":", "name", "=", "node", ".", "name", "if", "not", "name", ":", "return", "None", "if", "version_info", "<", "(", "3", ",", ")", ":", "return", "name", ".", "id", "return", "name" ]
3c6ce53d0ff1ec369799cff0ed6d048343252e40
test
LoggingVisitor.get_id_attr
Check if value has id attribute and return it. :param value: The value to get id from. :return: The value.id.
logging_format/visitor.py
def get_id_attr(self, value): """Check if value has id attribute and return it. :param value: The value to get id from. :return: The value.id. """ if not hasattr(value, "id") and hasattr(value, "value"): value = value.value return value.id
def get_id_attr(self, value): """Check if value has id attribute and return it. :param value: The value to get id from. :return: The value.id. """ if not hasattr(value, "id") and hasattr(value, "value"): value = value.value return value.id
[ "Check", "if", "value", "has", "id", "attribute", "and", "return", "it", "." ]
globality-corp/flake8-logging-format
python
https://github.com/globality-corp/flake8-logging-format/blob/3c6ce53d0ff1ec369799cff0ed6d048343252e40/logging_format/visitor.py#L241-L249
[ "def", "get_id_attr", "(", "self", ",", "value", ")", ":", "if", "not", "hasattr", "(", "value", ",", "\"id\"", ")", "and", "hasattr", "(", "value", ",", "\"value\"", ")", ":", "value", "=", "value", ".", "value", "return", "value", ".", "id" ]
3c6ce53d0ff1ec369799cff0ed6d048343252e40
test
LoggingVisitor.is_bare_exception
Checks if the node is a bare exception name from an except block.
logging_format/visitor.py
def is_bare_exception(self, node): """ Checks if the node is a bare exception name from an except block. """ return isinstance(node, Name) and node.id in self.current_except_names
def is_bare_exception(self, node): """ Checks if the node is a bare exception name from an except block. """ return isinstance(node, Name) and node.id in self.current_except_names
[ "Checks", "if", "the", "node", "is", "a", "bare", "exception", "name", "from", "an", "except", "block", "." ]
globality-corp/flake8-logging-format
python
https://github.com/globality-corp/flake8-logging-format/blob/3c6ce53d0ff1ec369799cff0ed6d048343252e40/logging_format/visitor.py#L251-L256
[ "def", "is_bare_exception", "(", "self", ",", "node", ")", ":", "return", "isinstance", "(", "node", ",", "Name", ")", "and", "node", ".", "id", "in", "self", ".", "current_except_names" ]
3c6ce53d0ff1ec369799cff0ed6d048343252e40
test
LoggingVisitor.is_str_exception
Checks if the node is the expression str(e) or unicode(e), where e is an exception name from an except block
logging_format/visitor.py
def is_str_exception(self, node): """ Checks if the node is the expression str(e) or unicode(e), where e is an exception name from an except block """ return ( isinstance(node, Call) and isinstance(node.func, Name) and node.func.id in ('str', 'unicode') and node.args and self.is_bare_exception(node.args[0]) )
def is_str_exception(self, node): """ Checks if the node is the expression str(e) or unicode(e), where e is an exception name from an except block """ return ( isinstance(node, Call) and isinstance(node.func, Name) and node.func.id in ('str', 'unicode') and node.args and self.is_bare_exception(node.args[0]) )
[ "Checks", "if", "the", "node", "is", "the", "expression", "str", "(", "e", ")", "or", "unicode", "(", "e", ")", "where", "e", "is", "an", "exception", "name", "from", "an", "except", "block" ]
globality-corp/flake8-logging-format
python
https://github.com/globality-corp/flake8-logging-format/blob/3c6ce53d0ff1ec369799cff0ed6d048343252e40/logging_format/visitor.py#L258-L269
[ "def", "is_str_exception", "(", "self", ",", "node", ")", ":", "return", "(", "isinstance", "(", "node", ",", "Call", ")", "and", "isinstance", "(", "node", ".", "func", ",", "Name", ")", "and", "node", ".", "func", ".", "id", "in", "(", "'str'", ",", "'unicode'", ")", "and", "node", ".", "args", "and", "self", ".", "is_bare_exception", "(", "node", ".", "args", "[", "0", "]", ")", ")" ]
3c6ce53d0ff1ec369799cff0ed6d048343252e40
test
LoggingVisitor.check_exc_info
Reports a violation if exc_info keyword is used with logging.error or logging.exception.
logging_format/visitor.py
def check_exc_info(self, node): """ Reports a violation if exc_info keyword is used with logging.error or logging.exception. """ if self.current_logging_level not in ('error', 'exception'): return for kw in node.keywords: if kw.arg == 'exc_info': if self.current_logging_level == 'error': violation = ERROR_EXC_INFO_VIOLATION else: violation = REDUNDANT_EXC_INFO_VIOLATION self.violations.append((node, violation))
def check_exc_info(self, node): """ Reports a violation if exc_info keyword is used with logging.error or logging.exception. """ if self.current_logging_level not in ('error', 'exception'): return for kw in node.keywords: if kw.arg == 'exc_info': if self.current_logging_level == 'error': violation = ERROR_EXC_INFO_VIOLATION else: violation = REDUNDANT_EXC_INFO_VIOLATION self.violations.append((node, violation))
[ "Reports", "a", "violation", "if", "exc_info", "keyword", "is", "used", "with", "logging", ".", "error", "or", "logging", ".", "exception", "." ]
globality-corp/flake8-logging-format
python
https://github.com/globality-corp/flake8-logging-format/blob/3c6ce53d0ff1ec369799cff0ed6d048343252e40/logging_format/visitor.py#L275-L289
[ "def", "check_exc_info", "(", "self", ",", "node", ")", ":", "if", "self", ".", "current_logging_level", "not", "in", "(", "'error'", ",", "'exception'", ")", ":", "return", "for", "kw", "in", "node", ".", "keywords", ":", "if", "kw", ".", "arg", "==", "'exc_info'", ":", "if", "self", ".", "current_logging_level", "==", "'error'", ":", "violation", "=", "ERROR_EXC_INFO_VIOLATION", "else", ":", "violation", "=", "REDUNDANT_EXC_INFO_VIOLATION", "self", ".", "violations", ".", "append", "(", "(", "node", ",", "violation", ")", ")" ]
3c6ce53d0ff1ec369799cff0ed6d048343252e40
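A sketch of the two cases this check distinguishes (message text invented):

import logging

logger = logging.getLogger(__name__)

try:
    1 / 0
except ZeroDivisionError:
    # Flagged (ERROR_EXC_INFO_VIOLATION): error(..., exc_info=...) should be exception(...).
    logger.error("division failed", exc_info=True)

    # Flagged (REDUNDANT_EXC_INFO_VIOLATION): exception(...) already records the traceback.
    logger.exception("division failed", exc_info=True)

    # Preferred.
    logger.exception("division failed")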
test
delete_file_if_needed
Delete the file from the database only if needed. When editing and the filefield contains a new file, deletes the previous file (if any) from the database. Call this function immediately BEFORE saving the instance.
db_file_storage/model_utils.py
def delete_file_if_needed(instance, filefield_name): """Delete file from database only if needed. When editing and the filefield is a new file, deletes the previous file (if any) from the database. Call this function immediately BEFORE saving the instance. """ if instance.pk: model_class = type(instance) # Check if there is a file for the instance in the database if model_class.objects.filter(pk=instance.pk).exclude( **{'%s__isnull' % filefield_name: True} ).exclude( **{'%s__exact' % filefield_name: ''} ).exists(): old_file = getattr( model_class.objects.only(filefield_name).get(pk=instance.pk), filefield_name ) else: old_file = None # If there is a file, delete it if needed if old_file: # When editing and NOT changing the file, # old_file.name == getattr(instance, filefield_name) # returns True. In this case, the file must NOT be deleted. # If the file IS being changed, the comparison returns False. # In this case, the old file MUST be deleted. if (old_file.name == getattr(instance, filefield_name)) is False: DatabaseFileStorage().delete(old_file.name)
def delete_file_if_needed(instance, filefield_name): """Delete file from database only if needed. When editing and the filefield is a new file, deletes the previous file (if any) from the database. Call this function immediately BEFORE saving the instance. """ if instance.pk: model_class = type(instance) # Check if there is a file for the instance in the database if model_class.objects.filter(pk=instance.pk).exclude( **{'%s__isnull' % filefield_name: True} ).exclude( **{'%s__exact' % filefield_name: ''} ).exists(): old_file = getattr( model_class.objects.only(filefield_name).get(pk=instance.pk), filefield_name ) else: old_file = None # If there is a file, delete it if needed if old_file: # When editing and NOT changing the file, # old_file.name == getattr(instance, filefield_name) # returns True. In this case, the file must NOT be deleted. # If the file IS being changed, the comparison returns False. # In this case, the old file MUST be deleted. if (old_file.name == getattr(instance, filefield_name)) is False: DatabaseFileStorage().delete(old_file.name)
[ "Delete", "file", "from", "database", "only", "if", "needed", "." ]
victor-o-silva/db_file_storage
python
https://github.com/victor-o-silva/db_file_storage/blob/ff5375422246c42b8a7bba558f1c3b49bb985f36/db_file_storage/model_utils.py#L5-L36
[ "def", "delete_file_if_needed", "(", "instance", ",", "filefield_name", ")", ":", "if", "instance", ".", "pk", ":", "model_class", "=", "type", "(", "instance", ")", "# Check if there is a file for the instance in the database", "if", "model_class", ".", "objects", ".", "filter", "(", "pk", "=", "instance", ".", "pk", ")", ".", "exclude", "(", "*", "*", "{", "'%s__isnull'", "%", "filefield_name", ":", "True", "}", ")", ".", "exclude", "(", "*", "*", "{", "'%s__exact'", "%", "filefield_name", ":", "''", "}", ")", ".", "exists", "(", ")", ":", "old_file", "=", "getattr", "(", "model_class", ".", "objects", ".", "only", "(", "filefield_name", ")", ".", "get", "(", "pk", "=", "instance", ".", "pk", ")", ",", "filefield_name", ")", "else", ":", "old_file", "=", "None", "# If there is a file, delete it if needed", "if", "old_file", ":", "# When editing and NOT changing the file,", "# old_file.name == getattr(instance, filefield_name)", "# returns True. In this case, the file must NOT be deleted.", "# If the file IS being changed, the comparison returns False.", "# In this case, the old file MUST be deleted.", "if", "(", "old_file", ".", "name", "==", "getattr", "(", "instance", ",", "filefield_name", ")", ")", "is", "False", ":", "DatabaseFileStorage", "(", ")", ".", "delete", "(", "old_file", ".", "name", ")" ]
ff5375422246c42b8a7bba558f1c3b49bb985f36
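A sketch of how this helper (together with delete_file below) is typically wired into a model; the model name, field name and upload_to value are invented for the example:

from django.db import models
from db_file_storage.model_utils import delete_file, delete_file_if_needed

class Attachment(models.Model):
    document = models.FileField(upload_to='myapp.AttachmentFile/bytes/filename/mimetype')

    def save(self, *args, **kwargs):
        # Drop the previously stored file first when a new one replaces it.
        delete_file_if_needed(self, 'document')
        super(Attachment, self).save(*args, **kwargs)

    def delete(self, *args, **kwargs):
        super(Attachment, self).delete(*args, **kwargs)
        # Clean up the stored file after the instance itself is gone.
        delete_file(self, 'document')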
test
delete_file
Delete the file (if any) from the database. Call this function immediately AFTER deleting the instance.
db_file_storage/model_utils.py
def delete_file(instance, filefield_name): """Delete the file (if any) from the database. Call this function immediately AFTER deleting the instance. """ file_instance = getattr(instance, filefield_name) if file_instance: DatabaseFileStorage().delete(file_instance.name)
def delete_file(instance, filefield_name): """Delete the file (if any) from the database. Call this function immediately AFTER deleting the instance. """ file_instance = getattr(instance, filefield_name) if file_instance: DatabaseFileStorage().delete(file_instance.name)
[ "Delete", "the", "file", "(", "if", "any", ")", "from", "the", "database", "." ]
victor-o-silva/db_file_storage
python
https://github.com/victor-o-silva/db_file_storage/blob/ff5375422246c42b8a7bba558f1c3b49bb985f36/db_file_storage/model_utils.py#L39-L46
[ "def", "delete_file", "(", "instance", ",", "filefield_name", ")", ":", "file_instance", "=", "getattr", "(", "instance", ",", "filefield_name", ")", "if", "file_instance", ":", "DatabaseFileStorage", "(", ")", ".", "delete", "(", "file_instance", ".", "name", ")" ]
ff5375422246c42b8a7bba558f1c3b49bb985f36
test
db_file_widget
Edit the download-link inner text.
db_file_storage/form_widgets.py
def db_file_widget(cls): """Edit the download-link inner text.""" def get_link_display(url): unquoted = unquote(url.split('%2F')[-1]) if sys.version_info.major == 2: # python 2 from django.utils.encoding import force_unicode unquoted = force_unicode(unquoted) return escape(unquoted) def get_template_substitution_values(self, value): # Used by Django < 1.11 subst = super(cls, self).get_template_substitution_values(value) subst['initial'] = get_link_display(value.url) return subst setattr(cls, 'get_template_substitution_values', get_template_substitution_values) def get_context(self, name, value, attrs): context = super(cls, self).get_context(name, value, attrs) if value and hasattr(value, 'url'): context['widget']['display'] = get_link_display(value.url) return context setattr(cls, 'get_context', get_context) return cls
def db_file_widget(cls): """Edit the download-link inner text.""" def get_link_display(url): unquoted = unquote(url.split('%2F')[-1]) if sys.version_info.major == 2: # python 2 from django.utils.encoding import force_unicode unquoted = force_unicode(unquoted) return escape(unquoted) def get_template_substitution_values(self, value): # Used by Django < 1.11 subst = super(cls, self).get_template_substitution_values(value) subst['initial'] = get_link_display(value.url) return subst setattr(cls, 'get_template_substitution_values', get_template_substitution_values) def get_context(self, name, value, attrs): context = super(cls, self).get_context(name, value, attrs) if value and hasattr(value, 'url'): context['widget']['display'] = get_link_display(value.url) return context setattr(cls, 'get_context', get_context) return cls
[ "Edit", "the", "download", "-", "link", "inner", "text", "." ]
victor-o-silva/db_file_storage
python
https://github.com/victor-o-silva/db_file_storage/blob/ff5375422246c42b8a7bba558f1c3b49bb985f36/db_file_storage/form_widgets.py#L14-L40
[ "def", "db_file_widget", "(", "cls", ")", ":", "def", "get_link_display", "(", "url", ")", ":", "unquoted", "=", "unquote", "(", "url", ".", "split", "(", "'%2F'", ")", "[", "-", "1", "]", ")", "if", "sys", ".", "version_info", ".", "major", "==", "2", ":", "# python 2", "from", "django", ".", "utils", ".", "encoding", "import", "force_unicode", "unquoted", "=", "force_unicode", "(", "unquoted", ")", "return", "escape", "(", "unquoted", ")", "def", "get_template_substitution_values", "(", "self", ",", "value", ")", ":", "# Used by Django < 1.11", "subst", "=", "super", "(", "cls", ",", "self", ")", ".", "get_template_substitution_values", "(", "value", ")", "subst", "[", "'initial'", "]", "=", "get_link_display", "(", "value", ".", "url", ")", "return", "subst", "setattr", "(", "cls", ",", "'get_template_substitution_values'", ",", "get_template_substitution_values", ")", "def", "get_context", "(", "self", ",", "name", ",", "value", ",", "attrs", ")", ":", "context", "=", "super", "(", "cls", ",", "self", ")", ".", "get_context", "(", "name", ",", "value", ",", "attrs", ")", "if", "value", "and", "hasattr", "(", "value", ",", "'url'", ")", ":", "context", "[", "'widget'", "]", "[", "'display'", "]", "=", "get_link_display", "(", "value", ".", "url", ")", "return", "context", "setattr", "(", "cls", ",", "'get_context'", ",", "get_context", ")", "return", "cls" ]
ff5375422246c42b8a7bba558f1c3b49bb985f36
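A sketch of how the decorator is applied to a file-input widget class (the subclass name is illustrative):

from django.forms.widgets import ClearableFileInput
from db_file_storage.form_widgets import db_file_widget

@db_file_widget
class DBClearableFileInput(ClearableFileInput):
    """Clearable file input whose download link shows the unquoted file name."""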
test
PDFTemplateResponse.rendered_content
Returns the freshly rendered content for the template and context described by the PDFResponse. This *does not* set the final content of the response. To set the response content, you must either call render(), or set the content explicitly using the value of this property.
wkhtmltopdf/views.py
def rendered_content(self): """Returns the freshly rendered content for the template and context described by the PDFResponse. This *does not* set the final content of the response. To set the response content, you must either call render(), or set the content explicitly using the value of this property. """ cmd_options = self.cmd_options.copy() return render_pdf_from_template( self.resolve_template(self.template_name), self.resolve_template(self.header_template), self.resolve_template(self.footer_template), context=self.resolve_context(self.context_data), request=self._request, cmd_options=cmd_options, cover_template=self.resolve_template(self.cover_template) )
def rendered_content(self): """Returns the freshly rendered content for the template and context described by the PDFResponse. This *does not* set the final content of the response. To set the response content, you must either call render(), or set the content explicitly using the value of this property. """ cmd_options = self.cmd_options.copy() return render_pdf_from_template( self.resolve_template(self.template_name), self.resolve_template(self.header_template), self.resolve_template(self.footer_template), context=self.resolve_context(self.context_data), request=self._request, cmd_options=cmd_options, cover_template=self.resolve_template(self.cover_template) )
[ "Returns", "the", "freshly", "rendered", "content", "for", "the", "template", "and", "context", "described", "by", "the", "PDFResponse", "." ]
incuna/django-wkhtmltopdf
python
https://github.com/incuna/django-wkhtmltopdf/blob/4e73f604c48f7f449c916c4257a72af59517322c/wkhtmltopdf/views.py#L64-L82
[ "def", "rendered_content", "(", "self", ")", ":", "cmd_options", "=", "self", ".", "cmd_options", ".", "copy", "(", ")", "return", "render_pdf_from_template", "(", "self", ".", "resolve_template", "(", "self", ".", "template_name", ")", ",", "self", ".", "resolve_template", "(", "self", ".", "header_template", ")", ",", "self", ".", "resolve_template", "(", "self", ".", "footer_template", ")", ",", "context", "=", "self", ".", "resolve_context", "(", "self", ".", "context_data", ")", ",", "request", "=", "self", ".", "_request", ",", "cmd_options", "=", "cmd_options", ",", "cover_template", "=", "self", ".", "resolve_template", "(", "self", ".", "cover_template", ")", ")" ]
4e73f604c48f7f449c916c4257a72af59517322c
test
PDFTemplateView.render_to_response
Returns a PDF response with a template rendered with the given context.
wkhtmltopdf/views.py
def render_to_response(self, context, **response_kwargs): """ Returns a PDF response with a template rendered with the given context. """ filename = response_kwargs.pop('filename', None) cmd_options = response_kwargs.pop('cmd_options', None) if issubclass(self.response_class, PDFTemplateResponse): if filename is None: filename = self.get_filename() if cmd_options is None: cmd_options = self.get_cmd_options() return super(PDFTemplateView, self).render_to_response( context=context, filename=filename, show_content_in_browser=self.show_content_in_browser, header_template=self.header_template, footer_template=self.footer_template, cmd_options=cmd_options, cover_template=self.cover_template, **response_kwargs ) else: return super(PDFTemplateView, self).render_to_response( context=context, **response_kwargs )
def render_to_response(self, context, **response_kwargs): """ Returns a PDF response with a template rendered with the given context. """ filename = response_kwargs.pop('filename', None) cmd_options = response_kwargs.pop('cmd_options', None) if issubclass(self.response_class, PDFTemplateResponse): if filename is None: filename = self.get_filename() if cmd_options is None: cmd_options = self.get_cmd_options() return super(PDFTemplateView, self).render_to_response( context=context, filename=filename, show_content_in_browser=self.show_content_in_browser, header_template=self.header_template, footer_template=self.footer_template, cmd_options=cmd_options, cover_template=self.cover_template, **response_kwargs ) else: return super(PDFTemplateView, self).render_to_response( context=context, **response_kwargs )
[ "Returns", "a", "PDF", "response", "with", "a", "template", "rendered", "with", "the", "given", "context", "." ]
incuna/django-wkhtmltopdf
python
https://github.com/incuna/django-wkhtmltopdf/blob/4e73f604c48f7f449c916c4257a72af59517322c/wkhtmltopdf/views.py#L134-L161
[ "def", "render_to_response", "(", "self", ",", "context", ",", "*", "*", "response_kwargs", ")", ":", "filename", "=", "response_kwargs", ".", "pop", "(", "'filename'", ",", "None", ")", "cmd_options", "=", "response_kwargs", ".", "pop", "(", "'cmd_options'", ",", "None", ")", "if", "issubclass", "(", "self", ".", "response_class", ",", "PDFTemplateResponse", ")", ":", "if", "filename", "is", "None", ":", "filename", "=", "self", ".", "get_filename", "(", ")", "if", "cmd_options", "is", "None", ":", "cmd_options", "=", "self", ".", "get_cmd_options", "(", ")", "return", "super", "(", "PDFTemplateView", ",", "self", ")", ".", "render_to_response", "(", "context", "=", "context", ",", "filename", "=", "filename", ",", "show_content_in_browser", "=", "self", ".", "show_content_in_browser", ",", "header_template", "=", "self", ".", "header_template", ",", "footer_template", "=", "self", ".", "footer_template", ",", "cmd_options", "=", "cmd_options", ",", "cover_template", "=", "self", ".", "cover_template", ",", "*", "*", "response_kwargs", ")", "else", ":", "return", "super", "(", "PDFTemplateView", ",", "self", ")", ".", "render_to_response", "(", "context", "=", "context", ",", "*", "*", "response_kwargs", ")" ]
4e73f604c48f7f449c916c4257a72af59517322c
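A typical subclass, roughly in the style of the project's README (template name, filename and options are placeholders):

from wkhtmltopdf.views import PDFTemplateView

class MyPDFView(PDFTemplateView):
    filename = 'my_pdf.pdf'
    template_name = 'my_template.html'
    cmd_options = {
        'margin-top': 3,
    }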
test
_options_to_args
Converts ``options`` into a list of command-line arguments. Skips arguments where no value is provided. For flag-type (no-argument) options, passes only the name, and only if the value is True.
wkhtmltopdf/utils.py
def _options_to_args(**options): """ Converts ``options`` into a list of command-line arguments. Skip arguments where no value is provided For flag-type (No argument) variables, pass only the name and only then if the value is True """ flags = [] for name in sorted(options): value = options[name] formatted_flag = '--%s' % name if len(name) > 1 else '-%s' % name formatted_flag = formatted_flag.replace('_', '-') accepts_no_arguments = formatted_flag in NO_ARGUMENT_OPTIONS if value is None or (value is False and accepts_no_arguments): continue flags.append(formatted_flag) if accepts_no_arguments: continue flags.append(six.text_type(value)) return flags
def _options_to_args(**options): """ Converts ``options`` into a list of command-line arguments. Skip arguments where no value is provided For flag-type (No argument) variables, pass only the name and only then if the value is True """ flags = [] for name in sorted(options): value = options[name] formatted_flag = '--%s' % name if len(name) > 1 else '-%s' % name formatted_flag = formatted_flag.replace('_', '-') accepts_no_arguments = formatted_flag in NO_ARGUMENT_OPTIONS if value is None or (value is False and accepts_no_arguments): continue flags.append(formatted_flag) if accepts_no_arguments: continue flags.append(six.text_type(value)) return flags
[ "Converts", "options", "into", "a", "list", "of", "command", "-", "line", "arguments", ".", "Skip", "arguments", "where", "no", "value", "is", "provided", "For", "flag", "-", "type", "(", "No", "argument", ")", "variables", "pass", "only", "the", "name", "and", "only", "then", "if", "the", "value", "is", "True" ]
incuna/django-wkhtmltopdf
python
https://github.com/incuna/django-wkhtmltopdf/blob/4e73f604c48f7f449c916c4257a72af59517322c/wkhtmltopdf/utils.py#L52-L70
[ "def", "_options_to_args", "(", "*", "*", "options", ")", ":", "flags", "=", "[", "]", "for", "name", "in", "sorted", "(", "options", ")", ":", "value", "=", "options", "[", "name", "]", "formatted_flag", "=", "'--%s'", "%", "name", "if", "len", "(", "name", ")", ">", "1", "else", "'-%s'", "%", "name", "formatted_flag", "=", "formatted_flag", ".", "replace", "(", "'_'", ",", "'-'", ")", "accepts_no_arguments", "=", "formatted_flag", "in", "NO_ARGUMENT_OPTIONS", "if", "value", "is", "None", "or", "(", "value", "is", "False", "and", "accepts_no_arguments", ")", ":", "continue", "flags", ".", "append", "(", "formatted_flag", ")", "if", "accepts_no_arguments", ":", "continue", "flags", ".", "append", "(", "six", ".", "text_type", "(", "value", ")", ")", "return", "flags" ]
4e73f604c48f7f449c916c4257a72af59517322c
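A worked example of the conversion rules; calling the private helper directly here is purely for illustration, and whether '--quiet' takes no argument depends on NO_ARGUMENT_OPTIONS:

from wkhtmltopdf.utils import _options_to_args

# Names are sorted, underscores become dashes, values are stringified.
_options_to_args(margin_top=10, encoding='utf8')
# -> ['--encoding', 'utf8', '--margin-top', '10']

# Assuming '--quiet' is listed in NO_ARGUMENT_OPTIONS: True emits the bare flag,
# False suppresses it entirely, and None always skips the option.
_options_to_args(quiet=True)       # -> ['--quiet']
_options_to_args(quiet=False)      # -> []
_options_to_args(margin_top=None)  # -> []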
test
wkhtmltopdf
Converts html to PDF using http://wkhtmltopdf.org/. pages: List of file paths or URLs of the html to be converted. output: Optional output file path. If None, the output is returned. **kwargs: Passed to wkhtmltopdf via _extra_args() (See https://github.com/antialize/wkhtmltopdf/blob/master/README_WKHTMLTOPDF for acceptable args.) Kwargs are passed through as arguments. e.g.: {'footer_html': 'http://example.com/foot.html'} becomes '--footer-html http://example.com/foot.html' Where there is no value passed, use True. e.g.: {'disable_javascript': True} becomes: '--disable-javascript' To disable a default option, use None. e.g.: {'quiet': None} becomes: '' example usage: wkhtmltopdf(pages=['/tmp/example.html'], dpi=300, orientation='Landscape', disable_javascript=True)
wkhtmltopdf/utils.py
def wkhtmltopdf(pages, output=None, **kwargs): """ Converts html to PDF using http://wkhtmltopdf.org/. pages: List of file paths or URLs of the html to be converted. output: Optional output file path. If None, the output is returned. **kwargs: Passed to wkhtmltopdf via _extra_args() (See https://github.com/antialize/wkhtmltopdf/blob/master/README_WKHTMLTOPDF for acceptable args.) Kwargs is passed through as arguments. e.g.: {'footer_html': 'http://example.com/foot.html'} becomes '--footer-html http://example.com/foot.html' Where there is no value passed, use True. e.g.: {'disable_javascript': True} becomes: '--disable-javascript' To disable a default option, use None. e.g: {'quiet': None'} becomes: '' example usage: wkhtmltopdf(pages=['/tmp/example.html'], dpi=300, orientation='Landscape', disable_javascript=True) """ if isinstance(pages, six.string_types): # Support a single page. pages = [pages] if output is None: # Standard output. output = '-' has_cover = kwargs.pop('has_cover', False) # Default options: options = getattr(settings, 'WKHTMLTOPDF_CMD_OPTIONS', None) if options is None: options = {'quiet': True} else: options = copy(options) options.update(kwargs) # Force --encoding utf8 unless the user has explicitly overridden this. options.setdefault('encoding', 'utf8') env = getattr(settings, 'WKHTMLTOPDF_ENV', None) if env is not None: env = dict(os.environ, **env) cmd = 'WKHTMLTOPDF_CMD' cmd = getattr(settings, cmd, os.environ.get(cmd, 'wkhtmltopdf')) # Adding 'cover' option to add cover_file to the pdf to generate. if has_cover: pages.insert(0, 'cover') ck_args = list(chain(shlex.split(cmd), _options_to_args(**options), list(pages), [output])) ck_kwargs = {'env': env} # Handling of fileno() attr. based on https://github.com/GrahamDumpleton/mod_wsgi/issues/85 try: i = sys.stderr.fileno() ck_kwargs['stderr'] = sys.stderr except (AttributeError, IOError): # can't call fileno() on mod_wsgi stderr object pass return check_output(ck_args, **ck_kwargs)
def wkhtmltopdf(pages, output=None, **kwargs): """ Converts html to PDF using http://wkhtmltopdf.org/. pages: List of file paths or URLs of the html to be converted. output: Optional output file path. If None, the output is returned. **kwargs: Passed to wkhtmltopdf via _extra_args() (See https://github.com/antialize/wkhtmltopdf/blob/master/README_WKHTMLTOPDF for acceptable args.) Kwargs is passed through as arguments. e.g.: {'footer_html': 'http://example.com/foot.html'} becomes '--footer-html http://example.com/foot.html' Where there is no value passed, use True. e.g.: {'disable_javascript': True} becomes: '--disable-javascript' To disable a default option, use None. e.g: {'quiet': None'} becomes: '' example usage: wkhtmltopdf(pages=['/tmp/example.html'], dpi=300, orientation='Landscape', disable_javascript=True) """ if isinstance(pages, six.string_types): # Support a single page. pages = [pages] if output is None: # Standard output. output = '-' has_cover = kwargs.pop('has_cover', False) # Default options: options = getattr(settings, 'WKHTMLTOPDF_CMD_OPTIONS', None) if options is None: options = {'quiet': True} else: options = copy(options) options.update(kwargs) # Force --encoding utf8 unless the user has explicitly overridden this. options.setdefault('encoding', 'utf8') env = getattr(settings, 'WKHTMLTOPDF_ENV', None) if env is not None: env = dict(os.environ, **env) cmd = 'WKHTMLTOPDF_CMD' cmd = getattr(settings, cmd, os.environ.get(cmd, 'wkhtmltopdf')) # Adding 'cover' option to add cover_file to the pdf to generate. if has_cover: pages.insert(0, 'cover') ck_args = list(chain(shlex.split(cmd), _options_to_args(**options), list(pages), [output])) ck_kwargs = {'env': env} # Handling of fileno() attr. based on https://github.com/GrahamDumpleton/mod_wsgi/issues/85 try: i = sys.stderr.fileno() ck_kwargs['stderr'] = sys.stderr except (AttributeError, IOError): # can't call fileno() on mod_wsgi stderr object pass return check_output(ck_args, **ck_kwargs)
[ "Converts", "html", "to", "PDF", "using", "http", ":", "//", "wkhtmltopdf", ".", "org", "/", "." ]
incuna/django-wkhtmltopdf
python
https://github.com/incuna/django-wkhtmltopdf/blob/4e73f604c48f7f449c916c4257a72af59517322c/wkhtmltopdf/utils.py#L73-L147
[ "def", "wkhtmltopdf", "(", "pages", ",", "output", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "isinstance", "(", "pages", ",", "six", ".", "string_types", ")", ":", "# Support a single page.", "pages", "=", "[", "pages", "]", "if", "output", "is", "None", ":", "# Standard output.", "output", "=", "'-'", "has_cover", "=", "kwargs", ".", "pop", "(", "'has_cover'", ",", "False", ")", "# Default options:", "options", "=", "getattr", "(", "settings", ",", "'WKHTMLTOPDF_CMD_OPTIONS'", ",", "None", ")", "if", "options", "is", "None", ":", "options", "=", "{", "'quiet'", ":", "True", "}", "else", ":", "options", "=", "copy", "(", "options", ")", "options", ".", "update", "(", "kwargs", ")", "# Force --encoding utf8 unless the user has explicitly overridden this.", "options", ".", "setdefault", "(", "'encoding'", ",", "'utf8'", ")", "env", "=", "getattr", "(", "settings", ",", "'WKHTMLTOPDF_ENV'", ",", "None", ")", "if", "env", "is", "not", "None", ":", "env", "=", "dict", "(", "os", ".", "environ", ",", "*", "*", "env", ")", "cmd", "=", "'WKHTMLTOPDF_CMD'", "cmd", "=", "getattr", "(", "settings", ",", "cmd", ",", "os", ".", "environ", ".", "get", "(", "cmd", ",", "'wkhtmltopdf'", ")", ")", "# Adding 'cover' option to add cover_file to the pdf to generate.", "if", "has_cover", ":", "pages", ".", "insert", "(", "0", ",", "'cover'", ")", "ck_args", "=", "list", "(", "chain", "(", "shlex", ".", "split", "(", "cmd", ")", ",", "_options_to_args", "(", "*", "*", "options", ")", ",", "list", "(", "pages", ")", ",", "[", "output", "]", ")", ")", "ck_kwargs", "=", "{", "'env'", ":", "env", "}", "# Handling of fileno() attr. based on https://github.com/GrahamDumpleton/mod_wsgi/issues/85", "try", ":", "i", "=", "sys", ".", "stderr", ".", "fileno", "(", ")", "ck_kwargs", "[", "'stderr'", "]", "=", "sys", ".", "stderr", "except", "(", "AttributeError", ",", "IOError", ")", ":", "# can't call fileno() on mod_wsgi stderr object", "pass", "return", "check_output", "(", "ck_args", ",", "*", "*", "ck_kwargs", ")" ]
4e73f604c48f7f449c916c4257a72af59517322c
test
http_quote
Given a unicode string, will do its dandiest to give you back a valid ascii charset string you can use in, say, http headers and the like.
wkhtmltopdf/utils.py
def http_quote(string): """ Given a unicode string, will do its dandiest to give you back a valid ascii charset string you can use in, say, http headers and the like. """ if isinstance(string, six.text_type): try: import unidecode except ImportError: pass else: string = unidecode.unidecode(string) string = string.encode('ascii', 'replace') # Wrap in double-quotes for ; , and the like string = string.replace(b'\\', b'\\\\').replace(b'"', b'\\"') return '"{0!s}"'.format(string.decode())
def http_quote(string): """ Given a unicode string, will do its dandiest to give you back a valid ascii charset string you can use in, say, http headers and the like. """ if isinstance(string, six.text_type): try: import unidecode except ImportError: pass else: string = unidecode.unidecode(string) string = string.encode('ascii', 'replace') # Wrap in double-quotes for ; , and the like string = string.replace(b'\\', b'\\\\').replace(b'"', b'\\"') return '"{0!s}"'.format(string.decode())
[ "Given", "a", "unicode", "string", "will", "do", "its", "dandiest", "to", "give", "you", "back", "a", "valid", "ascii", "charset", "string", "you", "can", "use", "in", "say", "http", "headers", "and", "the", "like", "." ]
incuna/django-wkhtmltopdf
python
https://github.com/incuna/django-wkhtmltopdf/blob/4e73f604c48f7f449c916c4257a72af59517322c/wkhtmltopdf/utils.py#L254-L270
[ "def", "http_quote", "(", "string", ")", ":", "if", "isinstance", "(", "string", ",", "six", ".", "text_type", ")", ":", "try", ":", "import", "unidecode", "except", "ImportError", ":", "pass", "else", ":", "string", "=", "unidecode", ".", "unidecode", "(", "string", ")", "string", "=", "string", ".", "encode", "(", "'ascii'", ",", "'replace'", ")", "# Wrap in double-quotes for ; , and the like", "string", "=", "string", ".", "replace", "(", "b'\\\\'", ",", "b'\\\\\\\\'", ")", ".", "replace", "(", "b'\"'", ",", "b'\\\\\"'", ")", "return", "'\"{0!s}\"'", ".", "format", "(", "string", ".", "decode", "(", ")", ")" ]
4e73f604c48f7f449c916c4257a72af59517322c
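A small illustration (results shown in comments; the exact transliteration depends on whether the optional unidecode package is installed):

from wkhtmltopdf.utils import http_quote

# Accented characters are transliterated by unidecode if available,
# otherwise replaced with '?' by encode('ascii', 'replace').
http_quote(u'résumé.pdf')      # -> '"resume.pdf"' (or '"r?sum?.pdf"')

# Embedded double quotes are escaped so the result stays one header token.
http_quote(u'my "file".pdf')   # -> '"my \"file\".pdf"'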
test
make_absolute_paths
Convert all MEDIA file references into file:// URL paths so that they are displayed correctly in PDFs.
wkhtmltopdf/utils.py
def make_absolute_paths(content): """Convert all MEDIA files into a file://URL paths in order to correctly get it displayed in PDFs.""" overrides = [ { 'root': settings.MEDIA_ROOT, 'url': settings.MEDIA_URL, }, { 'root': settings.STATIC_ROOT, 'url': settings.STATIC_URL, } ] has_scheme = re.compile(r'^[^:/]+://') for x in overrides: if not x['url'] or has_scheme.match(x['url']): continue if not x['root'].endswith('/'): x['root'] += '/' occur_pattern = '''["|']({0}.*?)["|']''' occurences = re.findall(occur_pattern.format(x['url']), content) occurences = list(set(occurences)) # Remove dups for occur in occurences: content = content.replace(occur, pathname2fileurl(x['root']) + occur[len(x['url']):]) return content
def make_absolute_paths(content): """Convert all MEDIA files into a file://URL paths in order to correctly get it displayed in PDFs.""" overrides = [ { 'root': settings.MEDIA_ROOT, 'url': settings.MEDIA_URL, }, { 'root': settings.STATIC_ROOT, 'url': settings.STATIC_URL, } ] has_scheme = re.compile(r'^[^:/]+://') for x in overrides: if not x['url'] or has_scheme.match(x['url']): continue if not x['root'].endswith('/'): x['root'] += '/' occur_pattern = '''["|']({0}.*?)["|']''' occurences = re.findall(occur_pattern.format(x['url']), content) occurences = list(set(occurences)) # Remove dups for occur in occurences: content = content.replace(occur, pathname2fileurl(x['root']) + occur[len(x['url']):]) return content
[ "Convert", "all", "MEDIA", "files", "into", "a", "file", ":", "//", "URL", "paths", "in", "order", "to", "correctly", "get", "it", "displayed", "in", "PDFs", "." ]
incuna/django-wkhtmltopdf
python
https://github.com/incuna/django-wkhtmltopdf/blob/4e73f604c48f7f449c916c4257a72af59517322c/wkhtmltopdf/utils.py#L278-L308
[ "def", "make_absolute_paths", "(", "content", ")", ":", "overrides", "=", "[", "{", "'root'", ":", "settings", ".", "MEDIA_ROOT", ",", "'url'", ":", "settings", ".", "MEDIA_URL", ",", "}", ",", "{", "'root'", ":", "settings", ".", "STATIC_ROOT", ",", "'url'", ":", "settings", ".", "STATIC_URL", ",", "}", "]", "has_scheme", "=", "re", ".", "compile", "(", "r'^[^:/]+://'", ")", "for", "x", "in", "overrides", ":", "if", "not", "x", "[", "'url'", "]", "or", "has_scheme", ".", "match", "(", "x", "[", "'url'", "]", ")", ":", "continue", "if", "not", "x", "[", "'root'", "]", ".", "endswith", "(", "'/'", ")", ":", "x", "[", "'root'", "]", "+=", "'/'", "occur_pattern", "=", "'''[\"|']({0}.*?)[\"|']'''", "occurences", "=", "re", ".", "findall", "(", "occur_pattern", ".", "format", "(", "x", "[", "'url'", "]", ")", ",", "content", ")", "occurences", "=", "list", "(", "set", "(", "occurences", ")", ")", "# Remove dups", "for", "occur", "in", "occurences", ":", "content", "=", "content", ".", "replace", "(", "occur", ",", "pathname2fileurl", "(", "x", "[", "'root'", "]", ")", "+", "occur", "[", "len", "(", "x", "[", "'url'", "]", ")", ":", "]", ")", "return", "content" ]
4e73f604c48f7f449c916c4257a72af59517322c
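Roughly what the substitution does, assuming settings.MEDIA_URL = '/media/' and settings.MEDIA_ROOT = '/var/www/media/' (both values invented for this sketch):

html_in = '<img src="/media/logo.png">'
# make_absolute_paths(html_in) would rewrite the quoted URL to an absolute file:// path:
# '<img src="file:///var/www/media/logo.png">'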
test
Grok.match
If text is matched with pattern, return variable names specified (%{pattern:variable name}) in pattern and their corresponding values. If not matched, return None. Custom patterns can be passed in by custom_patterns (pattern name, pattern regular expression pair) or custom_patterns_dir.
pygrok/pygrok.py
def match(self, text): """If text is matched with pattern, return variable names specified(%{pattern:variable name}) in pattern and their corresponding values.If not matched, return None. custom patterns can be passed in by custom_patterns(pattern name, pattern regular expression pair) or custom_patterns_dir. """ match_obj = None if self.fullmatch: match_obj = self.regex_obj.fullmatch(text) else: match_obj = self.regex_obj.search(text) if match_obj == None: return None matches = match_obj.groupdict() for key,match in matches.items(): try: if self.type_mapper[key] == 'int': matches[key] = int(match) if self.type_mapper[key] == 'float': matches[key] = float(match) except (TypeError, KeyError) as e: pass return matches
def match(self, text): """If text is matched with pattern, return variable names specified(%{pattern:variable name}) in pattern and their corresponding values.If not matched, return None. custom patterns can be passed in by custom_patterns(pattern name, pattern regular expression pair) or custom_patterns_dir. """ match_obj = None if self.fullmatch: match_obj = self.regex_obj.fullmatch(text) else: match_obj = self.regex_obj.search(text) if match_obj == None: return None matches = match_obj.groupdict() for key,match in matches.items(): try: if self.type_mapper[key] == 'int': matches[key] = int(match) if self.type_mapper[key] == 'float': matches[key] = float(match) except (TypeError, KeyError) as e: pass return matches
[ "If", "text", "is", "matched", "with", "pattern", "return", "variable", "names", "specified", "(", "%", "{", "pattern", ":", "variable", "name", "}", ")", "in", "pattern", "and", "their", "corresponding", "values", ".", "If", "not", "matched", "return", "None", ".", "custom", "patterns", "can", "be", "passed", "in", "by", "custom_patterns", "(", "pattern", "name", "pattern", "regular", "expression", "pair", ")", "or", "custom_patterns_dir", "." ]
garyelephant/pygrok
python
https://github.com/garyelephant/pygrok/blob/de9e3f92f5a52f0fc101aaa0f694f52aee6afba8/pygrok/pygrok.py#L33-L57
[ "def", "match", "(", "self", ",", "text", ")", ":", "match_obj", "=", "None", "if", "self", ".", "fullmatch", ":", "match_obj", "=", "self", ".", "regex_obj", ".", "fullmatch", "(", "text", ")", "else", ":", "match_obj", "=", "self", ".", "regex_obj", ".", "search", "(", "text", ")", "if", "match_obj", "==", "None", ":", "return", "None", "matches", "=", "match_obj", ".", "groupdict", "(", ")", "for", "key", ",", "match", "in", "matches", ".", "items", "(", ")", ":", "try", ":", "if", "self", ".", "type_mapper", "[", "key", "]", "==", "'int'", ":", "matches", "[", "key", "]", "=", "int", "(", "match", ")", "if", "self", ".", "type_mapper", "[", "key", "]", "==", "'float'", ":", "matches", "[", "key", "]", "=", "float", "(", "match", ")", "except", "(", "TypeError", ",", "KeyError", ")", "as", "e", ":", "pass", "return", "matches" ]
de9e3f92f5a52f0fc101aaa0f694f52aee6afba8
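A typical use of the matcher, in the style of the pygrok README; the ':int' and ':float' suffixes trigger the type conversion shown above:

from pygrok import Grok

text = 'gary is male, 25 years old and weighs 68.5 kilograms'
pattern = ('%{WORD:name} is %{WORD:gender}, %{NUMBER:age:int} years old '
           'and weighs %{NUMBER:weight:float} kilograms')

grok = Grok(pattern)
print(grok.match(text))
# {'name': 'gary', 'gender': 'male', 'age': 25, 'weight': 68.5}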
test
configure
Sets defaults for ``class Meta`` declarations. Arguments can either be extracted from a `module` (in that case all attributes starting from `prefix` are used): >>> import foo >>> configure(foo) or passed explicitly as keyword arguments: >>> configure(database='foo') .. warning:: Current implementation is by no means thread-safe -- use it wisely.
minimongo/options.py
def configure(module=None, prefix='MONGODB_', **kwargs): """Sets defaults for ``class Meta`` declarations. Arguments can either be extracted from a `module` (in that case all attributes starting from `prefix` are used): >>> import foo >>> configure(foo) or passed explicictly as keyword arguments: >>> configure(database='foo') .. warning:: Current implementation is by no means thread-safe -- use it wisely. """ if module is not None and isinstance(module, types.ModuleType): # Search module for MONGODB_* attributes and converting them # to _Options' values, ex: MONGODB_PORT ==> port. attrs = ((attr.replace(prefix, '').lower(), value) for attr, value in vars(module).items() if attr.startswith(prefix)) _Options._configure(**dict(attrs)) elif kwargs: _Options._configure(**kwargs)
def configure(module=None, prefix='MONGODB_', **kwargs): """Sets defaults for ``class Meta`` declarations. Arguments can either be extracted from a `module` (in that case all attributes starting from `prefix` are used): >>> import foo >>> configure(foo) or passed explicictly as keyword arguments: >>> configure(database='foo') .. warning:: Current implementation is by no means thread-safe -- use it wisely. """ if module is not None and isinstance(module, types.ModuleType): # Search module for MONGODB_* attributes and converting them # to _Options' values, ex: MONGODB_PORT ==> port. attrs = ((attr.replace(prefix, '').lower(), value) for attr, value in vars(module).items() if attr.startswith(prefix)) _Options._configure(**dict(attrs)) elif kwargs: _Options._configure(**kwargs)
[ "Sets", "defaults", "for", "class", "Meta", "declarations", "." ]
slacy/minimongo
python
https://github.com/slacy/minimongo/blob/29f38994831163b17bc625c82258068f1f90efa5/minimongo/options.py#L10-L35
[ "def", "configure", "(", "module", "=", "None", ",", "prefix", "=", "'MONGODB_'", ",", "*", "*", "kwargs", ")", ":", "if", "module", "is", "not", "None", "and", "isinstance", "(", "module", ",", "types", ".", "ModuleType", ")", ":", "# Search module for MONGODB_* attributes and converting them", "# to _Options' values, ex: MONGODB_PORT ==> port.", "attrs", "=", "(", "(", "attr", ".", "replace", "(", "prefix", ",", "''", ")", ".", "lower", "(", ")", ",", "value", ")", "for", "attr", ",", "value", "in", "vars", "(", "module", ")", ".", "items", "(", ")", "if", "attr", ".", "startswith", "(", "prefix", ")", ")", "_Options", ".", "_configure", "(", "*", "*", "dict", "(", "attrs", ")", ")", "elif", "kwargs", ":", "_Options", ".", "_configure", "(", "*", "*", "kwargs", ")" ]
29f38994831163b17bc625c82258068f1f90efa5
test
_Options._configure
Updates class-level defaults for :class:`_Options` container.
minimongo/options.py
def _configure(cls, **defaults): """Updates class-level defaults for :class:`_Options` container.""" for attr in defaults: setattr(cls, attr, defaults[attr])
def _configure(cls, **defaults): """Updates class-level defaults for :class:`_Options` container.""" for attr in defaults: setattr(cls, attr, defaults[attr])
[ "Updates", "class", "-", "level", "defaults", "for", ":", "class", ":", "_Options", "container", "." ]
slacy/minimongo
python
https://github.com/slacy/minimongo/blob/29f38994831163b17bc625c82258068f1f90efa5/minimongo/options.py#L82-L85
[ "def", "_configure", "(", "cls", ",", "*", "*", "defaults", ")", ":", "for", "attr", "in", "defaults", ":", "setattr", "(", "cls", ",", "attr", ",", "defaults", "[", "attr", "]", ")" ]
29f38994831163b17bc625c82258068f1f90efa5
test
to_underscore
Converts a given string from CamelCase to under_score. >>> to_underscore('FooBar') 'foo_bar'
minimongo/model.py
def to_underscore(string): """Converts a given string from CamelCase to under_score. >>> to_underscore('FooBar') 'foo_bar' """ new_string = re.sub(r'([A-Z]+)([A-Z][a-z])', r'\1_\2', string) new_string = re.sub(r'([a-z\d])([A-Z])', r'\1_\2', new_string) return new_string.lower()
def to_underscore(string): """Converts a given string from CamelCase to under_score. >>> to_underscore('FooBar') 'foo_bar' """ new_string = re.sub(r'([A-Z]+)([A-Z][a-z])', r'\1_\2', string) new_string = re.sub(r'([a-z\d])([A-Z])', r'\1_\2', new_string) return new_string.lower()
[ "Converts", "a", "given", "string", "from", "CamelCase", "to", "under_score", "." ]
slacy/minimongo
python
https://github.com/slacy/minimongo/blob/29f38994831163b17bc625c82258068f1f90efa5/minimongo/model.py#L241-L249
[ "def", "to_underscore", "(", "string", ")", ":", "new_string", "=", "re", ".", "sub", "(", "r'([A-Z]+)([A-Z][a-z])'", ",", "r'\\1_\\2'", ",", "string", ")", "new_string", "=", "re", ".", "sub", "(", "r'([a-z\\d])([A-Z])'", ",", "r'\\1_\\2'", ",", "new_string", ")", "return", "new_string", ".", "lower", "(", ")" ]
29f38994831163b17bc625c82258068f1f90efa5
test
ModelBase.auto_index
Builds all indices, listed in model's Meta class. >>> class SomeModel(Model) ... class Meta: ... indices = ( ... Index('foo'), ... ) .. note:: this will result in calls to :meth:`pymongo.collection.Collection.ensure_index` method at import time, so import all your models up front.
minimongo/model.py
def auto_index(mcs): """Builds all indices, listed in model's Meta class. >>> class SomeModel(Model) ... class Meta: ... indices = ( ... Index('foo'), ... ) .. note:: this will result in calls to :meth:`pymongo.collection.Collection.ensure_index` method at import time, so import all your models up front. """ for index in mcs._meta.indices: index.ensure(mcs.collection)
def auto_index(mcs): """Builds all indices, listed in model's Meta class. >>> class SomeModel(Model) ... class Meta: ... indices = ( ... Index('foo'), ... ) .. note:: this will result in calls to :meth:`pymongo.collection.Collection.ensure_index` method at import time, so import all your models up front. """ for index in mcs._meta.indices: index.ensure(mcs.collection)
[ "Builds", "all", "indices", "listed", "in", "model", "s", "Meta", "class", "." ]
slacy/minimongo
python
https://github.com/slacy/minimongo/blob/29f38994831163b17bc625c82258068f1f90efa5/minimongo/model.py#L83-L98
[ "def", "auto_index", "(", "mcs", ")", ":", "for", "index", "in", "mcs", ".", "_meta", ".", "indices", ":", "index", ".", "ensure", "(", "mcs", ".", "collection", ")" ]
29f38994831163b17bc625c82258068f1f90efa5
test
Collection.find
Same as :meth:`pymongo.collection.Collection.find`, except it returns the right document class.
minimongo/collection.py
def find(self, *args, **kwargs): """Same as :meth:`pymongo.collection.Collection.find`, except it returns the right document class. """ return Cursor(self, *args, wrap=self.document_class, **kwargs)
def find(self, *args, **kwargs): """Same as :meth:`pymongo.collection.Collection.find`, except it returns the right document class. """ return Cursor(self, *args, wrap=self.document_class, **kwargs)
[ "Same", "as", ":", "meth", ":", "pymongo", ".", "collection", ".", "Collection", ".", "find", "except", "it", "returns", "the", "right", "document", "class", "." ]
slacy/minimongo
python
https://github.com/slacy/minimongo/blob/29f38994831163b17bc625c82258068f1f90efa5/minimongo/collection.py#L44-L48
[ "def", "find", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "Cursor", "(", "self", ",", "*", "args", ",", "wrap", "=", "self", ".", "document_class", ",", "*", "*", "kwargs", ")" ]
29f38994831163b17bc625c82258068f1f90efa5
test
Collection.find_one
Same as :meth:`pymongo.collection.Collection.find_one`, except it returns the right document class.
minimongo/collection.py
def find_one(self, *args, **kwargs): """Same as :meth:`pymongo.collection.Collection.find_one`, except it returns the right document class. """ data = super(Collection, self).find_one(*args, **kwargs) if data: return self.document_class(data) return None
def find_one(self, *args, **kwargs): """Same as :meth:`pymongo.collection.Collection.find_one`, except it returns the right document class. """ data = super(Collection, self).find_one(*args, **kwargs) if data: return self.document_class(data) return None
[ "Same", "as", ":", "meth", ":", "pymongo", ".", "collection", ".", "Collection", ".", "find_one", "except", "it", "returns", "the", "right", "document", "class", "." ]
slacy/minimongo
python
https://github.com/slacy/minimongo/blob/29f38994831163b17bc625c82258068f1f90efa5/minimongo/collection.py#L50-L57
[ "def", "find_one", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "data", "=", "super", "(", "Collection", ",", "self", ")", ".", "find_one", "(", "*", "args", ",", "*", "*", "kwargs", ")", "if", "data", ":", "return", "self", ".", "document_class", "(", "data", ")", "return", "None" ]
29f38994831163b17bc625c82258068f1f90efa5
test
CsvParser.parse_file
Load and parse a .csv file
pricedb/csv.py
def parse_file(self, file_path, currency) -> List[PriceModel]: """ Load and parse a .csv file """ # load file # read csv into memory? contents = self.load_file(file_path) prices = [] # parse price elements for line in contents: price = self.parse_line(line) assert isinstance(price, PriceModel) price.currency = currency prices.append(price) return prices
def parse_file(self, file_path, currency) -> List[PriceModel]: """ Load and parse a .csv file """ # load file # read csv into memory? contents = self.load_file(file_path) prices = [] # parse price elements for line in contents: price = self.parse_line(line) assert isinstance(price, PriceModel) price.currency = currency prices.append(price) return prices
[ "Load", "and", "parse", "a", ".", "csv", "file" ]
MisterY/price-database
python
https://github.com/MisterY/price-database/blob/b4fd366b7763891c690fe3000b8840e656da023e/pricedb/csv.py#L28-L42
[ "def", "parse_file", "(", "self", ",", "file_path", ",", "currency", ")", "->", "List", "[", "PriceModel", "]", ":", "# load file", "# read csv into memory?", "contents", "=", "self", ".", "load_file", "(", "file_path", ")", "prices", "=", "[", "]", "# parse price elements", "for", "line", "in", "contents", ":", "price", "=", "self", ".", "parse_line", "(", "line", ")", "assert", "isinstance", "(", "price", ",", "PriceModel", ")", "price", ".", "currency", "=", "currency", "prices", ".", "append", "(", "price", ")", "return", "prices" ]
b4fd366b7763891c690fe3000b8840e656da023e
test
CsvParser.load_file
Loads the content of the text file
pricedb/csv.py
def load_file(self, file_path) -> List[str]: """ Loads the content of the text file """ content = [] content = read_lines_from_file(file_path) return content
def load_file(self, file_path) -> List[str]: """ Loads the content of the text file """ content = [] content = read_lines_from_file(file_path) return content
[ "Loads", "the", "content", "of", "the", "text", "file" ]
MisterY/price-database
python
https://github.com/MisterY/price-database/blob/b4fd366b7763891c690fe3000b8840e656da023e/pricedb/csv.py#L44-L48
[ "def", "load_file", "(", "self", ",", "file_path", ")", "->", "List", "[", "str", "]", ":", "content", "=", "[", "]", "content", "=", "read_lines_from_file", "(", "file_path", ")", "return", "content" ]
b4fd366b7763891c690fe3000b8840e656da023e
test
CsvParser.parse_line
Parse a CSV line into a price element
pricedb/csv.py
def parse_line(self, line: str) -> PriceModel: """ Parse a CSV line into a price element """ line = line.rstrip() parts = line.split(',') result = PriceModel() # symbol result.symbol = self.translate_symbol(parts[0]) # value result.value = Decimal(parts[1]) # date date_str = parts[2] date_str = date_str.replace('"', '') date_parts = date_str.split('/') year_str = date_parts[2] month_str = date_parts[1] day_str = date_parts[0] logging.debug(f"parsing {date_parts} into date") result.datetime = datetime(int(year_str), int(month_str), int(day_str)) return result
def parse_line(self, line: str) -> PriceModel: """ Parse a CSV line into a price element """ line = line.rstrip() parts = line.split(',') result = PriceModel() # symbol result.symbol = self.translate_symbol(parts[0]) # value result.value = Decimal(parts[1]) # date date_str = parts[2] date_str = date_str.replace('"', '') date_parts = date_str.split('/') year_str = date_parts[2] month_str = date_parts[1] day_str = date_parts[0] logging.debug(f"parsing {date_parts} into date") result.datetime = datetime(int(year_str), int(month_str), int(day_str)) return result
[ "Parse", "a", "CSV", "line", "into", "a", "price", "element" ]
MisterY/price-database
python
https://github.com/MisterY/price-database/blob/b4fd366b7763891c690fe3000b8840e656da023e/pricedb/csv.py#L50-L75
[ "def", "parse_line", "(", "self", ",", "line", ":", "str", ")", "->", "PriceModel", ":", "line", "=", "line", ".", "rstrip", "(", ")", "parts", "=", "line", ".", "split", "(", "','", ")", "result", "=", "PriceModel", "(", ")", "# symbol", "result", ".", "symbol", "=", "self", ".", "translate_symbol", "(", "parts", "[", "0", "]", ")", "# value", "result", ".", "value", "=", "Decimal", "(", "parts", "[", "1", "]", ")", "# date", "date_str", "=", "parts", "[", "2", "]", "date_str", "=", "date_str", ".", "replace", "(", "'\"'", ",", "''", ")", "date_parts", "=", "date_str", ".", "split", "(", "'/'", ")", "year_str", "=", "date_parts", "[", "2", "]", "month_str", "=", "date_parts", "[", "1", "]", "day_str", "=", "date_parts", "[", "0", "]", "logging", ".", "debug", "(", "f\"parsing {date_parts} into date\"", ")", "result", ".", "datetime", "=", "datetime", "(", "int", "(", "year_str", ")", ",", "int", "(", "month_str", ")", ",", "int", "(", "day_str", ")", ")", "return", "result" ]
b4fd366b7763891c690fe3000b8840e656da023e
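From the indexing in parse_line above, each CSV line is expected as symbol, value, then a quoted day/month/year date. A standalone sketch of that layout; the sample line is invented for illustration:

from datetime import datetime
from decimal import Decimal

line = 'VEUR,27.35,"05/01/2018"'  # hypothetical input line
symbol, value, date_str = line.rstrip().split(',')
value = Decimal(value)
day, month, year = date_str.replace('"', '').split('/')
parsed = datetime(int(year), int(month), int(day))
print(symbol, value, parsed)  # VEUR 27.35 2018-01-05 00:00:00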
test
CsvParser.translate_symbol
translate the incoming symbol into locally-used
pricedb/csv.py
def translate_symbol(self, in_symbol: str) -> str: """ translate the incoming symbol into locally-used """ # read all mappings from the db if not self.symbol_maps: self.__load_symbol_maps() # translate the incoming symbol result = self.symbol_maps[in_symbol] if in_symbol in self.symbol_maps else in_symbol return result
def translate_symbol(self, in_symbol: str) -> str: """ translate the incoming symbol into locally-used """ # read all mappings from the db if not self.symbol_maps: self.__load_symbol_maps() # translate the incoming symbol result = self.symbol_maps[in_symbol] if in_symbol in self.symbol_maps else in_symbol return result
[ "translate", "the", "incoming", "symbol", "into", "locally", "-", "used" ]
MisterY/price-database
python
https://github.com/MisterY/price-database/blob/b4fd366b7763891c690fe3000b8840e656da023e/pricedb/csv.py#L77-L85
[ "def", "translate_symbol", "(", "self", ",", "in_symbol", ":", "str", ")", "->", "str", ":", "# read all mappings from the db", "if", "not", "self", ".", "symbol_maps", ":", "self", ".", "__load_symbol_maps", "(", ")", "# translate the incoming symbol", "result", "=", "self", ".", "symbol_maps", "[", "in_symbol", "]", "if", "in_symbol", "in", "self", ".", "symbol_maps", "else", "in_symbol", "return", "result" ]
b4fd366b7763891c690fe3000b8840e656da023e
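The symbol translation above is a plain dictionary lookup with a pass-through fallback; a self-contained illustration (the mapping below is invented):

symbol_maps = {"UCITS_ETF": "VEUR"}  # hypothetical in-symbol -> out-symbol map

def translate(in_symbol, maps=symbol_maps):
    # Fall back to the incoming symbol when no mapping exists.
    return maps[in_symbol] if in_symbol in maps else in_symbol

print(translate("UCITS_ETF"))  # VEUR
print(translate("VTI"))        # VTI (unmapped, returned unchanged)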
test
CsvParser.__load_symbol_maps
Loads all symbol maps from db
pricedb/csv.py
def __load_symbol_maps(self): """ Loads all symbol maps from db """ repo = SymbolMapRepository(self.__get_session()) all_maps = repo.get_all() self.symbol_maps = {} for item in all_maps: self.symbol_maps[item.in_symbol] = item.out_symbol
def __load_symbol_maps(self): """ Loads all symbol maps from db """ repo = SymbolMapRepository(self.__get_session()) all_maps = repo.get_all() self.symbol_maps = {} for item in all_maps: self.symbol_maps[item.in_symbol] = item.out_symbol
[ "Loads", "all", "symbol", "maps", "from", "db" ]
MisterY/price-database
python
https://github.com/MisterY/price-database/blob/b4fd366b7763891c690fe3000b8840e656da023e/pricedb/csv.py#L87-L93
[ "def", "__load_symbol_maps", "(", "self", ")", ":", "repo", "=", "SymbolMapRepository", "(", "self", ".", "__get_session", "(", ")", ")", "all_maps", "=", "repo", ".", "get_all", "(", ")", "self", ".", "symbol_maps", "=", "{", "}", "for", "item", "in", "all_maps", ":", "self", ".", "symbol_maps", "[", "item", ".", "in_symbol", "]", "=", "item", ".", "out_symbol" ]
b4fd366b7763891c690fe3000b8840e656da023e
test
CsvParser.__get_session
Reuses the same db session
pricedb/csv.py
def __get_session(self): """ Reuses the same db session """ if not self.session: self.session = dal.get_default_session() return self.session
def __get_session(self): """ Reuses the same db session """ if not self.session: self.session = dal.get_default_session() return self.session
[ "Reuses", "the", "same", "db", "session" ]
MisterY/price-database
python
https://github.com/MisterY/price-database/blob/b4fd366b7763891c690fe3000b8840e656da023e/pricedb/csv.py#L95-L99
[ "def", "__get_session", "(", "self", ")", ":", "if", "not", "self", ".", "session", ":", "self", ".", "session", "=", "dal", ".", "get_default_session", "(", ")", "return", "self", ".", "session" ]
b4fd366b7763891c690fe3000b8840e656da023e
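__get_session above lazily creates one session object and then reuses it; the caching pattern in isolation, with a stand-in factory instead of dal.get_default_session:

class Holder:
    def __init__(self):
        self.session = None

    def get_session(self):
        # Create on first use, reuse afterwards (the real code calls dal.get_default_session here).
        if not self.session:
            self.session = object()  # stand-in for the real session
        return self.session

h = Holder()
print(h.get_session() is h.get_session())  # True -> the same session is reused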
test
add
Add individual price
pricedb/cli.py
def add(symbol: str, date, value, currency: str): """ Add individual price """ symbol = symbol.upper() currency = currency.upper() app = PriceDbApplication() price = PriceModel() # security = SecuritySymbol("", "") price.symbol.parse(symbol) # price.symbol.mnemonic = price.symbol.mnemonic.upper() # date_str = f"{date}" # date_format = "%Y-%m-%d" # if time: # date_str = f"{date_str}T{time}" # date_format += "T%H:%M:%S" # datum.from_iso_date_string(date) # price.datetime = datetime.strptime(date_str, date_format) price.datum.from_iso_date_string(date) price.value = Decimal(value) price.currency = currency app.add_price(price) app.save() click.echo("Price added.")
def add(symbol: str, date, value, currency: str): """ Add individual price """ symbol = symbol.upper() currency = currency.upper() app = PriceDbApplication() price = PriceModel() # security = SecuritySymbol("", "") price.symbol.parse(symbol) # price.symbol.mnemonic = price.symbol.mnemonic.upper() # date_str = f"{date}" # date_format = "%Y-%m-%d" # if time: # date_str = f"{date_str}T{time}" # date_format += "T%H:%M:%S" # datum.from_iso_date_string(date) # price.datetime = datetime.strptime(date_str, date_format) price.datum.from_iso_date_string(date) price.value = Decimal(value) price.currency = currency app.add_price(price) app.save() click.echo("Price added.")
[ "Add", "individual", "price" ]
MisterY/price-database
python
https://github.com/MisterY/price-database/blob/b4fd366b7763891c690fe3000b8840e656da023e/pricedb/cli.py#L30-L56
[ "def", "add", "(", "symbol", ":", "str", ",", "date", ",", "value", ",", "currency", ":", "str", ")", ":", "symbol", "=", "symbol", ".", "upper", "(", ")", "currency", "=", "currency", ".", "upper", "(", ")", "app", "=", "PriceDbApplication", "(", ")", "price", "=", "PriceModel", "(", ")", "# security = SecuritySymbol(\"\", \"\")", "price", ".", "symbol", ".", "parse", "(", "symbol", ")", "# price.symbol.mnemonic = price.symbol.mnemonic.upper()", "# date_str = f\"{date}\"", "# date_format = \"%Y-%m-%d\"", "# if time:", "# date_str = f\"{date_str}T{time}\"", "# date_format += \"T%H:%M:%S\"", "# datum.from_iso_date_string(date)", "# price.datetime = datetime.strptime(date_str, date_format)", "price", ".", "datum", ".", "from_iso_date_string", "(", "date", ")", "price", ".", "value", "=", "Decimal", "(", "value", ")", "price", ".", "currency", "=", "currency", "app", ".", "add_price", "(", "price", ")", "app", ".", "save", "(", ")", "click", ".", "echo", "(", "\"Price added.\"", ")" ]
b4fd366b7763891c690fe3000b8840e656da023e
test
import_csv
Import prices from CSV file
pricedb/cli.py
def import_csv(filepath: str, currency: str): """ Import prices from CSV file """ logger.debug(f"currency = {currency}") # auto-convert to uppercase. currency = currency.upper() app = PriceDbApplication() app.logger = logger app.import_prices(filepath, currency)
def import_csv(filepath: str, currency: str): """ Import prices from CSV file """ logger.debug(f"currency = {currency}") # auto-convert to uppercase. currency = currency.upper() app = PriceDbApplication() app.logger = logger app.import_prices(filepath, currency)
[ "Import", "prices", "from", "CSV", "file" ]
MisterY/price-database
python
https://github.com/MisterY/price-database/blob/b4fd366b7763891c690fe3000b8840e656da023e/pricedb/cli.py#L63-L71
[ "def", "import_csv", "(", "filepath", ":", "str", ",", "currency", ":", "str", ")", ":", "logger", ".", "debug", "(", "f\"currency = {currency}\"", ")", "# auto-convert to uppercase.", "currency", "=", "currency", ".", "upper", "(", ")", "app", "=", "PriceDbApplication", "(", ")", "app", ".", "logger", "=", "logger", "app", ".", "import_prices", "(", "filepath", ",", "currency", ")" ]
b4fd366b7763891c690fe3000b8840e656da023e
test
last
displays last price, for symbol if provided
pricedb/cli.py
def last(symbol: str): """ displays last price, for symbol if provided """ app = PriceDbApplication() # convert to uppercase if symbol: symbol = symbol.upper() # extract namespace sec_symbol = SecuritySymbol("", "") sec_symbol.parse(symbol) latest = app.get_latest_price(sec_symbol) assert isinstance(latest, PriceModel) print(f"{latest}") else: # Show the latest prices available for all securities. latest = app.get_latest_prices() for price in latest: print(f"{price}")
def last(symbol: str): """ displays last price, for symbol if provided """ app = PriceDbApplication() # convert to uppercase if symbol: symbol = symbol.upper() # extract namespace sec_symbol = SecuritySymbol("", "") sec_symbol.parse(symbol) latest = app.get_latest_price(sec_symbol) assert isinstance(latest, PriceModel) print(f"{latest}") else: # Show the latest prices available for all securities. latest = app.get_latest_prices() for price in latest: print(f"{price}")
[ "displays", "last", "price", "for", "symbol", "if", "provided" ]
MisterY/price-database
python
https://github.com/MisterY/price-database/blob/b4fd366b7763891c690fe3000b8840e656da023e/pricedb/cli.py#L76-L94
[ "def", "last", "(", "symbol", ":", "str", ")", ":", "app", "=", "PriceDbApplication", "(", ")", "# convert to uppercase", "if", "symbol", ":", "symbol", "=", "symbol", ".", "upper", "(", ")", "# extract namespace", "sec_symbol", "=", "SecuritySymbol", "(", "\"\"", ",", "\"\"", ")", "sec_symbol", ".", "parse", "(", "symbol", ")", "latest", "=", "app", ".", "get_latest_price", "(", "sec_symbol", ")", "assert", "isinstance", "(", "latest", ",", "PriceModel", ")", "print", "(", "f\"{latest}\"", ")", "else", ":", "# Show the latest prices available for all securities.", "latest", "=", "app", ".", "get_latest_prices", "(", ")", "for", "price", "in", "latest", ":", "print", "(", "f\"{price}\"", ")" ]
b4fd366b7763891c690fe3000b8840e656da023e
test
list_prices
Display all prices
pricedb/cli.py
def list_prices(date, currency, last): """ Display all prices """ app = PriceDbApplication() app.logger = logger if last: # fetch only the last prices prices = app.get_latest_prices() else: prices = app.get_prices(date, currency) for price in prices: print(price) print(f"{len(prices)} records found.")
def list_prices(date, currency, last): """ Display all prices """ app = PriceDbApplication() app.logger = logger if last: # fetch only the last prices prices = app.get_latest_prices() else: prices = app.get_prices(date, currency) for price in prices: print(price) print(f"{len(prices)} records found.")
[ "Display", "all", "prices" ]
MisterY/price-database
python
https://github.com/MisterY/price-database/blob/b4fd366b7763891c690fe3000b8840e656da023e/pricedb/cli.py#L102-L115
[ "def", "list_prices", "(", "date", ",", "currency", ",", "last", ")", ":", "app", "=", "PriceDbApplication", "(", ")", "app", ".", "logger", "=", "logger", "if", "last", ":", "# fetch only the last prices", "prices", "=", "app", ".", "get_latest_prices", "(", ")", "else", ":", "prices", "=", "app", ".", "get_prices", "(", "date", ",", "currency", ")", "for", "price", "in", "prices", ":", "print", "(", "price", ")", "print", "(", "f\"{len(prices)} records found.\"", ")" ]
b4fd366b7763891c690fe3000b8840e656da023e
test
download
Download the latest prices
pricedb/cli.py
def download(ctx, help: bool, symbol: str, namespace: str, agent: str, currency: str): """ Download the latest prices """ if help: click.echo(ctx.get_help()) ctx.exit() app = PriceDbApplication() app.logger = logger if currency: currency = currency.strip() currency = currency.upper() # Otherwise download the prices for securities listed in the database. app.download_prices(currency=currency, agent=agent, symbol=symbol, namespace=namespace)
def download(ctx, help: bool, symbol: str, namespace: str, agent: str, currency: str): """ Download the latest prices """ if help: click.echo(ctx.get_help()) ctx.exit() app = PriceDbApplication() app.logger = logger if currency: currency = currency.strip() currency = currency.upper() # Otherwise download the prices for securities listed in the database. app.download_prices(currency=currency, agent=agent, symbol=symbol, namespace=namespace)
[ "Download", "the", "latest", "prices" ]
MisterY/price-database
python
https://github.com/MisterY/price-database/blob/b4fd366b7763891c690fe3000b8840e656da023e/pricedb/cli.py#L127-L141
[ "def", "download", "(", "ctx", ",", "help", ":", "bool", ",", "symbol", ":", "str", ",", "namespace", ":", "str", ",", "agent", ":", "str", ",", "currency", ":", "str", ")", ":", "if", "help", ":", "click", ".", "echo", "(", "ctx", ".", "get_help", "(", ")", ")", "ctx", ".", "exit", "(", ")", "app", "=", "PriceDbApplication", "(", ")", "app", ".", "logger", "=", "logger", "if", "currency", ":", "currency", "=", "currency", ".", "strip", "(", ")", "currency", "=", "currency", ".", "upper", "(", ")", "# Otherwise download the prices for securities listed in the database.", "app", ".", "download_prices", "(", "currency", "=", "currency", ",", "agent", "=", "agent", ",", "symbol", "=", "symbol", ",", "namespace", "=", "namespace", ")" ]
b4fd366b7763891c690fe3000b8840e656da023e
test
prune
Delete old prices, leaving just the last.
pricedb/cli.py
def prune(symbol: str, all: str): """ Delete old prices, leaving just the last. """ app = PriceDbApplication() app.logger = logger count = 0 if symbol is not None: sec_symbol = SecuritySymbol("", "") sec_symbol.parse(symbol) deleted = app.prune(sec_symbol) if deleted: count = 1 else: count = app.prune_all() print(f"Removed {count} old price entries.")
def prune(symbol: str, all: str): """ Delete old prices, leaving just the last. """ app = PriceDbApplication() app.logger = logger count = 0 if symbol is not None: sec_symbol = SecuritySymbol("", "") sec_symbol.parse(symbol) deleted = app.prune(sec_symbol) if deleted: count = 1 else: count = app.prune_all() print(f"Removed {count} old price entries.")
[ "Delete", "old", "prices", "leaving", "just", "the", "last", "." ]
MisterY/price-database
python
https://github.com/MisterY/price-database/blob/b4fd366b7763891c690fe3000b8840e656da023e/pricedb/cli.py#L148-L164
[ "def", "prune", "(", "symbol", ":", "str", ",", "all", ":", "str", ")", ":", "app", "=", "PriceDbApplication", "(", ")", "app", ".", "logger", "=", "logger", "count", "=", "0", "if", "symbol", "is", "not", "None", ":", "sec_symbol", "=", "SecuritySymbol", "(", "\"\"", ",", "\"\"", ")", "sec_symbol", ".", "parse", "(", "symbol", ")", "deleted", "=", "app", ".", "prune", "(", "sec_symbol", ")", "if", "deleted", ":", "count", "=", "1", "else", ":", "count", "=", "app", ".", "prune_all", "(", ")", "print", "(", "f\"Removed {count} old price entries.\"", ")" ]
b4fd366b7763891c690fe3000b8840e656da023e
test
get_default_session
Return the default session. The path is read from the default config.
pricedb/dal.py
def get_default_session(): """ Return the default session. The path is read from the default config. """ from .config import Config, ConfigKeys db_path = Config().get(ConfigKeys.price_database) if not db_path: raise ValueError("Price database not set in the configuration file!") return get_session(db_path)
def get_default_session(): """ Return the default session. The path is read from the default config. """ from .config import Config, ConfigKeys db_path = Config().get(ConfigKeys.price_database) if not db_path: raise ValueError("Price database not set in the configuration file!") return get_session(db_path)
[ "Return", "the", "default", "session", ".", "The", "path", "is", "read", "from", "the", "default", "config", "." ]
MisterY/price-database
python
https://github.com/MisterY/price-database/blob/b4fd366b7763891c690fe3000b8840e656da023e/pricedb/dal.py#L68-L75
[ "def", "get_default_session", "(", ")", ":", "from", ".", "config", "import", "Config", ",", "ConfigKeys", "db_path", "=", "Config", "(", ")", ".", "get", "(", "ConfigKeys", ".", "price_database", ")", "if", "not", "db_path", ":", "raise", "ValueError", "(", "\"Price database not set in the configuration file!\"", ")", "return", "get_session", "(", "db_path", ")" ]
b4fd366b7763891c690fe3000b8840e656da023e
test
add_map
Creates a symbol mapping
pricedb/map_cli.py
def add_map(incoming, outgoing): """ Creates a symbol mapping """ db_path = Config().get(ConfigKeys.pricedb_path) session = get_session(db_path) new_map = SymbolMap() new_map.in_symbol = incoming new_map.out_symbol = outgoing session.add(new_map) session.commit() click.echo("Record saved.")
def add_map(incoming, outgoing): """ Creates a symbol mapping """ db_path = Config().get(ConfigKeys.pricedb_path) session = get_session(db_path) new_map = SymbolMap() new_map.in_symbol = incoming new_map.out_symbol = outgoing session.add(new_map) session.commit() click.echo("Record saved.")
[ "Creates", "a", "symbol", "mapping" ]
MisterY/price-database
python
https://github.com/MisterY/price-database/blob/b4fd366b7763891c690fe3000b8840e656da023e/pricedb/map_cli.py#L14-L25
[ "def", "add_map", "(", "incoming", ",", "outgoing", ")", ":", "db_path", "=", "Config", "(", ")", ".", "get", "(", "ConfigKeys", ".", "pricedb_path", ")", "session", "=", "get_session", "(", "db_path", ")", "new_map", "=", "SymbolMap", "(", ")", "new_map", ".", "in_symbol", "=", "incoming", "new_map", ".", "out_symbol", "=", "outgoing", "session", ".", "add", "(", "new_map", ")", "session", ".", "commit", "(", ")", "click", ".", "echo", "(", "\"Record saved.\"", ")" ]
b4fd366b7763891c690fe3000b8840e656da023e
test
list_maps
Displays all symbol maps
pricedb/map_cli.py
def list_maps(): """ Displays all symbol maps """ db_path = Config().get(ConfigKeys.price_database) session = get_session(db_path) maps = session.query(SymbolMap).all() for item in maps: click.echo(item)
def list_maps(): """ Displays all symbol maps """ db_path = Config().get(ConfigKeys.price_database) session = get_session(db_path) maps = session.query(SymbolMap).all() for item in maps: click.echo(item)
[ "Displays", "all", "symbol", "maps" ]
MisterY/price-database
python
https://github.com/MisterY/price-database/blob/b4fd366b7763891c690fe3000b8840e656da023e/pricedb/map_cli.py#L28-L35
[ "def", "list_maps", "(", ")", ":", "db_path", "=", "Config", "(", ")", ".", "get", "(", "ConfigKeys", ".", "price_database", ")", "session", "=", "get_session", "(", "db_path", ")", "maps", "=", "session", ".", "query", "(", "SymbolMap", ")", ".", "all", "(", ")", "for", "item", "in", "maps", ":", "click", ".", "echo", "(", "item", ")" ]
b4fd366b7763891c690fe3000b8840e656da023e
test
SymbolMapRepository.get_by_id
Finds the map by in-symbol
pricedb/repositories.py
def get_by_id(self, symbol: str) -> SymbolMap: """ Finds the map by in-symbol """ return self.query.filter(SymbolMap.in_symbol == symbol).first()
def get_by_id(self, symbol: str) -> SymbolMap: """ Finds the map by in-symbol """ return self.query.filter(SymbolMap.in_symbol == symbol).first()
[ "Finds", "the", "map", "by", "in", "-", "symbol" ]
MisterY/price-database
python
https://github.com/MisterY/price-database/blob/b4fd366b7763891c690fe3000b8840e656da023e/pricedb/repositories.py#L11-L13
[ "def", "get_by_id", "(", "self", ",", "symbol", ":", "str", ")", "->", "SymbolMap", ":", "return", "self", ".", "query", ".", "filter", "(", "SymbolMap", ".", "in_symbol", "==", "symbol", ")", ".", "first", "(", ")" ]
b4fd366b7763891c690fe3000b8840e656da023e
test
read_lines_from_file
Read text lines from a file
pricedb/utils.py
def read_lines_from_file(file_path: str) -> List[str]: """ Read text lines from a file """ # check if the file exists? with open(file_path) as csv_file: content = csv_file.readlines() return content
def read_lines_from_file(file_path: str) -> List[str]: """ Read text lines from a file """ # check if the file exists? with open(file_path) as csv_file: content = csv_file.readlines() return content
[ "Read", "text", "lines", "from", "a", "file" ]
MisterY/price-database
python
https://github.com/MisterY/price-database/blob/b4fd366b7763891c690fe3000b8840e656da023e/pricedb/utils.py#L5-L10
[ "def", "read_lines_from_file", "(", "file_path", ":", "str", ")", "->", "List", "[", "str", "]", ":", "# check if the file exists?", "with", "open", "(", "file_path", ")", "as", "csv_file", ":", "content", "=", "csv_file", ".", "readlines", "(", ")", "return", "content" ]
b4fd366b7763891c690fe3000b8840e656da023e
test
PriceMapper.map_entity
Map the price entity
pricedb/mappers.py
def map_entity(self, entity: dal.Price) -> PriceModel: """ Map the price entity """ if not entity: return None result = PriceModel() result.currency = entity.currency # date/time dt_string = entity.date format_string = "%Y-%m-%d" if entity.time: dt_string += f"T{entity.time}" format_string += "T%H:%M:%S" price_datetime = datetime.strptime(dt_string, format_string) result.datum = Datum() result.datum.from_datetime(price_datetime) assert isinstance(result.datum, Datum) #result.namespace = entity.namespace #result.symbol = entity.symbol result.symbol = SecuritySymbol(entity.namespace, entity.symbol) # Value value = Decimal(entity.value) / Decimal(entity.denom) result.value = Decimal(value) return result
def map_entity(self, entity: dal.Price) -> PriceModel: """ Map the price entity """ if not entity: return None result = PriceModel() result.currency = entity.currency # date/time dt_string = entity.date format_string = "%Y-%m-%d" if entity.time: dt_string += f"T{entity.time}" format_string += "T%H:%M:%S" price_datetime = datetime.strptime(dt_string, format_string) result.datum = Datum() result.datum.from_datetime(price_datetime) assert isinstance(result.datum, Datum) #result.namespace = entity.namespace #result.symbol = entity.symbol result.symbol = SecuritySymbol(entity.namespace, entity.symbol) # Value value = Decimal(entity.value) / Decimal(entity.denom) result.value = Decimal(value) return result
[ "Map", "the", "price", "entity" ]
MisterY/price-database
python
https://github.com/MisterY/price-database/blob/b4fd366b7763891c690fe3000b8840e656da023e/pricedb/mappers.py#L15-L42
[ "def", "map_entity", "(", "self", ",", "entity", ":", "dal", ".", "Price", ")", "->", "PriceModel", ":", "if", "not", "entity", ":", "return", "None", "result", "=", "PriceModel", "(", ")", "result", ".", "currency", "=", "entity", ".", "currency", "# date/time", "dt_string", "=", "entity", ".", "date", "format_string", "=", "\"%Y-%m-%d\"", "if", "entity", ".", "time", ":", "dt_string", "+=", "f\"T{entity.time}\"", "format_string", "+=", "\"T%H:%M:%S\"", "price_datetime", "=", "datetime", ".", "strptime", "(", "dt_string", ",", "format_string", ")", "result", ".", "datum", "=", "Datum", "(", ")", "result", ".", "datum", ".", "from_datetime", "(", "price_datetime", ")", "assert", "isinstance", "(", "result", ".", "datum", ",", "Datum", ")", "#result.namespace = entity.namespace", "#result.symbol = entity.symbol", "result", ".", "symbol", "=", "SecuritySymbol", "(", "entity", ".", "namespace", ",", "entity", ".", "symbol", ")", "# Value", "value", "=", "Decimal", "(", "entity", ".", "value", ")", "/", "Decimal", "(", "entity", ".", "denom", ")", "result", ".", "value", "=", "Decimal", "(", "value", ")", "return", "result" ]
b4fd366b7763891c690fe3000b8840e656da023e
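map_entity above rebuilds a datetime from separate date and time columns; a runnable sketch of just that step, using made-up column values:

from datetime import datetime

entity_date, entity_time = "2018-01-05", "16:30:00"  # hypothetical stored columns
dt_string, format_string = entity_date, "%Y-%m-%d"
if entity_time:
    dt_string += f"T{entity_time}"
    format_string += "T%H:%M:%S"
print(datetime.strptime(dt_string, format_string))  # 2018-01-05 16:30:00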
test
PriceMapper.map_model
Parse into the Price entity, ready for saving
pricedb/mappers.py
def map_model(self, model: PriceModel) -> Price: """ Parse into the Price entity, ready for saving """ # assert isinstance(model, PriceModel) assert isinstance(model.symbol, SecuritySymbol) assert isinstance(model.datum, Datum) entity = Price() # Format date as ISO string date_iso = f"{model.datum.value.year}-{model.datum.value.month:02d}-{model.datum.value.day:02d}" entity.date = date_iso entity.time = f"{model.datum.value.hour:02d}:{model.datum.value.minute:02d}:{model.datum.value.second:02d}" # Symbol # properly mapped symbols have a namespace, except for the US markets # TODO check this with .csv import if model.symbol.namespace: entity.namespace = model.symbol.namespace.upper() entity.symbol = model.symbol.mnemonic.upper() assert isinstance(model.value, Decimal) # Find number of decimal places dec_places = abs(model.value.as_tuple().exponent) entity.denom = 10 ** dec_places # Price value entity.value = int(model.value * entity.denom) # Currency entity.currency = model.currency.upper() # self.logger.debug(f"{entity}") return entity
def map_model(self, model: PriceModel) -> Price: """ Parse into the Price entity, ready for saving """ # assert isinstance(model, PriceModel) assert isinstance(model.symbol, SecuritySymbol) assert isinstance(model.datum, Datum) entity = Price() # Format date as ISO string date_iso = f"{model.datum.value.year}-{model.datum.value.month:02d}-{model.datum.value.day:02d}" entity.date = date_iso entity.time = f"{model.datum.value.hour:02d}:{model.datum.value.minute:02d}:{model.datum.value.second:02d}" # Symbol # properly mapped symbols have a namespace, except for the US markets # TODO check this with .csv import if model.symbol.namespace: entity.namespace = model.symbol.namespace.upper() entity.symbol = model.symbol.mnemonic.upper() assert isinstance(model.value, Decimal) # Find number of decimal places dec_places = abs(model.value.as_tuple().exponent) entity.denom = 10 ** dec_places # Price value entity.value = int(model.value * entity.denom) # Currency entity.currency = model.currency.upper() # self.logger.debug(f"{entity}") return entity
[ "Parse", "into", "the", "Price", "entity", "ready", "for", "saving" ]
MisterY/price-database
python
https://github.com/MisterY/price-database/blob/b4fd366b7763891c690fe3000b8840e656da023e/pricedb/mappers.py#L44-L76
[ "def", "map_model", "(", "self", ",", "model", ":", "PriceModel", ")", "->", "Price", ":", "# assert isinstance(model, PriceModel)", "assert", "isinstance", "(", "model", ".", "symbol", ",", "SecuritySymbol", ")", "assert", "isinstance", "(", "model", ".", "datum", ",", "Datum", ")", "entity", "=", "Price", "(", ")", "# Format date as ISO string", "date_iso", "=", "f\"{model.datum.value.year}-{model.datum.value.month:02d}-{model.datum.value.day:02d}\"", "entity", ".", "date", "=", "date_iso", "entity", ".", "time", "=", "f\"{model.datum.value.hour:02d}:{model.datum.value.minute:02d}:{model.datum.value.second:02d}\"", "# Symbol", "# properly mapped symbols have a namespace, except for the US markets", "# TODO check this with .csv import", "if", "model", ".", "symbol", ".", "namespace", ":", "entity", ".", "namespace", "=", "model", ".", "symbol", ".", "namespace", ".", "upper", "(", ")", "entity", ".", "symbol", "=", "model", ".", "symbol", ".", "mnemonic", ".", "upper", "(", ")", "assert", "isinstance", "(", "model", ".", "value", ",", "Decimal", ")", "# Find number of decimal places", "dec_places", "=", "abs", "(", "model", ".", "value", ".", "as_tuple", "(", ")", ".", "exponent", ")", "entity", ".", "denom", "=", "10", "**", "dec_places", "# Price value", "entity", ".", "value", "=", "int", "(", "model", ".", "value", "*", "entity", ".", "denom", ")", "# Currency", "entity", ".", "currency", "=", "model", ".", "currency", ".", "upper", "(", ")", "# self.logger.debug(f\"{entity}\")", "return", "entity" ]
b4fd366b7763891c690fe3000b8840e656da023e
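The value/denom storage used in map_model above keeps each price as an integer numerator over a power-of-ten denominator; a worked example with an arbitrary price:

from decimal import Decimal

value = Decimal("1.2345")                  # arbitrary example price
dec_places = abs(value.as_tuple().exponent)
denom = 10 ** dec_places                   # 10000
numerator = int(value * denom)             # 12345
print(numerator, denom, Decimal(numerator) / Decimal(denom))  # 12345 10000 1.2345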
test
Config.__read_config
Read the config file
pricedb/config.py
def __read_config(self, file_path: str): """ Read the config file """ if not os.path.exists(file_path): raise FileNotFoundError(f"File path not found: {file_path}") # check if file exists if not os.path.isfile(file_path): self.logger.error(f"file not found: {file_path}") raise FileNotFoundError(f"configuration file not found {file_path}") self.config.read(file_path)
def __read_config(self, file_path: str): """ Read the config file """ if not os.path.exists(file_path): raise FileNotFoundError(f"File path not found: {file_path}") # check if file exists if not os.path.isfile(file_path): self.logger.error(f"file not found: {file_path}") raise FileNotFoundError(f"configuration file not found {file_path}") self.config.read(file_path)
[ "Read", "the", "config", "file" ]
MisterY/price-database
python
https://github.com/MisterY/price-database/blob/b4fd366b7763891c690fe3000b8840e656da023e/pricedb/config.py#L52-L61
[ "def", "__read_config", "(", "self", ",", "file_path", ":", "str", ")", ":", "if", "not", "os", ".", "path", ".", "exists", "(", "file_path", ")", ":", "raise", "FileNotFoundError", "(", "f\"File path not found: {file_path}\"", ")", "# check if file exists", "if", "not", "os", ".", "path", ".", "isfile", "(", "file_path", ")", ":", "self", ".", "logger", ".", "error", "(", "f\"file not found: {file_path}\"", ")", "raise", "FileNotFoundError", "(", "f\"configuration file not found {file_path}\"", ")", "self", ".", "config", ".", "read", "(", "file_path", ")" ]
b4fd366b7763891c690fe3000b8840e656da023e
test
Config.__get_config_template_path
gets the default config path from resources
pricedb/config.py
def __get_config_template_path(self) -> str: """ gets the default config path from resources """ filename = resource_filename( Requirement.parse(package_name), template_path + config_filename) return filename
def __get_config_template_path(self) -> str: """ gets the default config path from resources """ filename = resource_filename( Requirement.parse(package_name), template_path + config_filename) return filename
[ "gets", "the", "default", "config", "path", "from", "resources" ]
MisterY/price-database
python
https://github.com/MisterY/price-database/blob/b4fd366b7763891c690fe3000b8840e656da023e/pricedb/config.py#L63-L68
[ "def", "__get_config_template_path", "(", "self", ")", "->", "str", ":", "filename", "=", "resource_filename", "(", "Requirement", ".", "parse", "(", "package_name", ")", ",", "template_path", "+", "config_filename", ")", "return", "filename" ]
b4fd366b7763891c690fe3000b8840e656da023e
test
Config.__create_user_config
Copy the config template into user's directory
pricedb/config.py
def __create_user_config(self): """ Copy the config template into user's directory """ src_path = self.__get_config_template_path() src = os.path.abspath(src_path) if not os.path.exists(src): message = f"Config template not found {src}" self.logger.error(message) raise FileNotFoundError(message) dst = os.path.abspath(self.get_config_path()) shutil.copyfile(src, dst) if not os.path.exists(dst): raise FileNotFoundError("Config file could not be copied to user dir!")
def __create_user_config(self): """ Copy the config template into user's directory """ src_path = self.__get_config_template_path() src = os.path.abspath(src_path) if not os.path.exists(src): message = f"Config template not found {src}" self.logger.error(message) raise FileNotFoundError(message) dst = os.path.abspath(self.get_config_path()) shutil.copyfile(src, dst) if not os.path.exists(dst): raise FileNotFoundError("Config file could not be copied to user dir!")
[ "Copy", "the", "config", "template", "into", "user", "s", "directory" ]
MisterY/price-database
python
https://github.com/MisterY/price-database/blob/b4fd366b7763891c690fe3000b8840e656da023e/pricedb/config.py#L74-L89
[ "def", "__create_user_config", "(", "self", ")", ":", "src_path", "=", "self", ".", "__get_config_template_path", "(", ")", "src", "=", "os", ".", "path", ".", "abspath", "(", "src_path", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "src", ")", ":", "message", "=", "f\"Config template not found {src}\"", "self", ".", "logger", ".", "error", "(", "message", ")", "raise", "FileNotFoundError", "(", "message", ")", "dst", "=", "os", ".", "path", ".", "abspath", "(", "self", ".", "get_config_path", "(", ")", ")", "shutil", ".", "copyfile", "(", "src", ",", "dst", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "dst", ")", ":", "raise", "FileNotFoundError", "(", "\"Config file could not be copied to user dir!\"", ")" ]
b4fd366b7763891c690fe3000b8840e656da023e
test
Config.get_config_path
Returns the path where the active config file is expected. This is the user's profile folder.
pricedb/config.py
def get_config_path(self) -> str: """ Returns the path where the active config file is expected. This is the user's profile folder. """ dst_dir = self.__get_user_path() dst = dst_dir + "/" + config_filename return dst
def get_config_path(self) -> str: """ Returns the path where the active config file is expected. This is the user's profile folder. """ dst_dir = self.__get_user_path() dst = dst_dir + "/" + config_filename return dst
[ "Returns", "the", "path", "where", "the", "active", "config", "file", "is", "expected", ".", "This", "is", "the", "user", "s", "profile", "folder", "." ]
MisterY/price-database
python
https://github.com/MisterY/price-database/blob/b4fd366b7763891c690fe3000b8840e656da023e/pricedb/config.py#L91-L98
[ "def", "get_config_path", "(", "self", ")", "->", "str", ":", "dst_dir", "=", "self", ".", "__get_user_path", "(", ")", "dst", "=", "dst_dir", "+", "\"/\"", "+", "config_filename", "return", "dst" ]
b4fd366b7763891c690fe3000b8840e656da023e
test
Config.get_contents
Reads the contents of the config file
pricedb/config.py
def get_contents(self) -> str: """ Reads the contents of the config file """ content = None # with open(file_path) as cfg_file: # contents = cfg_file.read() # Dump the current contents into an in-memory file. in_memory = io.StringIO("") self.config.write(in_memory) in_memory.seek(0) content = in_memory.read() # log(DEBUG, "config content: %s", content) in_memory.close() return content
def get_contents(self) -> str: """ Reads the contents of the config file """ content = None # with open(file_path) as cfg_file: # contents = cfg_file.read() # Dump the current contents into an in-memory file. in_memory = io.StringIO("") self.config.write(in_memory) in_memory.seek(0) content = in_memory.read() # log(DEBUG, "config content: %s", content) in_memory.close() return content
[ "Reads", "the", "contents", "of", "the", "config", "file" ]
MisterY/price-database
python
https://github.com/MisterY/price-database/blob/b4fd366b7763891c690fe3000b8840e656da023e/pricedb/config.py#L100-L113
[ "def", "get_contents", "(", "self", ")", "->", "str", ":", "content", "=", "None", "# with open(file_path) as cfg_file:", "# contents = cfg_file.read()", "# Dump the current contents into an in-memory file.", "in_memory", "=", "io", ".", "StringIO", "(", "\"\"", ")", "self", ".", "config", ".", "write", "(", "in_memory", ")", "in_memory", ".", "seek", "(", "0", ")", "content", "=", "in_memory", ".", "read", "(", ")", "# log(DEBUG, \"config content: %s\", content)", "in_memory", ".", "close", "(", ")", "return", "content" ]
b4fd366b7763891c690fe3000b8840e656da023e
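get_contents above serialises the live ConfigParser through an in-memory file instead of re-reading disk; the same technique in isolation, with an invented section and value:

import configparser
import io

config = configparser.ConfigParser()
config["pricedb"] = {"price_database": "prices.db"}  # hypothetical section/values

buffer = io.StringIO()
config.write(buffer)   # dump the current state into the in-memory file
buffer.seek(0)
print(buffer.read())   # prints the [pricedb] section as it would appear on disk
buffer.close()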
test
Config.set
Sets a value in config
pricedb/config.py
def set(self, option: ConfigKeys, value): """ Sets a value in config """ assert isinstance(option, ConfigKeys) # As currently we only have 1 section. section = SECTION self.config.set(section, option.name, value) self.save()
def set(self, option: ConfigKeys, value): """ Sets a value in config """ assert isinstance(option, ConfigKeys) # As currently we only have 1 section. section = SECTION self.config.set(section, option.name, value) self.save()
[ "Sets", "a", "value", "in", "config" ]
MisterY/price-database
python
https://github.com/MisterY/price-database/blob/b4fd366b7763891c690fe3000b8840e656da023e/pricedb/config.py#L115-L122
[ "def", "set", "(", "self", ",", "option", ":", "ConfigKeys", ",", "value", ")", ":", "assert", "isinstance", "(", "option", ",", "ConfigKeys", ")", "# As currently we only have 1 section.", "section", "=", "SECTION", "self", ".", "config", ".", "set", "(", "section", ",", "option", ".", "name", ",", "value", ")", "self", ".", "save", "(", ")" ]
b4fd366b7763891c690fe3000b8840e656da023e
test
Config.get
Retrieves a config value
pricedb/config.py
def get(self, option: ConfigKeys): """ Retrieves a config value """ assert isinstance(option, ConfigKeys) # Currently only one section is used section = SECTION return self.config.get(section, option.name)
def get(self, option: ConfigKeys): """ Retrieves a config value """ assert isinstance(option, ConfigKeys) # Currently only one section is used section = SECTION return self.config.get(section, option.name)
[ "Retrieves", "a", "config", "value" ]
MisterY/price-database
python
https://github.com/MisterY/price-database/blob/b4fd366b7763891c690fe3000b8840e656da023e/pricedb/config.py#L124-L130
[ "def", "get", "(", "self", ",", "option", ":", "ConfigKeys", ")", ":", "assert", "isinstance", "(", "option", ",", "ConfigKeys", ")", "# Currently only one section is used", "section", "=", "SECTION", "return", "self", ".", "config", ".", "get", "(", "section", ",", "option", ".", "name", ")" ]
b4fd366b7763891c690fe3000b8840e656da023e
test
Config.save
Save the config file
pricedb/config.py
def save(self): """ Save the config file """ file_path = self.get_config_path() contents = self.get_contents() with open(file_path, mode='w') as cfg_file: cfg_file.write(contents)
def save(self): """ Save the config file """ file_path = self.get_config_path() contents = self.get_contents() with open(file_path, mode='w') as cfg_file: cfg_file.write(contents)
[ "Save", "the", "config", "file" ]
MisterY/price-database
python
https://github.com/MisterY/price-database/blob/b4fd366b7763891c690fe3000b8840e656da023e/pricedb/config.py#L132-L137
[ "def", "save", "(", "self", ")", ":", "file_path", "=", "self", ".", "get_config_path", "(", ")", "contents", "=", "self", ".", "get_contents", "(", ")", "with", "open", "(", "file_path", ",", "mode", "=", "'w'", ")", "as", "cfg_file", ":", "cfg_file", ".", "write", "(", "contents", ")" ]
b4fd366b7763891c690fe3000b8840e656da023e
test
SecuritySymbol.parse
Splits the symbol into namespace, symbol tuple
pricedb/model.py
def parse(self, symbol: str) -> (str, str): """ Splits the symbol into namespace, symbol tuple """ symbol_parts = symbol.split(":") namespace = None mnemonic = symbol if len(symbol_parts) > 1: namespace = symbol_parts[0] mnemonic = symbol_parts[1] self.namespace = namespace self.mnemonic = mnemonic return namespace, mnemonic
def parse(self, symbol: str) -> (str, str): """ Splits the symbol into namespace, symbol tuple """ symbol_parts = symbol.split(":") namespace = None mnemonic = symbol if len(symbol_parts) > 1: namespace = symbol_parts[0] mnemonic = symbol_parts[1] self.namespace = namespace self.mnemonic = mnemonic return namespace, mnemonic
[ "Splits", "the", "symbol", "into", "namespace", "symbol", "tuple" ]
MisterY/price-database
python
https://github.com/MisterY/price-database/blob/b4fd366b7763891c690fe3000b8840e656da023e/pricedb/model.py#L15-L28
[ "def", "parse", "(", "self", ",", "symbol", ":", "str", ")", "->", "(", "str", ",", "str", ")", ":", "symbol_parts", "=", "symbol", ".", "split", "(", "\":\"", ")", "namespace", "=", "None", "mnemonic", "=", "symbol", "if", "len", "(", "symbol_parts", ")", ">", "1", ":", "namespace", "=", "symbol_parts", "[", "0", "]", "mnemonic", "=", "symbol_parts", "[", "1", "]", "self", ".", "namespace", "=", "namespace", "self", ".", "mnemonic", "=", "mnemonic", "return", "namespace", ",", "mnemonic" ]
b4fd366b7763891c690fe3000b8840e656da023e
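parse above splits an optional exchange prefix from the ticker on ":"; a standalone check of both shapes, with invented sample symbols:

def split_symbol(symbol):
    # Same convention as SecuritySymbol.parse: optional "NAMESPACE:MNEMONIC".
    parts = symbol.split(":")
    return (parts[0], parts[1]) if len(parts) > 1 else (None, symbol)

print(split_symbol("XETRA:EL4X"))  # ('XETRA', 'EL4X')
print(split_symbol("VTI"))         # (None, 'VTI')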