Dataset schema (column, dtype, and value length or count range):

| column            | dtype         | lengths / values |
|-------------------|---------------|------------------|
| partition         | stringclasses | 3 values         |
| func_name         | stringlengths | 1–134            |
| docstring         | stringlengths | 1–46.9k          |
| path              | stringlengths | 4–223            |
| original_string   | stringlengths | 75–104k          |
| code              | stringlengths | 75–104k          |
| docstring_tokens  | listlengths   | 1–1.97k          |
| repo              | stringlengths | 7–55             |
| language          | stringclasses | 1 value          |
| url               | stringlengths | 87–315           |
| code_tokens       | listlengths   | 19–28.4k         |
| sha               | stringlengths | 40–40            |
test
ServerInterface.confirm
Called when a dangerous action is about to be done to make sure it's okay. `prompt' is printed; user response is returned.
trepan/interfaces/server.py
def confirm(self, prompt, default): """ Called when a dangerous action is about to be done to make sure it's okay. `prompt' is printed; user response is returned.""" while True: try: self.write_confirm(prompt, default) reply = self.readline('').strip().lower() except EOFError: return default if reply in ('y', 'yes'): return True elif reply in ('n', 'no'): return False else: self.msg("Please answer y or n.") pass pass return default
def confirm(self, prompt, default): """ Called when a dangerous action is about to be done to make sure it's okay. `prompt' is printed; user response is returned.""" while True: try: self.write_confirm(prompt, default) reply = self.readline('').strip().lower() except EOFError: return default if reply in ('y', 'yes'): return True elif reply in ('n', 'no'): return False else: self.msg("Please answer y or n.") pass pass return default
[ "Called", "when", "a", "dangerous", "action", "is", "about", "to", "be", "done", "to", "make", "sure", "it", "s", "okay", ".", "prompt", "is", "printed", ";", "user", "response", "is", "returned", "." ]
rocky/python3-trepan
python
https://github.com/rocky/python3-trepan/blob/14e91bc0acce090d67be145b1ac040cab92ac5f3/trepan/interfaces/server.py#L61-L78
[ "def", "confirm", "(", "self", ",", "prompt", ",", "default", ")", ":", "while", "True", ":", "try", ":", "self", ".", "write_confirm", "(", "prompt", ",", "default", ")", "reply", "=", "self", ".", "readline", "(", "''", ")", ".", "strip", "(", ")", ".", "lower", "(", ")", "except", "EOFError", ":", "return", "default", "if", "reply", "in", "(", "'y'", ",", "'yes'", ")", ":", "return", "True", "elif", "reply", "in", "(", "'n'", ",", "'no'", ")", ":", "return", "False", "else", ":", "self", ".", "msg", "(", "\"Please answer y or n.\"", ")", "pass", "pass", "return", "default" ]
14e91bc0acce090d67be145b1ac040cab92ac5f3
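The row above is trepan's server-side confirmation prompt. A minimal standalone sketch of the same yes/no loop, with plain input() standing in for the interface's write_confirm()/readline() helpers (the sample prompt is made up):

```python
def confirm(prompt, default):
    # Mirrors the loop above: keep asking until a y/yes or n/no answer,
    # and fall back to the default on EOF.
    while True:
        try:
            reply = input("%s (y or n) " % prompt).strip().lower()
        except EOFError:
            return default
        if reply in ("y", "yes"):
            return True
        if reply in ("n", "no"):
            return False
        print("Please answer y or n.")

# e.g. confirm("Delete all breakpoints?", False)
```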
test
LocationScanner.t_whitespace
r'\s+'
trepan/processor/parse/scanner.py
def t_whitespace(self, s): r'\s+' self.add_token('SPACE', s) self.pos += len(s) pass
def t_whitespace(self, s): r'\s+' self.add_token('SPACE', s) self.pos += len(s) pass
[ "r", "\\", "s", "+" ]
rocky/python3-trepan
python
https://github.com/rocky/python3-trepan/blob/14e91bc0acce090d67be145b1ac040cab92ac5f3/trepan/processor/parse/scanner.py#L52-L56
[ "def", "t_whitespace", "(", "self", ",", "s", ")", ":", "self", ".", "add_token", "(", "'SPACE'", ",", "s", ")", "self", ".", "pos", "+=", "len", "(", "s", ")", "pass" ]
14e91bc0acce090d67be145b1ac040cab92ac5f3
test
LocationScanner.t_single_quote_file
r"'[^'].+
trepan/processor/parse/scanner.py
def t_single_quote_file(self, s): r"'[^'].+'" # Pick out text inside of singe-quoted string base = s[1:-1] self.add_token('FILENAME', base) self.pos += len(s)
def t_single_quote_file(self, s): r"'[^'].+'" # Pick out text inside of singe-quoted string base = s[1:-1] self.add_token('FILENAME', base) self.pos += len(s)
[ "r", "[", "^", "]", ".", "+" ]
rocky/python3-trepan
python
https://github.com/rocky/python3-trepan/blob/14e91bc0acce090d67be145b1ac040cab92ac5f3/trepan/processor/parse/scanner.py#L81-L86
[ "def", "t_single_quote_file", "(", "self", ",", "s", ")", ":", "# Pick out text inside of singe-quoted string", "base", "=", "s", "[", "1", ":", "-", "1", "]", "self", ".", "add_token", "(", "'FILENAME'", ",", "base", ")", "self", ".", "pos", "+=", "len", "(", "s", ")" ]
14e91bc0acce090d67be145b1ac040cab92ac5f3
test
LocationScanner.t_double_quote_file
r'"[^"]+"
trepan/processor/parse/scanner.py
def t_double_quote_file(self, s): r'"[^"]+"' # Pick out text inside of singe-quoted string base = s[1:-1] self.add_token('FILENAME', base) self.pos += len(s)
def t_double_quote_file(self, s): r'"[^"]+"' # Pick out text inside of singe-quoted string base = s[1:-1] self.add_token('FILENAME', base) self.pos += len(s)
[ "r", "[", "^", "]", "+" ]
rocky/python3-trepan
python
https://github.com/rocky/python3-trepan/blob/14e91bc0acce090d67be145b1ac040cab92ac5f3/trepan/processor/parse/scanner.py#L88-L93
[ "def", "t_double_quote_file", "(", "self", ",", "s", ")", ":", "# Pick out text inside of singe-quoted string", "base", "=", "s", "[", "1", ":", "-", "1", "]", "self", ".", "add_token", "(", "'FILENAME'", ",", "base", ")", "self", ".", "pos", "+=", "len", "(", "s", ")" ]
14e91bc0acce090d67be145b1ac040cab92ac5f3
test
LocationScanner.t_colon
r':'
trepan/processor/parse/scanner.py
def t_colon(self, s): r':' # Used to separate a filename from a line number self.add_token('COLON', s) self.pos += len(s)
def t_colon(self, s): r':' # Used to separate a filename from a line number self.add_token('COLON', s) self.pos += len(s)
[ "r", ":" ]
rocky/python3-trepan
python
https://github.com/rocky/python3-trepan/blob/14e91bc0acce090d67be145b1ac040cab92ac5f3/trepan/processor/parse/scanner.py#L95-L99
[ "def", "t_colon", "(", "self", ",", "s", ")", ":", "# Used to separate a filename from a line number", "self", ".", "add_token", "(", "'COLON'", ",", "s", ")", "self", ".", "pos", "+=", "len", "(", "s", ")" ]
14e91bc0acce090d67be145b1ac040cab92ac5f3
test
LocationScanner.t_comma
r','
trepan/processor/parse/scanner.py
def t_comma(self, s): r',' # Used in "list" to separate first from last self.add_token('COMMA', s) self.pos += len(s)
def t_comma(self, s): r',' # Used in "list" to separate first from last self.add_token('COMMA', s) self.pos += len(s)
[ "r" ]
rocky/python3-trepan
python
https://github.com/rocky/python3-trepan/blob/14e91bc0acce090d67be145b1ac040cab92ac5f3/trepan/processor/parse/scanner.py#L101-L105
[ "def", "t_comma", "(", "self", ",", "s", ")", ":", "# Used in \"list\" to separate first from last", "self", ".", "add_token", "(", "'COMMA'", ",", "s", ")", "self", ".", "pos", "+=", "len", "(", "s", ")" ]
14e91bc0acce090d67be145b1ac040cab92ac5f3
test
LocationScanner.t_direction
r'^[+-]$'
trepan/processor/parse/scanner.py
def t_direction(self, s): r'^[+-]$' # Used in the "list" command self.add_token('DIRECTION', s) self.pos += len(s)
def t_direction(self, s): r'^[+-]$' # Used in the "list" command self.add_token('DIRECTION', s) self.pos += len(s)
[ "r", "^", "[", "+", "-", "]", "$" ]
rocky/python3-trepan
python
https://github.com/rocky/python3-trepan/blob/14e91bc0acce090d67be145b1ac040cab92ac5f3/trepan/processor/parse/scanner.py#L107-L111
[ "def", "t_direction", "(", "self", ",", "s", ")", ":", "# Used in the \"list\" command", "self", ".", "add_token", "(", "'DIRECTION'", ",", "s", ")", "self", ".", "pos", "+=", "len", "(", "s", ")" ]
14e91bc0acce090d67be145b1ac040cab92ac5f3
test
LocationScanner.t_number
r'\d+'
trepan/processor/parse/scanner.py
def t_number(self, s): r'\d+' pos = self.pos self.add_token('NUMBER', int(s)) self.pos = pos + len(s)
def t_number(self, s): r'\d+' pos = self.pos self.add_token('NUMBER', int(s)) self.pos = pos + len(s)
[ "r", "\\", "d", "+" ]
rocky/python3-trepan
python
https://github.com/rocky/python3-trepan/blob/14e91bc0acce090d67be145b1ac040cab92ac5f3/trepan/processor/parse/scanner.py#L114-L118
[ "def", "t_number", "(", "self", ",", "s", ")", ":", "pos", "=", "self", ".", "pos", "self", ".", "add_token", "(", "'NUMBER'", ",", "int", "(", "s", ")", ")", "self", ".", "pos", "=", "pos", "+", "len", "(", "s", ")" ]
14e91bc0acce090d67be145b1ac040cab92ac5f3
test
LocationScanner.t_offset
r'[+]\d+'
trepan/processor/parse/scanner.py
def t_offset(self, s): r'[+]\d+' pos = self.pos self.add_token('OFFSET', s) self.pos = pos + len(s)
def t_offset(self, s): r'[+]\d+' pos = self.pos self.add_token('OFFSET', s) self.pos = pos + len(s)
[ "r", "[", "+", "]", "\\", "d", "+" ]
rocky/python3-trepan
python
https://github.com/rocky/python3-trepan/blob/14e91bc0acce090d67be145b1ac040cab92ac5f3/trepan/processor/parse/scanner.py#L121-L125
[ "def", "t_offset", "(", "self", ",", "s", ")", ":", "pos", "=", "self", ".", "pos", "self", ".", "add_token", "(", "'OFFSET'", ",", "s", ")", "self", ".", "pos", "=", "pos", "+", "len", "(", "s", ")" ]
14e91bc0acce090d67be145b1ac040cab92ac5f3
test
LocationScanner.t_address
r'[*]\d+'
trepan/processor/parse/scanner.py
def t_address(self, s): r'[*]\d+' pos = self.pos self.add_token('ADDRESS', s) self.pos = pos + len(s)
def t_address(self, s): r'[*]\d+' pos = self.pos self.add_token('ADDRESS', s) self.pos = pos + len(s)
[ "r", "[", "*", "]", "\\", "d", "+" ]
rocky/python3-trepan
python
https://github.com/rocky/python3-trepan/blob/14e91bc0acce090d67be145b1ac040cab92ac5f3/trepan/processor/parse/scanner.py#L128-L132
[ "def", "t_address", "(", "self", ",", "s", ")", ":", "pos", "=", "self", ".", "pos", "self", ".", "add_token", "(", "'ADDRESS'", ",", "s", ")", "self", ".", "pos", "=", "pos", "+", "len", "(", "s", ")" ]
14e91bc0acce090d67be145b1ac040cab92ac5f3
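The LocationScanner rows above all follow one shape: the regex in each t_* method's docstring selects the lexeme, and the body records a token and advances the scan position. A rough standalone approximation of that dispatch using plain re, with the anchored DIRECTION pattern omitted and a namedtuple standing in for the scanner's token type (an assumption):

```python
import re
from collections import namedtuple

Token = namedtuple("Token", "kind value")  # stand-in for the real token class

# Patterns taken from the t_* docstrings above, tried in order; OFFSET and
# ADDRESS must come before NUMBER so that "+12" and "*12" are not split.
PATTERNS = [
    ("SPACE",    r"\s+"),
    ("FILENAME", r"'[^'].+'"),
    ("FILENAME", r'"[^"]+"'),
    ("COLON",    r":"),
    ("COMMA",    r","),
    ("OFFSET",   r"[+]\d+"),
    ("ADDRESS",  r"[*]\d+"),
    ("NUMBER",   r"\d+"),
]

def tokenize(text):
    pos, tokens = 0, []
    while pos < len(text):
        for kind, pattern in PATTERNS:
            m = re.match(pattern, text[pos:])
            if m:
                value = m.group(0)
                if kind == "FILENAME":
                    value = value[1:-1]  # strip the surrounding quotes, as above
                tokens.append(Token(kind, value))
                pos += len(m.group(0))
                break
        else:
            raise ValueError("cannot tokenize %r" % text[pos:])
    return tokens

# tokenize("'foo.py': 12") -> FILENAME, COLON, SPACE, NUMBER
```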
test
SubprocessBackend.extract_concepts
extract_concepts takes a list of sentences and ids(optional) then returns a list of Concept objects extracted via MetaMap. Supported Options: Composite Phrase -Q Word Sense Disambiguation -y use strict model -A use relaxed model -C allow large N -l allow overmatches -o allow concept gaps -g term processing -z No Derivational Variants -d All Derivational Variants -D Ignore Word Order -i Allow Acronym Variants -a Unique Acronym Variants -u Prefer Multiple Concepts -Y Ignore Stop Phrases -K Compute All Mappings -b MM Data Version -V Exclude Sources -e Restrict to Sources -R Restrict to Semantic Types -J Exclude Semantic Types -k For information about the available options visit http://metamap.nlm.nih.gov/. Note: If an error is encountered the process will be closed and whatever was processed, if anything, will be returned along with the error found.
pymetamap/SubprocessBackend.py
def extract_concepts(self, sentences=None, ids=None, composite_phrase=4, filename=None, file_format='sldi', allow_acronym_variants=False, word_sense_disambiguation=False, allow_large_n=False, strict_model=False, relaxed_model=False, allow_overmatches=False, allow_concept_gaps=False, term_processing=False, no_derivational_variants=False, derivational_variants=False, ignore_word_order=False, unique_acronym_variants=False, prefer_multiple_concepts=False, ignore_stop_phrases=False, compute_all_mappings=False, mm_data_version=False, exclude_sources=[], restrict_to_sources=[], restrict_to_sts=[], exclude_sts=[]): """ extract_concepts takes a list of sentences and ids(optional) then returns a list of Concept objects extracted via MetaMap. Supported Options: Composite Phrase -Q Word Sense Disambiguation -y use strict model -A use relaxed model -C allow large N -l allow overmatches -o allow concept gaps -g term processing -z No Derivational Variants -d All Derivational Variants -D Ignore Word Order -i Allow Acronym Variants -a Unique Acronym Variants -u Prefer Multiple Concepts -Y Ignore Stop Phrases -K Compute All Mappings -b MM Data Version -V Exclude Sources -e Restrict to Sources -R Restrict to Semantic Types -J Exclude Semantic Types -k For information about the available options visit http://metamap.nlm.nih.gov/. Note: If an error is encountered the process will be closed and whatever was processed, if anything, will be returned along with the error found. """ if allow_acronym_variants and unique_acronym_variants: raise ValueError("You can't use both allow_acronym_variants and " "unique_acronym_variants.") if (sentences is not None and filename is not None) or \ (sentences is None and filename is None): raise ValueError("You must either pass a list of sentences " "OR a filename.") if file_format not in ['sldi','sldiID']: raise ValueError("file_format must be either sldi or sldiID") input_file = None if sentences is not None: input_file = tempfile.NamedTemporaryFile(mode="wb", delete=False) else: input_file = open(filename, 'r') output_file = tempfile.NamedTemporaryFile(mode="r", delete=False) error = None try: if sentences is not None: if ids is not None: for identifier, sentence in zip(ids, sentences): input_file.write('{0!r}|{1!r}\n'.format(identifier, sentence).encode('utf8')) else: for sentence in sentences: input_file.write('{0!r}\n'.format(sentence).encode('utf8')) input_file.flush() command = [self.metamap_filename, '-N'] command.append('-Q') command.append(str(composite_phrase)) if mm_data_version is not False: if mm_data_version not in ['Base', 'USAbase', 'NLM']: raise ValueError("mm_data_version must be Base, USAbase, or NLM.") command.append('-V') command.append(str(mm_data_version)) if word_sense_disambiguation: command.append('-y') if strict_model: command.append('-A') if relaxed_model: command.append('-C') if allow_large_n: command.append('-l') if allow_overmatches: command.append('-o') if allow_concept_gaps: command.append('-g') if term_processing: command.append('-z') if no_derivational_variants: command.append('-d') if derivational_variants: command.append('-D') if ignore_word_order: command.append('-i') if allow_acronym_variants: command.append('-a') if unique_acronym_variants: command.append('-u') if prefer_multiple_concepts: command.append('-Y') if ignore_stop_phrases: command.append('-K') if compute_all_mappings: command.append('-b') if len(exclude_sources) > 0: command.append('-e') command.append(str(','.join(exclude_sources))) if len(restrict_to_sources) > 0: 
command.append('-R') command.append(str(','.join(restrict_to_sources))) if len(restrict_to_sts) > 0: command.append('-J') command.append(str(','.join(restrict_to_sts))) if len(exclude_sts) > 0: command.append('-k') command.append(str(','.join(exclude_sts))) if ids is not None or (file_format == 'sldiID' and sentences is None): command.append('--sldiID') else: command.append('--sldi') command.append(input_file.name) command.append(output_file.name) metamap_process = subprocess.Popen(command, stdout=subprocess.PIPE) while metamap_process.poll() is None: stdout = str(metamap_process.stdout.readline()) if 'ERROR' in stdout: metamap_process.terminate() error = stdout.rstrip() output = str(output_file.read()) finally: if sentences is not None: os.remove(input_file.name) else: input_file.close() os.remove(output_file.name) concepts = Corpus.load(output.splitlines()) return (concepts, error)
def extract_concepts(self, sentences=None, ids=None, composite_phrase=4, filename=None, file_format='sldi', allow_acronym_variants=False, word_sense_disambiguation=False, allow_large_n=False, strict_model=False, relaxed_model=False, allow_overmatches=False, allow_concept_gaps=False, term_processing=False, no_derivational_variants=False, derivational_variants=False, ignore_word_order=False, unique_acronym_variants=False, prefer_multiple_concepts=False, ignore_stop_phrases=False, compute_all_mappings=False, mm_data_version=False, exclude_sources=[], restrict_to_sources=[], restrict_to_sts=[], exclude_sts=[]): """ extract_concepts takes a list of sentences and ids(optional) then returns a list of Concept objects extracted via MetaMap. Supported Options: Composite Phrase -Q Word Sense Disambiguation -y use strict model -A use relaxed model -C allow large N -l allow overmatches -o allow concept gaps -g term processing -z No Derivational Variants -d All Derivational Variants -D Ignore Word Order -i Allow Acronym Variants -a Unique Acronym Variants -u Prefer Multiple Concepts -Y Ignore Stop Phrases -K Compute All Mappings -b MM Data Version -V Exclude Sources -e Restrict to Sources -R Restrict to Semantic Types -J Exclude Semantic Types -k For information about the available options visit http://metamap.nlm.nih.gov/. Note: If an error is encountered the process will be closed and whatever was processed, if anything, will be returned along with the error found. """ if allow_acronym_variants and unique_acronym_variants: raise ValueError("You can't use both allow_acronym_variants and " "unique_acronym_variants.") if (sentences is not None and filename is not None) or \ (sentences is None and filename is None): raise ValueError("You must either pass a list of sentences " "OR a filename.") if file_format not in ['sldi','sldiID']: raise ValueError("file_format must be either sldi or sldiID") input_file = None if sentences is not None: input_file = tempfile.NamedTemporaryFile(mode="wb", delete=False) else: input_file = open(filename, 'r') output_file = tempfile.NamedTemporaryFile(mode="r", delete=False) error = None try: if sentences is not None: if ids is not None: for identifier, sentence in zip(ids, sentences): input_file.write('{0!r}|{1!r}\n'.format(identifier, sentence).encode('utf8')) else: for sentence in sentences: input_file.write('{0!r}\n'.format(sentence).encode('utf8')) input_file.flush() command = [self.metamap_filename, '-N'] command.append('-Q') command.append(str(composite_phrase)) if mm_data_version is not False: if mm_data_version not in ['Base', 'USAbase', 'NLM']: raise ValueError("mm_data_version must be Base, USAbase, or NLM.") command.append('-V') command.append(str(mm_data_version)) if word_sense_disambiguation: command.append('-y') if strict_model: command.append('-A') if relaxed_model: command.append('-C') if allow_large_n: command.append('-l') if allow_overmatches: command.append('-o') if allow_concept_gaps: command.append('-g') if term_processing: command.append('-z') if no_derivational_variants: command.append('-d') if derivational_variants: command.append('-D') if ignore_word_order: command.append('-i') if allow_acronym_variants: command.append('-a') if unique_acronym_variants: command.append('-u') if prefer_multiple_concepts: command.append('-Y') if ignore_stop_phrases: command.append('-K') if compute_all_mappings: command.append('-b') if len(exclude_sources) > 0: command.append('-e') command.append(str(','.join(exclude_sources))) if len(restrict_to_sources) > 0: 
command.append('-R') command.append(str(','.join(restrict_to_sources))) if len(restrict_to_sts) > 0: command.append('-J') command.append(str(','.join(restrict_to_sts))) if len(exclude_sts) > 0: command.append('-k') command.append(str(','.join(exclude_sts))) if ids is not None or (file_format == 'sldiID' and sentences is None): command.append('--sldiID') else: command.append('--sldi') command.append(input_file.name) command.append(output_file.name) metamap_process = subprocess.Popen(command, stdout=subprocess.PIPE) while metamap_process.poll() is None: stdout = str(metamap_process.stdout.readline()) if 'ERROR' in stdout: metamap_process.terminate() error = stdout.rstrip() output = str(output_file.read()) finally: if sentences is not None: os.remove(input_file.name) else: input_file.close() os.remove(output_file.name) concepts = Corpus.load(output.splitlines()) return (concepts, error)
[ "extract_concepts", "takes", "a", "list", "of", "sentences", "and", "ids", "(", "optional", ")", "then", "returns", "a", "list", "of", "Concept", "objects", "extracted", "via", "MetaMap", "." ]
AnthonyMRios/pymetamap
python
https://github.com/AnthonyMRios/pymetamap/blob/01f35802ad13f2eb7dff033e66e5445e8f5bc039/pymetamap/SubprocessBackend.py#L27-L175
[ "def", "extract_concepts", "(", "self", ",", "sentences", "=", "None", ",", "ids", "=", "None", ",", "composite_phrase", "=", "4", ",", "filename", "=", "None", ",", "file_format", "=", "'sldi'", ",", "allow_acronym_variants", "=", "False", ",", "word_sense_disambiguation", "=", "False", ",", "allow_large_n", "=", "False", ",", "strict_model", "=", "False", ",", "relaxed_model", "=", "False", ",", "allow_overmatches", "=", "False", ",", "allow_concept_gaps", "=", "False", ",", "term_processing", "=", "False", ",", "no_derivational_variants", "=", "False", ",", "derivational_variants", "=", "False", ",", "ignore_word_order", "=", "False", ",", "unique_acronym_variants", "=", "False", ",", "prefer_multiple_concepts", "=", "False", ",", "ignore_stop_phrases", "=", "False", ",", "compute_all_mappings", "=", "False", ",", "mm_data_version", "=", "False", ",", "exclude_sources", "=", "[", "]", ",", "restrict_to_sources", "=", "[", "]", ",", "restrict_to_sts", "=", "[", "]", ",", "exclude_sts", "=", "[", "]", ")", ":", "if", "allow_acronym_variants", "and", "unique_acronym_variants", ":", "raise", "ValueError", "(", "\"You can't use both allow_acronym_variants and \"", "\"unique_acronym_variants.\"", ")", "if", "(", "sentences", "is", "not", "None", "and", "filename", "is", "not", "None", ")", "or", "(", "sentences", "is", "None", "and", "filename", "is", "None", ")", ":", "raise", "ValueError", "(", "\"You must either pass a list of sentences \"", "\"OR a filename.\"", ")", "if", "file_format", "not", "in", "[", "'sldi'", ",", "'sldiID'", "]", ":", "raise", "ValueError", "(", "\"file_format must be either sldi or sldiID\"", ")", "input_file", "=", "None", "if", "sentences", "is", "not", "None", ":", "input_file", "=", "tempfile", ".", "NamedTemporaryFile", "(", "mode", "=", "\"wb\"", ",", "delete", "=", "False", ")", "else", ":", "input_file", "=", "open", "(", "filename", ",", "'r'", ")", "output_file", "=", "tempfile", ".", "NamedTemporaryFile", "(", "mode", "=", "\"r\"", ",", "delete", "=", "False", ")", "error", "=", "None", "try", ":", "if", "sentences", "is", "not", "None", ":", "if", "ids", "is", "not", "None", ":", "for", "identifier", ",", "sentence", "in", "zip", "(", "ids", ",", "sentences", ")", ":", "input_file", ".", "write", "(", "'{0!r}|{1!r}\\n'", ".", "format", "(", "identifier", ",", "sentence", ")", ".", "encode", "(", "'utf8'", ")", ")", "else", ":", "for", "sentence", "in", "sentences", ":", "input_file", ".", "write", "(", "'{0!r}\\n'", ".", "format", "(", "sentence", ")", ".", "encode", "(", "'utf8'", ")", ")", "input_file", ".", "flush", "(", ")", "command", "=", "[", "self", ".", "metamap_filename", ",", "'-N'", "]", "command", ".", "append", "(", "'-Q'", ")", "command", ".", "append", "(", "str", "(", "composite_phrase", ")", ")", "if", "mm_data_version", "is", "not", "False", ":", "if", "mm_data_version", "not", "in", "[", "'Base'", ",", "'USAbase'", ",", "'NLM'", "]", ":", "raise", "ValueError", "(", "\"mm_data_version must be Base, USAbase, or NLM.\"", ")", "command", ".", "append", "(", "'-V'", ")", "command", ".", "append", "(", "str", "(", "mm_data_version", ")", ")", "if", "word_sense_disambiguation", ":", "command", ".", "append", "(", "'-y'", ")", "if", "strict_model", ":", "command", ".", "append", "(", "'-A'", ")", "if", "relaxed_model", ":", "command", ".", "append", "(", "'-C'", ")", "if", "allow_large_n", ":", "command", ".", "append", "(", "'-l'", ")", "if", "allow_overmatches", ":", "command", ".", "append", "(", "'-o'", ")", "if", 
"allow_concept_gaps", ":", "command", ".", "append", "(", "'-g'", ")", "if", "term_processing", ":", "command", ".", "append", "(", "'-z'", ")", "if", "no_derivational_variants", ":", "command", ".", "append", "(", "'-d'", ")", "if", "derivational_variants", ":", "command", ".", "append", "(", "'-D'", ")", "if", "ignore_word_order", ":", "command", ".", "append", "(", "'-i'", ")", "if", "allow_acronym_variants", ":", "command", ".", "append", "(", "'-a'", ")", "if", "unique_acronym_variants", ":", "command", ".", "append", "(", "'-u'", ")", "if", "prefer_multiple_concepts", ":", "command", ".", "append", "(", "'-Y'", ")", "if", "ignore_stop_phrases", ":", "command", ".", "append", "(", "'-K'", ")", "if", "compute_all_mappings", ":", "command", ".", "append", "(", "'-b'", ")", "if", "len", "(", "exclude_sources", ")", ">", "0", ":", "command", ".", "append", "(", "'-e'", ")", "command", ".", "append", "(", "str", "(", "','", ".", "join", "(", "exclude_sources", ")", ")", ")", "if", "len", "(", "restrict_to_sources", ")", ">", "0", ":", "command", ".", "append", "(", "'-R'", ")", "command", ".", "append", "(", "str", "(", "','", ".", "join", "(", "restrict_to_sources", ")", ")", ")", "if", "len", "(", "restrict_to_sts", ")", ">", "0", ":", "command", ".", "append", "(", "'-J'", ")", "command", ".", "append", "(", "str", "(", "','", ".", "join", "(", "restrict_to_sts", ")", ")", ")", "if", "len", "(", "exclude_sts", ")", ">", "0", ":", "command", ".", "append", "(", "'-k'", ")", "command", ".", "append", "(", "str", "(", "','", ".", "join", "(", "exclude_sts", ")", ")", ")", "if", "ids", "is", "not", "None", "or", "(", "file_format", "==", "'sldiID'", "and", "sentences", "is", "None", ")", ":", "command", ".", "append", "(", "'--sldiID'", ")", "else", ":", "command", ".", "append", "(", "'--sldi'", ")", "command", ".", "append", "(", "input_file", ".", "name", ")", "command", ".", "append", "(", "output_file", ".", "name", ")", "metamap_process", "=", "subprocess", ".", "Popen", "(", "command", ",", "stdout", "=", "subprocess", ".", "PIPE", ")", "while", "metamap_process", ".", "poll", "(", ")", "is", "None", ":", "stdout", "=", "str", "(", "metamap_process", ".", "stdout", ".", "readline", "(", ")", ")", "if", "'ERROR'", "in", "stdout", ":", "metamap_process", ".", "terminate", "(", ")", "error", "=", "stdout", ".", "rstrip", "(", ")", "output", "=", "str", "(", "output_file", ".", "read", "(", ")", ")", "finally", ":", "if", "sentences", "is", "not", "None", ":", "os", ".", "remove", "(", "input_file", ".", "name", ")", "else", ":", "input_file", ".", "close", "(", ")", "os", ".", "remove", "(", "output_file", ".", "name", ")", "concepts", "=", "Corpus", ".", "load", "(", "output", ".", "splitlines", "(", ")", ")", "return", "(", "concepts", ",", "error", ")" ]
01f35802ad13f2eb7dff033e66e5445e8f5bc039
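A hedged usage sketch for the MetaMap backend above, following pymetamap's documented MetaMap.get_instance entry point; the installation path and input sentence are placeholders:

```python
from pymetamap import MetaMap

# Placeholder path to a local MetaMap installation.
mm = MetaMap.get_instance('/opt/public_mm/bin/metamap')

concepts, error = mm.extract_concepts(
    sentences=['Heart attack and high blood pressure.'],
    ids=['1'],
    word_sense_disambiguation=True,  # adds -y, per the option list above
)
for concept in concepts:
    print(concept)
```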
test
SubprocessBackendLite.extract_concepts
extract_concepts takes a list of sentences and ids(optional) then returns a list of Concept objects extracted via MetaMapLite. Supported Options: Restrict to Semantic Types --restrict_to_sts Restrict to Sources --restrict_to_sources For information about the available options visit http://metamap.nlm.nih.gov/. Note: If an error is encountered the process will be closed and whatever was processed, if anything, will be returned along with the error found.
pymetamap/SubprocessBackendLite.py
def extract_concepts(self, sentences=None, ids=None, filename=None, restrict_to_sts=None, restrict_to_sources=None): """ extract_concepts takes a list of sentences and ids(optional) then returns a list of Concept objects extracted via MetaMapLite. Supported Options: Restrict to Semantic Types --restrict_to_sts Restrict to Sources --restrict_to_sources For information about the available options visit http://metamap.nlm.nih.gov/. Note: If an error is encountered the process will be closed and whatever was processed, if anything, will be returned along with the error found. """ if (sentences is not None and filename is not None) or \ (sentences is None and filename is None): raise ValueError("You must either pass a list of sentences " "OR a filename.") input_file = None if sentences is not None: input_file = tempfile.NamedTemporaryFile(mode="wb", delete=False) else: input_file = open(filename, 'r') # Unlike MetaMap, MetaMapLite does not take an output filename as a parameter. # It creates a new output file at same location as "input_file" with the default file extension ".mmi". # output_file = tempfile.NamedTemporaryFile(mode="r", delete=False) output_file_name = None error = None try: if sentences is not None: if ids is not None: for identifier, sentence in zip(ids, sentences): input_file.write('{0!r}|{1!r}\n'.format(identifier, sentence).encode('utf8')) else: for sentence in sentences: input_file.write('{0!r}\n'.format(sentence).encode('utf8')) input_file.flush() command = ["bash", os.path.join(self.metamap_filename, "metamaplite.sh")] if restrict_to_sts: if isinstance(restrict_to_sts, str): restrict_to_sts = [restrict_to_sts] if len(restrict_to_sts) > 0: command.append('--restrict_to_sts') command.append(str(','.join(restrict_to_sts))) if restrict_to_sources: if isinstance(restrict_to_sources, str): restrict_to_sources = [restrict_to_sources] if len(restrict_to_sources) > 0: command.append('--restrict_to_sources') command.append(str(','.join(restrict_to_sources))) if ids is not None: command.append('--inputformat=sldiwi') command.append(input_file.name) # command.append(output_file.name) metamap_process = subprocess.Popen(command, stdout=subprocess.PIPE) while metamap_process.poll() is None: stdout = str(metamap_process.stdout.readline()) if 'ERROR' in stdout: metamap_process.terminate() error = stdout.rstrip() # print("input file name: {0}".format(input_file.name)) output_file_name, file_extension = os.path.splitext(input_file.name) output_file_name += "." + "mmi" # print("output_file_name: {0}".format(output_file_name)) with open(output_file_name) as fd: output = fd.read() # output = str(output_file.read()) # print("output: {0}".format(output)) finally: if sentences is not None: os.remove(input_file.name) else: input_file.close() # os.remove(output_file.name) os.remove(output_file_name) concepts = CorpusLite.load(output.splitlines()) return concepts, error
def extract_concepts(self, sentences=None, ids=None, filename=None, restrict_to_sts=None, restrict_to_sources=None): """ extract_concepts takes a list of sentences and ids(optional) then returns a list of Concept objects extracted via MetaMapLite. Supported Options: Restrict to Semantic Types --restrict_to_sts Restrict to Sources --restrict_to_sources For information about the available options visit http://metamap.nlm.nih.gov/. Note: If an error is encountered the process will be closed and whatever was processed, if anything, will be returned along with the error found. """ if (sentences is not None and filename is not None) or \ (sentences is None and filename is None): raise ValueError("You must either pass a list of sentences " "OR a filename.") input_file = None if sentences is not None: input_file = tempfile.NamedTemporaryFile(mode="wb", delete=False) else: input_file = open(filename, 'r') # Unlike MetaMap, MetaMapLite does not take an output filename as a parameter. # It creates a new output file at same location as "input_file" with the default file extension ".mmi". # output_file = tempfile.NamedTemporaryFile(mode="r", delete=False) output_file_name = None error = None try: if sentences is not None: if ids is not None: for identifier, sentence in zip(ids, sentences): input_file.write('{0!r}|{1!r}\n'.format(identifier, sentence).encode('utf8')) else: for sentence in sentences: input_file.write('{0!r}\n'.format(sentence).encode('utf8')) input_file.flush() command = ["bash", os.path.join(self.metamap_filename, "metamaplite.sh")] if restrict_to_sts: if isinstance(restrict_to_sts, str): restrict_to_sts = [restrict_to_sts] if len(restrict_to_sts) > 0: command.append('--restrict_to_sts') command.append(str(','.join(restrict_to_sts))) if restrict_to_sources: if isinstance(restrict_to_sources, str): restrict_to_sources = [restrict_to_sources] if len(restrict_to_sources) > 0: command.append('--restrict_to_sources') command.append(str(','.join(restrict_to_sources))) if ids is not None: command.append('--inputformat=sldiwi') command.append(input_file.name) # command.append(output_file.name) metamap_process = subprocess.Popen(command, stdout=subprocess.PIPE) while metamap_process.poll() is None: stdout = str(metamap_process.stdout.readline()) if 'ERROR' in stdout: metamap_process.terminate() error = stdout.rstrip() # print("input file name: {0}".format(input_file.name)) output_file_name, file_extension = os.path.splitext(input_file.name) output_file_name += "." + "mmi" # print("output_file_name: {0}".format(output_file_name)) with open(output_file_name) as fd: output = fd.read() # output = str(output_file.read()) # print("output: {0}".format(output)) finally: if sentences is not None: os.remove(input_file.name) else: input_file.close() # os.remove(output_file.name) os.remove(output_file_name) concepts = CorpusLite.load(output.splitlines()) return concepts, error
[ "extract_concepts", "takes", "a", "list", "of", "sentences", "and", "ids", "(", "optional", ")", "then", "returns", "a", "list", "of", "Concept", "objects", "extracted", "via", "MetaMapLite", "." ]
AnthonyMRios/pymetamap
python
https://github.com/AnthonyMRios/pymetamap/blob/01f35802ad13f2eb7dff033e66e5445e8f5bc039/pymetamap/SubprocessBackendLite.py#L27-L115
[ "def", "extract_concepts", "(", "self", ",", "sentences", "=", "None", ",", "ids", "=", "None", ",", "filename", "=", "None", ",", "restrict_to_sts", "=", "None", ",", "restrict_to_sources", "=", "None", ")", ":", "if", "(", "sentences", "is", "not", "None", "and", "filename", "is", "not", "None", ")", "or", "(", "sentences", "is", "None", "and", "filename", "is", "None", ")", ":", "raise", "ValueError", "(", "\"You must either pass a list of sentences \"", "\"OR a filename.\"", ")", "input_file", "=", "None", "if", "sentences", "is", "not", "None", ":", "input_file", "=", "tempfile", ".", "NamedTemporaryFile", "(", "mode", "=", "\"wb\"", ",", "delete", "=", "False", ")", "else", ":", "input_file", "=", "open", "(", "filename", ",", "'r'", ")", "# Unlike MetaMap, MetaMapLite does not take an output filename as a parameter.", "# It creates a new output file at same location as \"input_file\" with the default file extension \".mmi\".", "# output_file = tempfile.NamedTemporaryFile(mode=\"r\", delete=False)", "output_file_name", "=", "None", "error", "=", "None", "try", ":", "if", "sentences", "is", "not", "None", ":", "if", "ids", "is", "not", "None", ":", "for", "identifier", ",", "sentence", "in", "zip", "(", "ids", ",", "sentences", ")", ":", "input_file", ".", "write", "(", "'{0!r}|{1!r}\\n'", ".", "format", "(", "identifier", ",", "sentence", ")", ".", "encode", "(", "'utf8'", ")", ")", "else", ":", "for", "sentence", "in", "sentences", ":", "input_file", ".", "write", "(", "'{0!r}\\n'", ".", "format", "(", "sentence", ")", ".", "encode", "(", "'utf8'", ")", ")", "input_file", ".", "flush", "(", ")", "command", "=", "[", "\"bash\"", ",", "os", ".", "path", ".", "join", "(", "self", ".", "metamap_filename", ",", "\"metamaplite.sh\"", ")", "]", "if", "restrict_to_sts", ":", "if", "isinstance", "(", "restrict_to_sts", ",", "str", ")", ":", "restrict_to_sts", "=", "[", "restrict_to_sts", "]", "if", "len", "(", "restrict_to_sts", ")", ">", "0", ":", "command", ".", "append", "(", "'--restrict_to_sts'", ")", "command", ".", "append", "(", "str", "(", "','", ".", "join", "(", "restrict_to_sts", ")", ")", ")", "if", "restrict_to_sources", ":", "if", "isinstance", "(", "restrict_to_sources", ",", "str", ")", ":", "restrict_to_sources", "=", "[", "restrict_to_sources", "]", "if", "len", "(", "restrict_to_sources", ")", ">", "0", ":", "command", ".", "append", "(", "'--restrict_to_sources'", ")", "command", ".", "append", "(", "str", "(", "','", ".", "join", "(", "restrict_to_sources", ")", ")", ")", "if", "ids", "is", "not", "None", ":", "command", ".", "append", "(", "'--inputformat=sldiwi'", ")", "command", ".", "append", "(", "input_file", ".", "name", ")", "# command.append(output_file.name)", "metamap_process", "=", "subprocess", ".", "Popen", "(", "command", ",", "stdout", "=", "subprocess", ".", "PIPE", ")", "while", "metamap_process", ".", "poll", "(", ")", "is", "None", ":", "stdout", "=", "str", "(", "metamap_process", ".", "stdout", ".", "readline", "(", ")", ")", "if", "'ERROR'", "in", "stdout", ":", "metamap_process", ".", "terminate", "(", ")", "error", "=", "stdout", ".", "rstrip", "(", ")", "# print(\"input file name: {0}\".format(input_file.name))", "output_file_name", ",", "file_extension", "=", "os", ".", "path", ".", "splitext", "(", "input_file", ".", "name", ")", "output_file_name", "+=", "\".\"", "+", "\"mmi\"", "# print(\"output_file_name: {0}\".format(output_file_name))", "with", "open", "(", "output_file_name", ")", "as", "fd", ":", "output", "=", "fd", ".", "read", 
"(", ")", "# output = str(output_file.read())", "# print(\"output: {0}\".format(output))", "finally", ":", "if", "sentences", "is", "not", "None", ":", "os", ".", "remove", "(", "input_file", ".", "name", ")", "else", ":", "input_file", ".", "close", "(", ")", "# os.remove(output_file.name)", "os", ".", "remove", "(", "output_file_name", ")", "concepts", "=", "CorpusLite", ".", "load", "(", "output", ".", "splitlines", "(", ")", ")", "return", "concepts", ",", "error" ]
01f35802ad13f2eb7dff033e66e5445e8f5bc039
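A similar sketch for the MetaMapLite backend, assuming pymetamap exposes a MetaMapLite entry point analogous to MetaMap.get_instance (that factory name and the install directory are assumptions):

```python
from pymetamap import MetaMapLite  # assumption: analogous to the MetaMap entry point

mml = MetaMapLite.get_instance('/opt/public_mm_lite/')  # placeholder install dir

concepts, error = mml.extract_concepts(
    sentences=['Heart attack and high blood pressure.'],
    ids=['1'],
    restrict_to_sts=['dsyn'],  # keep only "disease or syndrome" matches
)
```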
test
_AsyncExecution.as_future
Wrap a `sqlalchemy.orm.query.Query` object into a `concurrent.futures.Future` so that it can be yielded. Parameters ---------- query : sqlalchemy.orm.query.Query SQLAlchemy query object to execute Returns ------- tornado.concurrent.Future A `Future` object wrapping the given query so that tornado can await/yield on it
tornado_sqlalchemy/__init__.py
def as_future(self, query): """Wrap a `sqlalchemy.orm.query.Query` object into a `concurrent.futures.Future` so that it can be yielded. Parameters ---------- query : sqlalchemy.orm.query.Query SQLAlchemy query object to execute Returns ------- tornado.concurrent.Future A `Future` object wrapping the given query so that tornado can await/yield on it """ # concurrent.futures.Future is not compatible with the "new style" # asyncio Future, and awaiting on such "old-style" futures does not # work. # # tornado includes a `run_in_executor` function to help with this # problem, but it's only included in version 5+. Hence, we copy a # little bit of code here to handle this incompatibility. if not self._pool: self._pool = ThreadPoolExecutor(max_workers=self._max_workers) old_future = self._pool.submit(query) new_future = Future() IOLoop.current().add_future( old_future, lambda f: chain_future(f, new_future) ) return new_future
def as_future(self, query): """Wrap a `sqlalchemy.orm.query.Query` object into a `concurrent.futures.Future` so that it can be yielded. Parameters ---------- query : sqlalchemy.orm.query.Query SQLAlchemy query object to execute Returns ------- tornado.concurrent.Future A `Future` object wrapping the given query so that tornado can await/yield on it """ # concurrent.futures.Future is not compatible with the "new style" # asyncio Future, and awaiting on such "old-style" futures does not # work. # # tornado includes a `run_in_executor` function to help with this # problem, but it's only included in version 5+. Hence, we copy a # little bit of code here to handle this incompatibility. if not self._pool: self._pool = ThreadPoolExecutor(max_workers=self._max_workers) old_future = self._pool.submit(query) new_future = Future() IOLoop.current().add_future( old_future, lambda f: chain_future(f, new_future) ) return new_future
[ "Wrap", "a", "sqlalchemy", ".", "orm", ".", "query", ".", "Query", "object", "into", "a", "concurrent", ".", "futures", ".", "Future", "so", "that", "it", "can", "be", "yielded", "." ]
siddhantgoel/tornado-sqlalchemy
python
https://github.com/siddhantgoel/tornado-sqlalchemy/blob/3e622b5a2be57b505599b98156540b52a8a5cf4e/tornado_sqlalchemy/__init__.py#L47-L80
[ "def", "as_future", "(", "self", ",", "query", ")", ":", "# concurrent.futures.Future is not compatible with the \"new style\"", "# asyncio Future, and awaiting on such \"old-style\" futures does not", "# work.", "#", "# tornado includes a `run_in_executor` function to help with this", "# problem, but it's only included in version 5+. Hence, we copy a", "# little bit of code here to handle this incompatibility.", "if", "not", "self", ".", "_pool", ":", "self", ".", "_pool", "=", "ThreadPoolExecutor", "(", "max_workers", "=", "self", ".", "_max_workers", ")", "old_future", "=", "self", ".", "_pool", ".", "submit", "(", "query", ")", "new_future", "=", "Future", "(", ")", "IOLoop", ".", "current", "(", ")", ".", "add_future", "(", "old_future", ",", "lambda", "f", ":", "chain_future", "(", "f", ",", "new_future", ")", ")", "return", "new_future" ]
3e622b5a2be57b505599b98156540b52a8a5cf4e
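A standalone sketch of the same executor-to-Future chaining that as_future performs, with a plain callable standing in for the SQLAlchemy query (assumes Tornado 5+ is installed):

```python
import asyncio
from concurrent.futures import ThreadPoolExecutor

from tornado.concurrent import Future, chain_future
from tornado.ioloop import IOLoop

pool = ThreadPoolExecutor(max_workers=2)

def as_future(callable_):
    # Same pattern as the method above: run the callable in a thread, then
    # chain the concurrent.futures.Future into an awaitable Future.
    old_future = pool.submit(callable_)
    new_future = Future()
    IOLoop.current().add_future(old_future, lambda f: chain_future(f, new_future))
    return new_future

async def main():
    result = await as_future(lambda: sum(range(1_000_000)))  # stands in for query()
    print(result)

asyncio.run(main())
```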
test
login_as
Utility function for forcing a login as specific user -- be careful about calling this carelessly :)
loginas/utils.py
def login_as(user, request, store_original_user=True): """ Utility function for forcing a login as specific user -- be careful about calling this carelessly :) """ # Save the original user pk before it is replaced in the login method original_user_pk = request.user.pk # Find a suitable backend. if not hasattr(user, "backend"): for backend in django_settings.AUTHENTICATION_BACKENDS: if not hasattr(load_backend(backend), "get_user"): continue if user == load_backend(backend).get_user(user.pk): user.backend = backend break else: raise ImproperlyConfigured("Could not found an appropriate authentication backend") # Add admin audit log entry if original_user_pk: change_message = "User {0} logged in as {1}.".format(request.user, user) LogEntry.objects.log_action( user_id=original_user_pk, content_type_id=ContentType.objects.get_for_model(user).pk, object_id=user.pk, object_repr=str(user), change_message=change_message, action_flag=CHANGE, ) # Log the user in. if not hasattr(user, "backend"): return if la_settings.UPDATE_LAST_LOGIN: login(request, user) else: with no_update_last_login(): login(request, user) # Set a flag on the session if store_original_user: messages.warning( request, la_settings.MESSAGE_LOGIN_SWITCH.format(username=user.__dict__[username_field]), extra_tags=la_settings.MESSAGE_EXTRA_TAGS, ) request.session[la_settings.USER_SESSION_FLAG] = signer.sign(original_user_pk)
def login_as(user, request, store_original_user=True): """ Utility function for forcing a login as specific user -- be careful about calling this carelessly :) """ # Save the original user pk before it is replaced in the login method original_user_pk = request.user.pk # Find a suitable backend. if not hasattr(user, "backend"): for backend in django_settings.AUTHENTICATION_BACKENDS: if not hasattr(load_backend(backend), "get_user"): continue if user == load_backend(backend).get_user(user.pk): user.backend = backend break else: raise ImproperlyConfigured("Could not found an appropriate authentication backend") # Add admin audit log entry if original_user_pk: change_message = "User {0} logged in as {1}.".format(request.user, user) LogEntry.objects.log_action( user_id=original_user_pk, content_type_id=ContentType.objects.get_for_model(user).pk, object_id=user.pk, object_repr=str(user), change_message=change_message, action_flag=CHANGE, ) # Log the user in. if not hasattr(user, "backend"): return if la_settings.UPDATE_LAST_LOGIN: login(request, user) else: with no_update_last_login(): login(request, user) # Set a flag on the session if store_original_user: messages.warning( request, la_settings.MESSAGE_LOGIN_SWITCH.format(username=user.__dict__[username_field]), extra_tags=la_settings.MESSAGE_EXTRA_TAGS, ) request.session[la_settings.USER_SESSION_FLAG] = signer.sign(original_user_pk)
[ "Utility", "function", "for", "forcing", "a", "login", "as", "specific", "user", "--", "be", "careful", "about", "calling", "this", "carelessly", ":", ")" ]
skorokithakis/django-loginas
python
https://github.com/skorokithakis/django-loginas/blob/6257857b40ed5b59e4c59a3af4b54d4856cacaf0/loginas/utils.py#L41-L91
[ "def", "login_as", "(", "user", ",", "request", ",", "store_original_user", "=", "True", ")", ":", "# Save the original user pk before it is replaced in the login method", "original_user_pk", "=", "request", ".", "user", ".", "pk", "# Find a suitable backend.", "if", "not", "hasattr", "(", "user", ",", "\"backend\"", ")", ":", "for", "backend", "in", "django_settings", ".", "AUTHENTICATION_BACKENDS", ":", "if", "not", "hasattr", "(", "load_backend", "(", "backend", ")", ",", "\"get_user\"", ")", ":", "continue", "if", "user", "==", "load_backend", "(", "backend", ")", ".", "get_user", "(", "user", ".", "pk", ")", ":", "user", ".", "backend", "=", "backend", "break", "else", ":", "raise", "ImproperlyConfigured", "(", "\"Could not found an appropriate authentication backend\"", ")", "# Add admin audit log entry", "if", "original_user_pk", ":", "change_message", "=", "\"User {0} logged in as {1}.\"", ".", "format", "(", "request", ".", "user", ",", "user", ")", "LogEntry", ".", "objects", ".", "log_action", "(", "user_id", "=", "original_user_pk", ",", "content_type_id", "=", "ContentType", ".", "objects", ".", "get_for_model", "(", "user", ")", ".", "pk", ",", "object_id", "=", "user", ".", "pk", ",", "object_repr", "=", "str", "(", "user", ")", ",", "change_message", "=", "change_message", ",", "action_flag", "=", "CHANGE", ",", ")", "# Log the user in.", "if", "not", "hasattr", "(", "user", ",", "\"backend\"", ")", ":", "return", "if", "la_settings", ".", "UPDATE_LAST_LOGIN", ":", "login", "(", "request", ",", "user", ")", "else", ":", "with", "no_update_last_login", "(", ")", ":", "login", "(", "request", ",", "user", ")", "# Set a flag on the session", "if", "store_original_user", ":", "messages", ".", "warning", "(", "request", ",", "la_settings", ".", "MESSAGE_LOGIN_SWITCH", ".", "format", "(", "username", "=", "user", ".", "__dict__", "[", "username_field", "]", ")", ",", "extra_tags", "=", "la_settings", ".", "MESSAGE_EXTRA_TAGS", ",", ")", "request", ".", "session", "[", "la_settings", ".", "USER_SESSION_FLAG", "]", "=", "signer", ".", "sign", "(", "original_user_pk", ")" ]
6257857b40ed5b59e4c59a3af4b54d4856cacaf0
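A hedged sketch of calling login_as from a custom Django view; the superuser check, URL and view name are illustrative, not part of django-loginas:

```python
from django.contrib.auth import get_user_model
from django.http import HttpResponseForbidden, HttpResponseRedirect

from loginas.utils import login_as

def impersonate(request, user_id):
    if not request.user.is_superuser:  # illustrative permission check
        return HttpResponseForbidden()
    target = get_user_model().objects.get(pk=user_id)
    login_as(target, request)  # signs and stores the original user's pk in the session
    return HttpResponseRedirect("/")
```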
test
restore_original_login
Restore an original login session, checking the signed session
loginas/utils.py
def restore_original_login(request): """ Restore an original login session, checking the signed session """ original_session = request.session.get(la_settings.USER_SESSION_FLAG) logout(request) if not original_session: return try: original_user_pk = signer.unsign( original_session, max_age=timedelta(days=la_settings.USER_SESSION_DAYS_TIMESTAMP).total_seconds() ) user = get_user_model().objects.get(pk=original_user_pk) messages.info( request, la_settings.MESSAGE_LOGIN_REVERT.format(username=user.__dict__[username_field]), extra_tags=la_settings.MESSAGE_EXTRA_TAGS, ) login_as(user, request, store_original_user=False) if la_settings.USER_SESSION_FLAG in request.session: del request.session[la_settings.USER_SESSION_FLAG] except SignatureExpired: pass
def restore_original_login(request): """ Restore an original login session, checking the signed session """ original_session = request.session.get(la_settings.USER_SESSION_FLAG) logout(request) if not original_session: return try: original_user_pk = signer.unsign( original_session, max_age=timedelta(days=la_settings.USER_SESSION_DAYS_TIMESTAMP).total_seconds() ) user = get_user_model().objects.get(pk=original_user_pk) messages.info( request, la_settings.MESSAGE_LOGIN_REVERT.format(username=user.__dict__[username_field]), extra_tags=la_settings.MESSAGE_EXTRA_TAGS, ) login_as(user, request, store_original_user=False) if la_settings.USER_SESSION_FLAG in request.session: del request.session[la_settings.USER_SESSION_FLAG] except SignatureExpired: pass
[ "Restore", "an", "original", "login", "session", "checking", "the", "signed", "session" ]
skorokithakis/django-loginas
python
https://github.com/skorokithakis/django-loginas/blob/6257857b40ed5b59e4c59a3af4b54d4856cacaf0/loginas/utils.py#L94-L118
[ "def", "restore_original_login", "(", "request", ")", ":", "original_session", "=", "request", ".", "session", ".", "get", "(", "la_settings", ".", "USER_SESSION_FLAG", ")", "logout", "(", "request", ")", "if", "not", "original_session", ":", "return", "try", ":", "original_user_pk", "=", "signer", ".", "unsign", "(", "original_session", ",", "max_age", "=", "timedelta", "(", "days", "=", "la_settings", ".", "USER_SESSION_DAYS_TIMESTAMP", ")", ".", "total_seconds", "(", ")", ")", "user", "=", "get_user_model", "(", ")", ".", "objects", ".", "get", "(", "pk", "=", "original_user_pk", ")", "messages", ".", "info", "(", "request", ",", "la_settings", ".", "MESSAGE_LOGIN_REVERT", ".", "format", "(", "username", "=", "user", ".", "__dict__", "[", "username_field", "]", ")", ",", "extra_tags", "=", "la_settings", ".", "MESSAGE_EXTRA_TAGS", ",", ")", "login_as", "(", "user", ",", "request", ",", "store_original_user", "=", "False", ")", "if", "la_settings", ".", "USER_SESSION_FLAG", "in", "request", ".", "session", ":", "del", "request", ".", "session", "[", "la_settings", ".", "USER_SESSION_FLAG", "]", "except", "SignatureExpired", ":", "pass" ]
6257857b40ed5b59e4c59a3af4b54d4856cacaf0
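A companion sketch: a view that ends the impersonation started in the previous example (again illustrative, not part of django-loginas):

```python
from django.http import HttpResponseRedirect

from loginas.utils import restore_original_login

def stop_impersonating(request):
    # Logs the current session out, then re-logs the original user if the
    # signed session value is still valid.
    restore_original_login(request)
    return HttpResponseRedirect("/")
```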
test
_load_module
Code to load create user module. Copied off django-browserid.
loginas/views.py
def _load_module(path): """Code to load create user module. Copied off django-browserid.""" i = path.rfind(".") module, attr = path[:i], path[i + 1 :] try: mod = import_module(module) except ImportError: raise ImproperlyConfigured("Error importing CAN_LOGIN_AS function: {}".format(module)) except ValueError: raise ImproperlyConfigured("Error importing CAN_LOGIN_AS" " function. Is CAN_LOGIN_AS a" " string?") try: can_login_as = getattr(mod, attr) except AttributeError: raise ImproperlyConfigured("Module {0} does not define a {1} " "function.".format(module, attr)) return can_login_as
def _load_module(path): """Code to load create user module. Copied off django-browserid.""" i = path.rfind(".") module, attr = path[:i], path[i + 1 :] try: mod = import_module(module) except ImportError: raise ImproperlyConfigured("Error importing CAN_LOGIN_AS function: {}".format(module)) except ValueError: raise ImproperlyConfigured("Error importing CAN_LOGIN_AS" " function. Is CAN_LOGIN_AS a" " string?") try: can_login_as = getattr(mod, attr) except AttributeError: raise ImproperlyConfigured("Module {0} does not define a {1} " "function.".format(module, attr)) return can_login_as
[ "Code", "to", "load", "create", "user", "module", ".", "Copied", "off", "django", "-", "browserid", "." ]
skorokithakis/django-loginas
python
https://github.com/skorokithakis/django-loginas/blob/6257857b40ed5b59e4c59a3af4b54d4856cacaf0/loginas/views.py#L27-L44
[ "def", "_load_module", "(", "path", ")", ":", "i", "=", "path", ".", "rfind", "(", "\".\"", ")", "module", ",", "attr", "=", "path", "[", ":", "i", "]", ",", "path", "[", "i", "+", "1", ":", "]", "try", ":", "mod", "=", "import_module", "(", "module", ")", "except", "ImportError", ":", "raise", "ImproperlyConfigured", "(", "\"Error importing CAN_LOGIN_AS function: {}\"", ".", "format", "(", "module", ")", ")", "except", "ValueError", ":", "raise", "ImproperlyConfigured", "(", "\"Error importing CAN_LOGIN_AS\"", "\" function. Is CAN_LOGIN_AS a\"", "\" string?\"", ")", "try", ":", "can_login_as", "=", "getattr", "(", "mod", ",", "attr", ")", "except", "AttributeError", ":", "raise", "ImproperlyConfigured", "(", "\"Module {0} does not define a {1} \"", "\"function.\"", ".", "format", "(", "module", ",", "attr", ")", ")", "return", "can_login_as" ]
6257857b40ed5b59e4c59a3af4b54d4856cacaf0
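The same split-and-getattr pattern as _load_module, reduced to a runnable sketch without the CAN_LOGIN_AS-specific error messages:

```python
from importlib import import_module

def load_attr(path):
    # "pkg.module.attr" -> the attribute object, like _load_module above.
    module, _, attr = path.rpartition(".")
    return getattr(import_module(module), attr)

dumps = load_attr("json.dumps")
print(dumps({"ok": True}))  # -> {"ok": true}
```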
test
iterate_docs
Yield each document in a Luminoso project in turn. Requires a client whose URL points to a project. If expanded=True, it will include additional fields that Luminoso added in its analysis, such as 'terms' and 'vector'. Otherwise, it will contain only the fields necessary to reconstruct the document: 'title', 'text', and 'metadata'. Shows a progress bar if progress=True.
luminoso_api/v5_download.py
def iterate_docs(client, expanded=False, progress=False): """ Yield each document in a Luminoso project in turn. Requires a client whose URL points to a project. If expanded=True, it will include additional fields that Luminoso added in its analysis, such as 'terms' and 'vector'. Otherwise, it will contain only the fields necessary to reconstruct the document: 'title', 'text', and 'metadata'. Shows a progress bar if progress=True. """ # Get total number of docs from the project record num_docs = client.get()['document_count'] progress_bar = None try: if progress: progress_bar = tqdm(desc='Downloading documents', total=num_docs) for offset in range(0, num_docs, DOCS_PER_BATCH): response = client.get('docs', offset=offset, limit=DOCS_PER_BATCH) docs = response['result'] for doc in docs: # Get the appropriate set of fields for each document if expanded: for field in UNNECESSARY_FIELDS: doc.pop(field, None) else: doc = {field: doc[field] for field in CONCISE_FIELDS} if progress: progress_bar.update() yield doc finally: if progress: progress_bar.close()
def iterate_docs(client, expanded=False, progress=False): """ Yield each document in a Luminoso project in turn. Requires a client whose URL points to a project. If expanded=True, it will include additional fields that Luminoso added in its analysis, such as 'terms' and 'vector'. Otherwise, it will contain only the fields necessary to reconstruct the document: 'title', 'text', and 'metadata'. Shows a progress bar if progress=True. """ # Get total number of docs from the project record num_docs = client.get()['document_count'] progress_bar = None try: if progress: progress_bar = tqdm(desc='Downloading documents', total=num_docs) for offset in range(0, num_docs, DOCS_PER_BATCH): response = client.get('docs', offset=offset, limit=DOCS_PER_BATCH) docs = response['result'] for doc in docs: # Get the appropriate set of fields for each document if expanded: for field in UNNECESSARY_FIELDS: doc.pop(field, None) else: doc = {field: doc[field] for field in CONCISE_FIELDS} if progress: progress_bar.update() yield doc finally: if progress: progress_bar.close()
[ "Yield", "each", "document", "in", "a", "Luminoso", "project", "in", "turn", ".", "Requires", "a", "client", "whose", "URL", "points", "to", "a", "project", "." ]
LuminosoInsight/luminoso-api-client-python
python
https://github.com/LuminosoInsight/luminoso-api-client-python/blob/3bedf2a454aee39214c11fbf556ead3eecc27881/luminoso_api/v5_download.py#L30-L67
[ "def", "iterate_docs", "(", "client", ",", "expanded", "=", "False", ",", "progress", "=", "False", ")", ":", "# Get total number of docs from the project record", "num_docs", "=", "client", ".", "get", "(", ")", "[", "'document_count'", "]", "progress_bar", "=", "None", "try", ":", "if", "progress", ":", "progress_bar", "=", "tqdm", "(", "desc", "=", "'Downloading documents'", ",", "total", "=", "num_docs", ")", "for", "offset", "in", "range", "(", "0", ",", "num_docs", ",", "DOCS_PER_BATCH", ")", ":", "response", "=", "client", ".", "get", "(", "'docs'", ",", "offset", "=", "offset", ",", "limit", "=", "DOCS_PER_BATCH", ")", "docs", "=", "response", "[", "'result'", "]", "for", "doc", "in", "docs", ":", "# Get the appropriate set of fields for each document", "if", "expanded", ":", "for", "field", "in", "UNNECESSARY_FIELDS", ":", "doc", ".", "pop", "(", "field", ",", "None", ")", "else", ":", "doc", "=", "{", "field", ":", "doc", "[", "field", "]", "for", "field", "in", "CONCISE_FIELDS", "}", "if", "progress", ":", "progress_bar", ".", "update", "(", ")", "yield", "doc", "finally", ":", "if", "progress", ":", "progress_bar", ".", "close", "(", ")" ]
3bedf2a454aee39214c11fbf556ead3eecc27881
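A hedged usage sketch for iterate_docs; the API root, token and project id are placeholders, and the connect()/client_for_path() calls follow the pattern shown in _main further down:

```python
from luminoso_api import LuminosoClient
from luminoso_api.v5_download import iterate_docs

client = LuminosoClient.connect(url='https://daylight.luminoso.com/api/v5',  # placeholder root
                                token='<api-token>')
project = client.client_for_path('projects/<project-id>')

for doc in iterate_docs(project, expanded=False, progress=True):
    print(doc['title'])
```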
test
download_docs
Given a LuminosoClient pointing to a project and a filename to write to, retrieve all its documents in batches, and write them to a JSON lines (.jsons) file with one document per line.
luminoso_api/v5_download.py
def download_docs(client, output_filename=None, expanded=False): """ Given a LuminosoClient pointing to a project and a filename to write to, retrieve all its documents in batches, and write them to a JSON lines (.jsons) file with one document per line. """ if output_filename is None: # Find a default filename to download to, based on the project name. projname = _sanitize_filename(client.get()['name']) output_filename = '{}.jsons'.format(projname) # If the file already exists, add .1, .2, ..., after the project name # to unobtrusively get a unique filename. counter = 0 while os.access(output_filename, os.F_OK): counter += 1 output_filename = '{}.{}.jsons'.format(projname, counter) print('Downloading project to {!r}'.format(output_filename)) with open(output_filename, 'w', encoding='utf-8') as out: for doc in iterate_docs(client, expanded=expanded, progress=True): print(json.dumps(doc, ensure_ascii=False), file=out)
def download_docs(client, output_filename=None, expanded=False): """ Given a LuminosoClient pointing to a project and a filename to write to, retrieve all its documents in batches, and write them to a JSON lines (.jsons) file with one document per line. """ if output_filename is None: # Find a default filename to download to, based on the project name. projname = _sanitize_filename(client.get()['name']) output_filename = '{}.jsons'.format(projname) # If the file already exists, add .1, .2, ..., after the project name # to unobtrusively get a unique filename. counter = 0 while os.access(output_filename, os.F_OK): counter += 1 output_filename = '{}.{}.jsons'.format(projname, counter) print('Downloading project to {!r}'.format(output_filename)) with open(output_filename, 'w', encoding='utf-8') as out: for doc in iterate_docs(client, expanded=expanded, progress=True): print(json.dumps(doc, ensure_ascii=False), file=out)
[ "Given", "a", "LuminosoClient", "pointing", "to", "a", "project", "and", "a", "filename", "to", "write", "to", "retrieve", "all", "its", "documents", "in", "batches", "and", "write", "them", "to", "a", "JSON", "lines", "(", ".", "jsons", ")", "file", "with", "one", "document", "per", "line", "." ]
LuminosoInsight/luminoso-api-client-python
python
https://github.com/LuminosoInsight/luminoso-api-client-python/blob/3bedf2a454aee39214c11fbf556ead3eecc27881/luminoso_api/v5_download.py#L70-L92
[ "def", "download_docs", "(", "client", ",", "output_filename", "=", "None", ",", "expanded", "=", "False", ")", ":", "if", "output_filename", "is", "None", ":", "# Find a default filename to download to, based on the project name.", "projname", "=", "_sanitize_filename", "(", "client", ".", "get", "(", ")", "[", "'name'", "]", ")", "output_filename", "=", "'{}.jsons'", ".", "format", "(", "projname", ")", "# If the file already exists, add .1, .2, ..., after the project name", "# to unobtrusively get a unique filename.", "counter", "=", "0", "while", "os", ".", "access", "(", "output_filename", ",", "os", ".", "F_OK", ")", ":", "counter", "+=", "1", "output_filename", "=", "'{}.{}.jsons'", ".", "format", "(", "projname", ",", "counter", ")", "print", "(", "'Downloading project to {!r}'", ".", "format", "(", "output_filename", ")", ")", "with", "open", "(", "output_filename", ",", "'w'", ",", "encoding", "=", "'utf-8'", ")", "as", "out", ":", "for", "doc", "in", "iterate_docs", "(", "client", ",", "expanded", "=", "expanded", ",", "progress", "=", "True", ")", ":", "print", "(", "json", ".", "dumps", "(", "doc", ",", "ensure_ascii", "=", "False", ")", ",", "file", "=", "out", ")" ]
3bedf2a454aee39214c11fbf556ead3eecc27881
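A short sketch of calling download_docs directly rather than through the lumi-download command; the project ID and output filename are placeholders, and a saved token is assumed.

from luminoso_api.v5_client import LuminosoClient
from luminoso_api.v5_download import download_docs

# Placeholder project ID and output path; omitting the filename would derive one
# from the project name, as described in the docstring above.
project = LuminosoClient.connect('/projects/abc123xyz45')
download_docs(project, 'my_project.jsons', expanded=False)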
test
_main
Handle arguments for the 'lumi-download' command.
luminoso_api/v5_download.py
def _main(argv): """ Handle arguments for the 'lumi-download' command. """ parser = argparse.ArgumentParser( description=DESCRIPTION, formatter_class=argparse.RawDescriptionHelpFormatter ) parser.add_argument( '-b', '--base-url', default=URL_BASE, help='API root url, default: %s' % URL_BASE, ) parser.add_argument( '-e', '--expanded', help="Include Luminoso's analysis of each document, such as terms and" ' document vectors', action='store_true', ) parser.add_argument('-t', '--token', help='API authentication token') parser.add_argument( '-s', '--save-token', action='store_true', help='save --token for --base-url to ~/.luminoso/tokens.json', ) parser.add_argument( 'project_id', help='The ID of the project in the Daylight API' ) parser.add_argument( 'output_file', nargs='?', default=None, help='The JSON lines (.jsons) file to write to' ) args = parser.parse_args(argv) if args.save_token: if not args.token: raise ValueError("error: no token provided") LuminosoClient.save_token(args.token, domain=urlparse(args.base_url).netloc) client = LuminosoClient.connect(url=args.base_url, token=args.token) proj_client = client.client_for_path('projects/{}'.format(args.project_id)) download_docs(proj_client, args.output_file, args.expanded)
def _main(argv): """ Handle arguments for the 'lumi-download' command. """ parser = argparse.ArgumentParser( description=DESCRIPTION, formatter_class=argparse.RawDescriptionHelpFormatter ) parser.add_argument( '-b', '--base-url', default=URL_BASE, help='API root url, default: %s' % URL_BASE, ) parser.add_argument( '-e', '--expanded', help="Include Luminoso's analysis of each document, such as terms and" ' document vectors', action='store_true', ) parser.add_argument('-t', '--token', help='API authentication token') parser.add_argument( '-s', '--save-token', action='store_true', help='save --token for --base-url to ~/.luminoso/tokens.json', ) parser.add_argument( 'project_id', help='The ID of the project in the Daylight API' ) parser.add_argument( 'output_file', nargs='?', default=None, help='The JSON lines (.jsons) file to write to' ) args = parser.parse_args(argv) if args.save_token: if not args.token: raise ValueError("error: no token provided") LuminosoClient.save_token(args.token, domain=urlparse(args.base_url).netloc) client = LuminosoClient.connect(url=args.base_url, token=args.token) proj_client = client.client_for_path('projects/{}'.format(args.project_id)) download_docs(proj_client, args.output_file, args.expanded)
[ "Handle", "arguments", "for", "the", "lumi", "-", "download", "command", "." ]
LuminosoInsight/luminoso-api-client-python
python
https://github.com/LuminosoInsight/luminoso-api-client-python/blob/3bedf2a454aee39214c11fbf556ead3eecc27881/luminoso_api/v5_download.py#L95-L138
[ "def", "_main", "(", "argv", ")", ":", "parser", "=", "argparse", ".", "ArgumentParser", "(", "description", "=", "DESCRIPTION", ",", "formatter_class", "=", "argparse", ".", "RawDescriptionHelpFormatter", ")", "parser", ".", "add_argument", "(", "'-b'", ",", "'--base-url'", ",", "default", "=", "URL_BASE", ",", "help", "=", "'API root url, default: %s'", "%", "URL_BASE", ",", ")", "parser", ".", "add_argument", "(", "'-e'", ",", "'--expanded'", ",", "help", "=", "\"Include Luminoso's analysis of each document, such as terms and\"", "' document vectors'", ",", "action", "=", "'store_true'", ",", ")", "parser", ".", "add_argument", "(", "'-t'", ",", "'--token'", ",", "help", "=", "'API authentication token'", ")", "parser", ".", "add_argument", "(", "'-s'", ",", "'--save-token'", ",", "action", "=", "'store_true'", ",", "help", "=", "'save --token for --base-url to ~/.luminoso/tokens.json'", ",", ")", "parser", ".", "add_argument", "(", "'project_id'", ",", "help", "=", "'The ID of the project in the Daylight API'", ")", "parser", ".", "add_argument", "(", "'output_file'", ",", "nargs", "=", "'?'", ",", "default", "=", "None", ",", "help", "=", "'The JSON lines (.jsons) file to write to'", ")", "args", "=", "parser", ".", "parse_args", "(", "argv", ")", "if", "args", ".", "save_token", ":", "if", "not", "args", ".", "token", ":", "raise", "ValueError", "(", "\"error: no token provided\"", ")", "LuminosoClient", ".", "save_token", "(", "args", ".", "token", ",", "domain", "=", "urlparse", "(", "args", ".", "base_url", ")", ".", "netloc", ")", "client", "=", "LuminosoClient", ".", "connect", "(", "url", "=", "args", ".", "base_url", ",", "token", "=", "args", ".", "token", ")", "proj_client", "=", "client", ".", "client_for_path", "(", "'projects/{}'", ".", "format", "(", "args", ".", "project_id", ")", ")", "download_docs", "(", "proj_client", ",", "args", ".", "output_file", ",", "args", ".", "expanded", ")" ]
3bedf2a454aee39214c11fbf556ead3eecc27881
test
transcode
Convert a JSON or CSV file of input to a JSON stream (.jsons). This kind of file can be easily uploaded using `luminoso_api.upload`.
luminoso_api/v4_json_stream.py
def transcode(input_filename, output_filename=None, date_format=None): """ Convert a JSON or CSV file of input to a JSON stream (.jsons). This kind of file can be easily uploaded using `luminoso_api.upload`. """ if output_filename is None: # transcode to standard output output = sys.stdout else: if output_filename.endswith('.json'): logger.warning("Changing .json to .jsons, because this program " "outputs a JSON stream format that is not " "technically JSON itself.") output_filename += 's' output = open(output_filename, 'w') for entry in open_json_or_csv_somehow(input_filename, date_format=date_format): output.write(json.dumps(entry, ensure_ascii=False).encode('utf-8')) output.write('\n') output.close()
def transcode(input_filename, output_filename=None, date_format=None): """ Convert a JSON or CSV file of input to a JSON stream (.jsons). This kind of file can be easily uploaded using `luminoso_api.upload`. """ if output_filename is None: # transcode to standard output output = sys.stdout else: if output_filename.endswith('.json'): logger.warning("Changing .json to .jsons, because this program " "outputs a JSON stream format that is not " "technically JSON itself.") output_filename += 's' output = open(output_filename, 'w') for entry in open_json_or_csv_somehow(input_filename, date_format=date_format): output.write(json.dumps(entry, ensure_ascii=False).encode('utf-8')) output.write('\n') output.close()
[ "Convert", "a", "JSON", "or", "CSV", "file", "of", "input", "to", "a", "JSON", "stream", "(", ".", "jsons", ")", ".", "This", "kind", "of", "file", "can", "be", "easily", "uploaded", "using", "luminoso_api", ".", "upload", "." ]
LuminosoInsight/luminoso-api-client-python
python
https://github.com/LuminosoInsight/luminoso-api-client-python/blob/3bedf2a454aee39214c11fbf556ead3eecc27881/luminoso_api/v4_json_stream.py#L36-L56
[ "def", "transcode", "(", "input_filename", ",", "output_filename", "=", "None", ",", "date_format", "=", "None", ")", ":", "if", "output_filename", "is", "None", ":", "# transcode to standard output", "output", "=", "sys", ".", "stdout", "else", ":", "if", "output_filename", ".", "endswith", "(", "'.json'", ")", ":", "logger", ".", "warning", "(", "\"Changing .json to .jsons, because this program \"", "\"outputs a JSON stream format that is not \"", "\"technically JSON itself.\"", ")", "output_filename", "+=", "'s'", "output", "=", "open", "(", "output_filename", ",", "'w'", ")", "for", "entry", "in", "open_json_or_csv_somehow", "(", "input_filename", ",", "date_format", "=", "date_format", ")", ":", "output", ".", "write", "(", "json", ".", "dumps", "(", "entry", ",", "ensure_ascii", "=", "False", ")", ".", "encode", "(", "'utf-8'", ")", ")", "output", ".", "write", "(", "'\\n'", ")", "output", ".", "close", "(", ")" ]
3bedf2a454aee39214c11fbf556ead3eecc27881
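A hedged example of transcode in use; the filenames are placeholders. Note that this v4 module writes UTF-8-encoded bytes to a file opened in text mode (and uses other Python 2 idioms elsewhere in the same file), so it appears to be written with Python 2 file semantics in mind.

from luminoso_api.v4_json_stream import transcode

# 'reviews.csv' and 'reviews.jsons' are placeholder filenames; date_format is optional.
transcode('reviews.csv', 'reviews.jsons', date_format='%Y-%m-%d')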
test
transcode_to_stream
Read a JSON or CSV file and convert it into a JSON stream, which will be saved in an anonymous temp file.
luminoso_api/v4_json_stream.py
def transcode_to_stream(input_filename, date_format=None): """ Read a JSON or CSV file and convert it into a JSON stream, which will be saved in an anonymous temp file. """ tmp = tempfile.TemporaryFile() for entry in open_json_or_csv_somehow(input_filename, date_format=date_format): tmp.write(json.dumps(entry, ensure_ascii=False).encode('utf-8')) tmp.write(b'\n') tmp.seek(0) return tmp
def transcode_to_stream(input_filename, date_format=None): """ Read a JSON or CSV file and convert it into a JSON stream, which will be saved in an anonymous temp file. """ tmp = tempfile.TemporaryFile() for entry in open_json_or_csv_somehow(input_filename, date_format=date_format): tmp.write(json.dumps(entry, ensure_ascii=False).encode('utf-8')) tmp.write(b'\n') tmp.seek(0) return tmp
[ "Read", "a", "JSON", "or", "CSV", "file", "and", "convert", "it", "into", "a", "JSON", "stream", "which", "will", "be", "saved", "in", "an", "anonymous", "temp", "file", "." ]
LuminosoInsight/luminoso-api-client-python
python
https://github.com/LuminosoInsight/luminoso-api-client-python/blob/3bedf2a454aee39214c11fbf556ead3eecc27881/luminoso_api/v4_json_stream.py#L59-L70
[ "def", "transcode_to_stream", "(", "input_filename", ",", "date_format", "=", "None", ")", ":", "tmp", "=", "tempfile", ".", "TemporaryFile", "(", ")", "for", "entry", "in", "open_json_or_csv_somehow", "(", "input_filename", ",", "date_format", "=", "date_format", ")", ":", "tmp", ".", "write", "(", "json", ".", "dumps", "(", "entry", ",", "ensure_ascii", "=", "False", ")", ".", "encode", "(", "'utf-8'", ")", ")", "tmp", ".", "write", "(", "b'\\n'", ")", "tmp", ".", "seek", "(", "0", ")", "return", "tmp" ]
3bedf2a454aee39214c11fbf556ead3eecc27881
test
open_json_or_csv_somehow
Deduce the format of a file, within reason. - If the filename ends with .csv or .txt, it's csv. - If the filename ends with .jsons, it's a JSON stream (conveniently the format we want to output). - If the filename ends with .json, it could be a legitimate JSON file, or it could be a JSON stream, following a nonstandard convention that many people including us are guilty of. In that case: - If the first line is a complete JSON document, and there is more in the file besides the first line, then it is a JSON stream. - Otherwise, it is probably really JSON. - If the filename does not end with .json, .jsons, or .csv, we have to guess whether it's still CSV or tab-separated values or something like that. If it's JSON, the first character would almost certainly have to be a bracket or a brace. If it isn't, assume it's CSV or similar.
luminoso_api/v4_json_stream.py
def open_json_or_csv_somehow(filename, date_format=None): """ Deduce the format of a file, within reason. - If the filename ends with .csv or .txt, it's csv. - If the filename ends with .jsons, it's a JSON stream (conveniently the format we want to output). - If the filename ends with .json, it could be a legitimate JSON file, or it could be a JSON stream, following a nonstandard convention that many people including us are guilty of. In that case: - If the first line is a complete JSON document, and there is more in the file besides the first line, then it is a JSON stream. - Otherwise, it is probably really JSON. - If the filename does not end with .json, .jsons, or .csv, we have to guess whether it's still CSV or tab-separated values or something like that. If it's JSON, the first character would almost certainly have to be a bracket or a brace. If it isn't, assume it's CSV or similar. """ fileformat = None if filename.endswith('.csv'): fileformat = 'csv' elif filename.endswith('.jsons'): fileformat = 'jsons' else: with open(filename) as opened: line = opened.readline() if line[0] not in '{[' and not filename.endswith('.json'): fileformat = 'csv' else: if (line.count('{') == line.count('}') and line.count('[') == line.count(']')): # This line contains a complete JSON document. This probably # means it's in linewise JSON ('.jsons') format, unless the # whole file is on one line. char = ' ' while char.isspace(): char = opened.read() if char == '': fileformat = 'json' break if fileformat is None: fileformat = 'jsons' else: fileformat = 'json' if fileformat == 'json': stream = json.load(open(filename), encoding='utf-8') elif fileformat == 'csv': stream = open_csv_somehow(filename) else: stream = stream_json_lines(filename) return _normalize_data(stream, date_format=date_format)
def open_json_or_csv_somehow(filename, date_format=None): """ Deduce the format of a file, within reason. - If the filename ends with .csv or .txt, it's csv. - If the filename ends with .jsons, it's a JSON stream (conveniently the format we want to output). - If the filename ends with .json, it could be a legitimate JSON file, or it could be a JSON stream, following a nonstandard convention that many people including us are guilty of. In that case: - If the first line is a complete JSON document, and there is more in the file besides the first line, then it is a JSON stream. - Otherwise, it is probably really JSON. - If the filename does not end with .json, .jsons, or .csv, we have to guess whether it's still CSV or tab-separated values or something like that. If it's JSON, the first character would almost certainly have to be a bracket or a brace. If it isn't, assume it's CSV or similar. """ fileformat = None if filename.endswith('.csv'): fileformat = 'csv' elif filename.endswith('.jsons'): fileformat = 'jsons' else: with open(filename) as opened: line = opened.readline() if line[0] not in '{[' and not filename.endswith('.json'): fileformat = 'csv' else: if (line.count('{') == line.count('}') and line.count('[') == line.count(']')): # This line contains a complete JSON document. This probably # means it's in linewise JSON ('.jsons') format, unless the # whole file is on one line. char = ' ' while char.isspace(): char = opened.read() if char == '': fileformat = 'json' break if fileformat is None: fileformat = 'jsons' else: fileformat = 'json' if fileformat == 'json': stream = json.load(open(filename), encoding='utf-8') elif fileformat == 'csv': stream = open_csv_somehow(filename) else: stream = stream_json_lines(filename) return _normalize_data(stream, date_format=date_format)
[ "Deduce", "the", "format", "of", "a", "file", "within", "reason", "." ]
LuminosoInsight/luminoso-api-client-python
python
https://github.com/LuminosoInsight/luminoso-api-client-python/blob/3bedf2a454aee39214c11fbf556ead3eecc27881/luminoso_api/v4_json_stream.py#L73-L125
[ "def", "open_json_or_csv_somehow", "(", "filename", ",", "date_format", "=", "None", ")", ":", "fileformat", "=", "None", "if", "filename", ".", "endswith", "(", "'.csv'", ")", ":", "fileformat", "=", "'csv'", "elif", "filename", ".", "endswith", "(", "'.jsons'", ")", ":", "fileformat", "=", "'jsons'", "else", ":", "with", "open", "(", "filename", ")", "as", "opened", ":", "line", "=", "opened", ".", "readline", "(", ")", "if", "line", "[", "0", "]", "not", "in", "'{['", "and", "not", "filename", ".", "endswith", "(", "'.json'", ")", ":", "fileformat", "=", "'csv'", "else", ":", "if", "(", "line", ".", "count", "(", "'{'", ")", "==", "line", ".", "count", "(", "'}'", ")", "and", "line", ".", "count", "(", "'['", ")", "==", "line", ".", "count", "(", "']'", ")", ")", ":", "# This line contains a complete JSON document. This probably", "# means it's in linewise JSON ('.jsons') format, unless the", "# whole file is on one line.", "char", "=", "' '", "while", "char", ".", "isspace", "(", ")", ":", "char", "=", "opened", ".", "read", "(", ")", "if", "char", "==", "''", ":", "fileformat", "=", "'json'", "break", "if", "fileformat", "is", "None", ":", "fileformat", "=", "'jsons'", "else", ":", "fileformat", "=", "'json'", "if", "fileformat", "==", "'json'", ":", "stream", "=", "json", ".", "load", "(", "open", "(", "filename", ")", ",", "encoding", "=", "'utf-8'", ")", "elif", "fileformat", "==", "'csv'", ":", "stream", "=", "open_csv_somehow", "(", "filename", ")", "else", ":", "stream", "=", "stream_json_lines", "(", "filename", ")", "return", "_normalize_data", "(", "stream", ",", "date_format", "=", "date_format", ")" ]
3bedf2a454aee39214c11fbf556ead3eecc27881
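A small sketch of the format-sniffing helper above; the filename is a placeholder. The generator yields normalized document dictionaries regardless of whether the input turned out to be CSV, JSON, or a JSON stream.

from luminoso_api.v4_json_stream import open_json_or_csv_somehow

# 'survey_responses.json' is a placeholder; the format is deduced from the extension
# and, when ambiguous, from the first line of the file.
for doc in open_json_or_csv_somehow('survey_responses.json'):
    print(doc)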
test
_normalize_data
This function is meant to normalize data for upload to the Luminoso Analytics system. Currently it only normalizes dates. If date_format is not specified, or if there's no date in a particular doc, the doc is yielded unchanged.
luminoso_api/v4_json_stream.py
def _normalize_data(stream, date_format=None): """ This function is meant to normalize data for upload to the Luminoso Analytics system. Currently it only normalizes dates. If date_format is not specified, or if there's no date in a particular doc, the the doc is yielded unchanged. """ for doc in stream: if 'date' in doc and date_format is not None: try: doc['date'] = _convert_date(doc['date'], date_format) except ValueError: # ValueErrors cover the cases when date_format does not match # the actual format of the date, both for epoch and non-epoch # times. logger.exception('%s does not match the date format %s;' % (doc['date'], date_format)) yield doc
def _normalize_data(stream, date_format=None): """ This function is meant to normalize data for upload to the Luminoso Analytics system. Currently it only normalizes dates. If date_format is not specified, or if there's no date in a particular doc, the the doc is yielded unchanged. """ for doc in stream: if 'date' in doc and date_format is not None: try: doc['date'] = _convert_date(doc['date'], date_format) except ValueError: # ValueErrors cover the cases when date_format does not match # the actual format of the date, both for epoch and non-epoch # times. logger.exception('%s does not match the date format %s;' % (doc['date'], date_format)) yield doc
[ "This", "function", "is", "meant", "to", "normalize", "data", "for", "upload", "to", "the", "Luminoso", "Analytics", "system", ".", "Currently", "it", "only", "normalizes", "dates", "." ]
LuminosoInsight/luminoso-api-client-python
python
https://github.com/LuminosoInsight/luminoso-api-client-python/blob/3bedf2a454aee39214c11fbf556ead3eecc27881/luminoso_api/v4_json_stream.py#L128-L146
[ "def", "_normalize_data", "(", "stream", ",", "date_format", "=", "None", ")", ":", "for", "doc", "in", "stream", ":", "if", "'date'", "in", "doc", "and", "date_format", "is", "not", "None", ":", "try", ":", "doc", "[", "'date'", "]", "=", "_convert_date", "(", "doc", "[", "'date'", "]", ",", "date_format", ")", "except", "ValueError", ":", "# ValueErrors cover the cases when date_format does not match", "# the actual format of the date, both for epoch and non-epoch", "# times.", "logger", ".", "exception", "(", "'%s does not match the date format %s;'", "%", "(", "doc", "[", "'date'", "]", ",", "date_format", ")", ")", "yield", "doc" ]
3bedf2a454aee39214c11fbf556ead3eecc27881
test
_convert_date
Convert a date in a given format to epoch time. Mostly a wrapper for datetime's strptime.
luminoso_api/v4_json_stream.py
def _convert_date(date_string, date_format): """ Convert a date in a given format to epoch time. Mostly a wrapper for datetime's strptime. """ if date_format != 'epoch': return datetime.strptime(date_string, date_format).timestamp() else: return float(date_string)
def _convert_date(date_string, date_format): """ Convert a date in a given format to epoch time. Mostly a wrapper for datetime's strptime. """ if date_format != 'epoch': return datetime.strptime(date_string, date_format).timestamp() else: return float(date_string)
[ "Convert", "a", "date", "in", "a", "given", "format", "to", "epoch", "time", ".", "Mostly", "a", "wrapper", "for", "datetime", "s", "strptime", "." ]
LuminosoInsight/luminoso-api-client-python
python
https://github.com/LuminosoInsight/luminoso-api-client-python/blob/3bedf2a454aee39214c11fbf556ead3eecc27881/luminoso_api/v4_json_stream.py#L149-L157
[ "def", "_convert_date", "(", "date_string", ",", "date_format", ")", ":", "if", "date_format", "!=", "'epoch'", ":", "return", "datetime", ".", "strptime", "(", "date_string", ",", "date_format", ")", ".", "timestamp", "(", ")", "else", ":", "return", "float", "(", "date_string", ")" ]
3bedf2a454aee39214c11fbf556ead3eecc27881
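A worked example of _convert_date, assuming the module-private function is imported directly. Because strptime produces a naive datetime, timestamp() interprets it in the local timezone.

from luminoso_api.v4_json_stream import _convert_date

_convert_date('2015-03-14', '%Y-%m-%d')   # epoch seconds as a float, local-time based
_convert_date('1426291200', 'epoch')      # simply cast to float: 1426291200.0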
test
detect_file_encoding
Use ftfy to detect the encoding of a file, based on a sample of its first megabyte. ftfy's encoding detector is limited. The only encodings it can detect are UTF-8, CESU-8, UTF-16, Windows-1252, and occasionally MacRoman. But it does much better than chardet.
luminoso_api/v4_json_stream.py
def detect_file_encoding(filename): """ Use ftfy to detect the encoding of a file, based on a sample of its first megabyte. ftfy's encoding detector is limited. The only encodings it can detect are UTF-8, CESU-8, UTF-16, Windows-1252, and occasionally MacRoman. But it does much better than chardet. """ with open(filename, 'rb') as opened: sample = opened.read(2 ** 20) _, encoding = ftfy.guess_bytes(sample) return encoding
def detect_file_encoding(filename): """ Use ftfy to detect the encoding of a file, based on a sample of its first megabyte. ftfy's encoding detector is limited. The only encodings it can detect are UTF-8, CESU-8, UTF-16, Windows-1252, and occasionally MacRoman. But it does much better than chardet. """ with open(filename, 'rb') as opened: sample = opened.read(2 ** 20) _, encoding = ftfy.guess_bytes(sample) return encoding
[ "Use", "ftfy", "to", "detect", "the", "encoding", "of", "a", "file", "based", "on", "a", "sample", "of", "its", "first", "megabyte", "." ]
LuminosoInsight/luminoso-api-client-python
python
https://github.com/LuminosoInsight/luminoso-api-client-python/blob/3bedf2a454aee39214c11fbf556ead3eecc27881/luminoso_api/v4_json_stream.py#L160-L172
[ "def", "detect_file_encoding", "(", "filename", ")", ":", "with", "open", "(", "filename", ",", "'rb'", ")", "as", "opened", ":", "sample", "=", "opened", ".", "read", "(", "2", "**", "20", ")", "_", ",", "encoding", "=", "ftfy", ".", "guess_bytes", "(", "sample", ")", "return", "encoding" ]
3bedf2a454aee39214c11fbf556ead3eecc27881
test
stream_json_lines
Load a JSON stream and return a generator, yielding one object at a time.
luminoso_api/v4_json_stream.py
def stream_json_lines(file): """ Load a JSON stream and return a generator, yielding one object at a time. """ if isinstance(file, string_type): file = open(file, 'rb') for line in file: line = line.strip() if line: if isinstance(line, bytes): line = line.decode('utf-8') yield json.loads(line)
def stream_json_lines(file): """ Load a JSON stream and return a generator, yielding one object at a time. """ if isinstance(file, string_type): file = open(file, 'rb') for line in file: line = line.strip() if line: if isinstance(line, bytes): line = line.decode('utf-8') yield json.loads(line)
[ "Load", "a", "JSON", "stream", "and", "return", "a", "generator", "yielding", "one", "object", "at", "a", "time", "." ]
LuminosoInsight/luminoso-api-client-python
python
https://github.com/LuminosoInsight/luminoso-api-client-python/blob/3bedf2a454aee39214c11fbf556ead3eecc27881/luminoso_api/v4_json_stream.py#L175-L186
[ "def", "stream_json_lines", "(", "file", ")", ":", "if", "isinstance", "(", "file", ",", "string_type", ")", ":", "file", "=", "open", "(", "file", ",", "'rb'", ")", "for", "line", "in", "file", ":", "line", "=", "line", ".", "strip", "(", ")", "if", "line", ":", "if", "isinstance", "(", "line", ",", "bytes", ")", ":", "line", "=", "line", ".", "decode", "(", "'utf-8'", ")", "yield", "json", ".", "loads", "(", "line", ")" ]
3bedf2a454aee39214c11fbf556ead3eecc27881
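A minimal sketch of stream_json_lines; 'docs.jsons' is a placeholder path, and an already-open binary file object would work equally well.

from luminoso_api.v4_json_stream import stream_json_lines

for obj in stream_json_lines('docs.jsons'):
    print(obj)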
test
transcode_to_utf8
Convert a file in some other encoding into a temporary file that's in UTF-8.
luminoso_api/v4_json_stream.py
def transcode_to_utf8(filename, encoding): """ Convert a file in some other encoding into a temporary file that's in UTF-8. """ tmp = tempfile.TemporaryFile() for line in io.open(filename, encoding=encoding): tmp.write(line.strip('\uFEFF').encode('utf-8')) tmp.seek(0) return tmp
def transcode_to_utf8(filename, encoding): """ Convert a file in some other encoding into a temporary file that's in UTF-8. """ tmp = tempfile.TemporaryFile() for line in io.open(filename, encoding=encoding): tmp.write(line.strip('\uFEFF').encode('utf-8')) tmp.seek(0) return tmp
[ "Convert", "a", "file", "in", "some", "other", "encoding", "into", "a", "temporary", "file", "that", "s", "in", "UTF", "-", "8", "." ]
LuminosoInsight/luminoso-api-client-python
python
https://github.com/LuminosoInsight/luminoso-api-client-python/blob/3bedf2a454aee39214c11fbf556ead3eecc27881/luminoso_api/v4_json_stream.py#L200-L210
[ "def", "transcode_to_utf8", "(", "filename", ",", "encoding", ")", ":", "tmp", "=", "tempfile", ".", "TemporaryFile", "(", ")", "for", "line", "in", "io", ".", "open", "(", "filename", ",", "encoding", "=", "encoding", ")", ":", "tmp", ".", "write", "(", "line", ".", "strip", "(", "'\\uFEFF'", ")", ".", "encode", "(", "'utf-8'", ")", ")", "tmp", ".", "seek", "(", "0", ")", "return", "tmp" ]
3bedf2a454aee39214c11fbf556ead3eecc27881
test
open_csv_somehow_py2
Open a CSV file using Python 2's CSV module, working around the deficiency where it can't handle the null bytes of UTF-16.
luminoso_api/v4_json_stream.py
def open_csv_somehow_py2(filename): """ Open a CSV file using Python 2's CSV module, working around the deficiency where it can't handle the null bytes of UTF-16. """ encoding = detect_file_encoding(filename) if encoding.startswith('UTF-16'): csvfile = transcode_to_utf8(filename, encoding) encoding = 'UTF-8' else: csvfile = open(filename, 'rU') line = csvfile.readline() csvfile.seek(0) if '\t' in line: # tab-separated reader = csv.reader(csvfile, delimiter='\t') else: reader = csv.reader(csvfile, dialect='excel') header = reader.next() header = [cell.decode(encoding).lower().strip() for cell in header] encode_fn = lambda x: x.decode(encoding, 'replace') return _read_csv(reader, header, encode_fn)
def open_csv_somehow_py2(filename): """ Open a CSV file using Python 2's CSV module, working around the deficiency where it can't handle the null bytes of UTF-16. """ encoding = detect_file_encoding(filename) if encoding.startswith('UTF-16'): csvfile = transcode_to_utf8(filename, encoding) encoding = 'UTF-8' else: csvfile = open(filename, 'rU') line = csvfile.readline() csvfile.seek(0) if '\t' in line: # tab-separated reader = csv.reader(csvfile, delimiter='\t') else: reader = csv.reader(csvfile, dialect='excel') header = reader.next() header = [cell.decode(encoding).lower().strip() for cell in header] encode_fn = lambda x: x.decode(encoding, 'replace') return _read_csv(reader, header, encode_fn)
[ "Open", "a", "CSV", "file", "using", "Python", "2", "s", "CSV", "module", "working", "around", "the", "deficiency", "where", "it", "can", "t", "handle", "the", "null", "bytes", "of", "UTF", "-", "16", "." ]
LuminosoInsight/luminoso-api-client-python
python
https://github.com/LuminosoInsight/luminoso-api-client-python/blob/3bedf2a454aee39214c11fbf556ead3eecc27881/luminoso_api/v4_json_stream.py#L213-L236
[ "def", "open_csv_somehow_py2", "(", "filename", ")", ":", "encoding", "=", "detect_file_encoding", "(", "filename", ")", "if", "encoding", ".", "startswith", "(", "'UTF-16'", ")", ":", "csvfile", "=", "transcode_to_utf8", "(", "filename", ",", "encoding", ")", "encoding", "=", "'UTF-8'", "else", ":", "csvfile", "=", "open", "(", "filename", ",", "'rU'", ")", "line", "=", "csvfile", ".", "readline", "(", ")", "csvfile", ".", "seek", "(", "0", ")", "if", "'\\t'", "in", "line", ":", "# tab-separated", "reader", "=", "csv", ".", "reader", "(", "csvfile", ",", "delimiter", "=", "'\\t'", ")", "else", ":", "reader", "=", "csv", ".", "reader", "(", "csvfile", ",", "dialect", "=", "'excel'", ")", "header", "=", "reader", ".", "next", "(", ")", "header", "=", "[", "cell", ".", "decode", "(", "encoding", ")", ".", "lower", "(", ")", ".", "strip", "(", ")", "for", "cell", "in", "header", "]", "encode_fn", "=", "lambda", "x", ":", "x", ".", "decode", "(", "encoding", ",", "'replace'", ")", "return", "_read_csv", "(", "reader", ",", "header", ",", "encode_fn", ")" ]
3bedf2a454aee39214c11fbf556ead3eecc27881
test
_read_csv
Given a constructed CSV reader object, a header row that we've read, and a detected encoding, yield its rows as dictionaries.
luminoso_api/v4_json_stream.py
def _read_csv(reader, header, encode_fn): """ Given a constructed CSV reader object, a header row that we've read, and a detected encoding, yield its rows as dictionaries. """ for row in reader: if len(row) == 0: continue row = [encode_fn(cell) for cell in row] row_list = zip(header, row) row_dict = dict(row_list) if len(row_dict['text']) == 0: continue row_dict['text'] = unicodedata.normalize( 'NFKC', row_dict['text'].strip() ) if row_dict.get('title') == '': del row_dict['title'] if 'date' in row_dict: # We handle dates further in open_json_or_csv_somehow if row_dict['date'] == '': del row_dict['date'] if 'subset' in row_dict: subsets = [cell[1] for cell in row_list if cell[1] != '' and cell[0] == 'subset'] if subsets: row_dict['subsets'] = subsets if 'subset' in row_dict: del row_dict['subset'] yield row_dict
def _read_csv(reader, header, encode_fn): """ Given a constructed CSV reader object, a header row that we've read, and a detected encoding, yield its rows as dictionaries. """ for row in reader: if len(row) == 0: continue row = [encode_fn(cell) for cell in row] row_list = zip(header, row) row_dict = dict(row_list) if len(row_dict['text']) == 0: continue row_dict['text'] = unicodedata.normalize( 'NFKC', row_dict['text'].strip() ) if row_dict.get('title') == '': del row_dict['title'] if 'date' in row_dict: # We handle dates further in open_json_or_csv_somehow if row_dict['date'] == '': del row_dict['date'] if 'subset' in row_dict: subsets = [cell[1] for cell in row_list if cell[1] != '' and cell[0] == 'subset'] if subsets: row_dict['subsets'] = subsets if 'subset' in row_dict: del row_dict['subset'] yield row_dict
[ "Given", "a", "constructed", "CSV", "reader", "object", "a", "header", "row", "that", "we", "ve", "read", "and", "a", "detected", "encoding", "yield", "its", "rows", "as", "dictionaries", "." ]
LuminosoInsight/luminoso-api-client-python
python
https://github.com/LuminosoInsight/luminoso-api-client-python/blob/3bedf2a454aee39214c11fbf556ead3eecc27881/luminoso_api/v4_json_stream.py#L257-L286
[ "def", "_read_csv", "(", "reader", ",", "header", ",", "encode_fn", ")", ":", "for", "row", "in", "reader", ":", "if", "len", "(", "row", ")", "==", "0", ":", "continue", "row", "=", "[", "encode_fn", "(", "cell", ")", "for", "cell", "in", "row", "]", "row_list", "=", "zip", "(", "header", ",", "row", ")", "row_dict", "=", "dict", "(", "row_list", ")", "if", "len", "(", "row_dict", "[", "'text'", "]", ")", "==", "0", ":", "continue", "row_dict", "[", "'text'", "]", "=", "unicodedata", ".", "normalize", "(", "'NFKC'", ",", "row_dict", "[", "'text'", "]", ".", "strip", "(", ")", ")", "if", "row_dict", ".", "get", "(", "'title'", ")", "==", "''", ":", "del", "row_dict", "[", "'title'", "]", "if", "'date'", "in", "row_dict", ":", "# We handle dates further in open_json_or_csv_somehow", "if", "row_dict", "[", "'date'", "]", "==", "''", ":", "del", "row_dict", "[", "'date'", "]", "if", "'subset'", "in", "row_dict", ":", "subsets", "=", "[", "cell", "[", "1", "]", "for", "cell", "in", "row_list", "if", "cell", "[", "1", "]", "!=", "''", "and", "cell", "[", "0", "]", "==", "'subset'", "]", "if", "subsets", ":", "row_dict", "[", "'subsets'", "]", "=", "subsets", "if", "'subset'", "in", "row_dict", ":", "del", "row_dict", "[", "'subset'", "]", "yield", "row_dict" ]
3bedf2a454aee39214c11fbf556ead3eecc27881
test
main
Handle command line arguments to convert a file to a JSON stream as a script.
luminoso_api/v4_json_stream.py
def main(): """ Handle command line arguments to convert a file to a JSON stream as a script. """ logging.basicConfig(level=logging.INFO) import argparse parser = argparse.ArgumentParser( description="Translate CSV or JSON input to a JSON stream, or verify " "something that is already a JSON stream." ) parser.add_argument('input', help='A CSV, JSON, or JSON stream file to read.') parser.add_argument('output', nargs='?', default=None, help="The filename to output to. Recommended extension is .jsons. " "If omitted, use standard output.") args = parser.parse_args() transcode(args.input, args.output)
def main(): """ Handle command line arguments to convert a file to a JSON stream as a script. """ logging.basicConfig(level=logging.INFO) import argparse parser = argparse.ArgumentParser( description="Translate CSV or JSON input to a JSON stream, or verify " "something that is already a JSON stream." ) parser.add_argument('input', help='A CSV, JSON, or JSON stream file to read.') parser.add_argument('output', nargs='?', default=None, help="The filename to output to. Recommended extension is .jsons. " "If omitted, use standard output.") args = parser.parse_args() transcode(args.input, args.output)
[ "Handle", "command", "line", "arguments", "to", "convert", "a", "file", "to", "a", "JSON", "stream", "as", "a", "script", "." ]
LuminosoInsight/luminoso-api-client-python
python
https://github.com/LuminosoInsight/luminoso-api-client-python/blob/3bedf2a454aee39214c11fbf556ead3eecc27881/luminoso_api/v4_json_stream.py#L289-L306
[ "def", "main", "(", ")", ":", "logging", ".", "basicConfig", "(", "level", "=", "logging", ".", "INFO", ")", "import", "argparse", "parser", "=", "argparse", ".", "ArgumentParser", "(", "description", "=", "\"Translate CSV or JSON input to a JSON stream, or verify \"", "\"something that is already a JSON stream.\"", ")", "parser", ".", "add_argument", "(", "'input'", ",", "help", "=", "'A CSV, JSON, or JSON stream file to read.'", ")", "parser", ".", "add_argument", "(", "'output'", ",", "nargs", "=", "'?'", ",", "default", "=", "None", ",", "help", "=", "\"The filename to output to. Recommended extension is .jsons. \"", "\"If omitted, use standard output.\"", ")", "args", "=", "parser", ".", "parse_args", "(", ")", "transcode", "(", "args", ".", "input", ",", "args", ".", "output", ")" ]
3bedf2a454aee39214c11fbf556ead3eecc27881
test
jsonify_parameters
When sent in an authorized REST request, only strings and integers can be transmitted accurately. Other types of data need to be encoded into JSON.
luminoso_api/v5_client.py
def jsonify_parameters(params): """ When sent in an authorized REST request, only strings and integers can be transmitted accurately. Other types of data need to be encoded into JSON. """ result = {} for param, value in params.items(): if isinstance(value, (int, str)): result[param] = value else: result[param] = json.dumps(value) return result
def jsonify_parameters(params): """ When sent in an authorized REST request, only strings and integers can be transmitted accurately. Other types of data need to be encoded into JSON. """ result = {} for param, value in params.items(): if isinstance(value, (int, str)): result[param] = value else: result[param] = json.dumps(value) return result
[ "When", "sent", "in", "an", "authorized", "REST", "request", "only", "strings", "and", "integers", "can", "be", "transmitted", "accurately", ".", "Other", "types", "of", "data", "need", "to", "be", "encoded", "into", "JSON", "." ]
LuminosoInsight/luminoso-api-client-python
python
https://github.com/LuminosoInsight/luminoso-api-client-python/blob/3bedf2a454aee39214c11fbf556ead3eecc27881/luminoso_api/v5_client.py#L401-L412
[ "def", "jsonify_parameters", "(", "params", ")", ":", "result", "=", "{", "}", "for", "param", ",", "value", "in", "params", ".", "items", "(", ")", ":", "if", "isinstance", "(", "value", ",", "(", "int", ",", "str", ")", ")", ":", "result", "[", "param", "]", "=", "value", "else", ":", "result", "[", "param", "]", "=", "json", ".", "dumps", "(", "value", ")", "return", "result" ]
3bedf2a454aee39214c11fbf556ead3eecc27881
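A small illustration of jsonify_parameters; the parameter names are arbitrary and only show which values pass through untouched and which get JSON-encoded.

from luminoso_api.v5_client import jsonify_parameters

jsonify_parameters({'limit': 10, 'fields': ['text', 'title']})
# -> {'limit': 10, 'fields': '["text", "title"]'}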
test
LuminosoClient.connect
Returns an object that makes requests to the API, authenticated with a saved or specified long-lived token, at URLs beginning with `url`. If no URL is specified, or if the specified URL is a path such as '/projects' without a scheme and domain, the client will default to https://analytics.luminoso.com/api/v5/. If neither token nor token_file are specified, the client will look for a token in $HOME/.luminoso/tokens.json. The file should contain a single json dictionary of the format `{'root_url': 'token', 'root_url2': 'token2', ...}`.
luminoso_api/v5_client.py
def connect(cls, url=None, token_file=None, token=None): """ Returns an object that makes requests to the API, authenticated with a saved or specified long-lived token, at URLs beginning with `url`. If no URL is specified, or if the specified URL is a path such as '/projects' without a scheme and domain, the client will default to https://analytics.luminoso.com/api/v5/. If neither token nor token_file are specified, the client will look for a token in $HOME/.luminoso/tokens.json. The file should contain a single json dictionary of the format `{'root_url': 'token', 'root_url2': 'token2', ...}`. """ if url is None: url = '/' if url.startswith('http'): root_url = get_root_url(url) else: url = URL_BASE + '/' + url.lstrip('/') root_url = URL_BASE if token is None: token_file = token_file or get_token_filename() try: with open(token_file) as tf: token_dict = json.load(tf) except FileNotFoundError: raise LuminosoAuthError('No token file at %s' % token_file) try: token = token_dict[urlparse(root_url).netloc] except KeyError: raise LuminosoAuthError('No token stored for %s' % root_url) session = requests.session() session.auth = _TokenAuth(token) return cls(session, url)
def connect(cls, url=None, token_file=None, token=None): """ Returns an object that makes requests to the API, authenticated with a saved or specified long-lived token, at URLs beginning with `url`. If no URL is specified, or if the specified URL is a path such as '/projects' without a scheme and domain, the client will default to https://analytics.luminoso.com/api/v5/. If neither token nor token_file are specified, the client will look for a token in $HOME/.luminoso/tokens.json. The file should contain a single json dictionary of the format `{'root_url': 'token', 'root_url2': 'token2', ...}`. """ if url is None: url = '/' if url.startswith('http'): root_url = get_root_url(url) else: url = URL_BASE + '/' + url.lstrip('/') root_url = URL_BASE if token is None: token_file = token_file or get_token_filename() try: with open(token_file) as tf: token_dict = json.load(tf) except FileNotFoundError: raise LuminosoAuthError('No token file at %s' % token_file) try: token = token_dict[urlparse(root_url).netloc] except KeyError: raise LuminosoAuthError('No token stored for %s' % root_url) session = requests.session() session.auth = _TokenAuth(token) return cls(session, url)
[ "Returns", "an", "object", "that", "makes", "requests", "to", "the", "API", "authenticated", "with", "a", "saved", "or", "specified", "long", "-", "lived", "token", "at", "URLs", "beginning", "with", "url", "." ]
LuminosoInsight/luminoso-api-client-python
python
https://github.com/LuminosoInsight/luminoso-api-client-python/blob/3bedf2a454aee39214c11fbf556ead3eecc27881/luminoso_api/v5_client.py#L64-L102
[ "def", "connect", "(", "cls", ",", "url", "=", "None", ",", "token_file", "=", "None", ",", "token", "=", "None", ")", ":", "if", "url", "is", "None", ":", "url", "=", "'/'", "if", "url", ".", "startswith", "(", "'http'", ")", ":", "root_url", "=", "get_root_url", "(", "url", ")", "else", ":", "url", "=", "URL_BASE", "+", "'/'", "+", "url", ".", "lstrip", "(", "'/'", ")", "root_url", "=", "URL_BASE", "if", "token", "is", "None", ":", "token_file", "=", "token_file", "or", "get_token_filename", "(", ")", "try", ":", "with", "open", "(", "token_file", ")", "as", "tf", ":", "token_dict", "=", "json", ".", "load", "(", "tf", ")", "except", "FileNotFoundError", ":", "raise", "LuminosoAuthError", "(", "'No token file at %s'", "%", "token_file", ")", "try", ":", "token", "=", "token_dict", "[", "urlparse", "(", "root_url", ")", ".", "netloc", "]", "except", "KeyError", ":", "raise", "LuminosoAuthError", "(", "'No token stored for %s'", "%", "root_url", ")", "session", "=", "requests", ".", "session", "(", ")", "session", ".", "auth", "=", "_TokenAuth", "(", "token", ")", "return", "cls", "(", "session", ",", "url", ")" ]
3bedf2a454aee39214c11fbf556ead3eecc27881
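A minimal sketch of connect in both of its common forms, assuming a token has already been saved to ~/.luminoso/tokens.json; the project ID is a placeholder.

from luminoso_api.v5_client import LuminosoClient

root = LuminosoClient.connect()                            # API root, default URL
project = LuminosoClient.connect('/projects/abc123xyz45')  # project-scoped client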
test
LuminosoClient.save_token
Take a long-lived API token and store it to a local file. Long-lived tokens can be retrieved through the UI. Optional arguments are the domain for which the token is valid and the file in which to store the token.
luminoso_api/v5_client.py
def save_token(token, domain='analytics.luminoso.com', token_file=None): """ Take a long-lived API token and store it to a local file. Long-lived tokens can be retrieved through the UI. Optional arguments are the domain for which the token is valid and the file in which to store the token. """ token_file = token_file or get_token_filename() if os.path.exists(token_file): saved_tokens = json.load(open(token_file)) else: saved_tokens = {} saved_tokens[domain] = token directory, filename = os.path.split(token_file) if directory and not os.path.exists(directory): os.makedirs(directory) with open(token_file, 'w') as f: json.dump(saved_tokens, f)
def save_token(token, domain='analytics.luminoso.com', token_file=None): """ Take a long-lived API token and store it to a local file. Long-lived tokens can be retrieved through the UI. Optional arguments are the domain for which the token is valid and the file in which to store the token. """ token_file = token_file or get_token_filename() if os.path.exists(token_file): saved_tokens = json.load(open(token_file)) else: saved_tokens = {} saved_tokens[domain] = token directory, filename = os.path.split(token_file) if directory and not os.path.exists(directory): os.makedirs(directory) with open(token_file, 'w') as f: json.dump(saved_tokens, f)
[ "Take", "a", "long", "-", "lived", "API", "token", "and", "store", "it", "to", "a", "local", "file", ".", "Long", "-", "lived", "tokens", "can", "be", "retrieved", "through", "the", "UI", ".", "Optional", "arguments", "are", "the", "domain", "for", "which", "the", "token", "is", "valid", "and", "the", "file", "in", "which", "to", "store", "the", "token", "." ]
LuminosoInsight/luminoso-api-client-python
python
https://github.com/LuminosoInsight/luminoso-api-client-python/blob/3bedf2a454aee39214c11fbf556ead3eecc27881/luminoso_api/v5_client.py#L105-L122
[ "def", "save_token", "(", "token", ",", "domain", "=", "'analytics.luminoso.com'", ",", "token_file", "=", "None", ")", ":", "token_file", "=", "token_file", "or", "get_token_filename", "(", ")", "if", "os", ".", "path", ".", "exists", "(", "token_file", ")", ":", "saved_tokens", "=", "json", ".", "load", "(", "open", "(", "token_file", ")", ")", "else", ":", "saved_tokens", "=", "{", "}", "saved_tokens", "[", "domain", "]", "=", "token", "directory", ",", "filename", "=", "os", ".", "path", ".", "split", "(", "token_file", ")", "if", "directory", "and", "not", "os", ".", "path", ".", "exists", "(", "directory", ")", ":", "os", ".", "makedirs", "(", "directory", ")", "with", "open", "(", "token_file", ",", "'w'", ")", "as", "f", ":", "json", ".", "dump", "(", "saved_tokens", ",", "f", ")" ]
3bedf2a454aee39214c11fbf556ead3eecc27881
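A one-time setup sketch using save_token; the token value is a placeholder obtained from the UI, and the default domain of analytics.luminoso.com is assumed.

from luminoso_api.v5_client import LuminosoClient

LuminosoClient.save_token('MY_LONG_LIVED_TOKEN')  # placeholder token
client = LuminosoClient.connect()                 # now finds the saved token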
test
LuminosoClient.connect_with_username_and_password
Returns an object that makes requests to the API, authenticated with a short-lived token retrieved from username and password. If username or password is not supplied, the method will prompt for a username and/or password to be entered interactively. See the connect method for more details about the `url` argument. PLEASE NOTE: This method is being provided as a temporary measure. We strongly encourage users of the Luminoso API to use a long-lived token instead, as explained in the V5_README file.
luminoso_api/v5_client.py
def connect_with_username_and_password(cls, url=None, username=None, password=None): """ Returns an object that makes requests to the API, authenticated with a short-lived token retrieved from username and password. If username or password is not supplied, the method will prompt for a username and/or password to be entered interactively. See the connect method for more details about the `url` argument. PLEASE NOTE: This method is being provided as a temporary measure. We strongly encourage users of the Luminoso API to use a long-lived token instead, as explained in the V5_README file. """ from .v4_client import LuminosoClient as v4LC if username is None: username = input('Username: ') v4client = v4LC.connect(url=url, username=username, password=password) if url is None: url = '/' if url.startswith('http'): root_url = get_root_url(url) else: url = URL_BASE + '/' + url.lstrip('/') root_url = URL_BASE return cls(v4client.session, root_url)
def connect_with_username_and_password(cls, url=None, username=None, password=None): """ Returns an object that makes requests to the API, authenticated with a short-lived token retrieved from username and password. If username or password is not supplied, the method will prompt for a username and/or password to be entered interactively. See the connect method for more details about the `url` argument. PLEASE NOTE: This method is being provided as a temporary measure. We strongly encourage users of the Luminoso API to use a long-lived token instead, as explained in the V5_README file. """ from .v4_client import LuminosoClient as v4LC if username is None: username = input('Username: ') v4client = v4LC.connect(url=url, username=username, password=password) if url is None: url = '/' if url.startswith('http'): root_url = get_root_url(url) else: url = URL_BASE + '/' + url.lstrip('/') root_url = URL_BASE return cls(v4client.session, root_url)
[ "Returns", "an", "object", "that", "makes", "requests", "to", "the", "API", "authenticated", "with", "a", "short", "-", "lived", "token", "retrieved", "from", "username", "and", "password", ".", "If", "username", "or", "password", "is", "not", "supplied", "the", "method", "will", "prompt", "for", "a", "username", "and", "/", "or", "password", "to", "be", "entered", "interactively", "." ]
LuminosoInsight/luminoso-api-client-python
python
https://github.com/LuminosoInsight/luminoso-api-client-python/blob/3bedf2a454aee39214c11fbf556ead3eecc27881/luminoso_api/v5_client.py#L125-L153
[ "def", "connect_with_username_and_password", "(", "cls", ",", "url", "=", "None", ",", "username", "=", "None", ",", "password", "=", "None", ")", ":", "from", ".", "v4_client", "import", "LuminosoClient", "as", "v4LC", "if", "username", "is", "None", ":", "username", "=", "input", "(", "'Username: '", ")", "v4client", "=", "v4LC", ".", "connect", "(", "url", "=", "url", ",", "username", "=", "username", ",", "password", "=", "password", ")", "if", "url", "is", "None", ":", "url", "=", "'/'", "if", "url", ".", "startswith", "(", "'http'", ")", ":", "root_url", "=", "get_root_url", "(", "url", ")", "else", ":", "url", "=", "URL_BASE", "+", "'/'", "+", "url", ".", "lstrip", "(", "'/'", ")", "root_url", "=", "URL_BASE", "return", "cls", "(", "v4client", ".", "session", ",", "root_url", ")" ]
3bedf2a454aee39214c11fbf556ead3eecc27881
test
LuminosoClient._request
Make a request via the `requests` module. If the result has an HTTP error status, convert that to a Python exception.
luminoso_api/v5_client.py
def _request(self, req_type, url, **kwargs): """ Make a request via the `requests` module. If the result has an HTTP error status, convert that to a Python exception. """ logger.debug('%s %s' % (req_type, url)) result = self.session.request(req_type, url, **kwargs) try: result.raise_for_status() except requests.HTTPError: error = result.text try: error = json.loads(error) except ValueError: pass if result.status_code in (401, 403): error_class = LuminosoAuthError elif result.status_code in (400, 404, 405): error_class = LuminosoClientError elif result.status_code >= 500: error_class = LuminosoServerError else: error_class = LuminosoError raise error_class(error) return result
def _request(self, req_type, url, **kwargs): """ Make a request via the `requests` module. If the result has an HTTP error status, convert that to a Python exception. """ logger.debug('%s %s' % (req_type, url)) result = self.session.request(req_type, url, **kwargs) try: result.raise_for_status() except requests.HTTPError: error = result.text try: error = json.loads(error) except ValueError: pass if result.status_code in (401, 403): error_class = LuminosoAuthError elif result.status_code in (400, 404, 405): error_class = LuminosoClientError elif result.status_code >= 500: error_class = LuminosoServerError else: error_class = LuminosoError raise error_class(error) return result
[ "Make", "a", "request", "via", "the", "requests", "module", ".", "If", "the", "result", "has", "an", "HTTP", "error", "status", "convert", "that", "to", "a", "Python", "exception", "." ]
LuminosoInsight/luminoso-api-client-python
python
https://github.com/LuminosoInsight/luminoso-api-client-python/blob/3bedf2a454aee39214c11fbf556ead3eecc27881/luminoso_api/v5_client.py#L155-L179
[ "def", "_request", "(", "self", ",", "req_type", ",", "url", ",", "*", "*", "kwargs", ")", ":", "logger", ".", "debug", "(", "'%s %s'", "%", "(", "req_type", ",", "url", ")", ")", "result", "=", "self", ".", "session", ".", "request", "(", "req_type", ",", "url", ",", "*", "*", "kwargs", ")", "try", ":", "result", ".", "raise_for_status", "(", ")", "except", "requests", ".", "HTTPError", ":", "error", "=", "result", ".", "text", "try", ":", "error", "=", "json", ".", "loads", "(", "error", ")", "except", "ValueError", ":", "pass", "if", "result", ".", "status_code", "in", "(", "401", ",", "403", ")", ":", "error_class", "=", "LuminosoAuthError", "elif", "result", ".", "status_code", "in", "(", "400", ",", "404", ",", "405", ")", ":", "error_class", "=", "LuminosoClientError", "elif", "result", ".", "status_code", ">=", "500", ":", "error_class", "=", "LuminosoServerError", "else", ":", "error_class", "=", "LuminosoError", "raise", "error_class", "(", "error", ")", "return", "result" ]
3bedf2a454aee39214c11fbf556ead3eecc27881
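A sketch of how the error mapping in _request surfaces to callers: a 404 from the API is raised as LuminosoClientError. The endpoint name here is deliberately bogus and the project ID is a placeholder.

from luminoso_api.v5_client import LuminosoClient, LuminosoClientError

client = LuminosoClient.connect('/projects/abc123xyz45')
try:
    client.get('no-such-endpoint')
except LuminosoClientError as err:
    print('Request rejected:', err)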
test
LuminosoClient.post
Make a POST request to the given path, and return the JSON-decoded result. Keyword parameters will be converted to form values, sent in the body of the POST. POST requests are requests that cause a change on the server, especially those that ask to create and return an object of some kind.
luminoso_api/v5_client.py
def post(self, path='', **params): """ Make a POST request to the given path, and return the JSON-decoded result. Keyword parameters will be converted to form values, sent in the body of the POST. POST requests are requests that cause a change on the server, especially those that ask to create and return an object of some kind. """ url = ensure_trailing_slash(self.url + path.lstrip('/')) return self._json_request('post', url, data=json.dumps(params), headers={'Content-Type': 'application/json'})
def post(self, path='', **params): """ Make a POST request to the given path, and return the JSON-decoded result. Keyword parameters will be converted to form values, sent in the body of the POST. POST requests are requests that cause a change on the server, especially those that ask to create and return an object of some kind. """ url = ensure_trailing_slash(self.url + path.lstrip('/')) return self._json_request('post', url, data=json.dumps(params), headers={'Content-Type': 'application/json'})
[ "Make", "a", "POST", "request", "to", "the", "given", "path", "and", "return", "the", "JSON", "-", "decoded", "result", "." ]
LuminosoInsight/luminoso-api-client-python
python
https://github.com/LuminosoInsight/luminoso-api-client-python/blob/3bedf2a454aee39214c11fbf556ead3eecc27881/luminoso_api/v5_client.py#L213-L226
[ "def", "post", "(", "self", ",", "path", "=", "''", ",", "*", "*", "params", ")", ":", "url", "=", "ensure_trailing_slash", "(", "self", ".", "url", "+", "path", ".", "lstrip", "(", "'/'", ")", ")", "return", "self", ".", "_json_request", "(", "'post'", ",", "url", ",", "data", "=", "json", ".", "dumps", "(", "params", ")", ",", "headers", "=", "{", "'Content-Type'", ":", "'application/json'", "}", ")" ]
3bedf2a454aee39214c11fbf556ead3eecc27881
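A hypothetical POST sketch that mirrors the deprecated upload() alias shown later in this section: keyword arguments are JSON-encoded into the request body. The document fields are illustrative, not a statement of the API's schema.

from luminoso_api.v5_client import LuminosoClient

project = LuminosoClient.connect('/projects/abc123xyz45')  # placeholder project ID
project.post('docs', docs=[{'text': 'Hello world', 'title': 'Example'}])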
test
LuminosoClient.delete
Make a DELETE request to the given path, and return the JSON-decoded result. Keyword parameters will be converted to URL parameters. DELETE requests ask to delete the object represented by this URL.
luminoso_api/v5_client.py
def delete(self, path='', **params): """ Make a DELETE request to the given path, and return the JSON-decoded result. Keyword parameters will be converted to URL parameters. DELETE requests ask to delete the object represented by this URL. """ params = jsonify_parameters(params) url = ensure_trailing_slash(self.url + path.lstrip('/')) return self._json_request('delete', url, params=params)
def delete(self, path='', **params): """ Make a DELETE request to the given path, and return the JSON-decoded result. Keyword parameters will be converted to URL parameters. DELETE requests ask to delete the object represented by this URL. """ params = jsonify_parameters(params) url = ensure_trailing_slash(self.url + path.lstrip('/')) return self._json_request('delete', url, params=params)
[ "Make", "a", "DELETE", "request", "to", "the", "given", "path", "and", "return", "the", "JSON", "-", "decoded", "result", "." ]
LuminosoInsight/luminoso-api-client-python
python
https://github.com/LuminosoInsight/luminoso-api-client-python/blob/3bedf2a454aee39214c11fbf556ead3eecc27881/luminoso_api/v5_client.py#L258-L269
[ "def", "delete", "(", "self", ",", "path", "=", "''", ",", "*", "*", "params", ")", ":", "params", "=", "jsonify_parameters", "(", "params", ")", "url", "=", "ensure_trailing_slash", "(", "self", ".", "url", "+", "path", ".", "lstrip", "(", "'/'", ")", ")", "return", "self", ".", "_json_request", "(", "'delete'", ",", "url", ",", "params", "=", "params", ")" ]
3bedf2a454aee39214c11fbf556ead3eecc27881
test
LuminosoClient.client_for_path
Returns a new client with the same root URL and authentication, but a different specific URL. For instance, if you have a client pointed at https://analytics.luminoso.com/api/v5/, and you want new ones for Project A and Project B, you would call: client_a = client.client_for_path('projects/<project_id_a>') client_b = client.client_for_path('projects/<project_id_b>') and your base client would remain unchanged. Paths with leading slashes are appended to the root url; otherwise, paths are set relative to the current path.
luminoso_api/v5_client.py
def client_for_path(self, path): """ Returns a new client with the same root URL and authentication, but a different specific URL. For instance, if you have a client pointed at https://analytics.luminoso.com/api/v5/, and you want new ones for Project A and Project B, you would call: client_a = client.client_for_path('projects/<project_id_a>') client_b = client.client_for_path('projects/<project_id_b>') and your base client would remian unchanged. Paths with leading slashes are appended to the root url; otherwise, paths are set relative to the current path. """ if path.startswith('/'): url = self.root_url + path else: url = self.url + path return self.__class__(self.session, url)
def client_for_path(self, path): """ Returns a new client with the same root URL and authentication, but a different specific URL. For instance, if you have a client pointed at https://analytics.luminoso.com/api/v5/, and you want new ones for Project A and Project B, you would call: client_a = client.client_for_path('projects/<project_id_a>') client_b = client.client_for_path('projects/<project_id_b>') and your base client would remian unchanged. Paths with leading slashes are appended to the root url; otherwise, paths are set relative to the current path. """ if path.startswith('/'): url = self.root_url + path else: url = self.url + path return self.__class__(self.session, url)
[ "Returns", "a", "new", "client", "with", "the", "same", "root", "URL", "and", "authentication", "but", "a", "different", "specific", "URL", ".", "For", "instance", "if", "you", "have", "a", "client", "pointed", "at", "https", ":", "//", "analytics", ".", "luminoso", ".", "com", "/", "api", "/", "v5", "/", "and", "you", "want", "new", "ones", "for", "Project", "A", "and", "Project", "B", "you", "would", "call", ":" ]
LuminosoInsight/luminoso-api-client-python
python
https://github.com/LuminosoInsight/luminoso-api-client-python/blob/3bedf2a454aee39214c11fbf556ead3eecc27881/luminoso_api/v5_client.py#L272-L291
[ "def", "client_for_path", "(", "self", ",", "path", ")", ":", "if", "path", ".", "startswith", "(", "'/'", ")", ":", "url", "=", "self", ".", "root_url", "+", "path", "else", ":", "url", "=", "self", ".", "url", "+", "path", "return", "self", ".", "__class__", "(", "self", ".", "session", ",", "url", ")" ]
3bedf2a454aee39214c11fbf556ead3eecc27881
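The docstring's own example, written out as a runnable sketch; `client` is assumed to be a v5 LuminosoClient connected as in the delete sketch above, and the project IDs are placeholders.

client_a = client.client_for_path('projects/<project_id_a>')   # relative: appended to the current URL
client_b = client.client_for_path('projects/<project_id_b>')
projects = client.client_for_path('/projects')                 # leading slash: relative to root_url
# `client` itself is unchanged; each call returns a new, independent client on the same session.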
test
LuminosoClient.upload
A deprecated alias for post(path, docs=docs), included only for backward compatibility.
luminoso_api/v5_client.py
def upload(self, path, docs, **params): """ A deprecated alias for post(path, docs=docs), included only for backward compatibility. """ logger.warning('The upload method is deprecated; use post instead.') return self.post(path, docs=docs)
def upload(self, path, docs, **params): """ A deprecated alias for post(path, docs=docs), included only for backward compatibility. """ logger.warning('The upload method is deprecated; use post instead.') return self.post(path, docs=docs)
[ "A", "deprecated", "alias", "for", "post", "(", "path", "docs", "=", "docs", ")", "included", "only", "for", "backward", "compatibility", "." ]
LuminosoInsight/luminoso-api-client-python
python
https://github.com/LuminosoInsight/luminoso-api-client-python/blob/3bedf2a454aee39214c11fbf556ead3eecc27881/luminoso_api/v5_client.py#L302-L308
[ "def", "upload", "(", "self", ",", "path", ",", "docs", ",", "*", "*", "params", ")", ":", "logger", ".", "warning", "(", "'The upload method is deprecated; use post instead.'", ")", "return", "self", ".", "post", "(", "path", ",", "docs", "=", "docs", ")" ]
3bedf2a454aee39214c11fbf556ead3eecc27881
test
LuminosoClient.wait_for_build
A convenience method designed to inform you when a project build has completed. It polls the API every `interval` seconds until no build is running. At that point, it returns the "last_build_info" field of the project record if the build succeeded, and raises a LuminosoError with the field as its message if the build failed. If a `path` is not specified, this method will assume that its URL is the URL for the project. Otherwise, it will use the specified path (which should be "/projects/<project_id>/").
luminoso_api/v5_client.py
def wait_for_build(self, interval=5, path=None): """ A convenience method designed to inform you when a project build has completed. It polls the API every `interval` seconds until there is not a build running. At that point, it returns the "last_build_info" field of the project record if the build succeeded, and raises a LuminosoError with the field as its message if the build failed. If a `path` is not specified, this method will assume that its URL is the URL for the project. Otherwise, it will use the specified path (which should be "/projects/<project_id>/"). """ path = path or '' start = time.time() next_log = 0 while True: response = self.get(path)['last_build_info'] if not response: raise ValueError('This project is not building!') if response['stop_time']: if response['success']: return response else: raise LuminosoError(response) elapsed = time.time() - start if elapsed > next_log: logger.info('Still waiting (%d seconds elapsed).', next_log) next_log += 120 time.sleep(interval)
def wait_for_build(self, interval=5, path=None): """ A convenience method designed to inform you when a project build has completed. It polls the API every `interval` seconds until there is not a build running. At that point, it returns the "last_build_info" field of the project record if the build succeeded, and raises a LuminosoError with the field as its message if the build failed. If a `path` is not specified, this method will assume that its URL is the URL for the project. Otherwise, it will use the specified path (which should be "/projects/<project_id>/"). """ path = path or '' start = time.time() next_log = 0 while True: response = self.get(path)['last_build_info'] if not response: raise ValueError('This project is not building!') if response['stop_time']: if response['success']: return response else: raise LuminosoError(response) elapsed = time.time() - start if elapsed > next_log: logger.info('Still waiting (%d seconds elapsed).', next_log) next_log += 120 time.sleep(interval)
[ "A", "convenience", "method", "designed", "to", "inform", "you", "when", "a", "project", "build", "has", "completed", ".", "It", "polls", "the", "API", "every", "interval", "seconds", "until", "there", "is", "not", "a", "build", "running", ".", "At", "that", "point", "it", "returns", "the", "last_build_info", "field", "of", "the", "project", "record", "if", "the", "build", "succeeded", "and", "raises", "a", "LuminosoError", "with", "the", "field", "as", "its", "message", "if", "the", "build", "failed", "." ]
LuminosoInsight/luminoso-api-client-python
python
https://github.com/LuminosoInsight/luminoso-api-client-python/blob/3bedf2a454aee39214c11fbf556ead3eecc27881/luminoso_api/v5_client.py#L310-L338
[ "def", "wait_for_build", "(", "self", ",", "interval", "=", "5", ",", "path", "=", "None", ")", ":", "path", "=", "path", "or", "''", "start", "=", "time", ".", "time", "(", ")", "next_log", "=", "0", "while", "True", ":", "response", "=", "self", ".", "get", "(", "path", ")", "[", "'last_build_info'", "]", "if", "not", "response", ":", "raise", "ValueError", "(", "'This project is not building!'", ")", "if", "response", "[", "'stop_time'", "]", ":", "if", "response", "[", "'success'", "]", ":", "return", "response", "else", ":", "raise", "LuminosoError", "(", "response", ")", "elapsed", "=", "time", ".", "time", "(", ")", "-", "start", "if", "elapsed", ">", "next_log", ":", "logger", ".", "info", "(", "'Still waiting (%d seconds elapsed).'", ",", "next_log", ")", "next_log", "+=", "120", "time", ".", "sleep", "(", "interval", ")" ]
3bedf2a454aee39214c11fbf556ead3eecc27881
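A sketch of how wait_for_build is typically used after starting a build. The 'build' endpoint appears in the upload helpers later in this listing; the project ID is a placeholder, and `client` is the v5 client from the sketches above.

project = client.client_for_path('projects/<project_id>')
project.post('build')                            # start (re)building the project
build_info = project.wait_for_build(interval=10) # poll every 10 seconds
print(build_info['stop_time'])                   # set once the build has finished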
test
LuminosoClient.save_to_file
Saves binary content to a file with name filename. filename should include the appropriate file extension, such as .xlsx or .txt, e.g., filename = 'sample.xlsx'. Useful for downloading .xlsx files.
luminoso_api/v5_client.py
def save_to_file(self, path, filename, **params): """ Saves binary content to a file with name filename. filename should include the appropriate file extension, such as .xlsx or .txt, e.g., filename = 'sample.xlsx'. Useful for downloading .xlsx files. """ url = ensure_trailing_slash(self.url + path.lstrip('/')) content = self._request('get', url, params=params).content with open(filename, 'wb') as f: f.write(content)
def save_to_file(self, path, filename, **params): """ Saves binary content to a file with name filename. filename should include the appropriate file extension, such as .xlsx or .txt, e.g., filename = 'sample.xlsx'. Useful for downloading .xlsx files. """ url = ensure_trailing_slash(self.url + path.lstrip('/')) content = self._request('get', url, params=params).content with open(filename, 'wb') as f: f.write(content)
[ "Saves", "binary", "content", "to", "a", "file", "with", "name", "filename", ".", "filename", "should", "include", "the", "appropriate", "file", "extension", "such", "as", ".", "xlsx", "or", ".", "txt", "e", ".", "g", ".", "filename", "=", "sample", ".", "xlsx", "." ]
LuminosoInsight/luminoso-api-client-python
python
https://github.com/LuminosoInsight/luminoso-api-client-python/blob/3bedf2a454aee39214c11fbf556ead3eecc27881/luminoso_api/v5_client.py#L340-L351
[ "def", "save_to_file", "(", "self", ",", "path", ",", "filename", ",", "*", "*", "params", ")", ":", "url", "=", "ensure_trailing_slash", "(", "self", ".", "url", "+", "path", ".", "lstrip", "(", "'/'", ")", ")", "content", "=", "self", ".", "_request", "(", "'get'", ",", "url", ",", "params", "=", "params", ")", ".", "content", "with", "open", "(", "filename", ",", "'wb'", ")", "as", "f", ":", "f", ".", "write", "(", "content", ")" ]
3bedf2a454aee39214c11fbf556ead3eecc27881
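A sketch of save_to_file; the export path here is a hypothetical placeholder rather than an endpoint documented in this record, and `client` is the v5 client from the sketches above.

project = client.client_for_path('projects/<project_id>')
# GET <project URL>/<some-export-path>/ and write the raw response bytes to disk.
project.save_to_file('<some-export-path>', 'sample.xlsx')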
test
get_root_url
Get the "root URL" for a URL, as described in the LuminosoClient documentation.
luminoso_api/v4_client.py
def get_root_url(url, warn=True): """ Get the "root URL" for a URL, as described in the LuminosoClient documentation. """ parsed_url = urlparse(url) # Make sure it's a complete URL, not a relative one if not parsed_url.scheme: raise ValueError('Please supply a full URL, beginning with http:// ' 'or https:// .') # Issue a warning if the path didn't already start with /api/v4 root_url = '%s://%s/api/v4' % (parsed_url.scheme, parsed_url.netloc) if warn and not parsed_url.path.startswith('/api/v4'): logger.warning('Using %s as the root url' % root_url) return root_url
def get_root_url(url, warn=True): """ Get the "root URL" for a URL, as described in the LuminosoClient documentation. """ parsed_url = urlparse(url) # Make sure it's a complete URL, not a relative one if not parsed_url.scheme: raise ValueError('Please supply a full URL, beginning with http:// ' 'or https:// .') # Issue a warning if the path didn't already start with /api/v4 root_url = '%s://%s/api/v4' % (parsed_url.scheme, parsed_url.netloc) if warn and not parsed_url.path.startswith('/api/v4'): logger.warning('Using %s as the root url' % root_url) return root_url
[ "Get", "the", "root", "URL", "for", "a", "URL", "as", "described", "in", "the", "LuminosoClient", "documentation", "." ]
LuminosoInsight/luminoso-api-client-python
python
https://github.com/LuminosoInsight/luminoso-api-client-python/blob/3bedf2a454aee39214c11fbf556ead3eecc27881/luminoso_api/v4_client.py#L494-L510
[ "def", "get_root_url", "(", "url", ",", "warn", "=", "True", ")", ":", "parsed_url", "=", "urlparse", "(", "url", ")", "# Make sure it's a complete URL, not a relative one", "if", "not", "parsed_url", ".", "scheme", ":", "raise", "ValueError", "(", "'Please supply a full URL, beginning with http:// '", "'or https:// .'", ")", "# Issue a warning if the path didn't already start with /api/v4", "root_url", "=", "'%s://%s/api/v4'", "%", "(", "parsed_url", ".", "scheme", ",", "parsed_url", ".", "netloc", ")", "if", "warn", "and", "not", "parsed_url", ".", "path", ".", "startswith", "(", "'/api/v4'", ")", ":", "logger", ".", "warning", "(", "'Using %s as the root url'", "%", "root_url", ")", "return", "root_url" ]
3bedf2a454aee39214c11fbf556ead3eecc27881
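Worked examples of get_root_url's behavior, following directly from the code above.

from luminoso_api.v4_client import get_root_url

get_root_url('https://analytics.luminoso.com/api/v4/projects/abc')
# -> 'https://analytics.luminoso.com/api/v4'  (no warning: the path already starts with /api/v4)

get_root_url('https://analytics.luminoso.com/somewhere/else')
# -> 'https://analytics.luminoso.com/api/v4'  (logs a warning that this root URL is being used)

get_root_url('analytics.luminoso.com')
# raises ValueError: the URL has no scheme, so it is not a complete URL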
test
jsonify_parameters
When sent in an authorized REST request, only strings and integers can be transmitted accurately. Other types of data need to be encoded into JSON.
luminoso_api/v4_client.py
def jsonify_parameters(params): """ When sent in an authorized REST request, only strings and integers can be transmitted accurately. Other types of data need to be encoded into JSON. """ result = {} for param, value in params.items(): if isinstance(value, types_not_to_encode): result[param] = value else: result[param] = json.dumps(value) return result
def jsonify_parameters(params): """ When sent in an authorized REST request, only strings and integers can be transmitted accurately. Other types of data need to be encoded into JSON. """ result = {} for param, value in params.items(): if isinstance(value, types_not_to_encode): result[param] = value else: result[param] = json.dumps(value) return result
[ "When", "sent", "in", "an", "authorized", "REST", "request", "only", "strings", "and", "integers", "can", "be", "transmitted", "accurately", ".", "Other", "types", "of", "data", "need", "to", "be", "encoded", "into", "JSON", "." ]
LuminosoInsight/luminoso-api-client-python
python
https://github.com/LuminosoInsight/luminoso-api-client-python/blob/3bedf2a454aee39214c11fbf556ead3eecc27881/luminoso_api/v4_client.py#L519-L530
[ "def", "jsonify_parameters", "(", "params", ")", ":", "result", "=", "{", "}", "for", "param", ",", "value", "in", "params", ".", "items", "(", ")", ":", "if", "isinstance", "(", "value", ",", "types_not_to_encode", ")", ":", "result", "[", "param", "]", "=", "value", "else", ":", "result", "[", "param", "]", "=", "json", ".", "dumps", "(", "value", ")", "return", "result" ]
3bedf2a454aee39214c11fbf556ead3eecc27881
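A small example of jsonify_parameters. It assumes, as the docstring implies, that str and int are among types_not_to_encode (that constant is defined elsewhere in v4_client.py and is not shown in this record).

from luminoso_api.v4_client import jsonify_parameters

jsonify_parameters({'name': 'demo', 'limit': 10, 'fields': ['text', 'title']})
# -> {'name': 'demo', 'limit': 10, 'fields': '["text", "title"]'}
# Strings and ints pass through; the list is JSON-encoded into a string.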
test
LuminosoClient.connect
Returns an object that makes requests to the API, authenticated with the provided username/password, at URLs beginning with `url`. You can leave out the URL and get your 'default URL', a base path that is probably appropriate for creating projects on your account: client = LuminosoClient.connect(username=username) If the URL is simply a path, omitting the scheme and domain, then it will default to https://analytics.luminoso.com/api/v4/, which is probably what you want: client = LuminosoClient.connect('/projects/public', username=username) If you leave out the username, it will use your system username, which is convenient if it matches your Luminoso username: client = LuminosoClient.connect()
luminoso_api/v4_client.py
def connect(cls, url=None, username=None, password=None, token=None, token_file=None): """ Returns an object that makes requests to the API, authenticated with the provided username/password, at URLs beginning with `url`. You can leave out the URL and get your 'default URL', a base path that is probably appropriate for creating projects on your account: client = LuminosoClient.connect(username=username) If the URL is simply a path, omitting the scheme and domain, then it will default to https://analytics.luminoso.com/api/v4/, which is probably what you want: client = LuminosoClient.connect('/projects/public', username=username) If you leave out the username, it will use your system username, which is convenient if it matches your Luminoso username: client = LuminosoClient.connect() """ auto_account = False if url is None: auto_account = True url = '/' if url.startswith('http'): root_url = get_root_url(url) else: url = URL_BASE + '/' + url.lstrip('/') root_url = URL_BASE auth = cls._get_token_auth(username, password, token, token_file, root_url) session = requests.session() session.auth = auth client = cls(session, url) if auto_account: client = client.change_path('/projects/%s' % client._get_default_account()) return client
def connect(cls, url=None, username=None, password=None, token=None, token_file=None): """ Returns an object that makes requests to the API, authenticated with the provided username/password, at URLs beginning with `url`. You can leave out the URL and get your 'default URL', a base path that is probably appropriate for creating projects on your account: client = LuminosoClient.connect(username=username) If the URL is simply a path, omitting the scheme and domain, then it will default to https://analytics.luminoso.com/api/v4/, which is probably what you want: client = LuminosoClient.connect('/projects/public', username=username) If you leave out the username, it will use your system username, which is convenient if it matches your Luminoso username: client = LuminosoClient.connect() """ auto_account = False if url is None: auto_account = True url = '/' if url.startswith('http'): root_url = get_root_url(url) else: url = URL_BASE + '/' + url.lstrip('/') root_url = URL_BASE auth = cls._get_token_auth(username, password, token, token_file, root_url) session = requests.session() session.auth = auth client = cls(session, url) if auto_account: client = client.change_path('/projects/%s' % client._get_default_account()) return client
[ "Returns", "an", "object", "that", "makes", "requests", "to", "the", "API", "authenticated", "with", "the", "provided", "username", "/", "password", "at", "URLs", "beginning", "with", "url", "." ]
LuminosoInsight/luminoso-api-client-python
python
https://github.com/LuminosoInsight/luminoso-api-client-python/blob/3bedf2a454aee39214c11fbf556ead3eecc27881/luminoso_api/v4_client.py#L68-L110
[ "def", "connect", "(", "cls", ",", "url", "=", "None", ",", "username", "=", "None", ",", "password", "=", "None", ",", "token", "=", "None", ",", "token_file", "=", "None", ")", ":", "auto_account", "=", "False", "if", "url", "is", "None", ":", "auto_account", "=", "True", "url", "=", "'/'", "if", "url", ".", "startswith", "(", "'http'", ")", ":", "root_url", "=", "get_root_url", "(", "url", ")", "else", ":", "url", "=", "URL_BASE", "+", "'/'", "+", "url", ".", "lstrip", "(", "'/'", ")", "root_url", "=", "URL_BASE", "auth", "=", "cls", ".", "_get_token_auth", "(", "username", ",", "password", ",", "token", ",", "token_file", ",", "root_url", ")", "session", "=", "requests", ".", "session", "(", ")", "session", ".", "auth", "=", "auth", "client", "=", "cls", "(", "session", ",", "url", ")", "if", "auto_account", ":", "client", "=", "client", ".", "change_path", "(", "'/projects/%s'", "%", "client", ".", "_get_default_account", "(", ")", ")", "return", "client" ]
3bedf2a454aee39214c11fbf556ead3eecc27881
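The connection styles described in the docstring, written out as a sketch. The username is a placeholder, and the authentication details (password prompt or saved token) are handled by _get_token_auth, which is not shown in this record.

from luminoso_api.v4_client import LuminosoClient

client = LuminosoClient.connect()                                   # default URL, system username
client = LuminosoClient.connect('/projects/public', username='me')  # path relative to the v4 base URL
client = LuminosoClient.connect('https://analytics.luminoso.com/api/v4/projects/public',
                                username='me')                      # full URL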
test
LuminosoClient.save_token
Obtain the user's long-lived API token and save it in a local file. If the user has no long-lived API token, one will be created. Returns the token that was saved.
luminoso_api/v4_client.py
def save_token(self, token_file=None): """ Obtain the user's long-lived API token and save it in a local file. If the user has no long-lived API token, one will be created. Returns the token that was saved. """ tokens = self._json_request('get', self.root_url + '/user/tokens/') long_lived = [token['type'] == 'long_lived' for token in tokens] if any(long_lived): dic = tokens[long_lived.index(True)] else: # User doesn't have a long-lived token, so create one dic = self._json_request('post', self.root_url + '/user/tokens/') token = dic['token'] token_file = token_file or get_token_filename() if os.path.exists(token_file): saved_tokens = json.load(open(token_file)) else: saved_tokens = {} saved_tokens[urlparse(self.root_url).netloc] = token directory, filename = os.path.split(token_file) if directory and not os.path.exists(directory): os.makedirs(directory) with open(token_file, 'w') as f: json.dump(saved_tokens, f) return token
def save_token(self, token_file=None): """ Obtain the user's long-lived API token and save it in a local file. If the user has no long-lived API token, one will be created. Returns the token that was saved. """ tokens = self._json_request('get', self.root_url + '/user/tokens/') long_lived = [token['type'] == 'long_lived' for token in tokens] if any(long_lived): dic = tokens[long_lived.index(True)] else: # User doesn't have a long-lived token, so create one dic = self._json_request('post', self.root_url + '/user/tokens/') token = dic['token'] token_file = token_file or get_token_filename() if os.path.exists(token_file): saved_tokens = json.load(open(token_file)) else: saved_tokens = {} saved_tokens[urlparse(self.root_url).netloc] = token directory, filename = os.path.split(token_file) if directory and not os.path.exists(directory): os.makedirs(directory) with open(token_file, 'w') as f: json.dump(saved_tokens, f) return token
[ "Obtain", "the", "user", "s", "long", "-", "lived", "API", "token", "and", "save", "it", "in", "a", "local", "file", ".", "If", "the", "user", "has", "no", "long", "-", "lived", "API", "token", "one", "will", "be", "created", ".", "Returns", "the", "token", "that", "was", "saved", "." ]
LuminosoInsight/luminoso-api-client-python
python
https://github.com/LuminosoInsight/luminoso-api-client-python/blob/3bedf2a454aee39214c11fbf556ead3eecc27881/luminoso_api/v4_client.py#L142-L167
[ "def", "save_token", "(", "self", ",", "token_file", "=", "None", ")", ":", "tokens", "=", "self", ".", "_json_request", "(", "'get'", ",", "self", ".", "root_url", "+", "'/user/tokens/'", ")", "long_lived", "=", "[", "token", "[", "'type'", "]", "==", "'long_lived'", "for", "token", "in", "tokens", "]", "if", "any", "(", "long_lived", ")", ":", "dic", "=", "tokens", "[", "long_lived", ".", "index", "(", "True", ")", "]", "else", ":", "# User doesn't have a long-lived token, so create one", "dic", "=", "self", ".", "_json_request", "(", "'post'", ",", "self", ".", "root_url", "+", "'/user/tokens/'", ")", "token", "=", "dic", "[", "'token'", "]", "token_file", "=", "token_file", "or", "get_token_filename", "(", ")", "if", "os", ".", "path", ".", "exists", "(", "token_file", ")", ":", "saved_tokens", "=", "json", ".", "load", "(", "open", "(", "token_file", ")", ")", "else", ":", "saved_tokens", "=", "{", "}", "saved_tokens", "[", "urlparse", "(", "self", ".", "root_url", ")", ".", "netloc", "]", "=", "token", "directory", ",", "filename", "=", "os", ".", "path", ".", "split", "(", "token_file", ")", "if", "directory", "and", "not", "os", ".", "path", ".", "exists", "(", "directory", ")", ":", "os", ".", "makedirs", "(", "directory", ")", "with", "open", "(", "token_file", ",", "'w'", ")", "as", "f", ":", "json", ".", "dump", "(", "saved_tokens", ",", "f", ")", "return", "token" ]
3bedf2a454aee39214c11fbf556ead3eecc27881
test
LuminosoClient._json_request
Make a request of the specified type and expect a JSON object in response. If the result has an 'error' value, raise a LuminosoAPIError with its contents. Otherwise, return the contents of the 'result' value.
luminoso_api/v4_client.py
def _json_request(self, req_type, url, **kwargs): """ Make a request of the specified type and expect a JSON object in response. If the result has an 'error' value, raise a LuminosoAPIError with its contents. Otherwise, return the contents of the 'result' value. """ response = self._request(req_type, url, **kwargs) try: json_response = response.json() except ValueError: logger.error("Received response with no JSON: %s %s" % (response, response.content)) raise LuminosoError('Response body contained no JSON. ' 'Perhaps you meant to use get_raw?') if json_response.get('error'): raise LuminosoAPIError(json_response.get('error')) return json_response['result']
def _json_request(self, req_type, url, **kwargs): """ Make a request of the specified type and expect a JSON object in response. If the result has an 'error' value, raise a LuminosoAPIError with its contents. Otherwise, return the contents of the 'result' value. """ response = self._request(req_type, url, **kwargs) try: json_response = response.json() except ValueError: logger.error("Received response with no JSON: %s %s" % (response, response.content)) raise LuminosoError('Response body contained no JSON. ' 'Perhaps you meant to use get_raw?') if json_response.get('error'): raise LuminosoAPIError(json_response.get('error')) return json_response['result']
[ "Make", "a", "request", "of", "the", "specified", "type", "and", "expect", "a", "JSON", "object", "in", "response", "." ]
LuminosoInsight/luminoso-api-client-python
python
https://github.com/LuminosoInsight/luminoso-api-client-python/blob/3bedf2a454aee39214c11fbf556ead3eecc27881/luminoso_api/v4_client.py#L196-L214
[ "def", "_json_request", "(", "self", ",", "req_type", ",", "url", ",", "*", "*", "kwargs", ")", ":", "response", "=", "self", ".", "_request", "(", "req_type", ",", "url", ",", "*", "*", "kwargs", ")", "try", ":", "json_response", "=", "response", ".", "json", "(", ")", "except", "ValueError", ":", "logger", ".", "error", "(", "\"Received response with no JSON: %s %s\"", "%", "(", "response", ",", "response", ".", "content", ")", ")", "raise", "LuminosoError", "(", "'Response body contained no JSON. '", "'Perhaps you meant to use get_raw?'", ")", "if", "json_response", ".", "get", "(", "'error'", ")", ":", "raise", "LuminosoAPIError", "(", "json_response", ".", "get", "(", "'error'", ")", ")", "return", "json_response", "[", "'result'", "]" ]
3bedf2a454aee39214c11fbf556ead3eecc27881
test
LuminosoClient.post
Make a POST request to the given path, and return the JSON-decoded result. Keyword parameters will be converted to form values, sent in the body of the POST. POST requests are requests that cause a change on the server, especially those that ask to create and return an object of some kind.
luminoso_api/v4_client.py
def post(self, path='', **params): """ Make a POST request to the given path, and return the JSON-decoded result. Keyword parameters will be converted to form values, sent in the body of the POST. POST requests are requests that cause a change on the server, especially those that ask to create and return an object of some kind. """ params = jsonify_parameters(params) url = ensure_trailing_slash(self.url + path.lstrip('/')) return self._json_request('post', url, data=params)
def post(self, path='', **params): """ Make a POST request to the given path, and return the JSON-decoded result. Keyword parameters will be converted to form values, sent in the body of the POST. POST requests are requests that cause a change on the server, especially those that ask to create and return an object of some kind. """ params = jsonify_parameters(params) url = ensure_trailing_slash(self.url + path.lstrip('/')) return self._json_request('post', url, data=params)
[ "Make", "a", "POST", "request", "to", "the", "given", "path", "and", "return", "the", "JSON", "-", "decoded", "result", "." ]
LuminosoInsight/luminoso-api-client-python
python
https://github.com/LuminosoInsight/luminoso-api-client-python/blob/3bedf2a454aee39214c11fbf556ead3eecc27881/luminoso_api/v4_client.py#L231-L244
[ "def", "post", "(", "self", ",", "path", "=", "''", ",", "*", "*", "params", ")", ":", "params", "=", "jsonify_parameters", "(", "params", ")", "url", "=", "ensure_trailing_slash", "(", "self", ".", "url", "+", "path", ".", "lstrip", "(", "'/'", ")", ")", "return", "self", ".", "_json_request", "(", "'post'", ",", "url", ",", "data", "=", "params", ")" ]
3bedf2a454aee39214c11fbf556ead3eecc27881
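A sketch of the v4 post method; the endpoint name is a placeholder. The point is that keyword arguments are run through jsonify_parameters and sent as form values in the request body.

# Assumes `client` is a connected v4 LuminosoClient (see the connect sketch above).
client.post('<some-endpoint>', name='demo', fields=['text', 'title'])
# name stays a plain string; fields is JSON-encoded before being sent as a form value.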
test
LuminosoClient.post_data
Make a POST request to the given path, with `data` in its body. Return the JSON-decoded result. The content_type must be set to reflect the kind of data being sent, which is often `application/json`. Keyword parameters will be converted to URL parameters. This is unlike other POST requests which encode those parameters in the body, because the body is already being used. This is used by the Luminoso API to upload new documents in JSON format.
luminoso_api/v4_client.py
def post_data(self, path, data, content_type, **params): """ Make a POST request to the given path, with `data` in its body. Return the JSON-decoded result. The content_type must be set to reflect the kind of data being sent, which is often `application/json`. Keyword parameters will be converted to URL parameters. This is unlike other POST requests which encode those parameters in the body, because the body is already being used. This is used by the Luminoso API to upload new documents in JSON format. """ params = jsonify_parameters(params) url = ensure_trailing_slash(self.url + path.lstrip('/')) return self._json_request('post', url, params=params, data=data, headers={'Content-Type': content_type} )
def post_data(self, path, data, content_type, **params): """ Make a POST request to the given path, with `data` in its body. Return the JSON-decoded result. The content_type must be set to reflect the kind of data being sent, which is often `application/json`. Keyword parameters will be converted to URL parameters. This is unlike other POST requests which encode those parameters in the body, because the body is already being used. This is used by the Luminoso API to upload new documents in JSON format. """ params = jsonify_parameters(params) url = ensure_trailing_slash(self.url + path.lstrip('/')) return self._json_request('post', url, params=params, data=data, headers={'Content-Type': content_type} )
[ "Make", "a", "POST", "request", "to", "the", "given", "path", "with", "data", "in", "its", "body", ".", "Return", "the", "JSON", "-", "decoded", "result", "." ]
LuminosoInsight/luminoso-api-client-python
python
https://github.com/LuminosoInsight/luminoso-api-client-python/blob/3bedf2a454aee39214c11fbf556ead3eecc27881/luminoso_api/v4_client.py#L290-L311
[ "def", "post_data", "(", "self", ",", "path", ",", "data", ",", "content_type", ",", "*", "*", "params", ")", ":", "params", "=", "jsonify_parameters", "(", "params", ")", "url", "=", "ensure_trailing_slash", "(", "self", ".", "url", "+", "path", ".", "lstrip", "(", "'/'", ")", ")", "return", "self", ".", "_json_request", "(", "'post'", ",", "url", ",", "params", "=", "params", ",", "data", "=", "data", ",", "headers", "=", "{", "'Content-Type'", ":", "content_type", "}", ")" ]
3bedf2a454aee39214c11fbf556ead3eecc27881
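A sketch of post_data, mirroring what the upload convenience method below does: assuming a v4 client pointed at a project URL, it posts a JSON document list to the project's docs path (that path comes from the upload docstring later in this listing).

import json

project = client.change_path('projects/myaccount/my_project_id')
docs = [{'text': 'Great coffee', 'title': 'review-1'}]
project.post_data('docs', json.dumps(docs), 'application/json')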
test
LuminosoClient.change_path
Return a new LuminosoClient for a subpath of this one. For example, you might want to start with a LuminosoClient for `https://analytics.luminoso.com/api/v4/`, then get a new one for `https://analytics.luminoso.com/api/v4/projects/myaccount/myprojectid`. You accomplish that with the following call: newclient = client.change_path('projects/myaccount/myproject_id') If you start the path with `/`, it will start from the root_url instead of the current url: project_area = newclient.change_path('/projects/myaccount') The advantage of using `.change_path` is that you will not need to re-authenticate like you would if you ran `.connect` again. You can use `.change_path` to split off as many sub-clients as you want, and you don't have to stop using the old one just because you got a new one with `.change_path`.
luminoso_api/v4_client.py
def change_path(self, path): """ Return a new LuminosoClient for a subpath of this one. For example, you might want to start with a LuminosoClient for `https://analytics.luminoso.com/api/v4/`, then get a new one for `https://analytics.luminoso.com/api/v4/projects/myaccount/myprojectid`. You accomplish that with the following call: newclient = client.change_path('projects/myaccount/myproject_id') If you start the path with `/`, it will start from the root_url instead of the current url: project_area = newclient.change_path('/projects/myaccount') The advantage of using `.change_path` is that you will not need to re-authenticate like you would if you ran `.connect` again. You can use `.change_path` to split off as many sub-clients as you want, and you don't have to stop using the old one just because you got a new one with `.change_path`. """ if path.startswith('/'): url = self.root_url + path else: url = self.url + path return self.__class__(self.session, url)
def change_path(self, path): """ Return a new LuminosoClient for a subpath of this one. For example, you might want to start with a LuminosoClient for `https://analytics.luminoso.com/api/v4/`, then get a new one for `https://analytics.luminoso.com/api/v4/projects/myaccount/myprojectid`. You accomplish that with the following call: newclient = client.change_path('projects/myaccount/myproject_id') If you start the path with `/`, it will start from the root_url instead of the current url: project_area = newclient.change_path('/projects/myaccount') The advantage of using `.change_path` is that you will not need to re-authenticate like you would if you ran `.connect` again. You can use `.change_path` to split off as many sub-clients as you want, and you don't have to stop using the old one just because you got a new one with `.change_path`. """ if path.startswith('/'): url = self.root_url + path else: url = self.url + path return self.__class__(self.session, url)
[ "Return", "a", "new", "LuminosoClient", "for", "a", "subpath", "of", "this", "one", "." ]
LuminosoInsight/luminoso-api-client-python
python
https://github.com/LuminosoInsight/luminoso-api-client-python/blob/3bedf2a454aee39214c11fbf556ead3eecc27881/luminoso_api/v4_client.py#L360-L387
[ "def", "change_path", "(", "self", ",", "path", ")", ":", "if", "path", ".", "startswith", "(", "'/'", ")", ":", "url", "=", "self", ".", "root_url", "+", "path", "else", ":", "url", "=", "self", ".", "url", "+", "path", "return", "self", ".", "__class__", "(", "self", ".", "session", ",", "url", ")" ]
3bedf2a454aee39214c11fbf556ead3eecc27881
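The docstring's example as a runnable sketch, continuing from the v4 connect sketch above.

client = LuminosoClient.connect()
newclient = client.change_path('projects/myaccount/myproject_id')    # relative to the current URL
project_area = newclient.change_path('/projects/myaccount')          # leading slash: relative to root_url
# All three clients share one authenticated session and remain usable.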
test
LuminosoClient._get_default_account
Get the ID of an account you can use to access projects.
luminoso_api/v4_client.py
def _get_default_account(self): """ Get the ID of an account you can use to access projects. """ newclient = self.__class__(self.session, self.root_url) account_info = newclient.get('/accounts/') if account_info['default_account'] is not None: return account_info['default_account'] valid_accounts = [a['account_id'] for a in account_info['accounts'] if a['account_id'] != 'public'] if len(valid_accounts) == 0: raise ValueError("Can't determine your default URL. " "Please request a specific URL or ask " "Luminoso for support.") return valid_accounts[0]
def _get_default_account(self): """ Get the ID of an account you can use to access projects. """ newclient = self.__class__(self.session, self.root_url) account_info = newclient.get('/accounts/') if account_info['default_account'] is not None: return account_info['default_account'] valid_accounts = [a['account_id'] for a in account_info['accounts'] if a['account_id'] != 'public'] if len(valid_accounts) == 0: raise ValueError("Can't determine your default URL. " "Please request a specific URL or ask " "Luminoso for support.") return valid_accounts[0]
[ "Get", "the", "ID", "of", "an", "account", "you", "can", "use", "to", "access", "projects", "." ]
LuminosoInsight/luminoso-api-client-python
python
https://github.com/LuminosoInsight/luminoso-api-client-python/blob/3bedf2a454aee39214c11fbf556ead3eecc27881/luminoso_api/v4_client.py#L389-L403
[ "def", "_get_default_account", "(", "self", ")", ":", "newclient", "=", "self", ".", "__class__", "(", "self", ".", "session", ",", "self", ".", "root_url", ")", "account_info", "=", "newclient", ".", "get", "(", "'/accounts/'", ")", "if", "account_info", "[", "'default_account'", "]", "is", "not", "None", ":", "return", "account_info", "[", "'default_account'", "]", "valid_accounts", "=", "[", "a", "[", "'account_id'", "]", "for", "a", "in", "account_info", "[", "'accounts'", "]", "if", "a", "[", "'account_id'", "]", "!=", "'public'", "]", "if", "len", "(", "valid_accounts", ")", "==", "0", ":", "raise", "ValueError", "(", "\"Can't determine your default URL. \"", "\"Please request a specific URL or ask \"", "\"Luminoso for support.\"", ")", "return", "valid_accounts", "[", "0", "]" ]
3bedf2a454aee39214c11fbf556ead3eecc27881
test
LuminosoClient.documentation
Get the documentation that the server sends for the API.
luminoso_api/v4_client.py
def documentation(self): """ Get the documentation that the server sends for the API. """ newclient = self.__class__(self.session, self.root_url) return newclient.get_raw('/')
def documentation(self): """ Get the documentation that the server sends for the API. """ newclient = self.__class__(self.session, self.root_url) return newclient.get_raw('/')
[ "Get", "the", "documentation", "that", "the", "server", "sends", "for", "the", "API", "." ]
LuminosoInsight/luminoso-api-client-python
python
https://github.com/LuminosoInsight/luminoso-api-client-python/blob/3bedf2a454aee39214c11fbf556ead3eecc27881/luminoso_api/v4_client.py#L405-L410
[ "def", "documentation", "(", "self", ")", ":", "newclient", "=", "self", ".", "__class__", "(", "self", ".", "session", ",", "self", ".", "root_url", ")", "return", "newclient", ".", "get_raw", "(", "'/'", ")" ]
3bedf2a454aee39214c11fbf556ead3eecc27881
test
LuminosoClient.upload
A convenience method for uploading a set of dictionaries representing documents. You still need to specify the URL to upload to, which will look like ROOT_URL/projects/myaccount/project_id/docs.
luminoso_api/v4_client.py
def upload(self, path, docs, **params): """ A convenience method for uploading a set of dictionaries representing documents. You still need to specify the URL to upload to, which will look like ROOT_URL/projects/myaccount/project_id/docs. """ json_data = json.dumps(list(docs)) return self.post_data(path, json_data, 'application/json', **params)
def upload(self, path, docs, **params): """ A convenience method for uploading a set of dictionaries representing documents. You still need to specify the URL to upload to, which will look like ROOT_URL/projects/myaccount/project_id/docs. """ json_data = json.dumps(list(docs)) return self.post_data(path, json_data, 'application/json', **params)
[ "A", "convenience", "method", "for", "uploading", "a", "set", "of", "dictionaries", "representing", "documents", ".", "You", "still", "need", "to", "specify", "the", "URL", "to", "upload", "to", "which", "will", "look", "like", "ROOT_URL", "/", "projects", "/", "myaccount", "/", "project_id", "/", "docs", "." ]
LuminosoInsight/luminoso-api-client-python
python
https://github.com/LuminosoInsight/luminoso-api-client-python/blob/3bedf2a454aee39214c11fbf556ead3eecc27881/luminoso_api/v4_client.py#L412-L419
[ "def", "upload", "(", "self", ",", "path", ",", "docs", ",", "*", "*", "params", ")", ":", "json_data", "=", "json", ".", "dumps", "(", "list", "(", "docs", ")", ")", "return", "self", ".", "post_data", "(", "path", ",", "json_data", ",", "'application/json'", ",", "*", "*", "params", ")" ]
3bedf2a454aee39214c11fbf556ead3eecc27881
test
LuminosoClient.wait_for
Wait for an asynchronous task to finish. Unlike the thin methods elsewhere on this object, this one is actually specific to how the Luminoso API works. This will poll an API endpoint to find out the status of the job numbered `job_id`, repeating every 5 seconds (by default) until the job is done. When the job is done, it will return an object representing the result of that job. In the Luminoso API, requests that may take a long time return a job ID instead of a result, so that your code can continue running in the meantime. When it needs the job to be done to proceed, it can use this method to wait. The base URL where it looks for that job is by default `jobs/id/` under the current URL, assuming that this LuminosoClient's URL represents a project. You can specify a different URL by changing `base_path`. If the job failed, will raise a LuminosoError with the job status as its message.
luminoso_api/v4_client.py
def wait_for(self, job_id, base_path=None, interval=5): """ Wait for an asynchronous task to finish. Unlike the thin methods elsewhere on this object, this one is actually specific to how the Luminoso API works. This will poll an API endpoint to find out the status of the job numbered `job_id`, repeating every 5 seconds (by default) until the job is done. When the job is done, it will return an object representing the result of that job. In the Luminoso API, requests that may take a long time return a job ID instead of a result, so that your code can continue running in the meantime. When it needs the job to be done to proceed, it can use this method to wait. The base URL where it looks for that job is by default `jobs/id/` under the current URL, assuming that this LuminosoClient's URL represents a project. You can specify a different URL by changing `base_path`. If the job failed, will raise a LuminosoError with the job status as its message. """ if base_path is None: base_path = 'jobs/id' path = '%s%d' % (ensure_trailing_slash(base_path), job_id) start = time.time() next_log = 0 while True: response = self.get(path) if response['stop_time']: if response['success']: return response else: raise LuminosoError(response) elapsed = time.time() - start if elapsed > next_log: logger.info('Still waiting (%d seconds elapsed).', next_log) next_log += 120 time.sleep(interval)
def wait_for(self, job_id, base_path=None, interval=5): """ Wait for an asynchronous task to finish. Unlike the thin methods elsewhere on this object, this one is actually specific to how the Luminoso API works. This will poll an API endpoint to find out the status of the job numbered `job_id`, repeating every 5 seconds (by default) until the job is done. When the job is done, it will return an object representing the result of that job. In the Luminoso API, requests that may take a long time return a job ID instead of a result, so that your code can continue running in the meantime. When it needs the job to be done to proceed, it can use this method to wait. The base URL where it looks for that job is by default `jobs/id/` under the current URL, assuming that this LuminosoClient's URL represents a project. You can specify a different URL by changing `base_path`. If the job failed, will raise a LuminosoError with the job status as its message. """ if base_path is None: base_path = 'jobs/id' path = '%s%d' % (ensure_trailing_slash(base_path), job_id) start = time.time() next_log = 0 while True: response = self.get(path) if response['stop_time']: if response['success']: return response else: raise LuminosoError(response) elapsed = time.time() - start if elapsed > next_log: logger.info('Still waiting (%d seconds elapsed).', next_log) next_log += 120 time.sleep(interval)
[ "Wait", "for", "an", "asynchronous", "task", "to", "finish", "." ]
LuminosoInsight/luminoso-api-client-python
python
https://github.com/LuminosoInsight/luminoso-api-client-python/blob/3bedf2a454aee39214c11fbf556ead3eecc27881/luminoso_api/v4_client.py#L421-L461
[ "def", "wait_for", "(", "self", ",", "job_id", ",", "base_path", "=", "None", ",", "interval", "=", "5", ")", ":", "if", "base_path", "is", "None", ":", "base_path", "=", "'jobs/id'", "path", "=", "'%s%d'", "%", "(", "ensure_trailing_slash", "(", "base_path", ")", ",", "job_id", ")", "start", "=", "time", ".", "time", "(", ")", "next_log", "=", "0", "while", "True", ":", "response", "=", "self", ".", "get", "(", "path", ")", "if", "response", "[", "'stop_time'", "]", ":", "if", "response", "[", "'success'", "]", ":", "return", "response", "else", ":", "raise", "LuminosoError", "(", "response", ")", "elapsed", "=", "time", ".", "time", "(", ")", "-", "start", "if", "elapsed", ">", "next_log", ":", "logger", ".", "info", "(", "'Still waiting (%d seconds elapsed).'", ",", "next_log", ")", "next_log", "+=", "120", "time", ".", "sleep", "(", "interval", ")" ]
3bedf2a454aee39214c11fbf556ead3eecc27881
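A sketch of wait_for, continuing from the v4 connect sketch above. It assumes, as the docstring describes, that a long-running request such as a document upload returns a job ID rather than a result; the project path is a placeholder.

project = LuminosoClient.connect('/projects/myaccount/my_project_id')
docs = [{'text': 'Great coffee', 'title': 'review-1'}]
job_id = project.upload('docs', docs)   # long-running request returns a job ID
project.wait_for(job_id)                # polls jobs/id/<job_id> every 5 seconds until it finishes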
test
LuminosoClient.get_raw
Get the raw text of a response. This is only generally useful for specific URLs, such as documentation.
luminoso_api/v4_client.py
def get_raw(self, path, **params): """ Get the raw text of a response. This is only generally useful for specific URLs, such as documentation. """ url = ensure_trailing_slash(self.url + path.lstrip('/')) return self._request('get', url, params=params).text
def get_raw(self, path, **params): """ Get the raw text of a response. This is only generally useful for specific URLs, such as documentation. """ url = ensure_trailing_slash(self.url + path.lstrip('/')) return self._request('get', url, params=params).text
[ "Get", "the", "raw", "text", "of", "a", "response", "." ]
LuminosoInsight/luminoso-api-client-python
python
https://github.com/LuminosoInsight/luminoso-api-client-python/blob/3bedf2a454aee39214c11fbf556ead3eecc27881/luminoso_api/v4_client.py#L463-L470
[ "def", "get_raw", "(", "self", ",", "path", ",", "*", "*", "params", ")", ":", "url", "=", "ensure_trailing_slash", "(", "self", ".", "url", "+", "path", ".", "lstrip", "(", "'/'", ")", ")", "return", "self", ".", "_request", "(", "'get'", ",", "url", ",", "params", "=", "params", ")", ".", "text" ]
3bedf2a454aee39214c11fbf556ead3eecc27881
test
_print_csv
Print a JSON list of JSON objects in CSV format.
luminoso_api/v5_cli.py
def _print_csv(result): """Print a JSON list of JSON objects in CSV format.""" if type(result) is not list: raise TypeError("output not able to be displayed as CSV.") first_line = result[0] w = csv.DictWriter(sys.stdout, fieldnames=sorted(first_line.keys())) w.writeheader() for line in result: w.writerow(line)
def _print_csv(result): """Print a JSON list of JSON objects in CSV format.""" if type(result) is not list: raise TypeError("output not able to be displayed as CSV.") first_line = result[0] w = csv.DictWriter(sys.stdout, fieldnames=sorted(first_line.keys())) w.writeheader() for line in result: w.writerow(line)
[ "Print", "a", "JSON", "list", "of", "JSON", "objects", "in", "CSV", "format", "." ]
LuminosoInsight/luminoso-api-client-python
python
https://github.com/LuminosoInsight/luminoso-api-client-python/blob/3bedf2a454aee39214c11fbf556ead3eecc27881/luminoso_api/v5_cli.py#L43-L51
[ "def", "_print_csv", "(", "result", ")", ":", "if", "type", "(", "result", ")", "is", "not", "list", ":", "raise", "TypeError", "(", "\"output not able to be displayed as CSV.\"", ")", "first_line", "=", "result", "[", "0", "]", "w", "=", "csv", ".", "DictWriter", "(", "sys", ".", "stdout", ",", "fieldnames", "=", "sorted", "(", "first_line", ".", "keys", "(", ")", ")", ")", "w", ".", "writeheader", "(", ")", "for", "line", "in", "result", ":", "w", ".", "writerow", "(", "line", ")" ]
3bedf2a454aee39214c11fbf556ead3eecc27881
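What _print_csv produces for a small input, calling the private helper directly just to show the output shape; note that the columns come out in sorted key order.

from luminoso_api.v5_cli import _print_csv

_print_csv([{'name': 'proj A', 'language': 'en'},
            {'name': 'proj B', 'language': 'fr'}])
# language,name
# en,proj A
# fr,proj B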
test
_read_params
Read parameters from input file, -j, and -p arguments, in that order.
luminoso_api/v5_cli.py
def _read_params(input_file, json_body, p_params): """Read parameters from input file, -j, and -p arguments, in that order.""" params = {} try: if input_file: params.update(json.load(input_file)) if json_body is not None: params.update(json.loads(json_body)) except ValueError as e: raise ValueError("input is not valid JSON: %s" % e) try: params.update({p.split('=', 1)[0]: p.split('=', 1)[1] for p in p_params}) except IndexError: raise ValueError("--param arguments must have key=value format") return params
def _read_params(input_file, json_body, p_params): """Read parameters from input file, -j, and -p arguments, in that order.""" params = {} try: if input_file: params.update(json.load(input_file)) if json_body is not None: params.update(json.loads(json_body)) except ValueError as e: raise ValueError("input is not valid JSON: %s" % e) try: params.update({p.split('=', 1)[0]: p.split('=', 1)[1] for p in p_params}) except IndexError: raise ValueError("--param arguments must have key=value format") return params
[ "Read", "parameters", "from", "input", "file", "-", "j", "and", "-", "p", "arguments", "in", "that", "order", "." ]
LuminosoInsight/luminoso-api-client-python
python
https://github.com/LuminosoInsight/luminoso-api-client-python/blob/3bedf2a454aee39214c11fbf556ead3eecc27881/luminoso_api/v5_cli.py#L54-L68
[ "def", "_read_params", "(", "input_file", ",", "json_body", ",", "p_params", ")", ":", "params", "=", "{", "}", "try", ":", "if", "input_file", ":", "params", ".", "update", "(", "json", ".", "load", "(", "input_file", ")", ")", "if", "json_body", "is", "not", "None", ":", "params", ".", "update", "(", "json", ".", "loads", "(", "json_body", ")", ")", "except", "ValueError", "as", "e", ":", "raise", "ValueError", "(", "\"input is not valid JSON: %s\"", "%", "e", ")", "try", ":", "params", ".", "update", "(", "{", "p", ".", "split", "(", "'='", ",", "1", ")", "[", "0", "]", ":", "p", ".", "split", "(", "'='", ",", "1", ")", "[", "1", "]", "for", "p", "in", "p_params", "}", ")", "except", "IndexError", ":", "raise", "ValueError", "(", "\"--param arguments must have key=value format\"", ")", "return", "params" ]
3bedf2a454aee39214c11fbf556ead3eecc27881
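Worked examples of _read_params, showing the merge order (file, then -j, then -p) and the key=value check.

from luminoso_api.v5_cli import _read_params

_read_params(None, '{"name": "From JSON", "language": "en"}', ['name=From param'])
# -> {'name': 'From param', 'language': 'en'}   (-p overrides -j)

_read_params(None, None, ['noequals'])
# raises ValueError: --param arguments must have key=value format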
test
_batches
Take an iterator and yield its contents in groups of `size` items.
luminoso_api/v5_upload.py
def _batches(iterable, size): """ Take an iterator and yield its contents in groups of `size` items. """ sourceiter = iter(iterable) while True: try: batchiter = islice(sourceiter, size) yield chain([next(batchiter)], batchiter) except StopIteration: return
def _batches(iterable, size): """ Take an iterator and yield its contents in groups of `size` items. """ sourceiter = iter(iterable) while True: try: batchiter = islice(sourceiter, size) yield chain([next(batchiter)], batchiter) except StopIteration: return
[ "Take", "an", "iterator", "and", "yield", "its", "contents", "in", "groups", "of", "size", "items", "." ]
LuminosoInsight/luminoso-api-client-python
python
https://github.com/LuminosoInsight/luminoso-api-client-python/blob/3bedf2a454aee39214c11fbf556ead3eecc27881/luminoso_api/v5_upload.py#L22-L32
[ "def", "_batches", "(", "iterable", ",", "size", ")", ":", "sourceiter", "=", "iter", "(", "iterable", ")", "while", "True", ":", "try", ":", "batchiter", "=", "islice", "(", "sourceiter", ",", "size", ")", "yield", "chain", "(", "[", "next", "(", "batchiter", ")", "]", ",", "batchiter", ")", "except", "StopIteration", ":", "return" ]
3bedf2a454aee39214c11fbf556ead3eecc27881
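_batches in action; each yielded batch is itself an iterator and should be consumed before asking for the next one, as the upload loop below does.

from luminoso_api.v5_upload import _batches

for batch in _batches(range(5), 2):
    print(list(batch))
# [0, 1]
# [2, 3]
# [4]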
test
_simplify_doc
Limit a document to just the three fields we should upload.
luminoso_api/v5_upload.py
def _simplify_doc(doc): """ Limit a document to just the three fields we should upload. """ # Mutate a copy of the document to fill in missing fields doc = dict(doc) if 'text' not in doc: raise ValueError("The document {!r} has no text field".format(doc)) return { 'text': doc['text'], 'metadata': doc.get('metadata', []), 'title': doc.get('title', '') }
def _simplify_doc(doc): """ Limit a document to just the three fields we should upload. """ # Mutate a copy of the document to fill in missing fields doc = dict(doc) if 'text' not in doc: raise ValueError("The document {!r} has no text field".format(doc)) return { 'text': doc['text'], 'metadata': doc.get('metadata', []), 'title': doc.get('title', '') }
[ "Limit", "a", "document", "to", "just", "the", "three", "fields", "we", "should", "upload", "." ]
LuminosoInsight/luminoso-api-client-python
python
https://github.com/LuminosoInsight/luminoso-api-client-python/blob/3bedf2a454aee39214c11fbf556ead3eecc27881/luminoso_api/v5_upload.py#L35-L47
[ "def", "_simplify_doc", "(", "doc", ")", ":", "# Mutate a copy of the document to fill in missing fields", "doc", "=", "dict", "(", "doc", ")", "if", "'text'", "not", "in", "doc", ":", "raise", "ValueError", "(", "\"The document {!r} has no text field\"", ".", "format", "(", "doc", ")", ")", "return", "{", "'text'", ":", "doc", "[", "'text'", "]", ",", "'metadata'", ":", "doc", ".", "get", "(", "'metadata'", ",", "[", "]", ")", ",", "'title'", ":", "doc", ".", "get", "(", "'title'", ",", "''", ")", "}" ]
3bedf2a454aee39214c11fbf556ead3eecc27881
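Worked examples of _simplify_doc.

from luminoso_api.v5_upload import _simplify_doc

_simplify_doc({'text': 'Great coffee', 'rating': 5})
# -> {'text': 'Great coffee', 'metadata': [], 'title': ''}   (unlisted fields are dropped)

_simplify_doc({'title': 'no text here'})
# raises ValueError: the document has no text field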
test
create_project_with_docs
Given an iterator of documents, upload them as a Luminoso project.
luminoso_api/v5_upload.py
def create_project_with_docs( client, docs, language, name, account=None, progress=False ): """ Given an iterator of documents, upload them as a Luminoso project. """ description = 'Uploaded using lumi-upload at {}'.format(time.asctime()) if account is not None: proj_record = client.post( 'projects', name=name, language=language, description=description, account_id=account, ) else: proj_record = client.post( 'projects', name=name, language=language, description=description ) proj_id = proj_record['project_id'] proj_client = client.client_for_path('projects/' + proj_id) try: if progress: progress_bar = tqdm(desc='Uploading documents') else: progress_bar = None for batch in _batches(docs, BATCH_SIZE): docs_to_upload = [_simplify_doc(doc) for doc in batch] proj_client.post('upload', docs=docs_to_upload) if progress: progress_bar.update(BATCH_SIZE) finally: if progress: progress_bar.close() print('The server is building project {!r}.'.format(proj_id)) proj_client.post('build') while True: time.sleep(10) proj_status = proj_client.get() build_info = proj_status['last_build_info'] if 'success' in build_info: if not build_info['success']: raise LuminosoServerError(build_info['reason']) return proj_status
def create_project_with_docs( client, docs, language, name, account=None, progress=False ): """ Given an iterator of documents, upload them as a Luminoso project. """ description = 'Uploaded using lumi-upload at {}'.format(time.asctime()) if account is not None: proj_record = client.post( 'projects', name=name, language=language, description=description, account_id=account, ) else: proj_record = client.post( 'projects', name=name, language=language, description=description ) proj_id = proj_record['project_id'] proj_client = client.client_for_path('projects/' + proj_id) try: if progress: progress_bar = tqdm(desc='Uploading documents') else: progress_bar = None for batch in _batches(docs, BATCH_SIZE): docs_to_upload = [_simplify_doc(doc) for doc in batch] proj_client.post('upload', docs=docs_to_upload) if progress: progress_bar.update(BATCH_SIZE) finally: if progress: progress_bar.close() print('The server is building project {!r}.'.format(proj_id)) proj_client.post('build') while True: time.sleep(10) proj_status = proj_client.get() build_info = proj_status['last_build_info'] if 'success' in build_info: if not build_info['success']: raise LuminosoServerError(build_info['reason']) return proj_status
[ "Given", "an", "iterator", "of", "documents", "upload", "them", "as", "a", "Luminoso", "project", "." ]
LuminosoInsight/luminoso-api-client-python
python
https://github.com/LuminosoInsight/luminoso-api-client-python/blob/3bedf2a454aee39214c11fbf556ead3eecc27881/luminoso_api/v5_upload.py#L58-L105
[ "def", "create_project_with_docs", "(", "client", ",", "docs", ",", "language", ",", "name", ",", "account", "=", "None", ",", "progress", "=", "False", ")", ":", "description", "=", "'Uploaded using lumi-upload at {}'", ".", "format", "(", "time", ".", "asctime", "(", ")", ")", "if", "account", "is", "not", "None", ":", "proj_record", "=", "client", ".", "post", "(", "'projects'", ",", "name", "=", "name", ",", "language", "=", "language", ",", "description", "=", "description", ",", "account_id", "=", "account", ",", ")", "else", ":", "proj_record", "=", "client", ".", "post", "(", "'projects'", ",", "name", "=", "name", ",", "language", "=", "language", ",", "description", "=", "description", ")", "proj_id", "=", "proj_record", "[", "'project_id'", "]", "proj_client", "=", "client", ".", "client_for_path", "(", "'projects/'", "+", "proj_id", ")", "try", ":", "if", "progress", ":", "progress_bar", "=", "tqdm", "(", "desc", "=", "'Uploading documents'", ")", "else", ":", "progress_bar", "=", "None", "for", "batch", "in", "_batches", "(", "docs", ",", "BATCH_SIZE", ")", ":", "docs_to_upload", "=", "[", "_simplify_doc", "(", "doc", ")", "for", "doc", "in", "batch", "]", "proj_client", ".", "post", "(", "'upload'", ",", "docs", "=", "docs_to_upload", ")", "if", "progress", ":", "progress_bar", ".", "update", "(", "BATCH_SIZE", ")", "finally", ":", "if", "progress", ":", "progress_bar", ".", "close", "(", ")", "print", "(", "'The server is building project {!r}.'", ".", "format", "(", "proj_id", ")", ")", "proj_client", ".", "post", "(", "'build'", ")", "while", "True", ":", "time", ".", "sleep", "(", "10", ")", "proj_status", "=", "proj_client", ".", "get", "(", ")", "build_info", "=", "proj_status", "[", "'last_build_info'", "]", "if", "'success'", "in", "build_info", ":", "if", "not", "build_info", "[", "'success'", "]", ":", "raise", "LuminosoServerError", "(", "build_info", "[", "'reason'", "]", ")", "return", "proj_status" ]
3bedf2a454aee39214c11fbf556ead3eecc27881
test
upload_docs
Given a LuminosoClient pointing to the root of the API, and a filename to read JSON lines from, create a project from the documents in that file.
luminoso_api/v5_upload.py
def upload_docs( client, input_filename, language, name, account=None, progress=False ): """ Given a LuminosoClient pointing to the root of the API, and a filename to read JSON lines from, create a project from the documents in that file. """ docs = iterate_json_lines(input_filename) return create_project_with_docs( client, docs, language, name, account, progress=progress )
def upload_docs( client, input_filename, language, name, account=None, progress=False ): """ Given a LuminosoClient pointing to the root of the API, and a filename to read JSON lines from, create a project from the documents in that file. """ docs = iterate_json_lines(input_filename) return create_project_with_docs( client, docs, language, name, account, progress=progress )
[ "Given", "a", "LuminosoClient", "pointing", "to", "the", "root", "of", "the", "API", "and", "a", "filename", "to", "read", "JSON", "lines", "from", "create", "a", "project", "from", "the", "documents", "in", "that", "file", "." ]
LuminosoInsight/luminoso-api-client-python
python
https://github.com/LuminosoInsight/luminoso-api-client-python/blob/3bedf2a454aee39214c11fbf556ead3eecc27881/luminoso_api/v5_upload.py#L108-L118
[ "def", "upload_docs", "(", "client", ",", "input_filename", ",", "language", ",", "name", ",", "account", "=", "None", ",", "progress", "=", "False", ")", ":", "docs", "=", "iterate_json_lines", "(", "input_filename", ")", "return", "create_project_with_docs", "(", "client", ",", "docs", ",", "language", ",", "name", ",", "account", ",", "progress", "=", "progress", ")" ]
3bedf2a454aee39214c11fbf556ead3eecc27881
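A minimal usage sketch for upload_docs, mirroring the call pattern in _main further down; the URL, token and file name are placeholders, and the import paths are assumed from the package layout (luminoso_api/v5_upload.py).

# Hypothetical values throughout -- replace the URL, token and file name.
from luminoso_api import LuminosoClient
from luminoso_api.v5_upload import upload_docs

client = LuminosoClient.connect(url='https://example.com/api/v5',
                                token='YOUR-API-TOKEN')
# Reads one JSON document per line from reviews.jsons and builds the project.
status = upload_docs(client, 'reviews.jsons', 'en', 'Example project',
                     progress=True)
print(status['document_count'])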
test
_main
Handle arguments for the 'lumi-upload' command.
luminoso_api/v5_upload.py
def _main(argv): """ Handle arguments for the 'lumi-upload' command. """ parser = argparse.ArgumentParser( description=DESCRIPTION, formatter_class=argparse.RawDescriptionHelpFormatter, ) parser.add_argument( '-b', '--base-url', default=URL_BASE, help='API root url, default: %s' % URL_BASE, ) parser.add_argument( '-a', '--account-id', default=None, help='Account ID that should own the project, if not the default', ) parser.add_argument( '-l', '--language', default='en', help='The language code for the language the text is in. Default: en', ) parser.add_argument('-t', '--token', help="API authentication token") parser.add_argument( '-s', '--save-token', action='store_true', help='save --token for --base-url to ~/.luminoso/tokens.json', ) parser.add_argument( 'input_filename', help='The JSON-lines (.jsons) file of documents to upload', ) parser.add_argument( 'project_name', nargs='?', default=None, help='What the project should be called', ) args = parser.parse_args(argv) if args.save_token: if not args.token: raise ValueError("error: no token provided") LuminosoClient.save_token(args.token, domain=urlparse(args.base_url).netloc) client = LuminosoClient.connect(url=args.base_url, token=args.token) name = args.project_name if name is None: name = input('Enter a name for the project: ') if not name: print('Aborting because no name was provided.') return result = upload_docs( client, args.input_filename, args.language, name, account=args.account_id, progress=True, ) print( 'Project {!r} created with {} documents'.format( result['project_id'], result['document_count'] ) )
def _main(argv): """ Handle arguments for the 'lumi-upload' command. """ parser = argparse.ArgumentParser( description=DESCRIPTION, formatter_class=argparse.RawDescriptionHelpFormatter, ) parser.add_argument( '-b', '--base-url', default=URL_BASE, help='API root url, default: %s' % URL_BASE, ) parser.add_argument( '-a', '--account-id', default=None, help='Account ID that should own the project, if not the default', ) parser.add_argument( '-l', '--language', default='en', help='The language code for the language the text is in. Default: en', ) parser.add_argument('-t', '--token', help="API authentication token") parser.add_argument( '-s', '--save-token', action='store_true', help='save --token for --base-url to ~/.luminoso/tokens.json', ) parser.add_argument( 'input_filename', help='The JSON-lines (.jsons) file of documents to upload', ) parser.add_argument( 'project_name', nargs='?', default=None, help='What the project should be called', ) args = parser.parse_args(argv) if args.save_token: if not args.token: raise ValueError("error: no token provided") LuminosoClient.save_token(args.token, domain=urlparse(args.base_url).netloc) client = LuminosoClient.connect(url=args.base_url, token=args.token) name = args.project_name if name is None: name = input('Enter a name for the project: ') if not name: print('Aborting because no name was provided.') return result = upload_docs( client, args.input_filename, args.language, name, account=args.account_id, progress=True, ) print( 'Project {!r} created with {} documents'.format( result['project_id'], result['document_count'] ) )
[ "Handle", "arguments", "for", "the", "lumi", "-", "upload", "command", "." ]
LuminosoInsight/luminoso-api-client-python
python
https://github.com/LuminosoInsight/luminoso-api-client-python/blob/3bedf2a454aee39214c11fbf556ead3eecc27881/luminoso_api/v5_upload.py#L121-L192
[ "def", "_main", "(", "argv", ")", ":", "parser", "=", "argparse", ".", "ArgumentParser", "(", "description", "=", "DESCRIPTION", ",", "formatter_class", "=", "argparse", ".", "RawDescriptionHelpFormatter", ",", ")", "parser", ".", "add_argument", "(", "'-b'", ",", "'--base-url'", ",", "default", "=", "URL_BASE", ",", "help", "=", "'API root url, default: %s'", "%", "URL_BASE", ",", ")", "parser", ".", "add_argument", "(", "'-a'", ",", "'--account-id'", ",", "default", "=", "None", ",", "help", "=", "'Account ID that should own the project, if not the default'", ",", ")", "parser", ".", "add_argument", "(", "'-l'", ",", "'--language'", ",", "default", "=", "'en'", ",", "help", "=", "'The language code for the language the text is in. Default: en'", ",", ")", "parser", ".", "add_argument", "(", "'-t'", ",", "'--token'", ",", "help", "=", "\"API authentication token\"", ")", "parser", ".", "add_argument", "(", "'-s'", ",", "'--save-token'", ",", "action", "=", "'store_true'", ",", "help", "=", "'save --token for --base-url to ~/.luminoso/tokens.json'", ",", ")", "parser", ".", "add_argument", "(", "'input_filename'", ",", "help", "=", "'The JSON-lines (.jsons) file of documents to upload'", ",", ")", "parser", ".", "add_argument", "(", "'project_name'", ",", "nargs", "=", "'?'", ",", "default", "=", "None", ",", "help", "=", "'What the project should be called'", ",", ")", "args", "=", "parser", ".", "parse_args", "(", "argv", ")", "if", "args", ".", "save_token", ":", "if", "not", "args", ".", "token", ":", "raise", "ValueError", "(", "\"error: no token provided\"", ")", "LuminosoClient", ".", "save_token", "(", "args", ".", "token", ",", "domain", "=", "urlparse", "(", "args", ".", "base_url", ")", ".", "netloc", ")", "client", "=", "LuminosoClient", ".", "connect", "(", "url", "=", "args", ".", "base_url", ",", "token", "=", "args", ".", "token", ")", "name", "=", "args", ".", "project_name", "if", "name", "is", "None", ":", "name", "=", "input", "(", "'Enter a name for the project: '", ")", "if", "not", "name", ":", "print", "(", "'Aborting because no name was provided.'", ")", "return", "result", "=", "upload_docs", "(", "client", ",", "args", ".", "input_filename", ",", "args", ".", "language", ",", "name", ",", "account", "=", "args", ".", "account_id", ",", "progress", "=", "True", ",", ")", "print", "(", "'Project {!r} created with {} documents'", ".", "format", "(", "result", "[", "'project_id'", "]", ",", "result", "[", "'document_count'", "]", ")", ")" ]
3bedf2a454aee39214c11fbf556ead3eecc27881
test
batches
Take an iterator and yield its contents in groups of `size` items.
luminoso_api/v4_upload.py
def batches(iterable, size): """ Take an iterator and yield its contents in groups of `size` items. """ sourceiter = iter(iterable) while True: batchiter = islice(sourceiter, size) yield chain([next(batchiter)], batchiter)
def batches(iterable, size): """ Take an iterator and yield its contents in groups of `size` items. """ sourceiter = iter(iterable) while True: batchiter = islice(sourceiter, size) yield chain([next(batchiter)], batchiter)
[ "Take", "an", "iterator", "and", "yield", "its", "contents", "in", "groups", "of", "size", "items", "." ]
LuminosoInsight/luminoso-api-client-python
python
https://github.com/LuminosoInsight/luminoso-api-client-python/blob/3bedf2a454aee39214c11fbf556ead3eecc27881/luminoso_api/v4_upload.py#L9-L16
[ "def", "batches", "(", "iterable", ",", "size", ")", ":", "sourceiter", "=", "iter", "(", "iterable", ")", "while", "True", ":", "batchiter", "=", "islice", "(", "sourceiter", ",", "size", ")", "yield", "chain", "(", "[", "next", "(", "batchiter", ")", "]", ",", "batchiter", ")" ]
3bedf2a454aee39214c11fbf556ead3eecc27881
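On Python 3.7+ (PEP 479) the bare next() call in batches turns the final StopIteration into a RuntimeError once the source is exhausted; below is a self-contained sketch of the same batching idea that ends cleanly instead (the helper name is illustrative, not part of the library).

from itertools import chain, islice

def batches_sketch(iterable, size):
    """Yield items from `iterable` in groups of `size`; the last group may be smaller."""
    sourceiter = iter(iterable)
    while True:
        batchiter = islice(sourceiter, size)
        try:
            first = next(batchiter)
        except StopIteration:
            return  # source exhausted: finish the generator quietly
        yield chain([first], batchiter)

# Each yielded batch must be consumed before asking for the next one.
for batch in batches_sketch(range(7), 3):
    print(list(batch))  # [0, 1, 2] then [3, 4, 5] then [6]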
test
upload_stream
Given a file-like object containing a JSON stream, upload it to Luminoso with the given account name and project name.
luminoso_api/v4_upload.py
def upload_stream(stream, server, account, projname, language=None, username=None, password=None, append=False, stage=False): """ Given a file-like object containing a JSON stream, upload it to Luminoso with the given account name and project name. """ client = LuminosoClient.connect(server, username=username, password=password) if not append: # If we're not appending to an existing project, create new project. info = client.post('/projects/' + account, name=projname) project_id = info['project_id'] print('New project ID:', project_id) else: projects = client.get('/projects/' + account, name=projname) if len(projects) == 0: print('No such project exists!') return if len(projects) > 1: print('Warning: Multiple projects with name "%s". ' % projname, end='') project_id = projects[0]['project_id'] print('Using existing project with id %s.' % project_id) project = client.change_path('/projects/' + account + '/' + project_id) counter = 0 for batch in batches(stream, 1000): counter += 1 documents = list(batch) project.upload('docs', documents) print('Uploaded batch #%d' % (counter)) if not stage: # Calculate the docs into the assoc space. print('Calculating.') kwargs = {} if language is not None: kwargs = {'language': language} job_id = project.post('docs/recalculate', **kwargs) project.wait_for(job_id)
def upload_stream(stream, server, account, projname, language=None, username=None, password=None, append=False, stage=False): """ Given a file-like object containing a JSON stream, upload it to Luminoso with the given account name and project name. """ client = LuminosoClient.connect(server, username=username, password=password) if not append: # If we're not appending to an existing project, create new project. info = client.post('/projects/' + account, name=projname) project_id = info['project_id'] print('New project ID:', project_id) else: projects = client.get('/projects/' + account, name=projname) if len(projects) == 0: print('No such project exists!') return if len(projects) > 1: print('Warning: Multiple projects with name "%s". ' % projname, end='') project_id = projects[0]['project_id'] print('Using existing project with id %s.' % project_id) project = client.change_path('/projects/' + account + '/' + project_id) counter = 0 for batch in batches(stream, 1000): counter += 1 documents = list(batch) project.upload('docs', documents) print('Uploaded batch #%d' % (counter)) if not stage: # Calculate the docs into the assoc space. print('Calculating.') kwargs = {} if language is not None: kwargs = {'language': language} job_id = project.post('docs/recalculate', **kwargs) project.wait_for(job_id)
[ "Given", "a", "file", "-", "like", "object", "containing", "a", "JSON", "stream", "upload", "it", "to", "Luminoso", "with", "the", "given", "account", "name", "and", "project", "name", "." ]
LuminosoInsight/luminoso-api-client-python
python
https://github.com/LuminosoInsight/luminoso-api-client-python/blob/3bedf2a454aee39214c11fbf556ead3eecc27881/luminoso_api/v4_upload.py#L19-L60
[ "def", "upload_stream", "(", "stream", ",", "server", ",", "account", ",", "projname", ",", "language", "=", "None", ",", "username", "=", "None", ",", "password", "=", "None", ",", "append", "=", "False", ",", "stage", "=", "False", ")", ":", "client", "=", "LuminosoClient", ".", "connect", "(", "server", ",", "username", "=", "username", ",", "password", "=", "password", ")", "if", "not", "append", ":", "# If we're not appending to an existing project, create new project.", "info", "=", "client", ".", "post", "(", "'/projects/'", "+", "account", ",", "name", "=", "projname", ")", "project_id", "=", "info", "[", "'project_id'", "]", "print", "(", "'New project ID:'", ",", "project_id", ")", "else", ":", "projects", "=", "client", ".", "get", "(", "'/projects/'", "+", "account", ",", "name", "=", "projname", ")", "if", "len", "(", "projects", ")", "==", "0", ":", "print", "(", "'No such project exists!'", ")", "return", "if", "len", "(", "projects", ")", ">", "1", ":", "print", "(", "'Warning: Multiple projects with name \"%s\". '", "%", "projname", ",", "end", "=", "''", ")", "project_id", "=", "projects", "[", "0", "]", "[", "'project_id'", "]", "print", "(", "'Using existing project with id %s.'", "%", "project_id", ")", "project", "=", "client", ".", "change_path", "(", "'/projects/'", "+", "account", "+", "'/'", "+", "project_id", ")", "counter", "=", "0", "for", "batch", "in", "batches", "(", "stream", ",", "1000", ")", ":", "counter", "+=", "1", "documents", "=", "list", "(", "batch", ")", "project", ".", "upload", "(", "'docs'", ",", "documents", ")", "print", "(", "'Uploaded batch #%d'", "%", "(", "counter", ")", ")", "if", "not", "stage", ":", "# Calculate the docs into the assoc space.", "print", "(", "'Calculating.'", ")", "kwargs", "=", "{", "}", "if", "language", "is", "not", "None", ":", "kwargs", "=", "{", "'language'", ":", "language", "}", "job_id", "=", "project", ".", "post", "(", "'docs/recalculate'", ",", "*", "*", "kwargs", ")", "project", ".", "wait_for", "(", "job_id", ")" ]
3bedf2a454aee39214c11fbf556ead3eecc27881
test
upload_file
Upload a file to Luminoso with the given account and project name. Given a file containing JSON, JSON stream, or CSV data, this verifies that we can successfully convert it to a JSON stream, then uploads that JSON stream.
luminoso_api/v4_upload.py
def upload_file(filename, server, account, projname, language=None, username=None, password=None, append=False, stage=False, date_format=None): """ Upload a file to Luminoso with the given account and project name. Given a file containing JSON, JSON stream, or CSV data, this verifies that we can successfully convert it to a JSON stream, then uploads that JSON stream. """ stream = transcode_to_stream(filename, date_format) upload_stream(stream_json_lines(stream), server, account, projname, language=language, username=username, password=password, append=append, stage=stage)
def upload_file(filename, server, account, projname, language=None, username=None, password=None, append=False, stage=False, date_format=None): """ Upload a file to Luminoso with the given account and project name. Given a file containing JSON, JSON stream, or CSV data, this verifies that we can successfully convert it to a JSON stream, then uploads that JSON stream. """ stream = transcode_to_stream(filename, date_format) upload_stream(stream_json_lines(stream), server, account, projname, language=language, username=username, password=password, append=append, stage=stage)
[ "Upload", "a", "file", "to", "Luminoso", "with", "the", "given", "account", "and", "project", "name", "." ]
LuminosoInsight/luminoso-api-client-python
python
https://github.com/LuminosoInsight/luminoso-api-client-python/blob/3bedf2a454aee39214c11fbf556ead3eecc27881/luminoso_api/v4_upload.py#L63-L77
[ "def", "upload_file", "(", "filename", ",", "server", ",", "account", ",", "projname", ",", "language", "=", "None", ",", "username", "=", "None", ",", "password", "=", "None", ",", "append", "=", "False", ",", "stage", "=", "False", ",", "date_format", "=", "None", ")", ":", "stream", "=", "transcode_to_stream", "(", "filename", ",", "date_format", ")", "upload_stream", "(", "stream_json_lines", "(", "stream", ")", ",", "server", ",", "account", ",", "projname", ",", "language", "=", "language", ",", "username", "=", "username", ",", "password", "=", "password", ",", "append", "=", "append", ",", "stage", "=", "stage", ")" ]
3bedf2a454aee39214c11fbf556ead3eecc27881
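A hedged example of calling upload_file directly instead of going through the main() CLI wrapper below; the server URL, account ID, credentials and file name are placeholders, and the import path is assumed from luminoso_api/v4_upload.py.

# All identifiers below are illustrative placeholders.
from luminoso_api.v4_upload import upload_file

upload_file('reviews.csv',                 # CSV, JSON or JSON-lines input
            'https://example.com/api/v4',  # server (the --api-url option)
            'a11aa1a1',                    # account ID
            'Example project',
            language='en',
            username='user@example.com',   # password omitted: typed in later, per the -p help
            date_format='%m/%d/%y')        # same format as the 'us-standard' shortcut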
test
main
Handle command line arguments to upload a file to a Luminoso project as a script.
luminoso_api/v4_upload.py
def main(): """ Handle command line arguments, to upload a file to a Luminoso project as a script. """ import argparse parser = argparse.ArgumentParser() parser.add_argument('filename') parser.add_argument('account') parser.add_argument('project_name') parser.add_argument( '--append', help=("If append flag is used, upload documents to existing project, " "rather than creating a new project."), action="store_true" ) parser.add_argument( '-s', '--stage', help="If stage flag is used, just upload docs, don't recalculate.", action="store_true" ) parser.add_argument( '-a', '--api-url', help="Specify an alternate API url", default=URL_BASE ) parser.add_argument( '-l', '--language', help=("Two-letter language code to use when recalculating (e.g. 'en' " "or 'ja')") ) parser.add_argument( '-u', '--username', default=None, help="username (defaults to your username on your computer)" ) parser.add_argument( '-p', '--password', default=None, help="password (you can leave this out and type it in later)" ) parser.add_argument( '-d', '--date-format', default='iso', help=("format string for parsing dates, following " "http://strftime.org/. Default is 'iso', which is " "'%%Y-%%m-%%dT%%H:%%M:%%S+00:00'. Other shortcuts are 'epoch' " "for epoch time or 'us-standard' for '%%m/%%d/%%y'") ) args = parser.parse_args() # Implement some human-understandable shortcuts for date_format date_format_lower = args.date_format.lower() if date_format_lower == 'iso': date_format = '%Y-%m-%dT%H:%M:%S+00:00' elif date_format_lower in ['unix', 'epoch']: date_format = 'epoch' elif date_format_lower == 'us-standard': date_format = '%m/%d/%y' else: date_format = args.date_format upload_file(args.filename, args.api_url, args.account, args.project_name, language=args.language, username=args.username, password=args.password, append=args.append, stage=args.stage, date_format=date_format)
def main(): """ Handle command line arguments, to upload a file to a Luminoso project as a script. """ import argparse parser = argparse.ArgumentParser() parser.add_argument('filename') parser.add_argument('account') parser.add_argument('project_name') parser.add_argument( '--append', help=("If append flag is used, upload documents to existing project, " "rather than creating a new project."), action="store_true" ) parser.add_argument( '-s', '--stage', help="If stage flag is used, just upload docs, don't recalculate.", action="store_true" ) parser.add_argument( '-a', '--api-url', help="Specify an alternate API url", default=URL_BASE ) parser.add_argument( '-l', '--language', help=("Two-letter language code to use when recalculating (e.g. 'en' " "or 'ja')") ) parser.add_argument( '-u', '--username', default=None, help="username (defaults to your username on your computer)" ) parser.add_argument( '-p', '--password', default=None, help="password (you can leave this out and type it in later)" ) parser.add_argument( '-d', '--date-format', default='iso', help=("format string for parsing dates, following " "http://strftime.org/. Default is 'iso', which is " "'%%Y-%%m-%%dT%%H:%%M:%%S+00:00'. Other shortcuts are 'epoch' " "for epoch time or 'us-standard' for '%%m/%%d/%%y'") ) args = parser.parse_args() # Implement some human-understandable shortcuts for date_format date_format_lower = args.date_format.lower() if date_format_lower == 'iso': date_format = '%Y-%m-%dT%H:%M:%S+00:00' elif date_format_lower in ['unix', 'epoch']: date_format = 'epoch' elif date_format_lower == 'us-standard': date_format = '%m/%d/%y' else: date_format = args.date_format upload_file(args.filename, args.api_url, args.account, args.project_name, language=args.language, username=args.username, password=args.password, append=args.append, stage=args.stage, date_format=date_format)
[ "Handle", "command", "line", "arguments", "to", "upload", "a", "file", "to", "a", "Luminoso", "project", "as", "a", "script", "." ]
LuminosoInsight/luminoso-api-client-python
python
https://github.com/LuminosoInsight/luminoso-api-client-python/blob/3bedf2a454aee39214c11fbf556ead3eecc27881/luminoso_api/v4_upload.py#L80-L143
[ "def", "main", "(", ")", ":", "import", "argparse", "parser", "=", "argparse", ".", "ArgumentParser", "(", ")", "parser", ".", "add_argument", "(", "'filename'", ")", "parser", ".", "add_argument", "(", "'account'", ")", "parser", ".", "add_argument", "(", "'project_name'", ")", "parser", ".", "add_argument", "(", "'--append'", ",", "help", "=", "(", "\"If append flag is used, upload documents to existing project, \"", "\"rather than creating a new project.\"", ")", ",", "action", "=", "\"store_true\"", ")", "parser", ".", "add_argument", "(", "'-s'", ",", "'--stage'", ",", "help", "=", "\"If stage flag is used, just upload docs, don't recalculate.\"", ",", "action", "=", "\"store_true\"", ")", "parser", ".", "add_argument", "(", "'-a'", ",", "'--api-url'", ",", "help", "=", "\"Specify an alternate API url\"", ",", "default", "=", "URL_BASE", ")", "parser", ".", "add_argument", "(", "'-l'", ",", "'--language'", ",", "help", "=", "(", "\"Two-letter language code to use when recalculating (e.g. 'en' \"", "\"or 'ja')\"", ")", ")", "parser", ".", "add_argument", "(", "'-u'", ",", "'--username'", ",", "default", "=", "None", ",", "help", "=", "\"username (defaults to your username on your computer)\"", ")", "parser", ".", "add_argument", "(", "'-p'", ",", "'--password'", ",", "default", "=", "None", ",", "help", "=", "\"password (you can leave this out and type it in later)\"", ")", "parser", ".", "add_argument", "(", "'-d'", ",", "'--date-format'", ",", "default", "=", "'iso'", ",", "help", "=", "(", "\"format string for parsing dates, following \"", "\"http://strftime.org/. Default is 'iso', which is \"", "\"'%%Y-%%m-%%dT%%H:%%M:%%S+00:00'. Other shortcuts are 'epoch' \"", "\"for epoch time or 'us-standard' for '%%m/%%d/%%y'\"", ")", ")", "args", "=", "parser", ".", "parse_args", "(", ")", "# Implement some human-understandable shortcuts for date_format", "date_format_lower", "=", "args", ".", "date_format", ".", "lower", "(", ")", "if", "date_format_lower", "==", "'iso'", ":", "date_format", "=", "'%Y-%m-%dT%H:%M:%S+00:00'", "elif", "date_format_lower", "in", "[", "'unix'", ",", "'epoch'", "]", ":", "date_format", "=", "'epoch'", "elif", "date_format_lower", "==", "'us-standard'", ":", "date_format", "=", "'%m/%d/%y'", "else", ":", "date_format", "=", "args", ".", "date_format", "upload_file", "(", "args", ".", "filename", ",", "args", ".", "api_url", ",", "args", ".", "account", ",", "args", ".", "project_name", ",", "language", "=", "args", ".", "language", ",", "username", "=", "args", ".", "username", ",", "password", "=", "args", ".", "password", ",", "append", "=", "args", ".", "append", ",", "stage", "=", "args", ".", "stage", ",", "date_format", "=", "date_format", ")" ]
3bedf2a454aee39214c11fbf556ead3eecc27881
test
TokenAuth.from_user_creds
Obtain a short-lived token using a username and password, and use that token to create an auth object.
luminoso_api/v4_auth.py
def from_user_creds(cls, username, password, url=URL_BASE): """ Obtain a short-lived token using a username and password, and use that token to create an auth object. """ session = requests.session() token_resp = session.post(url.rstrip('/') + '/user/login/', data={'username': username, 'password': password}) if token_resp.status_code != 200: error = token_resp.text try: error = json.loads(error)['error'] except (KeyError, ValueError): pass raise LuminosoLoginError(error) return cls(token_resp.json()['result']['token'])
def from_user_creds(cls, username, password, url=URL_BASE): """ Obtain a short-lived token using a username and password, and use that token to create an auth object. """ session = requests.session() token_resp = session.post(url.rstrip('/') + '/user/login/', data={'username': username, 'password': password}) if token_resp.status_code != 200: error = token_resp.text try: error = json.loads(error)['error'] except (KeyError, ValueError): pass raise LuminosoLoginError(error) return cls(token_resp.json()['result']['token'])
[ "Obtain", "a", "short", "-", "lived", "token", "using", "a", "username", "and", "password", "and", "use", "that", "token", "to", "create", "an", "auth", "object", "." ]
LuminosoInsight/luminoso-api-client-python
python
https://github.com/LuminosoInsight/luminoso-api-client-python/blob/3bedf2a454aee39214c11fbf556ead3eecc27881/luminoso_api/v4_auth.py#L29-L46
[ "def", "from_user_creds", "(", "cls", ",", "username", ",", "password", ",", "url", "=", "URL_BASE", ")", ":", "session", "=", "requests", ".", "session", "(", ")", "token_resp", "=", "session", ".", "post", "(", "url", ".", "rstrip", "(", "'/'", ")", "+", "'/user/login/'", ",", "data", "=", "{", "'username'", ":", "username", ",", "'password'", ":", "password", "}", ")", "if", "token_resp", ".", "status_code", "!=", "200", ":", "error", "=", "token_resp", ".", "text", "try", ":", "error", "=", "json", ".", "loads", "(", "error", ")", "[", "'error'", "]", "except", "(", "KeyError", ",", "ValueError", ")", ":", "pass", "raise", "LuminosoLoginError", "(", "error", ")", "return", "cls", "(", "token_resp", ".", "json", "(", ")", "[", "'result'", "]", "[", "'token'", "]", ")" ]
3bedf2a454aee39214c11fbf556ead3eecc27881
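Usage sketch for the classmethod above; the credentials and URL are placeholders and the import path is assumed from luminoso_api/v4_auth.py. A rejected login raises LuminosoLoginError, as in the method body.

# Placeholder credentials; `auth` ends up wrapping the token returned by /user/login/.
from luminoso_api.v4_auth import TokenAuth

auth = TokenAuth.from_user_creds('user@example.com', 's3cret',
                                 url='https://example.com/api/v4')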
test
LinkyClient.login
Set http session.
pylinky/client.py
def login(self): """Set http session.""" if self._session is None: self._session = requests.session() # adding fake user-agent header self._session.headers.update({'User-agent': str(UserAgent().random)}) return self._post_login_page()
def login(self): """Set http session.""" if self._session is None: self._session = requests.session() # adding fake user-agent header self._session.headers.update({'User-agent': str(UserAgent().random)}) return self._post_login_page()
[ "Set", "http", "session", "." ]
Pirionfr/pyLinky
python
https://github.com/Pirionfr/pyLinky/blob/4372496bfcdd95ccfd2f017634cf02b38a2d6fd1/pylinky/client.py#L47-L53
[ "def", "login", "(", "self", ")", ":", "if", "self", ".", "_session", "is", "None", ":", "self", ".", "_session", "=", "requests", ".", "session", "(", ")", "# adding fake user-agent header", "self", ".", "_session", ".", "headers", ".", "update", "(", "{", "'User-agent'", ":", "str", "(", "UserAgent", "(", ")", ".", "random", ")", "}", ")", "return", "self", ".", "_post_login_page", "(", ")" ]
4372496bfcdd95ccfd2f017634cf02b38a2d6fd1
test
LinkyClient._post_login_page
Login to enedis.
pylinky/client.py
def _post_login_page(self): """Login to enedis.""" data = { 'IDToken1': self.username, 'IDToken2': self.password, 'SunQueryParamsString': base64.b64encode(b'realm=particuliers'), 'encoded': 'true', 'gx_charset': 'UTF-8' } try: self._session.post(LOGIN_URL, data=data, allow_redirects=False, timeout=self._timeout) except OSError: raise PyLinkyError("Can not submit login form") if 'iPlanetDirectoryPro' not in self._session.cookies: raise PyLinkyError("Login error: Please check your username/password.") return True
def _post_login_page(self): """Login to enedis.""" data = { 'IDToken1': self.username, 'IDToken2': self.password, 'SunQueryParamsString': base64.b64encode(b'realm=particuliers'), 'encoded': 'true', 'gx_charset': 'UTF-8' } try: self._session.post(LOGIN_URL, data=data, allow_redirects=False, timeout=self._timeout) except OSError: raise PyLinkyError("Can not submit login form") if 'iPlanetDirectoryPro' not in self._session.cookies: raise PyLinkyError("Login error: Please check your username/password.") return True
[ "Login", "to", "enedis", "." ]
Pirionfr/pyLinky
python
https://github.com/Pirionfr/pyLinky/blob/4372496bfcdd95ccfd2f017634cf02b38a2d6fd1/pylinky/client.py#L55-L74
[ "def", "_post_login_page", "(", "self", ")", ":", "data", "=", "{", "'IDToken1'", ":", "self", ".", "username", ",", "'IDToken2'", ":", "self", ".", "password", ",", "'SunQueryParamsString'", ":", "base64", ".", "b64encode", "(", "b'realm=particuliers'", ")", ",", "'encoded'", ":", "'true'", ",", "'gx_charset'", ":", "'UTF-8'", "}", "try", ":", "self", ".", "_session", ".", "post", "(", "LOGIN_URL", ",", "data", "=", "data", ",", "allow_redirects", "=", "False", ",", "timeout", "=", "self", ".", "_timeout", ")", "except", "OSError", ":", "raise", "PyLinkyError", "(", "\"Can not submit login form\"", ")", "if", "'iPlanetDirectoryPro'", "not", "in", "self", ".", "_session", ".", "cookies", ":", "raise", "PyLinkyError", "(", "\"Login error: Please check your username/password.\"", ")", "return", "True" ]
4372496bfcdd95ccfd2f017634cf02b38a2d6fd1
test
LinkyClient._get_data
Get data.
pylinky/client.py
def _get_data(self, p_p_resource_id, start_date=None, end_date=None): """Get data.""" data = { '_' + REQ_PART + '_dateDebut': start_date, '_' + REQ_PART + '_dateFin': end_date } params = { 'p_p_id': REQ_PART, 'p_p_lifecycle': 2, 'p_p_state': 'normal', 'p_p_mode': 'view', 'p_p_resource_id': p_p_resource_id, 'p_p_cacheability': 'cacheLevelPage', 'p_p_col_id': 'column-1', 'p_p_col_pos': 1, 'p_p_col_count': 3 } try: raw_res = self._session.post(DATA_URL, data=data, params=params, allow_redirects=False, timeout=self._timeout) if 300 <= raw_res.status_code < 400: raw_res = self._session.post(DATA_URL, data=data, params=params, allow_redirects=False, timeout=self._timeout) except OSError as e: raise PyLinkyError("Could not access enedis.fr: " + str(e)) if raw_res.text is "": raise PyLinkyError("No data") if 302 == raw_res.status_code and "/messages/maintenance.html" in raw_res.text: raise PyLinkyError("Site in maintenance") try: json_output = raw_res.json() except (OSError, json.decoder.JSONDecodeError, simplejson.errors.JSONDecodeError) as e: raise PyLinkyError("Impossible to decode response: " + str(e) + "\nResponse was: " + str(raw_res.text)) if json_output.get('etat').get('valeur') == 'erreur': raise PyLinkyError("Enedis.fr answered with an error: " + str(json_output)) return json_output.get('graphe')
def _get_data(self, p_p_resource_id, start_date=None, end_date=None): """Get data.""" data = { '_' + REQ_PART + '_dateDebut': start_date, '_' + REQ_PART + '_dateFin': end_date } params = { 'p_p_id': REQ_PART, 'p_p_lifecycle': 2, 'p_p_state': 'normal', 'p_p_mode': 'view', 'p_p_resource_id': p_p_resource_id, 'p_p_cacheability': 'cacheLevelPage', 'p_p_col_id': 'column-1', 'p_p_col_pos': 1, 'p_p_col_count': 3 } try: raw_res = self._session.post(DATA_URL, data=data, params=params, allow_redirects=False, timeout=self._timeout) if 300 <= raw_res.status_code < 400: raw_res = self._session.post(DATA_URL, data=data, params=params, allow_redirects=False, timeout=self._timeout) except OSError as e: raise PyLinkyError("Could not access enedis.fr: " + str(e)) if raw_res.text is "": raise PyLinkyError("No data") if 302 == raw_res.status_code and "/messages/maintenance.html" in raw_res.text: raise PyLinkyError("Site in maintenance") try: json_output = raw_res.json() except (OSError, json.decoder.JSONDecodeError, simplejson.errors.JSONDecodeError) as e: raise PyLinkyError("Impossible to decode response: " + str(e) + "\nResponse was: " + str(raw_res.text)) if json_output.get('etat').get('valeur') == 'erreur': raise PyLinkyError("Enedis.fr answered with an error: " + str(json_output)) return json_output.get('graphe')
[ "Get", "data", "." ]
Pirionfr/pyLinky
python
https://github.com/Pirionfr/pyLinky/blob/4372496bfcdd95ccfd2f017634cf02b38a2d6fd1/pylinky/client.py#L76-L126
[ "def", "_get_data", "(", "self", ",", "p_p_resource_id", ",", "start_date", "=", "None", ",", "end_date", "=", "None", ")", ":", "data", "=", "{", "'_'", "+", "REQ_PART", "+", "'_dateDebut'", ":", "start_date", ",", "'_'", "+", "REQ_PART", "+", "'_dateFin'", ":", "end_date", "}", "params", "=", "{", "'p_p_id'", ":", "REQ_PART", ",", "'p_p_lifecycle'", ":", "2", ",", "'p_p_state'", ":", "'normal'", ",", "'p_p_mode'", ":", "'view'", ",", "'p_p_resource_id'", ":", "p_p_resource_id", ",", "'p_p_cacheability'", ":", "'cacheLevelPage'", ",", "'p_p_col_id'", ":", "'column-1'", ",", "'p_p_col_pos'", ":", "1", ",", "'p_p_col_count'", ":", "3", "}", "try", ":", "raw_res", "=", "self", ".", "_session", ".", "post", "(", "DATA_URL", ",", "data", "=", "data", ",", "params", "=", "params", ",", "allow_redirects", "=", "False", ",", "timeout", "=", "self", ".", "_timeout", ")", "if", "300", "<=", "raw_res", ".", "status_code", "<", "400", ":", "raw_res", "=", "self", ".", "_session", ".", "post", "(", "DATA_URL", ",", "data", "=", "data", ",", "params", "=", "params", ",", "allow_redirects", "=", "False", ",", "timeout", "=", "self", ".", "_timeout", ")", "except", "OSError", "as", "e", ":", "raise", "PyLinkyError", "(", "\"Could not access enedis.fr: \"", "+", "str", "(", "e", ")", ")", "if", "raw_res", ".", "text", "is", "\"\"", ":", "raise", "PyLinkyError", "(", "\"No data\"", ")", "if", "302", "==", "raw_res", ".", "status_code", "and", "\"/messages/maintenance.html\"", "in", "raw_res", ".", "text", ":", "raise", "PyLinkyError", "(", "\"Site in maintenance\"", ")", "try", ":", "json_output", "=", "raw_res", ".", "json", "(", ")", "except", "(", "OSError", ",", "json", ".", "decoder", ".", "JSONDecodeError", ",", "simplejson", ".", "errors", ".", "JSONDecodeError", ")", "as", "e", ":", "raise", "PyLinkyError", "(", "\"Impossible to decode response: \"", "+", "str", "(", "e", ")", "+", "\"\\nResponse was: \"", "+", "str", "(", "raw_res", ".", "text", ")", ")", "if", "json_output", ".", "get", "(", "'etat'", ")", ".", "get", "(", "'valeur'", ")", "==", "'erreur'", ":", "raise", "PyLinkyError", "(", "\"Enedis.fr answered with an error: \"", "+", "str", "(", "json_output", ")", ")", "return", "json_output", ".", "get", "(", "'graphe'", ")" ]
4372496bfcdd95ccfd2f017634cf02b38a2d6fd1
test
LinkyClient.fetch_data
Get the latest data from Enedis.
pylinky/client.py
def fetch_data(self): """Get the latest data from Enedis.""" for t in [HOURLY, DAILY, MONTHLY, YEARLY]: self._data[t] = self.get_data_per_period(t)
def fetch_data(self): """Get the latest data from Enedis.""" for t in [HOURLY, DAILY, MONTHLY, YEARLY]: self._data[t] = self.get_data_per_period(t)
[ "Get", "the", "latest", "data", "from", "Enedis", "." ]
Pirionfr/pyLinky
python
https://github.com/Pirionfr/pyLinky/blob/4372496bfcdd95ccfd2f017634cf02b38a2d6fd1/pylinky/client.py#L192-L196
[ "def", "fetch_data", "(", "self", ")", ":", "for", "t", "in", "[", "HOURLY", ",", "DAILY", ",", "MONTHLY", ",", "YEARLY", "]", ":", "self", ".", "_data", "[", "t", "]", "=", "self", ".", "get_data_per_period", "(", "t", ")" ]
4372496bfcdd95ccfd2f017634cf02b38a2d6fd1
test
main
Main function
pylinky/__main__.py
def main(): """Main function""" parser = argparse.ArgumentParser() parser.add_argument('-u', '--username', required=True, help='enedis username') parser.add_argument('-p', '--password', required=True, help='Password') args = parser.parse_args() client = LinkyClient(args.username, args.password) try: client.login() client.fetch_data() except BaseException as exp: print(exp) return 1 finally: client.close_session() print(json.dumps(client.get_data(), indent=2))
def main(): """Main function""" parser = argparse.ArgumentParser() parser.add_argument('-u', '--username', required=True, help='enedis username') parser.add_argument('-p', '--password', required=True, help='Password') args = parser.parse_args() client = LinkyClient(args.username, args.password) try: client.login() client.fetch_data() except BaseException as exp: print(exp) return 1 finally: client.close_session() print(json.dumps(client.get_data(), indent=2))
[ "Main", "function" ]
Pirionfr/pyLinky
python
https://github.com/Pirionfr/pyLinky/blob/4372496bfcdd95ccfd2f017634cf02b38a2d6fd1/pylinky/__main__.py#L8-L27
[ "def", "main", "(", ")", ":", "parser", "=", "argparse", ".", "ArgumentParser", "(", ")", "parser", ".", "add_argument", "(", "'-u'", ",", "'--username'", ",", "required", "=", "True", ",", "help", "=", "'enedis username'", ")", "parser", ".", "add_argument", "(", "'-p'", ",", "'--password'", ",", "required", "=", "True", ",", "help", "=", "'Password'", ")", "args", "=", "parser", ".", "parse_args", "(", ")", "client", "=", "LinkyClient", "(", "args", ".", "username", ",", "args", ".", "password", ")", "try", ":", "client", ".", "login", "(", ")", "client", ".", "fetch_data", "(", ")", "except", "BaseException", "as", "exp", ":", "print", "(", "exp", ")", "return", "1", "finally", ":", "client", ".", "close_session", "(", ")", "print", "(", "json", ".", "dumps", "(", "client", ".", "get_data", "(", ")", ",", "indent", "=", "2", ")", ")" ]
4372496bfcdd95ccfd2f017634cf02b38a2d6fd1
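The same flow as main(), used programmatically rather than from the command line; the credentials are placeholders and the import path follows pylinky/client.py.

from pylinky.client import LinkyClient  # credentials below are placeholders

client = LinkyClient('user@example.com', 's3cret')
try:
    client.login()       # raises PyLinkyError if the login form or cookie check fails
    client.fetch_data()  # fills the hourly/daily/monthly/yearly buckets
    data = client.get_data()
finally:
    client.close_session()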
test
BaseHandler.prepare
Load the view on first load
examples/simple_site/handlers.py
def prepare(self): """ Load the view on first load """ if self.__class__.view: return #: Load the View class from the dotted view name with enaml.imports(): View = pydoc.locate(self.page.view) assert View, "Failed to import View: {}".format(self.page.view) #: Set initial view properties self.__class__.view = View( site=self.site, page=self.page, request=self.request, )
def prepare(self): """ Load the view on first load """ if self.__class__.view: return #: Load the View class from the dotted view name with enaml.imports(): View = pydoc.locate(self.page.view) assert View, "Failed to import View: {}".format(self.page.view) #: Set initial view properties self.__class__.view = View( site=self.site, page=self.page, request=self.request, )
[ "Load", "the", "view", "on", "first", "load" ]
codelv/enaml-web
python
https://github.com/codelv/enaml-web/blob/88f1131a7b3ba9e83467b4f44bc3bab6f0de7559/examples/simple_site/handlers.py#L18-L33
[ "def", "prepare", "(", "self", ")", ":", "if", "self", ".", "__class__", ".", "view", ":", "return", "#: Load the View class from the dotted view name", "with", "enaml", ".", "imports", "(", ")", ":", "View", "=", "pydoc", ".", "locate", "(", "self", ".", "page", ".", "view", ")", "assert", "View", ",", "\"Failed to import View: {}\"", ".", "format", "(", "self", ".", "page", ".", "view", ")", "#: Set initial view properties", "self", ".", "__class__", ".", "view", "=", "View", "(", "site", "=", "self", ".", "site", ",", "page", "=", "self", ".", "page", ",", "request", "=", "self", ".", "request", ",", ")" ]
88f1131a7b3ba9e83467b4f44bc3bab6f0de7559
test
DemoHandler.initialize
Load the view on first load; could also load based on session, group, etc.
examples/data_binding/main.py
def initialize(self): """ Load the view on first load could also load based on session, group, etc.. """ if self.__class__.view: self.view.handler = self self.view.request = self.request return #: Load the View class from the dotted view name with enaml.imports(): from views.index import View #: Set initial view properties self.__class__.view = View( company=current_company, request=self.request, handler=self, )
def initialize(self): """ Load the view on first load could also load based on session, group, etc.. """ if self.__class__.view: self.view.handler = self self.view.request = self.request return #: Load the View class from the dotted view name with enaml.imports(): from views.index import View #: Set initial view properties self.__class__.view = View( company=current_company, request=self.request, handler=self, )
[ "Load", "the", "view", "on", "first", "load", "could", "also", "load", "based", "on", "session", "group", "etc", ".." ]
codelv/enaml-web
python
https://github.com/codelv/enaml-web/blob/88f1131a7b3ba9e83467b4f44bc3bab6f0de7559/examples/data_binding/main.py#L22-L39
[ "def", "initialize", "(", "self", ")", ":", "if", "self", ".", "__class__", ".", "view", ":", "self", ".", "view", ".", "handler", "=", "self", "self", ".", "view", ".", "request", "=", "self", ".", "request", "return", "#: Load the View class from the dotted view name", "with", "enaml", ".", "imports", "(", ")", ":", "from", "views", ".", "index", "import", "View", "#: Set initial view properties", "self", ".", "__class__", ".", "view", "=", "View", "(", "company", "=", "current_company", ",", "request", "=", "self", ".", "request", ",", "handler", "=", "self", ",", ")" ]
88f1131a7b3ba9e83467b4f44bc3bab6f0de7559
test
DemoHandler.get
Execute the correct handler depending on what is connecting.
examples/data_binding/main.py
def get(self, *args, **kwargs): #: Render view for get request, view is cached for websocket """ Execute the correct handler depending on what is connecting. """ if self.is_websocket(): return super(DemoHandler, self).get(*args, **kwargs) else: #return tornado.web.RequestHandler.get(self, *args, **kwargs) self.write(self.view.render())
def get(self, *args, **kwargs): #: Render view for get request, view is cached for websocket """ Execute the correct handler depending on what is connecting. """ if self.is_websocket(): return super(DemoHandler, self).get(*args, **kwargs) else: #return tornado.web.RequestHandler.get(self, *args, **kwargs) self.write(self.view.render())
[ "Execute", "the", "correct", "handler", "depending", "on", "what", "is", "connecting", "." ]
codelv/enaml-web
python
https://github.com/codelv/enaml-web/blob/88f1131a7b3ba9e83467b4f44bc3bab6f0de7559/examples/data_binding/main.py#L41-L48
[ "def", "get", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "#: Render view for get request, view is cached for websocket", "if", "self", ".", "is_websocket", "(", ")", ":", "return", "super", "(", "DemoHandler", ",", "self", ")", ".", "get", "(", "*", "args", ",", "*", "*", "kwargs", ")", "else", ":", "#return tornado.web.RequestHandler.get(self, *args, **kwargs)", "self", ".", "write", "(", "self", ".", "view", ".", "render", "(", ")", ")" ]
88f1131a7b3ba9e83467b4f44bc3bab6f0de7559
test
DemoHandler.on_message
When enaml.js sends a message
examples/data_binding/main.py
def on_message(self, message): """ When enaml.js sends a message """ #: Decode message change = tornado.escape.json_decode(message) #print change #: Get the owner ID ref = change.get('ref') if not ref: return #: Get the server side representation of the node #: If found will return the View declaration node node = self.view.xpath('//*[@ref="{}"]'.format(ref), first=True) if node is None: return #: Handle the event if change.get('type') and change.get('name'): if change['type'] == 'event': #: Trigger the event trigger = getattr(node, change['name']) trigger() if change['type'] == 'update': #: Trigger the update setattr(node, change['name'], change['value'])
def on_message(self, message): """ When enaml.js sends a message """ #: Decode message change = tornado.escape.json_decode(message) #print change #: Get the owner ID ref = change.get('ref') if not ref: return #: Get the server side representation of the node #: If found will return the View declaration node node = self.view.xpath('//*[@ref="{}"]'.format(ref), first=True) if node is None: return #: Handle the event if change.get('type') and change.get('name'): if change['type'] == 'event': #: Trigger the event trigger = getattr(node, change['name']) trigger() if change['type'] == 'update': #: Trigger the update setattr(node, change['name'], change['value'])
[ "When", "enaml", ".", "js", "sends", "a", "message" ]
codelv/enaml-web
python
https://github.com/codelv/enaml-web/blob/88f1131a7b3ba9e83467b4f44bc3bab6f0de7559/examples/data_binding/main.py#L57-L81
[ "def", "on_message", "(", "self", ",", "message", ")", ":", "#: Decode message", "change", "=", "tornado", ".", "escape", ".", "json_decode", "(", "message", ")", "#print change", "#: Get the owner ID", "ref", "=", "change", ".", "get", "(", "'ref'", ")", "if", "not", "ref", ":", "return", "#: Get the server side representation of the node", "#: If found will return the View declaration node", "node", "=", "self", ".", "view", ".", "xpath", "(", "'//*[@ref=\"{}\"]'", ".", "format", "(", "ref", ")", ",", "first", "=", "True", ")", "if", "node", "is", "None", ":", "return", "#: Handle the event", "if", "change", ".", "get", "(", "'type'", ")", "and", "change", ".", "get", "(", "'name'", ")", ":", "if", "change", "[", "'type'", "]", "==", "'event'", ":", "#: Trigger the event", "trigger", "=", "getattr", "(", "node", ",", "change", "[", "'name'", "]", ")", "trigger", "(", ")", "if", "change", "[", "'type'", "]", "==", "'update'", ":", "#: Trigger the update", "setattr", "(", "node", ",", "change", "[", "'name'", "]", ",", "change", "[", "'value'", "]", ")" ]
88f1131a7b3ba9e83467b4f44bc3bab6f0de7559
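The dispatch logic in on_message does not depend on tornado or enaml: given a decoded change dict and a way to resolve a ref to a node, it either fires an event or assigns an attribute. A self-contained sketch of that core with a stand-in node follows (all names here are illustrative, not part of enaml-web).

class FakeNode:
    """Stand-in for an enaml node: one event callable and one plain attribute."""
    def __init__(self):
        self.value = ''
        self.clicked = lambda: print('clicked!')

def apply_change(resolve, change):
    """Apply a browser-originated change to the node that resolve(ref) returns."""
    node = resolve(change.get('ref'))
    if node is None or not change.get('type') or not change.get('name'):
        return
    if change['type'] == 'event':
        getattr(node, change['name'])()                 # fire the trigger
    elif change['type'] == 'update':
        setattr(node, change['name'], change['value'])  # push the new value

nodes = {'abc123': FakeNode()}
apply_change(nodes.get, {'ref': 'abc123', 'type': 'update',
                         'name': 'value', 'value': 'hello'})
print(nodes['abc123'].value)  # -> hello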
test
Site._update_menus
When pages change, update the menus
examples/simple_site/main.py
def _update_menus(self,change): """ When pages change, update the menus""" menus = {} #: Get all links links = [p.link for p in self.pages if p.link] + self.links #: Put all links in the correct menu for link in links: for menu in link.menus: if menu not in menus: menus[menu] = [] menus[menu].append(link) #: Update the menus for name,menu in menus.items(): k = '{}_menu'.format(name) if hasattr(self,k): setattr(self,k,menu)
def _update_menus(self,change): """ When pages change, update the menus""" menus = {} #: Get all links links = [p.link for p in self.pages if p.link] + self.links #: Put all links in the correct menu for link in links: for menu in link.menus: if menu not in menus: menus[menu] = [] menus[menu].append(link) #: Update the menus for name,menu in menus.items(): k = '{}_menu'.format(name) if hasattr(self,k): setattr(self,k,menu)
[ "When", "pages", "change", "update", "the", "menus" ]
codelv/enaml-web
python
https://github.com/codelv/enaml-web/blob/88f1131a7b3ba9e83467b4f44bc3bab6f0de7559/examples/simple_site/main.py#L81-L99
[ "def", "_update_menus", "(", "self", ",", "change", ")", ":", "menus", "=", "{", "}", "#: Get all links", "links", "=", "[", "p", ".", "link", "for", "p", "in", "self", ".", "pages", "if", "p", ".", "link", "]", "+", "self", ".", "links", "#: Put all links in the correct menu", "for", "link", "in", "links", ":", "for", "menu", "in", "link", ".", "menus", ":", "if", "menu", "not", "in", "menus", ":", "menus", "[", "menu", "]", "=", "[", "]", "menus", "[", "menu", "]", ".", "append", "(", "link", ")", "#: Update the menus", "for", "name", ",", "menu", "in", "menus", ".", "items", "(", ")", ":", "k", "=", "'{}_menu'", ".", "format", "(", "name", ")", "if", "hasattr", "(", "self", ",", "k", ")", ":", "setattr", "(", "self", ",", "k", ",", "menu", ")" ]
88f1131a7b3ba9e83467b4f44bc3bab6f0de7559
test
Site._default_handlers
Generate the handlers for this site
examples/simple_site/main.py
def _default_handlers(self): """ Generate the handlers for this site """ static_path = os.path.abspath(os.path.join(os.path.dirname(__file__),"static")) urls = [ (r"/static/(.*)", cyclone.web.StaticFileHandler, {"path": static_path}), ] for p in self.pages: handler = p.handler handler.site = self handler.page = p urls.append((p.link.url,handler)) return urls
def _default_handlers(self): """ Generate the handlers for this site """ static_path = os.path.abspath(os.path.join(os.path.dirname(__file__),"static")) urls = [ (r"/static/(.*)", cyclone.web.StaticFileHandler, {"path": static_path}), ] for p in self.pages: handler = p.handler handler.site = self handler.page = p urls.append((p.link.url,handler)) return urls
[ "Generate", "the", "handlers", "for", "this", "site" ]
codelv/enaml-web
python
https://github.com/codelv/enaml-web/blob/88f1131a7b3ba9e83467b4f44bc3bab6f0de7559/examples/simple_site/main.py#L101-L112
[ "def", "_default_handlers", "(", "self", ")", ":", "static_path", "=", "os", ".", "path", ".", "abspath", "(", "os", ".", "path", ".", "join", "(", "os", ".", "path", ".", "dirname", "(", "__file__", ")", ",", "\"static\"", ")", ")", "urls", "=", "[", "(", "r\"/static/(.*)\"", ",", "cyclone", ".", "web", ".", "StaticFileHandler", ",", "{", "\"path\"", ":", "static_path", "}", ")", ",", "]", "for", "p", "in", "self", ".", "pages", ":", "handler", "=", "p", ".", "handler", "handler", ".", "site", "=", "self", "handler", ".", "page", "=", "p", "urls", ".", "append", "(", "(", "p", ".", "link", ".", "url", ",", "handler", ")", ")", "return", "urls" ]
88f1131a7b3ba9e83467b4f44bc3bab6f0de7559
test
ViewerWebSocket.on_message
When we get an event from js, lookup the node and invoke the action on the enaml node.
examples/dataframe_viewer/app.py
def on_message(self, message): """ When we get an event from js, lookup the node and invoke the action on the enaml node. """ change = json.loads(message) log.debug(f'Update from js: {change}') # Lookup the node ref = change.get('ref') if not ref: return nodes = self.viewer.xpath('//*[@ref=$ref]', ref=ref) if not nodes: return # Unknown node node = nodes[0] # Trigger the change on the enaml node if change.get('type') and change.get('name'): if change['type'] == 'event': trigger = getattr(node, change['name']) trigger() elif change['type'] == 'update': # Trigger the update setattr(node, change['name'], change['value']) else: log.warning(f"Unhandled event {self} {node}: {change}")
def on_message(self, message): """ When we get an event from js, lookup the node and invoke the action on the enaml node. """ change = json.loads(message) log.debug(f'Update from js: {change}') # Lookup the node ref = change.get('ref') if not ref: return nodes = self.viewer.xpath('//*[@ref=$ref]', ref=ref) if not nodes: return # Unknown node node = nodes[0] # Trigger the change on the enaml node if change.get('type') and change.get('name'): if change['type'] == 'event': trigger = getattr(node, change['name']) trigger() elif change['type'] == 'update': # Trigger the update setattr(node, change['name'], change['value']) else: log.warning(f"Unhandled event {self} {node}: {change}")
[ "When", "we", "get", "an", "event", "from", "js", "lookup", "the", "node", "and", "invoke", "the", "action", "on", "the", "enaml", "node", "." ]
codelv/enaml-web
python
https://github.com/codelv/enaml-web/blob/88f1131a7b3ba9e83467b4f44bc3bab6f0de7559/examples/dataframe_viewer/app.py#L69-L95
[ "def", "on_message", "(", "self", ",", "message", ")", ":", "change", "=", "json", ".", "loads", "(", "message", ")", "log", ".", "debug", "(", "f'Update from js: {change}'", ")", "# Lookup the node", "ref", "=", "change", ".", "get", "(", "'ref'", ")", "if", "not", "ref", ":", "return", "nodes", "=", "self", ".", "viewer", ".", "xpath", "(", "'//*[@ref=$ref]'", ",", "ref", "=", "ref", ")", "if", "not", "nodes", ":", "return", "# Unknown node", "node", "=", "nodes", "[", "0", "]", "# Trigger the change on the enaml node", "if", "change", ".", "get", "(", "'type'", ")", "and", "change", ".", "get", "(", "'name'", ")", ":", "if", "change", "[", "'type'", "]", "==", "'event'", ":", "trigger", "=", "getattr", "(", "node", ",", "change", "[", "'name'", "]", ")", "trigger", "(", ")", "elif", "change", "[", "'type'", "]", "==", "'update'", ":", "# Trigger the update", "setattr", "(", "node", ",", "change", "[", "'name'", "]", ",", "change", "[", "'value'", "]", ")", "else", ":", "log", ".", "warning", "(", "f\"Unhandled event {self} {node}: {change}\"", ")" ]
88f1131a7b3ba9e83467b4f44bc3bab6f0de7559
test
ViewerWebSocket.on_dom_modified
When an event from enaml occurs, send it out the websocket so the client's browser can update accordingly.
examples/dataframe_viewer/app.py
def on_dom_modified(self, change): """ When an event from enaml occurs, send it out the websocket so the client's browser can update accordingly. """ log.debug(f'Update from enaml: {change}') self.write_message(json.dumps(change['value']))
def on_dom_modified(self, change): """ When an event from enaml occurs, send it out the websocket so the client's browser can update accordingly. """ log.debug(f'Update from enaml: {change}') self.write_message(json.dumps(change['value']))
[ "When", "an", "event", "from", "enaml", "occurs", "send", "it", "out", "the", "websocket", "so", "the", "client", "s", "browser", "can", "update", "accordingly", "." ]
codelv/enaml-web
python
https://github.com/codelv/enaml-web/blob/88f1131a7b3ba9e83467b4f44bc3bab6f0de7559/examples/dataframe_viewer/app.py#L97-L103
[ "def", "on_dom_modified", "(", "self", ",", "change", ")", ":", "log", ".", "debug", "(", "f'Update from enaml: {change}'", ")", "self", ".", "write_message", "(", "json", ".", "dumps", "(", "change", "[", "'value'", "]", ")", ")" ]
88f1131a7b3ba9e83467b4f44bc3bab6f0de7559
test
WebComponent.create_widget
Create the toolkit widget for the proxy object. This method is called during the top-down pass, just before the 'init_widget()' method is called. This method should create the toolkit widget and assign it to the 'widget' attribute.
web/impl/lxml_toolkit_object.py
def create_widget(self): """ Create the toolkit widget for the proxy object. This method is called during the top-down pass, just before the 'init_widget()' method is called. This method should create the toolkit widget and assign it to the 'widget' attribute. """ self.widget = SubElement(self.parent_widget(), self.declaration.tag)
def create_widget(self): """ Create the toolkit widget for the proxy object. This method is called during the top-down pass, just before the 'init_widget()' method is called. This method should create the toolkit widget and assign it to the 'widget' attribute. """ self.widget = SubElement(self.parent_widget(), self.declaration.tag)
[ "Create", "the", "toolkit", "widget", "for", "the", "proxy", "object", "." ]
codelv/enaml-web
python
https://github.com/codelv/enaml-web/blob/88f1131a7b3ba9e83467b4f44bc3bab6f0de7559/web/impl/lxml_toolkit_object.py#L34-L42
[ "def", "create_widget", "(", "self", ")", ":", "self", ".", "widget", "=", "SubElement", "(", "self", ".", "parent_widget", "(", ")", ",", "self", ".", "declaration", ".", "tag", ")" ]
88f1131a7b3ba9e83467b4f44bc3bab6f0de7559
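create_widget boils down to a single lxml SubElement call; the same call in isolation, outside the proxy machinery:

from lxml.etree import Element, SubElement, tostring

root = Element('div')
child = SubElement(root, 'p')   # what create_widget does with parent_widget() and declaration.tag
child.text = 'hello'
print(tostring(root).decode())  # <div><p>hello</p></div>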
test
WebComponent.init_widget
Initialize the state of the toolkit widget. This method is called during the top-down pass, just after the 'create_widget()' method is called. This method should init the state of the widget. The child widgets will not yet be created.
web/impl/lxml_toolkit_object.py
def init_widget(self): """ Initialize the state of the toolkit widget. This method is called during the top-down pass, just after the 'create_widget()' method is called. This method should init the state of the widget. The child widgets will not yet be created. """ widget = self.widget d = self.declaration #: Save ref id ref = d.ref CACHE[ref] = atomref(self) widget.set('ref', ref) if d.text: self.set_text(d.text) if d.tail: self.set_tail(d.tail) if d.style: self.set_style(d.style) if d.cls: self.set_cls(d.cls) if d.attrs: self.set_attrs(d.attrs) if d.id: widget.set('id', d.id) if d.draggable: self.set_draggable(d.draggable) # Set any attributes that may be defined for name, member in d.members().items(): if not member.metadata: continue meta = member.metadata # Exclude any attr tags if not (meta.get('d_member') and meta.get('d_final')): continue # Skip any items with attr=false elif not meta.get('attr', True): continue elif isinstance(member, Event): continue value = getattr(d, name) if value: self.set_attribute(name, value)
def init_widget(self): """ Initialize the state of the toolkit widget. This method is called during the top-down pass, just after the 'create_widget()' method is called. This method should init the state of the widget. The child widgets will not yet be created. """ widget = self.widget d = self.declaration #: Save ref id ref = d.ref CACHE[ref] = atomref(self) widget.set('ref', ref) if d.text: self.set_text(d.text) if d.tail: self.set_tail(d.tail) if d.style: self.set_style(d.style) if d.cls: self.set_cls(d.cls) if d.attrs: self.set_attrs(d.attrs) if d.id: widget.set('id', d.id) if d.draggable: self.set_draggable(d.draggable) # Set any attributes that may be defined for name, member in d.members().items(): if not member.metadata: continue meta = member.metadata # Exclude any attr tags if not (meta.get('d_member') and meta.get('d_final')): continue # Skip any items with attr=false elif not meta.get('attr', True): continue elif isinstance(member, Event): continue value = getattr(d, name) if value: self.set_attribute(name, value)
[ "Initialize", "the", "state", "of", "the", "toolkit", "widget", "." ]
codelv/enaml-web
python
https://github.com/codelv/enaml-web/blob/88f1131a7b3ba9e83467b4f44bc3bab6f0de7559/web/impl/lxml_toolkit_object.py#L44-L93
[ "def", "init_widget", "(", "self", ")", ":", "widget", "=", "self", ".", "widget", "d", "=", "self", ".", "declaration", "#: Save ref id", "ref", "=", "d", ".", "ref", "CACHE", "[", "ref", "]", "=", "atomref", "(", "self", ")", "widget", ".", "set", "(", "'ref'", ",", "ref", ")", "if", "d", ".", "text", ":", "self", ".", "set_text", "(", "d", ".", "text", ")", "if", "d", ".", "tail", ":", "self", ".", "set_tail", "(", "d", ".", "tail", ")", "if", "d", ".", "style", ":", "self", ".", "set_style", "(", "d", ".", "style", ")", "if", "d", ".", "cls", ":", "self", ".", "set_cls", "(", "d", ".", "cls", ")", "if", "d", ".", "attrs", ":", "self", ".", "set_attrs", "(", "d", ".", "attrs", ")", "if", "d", ".", "id", ":", "widget", ".", "set", "(", "'id'", ",", "d", ".", "id", ")", "if", "d", ".", "draggable", ":", "self", ".", "set_draggable", "(", "d", ".", "draggable", ")", "# Set any attributes that may be defined", "for", "name", ",", "member", "in", "d", ".", "members", "(", ")", ".", "items", "(", ")", ":", "if", "not", "member", ".", "metadata", ":", "continue", "meta", "=", "member", ".", "metadata", "# Exclude any attr tags", "if", "not", "(", "meta", ".", "get", "(", "'d_member'", ")", "and", "meta", ".", "get", "(", "'d_final'", ")", ")", ":", "continue", "# Skip any items with attr=false", "elif", "not", "meta", ".", "get", "(", "'attr'", ",", "True", ")", ":", "continue", "elif", "isinstance", "(", "member", ",", "Event", ")", ":", "continue", "value", "=", "getattr", "(", "d", ",", "name", ")", "if", "value", ":", "self", ".", "set_attribute", "(", "name", ",", "value", ")" ]
88f1131a7b3ba9e83467b4f44bc3bab6f0de7559
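init_widget registers every component in the module-level CACHE under its ref id via atom's atomref, which holds the object only weakly so destroyed components can be reclaimed. The sketch below is not the enaml-web code; it reproduces the same weak-handle-by-id pattern with the standard library's weakref, and the Component class and ref value are invented for illustration.

import weakref

class Component:
    # Hypothetical stand-in for a toolkit component; the real code stores
    # atom's atomref, which behaves like a weak reference for Atom objects.
    def __init__(self, ref):
        self.ref = ref

CACHE = {}

def register(component):
    # Keyed by the component's ref id, exactly like init_widget's CACHE entry.
    CACHE[component.ref] = weakref.ref(component)

def lookup(ref):
    aref = CACHE.get(ref)
    return aref() if aref else None   # dereference; None once the object is gone

c = Component("abc123")
register(c)
print(lookup("abc123") is c)   # True
del c                          # on CPython the object is reclaimed immediately
print(lookup("abc123"))        # None -- the cache never kept it alive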
test
WebComponent.destroy
A reimplemented destructor. This destructor will clear the reference to the toolkit widget and set its parent to None.
web/impl/lxml_toolkit_object.py
def destroy(self): """ A reimplemented destructor. This destructor will clear the reference to the toolkit widget and set its parent to None. """ widget = self.widget if widget is not None: parent = widget.getparent() if parent is not None: parent.remove(widget) del self.widget d = self.declaration try: del CACHE[d.ref] except KeyError: pass super(WebComponent, self).destroy()
def destroy(self): """ A reimplemented destructor. This destructor will clear the reference to the toolkit widget and set its parent to None. """ widget = self.widget if widget is not None: parent = widget.getparent() if parent is not None: parent.remove(widget) del self.widget d = self.declaration try: del CACHE[d.ref] except KeyError: pass super(WebComponent, self).destroy()
[ "A", "reimplemented", "destructor", "." ]
codelv/enaml-web
python
https://github.com/codelv/enaml-web/blob/88f1131a7b3ba9e83467b4f44bc3bab6f0de7559/web/impl/lxml_toolkit_object.py#L132-L151
[ "def", "destroy", "(", "self", ")", ":", "widget", "=", "self", ".", "widget", "if", "widget", "is", "not", "None", ":", "parent", "=", "widget", ".", "getparent", "(", ")", "if", "parent", "is", "not", "None", ":", "parent", ".", "remove", "(", "widget", ")", "del", "self", ".", "widget", "d", "=", "self", ".", "declaration", "try", ":", "del", "CACHE", "[", "d", ".", "ref", "]", "except", "KeyError", ":", "pass", "super", "(", "WebComponent", ",", "self", ")", ".", "destroy", "(", ")" ]
88f1131a7b3ba9e83467b4f44bc3bab6f0de7559
test
WebComponent.child_added
Handle the child added event from the declaration. This handler will insert the child toolkit widget in the correct position. Subclasses which need more control should reimplement this method.
web/impl/lxml_toolkit_object.py
def child_added(self, child): """ Handle the child added event from the declaration. This handler will insert the child toolkit widget in the correct position. Subclasses which need more control should reimplement this method. """ super(WebComponent, self).child_added(child) if child.widget is not None: # Use insert to put in the correct spot for i, c in enumerate(self.children()): if c == child: self.widget.insert(i, child.widget) break
def child_added(self, child): """ Handle the child added event from the declaration. This handler will insert the child toolkit widget in the correct position. Subclasses which need more control should reimplement this method. """ super(WebComponent, self).child_added(child) if child.widget is not None: # Use insert to put in the correct spot for i, c in enumerate(self.children()): if c == child: self.widget.insert(i, child.widget) break
[ "Handle", "the", "child", "added", "event", "from", "the", "declaration", "." ]
codelv/enaml-web
python
https://github.com/codelv/enaml-web/blob/88f1131a7b3ba9e83467b4f44bc3bab6f0de7559/web/impl/lxml_toolkit_object.py#L153-L167
[ "def", "child_added", "(", "self", ",", "child", ")", ":", "super", "(", "WebComponent", ",", "self", ")", ".", "child_added", "(", "child", ")", "if", "child", ".", "widget", "is", "not", "None", ":", "# Use insert to put in the correct spot", "for", "i", ",", "c", "in", "enumerate", "(", "self", ".", "children", "(", ")", ")", ":", "if", "c", "==", "child", ":", "self", ".", "widget", ".", "insert", "(", "i", ",", "child", ".", "widget", ")", "break" ]
88f1131a7b3ba9e83467b4f44bc3bab6f0de7559
test
WebComponent.child_removed
Handle the child removed event from the declaration. This handler will unparent the child toolkit widget. Subclasses which need more control should reimplement this method.
web/impl/lxml_toolkit_object.py
def child_removed(self, child): """ Handle the child removed event from the declaration. This handler will unparent the child toolkit widget. Subclasses which need more control should reimplement this method. """ super(WebComponent, self).child_removed(child) if child.widget is not None: for i, c in enumerate(self.children()): if c == child: del self.widget[i] break
def child_removed(self, child): """ Handle the child removed event from the declaration. This handler will unparent the child toolkit widget. Subclasses which need more control should reimplement this method. """ super(WebComponent, self).child_removed(child) if child.widget is not None: for i, c in enumerate(self.children()): if c == child: del self.widget[i] break
[ "Handle", "the", "child", "removed", "event", "from", "the", "declaration", "." ]
codelv/enaml-web
python
https://github.com/codelv/enaml-web/blob/88f1131a7b3ba9e83467b4f44bc3bab6f0de7559/web/impl/lxml_toolkit_object.py#L169-L181
[ "def", "child_removed", "(", "self", ",", "child", ")", ":", "super", "(", "WebComponent", ",", "self", ")", ".", "child_removed", "(", "child", ")", "if", "child", ".", "widget", "is", "not", "None", ":", "for", "i", ",", "c", "in", "enumerate", "(", "self", ".", "children", "(", ")", ")", ":", "if", "c", "==", "child", ":", "del", "self", ".", "widget", "[", "i", "]", "break" ]
88f1131a7b3ba9e83467b4f44bc3bab6f0de7559
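Both child handlers above turn a child's index among the declaration's children into a positional edit on the parent lxml element. A minimal sketch of those two element-level operations on plain lxml objects, assuming lxml is installed (tag names and text are arbitrary):

from lxml import etree

parent = etree.Element("div")
first = etree.SubElement(parent, "span"); first.text = "a"
last = etree.SubElement(parent, "span"); last.text = "c"

# Positional insert, as child_added does once it finds the child's index.
middle = etree.Element("span"); middle.text = "b"
parent.insert(1, middle)
print([el.text for el in parent])   # ['a', 'b', 'c']

# Positional delete, as child_removed does.
del parent[1]
print([el.text for el in parent])   # ['a', 'c']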
test
WebComponent.find
Get the node(s) matching the query
web/impl/lxml_toolkit_object.py
def find(self, query, **kwargs): """ Get the node(s) matching the query""" nodes = self.widget.xpath(query, **kwargs) if not nodes: return [] matches = [] for node in nodes: aref = CACHE.get(node.attrib.get('ref')) obj = aref() if aref else None if obj is None: continue matches.append(obj) return matches
def find(self, query, **kwargs): """ Get the node(s) matching the query""" nodes = self.widget.xpath(query, **kwargs) if not nodes: return [] matches = [] for node in nodes: aref = CACHE.get(node.attrib.get('ref')) obj = aref() if aref else None if obj is None: continue matches.append(obj) return matches
[ "Get", "the", "node", "(", "s", ")", "matching", "the", "query" ]
codelv/enaml-web
python
https://github.com/codelv/enaml-web/blob/88f1131a7b3ba9e83467b4f44bc3bab6f0de7559/web/impl/lxml_toolkit_object.py#L191-L203
[ "def", "find", "(", "self", ",", "query", ",", "*", "*", "kwargs", ")", ":", "nodes", "=", "self", ".", "widget", ".", "xpath", "(", "query", ",", "*", "*", "kwargs", ")", "if", "not", "nodes", ":", "return", "[", "]", "matches", "=", "[", "]", "for", "node", "in", "nodes", ":", "aref", "=", "CACHE", ".", "get", "(", "node", ".", "attrib", ".", "get", "(", "'ref'", ")", ")", "obj", "=", "aref", "(", ")", "if", "aref", "else", "None", "if", "obj", "is", "None", ":", "continue", "matches", ".", "append", "(", "obj", ")", "return", "matches" ]
88f1131a7b3ba9e83467b4f44bc3bab6f0de7559
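find() forwards an XPath expression to the element tree and maps each matching element back to a component by looking up its 'ref' attribute in CACHE. The element-side half of that round trip can be shown with plain lxml alone; the markup and ref values below are invented:

from lxml import etree

root = etree.XML('<div><p ref="r1">one</p><p ref="r2" class="x">two</p></div>')

# The kind of query WebComponent.find hands to widget.xpath(...)
nodes = root.xpath('//p[@class="x"]')
print([node.attrib.get("ref") for node in nodes])   # ['r2']
# find() would then resolve each ref through CACHE to get the component back.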
test
WebComponent.child_widgets
Get the child toolkit widgets for this object. Returns ------- result : iterable of QObject The child widgets defined for this object.
web/impl/lxml_toolkit_object.py
def child_widgets(self): """ Get the child toolkit widgets for this object. Returns ------- result : iterable of QObject The child widgets defined for this object. """ for child in self.children(): w = child.widget if w is not None: yield w
def child_widgets(self): """ Get the child toolkit widgets for this object. Returns ------- result : iterable of QObject The child widgets defined for this object. """ for child in self.children(): w = child.widget if w is not None: yield w
[ "Get", "the", "child", "toolkit", "widgets", "for", "this", "object", "." ]
codelv/enaml-web
python
https://github.com/codelv/enaml-web/blob/88f1131a7b3ba9e83467b4f44bc3bab6f0de7559/web/impl/lxml_toolkit_object.py#L219-L231
[ "def", "child_widgets", "(", "self", ")", ":", "for", "child", "in", "self", ".", "children", "(", ")", ":", "w", "=", "child", ".", "widget", "if", "w", "is", "not", "None", ":", "yield", "w" ]
88f1131a7b3ba9e83467b4f44bc3bab6f0de7559
test
WebComponent.set_attribute
Default handler for attributes not explicitly defined
web/impl/lxml_toolkit_object.py
def set_attribute(self, name, value): """ Default handler for those not explicitly defined """ if value is True: self.widget.set(name, name) elif value is False: del self.widget.attrib[name] else: self.widget.set(name, str(value))
def set_attribute(self, name, value): """ Default handler for those not explicitly defined """ if value is True: self.widget.set(name, name) elif value is False: del self.widget.attrib[name] else: self.widget.set(name, str(value))
[ "Default", "handler", "for", "those", "not", "explicitly", "defined" ]
codelv/enaml-web
python
https://github.com/codelv/enaml-web/blob/88f1131a7b3ba9e83467b4f44bc3bab6f0de7559/web/impl/lxml_toolkit_object.py#L263-L270
[ "def", "set_attribute", "(", "self", ",", "name", ",", "value", ")", ":", "if", "value", "is", "True", ":", "self", ".", "widget", ".", "set", "(", "name", ",", "name", ")", "elif", "value", "is", "False", ":", "del", "self", ".", "widget", ".", "attrib", "[", "name", "]", "else", ":", "self", ".", "widget", ".", "set", "(", "name", ",", "str", "(", "value", ")", ")" ]
88f1131a7b3ba9e83467b4f44bc3bab6f0de7559
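The default setter above gives members HTML-style attribute semantics: True renders as name="name", False removes the attribute, and anything else is stringified. A standalone sketch of that behaviour on a bare lxml element; the set_attribute function here simply mirrors the method and is not part of the library:

from lxml import etree

def set_attribute(widget, name, value):
    # Same three-way behaviour as WebComponent.set_attribute
    if value is True:
        widget.set(name, name)        # boolean attribute: disabled="disabled"
    elif value is False:
        del widget.attrib[name]       # removing it switches the attribute off
    else:
        widget.set(name, str(value))  # everything else is stringified

widget = etree.Element("input")
set_attribute(widget, "disabled", True)
set_attribute(widget, "maxlength", 10)
print(etree.tostring(widget))   # b'<input disabled="disabled" maxlength="10"/>'
set_attribute(widget, "disabled", False)
print(etree.tostring(widget))   # b'<input maxlength="10"/>'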
test
Tag._update_proxy
Update the proxy widget when the Widget data changes.
web/components/html.py
def _update_proxy(self, change): """ Update the proxy widget when the Widget data changes. """ #: Try default handler if change['type'] == 'update' and self.proxy_is_active: handler = getattr(self.proxy, 'set_' + change['name'], None) if handler is not None: handler(change['value']) else: self.proxy.set_attribute(change['name'], change['value']) self._notify_modified(change)
def _update_proxy(self, change): """ Update the proxy widget when the Widget data changes. """ #: Try default handler if change['type'] == 'update' and self.proxy_is_active: handler = getattr(self.proxy, 'set_' + change['name'], None) if handler is not None: handler(change['value']) else: self.proxy.set_attribute(change['name'], change['value']) self._notify_modified(change)
[ "Update", "the", "proxy", "widget", "when", "the", "Widget", "data", "changes", "." ]
codelv/enaml-web
python
https://github.com/codelv/enaml-web/blob/88f1131a7b3ba9e83467b4f44bc3bab6f0de7559/web/components/html.py#L100-L111
[ "def", "_update_proxy", "(", "self", ",", "change", ")", ":", "#: Try default handler", "if", "change", "[", "'type'", "]", "==", "'update'", "and", "self", ".", "proxy_is_active", ":", "handler", "=", "getattr", "(", "self", ".", "proxy", ",", "'set_'", "+", "change", "[", "'name'", "]", ",", "None", ")", "if", "handler", "is", "not", "None", ":", "handler", "(", "change", "[", "'value'", "]", ")", "else", ":", "self", ".", "proxy", ".", "set_attribute", "(", "change", "[", "'name'", "]", ",", "change", "[", "'value'", "]", ")", "self", ".", "_notify_modified", "(", "change", ")" ]
88f1131a7b3ba9e83467b4f44bc3bab6f0de7559
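_update_proxy resolves the setter by naming convention: it looks for a set_<member name> method on the proxy and falls back to the generic set_attribute. A self-contained sketch of that getattr-based dispatch; the ToyProxy class and member names are illustrative only:

class ToyProxy:
    # Only 'text' gets a dedicated setter; everything else hits the fallback.
    def set_text(self, value):
        print("set_text(%r)" % value)

    def set_attribute(self, name, value):
        print("set_attribute(%r, %r)" % (name, value))

def update(proxy, change):
    # Same convention as Tag._update_proxy: prefer set_<name>, else the generic setter.
    handler = getattr(proxy, 'set_' + change['name'], None)
    if handler is not None:
        handler(change['value'])
    else:
        proxy.set_attribute(change['name'], change['value'])

proxy = ToyProxy()
update(proxy, {'name': 'text', 'value': 'hello'})   # -> set_text('hello')
update(proxy, {'name': 'hidden', 'value': True})    # -> set_attribute('hidden', True)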
test
Tag._notify_modified
If a change occurs when we have a websocket connection active, notify the websocket client of the change.
web/components/html.py
def _notify_modified(self, change): """ If a change occurs when we have a websocket connection active notify the websocket client of the change. """ root = self.root_object() if isinstance(root, Html): name = change['name'] change = { 'ref': self.ref, 'type': change['type'], 'name': change['name'], 'value': change['value'] } root.modified(change)
def _notify_modified(self, change): """ If a change occurs when we have a websocket connection active notify the websocket client of the change. """ root = self.root_object() if isinstance(root, Html): name = change['name'] change = { 'ref': self.ref, 'type': change['type'], 'name': change['name'], 'value': change['value'] } root.modified(change)
[ "If", "a", "change", "occurs", "when", "we", "have", "a", "websocket", "connection", "active", "notify", "the", "websocket", "client", "of", "the", "change", "." ]
codelv/enaml-web
python
https://github.com/codelv/enaml-web/blob/88f1131a7b3ba9e83467b4f44bc3bab6f0de7559/web/components/html.py#L113-L126
[ "def", "_notify_modified", "(", "self", ",", "change", ")", ":", "root", "=", "self", ".", "root_object", "(", ")", "if", "isinstance", "(", "root", ",", "Html", ")", ":", "name", "=", "change", "[", "'name'", "]", "change", "=", "{", "'ref'", ":", "self", ".", "ref", ",", "'type'", ":", "change", "[", "'type'", "]", ",", "'name'", ":", "change", "[", "'name'", "]", ",", "'value'", ":", "change", "[", "'value'", "]", "}", "root", ".", "modified", "(", "change", ")" ]
88f1131a7b3ba9e83467b4f44bc3bab6f0de7559
test
Tag.xpath
Find nodes matching the given xpath query
web/components/html.py
def xpath(self, query, **kwargs): """ Find nodes matching the given xpath query """ nodes = self.proxy.find(query, **kwargs) return [n.declaration for n in nodes]
def xpath(self, query, **kwargs): """ Find nodes matching the given xpath query """ nodes = self.proxy.find(query, **kwargs) return [n.declaration for n in nodes]
[ "Find", "nodes", "matching", "the", "given", "xpath", "query" ]
codelv/enaml-web
python
https://github.com/codelv/enaml-web/blob/88f1131a7b3ba9e83467b4f44bc3bab6f0de7559/web/components/html.py#L149-L152
[ "def", "xpath", "(", "self", ",", "query", ",", "*", "*", "kwargs", ")", ":", "nodes", "=", "self", ".", "proxy", ".", "find", "(", "query", ",", "*", "*", "kwargs", ")", "return", "[", "n", ".", "declaration", "for", "n", "in", "nodes", "]" ]
88f1131a7b3ba9e83467b4f44bc3bab6f0de7559
test
Tag.prepare
Prepare for rendering
web/components/html.py
def prepare(self, **kwargs): """ Prepare for rendering """ for k, v in kwargs.items(): setattr(self, k, v) if not self.is_initialized: self.initialize() if not self.proxy_is_active: self.activate_proxy()
def prepare(self, **kwargs): """ Prepare for rendering """ for k, v in kwargs.items(): setattr(self, k, v) if not self.is_initialized: self.initialize() if not self.proxy_is_active: self.activate_proxy()
[ "Prepare", "for", "rendering" ]
codelv/enaml-web
python
https://github.com/codelv/enaml-web/blob/88f1131a7b3ba9e83467b4f44bc3bab6f0de7559/web/components/html.py#L154-L161
[ "def", "prepare", "(", "self", ",", "*", "*", "kwargs", ")", ":", "for", "k", ",", "v", "in", "kwargs", ".", "items", "(", ")", ":", "setattr", "(", "self", ",", "k", ",", "v", ")", "if", "not", "self", ".", "is_initialized", ":", "self", ".", "initialize", "(", ")", "if", "not", "self", ".", "proxy_is_active", ":", "self", ".", "activate_proxy", "(", ")" ]
88f1131a7b3ba9e83467b4f44bc3bab6f0de7559
test
RawComponent.init_widget
Initialize the widget with the source.
web/impl/lxml_raw.py
def init_widget(self): """ Initialize the widget with the source. """ d = self.declaration if d.source: self.set_source(d.source) else: super(RawComponent, self).init_widget()
def init_widget(self): """ Initialize the widget with the source. """ d = self.declaration if d.source: self.set_source(d.source) else: super(RawComponent, self).init_widget()
[ "Initialize", "the", "widget", "with", "the", "source", "." ]
codelv/enaml-web
python
https://github.com/codelv/enaml-web/blob/88f1131a7b3ba9e83467b4f44bc3bab6f0de7559/web/impl/lxml_raw.py#L21-L27
[ "def", "init_widget", "(", "self", ")", ":", "d", "=", "self", ".", "declaration", "if", "d", ".", "source", ":", "self", ".", "set_source", "(", "d", ".", "source", ")", "else", ":", "super", "(", "RawComponent", ",", "self", ")", ".", "init_widget", "(", ")" ]
88f1131a7b3ba9e83467b4f44bc3bab6f0de7559
test
RawComponent.set_source
Set the source by parsing the source and inserting it into the component.
web/impl/lxml_raw.py
def set_source(self, source): """ Set the source by parsing the source and inserting it into the component. """ self.widget.clear() html = etree.HTML(source) self.widget.extend(html[0]) # Clear removes everything so it must be reinitialized super(RawComponent, self).init_widget()
def set_source(self, source): """ Set the source by parsing the source and inserting it into the component. """ self.widget.clear() html = etree.HTML(source) self.widget.extend(html[0]) # Clear removes everything so it must be reinitialized super(RawComponent, self).init_widget()
[ "Set", "the", "source", "by", "parsing", "the", "source", "and", "inserting", "it", "into", "the", "component", "." ]
codelv/enaml-web
python
https://github.com/codelv/enaml-web/blob/88f1131a7b3ba9e83467b4f44bc3bab6f0de7559/web/impl/lxml_raw.py#L29-L38
[ "def", "set_source", "(", "self", ",", "source", ")", ":", "self", ".", "widget", ".", "clear", "(", ")", "html", "=", "etree", ".", "HTML", "(", "source", ")", "self", ".", "widget", ".", "extend", "(", "html", "[", "0", "]", ")", "# Clear removes everything so it must be reinitialized", "super", "(", "RawComponent", ",", "self", ")", ".", "init_widget", "(", ")" ]
88f1131a7b3ba9e83467b4f44bc3bab6f0de7559
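set_source leans on lxml's forgiving HTML parser: etree.HTML wraps a fragment in <html><body>...</body></html>, so html[0] is the <body> (for a body-only fragment) whose children get moved into the cleared widget. A minimal sketch of just that parse-and-extend step, with an arbitrary fragment:

from lxml import etree

widget = etree.Element("div")
source = "<p>Hello</p><p>World</p>"

html = etree.HTML(source)        # -> <html><body><p>Hello</p><p>World</p></body></html>
widget.clear()
widget.extend(html[0])           # extending with <body> moves its children over
print(etree.tostring(widget))    # b'<div><p>Hello</p><p>World</p></div>'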
test
Block._observe_mode
If the mode changes, refresh the items.
web/core/block.py
def _observe_mode(self, change): """ If the mode changes, refresh the items. """ block = self.block if block and self.is_initialized and change['type'] == 'update': if change['oldvalue'] == 'replace': raise NotImplementedError for c in self.children: block.children.remove(c) c.set_parent(None) self.refresh_items()
def _observe_mode(self, change): """ If the mode changes, refresh the items. """ block = self.block if block and self.is_initialized and change['type'] == 'update': if change['oldvalue'] == 'replace': raise NotImplementedError for c in self.children: block.children.remove(c) c.set_parent(None) self.refresh_items()
[ "If", "the", "mode", "changes", ".", "Refresh", "the", "items", "." ]
codelv/enaml-web
python
https://github.com/codelv/enaml-web/blob/88f1131a7b3ba9e83467b4f44bc3bab6f0de7559/web/core/block.py#L67-L78
[ "def", "_observe_mode", "(", "self", ",", "change", ")", ":", "block", "=", "self", ".", "block", "if", "block", "and", "self", ".", "is_initialized", "and", "change", "[", "'type'", "]", "==", "'update'", ":", "if", "change", "[", "'oldvalue'", "]", "==", "'replace'", ":", "raise", "NotImplementedError", "for", "c", "in", "self", ".", "children", ":", "block", ".", "children", ".", "remove", "(", "c", ")", "c", ".", "set_parent", "(", "None", ")", "self", ".", "refresh_items", "(", ")" ]
88f1131a7b3ba9e83467b4f44bc3bab6f0de7559
test
Block._observe_block
A change handler for the 'objects' list of the Include. If the object is initialized objects which are removed will be unparented and objects which are added will be reparented. Old objects will be destroyed if the 'destroy_old' flag is True.
web/core/block.py
def _observe_block(self, change): """ A change handler for the 'objects' list of the Include. If the object is initialized objects which are removed will be unparented and objects which are added will be reparented. Old objects will be destroyed if the 'destroy_old' flag is True. """ if self.is_initialized and change['type'] == 'update': old_block = change['oldvalue'] for c in self.children: old_block.children.remove(c) c.set_parent(None) self.refresh_items()
def _observe_block(self, change): """ A change handler for the 'objects' list of the Include. If the object is initialized objects which are removed will be unparented and objects which are added will be reparented. Old objects will be destroyed if the 'destroy_old' flag is True. """ if self.is_initialized and change['type'] == 'update': old_block = change['oldvalue'] for c in self.children: old_block.children.remove(c) c.set_parent(None) self.refresh_items()
[ "A", "change", "handler", "for", "the", "objects", "list", "of", "the", "Include", "." ]
codelv/enaml-web
python
https://github.com/codelv/enaml-web/blob/88f1131a7b3ba9e83467b4f44bc3bab6f0de7559/web/core/block.py#L80-L93
[ "def", "_observe_block", "(", "self", ",", "change", ")", ":", "if", "self", ".", "is_initialized", "and", "change", "[", "'type'", "]", "==", "'update'", ":", "old_block", "=", "change", "[", "'oldvalue'", "]", "for", "c", "in", "self", ".", "children", ":", "old_block", ".", "children", ".", "remove", "(", "c", ")", "c", ".", "set_parent", "(", "None", ")", "self", ".", "refresh_items", "(", ")" ]
88f1131a7b3ba9e83467b4f44bc3bab6f0de7559
test
Block._observe__children
When the children of the block change, update the referenced block.
web/core/block.py
def _observe__children(self, change): """ When the children of the block change. Update the referenced block. """ if not self.is_initialized or change['type'] != 'update': return block = self.block new_children = change['value'] old_children = change['oldvalue'] for c in old_children: if c not in new_children and not c.is_destroyed: c.destroy() else: c.set_parent(None) if block: # This block is inserting into another block before = None if self.mode == 'replace': block.children = [] if self.mode == 'prepend' and block.children: before = block.children[0] block.insert_children(before, new_children) else: # This block is a placeholder self.parent.insert_children(self, new_children)
def _observe__children(self, change): """ When the children of the block change. Update the referenced block. """ if not self.is_initialized or change['type'] != 'update': return block = self.block new_children = change['value'] old_children = change['oldvalue'] for c in old_children: if c not in new_children and not c.is_destroyed: c.destroy() else: c.set_parent(None) if block: # This block is inserting into another block before = None if self.mode == 'replace': block.children = [] if self.mode == 'prepend' and block.children: before = block.children[0] block.insert_children(before, new_children) else: # This block is a placeholder self.parent.insert_children(self, new_children)
[ "When", "the", "children", "of", "the", "block", "change", ".", "Update", "the", "referenced", "block", "." ]
codelv/enaml-web
python
https://github.com/codelv/enaml-web/blob/88f1131a7b3ba9e83467b4f44bc3bab6f0de7559/web/core/block.py#L95-L122
[ "def", "_observe__children", "(", "self", ",", "change", ")", ":", "if", "not", "self", ".", "is_initialized", "or", "change", "[", "'type'", "]", "!=", "'update'", ":", "return", "block", "=", "self", ".", "block", "new_children", "=", "change", "[", "'value'", "]", "old_children", "=", "change", "[", "'oldvalue'", "]", "for", "c", "in", "old_children", ":", "if", "c", "not", "in", "new_children", "and", "not", "c", ".", "is_destroyed", ":", "c", ".", "destroy", "(", ")", "else", ":", "c", ".", "set_parent", "(", "None", ")", "if", "block", ":", "# This block is inserting into another block", "before", "=", "None", "if", "self", ".", "mode", "==", "'replace'", ":", "block", ".", "children", "=", "[", "]", "if", "self", ".", "mode", "==", "'prepend'", "and", "block", ".", "children", ":", "before", "=", "block", ".", "children", "[", "0", "]", "block", ".", "insert_children", "(", "before", ",", "new_children", ")", "else", ":", "# This block is a placeholder", "self", ".", "parent", ".", "insert_children", "(", "self", ",", "new_children", ")" ]
88f1131a7b3ba9e83467b4f44bc3bab6f0de7559
test
read
Read the contents of a file located relative to setup.py
setup.py
def read(*pathcomponents): """Read the contents of a file located relative to setup.py""" with open(join(abspath(dirname(__file__)), *pathcomponents)) as thefile: return thefile.read()
def read(*pathcomponents): """Read the contents of a file located relative to setup.py""" with open(join(abspath(dirname(__file__)), *pathcomponents)) as thefile: return thefile.read()
[ "Read", "the", "contents", "of", "a", "file", "located", "relative", "to", "setup", ".", "py" ]
mrsarm/mongotail
python
https://github.com/mrsarm/mongotail/blob/82ba74e32eff92faa320833a8d19c58555f9cd49/setup.py#L30-L33
[ "def", "read", "(", "*", "pathcomponents", ")", ":", "with", "open", "(", "join", "(", "abspath", "(", "dirname", "(", "__file__", ")", ")", ",", "*", "pathcomponents", ")", ")", "as", "thefile", ":", "return", "thefile", ".", "read", "(", ")" ]
82ba74e32eff92faa320833a8d19c58555f9cd49
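A helper like read() typically feeds file contents into setup() metadata. The call below is purely illustrative; the "README.rst" filename is an assumption and is not taken from mongotail's actual setup.py:

from os.path import abspath, dirname, join

def read(*pathcomponents):
    # Same helper as above: open a file relative to this script's directory.
    with open(join(abspath(dirname(__file__)), *pathcomponents)) as thefile:
        return thefile.read()

# e.g. long_description=read("README.rst") inside the setup() call
long_description = read("README.rst")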
test
print_obj
Print the dict returned by a MongoDB query to the standard output.
mongotail/out.py
def print_obj(obj, verbose, metadata, mongo_version): """ Print the dict returned by a MongoDB Query in the standard output. """ if verbose: sys.stdout.write(json_encoder.encode(obj) + '\n') sys.stdout.flush() else: try: ts_time = obj['ts'] operation = obj['op'] doc = None if operation == 'query': if mongo_version < "3.2": doc = obj['ns'].split(".")[-1] query = json_encoder.encode(obj['query']) if 'query' in obj else "{}" else: if "query" in obj: cmd = obj['query'] # Mongo 3.2 - 3.4 else: cmd = obj['command'] # Mongo 3.6+ doc = cmd['find'] query = json_encoder.encode(cmd['filter']) if 'filter' in cmd else "{}" if 'sort' in cmd: query += ', sort: ' + json_encoder.encode(cmd['sort']) query += '. %s returned.' % obj['nreturned'] elif operation == 'update': doc = obj['ns'].split(".")[-1] if mongo_version < "3.6": query = json_encoder.encode(obj['query']) if 'query' in obj else "{}" query += ', ' + json_encoder.encode(obj['updateobj']) else: query = json_encoder.encode(obj['command']['q']) if 'command' in obj and 'q' in obj['command'] else "{}" query += ', ' + json_encoder.encode(obj['command']['u']) if 'nModified' in obj: query += '. %s updated.' % obj['nModified'] elif 'nMatched' in obj: query += '. %s updated.' % obj['nMatched'] elif operation == 'insert': if mongo_version < "3.2": doc = obj['ns'].split(".")[-1] query = json_encoder.encode(obj['query']) if 'query' in obj else "{}" else: if 'query' in obj: doc = obj['query']['insert'] if 'documents' in obj['query']: if isinstance(obj['query']['documents'], collections.Iterable) \ and len(obj['query']['documents']) > 1: query = json_encoder.encode(obj['query']['documents']) + ". " else: query = json_encoder.encode(obj['query']['documents'][0]) + ". " else: query = "" else: # Mongo 3.6+ profiler looks like doens't record insert details (document object), and # some tools like Robo 3T (formerly Robomongo) allows to duplicate collections # but the profiler doesn't record the element inserted doc = obj['ns'].split(".")[-1] query = "" query += '%s inserted.' % obj['ninserted'] elif operation == 'remove': doc = obj['ns'].split(".")[-1] if mongo_version < "3.6": query = json_encoder.encode(obj['query']) if 'query' in obj else "{}" else: query = json_encoder.encode(obj['command']['q']) if 'command' in obj and 'q' in obj['command'] else "{}" query += '. %s deleted.' 
% obj['ndeleted'] elif operation == "command": if 'count' in obj["command"]: operation = "count" query = json_encoder.encode(obj['command']['query']) elif 'aggregate' in obj["command"]: operation = "aggregate" query = json_encoder.encode(obj['command']['pipeline']) elif 'distinct' in obj["command"]: operation = "distinct" query = json_encoder.encode(obj['command']['query']) query = '"%s", %s' % (obj['command']['key'], query) elif 'drop' in obj["command"]: operation = "drop" query = "" elif 'findandmodify' in obj["command"]: operation = "findandmodify" query = "query: " + json_encoder.encode(obj['command']['query']) if 'sort' in obj["command"]: query += ", sort: " + json_encoder.encode(obj['command']['sort']) if 'update' in obj["command"]: query += ", update: " + json_encoder.encode(obj['command']['update']) if 'remove' in obj["command"]: query += ", remove: " + str(obj['command']['remove']).lower() if 'fields' in obj["command"]: query += ", fields: " + json_encoder.encode(obj['command']['fields']) if 'upsert' in obj["command"]: query += ", upsert: " + str(obj['command']['upsert']).lower() if 'new' in obj["command"]: query += ", new: " + str(obj['command']['new']).lower() elif 'group' in obj["command"]: operation = "group" doc = obj["command"]['group']["ns"] if 'key' in obj['command']['group']: key = "key: " + json_encoder.encode(obj['command']['group']['key']) else: key = None if 'initial' in obj['command']['group']: initial = "initial: " + json_encoder.encode(obj['command']['group']['initial']) else: initial = None if 'cond' in obj['command']['group']: cond = "cond: " + json_encoder.encode(obj['command']['group']['cond']) else: cond = None if '$keyf' in obj['command']['group']: key_function = "keyf: " + min_script(obj['command']['group']['$keyf']) else: key_function = None if '$reduce' in obj['command']['group']: reduce_func = "reduce: " + min_script(obj['command']['group']['$reduce']) else: reduce_func = None if 'finalize' in obj['command']['group']: finalize_func = "finalize: " + min_script(obj['command']['group']['finalize']) else: finalize_func = None query = ", ".join(list(filter(lambda x: x, (key, reduce_func, initial, key_function, cond, finalize_func)))) elif 'map' in obj["command"]: operation = "map" doc = obj["command"]["mapreduce"] del obj["command"]["mapreduce"] map_func = min_script(obj['command']["map"]) del obj['command']["map"] reduce_func = min_script(obj['command']["reduce"]) del obj['command']["reduce"] query = "{%s, %s, %s}" % (map_func, reduce_func, json_encoder.encode(obj['command'])) else: warn('Unknown command operation\nDump: %s' % json_encoder.encode(obj)) if not doc: doc = obj["command"][operation] else: warn('Unknown operation "%s"\nDump: %s' % (operation, json_encoder.encode(obj))) if metadata: met = [] for m in metadata: if m in obj and obj[m] != {}: q = m + ": " if isinstance(obj[m], str): q += '"%s"' % obj[m] elif isinstance(obj[m], dict): q += json_encoder.encode(obj[m]) else: q += str(obj[m]) met.append(q) if met: if not query.endswith("."): query += ". " if not query.endswith(" "): query += " " query += ", ".join(met) sys.stdout.write("%s %s [%s] : %s\n" % (ts_time.strftime("%Y-%m-%d %H:%M:%S.%f")[:-3], operation.upper().ljust(9), doc, query)) sys.stdout.flush() # Allows pipe the output during the execution with others tools like 'grep' except (KeyError, TypeError): warn('Unknown registry\nDump: %s' % json_encoder.encode(obj))
def print_obj(obj, verbose, metadata, mongo_version): """ Print the dict returned by a MongoDB Query in the standard output. """ if verbose: sys.stdout.write(json_encoder.encode(obj) + '\n') sys.stdout.flush() else: try: ts_time = obj['ts'] operation = obj['op'] doc = None if operation == 'query': if mongo_version < "3.2": doc = obj['ns'].split(".")[-1] query = json_encoder.encode(obj['query']) if 'query' in obj else "{}" else: if "query" in obj: cmd = obj['query'] # Mongo 3.2 - 3.4 else: cmd = obj['command'] # Mongo 3.6+ doc = cmd['find'] query = json_encoder.encode(cmd['filter']) if 'filter' in cmd else "{}" if 'sort' in cmd: query += ', sort: ' + json_encoder.encode(cmd['sort']) query += '. %s returned.' % obj['nreturned'] elif operation == 'update': doc = obj['ns'].split(".")[-1] if mongo_version < "3.6": query = json_encoder.encode(obj['query']) if 'query' in obj else "{}" query += ', ' + json_encoder.encode(obj['updateobj']) else: query = json_encoder.encode(obj['command']['q']) if 'command' in obj and 'q' in obj['command'] else "{}" query += ', ' + json_encoder.encode(obj['command']['u']) if 'nModified' in obj: query += '. %s updated.' % obj['nModified'] elif 'nMatched' in obj: query += '. %s updated.' % obj['nMatched'] elif operation == 'insert': if mongo_version < "3.2": doc = obj['ns'].split(".")[-1] query = json_encoder.encode(obj['query']) if 'query' in obj else "{}" else: if 'query' in obj: doc = obj['query']['insert'] if 'documents' in obj['query']: if isinstance(obj['query']['documents'], collections.Iterable) \ and len(obj['query']['documents']) > 1: query = json_encoder.encode(obj['query']['documents']) + ". " else: query = json_encoder.encode(obj['query']['documents'][0]) + ". " else: query = "" else: # Mongo 3.6+ profiler looks like doens't record insert details (document object), and # some tools like Robo 3T (formerly Robomongo) allows to duplicate collections # but the profiler doesn't record the element inserted doc = obj['ns'].split(".")[-1] query = "" query += '%s inserted.' % obj['ninserted'] elif operation == 'remove': doc = obj['ns'].split(".")[-1] if mongo_version < "3.6": query = json_encoder.encode(obj['query']) if 'query' in obj else "{}" else: query = json_encoder.encode(obj['command']['q']) if 'command' in obj and 'q' in obj['command'] else "{}" query += '. %s deleted.' 
% obj['ndeleted'] elif operation == "command": if 'count' in obj["command"]: operation = "count" query = json_encoder.encode(obj['command']['query']) elif 'aggregate' in obj["command"]: operation = "aggregate" query = json_encoder.encode(obj['command']['pipeline']) elif 'distinct' in obj["command"]: operation = "distinct" query = json_encoder.encode(obj['command']['query']) query = '"%s", %s' % (obj['command']['key'], query) elif 'drop' in obj["command"]: operation = "drop" query = "" elif 'findandmodify' in obj["command"]: operation = "findandmodify" query = "query: " + json_encoder.encode(obj['command']['query']) if 'sort' in obj["command"]: query += ", sort: " + json_encoder.encode(obj['command']['sort']) if 'update' in obj["command"]: query += ", update: " + json_encoder.encode(obj['command']['update']) if 'remove' in obj["command"]: query += ", remove: " + str(obj['command']['remove']).lower() if 'fields' in obj["command"]: query += ", fields: " + json_encoder.encode(obj['command']['fields']) if 'upsert' in obj["command"]: query += ", upsert: " + str(obj['command']['upsert']).lower() if 'new' in obj["command"]: query += ", new: " + str(obj['command']['new']).lower() elif 'group' in obj["command"]: operation = "group" doc = obj["command"]['group']["ns"] if 'key' in obj['command']['group']: key = "key: " + json_encoder.encode(obj['command']['group']['key']) else: key = None if 'initial' in obj['command']['group']: initial = "initial: " + json_encoder.encode(obj['command']['group']['initial']) else: initial = None if 'cond' in obj['command']['group']: cond = "cond: " + json_encoder.encode(obj['command']['group']['cond']) else: cond = None if '$keyf' in obj['command']['group']: key_function = "keyf: " + min_script(obj['command']['group']['$keyf']) else: key_function = None if '$reduce' in obj['command']['group']: reduce_func = "reduce: " + min_script(obj['command']['group']['$reduce']) else: reduce_func = None if 'finalize' in obj['command']['group']: finalize_func = "finalize: " + min_script(obj['command']['group']['finalize']) else: finalize_func = None query = ", ".join(list(filter(lambda x: x, (key, reduce_func, initial, key_function, cond, finalize_func)))) elif 'map' in obj["command"]: operation = "map" doc = obj["command"]["mapreduce"] del obj["command"]["mapreduce"] map_func = min_script(obj['command']["map"]) del obj['command']["map"] reduce_func = min_script(obj['command']["reduce"]) del obj['command']["reduce"] query = "{%s, %s, %s}" % (map_func, reduce_func, json_encoder.encode(obj['command'])) else: warn('Unknown command operation\nDump: %s' % json_encoder.encode(obj)) if not doc: doc = obj["command"][operation] else: warn('Unknown operation "%s"\nDump: %s' % (operation, json_encoder.encode(obj))) if metadata: met = [] for m in metadata: if m in obj and obj[m] != {}: q = m + ": " if isinstance(obj[m], str): q += '"%s"' % obj[m] elif isinstance(obj[m], dict): q += json_encoder.encode(obj[m]) else: q += str(obj[m]) met.append(q) if met: if not query.endswith("."): query += ". " if not query.endswith(" "): query += " " query += ", ".join(met) sys.stdout.write("%s %s [%s] : %s\n" % (ts_time.strftime("%Y-%m-%d %H:%M:%S.%f")[:-3], operation.upper().ljust(9), doc, query)) sys.stdout.flush() # Allows pipe the output during the execution with others tools like 'grep' except (KeyError, TypeError): warn('Unknown registry\nDump: %s' % json_encoder.encode(obj))
[ "Print", "the", "dict", "returned", "by", "a", "MongoDB", "Query", "in", "the", "standard", "output", "." ]
mrsarm/mongotail
python
https://github.com/mrsarm/mongotail/blob/82ba74e32eff92faa320833a8d19c58555f9cd49/mongotail/out.py#L35-L196
[ "def", "print_obj", "(", "obj", ",", "verbose", ",", "metadata", ",", "mongo_version", ")", ":", "if", "verbose", ":", "sys", ".", "stdout", ".", "write", "(", "json_encoder", ".", "encode", "(", "obj", ")", "+", "'\\n'", ")", "sys", ".", "stdout", ".", "flush", "(", ")", "else", ":", "try", ":", "ts_time", "=", "obj", "[", "'ts'", "]", "operation", "=", "obj", "[", "'op'", "]", "doc", "=", "None", "if", "operation", "==", "'query'", ":", "if", "mongo_version", "<", "\"3.2\"", ":", "doc", "=", "obj", "[", "'ns'", "]", ".", "split", "(", "\".\"", ")", "[", "-", "1", "]", "query", "=", "json_encoder", ".", "encode", "(", "obj", "[", "'query'", "]", ")", "if", "'query'", "in", "obj", "else", "\"{}\"", "else", ":", "if", "\"query\"", "in", "obj", ":", "cmd", "=", "obj", "[", "'query'", "]", "# Mongo 3.2 - 3.4", "else", ":", "cmd", "=", "obj", "[", "'command'", "]", "# Mongo 3.6+", "doc", "=", "cmd", "[", "'find'", "]", "query", "=", "json_encoder", ".", "encode", "(", "cmd", "[", "'filter'", "]", ")", "if", "'filter'", "in", "cmd", "else", "\"{}\"", "if", "'sort'", "in", "cmd", ":", "query", "+=", "', sort: '", "+", "json_encoder", ".", "encode", "(", "cmd", "[", "'sort'", "]", ")", "query", "+=", "'. %s returned.'", "%", "obj", "[", "'nreturned'", "]", "elif", "operation", "==", "'update'", ":", "doc", "=", "obj", "[", "'ns'", "]", ".", "split", "(", "\".\"", ")", "[", "-", "1", "]", "if", "mongo_version", "<", "\"3.6\"", ":", "query", "=", "json_encoder", ".", "encode", "(", "obj", "[", "'query'", "]", ")", "if", "'query'", "in", "obj", "else", "\"{}\"", "query", "+=", "', '", "+", "json_encoder", ".", "encode", "(", "obj", "[", "'updateobj'", "]", ")", "else", ":", "query", "=", "json_encoder", ".", "encode", "(", "obj", "[", "'command'", "]", "[", "'q'", "]", ")", "if", "'command'", "in", "obj", "and", "'q'", "in", "obj", "[", "'command'", "]", "else", "\"{}\"", "query", "+=", "', '", "+", "json_encoder", ".", "encode", "(", "obj", "[", "'command'", "]", "[", "'u'", "]", ")", "if", "'nModified'", "in", "obj", ":", "query", "+=", "'. %s updated.'", "%", "obj", "[", "'nModified'", "]", "elif", "'nMatched'", "in", "obj", ":", "query", "+=", "'. %s updated.'", "%", "obj", "[", "'nMatched'", "]", "elif", "operation", "==", "'insert'", ":", "if", "mongo_version", "<", "\"3.2\"", ":", "doc", "=", "obj", "[", "'ns'", "]", ".", "split", "(", "\".\"", ")", "[", "-", "1", "]", "query", "=", "json_encoder", ".", "encode", "(", "obj", "[", "'query'", "]", ")", "if", "'query'", "in", "obj", "else", "\"{}\"", "else", ":", "if", "'query'", "in", "obj", ":", "doc", "=", "obj", "[", "'query'", "]", "[", "'insert'", "]", "if", "'documents'", "in", "obj", "[", "'query'", "]", ":", "if", "isinstance", "(", "obj", "[", "'query'", "]", "[", "'documents'", "]", ",", "collections", ".", "Iterable", ")", "and", "len", "(", "obj", "[", "'query'", "]", "[", "'documents'", "]", ")", ">", "1", ":", "query", "=", "json_encoder", ".", "encode", "(", "obj", "[", "'query'", "]", "[", "'documents'", "]", ")", "+", "\". \"", "else", ":", "query", "=", "json_encoder", ".", "encode", "(", "obj", "[", "'query'", "]", "[", "'documents'", "]", "[", "0", "]", ")", "+", "\". 
\"", "else", ":", "query", "=", "\"\"", "else", ":", "# Mongo 3.6+ profiler looks like doens't record insert details (document object), and", "# some tools like Robo 3T (formerly Robomongo) allows to duplicate collections", "# but the profiler doesn't record the element inserted", "doc", "=", "obj", "[", "'ns'", "]", ".", "split", "(", "\".\"", ")", "[", "-", "1", "]", "query", "=", "\"\"", "query", "+=", "'%s inserted.'", "%", "obj", "[", "'ninserted'", "]", "elif", "operation", "==", "'remove'", ":", "doc", "=", "obj", "[", "'ns'", "]", ".", "split", "(", "\".\"", ")", "[", "-", "1", "]", "if", "mongo_version", "<", "\"3.6\"", ":", "query", "=", "json_encoder", ".", "encode", "(", "obj", "[", "'query'", "]", ")", "if", "'query'", "in", "obj", "else", "\"{}\"", "else", ":", "query", "=", "json_encoder", ".", "encode", "(", "obj", "[", "'command'", "]", "[", "'q'", "]", ")", "if", "'command'", "in", "obj", "and", "'q'", "in", "obj", "[", "'command'", "]", "else", "\"{}\"", "query", "+=", "'. %s deleted.'", "%", "obj", "[", "'ndeleted'", "]", "elif", "operation", "==", "\"command\"", ":", "if", "'count'", "in", "obj", "[", "\"command\"", "]", ":", "operation", "=", "\"count\"", "query", "=", "json_encoder", ".", "encode", "(", "obj", "[", "'command'", "]", "[", "'query'", "]", ")", "elif", "'aggregate'", "in", "obj", "[", "\"command\"", "]", ":", "operation", "=", "\"aggregate\"", "query", "=", "json_encoder", ".", "encode", "(", "obj", "[", "'command'", "]", "[", "'pipeline'", "]", ")", "elif", "'distinct'", "in", "obj", "[", "\"command\"", "]", ":", "operation", "=", "\"distinct\"", "query", "=", "json_encoder", ".", "encode", "(", "obj", "[", "'command'", "]", "[", "'query'", "]", ")", "query", "=", "'\"%s\", %s'", "%", "(", "obj", "[", "'command'", "]", "[", "'key'", "]", ",", "query", ")", "elif", "'drop'", "in", "obj", "[", "\"command\"", "]", ":", "operation", "=", "\"drop\"", "query", "=", "\"\"", "elif", "'findandmodify'", "in", "obj", "[", "\"command\"", "]", ":", "operation", "=", "\"findandmodify\"", "query", "=", "\"query: \"", "+", "json_encoder", ".", "encode", "(", "obj", "[", "'command'", "]", "[", "'query'", "]", ")", "if", "'sort'", "in", "obj", "[", "\"command\"", "]", ":", "query", "+=", "\", sort: \"", "+", "json_encoder", ".", "encode", "(", "obj", "[", "'command'", "]", "[", "'sort'", "]", ")", "if", "'update'", "in", "obj", "[", "\"command\"", "]", ":", "query", "+=", "\", update: \"", "+", "json_encoder", ".", "encode", "(", "obj", "[", "'command'", "]", "[", "'update'", "]", ")", "if", "'remove'", "in", "obj", "[", "\"command\"", "]", ":", "query", "+=", "\", remove: \"", "+", "str", "(", "obj", "[", "'command'", "]", "[", "'remove'", "]", ")", ".", "lower", "(", ")", "if", "'fields'", "in", "obj", "[", "\"command\"", "]", ":", "query", "+=", "\", fields: \"", "+", "json_encoder", ".", "encode", "(", "obj", "[", "'command'", "]", "[", "'fields'", "]", ")", "if", "'upsert'", "in", "obj", "[", "\"command\"", "]", ":", "query", "+=", "\", upsert: \"", "+", "str", "(", "obj", "[", "'command'", "]", "[", "'upsert'", "]", ")", ".", "lower", "(", ")", "if", "'new'", "in", "obj", "[", "\"command\"", "]", ":", "query", "+=", "\", new: \"", "+", "str", "(", "obj", "[", "'command'", "]", "[", "'new'", "]", ")", ".", "lower", "(", ")", "elif", "'group'", "in", "obj", "[", "\"command\"", "]", ":", "operation", "=", "\"group\"", "doc", "=", "obj", "[", "\"command\"", "]", "[", "'group'", "]", "[", "\"ns\"", "]", "if", "'key'", "in", "obj", "[", "'command'", "]", "[", "'group'", 
"]", ":", "key", "=", "\"key: \"", "+", "json_encoder", ".", "encode", "(", "obj", "[", "'command'", "]", "[", "'group'", "]", "[", "'key'", "]", ")", "else", ":", "key", "=", "None", "if", "'initial'", "in", "obj", "[", "'command'", "]", "[", "'group'", "]", ":", "initial", "=", "\"initial: \"", "+", "json_encoder", ".", "encode", "(", "obj", "[", "'command'", "]", "[", "'group'", "]", "[", "'initial'", "]", ")", "else", ":", "initial", "=", "None", "if", "'cond'", "in", "obj", "[", "'command'", "]", "[", "'group'", "]", ":", "cond", "=", "\"cond: \"", "+", "json_encoder", ".", "encode", "(", "obj", "[", "'command'", "]", "[", "'group'", "]", "[", "'cond'", "]", ")", "else", ":", "cond", "=", "None", "if", "'$keyf'", "in", "obj", "[", "'command'", "]", "[", "'group'", "]", ":", "key_function", "=", "\"keyf: \"", "+", "min_script", "(", "obj", "[", "'command'", "]", "[", "'group'", "]", "[", "'$keyf'", "]", ")", "else", ":", "key_function", "=", "None", "if", "'$reduce'", "in", "obj", "[", "'command'", "]", "[", "'group'", "]", ":", "reduce_func", "=", "\"reduce: \"", "+", "min_script", "(", "obj", "[", "'command'", "]", "[", "'group'", "]", "[", "'$reduce'", "]", ")", "else", ":", "reduce_func", "=", "None", "if", "'finalize'", "in", "obj", "[", "'command'", "]", "[", "'group'", "]", ":", "finalize_func", "=", "\"finalize: \"", "+", "min_script", "(", "obj", "[", "'command'", "]", "[", "'group'", "]", "[", "'finalize'", "]", ")", "else", ":", "finalize_func", "=", "None", "query", "=", "\", \"", ".", "join", "(", "list", "(", "filter", "(", "lambda", "x", ":", "x", ",", "(", "key", ",", "reduce_func", ",", "initial", ",", "key_function", ",", "cond", ",", "finalize_func", ")", ")", ")", ")", "elif", "'map'", "in", "obj", "[", "\"command\"", "]", ":", "operation", "=", "\"map\"", "doc", "=", "obj", "[", "\"command\"", "]", "[", "\"mapreduce\"", "]", "del", "obj", "[", "\"command\"", "]", "[", "\"mapreduce\"", "]", "map_func", "=", "min_script", "(", "obj", "[", "'command'", "]", "[", "\"map\"", "]", ")", "del", "obj", "[", "'command'", "]", "[", "\"map\"", "]", "reduce_func", "=", "min_script", "(", "obj", "[", "'command'", "]", "[", "\"reduce\"", "]", ")", "del", "obj", "[", "'command'", "]", "[", "\"reduce\"", "]", "query", "=", "\"{%s, %s, %s}\"", "%", "(", "map_func", ",", "reduce_func", ",", "json_encoder", ".", "encode", "(", "obj", "[", "'command'", "]", ")", ")", "else", ":", "warn", "(", "'Unknown command operation\\nDump: %s'", "%", "json_encoder", ".", "encode", "(", "obj", ")", ")", "if", "not", "doc", ":", "doc", "=", "obj", "[", "\"command\"", "]", "[", "operation", "]", "else", ":", "warn", "(", "'Unknown operation \"%s\"\\nDump: %s'", "%", "(", "operation", ",", "json_encoder", ".", "encode", "(", "obj", ")", ")", ")", "if", "metadata", ":", "met", "=", "[", "]", "for", "m", "in", "metadata", ":", "if", "m", "in", "obj", "and", "obj", "[", "m", "]", "!=", "{", "}", ":", "q", "=", "m", "+", "\": \"", "if", "isinstance", "(", "obj", "[", "m", "]", ",", "str", ")", ":", "q", "+=", "'\"%s\"'", "%", "obj", "[", "m", "]", "elif", "isinstance", "(", "obj", "[", "m", "]", ",", "dict", ")", ":", "q", "+=", "json_encoder", ".", "encode", "(", "obj", "[", "m", "]", ")", "else", ":", "q", "+=", "str", "(", "obj", "[", "m", "]", ")", "met", ".", "append", "(", "q", ")", "if", "met", ":", "if", "not", "query", ".", "endswith", "(", "\".\"", ")", ":", "query", "+=", "\". 
\"", "if", "not", "query", ".", "endswith", "(", "\" \"", ")", ":", "query", "+=", "\" \"", "query", "+=", "\", \"", ".", "join", "(", "met", ")", "sys", ".", "stdout", ".", "write", "(", "\"%s %s [%s] : %s\\n\"", "%", "(", "ts_time", ".", "strftime", "(", "\"%Y-%m-%d %H:%M:%S.%f\"", ")", "[", ":", "-", "3", "]", ",", "operation", ".", "upper", "(", ")", ".", "ljust", "(", "9", ")", ",", "doc", ",", "query", ")", ")", "sys", ".", "stdout", ".", "flush", "(", ")", "# Allows pipe the output during the execution with others tools like 'grep'", "except", "(", "KeyError", ",", "TypeError", ")", ":", "warn", "(", "'Unknown registry\\nDump: %s'", "%", "json_encoder", ".", "encode", "(", "obj", ")", ")" ]
82ba74e32eff92faa320833a8d19c58555f9cd49
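Whatever branch handles the operation, print_obj ends by writing one log line per profile entry: a millisecond-precision timestamp, the operation upper-cased and padded to nine characters, the collection name, and the query summary. The values below are made up purely to show the resulting line format:

import sys
from datetime import datetime

# Invented stand-ins for fields pulled out of a profile document.
ts_time = datetime(2023, 5, 1, 12, 30, 45, 123456)
operation, doc, query = "query", "users", '{"age": {"$gt": 21}}. 3 returned.'

sys.stdout.write("%s %s [%s] : %s\n" % (ts_time.strftime("%Y-%m-%d %H:%M:%S.%f")[:-3],
                                        operation.upper().ljust(9), doc, query))
# 2023-05-01 12:30:45.123 QUERY     [users] : {"age": {"$gt": 21}}. 3 returned.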
test
connect
Connect with `address`, and return a tuple with a :class:`~pymongo.MongoClient`, and a :class:`~pymongo.database.Database` object. :param address: a string representation with the db address :param args: connection arguments: - username: username for authentication (optional) - password: password for authentication. If username is given and password isn't, it's asked from tty. - auth_database: authenticate the username and password against that database (optional). If not specified, the database specified in address will be used. - ssl, ssl_certfile, ssl_keyfile, ssl_cert_reqs, ssl_ca_certs: SSL authentication options :return: a tuple with ``(client, db)``
mongotail/conn.py
def connect(address, args): """ Connect with `address`, and return a tuple with a :class:`~pymongo.MongoClient`, and a :class:`~pymongo.database.Database` object. :param address: a string representation with the db address :param args: connection arguments: - username: username for authentication (optional) - password: password for authentication. If username is given and password isn't, it's asked from tty. - auth_database: authenticate the username and password against that database (optional). If not specified, the database specified in address will be used. - ssl, ssl_certfile, ssl_keyfile, ssl_cert_reqs, ssl_ca_certs: SSL authentication options :return: a tuple with ``(client, db)`` """ try: host, port, dbname = get_res_address(address) except AddressError as e: error_parsing(str(e).replace("resource", "database")) try: options = {} if args.ssl: options["ssl"] = True options["ssl_certfile"] = args.ssl_cert_file options["ssl_keyfile"] = args.ssl_key_file options["ssl_cert_reqs"] = args.ssl_cert_reqs options["ssl_ca_certs"] = args.ssl_ca_certs client = MongoClient(host=host, port=port, **options) except Exception as e: error("Error trying to connect: %s" % str(e), ECONNREFUSED) username = args.username password = args.password auth_database = args.auth_database if username: if password is None: password = getpass.getpass() if auth_database is None: auth_database = dbname try: auth_db = client[auth_database] auth_db.authenticate(username, password) except Exception as e: error("Error trying to authenticate: %s" % str(e), -3) db = client[dbname] return client, db
def connect(address, args): """ Connect with `address`, and return a tuple with a :class:`~pymongo.MongoClient`, and a :class:`~pymongo.database.Database` object. :param address: a string representation with the db address :param args: connection arguments: - username: username for authentication (optional) - password: password for authentication. If username is given and password isn't, it's asked from tty. - auth_database: authenticate the username and password against that database (optional). If not specified, the database specified in address will be used. - ssl, ssl_certfile, ssl_keyfile, ssl_cert_reqs, ssl_ca_certs: SSL authentication options :return: a tuple with ``(client, db)`` """ try: host, port, dbname = get_res_address(address) except AddressError as e: error_parsing(str(e).replace("resource", "database")) try: options = {} if args.ssl: options["ssl"] = True options["ssl_certfile"] = args.ssl_cert_file options["ssl_keyfile"] = args.ssl_key_file options["ssl_cert_reqs"] = args.ssl_cert_reqs options["ssl_ca_certs"] = args.ssl_ca_certs client = MongoClient(host=host, port=port, **options) except Exception as e: error("Error trying to connect: %s" % str(e), ECONNREFUSED) username = args.username password = args.password auth_database = args.auth_database if username: if password is None: password = getpass.getpass() if auth_database is None: auth_database = dbname try: auth_db = client[auth_database] auth_db.authenticate(username, password) except Exception as e: error("Error trying to authenticate: %s" % str(e), -3) db = client[dbname] return client, db
[ "Connect", "with", "address", "and", "return", "a", "tuple", "with", "a", ":", "class", ":", "~pymongo", ".", "MongoClient", "and", "a", ":", "class", ":", "~pymongo", ".", "database", ".", "Database", "object", ".", ":", "param", "address", ":", "a", "string", "representation", "with", "the", "db", "address", ":", "param", "args", ":", "connection", "arguments", ":", "-", "username", ":", "username", "for", "authentication", "(", "optional", ")", "-", "password", ":", "password", "for", "authentication", ".", "If", "username", "is", "given", "and", "password", "isn", "t", "it", "s", "asked", "from", "tty", ".", "-", "auth_database", ":", "authenticate", "the", "username", "and", "password", "against", "that", "database", "(", "optional", ")", ".", "If", "not", "specified", "the", "database", "specified", "in", "address", "will", "be", "used", ".", "-", "ssl", "ssl_certfile", "ssl_keyfile", "ssl_cert_reqs", "ssl_ca_certs", ":", "SSL", "authentication", "options", ":", "return", ":", "a", "tuple", "with", "(", "client", "db", ")" ]
mrsarm/mongotail
python
https://github.com/mrsarm/mongotail/blob/82ba74e32eff92faa320833a8d19c58555f9cd49/mongotail/conn.py#L31-L78
[ "def", "connect", "(", "address", ",", "args", ")", ":", "try", ":", "host", ",", "port", ",", "dbname", "=", "get_res_address", "(", "address", ")", "except", "AddressError", "as", "e", ":", "error_parsing", "(", "str", "(", "e", ")", ".", "replace", "(", "\"resource\"", ",", "\"database\"", ")", ")", "try", ":", "options", "=", "{", "}", "if", "args", ".", "ssl", ":", "options", "[", "\"ssl\"", "]", "=", "True", "options", "[", "\"ssl_certfile\"", "]", "=", "args", ".", "ssl_cert_file", "options", "[", "\"ssl_keyfile\"", "]", "=", "args", ".", "ssl_key_file", "options", "[", "\"ssl_cert_reqs\"", "]", "=", "args", ".", "ssl_cert_reqs", "options", "[", "\"ssl_ca_certs\"", "]", "=", "args", ".", "ssl_ca_certs", "client", "=", "MongoClient", "(", "host", "=", "host", ",", "port", "=", "port", ",", "*", "*", "options", ")", "except", "Exception", "as", "e", ":", "error", "(", "\"Error trying to connect: %s\"", "%", "str", "(", "e", ")", ",", "ECONNREFUSED", ")", "username", "=", "args", ".", "username", "password", "=", "args", ".", "password", "auth_database", "=", "args", ".", "auth_database", "if", "username", ":", "if", "password", "is", "None", ":", "password", "=", "getpass", ".", "getpass", "(", ")", "if", "auth_database", "is", "None", ":", "auth_database", "=", "dbname", "try", ":", "auth_db", "=", "client", "[", "auth_database", "]", "auth_db", ".", "authenticate", "(", "username", ",", "password", ")", "except", "Exception", "as", "e", ":", "error", "(", "\"Error trying to authenticate: %s\"", "%", "str", "(", "e", ")", ",", "-", "3", ")", "db", "=", "client", "[", "dbname", "]", "return", "client", ",", "db" ]
82ba74e32eff92faa320833a8d19c58555f9cd49
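connect() only fills in SSL keyword options when --ssl was passed, then hands the dict to MongoClient(host=..., port=..., **options). The sketch below reproduces just that mapping, with types.SimpleNamespace standing in for the parsed CLI arguments; the certificate paths are placeholders and no client is constructed:

from types import SimpleNamespace

# Attribute names mirror the ones connect() reads from the argparse namespace.
args = SimpleNamespace(ssl=True, ssl_cert_file="client.pem", ssl_key_file=None,
                       ssl_cert_reqs=None, ssl_ca_certs="ca.pem")

options = {}
if args.ssl:
    options["ssl"] = True
    options["ssl_certfile"] = args.ssl_cert_file
    options["ssl_keyfile"] = args.ssl_key_file
    options["ssl_cert_reqs"] = args.ssl_cert_reqs
    options["ssl_ca_certs"] = args.ssl_ca_certs

print(options)
# {'ssl': True, 'ssl_certfile': 'client.pem', 'ssl_keyfile': None,
#  'ssl_cert_reqs': None, 'ssl_ca_certs': 'ca.pem'}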
test
error
Print `msg` error and exit with status `exit_code`
mongotail/err.py
def error(msg, exit_code): """ Print `msg` error and exit with status `exit_code` """ sys.stderr.write("%s\ntry 'mongotail --help' for more information\n" % msg) sys.stderr.flush() exit(exit_code)
def error(msg, exit_code): """ Print `msg` error and exit with status `exit_code` """ sys.stderr.write("%s\ntry 'mongotail --help' for more information\n" % msg) sys.stderr.flush() exit(exit_code)
[ "Print", "msg", "error", "and", "exit", "with", "status", "exit_code" ]
mrsarm/mongotail
python
https://github.com/mrsarm/mongotail/blob/82ba74e32eff92faa320833a8d19c58555f9cd49/mongotail/err.py#L33-L39
[ "def", "error", "(", "msg", ",", "exit_code", ")", ":", "sys", ".", "stderr", ".", "write", "(", "\"%s\\ntry 'mongotail --help' for more information\\n\"", "%", "msg", ")", "sys", ".", "stderr", ".", "flush", "(", ")", "exit", "(", "exit_code", ")" ]
82ba74e32eff92faa320833a8d19c58555f9cd49